blob_id
stringlengths
40
40
directory_id
stringlengths
40
40
path
stringlengths
2
327
content_id
stringlengths
40
40
detected_licenses
listlengths
0
91
license_type
stringclasses
2 values
repo_name
stringlengths
5
134
snapshot_id
stringlengths
40
40
revision_id
stringlengths
40
40
branch_name
stringclasses
46 values
visit_date
timestamp[us]date
2016-08-02 22:44:29
2023-09-06 08:39:28
revision_date
timestamp[us]date
1977-08-08 00:00:00
2023-09-05 12:13:49
committer_date
timestamp[us]date
1977-08-08 00:00:00
2023-09-05 12:13:49
github_id
int64
19.4k
671M
star_events_count
int64
0
40k
fork_events_count
int64
0
32.4k
gha_license_id
stringclasses
14 values
gha_event_created_at
timestamp[us]date
2012-06-21 16:39:19
2023-09-14 21:52:42
gha_created_at
timestamp[us]date
2008-05-25 01:21:32
2023-06-28 13:19:12
gha_language
stringclasses
60 values
src_encoding
stringclasses
24 values
language
stringclasses
1 value
is_vendor
bool
2 classes
is_generated
bool
2 classes
length_bytes
int64
7
9.18M
extension
stringclasses
20 values
filename
stringlengths
1
141
content
stringlengths
7
9.18M
4e223b1f5b7326d4d81346a3b24954f23fbffeae
72d9009d19e92b721d5cc0e8f8045e1145921130
/terra/man/time.Rd
fcad8722c54b0cb64ad785a636049f2f895dcf04
[]
no_license
akhikolla/TestedPackages-NoIssues
be46c49c0836b3f0cf60e247087089868adf7a62
eb8d498cc132def615c090941bc172e17fdce267
refs/heads/master
2023-03-01T09:10:17.227119
2021-01-25T19:44:44
2021-01-25T19:44:44
332,027,727
1
0
null
null
null
null
UTF-8
R
false
false
712
rd
time.Rd
\name{time} \alias{time} \alias{time<-} \alias{time,SpatRaster-method} \alias{time<-,SpatRaster-method} \title{time of SpatRaster layers} \description{ Get or set the time of the layers of a SpatRaster. Experimental. Currently only Date's allowed. } \usage{ \S4method{time}{SpatRaster}(x, ...) \S4method{time}{SpatRaster}(x)<-value } \seealso{\code{\link{depth}}} \arguments{ \item{x}{SpatRaster} \item{value}{"Date", "POSIXt", or numeric} \item{...}{additional arguments. None implemented} } \value{ Date } \examples{ s <- rast(system.file("ex/logo.tif", package="terra")) time(s) <- as.Date("2001-05-04") + 0:2 time(s) } \keyword{spatial}
bd2737d4a78e583d69aa2dbb0d1c93f3a252ca15
adb6cc1648bb33b06e73be9dd35112d6f639b2db
/plot2.R
3d002f73d99ec4961c1f390aade368ba6024e315
[]
no_license
ohjho/ExData_Plotting1
f5ccdaed99d27a4e86e9ced7a92a58735503472f
5a730c71728c644b9f1451ca5333f1c81e47b1a8
refs/heads/master
2021-05-31T13:22:23.532957
2016-04-18T09:52:20
2016-04-18T09:52:20
56,209,764
0
0
null
null
null
null
UTF-8
R
false
false
824
r
plot2.R
# Loading and cleaning Data fname <- "household_power_consumption.txt" if (!file.exists(fname)){ message(fname, " not found. Exiting script...") stop("See information on the dataset from README.md or download here: https://d396qusza40orc.cloudfront.net/exdata%2Fdata%2Fhousehold_power_consumption.zip") } raw <- read.table(fname, header=TRUE,sep=";", na.string="?") raw$Date <- as.Date(raw$Date, format="%d/%m/%Y") energy <- subset(raw, Date >= "2007-02-01" & Date <= "2007-02-02") rm(raw) # Image Creation message("Data loaded. Created Image...") png(file="plot2.png",width=480, height=480) with(energy,plot(as.POSIXct(paste(Date,Time)),Global_active_power, ylab="Global Active Power (kilowatts)", xlab="", type="l") ) dev.off() rm(energy) message("Successful.")
a83dec7bbc49c79dd3644db244321f3c33d1732c
63f78bd8589218f2b0317158acbeeec3c3c43340
/R/TIMEDEC/TIMEDEC.R
565de35c977e334152d5b84de08e12a3ac3685c0
[]
no_license
Decision-Neuroscience-Lab/boPro
f462d94e9800ff6fb2df3ae3e73a55731c943b24
8e0f9dca1ea6999d9e6ca9cdacf2fbd45db0a53a
refs/heads/master
2020-05-27T21:15:41.627997
2017-03-02T06:51:03
2017-03-02T06:51:03
83,645,323
2
1
null
null
null
null
UTF-8
R
false
false
11,175
r
TIMEDEC.R
# TIMEDEC ## Bowen J Fung, 2015 # Repeated measures for feedback manipulation feedMan <- read.csv("~/Documents/R/TIMEDEC/feedbackMan.csv") # Subset to only ECG data feedMan <- subset(feedMan, id < 121) require(PMCMR) h <- kruskal.test(k-k2 ~ as.factor(condition), data = feedMan) posthoc.kruskal.nemenyi.test(k-k2 ~ as.factor(condition), data = feedMan, g = as.factor(condition), method="Tukey") # Find mean ranks in MATLAB pairwise.wilcox.test((feedMan$k-feedMan$k2), g = as.factor(feedMan$condition), p.adj="none", exact=T) # Test if 'overestimate' < 'underestimate' for increase (more negative) in k s <- wilcox.test(feedMan$k[feedMan$condition == 2]-feedMan$k2[feedMan$condition == 2], feedMan$k[feedMan$condition == 3]-feedMan$k2[feedMan$condition == 3]) # Test if 'control' < 'underestimate' for increase (more negative) in k wilcox.test(feedMan$k[feedMan$condition == 4]-feedMan$k2[feedMan$condition == 4], feedMan$k[feedMan$condition == 3]-feedMan$k2[feedMan$condition == 3]) # Test if 'control' > 'overestimate' for increase (more negative) in k wilcox.test(feedMan$k[feedMan$condition == 4]-feedMan$k2[feedMan$condition == 4], feedMan$k[feedMan$condition == 2]-feedMan$k2[feedMan$condition == 2]) # Correlations and multiple comparison corrections data <- read.table("~/Documents/R/TIMEDEC/data.csv") edr <- read.csv("/Users/Bowen/Documents/R/TIMEDEC/timedecKubios.csv") names(edr)[1] <- "id" edr <- edr[,c("id","edr")] data <- merge(data,edr,by = "id", all.x = T, all.y = F) # Physiological censoring data$meanHR[data$meanHR == 0] <- NA data$sdnn[data$sdnn == 0] = NA data$sdnni[data$sdnni == 0] = NA data$meanHR[data$meanHR > 140] = NA data$sdnn[data$sdnn < 20] = NA data$sdnni[data$sdnni < 20] = NA data$hfpowfft[data$hfpowfft > 4000] = NA data$lfpowfft[data$lfpowfft > 4000] = NA data <- subset(data, filter == 1 & id < 121) # Subset to clean data attach(data) require(Hmisc) require(corrgram) require(ggplot2) require(xtable) require(ppcor) source("/Users/Bowen/Documents/R/misc 
functions/corrstars.R") source("/Users/Bowen/Documents/R/misc functions/xtable.decimal.R") # DR and HR vars <- c("meanDiff","cvReproduction","stevens1","stevens2","meanHR","sdnn","hfpowfft","lfpowfft") temp <- data[vars] corrgram(temp,order=TRUE,lower.panel = panel.ellipse, upper.panel = panel.pts, cor.method = "spearman") corr <- rcorr(as.matrix(temp), type = "spearman") fdrCorr <- apply(corr$P, 2, p.adjust, method = "holm") # Methods are bonferroni, holm, hochberg, hommel, BH, or BY corrstars(as.matrix(temp), type = "spearman", method = "none") xtable(corrstars(as.matrix(temp))) # Control for respiration (by including heart rate) require(ppcor) vars <- c("stevens2","hfpowfft","meanHR") temp <- data[vars] temp <- temp[complete.cases(temp),] pcor(temp, method = "spearman") spcor(temp, method = "spearman") # Control for respiration (by including peak HF) vars <- c("stevens2","hfpowfft","hfpeakfft") temp <- data[vars] temp <- temp[complete.cases(temp),] pcor(temp, method = "spearman") spcor(temp, method = "spearman") # Control for respiration (by including edr) vars <- c("stevens2","hfpowfft","edr") temp <- data[vars] temp <- temp[complete.cases(temp),] pcor(temp, method = "spearman") spcor(temp, method = "spearman") # Plots ggplot(data, aes(x = stevens1, y = lfpowfft)) + geom_point(aes(color = stevens1)) + geom_smooth(method = "lm", se = TRUE) write.table(round(corr$r, digits = 2), file = "~/Documents/R/TIMEDEC/DRHRcorrR.csv") write.table(round(corr$P, digits = 3), file = "~/Documents/R/TIMEDEC/DRHRcorrP.csv") write.table(round(fdrCorr,digits = 3), file = "~/Documents/R/TIMEDEC/DRHRcorrFDR.csv") # TD and HR vars <- c("bayesLogK","meanHR","sdnn","hfpowfft","lfpowfft") temp2 <- data[vars] corrgram(temp2,order=TRUE,lower.panel=panel.ellipse,cor.method = "spearman") corr <- rcorr(as.matrix(temp2), type = "spearman") fdrCorr <- apply(corr$P, 1, p.adjust, method = "BH") # Methods are bonferroni, holm, hochberg, hommel, BH, or BY corrstars(as.matrix(temp2), type = 
"spearman", method = "none") xtable(corrstars(as.matrix(temp2))) write.table(round(corr$r, digits = 2), file = "~/Documents/R/TIMEDEC/TDHRcorrR.csv") write.table(round(corr$P, digits = 3), file = "~/Documents/R/TIMEDEC/TDHRcorrP.csv") write.table(round(fdrCorr,digits = 3), file = "~/Documents/R/TIMEDEC/TDHRcorrFDR.csv") # Questionnaire and HR vars <- c("meanHR","sdnn","sdnni","hfpowfft","lfpowfft","ipipN","ipipE","ipipO","ipipA","ipipC","BIS","BASdrive","BASfun","BASreward","zaubAUC") temp3 <- data[vars] corrgram(temp3,order=TRUE,lower.panel=panel.ellipse,cor.method = "spearman") corr <- rcorr(as.matrix(temp3), type = "spearman") fdrCorr <- apply(corr$P, 1, p.adjust, method = "BH") # Methods are bonferroni, holm, hochberg, hommel, BH, or BY corrstars(as.matrix(temp3), type = "spearman", method = "none") xtable.decimal(corrstars(as.matrix(temp3), type = "spearman", method = "none")) write.table(round(corr$r, digits = 2), file = "~/Documents/R/TIMEDEC/QHRcorrR.csv") write.table(round(corr$P, digits = 3), file = "~/Documents/R/TIMEDEC/QHRcorrP.csv") write.table(round(fdrCorr,digits = 3), file = "~/Documents/R/TIMEDEC/QHRcorrFDR.csv") # TD and AUC vars <- c("k3","meanHR","zaubAUC") temp4 <- data[vars] corrgram(temp4,order=TRUE,lower.panel=panel.ellipse,cor.method = "spearman") corr <- rcorr(as.matrix(temp4), type = "spearman") fdrCorr <- apply(corr$P, 1, p.adjust, method = "BH") # Methods are bonferroni, holm, hochberg, hommel, BH, or BY corrstars(as.matrix(temp4), type = "spearman", method = "none") xtable(corrstars(as.matrix(temp4))) # Questionnaire, TD, DR vars <- c("k3","meanDiff","cvReproduction","stevens1","stevens2") #"bayesLogK","magEffect","zaubAUC") temp5 <- data[vars] corrgram(temp5,order=TRUE,lower.panel=panel.ellipse,cor.method = "spearman") corr <- rcorr(as.matrix(temp5), type = "spearman") fdrCorr <- apply(corr$P, 1, p.adjust, method = "BH") # Methods are bonferroni, holm, hochberg, hommel, BH, or BY corrstars(as.matrix(temp5), type = "spearman", method 
= "none") xtable.decimal(corrstars(as.matrix(temp5), type = "spearman", method = "none")) ## Correlation between carry-over effects and HR data coefs <- read.csv("~/Documents/R/TIMEDEC/COcoefs.csv") coefs <- subset(coefs,id %in% data$id) # Subset to clean data data <- cbind(data,coefs) vars <- c("sample","meanHR","sdnn","hfpowfft","lfpowfft") temp6 <- data[vars] corrgram(temp6,order=TRUE,lower.panel=panel.ellipse,cor.method = "spearman") corr <- rcorr(as.matrix(temp6), type = "spearman") fdrCorr <- apply(corr$P, 1, p.adjust, method = "BH") # Methods are bonferroni, holm, hochberg, hommel, BH, or BY corrstars(as.matrix(temp6), type = "spearman", method = "none") xtable.decimal(corrstars(as.matrix(temp6), type = "spearman", method = "none")) # Partial correlations (control for respiration) vars <- c("k3","stevens1","stevens2","meanDiff","meanHR","stevens1","stevens2","sdnn","hfpowfft","lfpowfft") temp <- data[vars] temp <- temp[complete.cases(temp),] x <- temp$stevens2 y <- temp$hfpowfft z <- temp$meanHR pcor.test(x, y, z, method = "spearman") spcor.test(x, y, z, method = "spearman") detach(data) # Check physiological data between feedback conditions require(PMCMR) kruskal.test(data$meanHR~ as.factor(condition), data = data) kruskal.test(data$sdnn~ as.factor(condition), data = data) kruskal.test(data$hfpowfft~ as.factor(condition), data = data) kruskal.test(data$lfpowfft~ as.factor(condition), data = data) posthoc.kruskal.nemenyi.test(data$meanHR ~ as.factor(condition), data = data, g = as.factor(condition), method="Tukey") # Find mean ranks in MATLAB posthoc.kruskal.nemenyi.test(data$sdnn ~ as.factor(condition), data = data, g = as.factor(condition), method="Tukey") # Find mean ranks in MATLAB posthoc.kruskal.nemenyi.test(data$hfpowfft ~ as.factor(condition), data = data, g = as.factor(condition), method="Tukey") # Find mean ranks in MATLAB posthoc.kruskal.nemenyi.test(data$lfpowfft ~ as.factor(condition), data = data, g = as.factor(condition), method="Tukey") # 
Find mean ranks in MATLAB pairwise.wilcox.test(data$meanHR, data$condition, p.adjust.method = "none", na.rm = T) wilcox.test(data[condition == 3,"meanHR"], data[condition == 4,"meanHR"], p.adjust.method = "none", na.rm = T) pairwise.wilcox.test(data$sdnn, data$condition, p.adjust.method = "none", na.rm = T) pairwise.wilcox.test(data$hfpowfft, data$condition, p.adjust.method = "none", na.rm = T) pairwise.wilcox.test(data$lfpowfft, data$condition, p.adjust.method = "none", na.rm = T) wilcox.test(data[condition == 2,"lfpowfft"], data[condition == 3,"lfpowfft"], p.adjust.method = "none", na.rm = T) # Control for feedback conditions ## Partial correlations vars <- c("stevens1","lfpowfft","condition") temp <- data[vars] temp <- temp[complete.cases(temp),] x <- temp$stevens1 y <- temp$lfpowfft z <- temp$condition pcor.test(x, y, z, method = "spearman") rcorr(x,y, type = "spearman") ## Quantile regression require("quantreg") qs = 1:9/10 # Mean HR and discount rate fit1 <- rq(k3 ~ meanHR, data = data, tau = qs) summary(fit1, se = "nid") plot(fit1) fit2 <- rq(k3 ~ meanHR + as.factor(condition), data = data, tau = qs) summary(fit2, se = "nid") plot(fit2) # HF and exponent fit1 <- rq(stevens2 ~ hfpowfft, data = data, tau = 0.5) summary(fit1, se = "nid") fit2 <- rq(stevens2 ~ hfpowfft + as.factor(condition), data = data, tau = 0.5) summary(fit2, se = "nid") # LF and exponent fit1 <- rq(stevens2 ~ lfpowfft, data = data, tau = 0.5) summary(fit1, se = "nid") fit2 <- rq(stevens2 ~ lfpowfft + as.factor(condition), data = data, tau = 0.5) summary(fit2, se = "nid") # LF and scale fit1 <- rq(stevens1 ~ lfpowfft, data = data, tau = 0.6) summary(fit1, se = "nid") fit2 <- rq(stevens1 ~ lfpowfft + as.factor(condition), data = data, tau = 0.6) summary(fit2, se = "nid") # Figures ## Duration reproduction timeSeries <- read.csv("~/Documents/R/TIMEDEC/timeSeries.csv", header = T) intervals <- read.csv("~/Documents/R/TIMEDEC/intervals.csv", header = T) kruskal.test(meanReproduction ~ 
as.factor(sampleInterval), data = intervals) kruskal.test(meanDiff ~ as.factor(sampleInterval), data = intervals) kruskal.test(stdReproduction ~ as.factor(sampleInterval), data = intervals) kruskal.test(cvReproduction ~ as.factor(sampleInterval), data = intervals) require("quantreg") qs = 1:9/10 fit1 <- rq(diff ~ sample, data = timeSeries, tau = qs) summary(fit1, se = "nid") plot(fit1) fit2 <- rq(k3 ~ meanHR + as.factor(condition), data = data, tau = qs) summary(fit2, se = "nid") plot(fit2) attach(timeSeries) require(ggplot2) require(ggExtra) plot_center = ggplot(timeSeries, aes(x=sample,y=reproduction)) + geom_point(aes(colour = factor(sample))) + stat_smooth(method = "lm", formula = y ~ poly(x,2), size = 1) ggMarginal(plot_center, type="density", margins = "y") ## Regression summary(lm(stevens2 ~ hfpowfft + lfpowfft, data = data)) # Grossman and Kollai (1993) suggestion (PNS activity only indexed by HF-HRV if HR taken into account) summary(lm(stevens2 ~ hfpowfft, data = data)) summary(lm(stevens2 ~ hfpowfft+ meanHR, data = data)) # Effect sizes for some tests cohensD(stevens2-1) t.test(stevens2-1)
97793633733242c523763bb1762471f8f016a425
f2d61b91feef89fa7523e2fedd8a3a0461ef0cba
/R/wp/wp_film.R
fcbade88e2c08661a9dad6973df7c6edd26431d6
[]
no_license
MarcinKosinski/trigeR5
c94f49476ecdf09da8277d5e81e7b2966b5642ce
1711e9b43a13d2b0073bd6e2cfbe967671dba427
refs/heads/master
2021-01-19T21:11:38.640114
2017-05-13T10:12:03
2017-05-13T10:12:03
88,619,642
4
11
null
2017-05-12T21:34:36
2017-04-18T11:56:39
JavaScript
UTF-8
R
false
false
1,941
r
wp_film.R
db <- dbConnect(drv = SQLite(), dbname = "data/wp.db") #### FILM #### adress <- "http://film.wp.pl/" adresses <- adress %>% read_html() %>% html_nodes(css = "._1lXcfrU") %>% html_text %>% tolower() %>% gsub("[[:space:][:punct:]]", "", .) %>% chartr("ąćęłńóśźż", "acelnoszz", .) %>% paste0(adress, .) adresses <- adresses[-6] links <- pblapply(adresses, function(one_ad) { ad_html <- read_html(one_ad) ad_nodes <- html_nodes(ad_html, css = "script") ad_text <- ad_nodes %>% grep("film.wp.pl", .) %>% ad_nodes[.] %>% as.character() biggest_list <- ad_text %>% stri_count_regex("film.wp.pl") %>% which.max() big_links <- biggest_list %>% ad_text[.] %>% stri_extract_all_regex("[[:alnum:]]+-[[:alnum:]-]+[[:digit:]]+[ag]") big_links }) %>% unlist() %>% unique %>% paste0(adress, .) # art_links <- links %>% # grep("a$", ., value = TRUE) # gal_links <- links %>% # grep("g$", ., value = TRUE) # Filter out galleries links <- links %>% grep("a$", ., value = TRUE) # db <- dbConnect(drv = SQLite(), dbname = "../data/wp.db") db_links <- dbGetQuery(db, "SELECT links FROM wp_film") links <- setdiff(links, db_links$links) bodies <- pblapply(links, function(link) { tryCatch(link %>% read_html() %>% html_nodes(css = "p , ._1HGmjUl , ._1xAmRvR") %>% html_text() %>% paste0(collapse = " "), error = function(e) { "Hmm... Nie ma takiej strony." }) }) %>% unlist() %>% gsub("'", "''", .) wp_film <- data_frame(links = links, bodies = bodies) %>% filter(bodies != "Hmm... Nie ma takiej strony.") db_next <- "', '" for (i in 1:nrow(wp_film)) { dbGetQuery(db, paste0("INSERT INTO wp_film (links, bodies) VALUES ('", wp_film$links[i], db_next, wp_film$bodies[i], "')")) } dbDisconnect(db) update_csv('wp_film')
3f3b27619ecbb722e4389d242d16a05076c1c851
4f8a077dc78236d66b3b81569990f7eddb3d45c3
/h2o-r/tests/testdir_demos/runit_demo_glrm_walking_gait.R
3cfdf82c41cbddf73bca102ee978f3e3c6d02166
[ "Apache-2.0" ]
permissive
konor/h2o-3
22b8e7c0e64597d18693f34a06079f242826cd92
77b27109c84c4739f9f1b7a3078f8992beefc813
refs/heads/master
2021-01-14T11:20:29.772798
2015-10-11T01:56:36
2015-10-11T01:56:36
null
0
0
null
null
null
null
UTF-8
R
false
false
4,001
r
runit_demo_glrm_walking_gait.R
setwd(normalizePath(dirname(R.utils::commandArgs(asValues=TRUE)$"f"))) source('../h2o-runit.R') # Connect to a cluster # Set this to True if you want to fetch the data directly from S3. # This is useful if your cluster is running in EC2. data_source_is_s3 = F locate_source <- function(s) { if (data_source_is_s3) myPath <- paste0("s3n://h2o-public-test-data/", s) else myPath <- locate(s) } test.walking_gait.demo <- function(conn) { Log.info("Import and parse walking gait data...") gait.hex <- h2o.importFile(locate("smalldata/glrm_test/subject01_walk1.csv"), destination_frame = "gait.hex") print(summary(gait.hex)) Log.info("Basic GLRM using quadratic loss and no regularization (PCA)") gait.glrm <- h2o.glrm(training_frame = gait.hex, x = 2:ncol(gait.hex), k = 5, init = "PlusPlus", loss = "Quadratic", regularization_x = "None", regularization_y = "None", max_iterations = 1000) print(gait.glrm) Log.info("Archetype to feature mapping (Y):") gait.y <- gait.glrm@model$archetypes print(gait.y) Log.info("Plot first archetype on z-coordinate features") feat_cols <- seq(3, ncol(gait.y), by = 3) plot(1:length(feat_cols), gait.y[1,feat_cols], xlab = "Feature", ylab = "Archetypal Weight", main = "First Archetype's Z-Coordinate Feature Weights", col = "blue", pch = 19, lty = "solid") text(1:length(feat_cols), gait.y[1,feat_cols], labels = colnames(gait.y[1,feat_cols]), cex = 0.7, pos = 3) abline(0, 0, lty = "dashed") Log.info("Projection into archetype space (X):") gait.x <- h2o.getFrame(gait.glrm@model$loading_key$name) print(head(gait.x)) time.df <- as.data.frame(gait.hex$Time[1:150])[,1] gait.x.df <- as.data.frame(gait.x[1:150,]) Log.info(paste0("Plot archetypes over time range [", time.df[1], ",", time.df[2], "]")) matplot(time.df, gait.x.df, xlab = "Time", ylab = "Archetypal Projection", main = "Archetypes over Time", type = "l", lty = 1, col = 1:5) legend("topright", legend = colnames(gait.x.df), col = 1:5, pch = 1) # Log.info("Reconstruct data from matrix product XY") # 
gait.pred <- predict(gait.glrm, gait.hex) # print(head(gait.pred)) # # Log.info(paste0("Plot original and reconstructed L.Acromium.X over time range [", time.df[1], ",", time.df[2], "]")) # lacro.df <- as.data.frame(gait.hex$L.Acromium.X[1:150]) # lacro.pred.df <- as.data.frame(gait.pred$reconstr_L.Acromium.X[1:150]) # matplot(time.df, cbind(lacro.df, lacro.pred.df), xlab = "Time", ylab = "X-Coordinate of Left Acromium", main = "Position of Left Acromium over Time", type = "l", lty = 1, col = 1:2) # legend("topright", legend = c("Original", "Reconstructed"), col = 1:2, pch = 1) Log.info("Import and parse walking gait data with missing values...") gait.miss <- h2o.importFile(locate("smalldata/glrm_test/subject01_walk1_miss15.csv"), destination_frame = "gait.miss") print(summary(gait.miss)) Log.info("Basic GLRM using quadratic loss and no regularization (PCA)") gait.glrm2 <- h2o.glrm(training_frame = gait.miss, validation_frame = gait.hex, x = 2:ncol(gait.miss), k = 15, init = "PlusPlus", loss = "Quadratic", regularization_x = "None", regularization_y = "None", max_iterations = 500, min_step_size = 1e-7) print(gait.glrm2) Log.info("Impute missing data from X and Y") gait.pred2 <- predict(gait.glrm2, gait.miss) print(head(gait.pred2)) Log.info(paste0("Plot original and imputed L.Acromium.X over time range [", time.df[1], ",", time.df[2], "]")) lacro.df2 <- as.data.frame(gait.hex$L.Acromium.X[1:150]) lacro.pred.df2 <- as.data.frame(gait.pred2$reconstr_L.Acromium.X[1:150]) matplot(time.df, cbind(lacro.df2, lacro.pred.df2), xlab = "Time", ylab = "X-Coordinate of Left Acromium", main = "Position of Left Acromium over Time", type = "l", lty = 1, col = 1:2) legend("topright", legend = c("Original", "Imputed"), col = 1:2, pch = 1) } doTest("Test out Walking Gait Demo", test.walking_gait.demo)
920434b2ed55d90f8bdb9a65a29f2de6e5a0d3b0
ffdea92d4315e4363dd4ae673a1a6adf82a761b5
/data/genthat_extracted_code/LOGICOIL/examples/LOGICOILfit.Rd.R
0d9f51f439ea4959a0c4a08ad8653c8ad1d58216
[]
no_license
surayaaramli/typeRrh
d257ac8905c49123f4ccd4e377ee3dfc84d1636c
66e6996f31961bc8b9aafe1a6a6098327b66bf71
refs/heads/master
2023-05-05T04:05:31.617869
2019-04-25T22:10:06
2019-04-25T22:10:06
null
0
0
null
null
null
null
UTF-8
R
false
false
272
r
LOGICOILfit.Rd.R
library(LOGICOIL) ### Name: LOGICOILfit ### Title: Fit of the multinomial log-linear model obtained from the ### LOGICOIL training dataset. ### Aliases: LOGICOILfit ### Keywords: datasets ### ** Examples data(LOGICOILfit) names(LOGICOILfit) LOGICOILfit$coefnames
4aa0c34af98e037f8aa3f71b30cb3e7c8511af40
e09d229dd1ad18879fb051e4cb7d97c1475f49aa
/R/trace_backwards.R
bf1e8c2586a0df22a1815e13dbeaf1e4efc35061
[ "MIT" ]
permissive
hamishgibbs/rtrackr
15bc922c8f8dfb765ee5b5da80df66b84eb16b16
2a353b73f8507e96c71c32c1ea557cfc04f9c0b2
refs/heads/master
2022-11-11T17:35:52.513669
2020-06-20T12:19:33
2020-06-20T12:19:33
271,510,902
1
0
NOASSERTION
2020-06-12T14:45:06
2020-06-11T09:54:51
R
UTF-8
R
false
false
756
r
trace_backwards.R
# trace_backwards # # @description recursively traverse a log file tree to identify parent nodes of a given trackr_id # # @param target_id string, a trackr_id # # @return list, the trackr_ids of parent record(s) trace_backwards <- function(target_id){ parent_id <- target_id parents <- list() i = 1 while(length(parent_id) == 1){ prev_id <- parent_id parent_id <- get_parent_backwards(parent_id) parents[[i]] <- list(name = prev_id, children = parent_id, type = 'node') i = i + 1 } if(is.null(parents[[i - 1]]$children)){ parents[[i - 1]]$type = 'root' parents <- parents[1:(i - 1)] } if(length(parents[[i - 1]]$children) > 1){ parents[[i - 1]]$type = 'break_point' } return(parents) }
93e8075f55939bbee2fdec4ec001999ac8560c58
150ddbd54cf97ddf83f614e956f9f7133e9778c0
/R/avg.R
9d445f48f1bf7469c5aabf1231792268eb849743
[ "CC-BY-4.0" ]
permissive
debruine/webmorphR
1119fd3bdca5be4049e8793075b409b7caa61aad
f46a9c8e1f1b5ecd89e8ca68bb6378f83f2e41cb
refs/heads/master
2023-04-14T22:37:58.281172
2022-08-14T12:26:57
2022-08-14T12:26:57
357,819,230
6
4
CC-BY-4.0
2023-02-23T04:56:01
2021-04-14T07:47:17
R
UTF-8
R
false
false
3,361
r
avg.R
#' Average Images #' #' Create an average from a list of delineated stimuli. #' #' @details #' #' ### Normalisation options #' #' * none: averages will have all coordinates as the mathematical average of the coordinates in the component templates #' * twopoint: all images are first aligned to the 2 alignment points designated in `normpoint`. Their position is set to their position in the first image in stimuli #' * rigid: procrustes aligns all images to the position of the first image in stimuli #' #' ### Texture #' #' This applies a representative texture to the average, resulting in composite images with more realistic texture instead of the very smooth, bland texture most other averaging programs create. See the papers below for methodological details. #' #' B. Tiddeman, M. Stirrat and D. Perrett (2005). Towards realism in facial prototyping: results of a wavelet MRF method. Theory and Practice of Computer Graphics. #' #' B. Tiddeman, D.M. Burt and D. Perrett (2001). Computer Graphics in Facial Perception Research. IEEE Computer Graphics and Applications, 21(5), 42-50. #' #' @param stimuli list of stimuli to average #' @param texture logical; whether textured should be averaged #' @param norm how to normalise; see Details #' @param normpoint points for twopoint normalisation #' #' @return list of stimuli with the average image and template #' @export #' @family webmorph #' #' @examples #' \donttest{ #' if (webmorph_up()) { #' demo_stim() |> avg() #' } #' } avg <- function(stimuli, texture = TRUE, norm = c("none", "twopoint", "rigid"), normpoint = 0:1) { stimuli <- require_tems(stimuli, TRUE) if (length(stimuli) > 100) { stop("We can't average more than 100 images at a time. You can create sub-averages with equal numbers of faces and average those together.") } if (!webmorph_up()) { stop("Webmorph.org can't be reached. 
Check if you are connected to the internet.") } norm <- match.arg(norm) format <- "jpg" #match.arg(format) # save images locally tdir <- tempfile() files <- write_stim(stimuli, tdir, format = "jpg") |> unlist() upload <- lapply(files, httr::upload_file) names(upload) <- sprintf("upload[%d]", 0:(length(upload)-1)) settings <- list( texture = ifelse(isTRUE(as.logical(texture)), "true", "false"), norm = norm, normPoint0 = normpoint[[1]], normPoint1 = normpoint[[2]], format = format ) # send request to webmorph and handle zip file ziptmp <- paste0(tdir, "/avg.zip") httr::timeout(30 + 10*length(stimuli)) httr::set_config( httr::config( ssl_verifypeer = 0L ) ) url <- paste0(wm_opts("server"), "/scripts/webmorphR_avg") r <- httr::POST(url, body = c(upload, settings) , httr::write_disk(ziptmp, TRUE)) utils::unzip(ziptmp, exdir = paste0(tdir, "/avg")) #resp <- httr::content(r) avg <- paste0(tdir, "/avg") |> read_stim() |> rename_stim("avg") unlink(tdir, recursive = TRUE) # clean up temp directory avg } #' Check if webmorph.org is available #' #' @export #' @family webmorph #' @examples #' webmorph_up() webmorph_up <- function() { tryCatch({ paste0(wm_opts("server"), "/scripts/status") |> httr::HEAD() |> httr::status_code() |> identical(200L) }, error = function(e) { return(FALSE) }) }
22317d7d5f468b9f01d76e6704ee1951d1cae712
8327aedc9fca9c1d5f11c160d440ecc082fb915d
/man/per.Rd
5da25bae9fca76698a7ee2c4106a0dca850b47d7
[]
no_license
SESjo/SES
f741a26e9e819eca8f37fab71c095a4310f14ed3
e0eb9a13f1846832db58fe246c45f107743dff49
refs/heads/master
2020-05-17T14:41:01.774764
2014-04-17T09:48:14
2014-04-17T09:48:14
null
0
0
null
null
null
null
UTF-8
R
false
false
953
rd
per.Rd
\name{per} \alias{per} \title{Decompose an atomic vector to its successive values and their length.} \usage{ per(x, idx = FALSE) } \arguments{ \item{x}{The atomic vector to examine.} \item{idx}{Should the indexes (start and end) of homogeneous sequences be returned as well ?} } \value{ A data frame with values and lengths of the homogeneous sequences of x. The class of the column 'value' is copied from the input. } \description{ The reverse of 'base::rep()' function: decompose an atomic vector to its successive values and their length. } \examples{ (x <- rep(LETTERS[1:10], 10:1)) (y <- per(x)) # 'per()' is the reverse of 'rep()' # identical(rep(y$value, y$length), x) # TRUE # Because characters are not converted to factors # inherits(y$value, class(x)) # TRUE } \seealso{ Other generalUtils: \code{\link{SESname}}; \code{\link{convertTime}}; \code{\link{replaceMissing}}; \code{\link{sunPosition}}; \code{\link{sunPos}} }
cd19b39a4fba632862748005ab8d96566406ab65
532d0fe3ec396c2898574944a66e3e43d750b4b9
/IslandOfLostScripts/cm_v1.R
5ff985c3469167341077bb883d39186d1d36d3fa
[]
no_license
myellen/MF850_Computational_Finance_Final
6d91653abe90d48a84df5eca08ed178bab2870fe
7410931178707089e286867f9af49d7aee7f6434
refs/heads/master
2020-06-11T13:02:40.224997
2016-12-19T10:37:40
2016-12-19T10:37:40
75,659,108
0
1
null
2016-12-19T09:52:48
2016-12-05T19:32:11
R
UTF-8
R
false
false
4,576
r
cm_v1.R
<<<<<<< HEAD ## Load the data mydata<-read.csv(file="mf850-finalproject-data.csv", header=TRUE, sep=",") ======= library(glmnet) ##Read Test Data mytestdata<-read.csv(file="/Users/leighm888/Desktop/Test_set_v2.csv", header=TRUE, sep=",") #turn categorical variables into factors date<-mytestdata[,1] RETMONTH_SPX<-mytestdata[,2] compid<-mytestdata[,3] Close<-mytestdata[,4] Adj_Close<-mytestdata[,5] Volume<-mytestdata[,6] Adj_Volume<-mytestdata[,7] Industry<-mytestdata[,8] RETMONTH<-mytestdata[,9] MARKETCAP<-mytestdata[,10] REVENUEUSD<-mytestdata[,11] COR<-mytestdata[,12] GP<-mytestdata[,13] RND<-mytestdata[,14] SGNA<-mytestdata[,15] OPEX<-mytestdata[,16] OPINC<-mytestdata[,17] EBITUSD<-mytestdata[,18] INTEXP<-mytestdata[,19] TAXEXP<-mytestdata[,20] CONSOLINC<-mytestdata[,21] NETINCNCI<-mytestdata[,22] NETINC<-mytestdata[,23] PREFDIVIS<-mytestdata[,24] NETINCCMNUSD<-mytestdata[,25] EPSUSD<-mytestdata[,26] SHARESWA<-mytestdata[,27] DPS<-mytestdata[,28] DEPAMOR<-mytestdata[,29] SBCOMP<-mytestdata[,30] CAPEX<-mytestdata[,31] NCF<-mytestdata[,32] ASSETS<-mytestdata[,33] CASHNEQUSD<-mytestdata[,34] INVENTORY<-mytestdata[,35] LIABILITIES<-mytestdata[,36] DEBTUSD<-mytestdata[,37] INVESTMENTS<-mytestdata[,38] EQUITY<-mytestdata[,39] BM<-mytestdata[,40] SHARESBAS<-mytestdata[,41] SHAREFACTOR<-mytestdata[,42] TAXASSETS<-mytestdata[,43] TAXLIABILITIES<-mytestdata[,44] DIVYIELD<-mytestdata[,45] EBITDAUSD<-mytestdata[,46] EBITDAMARGIN<-mytestdata[,47] DE<-mytestdata[,48] EVEBIT<-mytestdata[,49] EVEBITDA<-mytestdata[,50] FCFPS<-mytestdata[,51] GROSSMARGIN<-mytestdata[,52] NETMARGIN<-mytestdata[,53] PE<-mytestdata[,54] PS<-mytestdata[,55] PB<-mytestdata[,56] ROIC<-mytestdata[,57] SPS<-mytestdata[,58] PAYOUTRATIO<-mytestdata[,59] ROA<-mytestdata[,60] ROE<-mytestdata[,61] ROS<-mytestdata[,62] mytestdata$Industry=as.factor(mytestdata$Industry) contrasts(mytestdata$Industry) = contr.treatment(213) mytestdata$industry.f[1:213] linear_v1<-lm(RETMONTH ~ RETMONTH_SPX + Close + Adj_Close + 
Volume + Adj_Volume + Industry + MARKETCAP + REVENUEUSD + COR +GP + RND+SGNA + OPEX + OPINC + EBITUSD + INTEXP + TAXEXP + CONSOLINC + NETINCNCI + NETINC + PREFDIVIS+ NETINCCMNUSD + EPSUSD + SHARESWA + DPS + DEPAMOR + SBCOMP + CAPEX + NCF + ASSETS + CASHNEQUSD + INVENTORY + LIABILITIES + DEBTUSD + INVESTMENTS + EQUITY + BM + SHARESBAS + SHAREFACTOR + TAXASSETS + TAXLIABILITIES + DIVYIELD + EBITDAUSD + EBITDAMARGIN + DE + EVEBIT + EVEBITDA + FCFPS + GROSSMARGIN + NETMARGIN + PE +PS + PB +ROIC + SPS +PAYOUTRATIO + ROA + ROE + ROS) summary(linear_v1) ##Close, Adj_Close, Volume, Adj_Volume, Industry 48, 83,138,139,149,EPSUSD, ##SHARESWA,SHARESBAS,DIVYIELD,DE,NETMARGIN,PB,ROIC,SPS,ROA linear_sig<-lm(RETMONTH ~ RETMONTH_SPX + Close + Adj_Close + Volume + Adj_Volume + Industry + EPSUSD + SHARESWA + SHARESBAS + DIVYIELD + DE + NETMARGIN + PB + ROIC + SPS + ROA) #define matrix of explanatory variables exp_var<-mytestdata[,c(2,4:8,10:62)] fit<-glmnet(exp_var,RETMONTH) testdata_matrix<-data.matrix(exp_var) fit=glmnet(testdata_matrix,RETMONTH) x = model.matrix(RETMONTH~ RETMONTH_SPX + Close + Adj_Close + Volume + Adj_Volume + Industry + MARKETCAP + REVENUEUSD + COR +GP + RND+SGNA + OPEX + OPINC + EBITUSD + INTEXP + TAXEXP + CONSOLINC + NETINCNCI + NETINC + PREFDIVIS+ NETINCCMNUSD + EPSUSD + SHARESWA + DPS + DEPAMOR + SBCOMP + CAPEX + NCF + ASSETS + CASHNEQUSD + INVENTORY + LIABILITIES + DEBTUSD + INVESTMENTS + EQUITY + BM + SHARESBAS + SHAREFACTOR + TAXASSETS + TAXLIABILITIES + DIVYIELD + EBITDAUSD + EBITDAMARGIN + DE + EVEBIT + EVEBITDA + FCFPS + GROSSMARGIN + NETMARGIN + PE + PS + PB +ROIC + SPS +PAYOUTRATIO + ROA + ROE + ROS, data = mytestdata) lasso<-glmnet(x, RETMONTH,alpha=1) ridge<-glmnet(x, RETMONTH,alpha=0) ##Industry, Close, Volume, Industry,opinc, netincnci,epsusd,ncf,bm,sharesbas,divyield, ## ebitdamargin, de, roic, sps, payoutratio, roa linear_lasso<-lm(RETMONTH~Close + Volume + Industry + OPINC + NETINCNCI + EPSUSD + NCF + BM + SHARESBAS + DIVYIELD + EBITDAMARGIN 
+ DE + ROIC + SPS + PAYOUTRATIO + ROA) test_set_cat<-read.csv(file="/Users/leighm888/Desktop/Test_set_cat.csv", header=TRUE, sep=",") test_set_cat$Industry=as.factor(test_set_cat$Industry) contrasts(test_set_cat$Industry) = contr.treatment(213) y<-test_set_cat$RETMONTHCAT linear_cat<-glm(y~.,data = test_set_cat) >>>>>>> master
e190f56c61f4acf90546e56ed6023e1ffdf188b6
521fa790f4faa0d25d617bc40604a6bcbfa7e324
/code/define_couples_mutate.R
fe46761676b4ad69f7265e4c98f9e5e896dc0e93
[]
no_license
CedricBezy/stat_sante_git
deaced07b3c21e6161611b2165ef432d66076120
7177aab8f1106fae54472de5779f631d419af2f5
refs/heads/master
2021-05-15T04:56:03.235445
2018-02-02T16:16:35
2018-02-02T16:16:35
118,431,016
0
0
null
null
null
null
UTF-8
R
false
false
8,227
r
define_couples_mutate.R
#--------------------------------------------------- # Cedric Bezy # 25 / 01 / 2018 # Projet Stat Sante #--------------------------------------------------- rm(list = ls()) library(dplyr) library(tibble) load('stat_sante_copy/data/couples_init.RData') ##================================================== # Functions ##================================================== contains_values <- function(text, vect){ any(sapply(vect, grepl, x = text)) } count_na <- function(x){ sum(is.na(x)) } na_barplot <- function(df){ nb_na <- sapply(df, count_na) nb_na <- nb_na[which(nb_na != 0)] if(length(nb_na)){ barplot(nb_na, main="Number of NA") } return(nb_na) } strfind <- function(x, vect, xsep = ";"){ return(any(strsplit(x, split = xsep)[[1]] %in% vect)) } ##================================================================ # Valeurs manquantes ##================================================================ ##-----------------------------------. # Bar_Plot ##-----------------------------------. na_barplot(couples_init) ##-----------------------------------. # Filter ##-----------------------------------. couples <- couples_init %>% dplyr::filter( !is.na(bmi_h) & between(bmi_h, 15, 45), !is.na(diplome_h), !is.na(age_f) ) # barplot na_barplot(couples) ##================================================================. # Creation de variables ##================================================================. ##-----------------------------------. # Difference age ##-----------------------------------. diff_age <- couples$age_h - couples$age_f couples <- couples %>% tibble::add_column(diff_age, .after = "fecondite") ##-----------------------------------. # Duree Infertilite ##-----------------------------------. 
duree_infertilite_class <- with(couples, { cut(duree_infertilite, breaks = c(0, 24, max(duree_infertilite, na.rm = TRUE) + 1), labels = c("inf_24", "24_sup"), include.lowest = FALSE, right = TRUE, ordered_result = FALSE ) }) couples <- couples %>% add_column(duree_infertilite_class, .after = "duree_infertilite") ##-----------------------------------. # BMI ##-----------------------------------. # <16 : Anorexie ; # 16 < Maigreur < 18,5 ; # 18,5< normal < 25 ; # 25< surpoids < 30 ; # 30 < obese < 40 ; # >40 massive bmi_h_class_6 <- with(couples, { cut(bmi_h, breaks = c(10, 16, 18.5, 25, 30, 40, 60), labels = c("Anorexie", "Maigreur", "Normal", "Surpoids", "Obese", "Massive"), include.lowest = FALSE, right = TRUE, ordered_result = FALSE ) }) bmi_h_class_2 <- with(couples, { cut(bmi_h, breaks = c(16, 25, 60), labels = c("Normal", "Surpoids"), include.lowest = FALSE, right = TRUE, ordered_result = FALSE ) }) couples <- couples %>% tibble::add_column(bmi_h_class_6, bmi_h_class_2, .after = "bmi_h") ##-----------------------------------. # Pathologie Homme ##-----------------------------------. 
patho_h <- couples$patho_h patho_h <- gsub(" *, *", ",", patho_h) patho_h <- gsub(" +", "_", patho_h) patho_h <- gsub(",", ";", patho_h) all_patho_h <- table(unlist(strsplit(patho_h, ";"))) all_patho_h # [1] "non" "chimiotherapie" # [3] "autre" "pathologies_respiratoire_chroniques" # [5] "hodgkin" "radiotherapie" # [7] "sinusites_chroniques" "diabete" # [9] "cancer_testis" "sarcome" # [11] "neurologique" table(couples_init$patho_h) # autre # 227 # cancer testis , chimiotherapie # 2 # chimiotherapie # 5 # chimiotherapie , radiotherapie # 2 # diabete # 7 # hodgkin , chimiotherapie , radiotherapie # 1 # neurologique # 1 # non # 842 # pathologies respiratoire chroniques # 9 # sarcome , chimiotherapie # 1 # sinusites chroniques # 33 # sinusites chroniques , pathologies respiratoire chroniques # 1 # Chimio v_chimio <- c("chimiotherapie", "cancer_testis", "radiotherapie", "hodgkin", "sarcome") v_chronic <- c("pathologies_respiratoire_chroniques", "sinusites_chroniques", "diabete") v_autre <- setdiff(all_patho_h, c("non", v_chimio, v_chronic)) patho_h_bin <- factor(patho_h == 'non', levels = c(FALSE, TRUE), labels = c(0, 1)) x <- patho_h[832] vect <- v_chimio # Chimiotherapie patho_h_regroup = factor( ifelse( patho_h == "non", "non", ifelse( sapply(patho_h, strfind, vect = v_chimio, xsep = ";"), "chimio", ifelse( sapply(patho_h, strfind, vect = v_chronic, xsep = ";"), "chronic", "autre" ) ) ), levels = c("non", "chimio", "chronic", "autre") ) table(patho_h_regroup) couples <- couples %>% dplyr::mutate( patho_h = patho_h ) %>% tibble::add_column( patho_h_regroup, patho_h_bin, .after = "patho_h" ) ##-----------------------------------. # Pathologie Femme ##-----------------------------------. 
patho_f <- couples$patho_f patho_f <- gsub(" *, *", ",", patho_f) patho_f <- gsub(" +", "_", patho_f) patho_f <- gsub(",", ", ", patho_f) all_patho_f <- unique(unlist(strsplit(patho_f, ", "))) all_patho_f table(patho_f) # autre endometriose hydrosalpinx # 18 17 2 # non pb tubaire bilateral pb tubaire unilateral # 647 14 65 patho_f_bin <- factor( (patho_f == 'non'), levels = c(FALSE, TRUE), labels = c(0, 1) ) patho_f_regroup <- factor( ifelse( test = is.na(patho_f), yes = NA, no = ifelse( test = (patho_f %in% c("non", "endometriose")), yes = patho_f, no = ifelse( test = grepl("tubaire", patho_f), yes = "tubaire", no = "autre" ) ) ), levels = c("non", "endometriose", "tubaire", "autre") ) summary(patho_f_regroup) couples <- couples %>% dplyr::mutate( patho_f = patho_f ) %>% tibble::add_column( patho_f_regroup, patho_f_bin, .after = "patho_f" ) ##-----------------------------------. # Bilan Femme ##-----------------------------------. df_bilan <- couples %>% dplyr::select(id, enfant, bh_f, ct_f, patho_f) nb_na_bilan_f <- with(couples, is.na(bh_f) + is.na(ct_f) + is.na(patho_f)) complet_f = (nb_na_bilan_f == 0) bilan_f <- with(couples, { factor( ifelse( test = (nb_na_bilan_f == 3), yes = NA, no = ifelse( test = (is.na(bh_f) | bh_f == "normal") & (is.na(ct_f) | ct_f == "ovulation") & (is.na(patho_f) | patho_f == "non"), yes = 0, no = 1 ) ), levels = c(1, 0), labels = c("dysfonc", "normal") ) }) ## ADD TO couples couples <- couples %>% tibble::add_column( bilan_f = bilan_f, complet_f = complet_f, .before = "bh_f" ) ##================================================================. # remake couples ##================================================================. couples <- droplevels(couples) %>% tibble::add_column( delta = with(couples, { ifelse(!is.na(dconception), dconception - dconsultation, ddn - dconsultation) }), .after = "ddn" ) ##================================================================. 
# Save ##================================================================. save(couples, file = 'stat_sante_copy/data/couples.RData') if(readline("Update Github data (y/n): ")%in% c("y", "1")){ save(couples, file = 'stat_sante_git/data/couples.RData') message("Substitution of data : done") }else{ message("No substitution of data") }
e6889f727ba31e1f51f915a237ad0f85e7df5c6f
fd2bf6d71e00c84e16814fa8fc41c35d52e0752b
/plot-fdcs-w-err-bar-Function.R
bce8e78f76a615eb66cbe63b91f22aef6cb5fc3f
[]
no_license
BTDangelo/Function-Archive
b9a6a5538e2dd56043b60d0ad7fe58286fb5ec9a
20cf00f19a5999ac4c6fab01f4cf68288fccd1b4
refs/heads/master
2020-12-02T18:04:59.106707
2017-08-09T20:05:06
2017-08-09T20:05:06
96,469,222
0
0
null
null
null
null
UTF-8
R
false
false
8,202
r
plot-fdcs-w-err-bar-Function.R
## BTD - Function to create flow duration curve with error bars flow.d.c <- function(chr.dir.main, chr.dir.flow.data, chr.dir.figures, chr.eb) { ## chr.dir.main - path to main workspace ## chr.dir.flow.data - path to flow data ## chr.dir.figures - path to write the figures ## chr.eb - path to error bar data library(ggplot2) library(scales) ## BTD - Generic plot scaling nethods library(tidyr) ## BTD - Makes it easy to "tidy" your data. Tidy data is data that's easy to work with. library(Hmisc) ## BTD - Contains functions useful for data analysis, high-level graphics, etc. library(plyr) ## BTD - Tools for splitting, applying, and combining data library(dplyr) ## BTD - A fast, consistent tool for working with data frame like objects, both in and out of memory ## BTD - Read the error bar data into R eb <- read.csv(file = chr.eb ) ## BTD - Create unique combination of a set of vectors pertaining to each station number, each station number is a character vector chr.stn.num <- as.character(unique(eb$station)) ## BTD - Create a for loop for each station for(j in 1:length(chr.stn.num)) { ## plot fdc for Upper Yaquina River Bacteria TMDL report document options(stringsAsFactors = FALSE) ## flow data file chr.file.flow.data <- paste0("flow_stn",chr.stn.num[j], ".txt") ## name of output figure file chr.file.figure <- paste0("fdc-stn-", chr.stn.num[j], ".png") ## creat empty data frome for flow data df.data.flow <- data.frame(stn = character(0), date = character(0), flow_cfs = character(0)) ## path and file name for flow data file tmp.flow.file <- paste0(chr.dir.flow.data, "/", chr.file.flow.data) ## read the flow data file into R tmp.data <- read.table(file=tmp.flow.file,sep="\t", header=TRUE, stringsAsFactors=FALSE, colClasses="character") ## populate the flow data frame with the flow data df.data.flow <- data.frame(stn = tmp.data$stn, date = as.POSIXct(strptime(tmp.data$date, format = "%m-%d-%Y")), flow_cfs = as.numeric(tmp.data$flow_cfs)) ## replace NAs in flow data with 
nearest previous non-NA value tmp.flow.no.na <- (df.data.flow %>% fill(flow_cfs)) df.data.flow <- cbind(df.data.flow, value = tmp.flow.no.na$flow_cfs) ## create a column for the flow exceedance in the flow data frame df.data.flow <- cbind(df.data.flow, flow.exceed = -1) ## create a function to calculate the flow exceedance flow.exceed <- function(v.flow) { tmp.rank <- rank(v.flow, ties.method = "average") tmp.exceed <- tmp.rank / length(v.flow) tmp.exceed <- 100 * (1 - tmp.exceed) return(tmp.exceed) } ## calculate the flow exceedance for the flow data df.data.flow$flow.exceed <- flow.exceed(df.data.flow$value) ## make plots of fdc ## dynamically calculate the breaks for the numbers on the y-axis tmp.breaks <- 10^seq(from = floor(log10(min(df.data.flow$value, na.rm = TRUE))), to = ceiling(log10(max(df.data.flow$value, na.rm = TRUE))), length.out = 5) fancy_scientific <- function(l) { ## function taken from stackoverflow.com post ## http://stackoverflow.com/questions/11610377/how-do-i-change-the-formatting-of-numbers-on-an-axis-with-ggplot/24241954 # turn in to character string in scientific notation x <- format(l, scientific = TRUE) # quote the part before the exponent to keep all the digits #y <- gsub("^(.*)e", "'\\1'e", x) # turn the 'e+' into plotmath format z <- gsub("^.*e", "10^", x) # return this as an expression parse(text=z) } ## add the fdc to the plot and set how the axes will appear p1 <- ggplot(data = df.data.flow) + geom_line(aes(x = flow.exceed, y = value), color = "blue", size = 1.5) + scale_y_log10(limits = range(tmp.breaks), breaks = tmp.breaks, minor_breaks = c(sapply(tmp.breaks, function(x) seq(0, x, x/10))), labels = fancy_scientific) + scale_x_continuous(limits = c(0, 100), expand = c(0,0)) + labs( x = "Flow Exceedance (%)", y = "Average Daily Flow (cfs)" ) ## set the appeareance of the grid lines and the text in the figure p2 <- p1 + theme( axis.title = element_text(size = 10, color = "black"), axis.text = element_text(size = 8, color = 
"black"), panel.grid.major = element_line(colour = "grey60"), panel.grid.minor = element_line(colour = "grey60"), panel.background = element_rect(fill = "white"), panel.border = element_rect(colour = "black", fill=NA, size=1) ) ## now add information dynamically for the flowzone boundaires ## flowzone descriptions chr.flz.desc <- c("High Flows", "Transitional Flows", "Typical Flows", "Dry Flows", "Low Flows") ## flowzone for exceedance boundaries num.flow.exceed.bnd <- c(10,40,60,90) ## add lines for flowzone boundaries p3 <- p2 + geom_vline(xintercept = num.flow.exceed.bnd, size = 1.5, linetype = "dashed") ## adding text for flowzone boundaires ## get the mid-pojnts of the x-values of the flowzones junk <- c(0, num.flow.exceed.bnd, 100) junk.mid <- c() for(ii in 2:length(junk)) { junk.mid <- c(junk.mid, junk[ii-1] + (junk[ii] - junk[ii-1]) / 2) } num.flow.exceed.bnd.mids <- junk.mid rm(junk, junk.mid) ## create a data frame that has the x and y values along with the labels for the flowzone boundaries df.fz.lables <- data.frame(x = num.flow.exceed.bnd.mids, y = 10^ (rep(1, length(num.flow.exceed.bnd.mids))* min(p3$scales$scales[[1]]$limits) + 0.015 * (p3$scales$scales[[1]]$limits[2] - p3$scales$scales[[1]]$limits[1])), chr.label = chr.flz.desc) ## add flowzone text to the fdc plot p4 <- p3 + geom_text(data = df.fz.lables, aes(x = x, y = y, label = chr.label), size = 6 * 0.352777778, fontface = "bold") ## set the width of the plot in inches p.width = 6 ## write the plot to a graphics file png(filename = paste0(chr.dir.figures, "/", chr.file.figure), width = p.width, height = round(p.width / 1.61803398875, 1), units = "in", res = 1200) plot(p4) dev.off() ## BTD - Add Error Bars ## BTD - Take all data from error bar dataset and filter or return rows according to each station number df.err.bar <- eb %>% filter(station == chr.stn.num[j]) ## BTD - Turn flow.exceed column fron a decimal into a percentage df.err.bar$flow.exceed <- df.err.bar$flow.exceed * 100 ## BTD - Add 
ggplot for error bars to the fdc plot, make error bars red p5 <- p4 + geom_point(data = df.err.bar, aes(x = flow.exceed, y = value), color = "red") + geom_errorbar(data = df.err.bar, aes(x=flow.exceed, ymin = err.lower.limit , ymax = err.upper.limit), color = "red") ## BTD - Name of the fdc plot with the added error bars png(filename = paste0(chr.dir.figures, "/", paste0('fdc-stn-', chr.stn.num[j], '-w-err-bar.png')), width = p.width, height = round(p.width / 1.61803398875, 1), units = "in", res = 1200) plot(p5) dev.off() } } flow.d.c(chr.dir.main = "M:/Models/Bacteria/LDC/Bernadette-workspace", chr.dir.flow.data = "M:/Models/Bacteria/LDC/Bernadette-workspace/data" , chr.dir.figures = "M:/Models/Bacteria/LDC/Bernadette-workspace/figures", chr.eb = "//deqhq1/TMDL/TMDL_WR/MidCoast/Models/Bacteria/LDC/Bernadette-workspace/data/fdc-err-bars.csv")
2e3f36a0154a3a1e3bfdb82fd57e387901848f06
c542082c439cf134c109d1b12605aedabe2ca082
/R/playground/old_attempts/predict_2_kernels.R
352660db0e77206d43255c8fe66b7495a449ade5
[]
no_license
NathanWycoff/GPArcLength
f811702d0bcb3bd3e447c8c9ab8611d1830936f7
4b84fb390dd21cf3487f53b5709ef9a9c8b1eadd
refs/heads/master
2021-05-06T04:14:31.562516
2018-01-27T17:08:44
2018-01-27T17:08:44
114,920,616
0
0
null
null
null
null
UTF-8
R
false
false
1,840
r
predict_2_kernels.R
#!/usr/bin/Rscript # predict_2_kernels.r Author "Nathan Wycoff <nathanbrwycoff@gmail.com>" Date 01.08.2018 require(mds.methods) source('../../lib/some_gp_funcs.R') ## Seeing what predictions looked like on the nonsmooth multiscale GP ######### Generate some data with specified weirdness set.seed(1234) n <- 50 p <- 1 X <- matrix(runif(n*p), ncol = p) #X <- matrix(seq(0,1,length.out=n), ncol = p) ## Denote the top s which have the largest inner product with the p-vector of 1's as being in the different kernel space sp <- 0.25#proportion of things in different kernel space s <- ceiling(sp * n) r <- rank(-X %*% rep(1,p)) ##Xn -- x normal, obeys the kernel for most of the space ##Xd -- x different, obeys a kernel with a tenth of the lengthscale Xn <- as.matrix(X[r > s,], ncol = 1) Xd <- as.matrix(X[r <= s,], ncol = 1) #Create the kernels and the response kernn <- kernel_factory(lengthscale=0.1) kernd <- kernel_factory(lengthscale=0.01, covariance = 10) nugget <- 0.01 yn <- gen_gp(Xn, kernn, nugget) yd <- gen_gp(Xd, kernd, nugget) #Store them in one vector y <- rep(NA, n) y[r > s] <- yn y[r <= s] <- yd #Only works in 1D so far. funky_kern <- function(x, y) { cp1 <- max(Xn) cp2 <- min(Xd) if (x <= cp1) { return(kernn(x, y)) } else if (x >= cp2) { return(kernd(x, y)) } else { l <- conn_line_seg(c(cp1, kernn(x, y)), c(cp2, kernd(x, y))) return(l(x - cp1)[2]) } } ##For 1D only, plot the points as well as the normal GP fit. if (p == 1) { quartz() cols <- c('red', 'blue') plot(X, y, lwd=0) text(X, y, 1:n, col = cols[(r > s) + 1]) XX <- as.matrix(seq(0,1,length.out=200), ncol = p) mu <- gp_post_mean_factory(X, y, funky_kern, nugget) yy <- sapply(1:nrow(XX), function(xx) mu(XX[xx,])) points(XX, yy, col = 'red', type = 'l') }
94f41207c4622ee8e480ac0a3ed1836e8871b16d
fc00987cf8ddb7ee81fd7865cfd8f272a7f4a101
/R/by-game-parsers.r
d0fa28d8eff979a579a49739c450c7b029a8d40e
[ "MIT" ]
permissive
zamorarr/msf
72bcaed4569b2c4f3bca05940965285b4c0c3fd4
d84327bd04a15efbd36e918646d82458bf61a280
refs/heads/master
2018-10-06T20:16:27.831851
2018-06-22T14:16:11
2018-06-22T14:16:11
116,212,412
0
0
null
null
null
null
UTF-8
R
false
false
6,596
r
by-game-parsers.r
#' Parse box scores #' #' @param json content from response #' @export #' @examples #' \dontrun{ #' resp <- game_boxscore("nfl", "20170917-ARI-IND", season = "2017-2018-regular") #' resp <- game_boxscore("nhl", "20171114-BUF-PIT", season = "2017-2018-regular") #' parse_boxscore(resp$content) #' } parse_boxscore <- function(json) { gameboxscore <- json[["gameboxscore"]] # game data game <- gameboxscore[["game"]] away_id <- game[["awayTeam"]][["ID"]] home_id <- game[["homeTeam"]][["ID"]] # player stats away <- gameboxscore[["awayTeam"]][["awayPlayers"]][["playerEntry"]] home <- gameboxscore[["homeTeam"]][["homePlayers"]][["playerEntry"]] df_away <- parse_team_boxscore(away) df_home <- parse_team_boxscore(home) # data frames df_home$team_id <- home_id df_away$team_id <- away_id # combine home and away result <- rbind(df_away, df_home) important_names <- c("player_id", "team_id", "position") result[c(important_names, setdiff(names(result), important_names))] } #' @keywords internal parse_team_boxscore <- function(json) { # stats stats <- purrr::map(json, "stats") toremove <- purrr::map_lgl(stats, is.null) # no stats? 
get outta here stats <- stats[!toremove] df_stats <- parse_stats(stats) # players players <- purrr::map(json, "player") players <- players[!toremove] player_ids <- purrr::map_chr(players, "ID") positions <- purrr::map_chr(players, "Position") result <- tibble::tibble(player_id = player_ids, position = positions) cbind(result, df_stats) } #' Parse starting lineup for a game #' #' @param json content from response #' @param type actual or expected lineup #' @export #' @examples #' \dontrun{ #' resp <- game_starting_lineup("nhl", "20171014-BUF-LAK", season = "2017-2018-regular") #' resp <- game_starting_lineup("mlb", "20170822-COL-KC", season = "2017-regular") #' resp <- game_starting_lineup("nba", "42070", season = "2017-2018-regular") #' parse_starting_lineup(resp$content, "actual") #' #' } parse_starting_lineup <- function(json, type = c("actual", "expected")) { startinglineup <- json[["gamestartinglineup"]] # game info game_id <- startinglineup[["game"]][["id"]] # lineups type <- match.arg(type) lineups <- purrr::map(startinglineup[["teamLineup"]], parse_single_lineup, type) # combine data frames lineups <- do.call(rbind, lineups) lineups$game_id <- game_id lineups[c("player_id", "team_id", "game_id", "lineup_position")] } #' @keywords internal parse_single_lineup <- function(lineup, type) { # team info team_id <- lineup[["team"]][["ID"]] # player info players <- lineup[[type]][["starter"]] lineup_position <- purrr::map_chr(players, "position", .null = NA) player_ids <- purrr::map_chr(players, c("player", "ID"), .null = NA) tibble::tibble(player_id = player_ids, team_id = team_id, lineup_position = lineup_position) } #' Parse Play by Play Data #' @param json list of data #' @param sport sport #' @export #' @examples #' \dontrun{ #' resp <- game_pbp("nhl", "20161215-FLO-WPJ", season = "2016-2017-regular") #' parse_game_pbp(resp$content, "nhl") #' } parse_game_pbp <- function(json, sport = c(NA, "nba", "nhl", "nfl", "mlb")) { sport = match.arg(sport) # get plays or 
at-bats if (is.na(sport)) { stop("Please provide a sport argument.") } if (sport == "mlb") { plays <- json[["gameplaybyplay"]][["atBats"]][["atBat"]] } else { plays <- json[["gameplaybyplay"]][["plays"]][["play"]] } # parse events if (sport == "nba") { quarter <- purrr::map_chr(plays, "quarter") time <- purrr::map_chr(plays, "time") event <- purrr::map_chr(plays, ~ names(.x)[3]) event_data <- purrr::map(plays, 3) tibble::tibble(quarter = quarter, time = time, event = event, data = event_data) } else if (sport == "nhl") { period <- purrr::map_chr(plays, "period") time <- purrr::map_chr(plays, "time") event <- purrr::map_chr(plays, ~ names(.x)[3]) event_data <- purrr::map(plays, 3) tibble::tibble(period = period, time = time, event = event, data = event_data) } else if (sport == "nfl") { quarter <- purrr::map_chr(plays, "quarter") time <- purrr::map_chr(plays, "time") event <- purrr::map_chr(plays, ~ utils::tail(names(.x),1)) event_data <- purrr::map(plays, utils::tail, 1) tibble::tibble(quarter = quarter, time = time, event = event, data = event_data) } else if (sport == "mlb") { inning <- purrr::map_chr(plays, "inning") inning_half <- purrr::map_chr(plays, "inningHalf") batting_team <- purrr::map_chr(plays, c("battingTeam", "ID")) atbat_id <- seq_along(inning) event_data <- purrr::map(plays, "atBatPlay") event_data <- purrr::map(event_data, parse_mlb_event) results <- tibble::tibble( inning = inning, inning_half = inning_half, batting_team = batting_team, atbat_id = atbat_id, data = event_data) #tidyr::unnest(results, data) results } } #' Parse mlb events #' @param event a nested json event #' @keywords internal parse_mlb_event <- function(event) { event_type <- purrr::map_chr(event, ~ names(head(.x))) event_data <- purrr::map(event, 1) play_id <- seq_along(event_type) tibble::tibble(play_id, event = event_type, data = event_data) } #parse_starting_lineup(resp$content, "actual") %>% # filter(!is.na(player_id)) %>% # mutate(type = if_else(grepl("BO[0-9]", 
lineup_position), "BO", "position")) %>% # tidyr::spread(type, lineup_position) #mlb_batting_order <- function(id, position, team_id) { # is_order <- grepl("BO", position) # batting order values start with BO # col_type <- dplyr::if_else(is_order, "lineup_order", "position") # # df <- tibble::tibble(id = id, position = position, col_type = col_type) # df <- dplyr::filter(df, !is.na(id)) # # # hack to avoid errors when players are listed at multiple lineup spots # # simply selects the first instance of that player # df <- dplyr::arrange(df, id, position) # df <- dplyr::group_by(df, id, col_type) # df <- dplyr::slice(df, 1) # df <- dplyr::ungroup(df) # # df <- tidyr::spread(df, col_type, position) # # # # add lineup_order column if not found # if (!("lineup_order" %in% colnames(df))) df[["lineup_order"]] <- NA_character_ # # # batting orders are in the form BO1, BO2, BO3, etc.. # # stopifnot(length(df[["lineup_order"]]) == 9) # df[["lineup_order"]] <- stringr::str_extract(df[["lineup_order"]], "[0-9]") # df[["lineup_order"]] <- as.integer(df[["lineup_order"]]) # # # add team id # df[["team_id"]] <- team_id # # df[c("id", "lineup_order", "team_id")] # }
e63ba045123abac07392c7175a229257ba14b35f
ffdea92d4315e4363dd4ae673a1a6adf82a761b5
/data/genthat_extracted_code/intrinsicDimension/examples/M_rozza.rd.R
a6eabda7b791ecdb0970acf93228058dc0cf14d5
[]
no_license
surayaaramli/typeRrh
d257ac8905c49123f4ccd4e377ee3dfc84d1636c
66e6996f31961bc8b9aafe1a6a6098327b66bf71
refs/heads/master
2023-05-05T04:05:31.617869
2019-04-25T22:10:06
2019-04-25T22:10:06
null
0
0
null
null
null
null
UTF-8
R
false
false
430
r
M_rozza.rd.R
library(intrinsicDimension) ### Name: M_rozza ### Title: Manifolds from Rozza et al. (2012) ### Aliases: m14Manifold m15Manifold ### Keywords: datagen ### ** Examples datap <- m14Manifold(800) par(mfrow = c(1, 3)) plot(datap[,1], datap[,3]) plot(datap[,2], datap[,3]) plot(datap[,1], datap[,2]) datap <- m15Manifold(800) par(mfrow = c(1, 3)) plot(datap[,1], datap[,3]) plot(datap[,2], datap[,3]) plot(datap[,1], datap[,2])
01bccb36b6bafff6eaa8ea497431ab4d0f8c0ef1
030b6b645e227da9a2be3b812c7846499e3bf65a
/hai.r
662fad51300f6badbd1465c165b9f482239f16cc
[]
no_license
endft/kmmi_r
7b055db7722ad1367939a25bf5e3b6a313bd068a
0e502a94c23c6c4093012a4e58b44e295cc1a587
refs/heads/main
2023-06-30T04:45:12.580868
2021-08-09T03:31:27
2021-08-09T03:31:27
393,987,945
0
0
null
null
null
null
UTF-8
R
false
false
15
r
hai.r
teks1 ="haiiii"
ac906c1a8ada194f07d84769d9b81e641b5d3b92
b4c24634b5f5d84a23405e1339bd03b065ebc62c
/R/derivatives_basic.R
fbd16f4499b8ad73c542c3f7c4f792d95aa8eda3
[ "MIT" ]
permissive
minghao2016/diseq
0f6f41ae1d3c258d7ce3df264c2137a2857e12ec
035c7d54f3c3fbe07fbb7255bf61f6a9c565f228
refs/heads/master
2023-02-28T00:06:13.168971
2021-01-26T17:22:02
2021-01-26T17:22:02
null
0
0
null
null
null
null
UTF-8
R
false
false
13,701
r
derivatives_basic.R
#' @include system_basic.R setGeneric("partial_beta_d_of_loglh", function(object) { standardGeneric("partial_beta_d_of_loglh") }) setMethod("partial_beta_d_of_loglh", signature(object = "system_basic"), function(object) { # nolint start c(((object@supply@psi * object@rho1 / (object@demand@sigma * object@supply@sigma) + (object@demand@Psi * object@demand@h - object@demand@psi * object@rho2) / object@demand@var)) / object@lh) * object@demand@independent_matrix # nolint end }) setGeneric("partial_beta_s_of_loglh", function(object) { standardGeneric("partial_beta_s_of_loglh") }) setMethod("partial_beta_s_of_loglh", signature(object = "system_basic"), function(object) { # nolint start c(((object@demand@psi * object@rho1 / (object@demand@sigma * object@supply@sigma) + (object@supply@Psi * object@supply@h - object@supply@psi * object@rho2) / object@supply@var)) / object@lh) * object@supply@independent_matrix # nolint end }) setGeneric("partial_var_d_of_loglh", function(object) { standardGeneric("partial_var_d_of_loglh") }) setMethod("partial_var_d_of_loglh", signature(object = "system_basic"), function(object) { # nolint start c((object@demand@h * object@supply@psi * object@rho1 / (2 * object@demand@var * object@supply@sigma) + (object@demand@Psi * (object@demand@h**2 - 1) - object@demand@h * object@demand@psi * object@rho2) / (2 * object@demand@sigma**3)) / object@lh) # nolint end }) setGeneric("partial_var_s_of_loglh", function(object) { standardGeneric("partial_var_s_of_loglh") }) setMethod("partial_var_s_of_loglh", signature(object = "system_basic"), function(object) { # nolint start c((object@supply@h * object@demand@psi * object@rho1 / (2 * object@demand@sigma * object@supply@var) + (object@supply@Psi * (object@supply@h**2 - 1) - object@supply@h * object@supply@psi * object@rho2) / (2 * object@supply@sigma**3)) / object@lh) # nolint end }) setGeneric("partial_rho_of_loglh", function(object) { standardGeneric("partial_rho_of_loglh") }) 
setMethod("partial_rho_of_loglh", signature(object = "system_basic"), function(object) { # nolint start c((object@rho1**2 * (object@demand@psi * object@demand@z / object@demand@sigma + object@supply@psi * object@supply@z / object@supply@sigma)) / object@lh) # nolint end }) setGeneric("partial_beta_d_partial_beta_d_of_loglh", function(object) { standardGeneric("partial_beta_d_partial_beta_d_of_loglh") }) setMethod("partial_beta_d_partial_beta_d_of_loglh", signature(object = "system_basic"), function(object) { # nolint start (t(object@demand@independent_matrix * c(((object@supply@psi * object@rho1**2 * object@demand@sigma * object@demand@z + object@supply@sigma * (object@demand@Psi * (object@demand@h**2 - 1) - object@demand@psi * object@rho1 * (object@supply@h - object@rho1 * object@supply@z))) / (object@demand@sigma**3 * object@supply@sigma)) / object@lh)) %*% object@demand@independent_matrix) - t(partial_beta_d_of_loglh(object)) %*% partial_beta_d_of_loglh(object) # nolint end }) setGeneric("partial_beta_d_partial_beta_s_of_loglh", function(object) { standardGeneric("partial_beta_d_partial_beta_s_of_loglh") }) setMethod("partial_beta_d_partial_beta_s_of_loglh", signature(object = "system_basic"), function(object) { # nolint start (t(object@supply@independent_matrix * c((object@rho1 * (object@demand@psi * object@supply@sigma * (object@demand@h - object@rho2 * object@supply@z) + object@supply@psi * object@demand@sigma * (object@supply@h - object@rho2 * object@demand@z)) / (object@demand@var * object@supply@var)) / object@lh)) %*% object@demand@independent_matrix) - t(partial_beta_s_of_loglh(object)) %*% partial_beta_d_of_loglh(object) # nolint end }) setGeneric("partial_beta_d_partial_var_d_of_loglh", function(object) { standardGeneric("partial_beta_d_partial_var_d_of_loglh") }) setMethod("partial_beta_d_partial_var_d_of_loglh", signature(object = "system_basic"), function(object) { # nolint start (colSums(c(((object@supply@psi * object@rho1 * object@demand@sigma * 
(object@demand@h * object@rho1 * object@demand@z - 1) + object@supply@sigma * (object@demand@Psi * object@demand@h**3 - object@demand@h * (3 * object@demand@Psi + object@demand@psi * (2 * object@supply@h * object@rho1 - object@supply@z * (object@rho1**2 + 1))) + 2 * object@demand@psi * object@rho2)) / (2 * object@demand@sigma**4 * object@supply@sigma)) / object@lh) * object@demand@independent_matrix)) - t(partial_var_d_of_loglh(object)) %*% partial_beta_d_of_loglh(object) # nolint end }) setGeneric("partial_beta_d_partial_var_s_of_loglh", function(object) { standardGeneric("partial_beta_d_partial_var_s_of_loglh") }) setMethod("partial_beta_d_partial_var_s_of_loglh", signature(object = "system_basic"), function(object) { # nolint start (colSums(c((object@rho1 * (object@demand@psi * object@supply@sigma * (object@demand@h * (object@supply@h - object@rho1 * object@supply@z) + object@demand@z * object@supply@z) - object@supply@psi * object@demand@sigma * (object@demand@h * object@rho1 * object@demand@z - object@supply@h**2 - object@demand@z**2 + 1)) / (2 * object@demand@var * object@supply@sigma**3)) / object@lh) * object@demand@independent_matrix)) - t(partial_var_s_of_loglh(object)) %*% partial_beta_d_of_loglh(object) # nolint end }) setGeneric("partial_beta_d_partial_rho_of_loglh", function(object) { standardGeneric("partial_beta_d_partial_rho_of_loglh") }) setMethod("partial_beta_d_partial_rho_of_loglh", signature(object = "system_basic"), function(object) { # nolint start (colSums(c((-object@rho1**2 * (object@demand@psi * object@supply@sigma * (object@rho1 - object@demand@z * (object@demand@h - object@rho2 * object@supply@z)) - object@supply@psi * object@demand@sigma * (object@rho1 * object@demand@z * object@supply@z + object@rho2)) / (object@demand@var * object@supply@sigma)) / object@lh) * object@demand@independent_matrix)) - t(partial_rho_of_loglh(object)) %*% partial_beta_d_of_loglh(object) # nolint end }) setGeneric("partial_beta_s_partial_beta_s_of_loglh", 
function(object) { standardGeneric("partial_beta_s_partial_beta_s_of_loglh") }) setMethod("partial_beta_s_partial_beta_s_of_loglh", signature(object = "system_basic"), function(object) { # nolint start (t(object@supply@independent_matrix * c(((object@demand@psi * object@rho1**2 * object@supply@sigma * object@supply@z + object@demand@sigma * (object@supply@Psi * (object@supply@h**2 - 1) - object@supply@psi * object@rho1 * (object@demand@h - object@rho1 * object@demand@z))) / (object@demand@sigma * object@supply@sigma**3)) / object@lh)) %*% object@supply@independent_matrix) - t(partial_beta_s_of_loglh(object)) %*% partial_beta_s_of_loglh(object) # nolint end }) setGeneric("partial_beta_s_partial_var_d_of_loglh", function(object) { standardGeneric("partial_beta_s_partial_var_d_of_loglh") }) setMethod("partial_beta_s_partial_var_d_of_loglh", signature(object = "system_basic"), function(object) { # nolint start (colSums(c((object@rho1 * (object@demand@psi * object@supply@sigma * (object@demand@h**2 - object@supply@h * object@rho1 * object@supply@z + object@supply@z**2 - 1) + object@supply@psi * object@demand@sigma * (object@supply@h * (object@demand@h - object@rho1 * object@demand@z) + object@demand@z * object@supply@z)) / (2 * object@demand@sigma**3 * object@supply@var)) / object@lh) * object@supply@independent_matrix)) - t(partial_var_d_of_loglh(object)) %*% partial_beta_s_of_loglh(object) # nolint end }) setGeneric("partial_beta_s_partial_var_s_of_loglh", function(object) { standardGeneric("partial_beta_s_partial_var_s_of_loglh") }) setMethod("partial_beta_s_partial_var_s_of_loglh", signature(object = "system_basic"), function(object) { # nolint start (colSums(c(((object@demand@psi * object@rho1 * object@supply@sigma * (object@supply@h * object@rho1 * object@supply@z - 1) + object@demand@sigma * (object@supply@Psi * object@supply@h**3 - object@supply@h * (3 * object@supply@Psi + object@supply@psi * (2 * object@demand@h * object@rho1 - object@demand@z * 
(object@rho1**2 + 1))) + 2 * object@supply@psi * object@rho2)) / (2 * object@demand@sigma * object@supply@sigma**4)) / object@lh) * object@supply@independent_matrix)) - t(partial_var_s_of_loglh(object)) %*% partial_beta_s_of_loglh(object) # nolint end }) setGeneric("partial_beta_s_partial_rho_of_loglh", function(object) { standardGeneric("partial_beta_s_partial_rho_of_loglh") }) setMethod("partial_beta_s_partial_rho_of_loglh", signature(object = "system_basic"), function(object) { # nolint start (colSums(c((object@rho1**2 * (object@demand@psi * object@supply@sigma * (object@rho1 * object@demand@z * object@supply@z + object@rho2) - object@supply@psi * object@demand@sigma * (object@rho1 - object@supply@z * (object@supply@h - object@rho2 * object@demand@z))) / (object@demand@sigma * object@supply@var)) / object@lh) * object@supply@independent_matrix)) - t(partial_rho_of_loglh(object)) %*% partial_beta_s_of_loglh(object) # nolint end }) setGeneric("partial_var_d_partial_var_d_of_loglh", function(object) { standardGeneric("partial_var_d_partial_var_d_of_loglh") }) setMethod("partial_var_d_partial_var_d_of_loglh", signature(object = "system_basic"), function(object) { # nolint start (sum(((object@demand@h * object@supply@psi * object@rho1 * object@demand@sigma * (object@demand@h * object@rho1 * object@demand@z - 3) + object@supply@sigma * (object@demand@Psi * (object@demand@h**4 + 3) + object@demand@h**2 * (-6 * object@demand@Psi + object@demand@psi * object@supply@z * (object@rho1**2 + 1)) - object@demand@psi * (object@supply@h * object@rho1 * (2 * object@demand@h**2 - 3) + 3 * object@supply@z))) / (4 * object@demand@sigma**5 * object@supply@sigma)) / object@lh)) - t(partial_var_d_of_loglh(object)) %*% partial_var_d_of_loglh(object) # nolint end }) setGeneric("partial_var_d_partial_var_s_of_loglh", function(object) { standardGeneric("partial_var_d_partial_var_s_of_loglh") }) setMethod("partial_var_d_partial_var_s_of_loglh", signature(object = "system_basic"), 
function(object) { # nolint start (sum((object@rho1 * (object@supply@h * object@demand@psi * object@supply@sigma * (object@demand@h**2 - object@supply@h * object@rho1 * object@supply@z + object@supply@z**2 - 1) + object@supply@psi * object@demand@sigma * (object@demand@h * (object@supply@h**2 - 1) - object@supply@h * object@demand@z * (object@supply@h * object@rho1 - object@supply@z))) / (4 * object@demand@sigma**3 * object@supply@sigma**3)) / object@lh)) - t(partial_var_s_of_loglh(object)) %*% partial_var_d_of_loglh(object) # nolint end }) setGeneric("partial_var_d_partial_rho_of_loglh", function(object) { standardGeneric("partial_var_d_partial_rho_of_loglh") }) setMethod("partial_var_d_partial_rho_of_loglh", signature(object = "system_basic"), function(object) { # nolint start (sum((object@rho1**2 * (-object@demand@psi * object@supply@sigma * (object@rho1 * (object@demand@h + object@supply@h * object@demand@z * object@supply@z) - object@demand@z * (object@demand@h**2 + object@supply@z**2 - 1)) + object@supply@psi * object@demand@sigma * (object@rho1 * (object@demand@h * object@demand@z * object@supply@z + object@supply@h) - object@supply@z)) / (2 * object@demand@sigma**3 * object@supply@sigma)) / object@lh)) - t(partial_rho_of_loglh(object)) %*% partial_var_d_of_loglh(object) # nolint end }) setGeneric("partial_var_s_partial_var_s_of_loglh", function(object) { standardGeneric("partial_var_s_partial_var_s_of_loglh") }) setMethod("partial_var_s_partial_var_s_of_loglh", signature(object = "system_basic"), function(object) { # nolint start (sum(((object@supply@h * object@demand@psi * object@rho1 * object@supply@sigma * (object@supply@h * object@rho1 * object@supply@z - 3) + object@demand@sigma * (object@supply@Psi * (object@supply@h**4 + 3) + object@supply@h**2 * (-6 * object@supply@Psi + object@supply@psi * object@demand@z * (object@rho1**2 + 1)) - object@supply@psi * (object@demand@h * object@rho1 * (2 * object@supply@h**2 - 3) + 3 * object@demand@z))) / (4 * 
object@demand@sigma * object@supply@sigma**5)) / object@lh)) - t(partial_var_s_of_loglh(object)) %*% partial_var_s_of_loglh(object) # nolint end }) setGeneric("partial_var_s_partial_rho_of_loglh", function(object) { standardGeneric("partial_var_s_partial_rho_of_loglh") }) setMethod("partial_var_s_partial_rho_of_loglh", signature(object = "system_basic"), function(object) { # nolint start (sum((object@rho1**2 * (object@demand@psi * object@supply@sigma * (object@rho1 * (object@demand@h + object@supply@h * object@demand@z * object@supply@z) - object@demand@z) - object@supply@psi * object@demand@sigma * (object@rho1 * (object@demand@h * object@demand@z * object@supply@z + object@supply@h) - object@supply@z * (object@supply@h**2 + object@demand@z**2 - 1))) / (2 * object@demand@sigma * object@supply@sigma**3)) / object@lh)) - t(partial_rho_of_loglh(object)) %*% partial_var_s_of_loglh(object) # nolint end }) setGeneric("partial_rho_partial_rho_of_loglh", function(object) { standardGeneric("partial_rho_partial_rho_of_loglh") }) setMethod("partial_rho_partial_rho_of_loglh", signature(object = "system_basic"), function(object) { # nolint start (sum((object@rho1**3 * (object@demand@psi * object@supply@sigma * (2 * object@supply@h + object@rho1 * object@supply@z * (object@demand@z**2 - 3)) + object@supply@psi * object@demand@sigma * (2 * object@demand@h + object@rho1 * object@demand@z * (object@supply@z**2 - 3))) / (object@demand@sigma * object@supply@sigma)) / object@lh)) - t(partial_rho_of_loglh(object)) %*% partial_rho_of_loglh(object) # nolint end })
802f61fe60a78fe20cb8c95e509e0f037f89e141
ffe095c7f1411c8cc009fcf09bc2392e7f739455
/tests/testthat.R
c4afda67212410b0052f0570b85991d4bb01b060
[ "MIT" ]
permissive
atusy/swiper
da4fce37abbcf06bc3420ef4726b200fe066b389
d930167a705e3409bcb1ebca1718481c14844823
refs/heads/master
2023-06-08T08:49:25.606844
2021-07-01T16:23:54
2021-07-01T16:23:54
376,047,555
0
0
null
null
null
null
UTF-8
R
false
false
56
r
testthat.R
library(testthat) library(swiper) test_check("swiper")
e8862a8472346c49665b1688981a70961dfdc7a0
9c79f8d1e89ee5adf7b93115ccc741d3303404f1
/InteractiveMaps/Trojborg/Outs.R
08c423ecced7361684d62533c5f8a3f4ae6a5307
[]
no_license
derek-corcoran-barrios/derek-corcoran-barrios.github.io
e1631feef111cfc9bc693df1853e02818435071a
ccb8f21c053fd41559082eb58ccb7f64cc7fcf86
refs/heads/master
2023-07-17T13:11:43.739914
2023-07-03T07:24:21
2023-07-03T07:24:21
107,616,762
33
33
null
2020-06-18T19:25:50
2017-10-20T01:23:44
HTML
UTF-8
R
false
false
4,953
r
Outs.R
library(tidyverse) library(sf) library(raster) library(spThin) Trojborg_Raster <- read_rds("TrojborgRaster.rds") Trojborg_Raster <- Trojborg_Raster[[1]] %>% projectRaster(crs ="+proj=longlat +datum=WGS84 +no_defs") Trojborg <- read_sf("GroupsTrojborg.shp") %>% st_transform(crs = "+proj=longlat +datum=WGS84 +no_defs") %>% fasterize::fasterize(Trojborg_Raster, field = "Group", background = 0) Trojborg_Outline <- read_sf("GroupsTrojborg.shp") %>% st_transform(crs = "+proj=longlat +datum=WGS84 +no_defs") %>% st_union() %>% st_as_sf() Trojborg_Buff <- read_sf("GroupsTrojborg.shp") %>% st_buffer(dist = 30) %>% st_transform(crs = "+proj=longlat +datum=WGS84 +no_defs") %>% fasterize::fasterize(Trojborg_Raster, field = "Group") Trojborg_Large <- read_sf("GroupsTrojborgLarge.shp") %>% st_transform(crs = "+proj=longlat +datum=WGS84 +no_defs") %>% fasterize::fasterize(Trojborg_Raster, field = "Group") Test <- (Trojborg_Large - Trojborg) values(Test) <- ifelse(values(Test) < 1, NA, values(Test)) library(stars) Test <- stars::st_as_stars(Test) l = st_contour(Test, contour_lines = FALSE, breaks = 0:7) %>% mutate(Group = case_when(layer == "[1,2)" ~ 1, layer == "[2,3)" ~ 2, layer == "[3,4)" ~ 3, layer == "[4,5)" ~ 4, layer == "[5,6)" ~ 5, layer == "[6,7)" ~ 6)) %>% dplyr::select(Group) %>% mutate(Group = case_when(Group == 1 ~ "A1", Group == 2 ~ "A2", Group == 3 ~ "B1", Group == 4 ~ "B2", Group == 5 ~ "C1", Group == 6 ~ "C2")) Centroids <- st_read("Final_Centroids_Trojborg.shp") ggplot() + geom_sf(data = l, aes(fill = as.factor(Group))) + geom_sf(data = Centroids, aes(color = as.factor(Group))) Groups <- unique(Centroids$Group) Final_ones <- list() for(i in 1:length(Groups)){ Temp_Pol <- l %>% dplyr::filter(Group == Groups[i]) Temp_Rast <- fasterize::fasterize(Temp_Pol, Trojborg_Raster) Temp_Point <- Centroids %>% dplyr::filter(Group == Groups[i]) set.seed(i) Out_Random <- dismo::randomPoints(mask = Temp_Rast, 1000) %>% as.data.frame() Coords <- Out_Random Out_Random <- Out_Random 
%>% st_as_sf(coords = c(1,2), crs ="+proj=longlat +datum=WGS84 +no_defs") # mutate(Group = Groups[i]) Distances <- Out_Random %>% st_distance(Trojborg_Outline) %>% as.numeric() Coords$Distances <- Distances Coords <- Coords %>% dplyr::filter(Distances > 50) %>% mutate(Group = Groups[i]) NewCoords <- spThin::thin(loc.data = Coords, lat.col = "y", long.col = "x", spec.col = "Group", verbose = F, out.dir = getwd(), thin.par = 0.05, reps = 1, locs.thinned.list.return = T, write.files = F, write.log.file = F) Out_Random <- NewCoords[[1]] %>% st_as_sf(coords = c(1,2), crs ="+proj=longlat +datum=WGS84 +no_defs") %>% mutate(Group = Groups[i]) %>% tibble::rowid_to_column() Temp_Point$Distance <- st_distance(Temp_Point, Temp_Pol) %>% as.numeric() Temp_Point <- Temp_Point %>% group_by(rowid) %>% dplyr::filter(Distance == min(Distance)) Final_points <- list() for(j in 1:nrow(Temp_Point)){ To_Match <- Temp_Point[j,] Dist_To_Match <- st_distance(To_Match, Out_Random) %>% as.numeric() Matched <- Out_Random[Dist_To_Match == min(Dist_To_Match),] Out_Random <- Out_Random[Dist_To_Match != min(Dist_To_Match),] Code <- To_Match %>% separate(col = Code, into = c("Group1", "id", "treatment")) Matched <- cbind(Matched, Code) %>% mutate(Code = paste(Group, id, "D", sep = "_")) %>% dplyr::select("Group", "lat", "long", "Code") Final_points[[j]] <- Matched %>% mutate(rowid = To_Match$rowid) %>% relocate(rowid, .before = everything()) } Final_points <- Final_points %>% reduce(rbind) Final_ones[[i]] <- Final_points } Final_ones <- Final_ones %>% reduce(rbind) ggplot() + geom_sf(data = Final_points, shape=21, aes(color = "blue", fill = as.factor(rowid))) + geom_sf(data = Temp_Point, shape=21, aes(color = "red", fill = as.factor(rowid))) saveRDS(Final_ones, "Final_ones_out.Trojborg.rds") ToGPX <- Centroids %>% rbind(Final_ones) %>% arrange(Code) %>% dplyr::select(Code) %>% st_transform("+proj=longlat + ellps=WGS84") %>% as_Spatial() # use the ID field for the names ToGPX@data$name <- 
ToGPX@data$Code library(rgdal) #Now only write the "name" field to the file writeOGR(ToGPX["name"], driver="GPX", layer="waypoints", dsn="TreatmentsTrojborg.gpx") read_sf("TreatmentsTrojborg.gpx")
d75ffd79a260959cfade2c8c0e26fc0a967725ca
8633d09805e0c6cd67765865d2dd8708e400b057
/scripts/excess_deaths_script.R
fef4313808a5dd2892bb4eb907208b78748774d4
[ "MIT", "CC-BY-4.0" ]
permissive
nnutter/covid-19-excess-deaths-tracker
0f51a258841fbc8664e90181d2560f43f1c2bf59
f8933ac749fe175f3078c8a8e4ac10d7575bcea8
refs/heads/master
2022-12-14T01:20:52.751827
2020-09-09T16:04:18
2020-09-09T16:04:18
null
0
0
null
null
null
null
UTF-8
R
false
false
12,558
r
excess_deaths_script.R
# Step 1: import libraries and data --------------------------------------- # Import libraries library(tidyverse) library(readxl) library(data.table) library(lubridate) options(scipen=999) # Import data austria_weekly_deaths <- fread("output-data/historical-deaths/austria_weekly_deaths.csv") belgium_weekly_deaths <- fread("output-data/historical-deaths/belgium_weekly_deaths.csv") brazil_monthly_deaths <- fread("output-data/historical-deaths/brazil_monthly_deaths.csv") britain_weekly_deaths <- fread("output-data/historical-deaths/britain_weekly_deaths.csv") chile_weekly_deaths <- fread("output-data/historical-deaths/chile_weekly_deaths.csv") denmark_weekly_deaths <- fread("output-data/historical-deaths/denmark_weekly_deaths.csv") ecuador_monthly_deaths <- fread("output-data/historical-deaths/ecuador_monthly_deaths.csv") france_weekly_deaths <- fread("output-data/historical-deaths/france_weekly_deaths.csv") germany_weekly_deaths <- fread("output-data/historical-deaths/germany_weekly_deaths.csv") indonesia_monthly_deaths <- fread("output-data/historical-deaths/indonesia_monthly_deaths.csv") italy_weekly_deaths <- fread("output-data/historical-deaths/italy_weekly_deaths.csv") mexico_weekly_deaths <- fread("output-data/historical-deaths/mexico_weekly_deaths.csv") netherlands_weekly_deaths <- fread("output-data/historical-deaths/netherlands_weekly_deaths.csv") norway_weekly_deaths <- fread("output-data/historical-deaths/norway_weekly_deaths.csv") peru_monthly_deaths <- fread("output-data/historical-deaths/peru_monthly_deaths.csv") portugal_weekly_deaths <- fread("output-data/historical-deaths/portugal_weekly_deaths.csv") russia_monthly_deaths <- fread("output-data/historical-deaths/russia_monthly_deaths.csv") south_africa_weekly_deaths <- fread("output-data/historical-deaths/south_africa_weekly_deaths.csv") spain_weekly_deaths <- fread("output-data/historical-deaths/spain_weekly_deaths.csv") sweden_weekly_deaths <- 
fread("output-data/historical-deaths/sweden_weekly_deaths.csv") switzerland_weekly_deaths <- fread("output-data/historical-deaths/switzerland_weekly_deaths.csv") turkey_weekly_deaths <- fread("output-data/historical-deaths/turkey_weekly_deaths.csv") united_states_weekly_deaths <- fread("output-data/historical-deaths/united_states_weekly_deaths.csv") # Step 2: define function that calculates excess deaths, and apply to weekly deaths --------------------------------------- # Define function that calculates excess deaths get_excess_deaths <- function(df,frequency="weekly",calculate=TRUE){ if(frequency == "weekly" & calculate == TRUE) { # Calculate expected deaths for weekly time series expected_deaths <- df %>% dplyr::select(-expected_deaths) %>% filter(year == 2020) %>% left_join(df %>% filter(year >= 2015,year <= 2019) %>% group_by(region,week) %>% summarise(expected_deaths = mean(total_deaths,na.rm=T))) } else if(frequency == "monthly" & calculate == TRUE) { # Calculate expected deaths for monthly time series expected_deaths <- df %>% dplyr::select(-expected_deaths) %>% filter(year == 2020) %>% left_join(df %>% filter(year >= 2015,year <= 2019) %>% group_by(region,month) %>% summarise(expected_deaths = mean(total_deaths,na.rm=T))) } else { expected_deaths <- df %>% filter(year == 2020)} # Calculate excess deaths excess_deaths <- expected_deaths %>% mutate(excess_deaths = total_deaths - expected_deaths, non_covid_deaths = total_deaths - covid_deaths, region_code = as.character(region_code)) %>% mutate(covid_deaths_per_100k = covid_deaths / population * 100000, excess_deaths_per_100k = excess_deaths / population * 100000, excess_deaths_pct_change = ((expected_deaths + excess_deaths) / expected_deaths) - 1) # Calculate weekly rates for monthly data if(frequency == "monthly") { excess_deaths <- excess_deaths %>% mutate(month_days = as.numeric(difftime(end_date,start_date,units=c("days"))) + 1, total_deaths_per_7_days = total_deaths / month_days * 7, 
covid_deaths_per_7_days = covid_deaths / month_days * 7, expected_deaths_per_7_days = expected_deaths / month_days * 7, excess_deaths_per_7_days = excess_deaths / month_days * 7, non_covid_deaths_per_7_days = non_covid_deaths / month_days * 7, covid_deaths_per_100k_per_7_days = covid_deaths_per_100k / month_days * 7, excess_deaths_per_100k_per_7_days = excess_deaths_per_100k / month_days * 7) %>% dplyr::select(-month_days) } excess_deaths } # Export Austria austria_excess_deaths <- get_excess_deaths(austria_weekly_deaths) write.csv(austria_excess_deaths,"output-data/excess-deaths/austria_excess_deaths.csv", fileEncoding = "UTF-8",row.names=FALSE) # Export Belgium belgium_excess_deaths <- get_excess_deaths(belgium_weekly_deaths) write.csv(belgium_excess_deaths,"output-data/excess-deaths/belgium_excess_deaths.csv", fileEncoding = "UTF-8",row.names=FALSE) # Export Brazil brazil_excess_deaths <- get_excess_deaths(brazil_monthly_deaths,frequency="monthly") write.csv(brazil_excess_deaths,"output-data/excess-deaths/brazil_excess_deaths.csv", fileEncoding = "UTF-8",row.names=FALSE) # Export Britain britain_excess_deaths <- get_excess_deaths(britain_weekly_deaths) write.csv(britain_excess_deaths,"output-data/excess-deaths/britain_excess_deaths.csv", fileEncoding = "UTF-8",row.names=FALSE) # Export Chile chile_excess_deaths <- get_excess_deaths(chile_weekly_deaths) write.csv(chile_excess_deaths,"output-data/excess-deaths/chile_excess_deaths.csv", fileEncoding = "UTF-8",row.names=FALSE) # Export Denmark denmark_excess_deaths <- get_excess_deaths(denmark_weekly_deaths) write.csv(denmark_excess_deaths,"output-data/excess-deaths/denmark_excess_deaths.csv", fileEncoding = "UTF-8",row.names=FALSE) # Export Ecuador ecuador_excess_deaths <- get_excess_deaths(ecuador_monthly_deaths,frequency="monthly") write.csv(ecuador_excess_deaths,"output-data/excess-deaths/ecuador_excess_deaths.csv", fileEncoding = "UTF-8",row.names=FALSE) # Export France france_excess_deaths <- 
get_excess_deaths(france_weekly_deaths) write.csv(france_excess_deaths,"output-data/excess-deaths/france_excess_deaths.csv", fileEncoding = "UTF-8",row.names=FALSE) # Export Germany germany_excess_deaths <- get_excess_deaths(germany_weekly_deaths) write.csv(germany_excess_deaths,"output-data/excess-deaths/germany_excess_deaths.csv", fileEncoding = "UTF-8",row.names=FALSE) # Export Indonesia indonesia_excess_deaths <- get_excess_deaths(indonesia_monthly_deaths,frequency="monthly") write.csv(indonesia_excess_deaths,"output-data/excess-deaths/indonesia_excess_deaths.csv", fileEncoding = "UTF-8",row.names=FALSE) # Export Italy italy_excess_deaths <- get_excess_deaths(italy_weekly_deaths) write.csv(italy_excess_deaths,"output-data/excess-deaths/italy_excess_deaths.csv", fileEncoding = "UTF-8",row.names=FALSE) # Export Mexico mexico_excess_deaths <- get_excess_deaths(mexico_weekly_deaths) write.csv(mexico_excess_deaths,"output-data/excess-deaths/mexico_excess_deaths.csv", fileEncoding = "UTF-8",row.names=FALSE) # Export the Netherlands netherlands_excess_deaths <- get_excess_deaths(netherlands_weekly_deaths) write.csv(netherlands_excess_deaths,"output-data/excess-deaths/netherlands_excess_deaths.csv", fileEncoding = "UTF-8",row.names=FALSE) # Export Norway norway_excess_deaths <- get_excess_deaths(norway_weekly_deaths) write.csv(norway_excess_deaths,"output-data/excess-deaths/norway_excess_deaths.csv", fileEncoding = "UTF-8",row.names=FALSE) # Export Peru peru_excess_deaths <- get_excess_deaths(peru_monthly_deaths,frequency="monthly") write.csv(peru_excess_deaths,"output-data/excess-deaths/peru_excess_deaths.csv", fileEncoding = "UTF-8",row.names=FALSE) # Export Portugal portugal_excess_deaths <- get_excess_deaths(portugal_weekly_deaths) write.csv(portugal_excess_deaths,"output-data/excess-deaths/portugal_excess_deaths.csv", fileEncoding = "UTF-8",row.names=FALSE) # Export Russia russia_excess_deaths <- get_excess_deaths(russia_monthly_deaths,frequency="monthly") 
write.csv(russia_excess_deaths,"output-data/excess-deaths/russia_excess_deaths.csv", fileEncoding = "UTF-8",row.names=FALSE) # Export South Africa south_africa_excess_deaths <- get_excess_deaths(south_africa_weekly_deaths,calculate=FALSE) write.csv(south_africa_excess_deaths,"output-data/excess-deaths/south_africa_excess_deaths.csv", fileEncoding = "UTF-8",row.names=FALSE) # Export Spain spain_excess_deaths <- get_excess_deaths(spain_weekly_deaths,calculate=FALSE) write.csv(spain_excess_deaths,"output-data/excess-deaths/spain_excess_deaths.csv", fileEncoding = "UTF-8",row.names=FALSE) # Export Sweden sweden_excess_deaths <- get_excess_deaths(sweden_weekly_deaths) write.csv(sweden_excess_deaths,"output-data/excess-deaths/sweden_excess_deaths.csv", fileEncoding = "UTF-8",row.names=FALSE) # Export Switzerland switzerland_excess_deaths <- get_excess_deaths(switzerland_weekly_deaths) write.csv(switzerland_excess_deaths,"output-data/excess-deaths/switzerland_excess_deaths.csv", fileEncoding = "UTF-8",row.names=FALSE) # Export Turkey turkey_excess_deaths <- get_excess_deaths(turkey_weekly_deaths) write.csv(turkey_excess_deaths,"output-data/excess-deaths/turkey_excess_deaths.csv", fileEncoding = "UTF-8",row.names=FALSE) # Export the United States united_states_excess_deaths <- get_excess_deaths(united_states_weekly_deaths) write.csv(united_states_excess_deaths,"output-data/excess-deaths/united_states_excess_deaths.csv", fileEncoding = "UTF-8",row.names=FALSE) # Step 3: combine weekly and monthly deaths together, and calculate deaths per 100,000 people and percentage change --------------------------------------- # Combine weekly deaths and calculate per 100,000 people and percentage change all_weekly_excess_deaths <- bind_rows(austria_excess_deaths, belgium_excess_deaths, britain_excess_deaths, chile_excess_deaths, denmark_excess_deaths, france_excess_deaths, germany_excess_deaths, italy_excess_deaths, mexico_excess_deaths, netherlands_excess_deaths, norway_excess_deaths, 
portugal_excess_deaths, south_africa_excess_deaths, spain_excess_deaths, sweden_excess_deaths, switzerland_excess_deaths, turkey_excess_deaths, united_states_excess_deaths) %>% mutate(covid_deaths_per_100k = covid_deaths / population * 100000, excess_deaths_per_100k = excess_deaths / population * 100000, excess_deaths_pct_change = ((expected_deaths + excess_deaths) / expected_deaths) - 1) # Export weekly deaths write.csv(all_weekly_excess_deaths,"output-data/excess-deaths/all_weekly_excess_deaths.csv", fileEncoding = "UTF-8",row.names=FALSE) # Combine monthly deaths and calculate per 100,000 people and percentage change all_monthly_excess_deaths <- bind_rows(brazil_excess_deaths, ecuador_excess_deaths, indonesia_excess_deaths, peru_excess_deaths, russia_excess_deaths) %>% mutate(covid_deaths_per_100k = covid_deaths / population * 100000, excess_deaths_per_100k = excess_deaths / population * 100000, excess_deaths_pct_change = ((expected_deaths + excess_deaths) / expected_deaths) - 1) # Export monthly deaths write.csv(all_monthly_excess_deaths,"output-data/excess-deaths/all_monthly_excess_deaths.csv", fileEncoding = "UTF-8",row.names=FALSE)
40d006c456321f8ecae7ad008c7d225985cd8143
2b36bf4a6b6ec05db94f6fa23076cd27843ff747
/scripts/IRAIL_data_exploration_020117.R
01c5436066032e9d9cbf80bd3509632cebbeae7d
[]
no_license
simonkassel/IRAIL
9227f1a307221e4793df2387d7277654d6793eff
fd37b45343efd652b4cefcb2ca9f3e107b8cf336
refs/heads/master
2021-01-25T06:55:12.596187
2017-04-18T19:35:06
2017-04-18T19:35:06
80,666,443
0
0
null
null
null
null
UTF-8
R
false
false
9,530
r
IRAIL_data_exploration_020117.R
# INTRO ------------------------------------------------------------------- # Explore dataset through visualization and summary statistics # Simon Kassel # Created: 1 Feb 17 # load helper functions source("https://raw.githubusercontent.com/simonkassel/IRAIL/master/scripts/IRAIL_helper_functions_032317.R") # load packages packages(c("plyr", "dplyr", "ggplot2", "ggmap", "ggthemes", "chron", "tidyr", "reshape2")) # global options options(stringsAsFactors = TRUE) options(scipen = "999") # data stations <- read.csv("https://raw.githubusercontent.com/simonkassel/IRAIL/master/data/stations_cleaned.csv") dat <- read.csv("https://raw.githubusercontent.com/simonkassel/IRAIL/master/data/trip_data_clean.csv") md <- read.csv("https://raw.githubusercontent.com/simonkassel/IRAIL/master/data/model_variables.csv") md <- md[,-1] # MAP THE STATIONS -------------------------------------------------------- # new station variables stations$instudy <- ifelse(stations$station %in% unique(trips$to), "Y", "N") %>% factor(levels = c("Y", "N")) ggplot(stations, aes(x = instudy)) + geom_bar(stat = "count") + theme( axis.ticks = element_blank(), plot.background = element_blank(), legend.background = element_blank(), axis.title = element_text(face = "italic") ) + ylab("Count") + xlab("Does the station have a measurement?") + ggtitle("Stations in and out of the sample") # bounding box get_Belgium_basemap <- function(){ bbox <- c(min(stations$longitude), min(stations$latitude), max(stations$longitude), max(stations$latitude)) bm <- get_stamenmap(bbox = bbox, maptype = "toner-background") } # Get basemap bm <- get_googlemap(center = c(mean(stations$longitude), mean(stations$latitude)), zoom = 7, color = "bw") # Map stations in and out of training set map.stations <- ggmap(bm) + geom_point(data = stations, aes(x = longitude, y = latitude), size = .25) + theme_map() + scale_color_fivethirtyeight("Station in \nTraining Set?") + ggtitle("Belgian Train Stations") + theme( legend.position = c(.05,.85), 
legend.direction = "horizontal", plot.title = element_text(face = "bold", hjust = "0.5", size = 14)) ggsave("IRAIL_stage1_mapping_stations.pdf", map.stations, device = "pdf", width = 8.5, height = 11, units = "in") # TIME INTERVALS ---------------------------------------------------------- # cor b/w occupancy and weekend weekday.cor.jitter <- ggplot(dat, aes(x = occupancy, y = weekend)) + geom_jitter() + theme_fivethirtyeight() + xlab("ridership") + ylab("weekend day?") + ggtitle("Correlation between ridership level and weekday/weekend") + theme( axis.title = element_text(face = "italic", colour = "grey50"), plot.title = element_text(hjust = 0.5, size = 14) ) ggsave("IRAIL_stage1_weekday-weekend_ridership_jitter.pdf", weekday.cor.jitter, device = "pdf", width = 11, height = 8.5, units = "in") # trip segments by day of week trip.segments.dow <- ggmap(bm) + geom_segment(data = dat, aes(x = from.longitude, y = from.latitude, xend = to.longitude, yend = to.latitude, colour = occupancy), size = .5) + ggtitle("Train ridership by day-of-week") + theme_fivethirtyeight() + theme( axis.text = element_blank(), axis.ticks = element_blank(), axis.title = element_blank(), plot.background = element_blank(), legend.background = element_blank() ) + facet_wrap(~day_of_week, ncol = 4) ggsave("IRAIL_stage1_trip_segments_by_dow.pdf", trip.segments.dow, device = "pdf", width = 11, height = 8.5, units = "in") # trip segments by hour of day trip.segments.hour.of.day <- ggmap(bm) + geom_segment(data = dat, aes(x = from.longitude, y = from.latitude, xend = to.longitude, yend = to.latitude, colour = occupancy), size = .5) + ggtitle("Train ridership by hour of the day") + theme_fivethirtyeight() + theme( axis.text = element_blank(), axis.ticks = element_blank(), axis.title = element_blank(), plot.background = element_blank(), legend.background = element_blank() ) + facet_wrap(~hour, ncol = 6) ggsave("IRAIL_stage1_trip_segments_by_hod.pdf", trip.segments.hour.of.day, device = "pdf", width = 
11, height = 8.5, units = "in") # Bar plot of observations by day of the week day.barplot <- ggplot(dat, aes(x = day_of_week)) + geom_bar(stat = "count", fill = "grey50") + ylab("Count of samples") + ggtitle("Train usage samples by day of week") + theme( axis.title = element_text(face = "italic"), axis.title.x = element_blank(), plot.title = element_text(face = "bold", hjust = 0.5), panel.background = element_blank(), axis.ticks = element_blank() ) ggsave("IRAIL_stage1_bar_plot_samples_by_dow.pdf", day.barplot, device = "pdf", width = 11, height = 8.5, units = "in") # Observations time series daily.obs <- dat$date %>% table() %>% data.frame() colnames(daily.obs) <- c("date", "obs") daily.obs$date <- as.Date(daily.obs$date) irail.collection.timeseries <- ggplot(daily.obs, aes(x = date, y = obs)) + geom_line() + geom_point(aes(colour = wday(date, label = TRUE))) + scale_color_discrete("Day of \nthe Week") + ggtitle("IRAIL traffic data collection time series") + ylab("# of measurements") + theme( panel.background = element_rect(fill = "white"), panel.grid.minor = element_line(color = "grey90"), panel.grid.major.y = element_line(color = "grey90"), axis.title.y = element_text(face = "italic"), axis.title.x = element_blank(), plot.title = element_text(hjust = 0.5, face = "bold", size = 24)) ggsave("IRAIL_stage1_data_collection_time_series.pdf", day.barplot, device = "pdf", width = 11, height = 8.5, units = "in") ggplot(dat, aes(x = occupancy, fill = occupancy)) + geom_bar(stat = "count") + ggtitle("Dist. 
of training set occupancy levels") + xlab("") + ylab("Count") # PREDICTOR VARIABLES ----------------------------------------------------- catv <- md[, !sapply(md, is.numeric)] catv$occ_binary <- md$occ_binary catv_tidy <- melt(catv, id.vars = "occ_binary", measure.vars = names(catv)[which(names(catv) != "occ_binary")]) ggplot(catv_tidy, aes(x = as.factor(occ_binary), fill = value)) + geom_bar(position = "fill") + facet_wrap(~variable) + labs( title = "Categorical predictors" ) + xlab("Train traffice level (0=low, 1=high)") + theme_minimal() + theme( legend.position = "none", axis.text.y = element_blank() ) conv <- md[, sapply(md, is.numeric)] conv_tidy <- melt(conv, id.vars = "occ_binary", measure.vars = names(conv)[which(names(conv) != "occ_binary")]) ggplot(conv_tidy, aes(x = as.factor(occ_binary), y = value)) + geom_boxplot() + facet_wrap(~variable, scales = "free") + labs( title = "Continuous predictors" ) + xlab("Train traffic level (0=low, 1=high)") + theme_minimal() + theme( legend.position = "none", axis.text.y = element_blank() ) # NETOWRK HIERARCHY ------------------------------------------------------- ### mult_k <- ldply(c(5:13), function(x) { return(findHubs(stations, x)) }) mult_k$k <- factor(mult_k$k, levels(mult_k$k)[c(5:9, 1:4)]) ggplot(mult_k, aes(x = longitude, y = latitude, color = as.factor(groups))) + geom_point() + geom_label(data = filter(mult_k, maxcount == count), aes(label = name), size = 2) + theme_void() + facet_wrap(~k, ncol = 3) + ggtitle("Spatial Clustering of Stations, different numbers (k) of clusters") + theme( legend.position = "none", strip.text = element_text(size = 12), plot.title = element_text(hjust = 0.5) ) stations$maj_groups <- as.factor(stations$maj_groups) stations$maj_groups <- factor(stations$maj_groups, levels(stations$maj_groups)[c(2,1,3:5)]) leg_labels <- ddply(stations, ~maj_groups, summarise, name = paste0(min(count), " - ", max(count)))$name pal <- c('#c7e9b4','#7fcdbb','#41b6c4','#2c7fb8','#253494') 
ggplot(stations, aes(x = longitude, y = latitude, color = maj_groups)) + geom_point(size = 2) + theme_void() + labs(title = "Belgian Rail Hierarchy", subtitle = "# of trains to come through each station") + scale_color_manual("Number of trains", values = pal, labels = leg_labels) + geom_label(data = filter(stations, major_hub == "Y"), aes(label = name), size = 2) + theme( legend.position = "right", plot.margin = unit(c(0.5, 0.5, 0.5, 0.5), "in") ) stations$maj_groups <- stations[,c("count")] %>% dist(method = "euclidean") %>% hclust(method="ward.D") %>% cutree(5) %>% paste0("mg", .) %>% as.factor() # mapping clusters hubs <- findHubs(st, 5) for (i in c(6:13)) { temp <- findHubs(st, i) hubs <- rbind(hubs, temp) } ggplot(hubs, aes(longitude, latitude, color = as.factor(groups))) + geom_point(size = 0.5) + geom_label(data = filter(hubs, maxcount == count), aes(label = name), size = 2) + facet_wrap(~k, ncol = 3) + theme_void() + theme(legend.position = "none") sorted <- arrange(st, desc(count)) sorted$count_rank <- c(1:nrow(sorted)) maxhubs <- head(sorted) maxhubs$h <- "hubs = 5" for (i in c(6:13)) { temp <- head(sorted, i) temp$h <- paste0("hubs = ", i) maxhubs <- rbind(maxhubs, temp) } ggmap(bmc) + geom_point(data = maxhubs, aes(longitude, latitude, color = count), size = 2) + facet_wrap(~h, ncol = 3) + theme_void() + theme(legend.position = "none") ggplot(filter(hubs, maxcount == count & k == "k = 11"), aes(x = groups, y = count)) + geom_bar(stat = "identity") + facet_wrap(~k)
38b33ccb9acc2b85d68f4e24fe52ade94e2520b2
8ad3594325900e5a4715ca4405cd765bc9958158
/statistical-inference/goodness-of-fit/exercise-06.r
627e5be831eecf8a7f16f9492745c6a269e17943
[ "Apache-2.0" ]
permissive
garciparedes/r-examples
22806859c7c147a6d503f1b1223a5168b6fa9d76
0e0e18439ad859f97eafb27c5e7f77d33da28bc6
refs/heads/master
2021-01-25T16:59:16.020983
2019-05-21T10:26:27
2019-05-21T10:26:27
102,385,669
1
0
Apache-2.0
2018-05-24T07:38:21
2017-09-04T17:27:27
Jupyter Notebook
UTF-8
R
false
false
809
r
exercise-06.r
## Author: Sergio García Prado
## Title: Statistical Inference - Goodness of Fit - Exercise 06
##
## Chi-squared goodness-of-fit test for a four-cell multinomial model whose
## cell probabilities depend on a single unknown parameter p. The parameter
## is estimated by maximum likelihood before computing the Pearson statistic.

rm(list = ls())

observed <- c(442, 38, 514, 6)

# Number of cells and total sample size (parentheses auto-print the values).
(k <- length(observed)) # 4
(n <- sum(observed))    # 1000

# Model-implied cell probabilities as a function of p; the four terms sum to 1.
cell_probabilities <- function(p) {
  c(0.5 * p,
    0.5 * (1 - p),
    0.5 * p^2 + p * (1 - p),
    0.5 * (1 - p)^2)
}

# Negative multinomial log-likelihood (multinomial coefficient dropped, as it
# does not depend on p); minimised by optim() below.
neg_log_likelihood <- function(p, y) {
  -sum(y * log(cell_probabilities(p)))
}

# Box-constrained 1-D maximum-likelihood estimation of p on (0, 1); the
# bounds are pulled slightly inside the unit interval so log() stays finite.
opt <- optim(0.5, neg_log_likelihood, y = observed, hessian = TRUE,
             lower = 10e-4, upper = 1 - 10e-4, method = 'L-BFGS-B')

(p.hat <- opt$par)
# 0.912941500560347

# Expected counts under the fitted model.
expected <- cell_probabilities(p.hat) * n

# Pearson chi-squared statistic.
(Q <- sum((observed - expected)^2 / expected))
# 3.08815842598583

# Degrees of freedom: k - 1 cells minus 1 estimated parameter = 2.
(pvalue <- 1 - pchisq(Q, 2))
# 0.213508376478253
27bc7bcfbab83d8bdc522e01eb67dae55f5b41d5
c87eac12aee2d5403410e925baf8b4e5ec295475
/play_sequence_generators/test_prebuilt_lstm.R
ad1c5f68d17f2ecd92dded9df5c20b616522ccc7
[ "Apache-2.0" ]
permissive
cxd/text_dnn_experiments
9cbc7274b4c760f801d5074229be2bdfb3ef64cc
4e57ca2db4151ba3796583abd0ed3bf2feaf8356
refs/heads/master
2021-06-14T07:13:36.161787
2021-03-11T10:14:00
2021-03-11T10:14:00
158,998,082
1
0
null
null
null
null
UTF-8
R
false
false
1,153
r
test_prebuilt_lstm.R
library(keras)
library(stringr)

# Project-local helpers: init() plus the generation functions
# predict_sequence_of_length() / predict_sequence_until() used below.
source("lib/init.R")
source("lib/prepare_squad_data.R")
source("lib/read_glove.R")
source("lib/lstm_sequence_learner.R")

# Setup environment
cfg <- init(getwd())

# Path to a previously trained character-level LSTM saved in HDF5 format.
prebuilt <- "test/bri-data-01/model3.h5"

# compile=TRUE restores the optimizer/loss configuration stored with the model.
modelTest <- load_model_hdf5(prebuilt, compile=TRUE)

# Download (and cache via keras::get_file) the Nietzsche corpus, then
# lower-case the full text so seeds match the training vocabulary.
path <- get_file(
  "nietzsche.txt",
  origin = "https://s3.amazonaws.com/text-datasets/nietzsche.txt"
)
text <- tolower(readChar(path, file.info(path)$size))

# Select a text seed at random
## Note the shortcoming of this model is that it is a sequence generator purely for
## sequences of text that it has seen before. These are the discrete character sequences
## that it has trained on. It is not capable of taking a sequence of characters that it has
## not been trained on and stringing together the next sequence of possible characters.

# maxlen must equal the context-window length the model was trained with
# (assumed to be 60 here -- TODO confirm against lib/lstm_sequence_learner.R).
maxlen <- 60
start_index <- sample(1:(nchar(text) - maxlen - 1), 1)
seed_text <- str_sub(text, start_index, start_index + maxlen - 1)

# Generate continuations from the seed; outer parentheses auto-print results.
# Lower temperature => more conservative (less random) character sampling.
(prediction <- predict_sequence_of_length(modelTest, seed_text, temperature=0.5))

(prediction2 <- predict_sequence_until(modelTest, seed_text, window=60, temperature=0.6))
ce2b19b78239e96c46b52f94cd4c74e1bb220a22
510734b2e6f1fe4110177aa90e647739764b737d
/man/rename_states.Rd
c182fa72aaf765d4240b19977c15cdb03b785e0c
[]
no_license
helske/KFAS
4be85a2db7c33c9c1e7c95d66f0fa26ccdd6b764
e183590a08cce796763451a023e6714a52ce83fe
refs/heads/master
2023-03-11T03:07:50.040079
2023-02-06T15:12:09
2023-02-06T15:12:09
18,439,915
56
19
null
2016-06-11T12:28:15
2014-04-04T13:32:14
R
UTF-8
R
false
true
1,085
rd
rename_states.Rd
% Generated by roxygen2: do not edit by hand % Please edit documentation in R/rename_states.R \name{rename_states} \alias{rename_states} \title{Rename the States of SSModel Object} \usage{ rename_states(model, state_names) } \arguments{ \item{model}{Object of class SSModel} \item{state_names}{Character vector giving new names for the states.} } \value{ Original model with dimnames corresponding to states renamed. } \description{ A simple function for renaming the states of \code{\link{SSModel}} object. Note that since KFAS version 1.2.3 the auxiliary functions such as \code{\link{SSMtrend}} have argument \code{state_names} which can be used to overwrite the default state names when building the model with \code{\link{SSModel}}. } \examples{ custom_model <- SSModel(1:10 ~ -1 + SSMcustom(Z = 1, T = 1, R = 1, Q = 1, P1inf = 1), H = 1) custom_model <- rename_states(custom_model, "level") ll_model <- SSModel(1:10 ~ SSMtrend(1, Q = 1), H = 1) test_these <- c("y", "Z", "H", "T", "R", "Q", "a1", "P1", "P1inf") identical(custom_model[test_these], ll_model[test_these]) }
dcaf315d43d018e823c86dffd72da8ecef1e09c9
6ef05ff1b841edfeea7a2e54e055d03a450b7469
/R/get.adjacency.matrix.R
92168ae72a4fc991cead750a124dd0cc5984ad07
[]
no_license
cran/SIMMS
c0527ec2c154495a8dc3fb18237f47a5b808f999
7e3d61a1757bc01b3df19576814d381855388b74
refs/heads/master
2022-05-02T23:05:38.840427
2022-04-24T13:50:05
2022-04-24T13:50:05
17,693,536
0
0
null
null
null
null
UTF-8
R
false
false
2,161
r
get.adjacency.matrix.R
#' A utility function to convert tab delimited networks file into adjacency
#' matrices
#'
#' A utility function to convert tab-delimited networks file into adjacency
#' matrices
#'
#'
#' @param subnets.file A tab-delimited file containing networks. New networks
#' start with a new line with '#' at the beginning of network name and
#' subsequent lines contain a binary interaction per line
#' @return A list of adjacency matrices
#' @author Syed Haider
#' @keywords Networks
#' @examples
#'
#' subnets.file <- get.program.defaults()[["subnets.file"]];
#' all.adjacency.matrices <- get.adjacency.matrix(subnets.file);
#'
#' @export get.adjacency.matrix
get.adjacency.matrix <- function(subnets.file = NULL) {

	all.adjacency.matrices <- list();

	# file layout: '#'-prefixed lines open a new network; every other line
	# holds one tab-separated interaction (id, protein1, protein2)
	subnets <- readLines(subnets.file, ok = TRUE);

	graph.name <- "";
	vertices <- "";
	interactions <- "";

	# seq_along() instead of seq(1, length(subnets), 1): the latter errors
	# on an empty file ("wrong sign in 'by'"), seq_along() just skips the loop
	for (i in seq_along(subnets)) {

		# check if its a header line (start of a new network)
		if (grepl("^#", subnets[i])) {

			# time to process previous subgraph, if one was accumulated
			if (nchar(as.character(vertices)) > 0) {

				# make a matrix of this graph
				adjacency.matrix <- make.matrix(vertices, interactions);
				all.adjacency.matrices[[graph.name]] <- adjacency.matrix;

				# reinitialise accumulators for the next network
				vertices <- "";
				interactions <- "";
			}
			graph.name <- make.names(gsub("\t$", "", subnets[i]));
		}
		else {
			id.p1.p2 <- unlist(strsplit(subnets[i], "\t"));

			# parentheses would clash with the quoting scheme below
			id.p1.p2 <- gsub("\\(|\\)", "-", id.p1.p2, perl = TRUE);
			p1 <- paste("\"", id.p1.p2[2], "\"", sep = "");
			p2 <- paste("\"", id.p1.p2[3], "\"", sep = "");

			# add each vertex only once per subgraph; fixed = TRUE matches the
			# quoted name literally (the previous perl = TRUE treated names
			# containing regex metacharacters such as '.', '+' or '[' as
			# patterns, which could mis-match or even raise an error)
			if (!grepl(p1, vertices, fixed = TRUE)) {
				vertices <- paste(vertices, p1, sep = ",");
			}
			if (!grepl(p2, vertices, fixed = TRUE)) {
				vertices <- paste(vertices, p2, sep = ",");
			}

			interactions <- paste(interactions, ",\"", id.p1.p2[2], ":", id.p1.p2[3], "\"", sep = "");
		}
	}

	# flush the last (or only) network; guarded like the in-loop flush so an
	# empty input file returns an empty list instead of passing empty strings
	# to make.matrix()
	if (nchar(as.character(vertices)) > 0) {
		adjacency.matrix <- make.matrix(vertices, interactions);
		all.adjacency.matrices[[graph.name]] <- adjacency.matrix;
	}

	return (all.adjacency.matrices);
	}
7c9992b712d51d5f381601b43c026bcd357d4c6c
c05e0de22f5699d1c2b2921480be68c8e8b8943f
/man/tab_caption.Rd
afc368c9dce30835382afd96867ca9d83cc85577
[ "MIT" ]
permissive
rstudio/gt
36ed1a3d5d9a1717dfe71ed61e5c005bc17e0dce
c73eeceaa8494180eaf2f0ad981056c53659409b
refs/heads/master
2023-09-04T06:58:18.903630
2023-09-01T02:06:05
2023-09-01T02:06:05
126,038,547
1,812
225
NOASSERTION
2023-09-08T00:21:34
2018-03-20T15:18:51
R
UTF-8
R
false
true
2,404
rd
tab_caption.Rd
% Generated by roxygen2: do not edit by hand % Please edit documentation in R/tab_create_modify.R \name{tab_caption} \alias{tab_caption} \title{Add a table caption} \usage{ tab_caption(data, caption) } \arguments{ \item{data}{\emph{The gt table data object} \verb{obj:<gt_tbl>} // \strong{required} This is the \strong{gt} table object that is commonly created through use of the \code{\link[=gt]{gt()}} function.} \item{caption}{\emph{Table caption text} \verb{scalar<character>} // \strong{required} The table caption to use for cross-referencing in R Markdown, Quarto, or \strong{bookdown}.} } \value{ An object of class \code{gt_tbl}. } \description{ Add a caption to a \strong{gt} table, which is handled specially for a table within an R Markdown, Quarto, or \strong{bookdown} context. The addition of captions makes tables cross-referencing across the containing document. The caption location (i.e., top, bottom, margin) is handled at the document level in each of these system. } \section{Examples}{ With three columns from the \code{\link{gtcars}} dataset, let's create a \strong{gt} table. First, we'll add a header part with the \code{\link[=tab_header]{tab_header()}} function. After that, a caption is added through use of \code{tab_caption()}. \if{html}{\out{<div class="sourceCode r">}}\preformatted{gtcars |> dplyr::select(mfr, model, msrp) |> dplyr::slice(1:5) |> gt() |> tab_header( title = md("Data listing from **gtcars**"), subtitle = md("`gtcars` is an R dataset") ) |> tab_caption(caption = md("**gt** table example.")) }\if{html}{\out{</div>}} \if{html}{\out{ <img src="https://raw.githubusercontent.com/rstudio/gt/master/images/man_tab_caption_1.png" alt="This image of a table was generated from the first code example in the `tab_caption()` help file." 
style="width:100\%;"> }} } \section{Function ID}{ 2-9 } \section{Function Introduced}{ \code{v0.8.0} (November 16, 2022) } \seealso{ Other part creation/modification functions: \code{\link{tab_footnote}()}, \code{\link{tab_header}()}, \code{\link{tab_info}()}, \code{\link{tab_options}()}, \code{\link{tab_row_group}()}, \code{\link{tab_source_note}()}, \code{\link{tab_spanner_delim}()}, \code{\link{tab_spanner}()}, \code{\link{tab_stub_indent}()}, \code{\link{tab_stubhead}()}, \code{\link{tab_style_body}()}, \code{\link{tab_style}()} } \concept{part creation/modification functions}
c80fc7ec62cd2ab4453e7f7563d75269d5ea1b37
c221bac282063ef7c50923eb6ae422b81bda8af8
/GoodnessOfFit.R
db69ad564883a27503814aa42a23941c0b9a13a9
[]
no_license
daviddwlee84/StatisticInference
60481b9747371817e641244b4f80824b4223dabd
d02e3d33fd8e91a31f5bbcdd8d5bf990939981d4
refs/heads/master
2021-01-25T11:28:21.971166
2017-07-02T02:28:26
2017-07-02T02:28:26
93,928,780
2
1
null
null
null
null
UTF-8
R
false
false
536
r
GoodnessOfFit.R
# Goodness of fit
#
# Interactive driver for chi-squared goodness-of-fit tests: prompts the user
# for a significance level and a distribution/scenario via console menus,
# then dispatches to the matching routine from GoodnessOfFitFunctions.R.

# Provides GOF_MULTINOM_DIE() (and presumably the routines for the other
# distributions -- confirm against GoodnessOfFitFunctions.R).
source("GoodnessOfFitFunctions.R")

# Significance level is entered as a percentage (e.g. 5) and converted to a
# proportion before being passed to the test routine.
alpha = as.numeric(readline("Significance level(in %): "))
alpha = alpha/100

# menu() returns the 1-based index of the chosen item (0 if the user cancels).
dist <- menu(c("Multinomial Distribution", "Normal Distribution", "Poisson Distribution", "Binomial Distribution"), title="Select the distribution of the Sample Statistic")

# NOTE(review): only the multinomial branch is implemented below; choosing any
# of the other three distributions currently falls through with no action.
if(dist == 1){
  sce <- menu(c("Test if the die is fair"), title="Select the scenario of Sample Statistic")
  if(sce == 1){
    # K = number of faces/cells of the die (K-nomial test).
    K = as.numeric(readline("How many cell each die (K nomial)? "))
    GOF_MULTINOM_DIE(alpha, K)
  }
}
b1c549fbb6df3d6e66c9a825242cb9c4dc74ad90
b844fc764deff4c305d5a5499f78266f2ec817e9
/man/fitted.mylm.Rd
b19e30afabeff13c30cef2f0826c1ec953335cbf
[]
no_license
jenper/mylm
0aa7c1e7498a35dc9225e26dc2f75f35ab5a808a
785316d117b822c2edf9b63ff5d1c1e7f7902c22
refs/heads/main
2023-08-03T10:49:26.281973
2021-07-22T01:41:54
2021-07-22T01:41:54
null
0
0
null
null
null
null
UTF-8
R
false
true
332
rd
fitted.mylm.Rd
% Generated by roxygen2: do not edit by hand % Please edit documentation in R/funcs.R \name{fitted.mylm} \alias{fitted.mylm} \title{Fitted values} \usage{ \method{fitted}{mylm}(object, ...) } \arguments{ \item{object}{object of class "mylm"} \item{...}{additional arguments to be passed to methods} } \description{ Fitted values }
2fd056d93f11620a97d4d17b37a0c7a7f86d41e1
530753dfb8c6b2db7d32e1de9b69e9c04df3c501
/cachematrix.R
9635c8e3259c98683846de0ace6851e79df3ecf8
[]
no_license
khomyuk/ProgrammingAssignment2
45f3617ff88fb4d422e0dbf5447849dcd54fb10c
a8859a26b5236b7c905580322d8ca04647c9b10f
refs/heads/master
2021-01-15T11:08:17.863618
2015-12-27T23:10:12
2015-12-27T23:10:12
48,662,006
0
0
null
2015-12-27T22:00:18
2015-12-27T22:00:15
null
UTF-8
R
false
false
886
r
cachematrix.R
## makeCacheMatrix: wrap a matrix in a closure-backed object that can memoise
## its inverse. Returns a list of accessor functions:
##   set(y)    - replace the stored matrix (and drop any cached inverse)
##   get()     - return the stored matrix
##   setinv(i) - store a computed inverse in the cache
##   getinv()  - return the cached inverse, or NULL if not yet computed
makeCacheMatrix <- function(x = matrix()) {
  cached_inverse <- NULL

  set <- function(y) {
    # a new matrix invalidates any previously cached inverse
    x <<- y
    cached_inverse <<- NULL
  }
  get <- function() {
    x
  }
  setinv <- function(inv) {
    cached_inverse <<- inv
  }
  getinv <- function() {
    cached_inverse
  }

  list(
    set = set,
    get = get,
    setinv = setinv,
    getinv = getinv
  )
}

## cacheSolve: return the inverse of the matrix held by a makeCacheMatrix()
## object `x`, computing it with solve() on the first call and serving the
## cached copy on later calls. Extra arguments in `...` go through to solve().
cacheSolve <- function(x, ...) {
  ## Return a matrix that is the inverse of 'x'
  hit <- x$getinv()
  if (!is.null(hit)) {
    message("getting cached inverse")
    return(hit)
  }

  computed <- solve(x$get(), ...)
  x$setinv(computed)
  computed
}
049fb55dec8a56e5625293dbd9a742810115419c
3cb0fcdaa83cb2bc60aef905d229c00ac4440243
/R/ImmuneSpace.R
946854d4f37f169e3af928d9b276672cdc863068
[]
no_license
jfrelinger/ImmuneSpaceR
45325d9841391dd6a7992d550ab777e5bff11b15
deefe0813ffd0b101b10c5c63fd549c21443a4e9
refs/heads/master
2021-01-21T19:13:20.752212
2014-11-19T21:29:58
2014-11-19T21:29:58
null
0
0
null
null
null
null
UTF-8
R
false
false
28,650
r
ImmuneSpace.R
#'@docType package #'@title A Thin Wrapper Around ImmuneSpace. #'@description ImmuneSpaceR provides a convenient API for accessing data sets within the ImmuneSpace database. #' #'@details Uses the Rlabkey package to connect to ImmuneSpace. Implements caching, and convenient methods for accessing data sets. #' #'@name ImmuneSpaceR-package #'@aliases ImmuneSpaceR #'@author Greg Finak #'@import data.table Rlabkey methods Biobase gtools digest NULL #'@title CreateConnection #'@name CreateConnection #'@param study \code{"character"} vector naming the study. #'@description Constructor for \code{ImmuneSpaceConnection} class #'@details Instantiates and \code{ImmuneSpaceConnection} for \code{study} #'The constructor will try to take the values of the various `labkey.*` parameters from the global environment. #'If they don't exist, it will use default values. These are assigned to `options`, which are then used by the \code{ImmuneSpaceConnection} class. #'@export CreateConnection #'@return an instance of an \code{ImmuneSpaceConnection} CreateConnection = function(study=NULL, verbose = FALSE){ labkey.url.path<-try(get("labkey.url.path",.GlobalEnv),silent=TRUE) if(inherits(labkey.url.path,"try-error")){ if(is.null(study)){ stop("study cannot be NULL") } labkey.url.path<-paste0("/Studies/",study) }else if(!is.null(study)){ labkey.url.path<-file.path(dirname(labkey.url.path),study) } labkey.url.base<-try(get("labkey.url.base",.GlobalEnv),silent=TRUE) if(inherits(labkey.url.base,"try-error")) labkey.url.base<-"https://www.immunespace.org" labkey.url.base<-gsub("http:","https:",labkey.url.base) if(length(grep("^https://", labkey.url.base)) == 0) labkey.url.base <- paste0("https://", labkey.url.base) labkey.user.email<-try(get("labkey.user.email",.GlobalEnv),silent=TRUE) if(inherits(labkey.user.email,"try-error")) labkey.user.email="unknown_user at not_a_domain.com" options(labkey.url.base=labkey.url.base) options(labkey.url.path=labkey.url.path) 
options(labkey.user.email=labkey.user.email) options(ISverbose = verbose) new("ImmuneSpaceConnection") } #'@name ImmuneSpaceConnection #'@aliases ImmuneSpaceConnection-class #'@aliases ImmuneSpace #'@rdname ImmuneSpaceConnection-class #'@docType class #'@title The ImmuneSpaceConnection class #'@description Instantiate this class to access a study #'@details Uses global variables \code{labkey.url.base}, and \code{labkey.url.path}, to access a study. #'\code{labkey.url.base} should be \code{https://www.immunespace.org/}. #'\code{labkey.url.path} should be \code{/Studies/studyname}, where 'studyname' is the name of the study. #'The ImmunespaceConnection will initialize itself, and look for a \code{.netrc} file in \code{"~/"} the user's home directory. #'The \code{.netrc} file should contain a \code{machine}, \code{login}, and \code{password} entry to allow access to ImmuneSpace, #'where \code{machine} is the host name like "www.immunespace.org". #'@seealso \code{\link{ImmuneSpaceR-package}} \code{\link{ImmuneSpaceConnection_getGEMatrix}} \code{\link{ImmuneSpaceConnection_getDataset}} \code{\link{ImmuneSpaceConnection_listDatasets}} #'@exportClass ImmuneSpaceConnection #'@examples #'labkey.url.base <- "https://www.immunespace.org" #'labkey.url.path <- "/Studies/SDY269" #'labkey.user.email <- 'gfinak at fhcrc.org' #'sdy269 <- CreateConnection("SDY269") #'sdy269 #'@return An instance of an ImmuneSpaceConnection for a study in `labkey.url.path` setRefClass(Class = "ImmuneSpaceConnection", fields = list(study = "character", config="list", available_datasets = "data.table", data_cache="list",constants="list"), methods=list( initialize=function(){ constants<<-list(matrices="GE_matrices",matrix_inputs="GE_inputs") .AutoConfig() gematrices_success<-try(.GeneExpressionMatrices(),silent=TRUE) geinputs_success<-try(.GeneExpressionInputs(),silent=TRUE) if(inherits(gematrices_success,"try-error")){ message("No gene expression data") } }, .AutoConfig=function(){ #should use options 
labkey.url.base<-getOption("labkey.url.base") labkey.url.path<-getOption("labkey.url.path") labkey.user.email<-getOption("labkey.user.email") verbose <- getOption("ISverbose") if(gsub("https://", "", labkey.url.base) == "www.immunespace.org"){ curlOptions <- labkey.setCurlOptions(ssl.verifyhost = 2, ssl.cipher.list="ALL") } else{ curlOptions <- labkey.setCurlOptions(ssl.verifyhost = 2, sslversion=1) } study<<-basename(labkey.url.path) config<<-list(labkey.url.base=labkey.url.base, labkey.url.path=labkey.url.path, labkey.user.email=labkey.user.email, curlOptions = curlOptions, verbose = verbose) .getAvailableDataSets(); }, show=function(){ cat(sprintf("Immunespace Connection to study %s\n",study)) cat(sprintf("URL: %s\n",file.path(gsub("/$","",config$labkey.url.base),gsub("^/","",config$labkey.url.path)))) cat(sprintf("User: %s\n",config$labkey.user.email)) cat("Available datasets\n") for(i in 1:nrow(available_datasets)){ cat(sprintf("\t%s\n",available_datasets[i,Name])) } if(!is.null(data_cache[[constants$matrices]])){ cat("Expression Matrices\n") for(i in 1:nrow(data_cache[[constants$matrices]])){ cat(sprintf("%s\n",data_cache[[constants$matrices]][i,"name"])) } } }, # There is something odd with Rlabkey::labkey.getFolders (permissions set to 0) .checkStudy = function(verbose = FALSE){ browser() if(length(available_datasets)==0){ validStudies <- mixedsort(grep("^SDY", basename(lsFolders(getSession(config$labkey.url.base, "Studies"))), value = TRUE)) req_study <- basename(config$labkey.url.path) if(!req_study %in% validStudies){ if(!verbose){ stop(paste0(req_study, " is not a valid study")) } else{ stop(paste0(req_study, " is not a valid study\nValid studies: ", paste(validStudies, collapse=", "))) } } } }, .getAvailableDataSets=function(){ if(length(available_datasets)==0){ dataset_filter <- makeFilter(c("showbydefault", "EQUAL", TRUE)) available_datasets<<-data.table(labkey.selectRows(baseUrl = config$labkey.url.base,config$labkey.url.path,schemaName = 
"study",queryName = "DataSets", colFilter = dataset_filter))[,list(Label,Name,Description,`Key Property Name`)] } }, getDataset=function(x, original_view = FALSE, reload=FALSE, ...){ if(nrow(available_datasets[Name%in%x])==0){ stop(sprintf("Invalid data set: %s",x)) }else{ hash_key = digest(c(x,original_view)) if(!is.null(data_cache[[hash_key]])&!reload){ data_cache[[hash_key]] }else{ viewName <- NULL if(original_view){ viewName <- "full" } data_cache[[hash_key]] <<- data.table(labkey.selectRows(baseUrl = config$labkey.url.base,config$labkey.url.path,schemaName = "study", queryName = x, viewName = viewName, colNameOpt = "fieldname", ...)) setnames(data_cache[[hash_key]],.munge(colnames(data_cache[[hash_key]]))) data_cache[[hash_key]] } } }, listDatasets=function(){ for(i in 1:nrow(available_datasets)){ cat(sprintf("\t%s\n",available_datasets[i,Name])) } if(!is.null(data_cache[[constants$matrices]])){ cat("Expression Matrices\n") for(i in 1:nrow(data_cache[[constants$matrices]])){ cat(sprintf("%s\n",data_cache[[constants$matrices]][i,"name"])) } } }, .munge=function(x){ tolower(gsub(" ","_",basename(x))) }, .GeneExpressionInputs=function(){ if(!is.null(data_cache[[constants$matrix_inputs]])){ data_cache[[constants$matrix_inputs]] }else{ ge<-labkey.selectRows(baseUrl = config$labkey.url.base,config$labkey.url.path,schemaName = "assay.ExpressionMatrix.matrix",queryName = "InputSamples",colNameOpt = "fieldname",viewName = "gene_expression_matrices",showHidden=TRUE) setnames(ge,.munge(colnames(ge))) data_cache[[constants$matrix_inputs]]<<-ge } }, .GeneExpressionFeatures=function(matrix_name,summary=FALSE){ if(!any((data_cache[[constants$matrices]][,"name"]%in%matrix_name))){ stop("Invalid gene expression matrix name"); } annotation_set_id<-.getFeatureId(matrix_name) #.lksession <- list() #.lksession[["curlOptions"]] <- config$curlOptions #.lksession[["curlOptions"]]$httpauth <- 1L #print(.lksession[["curlOptions"]]) 
if(is.null(data_cache[[.mungeFeatureId(annotation_set_id)]])){ if(!summary){ message("Downloading Features..") featureAnnotationSetQuery=sprintf("SELECT * from FeatureAnnotation where FeatureAnnotationSetId='%s';",annotation_set_id); features<-labkey.executeSql(config$labkey.url.base,config$labkey.url.path,schemaName = "Microarray",sql = featureAnnotationSetQuery ,colNameOpt = "fieldname") }else{ features<-data.frame(FeatureId=con$data_cache[[matrix_name]][,gene_symbol],GeneSymbol=con$data_cache[[matrix_name]][,gene_symbol]) } data_cache[[.mungeFeatureId(annotation_set_id)]]<<-features } }, .GeneExpressionMatrices=function(){ if(!is.null(data_cache[[constants$matrices]])){ data_cache[[constants$matrices]] }else{ ge<-labkey.selectRows(baseUrl = config$labkey.url.base,config$labkey.url.path,schemaName = "assay.ExpressionMatrix.matrix",queryName = "Runs",colNameOpt = "fieldname",showHidden = TRUE, viewName = "expression_matrices") setnames(ge,.munge(colnames(ge))) data_cache[[constants$matrices]]<<-ge } }, .downloadMatrix=function(x, summary = FALSE){ if(is.null(data_cache[[x]])){ if(nrow(subset(data_cache[[constants$matrices]],name%in%x))==0){ stop(sprintf("No matrix %s in study\n",x)) } summary <- ifelse(summary, ".summary", "") #link<-URLdecode(file.path(gsub("www.","",gsub("http:","https:",gsub("/$","",config$labkey.url.base))), paste0(gsub("^/","",subset(data_cache[[constants$matrices]],name%in%x)[,"downloadlink"]),summary))) #shouldn't be removing the www reported by labkey. 
Fix your netrc entry instead link<-URLdecode(file.path(gsub("http:","https:",gsub("/$","",config$labkey.url.base)), "_webdav", gsub("^/","",config$labkey.url.path), "@files/analysis/exprs_matrices", paste0(x, ".tsv", summary))) localpath<-.localStudyPath(link) if(.isRunningLocally(localpath)){ fl<-localpath message("Reading local matrix") data_cache[[x]]<<-fread(fl,header=TRUE) }else{ opts <- config$curlOptions opts$netrc <- 1L opts$httpauth <- 1L handle<-getCurlHandle(.opts=opts) h<-basicTextGatherer() message("Downloading matrix..") curlPerform(url=link,curl=handle,writefunction=h$update) fl<-tempfile() write(h$value(),file=fl) EM <- fread(fl,header=TRUE) if(nrow(EM) == 0){ stop("The downloaded matrix has 0 rows. Something went wrong") } data_cache[[x]] <<-EM file.remove(fl) } }else{ data_cache[[x]] } }, getGEMatrix=function(x, summary = FALSE){ if(x%in%names(data_cache)){ data_cache[[x]] }else{ .downloadMatrix(x, summary) .GeneExpressionFeatures(x,summary) .ConstructExpressionSet(x, summary) data_cache[[x]] } }, .ConstructExpressionSet=function(matrix_name, summary){ #matrix message("Constructing ExpressionSet") matrix<-data_cache[[matrix_name]] #features features<-data_cache[[.mungeFeatureId(.getFeatureId(matrix_name))]][,c("FeatureId","GeneSymbol")] #inputs pheno<-unique(subset(data_cache[[constants$matrix_inputs]],biosample_accession%in%colnames(matrix))[,c("biosample_accession","subject_accession","arm_name","study_time_collected")]) if(summary){ fdata <- data.frame(FeatureId = matrix$gene_symbol, gene_symbol = matrix$gene_symbol, row.names = matrix$gene_symbol) fdata <- AnnotatedDataFrame(fdata) } else{ try(setnames(matrix," ","FeatureId"),silent=TRUE) setkey(matrix,FeatureId) rownames(features)<-features$FeatureId features<-features[matrix$FeatureId,]#order feature info fdata <- AnnotatedDataFrame(features) } rownames(pheno)<-pheno$biosample_accession pheno<-pheno[colnames(matrix)[-1L],] ad_pheno<-AnnotatedDataFrame(data=pheno) 
es<-ExpressionSet(assayData=as.matrix(matrix[,-1L,with=FALSE]),phenoData=ad_pheno,featureData=fdata) data_cache[[matrix_name]]<<-es }, .getFeatureId=function(matrix_name){ subset(data_cache[[constants$matrices]],name%in%matrix_name)[,"featureset"] }, .mungeFeatureId=function(annotation_set_id){ return(sprintf("featureset_%s",annotation_set_id)) }, .isRunningLocally=function(path){ file.exists(path) }, .localStudyPath=function(urlpath){ LOCALPATH<-"/shared/silo_researcher/Gottardo_R/immunespace" PRODUCTION_HOST<-"www.immunespace.org" STAGING_HOST<-"posey.fhcrc.org" TEST_HOST<-"test.immunespace.org" PRODUCTION_PATH<-"production/files" STAGING_PATH<-"staging/files" if(grepl(PRODUCTION_HOST,urlpath)){ PROCESS<-PRODUCTION_PATH }else if(grepl(STAGING_HOST,urlpath)){ PROCESS<-STAGING_PATH }else if(grepl(TEST_HOST,urlpath)){ LOCALPATH <- "/share/files" PROCESS <- "" }else{ stop("Can't determine if we are running on immunespace (production) or posey (staging)") } gsub(file.path(gsub("/$","",config$labkey.url.base), "_webdav"), file.path(LOCALPATH,PROCESS), urlpath) }, listGEAnalysis = function(){ GEA <- labkey.selectRows(config$labkey.url.base, config$labkey.url.path, "gene_expression", "gene_expression_analysis", colNameOpt = "rname") print(GEA) }, getGEAnalysis = function(analysis_accession){ "Get gene expression analysis resluts from a connection" if(missing(analysis_accession)){ stop("Missing analysis_accession argument. 
Use listGEAnalysis to get a list of available analysis_accession numbers") } AA_filter <- makeFilter(c("analysis_accession", "IN", analysis_accession)) GEAR <- labkey.selectRows(config$labkey.url.base, config$labkey.url.path, "gene_expression", "gene_expression_analysis_results", colFilter = AA_filter) colnames(GEAR) <- .munge(colnames(GEAR)) return(GEAR) }, clear_cache = function(){ data_cache[grep("^GE", names(data_cache), invert = TRUE)] <<- NULL }, .qpHeatmap = function(dt, normalize_to_baseline, legend, text_size){ contrast <- "study_time_collected" annoCols <- c("name", "subject_accession", contrast, "Gender", "Age", "Race") palette <- ISpalette(20) expr <- parse(text = paste0(contrast, ":=as.factor(", contrast, ")")) dt <- dt[, eval(expr)] #No need to order by legend. This should be done after. if(!is.null(legend)){ dt <- dt[order(name, study_time_collected, get(legend))] } else{ dt <- dt[order(name, study_time_collected)] } form <- as.formula(paste("analyte ~ name +", contrast, "+ subject_accession")) mat <- acast(data = dt, formula = form, value.var = "response") #drop = FALSE yields NAs if(ncol(mat) > 2 & nrow(mat) > 1){ mat <- mat[rowSums(apply(mat, 2, is.na)) < ncol(mat),, drop = FALSE] } # Annotations: anno <- data.frame(unique(dt[, annoCols, with = FALSE])) rownames(anno) <- paste(anno$name, anno[, contrast], anno$subject_accession, sep = "_") expr <- parse(text = c(rev(legend), contrast, "name")) anno <- anno[with(anno, order(eval(expr))),] anno <- anno[, c(rev(legend), contrast, "name")] #Select and order the annotation rows anno[, contrast] <- as.factor(anno[, contrast]) anno_color <- colorpanel(n = length(levels(anno[,contrast])), low = "white", high = "black") names(anno_color) <- levels(anno[, contrast]) anno_color <- list(anno_color) if(contrast == "study_time_collected"){ setnames(anno, c("name", contrast), c("Arm Name", "Time")) contrast <- "Time" } names(anno_color) <- contrast if("Age" %in% legend){ anno_color$Age <- c("yellow", "red") } 
mat <- mat[, rownames(anno), drop = FALSE] # pheatmap parameters if(normalize_to_baseline){ scale <- "none" max <- max(abs(mat), na.rm = TRUE) breaks <- seq(-max, max, length.out = length(palette)) } else{ scale <- "row" breaks <- NA } show_rnames <- ifelse(nrow(mat) < 50, TRUE, FALSE) cluster_rows <- ifelse(nrow(mat) > 2 & ncol(mat) > 2, TRUE, FALSE) e <- try({ p <- pheatmap(mat = mat, annotation = anno, show_colnames = FALSE, show_rownames = show_rnames, cluster_cols = FALSE, cluster_rows = cluster_rows, color = palette, scale = scale, breaks = breaks, fontsize = text_size, annotation_color = anno_color) }) if(inherits(e, "try-error")){ p <- pheatmap(mat = mat, annotation = anno, show_colnames = FALSE, show_rownames = show_rnames, cluster_cols = FALSE, cluster_rows = FALSE, color = palette, scale = scale, breaks = breaks, fontsize = text_size, annotation_color = anno_color) } return(p) }, quick_plot = function(dataset, normalize_to_baseline = TRUE, type = "auto", filter = NULL, facet = "grid", text_size = 15, legend = NULL, ...){ ggthemr("solarized") addPar <- c("Gender", "Age", "Race") annoCols <- c("name", "subject_accession", "study_time_collected", addPar) toKeep <- c("response", "analyte", annoCols) logT <- TRUE #By default, log transform the value_reported message_out <- "" extras <- list(...) e <- try({ dt <- con$getDataset(dataset, reload = TRUE, colFilter = filter) setnames(dt, c("gender", "age_reported", "race"), addPar) if(!"analyte" %in% colnames(dt)){ if("analyte_name" %in% colnames(dt)){ dt <- dt[, analyte := analyte_name] } else{ dt <- dt[, analyte := ""] } } if(type == "auto"){ if(length(unique(dt$analyte)) < 10){ type <- "boxplot" } else{ type <- "heatmap" } } # Datasets if(dataset == "elispot"){ dt <- dt[, value_reported := (spot_number_reported) / cell_number_reported] } else if(dataset == "pcr"){ if(all(is.na(dt[, threshold_cycles]))){ stop("PCR results cannot be displayed for studies that do not use threshold cycles. 
Use LabKey Quick Chart interface to plot this dataset.") } dt <- dt[, value_reported := threshold_cycles] dt <- dt[, analyte := entrez_gene_id] logT <- FALSE #Threshold cycle is already log transformed } else if(dataset == "mbaa"){ if(all(dt$concentration_value ==0) || all(is.na(dt$concentration_value))){ if(any(!is.na(dt$mfi)) && any(dt$mfi != 0)){ dt <- dt[, value_reported := as.numeric(mfi)] }else{ stop("Plotting MBAA requires either concentration or MFI values") } } else{ dt <- dt[, value_reported := as.numeric(concentration_value)] } } dt <- dt[, response := ifelse(value_reported <0, 0, value_reported)] if(logT){ dt <- dt[, response := mean(log2(response+1), na.rm = TRUE), by = "name,subject_accession,analyte,study_time_collected"] } else{ dt <- dt[, response := mean(response, na.rm = TRUE), by = "name,subject_accession,analyte,study_time_collected"] } dt <- unique(dt[, toKeep, with = FALSE]) if(normalize_to_baseline){ dt <- dt[,response:=response-response[study_time_collected==0], by="name,subject_accession,analyte"][study_time_collected!=0] ylab <- "Response normalized to baseline" } else{ ylab <- "Response (log2)" } }) if(inherits(e, "try-error")){ type <- "error" error_string <- attr(e, "condition")$message } # Plot if(facet == "grid"){ facet <- facet_grid(aes(analyte, name), scales = "free") } else if(facet == "wrap"){ facet <- facet_wrap(~name + analyte, scales = "free") } if(type == "heatmap"){ p <- .qpHeatmap(dt, normalize_to_baseline, legend, text_size) } else if(type == "boxplot"){ p <- ggplot(data = dt, aes(as.factor(study_time_collected), response)) + geom_boxplot(outlier.size = 0) + xlab("Time") + ylab(ylab) + facet + theme(text = element_text(size = text_size), axis.text.x = element_text(angle = 45)) if(!is.null(extras[["size"]])){ p <- p + geom_jitter(aes_string(...)) } else{ p <- p + geom_jitter(size = 3, aes_string(...)) } print(p) } else if(type == "line"){ p <- ggplot(data = dt, aes(study_time_collected, response, group = subject_accession)) 
+ geom_line(aes_string(...)) + xlab("Time") + ylab(ylab) + facet + theme(text = element_text(size = text_size), axis.text.x = element_text(angle = 45)) if(!is.null(extras[["size"]])){ p <- p + geom_point(aes_string(...)) } else{ p <- p + geom_point(size = 3, aes_string(...)) } print(p) } else{#} if(type == "error"){ data <- data.frame(x = 0, y = 0, err = error_string) p <- ggplot(data = data) + geom_text(aes(x, y, label = err), size = text_size) print(p) } return(message_out) } )) #'@title get Gene Expression Matrix #'@aliases getGEMatrix #'@param x \code{"character"} name of the Gene Expression Matrix #'@details Returns an `ExpressionSet` from the matrix named 'x', downloads it if it is not already cached. #'@return an \code{ExpressionSet} #'@name ImmuneSpaceConnection_getGEMatrix #'@examples #'labkey.url.base="https://www.immunespace.org" #'labkey.url.path="/Studies/SDY269" #'labkey.user.email='gfinak at fhcrc.org' #'sdy269<-CreateConnection("SDY269") #'sdy269$getGEMatrix("TIV_2008") NULL #'@title get a dataset #'@aliases getDataset #'@param x A \code{character}. The name of the dataset #'@param original_view A \code{logical}. If set to TRUE, download the ImmPort view. Else, #' download the default grid view. #'@param reload A \code{logical}. Clear the cache. If set to TRUE, download the #' dataset, whether a cached versio exist or not. #'@details Returns the dataset named 'x', downloads it if it is not already cached. #'@return a \code{data.table} #'@name ImmuneSpaceConnection_getDataset #'@examples #'labkey.url.base="https://www.immunespace.org" #'labkey.url.path="/Studies/SDY269" #'labkey.user.email='gfinak at fhcrc.org' #'sdy269<-CreateConnection("SDY269") #'sdy269$getDataset("hai") NULL #'@title list available datasets #'@aliases listDatasets #'@details Prints the names of the available datasets #'@return Doesn't return anything, just prints to console. 
#'@name ImmuneSpaceConnection_listDatasets #'@examples #'labkey.url.base="https://www.immunespace.org" #'labkey.url.path="/Studies/SDY269" #'labkey.user.email='gfinak at fhcrc.org' #'sdy269<-CreateConnection("SDY269") #'sdy269$listDatasets() NULL #'@title list available gene expression analysis #'@aliases listGEAnalysis #'@details Prints the table of differential expression analysis #'@return A \code{data.frame}. The list of gene expression analysis. #'@name ImmuneSpaceConnection_listGEAnalysis #'@examples #'labkey.url.base="https://www.immunespace.org" #'labkey.url.path="/Studies/SDY269" #'labkey.user.email='gfinak at fhcrc.org' #'sdy269<-CreateConnection("SDY269") #'sdy269$listGEAnalysis() NULL
9877c4ee0f911e1f708d069ab0dc8db6e9c5515e
ffdea92d4315e4363dd4ae673a1a6adf82a761b5
/data/genthat_extracted_code/rhnerm/examples/cmseRHNERM.Rd.R
28ee8deb135c6ada587bd81229cc12899b5455b7
[]
no_license
surayaaramli/typeRrh
d257ac8905c49123f4ccd4e377ee3dfc84d1636c
66e6996f31961bc8b9aafe1a6a6098327b66bf71
refs/heads/master
2023-05-05T04:05:31.617869
2019-04-25T22:10:06
2019-04-25T22:10:06
null
0
0
null
null
null
null
UTF-8
R
false
false
688
r
cmseRHNERM.Rd.R
library(rhnerm)

### Name: cmseRHNERM
### Title: Conditional mean squared error estimation of the empirical Bayes
###   estimators under random heteroscedastic nested error regression
###   models
### Aliases: cmseRHNERM

### ** Examples

# Simulate data from a random heteroscedastic nested error regression model
# (m areas, ni observations each) and estimate the conditional MSE of the
# empirical Bayes estimators.
set.seed(1234)

# Fixed effects, random-effect scale, and inverse-gamma shape/rate parameters
# controlling the heteroscedastic area-level variances.
beta <- c(1, 1)
la <- 1
tau <- c(8, 4)

# Design: m areas with ni observations per area, N observations in total.
m <- 20
ni <- rep(3, m)
N <- sum(ni)
X <- cbind(rep(1, N), rnorm(N))
mu <- beta[1] + beta[2] * X[, 2]

# Area-level variances and the random effects drawn conditional on them.
sig <- 1 / rgamma(m, tau[1] / 2, tau[2] / 2)
v <- rnorm(m, 0, sqrt(la * sig))

# Build the response area by area.  Preallocate `y` instead of growing it
# with repeated assignment; the RNG call sequence is unchanged.
y <- numeric(N)
cum <- c(0, cumsum(ni))
for (i in seq_len(m)) {
  term <- (cum[i] + 1):cum[i + 1]
  y[term] <- mu[term] + v[i] + rnorm(ni[i], 0, sqrt(sig[i]))
}

# Fit the random heteroscedastic nested error regression model and compute
# the conditional MSE with B = 10 bootstrap replicates (small for speed).
C <- cbind(rep(1, m), rnorm(m))
cmse <- cmseRHNERM(y, X, ni, C, B = 10)
cmse
c37675c91c6ce937c750378a3b3c81a02eaed72b
87a5d63aa52e25dfb121b4283c6dc935a6fa4c87
/R_Cointegration Case/S_ECM_wADF.R
017fbf722c1e08d96e8e7fb825bf8318d59f5592
[]
no_license
AnthonyGachuru/cqf-1
fca9834a95bf24d1753fedb4813390d70c630a4c
8d66227755dff201b671a25cf45408ce7527bcfb
refs/heads/master
2021-05-21T22:07:47.254035
2019-10-29T06:59:41
2019-10-29T06:59:41
null
0
0
null
null
null
null
UTF-8
R
false
false
2,320
r
S_ECM_wADF.R
######################################################################
# 2015. Richard Diamond. Queries to r.diamond@cqf.com                #
# Models are specified and validated but any use is at your own risk #
######################################################################

# ECM IMPLEMENTATION (two variables, Engle-Granger two-step method).
# NOTE(review): this script assumes `curve2.this` (an object with yield
# columns X10 and X25) and ur.df() from the urca package are provided by a
# driver script -- confirm upstream before running stand-alone.

# ADF TEST ON SERIES
# type = "drift" refers to Delta Y = constant + ...; "trend" refers to
# Delta Y = beta*t + ... -- it increases the critical values but overfits
# time dependence.
adf.test = ur.df(curve2.this$X10, type = "drift")
print(summary(adf.test))
adf.test = ur.df(curve2.this$X25, type = "drift")
print(summary(adf.test))

# NAIVE COINTEGRATING EQUATION: levels regression of the 10Y tenor on the
# 25Y tenor; its residual is the candidate equilibrium-correction term.
coint.reg = lm(curve2.this$X10 ~ curve2.this$X25)
print(summary(coint.reg))

# CADF TEST ON RESIDUAL
# Called CADF because the ADF test is applied to the cointegrating residual.
cadf.test = ur.df(residuals(coint.reg), type = "none")
print(summary(cadf.test))

# ECM PARAMETERS ESTIMATION (one-way): regress 10Y differences on 25Y
# differences plus the lagged equilibrium-correction term; "+ 0" drops the
# intercept.
# NOTE(review): stats::lag() only shifts the time index of a ts object -- if
# `residuals(coint.reg)` is a plain numeric vector, lm() will NOT see an
# actually lagged/realigned regressor.  Verify the class of `curve2.this`.
tenorY.diff = diff(curve2.this$X10)
#tenorY.diff = tenorY.diff - mean(tenorY.diff) # however, mean is very small
tenorX.diff = diff(curve2.this$X25)
eq_corr.lag = lag(residuals(coint.reg), k = -1)
ecm.reg = lm(tenorY.diff ~ tenorX.diff + eq_corr.lag + 0)
print(summary(ecm.reg))

# ECM with Delta Y_{t-1} as an extra regressor -- kept commented out because
# that variable comes out not significant by its t statistic.
#ecm.reg = lm(tenorY.diff[ time(tenorY.diff) != as.Date("2013-05-31")] ~ lag(tenorY.diff, k = -1) + tenorX.diff[ time(tenorX.diff) != as.Date("2013-05-31")] + eq_corr.lag[ time(eq_corr.lag) != as.Date("2013-05-31")] + 0)
#print(summary(ecm.reg))

# To check the relationship 'the other way' (r_25Y on r_10Y) we recompute the
# residual eq-correction term.  This saves time on deciding which direction is
# 'better' and which variable is leading (two things in one).
cointO.reg = lm(curve2.this$X25 ~ curve2.this$X10)
eq_corrO.lag = lag(residuals(cointO.reg), k = -1)
# The step of testing this residual with CADF is omitted here; the test result
# is given on Case - Extra Slides.
ecmO.reg = lm(tenorX.diff ~ tenorY.diff + eq_corrO.lag + 0)
print(summary(ecmO.reg))

# LINEAR REGRESSION ON DIFFERENCES (for comparison) -- the regression in
# differences gives the minimum-variance hedge ratio.
simple.reg = lm(diff(curve2.this$X10) ~ diff(curve2.this$X25) + 0) # + 0 means no cash holdings
print(summary(simple.reg))
3667fa6be414754330b9a087e7808b68d0c40b71
00c98a4502e7a0670813325a408e16d1c7da4139
/man/dorem_no_link_func.Rd
708f948a4482f29d7caf6719a71070f05627e5e7
[ "MIT" ]
permissive
mladenjovanovic/dorem
7f192c94080d511bef5a2244205a7a4e06198de2
573377ac7740b8e5190bf92d5f023bf06e8cc277
refs/heads/master
2023-04-11T07:09:31.504879
2022-07-18T19:03:29
2022-07-18T19:03:29
256,017,605
7
3
MIT
2020-08-30T17:04:29
2020-04-15T19:32:52
R
UTF-8
R
false
true
365
rd
dorem_no_link_func.Rd
% Generated by roxygen2: do not edit by hand % Please edit documentation in R/dorem-control.R \name{dorem_no_link_func} \alias{dorem_no_link_func} \title{Default link function} \usage{ dorem_no_link_func(x) } \arguments{ \item{x}{Numeric vector} } \value{ Numeric vector } \description{ By default there is no link function. } \examples{ dorem_no_link_func(1:10) }
1969ba4540326c5579608bd503a131f3c1b9d227
4d77b035d6cbb2b2ba6111b63298f87f3279d778
/run_analysis.R
55e7cad356754fa1532c36d6d1e56b698b3ae0cb
[]
no_license
xnoamix/GettingAndCleaningDataCourseProject
799af7e73dd55e094b618405082b84ab623eadec
575421e36d66d447ca9e202dedb746d924943bc2
refs/heads/master
2021-01-17T09:31:54.078234
2014-11-23T13:53:32
2014-11-23T13:53:32
null
0
0
null
null
null
null
UTF-8
R
false
false
2,019
r
run_analysis.R
library(dplyr)

unzip("getdata_projectfiles_UCI HAR Dataset.zip")

## Load the feature names, activity labels, and the raw training/test files
## belonging to the UCI HAR dataset.
features <- read.table("UCI HAR Dataset/features.txt")
test_data <- read.table("UCI HAR Dataset/test/X_test.txt")
subject_test <- read.table("UCI HAR Dataset/test/subject_test.txt")
activity_test <- read.table("UCI HAR Dataset/test/y_test.txt")
subject_train <- read.table("UCI HAR Dataset/train/subject_train.txt")
activity_train <- read.table("UCI HAR Dataset/train/y_train.txt")
train_data <- read.table("UCI HAR Dataset/train/X_train.txt")
activity_labels <- read.table("UCI HAR Dataset/activity_labels.txt")

## First, merge activity labels, subjects and datasets from training and test
## separately; then extract from the datasets only the columns that hold a
## mean() or std() measurement; only then bind all together into "all_merged".
activity_merged <- rbind(activity_train, activity_test)
subject_merged <- rbind(subject_train, subject_test)
merged <- rbind(train_data, test_data)
cname <- features[, 2]
rel_col <- grepl("mean\\(\\)|std\\(\\)", cname)
all_merged <- cbind(subject_merged, activity_merged, merged[, rel_col])

## Set clean, descriptive column names and replace activity codes with labels.
## Mapping through factor(levels, labels) pairs each code with its label
## exactly, unlike cut(breaks = 6) which depends on the codes being evenly
## spread over the range.
col_names <- c("Subject", "Activity", as.character(cname[rel_col]))
colnames(all_merged) <- col_names
all_merged$Activity <- factor(all_merged$Activity,
                              levels = activity_labels[, 1],
                              labels = activity_labels[, 2])
clean_col <- gsub("_|\\(|\\)|,|-", "", colnames(all_merged))
clean_col <- gsub("^t", "Time", clean_col)
clean_col <- gsub("^f", "Freq", clean_col)
clean_col <- gsub("BodyBody", "Body", clean_col)
clean_col <- gsub("std", "Std", clean_col)
clean_col <- gsub("mean", "Mean", clean_col)
clean_col <- make.names(clean_col, unique = TRUE)
colnames(all_merged) <- clean_col

## Create another data frame holding the average of each variable per subject
## and activity.  summarise_each(funs(mean)) is deprecated in dplyr;
## summarise_all(mean) is its direct replacement and yields the same result.
mean_var <- all_merged %>%
  group_by(Subject, Activity) %>%
  summarise_all(mean)
b87101f2a7e258c35bfe56ef04b2034e77644255
d3a4319a66f8b86051c127c28d32619a256de156
/R/onload.R
ff5872961b2830ccd3fa0b52bf2cbdcfe9a19c60
[ "MIT" ]
permissive
SCAR/sohungry
11fbfbc14bb740754047ae5146f6c137f2ba913a
fd33d2c05fb7f2a8d51bb341ee30497f6098bab2
refs/heads/master
2023-04-22T07:14:44.234956
2023-04-03T23:21:38
2023-04-03T23:21:38
78,815,311
4
0
null
null
null
null
UTF-8
R
false
false
1,388
r
onload.R
.onLoad <- function(libname, pkgname) {
  ## Package load hook: register every sohungry setting under the single
  ## "sohungry" option so the rest of the package can look them up.
  ## Table names / file names of the hosted data products.
  data_sources <- list(
    doi_file = "scar_diet_energetics_doi.txt",
    sources_table = "ecology.dbo.scar_references",
    sources_file = "scar_sources.csv",
    energetics_table = "ecology.dbo.scar_energetics",
    energetics_file = "scar_energetics.csv",
    isotopes_table = "ecology.dbo.scar_isotopes",
    ##isotopes_file = "scar_isotopes.csv", ## deprecated as of v0.9.0
    isotopes_mv_file = "scar_isotopes_mv.csv",
    diet_table = "ecology.dbo.scar_diet",
    diet_file = "scar_diet.csv",
    dna_diet_table = "ecology.dbo.scar_dna_diet",
    dna_diet_file = "scar_dna_diet.csv",
    lipids_table = "ecology.dbo.scar_lipids",
    lipids_file = "scar_lipids.csv",
    ## Zenodo concept ID: always points to the most recent record version.
    zenodo_id = 5072527,
    zip_file = "SCAR_Diet_Energetics.zip",
    issue_text = "If the problem persists, please lodge an issue at https://github.com/SCAR/sohungry/issues"
  )
  ## Cache locations for the two cache_directory modes.
  cache_dirs <- list(
    ## used for cache_directory = "session"
    session_cache_dir = file.path(tempdir(), "sohungry-cache"),
    ## used for cache_directory = "persistent"
    persistent_cache_dir = rappdirs::user_cache_dir("sohungry", "SCAR")
  )
  options(sohungry = c(data_sources, cache_dirs))
  invisible()
}
a73a202d42e7ea843f350e4972bfa9d3702eef5c
72721f21b5e6bd7802f88fefda30dc77cd602a64
/FDR_control/simulated_dames.R
bf2fa1936edf02fb9c13df32f4abdab4d1b402d9
[]
no_license
markrobinsonuzh/allele_specificity_paper
9effc5cad41159c7473743449968c7313a2369e6
4bb4cc7c7270f1a7a063e8a5482a0f9b84b89bc8
refs/heads/master
2021-03-19T13:12:32.965571
2020-02-14T12:24:46
2020-02-14T12:24:46
49,428,747
2
1
null
2020-02-14T12:24:47
2016-01-11T13:40:19
R
UTF-8
R
false
false
13,133
r
simulated_dames.R
#!/usr/bin/env Rscript
# chmod +x
# run as [R < scriptName.R --no-save]
#########################################################################################
# Benchmark p-val assigment strategies with simulations                                 #
# TBS-seq data CRCs Vs Norm                                                             #
#                                                                                       #
# Stephany Orjuela, May 2019
#########################################################################################

library(SummarizedExperiment)
library(ggplot2)
library(iCOBRA)
library(tidyr)

#### Set sim ####
# Load the derived-ASM SummarizedExperiment (object `derASM`), sort it, and
# keep only sites with a non-missing ASM value in all 12 samples.
load("data/derASM_fullCancer.RData")
derASM <- GenomeInfoDb::sortSeqlevels(derASM)
#only necessary for old calc_derivedasm()
derASM <- sort(derASM)

#use only the sites completely covered by all samples
filt <- rowSums(!is.na(assay(derASM, "der.ASM"))) >= 12
derASM <- derASM[filt,] #9073
x <- assay(derASM,"der.ASM")

##Get only norm samples (columns 7:12) as the base matrix for spike-ins; keep
##an untouched copy in `original`.
prop.clust <- x[,7:12]
original <- prop.clust

# Per-site mean, between-group difference (cols 1:3 vs 4:6) and variance.
# NOTE(review): `var` here masks base::var, and rowVars() is assumed to come
# from an attached package (e.g. matrixStats) -- confirm against the session.
means <- rowMeans(prop.clust)
diffs <- apply(prop.clust, 1, function(w){mean(w[1:3]) - mean(w[4:6])})
var <- rowVars(prop.clust)
dd <- as.data.frame(cbind(var, means, diffs))
head(dd)

#MD plot (mean vs difference)
MD1 <- ggplot(dd, aes(means, diffs)) + geom_point(alpha = 0.2) + theme_bw()
#MV plot (mean vs variance)
MV1 <- ggplot(dd, aes(means, var)) + geom_point(alpha = 0.2) + theme_bw()

#### play with clust length given maxGap ####
# Cluster CpG sites by genomic distance for three maxGap choices and compare
# the resulting cluster-size distributions.  NOTE: the last assignment to
# `clust` (maxGap = 1000) is the one the simulation below uses.
#20
clust <- bumphunter::clusterMaker(as.character(seqnames(derASM)), start(derASM), maxGap = 20)
max20 <- data.frame(clusL = rle(clust)$length, maxGap = 20)
#100
clust <- bumphunter::clusterMaker(as.character(seqnames(derASM)), start(derASM), maxGap = 100)
maxcien <- data.frame(clusL = rle(clust)$length, maxGap = 100)
#1000
clust <- bumphunter::clusterMaker(as.character(seqnames(derASM)), start(derASM), maxGap = 1000)
maxmil <- data.frame(clusL = rle(clust)$length, maxGap = 1000)

clustab <- rbind(max20,maxcien,maxmil)
ggplot(clustab, aes(clusL)) + geom_histogram() + theme_bw() +
  labs(x= "Number of CpGs") + facet_grid(~maxGap)
ggsave("curvesNscatters/sim_cluster_sizes.png")

#### inverse sampling ####
#inverse sampling with truncated beta: effect sizes d are drawn by plugging
#Unif(minb, maxb) values into the Beta(alpha, beta) quantile function.
set.seed(20)

# params for very obvious regions
alpha <- 1
beta <- 2.5
minb <- 0.35 # 0.15 too small for lmfit to consider it a difference
maxb <- 0.75
pDiff <- 0.2 #this should affect the k choice

# The first pDiff fraction of clusters are designated as true DAMEs.
cluster.ids <- unique(clust) #3229, 1038
diffClusts <- 1:floor(pDiff*length(cluster.ids)) #645

#plot runif and beta draws to visualise the effect-size distribution
fullb <- qbeta(runif(length(diffClusts), minb, maxb), alpha, beta)
p1 <- ggplot() + geom_histogram(aes(fullb), bins = 6) + theme_bw() + labs(x = "Effect sizes")

un <- runif(length(diffClusts), minb, maxb)
p2 <- ggplot() + geom_histogram(aes(un), bins = 7) + theme_bw() + labs(x = "Unif(0.35,0.75)")

ran <- seq(0, 1, length = 100)
db <- dbeta(ran, alpha,beta)
d3 <- data.frame(p = ran, density = db)
p3 <- ggplot(d3, aes(p,density)) + geom_line() + theme_bw()

cowplot::plot_grid(p1,p2,p3, nrow = 3, ncol = 1, labels = c("A","B","C"))
ggsave("curvesNscatters/beta_and_unif_hists.png", width = 6, height = 10)

#get real coordinates to start from: per-cluster chromosome, start, end and
#number of CpGs, taken from the observed data.
chr <- as.character(seqnames(derASM))
starts <- start(derASM)
ends <- end(derASM)

realregs <- data.frame(chr=sapply(cluster.ids,function(Index) chr[clust == Index][1]),
                       start=sapply(cluster.ids,function(Index) min(starts[clust == Index])),
                       end=sapply(cluster.ids, function(Index) max(ends[clust == Index])),
                       clusL=sapply(cluster.ids, function(Index) length(clust[clust == Index])))

# draw_sims: run the spike-in simulation `numsims` times and benchmark the
# p-value assignment strategies of find_dames() (Simes vs empirical with
# Q = 0.2 / 0.5 / 0.8) against the known spiked-in regions via iCOBRA.
#   numsims    - number of simulation replicates
#   x          - ASM matrix (sites x samples); columns 7:12 are the base data
#   alpha, beta, minb, maxb - truncated-beta effect-size parameters
#   diffClusts, clust, cluster.ids - cluster bookkeeping (see above)
#   chr, starts, ends - per-site coordinates
#   realregs   - data.frame of true cluster regions (re-trimmed per replicate)
#   original   - untouched copy of the base ASM matrix
#   trend, methlmfit - forwarded to find_dames() (limma trend / lmFit method)
# Returns a ggplot of per-replicate FDR/TPR curves plus their means.
# NOTE(review): depends on `derASM`, find_dames(), GRanges/IRanges and iCOBRA
# being available in the calling environment -- confirm before reuse.
#create 50 more simulations to run the methods
draw_sims <- function(numsims = 50, x, alpha, beta, minb, maxb, diffClusts, clust, #same params
                      cluster.ids, chr, starts, ends, realregs, original, trend, methlmfit = "ls"){ #for find_dames, ggfile

  all_perf <- list()
  all_points <- list()

  for(j in 1:numsims){
    print(j)
    # Fresh copy of the base matrix and fresh effect sizes for this replicate.
    prop.clust <- x[,7:12]
    d <- qbeta(runif(length(diffClusts), minb, maxb), alpha, beta)
    #hist(d)

    ### Simulation ####
    for(i in diffClusts){

      #get CpGs per cluster that will have spike-in
      cpgs <- which(clust == cluster.ids[i])

      #choose number of CpGs diff per regions, and from what position
      if(length(cpgs) > 1){
        numdiff <- sample(1:length(cpgs), 1)
        maxpos <- length(cpgs) - numdiff + 1
        posdiff <- sample(1:maxpos,1)
        cpgs <- cpgs[1:posdiff]

        #reset region start and end to the CpGs actually spiked
        realregs$start[i] <- min(starts[cpgs])
        realregs$end[i] <- max(ends[cpgs])
        realregs$clusL[i] <- length(cpgs)
      }

      #randomly choose which group is diff
      ran <- sample(c(1,2),1)
      if(ran == 1) {group <- 1:3} else {group <- 4:6}

      #get cluster ASMsnp mean (if more than one sample)
      if(length(cpgs) > 1){
        DMRmean <- mean(rowMeans(prop.clust[cpgs,]))
      } else{
        DMRmean <- mean(prop.clust[cpgs,])
      }

      #sign is deterministic:
      #if the DMR mean (across samples and loci) is below
      #effect size 0.5, sign is positive
      if(DMRmean < 0.5) {sign <- 1} else {sign <- -1}

      #if any of the values goes outside of [0,1], keep the original prop (second)
      prop.clust[cpgs,group] <- original[cpgs,group] + (d[i] * sign)

      if(any(prop.clust[cpgs,group] < 0 | prop.clust[cpgs,group] > 1)){
        w <- which(prop.clust[cpgs,group] < 0 | prop.clust[cpgs,group] > 1)
        prop.clust[cpgs,group][w] <- original[cpgs,group][w]
      }
    }

    #make real GRanges: label = 1 for spiked (true) regions, 0 otherwise
    realregsGR <- GRanges(realregs$chr, IRanges(realregs$start, realregs$end),
                          clusL = realregs$clusL,
                          label = c(rep(1,length(diffClusts)), rep(0,(length(cluster.ids)-length(diffClusts)))))

    # Drop single-CpG regions: they cannot form a region-level call.
    filt <- realregsGR$clusL != 1
    realregsGR <- realregsGR[filt] #773
    #table(realregsGR$label)

    #head(prop.clust)
    #head(original)

    #re-do plots with added effects
    # means <- rowMeans(prop.clust)
    # var <- rowVars(prop.clust)
    # diffs <- apply(prop.clust, 1, function(w){mean(w[1:3]) - mean(w[4:6])})
    # dd <- as.data.frame(cbind(diffs, means,var))
    # ggplot(dd, aes(means, diffs)) + geom_point(alpha = 0.2) + theme_bw()
    # ggplot(dd, aes(means, var)) + geom_point(alpha = 0.2) + theme_bw()

    #build a sumExp with new data so find_dames() sees the spiked matrix
    fakeDerAsm <- derASM[,7:12]
    assay(fakeDerAsm, "der.ASM") <- prop.clust

    grp <- factor(c(rep("CRC",3),rep("NORM",3)), levels = c("NORM", "CRC"))
    mod <- model.matrix(~grp)

    #### Apply all methods ####

    #simes p-value assignment
    regs <- find_dames(fakeDerAsm, mod, maxGap = 100, trend = trend, method = methlmfit)
    regsGR <- GRanges(regs$chr, IRanges(regs$start, regs$end), clusterL = regs$clusterL,
                      pval = regs$pvalSimes, FDR = regs$FDR)

    #empirical p-value assignment at three Q cutoffs
    regs2 <- find_dames(fakeDerAsm, mod, maxGap = 100, pvalAssign = "empirical", Q = 0.2,
                        trend = trend, method = methlmfit)
    regs1GR <- GRanges(regs2$chr, IRanges(regs2$start, regs2$end), segmentL = regs2$segmentL,
                       clusterL = regs2$clusterL, pval = regs2$pvalEmp, FDR = regs2$FDR)

    regs2 <- find_dames(fakeDerAsm, mod, maxGap = 100, pvalAssign = "empirical", Q = 0.5,
                        trend = trend, method = methlmfit)
    regs2GR <- GRanges(regs2$chr, IRanges(regs2$start, regs2$end), segmentL = regs2$segmentL,
                       clusterL = regs2$clusterL, pval = regs2$pvalEmp, FDR = regs2$FDR)

    regs2 <- find_dames(fakeDerAsm, mod, maxGap = 100, pvalAssign = "empirical", Q = 0.8,
                        trend = trend, method = methlmfit)
    regs3GR <- GRanges(regs2$chr, IRanges(regs2$start, regs2$end), segmentL = regs2$segmentL,
                       clusterL = regs2$clusterL, pval = regs2$pvalEmp, FDR = regs2$FDR)

    #### build tables with pval methods ####
    # One row per true region; default 1 means "not called by this method".
    pvalmat <- data.frame(matrix(1, nrow = length(realregsGR), ncol = 4))
    fdrmat <- data.frame(matrix(1, nrow = length(realregsGR), ncol = 4))
    colnames(pvalmat) <- colnames(fdrmat) <- c("simes", "perms_02", "perms_05", "perms_08")

    #simes
    over <- findOverlaps(realregsGR, regsGR, type = "within")
    pvalmat$simes[queryHits(over)] <- mcols(regsGR)$pval[subjectHits(over)]
    fdrmat$simes[queryHits(over)] <- mcols(regsGR)$FDR[subjectHits(over)]

    #perms.0.2
    over <- findOverlaps(realregsGR, regs1GR, type = "within")
    pvalmat$perms_02[queryHits(over)] <- mcols(regs1GR)$pval[subjectHits(over)]
    fdrmat$perms_02[queryHits(over)] <- mcols(regs1GR)$FDR[subjectHits(over)]

    #perms.0.5
    over <- findOverlaps(realregsGR, regs2GR, type = "within")
    pvalmat$perms_05[queryHits(over)] <- mcols(regs2GR)$pval[subjectHits(over)]
    fdrmat$perms_05[queryHits(over)] <- mcols(regs2GR)$FDR[subjectHits(over)]

    #perm.0.8
    over <- findOverlaps(realregsGR, regs3GR, type = "within")
    pvalmat$perms_08[queryHits(over)] <- mcols(regs3GR)$pval[subjectHits(over)]
    fdrmat$perms_08[queryHits(over)] <- mcols(regs3GR)$FDR[subjectHits(over)]

    #### plot powerFDR ####
    #generate truth + facet table
    truth <- as.data.frame(mcols(realregsGR))

    #change clusL to num.CpGs

    #run iCOBRa
    cobradat <- COBRAData(pval = pvalmat, padj = fdrmat, truth = truth)

    #single plot
    cobraperf <- calculate_performance(cobradat, binary_truth = "label", cont_truth = "label",
                                       aspects = c("fdrtpr","fdrtprcurve"),
                                       thrs = c(0.01, 0.05, 0.1))

    all_perf[[j]] <- cobraperf@fdrtprcurve
    all_points[[j]] <- cobraperf@fdrtpr
  }

  #### set up to plot all sims ####
  #lines: stack the per-replicate FDR/TPR curves into one long table
  tpr <- lapply(all_perf, function(x){x$TPR})
  allperftab <- data.frame(sim = rep(1:numsims, lengths(tpr)),
                           FDR = unlist(lapply(all_perf, function(x){x$FDR})),
                           TPR = unlist(tpr),
                           method = unlist(lapply(all_perf, function(x){x$method})))

  allperftab <- unite(allperftab, unique_id, c(sim, method), sep="_", remove = FALSE)

  #points: per-threshold FDR/TPR points for every replicate
  tpr <- lapply(all_points, function(x){x$TPR})
  allpointtab <- data.frame(sim = rep(1:numsims, lengths(tpr)),
                            FDR = unlist(lapply(all_points, function(x){x$FDR})),
                            TPR = unlist(tpr),
                            method = unlist(lapply(all_points, function(x){x$method})),
                            thr = unlist(lapply(all_points, function(x){x$thr})),
                            satis = unlist(lapply(all_points, function(x){x$satis})))

  # Mean TPR/FDR per method and threshold across replicates.
  summpoints <- allpointtab %>%
    dplyr::group_by(method, thr) %>%
    dplyr::summarise(meanTPR=mean(TPR), meanFDR = mean(FDR)) %>%
    as.data.frame()

  summpoints$thr <- as.numeric(gsub("thr","",summpoints$thr))
  # Point shape 16 (filled) if the mean FDR is controlled at the threshold,
  # 21 (open) otherwise.
  summpoints$satis <- ifelse(summpoints$meanFDR <= summpoints$thr,16,21)

  myColor <- RColorBrewer::brewer.pal(8, "Set1")

  gplot <- ggplot(allperftab) +
    geom_line(aes(FDR, TPR, color=method, group=unique_id), alpha = 0.11) +
    scale_x_continuous(trans='sqrt', breaks = c(0.01,0.05,0.10,0.5)) +
    scale_color_manual(values = myColor) +
    labs(color = "Method") +
    geom_vline(xintercept = c(0.01,0.05,0.1), linetype = 2) +
    geom_line(data = summpoints, aes(x = meanFDR, y = meanTPR,color=method), size = 1) +
    geom_point(data = summpoints, aes(x = meanFDR,y = meanTPR,color=method, shape = satis), size = 5, fill = "white") +
    scale_shape_identity() +
    theme_bw()

  return(gplot)
}

#figure 3
pdiff02 <- draw_sims(numsims = 50, x, alpha, beta, minb, maxb, diffClusts, clust,
                     cluster.ids, chr, starts, ends, realregs, original, FALSE)
ggplot2::ggsave("curvesNscatters/powerFDR_pdiff02.png", pdiff02, width = 6, height = 5)

#supp fig 1
#figure 3
pdiff05 <- draw_sims(numsims = 50, x, alpha, beta, minb, maxb, diffClusts, clust,
                     cluster.ids, chr, starts, ends, realregs, original, FALSE)

#pdiff 0.2, len 20 (change above clust)
len20 <- draw_sims(numsims = 50, x, alpha, beta, minb, maxb, diffClusts, clust,
                   cluster.ids, chr, starts, ends, realregs, original, TRUE)

#pdiff 0.2, len 1000 (change above clust)
len1000 <- draw_sims(numsims = 50, x, alpha, beta, minb, maxb, diffClusts, clust,
                     cluster.ids, chr, starts, ends, realregs, original, FALSE)

#pdiff 0.2, len 100, trend true
trendtrue <- draw_sims(numsims = 50, x, alpha, beta, minb, maxb, diffClusts, clust,
                       cluster.ids, chr, starts, ends, realregs, original, TRUE)

# Assemble the supplementary panel: strip legends, keep one shared legend.
len20 <- len20 + theme(legend.position = "none")
len1000 <- len1000 + theme(legend.position = "none")
trendtrue <- trendtrue + theme(legend.position = "none")
pdiff05 <- pdiff05 + theme(legend.position = "none")
legend <- cowplot::get_legend(trendtrue)

m4 <- cowplot::plot_grid(len20, len1000, legend, trendtrue, pdiff05,
                         ncol=3, nrow = 2, labels = c("A", "B", "","C", "D"),
                         rel_widths = c(1, 1, 0.3))

ggplot2::ggsave("curvesNscatters/powerFDR_otherparams.png", m4, width = 8, height = 7)
b62341d9eaeace251dfe76fef1142c5f51055895
b2d46260f641db68780b0899f41661cb52413b43
/survival_gene_list_tcga.R
3b6b9202489ae9a4d462ea63e2949ec4b91a3d3d
[]
no_license
bio-liucheng/brca-singlecell
53dd13a82f8fba4411edcc247d81227a9883cdc4
98d63348e6daad001f8d0b9f3aea7ae5d483834e
refs/heads/main
2023-04-12T06:21:37.666495
2021-12-14T11:24:55
2021-12-14T11:24:55
423,029,266
1
0
null
null
null
null
WINDOWS-1252
R
false
false
6,196
r
survival_gene_list_tcga.R
library(survival)
library(survminer)
library(survMisc)

gene_list = c("GPR157")

# Load TCGA BRCA expression, clinical and survival matrices from disk.
# NOTE(review): setwd() to an absolute local path makes this script
# machine-specific -- consider relative paths or a project root.
setwd("G:/scRNA-seq/LC/TCGA/BRCA")
options(stringsAsFactors = F)
clin <- read.delim("BRCA_clinicalMatrix")
expr <- read.delim("HiSeqV2.gz")
surv <- read.delim("BRCA_survival.txt.gz")

# Use gene symbols as expression rownames and normalise sample barcodes
# ("." back to "-") so they match the clinical sampleID column.
rownames(expr) <- expr[,1]
expr <- expr[,-1]
colnames(expr) <- gsub(".", '-', colnames(expr), fixed = T)
rownames(clin) <- clin$sampleID

# Compute tumor vs normal difference (original comment was GBK-encoded
# Chinese that rendered as mojibake).
clin <- clin[colnames(expr),]
tumor_flag <- clin$sample_type == "Primary Tumor"
normol_flag <- clin$sample_type == "Solid Tissue Normal"
ER_positive <- clin$ER_Status_nature2012 == "Positive"

# Calculate survival hazard ratios: restrict both matrices to primary tumors
# and append the survival columns to the clinical table.
clin <- clin[tumor_flag,]
expr <- expr[,tumor_flag]

rownames(surv) <- surv$sample
surv <- surv[,-1]
surv <- surv[rownames(clin),]
clin <- cbind(clin, surv)

# Filter clinical table: drop Her2-enriched and Luminal PAM50 subtypes.
clin2 <- clin[!grepl("Her2|Lum", clin$PAM50Call_RNAseq),]
expr2 <- expr[,rownames(clin2)]

data <- as.matrix(expr)

# surv_genelist (first version): score each sample as the SUM of z-scored
# expression over up to the first 20 genes of `gene_list`, split samples at
# the 0.4/0.6 quantiles, and return the Cox model summary.
# NOTE(review): this definition is immediately MASKED by the second
# surv_genelist below -- it is dead code as written.
# NOTE(review): the trailing comma in `ggsurvplot(..., pval = TRUE, )` would
# raise an "argument is empty" error if this version were ever called.
surv_genelist <- function(data, clin_s, gene_list){
  inter_gene <- intersect(rownames(data), gene_list[1:20])
  expr_gene <- data[inter_gene,]
  expr_gene <- apply(expr_gene, 1, scale)
  rownames(expr_gene) <- colnames(data)
  expr_gene <- t(expr_gene)
  expr_gene <- apply(expr_gene, 2, sum)
  flag <- ifelse(expr_gene >median(expr_gene, na.rm = T), "high", "low")
  flag2 <- expr_gene < quantile(expr_gene, probs = 0.4)
  flag3 <- expr_gene > quantile(expr_gene, probs = 0.6)
  fit <- coxph(Surv(OS.time, OS) ~ flag ,subset = flag2 | flag3, data = clin_s)
  sum_fit <- summary(fit)
  fit2 <- survfit(Surv(OS.time, OS) ~ flag, subset = flag2 | flag3, data = clin_s)
  ggsurvplot(fit2, data = clin_s, pval = TRUE, )
  sum_fit
}

gene_list

# surv_genelist (second version, the one actually used): score each sample as
# the MEAN expression of the gene list (optionally within a clinical subtype
# selected by clin_type/sub_type), split high/low at the median, keep only the
# outer 45%/55% quantile tails, and draw a Kaplan-Meier plot.
# NOTE(review): the last evaluated expression is the ggsurvplot object, not
# the Cox summary (coxph/summary lines are commented out) -- the loop below
# that indexes f[["conf.int"]] therefore cannot work as written.
#calculate survival with gene_list average expression gene
surv_genelist <- function(data, clin, clin_type, sub_type, gene_list){
  if(is.null(clin_type)){
    sub_expr <- data
    sub_clin <- clin
  }else{
    flag_sub <- clin[,clin_type] %in% sub_type
    sub_clin <- clin[flag_sub,]
    sub_expr <- data[,flag_sub]
  }
  if(length(gene_list) >1){
    inter_gene <- intersect(rownames(sub_expr), gene_list)
    expr_gene <- sub_expr[inter_gene,]
    expr_gene <- apply(expr_gene, 2, mean)
  }else{
    expr_gene <- sub_expr[gene_list,]
    expr_gene <- as.numeric(expr_gene)
  }
  flag <- ifelse(expr_gene >median(expr_gene, na.rm = T), "high", "low")
  flag <- factor(flag, levels = c("high", "low"))
  flag_low <- expr_gene < quantile(expr_gene, probs = 0.45, na.rm = T)
  flag_high <- expr_gene > quantile(expr_gene, probs = 0.55, na.rm = T)
  sub_clin$flag <- flag
  sub_clin$flag_low <- flag_low
  sub_clin$flag_high <- flag_high
  # fit <- coxph(Surv(time = OS.time, event = OS) ~ flag, subset = flag_low |flag_high, data = sub_clin)
  fit2 <- survfit(Surv(time = OS.time, event = OS) ~ flag, subset = flag_low |flag_high, data = sub_clin)
  ggsurvplot(fit2, data = sub_clin, pval = TRUE)
  # sum_fit <- summary(fit)
}

# Scratch exploration of single-gene / gene-ratio scores.
# NOTE(review): `gene_list$gene` applies `$` to a character vector and would
# error unless gene_list has been replaced by a data.frame elsewhere; "C1DC"
# may be a typo for "CD1C"; `relative_gene` is reassigned three times so only
# the last assignment (gene_a) takes effect.
gene_list_a <- gene_list$gene
gene_list_b <- c("LAMP3", "C1DC", "CLEC9A")

gene_a <- apply(data[gene_list_a,], 1, scale)
gene_a <- t(gene_a)
gene_a <- apply(gene_a, 2, mean)

gene_a <- apply(data[intersect(gene_list_a, rownames(data)),], 2, mean)
gene_b <- apply(data[gene_list_b,], 2, mean)

gene_a <- as.numeric(data["LAMP3",])
gene_b <- as.numeric(data["LAMP3",])

relative_gene <- scale(gene_a) / gene_b
relative_gene <- gene_b / gene_a
relative_gene <- gene_a

# Median split of the score, outer-quantile subset, KM plot.
flag <- ifelse(relative_gene >median(relative_gene, na.rm = T), "high", "low")
flag <- factor(flag, levels = c("high", "low"))
flag_low <- relative_gene < quantile(relative_gene, probs = 0.45, na.rm = T)
flag_high <- relative_gene > quantile(relative_gene, probs = 0.55, na.rm = T)
clin$flag <- flag
clin$flag_low <- flag_low
clin$flag_high <- flag_high

fit2 <- survfit(Surv(time = OS.time, event = OS) ~ flag, subset = flag_low |flag_high, data = clin)
ggsurvplot(fit2, data = clin, pval = TRUE)

# Run the subtype-restricted analysis for one marker cluster.
# NOTE(review): intersect() never returns NULL, so this is.null() guard is
# always TRUE; `cluster_significant_markers` must come from another script.
if(T){
  #get a gene list
  gene_cluster <- "CAF_C3_PLA2G2A"
  gene_list <- cluster_significant_markers[cluster_significant_markers$cluster == gene_cluster,]$gene
  if(!is.null(intersect(gene_list, rownames(expr))))
    fit <- surv_genelist(expr, clin, "ER_Status_nature2012", "Positive", gene_list)
}

type <- unique(clin$ER_Status_nature2012)
type <- type[-c(1,4)]

library(dplyr)

# Calculate survival per ER status and per marker cluster, collecting the Cox
# hazard ratio, its CI and p-value into one table.
# NOTE(review): assumes `final_marker_s` exists and that surv_genelist()
# returns a coxph summary (see note on the second definition above).
for(j in 1:length(type)){
  type_s <- type[j]
  cluster <- unique(final_marker_s$cluster)
  for(i in 1:length(cluster)){
    gene_list <- final_marker_s$gene[final_marker_s$cluster == cluster[i]]
    f <- surv_genelist(expr, clin, NULL, type_s, gene_list)
    f_table <-data.frame(coef = f[["conf.int"]][1],
                         low_95 = f[["conf.int"]][3],
                         high_95 = f[["conf.int"]][4],
                         pvalue = f[["coefficients"]][5])
    if(i == 1) f_table_final <- f_table else f_table_final <- rbind(f_table_final, f_table)
  }
  f_table_final$cluster <- cluster
  f_table_final$patient_type <- type_s
  if(j ==1) f_table_final_final <- f_table_final else f_table_final_final <- rbind(f_table_final_final, f_table_final)
}

# surv_TwoGene: stratify samples by the high/low (median split of z-scored
# expression) combination of two genes, fit a 4-level Cox model and draw the
# Kaplan-Meier plot.  Note the `gene1_hgih` typo and the "low" labels missing
# the underscore ("_high" vs "low"); level reordering via unique(s)[c(2,1,3,4)]
# depends on the order groups first appear in the data.
#calculate survival with two gene (high low) expression gene
surv_TwoGene <- function(data, clin_s, two_gene){
  inter_gene <- intersect(rownames(data), two_gene)
  expr_gene <- data[inter_gene,]
  expr_gene <- apply(expr_gene, 1, scale)
  rownames(expr_gene) <- colnames(data)
  gene1_hgih <- ifelse(expr_gene[,1] > median(expr_gene[,1]),paste0(two_gene[1], "_high"),paste0(two_gene[1], "low"))
  gene2_high <- ifelse(expr_gene[,2] > median(expr_gene[,2]),paste0(two_gene[2], "_high"),paste0(two_gene[2], "low"))
  s <- paste(gene1_hgih, gene2_high, sep = '_')
  s <- factor(s, levels = unique(s)[c(2,1,3,4)])
  clin_s$s <- s
  fit <- coxph(Surv(OS.time, OS) ~ s, data = clin_s)
  sum_fit <- summary(fit)
  fit2 <- survfit(Surv(OS.time, OS) ~ s, data = clin_s)
  ggsurvplot(fit2, data = clin_s, pval = TRUE)
}

# NOTE(review): this call passes only 3 arguments to the 5-argument second
# surv_genelist(), so gene_list is matched to clin_type -- likely unintended.
surv_genelist(expr, clin, gene_list)
unique(clin$PAM50Call_RNAseq)
cb77c1de7f55a5cccccd59e541872cf5989d100d
2a7e77565c33e6b5d92ce6702b4a5fd96f80d7d0
/fuzzedpackages/snipEM/R/sclust.R
5e322f8dcfcd1d390596939589e055b1e947216d
[]
no_license
akhikolla/testpackages
62ccaeed866e2194652b65e7360987b3b20df7e7
01259c3543febc89955ea5b79f3a08d3afe57e95
refs/heads/master
2023-02-18T03:50:28.288006
2021-01-18T13:23:32
2021-01-18T13:23:32
329,981,898
7
1
null
null
null
null
UTF-8
R
false
false
3,906
r
sclust.R
# .eigenConst: enforce an eigenvalue-ratio constraint on a (sorted,
# decreasing) vector of eigenvalues `Sev` pooled over clusters.
#   Sev    - eigenvalues, p per cluster, stacked into one vector
#   p      - data dimension (number of eigenvalues per cluster)
#   lambda - maximum allowed ratio largest/smallest eigenvalue
# If the ratio constraint already holds, the eigenvalues are returned as-is.
# Otherwise each candidate truncation level m (derived from every eigenvalue,
# scanned from both ends) is tried: eigenvalues are clipped into [m, lambda*m]
# and the level minimising sum(log(Sevm) + Sev/Sevm) -- the relevant part of
# the constrained likelihood -- is kept.
.eigenConst <- function(Sev, p, lambda=12){
  moptv <- 999999   # running minimum of the objective (large sentinel)
  if( Sev[p] > 0 & Sev[1]/Sev[p] < lambda){
    # Constraint already satisfied: nothing to clip.
    moptv <- sum(log(Sev) + 1)
    return(Sev)
  } else{
    for(i in 1:p){
      # Candidate upper bound taken from the i-th eigenvalue.
      cm <- Sev[i]
      if( cm > 0){
        m <- cm/lambda
        Sevm <- pmin( pmax( Sev, m ), cm)
        moptv_tmp <- sum(log(Sevm) + Sev/Sevm)
        if( moptv_tmp < moptv ){
          moptv <- moptv_tmp
          mopt <- m
        }
      }
      # Candidate lower bound taken from the (p-i+1)-th eigenvalue.
      m <- Sev[p-i+1]
      if( m > 0){
        cm <- m*lambda
        Sevm <- pmin( pmax( Sev, m ), cm)
        moptv_tmp <- sum(log(Sevm) + Sev/Sevm)
        if( moptv_tmp < moptv ){
          moptv <- moptv_tmp
          mopt <- m
        }
      }
    }
    # Clip with the best level found and return.
    Sevm <- pmin( pmax( Sev, mopt ), mopt*lambda)
    return( Sevm )
  }
}

# sclust: model-based clustering with cell-wise snipping via an EM algorithm.
#   X          - n x p data matrix (or data.frame)
#   k          - number of clusters
#   V          - n x p 0/1 matrix; 0 marks a snipped (ignored) cell
#   R          - initial cluster labels (0 = fully snipped row)
#   restr.fact - eigenvalue-ratio constraint passed to .eigenConst
#   tol        - convergence tolerance on the likelihood
#   maxiters   - maximum EM iterations
#   maxiters.S - maximum candidate swaps per CES (snipping) step
#   print.it   - print per-iteration likelihood progress
# Returns list(R, pi, mu, S, V, lik, iter).
# NOTE(review): ldmvnorm() (log-density of a multivariate normal with missing
# cells) and sumlog() (log-sum-exp) are package-internal helpers -- confirm
# their contracts in the package before modifying this function.
sclust<- function(X,k,V,R,restr.fact=12,tol=1e-4,maxiters=100,maxiters.S=1000, print.it=FALSE) {
  # Basic argument validation.
  if( missing(X) ) stop("'X' missing")
  if( missing(k) ) stop("'k' missing")
  if( missing(V) ) stop("'V' missing")
  if( missing(R) ) stop("'R' missing")
  if(is.data.frame(X) | is.matrix(X)) X <- data.matrix(X) else stop("Data matrix must be of class matrix or data.frame")
  n <- nrow(X)
  p <- ncol(X)
  if(is.data.frame(V) | is.matrix(V)) V <- data.matrix(V)
  if( any(dim(V) != dim(X)) ) stop("'X' and 'V' have non-conforming size")
  epsilon=sum(V==0)/(n*p)   # snipping rate (fraction of zeroed cells)
  if( length(unique(R)) != k) stop("Number of cluster labels must be 'k'")

  ## init: allocate cluster means, covariances, posteriors, eigen storage ##
  m=matrix(NA,k,p)
  Sigma=array(0,c(k,p,p))
  D=matrix(NA,n,p)
  Dd=Ddtmp=matrix(NA,n,k)
  autovalues=matrix(NA,p,k)   # p eigenvalues per cluster, one column each
  U=Sigma                      # eigenvector arrays, same shape as Sigma
  CSmat=array(NA,c(k,n,p))
  det=rep(NA,k)

  ## init values: mixing proportions and per-cluster moments from the
  ## initial labels, with snipped cells treated as NA ##
  pi=prop.table(table(R[R!=0]))
  Xt=X
  Xt[V==0]=NA
  for(j in 1:k) {
    m[j,]=apply(Xt[R==j,],2,mean,na.rm=T)
    Sigma[j,,]=var(Xt[R==j,],na.rm=T)
    s=eigen(var(Xt[R==j,],na.rm=T))
    U[j,,]=s$vectors
    autovalues[,j]=s$values
  }
  # Apply the eigenvalue-ratio constraint jointly across clusters, then
  # rebuild the constrained covariance matrices.
  autovalues <- matrix(.eigenConst(as.vector( autovalues), p, restr.fact),ncol=k)
  for(j in 1:k) {
    Sigma[j,,]=U[j,,]%*%diag(autovalues[,j])%*%t(U[j,,])
    det[j]=prod(autovalues[,j])
  }

  # Initial posteriors (E step) and likelihood.
  for(j in 1:k) {
    Dd[,j]=log(pi[j])+ldmvnorm(Xt,m[j,],Sigma[j,,])
  }
  Dd=exp(Dd-apply(Dd,1,sumlog))
  lik=0
  for(j in 1:k) {
    lik=lik+sum(Dd[,j]*(log(pi[j])+ldmvnorm(Xt,m[j,],Sigma[j,,])))}
  likold=lik-2*tol
  ii=0

  # Main EM loop: iterate until the likelihood improvement drops below tol.
  while(lik-likold>tol & ii < maxiters) {
    ii=ii+1

    ## CES step: propose random swaps of one snipped and one unsnipped cell;
    ## accept the first swap that improves the likelihood (greedy search).
    iter=0
    flag=FALSE
    while(iter < maxiters.S & flag==FALSE) {
      iter=iter+1
      s1=sample(which(V==1),1)
      s2=sample(which(V==0),1)
      Vc=V
      Vc[s1]=0
      Vc[s2]=1
      Xt=X
      Xt[Vc==0]=NA
      likcand=0
      for(j in 1:k) {
        likcand=likcand+sum(Dd[,j]*(log(pi[j])+ldmvnorm(Xt,m[j,],Sigma[j,,])))}
      if(likcand>lik) {
        V=Vc
        flag=TRUE
      }
    }

    # E step: recompute log posteriors and hard labels; rows that are fully
    # snipped get label 0 and zero posterior weight.
    for(j in 1:k) {
      Dd[,j]=log(pi[j])+ldmvnorm(Xt,m[j,],Sigma[j,,])
    }
    R=apply(Dd,1,which.max)
    R[which(apply(V==0,1,all))]=0
    Dd[which(apply(V==0,1,all)),]=0

    ## M step: update mixing proportions (in log space via sumlog), then
    ## weighted means and covariances using only unsnipped cells.
    pi=apply(Dd[which(apply(V!=0,1,any)),],2,sumlog)
    pi=exp(pi-sumlog(pi))
    Dd=exp(Dd-apply(Dd,1,sumlog))
    Xt <- X
    Xt[V==0] <- NA
    for(j in 1:k) {
      XtDd <- sweep(Xt, 1, Dd[,j], "*")
      VDd <- sweep(V, 1, Dd[,j], "*")
      m[j,] <- colSums(XtDd, na.rm=T)
      m[j,] <- m[j,]/colSums(VDd, na.rm=T)
      # Covariance entries: pairwise-complete weighted cross-products,
      # normalised by the weight mass where both coordinates are observed.
      Stmp <- matrix(NA, p,p)
      for(h in 1:(p-1)) {
        for(l in (h+1):p) {
          Stmp[h,l] <- sum(Dd[,j]*(Xt[,h]-m[j,h])*(Xt[,l]-m[j,l]),na.rm=T)
          Stmp[h,l] <- Stmp[h,l]/sum(Dd[,j]*V[,h]*V[,l])
          Stmp[l,h] <- Stmp[h,l]
        }
      }
      for(h in 1:p) Stmp[h,h] <- sum(Dd[,j]*(Xt[,h]-m[j,h])^2,na.rm=T)/sum(Dd[,j]*V[,h])
      s=eigen(Stmp)
      U[j,,]=s$vectors
      autovalues[,j]=s$values
    }
    # Re-apply the eigenvalue constraint and rebuild covariances.
    autovalues <- matrix(.eigenConst(as.vector( autovalues), p, restr.fact),ncol=k)
    for(j in 1:k) {
      Sigma[j,,]=U[j,,]%*%diag(autovalues[,j])%*%t(U[j,,])
      det[j]=prod(autovalues[,j])
    }

    # Update the likelihood for the convergence test.
    likold=lik
    lik=0
    for(j in 1:k) {
      lik=lik+sum(Dd[,j]*(log(pi[j])+ldmvnorm(Xt,m[j,],Sigma[j,,])))}
    if( print.it )cat("iter", ii, "; current lik:", lik, "; change in lik:",lik-likold, "\n")
  }
  return(list(R=R,pi=pi,mu=m,S=Sigma,V=V,lik=lik,iter=ii))}
4d2b36c3695aec153a87f78e677ba58f09869d22
9969b02c26fa5388ac971b8212c761c6abf98efb
/inst/helperCode/find_gaps.r
39c21f8ea9cbe3a1b1f644ecd6cba6752e6f000c
[]
no_license
tmcd82070/CAMP_RST
0cccd7d20c8c72d45fca31833c78cd2829afc169
eca3e894c19936edb26575aca125e795ab21d99f
refs/heads/master
2022-05-10T13:33:20.464702
2022-04-05T21:05:35
2022-04-05T21:05:35
10,950,738
0
0
null
2017-05-19T20:42:56
2013-06-25T21:24:52
R
UTF-8
R
false
false
7,672
r
find_gaps.r
## find_gaps: fetch trap-visit catch records (including "Not fishing" gap
## rows) for one CAMP rotary-screw-trap site, via the site's Access database.
## river/site/taxon/min.date/max.date select the platform and window.
## Relies on globals defined elsewhere: db.file1..db.file8, db.fileA and the
## CAMP helpers F.buildReportCriteria, F.run.sqlFile, F.sql.error.check
## -- TODO confirm these are sourced before this file runs.
## Returns the catch data frame with `river` and `site` columns appended,
## or NULL (with a warning) when no trapVisit records match.
find_gaps <- function( river, site, taxon, min.date, max.date ){
  # river <- "American River"
  # site <- 57000
  # taxon <- 161980
  # min.date <- '1980-01-01'
  # max.date <- '2016-03-02'
  # Map the river name to its database file (globals set elsewhere).
  if(river == ''){
    db.file <- db.file1
  } else if(river == 'Sacramento River'){
    db.file <- db.file2
  } else if(river == 'American River'){
    db.file <- db.file3
  } else if(river == ''){
    # NOTE(review): this branch duplicates the first `river == ''` test and
    # can never be reached, so db.file4 is never selected.
    db.file <- db.file4
  } else if(river == 'Feather River'){
    db.file <- db.file5
  } else if(river == 'Stanislaus River'){
    db.file <- db.file6
  } else if(river == 'Old American Test'){
    db.file <- db.file7
  } else if(river == 'Mokelumne River'){
    db.file <- db.file8
  # } else if(river == "Knight's Landing"){
  #   db.file <- db.file9
  } else if(river == "Knight's Landing"){
    db.file <- db.fileA
  }
  db.file <<- db.file  # exported to the global env; CAMP helpers read it from there
  cat(paste0(db.file,"\n"))

  # Check that times are less than 1 year apart
  strt.dt <- as.POSIXct( min.date, format="%Y-%m-%d" )
  end.dt <- as.POSIXct( max.date, format="%Y-%m-%d" )
  run.season <- data.frame( start=strt.dt, end=end.dt )
  nvisits <- F.buildReportCriteria( site, min.date, max.date )
  if( nvisits == 0 ){
    warning("Your criteria returned no trapVisit table records.")
    return()
  }

  db <- get( "db.file", env=.GlobalEnv )
  ch <- odbcConnectAccess(db)
  F.run.sqlFile( ch, "QrySamplePeriod.sql", R.TAXON=taxon )   # This SQL file develops the hours fished and TempSamplingSummary table
  F.run.sqlFile( ch, "QryNotFishing.sql" )                    # This SQL generates times when the traps were not fishing
  F.run.sqlFile( ch, "QryUnmarkedByRunLifestage.sql", R.TAXON=taxon )  # This SQL generates unmarked fish by run and life stage
  catch <- sqlFetch( ch, "TempSumUnmarkedByTrap_Run_Final" )  # Now, fetch the result
  F.sql.error.check(catch)
  close(ch)
  if(nrow(catch) == 0){
    warning("Your criteria returned no catch records. Check to make sure valid Fishing occurred within your date range.")
    # NOTE(review): bare `stop` (no parentheses) does not raise an error here;
    # it just evaluates the function object and execution falls through.
    stop
  }
  catch$river <- river
  catch$site <- site
  catch
}

# ---- Pull every site of interest over the full record ----
ame57000 <- find_gaps("American River"  , 57000, 161980, '1980-01-01', '2016-03-02')
fea3000  <- find_gaps("Feather River"   , 3000,  161980, '1980-01-01', '2016-03-02')
fea52000 <- find_gaps("Feather River"   , 52000, 161980, '1980-01-01', '2016-03-02')
fea5000  <- find_gaps("Feather River"   , 5000,  161980, '1980-01-01', '2016-03-02')
fea4000  <- find_gaps("Feather River"   , 4000,  161980, '1980-01-01', '2016-03-02')
fea2000  <- find_gaps("Feather River"   , 2000,  161980, '1980-01-01', '2016-03-02')
fea6000  <- find_gaps("Feather River"   , 6000,  161980, '1980-01-01', '2016-03-02')
sac42000 <- find_gaps("Sacramento River", 42000, 161980, '1980-01-01', '2016-03-02')
sta1000  <- find_gaps("Stanislaus River", 1000,  161980, '1980-01-01', '2016-03-02')
mok34000 <- find_gaps("Mokelumne River" , 34000, 161980, '1980-01-01', '2016-03-02')
kni63000 <- find_gaps("Knight's Landing", 63000, 161980, '1980-01-01', '2016-03-02')

gaps <- rbind(ame57000,fea3000,fea52000,fea5000,fea4000,fea2000,fea6000,sac42000,sta1000,mok34000,kni63000)
table(gaps$TrapStatus)

# Keep only the "Not fishing" gap rows and derive hour/day durations.
gapsT <- gaps[gaps$TrapStatus == "Not fishing",]
gapsT$SampleHours <- gapsT$SampleMinutes / 60
gapsT$SampleDays <- gapsT$SampleMinutes / 60 / 24
gapsT <- gapsT[,c('trapPositionID','TrapPosition','SampleDate','StartTime','EndTime','SampleMinutes','SampleHours','SampleDays','siteID','siteName','river')]
gapsT <- gapsT[order(gapsT$SampleDays, decreasing=TRUE),]

# Restrict to gaps of at most one year, sorted for plotting.
gapsT365 <- gapsT[gapsT$SampleDays <= 365,]
gapsT365 <- gapsT365[order(gapsT365$river,gapsT365$siteID,gapsT365$trapPositionID,gapsT365$SampleDate,gapsT365$SampleMinutes),]
traps <- unique(gapsT365$trapPositionID)#[c(1:6)]

# One row of 6 panels per trap: labels, full-data histogram/EDF, and
# (when >= 10 gaps) truncated histograms and EDF.
png("C:/Users/jmitchell/Desktop/theGaps_DO_NOT_PRINT.png",units="in",width=24,height=120,res=300)
par(mfrow=c(length(traps),6))
for(i in 1:length(traps)){
  trap <- traps[i]
  # -------- set it up ------------
  df <- gapsT365[gapsT365$trapPositionID == trap,]
  the95 <- quantile(df$SampleMinutes,c(0.95))
  river <- df[1,]$river
  siteName <- df[1,]$siteName
  TrapPosition <- df[1,]$TrapPosition
  dist1 <- df$SampleMinutes
  dist2 <- ecdf(df$SampleDays)
  if(nrow(df) >= 10){
    dist3 <- df[df$trapPositionID == trap & df$SampleMinutes <= the95,]$SampleMinutes
    dist3b <- df[df$trapPositionID == trap & df$SampleMinutes <= 15840,]$SampleMinutes  # 15840 min = 11 days
    dist4 <- ecdf(df[df$trapPositionID == trap & df$SampleMinutes <= the95,]$SampleDays)
  } else {
    dist4 <- dist3 <- "Insufficient Data"
  }
  # -------- make the plot --------
  # Panel 1: text label identifying river / site / trap.
  plot(1,1,type = "n",frame.plot = FALSE,axes = FALSE,xlab="",ylab=""); u <- par("usr")  # zero out margins, make empty plot, get bounding box
  text(1,u[3] + 1.1*(u[4]-u[3])/2,river,cex=1)
  text(1,u[3] + 1.0*(u[4]-u[3])/2,siteName,cex=1)
  text(1,u[3] + 0.9*(u[4]-u[3])/2,TrapPosition,cex=1)
  box()
  # Panel 2: histogram of all gap durations (minutes).
  h1 <- hist(dist1,main="Histogram -- All Data",xlab="Minutes",xaxt="n")
  axis(1,at=h1$breaks,formatC(h1$breaks, digits = 0, format = "f",big.mark=",") )
  # Panel 3: empirical distribution function of gap durations (days).
  plot(dist2, xaxt="n",verticals = TRUE, main="EDF",col.points = "blue",col.hor = "red", col.vert = "bisque",xlab="Days")
  axis(1,at=pretty(seq(0,max(df$SampleDays)),length.out=10),formatC(pretty(seq(0,max(df$SampleDays)),length.out=10), digits = 0, format = "f",big.mark=",") )
  if(class(dist3)[1] == "character" | class(dist4)[1] == "character"){
    # Fewer than 10 gaps: fill panels 4-6 with a placeholder message.
    plot(1,1,type = "n",frame.plot = FALSE,axes = FALSE,xlab="",ylab=""); u <- par("usr")  # zero out margins, make empty plot, get bounding box
    text(1,u[3] + 1.1*(u[4]-u[3])/2,paste0("Only N=",nrow(df)," points."),cex=1)
    box()
    plot(1,1,type = "n",frame.plot = FALSE,axes = FALSE,xlab="",ylab=""); u <- par("usr")  # zero out margins, make empty plot, get bounding box
    text(1,u[3] + 1.1*(u[4]-u[3])/2,paste0("Only N=",nrow(df)," points."),cex=1)
    box()
    plot(1,1,type = "n",frame.plot = FALSE,axes = FALSE,xlab="",ylab=""); u <- par("usr")  # zero out margins, make empty plot, get bounding box
    text(1,u[3] + 1.1*(u[4]-u[3])/2,paste0("Only N=",nrow(df)," points."),cex=1)
    box()
  } else {
    # Panel 4: fixed-bin histogram up to 11 days with day-boundary markers.
    plot(1,1,type = "n",frame.plot = FALSE,axes = FALSE,xlab="",ylab="")
    par(new=TRUE)
    h <- hist(dist3b,breaks=seq(0,15840,length.out=11*4+1),col="lightgray",xaxt="n",main="Histogram -- 0 to 15,840, with Bin Size = 360 Mins",xlab="Minutes")
    axis(side=1, at=seq(0,15840,length.out=11*2+1), labels=seq(0,15840,length.out=11*2+1))
    par(new=TRUE)
    # NOTE(review): this inner loop reuses `i`, shadowing the outer trap index;
    # harmless here only because R's for() reassigns i each outer iteration.
    for(i in 1:11){
      drawEm <- seq(0,15840,length.out=11+1)
      #abline(v=drawEm[i],col="blue")
      segments(drawEm[i],0,drawEm[i],max(h$counts),col="blue",lwd=2)
      text(drawEm[i]+400,max(h$counts),drawEm[i]/1440 + 1,cex=1.5,pos=4)  # label day number
    }
    # Panel 5: histogram of durations below the 95th percentile.
    hist(dist3,breaks=length(dist3)/5,main="Histogram -- 0 to 95th Percentile",xlab="Minutes")
    # Panel 6: EDF of durations below the 95th percentile.
    plot(dist4, xaxt="n",verticals = TRUE, main="EDF -- 0 to 95th Percentile",col.points = "blue",col.hor = "red", col.vert = "bisque",xlab="Days")
    axis(1,at=pretty(seq(0,max(df[df$trapPositionID == trap & df$SampleDays <= the95/60/24,]$SampleDays)),length.out=10),formatC(pretty(seq(0,max(df[df$trapPositionID == trap & df$SampleDays <= the95/60/24,]$SampleDays)),length.out=10), digits = 0, format = "f",big.mark=",") )
  }
}
dev.off()
par(mfrow=c(1,1))
76a87887d72473969097e79efdb7f4dc761dd6e0
77dc1bb37706ca78aec3efa42b0e4e39c9aab257
/R/RcppExports.R
b13a9348a40ed2113e915ddc59f6bbec8d44cba4
[]
no_license
yjzeng017/StatComp20088
82ee555adda1140cca32667ad7c57b464f59d9aa
15219b56be63e117ff21cfd6fe68304b3259f05b
refs/heads/master
2023-02-02T16:25:54.334536
2020-12-20T12:33:42
2020-12-20T12:33:42
323,021,375
0
0
null
null
null
null
UTF-8
R
false
false
673
r
RcppExports.R
# Generated by using Rcpp::compileAttributes() -> do not edit by hand # Generator token: 10BE3573-1514-4C36-9D1C-5A225CD40393 #' @title Random walk #' @description Random walk with Metropolis sampling method using Rcpp #' @param sigma variance #' @param x0 initial value #' @param N sample size #' @return a random sample of size N #' @export NULL #' @title Laplace density #' @description Laplace density #' @param x real value #' @return the density at x #' @export denLa <- function(x) { .Call('_StatComp20088_denLa', PACKAGE = 'StatComp20088', x) } RWcpp <- function(sigma, x0, N) { .Call('_StatComp20088_RWcpp', PACKAGE = 'StatComp20088', sigma, x0, N) }
2ecd71938696966f9a496b4e7883e36e8a29372b
323d3dcc710c658eeffd9e95878530f720efad42
/2-Regression/1. Simple Linear Regression/SimpleLinearRegression.R
1273ef260b0b89bab343ced44f33dc91521bda70
[]
no_license
ahorsager/MLFoundations
35eb98bbf86c57d4606e6760e8ec6f10d192d85e
9a6b5848202ebb21f307f38e518ec9e360f63c7a
refs/heads/master
2020-04-11T01:11:30.260798
2019-01-15T01:31:55
2019-01-15T01:31:55
161,408,250
0
0
null
null
null
null
UTF-8
R
false
false
1,549
r
SimpleLinearRegression.R
# SimpleLinearRegression.R # A template for a simple linear regression model # Author: Alan Horsager # Created: 12-DEC-2018 # **DATA PREPROCESSING** # Importing the data set setwd("/Users/horsager/Dropbox/projects/analytics/MLTraining/2-Regression/Simple Linear Regression") DataSet = read.csv('SalaryData.csv') # **SPLIT TRAINING & TEST DATA** set.seed(123) Split = sample.split(DataSet$Salary, SplitRatio = 2/3) TrainingSet = subset(DataSet, Split == TRUE) TestSet = subset(DataSet, Split == FALSE) # **MODEL** # Fitting linear regression to the data set regressor = lm(formula = Salary ~ YearsExperience, data = TrainingSet) summary(regressor) # Summary of lm fit results # Predict test set results TestPredict = predict(regressor, newdata = TestSet) # **DATA VISUALIZATION** # Plot regression fit of training set ggplot() + geom_point(aes(x = TrainingSet$YearsExperience, y = TrainingSet$Salary), color = 'red') + geom_line(aes(x = TrainingSet$YearsExperience, y = predict(regressor, newdata = TrainingSet)), color = 'blue') + ggtitle('Salary vs. Experience (Training Set)') + xlab('Years of Experience') + ylab('Salary') # Plot regression of test set ggplot() + geom_point(aes(x = TestSet$YearsExperience, y = TestSet$Salary), color = 'red') + geom_line(aes(x = TrainingSet$YearsExperience, y = predict(regressor, newdata = TrainingSet)), color = 'blue') + ggtitle('Salary vs. Experience (Test Set)') + xlab('Years of Experience') + ylab('Salary')
a59325be06c7b1a48d1bec440125968585d62e43
d2ca86d0aa2e84b14b0d455ded547df90b1a7bc1
/plot1.R
fea142390474772b9ccda2b58659320db590f66c
[]
no_license
adoroszlai/ExData_Plotting1
e5622bf069b8aa2e138a362a596e0377c34ce3ef
ac1e9481a8fcb62ffa02d5a99210b4816c5f3777
refs/heads/master
2021-01-18T11:08:33.306316
2014-06-03T17:40:32
2014-06-03T17:40:32
null
0
0
null
null
null
null
UTF-8
R
false
false
201
r
plot1.R
source('clean_data.R') # plot 1 png('plot1.png', width = 480, height = 480) hist(df$global_active_power, main = "Global Active Power", xlab = "Global Active Power (kilowatts)", col = "red") dev.off()
08d80b9ed21597dd15f87c3e38be2ee33b8d3fa0
6b95e88fd11aff60e778c90ef75e75383a965c0c
/Q1.R
1ff42b01eda80588f0c332c416209856ddeefaa6
[]
no_license
bishal839/AP_LAB8
2de69d3c7ef804a222793ddfe652ee4d447b889b
e8a87d00e36853e5c4119885d24ab018c6d56f8c
refs/heads/master
2020-05-03T08:59:32.315312
2019-03-30T10:29:43
2019-03-30T10:29:43
178,541,736
0
0
null
null
null
null
UTF-8
R
false
false
638
r
Q1.R
x=read.csv("student.csv") print(paste("\nquestion 1\n")) y<-max(x$Percent) print(subset(x,(Percent==y))) print(paste("\nquestion 2\n")) z<-subset(x,(Branch=="cse")) print(subset(z,(Percent>=80))) print(paste("\nquestion 3\n")) print(subset(x,as.Date(DOA)>(as.Date("2016/07/01")))) print(paste("Question 4")) write.csv(x,"student1.csv") cat(11,160,"Sohail Alam","CSE",82,"2017/05/23",file = "student1.csv",sep = ",",append = TRUE) cat(file="student1.csv",sep = "\n",append = TRUE) cat(12,327,"Md. Hamid Reza","CSE",96,"2018/05/12",file = "student1.csv",sep = ",",append = TRUE) y=read.csv("student1.csv") print(y)
672f2b3b7cef92d57004aea0bbcb769786c37aa9
d13a597f0dca27d35d63991e887a19dc3e5354c4
/R/packages.R
363c6ae1f9b543d93a1ba98bd910eafbd162a083
[ "Apache-2.0" ]
permissive
NewGraphEnvironment/backupr
9de69ffc286450a33fc807ac74673aa391fab54b
0230245296bc59557f307f56eac910e22d789a2c
refs/heads/main
2023-03-30T13:07:19.789039
2021-03-28T22:47:23
2021-03-28T22:47:23
346,763,301
0
0
null
null
null
null
UTF-8
R
false
false
85
r
packages.R
pacman::p_load( tidyverse, RPostgres, RPostgreSQL, DBI, sf, data.table )
f0e397809ee482545fe27f95e8dacf202e5aa2ea
3f47892735e42d094e31341f6b306424cf9d36d2
/R/feed.extract.R
93c554c6bdbffb2a85c259af48dabd79b0e43de7
[]
no_license
lovetoken/feed
f562c900a760a44344f25bb23d3baf138d64a7e5
52dc2f9f577ae26756c579c0611a2f184fe537ff
refs/heads/master
2020-03-25T01:02:11.830724
2018-08-02T10:07:46
2018-08-02T10:07:46
143,218,782
1
0
null
2018-08-01T23:37:09
2018-08-01T23:37:09
null
UTF-8
R
false
false
807
r
feed.extract.R
#' feed.extract #' #' This function extract and re-combine the list from feed.info(). #' @param url A URL that you want to scraping. #' @param n A number of list from feed.info(). #' @keywords feed, feedipedia #' @export #' @import rvest #' @import dplyr #' @examples #' feed.extract("https://www.feedipedia.org/node/556",2) #' feed.extract <- function(url,n) { # package stopifnot(require(rvest), require(dplyr)) html <- read_html(url, encoding="UTF-8") #nutrients list <- html %>% html_nodes("table") %>% html_table() a <- list[[n]] end <- c(which(a[,2] == '') - 1,nrow(a)) start <- c(2,which(a[,2] == '') + 2) df <- list() for(i in 1:length(end)){ df[[i]] <- a[start[i]:end[i],] names(df[i]) <- a[start[i]-1,1] colnames(df[[i]]) <- a[start[i]-1,] } print(df) }
618933237507ac6651cdffe043c3380b1919a47a
ee360f07fd7a202207aec4a26cfc68ba3d053bc5
/analysis/utils.R
d539c3071d8b6ec15738a9254f9bee1834741d0a
[]
no_license
timole/usage
bb3715d903022d33b4028f472908db5c43b7c2f8
a06fc4349c6b968b6020b2be8065761a42322464
refs/heads/master
2021-01-10T19:41:36.064006
2015-05-07T10:05:59
2015-05-07T10:05:59
33,768,302
0
1
null
null
null
null
UTF-8
R
false
false
2,205
r
utils.R
library("kohonen")
library("rjson")

## Convert a kohonen unit classifier index `c` (1-based, row-major on the
## SOM grid) into zero-based x/y grid coordinates, with y flipped so the
## origin is at the top of the grid.
classifierToXY <- function(somMap, c) {
  col <- (c - 1) %% somMap$grid$xdim + 1
  row <- somMap$grid$ydim - ( floor( (c - 1) / somMap$grid$xdim))
  return(list(x = col - 1, y = row - 1))
}

## Zero-based grid coordinates for every mapped data item, in data order.
getSomItemLocations <- function(somMap) {
  lapply(somMap$unit.classif, function(c) { return(classifierToXY(somMap, c))})
}

## Build a plain-list representation of a fitted SOM: grid size, per-item
## grid positions (keyed by data row name), and per-dimension value grids.
somToDataMap <- function(somMap) {
  ids <- rownames(somMap$data)
  datas <- split(somMap$data, row(somMap$data))  # NOTE(review): unused below
  locations <- getSomItemLocations(somMap)
  all <- list()
  for(i in seq(1:length(ids))) {
    item <- list(x = locations[[i]]$x, y = locations[[i]]$y)
    all[[ids[i]]] <- item
  }
  dimensions <- getDimensions(somMap)
  # NOTE(review): `kohmap` is a global, not the `somMap` argument -- this
  # breaks when the function is called on any other SOM object; probably
  # should be somMap$data. The result is not used in the returned dataMap.
  m <- kohmap$data
  m <- cbind(id = rownames(m), m)
  itemDimensionValues = toMapByColumnName(m, "id")
  dataMap <- list(xdim = somMap$grid$xdim, ydim = somMap$grid$ydim, items = all, dimensions = dimensions)
  #itemDimensionValues = itemDimensionValues
  return(dataMap)
}

## Serialize the SOM data map to a JSON string.
somToJSON <- function(somMap) {
  dataMap <- somToDataMap(somMap)
  return(rjson::toJSON(dataMap))
}

## Turn a matrix/data frame into a list of row-lists keyed by the value of
## `columnName`, restoring each cell to its original (non-factor) class.
toMapByColumnName <- function(df, columnName) {
  # exact-match the key column by name
  colIndex <- grep(paste(paste("^", columnName, sep = ""), "$", sep = ""), colnames(df))
  userList <- list()
  dataTypes <- sapply(df, class)
  apply(df, 1, function(d) {
    itemList <- list()
    i <- 1
    lapply(colnames(df), function(colName) {
      item <- d[colName]
      # apply() coerces rows to character; restore the column's class
      if(dataTypes[i] != "factor") {
        class(item) <- dataTypes[i]
      } else {
      }
      itemList[[colName]] <<- item[[1]]
      i <<- i + 1
    })
    userList[[ d[colIndex] ]] <<- itemList
  })
  return(userList)
}

## One matrix per data dimension: mean value of that dimension over the
## items mapped to each grid unit, transposed to x-major order.
getDimensions <- function(somMap) {
  matrices <- list()
  m <- matrix(0, nrow = somMap$grid$ydim, ncol = somMap$grid$xdim)
  vals <- lapply(seq(1, ncol(somMap$data)), function(i) {
    return(aggregate(as.numeric(somMap$data[,i]), by=list(somMap$unit.classif), FUN=mean, simplify=TRUE)[,2])
  })
  d <- 1
  for(dimension in vals) {
    i <- 1
    # NOTE(review): `i` indexes units sequentially, but aggregate() drops
    # units with no mapped items, so values may land on the wrong cell when
    # some units are empty -- confirm against the Group.1 column.
    for(val in dimension) {
      coords <- classifierToXY(somMap, i)
      col <- coords$x + 1
      row <- coords$y + 1
      m[row, col] <- val
      i <- i + 1
    }
    name <- colnames(somMap$data)[d]
    matrices[[name]] <- t(m)
    d <- d + 1
  }
  return(matrices)
}
637cf985e944cb990ad33d23d196c287fb5587bd
26dfe6af409cb36c6aa723dd92e53d793420632f
/long_term_trials/code/data_soil_carbon.R
3995a9ecd19e6ba14e6ed2212bb9b8cd5a0fe189
[]
no_license
cwreed/SHI
d1a6f8050eaf384473dc82a2f02c13662dd5196d
88e874faec7568eaf62501faf3bb039080b3f102
refs/heads/master
2021-06-27T22:39:21.269491
2021-01-21T21:07:21
2021-01-21T21:07:21
201,963,976
0
0
null
2020-04-28T17:58:22
2019-08-12T16:04:30
R
UTF-8
R
false
false
7,092
r
data_soil_carbon.R
## Build a per-paper, per-treatment summary of soil organic carbon (SOC,
## g/kg, depth-weighted to the sampled profile) from long-term trial
## spreadsheets, and save it as data/d.carbon.summary.RData.
## Packages (openxlsx/read.xlsx, tidyverse) are loaded by code/libraries.R
## -- TODO confirm.
source("code/libraries.R")

# ---- Legacy long-term-trial carbon sheet ----
d.carbon.raw <- read.xlsx('data/Long_term_yield _data.xlsx', sheet = 'Carbon')
d.carbon <- d.carbon.raw[,-20]
names(d.carbon)[1:5] <- c("Paper", "DOI", "Study_name", "Years_of_study", "Year_of_observation")

# Fill merged-cell blanks down within paper/DOI, split the study period,
# and coerce obviously-numeric columns.
d.carbon %>%
  fill(names(.)[c(1:2)]) %>%
  group_by(DOI) %>%
  fill(names(.)[c(3:4,6:16,17,18)]) %>%
  separate(col = "Years_of_study", into = c("Year_started","Year_ended"), sep = "-") %>%
  ungroup() %>%
  mutate_if(grepl(names(.),pattern = "Yield|begin|end|start|length"), as.numeric) %>%
  mutate_if(is.character, as.factor) -> d.carbon
d.carbon[d.carbon == 'Placeholder'] <- NA

## Concatenate non-missing, non-blank elements of x with `sep`;
## returns NA when everything is missing/blank.
paste.drop.NA <- function(x, sep = ", ") {
  x <- gsub("^\\s+|\\s+$", "", x)  # trim surrounding whitespace
  ret <- paste(x[!is.na(x) & !(x %in% "")], collapse = sep)
  is.na(ret) <- ret == ""
  return(ret)
}

# Treatment combination = concatenation of the treatment columns 7:13.
d.carbon$Trt.combo <- apply(d.carbon[,7:13], 1, paste.drop.NA)

## Merge trt.codes
d.carbon.trts <- read.csv("data/d.carbon.trts.csv")
str(d.carbon.trts)
test <- d.carbon %>% anti_join(d.carbon.trts[,c(1,10:12)])  # unmatched rows, for inspection
d.carbon %>% inner_join(d.carbon.trts[,c(1,10,11,12)]) -> d.carbon
d.carbon <- d.carbon[!is.na(d.carbon$Trt.code),]

## Summarize within papers
# Drop open-ended depths, coerce types needed for the depth arithmetic.
d.carbon <- droplevels(d.carbon[-which(d.carbon$`Soil.sample.depth.(cm)` %in% c(">115",">120")),])
d.carbon$`Soil.sample.depth.(cm)` <- as.character(d.carbon$`Soil.sample.depth.(cm)`)
d.carbon$Year_of_observation <- as.numeric(as.character(d.carbon$Year_of_observation))
d.carbon$Bulk.density <- as.numeric(as.character(d.carbon$Bulk.density))

# Keep each paper's latest observation year; derive depth increments and the
# soil mass (kg/ha) of each increment from bulk density (assume BD = 1 g/cm3
# when missing).
d.carbon %>%
  dplyr::group_by(Paper) %>%
  filter(Year_of_observation == max(as.numeric(Year_of_observation))) %>%
  separate(`Soil.sample.depth.(cm)`, into = c("Top.depth", "Bottom.depth"), sep = "-", remove = F) %>%
  mutate(Depth.increment=as.numeric(Bottom.depth) - as.numeric(Top.depth)) %>%
  mutate(max.bottom = max(as.numeric(Bottom.depth))) %>%
  mutate(Depth.proportion =as.numeric(Depth.increment)/max.bottom) %>%
  mutate(Soil.kg.per.hectare = case_when(
    !is.na(Bulk.density) ~ case_when(
      Bulk.density.units == 'g cm-3' ~ as.numeric(100000 * Bulk.density * Depth.increment),
      Bulk.density.units == 'kg m-3' ~ as.numeric(10000 * Bulk.density * Depth.increment),
      Bulk.density.units == 'Mg m-3' ~ as.numeric(1e+7 * Bulk.density * Depth.increment),
      Bulk.density.units == 't m-3' ~ as.numeric(1e+7 * Bulk.density * Depth.increment)
    ),
    is.na(Bulk.density) ~ as.numeric(100000*Depth.increment))) -> d.carbon
d.carbon$Depth.proportion
d.carbon[is.na(d.carbon$Depth.proportion), "Depth.proportion"] <- 1
d.carbon <- d.carbon[as.numeric(as.character(d.carbon$Bottom.depth)) < 50,]  # top 50 cm only

# Keep only rows whose measurement is an SOC/SOM quantity we can convert.
d.carbon %>%
  filter(
    `SOM.or.SOC` == "SOM"|
    `SOM.or.SOC` == "SOC"|
    `SOM.or.SOC` == "SOM (total)"|
    `SOM.or.SOC` == "SOC stock as equivalent soil mass"|
    `SOM.or.SOC` == "SOC stock"|
    `SOM.or.SOC` == "SOC content"|
    `SOM.or.SOC` == "SOC storage"|
    `SOM.or.SOC` == "SOC (total)"|
    `SOM.or.SOC` == "SOC Stock"|
    `SOM.or.SOC` == "TOC"|
    `SOM.or.SOC` == "Total C"|
    `SOM.or.SOC` == "Total SOC"|
    `SOM.or.SOC` == "SOC pool"
  ) -> d.carbon
d.carbon <- droplevels(d.carbon)
unique(d.carbon$C.Units)
d.carbon <- droplevels(d.carbon[!d.carbon$C.Units %in% "g kg-1 aggregates",])
d.carbon <- d.carbon[!d.carbon$C.Units %in% "kg C m-2\n(on 450 kg m-2 soil)",]
d.carbon$Amount <- as.numeric(as.character(d.carbon$Amount))

# Convert every measurement to a common SOC concentration (g/kg soil).
d.carbon %>%
  mutate(SOC.g.kg = case_when(
    C.Units == "%" ~ Amount/.1,
    #C.Units == "kg C m-2\n(on 450 kg m-2 soil)" ~ Amount*1000/Soil.kg.per.hectare*1000,
    C.Units == "kg m-2" ~ Amount*10000*1000/Soil.kg.per.hectare,
    C.Units == "g kg-1" ~ Amount,
    C.Units == "Mg ha-1" ~ (Amount*1000000/Soil.kg.per.hectare),
    C.Units == "T ha-1" ~ (Amount*1000000/Soil.kg.per.hectare),
    C.Units == "t ha-1" ~ (Amount*1000000/Soil.kg.per.hectare)
  )) -> d.carbon
# SOM -> SOC using the conventional 0.58 carbon fraction.
d.carbon[d.carbon$SOM.or.SOC %in% c("SOM","SOM (total)"),"SOC.g.kg"] <- d.carbon[d.carbon$SOM.or.SOC %in% c("SOM","SOM (total)"),"SOC.g.kg"]*.58

##
d.carbon$`Soil.sample.depth.(cm)` <- as.factor(d.carbon$`Soil.sample.depth.(cm)`)
# Depth-weight SOC within each depth layer, then sum across layers per
# paper x treatment; also keep SD and n of the weighted values.
d.carbon %>%
  group_by(Paper, Trt.combo, `Soil.sample.depth.(cm)`) %>%
  mutate(SOC.g.kg.weighted = Depth.proportion*SOC.g.kg) %>%
  group_by(Paper, Trt.combo) %>%
  dplyr::summarise(SOC.SD = sd(SOC.g.kg.weighted, na.rm = TRUE), SOC.n = n(), SOC.g.kg.weighted = sum(SOC.g.kg.weighted, na.rm = TRUE)) -> d.carbon.summary
#d.carbon.summary <- (d.carbon.summary[!d.carbon.summary$SOC.g.kg.weighted > 150,])
#d.carbon.summary <- (d.carbon.summary[!d.carbon.summary$SOC.g.kg.weighted == 0,])

### New carbon data -- same pipeline for the AgEvidence extraction
d.carbon.new <- read.xlsx("data/AgEvidence_Oldfield_selected.xlsx", sheet = "carbon")
d.carbon.new$Trt.combo <- apply(d.carbon.new[,6:12], 1, paste.drop.NA)
d.carbon.trts <- read.csv("data/d.carbon.trts.csv")
test <- d.carbon.new %>% anti_join(d.carbon.trts[,c(1,10,11,12)])  # unmatched rows, for inspection
d.carbon.new %>% inner_join(d.carbon.trts[,c(1,10,11,12)]) -> d.carbon.new

## Summarize within papers
d.carbon.new %>%
  group_by(Paper, crop) %>%
  filter(obs.year == max(obs.year)) %>%
  mutate(Depth.increment = as.numeric(`bottom.measurement.depth.(cm)`) - as.numeric(`top.measurement.depth.(cm)`)) %>%
  do(mutate(., max.bottom = as.numeric(max(as.numeric(`bottom.measurement.depth.(cm)`))))) %>%
  mutate(Depth.proportion = as.numeric(Depth.increment)/max.bottom) %>%
  mutate(Soil.kg.per.hectare = case_when(
    !is.na(soil.bulk.density.units) ~ case_when(
      soil.bulk.density.units == 'g/cm^3' ~ as.numeric(1e+8/1000 * soil.bulk.density.value * Depth.increment),
      soil.bulk.density.units == 'Mg/m^3' ~ as.numeric(1e+7 * soil.bulk.density.value * Depth.increment)),
    is.na(soil.bulk.density.units) ~ as.numeric(100000*Depth.increment))) -> d.carbon.new
d.carbon.new[is.na(d.carbon.new$Depth.proportion), 'Depth.proportion'] <- 1
d.carbon.new <- d.carbon.new %>%
  filter(`bottom.measurement.depth.(cm)` < 50 | is.na(`bottom.measurement.depth.(cm)`)) %>%
  filter(Paper != "Campbell et al. 2007")

# Convert to SOC g/kg, as above.
d.carbon.new %>%
  mutate(SOC.g.kg = case_when(
    soil.carbon.units == '%' ~ soil.carbon.value/.1,
    soil.carbon.units == 'Mg/ha' ~ (soil.carbon.value*1000000/Soil.kg.per.hectare),
    soil.carbon.units == 'g C/g soil' ~ soil.carbon.value*1000)) -> d.carbon.new

d.carbon.new %>%
  group_by(Paper, Trt.combo, `top.measurement.depth.(cm)`) %>%
  mutate(SOC.g.kg.weighted = Depth.proportion*SOC.g.kg) %>%
  group_by(Paper, Trt.combo) %>%
  summarize(SOC.SD = sd(SOC.g.kg.weighted, na.rm = TRUE), SOC.n = n(), SOC.g.kg.weighted = sum(SOC.g.kg.weighted, na.rm = TRUE)) -> d.carbon.new_summary

# Combine old and new summaries and persist.
d.carbon.summary %>% rbind(d.carbon.new_summary) -> d.carbon.summary
d.carbon.summary[is.na(d.carbon.summary$SOC.SD), "SOC.SD"] <- 0
save("d.carbon.summary", file = "data/d.carbon.summary.RData")
4a758677541eba70396a48644b72220b47cda9d3
b4dac3475d3c9d6f56b5cc24b80f904cf24400b5
/r_course_phd/all_R_script_files1/simple_function.R
9e3eab7e6a0a20bc4c511468717f8a17aac003fc
[]
no_license
rohitfarmer/learning
8aecdeddfa82bddf59be4ee9005e6783df4a4010
f0550cb9bc91287f3fbcee13d63fb182462ee920
refs/heads/master
2020-03-13T00:43:24.836300
2019-10-06T21:20:10
2019-10-06T21:20:10
130,882,198
0
0
null
null
null
null
UTF-8
R
false
false
65
r
simple_function.R
add <- function(arg1, arg2) { result <- arg1 + arg2 result }
c1b40a7a1dfa71e429b934fe33b59146660b3d40
ef290d0ed8111815f8d83054a80f79f34e2b82ce
/Alderaan.R
1842f13cb5d02dc4ef541be63cf8784da75c3e6a
[]
no_license
Venkatagutha/Web-APIs-in-R.
14ec0451d2f3ac92b2e00a4d649c0c6724039a2a
1b475563b4e3166872dbc00b160aa1d2a7f272cc
refs/heads/master
2020-03-22T12:31:39.423708
2018-07-12T03:50:14
2018-07-12T03:50:14
140,045,321
0
0
null
null
null
null
UTF-8
R
false
false
549
r
Alderaan.R
install.packages("httr") install.packages("jsonlite") install.packages("magrittr") library(httr) library(jsonlite) library(magrittr) # With the help of method GET, get the #data for planet Alderaan in StarWorlds alderaan<-GET("http://swapi.co/api/planets/", query = list(search="alderaan")) alderaan$status_code alderaan$header$`content-type` names(alderaan) #getting the content text<- content(alderaan, as="text", encoding = "UTF-8") # parsing with JSON LITE cont<- text%>% fromJSON planet_data<-cont$results str(planet_data)
9f3d2c0965a4a03cb9b954a303eaefd4cab9ffab
7aa6036ba7caf7ca08c6e341814ada363838ad39
/Ch04/4_2_Condition.R
6d2efdc2059ead34a039be13034f8b59ae3ed482
[]
no_license
kimhalyn/R
1db9ee75fa944f66fee63cf9abc33f94be82b296
ca67137b14d1e14650f859ced5dfbd9e6670c0cf
refs/heads/master
2023-06-09T17:17:24.121110
2021-07-01T15:11:34
2021-07-01T15:11:34
330,568,605
0
0
null
null
null
null
UTF-8
R
false
false
1,810
r
4_2_Condition.R
# 날짜 : 2021/01/19 # 이름 : 김하린 #내용 : Ch04.제어문과 함수 - 조건문 교재 p110 #교재 p110 실습 - if() 사용하기 x <- 50;y <- 4;z <- x * y if(x * y >= 40) { cat("x * y의 결과는 40 이상입니다.\n") cat("x * y = ", z) }else{ cat("x * y의 결과는 40 미만입니다. x * y = ",z, "\n") } #교재 p110 실습 - if() 사용으로 입력된 점수의 학점 구하기 score <- scan() result <- "노력" #결과 초기값 설정 if(score >= 80){ result <- "우수" } cat("당신의 학점은", result, score) #교재 p111 실습 - if ~ else if 형식으로 학점 구하기 score <- scan() if(score >= 90){ result = "A학점" }else if(score >= 80){ result = "B학점" }else if(score >= 70){ result = "C학점" }else if(score >= 60){ result = "D학점" }else{ result = "F학점" } cat("당신의 학점은", result) print(result) #교재 p112 실습 - ifelse() 사용하기 (조건, 참일 경우 처리문, 거짓일 경우 처리문) score <- scan() ifelse(score >= 80, "우수", "노력") ifelse(score <= 80, "우수", "노력") #교재 p113 실습 - switch() 를 사용하여 사원명으로 급여정보 보기 switch("name", id="hong", pwd="1234", age=35, name="홍길동") empname <- scan(what="") empname switch(empname, hong = 250, lee = 350, kim = 200, kang = 400) #교재 p114 실습 - 벡터에서 which() 사용:index 값을 반환 name <- c("kim", "lee","choi", "park") which(name == "choi") #교재 p114 실습 - 데이터프레임에서 which() 사용 no <- c(1:5) name <- c("홍길동", "이순신", "강감찬", "유관순", "김유신") score <- c(85, 78, 89, 90, 74) exam <- data.frame(학번 = no, 이름 = name, 성적 = score) exam which(exam$이름 == "유관순") exam[4,] #4행 데이터 보기
e8dd351fa7fe437e55b7010faff998dc0812fe26
ffdea92d4315e4363dd4ae673a1a6adf82a761b5
/data/genthat_extracted_code/VTrack/examples/PointsCircuitous_crocs.Rd.R
e1e707477fb175add4cb413cdca25eb47546a626
[]
no_license
surayaaramli/typeRrh
d257ac8905c49123f4ccd4e377ee3dfc84d1636c
66e6996f31961bc8b9aafe1a6a6098327b66bf71
refs/heads/master
2023-05-05T04:05:31.617869
2019-04-25T22:10:06
2019-04-25T22:10:06
null
0
0
null
null
null
null
UTF-8
R
false
false
696
r
PointsCircuitous_crocs.Rd.R
library(VTrack) ### Name: PointsCircuitous_crocs ### Title: Points File Containing VR2 Locations on the Wenlock River in ### 2008 with Waypoints Connecting Receivers ### Aliases: PointsCircuitous_crocs ### Keywords: datasets ### ** Examples # Load the points file for the Wenlock River data(PointsCircuitous_crocs) head(PointsCircuitous_crocs) receiversonly <- na.omit(PointsCircuitous_crocs) # Plot the locations of the receivers plus the waypoints par(mfrow=c(1,1),las=1,bty="l") plot(PointsCircuitous_crocs$LONGITUDE, PointsCircuitous_crocs$LATITUDE, pch=1,cex=0.5,col="grey",xlab="Longitude",ylab="Latitude") points(receiversonly$LONGITUDE,receiversonly$LATITUDE,cex=1,pch=10)
e62ecf9fa74ff3a725b37268de28c8f5f2c1da85
66f8711bc942a1bc635a6deea253e9a49c718094
/man/romanToArabic.Rd
afccb3474947a903ada699c05917d38b32775986
[ "MIT" ]
permissive
seanrsilver/novnet
bd179476c48a8dd809757c60488dde7193a4145b
85107cfbbabc68c603134db5b5fc8bbf9219624b
refs/heads/master
2020-06-05T18:20:58.057024
2019-06-18T14:29:45
2019-06-18T14:29:45
192,495,039
0
0
null
null
null
null
UTF-8
R
false
true
490
rd
romanToArabic.Rd
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/romanToArabic.R
\name{romanToArabic}
\alias{romanToArabic}
\title{Roman Numeral Conversion}
\usage{
romanToArabic(filename)
}
\arguments{
\item{filename}{File name as character string, i.e. "Crusoe".}
}
\description{
This function converts roman numeral chapter numbers to arabic, from Project Gutenberg files. It expects chapter headings in this format: CHAPTER I, CHAPTER II, etc...
}
\keyword{Disambiguation}
ac435ccee6822aa1ed7324dac50670fc2e95d0a2
a010c9aaf3a0e87e289f6fc9aa232ebf80b15116
/Code_fraud_project_12_new.R
64f0720ba3b4f13f2daa18637ddd9b391e08e9f0
[]
no_license
jmunich/Fraud-detection
a0d6d07841453f7ff1e498b26f6088a583c3cc95
dd0b8e7e9b6e98206c403a9ef99d4e745dfca1e0
refs/heads/master
2020-05-05T04:09:17.420473
2019-04-05T14:55:49
2019-04-05T14:55:49
179,699,613
0
0
null
null
null
null
UTF-8
R
false
false
9,056
r
Code_fraud_project_12_new.R
### Fraud-detection feature extraction pipeline.
### Builds a cleaned document corpus, extracts linguistic features
### (named entities, dependency-parse proportions, n-grams, readability,
### per-document word co-occurrence networks, LIWC columns), reshapes
### everything to one row per article (sections d/i/m/r), and draws a
### grouped train/test split.
###
### NOTE(review): depends on objects created by the sourced helper
### scripts (`tot_corpus`, `all_id` from Get_new_text.R; `sws` from
### get_sw.R; `allw` from words.R; `memberdata` from Get_membership.R)
### -- confirm those scripts define exactly these names.

### Create corpus ----
library(caret)
library(readtext)
library(quanteda)
library(spacyr)
library(igraph)
library(text2vec)
library(reshape2)
library(gtools)
library(lexicon)
library(gdata)
library(pROC)

source("Get_new_text.R")  # defines tot_corpus and all_id

### Clean data: map British to American spellings, lower-case ----
toks <- tokens(tot_corpus)
vocabulary <- read.csv("vocab.txt")
toks <- tokens_replace(toks, as.character(vocabulary[, 1]), as.character(vocabulary[, 2]))
toks <- tokens_remove(toks, pattern = " ")
toks <- lapply(toks, function(x) tolower(x))
ctoks <- unlist(lapply(toks, function(x) paste(x, sep = " ", collapse = " ")))
tot_corpus_clean <- corpus(ctoks)

### Entities ----
spacy_initialize(model = 'en')
docvars(tot_corpus_clean, "text") <- all_id[, 1]
identify <- docvars(tot_corpus_clean)
identify <- cbind(identify, rownames(identify))
parses <- spacy_parse(tot_corpus_clean, dependency = TRUE)
entities <- entity_extract(parses, type = "all")
entities <- cbind(entities, mne = rep(0, length(entities[, 1])))
# Flag the entity types of interest; everything else counts as 0.
entities[, 5] <- ifelse(entities$entity_type %in% c("ORG", "QUANTITY", "ORDINAL", "CARDINAL"), 1, 0)
encount <- aggregate(entities[, 5], by = list(entities[, 1]), FUN = sum, na.rm = TRUE)
colnames(identify) <- c("id", "match")
colnames(encount) <- c("match", "score")
endata <- merge(identify, encount, by = "match", all.x = TRUE)
endata[which(is.na(endata[, 3]) == TRUE), 3] <- 0
# Normalise entity counts by document length.
endata[, 3] <- endata[, 3] / ntoken(tot_corpus_clean)

### Lemmatised corpus ----
lemmatized <- c()
counter <- 0
for (i in unique(parses[, 1])) {
  counter <- counter + 1
  wordvec <- parses[which(parses[, 1] == i), 5]
  lemmatized[counter] <- paste(wordvec, sep = " ", collapse = " ")
}
tot_corpus_clean_lemma <- corpus(lemmatized)

### Dependencies ----
# Per document: mean, and linear trend over sentences, of the share of
# tokens whose head is the ROOT / nsubj / pobj token of that sentence.
prop_root <- c()
prop_nsubj <- c()
prop_nobj <- c()
slope_root <- c()
slope_nsubj <- c()
slope_nobj <- c()
counteri <- 0
for (i in unique(parses[, 1])) {
  counteri <- counteri + 1
  root <- c()
  subject <- c()
  object <- c()
  counterj <- 0
  for (j in unique(parses[which(parses[, 1] == i), 2])) {
    counterj <- counterj + 1
    set <- as.matrix(parses[which(parses[, 1] == i & parses[, 2] == j), ])
    root_id <- set[which(set[, 8] == "ROOT"), 3]
    nsubj_id <- set[which(set[, 8] == "nsubj"), 3]
    pobj_id <- set[which(set[, 8] == "pobj"), 3]
    root[counterj] <- length(which(set[, 7] == root_id)) / length(set[, 1])
    subject[counterj] <- length(which(set[, 7] == nsubj_id)) / length(set[, 1])
    object[counterj] <- length(which(set[, 7] == pobj_id)) / length(set[, 1])
  }
  prop_root[counteri] <- mean(root)
  prop_nsubj[counteri] <- mean(subject)
  prop_nobj[counteri] <- mean(object)
  slope_root[counteri] <- lm(root ~ c(1:length(root)))$coefficients[2]
  slope_nsubj[counteri] <- lm(subject ~ c(1:length(subject)))$coefficients[2]
  slope_nobj[counteri] <- lm(object ~ c(1:length(object)))$coefficients[2]
}
dependencies_data <- cbind(prop_root, prop_nsubj, prop_nobj, slope_root, slope_nsubj, slope_nobj)

### N-grams with stopwords ----
ngrams <- dfm(tot_corpus_clean_lemma, remove_punct = TRUE, remove_numbers = TRUE, ngrams = 2:3)
select <- dfm_trim(ngrams, sparsity = .90)
select1 <- dfm_tfidf(select)
ngram_data <- as.data.frame(select1)

### N-grams without stopwords ----
## NB Get better stopwords
source("get_sw.R")  # defines sws
nostop_tot_corp <- tokens(tot_corpus_clean_lemma)
nostop_tot_corp <- tokens_remove(nostop_tot_corp, pattern = c(sws, "et", "al", "p", "c"))
ns_ngrams <- dfm(nostop_tot_corp, remove_punct = TRUE, remove_numbers = TRUE, ngrams = 1:3)
ns_select <- dfm_trim(ns_ngrams, sparsity = .90)
ns_select1 <- dfm_tfidf(ns_select)
ns_ngram_data <- as.data.frame(ns_select1)
# Drop n-grams already present in the stopword-retaining feature set.
dup <- which(colnames(ns_ngram_data) %in% colnames(ngram_data))
ns_ngram_data <- ns_ngram_data[, -dup]

### Readability ----
# Rebuild the cleaned corpus, this time preserving case (no tolower).
toks <- tokens(tot_corpus)
vocabulary <- read.csv("vocab.txt")
toks <- tokens_replace(toks, as.character(vocabulary[, 1]), as.character(vocabulary[, 2]))
toks <- tokens_remove(toks, pattern = " ")
ctoksup <- unlist(lapply(toks, function(x) paste(x, sep = " ", collapse = " ")))
tot_corpus_cleanup <- corpus(ctoksup)
readability <- textstat_readability(tot_corpus_cleanup, "Flesch")
names(readability) <- c("id", "Flesch")

### Semantic network: a boolean co-occurrence network per document,
### over the word list `allw` (30 most frequent tokens overall) ----
my_toks <- tokens(tot_corpus_clean_lemma, remove_punct = TRUE, remove_numbers = TRUE)
my_toks <- tokens_remove(my_toks, c(stopwords('en'), "et", "al", "p", "c"))
my_dfm <- dfm(my_toks, tolower = TRUE)

# Get wordlist
source("words.R")  # defines allw

# Total probabilities of occurrence
my_dfm_p <- dfm_weight(my_dfm, scheme = "prop")
props <- dfm_select(my_dfm_p, allw)
tprops <- as.data.frame(props)[, -1]

# Quantities of word occurrences
quants <- dfm_select(my_dfm, allw)
tquant <- as.data.frame(quants)[, -1]

# Prepare a dataframe for values: one column per ordered word pair.
edgelist <- permutations(length(allw), r = 2, allw, repeats.allowed = TRUE)
edgelist <- paste(edgelist[, 1], edgelist[, 2], sep = "_")
my_fcms <- data.frame(matrix(0, ncol = length(edgelist), nrow = length(my_toks)))
colnames(my_fcms) <- edgelist

for (h in seq_along(my_toks)) {
  cmat <- fcm(paste(my_toks[h], sep = " ", collapse = " "), context = "window", window = 10, count = "weighted")
  pcmat <- as.matrix(fcm_select(cmat, allw))
  namemat <- colnames(pcmat)
  if (length(pcmat) > 0) {
    for (i in seq_len(nrow(pcmat))) {
      for (j in seq_len(nrow(pcmat))) {
        pcmat[i, j] <- (pcmat[i, j] / tquant[h, namemat[j]]) / tprops[h, namemat[i]]
      }
    }
    el <- melt(as.matrix(pcmat))
    edges <- paste(el[, 1], el[, 2], sep = "_")
    # BUGFIX: write into row h (the current document). The original
    # indexed with the stale inner-loop counter `i`, so every document's
    # edge weights landed in the wrong row.
    my_fcms[h, edges] <- el[, 3]
  }
}
my_fcms <- my_fcms[, -which(colSums(my_fcms) == 0)]

### Add sentiment (LIWC columns 4 and 7) ----
file_gn <- read.table("unretracted_new.txt", header = TRUE)
file_fn <- read.table("fraudulent_new.txt", header = TRUE)
file_gn <- file_gn[, c(4, 7)]
file_fn <- file_fn[, c(4, 7)]
LIWC <- rbind(file_gn, file_fn)

### Transform from long (one row per article section) to wide ----
names(all_id)[2] <- "section_of_article"
final_data <- list(as.matrix(all_id[, 3], ncol = 1),
                   ngram_data[, -1],
                   ns_ngram_data,
                   as.matrix(readability[, -1], ncol = 1),
                   my_fcms,
                   LIWC,
                   as.matrix(endata[, 3], ncol = 1),
                   dependencies_data)
feature_names <- c("fraud", "ngrams", "ns_ngrams", "readability", "hedging", "liwc", "entities", "dependencies")
feat_namelist <- list()
final_list <- list()
for (i in seq_along(final_data)) {
  set <- cbind(all_id[, c(1, 2)], final_data[[i]])
  features <- reshape(set, idvar = "id", timevar = "section_of_article", direction = "wide")
  feat_namelist[[i]] <- colnames(final_data[[i]])
  final_list[[i]] <- features[, -1]
}

widid <- reshape(all_id[, c(1, 2)], idvar = "id", timevar = "section_of_article", direction = "wide")
# NOTE(review): ids > 900 are rescaled by 1/1000 here and below --
# presumably to reconcile two id schemes; confirm.
for (i in seq_len(nrow(widid))) {
  if (widid[i, 1] > 900) { widid[i, 1] <- widid[i, 1] / 1000 }
}

### Group memberships ----
# BUGFIX: Get_membership.R (which defines `memberdata`) and `matchids`
# were originally created *after* their first use below; moved up so the
# script runs top to bottom.
source("Get_membership.R")
matchids <- reshape(all_id[, -3], idvar = "id", timevar = "section_of_article", direction = "wide")
matchids <- matchids[, 1]

# Default membership = one past the largest known group id.
sortedmems <- rep(max(memberdata[, 2]) + 1, length(matchids))
for (i in seq_along(matchids)) {
  if (matchids[i] > 900) { matchids[i] <- matchids[i] / 1000 }
  if (length(memberdata[which(memberdata[, 1] == matchids[i]), 2]) > 0) {
    sortedmems[i] <- memberdata[which(memberdata[, 1] == matchids[i]), 2]
  }
}

### Wide-format feature names: <feature>.<section> for sections d/i/m/r ----
wide_feat_namelist <- list()
for (i in seq_along(feat_namelist)) {
  namevec <- c()
  secs <- c(".d", ".i", ".m", ".r")
  for (k in 1:4) {
    l <- length(feat_namelist[[i]]) * (k - 1)
    for (j in seq_along(feat_namelist[[i]])) {
      namevec[j + l] <- paste(feat_namelist[[i]][j], secs[k], sep = "")
    }
  }
  wide_feat_namelist[[i]] <- namevec
}

### Collapse the per-section fraud labels into one label per article ----
final_list[[1]][is.na(final_list[[1]])] <- 1000  # sentinel for missing sections
score <- list()
for (i in seq_len(nrow(final_list[[1]]))) {
  score[[i]] <- unique(unlist(final_list[[1]][i, ]))
}
fraudvec <- c()
for (i in seq_along(score)) {
  if (length(score[[i]]) == 1) {
    fraudvec[i] <- score[[i]][1]
  }
  if (length(score[[i]]) == 2) {
    # One real label plus the 1000 sentinel: keep the smaller (real) one.
    fraudvec[i] <- min(score[[i]])
  }
  if (length(score[[i]]) > 2) {
    print("Warning, something went teribly wrong!!!!")
  }
}
final_list[[1]] <- as.matrix(fraudvec, ncol = 1)

names(final_list) <- feature_names
names(wide_feat_namelist) <- feature_names
for (i in seq_along(final_list)) {
  names(final_list[[i]]) <- wide_feat_namelist[[i]]
}
names(final_list[[1]]) <- "fraud"
names(final_list[[4]]) <- c("flesch.d", "flesch.i", "flesch.m", "flesch.r")
names(final_list[[7]]) <- c("entities.d", "entities.i", "entities.m", "entities.r")

### Drop articles with incomplete features, then all-zero columns ----
final_frame <- do.call(cbind, final_list)
exclude <- complete.cases(final_frame)
incomplete <- which(exclude == FALSE)
for (i in seq_along(final_list)) {
  # Guard: `x[-integer(0), ]` would drop every row, so only subset when
  # there is something to drop (the original subsetted unconditionally).
  if (length(incomplete) > 0) {
    final_list[[i]] <- final_list[[i]][-incomplete, ]
  }
  if (i > 1) {
    zero_cols <- which(colSums(final_list[[i]]) == 0)
    if (length(zero_cols) > 0) {
      final_list[[i]] <- final_list[[i]][, -zero_cols]
    }
  }
}
if (length(incomplete) > 0) {
  widid <- widid[-incomplete, ]
}
usemember <- if (length(incomplete) > 0) sortedmems[-incomplete] else sortedmems

### Grouped train/test split ----
# Repeatedly sample whole membership groups until the training set holds
# 60-65% of articles and its fraud rate is close to 0.5.
set.seed(1)
selects <- c()
a <- 1
b <- 1
while (a < .6 | a > .65 | b < .494 | b > .506) {
  # BUGFIX: the original passed a whole permutation vector as the `size`
  # argument of sample() (`sample(1:(length(...)))`), which is invalid;
  # draw a single random group count instead.
  selects <- sample(unique(usemember), sample(seq_along(unique(usemember)), 1))
  intraining <- which(usemember %in% selects)
  a <- length(intraining) / length(usemember)
  b <- sum(as.numeric(unlist(final_list[1])[intraining])) / length(unlist(final_list[1])[intraining])
}
inTraining_sep <- intraining
91bda5082d8724b25f8049415845500ea23afa50
0886d094611c5e514a3366482ae2238a7b7a3e4b
/man/pmwright1.Rd
ac0f4ec7203f5abf08e717b6f4eecda08e3cc2ba
[]
no_license
cran/MWright
8ac3118ce26d91ded51c14f1ba709b2e28711e12
828384ab72439cdbc6bf53d1385ea72fb59204dc
refs/heads/master
2020-06-30T00:49:44.347860
2019-08-07T22:00:05
2019-08-07T22:00:05
200,671,449
0
0
null
null
null
null
UTF-8
R
false
true
1,574
rd
pmwright1.Rd
% Generated by roxygen2: do not edit by hand % Please edit documentation in R/distn1side.R \name{pmwright1} \alias{pmwright1} \title{Distribution function for one-sided M-Wright distribution} \usage{ pmwright1(alp, sc, upper) } \arguments{ \item{alp}{point estimate for shape parameter alpha.} \item{sc}{point estimate for scale parameter s.} \item{upper}{non-negative upper quantile} } \value{ numeric } \description{ Calculates a left-tail probability. } \examples{ pmwright1(runif(1), runif(1,0,10),Inf ) pmwright1(runif(1), runif(1,0,10), 0.5 ) } \references{ Cahoy and Minkabo (2017). \emph{Inference for three-parameter M-Wright distributions with applications.} Model Assisted Statistics and Applications, 12(2), 115-125. \url{https://doi.org/10.3233/MAS-170388} Cahoy (2012). \emph{Moment estimators for the two-parameter M-Wright distribution.} Computational Statistics, 27(3), 487-497. \url{https://doi.org/10.1007/s00180-011-0269-x} Cahoy (2012). \emph{Estimation and simulation for the M-Wright function.} Communications in Statistics-Theory and Methods, 41(8), 1466-1477. \url{https://doi.org/10.1080/03610926.2010.543299} Cahoy (2011). \emph{On the parameterization of the M-Wright function.} Far East Journal of Theoretical Statistics, 34(2), 155-164. \url{http://www.pphmj.com/abstract/5767.htm} Mainardi, Mura, and Pagnini (2010). \emph{The M-Wright Function in Time-Fractional Diffusion Processes: A Tutorial Survey}. Int. J. Differ. Equ., Volume 2010. \url{https://doi.org/10.1155/2010/104505} }
b1a7a9887c90a42bc8f152513629c95ed21703b5
1c74d653f86b446a9cd87435ce3920977e2cb109
/packages/av/test.R
338ef472ee47953b7503858c788e553ce5b0966a
[ "Apache-2.0" ]
permissive
rstudio/shinyapps-package-dependencies
f1742d5cddf267d06bb895f97169eb29243edf44
8d73ce05438f49368b887de7ae00ff9d2681df38
refs/heads/master
2023-07-22T08:53:56.108670
2023-07-12T13:58:58
2023-07-12T13:58:58
22,746,486
81
76
NOASSERTION
2023-07-12T13:59:00
2014-08-08T04:57:26
R
UTF-8
R
false
false
498
r
test.R
options(download.file.method="curl") install.packages("av", repos="https://cran.rstudio.com") # from av_demo output = tempfile(fileext = ".mp4") av::av_demo(output = output) stopifnot(file.exists(output)) output = tempfile(fileext = ".mkv") av::av_demo(output = output) stopifnot(file.exists(output)) output = tempfile(fileext = ".mov") av::av_demo(output = output) stopifnot(file.exists(output)) output = tempfile(fileext = ".flv") av::av_demo(output = output) stopifnot(file.exists(output))
18881877321fe312d54ecc102f4551d41a56580e
229f163de91efd1d38909e1f4d24aac4741c92f6
/PRSmodels/lassosum.R
be9c6d8fe04e79d7972cc1fc5667a8836f5c9e68
[]
no_license
daiqile96/OTTERS
531433cdaeb9e6dbe35eb1ba92977c55974154ae
8e0bb1f1c9c1065a05ecf9f88a53d76a828a76cd
refs/heads/main
2023-09-01T17:37:25.069311
2023-08-09T14:41:34
2023-08-09T14:41:34
468,945,830
14
1
null
null
null
null
UTF-8
R
false
false
3,470
r
lassosum.R
#!/usr/bin/env Rscript

###################################################################
# Run lassosum on a summary-statistics file with standardized betas and
# write the estimated per-SNP effect sizes (ES) to --out_path.

# Import packages needed
library(data.table)
library(lassosum)

###############################################################
# parse input arguments
Sys.setlocale("LC_ALL", "C")
options(stringsAsFactors = F)

## Collect arguments
args <- commandArgs(TRUE)

## Default setting when no arguments passed
if (length(args) < 1) {
  args <- c("--help")
}

## Parse arguments (we expect the form --arg=value)
parseArgs <- function(x) strsplit(sub("^--", "", x), "=")
argsDF <- as.data.frame(do.call("rbind", parseArgs(args)))
argsL <- as.list(as.character(argsDF$V2))
names(argsL) <- argsDF$V1

## Check mandatory parameters.
## BUGFIX: the usage messages named flags (--medianN_path, --bim_dir,
## --sst_dir) that differ from the fields actually read; they now match
## the real flag names (--medianN, --bim_file, --sst_file).
if (is.null(argsL$chr)) {
  cat('* Please specify the chromosome --chr\n')
  q(save="no")
} else if (is.null(argsL$medianN)) {
  cat('* Please specify the path to the median sample size file using --medianN\n')
  q(save="no")
} else if (is.null(argsL$bim_file)) {
  cat('* Please specify the path to the reference PLINK file using --bim_file\n')
  q(save="no")
} else if (is.null(argsL$sst_file)) {
  cat('* Please specify the path to the summary statistics with standardized beta using --sst_file\n')
  q(save="no")
} else if (is.null(argsL$LDblocks)) {
  cat('* Please specify the name of LDblocks --LDblocks\n')
  q(save="no")
} else if (is.null(argsL$out_path)) {
  cat('* Please specify the output path\n')
  q(save="no")
}

## Check optional parameters and assign default values
## NOTE(review): n_thread (and medianN) are validated/defaulted but never
## used below -- confirm whether they should be forwarded.
if (is.null(argsL$n_thread)) {
  argsL$n_thread <- 1
}

print(argsL)

###############################################################
# time calculation
start_time <- Sys.time()

# Create the output file
gene_name <- argsL$gene_name
chr <- argsL$chr

# Specify the PLINK file of the reference panel
bfile <- argsL$bim_file

# Read the summary statistics of standardized beta in single variant test
# the standardized beta in single variant test = correlation
ss <- fread(argsL$sst_file)
cor <- ss$Beta

# lassosum only allows -1 < cor < 1; rescale if any |cor| >= 1
if (sum(abs(cor) >= 1) > 0) {
  shrink_factor <- max(abs(cor)) / 0.9999
  cor <- cor / shrink_factor
}

# Take the second "_"-separated field of the SNP id as the position
# (assumes ids of the form "<chr>_<pos>_..." -- TODO confirm).
# vapply over strsplit replaces the per-element sapply loop.
ss$SNPPos <- vapply(strsplit(ss$SNP, "_"), function(parts) parts[2], character(1))
ss$Chrom <- chr

# train lassosum
out <- lassosum.pipeline(cor = cor,
                         chr = as.numeric(ss$Chrom),
                         pos = as.numeric(ss$SNPPos),
                         A1 = ss$A1,
                         A2 = ss$A2, # A2 is not required but advised
                         s = c(0.2, 0.5, 0.9, 1),
                         lambda = exp(seq(log(0.0001), log(0.1), length.out = 20)),
                         ref.bfile = bfile,  # The reference panel dataset
                         test.bfile = bfile, # We don't have test data here
                         LDblocks = argsL$LDblocks,
                         exclude.ambiguous = F,
                         destandardize = F,
                         trace = 0)

# perform pseudovalidation and keep the best (s, lambda) pair
v <- pseudovalidate(out)
lassosum_out <- subset(out, s = v$best.s, lambda = v$best.lambda)

# save estimated beta
sumstats <- lassosum_out$sumstats[, c("chr", "pos", "A1", "A2")]
beta <- unlist(lassosum_out$beta)
results <- data.frame(sumstats, ES = beta)
results <- results[, c("chr", "pos", "A1", "A2", "ES")]

write.table(results, argsL$out_path,
            quote = F, row.names = F, col.names = T, sep = "\t", append = F)
c7b7b8fdf40c2819a9862b84085b3f5065490ae7
4c14bcc37fa428673536b87083afb734866f947c
/man/series.Rd
aa5e156f41bd3b263686e7f1eb45d14f60bb8d13
[]
no_license
RobinHankin/ResistorArray
9c06802cb867eb3c40014ae5552ae8b8420411d1
fe8588cc44b3c5afd91033efd768ce9846860087
refs/heads/master
2021-09-28T17:31:52.431138
2021-09-18T22:25:41
2021-09-18T22:25:41
168,077,182
0
0
null
null
null
null
UTF-8
R
false
false
985
rd
series.Rd
\name{series} \alias{series} \title{Conductance matrix for resistors in series} \description{ Conductance matrix for resistors of arbitrary resistance in series } \usage{ series(x) } \arguments{ \item{x}{The resistances of the resistors.} } \details{ \strong{Note:} if \code{length(x)=n}, the function returns a conductance matrix of size \code{n+1} by \code{n+1}, because \code{n} resistors in series have \code{n+1} nodes to consider. } \author{Robin K. S. Hankin} \seealso{\code{\link{cube}}} \examples{ ## Resistance of four resistors in series: resistance(series(rep(1,5)),1,5) ##sic! FOUR resistors have FIVE nodes ## What current do we need to push into a circuit of five equal ## resistors in order to maintain the potentials at 1v, 2v, ..., 6v? circuit(series(rep(1,5)),v=1:6) #(obvious, isn't it?) ## Now, what is the resistance matrix of four nodes connected in series ## with resistances 1,2,3 ohms? Wu(series(1:3)) #Yup, obvious again. } \keyword{array}
0324b1faab7b06bba5491ad27f965f412e339b6f
7c6017497f50d6e068f4ad18d70c1acb119c391a
/cachematrix.R
f624dbd3d66412954f093fce55f0a2aa13613c01
[]
no_license
johnfossella/ProgrammingAssignment2
f9af7a77993b85aa20ff2cded7493cbce2249bb8
296e196254e9102e3d5068cd31a18f51c48a5111
refs/heads/master
2021-01-16T23:02:03.406020
2015-09-21T23:02:52
2015-09-21T23:02:52
42,868,290
0
0
null
2015-09-21T13:37:13
2015-09-21T13:37:12
null
UTF-8
R
false
false
901
r
cachematrix.R
## For an invertable matrix this script creates ## a list of functions sets and gets the matrix ## and also sets and gets the inverse of the matrix. ## These are used by the cacheSolve script. makeCacheMatrix <- function(x = matrix()) { inv = NULL set = function(y) { x <<- y inv <<- NULL } get = function() x setinverse = function(inverse) inv <<- inverse getinverse = function() inv list(set=set, get=get, setinverse=setinverse, getinverse=getinverse) } ## This script gets the output of makeCacheMatrix() ## and uses it to calculate the inverse. ## Then it sets the value of the inverse in the cache. cacheSolve <- function(x, ...) { inv = x$getinvers() if (!is.null(inv)){ message("getting cached data") return(inv) } matrixvalues = x$get() inv = solve(matrixvalues, ...) x$setinvers(inv) return(inv) }
374dad41da5ae7ab808091d4e44529211951f91a
3866452efa0b4bc18eb3e560106c6c4d7951f07c
/man/step.Rd
718f723789145c7f017a0943177ff56c65f597e0
[]
no_license
cran/relax
39b419a84a1271e725aa01748fd5152fcd454212
9032feb8f608664c9fb145fd62ef11fa83fe998f
refs/heads/master
2020-04-14T23:23:20.277277
2014-03-10T00:00:00
2014-03-10T00:00:00
null
0
0
null
null
null
null
UTF-8
R
false
false
789
rd
step.Rd
\name{step} \alias{step} \title{ modified version of step for relax } \description{Select a formula-based model by AIC.} \usage{ step(object, scope, scale = 0, direction = c("both", "backward", "forward"), trace = 1, keep = NULL, steps = 1000, k = 2, ...) } \arguments{ \item{object}{ model } \item{scope}{ range of model } \item{scale}{ used in the definition of AIC statistic } \item{direction}{ mode of search } \item{trace}{ printing during running } \item{keep}{ filter function } \item{steps}{ max number of steps } \item{k}{ multiple of number of d.f. for penalty } \item{\dots}{ further arguments } } \details{ see help of step (package stats) } \value{ stepwise-selected model is returned ... } \seealso{ \code{\link{step}} } \examples{ ## } \keyword{ IO }
13990779e4ec34828f6c370b52fd6871aa7d8b90
03bf43d695db86fb8203e5186a8b3ce12d92d9aa
/tarea0/problema6.R
723c14e4c172a7c19ac38c18ae22027d8cfbccfb
[]
no_license
joseaznar/simulacion
e01fc9bace916cb445641190c7583cda87d0d8f1
525484c4616fb8e0fde3483ca70a7f1f93dc1ee2
refs/heads/master
2021-09-06T06:02:24.358856
2018-02-03T00:40:24
2018-02-03T00:40:24
119,995,028
0
0
null
null
null
null
UTF-8
R
false
false
218
r
problema6.R
# problema 6 # primero definimos la ecuación eq = function(x){exp(-x*x)/(1+x*x)} # graficamos la función entre 0 y 10 curve(eq, from=0, to=10) # ahora integramos entre 0 e infinito integrate(eq, lower=0, upper=Inf)
8e1871d2afb46c6c782a6ddb153cf6bf2a992b46
5e1560b3a996ed2f56a74f32dc987a8e60e405f3
/R/tolerance.eigen.R
f0e12877f856cc4724c5124cde17d1c92211bd04
[]
no_license
diogo-almeida/GSVD
19803dde129b18cb0492d3e211e037e33efb7d31
90cb90497daf1efd38e1a3ab85439407f16ff46e
refs/heads/master
2020-12-18T15:34:15.768461
2019-11-14T21:28:24
2019-11-14T21:28:24
null
0
0
null
null
null
null
UTF-8
R
false
false
2,999
r
tolerance.eigen.R
#' @export
#'
#' @title \code{tolerance.eigen}: An eigenvalue decomposition to truncate potentially spurious (near machine precision) components.
#'
#' @description \code{tolerance.eigen} eliminates likely spurious components: any eigenvalue (squared singular value) below a tolerance level is eliminated.
#' The (likely) spurious eigen values and vectors are then eliminated from \code{$vectors} and \code{$values}.
#' The use of a real positive value for \code{tol} will eliminate any small valued components.
#' With \code{tol}, \code{tolerance.eigen} will stop if any singular values are complex or negative.
#'
#' @param x A data matrix of size for input to the eigen value decomposition (\code{\link{eigen}})
#' @param tol Default is \code{sqrt(.Machine$double.eps)}. A tolerance level for eliminating near machine precision components.
#' Use of this parameter causes \code{tolerance.eigen} to stop if negative or complex eigen values are detected.
#' The use of \code{tol < 0}, \code{NA}, \code{NaN}, \code{Inf}, \code{-Inf}, or \code{NULL} passes through to \code{\link{eigen}}.
#' @param ... Further arguments to \code{\link{eigen}}. See \code{\link{eigen}}.
#'
#' @return A list with two elements (like \code{eigen}):
#'   \item{values}{ A vector containing the eigen values of x > \code{tol}.}
#'   \item{vectors}{ A matrix whose columns contain the right singular vectors of x, present if nv > 0. Dimension \code{min(c(ncol(x), nv, length(d))}.}
#'
#' @seealso \code{\link{eigen}}
#'
#' @author Derek Beaton
#' @keywords multivariate, diagonalization, eigen
tolerance.eigen <- function(x, tol = sqrt(.Machine$double.eps), ...) {

  eigen_res <- eigen(x, ...)

  # Pass-through: these tol values mean "no truncation".
  # BUGFIX: use short-circuit || so a NULL tol is caught before the
  # vectorized predicates run -- is.infinite(NULL) returns logical(0),
  # which made the original `|` chain an invalid `if` condition.
  if (is.null(tol) || is.na(tol) || is.nan(tol) || is.infinite(tol) || tol < 0) {
    return(eigen_res)
  }

  ## once you go past this point you *want* the tolerance features.
  if (any(unlist(lapply(eigen_res$values, is.complex)))) {
    stop("tolerance.eigen: eigen values ($values) are complex.")
  }
  if (any((abs(eigen_res$values) > tol) & (sign(eigen_res$values) == -1))) {
    stop("tolerance.eigen: eigen values ($values) are negative with a magnitude above 'tol'.")
  }

  evs.to.keep <- which(!(eigen_res$values < tol))
  if (length(evs.to.keep) == 0) {
    stop("tolerance.eigen: All eigen values were below 'tol'")
  }

  eigen_res$values <- eigen_res$values[evs.to.keep]

  ## $vectors is NULL when eigen() was called with only.values = TRUE.
  if (!is.null(eigen_res$vectors)) {
    # BUGFIX: drop = FALSE keeps a one-column result a matrix; otherwise
    # the rownames<- assignment below fails when only one component
    # survives the truncation.
    eigen_res$vectors <- eigen_res$vectors[, evs.to.keep, drop = FALSE]
    rownames(eigen_res$vectors) <- colnames(x)

    ## force consistent directions as best as possible:
    if (sign(eigen_res$vectors[1]) == -1) {
      eigen_res$vectors <- eigen_res$vectors * -1
    }
  }

  class(eigen_res) <- c("list", "GSVD", "eigen")
  return(eigen_res)
}
b75f1a115ed69306fd9141173a7784bb4a50912d
d80d2f9e911820898bb21bc9e2e2c7d10e8cfa59
/R/prompt-git.R
918088dae97a9c6187ed55b6c2a0ceb45ec03a38
[]
no_license
Robinlovelace/prompt
d0d8f43d4459f2d8e67ed433e55cdf88ff28d07a
950124035700126412df8f5bc78cb583ee0555f6
refs/heads/master
2020-04-09T22:37:33.908096
2018-09-11T21:50:52
2018-09-11T21:51:44
null
0
0
null
null
null
null
UTF-8
R
false
false
2,681
r
prompt-git.R
#' An example 'git' prompt
#'
#' It shows the current branch, whether there are
#' commits to push or pull to the default remote,
#' and whether the working directory is dirty.
#'
#' @param ... Unused.
#'
#' @family example prompts
#' @export
#' @examples
#' \dontrun{
#' set_prompt(prompt_git)
#' }

prompt_git <- function(...) {
  if (!is_git_dir()) return("> ")
  paste0(
    git_branch(),
    git_dirty(),
    git_arrows(),
    " > "
  )
}

# TRUE when the working directory is inside a git repository
# (i.e. `git status` exits with status 0).
is_git_dir <- function() {
  status <- git("status")
  attr(status, "status") == 0
}

## It fails before the first commit, so we just return "master" there
git_branch <- function() {
  status <- git("rev-parse --abbrev-ref HEAD")
  if (attr(status, "status") != 0) "master" else status
}

# Up/down arrows showing commits ahead of / behind the upstream branch;
# empty string when there is no upstream or nothing to push/pull.
#' @importFrom clisymbols symbol
git_arrows <- function() {
  res <- ""
  status <- git("rev-parse --abbrev-ref @'{u}'")
  if (attr(status, "status") != 0) return(res)
  status <- git("rev-list --left-right --count HEAD...@'{u}'")
  if (attr(status, "status") != 0) return(res)
  # lr[1] = commits only on HEAD (to push), lr[2] = only upstream (to pull)
  lr <- scan(text = status, quiet = TRUE)
  if (lr[2] != 0) res <- paste0(res, symbol$arrow_down)
  if (lr[1] != 0) res <- paste0(res, symbol$arrow_up)
  if (res != "") paste0(" ", res) else res
}

# "*" when the working tree has uncommitted changes, "" otherwise.
git_dirty <- function() {
  status <- git("diff --no-ext-diff --quiet --exit-code")
  if (attr(status, "status") != 0) "*" else ""
}

# Run `git <args>` in `path`, returning captured stdout with a "status"
# attribute (0 on success, 1 when the call itself failed).
git <- function(args, quiet = TRUE, path = ".") {
  full <- paste0(shQuote(check_git_path()), " ", paste(args, collapse = ""))
  if (!quiet) {
    message(full)
  }
  result <- tryCatch(
    suppressWarnings(
      in_dir(path, system(full, intern = TRUE, ignore.stderr = quiet))
    ),
    error = function(x) x
  )
  if (methods::is(result, "error")) {
    result <- structure("", status = 1)
  } else {
    # system(intern = TRUE) only sets "status" on failure; default to 0.
    attr(result, "status") <- attr(result, "status") %||% 0
  }
  result
}

# Locate the git executable: the user-supplied path, then the PATH, then
# (on Windows) the standard install locations. NULL when not found.
git_path <- function(git_binary_name = NULL) {
  # Use user supplied path
  if (!is.null(git_binary_name)) {
    if (!file.exists(git_binary_name)) {
      # BUGFIX: was `.call = FALSE`; that misspelling does not match
      # stop()'s `call.` argument, so FALSE was pasted into the message
      # ("... does not existFALSE").
      stop("Path ", git_binary_name, " does not exist", call. = FALSE)
    }
    return(git_binary_name)
  }
  # Look on path
  git_path <- Sys.which("git")[[1]]
  if (git_path != "") return(git_path)
  # On Windows, look in common locations
  if (os_type() == "windows") {
    look_in <- c(
      "C:/Program Files/Git/bin/git.exe",
      "C:/Program Files (x86)/Git/bin/git.exe"
    )
    found <- file.exists(look_in)
    if (any(found)) return(look_in[found][1])
  }
  NULL
}

# git_path() that errors (instead of returning NULL) when git is missing.
check_git_path <- function(git_binary_name = NULL) {
  path <- git_path(git_binary_name)
  if (is.null(path)) {
    stop("Git does not seem to be installed on your system.", call. = FALSE)
  }
  path
}
c8708248a3c9286ee6acb1d41a9556c777fbc3f2
109734b597c2d760725a1a050174a5d11b3c1a9b
/man/diameter.owin.Rd
cad0b6deaad18918e2a9ccb4160dd51a0d1680b0
[]
no_license
rubak/spatstat
c293e16b17cfeba3e1a24cd971b313c47ad89906
93e54a8fd8276c9a17123466638c271a8690d12c
refs/heads/master
2020-12-07T00:54:32.178710
2020-11-06T22:51:20
2020-11-06T22:51:20
44,497,738
2
0
null
2020-11-06T22:51:21
2015-10-18T21:40:26
R
UTF-8
R
false
false
1,076
rd
diameter.owin.Rd
\name{diameter.owin} \alias{diameter.owin} \title{Diameter of a Window} \description{ Computes the diameter of a window. } \usage{ \method{diameter}{owin}(x) } \arguments{ \item{x}{ A window whose diameter will be computed. } } \value{ The numerical value of the diameter of the window. } \details{ This function computes the diameter of a window of arbitrary shape, i.e. the maximum distance between any two points in the window. The argument \code{x} should be a window (an object of class \code{"owin"}, see \code{\link{owin.object}} for details) or can be given in any format acceptable to \code{\link{as.owin}()}. The function \code{diameter} is generic. This function is the method for the class \code{"owin"}. } \seealso{ \code{\link{area.owin}}, \code{\link{perimeter}}, \code{\link{edges}}, \code{\link{owin}}, \code{\link{as.owin}} } \examples{ w <- owin(c(0,1),c(0,1)) diameter(w) # returns sqrt(2) data(letterR) diameter(letterR) } \author{\adrian and \rolf } \keyword{spatial} \keyword{math}
e0a49b14aeaa0b04e94e3dadfd813d0d10d0543c
03d20ec52ea429d2bffdefa849044ab6d0ad7481
/03_stop_frisk/scripts/shiny/server.R
53321fcac699cc40920f5e00eae34ce5662d3954
[]
no_license
GWarrenn/dc_data
3f679b28aa02f1cec7b9e887d66087d44ed40d7c
15b358d77210644dcdd908ef05d6e95930fbf62e
refs/heads/master
2021-11-17T07:36:55.337352
2021-09-30T21:44:56
2021-09-30T21:44:56
98,127,130
0
0
null
null
null
null
UTF-8
R
false
false
1,263
r
server.R
library(shiny) library(DT) shinyServer(function(input, output) { filedata <- read.csv("sf_nbh_summary.csv") format_cols <- c("Black.Diff","Hispanic.Latino.Diff","Juvenile.Diff","White.Diff") numeric_cols <- c("Black.stop_and_frisk","Black.census","Black.Diff", "Hispanic.Latino.stop_and_frisk","Hispanic.Latino.census","Hispanic.Latino.Diff", "Juvenile.stop_and_frisk","Juvenile.census","Juvenile.Diff","White.stop_and_frisk", "White.census","White.Diff") output$tbl = renderDT( datatable(filedata,rownames = FALSE, extensions ="FixedColumns",options = list( scrollX=TRUE, scrollY=500, fixedColumns = list(leftColumns = 2), autoWidth = TRUE, columnDefs = list(list(width = '250px', targets = c(1)), list(className = 'dt-center', targets = 0:13), list(visible=FALSE, targets=c(0))))) %>% formatStyle(format_cols, backgroundColor = styleInterval(0, c('lightpink', 'lightgreen'))) %>% #formatStyle("neighborhood","white-space"="nowrap") %>% #formatStyle(columns = c(2), width='200%') %>% formatPercentage(numeric_cols, 1) ) }) # columnDefs = list(list(visible=FALSE, targets=c(4) #
1a21e4fca73fddb89742615632cb67512929cca6
9d4e1ec7dd4128c99360e98b05de206661f3f130
/stoke_boost.R
d76d0f0fcffad9ffcdbfb1b7483876242c17740e
[]
no_license
coderjones/stroke_prediction
df6afb8fc5681ea46e050a22a07456b6445cb89e
be5a1dbea2436c068b313dbf39d471e8df84591e
refs/heads/main
2023-06-08T05:17:47.580765
2021-06-27T20:07:01
2021-06-27T20:07:01
367,732,010
0
0
null
null
null
null
UTF-8
R
false
false
1,059
r
stoke_boost.R
# Using gbm for gradient boosting classification library(tidyverse) library(fastDummies) library(rsample) library(gbm) # set working directory setwd("/Users/jeremiahhamilton/code/stroke_prediction") # read in data df <- read.csv("healthcare-dataset-stroke-data.csv") df <- dummy_cols(df, select_columns = c('smoking_status', 'ever_married','work_type', 'Residence_type', 'gender')) df %>% select(-ever_married, -work_type, -Residence_type, -gender, -smoking_status, -id, -avg_glucose_level) -> df # split into train/test groups set.seed(22) df_split <- initial_split(df, prop = .7, strata = "stroke") train_df <- training(df_split) test_df <- testing(df_split) stroke_gbm1 <- gbm( formula = stroke ~ ., data = train_df, distribution = "gaussian", n.trees = 5000, shrinkage = 0.1, interaction.depth = 3, n.minobsinnode = 10, cv.folds = 10 ) # find index fornumber trees with minimum cv error best <- which.min(stroke_gbm1$cv.error) #get RMSE sqrt(stroke_gbm1$cv.error[best]) # plot error curve gbm.perf(stroke_gbm1, method = "cv")
9a9a6b4f313264e2921ba1a55a9e113ffc3df5ed
22da09a9095cbd13d25edff454c1b32972357ffc
/man/animate_series.Rd
152d9861ff8020765cc6fd1c31658782afaab7a4
[]
no_license
ThoDah/rabbiTS-1
f49f516b72f4805b5d7ddb7899eb412ff6757e3d
ef7427619aeb5b6b174751dc1067b2cfe6d3d6f4
refs/heads/master
2020-03-17T18:03:51.299496
2018-05-17T13:02:27
2018-05-17T13:02:27
133,813,294
0
0
null
2018-05-17T12:55:20
2018-05-17T12:55:19
null
UTF-8
R
false
true
1,246
rd
animate_series.Rd
% Generated by roxygen2: do not edit by hand % Please edit documentation in R/animate_series.R \name{animate_series} \alias{animate_series} \title{Animate a time series of rasters} \usage{ animate_series(r, dates, breaks, param = NULL, param.name = "Parameter", file.name = tempfile(fileext = ".gif"), ...) } \arguments{ \item{r}{raster or stack of rasters, each representing a different acquisition time.} \item{dates}{character vector of length \code{nlayers(r)}, representing corresponding times of \code{r}.} \item{breaks}{numeric vector, value range to be displayed, e.g. \code{seq(1, 180, by = 1)}} \item{param}{data.frame or \code{NULL}, optional parameter data.frame derived using \link{bands_param} to show the development of a parameter over time.} \item{param.name}{character, name of the defined parameter. Default is "Parameter".} \item{file.name}{character, path to the output file. Default is a temporary file.} \item{...}{arguments passed to \link{saveGIF}, e.g. interval=0.2, ani.width = 500, ani.height = 700 etc.} } \value{ List of plots that are used as frames for the animation. An animation file will be written to file.name. } \description{ \code{animate_series} animates a series of rasters in consecutive order. }
a28b6d45d9b576b7d7fd1b692ab7c6579bce7ddf
ffdea92d4315e4363dd4ae673a1a6adf82a761b5
/data/genthat_extracted_code/rnr/tests/test-solve.R
7991f85e7dbd1be1bb33531a853d989d70bf0c16
[]
no_license
surayaaramli/typeRrh
d257ac8905c49123f4ccd4e377ee3dfc84d1636c
66e6996f31961bc8b9aafe1a6a6098327b66bf71
refs/heads/master
2023-05-05T04:05:31.617869
2019-04-25T22:10:06
2019-04-25T22:10:06
null
0
0
null
null
null
null
UTF-8
R
false
false
640
r
test-solve.R
context("sensitize") test_that("solve_closed() finds correct value for atomic vector", { ps <- seq(0, 1, 0.01) delta <- 2 theta <- 1 for (p in ps) { lhs <- 1 - (p*inv_logit(theta) + (1 - p)*inv_logit(theta + delta)) generated <- solve_closed(p, delta, lhs) expect_equal(generated, theta) } }) test_that("solve() finds correct value for vector of values", { ps <- seq(0, 1, 0.01) delta <- 2 theta <- (ps - mean(ps))*5 # Just some reasonable value should do lhs <- 1 - (ps*inv_logit(theta) + (1 - ps)*inv_logit(theta + delta)) generated <- solve_closed(ps, delta, lhs) expect_equal(generated, theta) })
41ec86a3d14cb16a89dd7b1ca80144522889526f
e8524f6a0301d922ec18d6d017cfb223c9eceee4
/data-portraits/andy-challenge-02.R
fb515bf21f170f303fc060dbaf3c245ba255fd24
[]
no_license
melodyaltschuler/tidytuesday
eff7713ff1cc36a9ea382954159e9f315f0c4691
6514ec95de2cc366be1b340050218819430963c4
refs/heads/master
2023-04-12T22:38:52.281026
2021-04-22T22:42:03
2021-04-22T22:42:03
296,454,761
0
0
null
null
null
null
UTF-8
R
false
false
5,524
r
andy-challenge-02.R
## DATA PORTRAITS - CHALLENGE 2
## ICD TIDY TUESDAY
## MARCH 2021
## Recreates the Du Bois "Conjugal Condition" chart. The three age
## panels were near-identical blocks of ggplot code; the duplication is
## factored into conjugal_panel() below (outputs p1..p4 are unchanged).

# Load library
library(tidyverse)
library(showtext)   # To use googlefonts
library(patchwork)

# Import data
conjugal <- read_csv('https://raw.githubusercontent.com/rfordatascience/tidytuesday/master/data/2021/2021-02-16/conjugal.csv')

# Convert to long format
conjugal <- conjugal %>%
  pivot_longer(3:5, names_to = "Status", values_to = "Percentage") %>%
  # Re-order factor levels to correspond to plot
  mutate(
    Population = factor(Population, levels = c("Negroes", "Germany"))
  ) %>%
  # Compute x-coordinates for text in bars (midpoint of each stacked bar)
  group_by(Population, Age) %>%
  mutate(
    half_perc = 0.5*Percentage,
    x_coord = half_perc + lag(Percentage, 2, default = 0) + lag(Percentage, 1, default = 0),
    perc_text = paste0(Percentage, "%")
  )

## Loading Google fonts (https://fonts.google.com/)
font_add_google(name = "Cutive Mono")
## Automatically use showtext to render google fonts
showtext_auto()

# Build one age panel of the chart.
#   df          : the long-format conjugal data
#   age         : value of the Age column to plot
#   age_label   : multi-line label drawn to the left of the bars
#   label_vjust : vertical justification for that label
#   title       : optional plot title (only the first panel has one)
#   show_legend : keep the fill legend (only the first panel shows it)
conjugal_panel <- function(df, age, age_label, label_vjust, title = NULL, show_legend = FALSE) {
  p <- df %>%
    filter(Age == age) %>%
    ggplot(aes(x = Percentage, y = Population, fill = Status)) +
    geom_bar(stat = "identity", width = 0.75) +
    geom_text(aes(x = x_coord, label = perc_text), size = 2) +
    theme_light() +
    scale_x_continuous(name = "", breaks = NULL) +
    scale_y_discrete(name = "") +
    scale_fill_manual(name = "", values = c("#707a6a", "#e6b329", "#be324b")) +
    theme(
      legend.position = "top",
      legend.background = element_rect(fill = "#dfd2c1"),
      legend.key = element_rect(fill = "#dfd2c1"),
      plot.background = element_rect(fill = "#dfd2c1"),
      panel.background = element_rect(fill = "#dfd2c1"),
      panel.grid = element_blank(),
      panel.border = element_blank(),
      axis.text = element_text(family = "Cutive Mono"),
      legend.text = element_text(family = "Cutive Mono"),
      plot.title = element_text(hjust = 0.5, family = "Cutive Mono", face = "bold", size = 14),
      plot.margin = margin(15, 0, 0, 50, "pt"), #trbl
      strip.background = element_blank(),
      strip.text = element_blank(),
      aspect.ratio = .1
    ) +
    coord_cartesian(xlim = c(0, 100), clip = "off") +
    annotate("text", x = -26, y = "Germany", label = age_label,
             vjust = label_vjust, family = "Cutive Mono", size = 3) +
    annotate("text", x = -20, y = "Germany", label = "{", vjust = 0.9,
             family = "Cutive Mono", size = 12, alpha = 0.6)
  if (!is.null(title)) {
    p <- p + ggtitle(title)
  }
  if (!show_legend) {
    p <- p + guides(fill = FALSE)
  }
  p
}

# Create the three age-group panels
p1 <- conjugal_panel(conjugal, "15-40", "Age\n15-40", 1,
                     title = "CONJUGAL CONDITION", show_legend = TRUE)
p2 <- conjugal_panel(conjugal, "40-60", "\n40-60", 0.6)
p3 <- conjugal_panel(conjugal, "60 and over", "60\nAND\nOVER", 0.8)

# Layout plots and fill in background between plots
p4 <- p1 /p2 / p3 & theme(plot.background = element_rect(fill = "#dfd2c1", color = "#dfd2c1"))

# Output the plot
ggsave(p4, filename = "~/Desktop/challenge_02.png", width = 12, height = 3.7)
7a3dbdf4368c303fb0cafbd899712af6cf5ca0d4
3530fb409502ac4e55bfcf053daadf14573e5b08
/q14.R
ad2626bf18668adc582fa78232b0bf762f8c8ec9
[]
no_license
nathandarmawan/rprog_quiz_week1
fa0ee5b46ea0ceb56f808739aacdb9eb095f2db6
2f239f96a8cdd683b5f9df23b7adb7998670b98d
refs/heads/master
2021-01-19T05:36:32.364798
2014-10-13T06:36:16
2014-10-13T06:36:16
null
0
0
null
null
null
null
UTF-8
R
false
false
277
r
q14.R
## Q14: extract the last two rows of the data frame and print them to the
## console, to see what the output looks like.

# Directory holding the quiz data (as in the original coursework setup).
setwd("D:/GitHub/rprog_quiz_week1")

# Load the homework dataset.
hw1_data <- read.csv("hw1_data.csv")

# tail() with n = 2 shows the final two observations.
tail(hw1_data, 2)
3e397af706756bbde6d44b2845e1aed5af54847c
4a4cae45a127183fa4c58bd75737fb0980bd8bfd
/required_packages.R
91af782f392747c0430f8a9eeb697b1c7a3121c1
[ "MIT" ]
permissive
klintkanopka/nn_workshop
7394a97a80cddb9a5a91e73e754f931b46884fde
bd2d857af0ebc98b83bebff1b266da6a6082cec0
refs/heads/master
2020-05-23T10:23:38.903759
2019-05-15T06:43:03
2019-05-15T06:43:03
186,719,123
2
0
null
null
null
null
UTF-8
R
false
false
59
r
required_packages.R
# One-time setup for the workshop: install the two packages the exercises
# depend on (tidyverse for data wrangling/plotting, neuralnet for fitting
# simple neural networks). Run once; the exercise scripts load them with
# library().
install.packages("tidyverse")
install.packages("neuralnet")
ec74b4bff2900a44e9be9a2956db480c1f4fa0de
5395cdc191ff5a30d1c59e68ca0f95a288892c8b
/man/M_el_mat.Rd
a5090a55249ad8698b59c2e7044182532cd445bc
[]
no_license
nielsjdewinter/ShellTrace
fe16bb69b8981211bd24ef120627fc38d283db66
34dd076d72bb0812f251c986b1aad04b6849261b
refs/heads/master
2021-07-23T13:37:20.750368
2017-11-02T08:57:26
2017-11-02T08:57:26
105,881,428
0
0
null
null
null
null
UTF-8
R
false
true
1,852
rd
M_el_mat.Rd
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/M_el_mat.R
\docType{data}
\name{M_el_mat}
\alias{M_el_mat}
\title{Matrix of modelled mass accumulation rates per trace element}
\format{A data frame with 5 rows and 24 variables:
\describe{
  \item{C}{Mass accumulation of C in subincrement}
  \item{O}{Mass accumulation of O in subincrement}
  \item{Na}{Mass accumulation of Na in subincrement}
  \item{Mg}{Mass accumulation of Mg in subincrement}
  \item{Al}{Mass accumulation of Al in subincrement}
  \item{Si}{Mass accumulation of Si in subincrement}
  \item{P}{Mass accumulation of P in subincrement}
  \item{S}{Mass accumulation of S in subincrement}
  \item{Cl}{Mass accumulation of Cl in subincrement}
  \item{K}{Mass accumulation of K in subincrement}
  \item{Ca}{Mass accumulation of Ca in subincrement}
  \item{Ti}{Mass accumulation of Ti in subincrement}
  \item{Cr}{Mass accumulation of Cr in subincrement}
  \item{Mn}{Mass accumulation of Mn in subincrement}
  \item{Fe}{Mass accumulation of Fe in subincrement}
  \item{Ni}{Mass accumulation of Ni in subincrement}
  \item{Cu}{Mass accumulation of Cu in subincrement}
  \item{Zn}{Mass accumulation of Zn in subincrement}
  \item{Br}{Mass accumulation of Br in subincrement}
  \item{Rb}{Mass accumulation of Rb in subincrement}
  \item{Sr}{Mass accumulation of Sr in subincrement}
  \item{Rh}{Mass accumulation of Rh in subincrement}
  \item{Ba}{Mass accumulation of Ba in subincrement}
  \item{Pb}{Mass accumulation of Pb in subincrement}
}}
\source{
\url{https://doi.org/10.5194/gmd-2017-137-supplement}
}
\usage{
data(M_el_mat)
}
\description{
A dataset containing trace element accumulation modelled for every
subincrement, based on a phase map of the XRF mapped surface of the
Crassostrea gigas #1 oyster used as an example in de Winter (2017)
}
\keyword{datasets}
14990f5ce2aee8d498ee1d7e83b7972396f7a8be
caa9387f050ded3c5f1b9879eb1935a29f7db8ce
/code.R
b5ead6e513166d9c633d1f8d1dc107ec8d13ef4f
[]
no_license
joebrew/map_plos
b4a3a901d7d1a4bc62aabbc75b2bca1c5d1f49b2
b1380ae0eb1efbda6a162396f4f3f21b2c195187
refs/heads/master
2021-01-10T09:54:17.699613
2016-03-08T10:40:53
2016-03-08T10:40:53
52,874,937
0
0
null
null
null
null
UTF-8
R
false
false
9,688
r
code.R
# Maps of the number of women ("mulheres") surveyed per neighbourhood
# (bairro) of Manaus (Figure 1) and per municipality of Amazonas state
# (Figure 2). Reads local shapefiles + Excel counts, reconciles name
# spellings between the spreadsheet and the shapefile, then draws both
# figures to PDF with base graphics.
library(rgdal)
library(dplyr)
library(raster)
library(readxl)
library(RColorBrewer)
require(maptools)
library(ggrepel) # for avoiding overlapping labels in ggplot2
library(ggthemes)

##### Read in shapefiles
# arruamento <- readOGR('data/spatial/', 'arruamento')
bairros_e_zonas <- readOGR('data/spatial/', 'Bairros_e_Zonas')
# zonas_administrativas <- readOGR('data/spatial/', 'zonas_administrativas')

# For shape files we don't have, get spatial data from raster package
brazil0 <- getData('GADM', country = 'BRA', level = 0)
brazil1 <- getData('GADM', country = 'BRA', level = 1)
brazil2 <- getData('GADM', country = 'BRA', level = 2)
brazil3 <- getData('GADM', country = 'BRA', level = 3)
# save.image('~/Desktop/brazil.RData')

##### Read in data
# Counts by bairro
mulheres <- read_excel('data/spreadsheets/ECO.xls')
# Raw data
raw <- read_excel('data/spreadsheets/aaobserva.xls')

# %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
##### Figure 1 - bairros
# %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%

##### Join data to spatial
# Define which names are good (ie, in map)
goods <- as.character(sort(unique(bairros_e_zonas@data$NOME)))

# Define which names don't need changing
ins <- as.character(sort(unique(mulheres$NOME))) %in%
  as.character(sort(unique(bairros_e_zonas@data$NOME)))

# Define which names need changing
outs <- as.character(sort(unique(mulheres$NOME))[!ins])

# Define corrections: map each spreadsheet name either to itself (if it
# already matches the shapefile) or to NA pending a manual fix below.
corrections <- data.frame(mulheres = as.character(sort(unique(mulheres$NOME))),
                          stringsAsFactors = FALSE)
corrections$bairros <- ifelse(corrections$mulheres %in% goods,
                              corrections$mulheres,
                              NA)

# Make corrections (spreadsheet spelling -> shapefile spelling)
corrections$bairros[corrections$mulheres == 'COLÔNIA TERRA NOVA'] <- 'COL TERRA NOVA'
corrections$bairros[corrections$mulheres == 'LIRIO DO VALE'] <- 'LÍRIO DO VALE'
corrections$bairros[corrections$mulheres == 'NOSSA SENHORA DAS GRAÇAS'] <- 'N SRA DAS GRAÇAS'
corrections$bairros[corrections$mulheres == 'PARQUE 10 DE NOVEMBRO'] <- 'PARQUE DEZ DE NOVEMBRO'
corrections$bairros[corrections$mulheres == 'TANCREDO NEVES '] <- 'TANCREDO NEVES'
corrections$bairros[corrections$mulheres == 'ZUMBI'] <- 'ZUMBI DOS PALMARES'

# THE FOLLOWING WE ARE JUST TAKING OUT
# corrections$bairros[corrections$mulheres == 'NOVO ALEIXO'] <-
#   'ALEIXO'
# corrections$bairros[corrections$mulheres == 'COLONIA ANTONIO ALEIXO'] <-
#   'ALEIXO'
# corrections$bairros[corrections$mulheres == 'CAMPOS SALES'] <-
#   'SANTA ETELVINA'
# corrections$bairros[corrections$mulheres == 'CIDADE DE DEUS'] <-
#   'CIDADE NOVA'
# corrections$bairros[corrections$mulheres == 'LAGO AZUL'] <-
#   'TARUMÃ'
# corrections$bairros[corrections$mulheres == 'PARQUE DAS LARANJEIRAS'] <-
#   'FLORES'

# Implement the corrections (names left NA above are dropped from the join)
names(corrections) <- c('NOME', 'new_name')
mulheres <- left_join(mulheres, corrections, by = 'NOME')
mulheres$NOME <- mulheres$new_name; mulheres$new_name <- NULL

# Make the join
bairros_e_zonas@data <- left_join(x = bairros_e_zonas@data,
                                  y = mulheres,
                                  by = 'NOME')

# Set to 0 the NAs
bairros_e_zonas@data$MULHERES[is.na(bairros_e_zonas@data$MULHERES)] <- 0

# Define a color vector: one ramp step per possible count, so a count of k
# indexes the k-th colour directly in the loop below.
# cols <- rev(brewer.pal(n = 9, name = 'Spectral'))
cols <- brewer.pal(n = 9, name = 'Greens')
colors <- colorRampPalette(cols)(max(bairros_e_zonas@data$MULHERES, na.rm = TRUE))
colors <- adjustcolor(colors, alpha.f = 0.6)
bairros_e_zonas@data$color <- 'white'
# bairros_e_zonas@data$color[bairros_e_zonas@data$MULHERES > 0] <-
#   colors[bairros_e_zonas@data$MULHERES[bairros_e_zonas@data$MULHERES > 0]]
#
# bairros_e_zonas@data$color <- ifelse(
#   bairros_e_zonas@data$MULHERES == 0,
#   'white',
#   ifelse(bairros_e_zonas@data$MULHERES > 0,
#          colors[bairros_e_zonas@data$MULHERES],
#          'orange'))
for (i in 1:nrow(bairros_e_zonas@data)){
  if(bairros_e_zonas@data$MULHERES[i] > 0){
    bairros_e_zonas@data$color[i] <-
      # colors[bairros_e_zonas@data$MULHERES[i]]
      colors[bairros_e_zonas@data$MULHERES[i]]
  }
}

# # PLOT GG STYLE
# #
# # fortify map
# bairros_e_zonas@data$place_id <- row.names(bairros_e_zonas@data)
# row.names(bairros_e_zonas@data) <- NULL
# bairros_e_zonas_f <- fortify(bairros_e_zonas, region = 'place_id')
# # bring in number of women
#
# bairros_e_zonas_f <- left_join(bairros_e_zonas_f,
#                                bairros_e_zonas@data %>%
#                                  mutate(OBJECTID = as.character(OBJECTID)),
#                                by = c('id' = 'OBJECTID'))
#
# Create a labeling dataframe (one label per bairro with at least one woman)
label_df <- bairros_e_zonas@data[,c('NOME', 'MULHERES')]
label_df <- label_df[!duplicated(label_df$NOME),]

# add lat long (polygon centroids)
label_df$long <- coordinates(bairros_e_zonas)[,1]
label_df$lat <- coordinates(bairros_e_zonas)[,2]

# Keep only those with > 0 women
label_df <- label_df[label_df$MULHERES > 0,]

# Replace spaces with line breaks
label_df$NOME <- gsub(' ', '\n', label_df$NOME)

# ggplot() +
#   coord_map() +
#   geom_polygon(data = bairros_e_zonas_f,
#                aes(x = long, y =lat, group = group,
#                    fill = MULHERES), color = 'grey') +
#   geom_label_repel(data = label_df,
#                    aes(long, lat,
#                        #fill = factor(NOME),
#                        label = factor(NOME)),
#                    fontface = 'bold',
#                    color = 'black',
#                    size = 1.5,
#                    box.padding = unit(1.75, 'lines')) +
#   theme_tufte() +
#   theme(axis.ticks.length = unit(0.001, "mm")) + labs(x=NULL, y=NULL) +
#   theme(axis.line=element_blank(),
#         axis.text.x=element_blank(),
#         axis.text.y=element_blank(),
#         axis.ticks=element_blank(),
#         axis.title.x=element_blank(),
#         axis.title.y=element_blank(),
#         legend.position="none",
#         panel.background=element_blank(),
#         panel.border=element_blank(),
#         panel.grid.major=element_blank(),
#         panel.grid.minor=element_blank(),
#         plot.background=element_blank()) +
#   scale_fill_manual(guide = guide_legend(title = 'Area'),
#                     values = cols)

# Add dataframe for accessory labeling (clinic/hospital markers)
acc <- bairros_e_zonas@data
acc$lng <- coordinates(bairros_e_zonas)[,1]
acc$lat <- coordinates(bairros_e_zonas)[,2]

# Keep only those areas that have hospitals
acc <- acc[acc$NOME %in% c('JORGE TEIXEIRA', 'DOM PEDRO'),]
acc$label <- c('Jap Clinic', 'FMT HVD')

# Give them special colors / symbols
acc$color <- adjustcolor(c('blue', 'red'), alpha.f = 0.6)
acc$pch <- c(15, 17)

# acc <-
#   data.frame(label = c('Jap Clinic', 'FMT-HVD'),
#              lng = as.numeric(coordinates(bairros_e_zonas[bairros_e_zonas@data$NOME == 'JORGE TEIXEIRA',])[,1]),
#              lat = as.numeric(coordinates(bairros_e_zonas[bairros_e_zonas@data$NOME == 'JORGE TEIXEIRA',])[,2]))

# Plot figure 1 to PDF
pdf('figure_1.pdf', width = 10, height = 8)
plot(bairros_e_zonas,
     col = bairros_e_zonas@data$color,
     border = adjustcolor('black', alpha.f = 0.3)
     # border = NA
)
points(acc$lng, acc$lat,
       pch = acc$pch,
       col = acc$color,
       cex = 2)
text(x = label_df$long,
     y = label_df$lat,
     label = label_df$NOME,
     cex = 0.3,
     col = adjustcolor('black', alpha.f = 0.6))
legend('right',
       fill = colors,
       ncol = 1,
       cex = 0.8,
       border = NA,
       col = colors,
       legend = 1:length(colors),
       title = 'Women')
# text(x = acc$lng,
#      y = acc$lat,
#      label = gsub(' ', '\n', acc$label),
#      cex = 0.6)
legend('topright',
       pch = acc$pch,
       col = acc$color,
       legend = acc$label)

# Add compass rose
compassRose(x = -60.1, y = -3.12)

# Add scale
maps::map.scale(x =-59.91, y = -3.15,
                relwidth = 0.2, metric = TRUE,
                ratio = TRUE,
                col = adjustcolor('black', alpha.f = 0.6),
                cex = 0.6)
dev.off()

# %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
##### Figure 2 - bairros
# %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
# State-level map: hand-coded counts per municipality of Amazonas.
amazonas <- brazil2[brazil2@data$NAME_1 == 'Amazonas',]
amazonas@data$mulheres <- 0
amazonas@data$mulheres[amazonas@data$NAME_2 == 'São Gabriel de Cahoeira'] <- 1
amazonas@data$mulheres[amazonas@data$NAME_2 == 'Barcelos'] <- 1
amazonas@data$mulheres[amazonas@data$NAME_2 == 'Presidente Figueiredo'] <- 1
amazonas@data$mulheres[amazonas@data$NAME_2 == 'Tapauá'] <- 1
amazonas@data$mulheres[amazonas@data$NAME_2 == 'Rio Preto da Eva'] <- 2
amazonas@data$color <- adjustcolor(
  ifelse(amazonas@data$mulheres == 0,
         'white',
         ifelse(amazonas@data$mulheres == 1,
                'lightblue',
                ifelse(amazonas@data$mulheres == 2,
                       'darkblue',
                       'black'))),
  alpha.f = 0.6)

# Make a labeling vector (reuses/overwrites label_df from figure 1)
label_df <- amazonas@data
label_df$long <- coordinates(amazonas)[,1]
label_df$lat <- coordinates(amazonas)[,2]
label_df <- label_df[label_df$mulheres > 0,]
label_df$NAME_2 <- gsub(' ', '\n', label_df$NAME_2)

pdf('figure_2.pdf', width = 10, height = 8)
plot(amazonas,
     col = amazonas@data$color,
     border = adjustcolor('black', alpha.f = 0.3))
text(x = label_df$long,
     y = label_df$lat,
     label = label_df$NAME_2,
     cex = 0.4,
     col = adjustcolor('black', alpha.f = 0.6))
legend('bottomright',
       fill = adjustcolor(c('white', 'lightblue', 'darkblue'), alpha.f = 0.6),
       legend = c(0, 1, 2),
       cex = 0.8,
       border = NA,
       title = 'Women')

# Add compass rose
compassRose(x = -72, y = -1.4)

# Add scale
maps::map.scale(x =-64, y = -9.5,
                relwidth = 0.2, metric = TRUE,
                ratio = TRUE,
                col = adjustcolor('black', alpha.f = 0.6),
                cex = 0.6)
dev.off()
8fd04719dd11319c33c7835ca12d0729516e6622
021fa1134701528153dab7dd4c24ed145d15af06
/Template.R
12fd9c8bdf56d63a4b4b84502051c42697854615
[]
no_license
a30123/R_Handy
924802172fdd1dab1f965f5f90c029ebe992db26
059abe4df07bbcd1c76d02eef2befe16b04f3c59
refs/heads/master
2021-01-19T12:36:20.663018
2015-07-05T11:42:35
2015-07-05T11:42:35
38,416,196
0
0
null
null
null
null
UTF-8
R
false
false
2,051
r
Template.R
#### created date:
#### last modified date:
#### author:A30123
#### description:
#### (Script skeleton: fill in each banner-delimited section below, then use
####  the proc.time() pair to measure total run time of the main program.)
#########################################################################################################
#########################################################################################################

#########################################################################################################
#######################################   IMPORT LIBRARIES   ###########################################
#########################################################################################################

#########################################################################################################
#######################################      FUNCTIONS       ###########################################
#########################################################################################################

#########################################################################################################
#######################################     INITIALIZING     ###########################################
#########################################################################################################

#########################################################################################################
#######################################     MAIN PROGRAM     ###########################################
#########################################################################################################
#start timer###
# Snapshot of user/system/elapsed time at the start of the run.
ptm<-proc.time()
# Time spent since `ptm` was captured (place at the end of the main program).
proc.time()-ptm
8f8aae35fe32f7dfb78e8093d74f73109b2add4c
f99326be507c62c63b91a45ec3246aa1b3a55f30
/RandForest.R
c1207b7e7049b4d78f82df18c5eacbef2d1169d9
[]
no_license
sherryxhu/wesad
3fa6b5190bef1a517bde46e0c6e38f8e51f088d8
1695af914b638a6f809e17599e4fc22b0c9bc524
refs/heads/master
2022-11-08T22:25:54.747305
2020-06-20T17:09:51
2020-06-20T17:09:51
272,251,557
0
0
null
null
null
null
UTF-8
R
false
false
3,934
r
RandForest.R
# install packages (one-time setup)
install.packages('nnet', repos = "http://cran.us.r-project.org")
install.packages('tidyverse', repos = "http://cran.us.r-project.org")
install.packages('dplyr', repos = "http://cran.us.r-project.org")
install.packages('arm', repos = "http://cran.us.r-project.org")
install.packages('plyr', repos = "http://cran.us.r-project.org")
install.packages('randomForest', repos = "http://cran.us.r-project.org")
install.packages('caTools', repos = "http://cran.us.r-project.org")
install.packages('partykit', repos = "http://cran.us.r-project.org")
install.packages('caret', repos = "http://cran.us.r-project.org")

# load packages
library(tidyverse)
library(dplyr)
library(nnet) # for multinom
library(plyr) # for count
library(knitr) # for kable
library(randomForest) # for random forest
library(caTools) # for random forest
library(partykit) # for ctree
library(caret)

#load data
load("data/df.RData")
load("data/df_train.RData")
load("data/df_test.RData")

mcr=NULL

# Estimate misclassification rates of a fitted `model` over 5 random,
# label-stratified 80/20 splits of `data` (labels assumed to be 0-4).
# Returns a data frame whose rows are the per-class rates (l0..l4) and the
# overall rate ("rating"), one column per split, plus an "Average" column.
# BUGFIX: the original sampled row indices from the global `df` instead of
# the `data` argument; behaviour is identical for the existing
# avclassifyrate(df, rf) call, but the argument is now actually honoured.
avclassifyrate=function(data, model){
  rating=l1=l2=l3=l4=l0=NULL
  for (i in 1:5){
    # Stratified 80% sample of row indices, drawn per label class.
    tf=c(sample(as.numeric(rownames(subset(data, label==0))), nrow(subset(data, label==0))*0.8),
         sample(as.numeric(rownames(subset(data, label==1))), nrow(subset(data, label==1))*0.8),
         sample(as.numeric(rownames(subset(data, label==2))), nrow(subset(data, label==2))*0.8),
         sample(as.numeric(rownames(subset(data, label==3))), nrow(subset(data, label==3))*0.8),
         sample(as.numeric(rownames(subset(data, label==4))), nrow(subset(data, label==4))*0.8))
    # Held-out 20%; the 80% side is unused because `model` is fit once
    # by the caller rather than refit per split.
    t_est <- data[-tf,]
    m1 <- model

    # column of predicted classes
    t_est$predictions <- predict(m1, t_est)

    # 0-1 loss (overall)
    loss <- ifelse(t_est$label != t_est$predictions, 1, 0)
    rating <- c(rating, sum(loss==1)/length(loss)) #overall misclassification rate

    #missclassified0
    sub0 <- subset(t_est, label==0)
    loss0 <- ifelse(sub0$label != sub0$predictions, 1, 0)
    l0 <- c(l0, sum(loss0==1)/length(loss0))

    #missclassified1
    sub1 <- subset(t_est, label==1)
    loss0 <- ifelse(sub1$label != sub1$predictions, 1, 0)
    l1 <- c(l1, sum(loss0==1)/length(loss0))

    #missclassified2
    sub2 <- subset(t_est, label==2)
    loss0 <- ifelse(sub2$label != sub2$predictions, 1, 0)
    l2 <- c(l2, sum(loss0==1)/length(loss0))

    #missclassified3
    sub3 <- subset(t_est, label==3)
    loss0 <- ifelse(sub3$label != sub3$predictions, 1, 0)
    l3 <- c(l3, sum(loss0==1)/length(loss0))

    #missclassified4
    sub4 <- subset(t_est, label==4)
    loss0 <- ifelse(sub4$label != sub4$predictions, 1, 0)
    l4 <- c(l4, sum(loss0==1)/length(loss0))
  }
  mcr=as.data.frame(rbind(l0,l1,l2,l3,l4,rating))
  mcr$Average=rowMeans(mcr)
  return(mcr)
}

#final model
#random forest
rftrain=df_train

#single effects
rf <- randomForest(as.factor(label) ~ chest_ACC_X + chest_ACC_Y + chest_ACC_Z +
                     chest_ECG + chest_EMG + chest_EDA + chest_Temp + chest_Resp +
                     wrist_ACC_X + wrist_ACC_Y + wrist_ACC_Z + wrist_BVP +
                     wrist_EDA + wrist_Temp,
                   data=rftrain, ntree=5000, nodesize=15)

# NOTE(review): predictions for df_test are stored as columns of rftrain
# (a copy of df_train); this only works if both frames have the same number
# of rows -- confirm.
rftrain$rfpred=predict(rf, df_test)

#plot actual vs. predicted
#actual predictions
# NOTE(review): `mod1` (the "final model") is not defined in this script;
# presumably it is created/loaded by a companion script -- confirm before
# running this line.
rftrain$finpred=predict(mod1, df_test)

#Get missclass rate
avsens=as.data.frame(avclassifyrate(df, rf)$Average)
print(avsens)
save(avsens, file="MisclassRandFor.RData")

#Make percentage classification table
pclass=merge(count(rftrain$finpred), count(rftrain$rfpred), by="x", all=TRUE)
pclass=pclass[,-1]
pclass=(pclass/nrow(rftrain))*100

#Calculate differences in classification relative to final model
diffrf=ifelse(rftrain$finpred==rftrain$rfpred,0,1)
pclass=rbind(pclass, c(0,sum(diffrf)))
rownames(pclass)=c("% Classified as 0","% Classified as 1","% Classified as 2",
                   "% Classified as 3","% Classified as 4",
                   "Absolute Diff in Classification")
colnames(pclass)=c("Final Model","Random Forest")
pclass[is.na(pclass)]=0
print(pclass)
save(pclass, file="Classification_Table_for Forest.RData")
d0b6c777b1310244f3d4289feb19afc95893b080
8629ad85edfb2293280f0820c27c933739bebc5a
/submissions/01_r4ds-data-transformation-hl2da.R
957b9a18a885c03769a0ed618c8810c46f2369ee
[]
no_license
GCOM7140/r4ds-exercises
ce94ac4f3a4a7c5d3038db76ae54cacbad6ad22d
a5fefe1bfcca6ae0d4d231a4b3e2222cb963ce17
refs/heads/master
2021-05-01T15:03:27.315763
2019-07-29T22:31:46
2019-07-29T22:31:46
121,028,562
1
1
null
2018-04-11T13:10:41
2018-02-10T15:43:46
HTML
UTF-8
R
false
false
2,628
r
01_r4ds-data-transformation-hl2da.R
# BUGFIX: this file contained an unresolved git merge conflict
# (<<<<<<< HEAD / ======= / >>>>>>> markers), which made it unparseable.
# Both sides of the conflict held identical code, so a single copy is kept.
library(tidyverse)
library(nycflights13)

# Question 1
# How many flights flew into LAX?
filter(flights, dest == "LAX")
nrow(filter(flights, dest == "LAX"))
flights %>%
  filter(dest == "LAX") %>%
  nrow()

# How many flights flew out of LAX?
flights %>%
  filter(origin == "LAX") %>%
  nrow()

# How many flights were longer than or equal to 2,000 miles in distance?
flights %>%
  filter(distance >= 2000) %>%
  nrow()

# How many flights were destined for airports in the Los Angeles area
# (LAX, ONT, SNA, PSP, SBD, BUR, or LGB), but did not originate out of JFK?
flights %>%
  filter(
    dest %in% c("LAX", "ONT", "SNA", "PSP", "SBD", "BUR", "LGB"),
    origin != "JFK"
  ) %>%
  nrow()

# Question 2: flights that departed but have no recorded arrival time
flights %>%
  filter(!is.na(dep_time), is.na(arr_time)) %>%
  nrow()

# Question 3: sort rows with missing arrival times first
flights %>%
  arrange(desc(is.na(arr_time)))

# Question 4
select(flights, contains("TIME"))
# NOTE(review): the original comment claimed contains() is case sensitive;
# tidyselect's contains() defaults to ignore.case = TRUE, so both of these
# calls should select the *_time columns -- verify against the dplyr
# version in use.
select(flights, contains("TIME", ignore.case = TRUE))

# Question 5: top 3 destinations by share of total arrival-delay minutes
# among delayed long-haul (>= 2000 mi) flights
flights %>%
  filter(distance >= 2000, arr_delay > 0) %>%
  group_by(dest) %>%
  summarize(arr_delay_mins = sum(arr_delay)) %>%
  mutate(arr_delay_pct_of_total = arr_delay_mins / sum(arr_delay_mins)) %>%
  arrange(desc(arr_delay_pct_of_total)) %>%
  head(3)
43a8f805d63f9f365ca2e07dc37044c60d710f2e
db9a558fe2273bcaa88d5c0c47633857766492fa
/Chapter 1 Updated.R
e361f67523eb5d09107ab6fb4da1d49038e70bfd
[]
no_license
uvonpunkfarm/que
e0058061a286107e6b638aa487ff902ff1b0a4ea
fe375fd65a12e363d7a96ed118b49b560a6a9158
refs/heads/master
2020-12-26T20:25:45.186586
2020-02-09T21:33:09
2020-02-09T21:33:09
237,631,400
0
0
null
null
null
null
UTF-8
R
false
false
900
r
Chapter 1 Updated.R
## Chapter 1 practice: vectors, sequences, matrices, lists and loops.
## BUGFIXES relative to the original:
##   - `Long ass number <- 5:200` was a syntax error (spaces in a bare name);
##   - both `dim(...) <c(...)` lines used comparison `<` instead of the
##     assignment `<-`, so no matrix was ever created and the subsequent
##     matrix indexing failed.

x <- c(5, 10, 15, 20, 25, 30, 35, 40)
x
sum(x)
mean(x)
x
x

y <- seq(5, 40, 13)
y
z <- seq(2, 6, 2)
z

# Pythagoras: length of the hypotenuse given the two legs.
hypoteneuse <- function(a, b) {
  hyp <- sqrt(a^2 + b^2)
  return(hyp)
}

Raptors <- c("Lowry", "DeRozan", "Bosh", "Kawhi")

# Fixed: a bare variable name cannot contain spaces.
long_ass_number <- 5:200

quadrifecta <- c(1, 2, 3, 4)
repeated_quadrifecta <- rep(quadrifecta, 5)
repeated_quadrifecta

# Element-wise repeat counts: each value of quadrifecta repeated 2,1,2,1 times.
repeating <- c(2, 1, 2, 1)
rep_vector <- rep(quadrifecta, repeating)
rep_vector

num_matrix <- seq(5, 100, 5)
# Fixed `<c(6,4)` -> `<- c(...)`. The original 6 x 4 shape needs 24 values
# but the sequence has 20.
# NOTE(review): assuming a 5 x 4 matrix was intended -- confirm.
dim(num_matrix) <- c(5, 4)
num_matrix
num_matrix[3, 1]

num_matrix2 <- seq(1, 10, 1)
# Fixed likewise; the original 3 x 2 shape needs 6 values but there are 10.
# NOTE(review): assuming 5 x 2 -- confirm.
dim(num_matrix2) <- c(5, 2)
num_matrix2

Raptors <- c("Lowry", "DeRozan", "Bosh", "Kawhi")
ages <- c(34, 29, 35, 27)
Raptorsages <- list(names = Raptors, currentage = ages)
Raptorsages
Raptorsages$currentage[3]
Raptorsages$names[Raptorsages$currentage >= 28]

# Label each element of xx as "EVEN" or "ODD".
xx <- seq(1, 6, 1)
yy <- NULL
for (i in seq_along(xx)) {
  if (xx[i] %% 2 == 0) {
    yy[i] <- "EVEN"
  } else {
    yy[i] <- "ODD"
  }
}
yy
xx
73496ffcce43e5246e8427b8aef8c5fa932e4f80
57d6bac4eae56c4efcddcd212eedf47eaafa142d
/practical_machine_learning/project_scratchpad_june.R
32089cc6daadb6a0fbe87ceed69ad435a88f3aeb
[]
no_license
sdevine188/coursera_code
14e8ef7e74e02c50fca76636f7cb4ed7f47af6bf
f2069acec6a247746c178ee706bd85fa4d550479
refs/heads/master
2021-01-25T06:40:03.171925
2016-01-25T16:15:19
2016-01-25T16:15:19
31,866,660
0
0
null
null
null
null
UTF-8
R
false
false
2,404
r
project_scratchpad_june.R
# Scratchpad: clean the Weight Lifting Exercise data and try quick
# random-forest / gbm fits with caret on a small sample.

# read in data
setwd("C:/Users/Steve/Desktop/Coursera/Practical Machine Learning")
full_training <- read.csv("pml-training.csv")
full_testing <- read.csv("pml-testing.csv")

# split full_training into training and testing
in_train <- createDataPartition(full_training$classe, p = .7, list = FALSE)
training <- full_training[in_train, ]
testing <- full_training[-in_train, ]

# remove non-predictors (row id, then the next five bookkeeping columns)
training1 <- training[ , -1]
training1 <- training1[ , -c(2:6)]
testing1 <- testing[ , -1]
testing1 <- testing1[ , -c(2:6)]

# find NAs
# NOTE(review): the first result is immediately overwritten by the second --
# presumably both counts were meant to be inspected; confirm.
missing <- lapply(training1, function(x) length(which(is.na(x))))
missing <- lapply(testing1, function(x) length(which(is.na(x))))

# convert NAs to blanks
training2 <- training1
training2[is.na(training2)] <- ""
testing2 <- testing1
testing2[is.na(testing2)] <- ""

# convert #DIV/0! to blanks
# all the data remains accurate after conversion, but all variables are
# inexplicably turned into factors
training3 <- training2
training3 <- as.data.frame(lapply(training3, function(x) str_replace(x, "#DIV/0!", "")))

# turn factors into numeric variables to speed processing time
# all variable columns except 1 and 154 can be converted to numeric
# column 1 is user_name, column 154 is classe
# but it converts blanks to NA, so we'll need to reconvert NAs to blanks again
training4 <- training3
# more efficient version of as.numeric(as.character(x))
# http://stackoverflow.com/questions/3418128/how-to-convert-a-factor-to-an-integer-numeric-without-a-loss-of-information
# training4 <- as.data.frame(lapply(training4, function(x) as.numeric(as.character(x))))
training4 <- as.data.frame(lapply(training4[ , -c(1, 154)], function(x) as.numeric(levels(x))[x]))

# re-add columns 1 and 154
training4 <- cbind(training3[ , 1], training4)
training4 <- cbind(training4, training3[ , 154])
names(training4)[154] <- "classe"

# reconvert NAs to blanks
training5 <- training4
training5[is.na(training5)] <- ""

# small dataset to try rf model
sample_index <- sample(1:nrow(training5), 500)
sample <- training3[sample_index, ]
str(sample)

# BUGFIX: originally `data = sample2`, but `sample2` is never defined;
# the sampled frame above is named `sample`.
rf_mod <- train(classe ~ ., method = "rf", data = sample)

# 3-fold CV instead of the default 10 (faster).
# BUGFIX: `number` is an argument of trainControl(), not of train();
# originally it was passed to train(), where it is forwarded to the
# underlying model instead of controlling the CV folds.
rf_mod <- train(classe ~ ., method = "rf", data = sample,
                trControl = trainControl(method = "cv", number = 3))

# try gbm
gbm_mod <- train(classe ~ ., method = "gbm", data = sample)
94c011b1344d5fcf6d08962b94adfacb2c030402
7fb8caee598f0d71598f3f022d9552c6b9b862f6
/sentiment.r
9c9bd112e6128bb878f132d98261ec30c23d7a6b
[]
no_license
Nivas138/Predicting-Social-Nexus
6525f6d8a68de5775ff02f08ee20e8876386d051
3077bd25a22e0600a2e4879dca82f593e52e1e3d
refs/heads/master
2020-04-13T22:26:56.306650
2019-03-20T08:27:48
2019-03-20T08:27:48
163,479,473
0
0
null
null
null
null
UTF-8
R
false
false
1,600
r
sentiment.r
# Word cloud and NRC sentiment analysis of a chat-log text file.

# Directory holding the chat export.
setwd('C:\\Users\\Nivas\\Documents\\')
getwd()

# One-time package installation.
install.packages("ggplot2")
install.packages("tm")
install.packages("wordcloud")
install.packages("syuzhet")
?tm

library(ggplot2)
library(tm)
library(wordcloud)
library(syuzhet)

# Read the raw chat lines and echo them.
texts = readLines("chat1.txt")
print(texts)

# Build a corpus, one document per line.
corpus <- Corpus(VectorSource(texts))
corpus

# Transformer that replaces a matched pattern with a single space.
to_space <- content_transformer(function(x, pattern) gsub(pattern, " ", x))
for (pattern in c("/", "@", "\\|")) {
  corpus <- tm_map(corpus, to_space, pattern)
}

# Standard clean-up pipeline: lower-case, strip numbers/stopwords/
# punctuation, collapse whitespace, stem.
corpus <- tm_map(corpus, content_transformer(tolower))
corpus <- tm_map(corpus, removeNumbers)
corpus <- tm_map(corpus, removeWords, stopwords("english"))
corpus <- tm_map(corpus, removePunctuation)
corpus <- tm_map(corpus, stripWhitespace)
corpus <- tm_map(corpus, stemDocument)
corpus

# Term frequencies, sorted descending.
tdm <- TermDocumentMatrix(corpus)
term_matrix <- as.matrix(tdm)
term_matrix
term_freq <- sort(rowSums(term_matrix), decreasing = TRUE)
print(term_freq)
word_freq <- data.frame(word = names(term_freq), freq = term_freq)
head(word_freq)

# Reproducible word cloud of the 200 most frequent stems.
set.seed(1056)
wordcloud(words = word_freq$word, freq = word_freq$freq, min.freq = 1,
          max.words = 200, random.order = FALSE, rot.per = 0.45,
          colors = brewer.pal(8, "Dark2"))

# NRC emotion/sentiment scores, one row per chat line.
?get_nrc_sentiment
sentiment <- get_nrc_sentiment(texts)
print(sentiment)
scored <- cbind(texts, sentiment)
head(scored)

# Column totals over the ten NRC categories (columns 2-11).
total_sentiment <- data.frame(colSums(scored[, c(2:11)]))
total_sentiment
names(total_sentiment) <- "count"
total_sentiment <- cbind("sentiment" = rownames(total_sentiment), total_sentiment)
print(total_sentiment)
rownames(total_sentiment)

# Bar chart of the total score per sentiment category.
ggplot(data = total_sentiment, aes(x = sentiment, y = count)) +
  geom_bar(aes(fill = sentiment), stat = "identity") +
  theme(legend.position = "none") +
  xlab("sentiment") +
  ylab("Total Count") +
  ggtitle("Total semtiment Score")
7894c5bcf78e793c74074f125630c74445c6b2e9
da4c8b3a0201143378037766966603e8d5e4598d
/plot1.R
f98d286f62a24e738e771bb829fda6bfdfd765f6
[]
no_license
hmoralesos/ExData_Plotting1
1b0e9e089b19c7a699acd3959097c2053a8723d6
3dc08ee4931e6dc1ca57722ca593ecafc4931f99
refs/heads/master
2021-01-17T20:24:59.835872
2016-08-15T19:11:38
2016-08-15T19:11:38
65,756,123
0
0
null
2016-08-15T18:43:22
2016-08-15T18:43:19
null
UTF-8
R
false
false
1,257
r
plot1.R
################################################################################
# Read complete dataset                                                        #
################################################################################
# Semicolon-separated household power consumption file.
dataset<-read.table("household_power_consumption.txt",header=TRUE,sep=";")
# Quick sanity checks on what was read.
head(dataset,5)
str(dataset)
dim(dataset)
################################################################################
# Extract data from the dates 2007-02-01 and 2007-02-02                        #
################################################################################
# Keep only the two days of interest; dates are stored as d/m/yyyy strings.
data<-subset(dataset,Date=="1/2/2007"|Date=="2/2/2007")
head(data,5)
# Outer parentheses print the row count while assigning it.
(n<-nrow(data))
################################################################################
# Plot 1                                                                       #
################################################################################
# Column Global_active_power
# Coerce via character first: if the column was read as a factor,
# as.numeric() alone would return the level codes, not the values.
plot1<-as.numeric(as.character(data$Global_active_power))
# Save plot as a 480x480 PNG.
png(filename = "plot1.png",width = 480, height = 480, units = "px", pointsize = 12,bg = "white")
hist(plot1,main="Global Active Power", xlab="Global Active Power (kilowatts)", col="red")
dev.off() # close png
779d56e0eac0986e7a411fe7ff6dc307e14c419e
714e7c6736a2e3d8fd07634427c4a8bb3cef2d61
/man/plot_avg_dot.Rd
9c313e4ce776c5e8b33c5b7c4d2e58312fc89a51
[ "MIT" ]
permissive
flaneuse/llamar
da7cb58a03b2adbffb6b2fe2e57f3ffeede98afb
ea46e2a9fcb72be872518a51a4550390b952772b
refs/heads/master
2021-01-18T00:10:00.797724
2017-10-24T13:41:21
2017-10-24T13:41:21
48,335,371
0
1
null
null
null
null
UTF-8
R
false
true
3,444
rd
plot_avg_dot.Rd
% Generated by roxygen2: do not edit by hand % Please edit documentation in R/plot_avg_dot.R \name{plot_avg_dot} \alias{plot_avg_dot} \title{Plot a dot plot after averaging the values} \usage{ plot_avg_dot(df, by_var = "region", value_var = "avg", incl_x_axis = TRUE, x_label = NULL, x_limits = NULL, x_breaks = waiver(), include_n = TRUE, n_shape = "square", low_colour = grey10K, high_colour = grey70K, use_weights = FALSE, strata_var = "strata", psu_var = "psu", weight_var = "weight", na.rm = TRUE, sort_asc = FALSE, sort_by = "avg", plot_ci = TRUE, ci_factor = 2, lb_var = "lb", ub_var = "ub", ci_colour = grey25K, ci_alpha = 0.6, ci_size = 2, ref_line = TRUE, ref_text = "sample average", label_ref = TRUE, nudge_ref_label = NULL, ref_label_y = 1, ref_arrow = arrow(length = unit(0.007, "npc")), ref_stroke = 0.5, ref_colour = grey75K, lollipop = FALSE, lollipop_stroke = 0.25, lollipop_colour = grey75K, facet_var = NULL, ncol = NULL, nrow = NULL, scales = "fixed", dot_size = 6, dot_shape = 21, dot_fill_cont = brewer.pal(9, "YlGnBu"), label_vals = TRUE, label_size = 3, label_colour = grey75K, label_digits = 1, percent_vals = FALSE, value_label_offset = NULL, sat_threshold = 0.5, horiz = TRUE, file_name = NULL, width = 10, height = 6, saveBoth = FALSE, font_normal = "Lato", font_semi = "Lato", font_light = "Lato Light", panel_spacing = 1, font_axis_label = 12, font_axis_title = font_axis_label * 1.15, font_facet = font_axis_label * 1.15, font_legend_title = font_axis_label, font_legend_label = font_axis_label * 0.8, font_subtitle = font_axis_label * 1.2, font_title = font_axis_label * 1.3, legend.position = "none", legend.direction = "horizontal", grey_background = FALSE, background_colour = grey10K, projector = FALSE) } \description{ Plot a dot plot after averaging the values } \examples{ # generate random data library(dplyr) df = data.frame(avg = sample(1:100, 10), region = letters[1:10], ci = sample(1:100, 10)/10) \%>\% mutate(lb = avg - ci, ub = avg + ci) # sans 
confidence intervals
plot_avg_dot(df, by_var = 'region', value_var = 'avg')

# with confidence intervals, no labels
plot_avg_dot(df, by_var = 'region', value_var = 'avg', plot_ci = TRUE, label_vals = FALSE)

# as lollipops
df2 = data.frame(avg = sample(-100:100, 10)/100, region = letters[1:10],
ci = sample(1:100, 20)/1000) \%>\% mutate(lb = avg - ci, ub = avg + ci)
library(RColorBrewer)
plot_avg_dot(df2, by_var = 'region', value_var = 'avg', lollipop = TRUE,
dot_fill_cont = brewer.pal(10, 'RdYlBu'))

# percent labels
plot_avg_dot(df2, by_var = 'region', value_var = 'avg', percent_vals = TRUE, lollipop = TRUE,
dot_fill_cont = brewer.pal(10, 'RdYlBu'))

# with reference line
plot_avg_dot(df2, by_var = 'region', value_var = 'avg', ref_line = 0, ref_text = 'no change',
label_ref = FALSE, lollipop = TRUE, dot_fill_cont = brewer.pal(10, 'RdYlBu'), percent_vals = TRUE)

# horizontal
plot_avg_dot(df2, by_var = 'region', value_var = 'avg', horiz = FALSE, ref_line = 0,
ref_text = 'no change', lollipop = TRUE, plot_ci = TRUE, dot_fill_cont = brewer.pal(10, 'RdYlBu'))

# in-built facet_wrap. Note: may screw up ordering, since will sort based on ALL the data.
df3 = data.frame(avg = sample(-100:100, 20), region = rep(letters[1:10], 2),
group = c(rep('group1', 10), rep('group2', 10)))
plot_avg_dot(df3, by_var = 'region', value_var = 'avg', facet_var = 'group', lollipop = TRUE,
dot_fill_cont = brewer.pal(10, 'RdYlBu'))
}
8de5be52896a330d9c1aa8306d1d06d4cd921b38
109681dbabeb2ba82dc1ef895a28d40f03033ccb
/man/ontologyLogPage-methods.Rd
16bed29bf67ea25f428bbb7494345174f15e23f4
[]
no_license
frenkiboy/cellexalvrR
97cc210f47c0fcff998704200adfcd549ffabbcf
cf9ab9e8c5fd519d0db2dd98b7ccbc84812cba77
refs/heads/master
2020-12-22T11:00:05.728789
2020-01-28T14:49:13
2020-01-28T14:49:13
236,758,585
0
0
null
2020-01-28T14:47:26
2020-01-28T14:47:25
null
UTF-8
R
false
true
842
rd
ontologyLogPage-methods.Rd
% Generated by roxygen2: do not edit by hand % Please edit documentation in R/ontologyLogPage.R \docType{methods} \name{ontologyLogPage} \alias{ontologyLogPage} \alias{ontologyLogPage,cellexalvrR-method} \title{description of function ontologyLogPage} \usage{ ontologyLogPage(cellexalObj, genes, grouping = NULL, ontology = "BP", topNodes = 10, ...) } \arguments{ \item{cellexalObj}{the cellexalvrR object} \item{genes}{a list of gene symbols (IMPORTANT)} \item{grouping}{the grouping this gene list originated on (default = NULL; use last grouping)} \item{ontology}{which GO ontology to choose from (default = "BP")} \item{topNodes}{how many GO terms to report (default 10)} \item{...}{unused} } \description{ creates the GO analysis for a gene list and puts it into the report. } \details{ The ontology analysis for the log files. }
bd7440d20b5875b1e8ca57116b8ff91c92b9d6da
58f4573bc3e9efbc14ff9ebbf089231c246cf066
/man/inlineModel.Rd
895753bad6ac628462b0402af0c548a93d1eddea
[]
no_license
Anathawa/mlxR
1a4ec2f277076bd13525f0c1d912ede3d20cb1cc
7e05119b78b47c8b19126de07c084e7d267c4baf
refs/heads/master
2021-01-19T09:17:35.765267
2017-04-05T18:00:39
2017-04-05T18:00:39
null
0
0
null
null
null
null
UTF-8
R
false
true
311
rd
inlineModel.Rd
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/inlineModel.R
\name{inlineModel}
\alias{inlineModel}
\title{inline model}
\usage{
inlineModel(str, filename = NULL)
}
\arguments{
\item{str}{model}

\item{filename}{where to write the temporary model}
}
\description{
inline model
}
fc1a2cb14010fca4dbefa9f9828c2477cb486e0c
f25c5405790cf17a2b6e78b4ef58654810c8bb7b
/R/piechart.R
32fff8197d5b8c9a0f0e970e21c6e576b6017c2e
[]
no_license
moturoa/shintodashboard
15ad881ea4c72549b616a3021852a0db8c25f6fd
80385da221d370a563eb1cfe8946964acfacfe15
refs/heads/master
2023-05-31T07:05:11.026309
2021-06-28T12:55:32
2021-06-28T12:55:32
312,505,839
0
0
null
null
null
null
UTF-8
R
false
false
1,174
r
piechart.R
# #' @importFrom waffle geom_waffle # #' @importFrom waffle theme_enhance_waffle piechart <- function(data, xvar, yvar, xlab = NULL, ylab = NULL, glab = NULL, type = "Pie", na.rm=FALSE){ data <- as.data.frame(data) data$xv <- data[,xvar] data$yv <- data[,yvar] if(na.rm){ data <- dplyr::filter(data, !is.na(xv)) } type <- match.arg(type) dat <- group_by(data, xv) %>% summarize(n = sum(yv, na.rm=TRUE)) if(is.null(xlab))xlab <- xvar if(is.null(ylab))ylab <- "" if(is.null(glab))glab <- "" if(type == "Pie"){ ggplot(dat, aes(fill = xv, y = n, x = "")) + geom_bar(width = 1, stat = "identity") + coord_polar("y", start=0) + theme_minimal() + labs(x = xlab, y = ylab, fill = glab) } # else if(type == "Waffle"){ # # ggplot(dat, aes(fill = xv, values = n, x = "")) + # geom_waffle(n_rows = 25, size = 0.33, colour = "white", flip = FALSE) + # theme_minimal() + # labs(x = xlab, y = ylab) # # theme_enhance_waffle() + # # } }
d81082b025b15834cce818b8dc6c2de6f4c8c166
d8da5c909feddfa679dceb2aa79da8483b607ada
/models/stacked_model/stacked_model_draft.R
560a31eb4f1ed8da430a84ee0a719d79f3245241
[]
no_license
edouardArgenson/house_prices
9af379fba6378a123227239c1e56a0fb991bac5f
6c348f2d02359ba2790ae9f3e23dc287aabb2d9a
refs/heads/master
2021-01-20T08:41:40.694968
2017-05-03T18:25:11
2017-05-03T18:25:11
70,399,835
0
0
null
null
null
null
UTF-8
R
false
false
15,188
r
stacked_model_draft.R
# ---------------------------------------------------------------------------
# Stacked-model draft for the Kaggle "House Prices" competition.
# Level 0: an SVR and a GBM fitted on partition train_a; level 1: a GBM
# fitted on the level-0 predictions for partition train_b.
# This section: load and clean the training data, then fit the level-0 SVR.
# ---------------------------------------------------------------------------
library('lattice')
library('ggplot2')
library('caret')
library('data.table')
library('Metrics')
library('MASS')
library('e1071')
library('kernlab')
library('gbm')
library('survival')
library('splines')
library('parallel')
library('plyr')

# Read the raw training data; a few mostly-empty columns are forced to
# character so fread does not mis-guess their type.
train = fread('~/kaggle/house_prices/data/train.csv',
              colClasses=c('MiscFeature'='character','PoolQC'='character','Alley'='character'))

# Rename columns 1stFlrSF, 2ndFlrSF, and 3SsnPorch (leading digits are not
# valid R names and are awkward in formulas / data.table expressions).
FirstFlrSF=train$'1stFlrSF'
SecondFlrSF=train$'2ndFlrSF'
ThreeSsnPorch=train$'3SsnPorch'
new_names = names(train)[-which(names(train)=='1stFlrSF'|names(train)=='2ndFlrSF'|names(train)=='3SsnPorch')]
to_add = data.table(FirstFlrSF,SecondFlrSF,ThreeSsnPorch)
train = cbind(train[,new_names,with=FALSE],to_add)

# Transform categorical arguments KitchenQual, ExterQual, BsmtQual,
# GarageFinish into numerical scores.  Levels not listed below (e.g.
# 'Fa'/'Po') stay at 0 -- presumably intentional; TODO confirm.
# KitchenQual
nKitchenQual = numeric(length(train$KitchenQual))
nKitchenQual[train$KitchenQual=='TA']=1.0
nKitchenQual[train$KitchenQual=='Gd']=2.0
nKitchenQual[train$KitchenQual=='Ex']=3.0
train=cbind(train,nKitchenQual)
# ExterQual
nExterQual = numeric(length(train$ExterQual))
nExterQual[train$ExterQual=='TA']=1.0
nExterQual[train$ExterQual=='Gd']=2.0
nExterQual[train$ExterQual=='Ex']=3.0
train=cbind(train,nExterQual)
# BsmtQual
nBsmtQual = numeric(length(train$BsmtQual))
nBsmtQual[train$BsmtQual=='TA']=1.0
nBsmtQual[train$BsmtQual=='Gd']=2.0
nBsmtQual[train$BsmtQual=='Ex']=3.0
train=cbind(train,nBsmtQual)
# GarageFinish
nGarageFinish = numeric(length(train$GarageFinish))
nGarageFinish[train$GarageFinish=='Unf']=1.0
nGarageFinish[train$GarageFinish=='RFn']=2.0
nGarageFinish[train$GarageFinish=='Fin']=3.0
train=cbind(train,nGarageFinish)

# Full and half bathrooms (combined counts)
train$Bath = train$FullBath + train$HalfBath
train$BsmtBaths = train$BsmtFullBath + train$BsmtHalfBath

# TotalBsmtSF_on_GRLivArea (ratio feature, for SVR)
train$TotalBsmtSF_on_GrLivArea = train$TotalBsmtSF/train$GrLivArea

# MSSubClassCat: MSSubClass is a numeric code, re-encode it as a string so
# it is treated as categorical downstream.
# NOTE(review): the RHS is a one-column data.table assigned into a column --
# verify this yields a plain character column, not a nested table.
train$MSSubClassCat = train[,.(MSSubClassCat=sapply(MSSubClass,toString)),with=TRUE]

# Deal with missing values in the training set.
LotFrontage_mean = round(mean(train$LotFrontage,na.rm=TRUE))
train[which(is.na(LotFrontage)),'LotFrontage'] <- LotFrontage_mean
train=cbind(train,"IsGarage"=1+numeric(nrow(train)))
train[which(is.na(GarageYrBlt)),'GarageYrBlt'] <- 1900
#train[which(is.na(GarageQual)),'IsGarage'] <- 0
train[which(is.na(MasVnrArea)),'MasVnrArea'] <- 0
train[which(is.na(BsmtCond)),'BsmtCond'] <- 'MISSING'
train[which(is.na(BsmtFinType1)),'BsmtFinType1'] <- 'MISSING'
train[which(is.na(BsmtFinType2)),'BsmtFinType2'] <- 'MISSING'
train[which(is.na(BsmtFinSF1)),'BsmtFinSF1'] <- 0
train[which(is.na(BsmtFinSF2)),'BsmtFinSF2'] <- 0
train[which(is.na(TotalBsmtSF)),'TotalBsmtSF'] <- 0
train[which(is.na(GarageCars)),'GarageCars'] <- 0
train[which(is.na(GarageArea)),'GarageArea'] <- 0
train[which(is.na(BsmtUnfSF)),'BsmtUnfSF'] <- 0
train[which(is.na(BsmtFullBath)),'BsmtFullBath'] <- 0
train[which(is.na(BsmtHalfBath)),'BsmtHalfBath'] <- 0
train[which(is.na(MSZoning)),'MSZoning'] <- 'RL'
train[which(is.na(SaleType)),'SaleType'] <- 'Oth'
train[which(is.na(Exterior1st)),'Exterior1st'] <- 'Other'
train[which(is.na(Exterior2nd)),'Exterior2nd'] <- 'Other'
train[which(is.na(Functional)),'Functional'] <- 'Typ'

## Deal with missing values (test-set version, kept here commented out; the
## live copy runs after the test set is loaded further down)
#test[which(is.na(LotFrontage)),'LotFrontage'] <- LotFrontage_mean
#test=cbind(test,"IsGarage"=1+numeric(nrow(test)))
#test[which(is.na(GarageYrBlt)),'GarageYrBlt'] <- 1900
##test.sample[which(is.na(GarageQual)),'IsGarage'] <- 0
#test[which(is.na(MasVnrArea)),'MasVnrArea'] <- 0
#test[which(is.na(BsmtCond)),'BsmtCond'] <- 'MISSING'
#test[which(is.na(BsmtFinType1)),'BsmtFinType1'] <- 'MISSING'
#test[which(is.na(BsmtFinType2)),'BsmtFinType2'] <- 'MISSING'
#test[which(is.na(BsmtFinSF1)),'BsmtFinSF1'] <- 0
#test[which(is.na(BsmtFinSF2)),'BsmtFinSF2'] <- 0
#test[which(is.na(TotalBsmtSF)),'TotalBsmtSF'] <- 0
#test[which(is.na(GarageCars)),'GarageCars'] <- 0
#test[which(is.na(GarageArea)),'GarageArea'] <- 0
#test[which(is.na(BsmtUnfSF)),'BsmtUnfSF'] <- 0
#test[which(is.na(BsmtFullBath)),'BsmtFullBath'] <- 0
#test[which(is.na(BsmtHalfBath)),'BsmtHalfBath'] <- 0
#test[which(is.na(MSZoning)),'MSZoning'] <- 'RL'
#test[which(is.na(SaleType)),'SaleType'] <- 'Oth'
#test[which(is.na(Exterior1st)),'Exterior1st'] <- 'Other'
#test[which(is.na(Exterior2nd)),'Exterior2nd'] <- 'Other'
#test[which(is.na(Functional)),'Functional'] <- 'Typ'
#test$BsmtBaths = test$BsmtFullBath + test$BsmtHalfBath
#train.kept = train[,kept_features,with=FALSE]
#test.kept = test[,kept_features[-which(kept_features=="SalePrice")],with=FALSE]

# separate train set in two parts: train_a and train_b
# train_a for fitting base models
# train_b for fitting stage 2 model
set.seed(10)
train_a_part = createDataPartition(train$SalePrice,p=.80,list=FALSE)
#train.sample = train.kept[inTrain,-"SalePrice",with=FALSE]
#train.target = train.kept[inTrain,.(SalePrice=as.numeric(SalePrice))]
#test.sample = train.kept[-inTrain,-"SalePrice",with=FALSE]
#test.target = train.kept[-inTrain,.(SalePrice=as.numeric(SalePrice))]

# fit SVR model on train_a
# meta params (from a previous tuning run): C=1.25, sigma=0.015
kept_features_svr = c("LotArea","OverallQual","YearBuilt","YearRemodAdd","nKitchenQual","nExterQual",
                      "nBsmtQual","GrLivArea","Bath","nGarageFinish",
                      "BsmtFinSF1","GarageCars","TotalBsmtSF","KitchenAbvGr","BedroomAbvGr","TotRmsAbvGrd","OverallCond",
                      "TotalBsmtSF_on_GrLivArea")
train.kept_svr = train[,c(kept_features_svr,"SalePrice"),with=FALSE]
train_a.sample = train.kept_svr[train_a_part,-"SalePrice",with=FALSE]
train_a.target = train.kept_svr[train_a_part,.(SalePrice=as.numeric(SalePrice))]
bootControl <- trainControl(number = 10, verboseIter=TRUE)
tuneGrid = expand.grid(C=c(1.25),sigma=c(0.015)) # mandatory
svrFit_a = train(x=train_a.sample,y=train_a.target$SalePrice,method='svmRadial',trControl=bootControl,
                 tuneGrid=tuneGrid, preProcess=c("center","scale"))

# predict train_b with SVR model
# NOTE(review): train_b is re-scaled on its own statistics here, while
# svrFit_a was preProcess'ed with train_a's center/scale -- verify the two
# scalings are consistent before trusting these predictions.
train_b.sample = data.table(scale(train.kept_svr[-train_a_part,-"SalePrice",with=FALSE]))
train_b.target = train.kept_svr[-train_a_part,.(SalePrice=as.numeric(SalePrice))]
svrFit_a.predict_b = predict(svrFit_a$finalModel,newdata=train_b.sample)
print("train_b.sample SalePrice predicted with model svrFit_a")
# print rmsle
print("rmsle:")
print(rmsle(train_b.target$SalePrice,svrFit_a.predict_b))

# fit gbm model on train_a
# meta parameters (from a previous tuning run): 1950 trees, depth=4, shrinkage=.03
kept_num_features_gbm = c("LotFrontage", "LotArea", "OverallQual", "OverallCond", "YearBuilt", "YearRemodAdd",
                          "BsmtFinSF1", "BsmtFinSF2", "BsmtUnfSF", "TotalBsmtSF", "FirstFlrSF", "SecondFlrSF",
                          "LowQualFinSF", "GrLivArea", "BsmtFullBath", "BsmtHalfBath", "FullBath", "HalfBath",
                          "BedroomAbvGr", "KitchenAbvGr", "TotRmsAbvGrd", "Fireplaces", "GarageYrBlt", "GarageCars",
                          "GarageArea", "WoodDeckSF", "OpenPorchSF", "EnclosedPorch", "ThreeSsnPorch", "ScreenPorch",
                          "PoolArea", "MiscVal", "MoSold", "YrSold")
kept_cat_features_gbm = c("Neighborhood","ExterQual","HeatingQC","CentralAir","KitchenQual","SaleType",
                          "SaleCondition","IsGarage")
kept_features_gbm = c(kept_num_features_gbm,kept_cat_features_gbm)
train.kept_gbm = train[,c(kept_features_gbm,"SalePrice"),with=FALSE]
# Separate numeric and categorical features for conversion (as numeric and factor)
train_a.sample.num_features = train[train_a_part,kept_num_features_gbm,with=FALSE]
train_a.sample.cat_features = train[train_a_part,kept_cat_features_gbm,with=FALSE]
# Change class of data and merge back numeric and categorical
train_a.sample.num_features.toFit = train_a.sample.num_features[,lapply(.SD,as.numeric)]
train_a.sample.cat_features.toFit = train_a.sample.cat_features[,lapply(.SD,as.factor)]
train_a.sample = cbind(train_a.sample.num_features.toFit,train_a.sample.cat_features.toFit)
train_a.target = train.kept_gbm[train_a_part,.(SalePrice=as.numeric(SalePrice))]
bootControl <- trainControl(number = 10, verboseIter=TRUE)
gbmGrid = expand.grid(interaction.depth = (3:5),n.trees = c(1950),shrinkage=c(.02,.03,.04),
                      n.minobsinnode=10)
gbmFit_a = train(train_a.sample,train_a.target$SalePrice,method='gbm',trControl=bootControl,verbose=TRUE,
                 bag.fraction=.8,tuneGrid=gbmGrid,metric='RMSE')
# .1353  (best RMSE observed for this grid in a previous run)

# predict train_b with gbm model
# Separate numeric and categorical features for conversion (as numeric and factor)
train_b.sample.num_features = train[-train_a_part,kept_num_features_gbm,with=FALSE]
train_b.sample.cat_features = train[-train_a_part,kept_cat_features_gbm,with=FALSE]
# Change class of data and merge back numeric and categorical
train_b.sample.num_features.toFit = train_b.sample.num_features[,lapply(.SD,as.numeric)]
train_b.sample.cat_features.toFit = train_b.sample.cat_features[,lapply(.SD,as.factor)]
train_b.sample = cbind(train_b.sample.num_features.toFit,train_b.sample.cat_features.toFit)
train_b.target = train.kept_gbm[-train_a_part,.(SalePrice=as.numeric(SalePrice))]
gbmFit_a.predict_b = predict(gbmFit_a$finalModel,newdata=train_b.sample,n.trees=1950)
print("train_b.sample SalePrice predicted with model gbmFit_a")
# print rmsle
print("rmsle:")
print(rmsle(train_b.target$SalePrice,gbmFit_a.predict_b))

# Create new data.table with predictions on train_b, for level 1 model training
#train_2 = data.table(preds_svr=svrFit_a.predict_b,
#                     preds_gbm=gbmFit_a.predict_b,SalePrice=train[-train_a_part,SalePrice])
train_2.sample = data.table(preds_svr=svrFit_a.predict_b,preds_gbm=gbmFit_a.predict_b)
train_2.target = data.table(SalePrice=train[-train_a_part,SalePrice])
#head(train_2.sample)
#head(train_2.target)

# Fitting a gbm as level 1 model
gbmGrid <- expand.grid(interaction.depth = (1:3),n.trees = (30:40)*5,
                       shrinkage = c(.02,.03,.04,.05,.06,.07,.08),n.minobsinnode = (2:10))
bootControl <- trainControl(number = 10, verboseIter=TRUE)
gbmFit_2 = train(train_2.sample,train_2.target$SalePrice,method='gbm',trControl=bootControl,verbose=TRUE,
                 bag.fraction=.6,tuneGrid=gbmGrid,metric='RMSE')
# grid-search result:
# n.trees = 165, interaction.depth = 1, shrinkage = 0.05, n.minobsinnode = 5

# load test file
test = fread('~/kaggle/house_prices/data/test.csv',
             colClasses=c('MiscFeature'='character','PoolQC'='character','Alley'='character'))

# Rename columns 1stFlrSF, 2ndFlrSF and 3SsnPorch to avoid trouble
# (leading digits are not valid R names) -- same treatment as the train set.
FirstFlrSF=test$'1stFlrSF'
SecondFlrSF=test$'2ndFlrSF'
ThreeSsnPorch=test$'3SsnPorch'
new_names = names(test)[-which(names(test)=='1stFlrSF'|names(test)=='2ndFlrSF'|names(test)=='3SsnPorch')]
to_add = data.table(FirstFlrSF,SecondFlrSF,ThreeSsnPorch)
test = cbind(test[,new_names,with=FALSE],to_add)

# Transform categorical arguments KitchenQual, ExterQual, BsmtQual,
# GarageFinish, into numerical (mirrors the train-set encoding above)
# KitchenQual
nKitchenQual = numeric(length(test$KitchenQual))
nKitchenQual[test$KitchenQual=='TA']=1.0
nKitchenQual[test$KitchenQual=='Gd']=2.0
nKitchenQual[test$KitchenQual=='Ex']=3.0
test=cbind(test,nKitchenQual)
# ExterQual
nExterQual = numeric(length(test$ExterQual))
nExterQual[test$ExterQual=='TA']=1.0
nExterQual[test$ExterQual=='Gd']=2.0
nExterQual[test$ExterQual=='Ex']=3.0
test=cbind(test,nExterQual)
# BsmtQual
nBsmtQual = numeric(length(test$BsmtQual))
nBsmtQual[test$BsmtQual=='TA']=1.0
nBsmtQual[test$BsmtQual=='Gd']=2.0
nBsmtQual[test$BsmtQual=='Ex']=3.0
test=cbind(test,nBsmtQual)
# GarageFinish
nGarageFinish = numeric(length(test$GarageFinish))
nGarageFinish[test$GarageFinish=='Unf']=1.0
nGarageFinish[test$GarageFinish=='RFn']=2.0
nGarageFinish[test$GarageFinish=='Fin']=3.0
test=cbind(test,nGarageFinish)

# Full and half bathrooms
test$Bath = test$FullBath + test$HalfBath
test$BsmtBaths = test$BsmtFullBath + test$BsmtHalfBath
# TotalBsmtSF_on_GrLivArea
test$TotalBsmtSF_on_GrLivArea = test$TotalBsmtSF/test$GrLivArea
# MSSubClassCat
test$MSSubClassCat = test[,.(MSSubClassCat=sapply(MSSubClass,toString)),with=TRUE]

# Deal with missing values (note: uses LotFrontage_mean computed on train)
test[which(is.na(LotFrontage)),'LotFrontage'] <- LotFrontage_mean
# Remaining test-set imputation (mirrors the train-set rules above), then
# level-0 predictions, the level-1 prediction, and the submission file.
test=cbind(test,"IsGarage"=1+numeric(nrow(test)))
test[which(is.na(GarageYrBlt)),'GarageYrBlt'] <- 1900
#test.sample[which(is.na(GarageQual)),'IsGarage'] <- 0
test[which(is.na(MasVnrArea)),'MasVnrArea'] <- 0
test[which(is.na(BsmtCond)),'BsmtCond'] <- 'MISSING'
test[which(is.na(BsmtFinType1)),'BsmtFinType1'] <- 'MISSING'
test[which(is.na(BsmtFinType2)),'BsmtFinType2'] <- 'MISSING'
test[which(is.na(BsmtFinSF1)),'BsmtFinSF1'] <- 0
test[which(is.na(BsmtFinSF2)),'BsmtFinSF2'] <- 0
test[which(is.na(TotalBsmtSF)),'TotalBsmtSF'] <- 0
test[which(is.na(GarageCars)),'GarageCars'] <- 0
test[which(is.na(GarageArea)),'GarageArea'] <- 0
test[which(is.na(BsmtUnfSF)),'BsmtUnfSF'] <- 0
test[which(is.na(BsmtFullBath)),'BsmtFullBath'] <- 0
test[which(is.na(BsmtHalfBath)),'BsmtHalfBath'] <- 0
test[which(is.na(MSZoning)),'MSZoning'] <- 'RL'
test[which(is.na(SaleType)),'SaleType'] <- 'Oth'
test[which(is.na(Exterior1st)),'Exterior1st'] <- 'Other'
test[which(is.na(Exterior2nd)),'Exterior2nd'] <- 'Other'
test[which(is.na(Functional)),'Functional'] <- 'Typ'
test$BsmtBaths = test$BsmtFullBath + test$BsmtHalfBath
test$TotalBsmtSF_on_GrLivArea = test$TotalBsmtSF/test$GrLivArea
#train.kept = train[,kept_features,with=FALSE]
#test.kept = test[,kept_features[-which(kept_features=="SalePrice")],with=FALSE]

# predict test with lvl0 SVR and gbm models
#---------------------------------
# SVR
# NOTE(review): the test set is scaled on its own statistics -- same caveat
# as for train_b above.
test.sample_svr = data.table(scale(test[,kept_features_svr,with=FALSE])) # don't forget to scale
svrFit_a.test_preds = predict(svrFit_a$finalModel,newdata=test.sample_svr)
#---------------------------------
# gbm
# Separate numeric and categorical features for conversion (as numeric and factor)
test.sample.num_features_gbm = test[,kept_num_features_gbm,with=FALSE]
test.sample.cat_features_gbm = test[,kept_cat_features_gbm,with=FALSE]
# Change class of data and merge back numeric and categorical
test.sample.num_features_gbm.tp = test.sample.num_features_gbm[,lapply(.SD,as.numeric)]
test.sample.cat_features_gbm.tp = test.sample.cat_features_gbm[,lapply(.SD,as.factor)]
test.sample_gbm = cbind(test.sample.num_features_gbm.tp,test.sample.cat_features_gbm.tp)
gbmFit_a.test_preds = predict(gbmFit_a$finalModel,newdata=test.sample_gbm,n.trees=1950)

# build lvl1 test set
# NOTE(review): column names differ from the level-1 training data
# (preds_svr / preds_gbm) -- confirm predict.gbm matches columns by
# position here, otherwise these should be renamed to match.
test_2.sample = data.table(test_preds_svr=svrFit_a.test_preds,test_preds_gbm=gbmFit_a.test_preds)

# predict test with lvl1 gbm aka gbmFit_2 (n.trees from the grid search above)
gbmFit_2.test_preds = predict(gbmFit_2$finalModel,newdata=test_2.sample,n.trees=165)

# write submission file
test.sample_submission = fread('~/kaggle/house_prices/data/sample_submission.csv')
test.sample_submission = test.sample_submission[,.(Id)]
test.sample_submission.new = cbind(test.sample_submission,SalePrice=gbmFit_2.test_preds)
write.csv(test.sample_submission.new,'~/kaggle/house_prices/data/my_submission_stacked.csv',row.names=FALSE)
# leaderboard score = 0.13956 (with train_a=60% of dataset)
71768805f8dbbd444da86772f916489c730959db
2e3c6f281490f908608c19e1841fdfdbeb081c21
/stuff.R
7f200f2a7cf1c26a52d07300b38499777f693b79
[]
no_license
bdeonovic/binsum1
ad81ccf90be3097c6a929946f92855c9a68aa5a9
a9f501e4aeaf5dc49fdd9c88153392ef029e71c9
refs/heads/master
2020-04-25T06:54:55.202443
2017-07-13T19:50:04
2017-07-13T19:50:04
null
0
0
null
null
null
null
UTF-8
R
false
false
1,228
r
stuff.R
# stuff # Stirling2 is (n,m) where m is upstairs in brackets # ie first parameter greater than second Stirling2(4,2) choose(4,2) clji(6,5,6) ans=data.frame() for (l in 1:6) { for (j in 1:l) { for (i in j:l) { ans=rbind(ans,c(l=l,j=j,i=i,clji=clji(l,j,i))) } } } ans kolmo(c(2,3),c(0.2,0.1),5,0.1) kolmo2(c(2,3),c(0.2,0.1),5,0.1) kolmogorov(c(2,3),c(0.2,0.1)) # examples in paper # example 1 n=rep(5,5) p=seq(0.02,0.10,0.02) n p kolmogorov(n,p) # check # example 2 n=rep(100,5) p=seq(0.01,0.03,0.005) n p kk=kolmogorov(n,p) kk %>% filter(s==19) # check; checked several # example 3 n=seq(500,100,-100) p=1/n n p kk=kolmogorov(n,p) kk %>% filter(s==5) kk %>% filter(s==8) kk %>% filter(s==10) kk %>% filter(s==14) # example 4 n=seq(50,250,50) p=seq(0.1,0.5,0.1) n p kk=kolmogorov(n,p) kk %>% filter(s==275) kk %>% filter(s==296) kk %>% filter(s==305) kk %>% filter(s==320) # example 5 n=c(3,6,2,7) p=c(0.016,0.071,0.093,0.035) n p kk=kolmogorov(n,p) kk %>% filter(s==0) kk %>% filter(s==3) kk %>% filter(s==5) # example 5 n=c(12,14,4,2,20,17,11,1,8,11) p=c(0.074,0.039,0.095,0.039,0.053,0.043,0.067,0.018,0.099,0.045) n p kk=kolmogorov(n,p) kk %>% filter(s==0) kk %>% filter(s==5) kk %>% filter(s==8)
a17bc682a2aaf5955f8b7fb31007ccfe1b03f84d
fea181071db54de2be82d3d669e7c8048400a84b
/Assignment 2.R
fedf153c277ada871baffe2cbabdab1ccbcf8d00
[]
no_license
CoHae/First-Repository-USF-R-Class
16c5e03205588d3a7a9ccce7b2eccd2ee2be04d4
997d21c8ac8a7e7941c52eff618237e2000dc03a
refs/heads/master
2020-12-10T17:27:53.385416
2020-04-04T16:25:23
2020-04-04T16:25:23
233,659,658
1
0
null
null
null
null
UTF-8
R
false
false
459
r
Assignment 2.R
# original/faulty Assignment 2 # it shows that neither "assignment" nor "someData" exist assignment2 <- c(16, 18, 14, 22, 27, 17, 19, 17, 17, 22, 20, 22) myMean <- function(assignment2) { return(sum(assignment)/length(someData)) } myMean(assignment2) someData # corrected code for Assignment 2 assignment2 <- c(16, 18, 14, 22, 27, 17, 19, 17, 17, 22, 20, 22) myMean <- function(assignment2) { return(sum(assignment2)/length(assignment2)) } myMean(assignment2)
e23ecb72cacc054bf9e5641b7ae0dd0d3c6e95cf
8f7320c10f2c5fc8475753dc5256d1a66067e15c
/rkeops/man/ternaryop.LazyTensor.Rd
79e4fd52c367bfd2900deb4aabd33ec5edb75a31
[ "MIT" ]
permissive
getkeops/keops
947a5409710379893c6c7a46d0a256133a6d8aff
52ed22a7fbbcf4bd02dbdf5dc2b00bf79cceddf5
refs/heads/main
2023-08-25T12:44:22.092925
2023-08-09T13:33:58
2023-08-09T13:33:58
182,054,091
910
69
MIT
2023-09-03T20:35:44
2019-04-18T09:04:07
Python
UTF-8
R
false
true
1,865
rd
ternaryop.LazyTensor.Rd
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/lazytensor_preprocess.R
\name{ternaryop.LazyTensor}
\alias{ternaryop.LazyTensor}
\title{Build a ternary operation}
\usage{
ternaryop.LazyTensor(x, y, z, opstr, dim_check_type = "sameor1", dim_res = NA)
}
\arguments{
\item{x}{A \code{LazyTensor}, a \code{ComplexLazyTensor}, a vector of numeric values,
or a scalar value.}

\item{y}{A \code{LazyTensor}, a \code{ComplexLazyTensor}, a vector of numeric values,
or a scalar value.}

\item{z}{A \code{LazyTensor}, a \code{ComplexLazyTensor}, a vector of numeric values,
or a scalar value.}

\item{opstr}{A text string corresponding to an operation.}

\item{dim_check_type}{A string to specify if, and how, we should check input
dimensions. Supported values are:
\itemize{
\item {\strong{"same"}:}{ \strong{x} and \strong{y} should have the same inner
dimension;}
\item {\strong{"sameor1"} (default):}{ \strong{x} and \strong{y} should have the
same inner dimension or at least one of them should be of dimension 1;}
\item {\strong{NA}:}{ no dimension restriction.}
}}

\item{dim_res}{NA (default) or an integer corresponding to the inner
dimension of the output \code{LazyTensor}. If NA, \strong{dim_res} is set to the
maximum between the inner dimensions of the three input \code{LazyTensor}s.}
}
\value{
An object of class "LazyTensor".
}
\description{
Symbolically applies \strong{opstr} operation to \strong{x}, \strong{y} and \strong{z}.
}
\examples{
\dontrun{
# basic example
D <- 3
M <- 100
N <- 150
P <- 200
x <- matrix(runif(M * D), M, D)
y <- matrix(runif(N * D), N, D)
z <- matrix(runif(P * D), P, D)
x_i <- LazyTensor(x, index = 'i')
y_j <- LazyTensor(y, index = 'j')
z_i <- LazyTensor(z, index = 'i')

# symbolic matrix:
tern_xyz <- ternaryop.LazyTensor(x_i, y_j, z_i, "IfElse")
}
}
\author{
Chloe Serre-Combe, Amelie Vernay
}
cea2b0ede00b21627fbb3d5d5ce963f294a20af9
ffdea92d4315e4363dd4ae673a1a6adf82a761b5
/data/genthat_extracted_code/traitdataform/examples/cast.traitdata.Rd.R
38ed9c56d63859476c8762bceb44e44f7d3824cf
[]
no_license
surayaaramli/typeRrh
d257ac8905c49123f4ccd4e377ee3dfc84d1636c
66e6996f31961bc8b9aafe1a6a6098327b66bf71
refs/heads/master
2023-05-05T04:05:31.617869
2019-04-25T22:10:06
2019-04-25T22:10:06
null
0
0
null
null
null
null
UTF-8
R
false
false
1,056
r
cast.traitdata.Rd.R
library(traitdataform) ### Name: cast.traitdata ### Title: Cast long-table trait data into wide-table format ### Aliases: cast.traitdata ### ** Examples pulldata("arthropodtraits") head(arthropodtraits) dataset3 <- as.traitdata(arthropodtraits, taxa = "SpeciesID", traits = c("Body_Size", "Dispersal_ability", "Feeding_guild","Feeding_guild_short", "Feeding_mode", "Feeding_specialization", "Feeding_tissue", "Feeding_plant_part", "Endophagous_lifestyle", "Stratum_use", "Stratum_use_short"), units = c(Body_Size = "mm"), keep = c(measurementRemark = "Remark"), metadata = as.metadata( license = "http://creativecommons.org/publicdomain/zero/1.0/" ) ) head(dataset3) dd3 <-cast.traitdata(dataset3) head(dd3)
2dd02c35be9e211e0f4a5cf903af2c9e32509371
8823b744fa8328268704c81fcbd23644cc65a271
/R/simulate_data.R
78db8dcabc88f0e74b7ad67d418d1d3e611cc3e5
[ "MIT" ]
permissive
zosob/RiskAssessment
53c1b21f499fd2e82d1ab56096dbfbf2f8a81c0b
412a4c4d1f89fbe011e185f26b4f22f379bbb23b
refs/heads/master
2022-01-26T20:17:22.056667
2019-07-18T09:05:10
2019-07-18T09:05:10
null
0
0
null
null
null
null
UTF-8
R
false
false
14,522
r
simulate_data.R
#' @title Simulate Data
#' @description Simulate complete data, incomplete data, or data for the top
#'   event only, for a given fault tree. The result is a matrix with one row
#'   per observation and one column per node in the tree; the primary nodes
#'   come first and the top event node is the last column. Each entry is
#'   either 1 (the event was observed to occur), 0 (observed not to occur)
#'   or NA (not observed).
#' @param n_simulated_data Number of observations to generate.
#' @param tree_definition The fault tree structure to be used, as defined by
#'   the create_fault_tree function.
#' @param data_type One of "complete" (all nodes observed), "top_only" (only
#'   the top node observed, all others NA), "intermediate" (intermediate and
#'   top nodes observed) or "incomplete" (each node observed with
#'   probability p_obs, otherwise NA). Any other value falls back to
#'   "complete".
#' @param true_primary_p An array of the true probabilities of each primary
#'   event occurring; must match the number of primary events in the tree.
#' @param p_obs When data_type = "incomplete", the probability that a node
#'   is observed; ignored otherwise.
#' @return A matrix with n_simulated_data rows and one column per tree node.
#' @examples data <- simulate_data(n_simulated_data = 5,
#'                                 tree_definition = tree,
#'                                 data_type = "complete",
#'                                 true_primary_p = c(0.02, 0.05, 0.05, 0.1),
#'                                 p_obs = 0.5)
#' @export
# ####################################################################################################################################
# Monte Carlo simulation functions
# Contains routines that simulate complete data, incomplete data and also approximate the top event probability distribution by simulation
# ####################################################################################################################################
simulate_data <- function (n_simulated_data, tree_definition, data_type, true_primary_p, p_obs) {
  # Dispatch on the requested observation scheme.  The unnamed final branch
  # is switch()'s default, so any unrecognised data_type yields complete
  # data -- the same fall-through the original if/else chain provided.
  switch(data_type,
         top_only = simulate_top_event_data(n = n_simulated_data,
                                            p = true_primary_p,
                                            tree_definition),
         incomplete = simulate_incomplete_data(n = n_simulated_data,
                                               p = true_primary_p,
                                               p_obs = p_obs,
                                               tree_definition),
         intermediate = simulate_intermediate_data(n = n_simulated_data,
                                                   p = true_primary_p,
                                                   tree_definition),
         simulate_complete_data(n = n_simulated_data,
                                p = true_primary_p,
                                tree_definition))
}
# ####################################################################################################################################
# simulate_complete_data
#
# ####################################################################################################
# Fault-tree simulation helpers.
#
# From the usage visible in this file, a `tree` object is a list with:
#   $n_primary  number of primary (leaf) events
#   $n_nodes    total number of events (primary + intermediate + top)
#   $nodes      list of node descriptors; non-primary nodes carry $Logic ("and"/"or")
#   $children   matrix with one row per non-primary node; a 1 in column k marks node k as a child
# The top event is assumed to be the last-indexed node (column tree$n_nodes).
# ####################################################################################################

# ####################################################################################################
# simulate_complete_data
# Simulate n complete observations of the fault tree.  Each primary event is an
# independent Bernoulli draw with success probability p[i]; intermediate and top
# events are derived from their children via each node's AND/OR logic.
# Returns an n x tree$n_nodes 0/1 matrix.
# ####################################################################################################
simulate_complete_data <- function(n, p, tree) {
  data <- matrix(NA, nrow = n, ncol = tree$n_nodes)

  # Simulate the primary events (seq_len avoids the 1:0 trap when n_primary == 0)
  for (i in seq_len(tree$n_primary)) {
    data[, i] <- rbinom(n, size = 1, prob = p[i])
  }

  # Infer the values of the intermediate and top events, in index order
  # (assumes children always have lower indices than their parent).
  start <- tree$n_primary + 1
  for (j in start:tree$n_nodes) {
    if (tree$nodes[[j]]$Logic == "and") {
      data[, j] <- and_logic(d = data, child = tree$children[(j - tree$n_primary), ])
    } else {
      data[, j] <- or_logic(d = data, child = tree$children[(j - tree$n_primary), ])
    }
  }
  return(data)
}

# ####################################################################################################
# simulate_intermediate_data
# Simulate n observations in which only the non-primary (intermediate and top)
# events are observed.  The primary events are simulated internally exactly as in
# simulate_complete_data, then masked out with NA before returning.
# ####################################################################################################
simulate_intermediate_data <- function(n, p, tree) {
  data <- matrix(NA, nrow = n, ncol = tree$n_nodes)

  # Simulate the primary events
  for (i in seq_len(tree$n_primary)) {
    data[, i] <- rbinom(n, size = 1, prob = p[i])
  }

  # Infer the values of the intermediate and top events
  start <- tree$n_primary + 1
  for (j in start:tree$n_nodes) {
    if (tree$nodes[[j]]$Logic == "and") {
      data[, j] <- and_logic(d = data, child = tree$children[(j - tree$n_primary), ])
    } else {
      data[, j] <- or_logic(d = data, child = tree$children[(j - tree$n_primary), ])
    }
  }

  # Hide the primary events: only intermediate/top columns are returned observed
  data[, 1:tree$n_primary] <- NA
  return(data)
}

# ####################################################################################################
# simulate_incomplete_data
# Simulate n observations of the full tree, then make each individual matrix
# entry observed with probability p_obs (unobserved entries become NA).
# ####################################################################################################
simulate_incomplete_data <- function(n, p, p_obs, tree) {
  data <- matrix(NA, nrow = n, ncol = tree$n_nodes)

  # Simulate the primary events (p is expected to have one entry per primary event)
  for (i in seq_along(p)) {
    data[, i] <- rbinom(n, size = 1, prob = p[i])
  }

  # Infer the values of the intermediate and top events
  start <- tree$n_primary + 1
  for (j in start:tree$n_nodes) {
    if (tree$nodes[[j]]$Logic == "and") {
      data[, j] <- and_logic(d = data, child = tree$children[(j - tree$n_primary), ])
    } else {
      data[, j] <- or_logic(d = data, child = tree$children[(j - tree$n_primary), ])
    }
  }

  # Each element of the matrix is observed with probability p_obs,
  # otherwise it is replaced by NA
  observed_flag <- matrix(rbinom(n * tree$n_nodes, size = 1, prob = p_obs), nrow = n)
  data[observed_flag == 0] <- NA
  return(data)
}

# ####################################################################################################
# simulate_top_event_data
# Simulate n observations in which only the top event is observed; every other
# column of the returned matrix is NA.
# ####################################################################################################
simulate_top_event_data <- function(n, p, tree) {
  data <- matrix(NA, nrow = n, ncol = tree$n_nodes)

  # Simulate the primary events
  for (i in seq_along(p)) {
    data[, i] <- rbinom(n, size = 1, prob = p[i])
  }

  # Infer the values of the intermediate and top events
  start <- tree$n_primary + 1
  for (j in start:tree$n_nodes) {
    if (tree$nodes[[j]]$Logic == "and") {
      data[, j] <- and_logic(d = data, child = tree$children[(j - tree$n_primary), ])
    } else {
      data[, j] <- or_logic(d = data, child = tree$children[(j - tree$n_primary), ])
    }
  }

  if (length(dim(data)[2]) == 0) {
    # Degenerate case kept from the original: a tree that is just the top event.
    # NOTE(review): `data` built above always has dims, so this branch looks
    # unreachable here -- preserved for backward compatibility.
    data_top_event <- data
  } else {
    # Keep the top event column; blank out everything else with NA
    n_events <- dim(data)[2]
    data_top_event <- cbind(matrix(NA, nrow = n, ncol = n_events - 1), data[, n_events])
  }
  return(data_top_event)
}

# ####################################################################################################
# simulate_top_event_probability
# Monte Carlo approximation of the prior density of the top event probability.
# Primary event probabilities get independent beta priors whose parameters are
# the rows of `beta_params` (n_primary x 2).  Plots a kernel density estimate of
# the top event probability and prints summary statistics.
#   n_simulations  number of Monte Carlo draws
#   beta_params    n_primary x 2 matrix of beta shape parameters
#   tree           fault-tree structure (see header comment)
#   x_limits       x-axis limits for the plot
#   lty            line type for the density curve
# ####################################################################################################
simulate_top_event_probability <- function(n_simulations, beta_params, tree, x_limits, lty) {
  x_min <- 0
  x_max <- 1
  simulated_p <- matrix(nrow = tree$n_nodes, ncol = n_simulations)

  # Simulate the primary event probabilities from their beta priors
  for (i in seq_len(tree$n_primary)) {
    simulated_p[i, ] <- rbeta(n_simulations, shape1 = beta_params[i, 1], shape2 = beta_params[i, 2])
  }

  # Propagate probabilities up the tree (independence assumed by the
  # product formulas in calculate_and_scalar / calculate_or_scalar)
  start <- tree$n_primary + 1
  for (j in start:tree$n_nodes) {
    if (tree$nodes[[j]]$Logic == "and") {
      simulated_p[j, ] <- calculate_and_scalar(child = tree$children[(j - tree$n_primary), ],
                                               mysim = simulated_p)
    } else {
      simulated_p[j, ] <- calculate_or_scalar(child = tree$children[(j - tree$n_primary), ],
                                              mysim = simulated_p)
    }
  }

  # KDE of the top event probability (the last-indexed event)
  p_top <- density(simulated_p[tree$n_nodes, ], from = x_min, to = x_max)
  plot_y_max <- max(p_top$y)
  par(mfrow = c(1, 1))
  plot(p_top$x, p_top$y, type = "l", xlab = "TOP EVENT PROBABILITY", ylab = "DENSITY", main = "",
       xlim = x_limits, ylim = c(0, 1.05 * plot_y_max), lwd = 2, lty = lty)
  grid()

  cat("Prior probability of top event:", "\n")
  cat("  Mean is", mean(simulated_p[tree$n_nodes, ]), "\n")
  cat("  Standard deviation is", sd(simulated_p[tree$n_nodes, ]), "\n")
  cat("  Central 95% probability interval is (", quantile(simulated_p[tree$n_nodes, ], 0.025),
      ", ", quantile(simulated_p[tree$n_nodes, ], 0.975), ")", "\n", sep = "")
}

# ###########################################################################################
# and_logic
# Row-wise AND of the child columns of `d` flagged by `child`.
#   d      n x n_nodes 0/1 matrix (children columns must already be filled in)
#   child  0/1 indicator vector; the last column of d (the top event) is never
#          considered a child, matching the original loop bound ncol(d) - 1.
# Returns an integer 0/1 vector of length nrow(d).
# ###########################################################################################
and_logic <- function(d, child) {
  idx <- which(child[seq_len(ncol(d) - 1)] == 1)
  # Reduce with `&` keeps the original element-wise logical semantics exactly
  as.integer(Reduce(`&`, lapply(idx, function(k) d[, k])))
}

# ################################################################################
# or_logic
# Row-wise OR of the child columns of `d` flagged by `child` (see and_logic).
# ################################################################################
or_logic <- function(d, child) {
  idx <- which(child[seq_len(ncol(d) - 1)] == 1)
  as.integer(Reduce(`|`, lapply(idx, function(k) d[, k])))
}

# #####################################################################################################
# calculate_and_scalar
# Probability of an AND node: product of its children's probabilities
# (independence assumed).  `mysim` is either a probability vector (one value per
# node) or an n_nodes x n_simulations matrix of simulated probabilities.
# #####################################################################################################
calculate_and_scalar <- function(child, mysim) {
  just_a_vector <- is.null(dim(mysim))  # vector input vs. simulation matrix
  aux <- 1
  for (j in seq_along(child)) {
    if (child[j] == 1) {
      if (just_a_vector) {
        aux <- aux * mysim[j]
      } else {
        aux <- aux * mysim[j, ]
      }
    }
  }
  return(aux)
}

# #####################################################################################################
# calculate_or_scalar
# Probability of an OR node: complement of the product of the children's
# failure probabilities, 1 - prod(1 - p_child) (independence assumed).
# #####################################################################################################
calculate_or_scalar <- function(child, mysim) {
  just_a_vector <- is.null(dim(mysim))
  aux <- 1
  for (j in seq_along(child)) {
    if (child[j] == 1) {
      if (just_a_vector) {
        aux <- aux * (1 - mysim[j])
      } else {
        aux <- aux * (1 - mysim[j, ])
      }
    }
  }
  aux <- (1 - aux)
  return(aux)
}
c4910451f175881fe1f6a54d71053db446ae261f
8b2a91990c0d78af91ccae2939985bf0a1eed858
/part3.R
31de6151b9b8e28e0729e8ba0ceb0d75e6f27020
[]
no_license
i94u/Lab1_603410031
ceaae5ff38196e0dbfb4e90947396454bff11250
bacf8a73c6b774c49184163c2f9d106e4878f5c4
refs/heads/master
2021-01-10T10:13:02.200693
2015-09-24T04:08:19
2015-09-24T04:08:19
43,038,216
0
0
null
null
null
null
UTF-8
R
false
false
480
r
part3.R
Wingcrd <- c(59, 55, 53.5, 55, 52.5, 57.5, 53, 55) mean(Wingcrd); median(Wingcrd); min(Wingcrd); max(Wingcrd) Tarsus <- c(22.3, 19.7, 20.8, 20.3, 20.8, 21.5, 20.6, 21.5) mean(Tarsus); median(Tarsus); min(Tarsus); max(Tarsus) Head <- c(31.2, 30.4, 30.6, 30.3, 30.3, 30.8, 32.5, NA) mean(Head, na.rm = TRUE); median(Head, na.rm = TRUE); min(Head, na.rm = TRUE); max(Head, na.rm = TRUE) Wt <- c(9.5, 13.8, 14.8, 15.2, 15.5, 15.6, 15.6, 15.7) mean(Wt); median(Wt); min(Wt); max(Wt)
2ec01c7a58c12b7dd8eeda96b25b00e19e2a29a3
f3dffcb0cd531bb61c12e68e38dc8b4d6192d4c0
/plot1.R
6f40d08b4a102e0ac2533150615045388f6e0f26
[]
no_license
joekieffer/EDA---PM2.5
e9d8e81f9beae6d5702005ece013e520de81e846
be9d5a210fc1997c033b1bae7df56a5686d1e89e
refs/heads/master
2021-01-10T21:11:11.771063
2014-10-26T18:44:19
2014-10-26T18:44:19
null
0
0
null
null
null
null
UTF-8
R
false
false
314
r
plot1.R
#importing data source('Class_Project2.R') #data manipulation totalE <- aggregate(Emissions ~ year, NEI, sum) #printing of plot png(file="plot1.png", bg="white") barplot(height=totalE$Emissions, names.arg=totalE$year, ylab="Total emissions", xlab="Years",main="Total fine particulate matter emission") dev.off()
d765ec52059a123c4e9060ee9fcaccd7f36d9684
37cbbbbfc95eda55dc99f5637b39dba59bbddc6a
/tests/testthat/test_dCModel.R
c27c32194d952f86407f1137f31098f20f7538b4
[ "MIT" ]
permissive
djinnome/rtedem
f2f080e1eabfc01b3e448ac610aee16c6d12b547
7a3232d46410e9f29b42209165a990ce1e0bb934
refs/heads/master
2021-01-12T13:17:07.189860
2016-09-24T16:11:10
2016-09-24T16:11:10
null
0
0
null
null
null
null
UTF-8
R
false
false
5,705
r
test_dCModel.R
# Testing code for the RCMIP5 'dCModel.R' script
# Uses the testthat package
# See http://journal.r-project.org/archive/2011-1/RJournal_2011-1_Wickham.pdf
library(testthat)

# To run this code:
#   source("R/dCModel.R")
#   library(testthat)
#   test_file("tests/testthat/test_dCModel.R")

context("dCModel")

# Reactions referencing an unknown parameter ('c') or an unknown pool ('C3')
# must be rejected by dCModel.
test_that('dCModel produces expected errors',{
  expect_error(dCModel(t=0, parms=c(a=1, b=2),
                       reactionNetwork=data.frame(from=c('C1'), to='C2',
                                                  reaction=c('a+c'))))
  expect_error(dCModel(t=0, parms=c(a=1, b=2),
                       reactionNetwork=data.frame(from=c('C1'), to='C2',
                                                  reaction=c('a+C3'))))
})

# The same network encoded with character vs factor columns must give
# identical derivatives.
test_that('dCModel runs with and without factors', {
  reactionNetwork1 <- data.frame(from=c('C1', 'C1', 'C2', 'C2'),
                                 to=c(NA, 'C2', NA, 'C1'),
                                 reaction=c('1/tau1*(1-trans2)*C1', '1/tau1*trans2*C1',
                                            '1/tau2*(1-trans3)*C2', '1/tau2*trans3*C2'),
                                 type=c('decay', 'transfer', 'decay', 'transfer'),
                                 stringsAsFactors=FALSE)
  reactionNetwork2 <- data.frame(from=c('C1', 'C1', 'C2', 'C2'),
                                 to=c(NA, 'C2', NA, 'C1'),
                                 reaction=c('1/tau1*(1-trans2)*C1', '1/tau1*trans2*C1',
                                            '1/tau2*(1-trans3)*C2', '1/tau2*trans3*C2'),
                                 type=c('decay', 'transfer', 'decay', 'transfer'),
                                 stringsAsFactors=TRUE)
  y <- c(C1=1, C2=3)
  parms <- c(tau1=10, tau2=100, trans2=0.5, trans3=0.1)
  expect_equal(dCModel(t=0, y=y, parms=parms, reactionNetwork=reactionNetwork1),
               dCModel(t=0, y=y, parms=parms, reactionNetwork=reactionNetwork2))
})

# Two-pool first-order system: the derivative must equal the product of the
# decay/transfer matrix with the pool vector.
test_that('dCModel reproduces a simple first order model', {
  expect_error(dCModel(t=0), NA) # smoke: defaults should run
  expect_equal(dCModel(t=0), list(unlist(list(C1=-0.097, C2=0.020))))
  reactionNetwork <- data.frame(from=c('C1', 'C1', 'C2', 'C2'),
                                to=c(NA, 'C2', NA, 'C1'),
                                reaction=c('1/tau1*(1-trans2)*C1', '1/tau1*trans2*C1',
                                           '1/tau2*(1-trans3)*C2', '1/tau2*trans3*C2'),
                                type=c('decay', 'transfer', 'decay', 'transfer'),
                                stringsAsFactors=FALSE)
  y <- c(C1=1, C2=3)
  parms <- c(tau1=10, tau2=100, trans2=0.5, trans3=0.1)
  decayMatrix <- matrix(c(-1/parms['tau1'],
                          1/parms['tau1']*parms['trans2'],
                          1/parms['tau2']*parms['trans3'],
                          -1/parms['tau2']), nrow=2)
  ans <- as.numeric(t(decayMatrix%*%matrix(y, nrow=2)))
  names(ans) <- c('C1', 'C2')
  expect_equal(list(ans), dCModel(t=0, y=y, parms=parms, reactionNetwork=reactionNetwork))
})

# NOTE(review): this title duplicates the first test_that block above; kept so
# test output remains comparable with earlier runs.
test_that('dCModel produces expected errors',{
  expect_error(dCModel(parms=list(not=1, real=2), t=1, y=c(1,1,1,1)))
  expect_error(dCModel(parms=list(not=1, real=2), t=1, y=1))
  par <- unlist(list('v_enz'=0.2, 'km_enz'=10, 'v_up'=1, 'km_up'=2, 'cue'=0.5,
                     'basal' = 0.01, 'turnover_b'=0.5, 'turnover_e'=0.1))
  expect_error(dCModel(parms=par, t=1, y=1))
  par <- unlist(list('v_enz'=0.2, 'km_enz'=10, turnover_e=0.1))[1:2]
  y0 <- unlist(list(simple=1, complex=2, enzyme=3))
  poolAssignment <- list(simple=1, complex=2, enzyme=3)
  expect_error(dCModel(parms=par, y=y0, rateFlags=list(enz='MM'),
                       poolAssignment=poolAssignment))
})

# Michaelis-Menten enzyme kinetics: check the derivative of each pool for
# substrate-limited, enzyme-limited, and plain mass-action rate forms.
# Fixed: the three calls below previously used `reactionNetwor=` and relied on
# silent partial argument matching.
test_that('dC.biomassModel returns correct enzyme kinetics',{
  par <- unlist(list('v_enz'=0.2, 'km_enz'=10, turnover_e=0.1))
  y0 <- unlist(list(simple=1, complex=2, enzyme=3))
  renet <- data.frame(from=c('complex', 'enzyme'), to=c('simple', 'complex'),
                      reaction=c('complex*enzyme*v_enz/(km_enz+complex)', 'turnover_e*enzyme'),
                      stringsAsFactors=FALSE)
  expect_equal(unlist(dCModel(t=0, y=y0, parms=par, reactionNetwork=renet)),
               unlist(list(simple=as.numeric(par['v_enz']*y0['complex']*y0['enzyme']/(par['km_enz']+y0['complex'])),
                           complex=as.numeric(par['turnover_e']*y0['enzyme']-par['v_enz']*y0['complex']*y0['enzyme']/(par['km_enz']+y0['complex'])),
                           enzyme=as.numeric(-par['turnover_e']*y0['enzyme']))))
  renet <- data.frame(from=c('complex', 'enzyme'), to=c('simple', 'complex'),
                      reaction=c('complex*enzyme*v_enz/(km_enz+enzyme)', 'turnover_e*enzyme'),
                      stringsAsFactors=FALSE)
  expect_equal(unlist(dCModel(t=0, y=y0, parms=par, reactionNetwork=renet)),
               unlist(list(simple=as.numeric(par['v_enz']*y0['complex']*y0['enzyme']/(par['km_enz']+y0['enzyme'])),
                           complex=as.numeric(par['turnover_e']*y0['enzyme']-par['v_enz']*y0['complex']*y0['enzyme']/(par['km_enz']+y0['enzyme'])),
                           enzyme=as.numeric(-par['turnover_e']*y0['enzyme']))))
  par <- unlist(list('v_enz'=0.2, turnover_e=0.1))
  renet <- data.frame(from=c('complex', 'enzyme'), to=c('simple', 'complex'),
                      reaction=c('complex*enzyme*v_enz', 'turnover_e*enzyme'),
                      stringsAsFactors=FALSE)
  expect_equal(unlist(dCModel(t=0, y=y0, parms=par, reactionNetwork=renet)),
               unlist(list(simple=as.numeric(par['v_enz']*y0['complex']*y0['enzyme']),
                           complex=as.numeric(par['turnover_e']*y0['enzyme']-par['v_enz']*y0['complex']*y0['enzyme']),
                           enzyme=as.numeric(-par['turnover_e']*y0['enzyme']))))
})
6531935d91b9eb1d701f9f649be1fa229e34fc0c
95a0aaef3033adc33dee58dc00742c526bf67f95
/RProgramming/Assignment2/cachematrix.R
b7636cfb42d96aac18a5ddec4242eaaa191c7520
[ "MIT" ]
permissive
skyguy94/datasciencecoursera
9add9f8e6df3e0837e8817f99ad2bfd4a8f91d39
236892664c25f65f8f45c1290aa84d137e1e890d
refs/heads/master
2021-01-10T04:11:51.147895
2015-08-23T22:27:33
2015-08-23T22:27:33
36,816,880
0
0
null
null
null
null
UTF-8
R
false
false
1,094
r
cachematrix.R
## This code attempts to optimize the handling the computation of inverse ## matrices by caching the result of previous computations in memory ## and checking that cache before computing an existing result. ## This function creates a special "matrix" object that ## can cache its inverse. makeCacheMatrix <- function(x = matrix()) { inverse <- NULL set <- function(y) { x <<- y inverse <<- NULL } get <- function() x setinverse <- function(rval) inverse <<- rval getinverse <- function() inverse list(set = set, get = get, setinverse = setinverse, getinverse = getinverse) } ## This function computes the inverse of the special "matrix" ## returned by makeCacheMatrix above. If the inverse has already m ## been calculated (and the matrix has not changed), then the ## cacheSolve should retrieve the inverse from the cache. cacheSolve <- function(x, ...) { inverse <- x$getinverse() if(!is.null(inverse)) { message("getting cached data") return (inverse) } data <- x$get() inverse <- solve(data) x$setinverse(inverse) inverse }
d428d73de4c8da9a9f63d56a80d58cbe81d3440c
2bee25fa7cd8961eed2336183284b768a575e4d6
/R/plot.samplesize.R
3a729796b1f713f556560230b0ec927ed5476412
[]
no_license
annaheath/EVSI
d2a6adb3a15b4013d3e7a82de815e51a016377a3
11accaca10816c0eb0cc32cdb0ae74829ed41c01
refs/heads/master
2022-07-18T02:00:08.761837
2022-06-24T13:02:55
2022-06-24T13:02:55
102,010,781
8
1
null
2019-03-21T20:17:17
2017-08-31T14:40:52
R
UTF-8
R
false
false
3,468
r
plot.samplesize.R
##plot.samplesize###########################################################
# Plot the EVSI (Expected Value of Sample Information) for one willingness-to-pay
# threshold across increasing study sample sizes, with uncertainty bands and the
# EVPPI upper bound.  The internal logic (lwd/lty construction, 3-d array
# slicing) is order-sensitive, so the code below is unchanged; comments only.
plot.samplesize <- function(evsi,wtp=NULL,pos=c("bottomright"),CI=NULL){
  ##'Calculating the EVSI for a specific WTP giving the uncertainty bands across the different
  ##'sample sizes
  ##INPUTS
  ##'@param evsi Output of the comp.evsi.N function
  ##'@param wtp The willingness to pay value that the graphic should be produced for - it will
  ##' be chosen if wtp=NULL.
  ##'@param pos The position where the legend will be printed (default='bottomright')
  ##'@param CI The indexes that we would like to take from the CI in the evsi object.
  ##'
  ##OUTPUTS
  ##'@return EVSI The EVSI calculated for a specific wtp with uncertainty estimates.
  ##'@return A graphical representation of the uncertainty.

  # Translate a numeric (x, y) legend position into a keyword such as
  # "bottomright"; anything unrecognised falls back to a logical flag below.
  alt.legend <- pos
  if (is.numeric(alt.legend) & length(alt.legend) == 2) {
    temp <- ""
    if (alt.legend[2] == 0) temp <- paste0(temp, "bottom") else if (alt.legend[2] != 0.5) temp <- paste0(temp, "top")
    if (alt.legend[1] == 1) temp <- paste0(temp, "right") else temp <- paste0(temp, "left")
    alt.legend <- temp
    if (length(grep("^((bottom|top)(left|right)|right)$", temp)) == 0) alt.legend <- FALSE
  }
  if (is.logical(alt.legend)) {
    if (!alt.legend) alt.legend = "topright" else alt.legend = "topleft"
  }

  #Pick wtp threshold if not selected.
  # NOTE(review): `class(x) == "numeric"` tests are fragile (fail for integer
  # wtp); kept as-is to preserve behavior -- consider is.numeric() upstream.
  if(class(wtp)!="numeric"){
    wtp.select<-which.min(abs(evsi$he$kstar-evsi$attrib$wtp))
    wtp<-evsi$attrib$wtp[which.min(abs(evsi$he$kstar-evsi$attrib$wtp))]
  }
  if(class(wtp)=="numeric"){
    wtp.select<-which.min(abs(wtp-evsi$attrib$wtp))
    wtp<-evsi$attrib$wtp[which.min(abs(wtp-evsi$attrib$wtp))]
  }
  # Resolve which credible-interval levels to draw (all of them by default)
  if(class(CI)=="numeric"){
    CI.select<-CI
    CI<-evsi$attrib$CI[CI]
  }
  if(is.null(CI)){
    CI.select<-1:length(evsi$attrib$CI)
    CI<-evsi$attrib$CI
  }
  # A character N means a single fixed design -- this plot needs a range of N
  if(class(evsi$attrib$N)=="character"){
    stop("This plot gives the EVSI for increasing sample size. 
         Do not use on a single design.")
  }

  CI.length<-length(CI)
  #Extracting the EVSI values for the wtp of interest
  EVSI<-array(NA,dim=c(length(evsi$attrib$N),1,CI.length))
  EVSI[,1,(1:CI.length)]<-rbind(evsi$evsi[,wtp.select,CI.select])
  #Set up the plot (empty frame; series are added with points() below)
  plot(1,1,ylim=c(min(EVSI)*0.95,max(EVSI)*1.05),xlim=c(min(evsi$attrib$N),max(evsi$attrib$N)),
       col="white",xlab=expression("Sample Size"),ylab="Per Person EVSI",oma=c(0,0,-1,0),main="Expected Value of Sample Information across Sample Size")
  # Build symmetric line-width/line-type ramps so the central estimate is
  # heaviest and outer CI bands get progressively lighter; the final element of
  # each vector styles the EVPPI line added via abline() below.
  if(CI.length%%2==1){
    lwd<-c(1:ceiling(CI.length/2),(ceiling(CI.length/2)-1):1,1)
    lty<-c(ceiling(CI.length/2):1,2:ceiling(CI.length/2),1)
  }
  if(CI.length%%2==0){
    lwd<-c(1:(CI.length/2),(CI.length/2):1,1)
    lty<-c((CI.length/2):1,2:(CI.length/2),1)
  }
  # Few sample sizes: draw points; many: draw lines
  if(length(evsi$attrib$N)<15){
    for(l in 1:CI.length){
      points(evsi$attrib$N,EVSI[,,l],pch=19, lwd=lwd[l],lty=lty[l])
    }
  }
  if(length(evsi$attrib$N)>=15){
    for(l in 1:CI.length){
      points(evsi$attrib$N,EVSI[,,l],type="l", lwd=lwd[l],lty=lty[l])
    }
  }
  # EVPPI horizontal reference line.  Note `<--` below is `<-` followed by a
  # unary minus (fitted.PI is assigned the negated expression).
  fitted.PI<--(wtp*evsi$evppi$fitted.e-evsi$evppi$fitted.c)
  abline(h=mean(apply(fitted.PI,1,max))-max(apply(fitted.PI,2,mean)),col="springgreen",lwd=lwd[CI.length+1],lty=lty[CI.length+1])
  legend(alt.legend,c(as.character(CI),"EVPPI"), col=c(rep("black",CI.length),"springgreen"),lwd=lwd,lty=lty, box.lwd = 0,box.col = "white",bg = "white")
  box()
}
c4a83149e6d22334ebe3636808b34810dba0863a
af982fba9c4fab24bf06e810720de721a7c43bd2
/data-raw/weather data/make weather data.R
5735741262d425469607cfc692bd0c3f57f3cff2
[ "Apache-2.0" ]
permissive
Schmidtpk/Covid
398dcc70bc12d91bb588939eedd3e7248c547e23
0efcfe093be2b8b66799930185b580bee1dfe529
refs/heads/master
2021-04-11T09:36:33.532750
2020-04-25T08:27:00
2020-04-25T08:27:00
249,008,243
2
1
null
null
null
null
UTF-8
R
false
false
1,079
r
make weather data.R
library(httr) vars <- c("cloud","tMax","tMin","precip","humidity","wind") df <- NULL for(var.cur in vars) { df.cur <-as_tibble(read.csv( text=as.character( GET( paste0("https://raw.githubusercontent.com/imantsm/COVID-19/master/csv/", var.cur, ".csv")), header=T))) cat(var.cur,sum(grepl("Other",df.cur$Province.State))) #sum(grepl("ther",df.cur$Province.State)> df.cur.long <- df.cur %>% tidyr::pivot_longer(starts_with("X"),names_to = "date") df.cur.long$date <- as.Date(df.cur.long$date, "X%m.%d.%y") df.cur.long$var <- var.cur df <- rbind(df.cur.long,df) } weather <- df %>% rename( region = Province.State, country = Country.Region ) weather <- weather %>% tidyr::pivot_wider( names_from = "var", values_from = "value" ) #summary(weather$region) weather$country<-as.character(weather$country) weather$region<-as.character(weather$region) weather$region[weather$region==""]<-NA weather$region[weather$region==weather$country] <- NA #use_data(weather,overwrite = T)
b3d2ca6ad251c578d9327d6ce91a605c579e7fee
a55c6e1f121a7114d238437cb3ff7002e31c4d42
/tests/testthat/test-overscope.R
9a596f5a45f3b70744022303f5afc4ba9e656aee
[ "MIT" ]
permissive
mohamedndiaye/dplyr
93af925b8144d462cbeb3d94ff5ca9b0e9c94b99
12e76215b01cea302d26d600a17549d5019026d3
refs/heads/master
2020-03-06T16:02:24.867469
2017-04-28T14:28:47
2017-04-28T14:28:47
126,966,112
1
1
null
2018-03-27T09:56:37
2018-03-27T09:56:36
null
UTF-8
R
false
false
309
r
test-overscope.R
context("overscope") test_that(".data has strict matching semantics (#2591)", { expect_error( data_frame(a = 1) %>% mutate(c = .data$b), "Column `b`: not found in data" ) expect_error( data_frame(a = 1:3) %>% group_by(a) %>% mutate(c = .data$b), "Column `b`: not found in data" ) })
aa53f30a8b749cced8084d6fe6f5fb375650623e
ffdea92d4315e4363dd4ae673a1a6adf82a761b5
/data/genthat_extracted_code/igraph/examples/vertex_attr.Rd.R
c3a9c580aabad670b314007a92f5b0edc3b7179a
[]
no_license
surayaaramli/typeRrh
d257ac8905c49123f4ccd4e377ee3dfc84d1636c
66e6996f31961bc8b9aafe1a6a6098327b66bf71
refs/heads/master
2023-05-05T04:05:31.617869
2019-04-25T22:10:06
2019-04-25T22:10:06
null
0
0
null
null
null
null
UTF-8
R
false
false
337
r
vertex_attr.Rd.R
library(igraph) ### Name: vertex_attr ### Title: Query vertex attributes of a graph ### Aliases: vertex_attr get.vertex.attribute vertex.attributes ### ** Examples g <- make_ring(10) %>% set_vertex_attr("color", value = "red") %>% set_vertex_attr("label", value = letters[1:10]) vertex_attr(g, "label") vertex_attr(g) plot(g)
8a0ccce6475661429a346bdbad4b25a2ea5ad341
1462094b01791141a5e21727aec8c15c205ee28f
/ui.R
dff75b82ecaf1903385d93694a3146a9d3c2ce49
[]
no_license
dcarvalho/analise_dados_abertos
f668b58d007f8b316236c5d81aa048863d738237
c2b2cf6c9be48fa88111e4461720dd4db122e255
refs/heads/master
2020-04-15T11:49:49.358424
2019-04-03T17:59:10
2019-04-03T17:59:10
164,646,914
0
0
null
null
null
null
UTF-8
R
false
false
2,077
r
ui.R
library(shiny) library(DT) library(plotly) d<- as.POSIXlt(Sys.Date()) data_fim<-d d$year<-d$year-5 data_inicio<-as.Date(d) shinyUI( fluidPage(style = "text-align:center;", # Application title titlePanel("Matriz de Informação Social - Dados Abertos"), fluidRow(style = "background-color:lightgray;text-align:left;", column(7, selectInput("campo", "Indicador:", choices = campos, width = '100%' ) ) , column(2, selectInput("uf", "UF:", choices = c( 'BRASIL'='*', 'AC'='12', 'AL'='27', 'AM'='13', 'AP'='16', 'BA'='29', 'CE'='23', 'DF'='53', 'ES'='32', 'GO'='52', 'MA'='21', 'MG'='31', 'MS'='50', 'MT'='51', 'PA'='15', 'PB'='25', 'PE'='26', 'PI'='22', 'PR'='41', 'RJ'='33', 'RN'='24', 'RO'='11', 'RR'='14', 'RS'='43', 'SC'='42', 'SE'='28', 'SP'='35', 'TO'='17' ) ) ) , column(3, dateRangeInput('ano',label = "Período da análise: ",format = "yyyy",language="pt",start = data_inicio, end=data_fim,startview = "year",separator = " - ") ) ), hr(), fluidRow(style = "text-align:left;", column(12, plotlyOutput("plotly") ) ), hr(), fluidRow(style = "text-align:left;", column(12, dataTableOutput('tbl') ) ) ) )
9d56c21c3bab72a41eb6987f6dfe53b8f96b1acf
e2f262ced6cc36bebd9ff8e142ca082f8904d8a2
/R/uscb_acs_5ye.R
c94aaec453f421ca14f2655ff94f8df234bdb343
[ "MIT" ]
permissive
josesamos/geogenr
408949b2801df306ac8e0c209a09c737c11fbae3
448e4c09e052da5d34e2539fd5ad2735c4cbc8fc
refs/heads/master
2023-01-22T02:00:39.706397
2020-11-19T11:32:37
2020-11-19T11:32:37
303,603,346
0
0
null
null
null
null
UTF-8
R
false
false
4,061
r
uscb_acs_5ye.R
#' `uscb_acs_5ye` S3 class
#'
#' Internal low-level constructor that creates new objects with the correct
#' structure: the local folder, the ACS 5-year release years, the Census
#' download URL template, and the catalogue of downloadable area variables.
#'
#' @param folder A string.
#'
#' @importFrom magrittr %>%
#' @name %>%
#'
#' @return A `uscb_acs_5ye` object.
#'
#' @keywords internal
new_uscb_acs_5ye <- function(folder = "") {
  years <- 2010:2018
  # %d is substituted with the release year by sprintf() at download time
  url <- "https://www2.census.gov/geo/tiger/TIGER_DP/%dACS/"
  extension <- ".gdb.zip"

  # type = 1: legal and administrative areas
  legal_and_administrative_areas <- data.frame(
    type = 1,
    name = c(
      "American Indian/Alaska Native/Native Hawaiian Area",
      "Alaska Native Regional Corporation",
      "Congressional District (116th Congress)",
      "County",
      "Place",
      "Elementary School District",
      "Secondary School District",
      "Unified School District",
      "State",
      "State Legislative Districts - Upper Chamber",
      "State Legislative Districts - Lower Chamber",
      "Zip Code Tabulation Area"
    ),
    url = c(
      "ACS_%d_5YR_AIARES",
      "ACS_%d_5YR_ANRC",
      "ACS_%d_5YR_CD_116",
      "ACS_%d_5YR_COUNTY",
      "ACS_%d_5YR_PLACE",
      "ACS_%d_5YR_SDE",
      "ACS_%d_5YR_SDS",
      "ACS_%d_5YR_SDU",
      "ACS_%d_5YR_STATE",
      "ACS_%d_5YR_SLDU",
      "ACS_%d_5YR_SLDL",
      "ACS_%d_5YR_ZCTA"
    )
  )

  # type = 2: statistical areas
  statistical_areas <- data.frame(
    type = 2,
    name = c(
      "Tribal Block Group",
      "Tribal Census Tract",
      "New England City and Town Area",
      "New England City and Town Area Division",
      "Combined New England City and Town Area",
      "Metropolitan/Micropolitan Statistical Area",
      "Metropolitan Division",
      "Combined Statistical Area",
      "Public Use Microdata Area",
      "Urban Area"
    ),
    url = c(
      "ACS_%d_5YR_TBG",
      "ACS_%d_5YR_TTRACT",
      "ACS_%d_5YR_NECTA",
      "ACS_%d_5YR_NECTADIV",
      "ACS_%d_5YR_CNECTA",
      "ACS_%d_5YR_MSA",
      "ACS_%d_5YR_METDIV",
      "ACS_%d_5YR_CSA",
      "ACS_%d_5YR_PUMA",
      "ACS_%d_5YR_UA"
    )
  )

  acs <- list(
    folder = folder,
    years = years,
    url = url,
    extension = extension,
    variables = rbind(legal_and_administrative_areas, statistical_areas)
  )

  structure(acs, class = "uscb_acs_5ye")
}


#' `uscb_acs_5ye` S3 class
#'
#' A `uscb_acs_5ye` object is created from a given local folder.
#'
#' @param folder A string.
#'
#' @return A `uscb_acs_5ye` object.
#'
#' @family data collection functions
#'
#' @examples
#'
#' folder <- "../geodimension/data/us/"
#' ua <- uscb_acs_5ye(folder = folder)
#'
#' folder <- system.file("extdata", package = "geogenr")
#' folder <- stringr::str_replace_all(paste(folder, "/", ""), " ", "")
#' ua <- uscb_acs_5ye(folder = folder)
#'
#' @export
uscb_acs_5ye <- function(folder = "") {
  new_uscb_acs_5ye(folder)
}

# -----------------------------------------------------------------------

#' url_file_exists
#'
#' Check whether a file exists at a URL by issuing an HTTP HEAD request
#' (no download).
#' https://stackoverflow.com/questions/60318926/how-to-check-if-file-exists-in-the-url-before-use-download-file-in-r
#'
#' @param url A string, the URL to check.
#'
#' @return A boolean.
#'
#' @keywords internal
url_file_exists <- function(url) {
  head_url <- httr::HEAD(url)
  (head_url$all_headers[[1]]$status == 200)
}

# -----------------------------------------------------------------------

#' get_geodatabase_url
#'
#' Build the download URL of a geodatabase for a named area variable and year.
#' The year is substituted twice: once in the directory and once in the file
#' name template.
#'
#' @param url A string, the base URL template (contains one `%d`).
#' @param extension A string, the file extension (e.g. ".gdb.zip").
#' @param names A data frame of variables with `name` and `url` columns.
#' @param name A string, the variable name to look up.
#' @param year An integer, the ACS release year.
#'
#' @return A string, the formatted geodatabase URL.
#'
#' @keywords internal
get_geodatabase_url <- function(url, extension, names, name, year) {
  name <- names[names$name == name, "url"]
  url <- paste(url, name, extension, sep = "")
  sprintf(url, year, year)
}

# -----------------------------------------------------------------------

#' get_geodatabase_file
#'
#' Build the local file path of a geodatabase for a named area variable and
#' year.
#'
#' @param folder A string, the local folder.
#' @param extension A string, the file extension (e.g. ".gdb.zip").
#' @param names A data frame of variables with `name` and `url` columns.
#' @param name A string, the variable name to look up.
#' @param year An integer, the ACS release year.
#'
#' @return A string, the local file path.
#'
#' @keywords internal
get_geodatabase_file <- function(folder, extension, names, name, year) {
  name <- names[names$name == name, "url"]
  file <- paste(folder, name, extension, sep = "")
  sprintf(file, year)
}
fb0a0a40d6f321941c78f0743c60a7e762585498
9ee587651e82c3efdf58036364c197829ffa57e1
/Chapter1_FineScaleAcousticSurvey/nmds_birds_v.laptopt.R
ef6f8da4081bbd470eefc41f87aea477d837cbd7
[ "Apache-2.0" ]
permissive
QutEcoacoustics/spatial-acoustics
7f0fd2af6663200ab529a2f8979eec56a0bf2e40
5e8eaba29576a59f85220c8013d0b083ddb70592
refs/heads/master
2023-04-15T09:50:44.063038
2023-03-14T23:36:36
2023-03-14T23:36:36
222,621,976
0
1
null
null
null
null
UTF-8
R
false
false
16,105
r
nmds_birds_v.laptopt.R
# NMDS #
# Marina Scarpelli #
# 07.01.2020 #
# Setup and data preparation for the NMDS / PERMANOVA analysis below:
# loads acoustic indices and field-measured landscape variables, cleans
# them, and joins them into `df_newveg1`.

rm(list = ls())

library(tidyverse)
library(ggplot2)
library(stringi)
library(car)
library(data.table)
library(MuMIn)
library(plotly)

#Reading and preparing the data ####

# Build an absolute path under the project root from path components.
getDataPath <- function (...) {
  return(file.path("C:/Users/scarp/OneDrive - Queensland University of Technology/Documents/PhD/Project", ...))
}

chapter <- "Chapter1_FineScaleAcousticSurvey"

#NMDS to decrease number of land variables
library(vegan)
set.seed(123)

# Acoustic data per recording, with a day period derived from the hour
# (>4-11 morning, >11-19 afternoon, otherwise night).
nmds_df <- read.csv(getDataPath(chapter, "27.02.2021_CompleteData.csv")) %>%
  mutate(., period = case_when(hour > 4 & hour <= 11 ~ "morning",
                               hour > 11 & hour <= 19 ~ "afternoon",
                               T ~ "night"))

land_var <- read.csv(getDataPath("Fieldwork_Bowra", "26.02.2021_dataSAVINDVI_v.laptop.csv"))

land_var$aug_ndvi_avg <- as.numeric(land_var$aug_ndvi_avg)

# Clean the landscape variables: drop rows without a vegetation description,
# fill missing nearest-tree/nearest-shrub distances with 100 and missing
# heights with 0, zero the remaining NAs, then average the four cardinal
# (N/S/E/W) measurements into *_AVG columns and keep the analysis columns.
land_var <- filter(land_var, NewVegDescription != "") %>%
  mutate_at(., vars(NT_N_DIST, NT_W_DIST, NT_S_DIST, NT_E_DIST, NS_N_DIST, NS_W_DIST, NS_S_DIST, NS_E_DIST), ~ replace(., is.na(.), 100)) %>%
  mutate_at(., vars(NT_N_HEIGHT, NT_S_HEIGHT, NT_W_HEIGHT, NT_E_HEIGHT, NS_N_HEIGHT, NS_S_HEIGHT, NS_E_HEIGHT, NS_W_HEIGHT), ~replace(., is.na(.), 0)) %>%
  mutate_at(., vars(GC_NF_W, Slope, Aspect, Elevation, DistWater, CanopyCover, ShrubCover, CanopyHeight, SubcanopyHeight, aug_ndvi_avg, aug_savi_avg), ~replace(., is.na(.), 0)) %>%
  mutate(NT_DIST_AVG = (NT_N_DIST + NT_S_DIST + NT_E_DIST + NT_W_DIST)/4) %>%
  mutate(NT_HEIGHT_AVG = (NT_N_HEIGHT + NT_S_HEIGHT + NT_E_HEIGHT + NT_W_HEIGHT)/4) %>%
  mutate(NS_DIST_AVG = (NS_N_DIST + NS_S_DIST + NS_E_DIST + NS_W_DIST)/4) %>%
  mutate(NS_HEIGHT_AVG = (NS_N_HEIGHT + NS_S_HEIGHT + NS_E_HEIGHT + NS_W_HEIGHT)/4) %>%
  mutate(GC_NG_AVG = (GC_NG_N + GC_NG_S + GC_NG_E + GC_NG_W)/4) %>%
  mutate(GC_NF_AVG = (GC_NF_N + GC_NF_S + GC_NF_E + GC_NF_W)/4) %>%
  mutate(GC_BS_AVG = (GC_BS_N + GC_BS_S + GC_BS_E + GC_BS_W)/4) %>%
  mutate(GC_LT_AVG = (GC_LT_N + GC_LT_S + GC_LT_E + GC_LT_W)/4) %>%
  mutate(GC_SH_AVG = (GC_SH_N + GC_SH_S + GC_SH_E + GC_SH_W)/4) %>%
  select(., NT_DIST_AVG, NT_HEIGHT_AVG, NS_DIST_AVG, NS_HEIGHT_AVG, GC_NG_AVG, GC_NF_AVG, GC_BS_AVG, GC_SH_AVG, aug_ndvi_avg, CanopyCover, ShrubCover, CanopyHeight, SubcanopyHeight, Slope, Aspect, Elevation, DistWater, Point, NewVegDescription, VegDescription2) %>%
  droplevels(.)

# Join the vegetation descriptions onto the acoustic data and range-rescale
# a set of columns selected BY POSITION.
# NOTE(review): the positional indices c(75:82, 89, 91, 95, 97, 99:103)
# silently break if the column layout of the merged frame changes — verify
# against the current CSVs before re-running.
df_newveg1 <- select(land_var, Point, NewVegDescription, VegDescription2) %>%
  merge(., nmds_df, by.x = "Point", by.y = "point") %>%
  mutate_at(c(75:82, 89, 91, 95, 97, 99:103), decostand, method = "range")

rownames(land_var) <- land_var$Point

# Commented-out exploratory code kept for reference.
# #####
#
# bird_df <- filter(nmds_df, class_model == "bird") %>%
#   group_by(point) %>%
#   summarise(., mean_bird = mean(mean), sd_bird = mean(sd), mean_bird_temp = mean(mean_temp)) %>%
#   merge(., land_var, by.x = "point", by.y = "Point", all.x = T, all.y = F) %>%
#   filter(., point != "WAA2O" | point != "WBA2O") %>%
#   #mutate_at(c(2:22), decostand, "range") %>%
#   droplevels(.)
#
# insect_df <- filter(nmds_df, class_model == "insect") %>%
#   group_by(point) %>%
#   summarise(., mean_insect = mean(mean), sd_insect = mean(sd), mean_insect_temp = mean(mean_temp)) %>%
#   merge(., bird_df, by.x = "point", by.y = "point", all.x = T, all.y = T) %>%
#   filter(., point != "WAA2O" | point != "WBA2O") %>%
#   mutate_at(c(2:22), decostand, "range") %>%
#   droplevels(.)
#
# #####
#
# rownames(nmds_df) <- nmds_df$id
# df_test <- select(nmds_df, mean, sd, NT_DIST_AVG, SubcanopyHeight) %>%
#   mutate_at(c(1:ncol(.)), decostand, "range") %>%
#   droplevels(.)
# Commented-out exploratory NMDS code kept for reference.
#
# #NT_DIST_AVG, NS_DIST_AVG, GC_NG_AVG, GC_NF_AVG, GC_BS_AVG, GC_SH_AVG, aug_ndvi_avg, CanopyCover, ShrubCover, CanopyHeight, SubcanopyHeight, Slope, Aspect, Elevation, DistWater, mean, sd) %>%
#
# nmds_mean <- metaMDS(df_test, k = 2, trymax = 100)
#
# nmds_mean
# stressplot(nmds_mean)
#
# plot(nmds_mean)
# ordiplot(nmds_mean,type="n")
# ordihull(nmds_mean, groups=nmds_df$class_model, lty=2) col=cores2,
# #orditorp(nmds_mean,display="species",col="red",air=0.01)
# #orditorp(nmds_mean,display="sites",cex=0.9,air=0.01)
#
# plot(nmds_mean, type="n")
# points(resultado.nmds, col=cores2[dados$Bloco_Amostral], pch=16)
# ordihull(resultado.nmds, groups=dados$Bloco_Amostral, col=cores2, lty=2)
# text(resultado.nmds, labels = row.names(bio), pos=4)
#
# scores <- nmds_mean[["species"]]
#
# adonis(nmds_mean[["dist"]]~nmds_df$class_model)

nmds_df$aug_ndvi_avg <- as.numeric(nmds_df$aug_ndvi_avg)

#A PERMANOVA:
# Commented-out mean/sd NMDS + PERMANOVA exploration kept for reference.
# colours <- c("#CCFF00", "#CCCC00", "#CC9900", "#CC6600", "#CC3300", "#FF00FF", "#660000", "#663399", "#666600", "#669900", "#66CC00", "#66FF00", "#009999", "#0066FF", "#000000")
# rownames(insect_df) <- insect_df$point
#
# df_test <- select(insect_df, mean_insect, mean_bird, NT_DIST_AVG, NT_HEIGHT_AVG, CanopyHeight, Elevation)
#
# nmds_mean <- metaMDS(df_test, k = 2, try = 100)
#
# nmds_mean
# stressplot(nmds_mean)
#
# plot(nmds_mean)
# ordiplot(nmds_mean,type="n")
# orditorp(nmds_mean,display="species",col="red",air=0.01)
# orditorp(nmds_mean,display="sites",cex=0.9,air=0.01)
#
# scores <- nmds_mean[["species"]]
#
# points(nmds_mean, col= colours[land_var$NewVegDescription], pch=16)
# #ordihull(nmds_mean, groups= land_var$NewVegDescription, lty=2)
# #legend("topleft", legend = as.factor(land_var$NewVegDescription), fill = colours, cex = 0.5)
#
# adonis(df_test~insect_df$NewVegDescription)
# anosim(df_test, insect_df$NewVegDescription)
#
# ######
#
# rownames(insect_df) <- insect_df$point
#
# df_test <- select(insect_df, sd_insect, sd_bird, NT_DIST_AVG, NT_HEIGHT_AVG, CanopyHeight, Elevation)
#
# nmds_sd <- metaMDS(df_test, k = 3, try = 100)
#
# nmds_sd
# stressplot(nmds_sd)
#
# plot(nmds_sd)
# ordiplot(nmds_sd,type="n")
# orditorp(nmds_sd,display="species",col="red",air=0.01)
# orditorp(nmds_sd,display="sites",cex=0.9,air=0.01)
#
# points(nmds_sd, col= colours[insect_df$NewVegDescription], pch=16)
# #ordihull(nmds_mean, groups= land_var$NewVegDescription, lty=2)
# legend("topleft", legend = as.factor(insect_df$NewVegDescription), fill = colours, cex = 0.5)
#
# scores <- nmds_sd[["species"]]
#
# adonis(df_test~insect_df$NewVegDescription)
# anosim(df_test, insect_df$NewVegDescription)

#Complete model - birds and insects
# Setup for the exhaustive model-selection loop below: build every
# combination of the six candidate predictors for the bird data.

df_newveg <- filter(df_newveg1, class_model == "bird")

row.names(df_newveg) <- df_newveg$id

colours <- c("#fdae61", "#8c510a", "#b8e186", "#f46d43", "#4d9221")

# Candidate predictor columns for the NMDS.
dep_var <- select(df_newveg, mean_temp, SubcanopyHeight, DistWater, aug_ndvi_avg, Elevation, GC_NF_AVG)

# All combinations of the candidate columns, sizes 1..6; one row per
# combination, NA-padded to six columns by ldply.
all <- plyr::ldply(1:6, function(x)t(combn(colnames(dep_var), x)))

all <- rename(all, col1 = 1, col2 = 2, col3 = 3, col4 = 4, col5 = 5, col6 = 6)

# Coerce every column to character so rows can be used as column selectors.
for (c in 1:ncol(all)) {
  all[,c] <- as.character(all[,c])
}

# Persist the combination key so result files can be mapped back to models.
write.csv(all, getDataPath(chapter, "Fig1", "NMDS_BIRDS_OPT", "key_birds_nmds_take2.csv"))

test <- as.list(all[seq(1,nrow(all), 16),])

# Single-row templates for collecting per-model diagnostics.
scores <- data.frame(
  model_var = NA,
  conv = NA,
  stress = NA,
  permanova_veg_F = NA,
  permanova_veg_R2 = NA,
  permanova_veg_p = NA,
  permanova_class_F = NA,
  permanova_class_R2 = NA,
  permanova_class_p = NA)

scores_temp <- data.frame(
  model_var = NA,
  conv = NA,
  stress = NA,
  permanova_veg_F = NA,
  permanova_veg_R2 = NA,
  permanova_veg_p = NA,
  permanova_class_F = NA,
  permanova_class_R2 = NA,
  permanova_class_p = NA)

# Plot styling: one colour / line type per vegetation class (5 classes),
# and a two-level pair for the bird/insect class plots.
colours <- c("#fdae61", "#8c510a", "#b8e186", "#f46d43", "#4d9221")
line_type <- c(5, 4, 3, 2, 1)
colours2 <- c("#542788", "#b35806")
line_type2 <- c(1, 2)

summary(df_newveg)

df_newveg$VegDescription2 <- as.factor(df_newveg$VegDescription2)

rm(outcome)

#jahs <- all[c(1,
# 1001, 10001, 20001, 30001, 40001, 50001, 60001, 65099, 65534),]

# Exhaustive model selection: for every predictor combination in `all`,
# fit an NMDS (k = 2) and a PERMANOVA against vegetation class, save an
# ordination plot and a one-row diagnostics CSV. Failures (e.g. metaMDS
# not converging for a combination) are skipped via tryCatch.
for (i in 1:nrow(all)){

  outcome <- NULL
  model <- NULL
  PERMANOVA <- NULL
  perm <- NULL

  # Column names of the i-th combination, NAs removed.
  outcome <- as.character(all[i,]) %>%
    na.exclude(.) %>%
    paste(., sep = ",")

  skip_to_next <- FALSE

  tryCatch({

    model <- metaMDS(df_newveg[outcome], dist = "bray", k = 2, try = 100)

    PERMANOVA <- adonis(df_newveg[outcome]~df_newveg$VegDescription2)

    # Ordination plot coloured/hulled by vegetation class.
    png(filename=getDataPath(chapter, "Fig1", "NMDS_BIRDS_OPT", paste(rownames(all[i,]), "veg_TAKE2", ".png", sep = "")))

    plot(model)
    ordiplot(model,type="n")
    orditorp(model,display="sites",cex=0.9,air=0.01, labels = F)
    points(model, col= colours[df_newveg$VegDescription2], pch = 16)
    orditorp(model, display="species",col="red",air=0.5)
    ordihull(model, groups= df_newveg$VegDescription2, lty = line_type)
    legend("topleft", legend = unique(df_newveg$VegDescription2), fill = colours, cex = 0.6)
    legend("bottomleft", legend = unique(df_newveg$VegDescription2), lty = line_type, cex = 0.6)
    dev.off()

    #This one only for the complete models - with all groups
    # perm<-adonis(df_newveg[outcome]~df_newveg$class_model)
    #
    # png(filename=getDataPath("Chapter1_FineScaleAcousticSurvey", "Fig1", "NMDS_birds", paste(rownames(all[i,]), "class", ".png", sep = "")))
    #
    # plot(model)
    # ordiplot(model,type="n")
    # orditorp(model,display="sites",cex=0.9,air=0.01, labels = F)
    # points(model, col= colours2[df_newveg$class_model], pch=16)
    # ordihull(model, groups= df_newveg$class_model, lty=line_type2[df_newveg$class_model])
    # legend("topleft", legend = unique(df_newveg$class_model), fill = colours2, cex = 0.6)
    # legend("bottomleft", legend = unique(df_newveg$class_model), lty = line_type2, cex = 0.6)
    # dev.off()

    # Record convergence, stress and the vegetation PERMANOVA statistics.
    scores_temp$model_var <- as.character(rownames(all[i,]))
    scores_temp$conv <- as.character(model$converged)
    scores_temp$stress <- as.numeric(model$stress)
    scores_temp$permanova_veg_F <- as.numeric(PERMANOVA$aov.tab$F.Model[1])
    scores_temp$permanova_veg_R2 <- as.numeric(PERMANOVA$aov.tab$R2[1])
    scores_temp$permanova_veg_p <- as.numeric(PERMANOVA$aov.tab$Pr[1])
    # scores_temp$permanova_class_F <- as.numeric(perm$aov.tab$F.Model[1])
    # scores_temp$permanova_class_R2 <- as.numeric(perm$aov.tab$R2[1])
    # scores_temp$permanova_class_p <- as.numeric(perm$aov.tab$Pr[1])

    # One CSV per model; the in-memory accumulation (rbind) is disabled, so
    # results are reassembled from these files after the loop.
    write.csv(scores_temp, getDataPath("Chapter1_FineScaleAcousticSurvey", "Fig1", "NMDS_BIRDS_OPT", paste(rownames(all[i,]), "veg_TAKE2", ".csv", sep = "")))
  },
  #scores <- rbind(scores, scores_temp) },
  error = function(e) {skip_to_next <<-TRUE })

  if(skip_to_next) { next }
}

# NOTE(review): `scores` is never filled inside the loop (the rbind above is
# commented out), so this writes only the NA template row — verify intended.
write.csv(scores, getDataPath(chapter, "Fig1", "NMDS_BIRDS_OPT", "scores_nmds_birds.csv"))

# Reassemble the per-model CSVs and keep converged models with stress < 0.1.
# NOTE(review): the loop above writes "veg_TAKE2" files but this collects
# "veg_TAKE4.csv" — confirm which run is being summarised.
files <- list.files(getDataPath(chapter, "Fig1", "NMDS_BIRDS_OPT"), pattern = "veg_TAKE4.csv", full.names = T)

results <- lapply(files, read.csv) %>%
  map(., select, model_var, conv, stress, permanova_veg_F, permanova_veg_R2, permanova_veg_p) %>%
  do.call(rbind, .) %>%
  filter(., conv == "TRUE") %>%
  filter(., stress < 0.1) %>%
  write.csv(getDataPath(chapter, "Fig1", "NMDS_BIRDS_OPT", "filtered_nmds_birds4.csv"), row.names = F)

#Birds:
#Best model
# Interactive (plotly) ordination for the best bird model (two predictors).

df_birds <- filter(df_newveg1, class_model == "bird") %>%
  select(., SubcanopyHeight, NS_DIST_AVG)

best_birds <- metaMDS(df_birds, dist = "bray", k = 2, try = 100)

df_birds <- filter(df_newveg1, class_model == "bird")

data.scores <- as.data.frame(scores(best_birds))
# NOTE(review): rownames() of a metaMDS object — probably meant
# rownames(data.scores); verify.
data.scores$id <- rownames(best_birds)
data.scores$veg <- df_birds$VegDescription2
data.scores$point <- df_birds$Point
head(data.scores$veg)

species.scores <- as.data.frame(scores(best_birds, "species"))
species.scores$landvar <- rownames(species.scores)
head(species.scores)

p_bestbirds <- plot_ly()
p_bestbirds <- add_trace(p_bestbirds, name = data.scores$veg, type = "scatter", x = data.scores$NMDS1, y = data.scores$NMDS2, text = data.scores$point)
p_bestbirds <- add_trace(p_bestbirds, name = "Landscape attributes", mode = "text", x = species.scores$NMDS1, y = species.scores$NMDS2, text = species.scores$landvar)
p_bestbirds <- layout(p_bestbirds, title = "Birds - best model (Stress: 0.07)")

#Plotly - birds all land variables
# Same plot for the bird model with all six landscape variables.

df_birds <- filter(df_newveg1, class_model == "bird") %>%
  select(., SubcanopyHeight, DistWater, aug_ndvi_avg , NS_DIST_AVG, GC_NF_AVG, GC_BS_AVG)

complete_birds <- metaMDS(df_birds, dist = "bray", k = 2, try = 100)

df_birds <- filter(df_newveg1, class_model == "bird")

data.scores <- as.data.frame(scores(complete_birds))
data.scores$id <- rownames(complete_birds)
data.scores$veg <- df_birds$VegDescription2
data.scores$point <- df_birds$Point
head(data.scores$veg)

species.scores <- as.data.frame(scores(complete_birds, "species"))
species.scores$landvar <- rownames(species.scores)
head(species.scores)

p_completebirds <- plot_ly()
p_completebirds <- add_trace(p_completebirds, name = data.scores$veg, type = "scatter", x = data.scores$NMDS1, y = data.scores$NMDS2, text = data.scores$point)
p_completebirds <- add_trace(p_completebirds, name = "Landscape attributes", mode = "text", x = species.scores$NMDS1, y = species.scores$NMDS2, text = species.scores$landvar)
p_completebirds <- layout(p_completebirds, title = "Birds - all landscape variables (Stress:0.19)")

#Insects:
#Best model
# Interactive ordination for the best insect model.

df_insects <- filter(df_newveg1, class_model == "insect") %>%
  select(., SubcanopyHeight, NS_DIST_AVG)

best_insects <- metaMDS(df_insects, dist = "bray", k = 2, try = 100)

# NOTE(review): filters "bird" inside the insects section — looks like a
# copy-paste of the birds block; presumably should be "insect". Verify.
df_insects <- filter(df_newveg1, class_model == "bird")

data.scores <- as.data.frame(scores(best_insects))
data.scores$id <- rownames(best_insects)
data.scores$veg <- df_insects$VegDescription2
data.scores$point <- df_insects$Point
head(data.scores$veg)

species.scores <- as.data.frame(scores(best_insects, "species"))
species.scores$landvar <- rownames(species.scores)
head(species.scores)

p_bestinsects <- plot_ly()
p_bestinsects <- add_trace(p_bestinsects, name = data.scores$veg, type = "scatter", x = data.scores$NMDS1, y = data.scores$NMDS2, text = data.scores$point)
p_bestinsects <- add_trace(p_bestinsects, name = "Landscape attributes", mode = "text", x = species.scores$NMDS1, y = species.scores$NMDS2, text = species.scores$landvar)
p_bestinsects <- layout(p_bestinsects, title = "Insects - best model (Stress: 0.07)")

#Plotly - insects all land variables
# Same plot for the insect model with all six landscape variables.

df_insects <- filter(df_newveg1, class_model == "insect") %>%
  select(., SubcanopyHeight, GC_BS_AVG, DistWater, GC_NF_AVG, NS_DIST_AVG, GC_SH_AVG)

complete_insects <- metaMDS(df_insects, dist = "bray", k = 2, try = 100)

df_insects <- filter(df_newveg1, class_model == "insect")

data.scores <- as.data.frame(scores(complete_insects))
data.scores$id <- rownames(complete_insects)
data.scores$veg <- df_insects$VegDescription2
data.scores$point <- df_insects$Point
head(data.scores$veg)

species.scores <- as.data.frame(scores(complete_insects, "species"))
species.scores$landvar <- rownames(species.scores)
head(species.scores)

p_completeinsects <- plot_ly()
p_completeinsects <- add_trace(p_completeinsects, name = data.scores$veg, type = "scatter", x = data.scores$NMDS1, y = data.scores$NMDS2, text = data.scores$point)
p_completeinsects <- add_trace(p_completeinsects, name = "Landscape attributes", mode = "text", x = species.scores$NMDS1, y = species.scores$NMDS2, text = species.scores$landvar)
p_completeinsects <- layout(p_completeinsects, title = "Insects - all landscape variables (Stress:0.18)")