content
large_stringlengths
0
6.46M
path
large_stringlengths
3
331
license_type
large_stringclasses
2 values
repo_name
large_stringlengths
5
125
language
large_stringclasses
1 value
is_vendor
bool
2 classes
is_generated
bool
2 classes
length_bytes
int64
4
6.46M
extension
large_stringclasses
75 values
text
stringlengths
0
6.46M
library(raster) library(sf) library(sp) # library(rdgal) library(rgeos) library(dplyr) library(ggplot2) library(spdplyr) library(gstat) library(geoR) library(spatstat) library(viridis) library(rangemap) library(e1071) library(evd) gis_dir <- "C:/Users/lloken/OneDrive - DOI/GIS" onedrive_dir <- 'C:/Users/lloken/OneDrive - DOI/FLAMebodia' maxdist = 10 subset = 10 color.palette = colorRampPalette(c(viridis(6, begin=.1, end=.98), rev(magma(5, begin=.25, end=.98))), bias=1) #Load water grid # watergrid_300m_sp <- readRDS(file.path(onedrive_dir, "GIS", # "TonleSap_January2022_WaterGrid_300m.rds")) # projection = proj4string(watergrid_300m_sp) watergrid_150m_sp <- readRDS(file.path(onedrive_dir, "GIS", "TonleSap_January2022_WaterGrid_150m.rds")) projection = proj4string(watergrid_150m_sp) watergrid_predict <- watergrid_150m_sp # plot(watergrid_300m_sp, col = "pink") # plot(watergrid_150m_sp, col = "pink") # plot(watergrid_predict, col = "pink") # data_name <- "Merged_TonleSap_Jan_2022" data_name <- "Merged_Mekong_TonleSap_JanApr_2022" data_dir <- file.path(onedrive_dir, "Data", data_name) data <- readRDS(file.path(data_dir, "Shapefiles", paste0(data_name, "_Shapefile_AllData.rds"))) data <- spTransform(data, crs(watergrid_predict)) # variables <- names(data)[4:58] variables <- names(data)[4:61] # data2 <- data %>% # filter(!is.na(ODO_mgL)) # # predict <- gstat::idw(pull(select(data2@data, ODO_mgL)) ~ 1, # data2, # watergrid_predict, # idp=2) # # print(spplot(predict, # zcol='var1.pred', # colorkey = TRUE, # cuts = 99, # col.regions = color.palette, # # sp.layout=list(l1, l2, l3, l4), # main="Predicted dissolved oxygen (mgL)", # xlim = bbox(watergrid_predict)[1,], # ylim = bbox(watergrid_predict)[2,])) # Copy from NHLD code # Trying to subset based on proximity to measured values. 
data_sample <- data[sample(seq_along(data$date_time), nrow(data)/100),] concave_cloud <- hull_polygon(data_sample, hull_type = "concave", concave_distance_lim = 100, verbose = TRUE) buffered_cloud <- gBuffer(concave_cloud, width = 1000*10) # dev.off() # plot(data_sample, cex = .5) # plot(concave_cloud, add = TRUE, border = "purple") # plot(buffered_cloud, add = TRUE, border = "magenta") # # Generate 'data cloud' based on observations # bdry <- ripras(coordinates(data)) # # # Convert data cloud to spatial polygon # bdry_df <- data.frame(bdry[[4]][[1]]$x, bdry[[4]][[1]]$y) # bdry_df[nrow(bdry_df)+1,]<-bdry_df[1,] # bdry_poly<-Polygon(bdry_df) # bdry_poly2 = Polygons(list(bdry_poly), "s1") # bdry_poly_sp<-SpatialPolygons(list(bdry_poly2), proj4string=CRS(as.character(projection))) # # # Make Buffer around data cloud polygon # # width = distance in meters; currently using two pixel distances # buffered <- gBuffer(bdry_poly_sp, width = 1000*2) # Make prediction area as intersection of buffered area and lake polygon # Area<-gIntersection(buffered, lake_polygon) # Check Area and Confirm # dev.off() # plot(watergrid_predict, col = "pink") # plot(data_sample, add = TRUE, cex = .5) # plot(concave_cloud, add = TRUE, border = "blue") # plot(buffered_cloud, add = TRUE, border = "magenta") # plot(lakes_Base, add=TRUE) plot(watergrid_predict, add = TRUE) # Make polygrid - This is each location to make predictions watergrid_predict # pts_in = over(SpatialPoints(watergrid_predict), SpatialPolygons(buffered@polygons), # returnlist = TRUE) pts_in = over(SpatialPoints(watergrid_predict), SpatialPolygons(buffered_cloud@polygons), returnlist = TRUE) watergrid_predict_subset <- watergrid_predict[!is.na(pts_in),] # plot(watergrid_predict, col = "pink") # plot(watergrid_predict_subset, col = "blue", add = TRUE) # plot data grid, boundary, and observations # dev.off() # plot(watergrid_predict_subset, col="blue") # plot(buffered_cloud, add = TRUE, border = "magenta") # plot(data_sample, 
add=TRUE, col="red", cex=0.2) # Make spatial object to save surface predictions watergrid_predict_subset_data <- watergrid_predict_subset %>% select(-layer) watergrid_predict_subset_data@data[,variables] <- NA # Make an empty summary table for each filename # This will be populated with summary stats for each variable summary_lake <- as.data.frame(matrix(nrow = length(variables), ncol=22)) names(summary_lake)<-c('Min', 'Q25', 'Median', 'Mean', 'Q75', 'Max', 'Q05', 'Q10', 'Q90', 'Q95', 'sd', 'SDL', 'n', 'mad', 'MADM', 'skewness', 'loc', 'scale', 'shape', 'CV', 'QuartileDispersion', 'MADMOverMedian') # ========================================== # Start of loop to run through each variable # ========================================== var = variables[6] for (var in variables){ var_number <- which(variables==var) # Select only variable of interest data2<-data %>% select(all_of(var)) #Identify column in data2 that contains variable (should be 1) column <- which(names(data2)==var) data2 <- data2[which(!is.na(data2@data[,column])),] # Skip variable if all NAs if (nrow(data2)>0){ #Add minimum if its a variable that has potential negative values # if (var %in% minvars[,1]){ # minimum<-minvars[which(var==minvars[,1]),2] # data2@data[,column]<-data2@data[,column]-minimum # } # Plot Timeseries of variable # Make sure data seem reasonable # plot(data@data[,column1], type="p") #Transform data into UTM's. This way distance is in meters (m) data2<-spTransform(data2, CRS(projection)) # Remove observations that are within maxdist (5m) of each other. data2<-remove.duplicates(data2, zero=maxdist) #Plot heat map atop lake base polygon # spplot(data2[var], cuts=99, colorkey=TRUE, sp.layout = list(lakes_Base['Lake_Name']) ) # subset (%) of the data. 
Take random percent of points # Depending on the analysis and size of data, R cannot handle entire dataset data3<-data2[sample(nrow(data2), nrow(data2)/subset), ] colnames(data3@coords)<-c("x", "y") # ========================= # Using Inverse Distance Weighting predict values at each grid cell # idp = denominator exponent. # idp = 1: 1/distance # idp = 2: 1/(distance squared) # ========================= predict <- gstat::idw(pull(select(data3@data, all_of(var))) ~ 1, data3, watergrid_predict_subset, idp = 2) names(predict) <- c(paste(var, sep=""), paste(var, "_v", sep="")) # par(mfrow=c(1,1)) # par(mar=c(4,4,4,4), oma=c(1,1,1,1)) # spplot(predict, names(predict)[1], colorkey=TRUE, cuts=99, sp.layout=list(lake_polygon['Lake_Name'], col=1, fill=0, lwd=3, lty=1, first=F) , main=paste(var, "_prediction_inverse_distance_weight", sep=""), xlim=bbox(lake_polygon)[1,], ylim=bbox(lake_polygon)[2,]) # Create summary stats for variable values <- predict@data[,1] basic_stats<-summary(values) quantiles<-quantile(values, probs = c(0.05, .1, .9, 0.95), na.rm = TRUE) summary_var<-c(basic_stats, quantiles, sd=sd(values), SDL=sd(log10(values), na.rm=T), n=length(values), mad=mad(values), MADM=median(abs(values-median(values))), skewness=skewness(values, na.rm = T)) # Save summary info to summary table summary_lake[var_number,1:16]<-summary_var #if zero heterogeneity exists, skip evd and plotting if (identical(round(min(values), 3), round(max(values),3))==FALSE){ # hist(values,breaks=20, xlab=var, main="", col="grey") evd <- fgev(values, std.err=F) evd$estimate summary_lake[var_number,17:19]<-evd$estimate # Save spatial data to spatial object watergrid_predict_subset_data@data[var_number]<-predict@data[1] # create subfolder 'maps_idw' if it does not already exist dir.create(file.path(data_dir, "Maps_idw", sep=""), showWarnings = FALSE) # Plot Spatial data png(paste(data_dir, "/Maps_idw/", var, ".png", sep=""), res=200, width=6, height=6, units="in") xdist <- 
diff(bbox(watergrid_predict_subset_data)[1,1:2]) scale <- signif(xdist/6, digits=1) # polyx<-c(bbox(lake_polygon)[1,1]+scale*(c(0.2,1.2))) # polyy<-c(bbox(lake_polygon)[2,1]+scale*c(.2,.4)) # coords<-data.frame(x=c(polyx, rev(polyx)), y=c(rep(polyy[1], 2), rep(polyy[2], 2))) # poly_box<-Polygon(coords) # poly_box2<-Polygons(list(poly_box), "s1") # poly_box_sp<-SpatialPolygons(list(poly_box2), proj4string=CRS(as.character(projection))) # polyx<-c(bbox(watergrid_predict_subset_data)[1,1]+scale*(c(0.2,1.2))) polyy<-c(bbox(watergrid_predict_subset_data)[2,1]+scale*c(.2,.4)) coords<-data.frame(x=c(rep(polyx[1], 2), rep(polyx[2], 2)), y=c(rev(polyy), polyy)) poly_line<-Line((coords)) S1 = Lines(list(poly_line), ID="a") poly_line_sp<- SpatialLines(list(S1)) # l1 = list(lake_polygon['Lake_Name'], col=1, fill=0, lwd=3, lty=1, first=F) l2 = list("SpatialPolygonsRescale", layout.north.arrow(type=1), offset = c(polyx[1], polyy[1]+scale*.25), scale = scale*.5, first=FALSE) l3<- list(poly_line_sp, fill=NA, lwd=2, lty=1, first=F) # l3<- list(poly_box_sp, fill=NA, lwd=2, lty=1, first=F) # mean(polyx), mean(polyy) # l3 = list("SpatialPolygonsRescale", layout.scale.bar(height=scale/1000), offset = # c(bbox(lake_polygon)[1,1]+0.5*scale,bbox(lake_polygon)[2,1]+scale), # scale = scale, fill=c('black'), lwd=1, first=FALSE) l4 = list("sp.text", c(mean(polyx), polyy[1]), paste0(scale/1000, " km"), cex=0.6, first=FALSE, pos=3) print(spplot(watergrid_predict_subset_data, zcol = var, colorkey = TRUE, cuts = 99, col.regions = color.palette, sp.layout=list(l2, l3, l4) , main = paste(var, data_name, sep = ": "), sub = "Prediction using inverse distance weight", xlim = bbox(watergrid_predict_subset_data)[1,], ylim=bbox(watergrid_predict_subset_data)[2,])) dev.off() closeAllConnections() } } } # Add variable names to summary table summary_lake$CV<-summary_lake$sd/summary_lake$Mean summary_lake$QuartileDispersion<-(summary_lake$Q75 - summary_lake$Q25)/ (summary_lake$Q75 + summary_lake$Q25) 
summary_lake$MADMOverMedian<-(summary_lake$MADM)/ (summary_lake$Median) summary_lake$MaxMinusMin<-(summary_lake$Max) - (summary_lake$Min) summary_lake$IQR<-(summary_lake$Q75) - (summary_lake$Q25) summary_lake$Q95MinusQ05<-(summary_lake$Q95) - (summary_lake$Q05) summary_lake$Variable<-variables # Save shapefile of interpolated surface (spatial pixels data frame) writeOGR(watergrid_predict_subset_data, dsn = file.path(data_dir, "Shapefiles"), layer = paste0(data_name, "_Shapefile_idw"), driver="ESRI Shapefile", verbose=F, overwrite=T) # Convert spatialpixesldataframe to raster raster_withData <- stack(watergrid_predict_subset_data) # Save raster of interpolated surface (stacked raster) # Note - ArcMap cannot read this type of file writeRaster(raster_withData, file.path(data_dir, "Shapefiles", paste0(data_name, "_Raster_idw", sep="")), format='raster', overwrite=TRUE) #Write summary to file write.table(summary_lake, file = file.path(data_dir, paste0(data_name, "_PixelSummaries.csv")), col.names = TRUE, row.names = FALSE, sep=",") rm(summary_lake)
/interpolate_tonlesap.R
no_license
lukeloken/SuperFlamer
R
false
false
12,257
r
library(raster) library(sf) library(sp) # library(rdgal) library(rgeos) library(dplyr) library(ggplot2) library(spdplyr) library(gstat) library(geoR) library(spatstat) library(viridis) library(rangemap) library(e1071) library(evd) gis_dir <- "C:/Users/lloken/OneDrive - DOI/GIS" onedrive_dir <- 'C:/Users/lloken/OneDrive - DOI/FLAMebodia' maxdist = 10 subset = 10 color.palette = colorRampPalette(c(viridis(6, begin=.1, end=.98), rev(magma(5, begin=.25, end=.98))), bias=1) #Load water grid # watergrid_300m_sp <- readRDS(file.path(onedrive_dir, "GIS", # "TonleSap_January2022_WaterGrid_300m.rds")) # projection = proj4string(watergrid_300m_sp) watergrid_150m_sp <- readRDS(file.path(onedrive_dir, "GIS", "TonleSap_January2022_WaterGrid_150m.rds")) projection = proj4string(watergrid_150m_sp) watergrid_predict <- watergrid_150m_sp # plot(watergrid_300m_sp, col = "pink") # plot(watergrid_150m_sp, col = "pink") # plot(watergrid_predict, col = "pink") # data_name <- "Merged_TonleSap_Jan_2022" data_name <- "Merged_Mekong_TonleSap_JanApr_2022" data_dir <- file.path(onedrive_dir, "Data", data_name) data <- readRDS(file.path(data_dir, "Shapefiles", paste0(data_name, "_Shapefile_AllData.rds"))) data <- spTransform(data, crs(watergrid_predict)) # variables <- names(data)[4:58] variables <- names(data)[4:61] # data2 <- data %>% # filter(!is.na(ODO_mgL)) # # predict <- gstat::idw(pull(select(data2@data, ODO_mgL)) ~ 1, # data2, # watergrid_predict, # idp=2) # # print(spplot(predict, # zcol='var1.pred', # colorkey = TRUE, # cuts = 99, # col.regions = color.palette, # # sp.layout=list(l1, l2, l3, l4), # main="Predicted dissolved oxygen (mgL)", # xlim = bbox(watergrid_predict)[1,], # ylim = bbox(watergrid_predict)[2,])) # Copy from NHLD code # Trying to subset based on proximity to measured values. 
data_sample <- data[sample(seq_along(data$date_time), nrow(data)/100),] concave_cloud <- hull_polygon(data_sample, hull_type = "concave", concave_distance_lim = 100, verbose = TRUE) buffered_cloud <- gBuffer(concave_cloud, width = 1000*10) # dev.off() # plot(data_sample, cex = .5) # plot(concave_cloud, add = TRUE, border = "purple") # plot(buffered_cloud, add = TRUE, border = "magenta") # # Generate 'data cloud' based on observations # bdry <- ripras(coordinates(data)) # # # Convert data cloud to spatial polygon # bdry_df <- data.frame(bdry[[4]][[1]]$x, bdry[[4]][[1]]$y) # bdry_df[nrow(bdry_df)+1,]<-bdry_df[1,] # bdry_poly<-Polygon(bdry_df) # bdry_poly2 = Polygons(list(bdry_poly), "s1") # bdry_poly_sp<-SpatialPolygons(list(bdry_poly2), proj4string=CRS(as.character(projection))) # # # Make Buffer around data cloud polygon # # width = distance in meters; currently using two pixel distances # buffered <- gBuffer(bdry_poly_sp, width = 1000*2) # Make prediction area as intersection of buffered area and lake polygon # Area<-gIntersection(buffered, lake_polygon) # Check Area and Confirm # dev.off() # plot(watergrid_predict, col = "pink") # plot(data_sample, add = TRUE, cex = .5) # plot(concave_cloud, add = TRUE, border = "blue") # plot(buffered_cloud, add = TRUE, border = "magenta") # plot(lakes_Base, add=TRUE) plot(watergrid_predict, add = TRUE) # Make polygrid - This is each location to make predictions watergrid_predict # pts_in = over(SpatialPoints(watergrid_predict), SpatialPolygons(buffered@polygons), # returnlist = TRUE) pts_in = over(SpatialPoints(watergrid_predict), SpatialPolygons(buffered_cloud@polygons), returnlist = TRUE) watergrid_predict_subset <- watergrid_predict[!is.na(pts_in),] # plot(watergrid_predict, col = "pink") # plot(watergrid_predict_subset, col = "blue", add = TRUE) # plot data grid, boundary, and observations # dev.off() # plot(watergrid_predict_subset, col="blue") # plot(buffered_cloud, add = TRUE, border = "magenta") # plot(data_sample, 
add=TRUE, col="red", cex=0.2) # Make spatial object to save surface predictions watergrid_predict_subset_data <- watergrid_predict_subset %>% select(-layer) watergrid_predict_subset_data@data[,variables] <- NA # Make an empty summary table for each filename # This will be populated with summary stats for each variable summary_lake <- as.data.frame(matrix(nrow = length(variables), ncol=22)) names(summary_lake)<-c('Min', 'Q25', 'Median', 'Mean', 'Q75', 'Max', 'Q05', 'Q10', 'Q90', 'Q95', 'sd', 'SDL', 'n', 'mad', 'MADM', 'skewness', 'loc', 'scale', 'shape', 'CV', 'QuartileDispersion', 'MADMOverMedian') # ========================================== # Start of loop to run through each variable # ========================================== var = variables[6] for (var in variables){ var_number <- which(variables==var) # Select only variable of interest data2<-data %>% select(all_of(var)) #Identify column in data2 that contains variable (should be 1) column <- which(names(data2)==var) data2 <- data2[which(!is.na(data2@data[,column])),] # Skip variable if all NAs if (nrow(data2)>0){ #Add minimum if its a variable that has potential negative values # if (var %in% minvars[,1]){ # minimum<-minvars[which(var==minvars[,1]),2] # data2@data[,column]<-data2@data[,column]-minimum # } # Plot Timeseries of variable # Make sure data seem reasonable # plot(data@data[,column1], type="p") #Transform data into UTM's. This way distance is in meters (m) data2<-spTransform(data2, CRS(projection)) # Remove observations that are within maxdist (5m) of each other. data2<-remove.duplicates(data2, zero=maxdist) #Plot heat map atop lake base polygon # spplot(data2[var], cuts=99, colorkey=TRUE, sp.layout = list(lakes_Base['Lake_Name']) ) # subset (%) of the data. 
Take random percent of points # Depending on the analysis and size of data, R cannot handle entire dataset data3<-data2[sample(nrow(data2), nrow(data2)/subset), ] colnames(data3@coords)<-c("x", "y") # ========================= # Using Inverse Distance Weighting predict values at each grid cell # idp = denominator exponent. # idp = 1: 1/distance # idp = 2: 1/(distance squared) # ========================= predict <- gstat::idw(pull(select(data3@data, all_of(var))) ~ 1, data3, watergrid_predict_subset, idp = 2) names(predict) <- c(paste(var, sep=""), paste(var, "_v", sep="")) # par(mfrow=c(1,1)) # par(mar=c(4,4,4,4), oma=c(1,1,1,1)) # spplot(predict, names(predict)[1], colorkey=TRUE, cuts=99, sp.layout=list(lake_polygon['Lake_Name'], col=1, fill=0, lwd=3, lty=1, first=F) , main=paste(var, "_prediction_inverse_distance_weight", sep=""), xlim=bbox(lake_polygon)[1,], ylim=bbox(lake_polygon)[2,]) # Create summary stats for variable values <- predict@data[,1] basic_stats<-summary(values) quantiles<-quantile(values, probs = c(0.05, .1, .9, 0.95), na.rm = TRUE) summary_var<-c(basic_stats, quantiles, sd=sd(values), SDL=sd(log10(values), na.rm=T), n=length(values), mad=mad(values), MADM=median(abs(values-median(values))), skewness=skewness(values, na.rm = T)) # Save summary info to summary table summary_lake[var_number,1:16]<-summary_var #if zero heterogeneity exists, skip evd and plotting if (identical(round(min(values), 3), round(max(values),3))==FALSE){ # hist(values,breaks=20, xlab=var, main="", col="grey") evd <- fgev(values, std.err=F) evd$estimate summary_lake[var_number,17:19]<-evd$estimate # Save spatial data to spatial object watergrid_predict_subset_data@data[var_number]<-predict@data[1] # create subfolder 'maps_idw' if it does not already exist dir.create(file.path(data_dir, "Maps_idw", sep=""), showWarnings = FALSE) # Plot Spatial data png(paste(data_dir, "/Maps_idw/", var, ".png", sep=""), res=200, width=6, height=6, units="in") xdist <- 
diff(bbox(watergrid_predict_subset_data)[1,1:2]) scale <- signif(xdist/6, digits=1) # polyx<-c(bbox(lake_polygon)[1,1]+scale*(c(0.2,1.2))) # polyy<-c(bbox(lake_polygon)[2,1]+scale*c(.2,.4)) # coords<-data.frame(x=c(polyx, rev(polyx)), y=c(rep(polyy[1], 2), rep(polyy[2], 2))) # poly_box<-Polygon(coords) # poly_box2<-Polygons(list(poly_box), "s1") # poly_box_sp<-SpatialPolygons(list(poly_box2), proj4string=CRS(as.character(projection))) # polyx<-c(bbox(watergrid_predict_subset_data)[1,1]+scale*(c(0.2,1.2))) polyy<-c(bbox(watergrid_predict_subset_data)[2,1]+scale*c(.2,.4)) coords<-data.frame(x=c(rep(polyx[1], 2), rep(polyx[2], 2)), y=c(rev(polyy), polyy)) poly_line<-Line((coords)) S1 = Lines(list(poly_line), ID="a") poly_line_sp<- SpatialLines(list(S1)) # l1 = list(lake_polygon['Lake_Name'], col=1, fill=0, lwd=3, lty=1, first=F) l2 = list("SpatialPolygonsRescale", layout.north.arrow(type=1), offset = c(polyx[1], polyy[1]+scale*.25), scale = scale*.5, first=FALSE) l3<- list(poly_line_sp, fill=NA, lwd=2, lty=1, first=F) # l3<- list(poly_box_sp, fill=NA, lwd=2, lty=1, first=F) # mean(polyx), mean(polyy) # l3 = list("SpatialPolygonsRescale", layout.scale.bar(height=scale/1000), offset = # c(bbox(lake_polygon)[1,1]+0.5*scale,bbox(lake_polygon)[2,1]+scale), # scale = scale, fill=c('black'), lwd=1, first=FALSE) l4 = list("sp.text", c(mean(polyx), polyy[1]), paste0(scale/1000, " km"), cex=0.6, first=FALSE, pos=3) print(spplot(watergrid_predict_subset_data, zcol = var, colorkey = TRUE, cuts = 99, col.regions = color.palette, sp.layout=list(l2, l3, l4) , main = paste(var, data_name, sep = ": "), sub = "Prediction using inverse distance weight", xlim = bbox(watergrid_predict_subset_data)[1,], ylim=bbox(watergrid_predict_subset_data)[2,])) dev.off() closeAllConnections() } } } # Add variable names to summary table summary_lake$CV<-summary_lake$sd/summary_lake$Mean summary_lake$QuartileDispersion<-(summary_lake$Q75 - summary_lake$Q25)/ (summary_lake$Q75 + summary_lake$Q25) 
summary_lake$MADMOverMedian<-(summary_lake$MADM)/ (summary_lake$Median) summary_lake$MaxMinusMin<-(summary_lake$Max) - (summary_lake$Min) summary_lake$IQR<-(summary_lake$Q75) - (summary_lake$Q25) summary_lake$Q95MinusQ05<-(summary_lake$Q95) - (summary_lake$Q05) summary_lake$Variable<-variables # Save shapefile of interpolated surface (spatial pixels data frame) writeOGR(watergrid_predict_subset_data, dsn = file.path(data_dir, "Shapefiles"), layer = paste0(data_name, "_Shapefile_idw"), driver="ESRI Shapefile", verbose=F, overwrite=T) # Convert spatialpixesldataframe to raster raster_withData <- stack(watergrid_predict_subset_data) # Save raster of interpolated surface (stacked raster) # Note - ArcMap cannot read this type of file writeRaster(raster_withData, file.path(data_dir, "Shapefiles", paste0(data_name, "_Raster_idw", sep="")), format='raster', overwrite=TRUE) #Write summary to file write.table(summary_lake, file = file.path(data_dir, paste0(data_name, "_PixelSummaries.csv")), col.names = TRUE, row.names = FALSE, sep=",") rm(summary_lake)
% Generated by roxygen2: do not edit by hand % Please edit documentation in R/Path-class.R \name{Path$.} \alias{Path$.} \title{Get the elements in the directory} \description{ Returns a named list of Path elements in a directory } \seealso{ Other Path: \code{\link{Path$..}}, \code{\link{Path$J}}, \code{\link{Path$dir}}, \code{\link{Path$join}}, \code{\link{Path$name}}, \code{\link{Path$new}}, \code{\link{Path$parent}}, \code{\link{Path$show}}, \code{\link{Path}}, \code{\link{\%//\%}()} } \concept{Path}
/man/Path-cash-..Rd
permissive
strazto/pathlibr
R
false
true
509
rd
% Generated by roxygen2: do not edit by hand % Please edit documentation in R/Path-class.R \name{Path$.} \alias{Path$.} \title{Get the elements in the directory} \description{ Returns a named list of Path elements in a directory } \seealso{ Other Path: \code{\link{Path$..}}, \code{\link{Path$J}}, \code{\link{Path$dir}}, \code{\link{Path$join}}, \code{\link{Path$name}}, \code{\link{Path$new}}, \code{\link{Path$parent}}, \code{\link{Path$show}}, \code{\link{Path}}, \code{\link{\%//\%}()} } \concept{Path}
% Generated by roxygen2: do not edit by hand % Please edit documentation in R/censoring_estimate.R \name{estimateCensoring} \alias{estimateCensoring} \title{Estimate Censoring Mechanisms} \usage{ estimateCensoring( dataList, adjustVars, t0, SL.ctime = NULL, glm.ctime = NULL, glm.family, cvControl, returnModels = FALSE, verbose = TRUE, gtol = 0.001, ... ) } \arguments{ \item{dataList}{A list of \code{data.frame} objects as described in the documentation of \code{\link{makeDataList}}.} \item{adjustVars}{Object of class \code{data.frame} that contains the variables to adjust for in the regression.} \item{t0}{The timepoint at which \code{survtmle} was called to evaluate. Needed only because the naming convention for the regression if \code{t == t0} is different than if \code{t != t0}.} \item{SL.ctime}{A character vector or list specification to be passed to the \code{SL.library} argument of \code{\link[SuperLearner]{SuperLearner}} for the outcome regression (either cause-specific hazards or conditional mean). See the documentation of \code{\link[SuperLearner]{SuperLearner}} for more information on how to specify valid \code{SuperLearner} libraries. It is expected that the wrappers used in the library will play nicely with the input variables, which will be called \code{"trt"} and \code{names(adjustVars)}.} \item{glm.ctime}{A character specification of the right-hand side of the equation passed to the \code{\link[stats]{formula}} option of a call to \code{\link[stats]{glm}} for the outcome regression (either cause-specific hazards or conditional mean). Ignored if \code{SL.ctime != NULL}. Use \code{"trt"} to specify the treatment in this formula (see examples). The The formula can additionally include any variables found in \code{names(adjustVars)}.} \item{glm.family}{The type of regression to be performed if fitting GLMs in the estimation and fluctuation procedures. The default is "binomial" for logistic regression. 
Only change this from the default if there are justifications that are well understood. This is inherited from the calling function (either \code{\link{mean_tmle}} or \code{\link{hazard_tmle}}).} \item{cvControl}{A \code{list} providing control options to be fed directly into calls to \code{\link[SuperLearner]{SuperLearner}}. This should match the contents of \code{SuperLearner.CV.control} exactly. For details, consult the documentation of the \pkg{SuperLearner} package. This is passed in from \code{\link{mean_tmle}} or \code{\link{hazard_tmle}} via \code{\link{survtmle}}.} \item{returnModels}{A \code{logical} indicating whether to return the \code{glm} or \code{SuperLearner} objects used to estimate the nuisance parameters. Must be set to \code{TRUE} to make downstream calls to \code{\link{timepoints}} for obtaining estimates at times other than \code{t0}. See documentation of \code{\link{timepoints}} for more information.} \item{verbose}{A \code{logical} indicating whether the function should print messages to indicate progress.} \item{gtol}{The truncation level of predicted censoring survival to handle positivity violations.} \item{...}{Other arguments. Not currently used.} } \value{ The function returns a list that is exactly the same as the input \code{dataList}, but with a column named \code{G_dC} added to it, which is the estimated conditional survival distribution for the censoring variable evaluated at the each of the rows of each \code{data.frame} in \code{dataList}. } \description{ Computes an estimate of the hazard for censoring using either \code{\link[stats]{glm}} or \code{\link[SuperLearner]{SuperLearner}} based on log-likelihood loss. The function then computes the censoring survival distribution based on these estimates. The structure of the function is specific to how it is called within \code{\link{survtmle}}. In particular, \code{dataList} must have a very specific structure for this function to run properly. 
The list should consist of \code{data.frame} objects. The first will have the number of rows for each observation equal to the \code{ftime} corresponding to that observation. Subsequent entries will have \code{t0} rows for each observation and will set \code{trt} column equal to each value of \code{trtOfInterest} in turn. One of these columns must be named \code{C} that is a counting process for the right-censoring variable. The function will fit a regression with \code{C} as the outcome and functions of \code{trt} and \code{names(adjustVars)} as specified by \code{glm.ctime} or \code{SL.ctime} as predictors. }
/man/estimateCensoring.Rd
permissive
benkeser/survtmle
R
false
true
4,577
rd
% Generated by roxygen2: do not edit by hand % Please edit documentation in R/censoring_estimate.R \name{estimateCensoring} \alias{estimateCensoring} \title{Estimate Censoring Mechanisms} \usage{ estimateCensoring( dataList, adjustVars, t0, SL.ctime = NULL, glm.ctime = NULL, glm.family, cvControl, returnModels = FALSE, verbose = TRUE, gtol = 0.001, ... ) } \arguments{ \item{dataList}{A list of \code{data.frame} objects as described in the documentation of \code{\link{makeDataList}}.} \item{adjustVars}{Object of class \code{data.frame} that contains the variables to adjust for in the regression.} \item{t0}{The timepoint at which \code{survtmle} was called to evaluate. Needed only because the naming convention for the regression if \code{t == t0} is different than if \code{t != t0}.} \item{SL.ctime}{A character vector or list specification to be passed to the \code{SL.library} argument of \code{\link[SuperLearner]{SuperLearner}} for the outcome regression (either cause-specific hazards or conditional mean). See the documentation of \code{\link[SuperLearner]{SuperLearner}} for more information on how to specify valid \code{SuperLearner} libraries. It is expected that the wrappers used in the library will play nicely with the input variables, which will be called \code{"trt"} and \code{names(adjustVars)}.} \item{glm.ctime}{A character specification of the right-hand side of the equation passed to the \code{\link[stats]{formula}} option of a call to \code{\link[stats]{glm}} for the outcome regression (either cause-specific hazards or conditional mean). Ignored if \code{SL.ctime != NULL}. Use \code{"trt"} to specify the treatment in this formula (see examples). The The formula can additionally include any variables found in \code{names(adjustVars)}.} \item{glm.family}{The type of regression to be performed if fitting GLMs in the estimation and fluctuation procedures. The default is "binomial" for logistic regression. 
Only change this from the default if there are justifications that are well understood. This is inherited from the calling function (either \code{\link{mean_tmle}} or \code{\link{hazard_tmle}}).} \item{cvControl}{A \code{list} providing control options to be fed directly into calls to \code{\link[SuperLearner]{SuperLearner}}. This should match the contents of \code{SuperLearner.CV.control} exactly. For details, consult the documentation of the \pkg{SuperLearner} package. This is passed in from \code{\link{mean_tmle}} or \code{\link{hazard_tmle}} via \code{\link{survtmle}}.} \item{returnModels}{A \code{logical} indicating whether to return the \code{glm} or \code{SuperLearner} objects used to estimate the nuisance parameters. Must be set to \code{TRUE} to make downstream calls to \code{\link{timepoints}} for obtaining estimates at times other than \code{t0}. See documentation of \code{\link{timepoints}} for more information.} \item{verbose}{A \code{logical} indicating whether the function should print messages to indicate progress.} \item{gtol}{The truncation level of predicted censoring survival to handle positivity violations.} \item{...}{Other arguments. Not currently used.} } \value{ The function returns a list that is exactly the same as the input \code{dataList}, but with a column named \code{G_dC} added to it, which is the estimated conditional survival distribution for the censoring variable evaluated at the each of the rows of each \code{data.frame} in \code{dataList}. } \description{ Computes an estimate of the hazard for censoring using either \code{\link[stats]{glm}} or \code{\link[SuperLearner]{SuperLearner}} based on log-likelihood loss. The function then computes the censoring survival distribution based on these estimates. The structure of the function is specific to how it is called within \code{\link{survtmle}}. In particular, \code{dataList} must have a very specific structure for this function to run properly. 
The list should consist of \code{data.frame} objects. The first will have the number of rows for each observation equal to the \code{ftime} corresponding to that observation. Subsequent entries will have \code{t0} rows for each observation and will set \code{trt} column equal to each value of \code{trtOfInterest} in turn. One of these columns must be named \code{C} that is a counting process for the right-censoring variable. The function will fit a regression with \code{C} as the outcome and functions of \code{trt} and \code{names(adjustVars)} as specified by \code{glm.ctime} or \code{SL.ctime} as predictors. }
Uses <- function(MP=NA, data=NULL) { options(warn=-1) if (class(MP) != 'character' && !is.na(MP)) stop("MP must be class 'character'", call.=FALSE) options(warn=1) if (all(is.na(MP))) MP <- c(avail("Output"), avail("Input")) if (!is.null(data)) MP <- c(avail("Output"), avail("Input")) val <- MP %in% c(avail("Output"), avail("Input")) if (all(!val)) stop("Invalid MPs: ", paste(MP[!val], ""), "\nNo valid MPs found", call.=FALSE) if (any(!val)) message("Ignoring invalid MPs: ", paste(MP[!val], "")) MP <- MP[val] out <- matrix(NA, nrow=length(MP), ncol=3) out[,1] <- MP out[,2] <- MPclass(MP) slots <- slotNames('Data') slotnams <- paste("Data@", slotNames("Data"), sep = "") for (mm in 1:length(MP)) { temp <- format(match.fun(MP[mm])) temp <- paste(temp[1:(length(temp))], collapse = " ") uses <- NULL for (j in 1:length(slotnams)) { if (grepl(slotnams[j], temp)) uses <- c(uses, slots[j]) } out[mm,3] <- paste(uses[1:length(uses)], collapse = ", ") } colnames(out) <- c("MP", "Class", "Uses") if (!is.null(data)) { val <- data %in% slotNames("Data") if (all(!val)) stop("Invalid data: ", paste(data[!val], ""), "\nNo valid data found.\nValid data are: ", sort(paste(slotNames("Data"), "")), call.=FALSE) if (any(!val)) message("Ignoring invalid data: ", paste(data[!val], "")) data <- data[val] ind <- apply(do.call('cbind', lapply(data, grepl, x=out[,3])), 1, prod) > 0 if (all(!ind)) return(message("No MPs found using: ", paste(data, ""))) message("MPs using: ", paste(data, "")) print(out[ind,1:2]) return(invisible(out)) } else { return(out) } } Uses("AvC") Uses(c("AvC", "LBSPR_ItTAC")) Uses('DCAC') Uses(data=c("steep")) uses <- Uses() uses[,3] lapply(strsplit(uses[3,3], split=","), trimws) library(cluster) library(factoextra) str(USArrests) slots <- slotNames('Data') ignslots <- c("Name", "CV_", "Year", "Misc", "nareas", "Ref", "Ref_type", "Log", "params", "OM", "TACs", "Obs", "TACbias", "Sense", "Units") ind <- apply(do.call('cbind', lapply(ignslots, grepl, x=slots)), 1, sum) > 
0 slots[!ind] slots[ind] valslots <- slots[!ind] tempdat <- matrix(0, nrow=nrow(uses), ncol=length(valslots)) for (X in 1:nrow(uses)) { for (y in 1:length(valslots)) if(grepl(valslots[y], uses[X,3])) tempdat[X,y] <- 1 } colnames(tempdat) <- valslots rownames(tempdat) <- uses[,1] tempdat <- as.data.frame(tempdat) d = dist(tempdat, method = "binary") hc = hclust(d, method="ward.D") plot(hc) democut<-cutree(hc,h=2) plot(hc, labels = as.character(democut)) uses[democut == 1,] uses[democut == 2,] uses[democut == 3,] uses[democut == 4,] uses[democut == 5,] uses[democut == 6,] uses[democut == 7,] uses[democut == 8,] tt <-table(uses[,1],democut) tt[order(tt[,1]),]
/Uses.R
no_license
DLMtool/DLMDev
R
false
false
2,839
r
Uses <- function(MP=NA, data=NULL) { options(warn=-1) if (class(MP) != 'character' && !is.na(MP)) stop("MP must be class 'character'", call.=FALSE) options(warn=1) if (all(is.na(MP))) MP <- c(avail("Output"), avail("Input")) if (!is.null(data)) MP <- c(avail("Output"), avail("Input")) val <- MP %in% c(avail("Output"), avail("Input")) if (all(!val)) stop("Invalid MPs: ", paste(MP[!val], ""), "\nNo valid MPs found", call.=FALSE) if (any(!val)) message("Ignoring invalid MPs: ", paste(MP[!val], "")) MP <- MP[val] out <- matrix(NA, nrow=length(MP), ncol=3) out[,1] <- MP out[,2] <- MPclass(MP) slots <- slotNames('Data') slotnams <- paste("Data@", slotNames("Data"), sep = "") for (mm in 1:length(MP)) { temp <- format(match.fun(MP[mm])) temp <- paste(temp[1:(length(temp))], collapse = " ") uses <- NULL for (j in 1:length(slotnams)) { if (grepl(slotnams[j], temp)) uses <- c(uses, slots[j]) } out[mm,3] <- paste(uses[1:length(uses)], collapse = ", ") } colnames(out) <- c("MP", "Class", "Uses") if (!is.null(data)) { val <- data %in% slotNames("Data") if (all(!val)) stop("Invalid data: ", paste(data[!val], ""), "\nNo valid data found.\nValid data are: ", sort(paste(slotNames("Data"), "")), call.=FALSE) if (any(!val)) message("Ignoring invalid data: ", paste(data[!val], "")) data <- data[val] ind <- apply(do.call('cbind', lapply(data, grepl, x=out[,3])), 1, prod) > 0 if (all(!ind)) return(message("No MPs found using: ", paste(data, ""))) message("MPs using: ", paste(data, "")) print(out[ind,1:2]) return(invisible(out)) } else { return(out) } } Uses("AvC") Uses(c("AvC", "LBSPR_ItTAC")) Uses('DCAC') Uses(data=c("steep")) uses <- Uses() uses[,3] lapply(strsplit(uses[3,3], split=","), trimws) library(cluster) library(factoextra) str(USArrests) slots <- slotNames('Data') ignslots <- c("Name", "CV_", "Year", "Misc", "nareas", "Ref", "Ref_type", "Log", "params", "OM", "TACs", "Obs", "TACbias", "Sense", "Units") ind <- apply(do.call('cbind', lapply(ignslots, grepl, x=slots)), 1, sum) > 
0 slots[!ind] slots[ind] valslots <- slots[!ind] tempdat <- matrix(0, nrow=nrow(uses), ncol=length(valslots)) for (X in 1:nrow(uses)) { for (y in 1:length(valslots)) if(grepl(valslots[y], uses[X,3])) tempdat[X,y] <- 1 } colnames(tempdat) <- valslots rownames(tempdat) <- uses[,1] tempdat <- as.data.frame(tempdat) d = dist(tempdat, method = "binary") hc = hclust(d, method="ward.D") plot(hc) democut<-cutree(hc,h=2) plot(hc, labels = as.character(democut)) uses[democut == 1,] uses[democut == 2,] uses[democut == 3,] uses[democut == 4,] uses[democut == 5,] uses[democut == 6,] uses[democut == 7,] uses[democut == 8,] tt <-table(uses[,1],democut) tt[order(tt[,1]),]
test_that("base parse query string for user ID works", { query <- shiny::parseQueryString("?user_id=hadley&other_parameter=other/") base_val <- base_extract_user_id(query) expect_equal(base_val, "hadley") })
/tests/testthat/test-helpers-survey-code.R
permissive
nklepeis/shinysurveys
R
false
false
218
r
test_that("base parse query string for user ID works", { query <- shiny::parseQueryString("?user_id=hadley&other_parameter=other/") base_val <- base_extract_user_id(query) expect_equal(base_val, "hadley") })
vignette("Tutorial", package="mstate") library(mstate) library("colorspace") data(aidssi) si <- aidssi # Just a shorter name table(si$status) tmat <- trans.comprisk(2, names = c("event-free", "AIDS", "SI")) tmat ?msfit si$stat1 <- as.numeric(si$status == 1) si$stat2 <- as.numeric(si$status == 2) dim(aidssi)[1] #add a continious variable #ccr5 category variable with 2 levels one dummy variable ww as reference #radom continuous variable si$radom<-rnorm(dim(aidssi)[1],0,1) silong <- msprep(time = c(NA, "time", "time"), status = c(NA, "stat1", "stat2"), data = si, keep = c("ccr5","radom"), trans = tmat) events(silong) covs<-c("ccr5","radom") silong <- expand.covs(silong, covs) c1 <- coxph(Surv(time, status) ~ 1, data = silong, subset = (trans ==1), method = "breslow") #a non-parametric model c0<-coxph(Surv(time, status) ~ strata(trans), data = silong,method = "breslow") msf0 <- msfit(object = c0, vartype = "greenwood", trans = tmat) plot(msf0, las = 1, lty = rep(1:2, c(8, 4)), xlab = "Years since event-free") #stacked plot of probability of transition pt0<-probtrans(msf0,predt=0,method="greenwood") summary(pt0,from=1) plot(pt0, ord=c(1,2,3), lwd=2, xlab = "Years since event-free", ylab="Prediction probabilities" ) #stacked plot with heat map statecols<-heat_hcl(3,c=c(80,30),l=c(30,90),power=c(1/5,2))[c(1,2,3)] ord<-c(1,2,3) plot(pt0,ord=ord,xlab = "Years since transplantation", las = 1, ylim=c(-0.1,1.1),type = "filled", col = statecols[ord]) #from 1?? 
it seems computing risk can only be from 1 and state 2 and 3 are absorbing state #hard coding stacked plot pt01<-pt0[[1]] head(pt01) tail(pt01) plot(pt01$time,pt01$pstate1,lwd=1, xlab = "Years since event-free", ylab="Prediction probabilities", col="red" ) lines(pt01$time,pt01$pstate1+pt01$pstate2,lwd=1,col="blue") lines(pt01$time,pt01$pstate1+pt01$pstate2+pt01$pstate3,lwd=1,col="green") legend("topleft",c("event-free","AID","SI"),lwd = 2, col = c("red","blue","green"), bty = "n") #legend=c("SI","AIDS") #transition 1 event-free to AID 2 event-free to SI #check to see if we need trasition specific covariate coxph(Surv(time, status) ~ ccr5*factor(trans)+strata(trans), data = silong,method = "breslow") #significant interaction coxph(Surv(time, status) ~ ccr5WM.1+ccr5WM.2+strata(trans), data = silong,method = "breslow") coxph(Surv(time, status) ~ radom*factor(trans)+strata(trans), data = silong,method = "breslow") #random covariate not significant, interaction not significant coxph(Surv(time, status) ~ radom.1+radom.2+strata(trans), data = silong,method = "breslow") #full model cfull<-coxph(Surv(time, status) ~ ccr5WM.1+ccr5WM.2+radom.1+radom.2+strata(trans), data = silong,method = "breslow") silong[silong$id==1,] #prediction for a observation with WW feature WW<-data.frame(ccr5WM.1=c(0,0),ccr5WM.2=c(0,0),trans=c(1,2), strata=c(1,2),radom.1=c(0.5,0),radom.2=c(0,0.5)) msf.WW <- msfit(cfull, WW, trans = tmat) pt.WW <- probtrans(msf.WW, 0)[[1]] #prediction for a observation with WM feature WM<-data.frame(ccr5WM.1=c(1,0),ccr5WM.2=c(0,1),trans=c(1,2), strata=c(1,2),radom.1=c(0.5,0),radom.2=c(0,0.5)) msf.WM <- msfit(cfull, WM, trans = tmat) pt.WM <- probtrans(msf.WM, 0)[[1]] idx1 <- (pt.WW$time < 13) idx2 <- (pt.WM$time < 13) plot(c(0, pt.WW$time[idx1]), c(0, pt.WW$pstate2[idx1]), type = "s", ylim = c(0, 0.5), xlab = "Years from HIV infection", ylab = "Probability", lwd = 2,col="red") lines(c(0, pt.WM$time[idx2]), c(0, pt.WM$pstate2[idx2]), type = "s", lwd = 2, col = "blue") 
title(main = "AIDS") text(9.2, 0.345, "WW", adj = 0, cex = 0.75) text(9.2, 0.125, "WM", adj = 0, cex = 0.75) plot(c(0, pt.WW$time[idx1]), c(0, pt.WW$pstate3[idx1]), type = "s", ylim = c(0, 0.5), xlab = "Years from HIV infection", ylab = "Probability", lwd = 2,col="red") lines(c(0, pt.WM$time[idx2]), c(0, pt.WM$pstate3[idx2]), type = "s", lwd = 2, col = "blue") title(main = "SI appearance") text(7.5, 0.31, "WW", adj = 0, cex = 0.75) text(7.5, 0.245, "WM", adj = 0, cex = 0.75) #another way to compare there two observations ptWW<-probtrans(msf.WW,predt=0) ptWM<-probtrans(msf.WM,predt=0) summary(ptWW,from=1) statecols<-heat_hcl(3,c=c(80,30),l=c(30,90),power=c(1/5,2))[c(1,2,3)] ord<-c(1,2,3) par(mfrow = c(1,2)) plot(ptWW,ord=ord,xlab = "Years since transplantation", las = 1, ylim=c(-0.1,1.1),type = "filled", col = statecols[ord],main="WW") plot(ptWM,ord=ord,xlab = "Years since transplantation", las = 1, ylim=c(-0.1,1.1),type = "filled", col = statecols[ord],main="WM")
/competing_risk.R
no_license
bnuzyc91/mstate
R
false
false
4,623
r
vignette("Tutorial", package="mstate") library(mstate) library("colorspace") data(aidssi) si <- aidssi # Just a shorter name table(si$status) tmat <- trans.comprisk(2, names = c("event-free", "AIDS", "SI")) tmat ?msfit si$stat1 <- as.numeric(si$status == 1) si$stat2 <- as.numeric(si$status == 2) dim(aidssi)[1] #add a continious variable #ccr5 category variable with 2 levels one dummy variable ww as reference #radom continuous variable si$radom<-rnorm(dim(aidssi)[1],0,1) silong <- msprep(time = c(NA, "time", "time"), status = c(NA, "stat1", "stat2"), data = si, keep = c("ccr5","radom"), trans = tmat) events(silong) covs<-c("ccr5","radom") silong <- expand.covs(silong, covs) c1 <- coxph(Surv(time, status) ~ 1, data = silong, subset = (trans ==1), method = "breslow") #a non-parametric model c0<-coxph(Surv(time, status) ~ strata(trans), data = silong,method = "breslow") msf0 <- msfit(object = c0, vartype = "greenwood", trans = tmat) plot(msf0, las = 1, lty = rep(1:2, c(8, 4)), xlab = "Years since event-free") #stacked plot of probability of transition pt0<-probtrans(msf0,predt=0,method="greenwood") summary(pt0,from=1) plot(pt0, ord=c(1,2,3), lwd=2, xlab = "Years since event-free", ylab="Prediction probabilities" ) #stacked plot with heat map statecols<-heat_hcl(3,c=c(80,30),l=c(30,90),power=c(1/5,2))[c(1,2,3)] ord<-c(1,2,3) plot(pt0,ord=ord,xlab = "Years since transplantation", las = 1, ylim=c(-0.1,1.1),type = "filled", col = statecols[ord]) #from 1?? 
it seems computing risk can only be from 1 and state 2 and 3 are absorbing state #hard coding stacked plot pt01<-pt0[[1]] head(pt01) tail(pt01) plot(pt01$time,pt01$pstate1,lwd=1, xlab = "Years since event-free", ylab="Prediction probabilities", col="red" ) lines(pt01$time,pt01$pstate1+pt01$pstate2,lwd=1,col="blue") lines(pt01$time,pt01$pstate1+pt01$pstate2+pt01$pstate3,lwd=1,col="green") legend("topleft",c("event-free","AID","SI"),lwd = 2, col = c("red","blue","green"), bty = "n") #legend=c("SI","AIDS") #transition 1 event-free to AID 2 event-free to SI #check to see if we need trasition specific covariate coxph(Surv(time, status) ~ ccr5*factor(trans)+strata(trans), data = silong,method = "breslow") #significant interaction coxph(Surv(time, status) ~ ccr5WM.1+ccr5WM.2+strata(trans), data = silong,method = "breslow") coxph(Surv(time, status) ~ radom*factor(trans)+strata(trans), data = silong,method = "breslow") #random covariate not significant, interaction not significant coxph(Surv(time, status) ~ radom.1+radom.2+strata(trans), data = silong,method = "breslow") #full model cfull<-coxph(Surv(time, status) ~ ccr5WM.1+ccr5WM.2+radom.1+radom.2+strata(trans), data = silong,method = "breslow") silong[silong$id==1,] #prediction for a observation with WW feature WW<-data.frame(ccr5WM.1=c(0,0),ccr5WM.2=c(0,0),trans=c(1,2), strata=c(1,2),radom.1=c(0.5,0),radom.2=c(0,0.5)) msf.WW <- msfit(cfull, WW, trans = tmat) pt.WW <- probtrans(msf.WW, 0)[[1]] #prediction for a observation with WM feature WM<-data.frame(ccr5WM.1=c(1,0),ccr5WM.2=c(0,1),trans=c(1,2), strata=c(1,2),radom.1=c(0.5,0),radom.2=c(0,0.5)) msf.WM <- msfit(cfull, WM, trans = tmat) pt.WM <- probtrans(msf.WM, 0)[[1]] idx1 <- (pt.WW$time < 13) idx2 <- (pt.WM$time < 13) plot(c(0, pt.WW$time[idx1]), c(0, pt.WW$pstate2[idx1]), type = "s", ylim = c(0, 0.5), xlab = "Years from HIV infection", ylab = "Probability", lwd = 2,col="red") lines(c(0, pt.WM$time[idx2]), c(0, pt.WM$pstate2[idx2]), type = "s", lwd = 2, col = "blue") 
title(main = "AIDS") text(9.2, 0.345, "WW", adj = 0, cex = 0.75) text(9.2, 0.125, "WM", adj = 0, cex = 0.75) plot(c(0, pt.WW$time[idx1]), c(0, pt.WW$pstate3[idx1]), type = "s", ylim = c(0, 0.5), xlab = "Years from HIV infection", ylab = "Probability", lwd = 2,col="red") lines(c(0, pt.WM$time[idx2]), c(0, pt.WM$pstate3[idx2]), type = "s", lwd = 2, col = "blue") title(main = "SI appearance") text(7.5, 0.31, "WW", adj = 0, cex = 0.75) text(7.5, 0.245, "WM", adj = 0, cex = 0.75) #another way to compare there two observations ptWW<-probtrans(msf.WW,predt=0) ptWM<-probtrans(msf.WM,predt=0) summary(ptWW,from=1) statecols<-heat_hcl(3,c=c(80,30),l=c(30,90),power=c(1/5,2))[c(1,2,3)] ord<-c(1,2,3) par(mfrow = c(1,2)) plot(ptWW,ord=ord,xlab = "Years since transplantation", las = 1, ylim=c(-0.1,1.1),type = "filled", col = statecols[ord],main="WW") plot(ptWM,ord=ord,xlab = "Years since transplantation", las = 1, ylim=c(-0.1,1.1),type = "filled", col = statecols[ord],main="WM")
#' calculate p-value for accuracy using binomial test #' #' Calcualte p-value for the null accuracy = chance level #' @param predictions predicted values #' @param labels true values #' @param p proportion against which we are testing #' @keywords p values, statisticall signifficance, accuracy #' @export #' @examples #' p_val_acc(c(0,0,0,1,1,1), c(0, 0, 1, 0,1,1)) p_val_acc <- function(predictions, labels, p=0.5){ assert_values_only(predictions, c(0, 1)) assert_values_only(labels, c(0, 1)) successes <- sum(predictions == labels) binom <- stats::binom.test(successes, length(labels), p=p, alternative = 'greater') return(binom$p.value) } #' calculate p-value for the AUC statistics #' #' Calcualte p-value for the AUC statistics corresponding to null hypothesis AUC == 0 #' @param predictions predicted values #' @param labels true values #' @keywords p values, statisticall signifficance, auc #' @export #' @examples #' p_val_auc(c(0.3, 0.4, 0.6, 0.7), c(0, 0, 1, 1)) p_val_auc <- function(predictions, labels){ assert_values_only(labels, c(0,1)) # if all the predictions are same, just return p-value of 1 directly if (all_the_same(predictions)){ return(1) } wilcox <- stats::wilcox.test(predictions ~ labels, alternative='less') wilcox$p.value } #' null hypothesis signifficance permutation test for brier score #' #' Caluclates p value for brier score using a permutaiton test. 
Either with #' early stopping rules or with fixed number of permutations #' #' @param predictions probabilistic predictions between 0 and 1 #' @param labels true values coded as 0 and 1 #' @param method either 'earlystop' or 'fixedn' #' @param nperms maximum number of permutations performed #' @param earlystop_threshold p-value to consider for early stopping #' @param earlystop_error error we are willing to accept in return for early stopping #' #' @return p value #' @export #' #' @examples p_val_brier <- function(predictions, labels, method='earlystop', nperms=2000, earlystop_threshold=0.05, earlystop_error=0.001){ assert_values_only(labels, c(0,1)) assert_values_range(predictions, c(0, 1)) perm_test(predictions, labels, get_brier_score, method, nperms, earlystop_threshold, earlystop_error) } #' null hypothesis signifficance permutation test for logarithmic score #' #' Caluclates p value for logarighmic score using a permutaiton test. Either with #' early stopping rules or with fixed number of permutations #' #' @param predictions probabilistic predictions between 0 and 1 #' @param labels true values coded as 0 and 1 #' @param method either 'earlystop' or 'fixedn' #' @param nperms maximum number of permutations performed #' @param earlystop_threshold p-value to consider for early stopping #' @param earlystop_error error we are willing to accept in return for early stopping #' #' @return p value #' @export #' #' @examples p_val_logscore <- function(predictions, labels, method='earlystop', nperms=2000, earlystop_threshold=0.05, earlystop_error=0.001){ assert_values_only(labels, c(0,1)) assert_values_range(predictions, c(0, 1)) perm_test(predictions, labels, get_log_score, method, nperms, earlystop_threshold, earlystop_error) } #' performs permutation test either with early stopping or with a fixed number #' of permutations #' #' @param predictions #' @param labels #' @param perf_measure #' @param method #' @param nperms #' @param earlystop_threshold #' @param 
earlystop_error #' #' @return #' @export #' #' @examples perm_test <- function(predictions, labels, perf_measure, method, nperms, earlystop_threshold, earlystop_error){ assert_values_only(method, c('earlystop', 'fixedn')) # if all the predictions are same, just return p-value of 1 directly if (all_the_same(predictions)){ return(1) } if (method=='fixedn'){ return(perm_test_fixedn(predictions, labels, perf_measure, nperms)) } else if (method == 'earlystop') { return(perm_test_simctest(predictions, labels, perf_measure, nperms, earlystop_threshold, earlystop_error)) } } #' Perform permutation test for fixed number of permutations #' #' @param predictions #' @param labels #' @param perf_measure #' @param nperms #' #' @return #' @export #' #' @examples perm_test_fixedn <- function(predictions, labels, perf_measure, nperms){ observed_measure <- mean(perf_measure(predictions, labels)) null_distr <- list() for (i in 1:nperms){ null_distr[[i]] <- mean(perf_measure(sample(predictions), labels)) } rank <- sum(observed_measure > null_distr) + 1 pval <- rank / (nperms+1) return(pval) } #' perform permuation test with early stopping #' #' @param predictions #' @param labels #' @param perf_measure #' @param nperms #' @param earlystop_threshold #' @param earlystop_error #' #' @return #' @export #' #' @examples perm_test_simctest <- function(predictions, labels, perf_measure, nperms, earlystop_threshold, earlystop_error){ # library(simctest) observed <- mean(perf_measure(predictions, labels)) null_realization_generator <- function(){ observed >= mean(perf_measure(sample(predictions), labels)) } res <- simctest::simctest(null_realization_generator, level=earlystop_threshold, epsilon=earlystop_error, maxsteps=nperms) res@pos / res@steps # mctest.simctest(null_realization_generator, J=J) } #' get early-stopping limits for permutation test #' #' @param level #' @param epsilon #' @param maxsteps #' @param granurality #' #' @return #' @export #' #' @examples get_simctest_rejection_limits 
<- function(level=0.05, epsilon=1e-3, maxsteps=1e4, granurality=1000){ #TODO: make this analytically instead of empirically len <- granurality p_vals <- numeric(len) steps <- numeric(len) for (i in seq_len(len)){ p <- i/len binom_generator <- function(){ return(sample(c(0,1), size=1, replace = T, prob = c(1-p, p))) } res <- simctest::simctest(binom_generator, level=level, epsilon=epsilon, maxsteps = maxsteps) p_vals[i] <- res@pos / res@steps # to avoid NA when no decision steps[i] <- res@steps } p_vals_steps_limit <- approxfun(p_vals, steps) } #' computes mean sequentially #' #' @param previous_mean #' @param current_number #' @param i_current #' #' @return mean #' @export #' #' @examples #' x <- cbind(c(1, 0, 1, 1, 0, 0, 1), #' c(1, 1, 0, 1, 0, 1, 0)) #' seq_means <- list() #' expected_means <- list() #' for(i in seq_len(nrow(x))){ #' current_mean <- sequential_mean(current_mean, x[i,], i) #' seq_means[[i]] <- current_mean #' if(i==1){ #' expected_means[[i]] <- x[1,] #' next() #' } #' expected_means[[i]] <- colMeans(x[1:i,]) #' } #' seq_means <- do.call(rbind, seq_means) #' expected_means <- do.call(rbind, expected_means) #' seq_means #' expected_means sequential_mean <- function(previous_mean, current_number, i_current){ if(i_current == 1){ return(current_number) } return(previous_mean*(i_current-1)/i_current + current_number*(1/i_current)) }
/helpers/significance_functions.R
no_license
Nian-Jingqing/beyond-acc
R
false
false
7,320
r
#' calculate p-value for accuracy using binomial test #' #' Calcualte p-value for the null accuracy = chance level #' @param predictions predicted values #' @param labels true values #' @param p proportion against which we are testing #' @keywords p values, statisticall signifficance, accuracy #' @export #' @examples #' p_val_acc(c(0,0,0,1,1,1), c(0, 0, 1, 0,1,1)) p_val_acc <- function(predictions, labels, p=0.5){ assert_values_only(predictions, c(0, 1)) assert_values_only(labels, c(0, 1)) successes <- sum(predictions == labels) binom <- stats::binom.test(successes, length(labels), p=p, alternative = 'greater') return(binom$p.value) } #' calculate p-value for the AUC statistics #' #' Calcualte p-value for the AUC statistics corresponding to null hypothesis AUC == 0 #' @param predictions predicted values #' @param labels true values #' @keywords p values, statisticall signifficance, auc #' @export #' @examples #' p_val_auc(c(0.3, 0.4, 0.6, 0.7), c(0, 0, 1, 1)) p_val_auc <- function(predictions, labels){ assert_values_only(labels, c(0,1)) # if all the predictions are same, just return p-value of 1 directly if (all_the_same(predictions)){ return(1) } wilcox <- stats::wilcox.test(predictions ~ labels, alternative='less') wilcox$p.value } #' null hypothesis signifficance permutation test for brier score #' #' Caluclates p value for brier score using a permutaiton test. 
Either with #' early stopping rules or with fixed number of permutations #' #' @param predictions probabilistic predictions between 0 and 1 #' @param labels true values coded as 0 and 1 #' @param method either 'earlystop' or 'fixedn' #' @param nperms maximum number of permutations performed #' @param earlystop_threshold p-value to consider for early stopping #' @param earlystop_error error we are willing to accept in return for early stopping #' #' @return p value #' @export #' #' @examples p_val_brier <- function(predictions, labels, method='earlystop', nperms=2000, earlystop_threshold=0.05, earlystop_error=0.001){ assert_values_only(labels, c(0,1)) assert_values_range(predictions, c(0, 1)) perm_test(predictions, labels, get_brier_score, method, nperms, earlystop_threshold, earlystop_error) } #' null hypothesis signifficance permutation test for logarithmic score #' #' Caluclates p value for logarighmic score using a permutaiton test. Either with #' early stopping rules or with fixed number of permutations #' #' @param predictions probabilistic predictions between 0 and 1 #' @param labels true values coded as 0 and 1 #' @param method either 'earlystop' or 'fixedn' #' @param nperms maximum number of permutations performed #' @param earlystop_threshold p-value to consider for early stopping #' @param earlystop_error error we are willing to accept in return for early stopping #' #' @return p value #' @export #' #' @examples p_val_logscore <- function(predictions, labels, method='earlystop', nperms=2000, earlystop_threshold=0.05, earlystop_error=0.001){ assert_values_only(labels, c(0,1)) assert_values_range(predictions, c(0, 1)) perm_test(predictions, labels, get_log_score, method, nperms, earlystop_threshold, earlystop_error) } #' performs permutation test either with early stopping or with a fixed number #' of permutations #' #' @param predictions #' @param labels #' @param perf_measure #' @param method #' @param nperms #' @param earlystop_threshold #' @param 
earlystop_error #' #' @return #' @export #' #' @examples perm_test <- function(predictions, labels, perf_measure, method, nperms, earlystop_threshold, earlystop_error){ assert_values_only(method, c('earlystop', 'fixedn')) # if all the predictions are same, just return p-value of 1 directly if (all_the_same(predictions)){ return(1) } if (method=='fixedn'){ return(perm_test_fixedn(predictions, labels, perf_measure, nperms)) } else if (method == 'earlystop') { return(perm_test_simctest(predictions, labels, perf_measure, nperms, earlystop_threshold, earlystop_error)) } } #' Perform permutation test for fixed number of permutations #' #' @param predictions #' @param labels #' @param perf_measure #' @param nperms #' #' @return #' @export #' #' @examples perm_test_fixedn <- function(predictions, labels, perf_measure, nperms){ observed_measure <- mean(perf_measure(predictions, labels)) null_distr <- list() for (i in 1:nperms){ null_distr[[i]] <- mean(perf_measure(sample(predictions), labels)) } rank <- sum(observed_measure > null_distr) + 1 pval <- rank / (nperms+1) return(pval) } #' perform permuation test with early stopping #' #' @param predictions #' @param labels #' @param perf_measure #' @param nperms #' @param earlystop_threshold #' @param earlystop_error #' #' @return #' @export #' #' @examples perm_test_simctest <- function(predictions, labels, perf_measure, nperms, earlystop_threshold, earlystop_error){ # library(simctest) observed <- mean(perf_measure(predictions, labels)) null_realization_generator <- function(){ observed >= mean(perf_measure(sample(predictions), labels)) } res <- simctest::simctest(null_realization_generator, level=earlystop_threshold, epsilon=earlystop_error, maxsteps=nperms) res@pos / res@steps # mctest.simctest(null_realization_generator, J=J) } #' get early-stopping limits for permutation test #' #' @param level #' @param epsilon #' @param maxsteps #' @param granurality #' #' @return #' @export #' #' @examples get_simctest_rejection_limits 
<- function(level=0.05, epsilon=1e-3, maxsteps=1e4, granurality=1000){ #TODO: make this analytically instead of empirically len <- granurality p_vals <- numeric(len) steps <- numeric(len) for (i in seq_len(len)){ p <- i/len binom_generator <- function(){ return(sample(c(0,1), size=1, replace = T, prob = c(1-p, p))) } res <- simctest::simctest(binom_generator, level=level, epsilon=epsilon, maxsteps = maxsteps) p_vals[i] <- res@pos / res@steps # to avoid NA when no decision steps[i] <- res@steps } p_vals_steps_limit <- approxfun(p_vals, steps) } #' computes mean sequentially #' #' @param previous_mean #' @param current_number #' @param i_current #' #' @return mean #' @export #' #' @examples #' x <- cbind(c(1, 0, 1, 1, 0, 0, 1), #' c(1, 1, 0, 1, 0, 1, 0)) #' seq_means <- list() #' expected_means <- list() #' for(i in seq_len(nrow(x))){ #' current_mean <- sequential_mean(current_mean, x[i,], i) #' seq_means[[i]] <- current_mean #' if(i==1){ #' expected_means[[i]] <- x[1,] #' next() #' } #' expected_means[[i]] <- colMeans(x[1:i,]) #' } #' seq_means <- do.call(rbind, seq_means) #' expected_means <- do.call(rbind, expected_means) #' seq_means #' expected_means sequential_mean <- function(previous_mean, current_number, i_current){ if(i_current == 1){ return(current_number) } return(previous_mean*(i_current-1)/i_current + current_number*(1/i_current)) }
#' @include unitedSimOne.R #' @include unitedSimResults.R NULL ############################################### # --------------------------------------------# # unitedSim # # --------------------------------------------# ############################################### # -------------------------------------------- # Main Function for simulation line ups # -------------------------------------------- #' Simulating a formation #' #' Simulates a formation against another formations (several formations of away are possible). #' #' @inheritParams overview #' @param ... several objects of the class \code{formation} #' #' @return Creates an object of the \code{unitedSim} class. #' #' @seealso \code{\link{unitedSimOne}} #' #' @examples #' home <- formation(10, NA, c(7,5,3), c(8,8), c(10,10,8)) #' away <- formation(5, 8, c(8,8), c(10,10), c(10,10,10), #' hardness = c(0,0,0,0,1)) #' set.seed(123) #' unitedSim(home, away) #' # can also be simualated #' unitedSim(home, away, r = 100) #' # several away lineups #' unitedSim(home, away, away) #' # several away lineups simulated #' unitedSim(home, away, away, r = 100) #' # used hardness matrix (default) #' # shows the probability of receiving a specifed number of yellow cards #' # dependent on the used points of hardness #' dimNams <- list(paste(0:7, "cards"), paste(0:10, "hardness points")) #' (hardnessMatrix <- matrix(c(90,10,0,0,0,0,0,0, #' 70,30,0,0,0,0,0,0,50,40,10, #' 0,0,0,0,0,30,50,20,0,0,0,0,0,20,40,30,10,0,0, #' 0,0,10,30,40,20,0,0,0,0,0,20,40,30,10,0,0,0,0, #' 10,30,40,20,0,0,0,0,0,20,40,30,10,0,0,0,0,10,20, #' 40,20,10,0,0,0,0,10,40,20,20,10), nrow = 8, #' dimnames = dimNams)) #' #' #' @export unitedSim <- function(home, ..., r, penaltyProb = 0.1, preventGoalGK = 1/14, preventGoalSW = 1/15, hardnessMatrix, L, overtime = FALSE) { stopifnot(validObject(home), is(home, "formation")) ## set default value for hardness matrix if (missing(hardnessMatrix)) { hardnessMatrix <- 
matrix(c(90,10,0,0,0,0,0,0,70,30,0,0,0,0,0,0,50,40,10, 0,0,0,0,0,30,50,20,0,0,0,0,0,20,40,30,10,0,0, 0,0,10,30,40,20,0,0,0,0,0,20,40,30,10,0,0,0,0, 10,30,40,20,0,0,0,0,0,20,40,30,10,0,0,0,0,10,20, 40,20,10,0,0,0,0,10,40,20,20,10), nrow = 8) } else { stopifnot(is.matrix(hardnessMatrix)) } if (!missing(L)) { formations <- L } else { formations <- list(...) } if (!all(sapply(formations, function(x) is(x, "formation")))) stop("Not all ... objects of class formation.") if (missing(r)) { if (length(formations) == 1) { return(unitedSimOne(home, formations[[1]], penaltyProb = penaltyProb, preventGoalGK = preventGoalGK, preventGoalSW = preventGoalSW, hardnessMatrix = hardnessMatrix, overtime = overtime)) } else { games <- lapply(formations, function(formation) { unitedSimOne(home, formation, penaltyProb = penaltyProb, preventGoalGK = preventGoalGK, preventGoalSW = preventGoalSW, hardnessMatrix = hardnessMatrix, overtime = overtime) } ) } return(new("unitedSimResults", games = games)) } else { stopifnot(is.numeric(r), round(r) == r, length(r) == 1) if (length(formations) == 1) { return(unitedSimOne(home, formations[[1]], r = r, penaltyProb = penaltyProb, preventGoalGK = preventGoalGK, preventGoalSW = preventGoalSW, hardnessMatrix = hardnessMatrix, overtime = overtime)) } else { games <- lapply(formations, function(formation) { unitedSimOne(home, formation, r = r, penaltyProb = penaltyProb, preventGoalGK = preventGoalGK, preventGoalSW = preventGoalSW, hardnessMatrix = hardnessMatrix, overtime = overtime) } ) } return(new("unitedSimResults", games = games)) } }
/R/unitedSim.R
no_license
cran/unitedR
R
false
false
4,074
r
#' @include unitedSimOne.R #' @include unitedSimResults.R NULL ############################################### # --------------------------------------------# # unitedSim # # --------------------------------------------# ############################################### # -------------------------------------------- # Main Function for simulation line ups # -------------------------------------------- #' Simulating a formation #' #' Simulates a formation against another formations (several formations of away are possible). #' #' @inheritParams overview #' @param ... several objects of the class \code{formation} #' #' @return Creates an object of the \code{unitedSim} class. #' #' @seealso \code{\link{unitedSimOne}} #' #' @examples #' home <- formation(10, NA, c(7,5,3), c(8,8), c(10,10,8)) #' away <- formation(5, 8, c(8,8), c(10,10), c(10,10,10), #' hardness = c(0,0,0,0,1)) #' set.seed(123) #' unitedSim(home, away) #' # can also be simualated #' unitedSim(home, away, r = 100) #' # several away lineups #' unitedSim(home, away, away) #' # several away lineups simulated #' unitedSim(home, away, away, r = 100) #' # used hardness matrix (default) #' # shows the probability of receiving a specifed number of yellow cards #' # dependent on the used points of hardness #' dimNams <- list(paste(0:7, "cards"), paste(0:10, "hardness points")) #' (hardnessMatrix <- matrix(c(90,10,0,0,0,0,0,0, #' 70,30,0,0,0,0,0,0,50,40,10, #' 0,0,0,0,0,30,50,20,0,0,0,0,0,20,40,30,10,0,0, #' 0,0,10,30,40,20,0,0,0,0,0,20,40,30,10,0,0,0,0, #' 10,30,40,20,0,0,0,0,0,20,40,30,10,0,0,0,0,10,20, #' 40,20,10,0,0,0,0,10,40,20,20,10), nrow = 8, #' dimnames = dimNams)) #' #' #' @export unitedSim <- function(home, ..., r, penaltyProb = 0.1, preventGoalGK = 1/14, preventGoalSW = 1/15, hardnessMatrix, L, overtime = FALSE) { stopifnot(validObject(home), is(home, "formation")) ## set default value for hardness matrix if (missing(hardnessMatrix)) { hardnessMatrix <- 
matrix(c(90,10,0,0,0,0,0,0,70,30,0,0,0,0,0,0,50,40,10, 0,0,0,0,0,30,50,20,0,0,0,0,0,20,40,30,10,0,0, 0,0,10,30,40,20,0,0,0,0,0,20,40,30,10,0,0,0,0, 10,30,40,20,0,0,0,0,0,20,40,30,10,0,0,0,0,10,20, 40,20,10,0,0,0,0,10,40,20,20,10), nrow = 8) } else { stopifnot(is.matrix(hardnessMatrix)) } if (!missing(L)) { formations <- L } else { formations <- list(...) } if (!all(sapply(formations, function(x) is(x, "formation")))) stop("Not all ... objects of class formation.") if (missing(r)) { if (length(formations) == 1) { return(unitedSimOne(home, formations[[1]], penaltyProb = penaltyProb, preventGoalGK = preventGoalGK, preventGoalSW = preventGoalSW, hardnessMatrix = hardnessMatrix, overtime = overtime)) } else { games <- lapply(formations, function(formation) { unitedSimOne(home, formation, penaltyProb = penaltyProb, preventGoalGK = preventGoalGK, preventGoalSW = preventGoalSW, hardnessMatrix = hardnessMatrix, overtime = overtime) } ) } return(new("unitedSimResults", games = games)) } else { stopifnot(is.numeric(r), round(r) == r, length(r) == 1) if (length(formations) == 1) { return(unitedSimOne(home, formations[[1]], r = r, penaltyProb = penaltyProb, preventGoalGK = preventGoalGK, preventGoalSW = preventGoalSW, hardnessMatrix = hardnessMatrix, overtime = overtime)) } else { games <- lapply(formations, function(formation) { unitedSimOne(home, formation, r = r, penaltyProb = penaltyProb, preventGoalGK = preventGoalGK, preventGoalSW = preventGoalSW, hardnessMatrix = hardnessMatrix, overtime = overtime) } ) } return(new("unitedSimResults", games = games)) } }
# UI tab: incidents shown in context of external data sets (elections,
# population density, share of foreign residents, historical NSDAP vote).
# Each fluidRow has a narrow editable control column (left) and the
# rendered header/plot/description (right).
tabPanel(title = "Daten im Kontext", id = "contextualizeTab",
         value = "contextualizeTab", icon = icon("globe"),

         # election map start ################################################
         fluidRow(
           column(3,
                  # left: editable header, party selector, description
                  textAreaInput("context_map_header1", "Überschrift",
                                value = "Vorfälle im Kontext von Bundestagswahlergebnissen",
                                rows = 2, resize = "none") %>%
                    shiny::tagAppendAttributes(style = 'width: 100%;'),
                  #uiOutput("context_map_option1"),
                  selectInput('context_map_option1', 'Partei auswählen',
                              choices = NULL, multiple = FALSE),
                  textAreaInput("context_map_text1", "Beschreibung",
                                value = "Vorfälle rechter Gewalt zusammen mit Wahlergebnissen laut regionalstatistik.de",
                                rows = 7, resize = "none") %>%
                    shiny::tagAppendAttributes(style = 'width: 100%;')
           ),
           column(width = 9,
                  # right: rendered header, map and description
                  h3(textOutput("context_map_header1")),
                  plotOutput('context_map1', height = "500"),
                  p(textOutput("context_map_text1"))
           )),
         # election map end ##################################################

         # population map start ##############################################
         fluidRow(
           column(3,
                  textAreaInput("context_map_header2", "Überschrift",
                                value = "Wo gibt es rechte Gewalt, wo wohnen Menschen",
                                rows = 2, resize = "none") %>%
                    shiny::tagAppendAttributes(style = 'width: 100%;'),
                  textAreaInput("context_map_text2", "Beschreibung",
                                value = "Vorfälle rechter Gewalt zusammen mit Bevölkerungsdichte, Quelle: eurostat gisco",
                                rows = 7, resize = "none") %>%
                    shiny::tagAppendAttributes(style = 'width: 100%;')
           ),
           column(width = 9,
                  h3(textOutput("context_map_header2")),
                  plotOutput('context_map2', height = "600"),
                  p(textOutput("context_map_text2"))
           )),
         # population map end ################################################

         # foreigner map start ###############################################
         fluidRow(
           column(3,
                  # FIX: dropped stray `height = "600"` that was passed to
                  # tagAppendAttributes here (copy-paste artifact; `height`
                  # is not a valid textarea attribute and no sibling uses it)
                  textAreaInput("context_map_header3", "Überschrift",
                                value = "Vorfälle und Anteil ausländischer MitbürgerInnen",
                                rows = 2, resize = "none") %>%
                    shiny::tagAppendAttributes(style = 'width: 100%;'),
                  textAreaInput("context_map_text3", "Beschreibung",
                                value = "Vorfälle rechter Gewalt zusammen mit dem Anteil ausländischer MitbürgerInnen, Quelle: regionalstatistik.de",
                                rows = 7, resize = "none") %>%
                    shiny::tagAppendAttributes(style = 'width: 100%;')
           ),
           column(width = 9,
                  h3(textOutput("context_map_header3")),
                  plotOutput('context_map3', height = "500"),
                  p(textOutput("context_map_text3"))
           )),
         # foreigner map end #################################################

         # nsdap map start ###################################################
         fluidRow(
           column(3,
                  textAreaInput("context_map_header4", "Überschrift",
                                value = "Vorfälle im historischen Kontext",
                                rows = 2, resize = "none") %>%
                    shiny::tagAppendAttributes(style = 'width: 100%;'),
                  textAreaInput("context_map_text4", "Beschreibung",
                                value = "Vorfälle rechter Gewalt zusammen mit dem Anteil NSDAP Wählender 1933, Quelle: Falter et al 1992",
                                rows = 7, resize = "none") %>%
                    shiny::tagAppendAttributes(style = 'width: 100%;')
           ),
           column(width = 9,
                  h3(textOutput("context_map_header4")),
                  plotOutput('context_map4', height = "500"),
                  p(textOutput("context_map_text4"))
           )),
         # nsdap map end #####################################################
)
/R/tab-contextualize.R
permissive
prototypefund/rightwatching-shiny-app
R
false
false
5,970
r
# UI tab: incidents shown in context of external data sets (elections,
# population density, share of foreign residents, historical NSDAP vote).
# Each fluidRow has a narrow editable control column (left) and the
# rendered header/plot/description (right).
tabPanel(title = "Daten im Kontext", id = "contextualizeTab",
         value = "contextualizeTab", icon = icon("globe"),

         # election map start ################################################
         fluidRow(
           column(3,
                  # left: editable header, party selector, description
                  textAreaInput("context_map_header1", "Überschrift",
                                value = "Vorfälle im Kontext von Bundestagswahlergebnissen",
                                rows = 2, resize = "none") %>%
                    shiny::tagAppendAttributes(style = 'width: 100%;'),
                  #uiOutput("context_map_option1"),
                  selectInput('context_map_option1', 'Partei auswählen',
                              choices = NULL, multiple = FALSE),
                  textAreaInput("context_map_text1", "Beschreibung",
                                value = "Vorfälle rechter Gewalt zusammen mit Wahlergebnissen laut regionalstatistik.de",
                                rows = 7, resize = "none") %>%
                    shiny::tagAppendAttributes(style = 'width: 100%;')
           ),
           column(width = 9,
                  # right: rendered header, map and description
                  h3(textOutput("context_map_header1")),
                  plotOutput('context_map1', height = "500"),
                  p(textOutput("context_map_text1"))
           )),
         # election map end ##################################################

         # population map start ##############################################
         fluidRow(
           column(3,
                  textAreaInput("context_map_header2", "Überschrift",
                                value = "Wo gibt es rechte Gewalt, wo wohnen Menschen",
                                rows = 2, resize = "none") %>%
                    shiny::tagAppendAttributes(style = 'width: 100%;'),
                  textAreaInput("context_map_text2", "Beschreibung",
                                value = "Vorfälle rechter Gewalt zusammen mit Bevölkerungsdichte, Quelle: eurostat gisco",
                                rows = 7, resize = "none") %>%
                    shiny::tagAppendAttributes(style = 'width: 100%;')
           ),
           column(width = 9,
                  h3(textOutput("context_map_header2")),
                  plotOutput('context_map2', height = "600"),
                  p(textOutput("context_map_text2"))
           )),
         # population map end ################################################

         # foreigner map start ###############################################
         fluidRow(
           column(3,
                  # FIX: dropped stray `height = "600"` that was passed to
                  # tagAppendAttributes here (copy-paste artifact; `height`
                  # is not a valid textarea attribute and no sibling uses it)
                  textAreaInput("context_map_header3", "Überschrift",
                                value = "Vorfälle und Anteil ausländischer MitbürgerInnen",
                                rows = 2, resize = "none") %>%
                    shiny::tagAppendAttributes(style = 'width: 100%;'),
                  textAreaInput("context_map_text3", "Beschreibung",
                                value = "Vorfälle rechter Gewalt zusammen mit dem Anteil ausländischer MitbürgerInnen, Quelle: regionalstatistik.de",
                                rows = 7, resize = "none") %>%
                    shiny::tagAppendAttributes(style = 'width: 100%;')
           ),
           column(width = 9,
                  h3(textOutput("context_map_header3")),
                  plotOutput('context_map3', height = "500"),
                  p(textOutput("context_map_text3"))
           )),
         # foreigner map end #################################################

         # nsdap map start ###################################################
         fluidRow(
           column(3,
                  textAreaInput("context_map_header4", "Überschrift",
                                value = "Vorfälle im historischen Kontext",
                                rows = 2, resize = "none") %>%
                    shiny::tagAppendAttributes(style = 'width: 100%;'),
                  textAreaInput("context_map_text4", "Beschreibung",
                                value = "Vorfälle rechter Gewalt zusammen mit dem Anteil NSDAP Wählender 1933, Quelle: Falter et al 1992",
                                rows = 7, resize = "none") %>%
                    shiny::tagAppendAttributes(style = 'width: 100%;')
           ),
           column(width = 9,
                  h3(textOutput("context_map_header4")),
                  plotOutput('context_map4', height = "500"),
                  p(textOutput("context_map_text4"))
           )),
         # nsdap map end #####################################################
)
# Load prepData(), which reads and subsets the household power data set.
source("prepData.R")

# Write plot2.png (480x480): Global Active Power over time as a line chart.
# Side-effect only; no useful return value.
plot2 <- function() {
  powerData_subset <- prepData()
  png("plot2.png", width = 480, height = 480)
  # FIX: close the device via on.exit() so the png device is not left open
  # if plot() errors part-way through.
  on.exit(dev.off(), add = TRUE)
  plot(powerData_subset$Time, powerData_subset$Global_active_power,
       type = "l", xlab = "", ylab = "Global Active Power (kilowatts)")
}

plot2()
/plot2.R
no_license
jkewnani/ExData_Plotting1
R
false
false
279
r
# Load prepData(), which reads and subsets the household power data set.
source("prepData.R")

# Write plot2.png (480x480): Global Active Power over time as a line chart.
# Side-effect only; no useful return value.
plot2 <- function() {
  powerData_subset <- prepData()
  png("plot2.png", width = 480, height = 480)
  # FIX: close the device via on.exit() so the png device is not left open
  # if plot() errors part-way through.
  on.exit(dev.off(), add = TRUE)
  plot(powerData_subset$Time, powerData_subset$Global_active_power,
       type = "l", xlab = "", ylab = "Global Active Power (kilowatts)")
}

plot2()
# CAMPUS information ------------------------------------------------------
# importing campus-level data
# corresponding key: Reference - Campus Name, District Name, County Name, Region Number

campus_data_raw <- read_csv("CREF.csv")
campus_data <- campus_data_raw
head(campus_data)

# CAMPUS information:
## character
## two leading zeroes
# NOTE(review): this conversion already drops CAMPUS's leading zeroes, which
# makes the regex stripping below redundant for that column -- kept because
# the sub() pass also cleans the other ID columns; confirm before removing.
campus_data$CAMPUS <- as.numeric(campus_data$CAMPUS)

# strip leading zeroes from every column; the lookahead keeps a lone "0".
# sub() coerces each column to character and returns a plain list.
campus_data <- lapply(campus_data, function(y) sub('^0+(?=[1-9])', '', y, perl = TRUE))

# convert campus data to a tibble instead of a list
campus_data <- as_tibble(campus_data)

# convert CAMPUS column back to numeric (it became character above)
campus_data$CAMPUS <- as.numeric(campus_data$CAMPUS)

# select only the columns of interest
campus <- campus_data[c("CAMPUS", "DISTRICT", "CAMPNAME", "COUNTY", "DISTNAME",
                        "CAD_READ", "CAD_MATH", "CAD_GAP", "CAD_PROGRESS", "GRDTYPE")]
head(campus)

# APPROACHES grade level --------------------------------------------------
# corresponding keys:
## Campus STAAR Data - Approaches Grade Level or Above (Grades 3, 4, & 5) Primary Student Groups
## Campus STAAR Data - Approaches Grade Level or Above (Grades 6, 7, & 8) Primary Student Groups
## Campus STAAR Data - Approaches Grade Level or Above (Grades End of Course) Primary Student Groups

app_data_raw <- read_csv("CAMPSTAAR1.csv")
approaches <- app_data_raw

# converts data to numeric
# introduces NAs by coercion - this is OK, as it removes the "dots"
approaches[] <- lapply(approaches, function(x) as.numeric(as.character(x)))
head(approaches)

# CAMPUS information:
## character
## no leading zeroes
colnames(approaches)

# MEETS grade level -------------------------------------------------------
# corresponding key:
## Campus STAAR Data - Meets Grade Level (All Grades) Primary Student Groups

meet_data_raw <- read_csv("CAMPSTAAR2.csv")
meets <- meet_data_raw
meets[] <- lapply(meets, function(x) as.numeric(as.character(x)))
head(meets)

# CAMPUS information:
# numeric
# no leading zeroes

# MOBILITY data -----------------------------------------------------------
# mobility from CSTUD data set

mobility_raw <- read_csv("CSTUD.csv")
mobility <- mobility_raw

# converts data to numeric
mobility[] <- lapply(mobility, function(x) as.numeric(as.character(x)))
# mobility$CAMPUS <- as.character(mobility$CAMPUS)
head(mobility)

# CAMPUS information
# character
# no leading zeroes
# note - mobility reflects campus-level mobility!
colnames(mobility)

# keep the campus id plus the three mobility rates, renamed to readable names
mobility <- mobility[c("CAMPUS", "CPEMALLP", "CPETBILP", "CPETECOP")]
names(mobility) <- c("CAMPUS", "all_mobility", "bilingual_mobility", "low_ses_mobility")
/data_import.R
no_license
jrosen48/tapr_FY2018
R
false
false
2,648
r
# CAMPUS information ------------------------------------------------------
# importing campus-level data
# corresponding key: Reference - Campus Name, District Name, County Name, Region Number

campus_data_raw <- read_csv("CREF.csv")
campus_data <- campus_data_raw
head(campus_data)

# CAMPUS information:
## character
## two leading zeroes
# NOTE(review): this conversion already drops CAMPUS's leading zeroes, which
# makes the regex stripping below redundant for that column -- kept because
# the sub() pass also cleans the other ID columns; confirm before removing.
campus_data$CAMPUS <- as.numeric(campus_data$CAMPUS)

# strip leading zeroes from every column; the lookahead keeps a lone "0".
# sub() coerces each column to character and returns a plain list.
campus_data <- lapply(campus_data, function(y) sub('^0+(?=[1-9])', '', y, perl = TRUE))

# convert campus data to a tibble instead of a list
campus_data <- as_tibble(campus_data)

# convert CAMPUS column back to numeric (it became character above)
campus_data$CAMPUS <- as.numeric(campus_data$CAMPUS)

# select only the columns of interest
campus <- campus_data[c("CAMPUS", "DISTRICT", "CAMPNAME", "COUNTY", "DISTNAME",
                        "CAD_READ", "CAD_MATH", "CAD_GAP", "CAD_PROGRESS", "GRDTYPE")]
head(campus)

# APPROACHES grade level --------------------------------------------------
# corresponding keys:
## Campus STAAR Data - Approaches Grade Level or Above (Grades 3, 4, & 5) Primary Student Groups
## Campus STAAR Data - Approaches Grade Level or Above (Grades 6, 7, & 8) Primary Student Groups
## Campus STAAR Data - Approaches Grade Level or Above (Grades End of Course) Primary Student Groups

app_data_raw <- read_csv("CAMPSTAAR1.csv")
approaches <- app_data_raw

# converts data to numeric
# introduces NAs by coercion - this is OK, as it removes the "dots"
approaches[] <- lapply(approaches, function(x) as.numeric(as.character(x)))
head(approaches)

# CAMPUS information:
## character
## no leading zeroes
colnames(approaches)

# MEETS grade level -------------------------------------------------------
# corresponding key:
## Campus STAAR Data - Meets Grade Level (All Grades) Primary Student Groups

meet_data_raw <- read_csv("CAMPSTAAR2.csv")
meets <- meet_data_raw
meets[] <- lapply(meets, function(x) as.numeric(as.character(x)))
head(meets)

# CAMPUS information:
# numeric
# no leading zeroes

# MOBILITY data -----------------------------------------------------------
# mobility from CSTUD data set

mobility_raw <- read_csv("CSTUD.csv")
mobility <- mobility_raw

# converts data to numeric
mobility[] <- lapply(mobility, function(x) as.numeric(as.character(x)))
# mobility$CAMPUS <- as.character(mobility$CAMPUS)
head(mobility)

# CAMPUS information
# character
# no leading zeroes
# note - mobility reflects campus-level mobility!
colnames(mobility)

# keep the campus id plus the three mobility rates, renamed to readable names
mobility <- mobility[c("CAMPUS", "CPEMALLP", "CPETBILP", "CPETECOP")]
names(mobility) <- c("CAMPUS", "all_mobility", "bilingual_mobility", "low_ses_mobility")
# Raw measurements: 88 units x 7 inspection times, flattened row-major;
# NA marks time points at which a unit was not measured.
Data <- c(0.163, 0.153, 0.136, NA, 0.141, 0.145, 0.13,
          0.206, 0.186, 0.184, NA, 0.178, 0.185, 0.185,
          0.237, 0.22, 0.218, NA, 0.223, 0.235, 0.229,
          0.199, 0.194, 0.183, NA, 0.182, 0.185, 0.182,
          NA, NA, NA, 0.148, 0.159, 0.149, 0.138,
          NA, NA, NA, 0.168, 0.169, 0.17, 0.167,
          NA, NA, NA, 0.144, 0.147, 0.151, 0.14,
          NA, NA, NA, 0.153, 0.155, 0.159, 0.149,
          NA, NA, NA, 0.158, 0.168, 0.165, 0.155,
          NA, NA, NA, 0.177, 0.185, 0.181, 0.179,
          NA, NA, NA, 0.143, 0.147, 0.145, 0.141,
          NA, NA, NA, 0.144, 0.155, 0.155, 0.15,
          NA, NA, NA, 0.18, 0.181, 0.181, 0.179,
          NA, NA, NA, 0.189, 0.19, 0.19, 0.188,
          NA, NA, NA, 0.208, 0.208, 0.207, 0.205,
          NA, NA, NA, 0.196, 0.195, 0.192, 0.189,
          NA, NA, NA, 0.183, 0.183, 0.183, 0.171,
          NA, NA, NA, 0.21, 0.208, 0.192, 0.187,
          NA, NA, NA, 0.168, 0.16, 0.166, 0.151,
          NA, NA, NA, 0.188, 0.188, 0.187, 0.179,
          NA, NA, NA, 0.19, 0.189, 0.191, 0.18,
          NA, NA, NA, 0.185, 0.182, 0.185, 0.185,
          NA, NA, NA, 0.182, 0.182, 0.186, 0.172,
          NA, NA, NA, 0.232, 0.232, 0.23, 0.223,
          NA, NA, 0.203, NA, 0.199, 0.193, 0.187,
          NA, NA, 0.206, NA, 0.207, 0.205, 0.199,
          NA, NA, 0.22, NA, 0.219, 0.212, 0.212,
          NA, NA, 0.249, NA, 0.238, 0.241, 0.233,
          NA, NA, 0.208, NA, 0.204, 0.199, 0.192,
          NA, NA, 0.202, NA, 0.198, 0.203, 0.193,
          NA, NA, 0.21, NA, 0.215, 0.214, 0.214,
          NA, NA, 0.225, NA, 0.218, 0.218, 0.211,
          NA, NA, 0.209, NA, 0.199, 0.201, 0.183,
          NA, NA, 0.239, NA, 0.239, 0.233, 0.222,
          NA, NA, 0.23, NA, 0.239, 0.238, 0.235,
          NA, NA, 0.24, NA, 0.228, 0.23, 0.223,
          NA, NA, 0.205, NA, 0.209, 0.205, 0.198,
          NA, NA, 0.228, NA, 0.233, 0.233, 0.23,
          NA, NA, 0.214, NA, 0.214, 0.214, 0.21,
          NA, NA, 0.22, NA, 0.22, 0.217, 0.212,
          0.246, 0.24, 0.243, NA, 0.241, 0.241, 0.236,
          0.213, 0.202, 0.205, NA, 0.206, 0.206, 0.202,
          0.248, 0.24, 0.215, NA, 0.215, 0.214, 0.212,
          0.238, 0.238, 0.241, NA, 0.243, 0.24, 0.235,
          NA, NA, 0.23, NA, 0.208, 0.205, 0.199,
          NA, NA, 0.231, NA, 0.226, 0.225, 0.221,
          NA, NA, 0.213, NA, 0.205, 0.206, 0.203,
          NA, NA, 0.226, NA, 0.224, 0.23, 0.222,
          NA, NA, 0.174, NA, 0.168, 0.159, 0.155,
          NA, NA, 0.241, NA, 0.23, 0.231, 0.227,
          NA, NA, 0.218, NA, 0.215, 0.213, 0.21,
          NA, NA, 0.237, NA, 0.23, 0.225, 0.227,
          NA, NA, 0.22, NA, 0.218, 0.213, 0.208,
          NA, NA, 0.195, NA, 0.195, 0.195, 0.193,
          NA, NA, 0.22, NA, 0.215, 0.215, 0.21,
          NA, NA, 0.215, NA, 0.212, 0.209, 0.209,
          NA, NA, 0.196, NA, 0.203, 0.198, 0.195,
          NA, NA, 0.232, NA, 0.229, 0.233, 0.23,
          NA, NA, 0.216, NA, 0.22, 0.218, 0.215,
          NA, NA, 0.241, NA, 0.236, 0.239, 0.233,
          NA, NA, 0.196, NA, 0.198, 0.194, 0.192,
          NA, NA, 0.214, NA, 0.21, 0.207, 0.203,
          NA, NA, 0.209, NA, 0.209, 0.209, 0.203,
          NA, NA, 0.216, NA, 0.21, 0.21, 0.207,
          NA, NA, 0.215, NA, 0.208, 0.212, 0.209,
          NA, NA, 0.22, NA, 0.222, 0.222, 0.218,
          NA, NA, 0.203, NA, 0.203, 0.205, 0.2,
          NA, NA, 0.223, NA, 0.223, 0.223, 0.218,
          NA, NA, 0.165, NA, 0.169, 0.163, 0.157,
          NA, NA, 0.226, NA, 0.227, 0.225, 0.22,
          NA, NA, 0.211, NA, 0.208, 0.21, 0.213,
          NA, NA, 0.244, NA, 0.246, 0.244, 0.24,
          NA, NA, 0.214, NA, 0.215, 0.222, 0.217,
          NA, NA, 0.219, NA, 0.218, 0.219, 0.215,
          NA, NA, 0.218, NA, 0.218, 0.218, 0.214,
          NA, NA, 0.208, NA, 0.208, 0.21, 0.217,
          NA, NA, 0.198, NA, 0.204, 0.202, 0.2,
          NA, NA, 0.212, NA, 0.212, 0.213, 0.21,
          NA, NA, 0.219, NA, 0.219, 0.219, 0.215,
          NA, NA, 0.23, NA, 0.232, 0.235, 0.235,
          NA, NA, 0.207, NA, 0.211, 0.209, 0.205,
          NA, NA, 0.222, NA, 0.221, 0.225, 0.221,
          NA, NA, 0.224, NA, 0.22, 0.218, 0.215,
          NA, NA, 0.204, NA, 0.206, 0.214, 0.214,
          0.188, 0.186, 0.188, NA, 0.191, 0.2, 0.197,
          0.234, 0.234, 0.229, NA, 0.226, 0.217, 0.215,
          0.216, 0.215, 0.213, NA, 0.215, 0.225, 0.221,
          0.241, 0.238, 0.236, NA, 0.234, 0.234, 0.23)
Data <- matrix(Data, nrow = 88, byrow = TRUE)

# total number of observed (non-NA) cells
N <- nrow(Data) * ncol(Data) - sum(is.na(Data))

# Flatten the matrix to long format for Stan:
# jj = row (unit) index, kk = column (time) index, y = observed value.
# Vectors are preallocated; `count` tracks the next free slot.
jj <- numeric(N)
kk <- numeric(N)
y  <- numeric(N)
count <- 0
for (i in seq_len(nrow(Data))) {
  for (j in seq_len(ncol(Data))) {
    if (!is.na(Data[i, j])) {
      count <- count + 1
      jj[count] <- i
      kk[count] <- j
      y[count]  <- Data[i, j]
    }
  }
}

# inspection times; presumably days since a common origin -- TODO confirm
Time <- c(9172, 10997, 11453, 11515, 11613, 11779, 12072)

F3_data <- list(N = N, m = 88, n = 7, Y = y, jj = jj, kk = kk, t = Time)

library(rstan)
rstan_options(auto_write = TRUE)
options(mc.cores = parallel::detectCores())

# NOTE(review): absolute, user-specific path -- breaks on any other machine;
# prefer a path relative to the project root.
fit1 <- stan(file = "/Users/pro/Projects/Pipelines/Simple/Simple_Model1.stan",
             data = F3_data, iter = 10000,
             control = list(max_treedepth = 15))
summary(fit1)$summary[1:3, ]
/Simple/Simple_Model1_Drive.r
no_license
ISUCyclone/Pipelines
R
false
false
4,705
r
# Raw measurements: 88 units x 7 inspection times, flattened row-major;
# NA marks time points at which a unit was not measured.
Data <- c(0.163, 0.153, 0.136, NA, 0.141, 0.145, 0.13,
          0.206, 0.186, 0.184, NA, 0.178, 0.185, 0.185,
          0.237, 0.22, 0.218, NA, 0.223, 0.235, 0.229,
          0.199, 0.194, 0.183, NA, 0.182, 0.185, 0.182,
          NA, NA, NA, 0.148, 0.159, 0.149, 0.138,
          NA, NA, NA, 0.168, 0.169, 0.17, 0.167,
          NA, NA, NA, 0.144, 0.147, 0.151, 0.14,
          NA, NA, NA, 0.153, 0.155, 0.159, 0.149,
          NA, NA, NA, 0.158, 0.168, 0.165, 0.155,
          NA, NA, NA, 0.177, 0.185, 0.181, 0.179,
          NA, NA, NA, 0.143, 0.147, 0.145, 0.141,
          NA, NA, NA, 0.144, 0.155, 0.155, 0.15,
          NA, NA, NA, 0.18, 0.181, 0.181, 0.179,
          NA, NA, NA, 0.189, 0.19, 0.19, 0.188,
          NA, NA, NA, 0.208, 0.208, 0.207, 0.205,
          NA, NA, NA, 0.196, 0.195, 0.192, 0.189,
          NA, NA, NA, 0.183, 0.183, 0.183, 0.171,
          NA, NA, NA, 0.21, 0.208, 0.192, 0.187,
          NA, NA, NA, 0.168, 0.16, 0.166, 0.151,
          NA, NA, NA, 0.188, 0.188, 0.187, 0.179,
          NA, NA, NA, 0.19, 0.189, 0.191, 0.18,
          NA, NA, NA, 0.185, 0.182, 0.185, 0.185,
          NA, NA, NA, 0.182, 0.182, 0.186, 0.172,
          NA, NA, NA, 0.232, 0.232, 0.23, 0.223,
          NA, NA, 0.203, NA, 0.199, 0.193, 0.187,
          NA, NA, 0.206, NA, 0.207, 0.205, 0.199,
          NA, NA, 0.22, NA, 0.219, 0.212, 0.212,
          NA, NA, 0.249, NA, 0.238, 0.241, 0.233,
          NA, NA, 0.208, NA, 0.204, 0.199, 0.192,
          NA, NA, 0.202, NA, 0.198, 0.203, 0.193,
          NA, NA, 0.21, NA, 0.215, 0.214, 0.214,
          NA, NA, 0.225, NA, 0.218, 0.218, 0.211,
          NA, NA, 0.209, NA, 0.199, 0.201, 0.183,
          NA, NA, 0.239, NA, 0.239, 0.233, 0.222,
          NA, NA, 0.23, NA, 0.239, 0.238, 0.235,
          NA, NA, 0.24, NA, 0.228, 0.23, 0.223,
          NA, NA, 0.205, NA, 0.209, 0.205, 0.198,
          NA, NA, 0.228, NA, 0.233, 0.233, 0.23,
          NA, NA, 0.214, NA, 0.214, 0.214, 0.21,
          NA, NA, 0.22, NA, 0.22, 0.217, 0.212,
          0.246, 0.24, 0.243, NA, 0.241, 0.241, 0.236,
          0.213, 0.202, 0.205, NA, 0.206, 0.206, 0.202,
          0.248, 0.24, 0.215, NA, 0.215, 0.214, 0.212,
          0.238, 0.238, 0.241, NA, 0.243, 0.24, 0.235,
          NA, NA, 0.23, NA, 0.208, 0.205, 0.199,
          NA, NA, 0.231, NA, 0.226, 0.225, 0.221,
          NA, NA, 0.213, NA, 0.205, 0.206, 0.203,
          NA, NA, 0.226, NA, 0.224, 0.23, 0.222,
          NA, NA, 0.174, NA, 0.168, 0.159, 0.155,
          NA, NA, 0.241, NA, 0.23, 0.231, 0.227,
          NA, NA, 0.218, NA, 0.215, 0.213, 0.21,
          NA, NA, 0.237, NA, 0.23, 0.225, 0.227,
          NA, NA, 0.22, NA, 0.218, 0.213, 0.208,
          NA, NA, 0.195, NA, 0.195, 0.195, 0.193,
          NA, NA, 0.22, NA, 0.215, 0.215, 0.21,
          NA, NA, 0.215, NA, 0.212, 0.209, 0.209,
          NA, NA, 0.196, NA, 0.203, 0.198, 0.195,
          NA, NA, 0.232, NA, 0.229, 0.233, 0.23,
          NA, NA, 0.216, NA, 0.22, 0.218, 0.215,
          NA, NA, 0.241, NA, 0.236, 0.239, 0.233,
          NA, NA, 0.196, NA, 0.198, 0.194, 0.192,
          NA, NA, 0.214, NA, 0.21, 0.207, 0.203,
          NA, NA, 0.209, NA, 0.209, 0.209, 0.203,
          NA, NA, 0.216, NA, 0.21, 0.21, 0.207,
          NA, NA, 0.215, NA, 0.208, 0.212, 0.209,
          NA, NA, 0.22, NA, 0.222, 0.222, 0.218,
          NA, NA, 0.203, NA, 0.203, 0.205, 0.2,
          NA, NA, 0.223, NA, 0.223, 0.223, 0.218,
          NA, NA, 0.165, NA, 0.169, 0.163, 0.157,
          NA, NA, 0.226, NA, 0.227, 0.225, 0.22,
          NA, NA, 0.211, NA, 0.208, 0.21, 0.213,
          NA, NA, 0.244, NA, 0.246, 0.244, 0.24,
          NA, NA, 0.214, NA, 0.215, 0.222, 0.217,
          NA, NA, 0.219, NA, 0.218, 0.219, 0.215,
          NA, NA, 0.218, NA, 0.218, 0.218, 0.214,
          NA, NA, 0.208, NA, 0.208, 0.21, 0.217,
          NA, NA, 0.198, NA, 0.204, 0.202, 0.2,
          NA, NA, 0.212, NA, 0.212, 0.213, 0.21,
          NA, NA, 0.219, NA, 0.219, 0.219, 0.215,
          NA, NA, 0.23, NA, 0.232, 0.235, 0.235,
          NA, NA, 0.207, NA, 0.211, 0.209, 0.205,
          NA, NA, 0.222, NA, 0.221, 0.225, 0.221,
          NA, NA, 0.224, NA, 0.22, 0.218, 0.215,
          NA, NA, 0.204, NA, 0.206, 0.214, 0.214,
          0.188, 0.186, 0.188, NA, 0.191, 0.2, 0.197,
          0.234, 0.234, 0.229, NA, 0.226, 0.217, 0.215,
          0.216, 0.215, 0.213, NA, 0.215, 0.225, 0.221,
          0.241, 0.238, 0.236, NA, 0.234, 0.234, 0.23)
Data <- matrix(Data, nrow = 88, byrow = TRUE)

# total number of observed (non-NA) cells
N <- nrow(Data) * ncol(Data) - sum(is.na(Data))

# Flatten the matrix to long format for Stan:
# jj = row (unit) index, kk = column (time) index, y = observed value.
# Vectors are preallocated; `count` tracks the next free slot.
jj <- numeric(N)
kk <- numeric(N)
y  <- numeric(N)
count <- 0
for (i in seq_len(nrow(Data))) {
  for (j in seq_len(ncol(Data))) {
    if (!is.na(Data[i, j])) {
      count <- count + 1
      jj[count] <- i
      kk[count] <- j
      y[count]  <- Data[i, j]
    }
  }
}

# inspection times; presumably days since a common origin -- TODO confirm
Time <- c(9172, 10997, 11453, 11515, 11613, 11779, 12072)

F3_data <- list(N = N, m = 88, n = 7, Y = y, jj = jj, kk = kk, t = Time)

library(rstan)
rstan_options(auto_write = TRUE)
options(mc.cores = parallel::detectCores())

# NOTE(review): absolute, user-specific path -- breaks on any other machine;
# prefer a path relative to the project root.
fit1 <- stan(file = "/Users/pro/Projects/Pipelines/Simple/Simple_Model1.stan",
             data = F3_data, iter = 10000,
             control = list(max_treedepth = 15))
summary(fit1)$summary[1:3, ]
#' @title backSample
#'
#' @description Background sample selection.
#' @param x Object of class \emph{SpatialPoints} or \emph{SpatialPointsDataFrame}.
#' @param z Vector of region identifiers for each sample.
#' @param sampling.method One of \emph{random} or \emph{pca}. Default is \emph{random}.
#' @param y Object of class \emph{RasterLayer}, \emph{RasterStack} or \emph{RasterBrick}.
#' @param nr.samples Number of random background samples.
#' @importFrom raster cellFromXY xyFromCell crs ncell extract
#' @importFrom sp SpatialPoints SpatialPointsDataFrame
#' @importFrom stats complete.cases prcomp median
#' @references \href{10.1002/rse2.70}{Remelgado, R., Leutner, B., Safi, K., Sonnenschein, R., Kuebert, C. and Wegmann, M. (2017), Linking animal movement and remote sensing - mapping resource suitability from a remote sensing perspective. Remote Sens Ecol Conserv.}
#' @return A \emph{SpatialPoints} or a \emph{SpatialPointsDataFrame}.
#' @details {First, the function determines the unique pixel coordinates for \emph{x} based on the dimensions of \emph{y} and retrieves
#' \emph{n}, random background samples where \emph{n} is determined by \emph{nr.samples}. If \emph{sampling.method} is set to \emph{"random"},
#' the function will return the selected samples as a \emph{SpatialPoints} object. However, if \emph{sampling.method} is set to \emph{"pca"}, the
#' function performs a Principal Components Analysis (PCA) over \emph{y} to evaluate the similarity between the samples associated to \emph{x} and
#' the initial set of random samples. To achieve this, the function selects the most important Principal Components (PC's) using the kaiser rule
#' (i.e. PC's with eigenvalues greater than 1) and, for each PC, estimates the median and the Median Absolute Deviation (MAD) based on the samples
#' related to each unique identifier in \emph{z}. Based on this data, the function selects background samples where the difference between their
#' variance and the variance of the region samples exceeds the absolute difference between the median and the MAD. Finally, the algorithm filters out
#' all the background samples that were not selected by all sample regions. The output is a \emph{SpatialPointsDataFrame} containing the selected samples
#' and the corresponding \emph{y} values. If \emph{nr.samples} is not provided all background pixels are considered.}
#' @seealso \code{\link{labelSample}} \code{\link{hotMove}} \code{\link{dataQuery}}
#' @examples {
#'
#'  require(raster)
#'
#'  # read raster data
#'  file <- list.files(system.file('extdata', '', package="rsMove"), 'ndvi.tif', full.names=TRUE)
#'  r.stk <- stack(file)
#'
#'  # read movement data
#'  data(shortMove)
#'
#'  # find sample regions
#'  label <- labelSample(shortMove, 30, agg.radius=30, nr.pixels=2)
#'
#'  # select background samples (pca)
#'  bSamples <- backSample(shortMove, r.stk, label, sampling.method='pca')
#'
#'  # select background samples (random)
#'  bSamples <- backSample(shortMove, r.stk, sampling.method='random')
#'
#' }
#' @export

#-------------------------------------------------------------------------------------------------------------------------------#

backSample <- function(x, y, z, sampling.method="random", nr.samples=NULL) {

#-------------------------------------------------------------------------------------------------------------------------------#
# 1. check input variables
#-------------------------------------------------------------------------------------------------------------------------------#

  # when no region labels are given, treat all presences as one region
  if (missing(z)) {z <- rep(1, length(x))} else {
    if (length(z) != length(x)) {stop('"x" and "z" have different lengths')}}
  if (!inherits(x, c('SpatialPoints', 'SpatialPointsDataFrame'))) {stop('"x" is not of a valid class')}
  if (is.null(crs(x)@projargs)) {stop('"x" is missing a valid projection')}
  if (!sampling.method %in% c('random', 'pca')) {stop('"sampling.method" is not a valid keyword')}
  if (!inherits(y, c('RasterLayer', 'RasterStack', 'RasterBrick'))) {stop('"y" is not of a valid class')}
  if (crs(x)@projargs != crs(y)@projargs) {stop('"x" and "y" have different projections')}
  np <- ncell(y[[1]]) # number of pixels
  op <- crs(y) # output projection
  if (!is.null(nr.samples)) {if (!is.numeric(nr.samples) | length(nr.samples) != 1) {stop('"nr.samples" is not a valid input')}}

#-------------------------------------------------------------------------------------------------------------------------------#
# 2. extract random background samples
#-------------------------------------------------------------------------------------------------------------------------------#

  # convert presences to pixel positions
  sp <- cellFromXY(y[[1]], x)

  # remove duplicated records and records with missing labels
  dr <- !duplicated(sp) & !is.na(z)
  sp <- sp[dr]
  z <- z[dr]

  # derive background samples: every pixel not occupied by a presence
  ind <- which(!seq_len(np) %in% sp)
  # NOTE(review): replace=TRUE can pick the same background cell more than
  # once -- confirm whether sampling without replacement was intended
  if (!is.null(nr.samples)) {ind <- ind[sample(length(ind), nr.samples, replace=TRUE)]}
  x <- rbind(xyFromCell(y[[1]], sp), xyFromCell(y[[1]], ind))
  z <- c(z, rep(0, length(ind))) # background samples get label 0
  rm(sp, ind)

#-------------------------------------------------------------------------------------------------------------------------------#
# 3. select background samples
#-------------------------------------------------------------------------------------------------------------------------------#

  if (sampling.method == 'pca') {

    # extract environmental information
    y <- as.data.frame(extract(y, x))
    cc <- complete.cases(y) # index to remove NA's
    y <- y[cc,]
    z <- z[cc]
    x <- x[cc,]

    # kaiser rule: keep components whose variance (eigenvalue) exceeds 1
    pcf <- function(x) {which((x$sdev^2) > 1)}

    # estimate pca and apply kaiser rule
    pca <- prcomp(y, scale=TRUE, center=TRUE)
    npc <- pcf(pca)
    pca <- data.frame(pca$x[,npc])

    # select absences: for each retained PC, keep background samples that lie
    # further than one MAD from every region's median score
    uv <- unique(z[which(z > 0)])
    i0 <- which(z == 0)
    ai <- vector('list', ncol(pca))
    for (p in seq_along(npc)) {
      usr <- vector('list', length(uv))
      for (j in seq_along(uv)) {
        ri <- which(z == uv[j])
        s1 <- median(pca[ri,p])           # region median on PC p
        s2 <- median(abs(pca[ri,p] - s1)) # region MAD on PC p
        usr[[j]] <- i0[which(abs(pca[i0,p] - s1) > s2)]
      }
      usr <- unlist(usr)
      ui <- unique(usr)
      count <- vector('numeric', length(ui))
      for (j in seq_along(ui)) {count[j] <- length(which(usr == ui[j]))}
      ai[[p]] <- ui[which(count == length(uv))] # keep cells flagged by every region
    }

    # return samples together with their environmental values
    ai <- unique(unlist(ai))
    return(SpatialPointsDataFrame(x[ai,], as.data.frame(y[ai,]), proj4string=op))

  }

  if (sampling.method == 'random') {

    # return background samples only
    ind <- which(z == 0)
    return(SpatialPoints(x[ind,], proj4string=op))

  }

}
/R/backSample.R
no_license
sonthuybacha/rsMove
R
false
false
6,689
r
#' @title backSample
#'
#' @description Background sample selection.
#' @param x Object of class \emph{SpatialPoints} or \emph{SpatialPointsDataFrame}.
#' @param z Vector of region identifiers for each sample.
#' @param sampling.method One of \emph{random} or \emph{pca}. Default is \emph{random}.
#' @param y Object of class \emph{RasterLayer}, \emph{RasterStack} or \emph{RasterBrick}.
#' @param nr.samples Number of random background samples.
#' @importFrom raster cellFromXY xyFromCell crs ncell extract
#' @importFrom sp SpatialPoints SpatialPointsDataFrame
#' @importFrom stats complete.cases prcomp median
#' @references \href{10.1002/rse2.70}{Remelgado, R., Leutner, B., Safi, K., Sonnenschein, R., Kuebert, C. and Wegmann, M. (2017), Linking animal movement and remote sensing - mapping resource suitability from a remote sensing perspective. Remote Sens Ecol Conserv.}
#' @return A \emph{SpatialPoints} or a \emph{SpatialPointsDataFrame}.
#' @details {First, the function determines the unique pixel coordinates for \emph{x} based on the dimensions of \emph{y} and retrieves
#' \emph{n}, random background samples where \emph{n} is determined by \emph{nr.samples}. If \emph{sampling.method} is set to \emph{"random"},
#' the function will return the selected samples as a \emph{SpatialPoints} object. However, if \emph{sampling.method} is set to \emph{"pca"}, the
#' function performs a Principal Components Analysis (PCA) over \emph{y} to evaluate the similarity between the samples associated to \emph{x} and
#' the initial set of random samples. To achieve this, the function selects the most important Principal Components (PC's) using the kaiser rule
#' (i.e. PC's with eigenvalues greater than 1) and, for each PC, estimates the median and the Median Absolute Deviation (MAD) based on the samples
#' related to each unique identifier in \emph{z}. Based on this data, the function selects background samples where the difference between their
#' variance and the variance of the region samples exceeds the absolute difference between the median and the MAD. Finally, the algorithm filters out
#' all the background samples that were not selected by all sample regions. The output is a \emph{SpatialPointsDataFrame} containing the selected samples
#' and the corresponding \emph{y} values. If \emph{nr.samples} is not provided all background pixels are considered.}
#' @seealso \code{\link{labelSample}} \code{\link{hotMove}} \code{\link{dataQuery}}
#' @examples {
#'
#'  require(raster)
#'
#'  # read raster data
#'  file <- list.files(system.file('extdata', '', package="rsMove"), 'ndvi.tif', full.names=TRUE)
#'  r.stk <- stack(file)
#'
#'  # read movement data
#'  data(shortMove)
#'
#'  # find sample regions
#'  label <- labelSample(shortMove, 30, agg.radius=30, nr.pixels=2)
#'
#'  # select background samples (pca)
#'  bSamples <- backSample(shortMove, r.stk, label, sampling.method='pca')
#'
#'  # select background samples (random)
#'  bSamples <- backSample(shortMove, r.stk, sampling.method='random')
#'
#' }
#' @export

#-------------------------------------------------------------------------------------------------------------------------------#

backSample <- function(x, y, z, sampling.method="random", nr.samples=NULL) {

#-------------------------------------------------------------------------------------------------------------------------------#
# 1. check input variables
#-------------------------------------------------------------------------------------------------------------------------------#

  # when no region labels are given, treat all presences as one region
  if (missing(z)) {z <- rep(1, length(x))} else {
    if (length(z) != length(x)) {stop('"x" and "z" have different lengths')}}
  if (!inherits(x, c('SpatialPoints', 'SpatialPointsDataFrame'))) {stop('"x" is not of a valid class')}
  if (is.null(crs(x)@projargs)) {stop('"x" is missing a valid projection')}
  if (!sampling.method %in% c('random', 'pca')) {stop('"sampling.method" is not a valid keyword')}
  if (!inherits(y, c('RasterLayer', 'RasterStack', 'RasterBrick'))) {stop('"y" is not of a valid class')}
  if (crs(x)@projargs != crs(y)@projargs) {stop('"x" and "y" have different projections')}
  np <- ncell(y[[1]]) # number of pixels
  op <- crs(y) # output projection
  if (!is.null(nr.samples)) {if (!is.numeric(nr.samples) | length(nr.samples) != 1) {stop('"nr.samples" is not a valid input')}}

#-------------------------------------------------------------------------------------------------------------------------------#
# 2. extract random background samples
#-------------------------------------------------------------------------------------------------------------------------------#

  # convert presences to pixel positions
  sp <- cellFromXY(y[[1]], x)

  # remove duplicated records and records with missing labels
  dr <- !duplicated(sp) & !is.na(z)
  sp <- sp[dr]
  z <- z[dr]

  # derive background samples: every pixel not occupied by a presence
  ind <- which(!seq_len(np) %in% sp)
  # NOTE(review): replace=TRUE can pick the same background cell more than
  # once -- confirm whether sampling without replacement was intended
  if (!is.null(nr.samples)) {ind <- ind[sample(length(ind), nr.samples, replace=TRUE)]}
  x <- rbind(xyFromCell(y[[1]], sp), xyFromCell(y[[1]], ind))
  z <- c(z, rep(0, length(ind))) # background samples get label 0
  rm(sp, ind)

#-------------------------------------------------------------------------------------------------------------------------------#
# 3. select background samples
#-------------------------------------------------------------------------------------------------------------------------------#

  if (sampling.method == 'pca') {

    # extract environmental information
    y <- as.data.frame(extract(y, x))
    cc <- complete.cases(y) # index to remove NA's
    y <- y[cc,]
    z <- z[cc]
    x <- x[cc,]

    # kaiser rule: keep components whose variance (eigenvalue) exceeds 1
    pcf <- function(x) {which((x$sdev^2) > 1)}

    # estimate pca and apply kaiser rule
    pca <- prcomp(y, scale=TRUE, center=TRUE)
    npc <- pcf(pca)
    pca <- data.frame(pca$x[,npc])

    # select absences: for each retained PC, keep background samples that lie
    # further than one MAD from every region's median score
    uv <- unique(z[which(z > 0)])
    i0 <- which(z == 0)
    ai <- vector('list', ncol(pca))
    for (p in seq_along(npc)) {
      usr <- vector('list', length(uv))
      for (j in seq_along(uv)) {
        ri <- which(z == uv[j])
        s1 <- median(pca[ri,p])           # region median on PC p
        s2 <- median(abs(pca[ri,p] - s1)) # region MAD on PC p
        usr[[j]] <- i0[which(abs(pca[i0,p] - s1) > s2)]
      }
      usr <- unlist(usr)
      ui <- unique(usr)
      count <- vector('numeric', length(ui))
      for (j in seq_along(ui)) {count[j] <- length(which(usr == ui[j]))}
      ai[[p]] <- ui[which(count == length(uv))] # keep cells flagged by every region
    }

    # return samples together with their environmental values
    ai <- unique(unlist(ai))
    return(SpatialPointsDataFrame(x[ai,], as.data.frame(y[ai,]), proj4string=op))

  }

  if (sampling.method == 'random') {

    # return background samples only
    ind <- which(z == 0)
    return(SpatialPoints(x[ind,], proj4string=op))

  }

}
# --- Setup -------------------------------------------------------------------
# NOTE(review): install.packages() calls inside an analysis script re-install on
# every run; consider moving installation out of the script.
# NOTE(review): install.packages('CRAN') looks like a mistake -- 'CRAN' is a
# repository, not a package.
install.packages(c("FactoMineR", "factoextra"))
library("FactoMineR")
library("factoextra")
install.packages('corrplot')
library("corrplot")
library(gplots)
library(tidyverse)
library(lme4)
library(lmerTest)
library(wesanderson)
library(reshape)
install.packages('ggmap')
library(ggmap)
install.packages('CRAN')
library("rjson")
library(maps)
install.packages("RJSONIO")
library(RJSONIO)
install.packages("googleway")
library("googleway")
install.packages("emmeans")
library(emmeans)
library(multcomp)
#citation("ggmap")

# All the amounts are umol/mg.
# --- Load data ---------------------------------------------------------------
# Accession details and the emmeans table; both are keyed by the "CS" column
# (stock-center accession id).
setwd("C:/Users/Ella Katz/Desktop/01 - Post Doc/Projects/Sarah Turner/Analyses")
details <- read.table(file="GSLs profiles by weight.csv", header=T, sep=",")
setwd("C:/Users/Ella Katz/Desktop/01 - Post Doc/Projects/Sarah Turner/Analyses/Stat/emmeans")
data <- read.table(file="Emmeans_data.csv", header=T, sep=",")
#na.omit(data)
data[is.na(data)] <- 0  # treat missing GSL amounts as zero
colnames(details)
details_1 <- details[,c(6:10)]  # keep only the accession-detail columns used below
Total_data <- merge(details_1, data, by="CS")
Total_data <- droplevels.data.frame(Total_data)
Total_data_1 <- unique(Total_data)
Total_data_2 <-Total_data_1[, -c(6,7,31:33)]
write.csv(Total_data_2, file = "Total_data.csv")

# --- Calculating the traits based on the emmeans -----------------------------
# Starting to classify the accessions by chemotypes.
colnames(data)
data[is.na(data)] <- 0
Data_02<-merge(details_1, data, by="CS")
Data_02<-droplevels.data.frame(Data_02)
Data_03<- unique(Data_02)
# C3 / C4: totals of the short-chain GSLs with 3- and 4-carbon side chains.
Data_01 <- mutate(Data_03, C3= (X3OHP + X3MSO + Allyl + X3MT), C4= (X4OHB + OHBut + X4MSO + But + X4MT))
Data_01$Total_GSL <- rowSums(Data_01[,c(4:26)])
# Derived chemotype ratios; most are relative to the short-chain total (C3+C4),
# lc/sc are relative to the overall GSL total.
Data_1 <- mutate(Data_01, C3ratio_emmeans= C3/(C3+C4),
                 Alk_emmeans=(OHBut+ Allyl +But)/(C3+C4),
                 OH_emmeans=(X3OHP+X4OHB)/(C3+C4),
                 MSO_emmeans=(X3MSO+X4MSO)/(C3+C4),
                 GSOH_emmeans=OHBut/(OHBut+But),
                 lc=(X7MT+X8MT+ X7MSO+X8MSO+X5MSO+X6MSO+X8MTder)/Total_GSL,
                 sc=(C3+C4)/Total_GSL,
                 GSOX_emmeans=(X3MT+X4MT)/(C3+C4),
                 Alk_OH_ratio=Alk_emmeans/(OH_emmeans+Alk_emmeans))
# Flag the three reference accessions (Col, Ler, Cvi); everything else is
# labelled "Accession".
Data_1$Ref <- NA
Data_1$Ref[which(Data_1$CS == "CS76778")] <- "Col"
Data_1$Ref[which(Data_1$CS == "CS77021")] <- "Ler"
Data_1$Ref[which(Data_1$CS == "CS76789")] <- "Cvi"
Data_1$Ref[is.na(Data_1$Ref)] <- "Accession"
Data_1[is.na(Data_1)] <- 0

# --- Excluding accessions ----------------------------------------------------
# Drop non-European countries, then a hand-curated list of individual lines.
Data_1<-Data_1[!(Data_1$Country=="AFG"),]
Data_1<-Data_1[!(Data_1$Country=="USA"),]
Data_1<-Data_1[!(Data_1$Country=="CAN"),]
Data_1<-Data_1[!(Data_1$Country=="CHN"),]
Data_1<-Data_1[!(Data_1$Country=="CPV"),]
Data_1<-Data_1[!(Data_1$Country=="UZB"),]
Data_1<-Data_1[!(Data_1$Country=="TJK"),]
Data_1<-Data_1[!(Data_1$Country=="IND"),]
Data_1<-Data_1[!(Data_1$Country=="JPN"),]
Data_1<-Data_1[!(Data_1$Country=="KAZ"),]
Data_1<-Data_1[!(Data_1$Country=="KGZ"),]
Data_1<-Data_1[!(Data_1$Country=="MAR"),]
Data_1<-Data_1[!(Data_1$Name == "Bijisk-4"),]
Data_1<-Data_1[!(Data_1$Name == "Kolyv-2"),]
Data_1<-Data_1[!(Data_1$Name == "Kolyv-3"),]
Data_1<-Data_1[!(Data_1$Name == "Kly-1"),]
Data_1<-Data_1[!(Data_1$Name == "Kly-4"),]
Data_1<-Data_1[!(Data_1$Name == "Kolyv-5"),]
Data_1<-Data_1[!(Data_1$Name == "Kolyv-6"),]
Data_1<-Data_1[!(Data_1$Name == "Koz-2"),]
Data_1<-Data_1[!(Data_1$Name == "K-oze-1"),]
Data_1<-Data_1[!(Data_1$Name == "K-oze-3"),]
Data_1<-Data_1[!(Data_1$Name == "Masl-1"),]
Data_1<-Data_1[!(Data_1$Name == "Noveg-3"),]
Data_1<-Data_1[!(Data_1$Name == "Noveg-2"),]
Data_1<-Data_1[!(Data_1$Name == "Noveg-1"),]
Data_1<-Data_1[!(Data_1$Name == "Lebja-1"),]
Data_1<-Data_1[!(Data_1$Name == "Nosov-1"),]
Data_1<-Data_1[!(Data_1$Name == "Panke-1"),]
Data_1<-Data_1[!(Data_1$Name == "Rakit-1"),]
Data_1<-Data_1[!(Data_1$Name == "Rakit-3"),]
Data_1<-Data_1[!(Data_1$Name == "Basta-1"),]
Data_1<-Data_1[!(Data_1$Name == "Basta-2"),]
Data_1<-Data_1[!(Data_1$Name == "Basta-3"),]
Data_1<-Data_1[!(Data_1$Name == "Chaba-2"),]
Data_1<-Data_1[!(Data_1$Name == "Sever-1"),]
Data_1<-Data_1[!(Data_1$Name == "Balan-1"),]
Data_1<-Data_1[!(Data_1$Name == "Valm"),]
Data_1<-Data_1[!(Data_1$Name == "Stepn-1"),]
Data_1<-Data_1[!(Data_1$Name == "Stepn-2"),]
Data_1<-Data_1[!(Data_1$Name == "Adam-1"),]
Data_1<-Data_1[!(Data_1$Name == "Karag-1"),]
Data_1<-Data_1[!(Data_1$Name == "Karag-2"),]
Data_1<-Data_1[!(Data_1$Name == "Kidr-1"),]
Data_1<-Data_1[!(Data_1$Name == "Per-1"),]
Data_1<-Data_1[!(Data_1$Name == "Pi-0"),]

# --- Chemotyping -------------------------------------------------------------
# Classifying to Alk / OH / MSO by thresholds on the emmeans-derived ratios.
Data_1$AOP <- NA
Data_1$AOP[which(Data_1$Alk_emmeans < 0.4 & Data_1$OH_emmeans > 0.55 &Data_1$MSO_emmeans<0.1)] <- "OH"
Data_1$AOP[which(Data_1$Alk_emmeans > 0.1 & Data_1$OH_emmeans < 0.55 )] <- "Alk"
Data_1$AOP[which(Data_1$Alk_emmeans < 0.1& Data_1$OH_emmeans < 0.55)] <- "MSO"
# Manual corrections (hand-curated overrides of the thresholds above):
Data_1$AOP[which(Data_1$CS == "CS76357")] <- "Alk"
Data_1$AOP[which(Data_1$CS == "CS78767")] <- "Alk"
Data_1$AOP[which(Data_1$CS == "CS77281")] <- "Alk"
Data_1$AOP[which(Data_1$CS == "CS76729")] <- "AOP23"
Data_1$AOP[which(Data_1$CS == "CS77277")] <- "AOP23"
Data_1$AOP[which(Data_1$CS == "CS76653")] <- "AOP23"
Data_1$AOP[which(Data_1$CS == "CS76935")] <- "AOP23"
Data_1$AOP[which(Data_1$CS == "CS77299")] <- "AOP23"
Data_1$AOP[which(Data_1$CS == "CS76389")] <- "AOP23"
Data_1$AOP[which(Data_1$CS == "CS77164")] <- "AOP23"
Data_1$AOP[which(Data_1$CS == "CS76437")] <- "AOP23"
Data_1$AOP[which(Data_1$CS == "CS76445")] <- "AOP23"
Data_1$AOP[which(Data_1$CS == "CS76680")] <- "AOP23"
Data_1$AOP[which(Data_1$CS == "CS77356")] <- "AOP23"
Data_1$AOP[which(Data_1$CS == "CS77139")] <- "AOP23"
Data_1$AOP[which(Data_1$CS == "CS76934")] <- "AOP23"
Data_1$AOP[which(Data_1$CS == "CS76808")] <- "AOP23"
Data_1$AOP[which(Data_1$CS == "CS76987")] <- "AOP23"
Data_1$AOP[which(Data_1$CS == "CS78778")] <- "AOP23"
Data_1$AOP[which(Data_1$CS == "CS77224")] <- "AOP23"
Data_1$AOP[which(Data_1$CS == "CS76580")] <- "AOP23"
Data_1$AOP[which(Data_1$CS == "CS76515")] <- "AOP23"
Data_1$AOP[which(Data_1$CS == "CS77038")] <- "AOP23"

# --- MAM classification ------------------------------------------------------
# Split accessions into C3 vs C4 dominance by the C3/(C3+C4) ratio.
qplot(Data_1$C3ratio_emmeans, geom="histogram") +theme_bw()
ggplot(data=Data_1, aes(Data_1$C3ratio_emmeans)) + geom_histogram()+theme_bw()+
  ylab("# of accessions") + xlab("C3/C4 ratio")
colnames(Data_1)
Data_1$Elong<-cut(Data_1$C3ratio_emmeans, c(-0.1, 0.5, 1.1))
Data_1$Elong <- Data_1$Elong  # NOTE(review): no-op assignment
levels(Data_1$Elong) <- c("C4", "C3")

# --- GSOH --------------------------------------------------------------------
colnames(Data_1)
qplot(Data_1$GSOH_emmeans, geom="histogram") +theme_bw()
ggplot(data=Data_1, aes(Data_1$GSOH_emmeans)) + geom_histogram()+theme_bw()+
  ylab("# of accessions") + xlab("OH_Butenyl ratio")
# NOTE(review): Data_1$GSOH relies on partial matching of "$" -- presumably it
# resolves to GSOH_emmeans here; verify, since $ partial matching is fragile.
Data_1$GSOH_1<-cut(Data_1$GSOH, c(-0.1, 0.1, 2))
Data_1$ GSOH_1<- as.factor(Data_1$ GSOH_1)
levels(Data_1$GSOH_1) <- c("NF", "F")  # non-functional / functional GSOH

# Composite key Elong_AOP_GSOH -> numeric class -> human-readable chemotype.
Data_1$Key <- as.factor(paste(Data_1$Elong, Data_1$AOP, Data_1$GSOH_1, sep="_"))
Data_1$Classification <- Data_1$Key
nlevels(Data_1$Key)
levels(Data_1$Classification) <- c(1,2,3,4,5,6,7,8,9,10,11,12,13)
Data_1$Classification_name <- Data_1$Classification
# Divisions by (this order): MAM (3/4), AOP(Alk/MSO/OH), GSOH(+/-)
levels(Data_1$Classification_name) <- c("Allyl", "Allyl", "?", "?", "3MSO", "3MSO", "3OHP", "3OHP", "OH-But", "Butenyl", "4MSO", "4MSO", "4OHB")
Data_1<-Data_1[!(Data_1$Classification_name == "?"),]  # drop unclassifiable combinations
setwd("C:/Users/Ella Katz/Desktop/01 - Post Doc/Projects/Sarah Turner/Analyses/PCA")
write.csv(Data_1, file = "Master_GSL.csv")
Data_1 <- read.table(file="Master_GSL.csv", header=T, sep=",")

# --- PCA with traits calculated by the emmeans -------------------------------
colnames(Data_1)
data_PCA_3 <- Data_1[,c(33:37,40)]
res.pca <- prcomp(data_PCA_3, center=T, scale=T)
PCscores <- as.data.frame(res.pca$x)
data_PC_3 <- cbind(Data_1, PCscores)
ggplot(data_PC_3, aes(PC1, PC2, color=Classification_name))+ geom_point()+theme_bw()+
  ggtitle("Principal Component Analysis, by classifications") +
  scale_color_manual(values = c("orange", "lightblue", "purple", "Yellow", "red", "blue", "green", "purple"))
# Interactive 3D view of the first three PCs.
plot_ly(data_PC_3, x = ~PC1, y = ~PC2, z = ~PC3, color = ~ Classification_name,
        symbol = ~Ref, symbols = c('circle','x','x','x'), size=10, alpha = 0.8)%>%
  add_markers() %>%
  layout(scene = list(xaxis = list(title = 'PC1'),
                      yaxis = list(title = 'PC2'),
                      zaxis = list(title = 'PC3'))) %>%
  add_text(data_PC_3, x = ~PC1, y = ~PC2, z = ~PC3)

# Creating the PCA barplot (scree plot, percent variance per PC):
pca.var <- res.pca$sdev^2
pca.var.per <- round(pca.var/sum(pca.var)*100, 1)
barplot(pca.var.per, main="Scree Plot - traits emmeans", xlab="Principal Component", ylab="Percent Variation")

# The effect of each phenotype:
fviz_pca_var(res.pca, col.var = "contrib", # Color by contributions to the PC
             gradient.cols = c("#00AFBB", "#E7B800", "#FC4E07"),
             repel = TRUE # Avoid text overlapping
)

# Contribution of variables to Dim 1:
var<-get_pca_var(res.pca)
v<-var$coord
v
fviz_contrib(res.pca, choice="var", axes=1, top=10, title="Contribution of variables - traits emmeans")

Data_2 <- cbind(Data_1, PCscores)
Data_3 <- merge(Data_2, details_1, by="CS")
Data_3 <- droplevels.data.frame(Data_3)
Data_3 <- unique(Data_3)
ggplot(Data_3, aes(PC1, PC2, color=Classification_name))+ geom_point()+theme_bw()+
  ggtitle("Principal Component Analysis")+
  scale_color_manual(values = c("orange", "lightblue", "purple", "Yellow", "red", "blue", "green", "purple"))

# --- PCA on GSLs (raw compound columns) --------------------------------------
# NOTE(review): res.pca / PCscores are overwritten here; order of execution matters.
colnames(Data_1)
data_PCA_1 <- Data_1[,c(9:31)]
res.pca <- prcomp(data_PCA_1, center=T, scale=T)
PCscores <- as.data.frame(res.pca$x)
data_PC_1 <- cbind(Data_1, PCscores)
colnames(data_PC_1)
ggplot(data_PC_1, aes(PC1, PC2, color=Ref, shape=Ref))+ geom_point()+theme_bw()+
  ggtitle("Principal Component Analysis") +
  scale_color_manual(values = c("grey", "black", "black", "black"))
setwd("C:/Users/Ella Katz/Desktop/01 - Post Doc/Projects/Sarah Turner/Analyses/PCA/traits, PCA and map")
ggsave("PCA.png", dpi=300)

# Creating the PCA barplot:
pca.var <- res.pca$sdev^2
pca.var.per <- round(pca.var/sum(pca.var)*100, 1)
view(pca.var.per)
barplot(pca.var.per, main="Scree Plot - GSLs", xlab="Principal Component", ylab="Percent Variation",
        names.arg = c("1", "2", "3", "4", "5", "6", "7", "8", "9", "10", "11", "12", "13", "14", "15", "16", "17", "18", "19", "20", "21", "22", "23"))

# The effect of each phenotype:
fviz_pca_var(res.pca, col.var = "contrib", # Color by contributions to the PC
             gradient.cols = c("#00AFBB", "#E7B800", "#FC4E07"),
             repel = TRUE # Avoid text overlapping
)

# Contribution of variables to Dim 1:
# Named color vectors highlight C3 compounds (blue) and C4 compounds (red).
cols <- c("X7MT"="lightgrey", "X4OHB"="red", "7MSO"="lightgrey", "6MSO"="lightgrey","OHBut"="red","4MT"="red",
          "X3OHP"="blue", "Branched"="lightgrey", Allyl="blue","5MSO"="lightgrey", "3MT"="blue", But="red",
          "X4MSO"="red", BZO="lightgrey", "3MSO"= "blue", OHPentenyl= "lightgrey", "8MSO"="lightgrey","X8MT"="lightgrey",
          "X4OHI3M"="lightgrey","X4MOI3M"="lightgrey")
cols1 <- c("lightgrey", "lightgrey","lightgrey","lightgrey", "red", "blue", "red", "lightgrey","lightgrey","lightgrey","blue", "red", "lightgrey","blue", "lightgrey","lightgrey","lightgrey","blue", "lightgrey","lightgrey")
var<-get_pca_var(res.pca)
v<-var$coord
v
fviz_contrib(res.pca, choice="var", axes=1, top=20, title="Contribution of variables - PC1", fill=cols, color = "black")+
  theme(text = element_text(size = 14), axis.text = element_text(size = 14), axis.title = element_text(size = 14))
fviz_contrib(res.pca, choice="var", axes=2, top=20, title="Contribution of variables - PC2", fill=cols1, color = "black")+
  theme(text = element_text(size = 14), axis.text = element_text(size = 14), axis.title = element_text(size = 14))
colnames(data_PC_1)
ggplot(data_PC_1, aes(PC1, PC2, color=Classification_name))+ geom_point()+theme_bw()+
  ggtitle("Principal Component Analysis")+
  scale_color_manual(values = c("orange", "lightblue", "purple", "Yellow", "red", "blue", "green", "purple"))

Data_2 <- merge(data_PC_1, details_1, by="CS")
Data_2 <- droplevels.data.frame(Data_2)
Data_2 <- unique(Data_2)
colnames(Data_2)
setwd("C:/Users/Ella Katz/Desktop/01 - Post Doc/Projects/Sarah Turner/Analyses/PCA/Phylogeny")
write.csv(data_PC_1, file = "Data_PCA.csv")

# --- GSOH map ----------------------------------------------------------------
# Rows 347 and 167 are dropped by position; split the remaining accessions by
# chain-length class into separate data frames in the global environment.
colnames(Data_1)
Data_3<- Data_1[c(-347,-167),c(2:6,48:53)]
Data_3$Elong <- as.factor(Data_3$Elong)
Data_3_Elong <- split(Data_3, Data_3$Elong)
list2env(Data_3_Elong, envir=.GlobalEnv)  # creates objects C3 and C4
rm(Data_3_Elong)
# Split the C4 accessions by chemotype name into separate data frames in the
# global environment, then pool the Butenyl and OH-But groups for the GSOH map.
Data_C3 <- droplevels.data.frame(C3)
Data_C4 <- droplevels.data.frame(C4)
Data_C4$Classification_name <- as.factor(Data_C4$Classification_name)
Data_C4_Classification_name <- split(Data_C4, Data_C4$Classification_name)
list2env(Data_C4_Classification_name, envir=.GlobalEnv)
Butenyl <- droplevels.data.frame(Butenyl)
# fix: "OH-But" is a non-syntactic name; without backticks the original line
# parsed as the arithmetic expression OH - But and failed at run time.
`OH-But` <- droplevels.data.frame(`OH-But`)
Data_5<-rbind(Butenyl, `OH-But`)

# --- Maps --------------------------------------------------------------------
# Google Maps style (JSON) that hides labels/POIs/transit and recolors
# landscape and water for a clean background map.
style1<-'[
  {
    "featureType": "administrative",
    "elementType": "all",
    "stylers": [ { "visibility": "off" } ]
  },
  {
    "featureType": "administrative",
    "elementType": "labels",
    "stylers": [ { "visibility": "off" } ]
  },
  {
    "featureType": "administrative",
    "elementType": "labels.text.fill",
    "stylers": [ { "color": "#444444" } ]
  },
  {
    "featureType": "landscape",
    "elementType": "all",
    "stylers": [ { "color": "#f2f2f2" } ]
  },
  {
    "featureType": "poi",
    "elementType": "all",
    "stylers": [ { "visibility": "off" } ]
  },
  {
    "featureType": "road",
    "elementType": "all",
    "stylers": [ { "saturation": -100 }, { "lightness": 45 } ]
  },
  {
    "featureType": "road.highway",
    "elementType": "all",
    "stylers": [ { "visibility": "simplified" } ]
  },
  {
    "featureType": "road.arterial",
    "elementType": "labels.icon",
    "stylers": [ { "visibility": "off" } ]
  },
  {
    "featureType": "transit",
    "elementType": "all",
    "stylers": [ { "visibility": "off" } ]
  },
  {
    "featureType": "water",
    "elementType": "all",
    "stylers": [ { "color": "#5d637d" }, { "visibility": "on" } ]
  }
]'
#46bcec old ocean color
style_list<-fromJSON(style1)

# Convert the parsed style list into the "feature:...|key:value&style=..."
# string format expected by the Google Static Maps API.
create_style_string<- function(style_list){
  style_string <- ""
  for(i in seq_along(style_list)){
    if("featureType" %in% names(style_list[[i]])){
      style_string <- paste0(style_string, "feature:", style_list[[i]]$featureType, "|")
    }
    elements <- style_list[[i]]$stylers
    a <- lapply(elements, function(x)paste0(names(x), ":", x)) %>% unlist() %>% paste0(collapse="|")
    style_string <- paste0(style_string, a)
    if(i < length(style_list)){
      style_string <- paste0(style_string, "&style=")
    }
  }
  # google wants 0xff0000 not #ff0000
  style_string <- gsub("#", "0x", style_string)
  return(style_string)
}
style_string <- create_style_string(style_list)
mymap<-ggmap(get_googlemap(c(lon = 18, lat = 52),zoom=4, xlim=c(-10,72), ylim=c(30, 80), style=style_string), extent="device")

colnames(data_PC_1)
test<-data_PC_1[,c(2,54)]
test1<-data_PC_1[c(-347,-167),]  # same two rows dropped as for Data_3 above

# Map of PC1 scores across the collection range.
print(mymap)+
  geom_point(data=test1, aes(x=Long, y=Lat, color=PC1), alpha = 7/10, size=3) +
  ggtitle('') + xlab('Longitude') + ylab('Latitude') +
  theme(text = element_text(size=16, face="bold")) +
  theme(axis.text=element_text(size=16,color='black'), axis.title=element_text(size=18,color='black')) +
  theme(plot.title = element_text(size = 26, vjust=1)) +
  theme(plot.title = element_text(size = 18,face="bold")) +
  theme(legend.key = element_rect(fill = "white")) +
  theme(legend.text = element_text(colour="black", size=16))+
  theme(legend.title = element_text(colour="black", size=18, face="bold"))+
  theme(legend.position = c(0.045,0.889))+
  scale_colour_gradient(low = "yellow", high = "red")
ggsave("C:/Users/Ella Katz/Desktop/01 - Post Doc/Projects/Sarah Turner/Analyses/Manuscript/Figures/More plots/PC1.jpeg", dpi = 600)

# Do PC scores vary with geography?
lm_PC1 <- lm(data=data_PC_1, PC1 ~ Lat + Long+ Lat*Long)
anova(lm_PC1)
lm_PC2 <- lm(data=data_PC_1, PC2 ~ Lat + Long+ Lat*Long)
anova(lm_PC2)

# Map of chemotype classes.
print(mymap)+
  geom_point(data=test1, aes(x=Long, y=Lat, color=Classification_name), alpha = 7/10, size=3) +
  ggtitle('') + xlab('Longitude') + ylab('Latitude') +
  theme(text = element_text(size=16, face="bold")) +
  theme(axis.text=element_text(size=16,color='black'), axis.title=element_text(size=18,color='black')) +
  theme(plot.title = element_text(size = 26, vjust=1)) +
  theme(plot.title = element_text(size = 18,face="bold")) +
  theme(legend.key = element_rect(fill = "white")) +
  theme(legend.text = element_text(colour="black", size=16))+
  theme(legend.title = element_text(colour="black", size=18, face="bold"))+
  scale_color_manual(values= c("yellow", "green2","magenta", "skyblue", "blue", "black", "red3"))
ggsave("C:/Users/Ella Katz/Desktop/01 - Post Doc/Projects/Sarah Turner/Analyses/Manuscript/Figures/More plots/Cheotypes.jpeg", dpi = 600)

# Map of GSOH functionality for the pooled Butenyl / OH-But accessions.
print(mymap)+
  geom_point(data=Data_5, aes(x=Long, y=Lat, color=GSOH_1), alpha = 7/10, size=3) +
  ggtitle('') + xlab('Longitude') + ylab('Latitude') +
  theme(text = element_text(size=16, face="bold")) +
  theme(axis.text=element_text(size=16,color='black'), axis.title.x=element_text(size=16,color='black'), axis.title.y=element_text(size=16,color='black', hjust=0.22)) +
  theme(plot.title = element_text(size = 26, vjust=1)) +
  theme(plot.title = element_text(size = 18,face="bold")) +
  theme(legend.key = element_rect(fill = "white")) +
  theme(legend.text = element_text(colour="black", size=16))+
  theme(legend.title = element_text(colour="black", size=16, face="bold"))+
  theme(legend.position = c(0.04,0.95))+
  labs(colour="GSOH")+
  scale_color_manual(values = c("sienna2", "black"))
ggsave("C:/Users/Ella Katz/Desktop/01 - Post Doc/Projects/Sarah Turner/Analyses/Manuscript/Figures/More plots/GSOH.jpeg", dpi = 600)

# Map of AOP classes.
print(mymap)+
  geom_point(data=test1, aes(x=Long, y=Lat, color=AOP), alpha = 7/10, size=3) +
  ggtitle('') + xlab('Longitude') + ylab('Latitude') +
  theme(text = element_text(size=16, face="bold")) +
  theme(axis.text=element_text(size=16,color='black'), axis.title.x=element_text(size=16,color='black'), axis.title.y=element_text(size=16,color='black', hjust=0.22)) +
  theme(plot.title = element_text(size = 26, vjust=1)) +
  theme(plot.title = element_text(size = 18,face="bold")) +
  theme(legend.key = element_rect(fill = "white")) +
  theme(legend.text = element_text(colour="black", size=16))+
  theme(legend.title = element_text(colour="black", size=16, face="bold"))+
  theme(legend.position = c(0.067,0.93))+
  scale_color_manual(values = c("magenta","green3", "yellow"))
ggsave("C:/Users/Ella Katz/Desktop/01 - Post Doc/Projects/Sarah Turner/Analyses/Manuscript/Figures/More plots/AOP.jpeg", dpi = 600)

# Map of chain-length (Elong) classes.
print(mymap)+
  geom_point(data=test1, aes(x=Long, y=Lat, color=Elong), alpha = 7/10, size=3) +
  ggtitle('') + xlab('Longitude') + ylab('Latitude') +
  theme(text = element_text(size=16, face="bold")) +
  theme(axis.text=element_text(size=16,color='black'), axis.title.x=element_text(size=16,color='black'), axis.title.y=element_text(size=16,color='black', hjust=0.22)) +
  theme(plot.title = element_text(size = 26, vjust=1)) +
  theme(plot.title = element_text(size = 18,face="bold")) +
  theme(legend.key = element_rect(fill = "white")) +
  theme(legend.text = element_text(colour="black", size=16))+
  theme(legend.title = element_text(colour="black", size=16, face="bold"))+
  theme(legend.position = c(0.09,0.94))+
  labs(colour="Chain length")+
  scale_color_manual(values = c("blue","red"))
ggsave("C:/Users/Ella Katz/Desktop/01 - Post Doc/Projects/Sarah Turner/Analyses/Manuscript/Figures/More plots/Elong.jpeg", dpi = 600)

# NOTE(review): the next three lines build scale objects whose results are
# discarded (not added to any plot); they are no-ops.
scale_color_gradientn(colours = rainbow(9))
scale_colour_gradient(low = "blue", high = "red")
scale_colour_gradientn(colours = terrain.colors(10))

# --- Grouping by the mountains -----------------------------------------------
# Countries south of the main mountain ranges are labelled "South"; the rest
# default to "North".
test1$Mountain <- NA
test1$Mountain[which(test1$Country == "ESP")] <- "South"
test1$Mountain[which(test1$Country == "POR")] <- "South"
test1$Mountain[which(test1$Country == "ITA")] <- "South"
test1$Mountain[which(test1$Country == "GRC")] <- "South"
test1$Mountain[which(test1$Country == "BUL")] <- "South"
test1$Mountain[which(test1$Country == "CRO")] <- "South"
test1$Mountain[which(test1$Country == "SRB")] <- "South"
test1$Mountain[which(test1$Country == "ROU")] <- "South"
test1$Mountain[which(test1$Country == "SVK")] <- "South"
test1$Mountain[which(test1$Country == "LBN")] <- "South"
test1$Mountain[which(test1$Country == "GEO")] <- "South"
test1$Mountain[which(test1$Country == "ARM")] <- "South"
test1$Mountain[which(test1$Country == "Gr-5")] <- "South"
test1$Mountain[which(test1$Country == "Gr-1")] <- "South"
test1$Mountain[is.na(test1$Mountain)] <- "North"
print(mymap)+
  geom_point(data=test1, aes(x=Long, y=Lat, color=Mountain), alpha = 7/10, size=3) +
  ggtitle('') + xlab('Longitude') + ylab('Latitude') +
  theme(text = element_text(size=16, face="bold")) +
  theme(axis.text=element_text(size=16,color='black'), axis.title=element_text(size=16,color='black')) +
  theme(plot.title = element_text(size = 26, vjust=1)) +
  theme(plot.title = element_text(size = 18,face="bold")) +
  theme(legend.key = element_rect(fill = "white")) +
  theme(legend.text = element_text(colour="black", size=16))+
  theme(legend.title = element_text(colour="black", size=16, face="bold"))+
  theme(legend.position = c(0.075,0.938))+
  labs(colour="Geography")+
  scale_color_manual(values = c("chartreuse4", "deeppink2"))
ggsave("C:/Users/Ella Katz/Desktop/01 - Post Doc/Projects/Sarah Turner/Analyses/Manuscript/Figures/More plots/Geography.jpeg", dpi = 600)

# NOTE(review): SECURITY -- a Google Maps API key is hard-coded below (several
# calls). It should be revoked and supplied via an environment variable.
basemap <- get_googlemap(c(lon = 17, lat = 52),zoom=4, xlim=c(-22,60), ylim=c(30, 80),
                         #maptype = 'hybrid',
                         #maptype = 'terrain',
                         #maptype = 'satellite',
                         maptype = 'roadmap',
                         color="bw",
                         key="AIzaSyAYVck1dRnEJY0Sfzsb9i5K9gWqlwExITI")
ggmap(basemap) +
  geom_point(data=Data_3, aes(x=Long, y=Lat, color=Classification_name), alpha = 5/10) +
  ggtitle('') + xlab('') + ylab('') +
  theme(text = element_text(size=18)) +
  theme(axis.text=element_text(size=8,color='black'), axis.title=element_text(size=20,color='black')) +
  theme(plot.title = element_text(size = 26, vjust=1)) +
  theme(plot.title = element_text(size = 18,face="bold")) +
  theme(legend.key = element_rect(fill = "white")) +
  theme(legend.text = element_text(colour="black", size=8))+
  theme(legend.title = element_text(colour="black", size=10, face="bold"))+
  scale_color_manual(values = c("yellow", "blue", "lightblue", "orange", "red", "turquoise", "green", "purple"))
ggmap(basemap) +
  geom_point(data=Data_3, aes(x=Long, y=Lat, color=PC1), alpha = 5/10) +
  ggtitle('') + xlab('') + ylab('') +
  theme(text = element_text(size=18)) +
  theme(axis.text=element_text(size=8,color='black'), axis.title=element_text(size=20,color='black')) +
  theme(plot.title = element_text(size = 26, vjust=1)) +
  theme(plot.title = element_text(size = 18,face="bold")) +
  theme(legend.key = element_rect(fill = "white")) +
  theme(legend.text = element_text(colour="black", size=8))+
  theme(legend.title = element_text(colour="black", size=10, face="bold"))+
  scale_color_gradientn(colours = rainbow(5))

basemap <- get_googlemap(c(lon = 35, lat = 40),zoom=3, xlim=c(-22,78), ylim=c(24, 80),
                         #maptype = 'hybrid',
                         #maptype = 'terrain',
                         #maptype = 'satellite',
                         maptype = 'roadmap',
                         color="bw",
                         key="AIzaSyAYVck1dRnEJY0Sfzsb9i5K9gWqlwExITI")
basemap <- get_googlemap(c(lon = 18, lat = 52),zoom=4, xlim=c(-10,72), ylim=c(30, 80),
                         #maptype = 'hybrid',
                         #maptype = 'terrain',
                         #maptype = 'satellite',
                         maptype = 'roadmap',
                         key="AIzaSyAYVck1dRnEJY0Sfzsb9i5K9gWqlwExITI")
ggmap(basemap) +
  geom_point(data=Data_3, aes(x=Long, y=Lat, color=Classification_name), alpha = 5/10) +
  ggtitle('') + xlab('') + ylab('') +
  theme(text = element_text(size=18)) +
  theme(axis.text=element_text(size=8,color='black'), axis.title=element_text(size=20,color='black')) +
  theme(plot.title = element_text(size = 26, vjust=1)) +
  theme(plot.title = element_text(size = 18,face="bold")) +
  theme(legend.key = element_rect(fill = "white")) +
  theme(legend.text = element_text(colour="black", size=8))+
  theme(legend.title = element_text(colour="black", size=10, face="bold"))+
  scale_color_manual(values = c("yellow", "blue", "lightblue", "orange", "red", "turquoise", "green", "purple"))

# --- Customized map ----------------------------------------------------------
#https://snazzymaps.com/editor
#https://mapstyle.withgoogle.com/
# Same style JSON as earlier but with the original ocean color (#46bcec).
style1<-'[
  {
    "featureType": "administrative",
    "elementType": "all",
    "stylers": [ { "visibility": "off" } ]
  },
  {
    "featureType": "administrative",
    "elementType": "labels",
    "stylers": [ { "visibility": "off" } ]
  },
  {
    "featureType": "administrative",
    "elementType": "labels.text.fill",
    "stylers": [ { "color": "#444444" } ]
  },
  {
    "featureType": "landscape",
    "elementType": "all",
    "stylers": [ { "color": "#f2f2f2" } ]
  },
  {
    "featureType": "poi",
    "elementType": "all",
    "stylers": [ { "visibility": "off" } ]
  },
  {
    "featureType": "road",
    "elementType": "all",
    "stylers": [ { "saturation": -100 }, { "lightness": 45 } ]
  },
  {
    "featureType": "road.highway",
    "elementType": "all",
    "stylers": [ { "visibility": "simplified" } ]
  },
  {
    "featureType": "road.arterial",
    "elementType": "labels.icon",
    "stylers": [ { "visibility": "off" } ]
  },
  {
    "featureType": "transit",
    "elementType": "all",
    "stylers": [ { "visibility": "off" } ]
  },
  {
    "featureType": "water",
    "elementType": "all",
    "stylers": [ { "color": "#46bcec" }, { "visibility": "on" } ]
  }
]'
style_list<-fromJSON(style1)
# Convert the parsed style list into the Static-Maps-API style string.
create_style_string<- function(style_list){
  style_string <- ""
  for(i in 1:length(style_list)){
    if("featureType" %in% names(style_list[[i]])){
      style_string <- paste0(style_string, "feature:", style_list[[i]]$featureType, "|")
    }
    elements <- style_list[[i]]$stylers
    a <- lapply(elements, function(x)paste0(names(x), ":", x)) %>% unlist() %>% paste0(collapse="|")
    style_string <- paste0(style_string, a)
    if(i < length(style_list)){
      style_string <- paste0(style_string, "&style=")
    }
  }
  # google wants 0xff0000 not #ff0000
  style_string <- gsub("#", "0x", style_string)
  return(style_string)
}
style_string <- create_style_string(style_list)
mymap<-ggmap(get_googlemap(c(lon = 18, lat = 52),zoom=4, xlim=c(-10,72), ylim=c(30, 80), style=style_string), extent="device")
print(mymap)+
  geom_point(data=Data_3, aes(x=Long, y=Lat, color=Classification_name), alpha = 5/10) +
  ggtitle('') + xlab('Longitude') + ylab('Latitude') +
  theme(text = element_text(size=18)) +
  theme(axis.text=element_text(size=8,color='black'), axis.title=element_text(size=12,color='black')) +
  theme(plot.title = element_text(size = 26, vjust=1)) +
  theme(plot.title = element_text(size = 18,face="bold")) +
  theme(legend.key = element_rect(fill = "white")) +
  theme(legend.text = element_text(colour="black", size=8))+
  theme(legend.title = element_text(colour="black", size=10, face="bold"))+
  scale_color_manual(values = c("yellow", "blue", "grey", "orange", "red", "black", "green", "purple"))
ggsave(mymap, filename="mymap.png")

# --- 133 accession from 360 collection ---------------------------------------
setwd("C:/Users/Ella Katz/Desktop/01 - Post Doc/Projects/Sarah Turner/Analyses/Stat/emmeans")
Data_360 <- read.table(file="Data_360.csv", header=T, sep=",")
colnames(Data_3)
Data_4 <-Data_3[, c(1,57,58)]
Data_360_1<- merge(Data_360, Data_4, by="CS")
basemap <- get_googlemap(c(lon = 18, lat = 52),zoom=4, xlim=c(-10,72), ylim=c(30, 80),
                         #maptype = 'hybrid',
                         #maptype = 'terrain',
                         #maptype = 'satellite',
                         maptype = 'roadmap',
                         key="AIzaSyAYVck1dRnEJY0Sfzsb9i5K9gWqlwExITI")
ggmap(basemap)+
  geom_point(data=Data_360_1, aes(x=Long, y=Lat, color=Classification_name), alpha = 5/10) +
  ggtitle('') + xlab('') + ylab('') +
  theme(text = element_text(size=18)) +
  theme(axis.text=element_text(size=8,color='black'), axis.title=element_text(size=20,color='black')) +
  theme(plot.title = element_text(size = 26, vjust=1)) +
  theme(plot.title = element_text(size = 18,face="bold")) +
  theme(legend.key = element_rect(fill = "white")) +
  theme(legend.text = element_text(colour="black", size=8))+
  theme(legend.title = element_text(colour="black", size=10, face="bold"))+
  scale_color_manual(values = c("grey", "orange", "red", "green", "yellow", "black", "blue"))

# --- PCA without C7 C8 -------------------------------------------------------
# NOTE(review): res.pca / PCscores / data_PC_3 / Data_2 / Data_3 are
# overwritten again here; execution order matters.
colnames(Data_1)
data_PCA_3 <- Data_1[,c(9:21, 23,24,26,28:29)]
res.pca <- prcomp(data_PCA_3, center=T, scale=T)
PCscores <- as.data.frame(res.pca$x)
data_PC_3 <- cbind(Data_1, PCscores)
ggplot(data_PC_3, aes(PC1, PC2, color=Classification_name))+ geom_point()+theme_bw()+
  ggtitle("Principal Component Analysis, by classifications") +
  scale_color_manual(values = c("orange", "lightblue", "purple", "Yellow", "red", "blue", "green", "purple"))
plot_ly(data_PC_3, x = ~PC1, y = ~PC2, z = ~PC3, color = ~ Classification_name,
        symbol = ~Ref, symbols = c('circle','x','x','x'), size=10, alpha = 0.8)%>%
  add_markers() %>%
  layout(scene = list(xaxis = list(title = 'PC1'),
                      yaxis = list(title = 'PC2'),
                      zaxis = list(title = 'PC3'))) %>%
  add_text(data_PC_3, x = ~PC1, y = ~PC2, z = ~PC3)

# Creating the PCA barplot:
pca.var <- res.pca$sdev^2
pca.var.per <- round(pca.var/sum(pca.var)*100, 1)
barplot(pca.var.per, main="Scree Plot - traits emmeans", xlab="Principal Component", ylab="Percent Variation")

# The effect of each phenotype:
fviz_pca_var(res.pca, col.var = "contrib", # Color by contributions to the PC
             gradient.cols = c("#00AFBB", "#E7B800", "#FC4E07"),
             repel = TRUE # Avoid text overlapping
)

# Contribution of variables to Dim 1:
var<-get_pca_var(res.pca)
v<-var$coord
v
fviz_contrib(res.pca, choice="var", axes=2, top=10, title="Contribution of variables - traits emmeans")

Data_2 <- cbind(Data_1, PCscores)
Data_3 <- merge(Data_2, details_1, by="CS")
Data_3 <- droplevels.data.frame(Data_3)
Data_3 <- unique(Data_3)
ggplot(Data_3, aes(PC1, PC2, color=Classification_name))+ geom_point()+theme_bw()+
  ggtitle("Principal Component Analysis")+
  scale_color_manual(values = c("orange", "lightblue", "purple", "Yellow", "red", "blue", "green", "purple"))

# Map of PC1 (post-merge columns are suffixed .x).
print(mymap)+
  geom_point(data=Data_3, aes(x=Long.x, y=Lat.x, color=PC1), alpha = 5/10, size=2) +
  ggtitle('') + xlab('Longitude') + ylab('Latitude') +
  theme(text = element_text(size=18)) +
  theme(axis.text=element_text(size=8,color='black'), axis.title=element_text(size=12,color='black')) +
  theme(plot.title = element_text(size = 26, vjust=1)) +
  theme(plot.title = element_text(size = 18,face="bold")) +
  theme(legend.key = element_rect(fill = "white")) +
  theme(legend.text = element_text(colour="black", size=8))+
  theme(legend.title = element_text(colour="black", size=10, face="bold"))+
  scale_color_gradient2(low = 'yellow', mid = 'red', high = 'red')
/Data_organization_classification/Traits, PCA and classification.R
no_license
EllaKatz1/GSLs_seeds
R
false
false
33,745
r
install.packages(c("FactoMineR", "factoextra")) library("FactoMineR") library("factoextra") install.packages('corrplot') library("corrplot") library(gplots) library(tidyverse) library(lme4) library(lmerTest) library(wesanderson) library(reshape) install.packages('ggmap') library(ggmap) install.packages('CRAN') library("rjson") library(maps) install.packages("RJSONIO") library(RJSONIO) install.packages("googleway") library("googleway") install.packages("emmeans") library(emmeans) library(multcomp) #citation("ggmap") #All the amounts are umol/mg #Details: setwd("C:/Users/Ella Katz/Desktop/01 - Post Doc/Projects/Sarah Turner/Analyses") details <- read.table(file="GSLs profiles by weight.csv", header=T, sep=",") setwd("C:/Users/Ella Katz/Desktop/01 - Post Doc/Projects/Sarah Turner/Analyses/Stat/emmeans") data <- read.table(file="Emmeans_data.csv", header=T, sep=",") #na.omit(data) data[is.na(data)] <- 0 colnames(details) details_1 <- details[,c(6:10)] Total_data <- merge(details_1, data, by="CS") Total_data <- droplevels.data.frame(Total_data) Total_data_1 <- unique(Total_data) Total_data_2 <-Total_data_1[, -c(6,7,31:33)] write.csv(Total_data_2, file = "Total_data.csv") #Calculating the traits based on the emmeans: #Starting to classifie the accessions by chemotypes: colnames(data) data[is.na(data)] <- 0 Data_02<-merge(details_1, data, by="CS") Data_02<-droplevels.data.frame(Data_02) Data_03<- unique(Data_02) Data_01 <- mutate(Data_03, C3= (X3OHP + X3MSO + Allyl + X3MT), C4= (X4OHB + OHBut + X4MSO + But + X4MT)) Data_01$Total_GSL <- rowSums(Data_01[,c(4:26)]) Data_1 <- mutate(Data_01, C3ratio_emmeans= C3/(C3+C4), Alk_emmeans=(OHBut+ Allyl +But)/(C3+C4), OH_emmeans=(X3OHP+X4OHB)/(C3+C4), MSO_emmeans=(X3MSO+X4MSO)/(C3+C4), GSOH_emmeans=OHBut/(OHBut+But), lc=(X7MT+X8MT+ X7MSO+X8MSO+X5MSO+X6MSO+X8MTder)/Total_GSL, sc=(C3+C4)/Total_GSL, GSOX_emmeans=(X3MT+X4MT)/(C3+C4), Alk_OH_ratio=Alk_emmeans/(OH_emmeans+Alk_emmeans)) Data_1$Ref <- NA Data_1$Ref[which(Data_1$CS == 
"CS76778")] <- "Col" Data_1$Ref[which(Data_1$CS == "CS77021")] <- "Ler" Data_1$Ref[which(Data_1$CS == "CS76789")] <- "Cvi" Data_1$Ref[is.na(Data_1$Ref)] <- "Accession" Data_1[is.na(Data_1)] <- 0 #Excluding accessions: Data_1<-Data_1[!(Data_1$Country=="AFG"),] Data_1<-Data_1[!(Data_1$Country=="USA"),] Data_1<-Data_1[!(Data_1$Country=="CAN"),] Data_1<-Data_1[!(Data_1$Country=="CHN"),] Data_1<-Data_1[!(Data_1$Country=="CPV"),] Data_1<-Data_1[!(Data_1$Country=="UZB"),] Data_1<-Data_1[!(Data_1$Country=="TJK"),] Data_1<-Data_1[!(Data_1$Country=="IND"),] Data_1<-Data_1[!(Data_1$Country=="JPN"),] Data_1<-Data_1[!(Data_1$Country=="KAZ"),] Data_1<-Data_1[!(Data_1$Country=="KGZ"),] Data_1<-Data_1[!(Data_1$Country=="MAR"),] Data_1<-Data_1[!(Data_1$Name == "Bijisk-4"),] Data_1<-Data_1[!(Data_1$Name == "Kolyv-2"),] Data_1<-Data_1[!(Data_1$Name == "Kolyv-3"),] Data_1<-Data_1[!(Data_1$Name == "Kly-1"),] Data_1<-Data_1[!(Data_1$Name == "Kly-4"),] Data_1<-Data_1[!(Data_1$Name == "Kolyv-5"),] Data_1<-Data_1[!(Data_1$Name == "Kolyv-6"),] Data_1<-Data_1[!(Data_1$Name == "Koz-2"),] Data_1<-Data_1[!(Data_1$Name == "K-oze-1"),] Data_1<-Data_1[!(Data_1$Name == "K-oze-3"),] Data_1<-Data_1[!(Data_1$Name == "Masl-1"),] Data_1<-Data_1[!(Data_1$Name == "Noveg-3"),] Data_1<-Data_1[!(Data_1$Name == "Noveg-2"),] Data_1<-Data_1[!(Data_1$Name == "Noveg-1"),] Data_1<-Data_1[!(Data_1$Name == "Lebja-1"),] Data_1<-Data_1[!(Data_1$Name == "Nosov-1"),] Data_1<-Data_1[!(Data_1$Name == "Panke-1"),] Data_1<-Data_1[!(Data_1$Name == "Rakit-1"),] Data_1<-Data_1[!(Data_1$Name == "Rakit-3"),] Data_1<-Data_1[!(Data_1$Name == "Basta-1"),] Data_1<-Data_1[!(Data_1$Name == "Basta-2"),] Data_1<-Data_1[!(Data_1$Name == "Basta-3"),] Data_1<-Data_1[!(Data_1$Name == "Chaba-2"),] Data_1<-Data_1[!(Data_1$Name == "Sever-1"),] Data_1<-Data_1[!(Data_1$Name == "Balan-1"),] Data_1<-Data_1[!(Data_1$Name == "Valm"),] Data_1<-Data_1[!(Data_1$Name == "Stepn-1"),] Data_1<-Data_1[!(Data_1$Name == "Stepn-2"),] 
Data_1<-Data_1[!(Data_1$Name == "Adam-1"),] Data_1<-Data_1[!(Data_1$Name == "Karag-1"),] Data_1<-Data_1[!(Data_1$Name == "Karag-2"),] Data_1<-Data_1[!(Data_1$Name == "Kidr-1"),] Data_1<-Data_1[!(Data_1$Name == "Per-1"),] Data_1<-Data_1[!(Data_1$Name == "Pi-0"),] #Chemotyping: #Classifying to Alk OH MSO: Data_1$AOP <- NA Data_1$AOP[which(Data_1$Alk_emmeans < 0.4 & Data_1$OH_emmeans > 0.55 &Data_1$MSO_emmeans<0.1)] <- "OH" Data_1$AOP[which(Data_1$Alk_emmeans > 0.1 & Data_1$OH_emmeans < 0.55 )] <- "Alk" Data_1$AOP[which(Data_1$Alk_emmeans < 0.1& Data_1$OH_emmeans < 0.55)] <- "MSO" #Manual corrections: Data_1$AOP[which(Data_1$CS == "CS76357")] <- "Alk" Data_1$AOP[which(Data_1$CS == "CS78767")] <- "Alk" Data_1$AOP[which(Data_1$CS == "CS77281")] <- "Alk" Data_1$AOP[which(Data_1$CS == "CS76729")] <- "AOP23" Data_1$AOP[which(Data_1$CS == "CS77277")] <- "AOP23" Data_1$AOP[which(Data_1$CS == "CS76653")] <- "AOP23" Data_1$AOP[which(Data_1$CS == "CS76935")] <- "AOP23" Data_1$AOP[which(Data_1$CS == "CS77299")] <- "AOP23" Data_1$AOP[which(Data_1$CS == "CS76389")] <- "AOP23" Data_1$AOP[which(Data_1$CS == "CS77164")] <- "AOP23" Data_1$AOP[which(Data_1$CS == "CS76437")] <- "AOP23" Data_1$AOP[which(Data_1$CS == "CS76445")] <- "AOP23" Data_1$AOP[which(Data_1$CS == "CS76680")] <- "AOP23" Data_1$AOP[which(Data_1$CS == "CS77356")] <- "AOP23" Data_1$AOP[which(Data_1$CS == "CS77139")] <- "AOP23" Data_1$AOP[which(Data_1$CS == "CS76934")] <- "AOP23" Data_1$AOP[which(Data_1$CS == "CS76808")] <- "AOP23" Data_1$AOP[which(Data_1$CS == "CS76987")] <- "AOP23" Data_1$AOP[which(Data_1$CS == "CS78778")] <- "AOP23" Data_1$AOP[which(Data_1$CS == "CS77224")] <- "AOP23" Data_1$AOP[which(Data_1$CS == "CS76580")] <- "AOP23" Data_1$AOP[which(Data_1$CS == "CS76515")] <- "AOP23" Data_1$AOP[which(Data_1$CS == "CS77038")] <- "AOP23" #MAM classification: qplot(Data_1$C3ratio_emmeans, geom="histogram") +theme_bw() ggplot(data=Data_1, aes(Data_1$C3ratio_emmeans)) + geom_histogram()+theme_bw()+ ylab("# of 
accessions") + xlab("C3/C4 ratio") colnames(Data_1) Data_1$Elong<-cut(Data_1$C3ratio_emmeans, c(-0.1, 0.5, 1.1)) Data_1$Elong <- Data_1$Elong levels(Data_1$Elong) <- c("C4", "C3") #GSOH colnames(Data_1) qplot(Data_1$GSOH_emmeans, geom="histogram") +theme_bw() ggplot(data=Data_1, aes(Data_1$GSOH_emmeans)) + geom_histogram()+theme_bw()+ ylab("# of accessions") + xlab("OH_Butenyl ratio") Data_1$GSOH_1<-cut(Data_1$GSOH, c(-0.1, 0.1, 2)) Data_1$ GSOH_1<- as.factor(Data_1$ GSOH_1) levels(Data_1$GSOH_1) <- c("NF", "F") Data_1$Key <- as.factor(paste(Data_1$Elong, Data_1$AOP, Data_1$GSOH_1, sep="_")) Data_1$Classification <- Data_1$Key nlevels(Data_1$Key) levels(Data_1$Classification) <- c(1,2,3,4,5,6,7,8,9,10,11,12,13) Data_1$Classification_name <- Data_1$Classification #Divisions by (this order): MAM (3/4), AOP(Alk/MSO/OH), GSOH(+/-) levels(Data_1$Classification_name) <- c("Allyl", "Allyl", "?", "?", "3MSO", "3MSO", "3OHP", "3OHP", "OH-But", "Butenyl", "4MSO", "4MSO", "4OHB") Data_1<-Data_1[!(Data_1$Classification_name == "?"),] setwd("C:/Users/Ella Katz/Desktop/01 - Post Doc/Projects/Sarah Turner/Analyses/PCA") write.csv(Data_1, file = "Master_GSL.csv") Data_1 <- read.table(file="Master_GSL.csv", header=T, sep=",") #PCA with traits calculates by the emmeans: colnames(Data_1) data_PCA_3 <- Data_1[,c(33:37,40)] res.pca <- prcomp(data_PCA_3, center=T, scale=T) PCscores <- as.data.frame(res.pca$x) data_PC_3 <- cbind(Data_1, PCscores) ggplot(data_PC_3, aes(PC1, PC2, color=Classification_name))+ geom_point()+theme_bw()+ ggtitle("Principal Component Analysis, by classifications") + scale_color_manual(values = c("orange", "lightblue", "purple", "Yellow", "red", "blue", "green", "purple")) plot_ly(data_PC_3, x = ~PC1, y = ~PC2, z = ~PC3, color = ~ Classification_name, symbol = ~Ref, symbols = c('circle','x','x','x'), size=10, alpha = 0.8)%>% add_markers() %>% layout(scene = list(xaxis = list(title = 'PC1'), yaxis = list(title = 'PC2'), zaxis = list(title = 'PC3'))) %>% 
add_text(data_PC_3, x = ~PC1, y = ~PC2, z = ~PC3) #Creating the PCA barplot: pca.var <- res.pca$sdev^2 pca.var.per <- round(pca.var/sum(pca.var)*100, 1) barplot(pca.var.per, main="Scree Plot - traits emmeans", xlab="Principal Component", ylab="Percent Variation") # The effect of each phenotype: fviz_pca_var(res.pca, col.var = "contrib", # Color by contributions to the PC gradient.cols = c("#00AFBB", "#E7B800", "#FC4E07"), repel = TRUE # Avoid text overlapping ) #Contribution of variables to Dim 1: var<-get_pca_var(res.pca) v<-var$coord v fviz_contrib(res.pca, choice="var", axes=1, top=10, title="Contribution of variables - traits emmeans") Data_2 <- cbind(Data_1, PCscores) Data_3 <- merge(Data_2, details_1, by="CS") Data_3 <- droplevels.data.frame(Data_3) Data_3 <- unique(Data_3) ggplot(Data_3, aes(PC1, PC2, color=Classification_name))+ geom_point()+theme_bw()+ ggtitle("Principal Component Analysis")+ scale_color_manual(values = c("orange", "lightblue", "purple", "Yellow", "red", "blue", "green", "purple")) #PCA on GSLs: colnames(Data_1) data_PCA_1 <- Data_1[,c(9:31)] res.pca <- prcomp(data_PCA_1, center=T, scale=T) PCscores <- as.data.frame(res.pca$x) data_PC_1 <- cbind(Data_1, PCscores) colnames(data_PC_1) ggplot(data_PC_1, aes(PC1, PC2, color=Ref, shape=Ref))+ geom_point()+theme_bw()+ ggtitle("Principal Component Analysis") + scale_color_manual(values = c("grey", "black", "black", "black")) setwd("C:/Users/Ella Katz/Desktop/01 - Post Doc/Projects/Sarah Turner/Analyses/PCA/traits, PCA and map") ggsave("PCA.png", dpi=300) #Creating the PCA barplot: pca.var <- res.pca$sdev^2 pca.var.per <- round(pca.var/sum(pca.var)*100, 1) view(pca.var.per) barplot(pca.var.per, main="Scree Plot - GSLs", xlab="Principal Component", ylab="Percent Variation", names.arg = c("1", "2", "3", "4", "5", "6", "7", "8", "9", "10", "11", "12", "13", "14", "15", "16", "17", "18", "19", "20", "21", "22", "23")) # The effect of each phenotype: fviz_pca_var(res.pca, col.var = "contrib", # Color 
by contributions to the PC gradient.cols = c("#00AFBB", "#E7B800", "#FC4E07"), repel = TRUE # Avoid text overlapping ) #Contribution of variables to Dim 1: cols <- c("X7MT"="lightgrey", "X4OHB"="red", "7MSO"="lightgrey", "6MSO"="lightgrey","OHBut"="red","4MT"="red", "X3OHP"="blue", "Branched"="lightgrey", Allyl="blue","5MSO"="lightgrey", "3MT"="blue", But="red", "X4MSO"="red", BZO="lightgrey", "3MSO"= "blue", OHPentenyl= "lightgrey", "8MSO"="lightgrey","X8MT"="lightgrey", "X4OHI3M"="lightgrey","X4MOI3M"="lightgrey") cols1 <- c("lightgrey", "lightgrey","lightgrey","lightgrey", "red", "blue", "red", "lightgrey","lightgrey","lightgrey","blue", "red", "lightgrey","blue", "lightgrey","lightgrey","lightgrey","blue", "lightgrey","lightgrey") var<-get_pca_var(res.pca) v<-var$coord v fviz_contrib(res.pca, choice="var", axes=1, top=20, title="Contribution of variables - PC1", fill=cols, color = "black")+ theme(text = element_text(size = 14), axis.text = element_text(size = 14), axis.title = element_text(size = 14)) fviz_contrib(res.pca, choice="var", axes=2, top=20, title="Contribution of variables - PC2", fill=cols1, color = "black")+ theme(text = element_text(size = 14), axis.text = element_text(size = 14), axis.title = element_text(size = 14)) colnames(data_PC_1) ggplot(data_PC_1, aes(PC1, PC2, color=Classification_name))+ geom_point()+theme_bw()+ ggtitle("Principal Component Analysis")+ scale_color_manual(values = c("orange", "lightblue", "purple", "Yellow", "red", "blue", "green", "purple")) Data_2 <- merge(data_PC_1, details_1, by="CS") Data_2 <- droplevels.data.frame(Data_2) Data_2 <- unique(Data_2) colnames(Data_2) setwd("C:/Users/Ella Katz/Desktop/01 - Post Doc/Projects/Sarah Turner/Analyses/PCA/Phylogeny") write.csv(data_PC_1, file = "Data_PCA.csv") #GSOH map: colnames(Data_1) Data_3<- Data_1[c(-347,-167),c(2:6,48:53)] Data_3$Elong <- as.factor(Data_3$Elong) Data_3_Elong <- split(Data_3, Data_3$Elong) list2env(Data_3_Elong, envir=.GlobalEnv) rm(Data_3_Elong) 
Data_C3 <- droplevels.data.frame(C3) Data_C4 <- droplevels.data.frame(C4) Data_C4$Classification_name <- as.factor(Data_C4$Classification_name) Data_C4_Classification_name <- split(Data_C4, Data_C4$Classification_name) list2env(Data_C4_Classification_name, envir=.GlobalEnv) Butenyl <- droplevels.data.frame(Butenyl) OH-But <- droplevels.data.frame(OH-But) Data_5<-rbind(Butenyl, `OH-But`) #Maps: style1<-'[ { "featureType": "administrative", "elementType": "all", "stylers": [ { "visibility": "off" } ] }, { "featureType": "administrative", "elementType": "labels", "stylers": [ { "visibility": "off" } ] }, { "featureType": "administrative", "elementType": "labels.text.fill", "stylers": [ { "color": "#444444" } ] }, { "featureType": "landscape", "elementType": "all", "stylers": [ { "color": "#f2f2f2" } ] }, { "featureType": "poi", "elementType": "all", "stylers": [ { "visibility": "off" } ] }, { "featureType": "road", "elementType": "all", "stylers": [ { "saturation": -100 }, { "lightness": 45 } ] }, { "featureType": "road.highway", "elementType": "all", "stylers": [ { "visibility": "simplified" } ] }, { "featureType": "road.arterial", "elementType": "labels.icon", "stylers": [ { "visibility": "off" } ] }, { "featureType": "transit", "elementType": "all", "stylers": [ { "visibility": "off" } ] }, { "featureType": "water", "elementType": "all", "stylers": [ { "color": "#5d637d" }, { "visibility": "on" } ] } ]' #46bcec old ocean color style_list<-fromJSON(style1) create_style_string<- function(style_list){ style_string <- "" for(i in 1:length(style_list)){ if("featureType" %in% names(style_list[[i]])){ style_string <- paste0(style_string, "feature:", style_list[[i]]$featureType, "|") } elements <- style_list[[i]]$stylers a <- lapply(elements, function(x)paste0(names(x), ":", x)) %>% unlist() %>% paste0(collapse="|") style_string <- paste0(style_string, a) if(i < length(style_list)){ style_string <- paste0(style_string, "&style=") } } # google wants 0xff0000 not #ff0000 
style_string <- gsub("#", "0x", style_string) return(style_string) } style_string <- create_style_string(style_list) mymap<-ggmap(get_googlemap(c(lon = 18, lat = 52),zoom=4, xlim=c(-10,72), ylim=c(30, 80), style=style_string), extent="device") colnames(data_PC_1) test<-data_PC_1[,c(2,54)] test1<-data_PC_1[c(-347,-167),] print(mymap)+ geom_point(data=test1, aes(x=Long, y=Lat, color=PC1), alpha = 7/10, size=3) + ggtitle('') + xlab('Longitude') + ylab('Latitude') + theme(text = element_text(size=16, face="bold")) + theme(axis.text=element_text(size=16,color='black'), axis.title=element_text(size=18,color='black')) + theme(plot.title = element_text(size = 26, vjust=1)) + theme(plot.title = element_text(size = 18,face="bold")) + theme(legend.key = element_rect(fill = "white")) + theme(legend.text = element_text(colour="black", size=16))+ theme(legend.title = element_text(colour="black", size=18, face="bold"))+ theme(legend.position = c(0.045,0.889))+ scale_colour_gradient(low = "yellow", high = "red") ggsave("C:/Users/Ella Katz/Desktop/01 - Post Doc/Projects/Sarah Turner/Analyses/Manuscript/Figures/More plots/PC1.jpeg", dpi = 600) lm_PC1 <- lm(data=data_PC_1, PC1 ~ Lat + Long+ Lat*Long) anova(lm_PC1) lm_PC2 <- lm(data=data_PC_1, PC2 ~ Lat + Long+ Lat*Long) anova(lm_PC2) print(mymap)+ geom_point(data=test1, aes(x=Long, y=Lat, color=Classification_name), alpha = 7/10, size=3) + ggtitle('') + xlab('Longitude') + ylab('Latitude') + theme(text = element_text(size=16, face="bold")) + theme(axis.text=element_text(size=16,color='black'), axis.title=element_text(size=18,color='black')) + theme(plot.title = element_text(size = 26, vjust=1)) + theme(plot.title = element_text(size = 18,face="bold")) + theme(legend.key = element_rect(fill = "white")) + theme(legend.text = element_text(colour="black", size=16))+ theme(legend.title = element_text(colour="black", size=18, face="bold"))+ scale_color_manual(values= c("yellow", "green2","magenta", "skyblue", "blue", "black", "red3")) 
ggsave("C:/Users/Ella Katz/Desktop/01 - Post Doc/Projects/Sarah Turner/Analyses/Manuscript/Figures/More plots/Cheotypes.jpeg", dpi = 600) print(mymap)+ geom_point(data=Data_5, aes(x=Long, y=Lat, color=GSOH_1), alpha = 7/10, size=3) + ggtitle('') + xlab('Longitude') + ylab('Latitude') + theme(text = element_text(size=16, face="bold")) + theme(axis.text=element_text(size=16,color='black'), axis.title.x=element_text(size=16,color='black'), axis.title.y=element_text(size=16,color='black', hjust=0.22)) + theme(plot.title = element_text(size = 26, vjust=1)) + theme(plot.title = element_text(size = 18,face="bold")) + theme(legend.key = element_rect(fill = "white")) + theme(legend.text = element_text(colour="black", size=16))+ theme(legend.title = element_text(colour="black", size=16, face="bold"))+ theme(legend.position = c(0.04,0.95))+ labs(colour="GSOH")+ scale_color_manual(values = c("sienna2", "black")) ggsave("C:/Users/Ella Katz/Desktop/01 - Post Doc/Projects/Sarah Turner/Analyses/Manuscript/Figures/More plots/GSOH.jpeg", dpi = 600) print(mymap)+ geom_point(data=test1, aes(x=Long, y=Lat, color=AOP), alpha = 7/10, size=3) + ggtitle('') + xlab('Longitude') + ylab('Latitude') + theme(text = element_text(size=16, face="bold")) + theme(axis.text=element_text(size=16,color='black'), axis.title.x=element_text(size=16,color='black'), axis.title.y=element_text(size=16,color='black', hjust=0.22)) + theme(plot.title = element_text(size = 26, vjust=1)) + theme(plot.title = element_text(size = 18,face="bold")) + theme(legend.key = element_rect(fill = "white")) + theme(legend.text = element_text(colour="black", size=16))+ theme(legend.title = element_text(colour="black", size=16, face="bold"))+ theme(legend.position = c(0.067,0.93))+ scale_color_manual(values = c("magenta","green3", "yellow")) ggsave("C:/Users/Ella Katz/Desktop/01 - Post Doc/Projects/Sarah Turner/Analyses/Manuscript/Figures/More plots/AOP.jpeg", dpi = 600) print(mymap)+ geom_point(data=test1, aes(x=Long, y=Lat, 
color=Elong), alpha = 7/10, size=3) + ggtitle('') + xlab('Longitude') + ylab('Latitude') + theme(text = element_text(size=16, face="bold")) + theme(axis.text=element_text(size=16,color='black'), axis.title.x=element_text(size=16,color='black'), axis.title.y=element_text(size=16,color='black', hjust=0.22)) + theme(plot.title = element_text(size = 26, vjust=1)) + theme(plot.title = element_text(size = 18,face="bold")) + theme(legend.key = element_rect(fill = "white")) + theme(legend.text = element_text(colour="black", size=16))+ theme(legend.title = element_text(colour="black", size=16, face="bold"))+ theme(legend.position = c(0.09,0.94))+ labs(colour="Chain length")+ scale_color_manual(values = c("blue","red")) ggsave("C:/Users/Ella Katz/Desktop/01 - Post Doc/Projects/Sarah Turner/Analyses/Manuscript/Figures/More plots/Elong.jpeg", dpi = 600) scale_color_gradientn(colours = rainbow(9)) scale_colour_gradient(low = "blue", high = "red") scale_colour_gradientn(colours = terrain.colors(10)) #Grouping by the mountains: test1$Mountain <- NA test1$Mountain[which(test1$Country == "ESP")] <- "South" test1$Mountain[which(test1$Country == "POR")] <- "South" test1$Mountain[which(test1$Country == "ITA")] <- "South" test1$Mountain[which(test1$Country == "GRC")] <- "South" test1$Mountain[which(test1$Country == "BUL")] <- "South" test1$Mountain[which(test1$Country == "CRO")] <- "South" test1$Mountain[which(test1$Country == "SRB")] <- "South" test1$Mountain[which(test1$Country == "ROU")] <- "South" test1$Mountain[which(test1$Country == "SVK")] <- "South" test1$Mountain[which(test1$Country == "LBN")] <- "South" test1$Mountain[which(test1$Country == "GEO")] <- "South" test1$Mountain[which(test1$Country == "ARM")] <- "South" test1$Mountain[which(test1$Country == "Gr-5")] <- "South" test1$Mountain[which(test1$Country == "Gr-1")] <- "South" test1$Mountain[is.na(test1$Mountain)] <- "North" print(mymap)+ geom_point(data=test1, aes(x=Long, y=Lat, color=Mountain), alpha = 7/10, size=3) + 
ggtitle('') + xlab('Longitude') + ylab('Latitude') + theme(text = element_text(size=16, face="bold")) + theme(axis.text=element_text(size=16,color='black'), axis.title=element_text(size=16,color='black')) + theme(plot.title = element_text(size = 26, vjust=1)) + theme(plot.title = element_text(size = 18,face="bold")) + theme(legend.key = element_rect(fill = "white")) + theme(legend.text = element_text(colour="black", size=16))+ theme(legend.title = element_text(colour="black", size=16, face="bold"))+ theme(legend.position = c(0.075,0.938))+ labs(colour="Geography")+ scale_color_manual(values = c("chartreuse4", "deeppink2")) ggsave("C:/Users/Ella Katz/Desktop/01 - Post Doc/Projects/Sarah Turner/Analyses/Manuscript/Figures/More plots/Geography.jpeg", dpi = 600) basemap <- get_googlemap(c(lon = 17, lat = 52),zoom=4, xlim=c(-22,60), ylim=c(30, 80), #maptype = 'hybrid', #maptype = 'terrain', #maptype = 'satellite', maptype = 'roadmap', color="bw", key="AIzaSyAYVck1dRnEJY0Sfzsb9i5K9gWqlwExITI") ggmap(basemap) + geom_point(data=Data_3, aes(x=Long, y=Lat, color=Classification_name), alpha = 5/10) + ggtitle('') + xlab('') + ylab('') + theme(text = element_text(size=18)) + theme(axis.text=element_text(size=8,color='black'), axis.title=element_text(size=20,color='black')) + theme(plot.title = element_text(size = 26, vjust=1)) + theme(plot.title = element_text(size = 18,face="bold")) + theme(legend.key = element_rect(fill = "white")) + theme(legend.text = element_text(colour="black", size=8))+ theme(legend.title = element_text(colour="black", size=10, face="bold"))+ scale_color_manual(values = c("yellow", "blue", "lightblue", "orange", "red", "turquoise", "green", "purple")) ggmap(basemap) + geom_point(data=Data_3, aes(x=Long, y=Lat, color=PC1), alpha = 5/10) + ggtitle('') + xlab('') + ylab('') + theme(text = element_text(size=18)) + theme(axis.text=element_text(size=8,color='black'), axis.title=element_text(size=20,color='black')) + theme(plot.title = element_text(size = 26, 
vjust=1)) + theme(plot.title = element_text(size = 18,face="bold")) + theme(legend.key = element_rect(fill = "white")) + theme(legend.text = element_text(colour="black", size=8))+ theme(legend.title = element_text(colour="black", size=10, face="bold"))+ scale_color_gradientn(colours = rainbow(5)) basemap <- get_googlemap(c(lon = 35, lat = 40),zoom=3, xlim=c(-22,78), ylim=c(24, 80), #maptype = 'hybrid', #maptype = 'terrain', #maptype = 'satellite', maptype = 'roadmap', color="bw", key="AIzaSyAYVck1dRnEJY0Sfzsb9i5K9gWqlwExITI") basemap <- get_googlemap(c(lon = 18, lat = 52),zoom=4, xlim=c(-10,72), ylim=c(30, 80), #maptype = 'hybrid', #maptype = 'terrain', #maptype = 'satellite', maptype = 'roadmap', key="AIzaSyAYVck1dRnEJY0Sfzsb9i5K9gWqlwExITI") ggmap(basemap) + geom_point(data=Data_3, aes(x=Long, y=Lat, color=Classification_name), alpha = 5/10) + ggtitle('') + xlab('') + ylab('') + theme(text = element_text(size=18)) + theme(axis.text=element_text(size=8,color='black'), axis.title=element_text(size=20,color='black')) + theme(plot.title = element_text(size = 26, vjust=1)) + theme(plot.title = element_text(size = 18,face="bold")) + theme(legend.key = element_rect(fill = "white")) + theme(legend.text = element_text(colour="black", size=8))+ theme(legend.title = element_text(colour="black", size=10, face="bold"))+ scale_color_manual(values = c("yellow", "blue", "lightblue", "orange", "red", "turquoise", "green", "purple")) #Costumized map: #https://snazzymaps.com/editor #https://mapstyle.withgoogle.com/ style1<-'[ { "featureType": "administrative", "elementType": "all", "stylers": [ { "visibility": "off" } ] }, { "featureType": "administrative", "elementType": "labels", "stylers": [ { "visibility": "off" } ] }, { "featureType": "administrative", "elementType": "labels.text.fill", "stylers": [ { "color": "#444444" } ] }, { "featureType": "landscape", "elementType": "all", "stylers": [ { "color": "#f2f2f2" } ] }, { "featureType": "poi", "elementType": "all", "stylers": [ 
{ "visibility": "off" } ] }, { "featureType": "road", "elementType": "all", "stylers": [ { "saturation": -100 }, { "lightness": 45 } ] }, { "featureType": "road.highway", "elementType": "all", "stylers": [ { "visibility": "simplified" } ] }, { "featureType": "road.arterial", "elementType": "labels.icon", "stylers": [ { "visibility": "off" } ] }, { "featureType": "transit", "elementType": "all", "stylers": [ { "visibility": "off" } ] }, { "featureType": "water", "elementType": "all", "stylers": [ { "color": "#46bcec" }, { "visibility": "on" } ] } ]' style_list<-fromJSON(style1) create_style_string<- function(style_list){ style_string <- "" for(i in 1:length(style_list)){ if("featureType" %in% names(style_list[[i]])){ style_string <- paste0(style_string, "feature:", style_list[[i]]$featureType, "|") } elements <- style_list[[i]]$stylers a <- lapply(elements, function(x)paste0(names(x), ":", x)) %>% unlist() %>% paste0(collapse="|") style_string <- paste0(style_string, a) if(i < length(style_list)){ style_string <- paste0(style_string, "&style=") } } # google wants 0xff0000 not #ff0000 style_string <- gsub("#", "0x", style_string) return(style_string) } style_string <- create_style_string(style_list) mymap<-ggmap(get_googlemap(c(lon = 18, lat = 52),zoom=4, xlim=c(-10,72), ylim=c(30, 80), style=style_string), extent="device") print(mymap)+ geom_point(data=Data_3, aes(x=Long, y=Lat, color=Classification_name), alpha = 5/10) + ggtitle('') + xlab('Longitude') + ylab('Latitude') + theme(text = element_text(size=18)) + theme(axis.text=element_text(size=8,color='black'), axis.title=element_text(size=12,color='black')) + theme(plot.title = element_text(size = 26, vjust=1)) + theme(plot.title = element_text(size = 18,face="bold")) + theme(legend.key = element_rect(fill = "white")) + theme(legend.text = element_text(colour="black", size=8))+ theme(legend.title = element_text(colour="black", size=10, face="bold"))+ scale_color_manual(values = c("yellow", "blue", "grey", 
"orange", "red", "black", "green", "purple")) ggsave(mymap, filename="mymap.png") #133 accession from 360 collection: setwd("C:/Users/Ella Katz/Desktop/01 - Post Doc/Projects/Sarah Turner/Analyses/Stat/emmeans") Data_360 <- read.table(file="Data_360.csv", header=T, sep=",") colnames(Data_3) Data_4 <-Data_3[, c(1,57,58)] Data_360_1<- merge(Data_360, Data_4, by="CS") basemap <- get_googlemap(c(lon = 18, lat = 52),zoom=4, xlim=c(-10,72), ylim=c(30, 80), #maptype = 'hybrid', #maptype = 'terrain', #maptype = 'satellite', maptype = 'roadmap', key="AIzaSyAYVck1dRnEJY0Sfzsb9i5K9gWqlwExITI") ggmap(basemap)+ geom_point(data=Data_360_1, aes(x=Long, y=Lat, color=Classification_name), alpha = 5/10) + ggtitle('') + xlab('') + ylab('') + theme(text = element_text(size=18)) + theme(axis.text=element_text(size=8,color='black'), axis.title=element_text(size=20,color='black')) + theme(plot.title = element_text(size = 26, vjust=1)) + theme(plot.title = element_text(size = 18,face="bold")) + theme(legend.key = element_rect(fill = "white")) + theme(legend.text = element_text(colour="black", size=8))+ theme(legend.title = element_text(colour="black", size=10, face="bold"))+ scale_color_manual(values = c("grey", "orange", "red", "green", "yellow", "black", "blue")) #PCA without C7 C8: colnames(Data_1) data_PCA_3 <- Data_1[,c(9:21, 23,24,26,28:29)] res.pca <- prcomp(data_PCA_3, center=T, scale=T) PCscores <- as.data.frame(res.pca$x) data_PC_3 <- cbind(Data_1, PCscores) ggplot(data_PC_3, aes(PC1, PC2, color=Classification_name))+ geom_point()+theme_bw()+ ggtitle("Principal Component Analysis, by classifications") + scale_color_manual(values = c("orange", "lightblue", "purple", "Yellow", "red", "blue", "green", "purple")) plot_ly(data_PC_3, x = ~PC1, y = ~PC2, z = ~PC3, color = ~ Classification_name, symbol = ~Ref, symbols = c('circle','x','x','x'), size=10, alpha = 0.8)%>% add_markers() %>% layout(scene = list(xaxis = list(title = 'PC1'), yaxis = list(title = 'PC2'), zaxis = list(title = 
'PC3'))) %>% add_text(data_PC_3, x = ~PC1, y = ~PC2, z = ~PC3) #Creating the PCA barplot: pca.var <- res.pca$sdev^2 pca.var.per <- round(pca.var/sum(pca.var)*100, 1) barplot(pca.var.per, main="Scree Plot - traits emmeans", xlab="Principal Component", ylab="Percent Variation") # The effect of each phenotype: fviz_pca_var(res.pca, col.var = "contrib", # Color by contributions to the PC gradient.cols = c("#00AFBB", "#E7B800", "#FC4E07"), repel = TRUE # Avoid text overlapping ) #Contribution of variables to Dim 1: var<-get_pca_var(res.pca) v<-var$coord v fviz_contrib(res.pca, choice="var", axes=2, top=10, title="Contribution of variables - traits emmeans") Data_2 <- cbind(Data_1, PCscores) Data_3 <- merge(Data_2, details_1, by="CS") Data_3 <- droplevels.data.frame(Data_3) Data_3 <- unique(Data_3) ggplot(Data_3, aes(PC1, PC2, color=Classification_name))+ geom_point()+theme_bw()+ ggtitle("Principal Component Analysis")+ scale_color_manual(values = c("orange", "lightblue", "purple", "Yellow", "red", "blue", "green", "purple")) print(mymap)+ geom_point(data=Data_3, aes(x=Long.x, y=Lat.x, color=PC1), alpha = 5/10, size=2) + ggtitle('') + xlab('Longitude') + ylab('Latitude') + theme(text = element_text(size=18)) + theme(axis.text=element_text(size=8,color='black'), axis.title=element_text(size=12,color='black')) + theme(plot.title = element_text(size = 26, vjust=1)) + theme(plot.title = element_text(size = 18,face="bold")) + theme(legend.key = element_rect(fill = "white")) + theme(legend.text = element_text(colour="black", size=8))+ theme(legend.title = element_text(colour="black", size=10, face="bold"))+ scale_color_gradient2(low = 'yellow', mid = 'red', high = 'red')
# R script for programtic access to Revigo. Run it with (last output file name is optional): # Rscript revigo-r.R example-data.csv output.csv library(httr) library(stringi) args = commandArgs(trailingOnly=TRUE) # Read user data from a file fileName <- args[1] semsim <- args[2] meas <- args[3] random_num = args[4] enrichments <- readChar(fileName,file.info(fileName)$size) # Submit job to Revigo httr::POST( url = "http://revigo.irb.hr/StartJob.aspx", body = list( cutoff = semsim, valueType = "pvalue", speciesTaxon = "9606", measure = meas, goList = enrichments ), # application/x-www-form-urlencoded encode = "form" ) -> res dat <- httr::content(res, encoding = "UTF-8") jobid <- jsonlite::fromJSON(dat,bigint_as_char=TRUE)$jobid # Check job status running <- "1" while (running != "0" ) { httr::POST( url = "http://revigo.irb.hr/QueryJobStatus.aspx", query = list( jobid = jobid ) ) -> res2 dat2 <- httr::content(res2, encoding = "UTF-8") running <- jsonlite::fromJSON(dat2)$running Sys.sleep(1) } # Fetch results httr::POST( url = "http://revigo.irb.hr/ExportJob.aspx", query = list( jobid = jobid, namespace = "1", type = "csvtable" ) ) -> res3 dat3 <- httr::content(res3, encoding = "UTF-8") # Write results to a file - if file name is not provided the default is output.csv dat3 <- stri_replace_all_fixed(dat3, "\r", "") fout_name_tmp = unlist(strsplit(fileName, "/")) fout_name_finale = paste0(paste0(fout_name_tmp[1:length(fout_name_tmp)-1], collapse = "/"), "/revigo_out.csv") cat(dat3, file=fout_name_finale, fill = FALSE)
/AnnotateMe/BIN/parseREVIGO_new.R
no_license
Gray-Tu/snpXplorer
R
false
false
1,615
r
# R script for programtic access to Revigo. Run it with (last output file name is optional): # Rscript revigo-r.R example-data.csv output.csv library(httr) library(stringi) args = commandArgs(trailingOnly=TRUE) # Read user data from a file fileName <- args[1] semsim <- args[2] meas <- args[3] random_num = args[4] enrichments <- readChar(fileName,file.info(fileName)$size) # Submit job to Revigo httr::POST( url = "http://revigo.irb.hr/StartJob.aspx", body = list( cutoff = semsim, valueType = "pvalue", speciesTaxon = "9606", measure = meas, goList = enrichments ), # application/x-www-form-urlencoded encode = "form" ) -> res dat <- httr::content(res, encoding = "UTF-8") jobid <- jsonlite::fromJSON(dat,bigint_as_char=TRUE)$jobid # Check job status running <- "1" while (running != "0" ) { httr::POST( url = "http://revigo.irb.hr/QueryJobStatus.aspx", query = list( jobid = jobid ) ) -> res2 dat2 <- httr::content(res2, encoding = "UTF-8") running <- jsonlite::fromJSON(dat2)$running Sys.sleep(1) } # Fetch results httr::POST( url = "http://revigo.irb.hr/ExportJob.aspx", query = list( jobid = jobid, namespace = "1", type = "csvtable" ) ) -> res3 dat3 <- httr::content(res3, encoding = "UTF-8") # Write results to a file - if file name is not provided the default is output.csv dat3 <- stri_replace_all_fixed(dat3, "\r", "") fout_name_tmp = unlist(strsplit(fileName, "/")) fout_name_finale = paste0(paste0(fout_name_tmp[1:length(fout_name_tmp)-1], collapse = "/"), "/revigo_out.csv") cat(dat3, file=fout_name_finale, fill = FALSE)
fpca.nonscore <- function(A, maxsteps = 100, tol = 1e-3, normalised = T, K = 2, ridge = T, approx = FALSE){ # we need the diagonal elements equal to 0 diag(A) = 0 # get the iso and non-iso lists iso.A = isolate(A) iso.seq = iso.A$isolate noniso.seq = iso.A$nonisolate # work on the non-iso part from now on A.noniso = A[noniso.seq, noniso.seq] # to get the laplacian matrix L = laplacian(A = A.noniso, normalised = normalised) L.svd = svd(L) n = length(noniso.seq) Ts = fused.trans(A.noniso) temp.dim = dim(Ts) Ts = as.numeric(Ts) Ts = matrix(Ts, byrow = F, nrow = temp.dim[1]) #if(K == 2) index = n #if(K > 2) index = ((n - K + 1): n) index = ((n - K + 1): (n - 1)) n.ind = length(index) fused.whole = list() for(i in 1 : n.ind){ # response vector in the equivalent regression case reg.y = L.svd$u[, index[i]] * L.svd$d[index[i]] temp = fusedlasso.mod(y = reg.y, X = L, D = Ts, maxsteps = maxsteps, tol = tol, ridge = ridge, approx = approx) fused.whole[[i]] = temp$beta } if(K == 2) final.whole = fused.whole[[1]] if(K > 2){ temp1 = rep(0, n.ind) for(i in 1: n.ind){ temp1[i] = dim(fused.whole[[i]])[2] } ind.min = min(temp1) for(i in 1: n.ind){ fused.whole[[i]] = fused.whole[[i]][, 1:ind.min] } final.whole = array(0, dim = c(n, n.ind, ind.min)) for(i in 1:ind.min){ for(j in 1: n.ind){ final.whole[, j, i] = fused.whole[[j]][,i] } final.whole[, , i] = scale(final.whole[, , i], center = T) } } class(final.whole) = 'FPCA' return(list(final.whole = final.whole, iso.seq = iso.seq)) }
/FusedPCA/R/fpca.nonscore.R
no_license
ingted/R-Examples
R
false
false
1,601
r
fpca.nonscore <- function(A, maxsteps = 100, tol = 1e-3, normalised = T, K = 2, ridge = T, approx = FALSE){ # we need the diagonal elements equal to 0 diag(A) = 0 # get the iso and non-iso lists iso.A = isolate(A) iso.seq = iso.A$isolate noniso.seq = iso.A$nonisolate # work on the non-iso part from now on A.noniso = A[noniso.seq, noniso.seq] # to get the laplacian matrix L = laplacian(A = A.noniso, normalised = normalised) L.svd = svd(L) n = length(noniso.seq) Ts = fused.trans(A.noniso) temp.dim = dim(Ts) Ts = as.numeric(Ts) Ts = matrix(Ts, byrow = F, nrow = temp.dim[1]) #if(K == 2) index = n #if(K > 2) index = ((n - K + 1): n) index = ((n - K + 1): (n - 1)) n.ind = length(index) fused.whole = list() for(i in 1 : n.ind){ # response vector in the equivalent regression case reg.y = L.svd$u[, index[i]] * L.svd$d[index[i]] temp = fusedlasso.mod(y = reg.y, X = L, D = Ts, maxsteps = maxsteps, tol = tol, ridge = ridge, approx = approx) fused.whole[[i]] = temp$beta } if(K == 2) final.whole = fused.whole[[1]] if(K > 2){ temp1 = rep(0, n.ind) for(i in 1: n.ind){ temp1[i] = dim(fused.whole[[i]])[2] } ind.min = min(temp1) for(i in 1: n.ind){ fused.whole[[i]] = fused.whole[[i]][, 1:ind.min] } final.whole = array(0, dim = c(n, n.ind, ind.min)) for(i in 1:ind.min){ for(j in 1: n.ind){ final.whole[, j, i] = fused.whole[[j]][,i] } final.whole[, , i] = scale(final.whole[, , i], center = T) } } class(final.whole) = 'FPCA' return(list(final.whole = final.whole, iso.seq = iso.seq)) }
best<-function(state,outcome){ ## Read outcome data #------------------- outcomeOrig_df<-read.csv("outcome-of-care-measures.csv",colClasses="character") myvars<-c(2,7,11,17,23) # numbers of the columns of the variables that we need data<-outcomeOrig_df[,myvars] # subsets de dataframe, keeping only the variables that we need rm(outcomeOrig_df,myvars) data$State<-as.factor(data$State) cols.num <- c(3:5) # idicates the numerical variables data[,cols.num]<-suppressWarnings(sapply(data[,cols.num],as.numeric)) # converts our numeric variables to numeric, suppressing the warning for NAs outcomes<-list("heart attack"=3,"heart failure"=4, "pneumonia"=5) #creates a list of outcomes and corresponding column numbers outcomeVar<-outcomes[[outcome]] #assigns the column number to 'outcomeVar' ## Check that state and outcome are valid #---------------------------------------- if(is.null(outcomeVar)){ stop("Invalid outcome") #returns an error message } validState<-sum(data$State==state)!=0 if(validState!=TRUE){ stop("Invalid state") #returns an error message } statedata <- data[which(data$State==state),] #subsets the state data ## Return hospital name in that state with lowest 30-day death rate #------------------------------------------------------------------ statedata$Rank<-rank(statedata[,outcomeVar],ties.method="min") #ranks the hospitals, giving same rank in case of a tie first<-sort(statedata$Hospital.Name[statedata$Rank==1]) #sorts alphabetically the result for the fist hospital(s) first[1] #displays the first name in the vector in case of a tie }
/best.R
no_license
tsimonso/ProgrammingAssignment3
R
false
false
1,845
r
best<-function(state,outcome){ ## Read outcome data #------------------- outcomeOrig_df<-read.csv("outcome-of-care-measures.csv",colClasses="character") myvars<-c(2,7,11,17,23) # numbers of the columns of the variables that we need data<-outcomeOrig_df[,myvars] # subsets de dataframe, keeping only the variables that we need rm(outcomeOrig_df,myvars) data$State<-as.factor(data$State) cols.num <- c(3:5) # idicates the numerical variables data[,cols.num]<-suppressWarnings(sapply(data[,cols.num],as.numeric)) # converts our numeric variables to numeric, suppressing the warning for NAs outcomes<-list("heart attack"=3,"heart failure"=4, "pneumonia"=5) #creates a list of outcomes and corresponding column numbers outcomeVar<-outcomes[[outcome]] #assigns the column number to 'outcomeVar' ## Check that state and outcome are valid #---------------------------------------- if(is.null(outcomeVar)){ stop("Invalid outcome") #returns an error message } validState<-sum(data$State==state)!=0 if(validState!=TRUE){ stop("Invalid state") #returns an error message } statedata <- data[which(data$State==state),] #subsets the state data ## Return hospital name in that state with lowest 30-day death rate #------------------------------------------------------------------ statedata$Rank<-rank(statedata[,outcomeVar],ties.method="min") #ranks the hospitals, giving same rank in case of a tie first<-sort(statedata$Hospital.Name[statedata$Rank==1]) #sorts alphabetically the result for the fist hospital(s) first[1] #displays the first name in the vector in case of a tie }
context("Using graph actions") test_that("actions can be added to a graph object", { # Create an empty graph object graph <- create_graph() # Add a graph action that sets a node # attr column with a function; the # main function `set_node_attr_w_fcn()` # uses the `get_betweenness()` function # to provide betweenness values in the # `btwns` column graph <- graph %>% add_graph_action( fcn = "set_node_attr_w_fcn", node_attr_fcn = "get_betweenness", column_name = "btwns", action_name = "get_btwns") # Expect a `data.frame` object with in # `graph$graph_actions` expect_is( graph$graph_actions, "data.frame") # Extract `graph$graph_actions` to a # separate object graph_actions <- graph$graph_actions # Expect a single row in the data frame expect_equal( nrow(graph_actions), 1) # Expect three columns in the data frame expect_equal( ncol(graph_actions), 3) # Expect certain column names in the # data frame object expect_equal( colnames(graph_actions), c("action_index", "action_name", "expression")) # Expect the `action_index` to be 1 expect_equal( graph_actions$action_index, 1) # Expect the `action_name` to be `get_btwns` expect_equal( graph_actions$action_name, "get_btwns") # Expect the action in the data frame to # be correctly generated expect_equal( graph_actions$expression, "set_node_attr_w_fcn(graph = graph, node_attr_fcn = 'get_betweenness', column_name = 'btwns')") }) test_that("actions can be deleted from a graph object", { # Create an empty graph object graph <- create_graph() # Add three graph actions to the # graph graph <- graph %>% add_graph_action( fcn = "set_node_attr_w_fcn", node_attr_fcn = "get_pagerank", column_name = "pagerank", action_name = "get_pagerank") %>% add_graph_action( fcn = "rescale_node_attrs", node_attr_from = "pagerank", node_attr_to = "width", action_name = "pagerank_to_width") %>% add_graph_action( fcn = "colorize_node_attrs", node_attr_from = "width", node_attr_to = "fillcolor", action_name = "pagerank_fillcolor") 
number_of_graph_actions_before_deletion <- nrow(graph$graph_actions) # Delete two of the graph actions graph <- graph %>% delete_graph_actions( actions = c(2, 3)) # Expect that one graph action remains # Expect a single row in the data frame expect_equal( nrow(graph$graph_actions), number_of_graph_actions_before_deletion - 2) # Expect that the first graph action # remains in the graph # Expect the `action_index` to be 1 expect_equal( graph$graph_actions$action_index, 1) # Expect the `action_name` to be `get_btwns` expect_equal( graph$graph_actions$action_name, "get_pagerank") # Expect the action in the data frame to # be correctly generated expect_equal( graph$graph_actions$expression, "set_node_attr_w_fcn(graph = graph, node_attr_fcn = 'get_pagerank', column_name = 'pagerank')") }) test_that("actions within a graph object can be reordered", { # Create an empty graph object graph <- create_graph() # Add three graph actions to the # graph graph <- graph %>% add_graph_action( fcn = "set_node_attr_w_fcn", node_attr_fcn = "get_pagerank", column_name = "pagerank", action_name = "get_pagerank") %>% add_graph_action( fcn = "rescale_node_attrs", node_attr_from = "pagerank", node_attr_to = "width", action_name = "pagerank_to_width") %>% add_graph_action( fcn = "colorize_node_attrs", node_attr_from = "width", node_attr_to = "fillcolor", action_name = "pagerank_fillcolor") # Get the names of the graph actions # before the reordering occurs names_of_graph_actions_before_reordering <- graph$graph_actions$action_name # Reorder the graph actions so that `2`, # precedes `3`, which precedes `1` graph <- graph %>% reorder_graph_actions( indices = c(2, 3, 1)) # Expect three graph actions in the # graph object expect_equal( nrow(graph$graph_actions), 3) # Get the names of the graph actions # before the reordering occurs names_of_graph_actions_after_reordering <- graph$graph_actions$action_name # Expect that the graph action names # appear in the order according to the # vector 
provided as `indices` expect_equal( names_of_graph_actions_after_reordering, names_of_graph_actions_before_reordering[c(2, 3, 1)]) }) test_that("graph actions can be triggered to modify the graph", { # Create a random graph graph <- create_random_graph( n = 5, m = 10, set_seed = 23) %>% drop_node_attrs( node_attr = "value") # Add three graph actions to: # - add PageRank values # - rescale PageRank values # - create a `fillcolor` attr # ...then, manually trigger the # actions to perform evaluation graph <- graph %>% add_graph_action( fcn = "set_node_attr_w_fcn", node_attr_fcn = "get_pagerank", column_name = "pagerank", action_name = "get_pagerank") %>% add_graph_action( fcn = "rescale_node_attrs", node_attr_from = "pagerank", node_attr_to = "width", action_name = "pgrnk_to_width") %>% add_graph_action( fcn = "colorize_node_attrs", node_attr_from = "width", node_attr_to = "fillcolor", action_name = "pgrnk_fillcolor") %>% trigger_graph_actions() # Expect certain columns to be available # in the graph's internal node data frame expect_equal( colnames(graph$nodes_df), c("id", "type", "label", "pagerank", "width", "fillcolor")) # Expect the `pagerank` column to have # numeric values less than 1 expect_is( graph$nodes_df$pagerank, "numeric") expect_true( all(graph$nodes_df$pagerank <= 1)) # Expect the `width` column to have # numeric values less than 1 expect_is( graph$nodes_df$width, "numeric") expect_true( all(graph$nodes_df$width <= 1)) # Expect the `fillcolor` column to have # character values with color codes expect_is( graph$nodes_df$fillcolor, "character") expect_true( all(grepl("#[A-F0-9]*", graph$nodes_df$fillcolor))) # Expect a warning if using the # `trigger_graph_actions()` function # when there are no graph actions expect_warning( create_graph() %>% trigger_graph_actions()) })
/tests/testthat/test-graph_actions.R
no_license
ekstroem/DiagrammeR
R
false
false
6,508
r
context("Using graph actions") test_that("actions can be added to a graph object", { # Create an empty graph object graph <- create_graph() # Add a graph action that sets a node # attr column with a function; the # main function `set_node_attr_w_fcn()` # uses the `get_betweenness()` function # to provide betweenness values in the # `btwns` column graph <- graph %>% add_graph_action( fcn = "set_node_attr_w_fcn", node_attr_fcn = "get_betweenness", column_name = "btwns", action_name = "get_btwns") # Expect a `data.frame` object with in # `graph$graph_actions` expect_is( graph$graph_actions, "data.frame") # Extract `graph$graph_actions` to a # separate object graph_actions <- graph$graph_actions # Expect a single row in the data frame expect_equal( nrow(graph_actions), 1) # Expect three columns in the data frame expect_equal( ncol(graph_actions), 3) # Expect certain column names in the # data frame object expect_equal( colnames(graph_actions), c("action_index", "action_name", "expression")) # Expect the `action_index` to be 1 expect_equal( graph_actions$action_index, 1) # Expect the `action_name` to be `get_btwns` expect_equal( graph_actions$action_name, "get_btwns") # Expect the action in the data frame to # be correctly generated expect_equal( graph_actions$expression, "set_node_attr_w_fcn(graph = graph, node_attr_fcn = 'get_betweenness', column_name = 'btwns')") }) test_that("actions can be deleted from a graph object", { # Create an empty graph object graph <- create_graph() # Add three graph actions to the # graph graph <- graph %>% add_graph_action( fcn = "set_node_attr_w_fcn", node_attr_fcn = "get_pagerank", column_name = "pagerank", action_name = "get_pagerank") %>% add_graph_action( fcn = "rescale_node_attrs", node_attr_from = "pagerank", node_attr_to = "width", action_name = "pagerank_to_width") %>% add_graph_action( fcn = "colorize_node_attrs", node_attr_from = "width", node_attr_to = "fillcolor", action_name = "pagerank_fillcolor") 
number_of_graph_actions_before_deletion <- nrow(graph$graph_actions) # Delete two of the graph actions graph <- graph %>% delete_graph_actions( actions = c(2, 3)) # Expect that one graph action remains # Expect a single row in the data frame expect_equal( nrow(graph$graph_actions), number_of_graph_actions_before_deletion - 2) # Expect that the first graph action # remains in the graph # Expect the `action_index` to be 1 expect_equal( graph$graph_actions$action_index, 1) # Expect the `action_name` to be `get_btwns` expect_equal( graph$graph_actions$action_name, "get_pagerank") # Expect the action in the data frame to # be correctly generated expect_equal( graph$graph_actions$expression, "set_node_attr_w_fcn(graph = graph, node_attr_fcn = 'get_pagerank', column_name = 'pagerank')") }) test_that("actions within a graph object can be reordered", { # Create an empty graph object graph <- create_graph() # Add three graph actions to the # graph graph <- graph %>% add_graph_action( fcn = "set_node_attr_w_fcn", node_attr_fcn = "get_pagerank", column_name = "pagerank", action_name = "get_pagerank") %>% add_graph_action( fcn = "rescale_node_attrs", node_attr_from = "pagerank", node_attr_to = "width", action_name = "pagerank_to_width") %>% add_graph_action( fcn = "colorize_node_attrs", node_attr_from = "width", node_attr_to = "fillcolor", action_name = "pagerank_fillcolor") # Get the names of the graph actions # before the reordering occurs names_of_graph_actions_before_reordering <- graph$graph_actions$action_name # Reorder the graph actions so that `2`, # precedes `3`, which precedes `1` graph <- graph %>% reorder_graph_actions( indices = c(2, 3, 1)) # Expect three graph actions in the # graph object expect_equal( nrow(graph$graph_actions), 3) # Get the names of the graph actions # before the reordering occurs names_of_graph_actions_after_reordering <- graph$graph_actions$action_name # Expect that the graph action names # appear in the order according to the # vector 
provided as `indices` expect_equal( names_of_graph_actions_after_reordering, names_of_graph_actions_before_reordering[c(2, 3, 1)]) }) test_that("graph actions can be triggered to modify the graph", { # Create a random graph graph <- create_random_graph( n = 5, m = 10, set_seed = 23) %>% drop_node_attrs( node_attr = "value") # Add three graph actions to: # - add PageRank values # - rescale PageRank values # - create a `fillcolor` attr # ...then, manually trigger the # actions to perform evaluation graph <- graph %>% add_graph_action( fcn = "set_node_attr_w_fcn", node_attr_fcn = "get_pagerank", column_name = "pagerank", action_name = "get_pagerank") %>% add_graph_action( fcn = "rescale_node_attrs", node_attr_from = "pagerank", node_attr_to = "width", action_name = "pgrnk_to_width") %>% add_graph_action( fcn = "colorize_node_attrs", node_attr_from = "width", node_attr_to = "fillcolor", action_name = "pgrnk_fillcolor") %>% trigger_graph_actions() # Expect certain columns to be available # in the graph's internal node data frame expect_equal( colnames(graph$nodes_df), c("id", "type", "label", "pagerank", "width", "fillcolor")) # Expect the `pagerank` column to have # numeric values less than 1 expect_is( graph$nodes_df$pagerank, "numeric") expect_true( all(graph$nodes_df$pagerank <= 1)) # Expect the `width` column to have # numeric values less than 1 expect_is( graph$nodes_df$width, "numeric") expect_true( all(graph$nodes_df$width <= 1)) # Expect the `fillcolor` column to have # character values with color codes expect_is( graph$nodes_df$fillcolor, "character") expect_true( all(grepl("#[A-F0-9]*", graph$nodes_df$fillcolor))) # Expect a warning if using the # `trigger_graph_actions()` function # when there are no graph actions expect_warning( create_graph() %>% trigger_graph_actions()) })
% Generated by roxygen2: do not edit by hand % Please edit documentation in R/data-covert.R \docType{data} \name{covert_46} \alias{covert_46} \title{Kenya Tanzania Gerdes} \format{ igraph object } \source{ Available from Center for Computational Analysis of Social and Organizational Systems (CASOS). (2008). Tanzania-Kenya-imoon.xml. Data available online: http://www.casos.cs.cmu.edu/ computational_tools/datasets/internal/tanzania_ kenya/index11.php. Also Available from Manchester (https://sites.google.com/site/ucinetsoftware/datasets/covert-networks). } \usage{ covert_46 } \description{ Data collected by the Center for Computational Analysis of Social and Organizational Systems, a research group at Carnegie Mellon University, on the participation of 18 Al Qaeda members in 25 functional tasks underlying the 1998 bombings of the U.S. Embassies in Nairobi, Kenya, and Dar es Salaam, Tanzania 2-Mode persons to Standing Committees. 2-mode matrix 18 x 25 persons to tasks, binary undirected. Relations are participation in tasks. } \references{ Gerdes, Luke M. (2014), ‘Dependency Centrality from Bipartite Social Networks’, \emph{Connections}, 34, 1&2 } \keyword{datasets}
/man/covert_46.Rd
permissive
schochastics/networkdata
R
false
true
1,186
rd
% Generated by roxygen2: do not edit by hand % Please edit documentation in R/data-covert.R \docType{data} \name{covert_46} \alias{covert_46} \title{Kenya Tanzania Gerdes} \format{ igraph object } \source{ Available from Center for Computational Analysis of Social and Organizational Systems (CASOS). (2008). Tanzania-Kenya-imoon.xml. Data available online: http://www.casos.cs.cmu.edu/ computational_tools/datasets/internal/tanzania_ kenya/index11.php. Also Available from Manchester (https://sites.google.com/site/ucinetsoftware/datasets/covert-networks). } \usage{ covert_46 } \description{ Data collected by the Center for Computational Analysis of Social and Organizational Systems, a research group at Carnegie Mellon University, on the participation of 18 Al Qaeda members in 25 functional tasks underlying the 1998 bombings of the U.S. Embassies in Nairobi, Kenya, and Dar es Salaam, Tanzania 2-Mode persons to Standing Committees. 2-mode matrix 18 x 25 persons to tasks, binary undirected. Relations are participation in tasks. } \references{ Gerdes, Luke M. (2014), ‘Dependency Centrality from Bipartite Social Networks’, \emph{Connections}, 34, 1&2 } \keyword{datasets}
# Run analysis_transcriptome.R and next extract_data_chipseq1.sh before running this script # ############################################### # # making plot with peaks promoters for all TF # # ############################################### # svglite(file = "~/ifpan-chipseq-timecourse/PLOTS/lineplot_promotores.svg", # width = 10, # height = 8) # # read.table("~/ChIP-seq/DATA/promotores_peaks_value.tsv", # header = FALSE, # sep = "\t", # stringsAsFactors = FALSE) %>% # set_colnames(c("gene.name", "chromosome", "start.range", "end.range", "gene.regulation", "TF", "time", "file", 1:40)) %>% # gather(., "bucket.range", "value", -c("gene.name", "chromosome", "start.range", "end.range", "gene.regulation", "TF", "time", "file")) %>% # group_by(bucket.range, time, TF, gene.regulation) %>% # summarize(mean.value = mean(value)) %>% # {ggplot(., aes(x = as.numeric(bucket.range)*500, y = mean.value, color = as.factor(gene.regulation))) + # geom_line(size = 0.5) + # facet_grid(TF~time, scales = "free_y") + # labs(fill = "Gene regulation") + # theme(axis.text.x = element_text(angle=45, hjust = 1, size = 14), # axis.text.y = element_text(size = 10), # axis.title.x = element_text(size = 18), # axis.title.y = element_text(size = 18), # strip.text.x = element_text(size = 16), # strip.text.y = element_text(size = 10), # legend.title = element_text(size = 18), # legend.text = element_text(size = 16), # legend.position = "bottom") + # #labs(fill = "Gene regulation") + # scale_color_manual(values = c("up-regulated" = "firebrick", # "random" = "gray", # "down-regulated" = "dodgerblue")) + # scale_x_continuous(limits=c(0, 20001), # breaks = c(1, 10001, 20001), # labels= c("-10000", "0", "10000")) + # ggtitle("Peaks for promoters")} # # dev.off() # ################################################################## # # making plot with peaks promoters for all TF - relative changes # # ################################################################## # svglite(file = 
"~/ifpan-chipseq-timecourse/PLOTS/lineplot_promotores_relative_changes.svg", # width = 10, # height = 8) # # read.table("~/ChIP-seq/DATA/promotores_peaks_value.tsv", # header = FALSE, # sep = "\t", # stringsAsFactors = FALSE) %>% # set_colnames(c("gene.name", "chromosome", "start.range", "end.range", "gene.regulation", "TF", "time", "file", 1:40)) %>% # gather(., "bucket.range", "value", -c("gene.name", "chromosome", "start.range", "end.range", "gene.regulation", "TF", "time", "file")) %>% # group_by(bucket.range, time, TF, gene.regulation) %>% # summarize(mean.value = mean(value)) %>% # mutate(number.regulation=c("down"=1, "random"=3, "up"=2)) %>% # mutate(control = ifelse(number.regulation == 3, 1, 0)) %>% # mutate(max = max(mean.value * control)) %>% # mutate(relative.value = mean.value / max) %>% # {ggplot(., aes(x = as.numeric(bucket.range)*500, y = relative.value, color = as.factor(gene.regulation))) + # geom_line(size = 0.5) + # facet_grid(TF~time, scales = "free_y") + # theme(axis.text.x = element_text(angle=45, hjust = 1, size = 14), # axis.text.y = element_text(size = 10), # axis.title.x = element_text(size = 18), # axis.title.y = element_text(size = 18), # strip.text.x = element_text(size = 16), # strip.text.y = element_text(size = 10), # legend.title = element_text(size = 18), # legend.text = element_text(size = 16), # legend.position = "bottom") + # scale_color_manual(values = c("up-regulated" = "firebrick", # "random" = "gray", # "down-regulated" = "dodgerblue")) + # scale_x_continuous(limits=c(0, 20001), # breaks = c(1, 10001, 20001), # labels= c("-10000", "0", "10000")) + # ggtitle("Relative peak changes for promoters")} # # dev.off() ################## # Choose four TF # ################## filtered_TF <- read.table("~/ChIP-seq/DATA/enhancer_amplitude_value.tsv", header = FALSE, sep = "\t", stringsAsFactors = FALSE) %>% set_colnames(c("gene.name", "chromosome", "start.range", "end.range", "gene.regulation", "TF", "time", "file", "amplitude")) %>% 
select(TF) %>% unique() %>% .$TF %>% sort %>% .[5:14] %>% .[-2] %>% .[-7:-8] %>% .[-4:-6] ################################################ # making plot with peaks promoters for four TF # ################################################ svglite(file = "~/ifpan-chipseq-timecourse/PLOTS/lineplot_promotores_fourTF.svg", width = 10, height = 8) read.table("~/ChIP-seq/DATA/promotores_peaks_value.tsv", header = FALSE, sep = "\t", stringsAsFactors = FALSE) %>% set_colnames(c("gene.name", "chromosome", "start.range", "end.range", "gene.regulation", "TF", "time", "file", 1:40)) %>% gather(., "bucket.range", "value", -c("gene.name", "chromosome", "start.range", "end.range", "gene.regulation", "TF", "time", "file")) %>% filter(TF %in% filtered_TF) %>% group_by(bucket.range, time, TF, gene.regulation) %>% summarize(mean.value = mean(value)) %>% {ggplot(., aes(x = as.numeric(bucket.range)*500, y = mean.value, color = as.factor(gene.regulation))) + geom_line(size = 0.5) + facet_grid(TF~time, scales = "free_y") + theme(axis.text.x = element_text(angle=45, hjust = 1), legend.position = "bottom") + scale_color_manual(values = c("up-regulated" = "firebrick", "random" = "gray", "down-regulated" = "dodgerblue")) + scale_x_continuous(limits=c(0, 20001), breaks = c(1, 10001, 20001), labels= c("-10000", "1", "10000")) + ggtitle("Peaks for promoters")} dev.off() ################################################################### # making plot with peaks promoters for four TF - relative changes # ################################################################### svglite(file = "~/ifpan-chipseq-timecourse/PLOTS/lineplot_promotores_relative_changes_fourTF.svg", width = 10, height = 8) read.table("~/ChIP-seq/DATA/promotores_peaks_value.tsv", header = FALSE, sep = "\t", stringsAsFactors = FALSE) %>% set_colnames(c("gene.name", "chromosome", "start.range", "end.range", "gene.regulation", "TF", "time", "file", 1:40)) %>% gather(., "bucket.range", "value", -c("gene.name", "chromosome", 
"start.range", "end.range", "gene.regulation", "TF", "time", "file")) %>% filter(TF %in% filtered_TF) %>% group_by(bucket.range, time, TF, gene.regulation) %>% summarize(mean.value = mean(value)) %>% mutate(number.regulation=c("down"=1, "random"=3, "up"=2)) %>% mutate(control = ifelse(number.regulation == 3, 1, 0)) %>% mutate(max = max(mean.value * control)) %>% mutate(relative.value = mean.value / max) %>% {ggplot(., aes(x = as.numeric(bucket.range)*500, y = relative.value, color = as.factor(gene.regulation))) + geom_line(size = 0.5) + facet_grid(TF~time, scales = "free_y") + theme(axis.text.x = element_text(angle=45, hjust = 1), legend.position = "bottom") + scale_color_manual(values = c("up-regulated" = "firebrick", "random" = "gray", "down-regulated" = "dodgerblue")) + scale_x_continuous(limits=c(0, 20001), breaks = c(1, 10001, 20001), labels= c("-10000", "1", "10000")) + ggtitle("Relative peak changes for promoters")} dev.off() ########################### # two-way ANOVA promoters # ########################### read.table("~/ChIP-seq/DATA/promotores_peaks_value.tsv", header = FALSE, sep = "\t", stringsAsFactors = FALSE) %>% set_colnames(c("gene.name", "chromosome", "start.range", "end.range", "gene.regulation", "TF", "time", "file", 1:40)) %>% gather(., "bucket.range", "value", -c("gene.name", "chromosome", "start.range", "end.range", "gene.regulation", "TF", "time", "file")) %>% filter(TF %in% filtered_TF) %>% mutate(gene.name = replace(gene.name, gene.regulation == "random", "NA")) %>% group_by(gene.name, bucket.range, time, TF, gene.regulation) %>% summarize(mean.value = mean(value)) %>% ungroup() %>% as.data.frame() %>% mutate(number.regulation= replace(gene.regulation, gene.regulation=="down-regulated", 1) %>% replace(gene.regulation == "random", 3) %>% replace(gene.regulation == "up-regulated", 2)) %>% mutate(control = ifelse(number.regulation == 3, 1, 0)) %>% group_by(bucket.range, time, TF) %>% mutate(max = max(mean.value * control)) %>% 
mutate(relative.value = mean.value / max) %>% ungroup() %>% mutate(bucket.range = as.numeric(bucket.range)) %>% as.data.frame() %>% filter(gene.regulation != "random") %>% group_by(time, gene.regulation, gene.name, TF) %>% summarise(amplitude = max(relative.value)) -> tmp.data.promoters lapply(split(tmp.data.promoters, tmp.data.promoters$TF),function(x) {aov(amplitude ~ time*gene.regulation, data = x) %>% summary}) rm(tmp.data.promoters)
/SCRIPTS/visualization_promoters.R
no_license
ippas/ifpan-chipseq-timecourse
R
false
false
9,913
r
# Run analysis_transcriptome.R and next extract_data_chipseq1.sh before running this script # ############################################### # # making plot with peaks promoters for all TF # # ############################################### # svglite(file = "~/ifpan-chipseq-timecourse/PLOTS/lineplot_promotores.svg", # width = 10, # height = 8) # # read.table("~/ChIP-seq/DATA/promotores_peaks_value.tsv", # header = FALSE, # sep = "\t", # stringsAsFactors = FALSE) %>% # set_colnames(c("gene.name", "chromosome", "start.range", "end.range", "gene.regulation", "TF", "time", "file", 1:40)) %>% # gather(., "bucket.range", "value", -c("gene.name", "chromosome", "start.range", "end.range", "gene.regulation", "TF", "time", "file")) %>% # group_by(bucket.range, time, TF, gene.regulation) %>% # summarize(mean.value = mean(value)) %>% # {ggplot(., aes(x = as.numeric(bucket.range)*500, y = mean.value, color = as.factor(gene.regulation))) + # geom_line(size = 0.5) + # facet_grid(TF~time, scales = "free_y") + # labs(fill = "Gene regulation") + # theme(axis.text.x = element_text(angle=45, hjust = 1, size = 14), # axis.text.y = element_text(size = 10), # axis.title.x = element_text(size = 18), # axis.title.y = element_text(size = 18), # strip.text.x = element_text(size = 16), # strip.text.y = element_text(size = 10), # legend.title = element_text(size = 18), # legend.text = element_text(size = 16), # legend.position = "bottom") + # #labs(fill = "Gene regulation") + # scale_color_manual(values = c("up-regulated" = "firebrick", # "random" = "gray", # "down-regulated" = "dodgerblue")) + # scale_x_continuous(limits=c(0, 20001), # breaks = c(1, 10001, 20001), # labels= c("-10000", "0", "10000")) + # ggtitle("Peaks for promoters")} # # dev.off() # ################################################################## # # making plot with peaks promoters for all TF - relative changes # # ################################################################## # svglite(file = 
"~/ifpan-chipseq-timecourse/PLOTS/lineplot_promotores_relative_changes.svg", # width = 10, # height = 8) # # read.table("~/ChIP-seq/DATA/promotores_peaks_value.tsv", # header = FALSE, # sep = "\t", # stringsAsFactors = FALSE) %>% # set_colnames(c("gene.name", "chromosome", "start.range", "end.range", "gene.regulation", "TF", "time", "file", 1:40)) %>% # gather(., "bucket.range", "value", -c("gene.name", "chromosome", "start.range", "end.range", "gene.regulation", "TF", "time", "file")) %>% # group_by(bucket.range, time, TF, gene.regulation) %>% # summarize(mean.value = mean(value)) %>% # mutate(number.regulation=c("down"=1, "random"=3, "up"=2)) %>% # mutate(control = ifelse(number.regulation == 3, 1, 0)) %>% # mutate(max = max(mean.value * control)) %>% # mutate(relative.value = mean.value / max) %>% # {ggplot(., aes(x = as.numeric(bucket.range)*500, y = relative.value, color = as.factor(gene.regulation))) + # geom_line(size = 0.5) + # facet_grid(TF~time, scales = "free_y") + # theme(axis.text.x = element_text(angle=45, hjust = 1, size = 14), # axis.text.y = element_text(size = 10), # axis.title.x = element_text(size = 18), # axis.title.y = element_text(size = 18), # strip.text.x = element_text(size = 16), # strip.text.y = element_text(size = 10), # legend.title = element_text(size = 18), # legend.text = element_text(size = 16), # legend.position = "bottom") + # scale_color_manual(values = c("up-regulated" = "firebrick", # "random" = "gray", # "down-regulated" = "dodgerblue")) + # scale_x_continuous(limits=c(0, 20001), # breaks = c(1, 10001, 20001), # labels= c("-10000", "0", "10000")) + # ggtitle("Relative peak changes for promoters")} # # dev.off() ################## # Choose four TF # ################## filtered_TF <- read.table("~/ChIP-seq/DATA/enhancer_amplitude_value.tsv", header = FALSE, sep = "\t", stringsAsFactors = FALSE) %>% set_colnames(c("gene.name", "chromosome", "start.range", "end.range", "gene.regulation", "TF", "time", "file", "amplitude")) %>% 
select(TF) %>% unique() %>% .$TF %>% sort %>% .[5:14] %>% .[-2] %>% .[-7:-8] %>% .[-4:-6] ################################################ # making plot with peaks promoters for four TF # ################################################ svglite(file = "~/ifpan-chipseq-timecourse/PLOTS/lineplot_promotores_fourTF.svg", width = 10, height = 8) read.table("~/ChIP-seq/DATA/promotores_peaks_value.tsv", header = FALSE, sep = "\t", stringsAsFactors = FALSE) %>% set_colnames(c("gene.name", "chromosome", "start.range", "end.range", "gene.regulation", "TF", "time", "file", 1:40)) %>% gather(., "bucket.range", "value", -c("gene.name", "chromosome", "start.range", "end.range", "gene.regulation", "TF", "time", "file")) %>% filter(TF %in% filtered_TF) %>% group_by(bucket.range, time, TF, gene.regulation) %>% summarize(mean.value = mean(value)) %>% {ggplot(., aes(x = as.numeric(bucket.range)*500, y = mean.value, color = as.factor(gene.regulation))) + geom_line(size = 0.5) + facet_grid(TF~time, scales = "free_y") + theme(axis.text.x = element_text(angle=45, hjust = 1), legend.position = "bottom") + scale_color_manual(values = c("up-regulated" = "firebrick", "random" = "gray", "down-regulated" = "dodgerblue")) + scale_x_continuous(limits=c(0, 20001), breaks = c(1, 10001, 20001), labels= c("-10000", "1", "10000")) + ggtitle("Peaks for promoters")} dev.off() ################################################################### # making plot with peaks promoters for four TF - relative changes # ################################################################### svglite(file = "~/ifpan-chipseq-timecourse/PLOTS/lineplot_promotores_relative_changes_fourTF.svg", width = 10, height = 8) read.table("~/ChIP-seq/DATA/promotores_peaks_value.tsv", header = FALSE, sep = "\t", stringsAsFactors = FALSE) %>% set_colnames(c("gene.name", "chromosome", "start.range", "end.range", "gene.regulation", "TF", "time", "file", 1:40)) %>% gather(., "bucket.range", "value", -c("gene.name", "chromosome", 
"start.range", "end.range", "gene.regulation", "TF", "time", "file")) %>% filter(TF %in% filtered_TF) %>% group_by(bucket.range, time, TF, gene.regulation) %>% summarize(mean.value = mean(value)) %>% mutate(number.regulation=c("down"=1, "random"=3, "up"=2)) %>% mutate(control = ifelse(number.regulation == 3, 1, 0)) %>% mutate(max = max(mean.value * control)) %>% mutate(relative.value = mean.value / max) %>% {ggplot(., aes(x = as.numeric(bucket.range)*500, y = relative.value, color = as.factor(gene.regulation))) + geom_line(size = 0.5) + facet_grid(TF~time, scales = "free_y") + theme(axis.text.x = element_text(angle=45, hjust = 1), legend.position = "bottom") + scale_color_manual(values = c("up-regulated" = "firebrick", "random" = "gray", "down-regulated" = "dodgerblue")) + scale_x_continuous(limits=c(0, 20001), breaks = c(1, 10001, 20001), labels= c("-10000", "1", "10000")) + ggtitle("Relative peak changes for promoters")} dev.off() ########################### # two-way ANOVA promoters # ########################### read.table("~/ChIP-seq/DATA/promotores_peaks_value.tsv", header = FALSE, sep = "\t", stringsAsFactors = FALSE) %>% set_colnames(c("gene.name", "chromosome", "start.range", "end.range", "gene.regulation", "TF", "time", "file", 1:40)) %>% gather(., "bucket.range", "value", -c("gene.name", "chromosome", "start.range", "end.range", "gene.regulation", "TF", "time", "file")) %>% filter(TF %in% filtered_TF) %>% mutate(gene.name = replace(gene.name, gene.regulation == "random", "NA")) %>% group_by(gene.name, bucket.range, time, TF, gene.regulation) %>% summarize(mean.value = mean(value)) %>% ungroup() %>% as.data.frame() %>% mutate(number.regulation= replace(gene.regulation, gene.regulation=="down-regulated", 1) %>% replace(gene.regulation == "random", 3) %>% replace(gene.regulation == "up-regulated", 2)) %>% mutate(control = ifelse(number.regulation == 3, 1, 0)) %>% group_by(bucket.range, time, TF) %>% mutate(max = max(mean.value * control)) %>% 
mutate(relative.value = mean.value / max) %>% ungroup() %>% mutate(bucket.range = as.numeric(bucket.range)) %>% as.data.frame() %>% filter(gene.regulation != "random") %>% group_by(time, gene.regulation, gene.name, TF) %>% summarise(amplitude = max(relative.value)) -> tmp.data.promoters lapply(split(tmp.data.promoters, tmp.data.promoters$TF),function(x) {aov(amplitude ~ time*gene.regulation, data = x) %>% summary}) rm(tmp.data.promoters)
#################################################################### ## dfm class definition and methods for primitives, ops, etc. ## ## Ken Benoit #################################################################### #' Virtual class "dfm" for a document-feature matrix #' #' @description The dfm class of object is a type of \link[Matrix]{Matrix-class} #' object with additional slots, described below. \pkg{quanteda} uses two #' subclasses of the \code{dfm} class, depending on whether the object can be #' represented by a sparse matrix, in which case it is a \code{dfmSparse} #' class object, or if dense, then a \code{dfmDense} object. See Details. #' #' @slot settings settings that govern corpus handling and subsequent downstream #' operations, including the settings used to clean and tokenize the texts, #' and to create the dfm. See \code{\link{settings}}. #' @slot weighting the feature weighting applied to the dfm. Default is #' \code{"frequency"}, indicating that the values in the cells of the dfm are #' simple feature counts. To change this, use the \code{\link{weight}} #' method. #' @slot smooth a smoothing parameter, defaults to zero. Can be changed using #' either the \code{\link{smooth}} or the \code{\link{weight}} methods. #' @slot Dimnames These are inherited from \link[Matrix]{Matrix-class} but are #' named \code{docs} and \code{features} respectively. #' @details The \code{dfm} class is a virtual class that will contain one of two #' subclasses for containing the cell counts of document-feature matrixes: #' \code{dfmSparse} or \code{dfmDense}. 
#' @seealso \link{dfm} #' @export #' @import methods #' @docType class #' @name dfm-class setClass("dfm", slots = c(settings = "list", weighting = "character", smooth = "numeric"), prototype = list(settings = list(NULL), Dim = integer(2), Dimnames = list(docs=NULL, features=NULL), weighting = "frequency", smooth = 0), contains = "Matrix") #' @rdname dfm-class #' @details The \code{dfmSparse} class is a sparse matrix version of #' \code{dfm-class}, inheriting \link[Matrix]{dgCMatrix-class} from the #' \pkg{Matrix} package. It is the default object type created when feature #' counts are the object of interest, as typical text-based feature counts #' tend contain many zeroes. As long as subsequent transformations of the dfm #' preserve cells with zero counts, the dfm should remain sparse. #' #' When the \pkg{Matrix} package implements sparse integer matrixes, we will #' switch the default object class to this object type, as integers are 4 #' bytes each (compared to the current numeric double type requiring 8 bytes #' per cell.) #' @export setClass("dfmSparse", contains = c("dfm", "dgCMatrix")) #' @rdname dfm-class #' @details The \code{dfmDense} class is a sparse matrix version of \code{dfm-class}, #' inheriting \link[Matrix]{dgeMatrix-class} from the \pkg{Matrix} package. dfm objects that #' are converted through weighting or other transformations into cells without zeroes will #' be automatically converted to the dfmDense class. This will necessarily be a much larger sized #' object than one of \code{dfmSparse} class, because each cell is recorded as a numeric (double) type #' requiring 8 bytes of storage. 
#' @export setClass("dfmDense", contains = c("dfm", "dgeMatrix")) # # @rdname print.dfm # # @export # setMethod("print", signature(x = "dfm"), callNextMethod()) # #' print a dfm object #' #' print methods for document-feature matrices #' @name print.dfm NULL #' @param x the dfm to be printed #' @param show.values print the dfm as a matrix or array (if resampled). #' @param show.settings Print the settings used to create the dfm. See #' \link{settings}. #' @param ... further arguments passed to or from other methods #' @export #' @rdname print.dfm setMethod("print", signature(x = "dfmSparse"), function(x, show.values=FALSE, show.settings=FALSE, ...) { cat("Document-feature matrix of: ", ndoc(x), " document", ifelse(ndoc(x)>1, "s, ", ", "), dim(x)[2], " feature", ifelse(dim(x)[2]>1, "s", ""), ifelse(is.resampled(x), paste(", ", nresample(x), " resamples", sep=""), ""), ".\n", sep="") if (show.settings) { cat("Settings: TO BE IMPLEMENTED.") } if (show.values | (nrow(x)<=20 & ncol(x)<=20)) { Matrix::printSpMatrix2(x, col.names=TRUE, zero.print=0, ...) } }) #' @rdname print.dfm setMethod("print", signature(x = "dfmDense"), function(x, show.values=FALSE, show.settings=FALSE, ...) { cat("Document-feature matrix of: ", ndoc(x), " document", ifelse(ndoc(x)>1, "s, ", ", "), dim(x)[2], " feature", ifelse(dim(x)[2]>1, "s", ""), ifelse(is.resampled(x), paste(", ", nresample(x), " resamples", sep=""), ""), ".\n", sep="") if (show.settings) { cat("Settings: TO BE IMPLEMENTED.") } if (show.values | (nrow(x)<=20 & ncol(x)<=20)) { getMethod("show", "denseMatrix")(x, ...) } }) #' @rdname print.dfm #' @param object the item to be printed setMethod("show", signature(object = "dfmSparse"), function(object) print(object)) #' @rdname print.dfm setMethod("show", signature(object = "dfmDense"), function(object) print(object)) #' @method print dfm #' @rdname print.dfm print.dfm <- function(x, show.values=FALSE, show.settings=FALSE, ...) 
{ cat("Document-feature matrix of: ", ndoc(x), " document", ifelse(ndoc(x)>1, "s, ", ", "), dim(x)[2], " feature", ifelse(dim(x)[2]>1, "s", ""), ".\n", sep="") cat(ndoc(x), "x", nfeature(x), "dense matrix of (S3) class \"dfm\"\n") # ifelse(is.resampled(x), paste(", ", nresample(x), " resamples", sep=""), ""), if (show.settings) { cat("Settings: TO BE IMPLEMENTED.") } if (show.values | (nrow(x)<=20 & ncol(x)<=20)) { class(x) <- class(x)[2] attr(x, "settings") <- NULL attr(x, "weighting") <- NULL print(x) } } ## S4 Method for the S4 class sparse dfm # @param x the sparse dfm # @rdname dfm-class # @method t dfmSparse #setMethod("t", signature(x = "dfmSparse"), getMethod("t", "dgCMatrix")) ## S4 Method for the S4 class dense/weighted dfm # @rdname dfm-class # @method t dfmSparse # setMethod("t", signature(x = "dfmDense"), definition = # function(x) { # selectMethod("t", "dgeMatrix") # }) #getMethod("t", "dgeMatrix")) ## S4 Method for the S3 class dense dfm #' @export #' @param x the dfm object #' @rdname dfm-class setMethod("t", signature = (x = "dfm"), definition = function(x) { newx <- t(matrix(x, nrow=nrow(x))) dimnames(newx) <- rev(dimnames(x)) # if (isS4(x)) { # newx <- t(as.Matrix(x)) # attributes(newx)$dimnames <- rev(x@Dimnames) # } else { # attsorig <- attributes(x) # attributes(newx)$dimnames <- rev(attsorig$dimnames) # } newx }) # @details \code{rowSums} and \code{colSums} form row and column sums and means for \link{dfm-class} objects. # @param x a dfm, inheriting from \link[Matrix]{Matrix} # @param na.rm if \code{TRUE}, omit missing values (including \code{NaN}) from # the calculations # @param dims ignored # @param ... 
additional arguments, for methods/generic compatibility # @return returns a named (non-sparse) numeric vector # @rdname dfm-class # @aliases colSums rowSums # @export # @examples # myDfm <- dfm(inaugTexts, verbose=FALSE) # colSums(myDfm[, 1:10]) # rowSums(myDfm) # @export # setGeneric("colSums", # def = function(x, na.rm = FALSE, dims = 1L, ...) standardGeneric("colSums")) # # # @export # # @rdname dfm-class # setGeneric("rowSums", # def = function(x, na.rm = FALSE, dims = 1L, ...) standardGeneric("rowSums")) # @method colSums dfmSparse #' @rdname dfm-class #' @param na.rm if \code{TRUE}, omit missing values (including \code{NaN}) from #' the calculations #' @param dims ignored # @export setMethod("colSums", signature = (x = "dfmSparse"), definition = function(x, na.rm = FALSE, dims = 1L, ...) { csums <- callNextMethod() names(csums) <- features(x) csums }) # @method colSums dfmDense #' @rdname dfm-class # @export setMethod("colSums", signature = (x = "dfmDense"), definition = function(x, na.rm = FALSE, dims = 1L, ...) { csums <- callNextMethod() names(csums) <- features(x) csums }) # @method rowSums dfmSparse #' @rdname dfm-class # @export setMethod("rowSums", signature = (x = "dfmSparse"), definition = function(x, na.rm = FALSE, dims = 1L, ...) { rsums <- callNextMethod() names(rsums) <- docnames(x) rsums }) # @method rowSums dfmDense #' @rdname dfm-class # @export setMethod("rowSums", signature = (x = "dfmDense"), definition = function(x, na.rm = FALSE, dims = 1L, ...) 
{ rsums <- callNextMethod() names(rsums) <- docnames(x) rsums }) ## S3 METHODS FOR INDEXING DENSE dfm object #' @export #' @method [ dfm #' @rdname dfm-class `[.dfm` <- function(x, i, j, ..., drop=FALSE) { if (drop) warning("drop=TRUE not currently supported") m <- NextMethod("[", drop=FALSE) attr(m, "settings") <- attr(x, "settings") attr(m, "weighting") <- attr(x, "weighting") class(m) <- class(x) m } ## S4 METHODS FOR INDEXING SPARSE dfm (dfmSparse) objects # FROM THE MATRIX PACKAGE - no need to duplicate here # setClassUnion("index", members = c("numeric", "integer", "logical", "character")) wrapIndexOperation <- function(x, i=NULL, j=NULL, ..., drop=FALSE) { if (is(x, "dfmSparse")) { asType <- "sparseMatrix" newType <- "dfmSparse" } else { asType <- "denseMatrix" newType <- "dfmDense" } if (drop) warning("drop=TRUE not currently supported") new(newType, "["(as(x, asType), i, j, ..., drop=FALSE)) } #' @param i index for documents #' @param j index for features #' @param drop always set to \code{FALSE} #' @param ... 
additional arguments not used here #' @rdname dfm-class setMethod("[", signature(x = "dfmDense", i = "index", j = "index", drop = "missing"), wrapIndexOperation) #' @rdname dfm-class setMethod("[", signature(x = "dfmDense", i = "index", j = "index", drop = "logical"), wrapIndexOperation) #' @rdname dfm-class setMethod("[", signature(x = "dfmDense", i = "index", j = "missing", drop = "missing"), function(x, i, j, ..., drop=FALSE) new("dfmDense", "["(as(x, "denseMatrix"), i, , ..., drop=FALSE))) #' @rdname dfm-class setMethod("[", signature(x = "dfmDense", i = "index", j = "missing", drop = "logical"), function(x, i, j, ..., drop=FALSE) { if (drop) warning("drop=TRUE not currently supported") new("dfmDense", "["(as(x, "denseMatrix"), i, , ..., drop=FALSE)) }) #' @rdname dfm-class setMethod("[", signature(x = "dfmDense", i = "missing", j = "index", drop = "missing"), function(x, i, j, ..., drop=FALSE) new("dfmDense", "["(as(x, "denseMatrix"), , j, ..., drop=FALSE))) #' @rdname dfm-class setMethod("[", signature(x = "dfmDense", i = "missing", j = "index", drop = "logical"), function(x, i, j, ..., drop=FALSE) { if (drop) warning("drop=TRUE not currently supported") new("dfmDense", "["(as(x, "denseMatrix"), , j, ..., drop=FALSE)) }) #' @rdname dfm-class setMethod("[", signature(x = "dfmDense", i = "missing", j = "missing", drop = "missing"), function(x, i, j, ..., drop=FALSE) new("dfmDense", "["(as(x, "denseMatrix"), , , ..., drop=FALSE))) #' @rdname dfm-class setMethod("[", signature(x = "dfmDense", i = "missing", j = "missing", drop = "logical"), function(x, i, j, ..., drop=FALSE) { if (drop) warning("drop=TRUE not currently supported") new("dfmDense", "["(as(x, "denseMatrix"), , , ..., drop=FALSE)) }) #' @rdname dfm-class setMethod("[", signature(x = "dfmSparse", i = "index", j = "index", drop = "missing"), wrapIndexOperation) #' @rdname dfm-class setMethod("[", signature(x = "dfmSparse", i = "index", j = "index", drop = "logical"), wrapIndexOperation) #' @rdname 
dfm-class setMethod("[", signature(x = "dfmSparse", i = "index", j = "missing", drop = "missing"), function(x, i, j, ..., drop=FALSE) new("dfmSparse", "["(as(x, "sparseMatrix"), i, , ..., drop=FALSE))) #' @rdname dfm-class setMethod("[", signature(x = "dfmSparse", i = "index", j = "missing", drop = "logical"), function(x, i, j, ..., drop=FALSE) { if (drop) warning("drop=TRUE not currently supported") new("dfmSparse", "["(as(x, "sparseMatrix"), i, , ..., drop=FALSE)) }) #' @rdname dfm-class setMethod("[", signature(x = "dfmSparse", i = "missing", j = "index", drop = "missing"), function(x, i, j, ..., drop=FALSE) new("dfmSparse", "["(as(x, "sparseMatrix"), , j, ..., drop=FALSE))) #' @rdname dfm-class setMethod("[", signature(x = "dfmSparse", i = "missing", j = "index", drop = "logical"), function(x, i, j, ..., drop=FALSE) { if (drop) warning("drop=TRUE not currently supported") new("dfmSparse", "["(as(x, "sparseMatrix"), , j, ..., drop=FALSE)) }) #' @rdname dfm-class setMethod("[", signature(x = "dfmSparse", i = "missing", j = "missing", drop = "missing"), function(x, i, j, ..., drop=FALSE) new("dfmSparse", "["(as(x, "sparseMatrix"), , , ..., drop=FALSE))) #' @rdname dfm-class setMethod("[", signature(x = "dfmSparse", i = "missing", j = "missing", drop = "logical"), function(x, i, j, ..., drop=FALSE) { if (drop) warning("drop=TRUE not currently supported") new("dfmSparse", "["(as(x, "sparseMatrix"), , , ..., drop=FALSE)) }) #' @param e1 first quantity in "+" operation for dfm #' @param e2 second quantity in "+" operation for dfm #' @rdname dfm-class setMethod("+", signature(e1 = "dfmSparse", e2 = "numeric"), function(e1, e2) { as(as(e1, "Matrix") + e2, ifelse(e2==0, "dfmSparse", "dfmDense")) }) #' @rdname dfm-class setMethod("+", signature(e1 = "numeric", e2 = "dfmSparse"), function(e1, e2) { as(e1 + as(e2, "Matrix"), ifelse(e1==0, "dfmSparse", "dfmDense")) }) #' @rdname dfm-class setMethod("+", signature(e1 = "dfmDense", e2 = "numeric"), function(e1, e2) { as(as(e1, 
"Matrix") + e2, "dfmDense") }) #' @rdname dfm-class setMethod("+", signature(e1 = "numeric", e2 = "dfmDense"), function(e1, e2) { as(e1 + as(e2, "Matrix"), "dfmDense") }) #' @rdname dfm-class #' @export #' @examples #' \dontshow{ #' dfmSparse <- dfm(inaugTexts, verbose=FALSE) #' str(as.matrix(dfmSparse)) #' class(as.matrix(dfmSparse)) #' dfmDense <- dfm(inaugTexts, verbose=FALSE, matrixType="dense") #' str(as.matrix(dfmDense)) #' class(as.matrix(dfmDense)) #' identical(as.matrix(dfmSparse), as.matrix(dfmDense)) #' } setMethod("as.matrix", signature(x="dfm"), function(x) { if (isS4(x)) { f <- getMethod("as.matrix", "Matrix") x <- f(x) names(dimnames(x)) <- c("docs", "features") } else { x <- matrix(x, nrow=ndoc(x), dimnames = list(docs = docnames(x), features = features(x))) } x }) #' @rdname dfm-class #' @export #' @examples #' \dontshow{ #' dfmSparse <- dfm(inaugTexts, verbose=FALSE) #' str(as.data.frame(dfmSparse)) #' class(as.data.frame(dfmSparse)) #' dfmDense <- dfm(inaugTexts, verbose=FALSE, matrixType="dense") #' str(as.data.frame(dfmDense)) #' class(as.data.frame(dfmDense)) #' identical(as.data.frame(dfmSparse), as.data.frame(dfmDense)) #' } setMethod("as.data.frame", signature(x="dfm"), function(x) as.data.frame(as.matrix(x)))
/R/dfm-classes.R
no_license
saldaihani/quanteda
R
false
false
16,871
r
#################################################################### ## dfm class definition and methods for primitives, ops, etc. ## ## Ken Benoit #################################################################### #' Virtual class "dfm" for a document-feature matrix #' #' @description The dfm class of object is a type of \link[Matrix]{Matrix-class} #' object with additional slots, described below. \pkg{quanteda} uses two #' subclasses of the \code{dfm} class, depending on whether the object can be #' represented by a sparse matrix, in which case it is a \code{dfmSparse} #' class object, or if dense, then a \code{dfmDense} object. See Details. #' #' @slot settings settings that govern corpus handling and subsequent downstream #' operations, including the settings used to clean and tokenize the texts, #' and to create the dfm. See \code{\link{settings}}. #' @slot weighting the feature weighting applied to the dfm. Default is #' \code{"frequency"}, indicating that the values in the cells of the dfm are #' simple feature counts. To change this, use the \code{\link{weight}} #' method. #' @slot smooth a smoothing parameter, defaults to zero. Can be changed using #' either the \code{\link{smooth}} or the \code{\link{weight}} methods. #' @slot Dimnames These are inherited from \link[Matrix]{Matrix-class} but are #' named \code{docs} and \code{features} respectively. #' @details The \code{dfm} class is a virtual class that will contain one of two #' subclasses for containing the cell counts of document-feature matrixes: #' \code{dfmSparse} or \code{dfmDense}. 
#' @seealso \link{dfm} #' @export #' @import methods #' @docType class #' @name dfm-class setClass("dfm", slots = c(settings = "list", weighting = "character", smooth = "numeric"), prototype = list(settings = list(NULL), Dim = integer(2), Dimnames = list(docs=NULL, features=NULL), weighting = "frequency", smooth = 0), contains = "Matrix") #' @rdname dfm-class #' @details The \code{dfmSparse} class is a sparse matrix version of #' \code{dfm-class}, inheriting \link[Matrix]{dgCMatrix-class} from the #' \pkg{Matrix} package. It is the default object type created when feature #' counts are the object of interest, as typical text-based feature counts #' tend contain many zeroes. As long as subsequent transformations of the dfm #' preserve cells with zero counts, the dfm should remain sparse. #' #' When the \pkg{Matrix} package implements sparse integer matrixes, we will #' switch the default object class to this object type, as integers are 4 #' bytes each (compared to the current numeric double type requiring 8 bytes #' per cell.) #' @export setClass("dfmSparse", contains = c("dfm", "dgCMatrix")) #' @rdname dfm-class #' @details The \code{dfmDense} class is a sparse matrix version of \code{dfm-class}, #' inheriting \link[Matrix]{dgeMatrix-class} from the \pkg{Matrix} package. dfm objects that #' are converted through weighting or other transformations into cells without zeroes will #' be automatically converted to the dfmDense class. This will necessarily be a much larger sized #' object than one of \code{dfmSparse} class, because each cell is recorded as a numeric (double) type #' requiring 8 bytes of storage. 
#' @export setClass("dfmDense", contains = c("dfm", "dgeMatrix")) # # @rdname print.dfm # # @export # setMethod("print", signature(x = "dfm"), callNextMethod()) # #' print a dfm object #' #' print methods for document-feature matrices #' @name print.dfm NULL #' @param x the dfm to be printed #' @param show.values print the dfm as a matrix or array (if resampled). #' @param show.settings Print the settings used to create the dfm. See #' \link{settings}. #' @param ... further arguments passed to or from other methods #' @export #' @rdname print.dfm setMethod("print", signature(x = "dfmSparse"), function(x, show.values=FALSE, show.settings=FALSE, ...) { cat("Document-feature matrix of: ", ndoc(x), " document", ifelse(ndoc(x)>1, "s, ", ", "), dim(x)[2], " feature", ifelse(dim(x)[2]>1, "s", ""), ifelse(is.resampled(x), paste(", ", nresample(x), " resamples", sep=""), ""), ".\n", sep="") if (show.settings) { cat("Settings: TO BE IMPLEMENTED.") } if (show.values | (nrow(x)<=20 & ncol(x)<=20)) { Matrix::printSpMatrix2(x, col.names=TRUE, zero.print=0, ...) } }) #' @rdname print.dfm setMethod("print", signature(x = "dfmDense"), function(x, show.values=FALSE, show.settings=FALSE, ...) { cat("Document-feature matrix of: ", ndoc(x), " document", ifelse(ndoc(x)>1, "s, ", ", "), dim(x)[2], " feature", ifelse(dim(x)[2]>1, "s", ""), ifelse(is.resampled(x), paste(", ", nresample(x), " resamples", sep=""), ""), ".\n", sep="") if (show.settings) { cat("Settings: TO BE IMPLEMENTED.") } if (show.values | (nrow(x)<=20 & ncol(x)<=20)) { getMethod("show", "denseMatrix")(x, ...) } }) #' @rdname print.dfm #' @param object the item to be printed setMethod("show", signature(object = "dfmSparse"), function(object) print(object)) #' @rdname print.dfm setMethod("show", signature(object = "dfmDense"), function(object) print(object)) #' @method print dfm #' @rdname print.dfm print.dfm <- function(x, show.values=FALSE, show.settings=FALSE, ...) 
{ cat("Document-feature matrix of: ", ndoc(x), " document", ifelse(ndoc(x)>1, "s, ", ", "), dim(x)[2], " feature", ifelse(dim(x)[2]>1, "s", ""), ".\n", sep="") cat(ndoc(x), "x", nfeature(x), "dense matrix of (S3) class \"dfm\"\n") # ifelse(is.resampled(x), paste(", ", nresample(x), " resamples", sep=""), ""), if (show.settings) { cat("Settings: TO BE IMPLEMENTED.") } if (show.values | (nrow(x)<=20 & ncol(x)<=20)) { class(x) <- class(x)[2] attr(x, "settings") <- NULL attr(x, "weighting") <- NULL print(x) } } ## S4 Method for the S4 class sparse dfm # @param x the sparse dfm # @rdname dfm-class # @method t dfmSparse #setMethod("t", signature(x = "dfmSparse"), getMethod("t", "dgCMatrix")) ## S4 Method for the S4 class dense/weighted dfm # @rdname dfm-class # @method t dfmSparse # setMethod("t", signature(x = "dfmDense"), definition = # function(x) { # selectMethod("t", "dgeMatrix") # }) #getMethod("t", "dgeMatrix")) ## S4 Method for the S3 class dense dfm #' @export #' @param x the dfm object #' @rdname dfm-class setMethod("t", signature = (x = "dfm"), definition = function(x) { newx <- t(matrix(x, nrow=nrow(x))) dimnames(newx) <- rev(dimnames(x)) # if (isS4(x)) { # newx <- t(as.Matrix(x)) # attributes(newx)$dimnames <- rev(x@Dimnames) # } else { # attsorig <- attributes(x) # attributes(newx)$dimnames <- rev(attsorig$dimnames) # } newx }) # @details \code{rowSums} and \code{colSums} form row and column sums and means for \link{dfm-class} objects. # @param x a dfm, inheriting from \link[Matrix]{Matrix} # @param na.rm if \code{TRUE}, omit missing values (including \code{NaN}) from # the calculations # @param dims ignored # @param ... 
additional arguments, for methods/generic compatibility # @return returns a named (non-sparse) numeric vector # @rdname dfm-class # @aliases colSums rowSums # @export # @examples # myDfm <- dfm(inaugTexts, verbose=FALSE) # colSums(myDfm[, 1:10]) # rowSums(myDfm) # @export # setGeneric("colSums", # def = function(x, na.rm = FALSE, dims = 1L, ...) standardGeneric("colSums")) # # # @export # # @rdname dfm-class # setGeneric("rowSums", # def = function(x, na.rm = FALSE, dims = 1L, ...) standardGeneric("rowSums")) # @method colSums dfmSparse #' @rdname dfm-class #' @param na.rm if \code{TRUE}, omit missing values (including \code{NaN}) from #' the calculations #' @param dims ignored # @export setMethod("colSums", signature = (x = "dfmSparse"), definition = function(x, na.rm = FALSE, dims = 1L, ...) { csums <- callNextMethod() names(csums) <- features(x) csums }) # @method colSums dfmDense #' @rdname dfm-class # @export setMethod("colSums", signature = (x = "dfmDense"), definition = function(x, na.rm = FALSE, dims = 1L, ...) { csums <- callNextMethod() names(csums) <- features(x) csums }) # @method rowSums dfmSparse #' @rdname dfm-class # @export setMethod("rowSums", signature = (x = "dfmSparse"), definition = function(x, na.rm = FALSE, dims = 1L, ...) { rsums <- callNextMethod() names(rsums) <- docnames(x) rsums }) # @method rowSums dfmDense #' @rdname dfm-class # @export setMethod("rowSums", signature = (x = "dfmDense"), definition = function(x, na.rm = FALSE, dims = 1L, ...) 
{ rsums <- callNextMethod() names(rsums) <- docnames(x) rsums }) ## S3 METHODS FOR INDEXING DENSE dfm object #' @export #' @method [ dfm #' @rdname dfm-class `[.dfm` <- function(x, i, j, ..., drop=FALSE) { if (drop) warning("drop=TRUE not currently supported") m <- NextMethod("[", drop=FALSE) attr(m, "settings") <- attr(x, "settings") attr(m, "weighting") <- attr(x, "weighting") class(m) <- class(x) m } ## S4 METHODS FOR INDEXING SPARSE dfm (dfmSparse) objects # FROM THE MATRIX PACKAGE - no need to duplicate here # setClassUnion("index", members = c("numeric", "integer", "logical", "character")) wrapIndexOperation <- function(x, i=NULL, j=NULL, ..., drop=FALSE) { if (is(x, "dfmSparse")) { asType <- "sparseMatrix" newType <- "dfmSparse" } else { asType <- "denseMatrix" newType <- "dfmDense" } if (drop) warning("drop=TRUE not currently supported") new(newType, "["(as(x, asType), i, j, ..., drop=FALSE)) } #' @param i index for documents #' @param j index for features #' @param drop always set to \code{FALSE} #' @param ... 
additional arguments not used here #' @rdname dfm-class setMethod("[", signature(x = "dfmDense", i = "index", j = "index", drop = "missing"), wrapIndexOperation) #' @rdname dfm-class setMethod("[", signature(x = "dfmDense", i = "index", j = "index", drop = "logical"), wrapIndexOperation) #' @rdname dfm-class setMethod("[", signature(x = "dfmDense", i = "index", j = "missing", drop = "missing"), function(x, i, j, ..., drop=FALSE) new("dfmDense", "["(as(x, "denseMatrix"), i, , ..., drop=FALSE))) #' @rdname dfm-class setMethod("[", signature(x = "dfmDense", i = "index", j = "missing", drop = "logical"), function(x, i, j, ..., drop=FALSE) { if (drop) warning("drop=TRUE not currently supported") new("dfmDense", "["(as(x, "denseMatrix"), i, , ..., drop=FALSE)) }) #' @rdname dfm-class setMethod("[", signature(x = "dfmDense", i = "missing", j = "index", drop = "missing"), function(x, i, j, ..., drop=FALSE) new("dfmDense", "["(as(x, "denseMatrix"), , j, ..., drop=FALSE))) #' @rdname dfm-class setMethod("[", signature(x = "dfmDense", i = "missing", j = "index", drop = "logical"), function(x, i, j, ..., drop=FALSE) { if (drop) warning("drop=TRUE not currently supported") new("dfmDense", "["(as(x, "denseMatrix"), , j, ..., drop=FALSE)) }) #' @rdname dfm-class setMethod("[", signature(x = "dfmDense", i = "missing", j = "missing", drop = "missing"), function(x, i, j, ..., drop=FALSE) new("dfmDense", "["(as(x, "denseMatrix"), , , ..., drop=FALSE))) #' @rdname dfm-class setMethod("[", signature(x = "dfmDense", i = "missing", j = "missing", drop = "logical"), function(x, i, j, ..., drop=FALSE) { if (drop) warning("drop=TRUE not currently supported") new("dfmDense", "["(as(x, "denseMatrix"), , , ..., drop=FALSE)) }) #' @rdname dfm-class setMethod("[", signature(x = "dfmSparse", i = "index", j = "index", drop = "missing"), wrapIndexOperation) #' @rdname dfm-class setMethod("[", signature(x = "dfmSparse", i = "index", j = "index", drop = "logical"), wrapIndexOperation) #' @rdname 
dfm-class setMethod("[", signature(x = "dfmSparse", i = "index", j = "missing", drop = "missing"), function(x, i, j, ..., drop=FALSE) new("dfmSparse", "["(as(x, "sparseMatrix"), i, , ..., drop=FALSE))) #' @rdname dfm-class setMethod("[", signature(x = "dfmSparse", i = "index", j = "missing", drop = "logical"), function(x, i, j, ..., drop=FALSE) { if (drop) warning("drop=TRUE not currently supported") new("dfmSparse", "["(as(x, "sparseMatrix"), i, , ..., drop=FALSE)) }) #' @rdname dfm-class setMethod("[", signature(x = "dfmSparse", i = "missing", j = "index", drop = "missing"), function(x, i, j, ..., drop=FALSE) new("dfmSparse", "["(as(x, "sparseMatrix"), , j, ..., drop=FALSE))) #' @rdname dfm-class setMethod("[", signature(x = "dfmSparse", i = "missing", j = "index", drop = "logical"), function(x, i, j, ..., drop=FALSE) { if (drop) warning("drop=TRUE not currently supported") new("dfmSparse", "["(as(x, "sparseMatrix"), , j, ..., drop=FALSE)) }) #' @rdname dfm-class setMethod("[", signature(x = "dfmSparse", i = "missing", j = "missing", drop = "missing"), function(x, i, j, ..., drop=FALSE) new("dfmSparse", "["(as(x, "sparseMatrix"), , , ..., drop=FALSE))) #' @rdname dfm-class setMethod("[", signature(x = "dfmSparse", i = "missing", j = "missing", drop = "logical"), function(x, i, j, ..., drop=FALSE) { if (drop) warning("drop=TRUE not currently supported") new("dfmSparse", "["(as(x, "sparseMatrix"), , , ..., drop=FALSE)) }) #' @param e1 first quantity in "+" operation for dfm #' @param e2 second quantity in "+" operation for dfm #' @rdname dfm-class setMethod("+", signature(e1 = "dfmSparse", e2 = "numeric"), function(e1, e2) { as(as(e1, "Matrix") + e2, ifelse(e2==0, "dfmSparse", "dfmDense")) }) #' @rdname dfm-class setMethod("+", signature(e1 = "numeric", e2 = "dfmSparse"), function(e1, e2) { as(e1 + as(e2, "Matrix"), ifelse(e1==0, "dfmSparse", "dfmDense")) }) #' @rdname dfm-class setMethod("+", signature(e1 = "dfmDense", e2 = "numeric"), function(e1, e2) { as(as(e1, 
"Matrix") + e2, "dfmDense") }) #' @rdname dfm-class setMethod("+", signature(e1 = "numeric", e2 = "dfmDense"), function(e1, e2) { as(e1 + as(e2, "Matrix"), "dfmDense") }) #' @rdname dfm-class #' @export #' @examples #' \dontshow{ #' dfmSparse <- dfm(inaugTexts, verbose=FALSE) #' str(as.matrix(dfmSparse)) #' class(as.matrix(dfmSparse)) #' dfmDense <- dfm(inaugTexts, verbose=FALSE, matrixType="dense") #' str(as.matrix(dfmDense)) #' class(as.matrix(dfmDense)) #' identical(as.matrix(dfmSparse), as.matrix(dfmDense)) #' } setMethod("as.matrix", signature(x="dfm"), function(x) { if (isS4(x)) { f <- getMethod("as.matrix", "Matrix") x <- f(x) names(dimnames(x)) <- c("docs", "features") } else { x <- matrix(x, nrow=ndoc(x), dimnames = list(docs = docnames(x), features = features(x))) } x }) #' @rdname dfm-class #' @export #' @examples #' \dontshow{ #' dfmSparse <- dfm(inaugTexts, verbose=FALSE) #' str(as.data.frame(dfmSparse)) #' class(as.data.frame(dfmSparse)) #' dfmDense <- dfm(inaugTexts, verbose=FALSE, matrixType="dense") #' str(as.data.frame(dfmDense)) #' class(as.data.frame(dfmDense)) #' identical(as.data.frame(dfmSparse), as.data.frame(dfmDense)) #' } setMethod("as.data.frame", signature(x="dfm"), function(x) as.data.frame(as.matrix(x)))
# Setting working directory setwd("C:/Users/Mansi/Documents/Assignments/Analytics Edge/Analytics edge (GitHub)/Unit5_Twitter") # Read in the data tweets = read.csv("tweets.csv", stringsAsFactors=FALSE) str(tweets) # Create dependent variable tweets$Negative = as.factor(tweets$Avg <= -1) table(tweets$Negative) # Install new packages install.packages("tm") library(tm) install.packages("SnowballC") library(SnowballC) # Create corpus corpus = VCorpus(VectorSource(tweets$Tweet)) # Look at corpus corpus corpus[[1]]$content # Convert to lower-case corpus = tm_map(corpus, content_transformer(tolower)) corpus[[1]]$content # Remove punctuation corpus = tm_map(corpus, removePunctuation) corpus[[1]]$content # Look at stop words stopwords("english")[1:10] # Remove stopwords and apple corpus = tm_map(corpus, removeWords, c("apple", stopwords("english"))) corpus[[1]]$content # Stem document corpus = tm_map(corpus, stemDocument) corpus[[1]]$content # Create matrix frequencies = DocumentTermMatrix(corpus) frequencies # Look at matrix inspect(frequencies[1000:1005,505:515]) # Check for sparsity findFreqTerms(frequencies, lowfreq=20) # Remove sparse terms sparse = removeSparseTerms(frequencies, 0.995) sparse # Convert to a data frame tweetsSparse = as.data.frame(as.matrix(sparse)) # Make all variable names R-friendly colnames(tweetsSparse) = make.names(colnames(tweetsSparse)) # Add dependent variable tweetsSparse$Negative = tweets$Negative # Split the data library(caTools) set.seed(123) split = sample.split(tweetsSparse$Negative, SplitRatio = 0.7) trainSparse = subset(tweetsSparse, split==TRUE) testSparse = subset(tweetsSparse, split==FALSE) # Build a CART model library(rpart) library(rpart.plot) tweetCART = rpart(Negative ~ ., data=trainSparse, method="class") prp(tweetCART) # Evaluate the performance of the model predictCART = predict(tweetCART, newdata=testSparse, type="class") table(testSparse$Negative, predictCART) # Compute accuracy (294+18)/(294+6+37+18) # Baseline accuracy 
table(testSparse$Negative) 300/(300+55) # Random forest model library(randomForest) set.seed(123) tweetRF = randomForest(Negative ~ ., data=trainSparse) # Make predictions: predictRF = predict(tweetRF, newdata=testSparse) table(testSparse$Negative, predictRF) # Accuracy: (293+21)/(293+7+34+21)
/Turning Tweets into Knowledge/Code.R
no_license
wesenu/Analytics-edge-MITx-
R
false
false
2,368
r
# Setting working directory setwd("C:/Users/Mansi/Documents/Assignments/Analytics Edge/Analytics edge (GitHub)/Unit5_Twitter") # Read in the data tweets = read.csv("tweets.csv", stringsAsFactors=FALSE) str(tweets) # Create dependent variable tweets$Negative = as.factor(tweets$Avg <= -1) table(tweets$Negative) # Install new packages install.packages("tm") library(tm) install.packages("SnowballC") library(SnowballC) # Create corpus corpus = VCorpus(VectorSource(tweets$Tweet)) # Look at corpus corpus corpus[[1]]$content # Convert to lower-case corpus = tm_map(corpus, content_transformer(tolower)) corpus[[1]]$content # Remove punctuation corpus = tm_map(corpus, removePunctuation) corpus[[1]]$content # Look at stop words stopwords("english")[1:10] # Remove stopwords and apple corpus = tm_map(corpus, removeWords, c("apple", stopwords("english"))) corpus[[1]]$content # Stem document corpus = tm_map(corpus, stemDocument) corpus[[1]]$content # Create matrix frequencies = DocumentTermMatrix(corpus) frequencies # Look at matrix inspect(frequencies[1000:1005,505:515]) # Check for sparsity findFreqTerms(frequencies, lowfreq=20) # Remove sparse terms sparse = removeSparseTerms(frequencies, 0.995) sparse # Convert to a data frame tweetsSparse = as.data.frame(as.matrix(sparse)) # Make all variable names R-friendly colnames(tweetsSparse) = make.names(colnames(tweetsSparse)) # Add dependent variable tweetsSparse$Negative = tweets$Negative # Split the data library(caTools) set.seed(123) split = sample.split(tweetsSparse$Negative, SplitRatio = 0.7) trainSparse = subset(tweetsSparse, split==TRUE) testSparse = subset(tweetsSparse, split==FALSE) # Build a CART model library(rpart) library(rpart.plot) tweetCART = rpart(Negative ~ ., data=trainSparse, method="class") prp(tweetCART) # Evaluate the performance of the model predictCART = predict(tweetCART, newdata=testSparse, type="class") table(testSparse$Negative, predictCART) # Compute accuracy (294+18)/(294+6+37+18) # Baseline accuracy 
table(testSparse$Negative) 300/(300+55) # Random forest model library(randomForest) set.seed(123) tweetRF = randomForest(Negative ~ ., data=trainSparse) # Make predictions: predictRF = predict(tweetRF, newdata=testSparse) table(testSparse$Negative, predictRF) # Accuracy: (293+21)/(293+7+34+21)
library(plot3D) ### Name: Perspective box ### Title: Creates an empty perspective box, ready for adding objects ### Aliases: perspbox ### Keywords: hplot ### ** Examples # save plotting parameters pm <- par("mfrow") pmar <- par("mar") ## ======================================================================== ## The 4 predefined box types ## ======================================================================== par(mfrow = c(2, 2), mar = c(1, 1, 1, 1)) # box type with only backward panels perspbox(z = volcano, bty = "b", ticktype = "detailed", d = 2, main = "bty = 'b'") # box as in 'persp' perspbox(z = volcano, bty = "f", ticktype = "detailed", d = 2, main = "bty = 'f'") # back panels with gridlines, detailed axes perspbox(z = volcano, bty = "b2", ticktype = "detailed", d = 2, main = "bty = 'b2'") # ggplot-type, simple axes perspbox(z = volcano, bty = "g", d = 2, main = "bty = 'g'") ## ======================================================================== ## A user-defined box ## ======================================================================== par(mfrow = c(1, 1)) perspbox(z = diag(2), bty = "u", ticktype = "detailed", col.panel = "gold", col.axis = "white", scale = FALSE, expand = 0.4, col.grid = "grey", main = "user-defined") # restore plotting parameters par(mfrow = pm) par(mar = pmar)
/data/genthat_extracted_code/plot3D/examples/perspbox.Rd.R
no_license
surayaaramli/typeRrh
R
false
false
1,463
r
library(plot3D) ### Name: Perspective box ### Title: Creates an empty perspective box, ready for adding objects ### Aliases: perspbox ### Keywords: hplot ### ** Examples # save plotting parameters pm <- par("mfrow") pmar <- par("mar") ## ======================================================================== ## The 4 predefined box types ## ======================================================================== par(mfrow = c(2, 2), mar = c(1, 1, 1, 1)) # box type with only backward panels perspbox(z = volcano, bty = "b", ticktype = "detailed", d = 2, main = "bty = 'b'") # box as in 'persp' perspbox(z = volcano, bty = "f", ticktype = "detailed", d = 2, main = "bty = 'f'") # back panels with gridlines, detailed axes perspbox(z = volcano, bty = "b2", ticktype = "detailed", d = 2, main = "bty = 'b2'") # ggplot-type, simple axes perspbox(z = volcano, bty = "g", d = 2, main = "bty = 'g'") ## ======================================================================== ## A user-defined box ## ======================================================================== par(mfrow = c(1, 1)) perspbox(z = diag(2), bty = "u", ticktype = "detailed", col.panel = "gold", col.axis = "white", scale = FALSE, expand = 0.4, col.grid = "grey", main = "user-defined") # restore plotting parameters par(mfrow = pm) par(mar = pmar)
pt(2.5, 15, lower.tail = FALSE) choose(8, 7)*0.5^8 + choose(8, 8)*0.5^8 pbinom(6, size = 8, prob = 0.5, lower.tail = FALSE) ppois(9, 5, lower.tail = FALSE)
/StatisticalInference/Semana3/pValue.R
no_license
jspaz/DataScience
R
false
false
158
r
pt(2.5, 15, lower.tail = FALSE) choose(8, 7)*0.5^8 + choose(8, 8)*0.5^8 pbinom(6, size = 8, prob = 0.5, lower.tail = FALSE) ppois(9, 5, lower.tail = FALSE)
library(frailtypack) ### Name: summary.trivPenal ### Title: Short summary of fixed covariates estimates of a joint model for ### longitudinal data, recurrent events and a terminal event ### Aliases: summary.trivPenal print.summary.trivPenal ### Keywords: methods ### ** Examples ## Not run: ##D ##D ###--- Trivariate joint model for longitudinal data, ---### ##D ###--- recurrent events and a terminal event ---### ##D ##D data(colorectal) ##D data(colorectalLongi) ##D ##D # Weibull baseline hazard function ##D # Random effects as the link function, Gap timescale ##D # (computation takes around 30 minutes) ##D model.weib.RE.gap <-trivPenal(Surv(gap.time, new.lesions) ~ cluster(id) ##D + age + treatment + who.PS + prev.resection + terminal(state), ##D formula.terminalEvent =~ age + treatment + who.PS + prev.resection, ##D tumor.size ~ year * treatment + age + who.PS, data = colorectal, ##D data.Longi = colorectalLongi, random = c("1", "year"), id = "id", ##D link = "Random-effects", left.censoring = -3.33, recurrentAG = FALSE, ##D hazard = "Weibull", method.GH="Pseudo-adaptive", n.nodes = 7) ##D ##D summary(model.weib.RE.gap) ## End(Not run)
/data/genthat_extracted_code/frailtypack/examples/summary.trivPenal.Rd.R
no_license
surayaaramli/typeRrh
R
false
false
1,175
r
library(frailtypack) ### Name: summary.trivPenal ### Title: Short summary of fixed covariates estimates of a joint model for ### longitudinal data, recurrent events and a terminal event ### Aliases: summary.trivPenal print.summary.trivPenal ### Keywords: methods ### ** Examples ## Not run: ##D ##D ###--- Trivariate joint model for longitudinal data, ---### ##D ###--- recurrent events and a terminal event ---### ##D ##D data(colorectal) ##D data(colorectalLongi) ##D ##D # Weibull baseline hazard function ##D # Random effects as the link function, Gap timescale ##D # (computation takes around 30 minutes) ##D model.weib.RE.gap <-trivPenal(Surv(gap.time, new.lesions) ~ cluster(id) ##D + age + treatment + who.PS + prev.resection + terminal(state), ##D formula.terminalEvent =~ age + treatment + who.PS + prev.resection, ##D tumor.size ~ year * treatment + age + who.PS, data = colorectal, ##D data.Longi = colorectalLongi, random = c("1", "year"), id = "id", ##D link = "Random-effects", left.censoring = -3.33, recurrentAG = FALSE, ##D hazard = "Weibull", method.GH="Pseudo-adaptive", n.nodes = 7) ##D ##D summary(model.weib.RE.gap) ## End(Not run)
% Generated by roxygen2: do not edit by hand % Please edit documentation in R/help_laus_areacodes.R \name{help_laus_areacodes} \alias{help_laus_areacodes} \title{Prints a list of area names associated to an area code for the LAUS data} \usage{ help_laus_areacodes() } \value{ prints a list in the console } \description{ Prints a list of area names associated to an area code for the LAUS data } \examples{ library(blsAPI) library(dplyr) help_laus_areacodes() }
/man/help_laus_areacodes.Rd
no_license
mikeasilva/blsAPI
R
false
true
464
rd
% Generated by roxygen2: do not edit by hand % Please edit documentation in R/help_laus_areacodes.R \name{help_laus_areacodes} \alias{help_laus_areacodes} \title{Prints a list of area names associated to an area code for the LAUS data} \usage{ help_laus_areacodes() } \value{ prints a list in the console } \description{ Prints a list of area names associated to an area code for the LAUS data } \examples{ library(blsAPI) library(dplyr) help_laus_areacodes() }
Eggs<-read.csv2("http://jolej.linuxpl.info/Eggs.csv", header=TRUE) Eggs ls(Eggs) library(ggplot2) Eggs$First.Week <- as.factor(Eggs$First.Week) Eggs %>% gather(Egg.Pr:Cereal.Pr, key = "column", value = "value") # 1 plot <- ggplot(Eggs, aes(x = Cases, fill = Month)) histogram <- plot + geom_histogram(binwidth = 5000, color = "white") + theme_light() + labs(x = "Cases", y = "Months") + coord_flip() histogram # 2 scat <- ggplot(Eggs, aes(x = Week, y = Cases)) scat + geom_point(aes(color = Month), shape = 21, fill = "White", size = 3, stroke = 2) + theme_light() + labs(x = "Week", y = "Cases") # 3 pairs(Eggs[, 6:10], col = Eggs$Cases) # 4 box <- ggplot(Eggs, aes(x = Cases, y = Week)) a <- box + geom_boxplot() + geom_jitter(width = 1, aes(color = Easter)) + theme_light() + coord_flip() a # 5 plot(Eggs$Cases, Eggs$Week, col = Eggs$Easter) legend("topright", levels(Eggs$Easter), fill = Eggs$Easter) # 6! library(ggplot2) z <- ggplot(Eggs, aes(x = Week, y = Cases)) z + geom_point(aes(color = factor(Easter))) # 7! df <- Eggs[,c(1,6:10)] g <- ggplot(df, aes(Week)) g <- g + geom_line(aes(y=Egg.Pr), colour="red") g <- g + geom_line(aes(y=Beef.Pr), colour="green") g <- g + geom_line(aes(y=Pork.Pr), colour="yellow") g <- g + geom_line(aes(y=Cereal.Pr), colour="blue") g <- g + geom_line(aes(y=Chicken.Pr), colour="purple") g <- g + geom_line(aes(y=Cereal.Pr), colour="grey") g + labs(y = "Price")
/SGH/R/h2/homework2.R
no_license
Valkoiset/myrepo
R
false
false
1,443
r
Eggs<-read.csv2("http://jolej.linuxpl.info/Eggs.csv", header=TRUE) Eggs ls(Eggs) library(ggplot2) Eggs$First.Week <- as.factor(Eggs$First.Week) Eggs %>% gather(Egg.Pr:Cereal.Pr, key = "column", value = "value") # 1 plot <- ggplot(Eggs, aes(x = Cases, fill = Month)) histogram <- plot + geom_histogram(binwidth = 5000, color = "white") + theme_light() + labs(x = "Cases", y = "Months") + coord_flip() histogram # 2 scat <- ggplot(Eggs, aes(x = Week, y = Cases)) scat + geom_point(aes(color = Month), shape = 21, fill = "White", size = 3, stroke = 2) + theme_light() + labs(x = "Week", y = "Cases") # 3 pairs(Eggs[, 6:10], col = Eggs$Cases) # 4 box <- ggplot(Eggs, aes(x = Cases, y = Week)) a <- box + geom_boxplot() + geom_jitter(width = 1, aes(color = Easter)) + theme_light() + coord_flip() a # 5 plot(Eggs$Cases, Eggs$Week, col = Eggs$Easter) legend("topright", levels(Eggs$Easter), fill = Eggs$Easter) # 6! library(ggplot2) z <- ggplot(Eggs, aes(x = Week, y = Cases)) z + geom_point(aes(color = factor(Easter))) # 7! df <- Eggs[,c(1,6:10)] g <- ggplot(df, aes(Week)) g <- g + geom_line(aes(y=Egg.Pr), colour="red") g <- g + geom_line(aes(y=Beef.Pr), colour="green") g <- g + geom_line(aes(y=Pork.Pr), colour="yellow") g <- g + geom_line(aes(y=Cereal.Pr), colour="blue") g <- g + geom_line(aes(y=Chicken.Pr), colour="purple") g <- g + geom_line(aes(y=Cereal.Pr), colour="grey") g + labs(y = "Price")
\name{w.wsd} \alias{w.wsd} \title{ Retrieve Wind daily data } \description{ WSD is used to retrieve the history daily data of a security, such as intraday k-line.\cr To show the guide dialog, please input w.menu("wsd").\cr data<- w.wsd(windcodes,windfields,starttime,endtime,option)\cr \cr Description:\cr windcodes the Wind-code, like "600000.SH", only one security allowed.\cr windfields the fields, like "OPEN,CLOSE,HIGH".\cr starttime the start date, like "20120701".\cr endTime the end date, like "20120919".\cr \cr $Data the return result, a data.frame.\cr $Code the code of the data.\cr $ErrorCode the error ID (0 is OK).\cr } \usage{ w.wsd(codes, fields, beginTime, endTime, options = "") } \examples{ library(WindR) w.start() w.wsd("600000.SH","high,low,close,open","20120701","20120919") } \keyword{ wsd }
/WAPIWrapper/WAPIWrapperR/WindR/man/w.wsd.Rd
no_license
WindQuant/ThirdParty
R
false
false
944
rd
\name{w.wsd} \alias{w.wsd} \title{ Retrieve Wind daily data } \description{ WSD is used to retrieve the history daily data of a security, such as intraday k-line.\cr To show the guide dialog, please input w.menu("wsd").\cr data<- w.wsd(windcodes,windfields,starttime,endtime,option)\cr \cr Description:\cr windcodes the Wind-code, like "600000.SH", only one security allowed.\cr windfields the fields, like "OPEN,CLOSE,HIGH".\cr starttime the start date, like "20120701".\cr endTime the end date, like "20120919".\cr \cr $Data the return result, a data.frame.\cr $Code the code of the data.\cr $ErrorCode the error ID (0 is OK).\cr } \usage{ w.wsd(codes, fields, beginTime, endTime, options = "") } \examples{ library(WindR) w.start() w.wsd("600000.SH","high,low,close,open","20120701","20120919") } \keyword{ wsd }
# Cai, Jian-Feng, Emmanuel J. Candès, and Zuowei Shen. "A singular value thresholding algorithm for matrix completion." SIAM Journal on Optimization 20.4 (2010): 1956-1982. ## WARNING sketchy prototype ahead... library(Matrix) library(irlba) svt = function(M, delta=1.2, epsilon=1e-3, tau=5 * nrow(M), kmax=200) { F = norm(M, type="F") dp = diff(M@p) idx = cbind(M@i + 1, rep(seq_along(dp), dp)) # the non-zero indices k0 = max(floor(tau / (delta * irlba(M, 1)$d)), 1) Y = k0 * delta * M r = 0 err = sqrt(drop(crossprod(M[idx]))) for(k in seq(1, kmax)) { S = NULL s = r + 1 dmin = tau + 1 while(dmin > tau) { S = irlba(Y, nv=s, v=S) dmin = min(S$d) s = s + 5 } r = max(which(S$d > tau), 1) cat(k, s, r, err, "\n") # debug X = S$u %*% ((S$d - tau) * t(S$v)) err = sqrt(drop(crossprod(M[idx] - X[idx]))) / F if(err < epsilon) break Y[idx] = Y[idx] + delta * (M[idx] - X[idx]) } list(X=X, err=err, k=k) } # EXAMPLE set.seed(1) r = 10 n = 1000 Mtrue = matrix(rnorm(n * r), n) %*% matrix(rnorm(n * r), r) p = 0.1 * n * n i = sample(n, p, replace=TRUE) j = sample(n, p, replace=TRUE) M = sparseMatrix(i=i, j=j, x=Mtrue[cbind(i, j)], dims=c(n, n), use.last.ij=TRUE) x = svt(M) cat("Relative error ||X - Mtrue||_F / ||Mtrue||_F :\n") print(norm(Mtrue - x$X, "F") / norm(Mtrue, "F")) sd = svd(Mtrue)$d xd = svd(x$X)$d plot(sd) lines(xd) legend("topright", legend=c("true singular values", "imputed values"), pch=c("o", "lines"))
/svt.r
no_license
bwlewis/rfinance-2017
R
false
false
1,511
r
# Cai, Jian-Feng, Emmanuel J. Candès, and Zuowei Shen. "A singular value thresholding algorithm for matrix completion." SIAM Journal on Optimization 20.4 (2010): 1956-1982. ## WARNING sketchy prototype ahead... library(Matrix) library(irlba) svt = function(M, delta=1.2, epsilon=1e-3, tau=5 * nrow(M), kmax=200) { F = norm(M, type="F") dp = diff(M@p) idx = cbind(M@i + 1, rep(seq_along(dp), dp)) # the non-zero indices k0 = max(floor(tau / (delta * irlba(M, 1)$d)), 1) Y = k0 * delta * M r = 0 err = sqrt(drop(crossprod(M[idx]))) for(k in seq(1, kmax)) { S = NULL s = r + 1 dmin = tau + 1 while(dmin > tau) { S = irlba(Y, nv=s, v=S) dmin = min(S$d) s = s + 5 } r = max(which(S$d > tau), 1) cat(k, s, r, err, "\n") # debug X = S$u %*% ((S$d - tau) * t(S$v)) err = sqrt(drop(crossprod(M[idx] - X[idx]))) / F if(err < epsilon) break Y[idx] = Y[idx] + delta * (M[idx] - X[idx]) } list(X=X, err=err, k=k) } # EXAMPLE set.seed(1) r = 10 n = 1000 Mtrue = matrix(rnorm(n * r), n) %*% matrix(rnorm(n * r), r) p = 0.1 * n * n i = sample(n, p, replace=TRUE) j = sample(n, p, replace=TRUE) M = sparseMatrix(i=i, j=j, x=Mtrue[cbind(i, j)], dims=c(n, n), use.last.ij=TRUE) x = svt(M) cat("Relative error ||X - Mtrue||_F / ||Mtrue||_F :\n") print(norm(Mtrue - x$X, "F") / norm(Mtrue, "F")) sd = svd(Mtrue)$d xd = svd(x$X)$d plot(sd) lines(xd) legend("topright", legend=c("true singular values", "imputed values"), pch=c("o", "lines"))
# Define my own color pallet drsimonj_colors <- c( 'red' = "#d11141", 'green' = "#00b159", 'blue' = "#00aedb", 'orange' = "#f37735", 'yellow' = "#ffc425", 'light grey' = "#cccccc", 'dark grey' = "#8c8c8c", 'light blue' = "#03A9F4", 'purple' = "#9C27B0", 'teal' = "#64FFDA", 'lime' = "#CDDC39", 'amber' = "#FFC107", 'pink' = "#EC407A") drsimonj_cols <- function(...) { cols <- c(...) if (is.null(cols)) return (drsimonj_colors) drsimonj_colors[cols] } drsimonj_palettes <- list( 'main' = drsimonj_cols("blue", "green", "yellow"), 'cool' = drsimonj_cols("blue", "teal", "green"), 'hotpink' = drsimonj_cols("blue", "purple", "pink", "red"), 'hot' = drsimonj_cols("yellow", "orange", "red"), 'mixed' = drsimonj_cols("blue", "green", "yellow", "orange", "red"), 'grey' = drsimonj_cols("light grey", "dark grey") ) drsimonj_pal <- function(palette = "main", reverse = FALSE, ...) { pal <- drsimonj_palettes[[palette]] if (reverse) pal <- rev(pal) colorRampPalette(pal, ...) } scale_color_drsimonj <- function(palette = "main", discrete = TRUE, reverse = FALSE, ...) { pal <- drsimonj_pal(palette = palette, reverse = reverse) if (discrete) { discrete_scale("colour", paste0("drsimonj_", palette), palette = pal, ...) } else { scale_color_gradientn(colours = pal(256), ...) } } scale_fill_drsimonj <- function(palette = "main", discrete = TRUE, reverse = FALSE, ...) { pal <- drsimonj_pal(palette = palette, reverse = reverse) if (discrete) { discrete_scale("fill", paste0("drsimonj_", palette), palette = pal, ...) } else { scale_fill_gradientn(colours = pal(256), ...) } } # Ways of use: # scale_color_drsimonj(discrete = FALSE, palette = "cool") # scale_color_drsimonj() # scale_fill_drsimonj(palette = "mixed", guide = "none")
/predictive_data_science/color_pallet.R
no_license
italo-batista/data-science-R
R
false
false
1,898
r
# Define my own color pallet drsimonj_colors <- c( 'red' = "#d11141", 'green' = "#00b159", 'blue' = "#00aedb", 'orange' = "#f37735", 'yellow' = "#ffc425", 'light grey' = "#cccccc", 'dark grey' = "#8c8c8c", 'light blue' = "#03A9F4", 'purple' = "#9C27B0", 'teal' = "#64FFDA", 'lime' = "#CDDC39", 'amber' = "#FFC107", 'pink' = "#EC407A") drsimonj_cols <- function(...) { cols <- c(...) if (is.null(cols)) return (drsimonj_colors) drsimonj_colors[cols] } drsimonj_palettes <- list( 'main' = drsimonj_cols("blue", "green", "yellow"), 'cool' = drsimonj_cols("blue", "teal", "green"), 'hotpink' = drsimonj_cols("blue", "purple", "pink", "red"), 'hot' = drsimonj_cols("yellow", "orange", "red"), 'mixed' = drsimonj_cols("blue", "green", "yellow", "orange", "red"), 'grey' = drsimonj_cols("light grey", "dark grey") ) drsimonj_pal <- function(palette = "main", reverse = FALSE, ...) { pal <- drsimonj_palettes[[palette]] if (reverse) pal <- rev(pal) colorRampPalette(pal, ...) } scale_color_drsimonj <- function(palette = "main", discrete = TRUE, reverse = FALSE, ...) { pal <- drsimonj_pal(palette = palette, reverse = reverse) if (discrete) { discrete_scale("colour", paste0("drsimonj_", palette), palette = pal, ...) } else { scale_color_gradientn(colours = pal(256), ...) } } scale_fill_drsimonj <- function(palette = "main", discrete = TRUE, reverse = FALSE, ...) { pal <- drsimonj_pal(palette = palette, reverse = reverse) if (discrete) { discrete_scale("fill", paste0("drsimonj_", palette), palette = pal, ...) } else { scale_fill_gradientn(colours = pal(256), ...) } } # Ways of use: # scale_color_drsimonj(discrete = FALSE, palette = "cool") # scale_color_drsimonj() # scale_fill_drsimonj(palette = "mixed", guide = "none")
% Generated by roxygen2: do not edit by hand % Please edit documentation in R/cache-memory.R \name{cache_mem} \alias{cache_mem} \title{Create a memory cache object} \usage{ cache_mem( max_size = 512 * 1024^2, max_age = Inf, max_n = Inf, evict = c("lru", "fifo"), missing = key_missing(), logfile = NULL ) } \arguments{ \item{max_size}{Maximum size of the cache, in bytes. If the cache exceeds this size, cached objects will be removed according to the value of the \code{evict}. Use \code{Inf} for no size limit. The default is 1 gigabyte.} \item{max_age}{Maximum age of files in cache before they are evicted, in seconds. Use \code{Inf} for no age limit.} \item{max_n}{Maximum number of objects in the cache. If the number of objects exceeds this value, then cached objects will be removed according to the value of \code{evict}. Use \code{Inf} for no limit of number of items.} \item{evict}{The eviction policy to use to decide which objects are removed when a cache pruning occurs. Currently, \code{"lru"} and \code{"fifo"} are supported.} \item{missing}{A value to return when \code{get(key)} is called but the key is not present in the cache. The default is a \code{\link[=key_missing]{key_missing()}} object. It is actually an expression that is evaluated each time there is a cache miss. See section Missing keys for more information.} \item{logfile}{An optional filename or connection object to where logging information will be written. To log to the console, use \code{stderr()} or \code{stdout()}.} } \value{ A memory caching object, with class \code{cache_mem}. } \description{ A memory cache object is a key-value store that saves the values in an environment. Objects can be stored and retrieved using the \code{get()} and \code{set()} methods. Objects are automatically pruned from the cache according to the parameters \code{max_size}, \code{max_age}, \code{max_n}, and \code{evict}. 
} \details{ In a \code{cache_mem}, R objects are stored directly in the cache; they are not \emph{not} serialized before being stored in the cache. This contrasts with other cache types, like \code{\link[=cache_disk]{cache_disk()}}, where objects are serialized, and the serialized object is cached. This can result in some differences of behavior. For example, as long as an object is stored in a cache_mem, it will not be garbage collected. } \section{Missing keys}{ The \code{missing} parameter controls what happens when \code{get()} is called with a key that is not in the cache (a cache miss). The default behavior is to return a \code{\link[=key_missing]{key_missing()}} object. This is a \emph{sentinel value} that indicates that the key was not present in the cache. You can test if the returned value represents a missing key by using the \code{\link[=is.key_missing]{is.key_missing()}} function. You can also have \code{get()} return a different sentinel value, like \code{NULL}. If you want to throw an error on a cache miss, you can do so by providing an expression for \code{missing}, as in \code{missing = stop("Missing key")}. When the cache is created, you can supply a value for \code{missing}, which sets the default value to be returned for missing values. It can also be overridden when \code{get()} is called, by supplying a \code{missing} argument. For example, if you use \code{cache$get("mykey", missing = NULL)}, it will return \code{NULL} if the key is not in the cache. The \code{missing} parameter is actually an expression which is evaluated each time there is a cache miss. A quosure (from the rlang package) can be used. If you use this, the code that calls \code{get()} should be wrapped with \code{\link[=tryCatch]{tryCatch()}} to gracefully handle missing keys. @section Cache pruning: Cache pruning occurs when \code{set()} is called, or it can be invoked manually by calling \code{prune()}. 
When a pruning occurs, if there are any objects that are older than \code{max_age}, they will be removed. The \code{max_size} and \code{max_n} parameters are applied to the cache as a whole, in contrast to \code{max_age}, which is applied to each object individually. If the number of objects in the cache exceeds \code{max_n}, then objects will be removed from the cache according to the eviction policy, which is set with the \code{evict} parameter. Objects will be removed so that the number of items is \code{max_n}. If the size of the objects in the cache exceeds \code{max_size}, then objects will be removed from the cache. Objects will be removed from the cache so that the total size remains under \code{max_size}. Note that the size is calculated using the size of the files, not the size of disk space used by the files --- these two values can differ because of files are stored in blocks on disk. For example, if the block size is 4096 bytes, then a file that is one byte in size will take 4096 bytes on disk. Another time that objects can be removed from the cache is when \code{get()} is called. If the target object is older than \code{max_age}, it will be removed and the cache will report it as a missing value. } \section{Eviction policies}{ If \code{max_n} or \code{max_size} are used, then objects will be removed from the cache according to an eviction policy. The available eviction policies are: \describe{ \item{\code{"lru"}}{ Least Recently Used. The least recently used objects will be removed. } \item{\code{"fifo"}}{ First-in-first-out. The oldest objects will be removed. } } } \section{Methods}{ A disk cache object has the following methods: \describe{ \item{\code{get(key, missing)}}{ Returns the value associated with \code{key}. If the key is not in the cache, then it evaluates the expression specified by \code{missing} and returns the value. 
If \code{missing} is specified here, then it will override the default that was set when the \code{cache_mem} object was created. See section Missing Keys for more information. } \item{\code{set(key, value)}}{ Stores the \code{key}-\code{value} pair in the cache. } \item{\code{exists(key)}}{ Returns \code{TRUE} if the cache contains the key, otherwise \code{FALSE}. } \item{\code{size()}}{ Returns the number of items currently in the cache. } \item{\code{keys()}}{ Returns a character vector of all keys currently in the cache. } \item{\code{reset()}}{ Clears all objects from the cache. } \item{\code{destroy()}}{ Clears all objects in the cache, and removes the cache directory from disk. } \item{\code{prune()}}{ Prunes the cache, using the parameters specified by \code{max_size}, \code{max_age}, \code{max_n}, and \code{evict}. } } }
/man/cache_mem.Rd
permissive
jimsforks/cachem
R
false
true
6,588
rd
% Generated by roxygen2: do not edit by hand % Please edit documentation in R/cache-memory.R \name{cache_mem} \alias{cache_mem} \title{Create a memory cache object} \usage{ cache_mem( max_size = 512 * 1024^2, max_age = Inf, max_n = Inf, evict = c("lru", "fifo"), missing = key_missing(), logfile = NULL ) } \arguments{ \item{max_size}{Maximum size of the cache, in bytes. If the cache exceeds this size, cached objects will be removed according to the value of the \code{evict}. Use \code{Inf} for no size limit. The default is 1 gigabyte.} \item{max_age}{Maximum age of files in cache before they are evicted, in seconds. Use \code{Inf} for no age limit.} \item{max_n}{Maximum number of objects in the cache. If the number of objects exceeds this value, then cached objects will be removed according to the value of \code{evict}. Use \code{Inf} for no limit of number of items.} \item{evict}{The eviction policy to use to decide which objects are removed when a cache pruning occurs. Currently, \code{"lru"} and \code{"fifo"} are supported.} \item{missing}{A value to return when \code{get(key)} is called but the key is not present in the cache. The default is a \code{\link[=key_missing]{key_missing()}} object. It is actually an expression that is evaluated each time there is a cache miss. See section Missing keys for more information.} \item{logfile}{An optional filename or connection object to where logging information will be written. To log to the console, use \code{stderr()} or \code{stdout()}.} } \value{ A memory caching object, with class \code{cache_mem}. } \description{ A memory cache object is a key-value store that saves the values in an environment. Objects can be stored and retrieved using the \code{get()} and \code{set()} methods. Objects are automatically pruned from the cache according to the parameters \code{max_size}, \code{max_age}, \code{max_n}, and \code{evict}. 
} \details{ In a \code{cache_mem}, R objects are stored directly in the cache; they are \emph{not} serialized before being stored in the cache. This contrasts with other cache types, like \code{\link[=cache_disk]{cache_disk()}}, where objects are serialized, and the serialized object is cached. This can result in some differences of behavior. For example, as long as an object is stored in a cache_mem, it will not be garbage collected. } \section{Missing keys}{ The \code{missing} parameter controls what happens when \code{get()} is called with a key that is not in the cache (a cache miss). The default behavior is to return a \code{\link[=key_missing]{key_missing()}} object. This is a \emph{sentinel value} that indicates that the key was not present in the cache. You can test if the returned value represents a missing key by using the \code{\link[=is.key_missing]{is.key_missing()}} function. You can also have \code{get()} return a different sentinel value, like \code{NULL}. If you want to throw an error on a cache miss, you can do so by providing an expression for \code{missing}, as in \code{missing = stop("Missing key")}. When the cache is created, you can supply a value for \code{missing}, which sets the default value to be returned for missing values. It can also be overridden when \code{get()} is called, by supplying a \code{missing} argument. For example, if you use \code{cache$get("mykey", missing = NULL)}, it will return \code{NULL} if the key is not in the cache. The \code{missing} parameter is actually an expression which is evaluated each time there is a cache miss. A quosure (from the rlang package) can be used. If you use this, the code that calls \code{get()} should be wrapped with \code{\link[=tryCatch]{tryCatch()}} to gracefully handle missing keys. @section Cache pruning: Cache pruning occurs when \code{set()} is called, or it can be invoked manually by calling \code{prune()}. 
When a pruning occurs, if there are any objects that are older than \code{max_age}, they will be removed. The \code{max_size} and \code{max_n} parameters are applied to the cache as a whole, in contrast to \code{max_age}, which is applied to each object individually. If the number of objects in the cache exceeds \code{max_n}, then objects will be removed from the cache according to the eviction policy, which is set with the \code{evict} parameter. Objects will be removed so that the number of items is \code{max_n}. If the size of the objects in the cache exceeds \code{max_size}, then objects will be removed from the cache. Objects will be removed from the cache so that the total size remains under \code{max_size}. Note that the size is calculated using the size of the files, not the size of disk space used by the files --- these two values can differ because of files are stored in blocks on disk. For example, if the block size is 4096 bytes, then a file that is one byte in size will take 4096 bytes on disk. Another time that objects can be removed from the cache is when \code{get()} is called. If the target object is older than \code{max_age}, it will be removed and the cache will report it as a missing value. } \section{Eviction policies}{ If \code{max_n} or \code{max_size} are used, then objects will be removed from the cache according to an eviction policy. The available eviction policies are: \describe{ \item{\code{"lru"}}{ Least Recently Used. The least recently used objects will be removed. } \item{\code{"fifo"}}{ First-in-first-out. The oldest objects will be removed. } } } \section{Methods}{ A disk cache object has the following methods: \describe{ \item{\code{get(key, missing)}}{ Returns the value associated with \code{key}. If the key is not in the cache, then it evaluates the expression specified by \code{missing} and returns the value. 
If \code{missing} is specified here, then it will override the default that was set when the \code{cache_mem} object was created. See section Missing Keys for more information. } \item{\code{set(key, value)}}{ Stores the \code{key}-\code{value} pair in the cache. } \item{\code{exists(key)}}{ Returns \code{TRUE} if the cache contains the key, otherwise \code{FALSE}. } \item{\code{size()}}{ Returns the number of items currently in the cache. } \item{\code{keys()}}{ Returns a character vector of all keys currently in the cache. } \item{\code{reset()}}{ Clears all objects from the cache. } \item{\code{destroy()}}{ Clears all objects in the cache, and removes the cache directory from disk. } \item{\code{prune()}}{ Prunes the cache, using the parameters specified by \code{max_size}, \code{max_age}, \code{max_n}, and \code{evict}. } } }
# # plot2.R - Written for the Coursera Exploratory Data Analysis Week 1 # Project Assignment. # # Assignment - Given a picture of plot 2, construct a plot to look like the # given plot and save it to a PNG file with a width of 480 pixels and a # height of 480 pixels. Name the plot plot1.png. # # Goal: Using the electric power consumption data from the UCI Irvine # Learning Repository, examine how household energy usage varies over # a 2-day period in February 2007, specifically over February 1 and # February 2. # # Dataset Abstract: Measurements of electric power consumption in one # household with a one-minute sampling rate over a period of almost 4 years. # Different electrical quantities and some sub-metering values are available. # # Instructions to Run: # 1) Download the "household_power_consumption.zip" dataset at: # https://archive.ics.uci.edu/ml/machine-learning-databases/00235/ # to your working directorywhere plot1.R is located. # 2) Save and source plot2.R in your working directory # # Ouput: # plot2.png file written to your working directory # # Dataset Info: # Size: 2,075,259 rows and 9 columns/variables. # Variables: # Date: Date in format dd/mm/yyyy # Time: time in format hh:mm:ss # Global_active_power # Global_reactive_power # Voltage # Global_intestity # Sub_metering_1 # Sub_metering_2 # Sub_metering_3 # # plot2 <- function(){ # Unzip dataset if it hasn't been already zipFile <- "exdata-data-household_power_consumption.zip" if(!file.exists("household_power_consumption.txt")) { unzip(zipFile) filename <- "household_power_consumption.txt" } else { filename <- "household_power_consumption.txt" } classes <- c("character", "character","numeric","numeric", "numeric", "numeric", "numeric", "numeric", "numeric") # The two-day period for the plot data is February 1, 2007 to # February 2, 2007. 
# # Due to the large size of the original dataset, a shell command was run # (outside of this R file) in a terminal window to find the line number # of the first occurence of the date: "1/2/2007" in the dataset # file: "household_power_consumption.txt" # $ grep -n -m 1 "^1/2/2007" household_power_consumption.txt # $ 66638 # One is then subtracted from this number to get the line number # that the read.table() skip argument should be set to in a later call. # startReadingAt <- 66638-1 # Also, the shell command was used to find the line number of the first # occurence of the date: "3/2/2007" in the dataset # file: "household_power_consumption.txt" # $ grep -n -m 1 "^3/2/2007" household_power_consumption.txt # $ 69518 # One is then subtracted from this number to give you the last # line with a "2/2/2007" date and thus thet last line that # should be read from the dataset corresponding second day of # the dates of interest. # stopReadingAt <- 69518-1 # Determine how many lines to read from dataset numLinesToRead <- stopReadingAt - startReadingAt powerConsumption <- read.table(filename, sep=";", col.names=c("Date","Time", "Global Active Power", "Global Reactive Power", "Voltage", "Global Intensity", "Sub Metering 1", "Sub Metering 2", "Sub Metering 3"), na.strings="?", colClasses=classes, skip=startReadingAt, nrows=numLinesToRead, stringsAsFactors=F) # Convert $Date from 'character' class to 'Date' class powerConsumption$Date <-as.Date(powerConsumption$Date, format="%d/%m/%Y") # Determine locations of x-axis tick marks midPointTick <- nrow(powerConsumption)/2 endPointTick <-nrow(powerConsumption) # set png driver as the active graphics device png("plot2.png", width=480, height=480) # Make lines plot showing the global minute-averaged # active power usage, with appropriate y axis labels. 
with(powerConsumption, plot(Global.Active.Power, type="l", ylab="Global Active Power (kilowatts)", xlab="", xaxt="n")) # Annotate the plot with specific tick mark locations and tick mark # labels on the x-axis. axis(side=1, at=c(1, midPointTick, endPointTick), labels=c("Thu", "Fri", "Sat")) # shut off png graphics device so it is no longer active dev.off() }
/plot2.R
no_license
TammyWikner/ExData_Plotting1
R
false
false
4,355
r
# plot2.R - Written for the Coursera Exploratory Data Analysis Week 1
# Project Assignment.
#
# Reads the UCI "Individual household electric power consumption" data for
# 1-2 February 2007 and writes plot2.png (480 x 480 px): a line plot of the
# minute-averaged Global Active Power over the two days.
#
# Dataset abstract: measurements of electric power consumption in one
# household at a one-minute sampling rate over almost 4 years
# (2,075,259 rows, 9 variables).
#
# Instructions to run:
#   1) Download "household_power_consumption.zip" from
#      https://archive.ics.uci.edu/ml/machine-learning-databases/00235/
#      into your working directory.
#   2) source("plot2.R"); plot2()
#
# Output: plot2.png written to the working directory.
plot2 <- function() {

    # Unzip the dataset if the text file is not already present.
    zipFile <- "exdata-data-household_power_consumption.zip"
    filename <- "household_power_consumption.txt"
    if (!file.exists(filename)) {
        unzip(zipFile)
    }

    # Date and Time are read as character; the 7 measurements as numeric.
    classes <- c("character", "character", "numeric", "numeric", "numeric",
                 "numeric", "numeric", "numeric", "numeric")

    # The two-day window (1/2/2007 - 2/2/2007) was located with shell
    # commands run outside this script:
    #   $ grep -n -m 1 "^1/2/2007" household_power_consumption.txt  -> 66638
    #   $ grep -n -m 1 "^3/2/2007" household_power_consumption.txt  -> 69518
    # Subtracting one gives the number of lines to skip and the last line
    # belonging to 2/2/2007, respectively.
    startReadingAt <- 66638 - 1
    stopReadingAt <- 69518 - 1
    numLinesToRead <- stopReadingAt - startReadingAt

    powerConsumption <- read.table(filename, sep = ";",
                                   col.names = c("Date", "Time",
                                                 "Global Active Power",
                                                 "Global Reactive Power",
                                                 "Voltage",
                                                 "Global Intensity",
                                                 "Sub Metering 1",
                                                 "Sub Metering 2",
                                                 "Sub Metering 3"),
                                   na.strings = "?",
                                   colClasses = classes,
                                   skip = startReadingAt,
                                   nrows = numLinesToRead,
                                   stringsAsFactors = FALSE)

    # Convert $Date from 'character' class to 'Date' class.
    powerConsumption$Date <- as.Date(powerConsumption$Date, format = "%d/%m/%Y")

    # x-axis tick positions: start of Thursday, start of Friday, end of data.
    midPointTick <- nrow(powerConsumption) / 2
    endPointTick <- nrow(powerConsumption)

    # Render directly to the required 480x480 PNG.
    png("plot2.png", width = 480, height = 480)

    # Line plot of the minute-averaged active power; custom x-axis below.
    with(powerConsumption,
         plot(Global.Active.Power, type = "l",
              ylab = "Global Active Power (kilowatts)",
              xlab = "", xaxt = "n"))

    # Annotate the x-axis with day-of-week labels at the day boundaries.
    axis(side = 1, at = c(1, midPointTick, endPointTick),
         labels = c("Thu", "Fri", "Sat"))

    # Shut off the png device so it is no longer active.
    dev.off()
}
library("neuralnet")

# Predict tablet prices with a feed-forward neural network.
# Features: screen size ("wyswietlacz"), storage ("pojemnosc"), RAM.
# Type ?neuralnet for more information on the neuralnet library.

# Training inputs: one device per row. byrow = TRUE is essential -- the
# original column-wise fill scrambled the three features across columns.
traininginput <- as.data.frame(matrix(c(7.9,   32, 2,
                                        7.9,   32, 2,
                                        7.9,  128, 2,
                                        9.7,   32, 2,
                                        7.9,  128, 2,
                                        12.9, 128, 4,
                                        12.9, 128, 4,
                                        9.7,  256, 2,
                                        12.9, 256, 4,
                                        7.9,   16, 2),
                                      nrow = 10, ncol = 3, byrow = TRUE))

# Observed prices for the ten devices, in row order.
trainingoutput <- c(2499, 2399, 2899, 3599, 2999, 4949, 5199, 4599, 5699, 1869)

# Column-bind inputs and outputs into one data frame.
trainingdata <- cbind(traininginput, trainingoutput)

# Min-max scale every column to [0, 1]. The per-column ranges are kept so
# the test data can be scaled identically and predictions mapped back.
maxs <- apply(trainingdata, 2, max)
mins <- apply(trainingdata, 2, min)
trainingdata <- as.data.frame(scale(trainingdata, center = mins,
                                    scale = maxs - mins))

# Check out results.
print(head(trainingdata, 10))
colnames(trainingdata) <- c("wyswietlacz", "pojemnosc", "RAM", "Price")
print(trainingdata)

# Train the network with hidden layers of c(6, 5, 3) neurons.
# `threshold` is the stopping criterion on the partial derivatives of the
# error function.
net.price <- neuralnet(Price ~ wyswietlacz + pojemnosc + RAM, trainingdata,
                       hidden = c(6, 5, 3), threshold = 0.001)
print(net.price)

# Plot the neural network.
plot(net.price)

# Test data: again one device per row (byrow = TRUE), scaled with the
# *training* ranges so both sets live on the same [0, 1] axis.
testdata <- as.data.frame(matrix(c(7.9,   32, 2,
                                   9.7,  256, 4,
                                   12.9, 128, 2),
                                 nrow = 3, ncol = 3, byrow = TRUE))
scaled.testdata <- as.data.frame(scale(testdata,
                                       center = mins[1:3],
                                       scale = maxs[1:3] - mins[1:3]))

# Run the test data through the neural network.
net.results <- compute(net.price, scaled.testdata)

# Lets see what properties net.results has.
ls(net.results)

# Predictions are on the scaled [0, 1] price axis ...
print(net.results$net.result)
# ... so map them back to the original price scale for interpretation.
print(net.results$net.result * (maxs[4] - mins[4]) + mins[4])
/smpd/lab 6/lab 6.R
no_license
Rukashu/systemy-i-metody-podejmowania-decyzji
R
false
false
2,274
r
library("neuralnet")

# Predict tablet prices with a feed-forward neural network.
# Features: screen size ("wyswietlacz"), storage ("pojemnosc"), RAM.
# Type ?neuralnet for more information on the neuralnet library.

# Training inputs: one device per row. byrow = TRUE is essential -- the
# original column-wise fill scrambled the three features across columns.
traininginput <- as.data.frame(matrix(c(7.9,   32, 2,
                                        7.9,   32, 2,
                                        7.9,  128, 2,
                                        9.7,   32, 2,
                                        7.9,  128, 2,
                                        12.9, 128, 4,
                                        12.9, 128, 4,
                                        9.7,  256, 2,
                                        12.9, 256, 4,
                                        7.9,   16, 2),
                                      nrow = 10, ncol = 3, byrow = TRUE))

# Observed prices for the ten devices, in row order.
trainingoutput <- c(2499, 2399, 2899, 3599, 2999, 4949, 5199, 4599, 5699, 1869)

# Column-bind inputs and outputs into one data frame.
trainingdata <- cbind(traininginput, trainingoutput)

# Min-max scale every column to [0, 1]. The per-column ranges are kept so
# the test data can be scaled identically and predictions mapped back.
maxs <- apply(trainingdata, 2, max)
mins <- apply(trainingdata, 2, min)
trainingdata <- as.data.frame(scale(trainingdata, center = mins,
                                    scale = maxs - mins))

# Check out results.
print(head(trainingdata, 10))
colnames(trainingdata) <- c("wyswietlacz", "pojemnosc", "RAM", "Price")
print(trainingdata)

# Train the network with hidden layers of c(6, 5, 3) neurons.
# `threshold` is the stopping criterion on the partial derivatives of the
# error function.
net.price <- neuralnet(Price ~ wyswietlacz + pojemnosc + RAM, trainingdata,
                       hidden = c(6, 5, 3), threshold = 0.001)
print(net.price)

# Plot the neural network.
plot(net.price)

# Test data: again one device per row (byrow = TRUE), scaled with the
# *training* ranges so both sets live on the same [0, 1] axis.
testdata <- as.data.frame(matrix(c(7.9,   32, 2,
                                   9.7,  256, 4,
                                   12.9, 128, 2),
                                 nrow = 3, ncol = 3, byrow = TRUE))
scaled.testdata <- as.data.frame(scale(testdata,
                                       center = mins[1:3],
                                       scale = maxs[1:3] - mins[1:3]))

# Run the test data through the neural network.
net.results <- compute(net.price, scaled.testdata)

# Lets see what properties net.results has.
ls(net.results)

# Predictions are on the scaled [0, 1] price axis ...
print(net.results$net.result)
# ... so map them back to the original price scale for interpretation.
print(net.results$net.result * (maxs[4] - mins[4]) + mins[4])
# Leaflet for R
# November 29 2018
# EAS
# Easily make interactive maps within R; used by many to publish maps.
# Renders spatial objects from the sp and sf packages, or data frames
# with lat/long columns.

library(leaflet)
library(maps)

# Basic example: one marker, with zoom limits on the map.
m <- leaflet(options = leafletOptions(minZoom = 12, maxZoom = 18)) %>%
  addTiles() %>%
  addMarkers(lng = -73.2013, lat = 44.4783, popup = "landmark")

# Map views.
m %>% setView(-73.2, 44.48, zoom = 16)
m %>% fitBounds(-73.2, 44.48, -73.1, 44.49)
m %>% setMaxBounds(-73.2, 44.48, -73.1, 44.49)

# Data objects from base R.
df <- data.frame(Lat = 1:10, Long = rnorm(10))
leaflet(df) %>% addTiles() %>% addCircles()
head(df)

# Basemaps.
m
names(providers)
m %>% addProviderTiles(providers$Stamen)
m %>% addProviderTiles(providers$Esri.NatGeoWorldMap)

# Stacking base maps.
m %>%
  addProviderTiles(providers$Esri.NatGeoWorldMap) %>%
  addProviderTiles(providers$Stamen.TonerLines,
                   options = providerTileOptions(opacity = .95)) %>%
  addProviderTiles(providers$Stamen.TonerLabels)

# Shapes and polygons.
# Circles scaled by population.
cities <- read.csv(textConnection("
City,Lat,Long,Pop
Boston, 42.36,-71.05,645966
New York City, 40.71,-74.00,8406000
Philadelphia, 39.95,-75.16,1553000"))

leaflet(cities) %>%
  addTiles() %>%
  addCircles(lng = ~Long, lat = ~Lat, weight = 1,
             radius = ~sqrt(Pop) * 30, popup = ~City)

leaflet() %>%
  addTiles() %>%
  addRectangles(lng1 = -118.45, lat1 = 34.07,
                lng2 = -118.43, lat2 = 34.06,
                fillColor = "transparent")

# Markers: locations of the earthquakes off Fiji.
data(quakes)
head(quakes)

# BUG FIX: the original call was missing the closing ")" after
# quakes[1:20, ], which left leaflet( unbalanced and made the script
# unparseable.
leaflet(data = quakes[1:20, ]) %>%
  addTiles() %>%
  addMarkers(~long, ~lat, popup = ~as.character(mag),
             label = ~as.character(mag))

leaflet(quakes) %>%
  addTiles() %>%
  addMarkers(clusterOptions = markerClusterOptions()) %>%
  addMeasure() %>%
  addMiniMap()
/mapping.R
no_license
eshore863/Bio381
R
false
false
1,861
r
# Leaflet for R
# November 29 2018
# EAS
# Easily make interactive maps within R; used by many to publish maps.
# Renders spatial objects from the sp and sf packages, or data frames
# with lat/long columns.

library(leaflet)
library(maps)

# Basic example: one marker, with zoom limits on the map.
m <- leaflet(options = leafletOptions(minZoom = 12, maxZoom = 18)) %>%
  addTiles() %>%
  addMarkers(lng = -73.2013, lat = 44.4783, popup = "landmark")

# Map views.
m %>% setView(-73.2, 44.48, zoom = 16)
m %>% fitBounds(-73.2, 44.48, -73.1, 44.49)
m %>% setMaxBounds(-73.2, 44.48, -73.1, 44.49)

# Data objects from base R.
df <- data.frame(Lat = 1:10, Long = rnorm(10))
leaflet(df) %>% addTiles() %>% addCircles()
head(df)

# Basemaps.
m
names(providers)
m %>% addProviderTiles(providers$Stamen)
m %>% addProviderTiles(providers$Esri.NatGeoWorldMap)

# Stacking base maps.
m %>%
  addProviderTiles(providers$Esri.NatGeoWorldMap) %>%
  addProviderTiles(providers$Stamen.TonerLines,
                   options = providerTileOptions(opacity = .95)) %>%
  addProviderTiles(providers$Stamen.TonerLabels)

# Shapes and polygons.
# Circles scaled by population.
cities <- read.csv(textConnection("
City,Lat,Long,Pop
Boston, 42.36,-71.05,645966
New York City, 40.71,-74.00,8406000
Philadelphia, 39.95,-75.16,1553000"))

leaflet(cities) %>%
  addTiles() %>%
  addCircles(lng = ~Long, lat = ~Lat, weight = 1,
             radius = ~sqrt(Pop) * 30, popup = ~City)

leaflet() %>%
  addTiles() %>%
  addRectangles(lng1 = -118.45, lat1 = 34.07,
                lng2 = -118.43, lat2 = 34.06,
                fillColor = "transparent")

# Markers: locations of the earthquakes off Fiji.
data(quakes)
head(quakes)

# BUG FIX: the original call was missing the closing ")" after
# quakes[1:20, ], which left leaflet( unbalanced and made the script
# unparseable.
leaflet(data = quakes[1:20, ]) %>%
  addTiles() %>%
  addMarkers(~long, ~lat, popup = ~as.character(mag),
             label = ~as.character(mag))

leaflet(quakes) %>%
  addTiles() %>%
  addMarkers(clusterOptions = markerClusterOptions()) %>%
  addMeasure() %>%
  addMiniMap()
# Yearly averages of every series in ggplot2's economics_long data.
data_avg <- economics_long %>%
  mutate(year = as.integer(format(date, "%Y"))) %>%
  group_by(year, variable) %>%
  summarize(value_avg = mean(value)) %>%
  ungroup()

# Series drawn as lines (already on a percentage-like scale).
data_avg_line <- data_avg %>%
  filter(variable %in% c("psavert", "uempmed"))

# Scale factor bringing `unemploy` onto the same axis as the line series.
divfactor <- 6 * 10^2

# Series drawn as bars, rescaled to the shared axis.
data_avg_bar <- data_avg %>%
  filter(variable %in% c("unemploy")) %>%
  mutate(value_avg_scaled = value_avg / divfactor)

p1 <- ggplot() +
  # geom_col() is the idiomatic spelling of geom_bar(stat = "identity").
  geom_col(data = data_avg_bar,
           aes(x = year, y = value_avg_scaled, fill = variable),
           width = .4) +
  geom_line(data = data_avg_line,
            aes(x = year, y = value_avg, linetype = variable),
            color = "blue") +
  scale_color_manual(values = "#006bb6") +
  scale_fill_manual(values = "#759cd2") +
  guides(fill = guide_legend(title = "",
                             keywidth = unit(4, "mm"),
                             keyheight = unit(2, "mm"),
                             override.aes = list(color = "black",
                                                 size = 0.2)),
         linetype = guide_legend(title = "",
                                 keywidth = unit(9, "mm"),  # corresponds to 9mm
                                 keyheight = unit(2, "mm"))) +
  theme(
    panel.background = element_rect(fill = "#dcdcde", color = NA),
    legend.position = "top",
    legend.box = "horizontal",
    # element_rect() fill/colour must be a colour or NA ("none");
    # the logical FALSE used originally is not a valid colour value
    # in current ggplot2, so NA is used throughout.
    legend.background = element_rect(fill = NA, color = NA),
    legend.box.background = element_rect(fill = NA, color = NA),
    legend.box.just = "left",
    legend.justification = "center",
    legend.margin = margin(0.5, 0, 0.5, 0, unit = "mm"),  # t, r, b, l
    legend.box.spacing = unit(2.5, "mm"),  # gap between plot area and legend
    legend.key = element_rect(color = NA, fill = NA)
  ) +
  xlab(NULL) +
  ylab(NULL)
/R/ggplot_detailed_features_preparePlot.R
permissive
bowerth/Intro-to-R-Bootcamp
R
false
false
1,985
r
# Yearly averages of every series in ggplot2's economics_long data.
data_avg <- economics_long %>%
  mutate(year = as.integer(format(date, "%Y"))) %>%
  group_by(year, variable) %>%
  summarize(value_avg = mean(value)) %>%
  ungroup()

# Series drawn as lines (already on a percentage-like scale).
data_avg_line <- data_avg %>%
  filter(variable %in% c("psavert", "uempmed"))

# Scale factor bringing `unemploy` onto the same axis as the line series.
divfactor <- 6 * 10^2

# Series drawn as bars, rescaled to the shared axis.
data_avg_bar <- data_avg %>%
  filter(variable %in% c("unemploy")) %>%
  mutate(value_avg_scaled = value_avg / divfactor)

p1 <- ggplot() +
  # geom_col() is the idiomatic spelling of geom_bar(stat = "identity").
  geom_col(data = data_avg_bar,
           aes(x = year, y = value_avg_scaled, fill = variable),
           width = .4) +
  geom_line(data = data_avg_line,
            aes(x = year, y = value_avg, linetype = variable),
            color = "blue") +
  scale_color_manual(values = "#006bb6") +
  scale_fill_manual(values = "#759cd2") +
  guides(fill = guide_legend(title = "",
                             keywidth = unit(4, "mm"),
                             keyheight = unit(2, "mm"),
                             override.aes = list(color = "black",
                                                 size = 0.2)),
         linetype = guide_legend(title = "",
                                 keywidth = unit(9, "mm"),  # corresponds to 9mm
                                 keyheight = unit(2, "mm"))) +
  theme(
    panel.background = element_rect(fill = "#dcdcde", color = NA),
    legend.position = "top",
    legend.box = "horizontal",
    # element_rect() fill/colour must be a colour or NA ("none");
    # the logical FALSE used originally is not a valid colour value
    # in current ggplot2, so NA is used throughout.
    legend.background = element_rect(fill = NA, color = NA),
    legend.box.background = element_rect(fill = NA, color = NA),
    legend.box.just = "left",
    legend.justification = "center",
    legend.margin = margin(0.5, 0, 0.5, 0, unit = "mm"),  # t, r, b, l
    legend.box.spacing = unit(2.5, "mm"),  # gap between plot area and legend
    legend.key = element_rect(color = NA, fill = NA)
  ) +
  xlab(NULL) +
  ylab(NULL)
# Auto-extracted example for PKI::as.BIGNUMint -- converting an R number
# into the OpenSSL BIGNUM representation of an arbitrarily precise
# integer (see ?BIGNUMint in the PKI package for the exact return format).
library(PKI)

### Name: BIGNUMint
### Title: Functions for BIGNUM representation of arbitrarily precise
###   integers
### Aliases: BIGNUMint as.BIGNUMint
### Keywords: manip

### ** Examples

# 65537 is a value commonly used as an RSA public exponent.
as.BIGNUMint(65537)
/data/genthat_extracted_code/PKI/examples/BIGNUMint.Rd.R
no_license
surayaaramli/typeRrh
R
false
false
217
r
# Auto-extracted example for PKI::as.BIGNUMint -- converting an R number
# into the OpenSSL BIGNUM representation of an arbitrarily precise
# integer (see ?BIGNUMint in the PKI package for the exact return format).
library(PKI)

### Name: BIGNUMint
### Title: Functions for BIGNUM representation of arbitrarily precise
###   integers
### Aliases: BIGNUMint as.BIGNUMint
### Keywords: manip

### ** Examples

# 65537 is a value commonly used as an RSA public exponent.
as.BIGNUMint(65537)
## Build a special "matrix" wrapper that can memoise its inverse.
## The returned list exposes set()/get() for the underlying matrix and
## setinverse()/getinverse() for the cached inverse.
makeCacheMatrix <- function(x = matrix()) {
  cached_inverse <- NULL
  set <- function(value) {
    x <<- value
    cached_inverse <<- NULL  # a new matrix invalidates the cached inverse
  }
  get <- function() {
    x
  }
  setinverse <- function(inverse) {
    cached_inverse <<- inverse
  }
  getinverse <- function() {
    cached_inverse
  }
  list(set = set,
       get = get,
       setinverse = setinverse,
       getinverse = getinverse)
}

## Return the inverse of a special "matrix" created by makeCacheMatrix().
## The inverse is computed (via solve()) only on the first call; later
## calls serve the cached copy, with a message, until set() is used.
cacheSolve <- function(x, ...) {
  inv <- x$getinverse()
  if (is.null(inv)) {
    inv <- solve(x$get(), ...)
    x$setinverse(inv)
  } else {
    message("getting cached data")
  }
  inv
}
/cachematrix.R
no_license
rajatharlalka/ProgrammingAssignment2
R
false
false
746
r
## Build a special "matrix" wrapper that can memoise its inverse.
## The returned list exposes set()/get() for the underlying matrix and
## setinverse()/getinverse() for the cached inverse.
makeCacheMatrix <- function(x = matrix()) {
  cached_inverse <- NULL
  set <- function(value) {
    x <<- value
    cached_inverse <<- NULL  # a new matrix invalidates the cached inverse
  }
  get <- function() {
    x
  }
  setinverse <- function(inverse) {
    cached_inverse <<- inverse
  }
  getinverse <- function() {
    cached_inverse
  }
  list(set = set,
       get = get,
       setinverse = setinverse,
       getinverse = getinverse)
}

## Return the inverse of a special "matrix" created by makeCacheMatrix().
## The inverse is computed (via solve()) only on the first call; later
## calls serve the cached copy, with a message, until set() is used.
cacheSolve <- function(x, ...) {
  inv <- x$getinverse()
  if (is.null(inv)) {
    inv <- solve(x$get(), ...)
    x$setinverse(inv)
  } else {
    message("getting cached data")
  }
  inv
}
# Fetch facts from a JSON endpoint and keep only the longer ones.
#
# Args:
#   x:     URL of the facts endpoint. The parsed response is assumed to
#          contain an "all" element whose entries each carry a "text"
#          field -- inferred from the extraction below; confirm against
#          the actual API.
#   nchar: minimum fact length (exclusive cutoff, in characters).
#          NOTE(review): this parameter shares its name with base::nchar().
#          The call nchar(.) below still resolves to the base function,
#          because R looks up names used in call position among functions
#          only; the comparison `> nchar` uses the numeric parameter.
#
# Returns: a list of fact strings longer than `nchar` characters.
# Depends on httr (GET, content) and the magrittr pipe with its `.`
# placeholder.
fetch_facts <- function(x, nchar = 20) {
  GET(x) %>%
    content() %>%
    .[["all"]] %>%               # list of fact records
    lapply("[[", "text") %>%     # pull the "text" field of each record
    .[nchar(.) > nchar]          # keep facts longer than the cutoff
}

# Inject the JS that adds the "sidebar-mini" class to <body>
# (the collapsed-sidebar mode used by AdminLTE/shinydashboard themes).
enable_mini_sidebar <- function() {
  tags$script(HTML("$('body').addClass('sidebar-mini');"))
}

# Dashboard title spans: `full` is shown when the sidebar is expanded
# ("logo-lg"), `mini` when it is collapsed ("logo-mini").
mini_sidebar_title <- function(full = "catfacts!", mini = ":)") {
  tagList(
    tags$span(
      class = "logo-mini",
      mini
    ),
    tags$span(
      class = "logo-lg",
      full
    )
  )
}

# <head> links wiring up the favicon in the sizes browsers request.
enable_favicon <- function() {
  tags$head(
    tags$link(rel = "shortcut icon", href = "favicon.ico"),
    tags$link(rel = "apple-touch-icon", sizes = "180x180", href = "favicon.ico"),
    tags$link(rel = "icon", type = "image/png", sizes = "32x32", href = "/favicon-32x32.png"),
    tags$link(rel = "icon", type = "image/png", sizes = "16x16", href = "/favicon-16x16.png")
  )
}
/R/utils.R
no_license
han-tun/catfacts
R
false
false
816
r
# Fetch facts from a JSON endpoint and keep only the longer ones.
#
# Args:
#   x:     URL of the facts endpoint. The parsed response is assumed to
#          contain an "all" element whose entries each carry a "text"
#          field -- inferred from the extraction below; confirm against
#          the actual API.
#   nchar: minimum fact length (exclusive cutoff, in characters).
#          NOTE(review): this parameter shares its name with base::nchar().
#          The call nchar(.) below still resolves to the base function,
#          because R looks up names used in call position among functions
#          only; the comparison `> nchar` uses the numeric parameter.
#
# Returns: a list of fact strings longer than `nchar` characters.
# Depends on httr (GET, content) and the magrittr pipe with its `.`
# placeholder.
fetch_facts <- function(x, nchar = 20) {
  GET(x) %>%
    content() %>%
    .[["all"]] %>%               # list of fact records
    lapply("[[", "text") %>%     # pull the "text" field of each record
    .[nchar(.) > nchar]          # keep facts longer than the cutoff
}

# Inject the JS that adds the "sidebar-mini" class to <body>
# (the collapsed-sidebar mode used by AdminLTE/shinydashboard themes).
enable_mini_sidebar <- function() {
  tags$script(HTML("$('body').addClass('sidebar-mini');"))
}

# Dashboard title spans: `full` is shown when the sidebar is expanded
# ("logo-lg"), `mini` when it is collapsed ("logo-mini").
mini_sidebar_title <- function(full = "catfacts!", mini = ":)") {
  tagList(
    tags$span(
      class = "logo-mini",
      mini
    ),
    tags$span(
      class = "logo-lg",
      full
    )
  )
}

# <head> links wiring up the favicon in the sizes browsers request.
enable_favicon <- function() {
  tags$head(
    tags$link(rel = "shortcut icon", href = "favicon.ico"),
    tags$link(rel = "apple-touch-icon", sizes = "180x180", href = "favicon.ico"),
    tags$link(rel = "icon", type = "image/png", sizes = "32x32", href = "/favicon-32x32.png"),
    tags$link(rel = "icon", type = "image/png", sizes = "16x16", href = "/favicon-16x16.png")
  )
}
# This file contains functions to preprocess wiki-survey data (idea and
# vote tables) and to build the inputs for the Bradley-Terry / Thurstone
# models, including the data list for the STAN implementation of the
# hierarchical Thurstone (Salganik-Levy) model.

suppressPackageStartupMessages(library(ggplot2))

# Keep only the ideas whose Active flag is TRUE. (Mostly for internal use.)
get.active.ideas <- function(ideas) {
  ideas[ideas$Active, ]
}

# Prefix idea IDs with "o." so they cannot collide with session IDs.
# (Mostly for internal use.)
rename.ideas <- function(ideas) {
  ideas$Idea.ID <- paste("o", as.character(ideas$Idea.ID), sep = ".")
  ideas
}

# Store the idea text as character (it may have been read as a factor).
# (Mostly for internal use.)
change.type.ideas <- function(ideas) {
  ideas$Idea.Text <- as.character(ideas$Idea.Text)
  ideas
}

# Keep the ideas that have both won and lost more than `thres` times.
# Inputs:
#   ideas: data frame of ideas (needs Wins and Losses columns)
#   thres: strict lower bound on min(Wins, Losses)
# Output: the selected rows of `ideas`. (Mostly for internal use.)
threshold.ideas <- function(ideas, thres = 0) {
  # pmin() replaces the original element-wise min() loop.
  ideas[pmin(ideas$Wins, ideas$Losses) > thres, ]
}

# Complete idea preprocessing: active ideas only, "o."-prefixed IDs,
# character text, and more than `thres` wins and losses each.
# (For external use.)
preprocess.ideas <- function(ideas, thres = 0) {
  ideas <- get.active.ideas(ideas)
  ideas <- rename.ideas(ideas)
  ideas <- change.type.ideas(ideas)
  threshold.ideas(ideas, thres)
}

# Prefix session IDs with "s." and every idea-ID column with "o." in the
# votes table, and mirror Session.ID into Subject.ID.
rename.votes <- function(votes) {
  votes$Session.ID <- paste("s", as.character(votes$Session.ID), sep = ".")
  for (id.col in c("Left.Choice.ID", "Right.Choice.ID",
                   "Winner.ID", "Loser.ID")) {
    votes[[id.col]] <- paste("o", as.character(votes[[id.col]]), sep = ".")
  }
  votes$Subject.ID <- votes$Session.ID
  votes
}

# Keep the valid votes whose left and right ideas are both active.
# `votes` must already have been renamed by rename.votes().
# (Mostly for internal use.)
get.valid.votes <- function(votes, act.ideas.list) {
  votes <- votes[votes$Valid, ]
  votes <- votes[votes$Left.Choice.ID %in% act.ideas.list, ]
  votes[votes$Right.Choice.ID %in% act.ideas.list, ]
}

# Add a logical `results` column: TRUE when the idea shown on the left
# won the comparison. (Mostly for internal use.)
give.vote.results <- function(votes) {
  votes$results <- votes$Left.Choice.ID == votes$Winner.ID
  votes
}

# Complete vote preprocessing: rename IDs, keep valid votes between
# active ideas, and record which side won.
# `ideas` must already be preprocessed. (For external use.)
preprocess.votes <- function(votes, ideas) {
  votes <- rename.votes(votes)
  votes <- get.valid.votes(votes, ideas$Idea.ID)
  give.vote.results(votes)
}

# Histogram of the number of appearances (wins + losses) per idea,
# coloured by whether the idea was submitted by a voter or by the
# designers. (For external use.)
show.num.app <- function(ideas) {
  ideas$Num.App <- ideas$Wins + ideas$Losses
  ideas$Provider <- "Designer"
  ideas$Provider[ideas$User.Submitted] <- "Voter"
  ggplot(ideas, aes(Num.App, fill = Provider)) +
    geom_histogram() +
    scale_fill_manual(values = c("Voter" = "blue", "Designer" = "orange")) +
    xlab("Number of appearances per idea") +
    theme(axis.title = element_text(size = 20),
          axis.text = element_text(size = 15),
          legend.title = element_text(size = 15),
          legend.text = element_text(size = 15),
          legend.position = c(0.9, 0.9))
}

# Contingency matrix for the Bradley-Terry / Thurstone models:
# entry [w, l] counts how many times idea w beat idea l.
# (For external use.)
make.cont.matrix <- function(votes, ideas) {
  act.ideas.list <- ideas$Idea.ID
  n.ideas <- length(act.ideas.list)
  cont.matrix <- matrix(0, nrow = n.ideas, ncol = n.ideas,
                        dimnames = list(act.ideas.list, act.ideas.list))
  for (i in seq_len(nrow(votes))) {
    w <- votes$Winner.ID[i]
    l <- votes$Loser.ID[i]
    cont.matrix[w, l] <- cont.matrix[w, l] + 1
  }
  cont.matrix
}

# Indexes for theta_v in the hierarchical Thurstone (Salganik-Levy)
# model: one "idea,session" label per (idea, session) pair that appears
# in a vote, sorted by idea. Returns the labels and the parallel vector
# of idea IDs. (Mostly for internal use.)
get.theta.v.indexes <- function(votes) {
  left.index <- paste(votes$Left.Choice.ID, votes$Session.ID, sep = ",")
  right.index <- paste(votes$Right.Choice.ID, votes$Session.ID, sep = ",")
  # Interleave left/right per vote (as the original loop did), then
  # deduplicate; unique() keeps first-occurrence order.
  theta.v.indexes <- unique(c(rbind(left.index, right.index)))
  # The idea ID is everything before the first comma of each label.
  object.indexes <- sub(",.*", "", theta.v.indexes)
  # Stable sort by idea, ties broken by first appearance (order() uses a
  # stable radix sort for character vectors).
  ord <- order(object.indexes)
  list(theta.v.indexes = theta.v.indexes[ord],
       object.indexes = object.indexes[ord])
}

# Build the data list for the STAN implementation of the hierarchical
# Thurstone (Salganik-Levy) model.
# Inputs:
#   votes:  preprocessed votes (see preprocess.votes())
#   sigma2: fixed comparison-noise value, passed to STAN as `sigma`
#   tau0:   prior scale for every idea except the first (reference) one
# Output: a list suitable for STAN estimation. (For external use.)
make.stan.data <- function(votes, sigma2 = 1, tau0 = 4) {
  print("Preparing indexes......")
  prep.indexes <- get.theta.v.indexes(votes)
  theta.v.indexes <- prep.indexes$theta.v.indexes
  object.indexes <- prep.indexes$object.indexes
  print("Indexes prepared.")

  # Design vectors: for each vote, the positions in theta_v of the
  # (idea, session) pairs shown on the left and on the right.
  # match() replaces the original O(V * N) which() loop.
  print("Preparing the design matrix......")
  left.is <- match(paste(votes$Left.Choice.ID, votes$Session.ID, sep = ","),
                   theta.v.indexes)
  right.is <- match(paste(votes$Right.Choice.ID, votes$Session.ID, sep = ","),
                    theta.v.indexes)
  print("Design matrix prepared.")

  # N_k: start position of each idea's block in theta_v. Because
  # theta.v.indexes is sorted by idea, the block starts are exactly the
  # points where the idea label changes; the final entry is N + 1.
  # (This also fixes the original 2:length(...) loop, which misbehaved
  # when theta_v had a single entry.)
  print("Preparing Nk......")
  act.ideas.list <- unique(object.indexes)
  n.theta <- length(theta.v.indexes)
  changes <- which(object.indexes[-1] != object.indexes[-n.theta]) + 1
  Ns <- c(1, changes, n.theta + 1)
  print("Nk prepared.")

  stan.data <- list(V = nrow(votes),
                    K = length(act.ideas.list),
                    N = n.theta,
                    Ns = Ns,
                    left_indices = left.is,
                    right_indices = right.is,
                    y = as.integer(votes$results),
                    mu0 = rep(0, length(act.ideas.list)),
                    # Very tight prior (1e-3) pins the first idea as the
                    # reference point of the latent scale.
                    tau0 = c(1e-3, rep(tau0, length(act.ideas.list) - 1)),
                    sigma = sigma2,
                    theta.v.indexes = theta.v.indexes)
  print("STAN data prepared.")
  stan.data
}
/wiki_utils.R
no_license
Weichen-Wu-CMU/wiki_survey_analysis
R
false
false
9,324
r
# This file contains functions to process # wiki-survey data suppressPackageStartupMessages(library(ggplot2)) get.active.ideas <- function(ideas) # This function gets the active ideas in the idea data frame # (Mostly for internal use) { ideas = ideas[ideas$Active,] return(ideas) } rename.ideas <- function(ideas) # This function adds "o." to the IDs of the ideas # (Mostly for internal use) { ideas$Idea.ID = as.character(ideas$Idea.ID) ideas$Idea.ID = paste('o',ideas$Idea.ID,sep='.') return(ideas) } change.type.ideas <- function(ideas) # This function changes the data type of and text of # ideas into character # (Mostly for internal use) { ideas$Idea.Text = as.character(ideas$Idea.Text) return(ideas) } threshold.ideas <- function(ideas, thres = 0) # This function gets the ideas that # win and lose for at least thres times # Inputs: # ideas, the data frame representing the ideas in the wiki-survey; # thres, the threshold # Output: # ideas, the data frame representing selected ideas # (Mostly for internal use) { flag = vector(length = nrow(ideas)) for(i in 1:length(flag)) { flag[i] = min(ideas$Wins[i],ideas$Losses[i]) } ideas = ideas[flag>thres,] return(ideas) } preprocess.ideas <- function(ideas, thres = 0) # This function completes the preprocessing of ideas # by getting the active ideas, making necessary changes to data types # of certain attributes, and selecting the ideas that win and lose # for at least thres times. # Inputs: # ideas, the data frame representing the ideas in the wiki-survey; # thres, the threshold # Output: # ideas, the data frame representing preprocessed ideas # (For external use) { ideas = get.active.ideas(ideas) ideas = rename.ideas(ideas) ideas = change.type.ideas(ideas) ideas = threshold.ideas(ideas, thres) return(ideas) } rename.votes <- function(votes) # This function adds "s." at the beginning of the Session IDs # and "o." 
at the beginning of the idea IDS of each vote # Input: # votes, the data frame representing the votes # Output: # votes, the data frame representing the votes with session IDs and # idea IDs changed. { votes$Session.ID = as.character(votes$Session.ID) votes$Session.ID = paste('s',votes$Session.ID,sep = '.') votes$Left.Choice.ID = as.character(votes$Left.Choice.ID) votes$Left.Choice.ID = paste('o',votes$Left.Choice.ID,sep = '.') votes$Right.Choice.ID = as.character(votes$Right.Choice.ID) votes$Right.Choice.ID = paste('o',votes$Right.Choice.ID,sep = '.') votes$Winner.ID = as.character(votes$Winner.ID) votes$Winner.ID = paste('o',votes$Winner.ID,sep = '.') votes$Loser.ID = as.character(votes$Loser.ID) votes$Loser.ID = paste('o',votes$Loser.ID,sep = '.') votes$Subject.ID = votes$Session.ID return(votes) } get.valid.votes <- function(votes, act.ideas.list) # This function gets the valid votes # Valid votes must be between two active ideas # Inputs: # votes, the data frame representing the votes # notice that the names of this data frame must have been # pre-processed # act.ideas.list, the list of the names of active ideas # Output: # votes, the data frame representing the valid votes # (Mostly for internal use) { votes = votes[votes$Valid,] votes = votes[votes$Left.Choice.ID %in% act.ideas.list,] votes = votes[votes$Right.Choice.ID %in% act.ideas.list,] return(votes) } give.vote.results <- function(votes) # This function adds an attribute to the data frame of votes # called "results". The results are set to be 1 of the idea shown # on the left won the comparison. 
# Input: # votes, the data frame representing the votes # Output: # votes, the data frame representing the votes, after adding "results" # (Mostly for internal use) { votes$results = (votes$Left.Choice.ID == votes$Winner.ID) return(votes) } preprocess.votes <- function(votes,ideas) # This function completes the preprocessing of votes # by running the three functions above # Inputs: # votes, the data frame representing the votes # ideas, the data frame representing the preprocessed ideas # Output: # votes, the data frame representing the preprocessed votes # (For external use) { votes = rename.votes(votes) act.ideas.list = ideas$Idea.ID votes = get.valid.votes(votes,act.ideas.list) votes = give.vote.results(votes) return(votes) } show.num.app <- function(ideas) # This function shows the distribution of the number of times # that each idea is involved in comparisons # (For external use) { ideas$Num.App = ideas$Wins + ideas$Losses ideas$Provider = 'Designer' ideas$Provider[ideas$User.Submitted] = 'Voter' ggplot(ideas,aes(Num.App, fill = Provider)) + geom_histogram() + scale_fill_manual(values = c('Voter' = 'blue', 'Designer' = 'orange')) + xlab('Number of appearances per idea') + theme(axis.title = element_text(size = 20), axis.text = element_text(size = 15), legend.title = element_text(size = 15), legend.text = element_text(size = 15), legend.position = c(0.9,0.9)) } make.cont.matrix <- function(votes,ideas) # This function creates the contingency matrix that can be used # in the Bradley-Terry and Thurstone models # Inputs: # votes, the data frame representing the preprocessed votes # ideas, the data frame representing the preprocessed ideas # Output: # cont.matrix, the contingency matrix # (For external use) { act.ideas.list = ideas$Idea.ID cont.matrix = matrix(nrow = length(act.ideas.list), ncol = length(act.ideas.list)) colnames(cont.matrix) = act.ideas.list rownames(cont.matrix) = act.ideas.list cont.matrix[,] = 0 for(i in 1:nrow(votes)) { 
cont.matrix[votes$Winner.ID[i],votes$Loser.ID[i]] = cont.matrix[votes$Winner.ID[i],votes$Loser.ID[i]] + 1 } return(cont.matrix) } # The following functions are used to generate data for # STAN estimation of the hierarchical Thurstone model get.theta.v.indexes <- function(votes) # This function creates the indexes for theta_v in the # hierarchical Thurstone (Salganik-Levy) model # (Mostly for internal use) { theta.v.indexes = list() for(i in 1:nrow(votes)) { left.object = votes$Left.Choice.ID[i] right.object = votes$Right.Choice.ID[i] session = votes$Session.ID[i] left.index = paste(left.object,session,sep=',') right.index = paste(right.object,session,sep=',') theta.v.indexes = c(theta.v.indexes,left.index,right.index) } theta.v.indexes = unique(theta.v.indexes) theta.v.indexes = unlist(theta.v.indexes) object.indexes = theta.v.indexes for(i in 1:length(theta.v.indexes)) { object.indexes[i] = unlist(strsplit(theta.v.indexes[i],','))[1] } theta.v.indexes = theta.v.indexes[order(object.indexes)] object.indexes = object.indexes[order(object.indexes)] return(list(theta.v.indexes = theta.v.indexes, object.indexes = object.indexes)) } make.stan.data <- function(votes, sigma2 = 1, tau0 = 4) # This function creates the data for STAN implementation of the # hierarchical Thurstone (Salganik-Levy) model # Inputs: # votes, the data frame representing preprocessed votes # Output: # stan.data, a list that can be used for STAN estimation { print('Preparing indexes......') # Prepare labels prep.indexes = get.theta.v.indexes(votes) theta.v.indexes = prep.indexes$theta.v.indexes #print(length(theta.v.labels)) object.indexes = prep.indexes$object.indexes print('Indexes prepared.') # The design matrix print('Preparing the design matrix......') left.is = vector(length = nrow(votes)) right.is = vector(length = nrow(votes)) win.is = vector(length = nrow(votes)) for(i in 1:nrow(votes)) { left.object = votes$Left.Choice.ID[i] right.object = votes$Right.Choice.ID[i] session = 
votes$Session.ID[i] left.index = paste(left.object,session,sep=',') right.index = paste(right.object,session,sep=',') left.i = which(theta.v.indexes == left.index) right.i = which(theta.v.indexes == right.index) win.i = votes$results[i] left.is[i] = left.i right.is[i] = right.i win.is[i] = win.i if(i%%1000 == 0) print(i) } print('Design matrix prepared.') # N_k: the number that theta_jk is in theta_v, for every k print('Preparing Nk......') act.ideas.list = unique(object.indexes) Ns = vector(length = length(act.ideas.list) + 1) Ns[1] = 1 cnt = 1 for(i in 2:length(theta.v.indexes)) { if(object.indexes[i] != object.indexes[i-1]) { cnt = cnt + 1 Ns[cnt] = i } if(i%%1000 == 0) print(i) } Ns[length(act.ideas.list) + 1] = length(theta.v.indexes) + 1 print('Nk prepared.') stan.data <- list(V = nrow(votes), K = length(act.ideas.list), N = length(theta.v.indexes), Ns = Ns, left_indices = left.is, right_indices = right.is, y = as.integer(win.is), mu0 = rep(0,length(act.ideas.list)), tau0 = c(1e-3,rep(tau0,length(act.ideas.list)-1)), sigma = sigma2, theta.v.indexes = theta.v.indexes) print('STAN data prepared.') return(stan.data) }
require(parallel) ENV_CONCERTO_R_APP_URL = Sys.getenv("CONCERTO_R_APP_URL") ENV_CONCERTO_R_DB_CONNECTION = Sys.getenv("CONCERTO_R_DB_CONNECTION") ENV_CONCERTO_R_SESSION_FIFO_PATH = Sys.getenv("CONCERTO_R_SESSION_FIFO_PATH") ENV_CONCERTO_R_SERVICE_FIFO_PATH = Sys.getenv("CONCERTO_R_SERVICE_FIFO_PATH") ENV_CONCERTO_R_MAX_EXEC_TIME = Sys.getenv("CONCERTO_R_MAX_EXEC_TIME") ENV_CONCERTO_R_MAX_IDLE_TIME = Sys.getenv("CONCERTO_R_MAX_IDLE_TIME") ENV_CONCERTO_R_KEEP_ALIVE_TOLERANCE_TIME = Sys.getenv("CONCERTO_R_KEEP_ALIVE_TOLERANCE_TIME") ENV_CONCERTO_R_PLATFORM_URL = Sys.getenv("CONCERTO_R_PLATFORM_URL") ENV_CONCERTO_R_PUBLIC_DIR = Sys.getenv("CONCERTO_R_PUBLIC_DIR") ENV_CONCERTO_R_REDIS_CONNECTION = Sys.getenv("CONCERTO_R_REDIS_CONNECTION") ENV_CONCERTO_R_SESSION_STORAGE = Sys.getenv("CONCERTO_R_SESSION_STORAGE") ENV_CONCERTO_R_SESSION_FILES_EXPIRATION = Sys.getenv("CONCERTO_R_SESSION_FILES_EXPIRATION") ENV_CONCERTO_R_SESSION_LOG_LEVEL = as.numeric(Sys.getenv("CONCERTO_R_SESSION_LOG_LEVEL")) ENV_CONCERTO_R_FORCED_GC_INTERVAL = as.numeric(Sys.getenv("CONCERTO_R_FORCED_GC_INTERVAL")) concerto5:::concerto.init( dbConnectionParams = fromJSON(ENV_CONCERTO_R_DB_CONNECTION), publicDir = ENV_CONCERTO_R_PUBLIC_DIR, platformUrl = ENV_CONCERTO_R_PLATFORM_URL, appUrl = ENV_CONCERTO_R_APP_URL, maxExecTime = as.numeric(ENV_CONCERTO_R_MAX_EXEC_TIME), maxIdleTime = as.numeric(ENV_CONCERTO_R_MAX_IDLE_TIME), keepAliveToleranceTime = as.numeric(ENV_CONCERTO_R_KEEP_ALIVE_TOLERANCE_TIME), sessionStorage = ENV_CONCERTO_R_SESSION_STORAGE, redisConnectionParams = fromJSON(ENV_CONCERTO_R_REDIS_CONNECTION), sessionFilesExpiration = ENV_CONCERTO_R_SESSION_FILES_EXPIRATION, serviceFifoDir = ENV_CONCERTO_R_SERVICE_FIFO_PATH ) switch(concerto$dbConnectionParams$driver, pdo_mysql = require("RMySQL"), pdo_sqlsrv = require("RSQLServer") ) switch(ENV_CONCERTO_R_SESSION_STORAGE, redis = require("redux") ) concerto.log("starting forker listener") queue = c() unlink(paste0(ENV_CONCERTO_R_SESSION_FIFO_PATH, 
"*.fifo")) lastForcedGcTime = as.numeric(Sys.time()) repeat { if(ENV_CONCERTO_R_FORCED_GC_INTERVAL >= 0) { currentTime = as.numeric(Sys.time()) if(currentTime - lastForcedGcTime > ENV_CONCERTO_R_FORCED_GC_INTERVAL) { gcOutput = gc(F) lastForcedGcTime = currentTime } } fpath = "" if(length(queue) == 0) { queue = list.files(ENV_CONCERTO_R_SESSION_FIFO_PATH, full.names=TRUE) } if(length(queue) > 0) { fpath = queue[1] queue = queue[-1] } else { Sys.sleep(0.25) next } con = fifo(fpath, blocking=TRUE, open="rt") response = readLines(con, warn = FALSE, n = 1, ok = TRUE) close(con) rm(con) unlink(fpath) rm(fpath) if(length(response) == 0) { concerto.log(response, "invalid request") next } response = tryCatch({ fromJSON(response) }, error = function(e) { message(e) message(response) q("no", 1) }) if(is.null(response$rLogPath)) response$rLogPath = "/dev/null" mcparallel({ if(ENV_CONCERTO_R_SESSION_LOG_LEVEL > 0) { sinkFile <- file(response$rLogPath, open = "at") sink(file = sinkFile, append = TRUE, type = "output", split = FALSE) sink(file = sinkFile, append = TRUE, type = "message", split = FALSE) rm(sinkFile) } else { nullFile <- file("/dev/null", open = "at") #UNIX only sink(file = nullFile, append = TRUE, type = "output", split = FALSE) sink(file = nullFile, append = TRUE, type = "message", split = FALSE) } rm(queue) concerto$lastSubmitTime <- as.numeric(Sys.time()) concerto$lastKeepAliveTime <- as.numeric(Sys.time()) concerto5:::concerto.run( workingDir = response$workingDir, client = response$client, sessionHash = response$sessionId, maxIdleTime = response$maxIdleTime, maxExecTime = response$maxExecTime, response = response$response, initialPort = response$initialPort, runnerType = response$runnerType ) }, detached = TRUE) } concerto.log("listener closing")
/src/Concerto/TestBundle/Resources/R/forker.R
permissive
campsych/concerto-platform
R
false
false
4,315
r
require(parallel) ENV_CONCERTO_R_APP_URL = Sys.getenv("CONCERTO_R_APP_URL") ENV_CONCERTO_R_DB_CONNECTION = Sys.getenv("CONCERTO_R_DB_CONNECTION") ENV_CONCERTO_R_SESSION_FIFO_PATH = Sys.getenv("CONCERTO_R_SESSION_FIFO_PATH") ENV_CONCERTO_R_SERVICE_FIFO_PATH = Sys.getenv("CONCERTO_R_SERVICE_FIFO_PATH") ENV_CONCERTO_R_MAX_EXEC_TIME = Sys.getenv("CONCERTO_R_MAX_EXEC_TIME") ENV_CONCERTO_R_MAX_IDLE_TIME = Sys.getenv("CONCERTO_R_MAX_IDLE_TIME") ENV_CONCERTO_R_KEEP_ALIVE_TOLERANCE_TIME = Sys.getenv("CONCERTO_R_KEEP_ALIVE_TOLERANCE_TIME") ENV_CONCERTO_R_PLATFORM_URL = Sys.getenv("CONCERTO_R_PLATFORM_URL") ENV_CONCERTO_R_PUBLIC_DIR = Sys.getenv("CONCERTO_R_PUBLIC_DIR") ENV_CONCERTO_R_REDIS_CONNECTION = Sys.getenv("CONCERTO_R_REDIS_CONNECTION") ENV_CONCERTO_R_SESSION_STORAGE = Sys.getenv("CONCERTO_R_SESSION_STORAGE") ENV_CONCERTO_R_SESSION_FILES_EXPIRATION = Sys.getenv("CONCERTO_R_SESSION_FILES_EXPIRATION") ENV_CONCERTO_R_SESSION_LOG_LEVEL = as.numeric(Sys.getenv("CONCERTO_R_SESSION_LOG_LEVEL")) ENV_CONCERTO_R_FORCED_GC_INTERVAL = as.numeric(Sys.getenv("CONCERTO_R_FORCED_GC_INTERVAL")) concerto5:::concerto.init( dbConnectionParams = fromJSON(ENV_CONCERTO_R_DB_CONNECTION), publicDir = ENV_CONCERTO_R_PUBLIC_DIR, platformUrl = ENV_CONCERTO_R_PLATFORM_URL, appUrl = ENV_CONCERTO_R_APP_URL, maxExecTime = as.numeric(ENV_CONCERTO_R_MAX_EXEC_TIME), maxIdleTime = as.numeric(ENV_CONCERTO_R_MAX_IDLE_TIME), keepAliveToleranceTime = as.numeric(ENV_CONCERTO_R_KEEP_ALIVE_TOLERANCE_TIME), sessionStorage = ENV_CONCERTO_R_SESSION_STORAGE, redisConnectionParams = fromJSON(ENV_CONCERTO_R_REDIS_CONNECTION), sessionFilesExpiration = ENV_CONCERTO_R_SESSION_FILES_EXPIRATION, serviceFifoDir = ENV_CONCERTO_R_SERVICE_FIFO_PATH ) switch(concerto$dbConnectionParams$driver, pdo_mysql = require("RMySQL"), pdo_sqlsrv = require("RSQLServer") ) switch(ENV_CONCERTO_R_SESSION_STORAGE, redis = require("redux") ) concerto.log("starting forker listener") queue = c() unlink(paste0(ENV_CONCERTO_R_SESSION_FIFO_PATH, 
"*.fifo")) lastForcedGcTime = as.numeric(Sys.time()) repeat { if(ENV_CONCERTO_R_FORCED_GC_INTERVAL >= 0) { currentTime = as.numeric(Sys.time()) if(currentTime - lastForcedGcTime > ENV_CONCERTO_R_FORCED_GC_INTERVAL) { gcOutput = gc(F) lastForcedGcTime = currentTime } } fpath = "" if(length(queue) == 0) { queue = list.files(ENV_CONCERTO_R_SESSION_FIFO_PATH, full.names=TRUE) } if(length(queue) > 0) { fpath = queue[1] queue = queue[-1] } else { Sys.sleep(0.25) next } con = fifo(fpath, blocking=TRUE, open="rt") response = readLines(con, warn = FALSE, n = 1, ok = TRUE) close(con) rm(con) unlink(fpath) rm(fpath) if(length(response) == 0) { concerto.log(response, "invalid request") next } response = tryCatch({ fromJSON(response) }, error = function(e) { message(e) message(response) q("no", 1) }) if(is.null(response$rLogPath)) response$rLogPath = "/dev/null" mcparallel({ if(ENV_CONCERTO_R_SESSION_LOG_LEVEL > 0) { sinkFile <- file(response$rLogPath, open = "at") sink(file = sinkFile, append = TRUE, type = "output", split = FALSE) sink(file = sinkFile, append = TRUE, type = "message", split = FALSE) rm(sinkFile) } else { nullFile <- file("/dev/null", open = "at") #UNIX only sink(file = nullFile, append = TRUE, type = "output", split = FALSE) sink(file = nullFile, append = TRUE, type = "message", split = FALSE) } rm(queue) concerto$lastSubmitTime <- as.numeric(Sys.time()) concerto$lastKeepAliveTime <- as.numeric(Sys.time()) concerto5:::concerto.run( workingDir = response$workingDir, client = response$client, sessionHash = response$sessionId, maxIdleTime = response$maxIdleTime, maxExecTime = response$maxExecTime, response = response$response, initialPort = response$initialPort, runnerType = response$runnerType ) }, detached = TRUE) } concerto.log("listener closing")
suppressPackageStartupMessages(library(argparse)) suppressPackageStartupMessages(library(epiRepeatR)) suppressPackageStartupMessages(library(ggplot2)) ap <- ArgumentParser() ap$add_argument("-i", "--in", action="store", dest="inFileTable", help="Input file table (tab-separated).") ap$add_argument("-o", "--out", action="store", dest="output", help="Output directory") ap$add_argument("-c", "--config", action="store", help="Config file (json)") cmdArgs <- ap$parse_args() #problem: too long of a command line logger.cmd.args(cmdArgs) loadConfig(cmdArgs$config) if (is.element("debug", names(epiRepeatR:::.config)) && epiRepeatR:::.config$debug){ saveRDS(cmdArgs, file.path(cmdArgs$output, "cmdargs.rds")) } outDir <- cmdArgs$output inFileTable <- read.table(cmdArgs$inFileTable, sep="\t", comment.char="", header=TRUE, stringsAsFactors=FALSE) logger.start("Getting read counts and stats") alnStats <- do.call("rbind", lapply(1:nrow(inFileTable), function(i){ logger.status(c("Processing (",i,"):", paste(inFileTable[i, c("sampleName","mark","dataType")], collapse=" - "))) return(epiRepeatR:::getAlnStats(inFileTable[i, "fileName.repeatAlignment"], inFileTable[i, "fileName.bamExtract"])) })) alnStats <- data.frame(inFileTable[,c("sampleName", "mark", "dataType")], alnFile=inFileTable[,"fileName.repeatAlignment"], alnStats, stringsAsFactors=FALSE) logger.completed() logger.start("Writing output") fn <- file.path(outDir, "alignmentStats.tsv") write.table(alnStats, file=fn, quote=FALSE, row.names=FALSE, sep="\t", col.names=TRUE) logger.completed() logger.start("Generating Plots") theme_set(theme_bw()) fn <- file.path(outDir, "alignmentStats.pdf") pp <- plotRepeatAlignmentStats(alnStats) ggsave(fn, pp, width=10,height=10) logger.completed()
/inst/extdata/exec/repeatAlignmentStats.R
no_license
MPIIComputationalEpigenetics/epiRepeatR
R
false
false
1,771
r
suppressPackageStartupMessages(library(argparse)) suppressPackageStartupMessages(library(epiRepeatR)) suppressPackageStartupMessages(library(ggplot2)) ap <- ArgumentParser() ap$add_argument("-i", "--in", action="store", dest="inFileTable", help="Input file table (tab-separated).") ap$add_argument("-o", "--out", action="store", dest="output", help="Output directory") ap$add_argument("-c", "--config", action="store", help="Config file (json)") cmdArgs <- ap$parse_args() #problem: too long of a command line logger.cmd.args(cmdArgs) loadConfig(cmdArgs$config) if (is.element("debug", names(epiRepeatR:::.config)) && epiRepeatR:::.config$debug){ saveRDS(cmdArgs, file.path(cmdArgs$output, "cmdargs.rds")) } outDir <- cmdArgs$output inFileTable <- read.table(cmdArgs$inFileTable, sep="\t", comment.char="", header=TRUE, stringsAsFactors=FALSE) logger.start("Getting read counts and stats") alnStats <- do.call("rbind", lapply(1:nrow(inFileTable), function(i){ logger.status(c("Processing (",i,"):", paste(inFileTable[i, c("sampleName","mark","dataType")], collapse=" - "))) return(epiRepeatR:::getAlnStats(inFileTable[i, "fileName.repeatAlignment"], inFileTable[i, "fileName.bamExtract"])) })) alnStats <- data.frame(inFileTable[,c("sampleName", "mark", "dataType")], alnFile=inFileTable[,"fileName.repeatAlignment"], alnStats, stringsAsFactors=FALSE) logger.completed() logger.start("Writing output") fn <- file.path(outDir, "alignmentStats.tsv") write.table(alnStats, file=fn, quote=FALSE, row.names=FALSE, sep="\t", col.names=TRUE) logger.completed() logger.start("Generating Plots") theme_set(theme_bw()) fn <- file.path(outDir, "alignmentStats.pdf") pp <- plotRepeatAlignmentStats(alnStats) ggsave(fn, pp, width=10,height=10) logger.completed()
% Generated by roxygen2: do not edit by hand % Please edit documentation in R/calc_effort.R \name{calc_fishing_days_trip} \alias{calc_fishing_days_trip} \title{Calculate fishing days for a fishing trip.} \usage{ calc_fishing_days_trip(trip) } \arguments{ \item{trip}{Data.frame of the trip data} } \value{ A data.frame with the fishing days by gear, fishing area, economic zone and rectangle. } \description{ Calculate fishing days for a single fishing trip using data in the format described in the package vignette \emph{checking_data}. } \details{ The input is a single fishing trip. The format of the data should be checked by \code{\link{check_format}} before calling this function (see the package vignette \emph{checking_data} for more details). Fishing days is reported at the gear (type and mesh size), fishing area, economic zone and rectangle level. Passive and active gears are treated separately. For active gears, each fishing date has 1 fishing day that is spread equally over the active gears. For passive gears, each use of a passive gear is one fishing day, i.e. on fishing date can have several passive fishing days simultaneously. See the vignette \emph{calculating_fishing_effort} for more details. This function is called by \code{\link{calc_fishing_effort}}. } \examples{ trip1 <- data.frame( eunr_id = "my_boat", loa = 2000, gt = 70, kw = 400, trip_id = "trip1", # 4 day trip depdate = "20140718", deptime = "0615", retdate = "20140721", rettime = "1615", # Only fish on 2 of those fishdate = c("20140719", "20140719", "20140719", "20140719", "20140720", "20140720", "20140720"), gear = c("OTB","OTB","OTB","GN","OTB","GN","FPO"), gear_mesh_size = c(80,80,80,50,80,50,0), fishing_area = "27.4.B", economic_zone = "EU", rectangle = c("39F0","40F0","41F0","41F0","41F0","41F0","41F0"), stringsAsFactors = FALSE ) fd <- calc_fishing_days_trip(trip1) } \seealso{ See \code{\link{calc_fishing_effort}}. 
See the package vignette \emph{checking_data} for data preparation and the vignette \emph{calculating_fishing_effort} for the calculation details. }
/man/calc_fishing_days_trip.Rd
no_license
cran/fecR
R
false
true
2,113
rd
% Generated by roxygen2: do not edit by hand % Please edit documentation in R/calc_effort.R \name{calc_fishing_days_trip} \alias{calc_fishing_days_trip} \title{Calculate fishing days for a fishing trip.} \usage{ calc_fishing_days_trip(trip) } \arguments{ \item{trip}{Data.frame of the trip data} } \value{ A data.frame with the fishing days by gear, fishing area, economic zone and rectangle. } \description{ Calculate fishing days for a single fishing trip using data in the format described in the package vignette \emph{checking_data}. } \details{ The input is a single fishing trip. The format of the data should be checked by \code{\link{check_format}} before calling this function (see the package vignette \emph{checking_data} for more details). Fishing days is reported at the gear (type and mesh size), fishing area, economic zone and rectangle level. Passive and active gears are treated separately. For active gears, each fishing date has 1 fishing day that is spread equally over the active gears. For passive gears, each use of a passive gear is one fishing day, i.e. on fishing date can have several passive fishing days simultaneously. See the vignette \emph{calculating_fishing_effort} for more details. This function is called by \code{\link{calc_fishing_effort}}. } \examples{ trip1 <- data.frame( eunr_id = "my_boat", loa = 2000, gt = 70, kw = 400, trip_id = "trip1", # 4 day trip depdate = "20140718", deptime = "0615", retdate = "20140721", rettime = "1615", # Only fish on 2 of those fishdate = c("20140719", "20140719", "20140719", "20140719", "20140720", "20140720", "20140720"), gear = c("OTB","OTB","OTB","GN","OTB","GN","FPO"), gear_mesh_size = c(80,80,80,50,80,50,0), fishing_area = "27.4.B", economic_zone = "EU", rectangle = c("39F0","40F0","41F0","41F0","41F0","41F0","41F0"), stringsAsFactors = FALSE ) fd <- calc_fishing_days_trip(trip1) } \seealso{ See \code{\link{calc_fishing_effort}}. 
See the package vignette \emph{checking_data} for data preparation and the vignette \emph{calculating_fishing_effort} for the calculation details. }
#Examine lambda values with a Leslie matrix #set so R doesn't use scientific notation options("scipen"=100, "digits"=4) library(tidyverse) library(popbio) #devtools::install_github("BruceKendall/mpmtools") #In case you don't have mpmtools library(mpmtools) #-------------Age and survival settings-------------------------- YOY.survival <- 0.7 #young of year survival juvenile.survival <- 0.8 # juvenile survival adult.survival <- 0.825 # Adult survival repro.age <- 12 # set age of reproductive maturity max.age <- maxAge <- 50 #set the maximum age allowed in the simulation juv.ages <- repro.age - 1 #Years of being a juvenile ages <- c(0:maxAge) #Total ages adult.ages <- length(ages) - (juv.ages + 1) (Ma <- -log(adult.survival)) #Mortality of adults (Mj <- -log(juvenile.survival)) #Mortality of juveniles (Sa <- exp(-Ma)) #Survival of adults (Sj <- exp(-Mj)) #Survival of juveniles Mx <- 0.1 #Extra mortality (Sa_new <- exp(-Ma - Mx)) (Sj_new <- exp(-Mj - Mx)) #-----------------------Fecundity and reproduction settings--------------------------- #mating.periodicity <- 2 #number of years between mating; assigned to an individual and sticks with them through their life. So they're either a one or two year breeder. 
#non.conformists <- 0.05 #proportion of off-year breeders to randomly include off their breeding cycle - want to change this to non.conformists #num.mates <- c(1:3) #vector of potential number of mates per mating f <- (1-Adult.survival)/(YOY.survival * juvenile.survival^11) # adult fecundity at equilibrium if no age truncation # ff <- f/init.prop.female * mating.periodicity/mean(num.mates) # female fecundity per breeding cycle # ff # ff <- ff*(1-non.conformists) #Change female fecundity per breeding cycle to account for non-conformists # ff #Fecundity for Leslie matrix leslie.fecundity <- f #-----------------Leslie Matrix parameters-------------------- #Prep Leslie matrix input survival.vec <- c(YOY.survival, rep(Sj_new, times = juv.ages), rep(Sa_new, times = adult.ages - 1), 0) fecund.vec <- c(rep(0, times = repro.age), rep(leslie.fecundity, times = maxAge - juv.ages)) #Create dataframe for input to Leslie matrix Leslie_input <- data.frame( x = c(0:maxAge), #age sx = survival.vec, #survival mx = fecund.vec ) #Make Leslie matrix A1_pre <- make_Leslie_matrix(Leslie_input) #Calculate dominant eigenvalue (i.e. lambda) from transition matrix (lambda1 <- as_tibble(lambda1(A1_pre)) %>% rename(lambda = value)) #If wanting to convert to a post-breeding census Leslie matrix A1_post <- pre_to_post(Amat = A1_pre, S0 = YOY.survival) #View(A1_post)
/01_Data.generating.model/functions/Leslie_matrix_MAIN.R
no_license
JDSwenson/LemonSharkCKMR
R
false
false
2,602
r
#Examine lambda values with a Leslie matrix #set so R doesn't use scientific notation options("scipen"=100, "digits"=4) library(tidyverse) library(popbio) #devtools::install_github("BruceKendall/mpmtools") #In case you don't have mpmtools library(mpmtools) #-------------Age and survival settings-------------------------- YOY.survival <- 0.7 #young of year survival juvenile.survival <- 0.8 # juvenile survival adult.survival <- 0.825 # Adult survival repro.age <- 12 # set age of reproductive maturity max.age <- maxAge <- 50 #set the maximum age allowed in the simulation juv.ages <- repro.age - 1 #Years of being a juvenile ages <- c(0:maxAge) #Total ages adult.ages <- length(ages) - (juv.ages + 1) (Ma <- -log(adult.survival)) #Mortality of adults (Mj <- -log(juvenile.survival)) #Mortality of juveniles (Sa <- exp(-Ma)) #Survival of adults (Sj <- exp(-Mj)) #Survival of juveniles Mx <- 0.1 #Extra mortality (Sa_new <- exp(-Ma - Mx)) (Sj_new <- exp(-Mj - Mx)) #-----------------------Fecundity and reproduction settings--------------------------- #mating.periodicity <- 2 #number of years between mating; assigned to an individual and sticks with them through their life. So they're either a one or two year breeder. 
#non.conformists <- 0.05 #proportion of off-year breeders to randomly include off their breeding cycle - want to change this to non.conformists #num.mates <- c(1:3) #vector of potential number of mates per mating f <- (1-Adult.survival)/(YOY.survival * juvenile.survival^11) # adult fecundity at equilibrium if no age truncation # ff <- f/init.prop.female * mating.periodicity/mean(num.mates) # female fecundity per breeding cycle # ff # ff <- ff*(1-non.conformists) #Change female fecundity per breeding cycle to account for non-conformists # ff #Fecundity for Leslie matrix leslie.fecundity <- f #-----------------Leslie Matrix parameters-------------------- #Prep Leslie matrix input survival.vec <- c(YOY.survival, rep(Sj_new, times = juv.ages), rep(Sa_new, times = adult.ages - 1), 0) fecund.vec <- c(rep(0, times = repro.age), rep(leslie.fecundity, times = maxAge - juv.ages)) #Create dataframe for input to Leslie matrix Leslie_input <- data.frame( x = c(0:maxAge), #age sx = survival.vec, #survival mx = fecund.vec ) #Make Leslie matrix A1_pre <- make_Leslie_matrix(Leslie_input) #Calculate dominant eigenvalue (i.e. lambda) from transition matrix (lambda1 <- as_tibble(lambda1(A1_pre)) %>% rename(lambda = value)) #If wanting to convert to a post-breeding census Leslie matrix A1_post <- pre_to_post(Amat = A1_pre, S0 = YOY.survival) #View(A1_post)
# Using library(marelac) # ... as well as constants from # http://www.ices.dk/marine-data/tools/Pages/Unit-conversions.aspx x <- gas_O2sat(t = 15) # Saturation of O2 at 8 degrees C (given as mg/L) x # Saturation concentration in mg/L (9.43) x*1000/molweight("O2") # Saturation concentration in mmol/m3 (294.8) x*1000/molweight("O2")*0.022391 # Saturation concentration in ml/m3 (6.60) # or x*0.7 # in ml/m3 (6.60) - from the ICES page # Convert mg/L to saturation temp <- 8 conc <- 6 conc/gas_O2sat(t = temp)*100 # saturation # Convert ml/L to saturation temp <- 8 conc <- 4 (conc/0.7)/gas_O2sat(t = temp)*100 # saturation
/81 Oxygen concentration conversion.R
no_license
NIVANorge/Okokyst_oceanography
R
false
false
712
r
# Using library(marelac) # ... as well as constants from # http://www.ices.dk/marine-data/tools/Pages/Unit-conversions.aspx x <- gas_O2sat(t = 15) # Saturation of O2 at 8 degrees C (given as mg/L) x # Saturation concentration in mg/L (9.43) x*1000/molweight("O2") # Saturation concentration in mmol/m3 (294.8) x*1000/molweight("O2")*0.022391 # Saturation concentration in ml/m3 (6.60) # or x*0.7 # in ml/m3 (6.60) - from the ICES page # Convert mg/L to saturation temp <- 8 conc <- 6 conc/gas_O2sat(t = temp)*100 # saturation # Convert ml/L to saturation temp <- 8 conc <- 4 (conc/0.7)/gas_O2sat(t = temp)*100 # saturation
# Nested Repeated Measures Example # Guinea Pig Weights and Vitamin E # Packages packages <- c("tidyverse", "hasseDiagram", "car", "ggplot2", "afex", "emmeans") lapply(packages, library, character.only = TRUE) # Load Helper Functions source("https://raw.github.com/neilhatfield/STAT461/master/ANOVATools.R") # Set Global Options options("contrasts" = c("contr.sum","contr.poly")) # Hasse Diagram gpLab <- c("1 Grand Mean 1", "3 Vit. E 2", "6 Time Point 5", "15 (pigs) 12", "18 Vit. E X Time 10", "90 (pigs X Time) 60") gpMat <- matrix(data = F, nrow = 6, ncol = 6) gpMat[1, c(2:6)] = gpMat[2, c(4,5,6)] = T gpMat[3, c(5, 6)] = gpMat[c(4:5), 6] = T hasseDiagram::hasse(gpMat, gpLab) # Data File guineaPigs <- read.table( file = "https://raw.github.com/neilhatfield/STAT461/master/dataFiles/guineaPigs.dat", header = TRUE, sep = "," ) guineaPigs$Subject <- as.factor(guineaPigs$Subject) # Data are in Wide Format, we need will need Long Format for some calls pigsL <- tidyr::pivot_longer(guineaPigs, cols = dplyr::starts_with("Week"), names_to = "Time", names_ptypes = list("Time" = factor()), values_to = "weight") # Explore the data-On Your Own ## Data Visualizations ## Descriptive Statistics # Fit the Nested Repeated Measures Model model1 <- aov(weight ~ trt*Time + Error(Subject/Time), data = pigsL) # Check assumptions-On Your Own # Omnibus Test ## Using the aov call, Raw Output summary(model1) ### Professional looking is On Your Own ## Multivariate approach--Uses the original data frame ### Isolate the response values weights <- as.matrix(guineaPigs[ , 3:8]) ### Fit multivariate model--Notice the formula model2 <- lm(weights ~ 1 + trt, data = guineaPigs ) ### Define the Repeated Factor weeks <- factor(c("Week1","Week3","Week4","Week5","Week6","Week7")) ### Create the table object tabPigs <- car::Anova(model2, idata = data.frame(weeks), idesign = ~weeks, type = "III") ### Raw Output outRM <- summary(tabPigs, multivariate=FALSE) ### Professional Tables are On Your Own ## Omnibus 
Method 3--Using the Afex package ## This blends the two methods together model3 <- afex::aov_car(weight ~ trt*Time + Error(Subject %in% trt), data = pigsL) ## Raw Output summary(model3) # Post Hoc Analysis ## No Interaction phMeansTime <- emmeans::emmeans(model3, ~Time, adjust = "tukey", level = 0.9) phMeansTrt <- emmeans::emmeans(model3, ~trt, adjust = "tukey", level = 0.9) ### Raw Output phMeansTime phMeansTrt ## Interaction phMeansInt <- emmeans::emmeans(model3, ~Time|trt) phMeansInt ##Pairwise pairs <- emmeans::emmeans(model3, pairwise ~ Time|trt, adjust = "tukey", level = 0.9) pairs$emmeans pairs$contrasts pairs2 <- emmeans::emmeans(model3, pairwise ~ trt|Time, adjust = "tukey", level = 0.9) ## Growth Curves ggplot2::ggplot(data = pigsL, mapping = ggplot2::aes( y = weight, x = Time, group = Subject, color = trt )) + ggplot2::geom_point(size = 2) + ggplot2::geom_line(size = 1) + ggplot2::theme_bw() + xlab("Time Point") + ylab("Weight (grams)") + labs(color = "Vitmain E Dosage") ### Hard to Read, try spliting by treatment instead of color ggplot2::ggplot(data = pigsL, mapping = ggplot2::aes( y = weight, x = Time, group = Subject)) + ggplot2::geom_point(size = 2) + ggplot2::geom_line(size = 1) + ggplot2::facet_grid(trt ~ .) + ggplot2::theme_bw() + xlab("Time Point") + ylab("Weight (grams)")
/oldExampleRFiles/repeatedMeasures-Nested.R
no_license
neilhatfield/STAT461
R
false
false
3,888
r
# Nested Repeated Measures Example # Guinea Pig Weights and Vitamin E # Packages packages <- c("tidyverse", "hasseDiagram", "car", "ggplot2", "afex", "emmeans") lapply(packages, library, character.only = TRUE) # Load Helper Functions source("https://raw.github.com/neilhatfield/STAT461/master/ANOVATools.R") # Set Global Options options("contrasts" = c("contr.sum","contr.poly")) # Hasse Diagram gpLab <- c("1 Grand Mean 1", "3 Vit. E 2", "6 Time Point 5", "15 (pigs) 12", "18 Vit. E X Time 10", "90 (pigs X Time) 60") gpMat <- matrix(data = F, nrow = 6, ncol = 6) gpMat[1, c(2:6)] = gpMat[2, c(4,5,6)] = T gpMat[3, c(5, 6)] = gpMat[c(4:5), 6] = T hasseDiagram::hasse(gpMat, gpLab) # Data File guineaPigs <- read.table( file = "https://raw.github.com/neilhatfield/STAT461/master/dataFiles/guineaPigs.dat", header = TRUE, sep = "," ) guineaPigs$Subject <- as.factor(guineaPigs$Subject) # Data are in Wide Format, we need will need Long Format for some calls pigsL <- tidyr::pivot_longer(guineaPigs, cols = dplyr::starts_with("Week"), names_to = "Time", names_ptypes = list("Time" = factor()), values_to = "weight") # Explore the data-On Your Own ## Data Visualizations ## Descriptive Statistics # Fit the Nested Repeated Measures Model model1 <- aov(weight ~ trt*Time + Error(Subject/Time), data = pigsL) # Check assumptions-On Your Own # Omnibus Test ## Using the aov call, Raw Output summary(model1) ### Professional looking is On Your Own ## Multivariate approach--Uses the original data frame ### Isolate the response values weights <- as.matrix(guineaPigs[ , 3:8]) ### Fit multivariate model--Notice the formula model2 <- lm(weights ~ 1 + trt, data = guineaPigs ) ### Define the Repeated Factor weeks <- factor(c("Week1","Week3","Week4","Week5","Week6","Week7")) ### Create the table object tabPigs <- car::Anova(model2, idata = data.frame(weeks), idesign = ~weeks, type = "III") ### Raw Output outRM <- summary(tabPigs, multivariate=FALSE) ### Professional Tables are On Your Own ## Omnibus 
Method 3--Using the Afex package ## This blends the two methods together model3 <- afex::aov_car(weight ~ trt*Time + Error(Subject %in% trt), data = pigsL) ## Raw Output summary(model3) # Post Hoc Analysis ## No Interaction phMeansTime <- emmeans::emmeans(model3, ~Time, adjust = "tukey", level = 0.9) phMeansTrt <- emmeans::emmeans(model3, ~trt, adjust = "tukey", level = 0.9) ### Raw Output phMeansTime phMeansTrt ## Interaction phMeansInt <- emmeans::emmeans(model3, ~Time|trt) phMeansInt ##Pairwise pairs <- emmeans::emmeans(model3, pairwise ~ Time|trt, adjust = "tukey", level = 0.9) pairs$emmeans pairs$contrasts pairs2 <- emmeans::emmeans(model3, pairwise ~ trt|Time, adjust = "tukey", level = 0.9) ## Growth Curves ggplot2::ggplot(data = pigsL, mapping = ggplot2::aes( y = weight, x = Time, group = Subject, color = trt )) + ggplot2::geom_point(size = 2) + ggplot2::geom_line(size = 1) + ggplot2::theme_bw() + xlab("Time Point") + ylab("Weight (grams)") + labs(color = "Vitmain E Dosage") ### Hard to Read, try spliting by treatment instead of color ggplot2::ggplot(data = pigsL, mapping = ggplot2::aes( y = weight, x = Time, group = Subject)) + ggplot2::geom_point(size = 2) + ggplot2::geom_line(size = 1) + ggplot2::facet_grid(trt ~ .) + ggplot2::theme_bw() + xlab("Time Point") + ylab("Weight (grams)")
\name{family.glmmNPML} \alias{family.glmmNPML} \alias{family.glmmGQ} \alias{model.matrix.glmmNPML} \alias{model.matrix.glmmGQ} %- Also NEED an '\alias' for EACH other topic documented here. \title{Methods for objects of class glmmNPML or glmmGQ} \description{ Methods for the generic \code{family} and \code{model.matrix} functions } \usage{ \method{family}{glmmNPML}(object, ...) \method{family}{glmmGQ}(object, ...) \method{model.matrix}{glmmNPML}(object, ...) \method{model.matrix}{glmmGQ}(object, ...) } %- maybe also 'usage' for other objects documented here. \arguments{ \item{object}{ object of class \code{glmmNPML} or \code{glmmGQ}. } \item{\dots}{ further arguments, ensuring compability with generic functions. } } %\details{ % ~~ If necessary, more details than the description above ~~ %} %\value{ % ~Describe the value returned % If it is a LIST, use % \item{comp1 }{Description of 'comp1'} % \item{comp2 }{Description of 'comp2'} % ... %} %\references{ ~put references to the literature/web site here ~ } \author{Jochen Einbeck and John Hinde (2007)} \note{ The generic \R functions \code{update()}, \code{coefficients()}, \code{coef()}, \code{fitted()}, \code{fitted.values()}, and \code{df.residual()} can also be applied straightforwardly on all objects of class \code{glmmNPML} or \code{glmmGQ}. They are not listed above as they use the generic default functions and are not separately implemented. Explicit implementations exist for \code{predict}, \code{summary}, \code{print}, and \code{plot}, and these functions are explained in the corresponding help files. } \seealso{ \code{\link{summary.glmmNPML}}, \code{\link{predict.glmmNPML}}, \code{\link{family}}, \code{\link{model.matrix}}, \code{\link{update}}, \code{\link{coefficients}}, \code{\link{alldist}}. } %\examples{ %##---- Should be DIRECTLY executable !! ---- %##-- ==> Define data, use random, %##-- or do help(data=index) for the standard data sets. 
% %## The function is currently defined as %function(object, ...) { % object$family % } %} \keyword{ models }% at least one, from doc/KEYWORDS \keyword{ regression }% __ONLY ONE__ keyword per line
/man/family.glmmNPML.Rd
no_license
cran/npmlreg
R
false
false
2,227
rd
\name{family.glmmNPML} \alias{family.glmmNPML} \alias{family.glmmGQ} \alias{model.matrix.glmmNPML} \alias{model.matrix.glmmGQ} %- Also NEED an '\alias' for EACH other topic documented here. \title{Methods for objects of class glmmNPML or glmmGQ} \description{ Methods for the generic \code{family} and \code{model.matrix} functions } \usage{ \method{family}{glmmNPML}(object, ...) \method{family}{glmmGQ}(object, ...) \method{model.matrix}{glmmNPML}(object, ...) \method{model.matrix}{glmmGQ}(object, ...) } %- maybe also 'usage' for other objects documented here. \arguments{ \item{object}{ object of class \code{glmmNPML} or \code{glmmGQ}. } \item{\dots}{ further arguments, ensuring compability with generic functions. } } %\details{ % ~~ If necessary, more details than the description above ~~ %} %\value{ % ~Describe the value returned % If it is a LIST, use % \item{comp1 }{Description of 'comp1'} % \item{comp2 }{Description of 'comp2'} % ... %} %\references{ ~put references to the literature/web site here ~ } \author{Jochen Einbeck and John Hinde (2007)} \note{ The generic \R functions \code{update()}, \code{coefficients()}, \code{coef()}, \code{fitted()}, \code{fitted.values()}, and \code{df.residual()} can also be applied straightforwardly on all objects of class \code{glmmNPML} or \code{glmmGQ}. They are not listed above as they use the generic default functions and are not separately implemented. Explicit implementations exist for \code{predict}, \code{summary}, \code{print}, and \code{plot}, and these functions are explained in the corresponding help files. } \seealso{ \code{\link{summary.glmmNPML}}, \code{\link{predict.glmmNPML}}, \code{\link{family}}, \code{\link{model.matrix}}, \code{\link{update}}, \code{\link{coefficients}}, \code{\link{alldist}}. } %\examples{ %##---- Should be DIRECTLY executable !! ---- %##-- ==> Define data, use random, %##-- or do help(data=index) for the standard data sets. 
% %## The function is currently defined as %function(object, ...) { % object$family % } %} \keyword{ models }% at least one, from doc/KEYWORDS \keyword{ regression }% __ONLY ONE__ keyword per line
/TP Regularización Big Data UdeSA - AGUIAR_OVIEDO_PECORARI.r
no_license
nioviedo/Big-Data
R
false
false
9,447
r
source('http://bioconductor.org/biocLite.R') biocLite('DESeq2') biocLite("edgeR") biocLite("limma") biocLite("statmod") biocLite("affycoretools") biocLite("ReportingTools") library("edgeR") library("statmod") library('DESeq2') countdata_1 <- read.delim("Orthoblast_RSEM_merged_matrix", header=TRUE, row.names="library") #read in the file of the count data and call it countdata, row.names tells the name in the top left cell, the gene names countdata_1 = round(countdata_1) keep <- rowSums(cpm(countdata_1)>1) >=4 # keeps rows (genes) where at least 4 columns (libraries) have at least 1 count per million. This means that if a gene is only expressed in say one treatment (which has three replicates), this gene will not be thrown out of the analysis countdata<- countdata_1[keep,] #formatting for organizing the kept rows that summed to at least 1 cpm in the step above countdata=round(countdata) coldata <- read.delim("../column_2transcriptomes.txt", header=TRUE, row.names=1) #select one from below: countdata = countdata[, c(1,2,3,4,5,6,7,8,9,10,11,12)] #select on timepoint one (day 9) countdata = countdata[, c(1,2,5,6,9,11)] #select timepoint one GOL only countdata = countdata[, c(3,4,7,8,10,12)] #select timepoint one PAC only countdata = countdata[, c(13,14,15,16,17,18,19,20,21,22,23,24)] #select on timepoint two (day 29) countdata = countdata[, c(15,16,19,20,22,24)] #select timepoint two PAC only countdata = countdata[, c(13,14,17,18,21,23)] #select timpoint two GOL only countdata = countdata[, c(1,2,5,6,9,11,13,14,17,18,21,23)] #select all GOL (TP one and two, L and H) countdata = countdata[, c(3,4,7,8,10,12,15,16,19,20,22,24)] #select all PAC (Tp1&2, H&L) ddsFullCountTable <- DESeqDataSetFromMatrix(countData=countdata, colData=coldata, design = ~ TREATMENT) ddsFull <- DESeq(ddsFullCountTable) # this is the analysis! 
head(ddsFull) res=results(ddsFull) res resOrdered=res[order(res$padj),] head(resOrdered) sum(res$padj<0.05, na.rm=TRUE) rld <-rlogTransformation(ddsFull) write.table(assay(rld), "DEG_logtransformed_Orthoblast_matrix", row.names = TRUE, col.names = NA, quote=FALSE, sep = "\t") head(assay(rld)) hist(assay(rld)) #install.packages("gplots") library(gplots) #install.packages("RColorBrewer") library("RColorBrewer") library( "genefilter" ) topVarGenes <- head( order( rowVars( assay(rld) ), decreasing=TRUE ), 40) heatmap.2(assay(rld)[topVarGenes, ], scale="row", trace="none", dendrogram="both", key=TRUE, keysize = 1.5, margins =c(3,11), density.info = "density", col = colorRampPalette( rev(brewer.pal(9, "RdBu")) )(255)) par(lend = 1) # square line ends for the color legend legend("topright", # location of the legend on the heatmap plot legend = heatmap.2( assay(rld)[ topVarGenes, ], # category labels col = colorRampPalette( rev(brewer.pal(9, "RdBu")) )(255), # color key lty= 1, # line style lwd = 10)) # line width write.table(resOrdered, "output_DEG_GOLtp2.txt")
/DEG Analysis/DeSeq2.R
no_license
JoannaGriffiths/Coral-population-responses-to-acidification
R
false
false
3,086
r
source('http://bioconductor.org/biocLite.R') biocLite('DESeq2') biocLite("edgeR") biocLite("limma") biocLite("statmod") biocLite("affycoretools") biocLite("ReportingTools") library("edgeR") library("statmod") library('DESeq2') countdata_1 <- read.delim("Orthoblast_RSEM_merged_matrix", header=TRUE, row.names="library") #read in the file of the count data and call it countdata, row.names tells the name in the top left cell, the gene names countdata_1 = round(countdata_1) keep <- rowSums(cpm(countdata_1)>1) >=4 # keeps rows (genes) where at least 4 columns (libraries) have at least 1 count per million. This means that if a gene is only expressed in say one treatment (which has three replicates), this gene will not be thrown out of the analysis countdata<- countdata_1[keep,] #formatting for organizing the kept rows that summed to at least 1 cpm in the step above countdata=round(countdata) coldata <- read.delim("../column_2transcriptomes.txt", header=TRUE, row.names=1) #select one from below: countdata = countdata[, c(1,2,3,4,5,6,7,8,9,10,11,12)] #select on timepoint one (day 9) countdata = countdata[, c(1,2,5,6,9,11)] #select timepoint one GOL only countdata = countdata[, c(3,4,7,8,10,12)] #select timepoint one PAC only countdata = countdata[, c(13,14,15,16,17,18,19,20,21,22,23,24)] #select on timepoint two (day 29) countdata = countdata[, c(15,16,19,20,22,24)] #select timepoint two PAC only countdata = countdata[, c(13,14,17,18,21,23)] #select timpoint two GOL only countdata = countdata[, c(1,2,5,6,9,11,13,14,17,18,21,23)] #select all GOL (TP one and two, L and H) countdata = countdata[, c(3,4,7,8,10,12,15,16,19,20,22,24)] #select all PAC (Tp1&2, H&L) ddsFullCountTable <- DESeqDataSetFromMatrix(countData=countdata, colData=coldata, design = ~ TREATMENT) ddsFull <- DESeq(ddsFullCountTable) # this is the analysis! 
head(ddsFull) res=results(ddsFull) res resOrdered=res[order(res$padj),] head(resOrdered) sum(res$padj<0.05, na.rm=TRUE) rld <-rlogTransformation(ddsFull) write.table(assay(rld), "DEG_logtransformed_Orthoblast_matrix", row.names = TRUE, col.names = NA, quote=FALSE, sep = "\t") head(assay(rld)) hist(assay(rld)) #install.packages("gplots") library(gplots) #install.packages("RColorBrewer") library("RColorBrewer") library( "genefilter" ) topVarGenes <- head( order( rowVars( assay(rld) ), decreasing=TRUE ), 40) heatmap.2(assay(rld)[topVarGenes, ], scale="row", trace="none", dendrogram="both", key=TRUE, keysize = 1.5, margins =c(3,11), density.info = "density", col = colorRampPalette( rev(brewer.pal(9, "RdBu")) )(255)) par(lend = 1) # square line ends for the color legend legend("topright", # location of the legend on the heatmap plot legend = heatmap.2( assay(rld)[ topVarGenes, ], # category labels col = colorRampPalette( rev(brewer.pal(9, "RdBu")) )(255), # color key lty= 1, # line style lwd = 10)) # line width write.table(resOrdered, "output_DEG_GOLtp2.txt")
#' Sort rows using dplyr #' #' `step_arrange` creates a *specification* of a recipe step #' that will sort rows using [dplyr::arrange()]. #' #' @inheritParams step_center #' @param ... Comma separated list of unquoted variable names. #' Use `desc()`` to sort a variable in descending order. See #' [dplyr::arrange()] for more details. For the `tidy` #' method, these are not currently used. #' @param role Not used by this step since no new variables are #' created. #' @param inputs Quosure of values given by `...`. #' @return An updated version of `recipe` with the new step #' added to the sequence of existing steps (if any). For the #' `tidy` method, a tibble with columns `terms` which #' contains the sorting variable(s) or expression(s). The #' expressions are text representations and are not parsable. #' @details When an object in the user's global environment is #' referenced in the expression defining the new variable(s), #' it is a good idea to use quasiquotation (e.g. `!!!`) #' to embed the value of the object in the expression (to #' be portable between sessions). See the examples. #' @keywords datagen #' @concept preprocessing #' @export #' @examples #' rec <- recipe( ~ ., data = iris) %>% #' step_arrange(desc(Sepal.Length), 1/Petal.Length) #' #' prepped <- prep(rec, training = iris %>% slice(1:75)) #' tidy(prepped, number = 1) #' #' library(dplyr) #' #' dplyr_train <- #' iris %>% #' as_tibble() %>% #' slice(1:75) %>% #' dplyr::arrange(desc(Sepal.Length), 1/Petal.Length) #' #' rec_train <- bake(prepped, new_data = NULL) #' all.equal(dplyr_train, rec_train) #' #' dplyr_test <- #' iris %>% #' as_tibble() %>% #' slice(76:150) %>% #' dplyr::arrange(desc(Sepal.Length), 1/Petal.Length) #' rec_test <- bake(prepped, iris %>% slice(76:150)) #' all.equal(dplyr_test, rec_test) #' #' # When you have variables/expressions, you can create a #' # list of symbols with `rlang::syms()`` and splice them in #' # the call with `!!!`. 
See https://tidyeval.tidyverse.org #' #' sort_vars <- c("Sepal.Length", "Petal.Length") #' #' qq_rec <- #' recipe( ~ ., data = iris) %>% #' # Embed the `values` object in the call using !!! #' step_arrange(!!!syms(sort_vars)) %>% #' prep(training = iris) #' #' tidy(qq_rec, number = 1) step_arrange <- function( recipe, ..., role = NA, trained = FALSE, inputs = NULL, skip = FALSE, id = rand_id("arrange") ) { inputs <- enquos(...) add_step( recipe, step_arrange_new( terms = terms, trained = trained, role = role, inputs = inputs, skip = skip, id = id ) ) } step_arrange_new <- function(terms, role, trained, inputs, skip, id) { step( subclass = "arrange", terms = terms, role = role, trained = trained, inputs = inputs, skip = skip, id = id ) } #' @export prep.step_arrange <- function(x, training, info = NULL, ...) { step_arrange_new( terms = x$terms, trained = TRUE, role = x$role, inputs = x$inputs, skip = x$skip, id = x$id ) } #' @export bake.step_arrange <- function(object, new_data, ...) { dplyr::arrange(new_data, !!!object$inputs) } print.step_arrange <- function(x, width = max(20, options()$width - 35), ...) { cat("Row arrangement") if (x$trained) { cat(" [trained]\n") } else { cat("\n") } invisible(x) } #' @rdname step_arrange #' @param x A `step_arrange` object #' @export tidy.step_arrange <- function(x, ...) { cond_expr <- map(x$inputs, quo_get_expr) cond_expr <- map_chr(cond_expr, quo_text, width = options()$width, nlines = 1) tibble( terms = cond_expr, id = rep(x$id, length(x$inputs)) ) }
/R/arrange.R
permissive
labouz/recipes
R
false
false
3,722
r
#' Sort rows using dplyr #' #' `step_arrange` creates a *specification* of a recipe step #' that will sort rows using [dplyr::arrange()]. #' #' @inheritParams step_center #' @param ... Comma separated list of unquoted variable names. #' Use `desc()`` to sort a variable in descending order. See #' [dplyr::arrange()] for more details. For the `tidy` #' method, these are not currently used. #' @param role Not used by this step since no new variables are #' created. #' @param inputs Quosure of values given by `...`. #' @return An updated version of `recipe` with the new step #' added to the sequence of existing steps (if any). For the #' `tidy` method, a tibble with columns `terms` which #' contains the sorting variable(s) or expression(s). The #' expressions are text representations and are not parsable. #' @details When an object in the user's global environment is #' referenced in the expression defining the new variable(s), #' it is a good idea to use quasiquotation (e.g. `!!!`) #' to embed the value of the object in the expression (to #' be portable between sessions). See the examples. #' @keywords datagen #' @concept preprocessing #' @export #' @examples #' rec <- recipe( ~ ., data = iris) %>% #' step_arrange(desc(Sepal.Length), 1/Petal.Length) #' #' prepped <- prep(rec, training = iris %>% slice(1:75)) #' tidy(prepped, number = 1) #' #' library(dplyr) #' #' dplyr_train <- #' iris %>% #' as_tibble() %>% #' slice(1:75) %>% #' dplyr::arrange(desc(Sepal.Length), 1/Petal.Length) #' #' rec_train <- bake(prepped, new_data = NULL) #' all.equal(dplyr_train, rec_train) #' #' dplyr_test <- #' iris %>% #' as_tibble() %>% #' slice(76:150) %>% #' dplyr::arrange(desc(Sepal.Length), 1/Petal.Length) #' rec_test <- bake(prepped, iris %>% slice(76:150)) #' all.equal(dplyr_test, rec_test) #' #' # When you have variables/expressions, you can create a #' # list of symbols with `rlang::syms()`` and splice them in #' # the call with `!!!`. 
See https://tidyeval.tidyverse.org #' #' sort_vars <- c("Sepal.Length", "Petal.Length") #' #' qq_rec <- #' recipe( ~ ., data = iris) %>% #' # Embed the `values` object in the call using !!! #' step_arrange(!!!syms(sort_vars)) %>% #' prep(training = iris) #' #' tidy(qq_rec, number = 1) step_arrange <- function( recipe, ..., role = NA, trained = FALSE, inputs = NULL, skip = FALSE, id = rand_id("arrange") ) { inputs <- enquos(...) add_step( recipe, step_arrange_new( terms = terms, trained = trained, role = role, inputs = inputs, skip = skip, id = id ) ) } step_arrange_new <- function(terms, role, trained, inputs, skip, id) { step( subclass = "arrange", terms = terms, role = role, trained = trained, inputs = inputs, skip = skip, id = id ) } #' @export prep.step_arrange <- function(x, training, info = NULL, ...) { step_arrange_new( terms = x$terms, trained = TRUE, role = x$role, inputs = x$inputs, skip = x$skip, id = x$id ) } #' @export bake.step_arrange <- function(object, new_data, ...) { dplyr::arrange(new_data, !!!object$inputs) } print.step_arrange <- function(x, width = max(20, options()$width - 35), ...) { cat("Row arrangement") if (x$trained) { cat(" [trained]\n") } else { cat("\n") } invisible(x) } #' @rdname step_arrange #' @param x A `step_arrange` object #' @export tidy.step_arrange <- function(x, ...) { cond_expr <- map(x$inputs, quo_get_expr) cond_expr <- map_chr(cond_expr, quo_text, width = options()$width, nlines = 1) tibble( terms = cond_expr, id = rep(x$id, length(x$inputs)) ) }
#################################################### # # # IOWA MODEL # # # # Author: Ana Valdivia # # Date: April 2017 # # # #################################################### ## Libraries library(tm) library(SnowballC) library(caret) library(data.table) library(xgboost) library(plyr) library(pROC) ## Read DataSet DataSet <- read.csv("./data/DataSet_ALL_SAMs.csv") DataSet$X <- NULL summary(DataSet) DataSet_Weights <- DataSet # Function for weights weightAbs <- function(x){1-abs(x-0.5)} ## Compute OWA weights DataSet_Weights$BingWeights <- NA DataSet_Weights$CoreNLPWeights <- NA DataSet_Weights$MCScaleWeights <- NA DataSet_Weights$MicrosoftWeights <- NA DataSet_Weights$SentiStrWeights <- NA DataSet_Weights$VADERWeights <- NA for(i in 1:nrow(DataSet)){ weigthAbs_aux <- weightAbs(DataSet[i, 3:8]) DataSet_Weights$BingWeights[i] <- weightAbs(DataSet[i, 3]) DataSet_Weights$CoreNLPWeights[i] <- weightAbs(DataSet[i, 4]) DataSet_Weights$MCScaleWeights[i] <- weightAbs(DataSet[i, 5]) DataSet_Weights$MicrosoftWeights[i] <- weightAbs(DataSet[i, 6]) DataSet_Weights$SentiStrWeights[i] <- weightAbs(DataSet[i, 7]) DataSet_Weights$VADERWeights[i] <- weightAbs(DataSet[i, 8]) DataSet$ProNeuSentiment[i] <- sum((weigthAbs_aux/sum(weigthAbs_aux))*DataSet[i, 3:8]) DataSet_Weights$ProNeuSentiment[i] <- sum((weigthAbs_aux/sum(weigthAbs_aux))*DataSet[i, 3:8]) } write.csv(DataSet_Weights, paste0("./results/ProNeutrality_Repeated/DataSet_Weights.csv"), row.names = FALSE) DataSet$ProNeuSet <- ifelse(DataSet$ProNeuSentiment > 0.4 & DataSet$ProNeuSentiment < 0.6, 1, 0) aggregate(DataSet$ProNeuSet ~ DataSet$Corpus, FUN=sum) DataSet$ProNeuSentimentName <- ifelse(DataSet$ProNeuSentiment >= 0.6, "positive", ifelse(DataSet$ProNeuSentiment <= 0.4, "negative", "neutral")) DataSet <- DataSet[, c(1, 2, 15, 17)] DataSet$ProNeuSentimentName <- factor(DataSet$ProNeuSentimentName, levels = c("positive", "neutral", "negative")) ## Transfrom DataSet into Document-Term matrix # function word.tfidf (select 
most important words regarding to a sentiment) word.tfidf <- function(document.vector, sparsity = .999){ # construct corpus temp.corpus <- Corpus(VectorSource(document.vector)) # construct tf matrix and remove sparse terms temp.tf <- DocumentTermMatrix(temp.corpus, control = list(stopwords = stopwords("SMART"), stemming=TRUE, removePunctuation = TRUE, removeNumbers = TRUE, weighting = function(x) weightTfIdf(x, normalize = FALSE))) temp.tf <- removeSparseTerms(temp.tf, sparsity) temp.tf <- as.matrix(temp.tf) docTerm.df <- as.data.frame(temp.tf) # construct word frequency df freq.df <- colMeans(temp.tf) freq.df <- data.frame(word = names(freq.df), freq = freq.df) rownames(freq.df) <- NULL list(Freq = freq.df, Temp = docTerm.df) } totCorpus <- nlevels(DataSet$Corpus) # total number of different Corpus SentimentTools <- c("Bing", "CoreNLP", "MC", "Microsoft", "SentiStr", "VADER" ) totSentTools <- 1 # total number of tool sentiments Models <- c("xgboost", "svm") for(i in 1:totCorpus){ print("%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%") print(i) print("%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%") DataSetAux <- DataSet[DataSet$Corpus == levels(DataSet$Corpus)[i], ] word.tfidf.pos <- word.tfidf(DataSetAux[DataSetAux[, 4] == "positive", 1])$Freq # selecting positive reviews (First column) word.tfidf.neg <- word.tfidf(DataSetAux[DataSetAux[, 4] == "negative", 1])$Freq # selecting negative reviews (Second column) word.tfidf.pos <- as.data.table(word.tfidf.pos) word.tfidf.pos <- word.tfidf.pos[order(freq, decreasing = TRUE),] word.tfidf.neg <- as.data.table(word.tfidf.neg) word.tfidf.neg <- word.tfidf.neg[order(freq, decreasing = TRUE),] # Delete STOPWORDS wordPos <- word.tfidf.pos[!(word.tfidf.pos$word %in% stopwords("SMART")),] wordNeg <- word.tfidf.neg[!(word.tfidf.neg$word %in% stopwords("SMART")),] # Delete common WORDS wordPos <- wordPos[!(wordPos$word %in% wordNeg$word),] wordNeg <- wordNeg[!(wordNeg$word %in% wordPos$word),] rm(word.tfidf.pos) rm(word.tfidf.neg) # Order and select most 25 
popular words wordPos <- wordPos[order(freq, decreasing = TRUE),] wordPos <- wordPos[1:10,] wordNeg <- wordNeg[order(freq, decreasing = TRUE),] wordNeg <- wordNeg[1:10,] # Merge wordPos and wordNeg wordPosNeg <- merge(wordPos, wordNeg, by = c("word", "freq"), all = TRUE) # Build Document-Term Matrix DataSetAux_PosNeg <- DataSetAux[DataSetAux[,4] != "neutral",] DataSetAux_DTM <- word.tfidf(DataSetAux_PosNeg$Text)$Temp DataSetAux_DTM <- as.data.frame(ifelse(DataSetAux_DTM > 0, 1, 0)) DataSetAux_DTM <- DataSetAux_DTM[, colnames(DataSetAux_DTM) %in% wordPosNeg$word] DataSetAux_DTM <- cbind(DataSetAux_DTM, DataSetAux[DataSetAux[,4] != "neutral",4]) setnames(DataSetAux_DTM, "DataSetAux[DataSetAux[, 4] != \"neutral\", 4]", "SentimentOWA") # Write the DataSet # assign(paste0("DataSet_", levels(DataSet$Corpus)[i], "_SentimentOWA"), DataSetAux_DTM) # write.csv(get(paste0("DataSet_", levels(DataSet$Corpus)[i], "_SentimentOWA")), # paste0("./data/Model_2_2/DataSet_", levels(DataSet$Corpus)[i], "_SentimentOWA.csv")) # # # Delete datasets rm(wordNeg, wordPos, wordPosNeg, DataSetAux_PosNeg, DataSetAux) # Building the model setnames(DataSetAux_DTM, "SentimentOWA", "SentimentClass") DataSetAux_DTM$SentimentClass <- factor(DataSetAux_DTM$SentimentClass) DataSetAux_DTM$id <- as.factor(row.names(DataSetAux_DTM)) colnames(DataSetAux_DTM)[1:(ncol(DataSetAux_DTM)-2)] <- paste0("X", 1:(ncol(DataSetAux_DTM)-2)) # Split data set set.seed(1234) ## 80% of the sample size sampleSize <- floor(0.8 * nrow(DataSetAux_DTM)) ## set the seed to make your partition reproductible trainIndex <- sample(seq_len(nrow(DataSetAux_DTM)), size = sampleSize) if(i == 8){ pos <- sample(seq_len(nrow(DataSetAux_DTM[DataSetAux_DTM$SentimentClass == "positive",])), size = 264) neg <- sample(seq_len(nrow(DataSetAux_DTM[DataSetAux_DTM$SentimentClass == "negative",])), size = 9) trainIndex <- c(pos, neg) rm(pos) rm(neg) } DataSet_TRAIN <- DataSetAux_DTM[trainIndex, ] DataSet_TRAIN_ID <- DataSet_TRAIN$id DataSet_TRAIN$id 
<- NULL DataSet_TEST <- DataSetAux_DTM[-trainIndex, ] DataSet_TEST_ID <- DataSet_TEST$id DataSet_TEST$id <- NULL DataSet_TEST_LABELS <- DataSet_TEST$SentimentClass DataSet_TEST$SentimentClass <- NULL # Create Data Partition trainIndex <- createDataPartition(DataSetAux_DTM$SentimentClass, times = 1, p = 0.85, list = FALSE) DataSet_TRAIN_aux <- DataSetAux_DTM[trainIndex, ] for(k in 1:length(Models)){ print(k) if(Models[k]=="xgboost"){ control <- trainControl(method = "cv", number = 5, classProbs = TRUE, summaryFunction = twoClassSummary, allowParallel = TRUE) set.seed(1234) predictors <- DataSet_TRAIN[,-ncol(DataSet_TRAIN)] # for(n in 1:ncol(predictors)){ # predictors[,n] <- as.numeric(as.character(predictors[,n])) # } label <- DataSet_TRAIN$SentimentClass ModelResults <- caret::train(x=predictors, y=label, method="xgbTree", trControl=control, metric="ROC") }else if(Models[k]=="svm"){ control <- trainControl(method = "cv", number = 5, classProbs = TRUE, summaryFunction = twoClassSummary, allowParallel = TRUE) set.seed(1234) ModelResults <- caret::train(SentimentClass ~ ., data=DataSet_TRAIN, method="svmLinear", scale = FALSE, trControl=control, metric="ROC") } # Print Results pathResults <- paste0("./results/ProNeutrality_Repeated/", levels(DataSet$Corpus)[i], "_", Models[k],".txt") sink(pathResults) print("%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%") print(paste0("EXPERIMENT: ", levels(DataSet$Corpus)[i], " ", "ProNeutrality", " ", Models[k])) print("%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%") print("DATA SET DESCRIPTION: ") print("Class Sentiment Distribution:") print("all:") print(summary(DataSetAux_DTM$SentimentClass)[1]) print(summary(DataSetAux_DTM$SentimentClass)[2]) print("in train:") print(summary(DataSet_TRAIN$SentimentClass)[1]) print(summary(DataSet_TRAIN$SentimentClass)[2]) print("Rows and features:") print(dim(DataSetAux_DTM)) print(paste0("in train:")) print(dim(DataSet_TRAIN)) 
print("%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%") print("MODEL RESULT: ") print(paste0("Model: ", Models[k])) print("Model summary: ") print(ModelResults) confMatrix <- confusionMatrix(ModelResults) print("Train Confusion Matrix: ") print(confMatrix) confTable <- (nrow(DataSet_TRAIN)/100)*(confMatrix$table) # Precision: tp/(tp+fp): precision <- confTable[2,2]/sum(confTable[2,1:2]) # Recall: tp/(tp + fn): spec <- confTable[2,2]/sum(confTable[1:2,2]) recall <- confTable[1,1]/sum(confTable[1:2,1]) # # F-Score: 2 * precision * recall /(precision + recall): # fscore <- 2 * precision * recall /(precision + recall) # # # G-measure: sqrt(precision*recall) # gmeasure <- sqrt(precision * recall) print(paste0("SensTRAIN: ", recall)) print(paste0("SpecTRAIN: ", spec)) # Prediction for(n in 1:ncol(DataSet_TEST)){ DataSet_TEST[,n] <- as.numeric(as.character(DataSet_TEST[,n])) } ModelResults_pred <- predict(ModelResults, DataSet_TEST) ModelResults_pred_prob <- predict(ModelResults, DataSet_TEST, type="prob") # print(confusionMatrix(xfbResults_pred[,2], LABELTripAdvisorFeaturesTEST)) # pred <- data.frame(DataSet_TEST_ID, DataSet_TEST_LABELS, ModelResults_pred[,2]) # setnames(pred, old=c("DataSet_TEST_ID", "DataSet_TEST_LABELS", "ModelResults_pred...2."), # new=c("id", "SentimentClass", "SentimentProb")) # pred$SentimentPred <- ifelse(pred$SentimentProb > 0.5, "positive", "negative") # Print confusion matrix test confTableTEST <- table(ModelResults_pred, DataSet_TEST_LABELS) print("ConfMatrix TEST: ") print(confTableTEST) # Precision: tp/(tp+fp): precision <- confTableTEST[2,2]/sum(confTableTEST[2,1:2]) # Recall: tp/(tp + fn): spec <- confTableTEST[2,2]/sum(confTableTEST[1:2,2]) recall <- confTableTEST[1,1]/sum(confTableTEST[1:2,1]) # # F-Score: 2 * precision * recall /(precision + recall): # fscore <- 2 * precision * recall /(precision + recall) # # # G-measure: sqrt(precision*recall) # gmeasure <- sqrt(precision * recall) print(auc(DataSet_TEST_LABELS, 
ModelResults_pred_prob$positive)) print(paste0("SensTEST: ", recall)) print(paste0("SpecTEST: ", spec)) sink() } }
/code/INFFUS/IOWA_ProNeutrality_Repetaed.R
no_license
llord1/phd
R
false
false
11,826
r
#################################################### # # # IOWA MODEL # # # # Author: Ana Valdivia # # Date: April 2017 # # # #################################################### ## Libraries library(tm) library(SnowballC) library(caret) library(data.table) library(xgboost) library(plyr) library(pROC) ## Read DataSet DataSet <- read.csv("./data/DataSet_ALL_SAMs.csv") DataSet$X <- NULL summary(DataSet) DataSet_Weights <- DataSet # Function for weights weightAbs <- function(x){1-abs(x-0.5)} ## Compute OWA weights DataSet_Weights$BingWeights <- NA DataSet_Weights$CoreNLPWeights <- NA DataSet_Weights$MCScaleWeights <- NA DataSet_Weights$MicrosoftWeights <- NA DataSet_Weights$SentiStrWeights <- NA DataSet_Weights$VADERWeights <- NA for(i in 1:nrow(DataSet)){ weigthAbs_aux <- weightAbs(DataSet[i, 3:8]) DataSet_Weights$BingWeights[i] <- weightAbs(DataSet[i, 3]) DataSet_Weights$CoreNLPWeights[i] <- weightAbs(DataSet[i, 4]) DataSet_Weights$MCScaleWeights[i] <- weightAbs(DataSet[i, 5]) DataSet_Weights$MicrosoftWeights[i] <- weightAbs(DataSet[i, 6]) DataSet_Weights$SentiStrWeights[i] <- weightAbs(DataSet[i, 7]) DataSet_Weights$VADERWeights[i] <- weightAbs(DataSet[i, 8]) DataSet$ProNeuSentiment[i] <- sum((weigthAbs_aux/sum(weigthAbs_aux))*DataSet[i, 3:8]) DataSet_Weights$ProNeuSentiment[i] <- sum((weigthAbs_aux/sum(weigthAbs_aux))*DataSet[i, 3:8]) } write.csv(DataSet_Weights, paste0("./results/ProNeutrality_Repeated/DataSet_Weights.csv"), row.names = FALSE) DataSet$ProNeuSet <- ifelse(DataSet$ProNeuSentiment > 0.4 & DataSet$ProNeuSentiment < 0.6, 1, 0) aggregate(DataSet$ProNeuSet ~ DataSet$Corpus, FUN=sum) DataSet$ProNeuSentimentName <- ifelse(DataSet$ProNeuSentiment >= 0.6, "positive", ifelse(DataSet$ProNeuSentiment <= 0.4, "negative", "neutral")) DataSet <- DataSet[, c(1, 2, 15, 17)] DataSet$ProNeuSentimentName <- factor(DataSet$ProNeuSentimentName, levels = c("positive", "neutral", "negative")) ## Transfrom DataSet into Document-Term matrix # function word.tfidf (select 
most important words regarding to a sentiment) word.tfidf <- function(document.vector, sparsity = .999){ # construct corpus temp.corpus <- Corpus(VectorSource(document.vector)) # construct tf matrix and remove sparse terms temp.tf <- DocumentTermMatrix(temp.corpus, control = list(stopwords = stopwords("SMART"), stemming=TRUE, removePunctuation = TRUE, removeNumbers = TRUE, weighting = function(x) weightTfIdf(x, normalize = FALSE))) temp.tf <- removeSparseTerms(temp.tf, sparsity) temp.tf <- as.matrix(temp.tf) docTerm.df <- as.data.frame(temp.tf) # construct word frequency df freq.df <- colMeans(temp.tf) freq.df <- data.frame(word = names(freq.df), freq = freq.df) rownames(freq.df) <- NULL list(Freq = freq.df, Temp = docTerm.df) } totCorpus <- nlevels(DataSet$Corpus) # total number of different Corpus SentimentTools <- c("Bing", "CoreNLP", "MC", "Microsoft", "SentiStr", "VADER" ) totSentTools <- 1 # total number of tool sentiments Models <- c("xgboost", "svm") for(i in 1:totCorpus){ print("%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%") print(i) print("%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%") DataSetAux <- DataSet[DataSet$Corpus == levels(DataSet$Corpus)[i], ] word.tfidf.pos <- word.tfidf(DataSetAux[DataSetAux[, 4] == "positive", 1])$Freq # selecting positive reviews (First column) word.tfidf.neg <- word.tfidf(DataSetAux[DataSetAux[, 4] == "negative", 1])$Freq # selecting negative reviews (Second column) word.tfidf.pos <- as.data.table(word.tfidf.pos) word.tfidf.pos <- word.tfidf.pos[order(freq, decreasing = TRUE),] word.tfidf.neg <- as.data.table(word.tfidf.neg) word.tfidf.neg <- word.tfidf.neg[order(freq, decreasing = TRUE),] # Delete STOPWORDS wordPos <- word.tfidf.pos[!(word.tfidf.pos$word %in% stopwords("SMART")),] wordNeg <- word.tfidf.neg[!(word.tfidf.neg$word %in% stopwords("SMART")),] # Delete common WORDS wordPos <- wordPos[!(wordPos$word %in% wordNeg$word),] wordNeg <- wordNeg[!(wordNeg$word %in% wordPos$word),] rm(word.tfidf.pos) rm(word.tfidf.neg) # Order and select most 25 
popular words wordPos <- wordPos[order(freq, decreasing = TRUE),] wordPos <- wordPos[1:10,] wordNeg <- wordNeg[order(freq, decreasing = TRUE),] wordNeg <- wordNeg[1:10,] # Merge wordPos and wordNeg wordPosNeg <- merge(wordPos, wordNeg, by = c("word", "freq"), all = TRUE) # Build Document-Term Matrix DataSetAux_PosNeg <- DataSetAux[DataSetAux[,4] != "neutral",] DataSetAux_DTM <- word.tfidf(DataSetAux_PosNeg$Text)$Temp DataSetAux_DTM <- as.data.frame(ifelse(DataSetAux_DTM > 0, 1, 0)) DataSetAux_DTM <- DataSetAux_DTM[, colnames(DataSetAux_DTM) %in% wordPosNeg$word] DataSetAux_DTM <- cbind(DataSetAux_DTM, DataSetAux[DataSetAux[,4] != "neutral",4]) setnames(DataSetAux_DTM, "DataSetAux[DataSetAux[, 4] != \"neutral\", 4]", "SentimentOWA") # Write the DataSet # assign(paste0("DataSet_", levels(DataSet$Corpus)[i], "_SentimentOWA"), DataSetAux_DTM) # write.csv(get(paste0("DataSet_", levels(DataSet$Corpus)[i], "_SentimentOWA")), # paste0("./data/Model_2_2/DataSet_", levels(DataSet$Corpus)[i], "_SentimentOWA.csv")) # # # Delete datasets rm(wordNeg, wordPos, wordPosNeg, DataSetAux_PosNeg, DataSetAux) # Building the model setnames(DataSetAux_DTM, "SentimentOWA", "SentimentClass") DataSetAux_DTM$SentimentClass <- factor(DataSetAux_DTM$SentimentClass) DataSetAux_DTM$id <- as.factor(row.names(DataSetAux_DTM)) colnames(DataSetAux_DTM)[1:(ncol(DataSetAux_DTM)-2)] <- paste0("X", 1:(ncol(DataSetAux_DTM)-2)) # Split data set set.seed(1234) ## 80% of the sample size sampleSize <- floor(0.8 * nrow(DataSetAux_DTM)) ## set the seed to make your partition reproductible trainIndex <- sample(seq_len(nrow(DataSetAux_DTM)), size = sampleSize) if(i == 8){ pos <- sample(seq_len(nrow(DataSetAux_DTM[DataSetAux_DTM$SentimentClass == "positive",])), size = 264) neg <- sample(seq_len(nrow(DataSetAux_DTM[DataSetAux_DTM$SentimentClass == "negative",])), size = 9) trainIndex <- c(pos, neg) rm(pos) rm(neg) } DataSet_TRAIN <- DataSetAux_DTM[trainIndex, ] DataSet_TRAIN_ID <- DataSet_TRAIN$id DataSet_TRAIN$id 
<- NULL DataSet_TEST <- DataSetAux_DTM[-trainIndex, ] DataSet_TEST_ID <- DataSet_TEST$id DataSet_TEST$id <- NULL DataSet_TEST_LABELS <- DataSet_TEST$SentimentClass DataSet_TEST$SentimentClass <- NULL # Create Data Partition trainIndex <- createDataPartition(DataSetAux_DTM$SentimentClass, times = 1, p = 0.85, list = FALSE) DataSet_TRAIN_aux <- DataSetAux_DTM[trainIndex, ] for(k in 1:length(Models)){ print(k) if(Models[k]=="xgboost"){ control <- trainControl(method = "cv", number = 5, classProbs = TRUE, summaryFunction = twoClassSummary, allowParallel = TRUE) set.seed(1234) predictors <- DataSet_TRAIN[,-ncol(DataSet_TRAIN)] # for(n in 1:ncol(predictors)){ # predictors[,n] <- as.numeric(as.character(predictors[,n])) # } label <- DataSet_TRAIN$SentimentClass ModelResults <- caret::train(x=predictors, y=label, method="xgbTree", trControl=control, metric="ROC") }else if(Models[k]=="svm"){ control <- trainControl(method = "cv", number = 5, classProbs = TRUE, summaryFunction = twoClassSummary, allowParallel = TRUE) set.seed(1234) ModelResults <- caret::train(SentimentClass ~ ., data=DataSet_TRAIN, method="svmLinear", scale = FALSE, trControl=control, metric="ROC") } # Print Results pathResults <- paste0("./results/ProNeutrality_Repeated/", levels(DataSet$Corpus)[i], "_", Models[k],".txt") sink(pathResults) print("%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%") print(paste0("EXPERIMENT: ", levels(DataSet$Corpus)[i], " ", "ProNeutrality", " ", Models[k])) print("%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%") print("DATA SET DESCRIPTION: ") print("Class Sentiment Distribution:") print("all:") print(summary(DataSetAux_DTM$SentimentClass)[1]) print(summary(DataSetAux_DTM$SentimentClass)[2]) print("in train:") print(summary(DataSet_TRAIN$SentimentClass)[1]) print(summary(DataSet_TRAIN$SentimentClass)[2]) print("Rows and features:") print(dim(DataSetAux_DTM)) print(paste0("in train:")) print(dim(DataSet_TRAIN)) 
print("%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%") print("MODEL RESULT: ") print(paste0("Model: ", Models[k])) print("Model summary: ") print(ModelResults) confMatrix <- confusionMatrix(ModelResults) print("Train Confusion Matrix: ") print(confMatrix) confTable <- (nrow(DataSet_TRAIN)/100)*(confMatrix$table) # Precision: tp/(tp+fp): precision <- confTable[2,2]/sum(confTable[2,1:2]) # Recall: tp/(tp + fn): spec <- confTable[2,2]/sum(confTable[1:2,2]) recall <- confTable[1,1]/sum(confTable[1:2,1]) # # F-Score: 2 * precision * recall /(precision + recall): # fscore <- 2 * precision * recall /(precision + recall) # # # G-measure: sqrt(precision*recall) # gmeasure <- sqrt(precision * recall) print(paste0("SensTRAIN: ", recall)) print(paste0("SpecTRAIN: ", spec)) # Prediction for(n in 1:ncol(DataSet_TEST)){ DataSet_TEST[,n] <- as.numeric(as.character(DataSet_TEST[,n])) } ModelResults_pred <- predict(ModelResults, DataSet_TEST) ModelResults_pred_prob <- predict(ModelResults, DataSet_TEST, type="prob") # print(confusionMatrix(xfbResults_pred[,2], LABELTripAdvisorFeaturesTEST)) # pred <- data.frame(DataSet_TEST_ID, DataSet_TEST_LABELS, ModelResults_pred[,2]) # setnames(pred, old=c("DataSet_TEST_ID", "DataSet_TEST_LABELS", "ModelResults_pred...2."), # new=c("id", "SentimentClass", "SentimentProb")) # pred$SentimentPred <- ifelse(pred$SentimentProb > 0.5, "positive", "negative") # Print confusion matrix test confTableTEST <- table(ModelResults_pred, DataSet_TEST_LABELS) print("ConfMatrix TEST: ") print(confTableTEST) # Precision: tp/(tp+fp): precision <- confTableTEST[2,2]/sum(confTableTEST[2,1:2]) # Recall: tp/(tp + fn): spec <- confTableTEST[2,2]/sum(confTableTEST[1:2,2]) recall <- confTableTEST[1,1]/sum(confTableTEST[1:2,1]) # # F-Score: 2 * precision * recall /(precision + recall): # fscore <- 2 * precision * recall /(precision + recall) # # # G-measure: sqrt(precision*recall) # gmeasure <- sqrt(precision * recall) print(auc(DataSet_TEST_LABELS, 
ModelResults_pred_prob$positive)) print(paste0("SensTEST: ", recall)) print(paste0("SpecTEST: ", spec)) sink() } }
% Generated by roxygen2: do not edit by hand % Please edit documentation in R/maketikz.R \name{maketikz} \alias{maketikz} \title{Generate Latex tikz code for plotting a temporal DAG or PDAG.} \usage{ maketikz( model, xjit = 2, yjit = 2, markperiods = TRUE, xpgap = 4, annotateEdges = NULL, addAxis = TRUE, varLabels = NULL, periodLabels = NULL, annotationLabels = NULL, clipboard = TRUE, colorAnnotate = NULL ) } \arguments{ \item{model}{\code{tpdag} or \code{tamat} object to plot.} \item{xjit}{How much should nodes within a period be jittered horizontally.} \item{yjit}{Vertical distance between nodes within a period.} \item{markperiods}{If \code{TRUE}, gray boxes are drawn behind each period.} \item{xpgap}{Horizontal gap between different periods.} \item{annotateEdges}{If \code{TRUE}, add a text annotation to edges. If \code{annotationlabels} are supplied, these labels will be used. Otherwise, the value in the inputted adjacency matrix corresponding to the edge will be used.} \item{addAxis}{If \code{TRUE}, a horizontal axis with period labels are added.} \item{varLabels}{Optional labels for nodes (variables). Should be given as a named list, where the name is the variable name, and the entry is the label, e.g. \code{list(vname = "Label for vname")}.} \item{periodLabels}{Optional labels for periods. Should be given as a named list, where the name is the period name (as stored in the \code{tamat}), and the entry is the label, e.g. \code{list(periodname = "Label for period")}.} \item{annotationLabels}{Optional labels for edge annotations. Only used if \code{annotateEdges = TRUE}. Should be given as a named list, where the name is the edge annotation (as stored in the \code{tamat}), and the entry is the label, e.g. 
\code{list(h = "High")}.} \item{clipboard}{If \code{TRUE}, the tikz code is not printed, but instead copied to the clipboard, so it can easily be pasted into a Latex document.} \item{colorAnnotate}{Named list of colors to use to mark edge annotations instead of labels. This overrules \code{annotateEdges} and both are not available at the same time. The list should be given with annotations as names and colors as entries, e.g. \code{list(h = "blue")}.} } \value{ Silently returns a character vector with lines of tikz code. The function furthermore has a side-effect. If \code{clipboard = TRUE}, the side-effect is that the tikz code is also copied to the clipboard. If \code{clipboard = FALSE}, the tikz code is instead printed in the console. } \description{ Generate Latex tikz code for plotting a temporal DAG or PDAG. } \details{ Note that it is necessary to read in relevant tikz libraries in the Latex preamble. The relevant lines of code are (depending a bit on parameter settings): \cr \code{\\usepackage{tikz}} \cr \code{\\usetikzlibrary{arrows,shapes,snakes,automata,backgrounds,petri}} \cr \code{\\usepackage{pgfplots}} }
/man/maketikz.Rd
no_license
annennenne/causalDisco
R
false
true
2,910
rd
% Generated by roxygen2: do not edit by hand % Please edit documentation in R/maketikz.R \name{maketikz} \alias{maketikz} \title{Generate Latex tikz code for plotting a temporal DAG or PDAG.} \usage{ maketikz( model, xjit = 2, yjit = 2, markperiods = TRUE, xpgap = 4, annotateEdges = NULL, addAxis = TRUE, varLabels = NULL, periodLabels = NULL, annotationLabels = NULL, clipboard = TRUE, colorAnnotate = NULL ) } \arguments{ \item{model}{\code{tpdag} or \code{tamat} object to plot.} \item{xjit}{How much should nodes within a period be jittered horizontally.} \item{yjit}{Vertical distance between nodes within a period.} \item{markperiods}{If \code{TRUE}, gray boxes are drawn behind each period.} \item{xpgap}{Horizontal gap between different periods.} \item{annotateEdges}{If \code{TRUE}, add a text annotation to edges. If \code{annotationlabels} are supplied, these labels will be used. Otherwise, the value in the inputted adjacency matrix corresponding to the edge will be used.} \item{addAxis}{If \code{TRUE}, a horizontal axis with period labels are added.} \item{varLabels}{Optional labels for nodes (variables). Should be given as a named list, where the name is the variable name, and the entry is the label, e.g. \code{list(vname = "Label for vname")}.} \item{periodLabels}{Optional labels for periods. Should be given as a named list, where the name is the period name (as stored in the \code{tamat}), and the entry is the label, e.g. \code{list(periodname = "Label for period")}.} \item{annotationLabels}{Optional labels for edge annotations. Only used if \code{annotateEdges = TRUE}. Should be given as a named list, where the name is the edge annotation (as stored in the \code{tamat}), and the entry is the label, e.g. 
\code{list(h = "High")}.} \item{clipboard}{If \code{TRUE}, the tikz code is not printed, but instead copied to the clipboard, so it can easily be pasted into a Latex document.} \item{colorAnnotate}{Named list of colors to use to mark edge annotations instead of labels. This overrules \code{annotateEdges} and both are not available at the same time. The list should be given with annotations as names and colors as entries, e.g. \code{list(h = "blue")}.} } \value{ Silently returns a character vector with lines of tikz code. The function furthermore has a side-effect. If \code{clipboard = TRUE}, the side-effect is that the tikz code is also copied to the clipboard. If \code{clipboard = FALSE}, the tikz code is instead printed in the console. } \description{ Generate Latex tikz code for plotting a temporal DAG or PDAG. } \details{ Note that it is necessary to read in relevant tikz libraries in the Latex preamble. The relevant lines of code are (depending a bit on parameter settings): \cr \code{\\usepackage{tikz}} \cr \code{\\usetikzlibrary{arrows,shapes,snakes,automata,backgrounds,petri}} \cr \code{\\usepackage{pgfplots}} }
control.flexrsurv.additive <- function (epsilon = 1e-08, maxit = 25, trace = FALSE, ...){ # controls for flexrsurv additive models are glm.controls return(glm.control(epsilon = epsilon, maxit = maxit, trace = trace)) } control.flexrsurv.multiplicative <- function (epsilon = 1e-8, maxit = 100, trace = FALSE, epsilon.glm = 1e-1, maxit.glm=30, ...){ # controls for flexrsurv multiplicative models # epsilon : positive convergence tolerance of the iterative alternative algorithm; # the iterations converge when |loglik - loglik_old}/(|loglik| + 0.1) < epsilon. # maxit : integer giving the maximal number of iteration of the iterative alternative algorithm. # trace : logical indicating if output should be produced for each iteration. # epsilon.glm, maxit.glm : contols passed to the inner glm call of the itertive alternative algorithm if (!is.numeric(epsilon) || epsilon <= 0) stop("'epsilon' must be > 0", call.=FALSE) if (!is.numeric(epsilon.glm) || epsilon.glm <= 0) stop("'epsilon.glm' must be > 0", call.=FALSE) if (!is.numeric(maxit.glm) || maxit.glm <= 0) stop("maximum number of iterations 'maxit.glm' must be > 0", call.=FALSE) return(list(epsilon = epsilon, maxit = maxit, trace = trace, epsilon.glm = epsilon.glm, maxit.glm = maxit.glm)) }
/R/make.control.flexrsurv.R
no_license
cran/flexrsurv
R
false
false
1,296
r
control.flexrsurv.additive <- function (epsilon = 1e-08, maxit = 25, trace = FALSE, ...){ # controls for flexrsurv additive models are glm.controls return(glm.control(epsilon = epsilon, maxit = maxit, trace = trace)) } control.flexrsurv.multiplicative <- function (epsilon = 1e-8, maxit = 100, trace = FALSE, epsilon.glm = 1e-1, maxit.glm=30, ...){ # controls for flexrsurv multiplicative models # epsilon : positive convergence tolerance of the iterative alternative algorithm; # the iterations converge when |loglik - loglik_old}/(|loglik| + 0.1) < epsilon. # maxit : integer giving the maximal number of iteration of the iterative alternative algorithm. # trace : logical indicating if output should be produced for each iteration. # epsilon.glm, maxit.glm : contols passed to the inner glm call of the itertive alternative algorithm if (!is.numeric(epsilon) || epsilon <= 0) stop("'epsilon' must be > 0", call.=FALSE) if (!is.numeric(epsilon.glm) || epsilon.glm <= 0) stop("'epsilon.glm' must be > 0", call.=FALSE) if (!is.numeric(maxit.glm) || maxit.glm <= 0) stop("maximum number of iterations 'maxit.glm' must be > 0", call.=FALSE) return(list(epsilon = epsilon, maxit = maxit, trace = trace, epsilon.glm = epsilon.glm, maxit.glm = maxit.glm)) }
interbatt = function(X,Y){ Vxy = var(X,Y) rank = qr(Vxy)$rank print(paste("Rank of CoVar Matrix ",rank)) aib = eigen(t(Vxy)%*%Vxy) A = Vxy %*% aib$vectors %*% diag(aib$values^(-0.5)) TIB = as.matrix(X) %*% A rl = list(TIB, A) return(rl) } eval_func = function(y, yhat, cm_show = FALSE){ metrics = c() cm = table(y,yhat) if(cm_show == TRUE){ print(cm) } total = sum(cm) no_diag = cm[row(cm) != (col(cm))] acc = sum(diag(cm))/total error = sum(no_diag)/total metrics = c(acc,error) return(metrics) } plot_img = function(train_data,i,show_target=FALSE){ CUSTOM_COLORS = colorRampPalette(colors = c("black", "white")) if(show_target==TRUE){ print(train_data[i,1]) } train_data = train_data[,-1] z = unname(unlist((train_data[i,]))) k = matrix(z,nrow = 16,ncol = 16) rotate <- function(x) t(apply(x, 2, rev)) image(rotate(t(k)), col = CUSTOM_COLORS(256)) }
/A3 IBA/Util.R
no_license
krishnakalyan3/KernelMachineLearning
R
false
false
913
r
interbatt = function(X,Y){ Vxy = var(X,Y) rank = qr(Vxy)$rank print(paste("Rank of CoVar Matrix ",rank)) aib = eigen(t(Vxy)%*%Vxy) A = Vxy %*% aib$vectors %*% diag(aib$values^(-0.5)) TIB = as.matrix(X) %*% A rl = list(TIB, A) return(rl) } eval_func = function(y, yhat, cm_show = FALSE){ metrics = c() cm = table(y,yhat) if(cm_show == TRUE){ print(cm) } total = sum(cm) no_diag = cm[row(cm) != (col(cm))] acc = sum(diag(cm))/total error = sum(no_diag)/total metrics = c(acc,error) return(metrics) } plot_img = function(train_data,i,show_target=FALSE){ CUSTOM_COLORS = colorRampPalette(colors = c("black", "white")) if(show_target==TRUE){ print(train_data[i,1]) } train_data = train_data[,-1] z = unname(unlist((train_data[i,]))) k = matrix(z,nrow = 16,ncol = 16) rotate <- function(x) t(apply(x, 2, rev)) image(rotate(t(k)), col = CUSTOM_COLORS(256)) }
#' Convert g_c (\eqn{\mu}mol CO2/m^2/s/Pa) to g_w (\eqn{\mu}mol H2O /m^2/s/Pa) #' #' @param g_w conductance to water vapor in units #' (\eqn{\mu}mol H2O / (m^2 s Pa)) of class \code{units}. #' @param D_c diffusion coefficient for CO2 in air in units of m^2/s of class #' \code{units} #' @param D_w diffusion coefficient for H2O in air in units of m^2/s of class #' \code{units} #' @param a exponent used for conversion. Use 1 for still air; 0.67 for laminar flow (Jones 2014). Should be unitless. #' @param unitless Logical. Should scientific units of arguments be checked and set? TRUE is safer, but slower. If FALSE, values provided are assumed to be in correct units. #' #' \code{units} #' #' @return Value with units \eqn{\mu}mol / (m^2 s Pa) of class \code{units}. #' #' @details #' #' Diffusive conductance to CO2 is greater than that of H2O because of the #' higher molecular weight. To convert: #' #' \deqn{g_\mathrm{c} = g_\mathrm{w} (D_\mathrm{c} / D_\mathrm{w}) ^ a}{g_c = g_w (D_c / D_w) ^ a} #' \deqn{g_\mathrm{w} = g_\mathrm{c} (D_\mathrm{w} / D_\mathrm{c}) ^ a}{g_w = g_c (D_w / D_c) ^ a} #' #' @note This function will soon be moving to the standalone gunit package. #' #' @references #' Jones H. 2014. Plants and Microclimate (3rd edition). Cambridge University Press. 
#' #' @examples #' library(units) #' D_c = set_units(1.29e-05, "m^2/s") #' D_w = set_units(2.12e-05, "m^2/s") #' g_c = set_units(3, "umol/m^2/s/Pa") #' a = 1 #' g_w = gc2gw(g_c, D_c, D_w, a, unitless = FALSE) #' g_w #' #' gw2gc(g_w, D_c, D_w, a, unitless = FALSE) #' @export #' gw2gc = function( g_w, D_c, D_w, unitless, a ) { if (unitless) { if (is(g_w, "units")) g_w %<>% drop_units() if (is(D_c, "units")) D_c %<>% drop_units() if (is(D_w, "units")) D_w %<>% drop_units() if (is(a, "units")) a %<>% drop_units() g_c = g_w * (D_c / D_w) ^ a return(g_c) } else { g_w %<>% set_units("umol/m^2/s/Pa") D_c %<>% set_units("m^2/s") D_w %<>% set_units("m^2/s") # The exponent should be unitless if (is(a, "units")) a %<>% drop_units() g_c = g_w * (D_c / D_w) ^ a return(g_c) } } #' Convert g_c (umol CO2/m^2/s/Pa) to g_w (umol H2O /m^2/s/Pa) #' #' @rdname gw2gc #' #' @param g_c conductance to CO2 in units (\eqn{\mu}mol H2O / (m^2 s Pa)) of #' class \code{units}. #' @export #' gc2gw = function( g_c, D_c, D_w, unitless, a ) { if (unitless) { if (is(g_c, "units")) g_c %<>% drop_units() if (is(D_c, "units")) D_c %<>% drop_units() if (is(D_w, "units")) D_w %<>% drop_units() if (is(a, "units")) a %<>% drop_units() g_w = g_c * (D_w / D_c) ^ a return(g_w) } else { g_c %<>% set_units(umol / m^2 / s / Pa) D_c %<>% set_units(m^2 / s) D_w %<>% set_units(m^2 / s) # The exponent should be unitless if (is(a, "units")) a %<>% drop_units() g_w = g_c * (D_w / D_c) ^ a return(g_w) } }
/R/convert-species.R
permissive
cdmuir/gunit
R
false
false
2,919
r
#' Convert g_c (\eqn{\mu}mol CO2/m^2/s/Pa) to g_w (\eqn{\mu}mol H2O /m^2/s/Pa) #' #' @param g_w conductance to water vapor in units #' (\eqn{\mu}mol H2O / (m^2 s Pa)) of class \code{units}. #' @param D_c diffusion coefficient for CO2 in air in units of m^2/s of class #' \code{units} #' @param D_w diffusion coefficient for H2O in air in units of m^2/s of class #' \code{units} #' @param a exponent used for conversion. Use 1 for still air; 0.67 for laminar flow (Jones 2014). Should be unitless. #' @param unitless Logical. Should scientific units of arguments be checked and set? TRUE is safer, but slower. If FALSE, values provided are assumed to be in correct units. #' #' \code{units} #' #' @return Value with units \eqn{\mu}mol / (m^2 s Pa) of class \code{units}. #' #' @details #' #' Diffusive conductance to CO2 is greater than that of H2O because of the #' higher molecular weight. To convert: #' #' \deqn{g_\mathrm{c} = g_\mathrm{w} (D_\mathrm{c} / D_\mathrm{w}) ^ a}{g_c = g_w (D_c / D_w) ^ a} #' \deqn{g_\mathrm{w} = g_\mathrm{c} (D_\mathrm{w} / D_\mathrm{c}) ^ a}{g_w = g_c (D_w / D_c) ^ a} #' #' @note This function will soon be moving to the standalone gunit package. #' #' @references #' Jones H. 2014. Plants and Microclimate (3rd edition). Cambridge University Press. 
#' #' @examples #' library(units) #' D_c = set_units(1.29e-05, "m^2/s") #' D_w = set_units(2.12e-05, "m^2/s") #' g_c = set_units(3, "umol/m^2/s/Pa") #' a = 1 #' g_w = gc2gw(g_c, D_c, D_w, a, unitless = FALSE) #' g_w #' #' gw2gc(g_w, D_c, D_w, a, unitless = FALSE) #' @export #' gw2gc = function( g_w, D_c, D_w, unitless, a ) { if (unitless) { if (is(g_w, "units")) g_w %<>% drop_units() if (is(D_c, "units")) D_c %<>% drop_units() if (is(D_w, "units")) D_w %<>% drop_units() if (is(a, "units")) a %<>% drop_units() g_c = g_w * (D_c / D_w) ^ a return(g_c) } else { g_w %<>% set_units("umol/m^2/s/Pa") D_c %<>% set_units("m^2/s") D_w %<>% set_units("m^2/s") # The exponent should be unitless if (is(a, "units")) a %<>% drop_units() g_c = g_w * (D_c / D_w) ^ a return(g_c) } } #' Convert g_c (umol CO2/m^2/s/Pa) to g_w (umol H2O /m^2/s/Pa) #' #' @rdname gw2gc #' #' @param g_c conductance to CO2 in units (\eqn{\mu}mol H2O / (m^2 s Pa)) of #' class \code{units}. #' @export #' gc2gw = function( g_c, D_c, D_w, unitless, a ) { if (unitless) { if (is(g_c, "units")) g_c %<>% drop_units() if (is(D_c, "units")) D_c %<>% drop_units() if (is(D_w, "units")) D_w %<>% drop_units() if (is(a, "units")) a %<>% drop_units() g_w = g_c * (D_w / D_c) ^ a return(g_w) } else { g_c %<>% set_units(umol / m^2 / s / Pa) D_c %<>% set_units(m^2 / s) D_w %<>% set_units(m^2 / s) # The exponent should be unitless if (is(a, "units")) a %<>% drop_units() g_w = g_c * (D_w / D_c) ^ a return(g_w) } }
\name{SO2} \alias{SO2} \title{Sulphur dioxide measurements over Europe} \description{ The data document values of SO2, on a log scale, from monitoring stations across Europe from 1990 to 2001. The data were collected through the 'European monitoring and evaluation programme' (EMEP) and they are available at \url{https://www.emep.int}. The data recorded here have been organised into a convenient form for analysis. The data file consists of six variables: \code{site}: a site code for the monitoring station \code{longitude}: longitude of the monitoring station \code{latitude}: latitude of the monitoring station \code{year}: year of measurement \code{month}: month of measurement \code{logSO2}: SO2 measurement on a log scale } \references{ Spatiotemporal smoothing and sulphur dioxide trends over Europe A. W. Bowman, M. Giannitrapani and E. M. Scott. Applied Statistics, 58 (2009), 737--752 } \examples{ \dontrun{ Month <- SO2$month + (SO2$year - 1990) * 12 Year <- SO2$year + (SO2$month - 0.5) / 12 Location <- cbind(SO2$longitude, SO2$latitude) back <- I if (require(maps)) { mapxy <- map('world', plot = FALSE, xlim = range(SO2$longitude), ylim = range(SO2$latitude)) back <- function() map(mapxy, add = TRUE) } rp.plot4d(Location, Year, SO2$logSO2, col.palette = rev(heat.colors(12)), background.plot = back) }} \keyword{iplot} \keyword{dynamic}
/man/SO2.Rd
no_license
cran/rpanel
R
false
false
1,449
rd
\name{SO2} \alias{SO2} \title{Sulphur dioxide measurements over Europe} \description{ The data document values of SO2, on a log scale, from monitoring stations across Europe from 1990 to 2001. The data were collected through the 'European monitoring and evaluation programme' (EMEP) and they are available at \url{https://www.emep.int}. The data recorded here have been organised into a convenient form for analysis. The data file consists of six variables: \code{site}: a site code for the monitoring station \code{longitude}: longitude of the monitoring station \code{latitude}: latitude of the monitoring station \code{year}: year of measurement \code{month}: month of measurement \code{logSO2}: SO2 measurement on a log scale } \references{ Spatiotemporal smoothing and sulphur dioxide trends over Europe A. W. Bowman, M. Giannitrapani and E. M. Scott. Applied Statistics, 58 (2009), 737--752 } \examples{ \dontrun{ Month <- SO2$month + (SO2$year - 1990) * 12 Year <- SO2$year + (SO2$month - 0.5) / 12 Location <- cbind(SO2$longitude, SO2$latitude) back <- I if (require(maps)) { mapxy <- map('world', plot = FALSE, xlim = range(SO2$longitude), ylim = range(SO2$latitude)) back <- function() map(mapxy, add = TRUE) } rp.plot4d(Location, Year, SO2$logSO2, col.palette = rev(heat.colors(12)), background.plot = back) }} \keyword{iplot} \keyword{dynamic}
args <- commandArgs(trailingOnly=TRUE) # test if there is at least one argument: if not, return an error if (length(args)!= 2) { stop("Two arguments must be supplied. The First argument is CSV File. The second argument is PNG File.\n", call.=FALSE) } else { # default output file inFile <- args[1] outFile <- args[2] } ################################################################# library(cluster) library(clusterSim) library(ade4) ################################################################# #data <- read.table("/data/projects/metagenomics/enterotyping/MetaHIT_SangerSamples.genus.txt", header=T, row.names=1, dec=".", sep="\t") #data=read.table("/data/projects/metagenomics/enterotyping/genus.csv", header=T, row.names=1, dec=".", sep=",") data <- read.csv(file=inFile, header=TRUE, row.names=1, dec=".", sep=",") data=data[-1,] KLD <- function(x,y) sum(x * log(x/y)) JSD <- function(x,y) sqrt(0.5 * KLD(x, (x+y)/2) + 0.5 * KLD(y, (x+y)/2)) dist.JSD <- function(inMatrix, pseudocount=0.000001, ...) 
{ KLD <- function(x,y) sum(x *log(x/y)) JSD<- function(x,y) sqrt(0.5 * KLD(x, (x+y)/2) + 0.5 * KLD(y, (x+y)/2)) matrixColSize <- length(colnames(inMatrix)) matrixRowSize <- length(rownames(inMatrix)) colnames <- colnames(inMatrix) resultsMatrix <- matrix(0, matrixColSize, matrixColSize) inMatrix = apply(inMatrix,1:2,function(x) ifelse (x==0,pseudocount,x)) for(i in 1:matrixColSize) { for(j in 1:matrixColSize) { resultsMatrix[i,j]=JSD(as.vector(inMatrix[,i]), as.vector(inMatrix[,j])) } } colnames -> colnames(resultsMatrix) -> rownames(resultsMatrix) as.dist(resultsMatrix)->resultsMatrix attr(resultsMatrix, "method") <- "dist" return(resultsMatrix) } #ALGORITHM data.dist = dist.JSD(data) pam.clustering = function(x,k) { # x is a distance matrix and k the number of clusters require(cluster) cluster = as.vector(pam(as.dist(x), k, diss=TRUE)$clustering) return(cluster) } #say we decided to cluster with cluster size=3 ##data.cluster=pam.clustering(data.dist, k=3) #OPTIMAL NUMBER OF CLUSTERS require(clusterSim) ##nclusters = index.G1(t(data), data.cluster, d = data.dist, centrotypes = "medoids") nclusters = NULL for (k in 1:20) { if (k==1) { nclusters[k]=NA } else { data.cluster_temp=pam.clustering(data.dist, k) nclusters[k]=index.G1(t(data),data.cluster_temp, d = data.dist, centrotypes = "medoids") } } data.cluster=pam.clustering(data.dist, k=which.max(nclusters)) #say we decided to cluster with cluster size=3 ##data.cluster=pam.clustering(data.dist, k=which.max(nclusters)) #setwd(outDir) #png(paste(rsrcName,"-",cancerType,"-clustersizes.png",sep="")) #plot(nclusters, type="h", xlab="k clusters", ylab="CH index") #dev.off() #CLUSTER VALIDATION obs.silhouette=mean(silhouette(data.cluster, data.dist)[,3]) #BETWEEN-CLASS ANALYSIS (BCA) #Prior to this analysis, in the Illumina dataset, genera with very low abundance were removed to decrease the noise, if their average abundance across all samples was below 0.01%. 
noise.removal <- function(dataframe, percent=0.01, top=NULL){ dataframe->Matrix bigones <- rowSums(Matrix)*100/(sum(rowSums(Matrix))) > percent Matrix_1 <- Matrix[bigones,] print(percent) return(Matrix_1) } #data=noise.removal(data, percent=0.01) require(ade4) ## plot 1 #obs.pca=dudi.pca(data.frame(t(data)), scannf=F, nf=10) #obs.bet=bca(obs.pca, fac=as.factor(data.cluster), scannf=F, nf=k-1) #dev.new() #s.class(obs.bet$ls, fac=as.factor(data.cluster), grid=F,sub="Between-class analysis") #text(obs.bet$ls,labels=rownames(obs.bet$ls),cex=0.5) #plot 2 obs.pcoa=dudi.pco(data.dist, scannf=F, nf=3) coul <- c("red", "blue", "brown", "green", "pink") #dev.new() png(outFile,width = 600, height = 600, units = "px", pointsize = 12) s.class(obs.pcoa$li, fac=as.factor(data.cluster), grid=F,sub="Principal coordiante analysis",col = coul) label = row.names(obs.pcoa$li) sampleType <- 0 for (i in 1:length(label)){ sampleType[i] <- paste(unlist(strsplit(label[i], "\\."))[3],".",substr(unlist(strsplit(label[i], "\\."))[4], 1, 1),sep="") } s.label(obs.pcoa$li,label = sampleType,clabel=0.9, boxes=T, grid=F, add.plot=TRUE, pch = 10, include.origin = TRUE) dev.off() outFile2 <- "/data/projects/bioxpress/v-2.1/generated/sampleqc/tcga" cancerType = strsplit(outFile,"-")[[1]][3] png(paste(outFile2,"-",cancerType,"-pca2-even.png",sep=""),width = 600, height = 600, units = "px", pointsize = 12) s.class(obs.pcoa$li, fac=as.factor(data.cluster), grid=F,sub="Principal coordiante analysis",col = coul) sampleType <- 0 for (i in 1:length(label)){ sampleType[i] <- substr(unlist(strsplit(label[i], "\\."))[4], 1, 1) } s.label(obs.pcoa$li,label = sampleType,clabel=1, boxes=T, grid=F, add.plot=TRUE, pch = 15, include.origin = TRUE) #text(obs.pcoa$li,labels=rownames(obs.pcoa$li),cex=0.8)
/old_files_not_used/sampleqc/script.1.r
no_license
GW-HIVE/bioxpress
R
false
false
4,862
r
args <- commandArgs(trailingOnly=TRUE) # test if there is at least one argument: if not, return an error if (length(args)!= 2) { stop("Two arguments must be supplied. The First argument is CSV File. The second argument is PNG File.\n", call.=FALSE) } else { # default output file inFile <- args[1] outFile <- args[2] } ################################################################# library(cluster) library(clusterSim) library(ade4) ################################################################# #data <- read.table("/data/projects/metagenomics/enterotyping/MetaHIT_SangerSamples.genus.txt", header=T, row.names=1, dec=".", sep="\t") #data=read.table("/data/projects/metagenomics/enterotyping/genus.csv", header=T, row.names=1, dec=".", sep=",") data <- read.csv(file=inFile, header=TRUE, row.names=1, dec=".", sep=",") data=data[-1,] KLD <- function(x,y) sum(x * log(x/y)) JSD <- function(x,y) sqrt(0.5 * KLD(x, (x+y)/2) + 0.5 * KLD(y, (x+y)/2)) dist.JSD <- function(inMatrix, pseudocount=0.000001, ...) 
{ KLD <- function(x,y) sum(x *log(x/y)) JSD<- function(x,y) sqrt(0.5 * KLD(x, (x+y)/2) + 0.5 * KLD(y, (x+y)/2)) matrixColSize <- length(colnames(inMatrix)) matrixRowSize <- length(rownames(inMatrix)) colnames <- colnames(inMatrix) resultsMatrix <- matrix(0, matrixColSize, matrixColSize) inMatrix = apply(inMatrix,1:2,function(x) ifelse (x==0,pseudocount,x)) for(i in 1:matrixColSize) { for(j in 1:matrixColSize) { resultsMatrix[i,j]=JSD(as.vector(inMatrix[,i]), as.vector(inMatrix[,j])) } } colnames -> colnames(resultsMatrix) -> rownames(resultsMatrix) as.dist(resultsMatrix)->resultsMatrix attr(resultsMatrix, "method") <- "dist" return(resultsMatrix) } #ALGORITHM data.dist = dist.JSD(data) pam.clustering = function(x,k) { # x is a distance matrix and k the number of clusters require(cluster) cluster = as.vector(pam(as.dist(x), k, diss=TRUE)$clustering) return(cluster) } #say we decided to cluster with cluster size=3 ##data.cluster=pam.clustering(data.dist, k=3) #OPTIMAL NUMBER OF CLUSTERS require(clusterSim) ##nclusters = index.G1(t(data), data.cluster, d = data.dist, centrotypes = "medoids") nclusters = NULL for (k in 1:20) { if (k==1) { nclusters[k]=NA } else { data.cluster_temp=pam.clustering(data.dist, k) nclusters[k]=index.G1(t(data),data.cluster_temp, d = data.dist, centrotypes = "medoids") } } data.cluster=pam.clustering(data.dist, k=which.max(nclusters)) #say we decided to cluster with cluster size=3 ##data.cluster=pam.clustering(data.dist, k=which.max(nclusters)) #setwd(outDir) #png(paste(rsrcName,"-",cancerType,"-clustersizes.png",sep="")) #plot(nclusters, type="h", xlab="k clusters", ylab="CH index") #dev.off() #CLUSTER VALIDATION obs.silhouette=mean(silhouette(data.cluster, data.dist)[,3]) #BETWEEN-CLASS ANALYSIS (BCA) #Prior to this analysis, in the Illumina dataset, genera with very low abundance were removed to decrease the noise, if their average abundance across all samples was below 0.01%. 
noise.removal <- function(dataframe, percent=0.01, top=NULL){ dataframe->Matrix bigones <- rowSums(Matrix)*100/(sum(rowSums(Matrix))) > percent Matrix_1 <- Matrix[bigones,] print(percent) return(Matrix_1) } #data=noise.removal(data, percent=0.01) require(ade4) ## plot 1 #obs.pca=dudi.pca(data.frame(t(data)), scannf=F, nf=10) #obs.bet=bca(obs.pca, fac=as.factor(data.cluster), scannf=F, nf=k-1) #dev.new() #s.class(obs.bet$ls, fac=as.factor(data.cluster), grid=F,sub="Between-class analysis") #text(obs.bet$ls,labels=rownames(obs.bet$ls),cex=0.5) #plot 2 obs.pcoa=dudi.pco(data.dist, scannf=F, nf=3) coul <- c("red", "blue", "brown", "green", "pink") #dev.new() png(outFile,width = 600, height = 600, units = "px", pointsize = 12) s.class(obs.pcoa$li, fac=as.factor(data.cluster), grid=F,sub="Principal coordiante analysis",col = coul) label = row.names(obs.pcoa$li) sampleType <- 0 for (i in 1:length(label)){ sampleType[i] <- paste(unlist(strsplit(label[i], "\\."))[3],".",substr(unlist(strsplit(label[i], "\\."))[4], 1, 1),sep="") } s.label(obs.pcoa$li,label = sampleType,clabel=0.9, boxes=T, grid=F, add.plot=TRUE, pch = 10, include.origin = TRUE) dev.off() outFile2 <- "/data/projects/bioxpress/v-2.1/generated/sampleqc/tcga" cancerType = strsplit(outFile,"-")[[1]][3] png(paste(outFile2,"-",cancerType,"-pca2-even.png",sep=""),width = 600, height = 600, units = "px", pointsize = 12) s.class(obs.pcoa$li, fac=as.factor(data.cluster), grid=F,sub="Principal coordiante analysis",col = coul) sampleType <- 0 for (i in 1:length(label)){ sampleType[i] <- substr(unlist(strsplit(label[i], "\\."))[4], 1, 1) } s.label(obs.pcoa$li,label = sampleType,clabel=1, boxes=T, grid=F, add.plot=TRUE, pch = 15, include.origin = TRUE) #text(obs.pcoa$li,labels=rownames(obs.pcoa$li),cex=0.8)
/new_scripts/PRSset_rename_output_SDA.R
no_license
leosplasc/datasharing
R
false
false
4,389
r
## ----knitr_init, echo=FALSE, cache=FALSE---------------------------------------------------------- knitr::opts_chunk$set(echo = TRUE) options(width=100) ## ---- messages=TRUE, warnings=TRUE, errors=TRUE--------------------------------------------------- library(streamMetabolizer) ## ------------------------------------------------------------------------------------------------- suppressPackageStartupMessages(library(dplyr)) ## ------------------------------------------------------------------------------------------------- dat <- data_metab('3','30') ## ------------------------------------------------------------------------------------------------- mm_euler <- metab(specs(mm_name('mle', ode_method='euler')), dat) mm_trapezoid <- metab(specs(mm_name('mle', ode_method='trapezoid')), dat) mm_rk4 <- metab(specs(mm_name('mle', ode_method='rk4')), dat) mm_lsoda <- metab(specs(mm_name('mle', ode_method='lsoda')), dat) DO.standard <- rep(predict_DO(mm_rk4)$'DO.mod', times=4) ode_preds <- bind_rows( mutate(predict_DO(mm_euler), method='euler'), mutate(predict_DO(mm_trapezoid), method='trapezoid'), mutate(predict_DO(mm_rk4), method='rk4'), mutate(predict_DO(mm_lsoda), method='lsoda')) %>% mutate(DO.mod.diffeuler = DO.mod - DO.standard) library(ggplot2) ggplot(ode_preds, aes(x=solar.time)) + geom_point(aes(y=DO.obs), color='grey', alpha=0.3) + geom_line(aes(y=DO.mod, color=method), size=1) + theme_bw() ggplot(ode_preds, aes(x=solar.time)) + geom_point(aes(y=pmax(-0.2, pmin(0.2, DO.mod.diffeuler)), color=method), size=1, alpha=0.8) + scale_y_continuous(limits=c(-0.2,0.2)) + theme_bw() + ylab("Deviations from rk4 (capped at +/- 0.2)")
/inst/doc/ode_methods.R
permissive
weisoon/streamMetabolizer
R
false
false
1,671
r
## ----knitr_init, echo=FALSE, cache=FALSE---------------------------------------------------------- knitr::opts_chunk$set(echo = TRUE) options(width=100) ## ---- messages=TRUE, warnings=TRUE, errors=TRUE--------------------------------------------------- library(streamMetabolizer) ## ------------------------------------------------------------------------------------------------- suppressPackageStartupMessages(library(dplyr)) ## ------------------------------------------------------------------------------------------------- dat <- data_metab('3','30') ## ------------------------------------------------------------------------------------------------- mm_euler <- metab(specs(mm_name('mle', ode_method='euler')), dat) mm_trapezoid <- metab(specs(mm_name('mle', ode_method='trapezoid')), dat) mm_rk4 <- metab(specs(mm_name('mle', ode_method='rk4')), dat) mm_lsoda <- metab(specs(mm_name('mle', ode_method='lsoda')), dat) DO.standard <- rep(predict_DO(mm_rk4)$'DO.mod', times=4) ode_preds <- bind_rows( mutate(predict_DO(mm_euler), method='euler'), mutate(predict_DO(mm_trapezoid), method='trapezoid'), mutate(predict_DO(mm_rk4), method='rk4'), mutate(predict_DO(mm_lsoda), method='lsoda')) %>% mutate(DO.mod.diffeuler = DO.mod - DO.standard) library(ggplot2) ggplot(ode_preds, aes(x=solar.time)) + geom_point(aes(y=DO.obs), color='grey', alpha=0.3) + geom_line(aes(y=DO.mod, color=method), size=1) + theme_bw() ggplot(ode_preds, aes(x=solar.time)) + geom_point(aes(y=pmax(-0.2, pmin(0.2, DO.mod.diffeuler)), color=method), size=1, alpha=0.8) + scale_y_continuous(limits=c(-0.2,0.2)) + theme_bw() + ylab("Deviations from rk4 (capped at +/- 0.2)")
#' @title Log-likelihood function of a CUB model without covariates #' @description Compute the log-likelihood function of a CUB model without covariates for a given #' absolute frequency distribution. #' @aliases loglikcub00 #' @usage loglikcub00(m, freq, beta0, gama0) #' @param m Number of ordinal categories #' @param freq Vector of the absolute frequency distribution #' @param beta0 Logit transform of uncertainty parameter #' @param gama0 Logit transform of fFeeling parameter #' @keywords internal #' loglikcub00 <- function(m,freq,beta0,gama0){ pai<-1/(1+exp(-beta0)); csi<-1/(1+exp(-gama0)) t(freq)%*%log(probcub00(m,pai,csi)) }
/R/loglikcub00.R
no_license
CristianPachacama/FastCUB
R
false
false
685
r
#' @title Log-likelihood function of a CUB model without covariates #' @description Compute the log-likelihood function of a CUB model without covariates for a given #' absolute frequency distribution. #' @aliases loglikcub00 #' @usage loglikcub00(m, freq, beta0, gama0) #' @param m Number of ordinal categories #' @param freq Vector of the absolute frequency distribution #' @param beta0 Logit transform of uncertainty parameter #' @param gama0 Logit transform of fFeeling parameter #' @keywords internal #' loglikcub00 <- function(m,freq,beta0,gama0){ pai<-1/(1+exp(-beta0)); csi<-1/(1+exp(-gama0)) t(freq)%*%log(probcub00(m,pai,csi)) }
% Generated by roxygen2: do not edit by hand % Please edit documentation in R/exportReportsTyped.R \name{exportReportsTyped} \alias{exportReportsTyped} \alias{exportReportsTyped.redcapApiConnection} \title{Export a Report from a REDCap Project} \usage{ exportReportsTyped(rcon, report_id, ...) \method{exportReportsTyped}{redcapApiConnection}( rcon, report_id, drop_fields = NULL, na = list(), validation = list(), cast = list(), assignment = list(label = stripHTMLandUnicode, units = unitsFieldAnnotation), ..., config = list(), api_param = list(), csv_delimiter = "," ) } \arguments{ \item{rcon}{A \code{redcapConnection} object} \item{report_id}{\code{integerish(1)} The ID number of the report to be exported.} \item{...}{Argument to pass to other methods.} \item{drop_fields}{\code{character}. A vector of field names to remove from the export. Ignore if length = 0.} \item{na}{A named \code{list} of user specified functions to determine if the data is NA. This is useful when data is loaded that has coding for NA, e.g. -5 is NA. Keys must correspond to a truncated REDCap field type, i.e. {date_, datetime_, datetime_seconds_, time_mm_ss, time_hh_mm_ss, time, float, number, calc, int, integer, select, radio, dropdown, yesno, truefalse, checkbox, form_complete, sql}. The function will be provided the variables (x, field_name, coding). The function must return a vector of logicals matching the input. It defaults to \code{\link{isNAorBlank}} for all entries.} \item{validation}{A named \code{list} of user specified validation functions. The same named keys are supported as the na argument. The function will be provided the variables (x, field_name, coding). The function must return a vector of logical matching the input length. Helper functions to construct these are \code{\link{valRx}} and \code{\link{valChoice}}. Only fields that are not identified as NA will be passed to validation functions.} \item{cast}{A named \code{list} of user specified class casting functions. 
The same named keys are supported as the na argument. The function will be provided the variables (x, field_name, coding). The function must return a vector of logical matching the input length. See \code{\link{fieldValidationAndCasting}}} \item{assignment}{A named \code{list} of functions. These functions are provided, field_name, label, description and field_type and return a list of attributes to assign to the column. Defaults to creating a label attribute from the stripped HTML and UNICODE raw label and scanning for units={"UNITS"} in description to use as a units attribute.} \item{config}{named \code{list}. Additional configuration parameters to pass to \code{httr::POST}, These are appended to any parameters in \code{rcon$config}} \item{api_param}{named \code{list}. Additional API parameters to pass into the body of the API call. This provides users to execute calls with options that may not otherwise be supported by redcapAPI.} \item{csv_delimiter}{character. One of \code{c(",", "\t", ";", "|", "^")}. Designates the delimiter for the CSV file received from the API.} } \description{ Export a Report from a REDCap Project } \details{ This method allows you to export the data set of a report created on a project's 'Data Exports, Reports, and Stats' page. Note about export rights: Please be aware that Data Export user rights will be applied to this API request. For example, if you have 'No Access' data export rights in the project, then the API report export will fail and return an error. And if you have 'De-Identified' or 'Remove All Identifier Fields' data export rights, then some data fields *might* be removed and filtered out of the data set returned from the API. To make sure that no data is unnecessarily filtered out of your API request, you should have 'Full Data Set' export rights in the project. }
/man/exportReportsTyped.Rd
no_license
nutterb/redcapAPI
R
false
true
3,879
rd
% Generated by roxygen2: do not edit by hand % Please edit documentation in R/exportReportsTyped.R \name{exportReportsTyped} \alias{exportReportsTyped} \alias{exportReportsTyped.redcapApiConnection} \title{Export a Report from a REDCap Project} \usage{ exportReportsTyped(rcon, report_id, ...) \method{exportReportsTyped}{redcapApiConnection}( rcon, report_id, drop_fields = NULL, na = list(), validation = list(), cast = list(), assignment = list(label = stripHTMLandUnicode, units = unitsFieldAnnotation), ..., config = list(), api_param = list(), csv_delimiter = "," ) } \arguments{ \item{rcon}{A \code{redcapConnection} object} \item{report_id}{\code{integerish(1)} The ID number of the report to be exported.} \item{...}{Argument to pass to other methods.} \item{drop_fields}{\code{character}. A vector of field names to remove from the export. Ignore if length = 0.} \item{na}{A named \code{list} of user specified functions to determine if the data is NA. This is useful when data is loaded that has coding for NA, e.g. -5 is NA. Keys must correspond to a truncated REDCap field type, i.e. {date_, datetime_, datetime_seconds_, time_mm_ss, time_hh_mm_ss, time, float, number, calc, int, integer, select, radio, dropdown, yesno, truefalse, checkbox, form_complete, sql}. The function will be provided the variables (x, field_name, coding). The function must return a vector of logicals matching the input. It defaults to \code{\link{isNAorBlank}} for all entries.} \item{validation}{A named \code{list} of user specified validation functions. The same named keys are supported as the na argument. The function will be provided the variables (x, field_name, coding). The function must return a vector of logical matching the input length. Helper functions to construct these are \code{\link{valRx}} and \code{\link{valChoice}}. Only fields that are not identified as NA will be passed to validation functions.} \item{cast}{A named \code{list} of user specified class casting functions. 
The same named keys are supported as the na argument. The function will be provided the variables (x, field_name, coding). The function must return a vector of logical matching the input length. See \code{\link{fieldValidationAndCasting}}} \item{assignment}{A named \code{list} of functions. These functions are provided, field_name, label, description and field_type and return a list of attributes to assign to the column. Defaults to creating a label attribute from the stripped HTML and UNICODE raw label and scanning for units={"UNITS"} in description to use as a units attribute.} \item{config}{named \code{list}. Additional configuration parameters to pass to \code{httr::POST}, These are appended to any parameters in \code{rcon$config}} \item{api_param}{named \code{list}. Additional API parameters to pass into the body of the API call. This provides users to execute calls with options that may not otherwise be supported by redcapAPI.} \item{csv_delimiter}{character. One of \code{c(",", "\t", ";", "|", "^")}. Designates the delimiter for the CSV file received from the API.} } \description{ Export a Report from a REDCap Project } \details{ This method allows you to export the data set of a report created on a project's 'Data Exports, Reports, and Stats' page. Note about export rights: Please be aware that Data Export user rights will be applied to this API request. For example, if you have 'No Access' data export rights in the project, then the API report export will fail and return an error. And if you have 'De-Identified' or 'Remove All Identifier Fields' data export rights, then some data fields *might* be removed and filtered out of the data set returned from the API. To make sure that no data is unnecessarily filtered out of your API request, you should have 'Full Data Set' export rights in the project. }
# # Copyright (C) 1997-2010 Friedrich Leisch # $Id: mlbench-class.R 4612 2010-10-08 09:51:20Z leisch $ # mlbench.xor <- function(n, d=2){ x <- matrix(runif(n*d,-1,1),ncol=d,nrow=n) if((d != as.integer(d)) || (d<2)) stop("d must be an integer >=2") z <- rep(0, length=n) for(k in 1:n){ if(x[k,1]>=0){ tmp <- (x[k,2:d] >=0) z[k] <- 1+sum(tmp*2^(0:(d-2))) } else { tmp <- !(x[k,2:d] >=0) z[k] <- 1 + sum(tmp*2^(0:(d-2))) } } retval <- list(x=x, classes=factor(z)) class(retval) <- c("mlbench.xor", "mlbench") retval } mlbench.circle <- function(n, d=2){ x <- matrix(runif(n*d,-1,1),ncol=d,nrow=n) if((d != as.integer(d)) || (d<2)) stop("d must be an integer >=2") z <- rep(1, length=n) r <- (2^(d-1) * gamma(1+d/2) / (pi^(d/2)))^(1/d) z[apply(x, 1, function(x) sum(x^2)) > r^2] <- 2 retval <- list(x=x, classes=factor(z)) class(retval) <- c("mlbench.circle", "mlbench") retval } mlbench.2dnormals <- function(n, cl=2, r=sqrt(cl), sd=1){ e <- sample(0:(cl-1), size=n, replace=TRUE) m <- r * cbind(cos(pi/4 + e*2*pi/cl), sin(pi/4 + e*2*pi/cl)) x <- matrix(rnorm(2*n, sd=sd), ncol=2) + m retval <- list(x=x, classes=factor(e+1)) class(retval) <- c("mlbench.2dnormals", "mlbench") retval } mlbench.1spiral <- function(n, cycles=1, sd=0) { w <- seq(0, by=cycles/n, length=n) x <- matrix(0, nrow=n, ncol=2) x[,1] <- (2*w+1)*cos(2*pi*w)/3; x[,2] <- (2*w+1)*sin(2*pi*w)/3; if(sd>0){ e <- rnorm(n, sd=sd) xs <- cos(2*pi*w)-pi*(2*w+1)*sin(2*pi*w) ys <- sin(2*pi*w)+pi*(2*w+1)*cos(2*pi*w) nrm <- sqrt(xs^2+ys^2) x[,1] <- x[,1] + e*ys/nrm x[,2] <- x[,2] - e*xs/nrm } x } mlbench.spirals <- function(n, cycles=1, sd=0) { x <- matrix(0, nrow=n, ncol=2) c2 <- sample(1:n, size=n/2, replace=FALSE) cl <- factor(rep(1, length=n), levels=as.character(1:2)) cl[c2] <- 2 x[-c2,] <- mlbench.1spiral(n=n-length(c2), cycles=cycles, sd=sd) x[c2,] <- - mlbench.1spiral(n=length(c2), cycles=cycles, sd=sd) retval <- list(x=x, classes=cl) class(retval) <- c("mlbench.spirals", "mlbench") retval } mlbench.ringnorm <- function(n, 
d=20) { x <- matrix(0, nrow=n, ncol=d) c2 <- sample(1:n, size=n/2, replace=FALSE) cl <- factor(rep(1, length=n), levels=as.character(1:2)) cl[c2] <- 2 a <- 1/sqrt(d) x[-c2,] <- matrix(rnorm(n=d*(n-length(c2)), sd=2), ncol=d) x[c2,] <- matrix(rnorm(n=d*length(c2), mean=a), ncol=d) retval <- list(x=x, classes=cl) class(retval) <- c("mlbench.ringnorm", "mlbench") retval } mlbench.twonorm <- function (n, d = 20) { x <- matrix(0, nrow = n, ncol = d) c2 <- sample(1:n, size = n/2, replace = FALSE) cl <- factor(rep(1, length = n), levels = as.character(1:2)) cl[c2] <- 2 a <- 2/sqrt(d) x[-c2, ] <- matrix(rnorm(n = d * (n - length(c2)), mean = a, sd = 1), ncol = d) x[c2, ] <- matrix(rnorm(n = d * length(c2), mean = -a), ncol = d) retval <- list(x = x, classes = cl) class(retval) <- c("mlbench.twonorm", "mlbench") retval } mlbench.threenorm <- function (n, d = 20) { x <- matrix(0, nrow = n, ncol = d) c2 <- sample(1:n, size = n/2, replace = FALSE) cl <- factor(rep(1, length = n), levels = as.character(1:2)) cl[c2] <- 2 c1 <- (1:n)[-c2] a <- 2/sqrt(d) for (i in c1) { distr <- as.logical(round(runif(1,0,1))) if ( distr ) x[i, ] <- rnorm(n = d, mean = a) else x[i, ] <- rnorm(n = d, mean = -a) } m <- rep(c(a, -a), d/2) if ((d %% 2)==1) m <- c(m, a) x[c2, ] <- matrix(rnorm(n = d * length(c2), mean = m), ncol = d, byrow=TRUE) retval <- list(x = x, classes = cl) class(retval) <- c("mlbench.threenorm", "mlbench") retval } mlbench.waveform <- function (n) { Rnuminstances <- n retval <- .C("waveform", Rnuminstances = as.integer(Rnuminstances), x = double(21*n), type = integer(n), PACKAGE = "mlbench") x <- matrix (retval$x, ncol=21, byrow = TRUE) retval <- list (x=x, classes=as.factor(retval$type+1)) class(retval) <- c("mlbench.waveform","mlbench") return(retval) } mlbench.cassini <- function(n,relsize=c(2,2,1)) { cassinib <- function(x, a, c) { y <- numeric(2) y[1] <- -sqrt(-c^2 - x^2 + sqrt(a^4 + 4*c^2*x^2)) y[2] <- sqrt(-c^2 - x^2 + sqrt(a^4 + 4*c^2*x^2)) y } circle <- function(x, r) 
sqrt(r^2-x^2) big1<-relsize[1] big2<-relsize[2] small<-relsize[3] parts<-big1+big2+small npiece<-n/parts n1<-round(big1*npiece) n2<-round(big2*npiece) n3<-round(small*npiece) if ((n1+n2+n3)!=n) n3<-n3+1 a<-1 C<-0.97 Cell<-sqrt((1+C^2)/3) aell <- Cell*sqrt(2) transl <- 1.1 r <- 0.6 tmima1<-matrix(0,ncol=2,nrow=n1) tmima2<-matrix(0,ncol=2,nrow=n2) tmima3<-matrix(0,ncol=2,nrow=n3) n1found <- 0 while(n1found < n1) { x1 <- runif(1,min=-sqrt(a^2+C^2),max=sqrt(a^2+C^2)) y1 <- runif(1,min=-transl-1,max=-transl+0.6) if ((y1 < cassinib(x1,a,C)[2]-transl) && (y1 > cassinib(x1,aell,Cell)[1]-transl)) { n1found <- n1found +1 tmima1[n1found,]<-c(x1,y1) } } n2found <- 0 while(n2found < n2) { x2 <- runif(1,min=-sqrt(a^2+C^2),max=sqrt(a^2+C^2)) y2 <- runif(1,max= transl+1,min=transl-0.6) if ((y2 > cassinib(x2,a,C)[1]+transl) && (y2 < cassinib(x2,aell,Cell)[2]+transl)) { n2found <- n2found +1 tmima2[n2found,]<-c(x2,y2) } } n3found <- 0 while(n3found < n3) { x3<-runif(1,min=-r,max=r) y3<-runif(1,min=-r,max=r) if ((y3 > -circle(x3,r)) && (y3 < circle(x3,r))) { n3found <- n3found +1 tmima3[n3found,]<-c(x3,y3) } } teliko <- rbind(tmima1,tmima2,tmima3) cl <- factor(c(rep(1,n1),rep(2,n2),rep(3,n3))) retval<-list(x=teliko,classes=cl) class(retval) <- c("mlbench.cassini","mlbench") retval } mlbench.cuboids <- function (n, relsize=c(2,2,2,1)) { big1 <- relsize[1] big2 <- relsize[2] big3 <- relsize[3] small <- relsize[4] parts<-big1+big2++big3+small npiece<-n/parts n1<-round(big1*npiece) n2<-round(big2*npiece) n3<-round(big3*npiece) n4<-round(small*npiece) if ((n1+n2+n3+n4)!=n) n4<-n4+1 x1 <- cbind(runif(n1,min=0,max=1),runif(n1,min=0.75,max=1.0),runif(n1,min=0.75,max=1)) x2 <- cbind(runif(n2,min=0.75,max=1.0),runif(n2,min=0,max=0.25),runif(n2,min=0,max=1)) x3 <- cbind(runif(n3,min=0.0,max=0.25),runif(n3,min=0.0,max=1),runif(n3,min=0,max=0.25)) x4 <- cbind(runif(n4,min=0.4,max=0.6),runif(n4,min=0.4,max=0.6),runif(n4,min=0.4,max=0.6)) x<-rbind(x1,x2,x3,x4) retval 
<-list(x=x,classes=factor(c(rep(1,n1),rep(2,n2), rep(3,n3),rep(4,n4)))) class(retval) <- c("mlbench.cuboids","mlbench") return(retval) } mlbench.smiley <- function(n=500, sd1=.1, sd2=.05) { n1 <- round(n/6) n2 <- round(n/4) n3 <- n - 2 * n1 - n2 x1 <- cbind(rnorm(n1, -.8, sd1), rnorm(n1, 1, sd1)) x2 <- cbind(rnorm(n1, .8, sd1), rnorm(n1, 1, sd1)) x3 <- cbind(runif(n2, -.2, .2), runif(n2, 0, .75)) x3[,1] <- x3[,1]*(1-x3[,2]) x4 <- runif(n3, -1, 1) x4 <- cbind(x4, x4^2 - 1 + rnorm(n3, 0, sd2)) x <- retval <- list(x = rbind(x1, x2, x3, x4), classes=factor(c(rep(1,n1),rep(2,n1),rep(3,n2),rep(4,n3)))) class(retval) <- c("mlbench.smiley", "mlbench") retval } mlbench.shapes <- function(n=500) { n1 <- round(n/4) n2 <- n-3*n1 x1 <- cbind(rnorm(n1, -1, .2), rnorm(n1, 1.5, .2)) x2 <- cbind(runif(n1, -1.5, -0.5), runif(n1, -2, 0)) x3 <- cbind(runif(n1, -1, 1), runif(n1, 1, 2)) x3[,1] <- x3[,1]*(2-x3[,2])+1 x4 <- runif(n2, 0.5, 2) x4 <- cbind(x4, cos(4*x4)-x4+runif(n2,-.2,.2)) retval <- list(x = rbind(x1, x2, x3, x4), classes=factor(c(rep(1,n1),rep(2,n1),rep(3,n1),rep(4,n2)))) class(retval) <- c("mlbench.shapes", "mlbench") retval } ###********************************************************** ## Original ist bincombinations in e1071 hypercube <- function(d) { retval <- matrix(0, nrow=2^d, ncol=d) for(n in 1:d){ retval[,n] <- rep(c(rep(0, (2^d/2^n)), rep(1, (2^d/2^n))), length=2^d) } retval } mlbench.hypercube <- function(n=800, d=3, sides=rep(1,d), sd=0.1) { m <- hypercube(d) n1 <- round(n/2^d) sides <- rep(sides, length=d) z <- NULL for(k in 1:nrow(m)) { m[k,] <- m[k,]*sides z1 <- matrix(rnorm(d*n1, sd=sd), ncol=d) z1 <- sweep(z1, 2, m[k,], "+") z <- rbind(z, z1) } retval <- list(x=z, classes=factor(rep(1:nrow(m), rep(n1, nrow(m))))) class(retval) <- c("mlbench.hypercube", "mlbench") retval } ## for backwards compatibility mlbench.corners <- function(...) mlbench.hypercube(...) 
###********************************************************** simplex <- function(d, sides, center = TRUE) { m <- matrix(0, d+1, d) cent <- rep(0,d) m[2,1] <- sides cent[1] <- sides/2 b <- sides/2 if(d>=2) { for(i in 2:d) { m[i+1,] <- cent m[i+1,i] <- sqrt(sides^2-b^2) cent[i] <- 1/(i+1)* m[i+1,i] b <- (1- 1/(i+1)) * m[i+1,i] } } if(center) m <- t(t(m) - cent) m } mlbench.simplex <- function (n = 800, d = 3, sides = 1, sd = 0.1, center=TRUE) { m <- simplex(d=d , sides=sides, center=center) n1 <- round(n/2^d) z <- NULL for (k in 1:nrow(m)) { z1 <- matrix(rnorm(d * n1, sd = sd), ncol = d) z1 <- sweep(z1, 2, m[k, ], "+") z <- rbind(z, z1) } retval <- list(x = z, classes = factor(rep(1:nrow(m), rep(n1, nrow(m))))) class(retval) <- c("mlbench.simplex", "mlbench") retval } ###********************************************************** bayesclass <- function(z) UseMethod("bayesclass") bayesclass.noerr <- function(z) z$classes bayesclass.mlbench.xor <- bayesclass.noerr bayesclass.mlbench.circle <- bayesclass.noerr bayesclass.mlbench.cassini <- bayesclass.noerr bayesclass.mlbench.cuboids <- bayesclass.noerr bayesclass.mlbench.2dnormals <- function(z){ ncl <- length(levels(z$classes)) z <- z$x for(k in 1:nrow(z)){ z[k,] <- z[k,] / sqrt(sum(z[k,]^2)) } winkel <- acos(z[,1] * sign(z[,2])) + pi * (z[,2]<0) winkel <- winkel - pi/ncl - pi/4 winkel[winkel < 0] <- winkel[winkel<0] + 2*pi retval <- (winkel)%/%(2 * pi/ncl) factor((retval+1)%%ncl+1) } bayesclass.mlbench.ringnorm <- function (z) { z <- z$x ndata <- dim(z)[1] ndim <- dim(z)[2] a <- 1/sqrt(ndim) center1 <- rep(0,ndim) center2 <- rep(a,ndim) m1 <- mahalanobis(z, center1, (4*diag(ndim)), inverted=FALSE) + ndim*log(4) m2 <- mahalanobis(z, center2, diag(ndim), inverted=FALSE) as.factor ((m1 > m2) +1) } bayesclass.mlbench.twonorm <- function (z) { z <- z$x ndata <- dim(z)[1] bayesclass <- integer(ndata) ndim <- dim(z)[2] a <- 2/sqrt(ndim) center1 <- rep(a,ndim) center2 <- rep(-a,ndim) for (i in 1:ndata) { dist1 <- sum((z[i, ] 
- center1) ^2) dist2 <- sum((z[i, ] - center2) ^2) bayesclass[i] <- (dist1 > dist2) +1 } as.factor(bayesclass) } ## Code by Julia Schiffner bayesclass.mlbench.threenorm <- function(z) { z <- z$x ndim <- dim(z)[2] a <- 2/sqrt(ndim) center1a <- rep(a, ndim) center1b <- rep(-a, ndim) center2 <- rep(c(a, -a), ndim/2) if ((ndim%%2) == 1) center2 <- c(center2, a) m1 <- 0.5 * exp(-0.5 * mahalanobis(z, center1a, diag(ndim), inverted = FALSE)) + 0.5 * exp(-0.5 * mahalanobis(z, center1b, diag(ndim), inverted = FALSE)) m2 <- exp(-0.5 * mahalanobis(z, center2, diag(ndim), inverted = FALSE)) as.factor((m1 < m2) + 1) } ###********************************************************** as.data.frame.mlbench <- function(x, row.names=NULL, optional=FALSE, ...) { data.frame(x=x$x, classes=x$classes) } plot.mlbench <- function(x, xlab="", ylab="", ...) { if(ncol(x$x)>2){ pairs(x$x, col=as.integer(x$classes), ...) } else{ plot(x$x, col=as.integer(x$classes), xlab=xlab, ylab=ylab, ...) } }
/mlbench/R/mlbench-class.R
no_license
ingted/R-Examples
R
false
false
12,968
r
#
# Copyright (C) 1997-2010 Friedrich Leisch
# $Id: mlbench-class.R 4612 2010-10-08 09:51:20Z leisch $
#
# Generators for the classic "mlbench" artificial classification
# benchmarks. Each mlbench.* generator returns a list with components
# x (numeric pattern matrix) and classes (factor of labels), tagged
# with S3 classes c("mlbench.<name>", "mlbench").

# Continuous XOR: uniform points in [-1,1]^d; opposite orthants
# (w.r.t. the sign of x[,1]) share a class label.
mlbench.xor <- function(n, d=2){
  x <- matrix(runif(n*d,-1,1),ncol=d,nrow=n)
  # NOTE(review): this argument check runs after x is already built;
  # checking first would avoid wasted work on bad input.
  if((d != as.integer(d)) || (d<2))
    stop("d must be an integer >=2")
  z <- rep(0, length=n)
  for(k in 1:n){
    if(x[k,1]>=0){
      tmp <- (x[k,2:d] >=0)
      z[k] <- 1+sum(tmp*2^(0:(d-2)))
    }
    else {
      # mirror the orthant pattern so diagonally opposite cells match
      tmp <- !(x[k,2:d] >=0)
      z[k] <- 1 + sum(tmp*2^(0:(d-2)))
    }
  }
  retval <- list(x=x, classes=factor(z))
  class(retval) <- c("mlbench.xor", "mlbench")
  retval
}

# Uniform cube [-1,1]^d with a centred d-ball; inside = class 1,
# outside = class 2. The radius makes the ball's volume half the cube's.
mlbench.circle <- function(n, d=2){
  x <- matrix(runif(n*d,-1,1),ncol=d,nrow=n)
  if((d != as.integer(d)) || (d<2))
    stop("d must be an integer >=2")
  z <- rep(1, length=n)
  r <- (2^(d-1) * gamma(1+d/2) / (pi^(d/2)))^(1/d)
  z[apply(x, 1, function(x) sum(x^2)) > r^2] <- 2
  retval <- list(x=x, classes=factor(z))
  class(retval) <- c("mlbench.circle", "mlbench")
  retval
}

# cl Gaussian clusters (sd `sd`) equally spaced on a circle of radius r.
mlbench.2dnormals <- function(n, cl=2, r=sqrt(cl), sd=1){
  e <- sample(0:(cl-1), size=n, replace=TRUE)
  m <- r * cbind(cos(pi/4 + e*2*pi/cl), sin(pi/4 + e*2*pi/cl))
  x <- matrix(rnorm(2*n, sd=sd), ncol=2) + m
  retval <- list(x=x, classes=factor(e+1))
  class(retval) <- c("mlbench.2dnormals", "mlbench")
  retval
}

# One Archimedean-style spiral of n points; helper for mlbench.spirals.
# Returns a plain n x 2 matrix (no class labels).
mlbench.1spiral <- function(n, cycles=1, sd=0)
{
  w <- seq(0, by=cycles/n, length=n)
  x <- matrix(0, nrow=n, ncol=2)
  x[,1] <- (2*w+1)*cos(2*pi*w)/3;
  x[,2] <- (2*w+1)*sin(2*pi*w)/3;
  if(sd>0){
    # jitter each point along the spiral's normal direction
    e <- rnorm(n, sd=sd)
    xs <- cos(2*pi*w)-pi*(2*w+1)*sin(2*pi*w)
    ys <- sin(2*pi*w)+pi*(2*w+1)*cos(2*pi*w)
    nrm <- sqrt(xs^2+ys^2)
    x[,1] <- x[,1] + e*ys/nrm
    x[,2] <- x[,2] - e*xs/nrm
  }
  x
}

# Two interleaved spirals (class 2 is class 1 rotated by 180 degrees).
mlbench.spirals <- function(n, cycles=1, sd=0)
{
  x <- matrix(0, nrow=n, ncol=2)
  c2 <- sample(1:n, size=n/2, replace=FALSE)
  cl <- factor(rep(1, length=n), levels=as.character(1:2))
  cl[c2] <- 2
  x[-c2,] <- mlbench.1spiral(n=n-length(c2), cycles=cycles, sd=sd)
  x[c2,] <- - mlbench.1spiral(n=length(c2), cycles=cycles, sd=sd)
  retval <- list(x=x, classes=cl)
  class(retval) <- c("mlbench.spirals", "mlbench")
  retval
}

# Breiman's ringnorm: class 1 ~ N(0, 4I), class 2 ~ N(a, I), a = 1/sqrt(d).
mlbench.ringnorm <- function(n, d=20)
{
  x <- matrix(0, nrow=n, ncol=d)
  c2 <- sample(1:n, size=n/2, replace=FALSE)
  cl <- factor(rep(1, length=n), levels=as.character(1:2))
  cl[c2] <- 2
  a <- 1/sqrt(d)
  x[-c2,] <- matrix(rnorm(n=d*(n-length(c2)), sd=2), ncol=d)
  x[c2,] <- matrix(rnorm(n=d*length(c2), mean=a), ncol=d)
  retval <- list(x=x, classes=cl)
  class(retval) <- c("mlbench.ringnorm", "mlbench")
  retval
}

# Breiman's twonorm: two unit-covariance Gaussians at +/- a, a = 2/sqrt(d).
mlbench.twonorm <- function (n, d = 20)
{
  x <- matrix(0, nrow = n, ncol = d)
  c2 <- sample(1:n, size = n/2, replace = FALSE)
  cl <- factor(rep(1, length = n), levels = as.character(1:2))
  cl[c2] <- 2
  a <- 2/sqrt(d)
  x[-c2, ] <- matrix(rnorm(n = d * (n - length(c2)), mean = a, sd = 1), ncol = d)
  x[c2, ] <- matrix(rnorm(n = d * length(c2), mean = -a), ncol = d)
  retval <- list(x = x, classes = cl)
  class(retval) <- c("mlbench.twonorm", "mlbench")
  retval
}

# Breiman's threenorm: class 1 is a 50/50 mixture of N(a,I) and N(-a,I);
# class 2 has mean alternating (a, -a, a, ...) per coordinate.
mlbench.threenorm <- function (n, d = 20)
{
  x <- matrix(0, nrow = n, ncol = d)
  c2 <- sample(1:n, size = n/2, replace = FALSE)
  cl <- factor(rep(1, length = n), levels = as.character(1:2))
  cl[c2] <- 2
  c1 <- (1:n)[-c2]
  a <- 2/sqrt(d)
  for (i in c1) {
    # coin flip decides which mixture component generates this point
    distr <- as.logical(round(runif(1,0,1)))
    if ( distr )
      x[i, ] <- rnorm(n = d, mean = a)
    else
      x[i, ] <- rnorm(n = d, mean = -a)
  }
  m <- rep(c(a, -a), d/2)
  if ((d %% 2)==1) m <- c(m, a)
  x[c2, ] <- matrix(rnorm(n = d * length(c2), mean = m), ncol = d, byrow=TRUE)
  retval <- list(x = x, classes = cl)
  class(retval) <- c("mlbench.threenorm", "mlbench")
  retval
}

# 21-dimensional waveform problem; delegates generation to compiled C
# code shipped with the mlbench package (requires the loaded package).
mlbench.waveform <- function (n)
{
  Rnuminstances <- n
  retval <- .C("waveform",
               Rnuminstances = as.integer(Rnuminstances),
               x = double(21*n),
               type = integer(n),
               PACKAGE = "mlbench")
  x <- matrix (retval$x, ncol=21, byrow = TRUE)
  retval <- list (x=x, classes=as.factor(retval$type+1))
  class(retval) <- c("mlbench.waveform","mlbench")
  return(retval)
}

# Two "banana"-shaped regions between a Cassini oval and an ellipse
# (classes 1 and 2) plus a small central circle (class 3), filled by
# rejection sampling. relsize gives the relative sizes of the pieces.
mlbench.cassini <- function(n,relsize=c(2,2,1))
{
  # lower/upper y-values of the Cassini oval boundary at abscissa x
  cassinib <- function(x, a, c)
  {
    y <- numeric(2)
    y[1] <- -sqrt(-c^2 - x^2 + sqrt(a^4 + 4*c^2*x^2))
    y[2] <- sqrt(-c^2 - x^2 + sqrt(a^4 + 4*c^2*x^2))
    y
  }
  circle <- function(x, r)
    sqrt(r^2-x^2)
  big1<-relsize[1]
  big2<-relsize[2]
  small<-relsize[3]
  parts<-big1+big2+small
  npiece<-n/parts
  n1<-round(big1*npiece)
  n2<-round(big2*npiece)
  n3<-round(small*npiece)
  # rounding may lose one point; pad the small piece so counts sum to n
  if ((n1+n2+n3)!=n)
    n3<-n3+1
  # fixed geometry constants of the three regions
  a<-1
  C<-0.97
  Cell<-sqrt((1+C^2)/3)
  aell <- Cell*sqrt(2)
  transl <- 1.1
  r <- 0.6
  tmima1<-matrix(0,ncol=2,nrow=n1)
  tmima2<-matrix(0,ncol=2,nrow=n2)
  tmima3<-matrix(0,ncol=2,nrow=n3)
  # rejection sampling: lower banana (class 1)
  n1found <- 0
  while(n1found < n1)
  {
    x1 <- runif(1,min=-sqrt(a^2+C^2),max=sqrt(a^2+C^2))
    y1 <- runif(1,min=-transl-1,max=-transl+0.6)
    if ((y1 < cassinib(x1,a,C)[2]-transl) &&
        (y1 > cassinib(x1,aell,Cell)[1]-transl))
    {
      n1found <- n1found +1
      tmima1[n1found,]<-c(x1,y1)
    }
  }
  # rejection sampling: upper banana (class 2)
  n2found <- 0
  while(n2found < n2)
  {
    x2 <- runif(1,min=-sqrt(a^2+C^2),max=sqrt(a^2+C^2))
    y2 <- runif(1,max= transl+1,min=transl-0.6)
    if ((y2 > cassinib(x2,a,C)[1]+transl) &&
        (y2 < cassinib(x2,aell,Cell)[2]+transl))
    {
      n2found <- n2found +1
      tmima2[n2found,]<-c(x2,y2)
    }
  }
  # rejection sampling: central circle (class 3)
  n3found <- 0
  while(n3found < n3)
  {
    x3<-runif(1,min=-r,max=r)
    y3<-runif(1,min=-r,max=r)
    if ((y3 > -circle(x3,r)) && (y3 < circle(x3,r)))
    {
      n3found <- n3found +1
      tmima3[n3found,]<-c(x3,y3)
    }
  }
  teliko <- rbind(tmima1,tmima2,tmima3)
  cl <- factor(c(rep(1,n1),rep(2,n2),rep(3,n3)))
  retval<-list(x=teliko,classes=cl)
  class(retval) <- c("mlbench.cassini","mlbench")
  retval
}

# Three large axis-aligned cuboids plus a small central cube in the unit
# cube, uniformly filled; relsize gives the relative piece sizes.
mlbench.cuboids <- function (n, relsize=c(2,2,2,1))
{
  big1 <- relsize[1]
  big2 <- relsize[2]
  big3 <- relsize[3]
  small <- relsize[4]
  # NOTE(review): "big2++big3" parses as big2 + (+big3), i.e. an
  # ordinary sum; the doubled "+" is harmless but looks like a typo.
  parts<-big1+big2++big3+small
  npiece<-n/parts
  n1<-round(big1*npiece)
  n2<-round(big2*npiece)
  n3<-round(big3*npiece)
  n4<-round(small*npiece)
  # pad the small piece so the counts sum to exactly n
  if ((n1+n2+n3+n4)!=n)
    n4<-n4+1
  x1 <- cbind(runif(n1,min=0,max=1),runif(n1,min=0.75,max=1.0),runif(n1,min=0.75,max=1))
  x2 <- cbind(runif(n2,min=0.75,max=1.0),runif(n2,min=0,max=0.25),runif(n2,min=0,max=1))
  x3 <- cbind(runif(n3,min=0.0,max=0.25),runif(n3,min=0.0,max=1),runif(n3,min=0,max=0.25))
  x4 <- cbind(runif(n4,min=0.4,max=0.6),runif(n4,min=0.4,max=0.6),runif(n4,min=0.4,max=0.6))
  x<-rbind(x1,x2,x3,x4)
  retval <-list(x=x,classes=factor(c(rep(1,n1),rep(2,n2), rep(3,n3),rep(4,n4))))
  class(retval) <- c("mlbench.cuboids","mlbench")
  return(retval)
}

# 2D "smiley face": two Gaussian eyes, a trapezoid nose, a parabolic
# mouth; one class per facial feature.
mlbench.smiley <- function(n=500, sd1=.1, sd2=.05)
{
  n1 <- round(n/6)
  n2 <- round(n/4)
  n3 <- n - 2 * n1 - n2
  x1 <- cbind(rnorm(n1, -.8, sd1), rnorm(n1, 1, sd1))   # left eye
  x2 <- cbind(rnorm(n1, .8, sd1), rnorm(n1, 1, sd1))    # right eye
  x3 <- cbind(runif(n2, -.2, .2), runif(n2, 0, .75))    # nose
  x3[,1] <- x3[,1]*(1-x3[,2])                           # taper towards top
  x4 <- runif(n3, -1, 1)                                # mouth
  x4 <- cbind(x4, x4^2 - 1 + rnorm(n3, 0, sd2))
  # NOTE(review): the extra "x <-" is redundant (x is never used again)
  # but harmless.
  x <- retval <- list(x = rbind(x1, x2, x3, x4),
                      classes=factor(c(rep(1,n1),rep(2,n1),rep(3,n2),rep(4,n3))))
  class(retval) <- c("mlbench.smiley", "mlbench")
  retval
}

# 2D "shapes": a Gaussian blob, a uniform rectangle, a triangle and a
# wave; one class per shape.
mlbench.shapes <- function(n=500)
{
  n1 <- round(n/4)
  n2 <- n-3*n1
  x1 <- cbind(rnorm(n1, -1, .2), rnorm(n1, 1.5, .2))    # Gaussian blob
  x2 <- cbind(runif(n1, -1.5, -0.5), runif(n1, -2, 0))  # rectangle
  x3 <- cbind(runif(n1, -1, 1), runif(n1, 1, 2))        # triangle
  x3[,1] <- x3[,1]*(2-x3[,2])+1
  x4 <- runif(n2, 0.5, 2)                               # wave
  x4 <- cbind(x4, cos(4*x4)-x4+runif(n2,-.2,.2))
  retval <- list(x = rbind(x1, x2, x3, x4),
                 classes=factor(c(rep(1,n1),rep(2,n1),rep(3,n1),rep(4,n2))))
  class(retval) <- c("mlbench.shapes", "mlbench")
  retval
}

###**********************************************************
## Originally bincombinations in e1071
# All 2^d binary d-vectors, i.e. the corners of the unit hypercube,
# one corner per row.
hypercube <- function(d)
{
  retval <- matrix(0, nrow=2^d, ncol=d)
  for(n in 1:d){
    retval[,n] <- rep(c(rep(0, (2^d/2^n)), rep(1, (2^d/2^n))),
                      length=2^d)
  }
  retval
}

# Gaussian clusters (sd `sd`) centred on the 2^d corners of a
# d-dimensional hypercube with the given side lengths; one class per corner.
mlbench.hypercube <- function(n=800, d=3, sides=rep(1,d), sd=0.1)
{
  m <- hypercube(d)
  n1 <- round(n/2^d)  # points per corner
  sides <- rep(sides, length=d)
  z <- NULL
  for(k in 1:nrow(m)) {
    m[k,] <- m[k,]*sides
    z1 <- matrix(rnorm(d*n1, sd=sd), ncol=d)
    z1 <- sweep(z1, 2, m[k,], "+")
    z <- rbind(z, z1)
  }
  retval <- list(x=z, classes=factor(rep(1:nrow(m), rep(n1, nrow(m)))))
  class(retval) <- c("mlbench.hypercube", "mlbench")
  retval
}

## for backwards compatibility
mlbench.corners <- function(...) mlbench.hypercube(...)
###**********************************************************

# Corner coordinates of a d-dimensional regular simplex with side length
# `sides`. Returns a (d+1) x d matrix, one vertex per row; if `center`
# is TRUE the simplex is translated so its centroid lies at the origin.
simplex <- function(d, sides, center = TRUE) {
  m <- matrix(0, d + 1, d)
  cent <- rep(0, d)
  m[2, 1] <- sides
  cent[1] <- sides / 2
  b <- sides / 2
  if (d >= 2) {
    for (i in 2:d) {
      # each new vertex sits above the centroid of the previous face
      m[i + 1, ] <- cent
      m[i + 1, i] <- sqrt(sides^2 - b^2)
      cent[i] <- 1 / (i + 1) * m[i + 1, i]
      b <- (1 - 1 / (i + 1)) * m[i + 1, i]
    }
  }
  if (center) m <- t(t(m) - cent)
  m
}

# Gaussian clusters (sd `sd`) centred on the d+1 corners of a regular
# d-dimensional simplex; one class per corner, ~n points in total.
#
# BUG FIX: the per-corner count was round(n / 2^d), copied from
# mlbench.hypercube (2^d corners). A simplex has d+1 corners, so the old
# code returned (d+1) * round(n / 2^d) points -- e.g. 400 instead of 800
# for the defaults n = 800, d = 3. Use the actual number of corners.
mlbench.simplex <- function(n = 800, d = 3, sides = 1, sd = 0.1, center = TRUE) {
  m <- simplex(d = d, sides = sides, center = center)
  n1 <- round(n / nrow(m))  # points per corner; nrow(m) == d + 1
  z <- NULL
  for (k in 1:nrow(m)) {
    z1 <- matrix(rnorm(d * n1, sd = sd), ncol = d)
    z1 <- sweep(z1, 2, m[k, ], "+")
    z <- rbind(z, z1)
  }
  retval <- list(x = z,
                 classes = factor(rep(1:nrow(m), rep(n1, nrow(m)))))
  class(retval) <- c("mlbench.simplex", "mlbench")
  retval
}

###**********************************************************

# Bayes-optimal classifier for mlbench problem objects (S3 generic).
bayesclass <- function(z) UseMethod("bayesclass")

# Problems without class noise: the generating labels are already optimal.
bayesclass.noerr <- function(z) z$classes
bayesclass.mlbench.xor <- bayesclass.noerr
bayesclass.mlbench.circle <- bayesclass.noerr
bayesclass.mlbench.cassini <- bayesclass.noerr
bayesclass.mlbench.cuboids <- bayesclass.noerr

# Optimal decision by angular sector around the origin.
bayesclass.mlbench.2dnormals <- function(z) {
  ncl <- length(levels(z$classes))
  z <- z$x
  for (k in 1:nrow(z)) {
    z[k, ] <- z[k, ] / sqrt(sum(z[k, ]^2))  # project onto the unit circle
  }
  winkel <- acos(z[, 1] * sign(z[, 2])) + pi * (z[, 2] < 0)
  winkel <- winkel - pi / ncl - pi / 4
  winkel[winkel < 0] <- winkel[winkel < 0] + 2 * pi
  retval <- (winkel) %/% (2 * pi / ncl)
  factor((retval + 1) %% ncl + 1)
}

# Compare (log-)densities of the two generating Gaussians.
bayesclass.mlbench.ringnorm <- function(z) {
  z <- z$x
  ndata <- dim(z)[1]
  ndim <- dim(z)[2]
  a <- 1 / sqrt(ndim)
  center1 <- rep(0, ndim)
  center2 <- rep(a, ndim)
  m1 <- mahalanobis(z, center1, (4 * diag(ndim)), inverted = FALSE) +
    ndim * log(4)
  m2 <- mahalanobis(z, center2, diag(ndim), inverted = FALSE)
  as.factor((m1 > m2) + 1)
}

# Nearest of the two class centres (equal spherical covariances).
bayesclass.mlbench.twonorm <- function(z) {
  z <- z$x
  ndata <- dim(z)[1]
  bayesclass <- integer(ndata)
  ndim <- dim(z)[2]
  a <- 2 / sqrt(ndim)
  center1 <- rep(a, ndim)
  center2 <- rep(-a, ndim)
  for (i in 1:ndata) {
    dist1 <- sum((z[i, ] - center1)^2)
    dist2 <- sum((z[i, ] - center2)^2)
    bayesclass[i] <- (dist1 > dist2) + 1
  }
  as.factor(bayesclass)
}

## Code by Julia Schiffner
# Class 1 is a 50/50 mixture of two Gaussians; compare mixture densities.
bayesclass.mlbench.threenorm <- function(z) {
  z <- z$x
  ndim <- dim(z)[2]
  a <- 2 / sqrt(ndim)
  center1a <- rep(a, ndim)
  center1b <- rep(-a, ndim)
  center2 <- rep(c(a, -a), ndim / 2)
  if ((ndim %% 2) == 1) center2 <- c(center2, a)
  m1 <- 0.5 * exp(-0.5 * mahalanobis(z, center1a, diag(ndim), inverted = FALSE)) +
    0.5 * exp(-0.5 * mahalanobis(z, center1b, diag(ndim), inverted = FALSE))
  m2 <- exp(-0.5 * mahalanobis(z, center2, diag(ndim), inverted = FALSE))
  as.factor((m1 < m2) + 1)
}

###**********************************************************

# Flatten an mlbench object into a data frame (x.* columns + classes).
as.data.frame.mlbench <- function(x, row.names = NULL, optional = FALSE, ...) {
  data.frame(x = x$x, classes = x$classes)
}

# Scatterplot (or pairs plot for >2 dimensions) coloured by class.
plot.mlbench <- function(x, xlab = "", ylab = "", ...) {
  if (ncol(x$x) > 2) {
    pairs(x$x, col = as.integer(x$classes), ...)
  } else {
    plot(x$x, col = as.integer(x$classes), xlab = xlab, ylab = ylab, ...)
  }
}
#!/applications/R/R-3.5.0/bin/Rscript

# For all three wheat subgenomes, load and plot as bargraphs results from
# hypergeometric tests to determine whether each
# NLR-encoding gene quantile is over-represented or under-represented for
# NLRs that are not part of an NLR cluster
# (e.g., is the proportion of NLR genes within a given NLR gene quantile that
# are members of an NLR cluster significantly greater or smaller than expected by chance
# based on the hypergeometric distribution?)

# P-value is the probability of drawing >= length(quantile_clust) [x] features
# in a sample size of length(quantile_genes) [k] from a total feature set consisting of
# length(genome_clust) [m] + ( length(genome_genes) - length(genome_clust)) [n]

# Usage
# ./proportion_not_clustered_in_NLR_quantiles_hypergeometricTest_bargraph_only.R 'cMMb' 'genes' 1 4 'genomewide' 100000 'grey20,turquoise4,turquoise3,turquoise1'

library(methods)
library(plotrix)
library(ggplot2)
library(ggbeeswarm)
library(ggthemes)
library(grid)
library(gridExtra)
library(extrafont)

# Example values for interactive use:
#libName <- "cMMb"
#featRegion <- "genes"
#quantileFirst <- 1
#quantileLast <- 4
#region <- "genomewide"
#samplesNum <- 100000
#genomeColours <- unlist(strsplit('grey20,turquoise4,turquoise3,turquoise1', split = ","))

# Command-line arguments (see Usage above).
args <- commandArgs(trailingOnly = TRUE)
libName <- args[1]
featRegion <- args[2]
quantileFirst <- as.integer(args[3])
quantileLast <- as.integer(args[4])
region <- args[5]
samplesNum <- as.numeric(args[6])
genomeColours <- unlist(strsplit(args[7], split = ","))

# Define quantile colours
quantileColours <- c("red", "purple", "blue", "navy")

# Return `thisColour` with the given alpha channel applied (0-255).
makeTransparent <- function(thisColour, alpha = 250)
{
  newColour <- col2rgb(thisColour)
  apply(newColour, 2, function(x) {
    rgb(red = x[1], green = x[2], blue = x[3],
        alpha = alpha, maxColorValue = 255)
  })
}

# Input/output directory depends on how the quantiles were defined.
if(libName %in% c("cMMb", "HudsonRM_all")) {
  outDir <- paste0("quantiles_by_", libName,
                   "/hypergeometricTests/")
} else {
  outDir <- paste0("quantiles_by_", sub("_\\w+", "", libName),
                   "_in_", featRegion, "/hypergeometricTests/")
}
plotDir <- paste0(outDir, "plots/combined_bargraph/")
# create the directories if they do not exist yet
system(paste0("[ -d ", outDir, " ] || mkdir ", outDir))
system(paste0("[ -d ", plotDir, " ] || mkdir ", plotDir))

options(scipen = 100)

# Plot bar graph summarising permutation test results
genomeNames <- c("Agenome_Bgenome_Dgenome", "Agenome", "Bgenome", "Dgenome")

# Load the per-quantile hypergeometric test result objects
# (each .RData file defines `hgTestResults`) for each (sub)genome.
hg_list <- lapply(seq_along(genomeNames), function(y) {
  hg_list_quantile <- list()
  for(z in quantileFirst:quantileLast) {
    if(libName %in% c("cMMb", "HudsonRM_all")) {
      load(paste0(outDir,
                  "NLR_not_clustered_gene_representation_among_quantile", z,
                  "_of_", quantileLast,
                  "_by_", libName,
                  "_of_NLR_genes_in_", genomeNames[y],
                  "_", region,
                  "_hypergeomTestRes.RData"))
    } else {
      load(paste0(outDir,
                  "NLR_not_clustered_gene_representation_among_quantile", z,
                  "_of_", quantileLast,
                  "_by_log2_", libName, "_control_in_", featRegion,
                  "_of_NLR_genes_in_", genomeNames[y],
                  "_", region,
                  "_hypergeomTestRes.RData"))
    }
    hg_list_quantile <- c(hg_list_quantile, hgTestResults)
  }
  return(hg_list_quantile)
})

# Assemble plotting data: one row per (sub)genome x quantile, with the
# observed/expected log2 ratio and the alpha = 0.05 threshold.
bargraph_df <- data.frame(Subgenome = rep(c("All genomes", "A genome", "B genome", "D genome"),
                                          each = quantileLast),
                          Quantile = rep(paste0("Quantile ", quantileFirst:quantileLast), 4),
                          log2ObsExp = c(sapply(seq_along(genomeNames), function(y) {
                            sapply(seq_along(hg_list[[y]]), function(x) {
                              hg_list[[y]][[x]]@log2obsexp
                            })
                          })),
                          log2alpha0.05 = c(sapply(seq_along(genomeNames), function(y) {
                            sapply(seq_along(hg_list[[y]]), function(x) {
                              hg_list[[y]][[x]]@log2alpha
                            })
                          })))
# Fix factor level order for plotting.
bargraph_df$Quantile <- factor(bargraph_df$Quantile,
                               levels = paste0("Quantile ", quantileFirst:quantileLast))
bargraph_df$Subgenome <- factor(bargraph_df$Subgenome,
                                levels = c("All genomes", "A genome", "B genome", "D genome"))

# Grouped bargraph: bars = log2(obs/exp) per subgenome, grey dashes mark
# the alpha = 0.05 thresholds, coloured underlines identify each quantile.
bp <- ggplot(data = bargraph_df,
             mapping = aes(x = Quantile, y = log2ObsExp, fill = Subgenome)) +
  geom_bar(stat = "identity", position = position_dodge()) +
  scale_fill_manual(name = "",
                    values = genomeColours,
                    labels = levels(bargraph_df$Subgenome)) +
  geom_point(mapping = aes(x = Quantile, y = log2alpha0.05),
             position = position_dodge(0.9),
             shape = "-", colour = "grey80", size = 20) +
  geom_segment(mapping = aes(x = 0.55,
                             y = min(c(bargraph_df$log2ObsExp, bargraph_df$log2alpha0.05))-0.05,
                             xend = 1.45,
                             yend = min(c(bargraph_df$log2ObsExp, bargraph_df$log2alpha0.05))-0.05),
               colour = quantileColours[1],
               inherit.aes = F, size = 5) +
  geom_segment(mapping = aes(x = 1.55,
                             y = min(c(bargraph_df$log2ObsExp, bargraph_df$log2alpha0.05))-0.05,
                             xend = 2.45,
                             yend = min(c(bargraph_df$log2ObsExp, bargraph_df$log2alpha0.05))-0.05),
               colour = quantileColours[2],
               inherit.aes = F, size = 5) +
  geom_segment(mapping = aes(x = 2.55,
                             y = min(c(bargraph_df$log2ObsExp, bargraph_df$log2alpha0.05))-0.05,
                             xend = 3.45,
                             yend = min(c(bargraph_df$log2ObsExp, bargraph_df$log2alpha0.05))-0.05),
               colour = quantileColours[3],
               inherit.aes = F, size = 5) +
  geom_segment(mapping = aes(x = 3.55,
                             y = min(c(bargraph_df$log2ObsExp, bargraph_df$log2alpha0.05))-0.05,
                             xend = 4.45,
                             yend = min(c(bargraph_df$log2ObsExp, bargraph_df$log2alpha0.05))-0.05),
               colour = quantileColours[4],
               inherit.aes = F, size = 5) +
  labs(y = bquote("Log"[2]*"(observed/expected) genes in quantile")) +
  scale_y_continuous(limits = c(-0.75, 0.75)) +
#  scale_y_continuous(limits = c(-1.5, 1.5)) +
  scale_x_discrete(position = "bottom") +
  guides(fill = guide_legend(direction = "horizontal",
                             label.position = "top",
                             label.theme = element_text(size = 20, hjust = 0, vjust = 0.5, angle = 90),
                             nrow = 1,
                             byrow = TRUE)) +
  theme_bw() +
  theme(axis.line.y = element_line(size = 1, colour = "black"),
        axis.ticks.y = element_line(size = 1, colour = "black"),
        axis.text.y = element_text(size = 25, colour = "black", hjust = 0.5, vjust = 0.5, angle = 90),
        axis.title.y = element_text(size = 25, colour = "black"),
        axis.ticks.x = element_blank(),
        axis.text.x = element_text(size = 32, colour = "black", hjust = 0.5, vjust = 0.5, angle = 180),
        axis.title.x = element_blank(),
        panel.grid = element_blank(),
        panel.border = element_blank(),
        panel.background = element_blank(),
        #legend.position = "none",
        #legend.position = c(0.05, 0.30),
        legend.background = element_rect(fill = "transparent"),
        legend.key = element_rect(colour = "transparent", fill = "transparent"),
        plot.margin = unit(c(5.5, 5.5, 40.5, 5.5), "pt"),
        plot.title = element_text(size = 24, colour = "black", hjust = 0.5)) +
  ggtitle(bquote("NLRs not in an NLR cluster in" ~
                 .(sub("_\\w+$", "", libName)) ~ "quantiles" ~
                 "(" * .(featRegion) * ")" ~
                 "(" * .(prettyNum(samplesNum, big.mark = ",", trim = T)) ~ "samples)"))

# Save the bargraph; the output file name mirrors the input naming scheme.
if(libName %in% c("cMMb", "HudsonRM_all")) {
  ggsave(paste0(plotDir,
                "combined_bargraph_NLR_not_clustered_gene_representation_among_", quantileLast,
                "quantiles_by_", libName,
                "_of_NLR_genes_in_each_subgenome_", region,
                "_hypergeomTestRes.pdf"),
         plot = bp,
         height = 8, width = 16)
#         height = 8, width = 12)
} else {
  ggsave(paste0(plotDir,
                "combined_bargraph_NLR_not_clustered_gene_representation_among_", quantileLast,
                "quantiles_by_log2_", libName, "_control_in_", featRegion,
                "_of_NLR_genes_in_each_subgenome_", region,
                "_hypergeomTestRes.pdf"),
         plot = bp,
         height = 8, width = 16)
#         height = 8, width = 12)
}
/ASY1_CS/snakemake_ChIPseq/mapped/geneProfiles_subgenomes/NLR_quantiles/proportion_not_clustered_in_NLR_quantiles_hypergeometricTest_bargraph_only.R
no_license
ajtock/wheat
R
false
false
8,805
r
#!/applications/R/R-3.5.0/bin/Rscript

# For all three wheat subgenomes, load and plot as bargraphs results from
# hypergeometric tests to determine whether each
# NLR-encoding gene quantile is over-represented or under-represented for
# NLRs that are not part of an NLR cluster
# (e.g., is the proportion of NLR genes within a given NLR gene quantile that
# are members of an NLR cluster significantly greater or smaller than expected by chance
# based on the hypergeometric distribution?)

# P-value is the probability of drawing >= length(quantile_clust) [x] features
# in a sample size of length(quantile_genes) [k] from a total feature set consisting of
# length(genome_clust) [m] + ( length(genome_genes) - length(genome_clust)) [n]

# Usage
# ./proportion_not_clustered_in_NLR_quantiles_hypergeometricTest_bargraph_only.R 'cMMb' 'genes' 1 4 'genomewide' 100000 'grey20,turquoise4,turquoise3,turquoise1'

library(methods)
library(plotrix)
library(ggplot2)
library(ggbeeswarm)
library(ggthemes)
library(grid)
library(gridExtra)
library(extrafont)

# Example values for interactive use:
#libName <- "cMMb"
#featRegion <- "genes"
#quantileFirst <- 1
#quantileLast <- 4
#region <- "genomewide"
#samplesNum <- 100000
#genomeColours <- unlist(strsplit('grey20,turquoise4,turquoise3,turquoise1', split = ","))

# Command-line arguments (see Usage above).
args <- commandArgs(trailingOnly = TRUE)
libName <- args[1]
featRegion <- args[2]
quantileFirst <- as.integer(args[3])
quantileLast <- as.integer(args[4])
region <- args[5]
samplesNum <- as.numeric(args[6])
genomeColours <- unlist(strsplit(args[7], split = ","))

# Define quantile colours
quantileColours <- c("red", "purple", "blue", "navy")

# Return `thisColour` with the given alpha channel applied (0-255).
makeTransparent <- function(thisColour, alpha = 250)
{
  newColour <- col2rgb(thisColour)
  apply(newColour, 2, function(x) {
    rgb(red = x[1], green = x[2], blue = x[3],
        alpha = alpha, maxColorValue = 255)
  })
}

# Input/output directory depends on how the quantiles were defined.
if(libName %in% c("cMMb", "HudsonRM_all")) {
  outDir <- paste0("quantiles_by_", libName,
                   "/hypergeometricTests/")
} else {
  outDir <- paste0("quantiles_by_", sub("_\\w+", "", libName),
                   "_in_", featRegion, "/hypergeometricTests/")
}
plotDir <- paste0(outDir, "plots/combined_bargraph/")
# create the directories if they do not exist yet
system(paste0("[ -d ", outDir, " ] || mkdir ", outDir))
system(paste0("[ -d ", plotDir, " ] || mkdir ", plotDir))

options(scipen = 100)

# Plot bar graph summarising permutation test results
genomeNames <- c("Agenome_Bgenome_Dgenome", "Agenome", "Bgenome", "Dgenome")

# Load the per-quantile hypergeometric test result objects
# (each .RData file defines `hgTestResults`) for each (sub)genome.
hg_list <- lapply(seq_along(genomeNames), function(y) {
  hg_list_quantile <- list()
  for(z in quantileFirst:quantileLast) {
    if(libName %in% c("cMMb", "HudsonRM_all")) {
      load(paste0(outDir,
                  "NLR_not_clustered_gene_representation_among_quantile", z,
                  "_of_", quantileLast,
                  "_by_", libName,
                  "_of_NLR_genes_in_", genomeNames[y],
                  "_", region,
                  "_hypergeomTestRes.RData"))
    } else {
      load(paste0(outDir,
                  "NLR_not_clustered_gene_representation_among_quantile", z,
                  "_of_", quantileLast,
                  "_by_log2_", libName, "_control_in_", featRegion,
                  "_of_NLR_genes_in_", genomeNames[y],
                  "_", region,
                  "_hypergeomTestRes.RData"))
    }
    hg_list_quantile <- c(hg_list_quantile, hgTestResults)
  }
  return(hg_list_quantile)
})

# Assemble plotting data: one row per (sub)genome x quantile, with the
# observed/expected log2 ratio and the alpha = 0.05 threshold.
bargraph_df <- data.frame(Subgenome = rep(c("All genomes", "A genome", "B genome", "D genome"),
                                          each = quantileLast),
                          Quantile = rep(paste0("Quantile ", quantileFirst:quantileLast), 4),
                          log2ObsExp = c(sapply(seq_along(genomeNames), function(y) {
                            sapply(seq_along(hg_list[[y]]), function(x) {
                              hg_list[[y]][[x]]@log2obsexp
                            })
                          })),
                          log2alpha0.05 = c(sapply(seq_along(genomeNames), function(y) {
                            sapply(seq_along(hg_list[[y]]), function(x) {
                              hg_list[[y]][[x]]@log2alpha
                            })
                          })))
# Fix factor level order for plotting.
bargraph_df$Quantile <- factor(bargraph_df$Quantile,
                               levels = paste0("Quantile ", quantileFirst:quantileLast))
bargraph_df$Subgenome <- factor(bargraph_df$Subgenome,
                                levels = c("All genomes", "A genome", "B genome", "D genome"))

# Grouped bargraph: bars = log2(obs/exp) per subgenome, grey dashes mark
# the alpha = 0.05 thresholds, coloured underlines identify each quantile.
bp <- ggplot(data = bargraph_df,
             mapping = aes(x = Quantile, y = log2ObsExp, fill = Subgenome)) +
  geom_bar(stat = "identity", position = position_dodge()) +
  scale_fill_manual(name = "",
                    values = genomeColours,
                    labels = levels(bargraph_df$Subgenome)) +
  geom_point(mapping = aes(x = Quantile, y = log2alpha0.05),
             position = position_dodge(0.9),
             shape = "-", colour = "grey80", size = 20) +
  geom_segment(mapping = aes(x = 0.55,
                             y = min(c(bargraph_df$log2ObsExp, bargraph_df$log2alpha0.05))-0.05,
                             xend = 1.45,
                             yend = min(c(bargraph_df$log2ObsExp, bargraph_df$log2alpha0.05))-0.05),
               colour = quantileColours[1],
               inherit.aes = F, size = 5) +
  geom_segment(mapping = aes(x = 1.55,
                             y = min(c(bargraph_df$log2ObsExp, bargraph_df$log2alpha0.05))-0.05,
                             xend = 2.45,
                             yend = min(c(bargraph_df$log2ObsExp, bargraph_df$log2alpha0.05))-0.05),
               colour = quantileColours[2],
               inherit.aes = F, size = 5) +
  geom_segment(mapping = aes(x = 2.55,
                             y = min(c(bargraph_df$log2ObsExp, bargraph_df$log2alpha0.05))-0.05,
                             xend = 3.45,
                             yend = min(c(bargraph_df$log2ObsExp, bargraph_df$log2alpha0.05))-0.05),
               colour = quantileColours[3],
               inherit.aes = F, size = 5) +
  geom_segment(mapping = aes(x = 3.55,
                             y = min(c(bargraph_df$log2ObsExp, bargraph_df$log2alpha0.05))-0.05,
                             xend = 4.45,
                             yend = min(c(bargraph_df$log2ObsExp, bargraph_df$log2alpha0.05))-0.05),
               colour = quantileColours[4],
               inherit.aes = F, size = 5) +
  labs(y = bquote("Log"[2]*"(observed/expected) genes in quantile")) +
  scale_y_continuous(limits = c(-0.75, 0.75)) +
#  scale_y_continuous(limits = c(-1.5, 1.5)) +
  scale_x_discrete(position = "bottom") +
  guides(fill = guide_legend(direction = "horizontal",
                             label.position = "top",
                             label.theme = element_text(size = 20, hjust = 0, vjust = 0.5, angle = 90),
                             nrow = 1,
                             byrow = TRUE)) +
  theme_bw() +
  theme(axis.line.y = element_line(size = 1, colour = "black"),
        axis.ticks.y = element_line(size = 1, colour = "black"),
        axis.text.y = element_text(size = 25, colour = "black", hjust = 0.5, vjust = 0.5, angle = 90),
        axis.title.y = element_text(size = 25, colour = "black"),
        axis.ticks.x = element_blank(),
        axis.text.x = element_text(size = 32, colour = "black", hjust = 0.5, vjust = 0.5, angle = 180),
        axis.title.x = element_blank(),
        panel.grid = element_blank(),
        panel.border = element_blank(),
        panel.background = element_blank(),
        #legend.position = "none",
        #legend.position = c(0.05, 0.30),
        legend.background = element_rect(fill = "transparent"),
        legend.key = element_rect(colour = "transparent", fill = "transparent"),
        plot.margin = unit(c(5.5, 5.5, 40.5, 5.5), "pt"),
        plot.title = element_text(size = 24, colour = "black", hjust = 0.5)) +
  ggtitle(bquote("NLRs not in an NLR cluster in" ~
                 .(sub("_\\w+$", "", libName)) ~ "quantiles" ~
                 "(" * .(featRegion) * ")" ~
                 "(" * .(prettyNum(samplesNum, big.mark = ",", trim = T)) ~ "samples)"))

# Save the bargraph; the output file name mirrors the input naming scheme.
if(libName %in% c("cMMb", "HudsonRM_all")) {
  ggsave(paste0(plotDir,
                "combined_bargraph_NLR_not_clustered_gene_representation_among_", quantileLast,
                "quantiles_by_", libName,
                "_of_NLR_genes_in_each_subgenome_", region,
                "_hypergeomTestRes.pdf"),
         plot = bp,
         height = 8, width = 16)
#         height = 8, width = 12)
} else {
  ggsave(paste0(plotDir,
                "combined_bargraph_NLR_not_clustered_gene_representation_among_", quantileLast,
                "quantiles_by_log2_", libName, "_control_in_", featRegion,
                "_of_NLR_genes_in_each_subgenome_", region,
                "_hypergeomTestRes.pdf"),
         plot = bp,
         height = 8, width = 16)
#         height = 8, width = 12)
}
% Generated by roxygen2 (4.1.1): do not edit by hand % Please edit documentation in R/data.R \name{fcl} \alias{fcl} \title{FCL items description.} \format{A data frame with 776 observations of 6 variables. \describe{ \item{fcl}{FCL-code of item, numeric.} }} \description{ FCL items description. }
/man/fcl.Rd
no_license
malexan/fclhs
R
false
false
302
rd
% Generated by roxygen2 (4.1.1): do not edit by hand % Please edit documentation in R/data.R \name{fcl} \alias{fcl} \title{FCL items description.} \format{A data frame with 776 observations of 6 variables. \describe{ \item{fcl}{FCL-code of item, numeric.} }} \description{ FCL items description. }
# Plot6.R -- compare motor-vehicle-related PM2.5 emissions over time in
# Baltimore City (fips 24510) and Los Angeles County (fips 06037), and
# save the comparison plot to Plot6.png.

library(data.table)
library(ggplot2)  # BUG FIX: qplot() is used below but ggplot2 was never loaded

# Unpack and load the NEI emissions data and the source classification table.
unzip("data/exdata-data-NEI_data.zip", exdir = "data")
NEI <- readRDS("data//summarySCC_PM25.rds")
SCC <- readRDS("data//Source_Classification_Code.rds")

# Identify motor-related source classification codes by name.
motorRowsFilter <- grep("motor", SCC$Short.Name, ignore.case = TRUE)
motorRowsinSCC <- SCC[motorRowsFilter, ]
motorRowsIds <- as.character(motorRowsinSCC$SCC)

# Restrict NEI to motor sources in the two counties of interest.
NEI$SCC <- as.character(NEI$SCC)
emissionByMotor <- NEI[NEI$SCC %in% motorRowsIds, ]
dataBaltimoreAndLosAngles <-
  emissionByMotor[which(emissionByMotor$fips %in% c("24510", "06037")), ]

# Total emissions per year and county.
dataTable <- data.table(dataBaltimoreAndLosAngles)
emissionByYear <- dataTable[, sum(Emissions), by = c("year", "fips")]
colnames(emissionByYear) <- c("year", "fips", "Emissions")

# Human-readable county labels ("NA" string placeholder kept for rows
# matching neither fips code, as in the original).
# BUG FIX: label typo "os Angeles County" corrected.
emissionByYear$County <- "NA"
emissionByYear[fips == "24510"]$County <- "Baltimore"
emissionByYear[fips == "06037"]$County <- "Los Angeles County"

# Plot yearly totals per county (typos in the title fixed).
qplot(year, Emissions, data = emissionByYear, group = County, color = County,
      geom = c("point", "line"),
      ylab = expression("PM"[2.5]), xlab = "Year",
      main = "Comparison of Motor Emissions")

# Copy the screen device to a PNG file.
dev.copy(png, file = "Plot6.png", height = 480, width = 480)
dev.off()
/Course Project 2/Plot6.R
no_license
element824/Exploratory-Data-Analysis-on-Coursera
R
false
false
1,133
r
# Plot6.R -- compare motor-vehicle-related PM2.5 emissions over time in
# Baltimore City (fips 24510) and Los Angeles County (fips 06037), and
# save the comparison plot to Plot6.png.

library(data.table)
library(ggplot2)  # BUG FIX: qplot() is used below but ggplot2 was never loaded

# Unpack and load the NEI emissions data and the source classification table.
unzip("data/exdata-data-NEI_data.zip", exdir = "data")
NEI <- readRDS("data//summarySCC_PM25.rds")
SCC <- readRDS("data//Source_Classification_Code.rds")

# Identify motor-related source classification codes by name.
motorRowsFilter <- grep("motor", SCC$Short.Name, ignore.case = TRUE)
motorRowsinSCC <- SCC[motorRowsFilter, ]
motorRowsIds <- as.character(motorRowsinSCC$SCC)

# Restrict NEI to motor sources in the two counties of interest.
NEI$SCC <- as.character(NEI$SCC)
emissionByMotor <- NEI[NEI$SCC %in% motorRowsIds, ]
dataBaltimoreAndLosAngles <-
  emissionByMotor[which(emissionByMotor$fips %in% c("24510", "06037")), ]

# Total emissions per year and county.
dataTable <- data.table(dataBaltimoreAndLosAngles)
emissionByYear <- dataTable[, sum(Emissions), by = c("year", "fips")]
colnames(emissionByYear) <- c("year", "fips", "Emissions")

# Human-readable county labels ("NA" string placeholder kept for rows
# matching neither fips code, as in the original).
# BUG FIX: label typo "os Angeles County" corrected.
emissionByYear$County <- "NA"
emissionByYear[fips == "24510"]$County <- "Baltimore"
emissionByYear[fips == "06037"]$County <- "Los Angeles County"

# Plot yearly totals per county (typos in the title fixed).
qplot(year, Emissions, data = emissionByYear, group = County, color = County,
      geom = c("point", "line"),
      ylab = expression("PM"[2.5]), xlab = "Year",
      main = "Comparison of Motor Emissions")

# Copy the screen device to a PNG file.
dev.copy(png, file = "Plot6.png", height = 480, width = 480)
dev.off()
# getlocationdata.R (header previously mislabeled "getdateplot.R")
# short script to extract and write data to build the location plot:
# reads event data, normalises country names to ISO3 codes, counts
# events per country, and writes a TSV consumed by the location plot.
setwd("/Users/vjdorazio/Desktop/github/TwoRavens/R")

library(countrycode)
library(stringi)

## this is a good option to set...
options(stringsAsFactors = FALSE)

#read the data
data <- read.delim("../data/samplePhox.csv",sep=",")

# Normalise country names: strip accents, special-case the UK's long
# official name, and convert any remaining non-ISO3 entries to ISO3.
data$cname <- data$AdminInfo
data$cname <- stri_trans_general(data$cname,"Latin-ASCII")
data$cname[which(data$cname=="United Kingdom of Great Britain and Northern Ireland")] <- "United Kingdom"
data$cname[which(nchar(data$cname)!=3)] <- countrycode(data$cname[which(nchar(data$cname)!=3)], "country.name", "iso3c", warn=TRUE)
# each row counts as one event
data$freq <- 1

## aggregating
# one output row per country with its event count
aggdata <- aggregate(data$freq, by=list(data$cname), FUN="sum")
colnames(aggdata) <- c("cname","freq")
# recover the full country name from the ISO3 code for display
aggdata$fullcname <- countrycode(aggdata$cname, "iso3c","country.name",warn=TRUE)
aggdata$id <- 1:nrow(aggdata)
aggdata <- aggdata[,c("id","cname","freq","fullcname")]
write.table(aggdata,"../data/locationplot.tsv", row.names=FALSE, quote=FALSE, sep="\t")
/R/getlocationdata.R
no_license
TwoRavens/EventData
R
false
false
1,070
r
# getlocationdata.R (header previously mislabeled "getdateplot.R")
# short script to extract and write data to build the location plot:
# reads event data, normalises country names to ISO3 codes, counts
# events per country, and writes a TSV consumed by the location plot.
setwd("/Users/vjdorazio/Desktop/github/TwoRavens/R")

library(countrycode)
library(stringi)

## this is a good option to set...
options(stringsAsFactors = FALSE)

#read the data
data <- read.delim("../data/samplePhox.csv",sep=",")

# Normalise country names: strip accents, special-case the UK's long
# official name, and convert any remaining non-ISO3 entries to ISO3.
data$cname <- data$AdminInfo
data$cname <- stri_trans_general(data$cname,"Latin-ASCII")
data$cname[which(data$cname=="United Kingdom of Great Britain and Northern Ireland")] <- "United Kingdom"
data$cname[which(nchar(data$cname)!=3)] <- countrycode(data$cname[which(nchar(data$cname)!=3)], "country.name", "iso3c", warn=TRUE)
# each row counts as one event
data$freq <- 1

## aggregating
# one output row per country with its event count
aggdata <- aggregate(data$freq, by=list(data$cname), FUN="sum")
colnames(aggdata) <- c("cname","freq")
# recover the full country name from the ISO3 code for display
aggdata$fullcname <- countrycode(aggdata$cname, "iso3c","country.name",warn=TRUE)
aggdata$id <- 1:nrow(aggdata)
aggdata <- aggdata[,c("id","cname","freq","fullcname")]
write.table(aggdata,"../data/locationplot.tsv", row.names=FALSE, quote=FALSE, sep="\t")
# Classify each promoter sequence by CpG-island promoter class, scanning
# every 500 bp window one base at a time:
#   HCP: some window has GC fraction >= 0.55 AND CpG obs/exp >= 0.6
#   ICP: otherwise, some window has CpG obs/exp >= 0.4
#   LCP: no window reaches either threshold
library(tidyverse)
library(stringi)

sequence_data <- read.table("mm10_promoters_sequences_500up_2500down.txt",
                            stringsAsFactors = FALSE)
colnames(sequence_data) <- c("Gene", "sequence")
sequence_data$class <- "ICP"

# Classify a single promoter sequence; returns "HCP", "ICP" or "LCP".
# NOTE(review): assumes nchar(seqtxt) >= 500, as in the original
# (0:(seqlen - 500) counts down for shorter sequences) -- TODO confirm
# input sequences are always >= 500 bp.
classify_promoter <- function(seqtxt) {
  seqlen <- nchar(seqtxt)
  icp_seen <- FALSE
  for (offset in 0:(seqlen - 500)) {
    window <- substr(seqtxt, 1 + offset, 500 + offset)
    c_count <- stri_count(window, fixed = "C")
    g_count <- stri_count(window, fixed = "G")
    # Guard BEFORE dividing (the original computed cpg_oe first, briefly
    # producing NaN/Inf for C- or G-free windows before skipping them).
    if (c_count == 0 || g_count == 0) {
      next
    }
    cpg_count <- stri_count(window, fixed = "CG")
    gc_fraction <- (c_count + g_count) / 500
    cpg_obs_exp <- cpg_count * 500 / (c_count * g_count)
    if (gc_fraction >= 0.55 && cpg_obs_exp >= 0.6) {
      # Strongest class found: no need to scan further windows.
      return("HCP")
    }
    if (cpg_obs_exp >= 0.4) {
      icp_seen <- TRUE
    }
  }
  if (icp_seen) "ICP" else "LCP"
}

# Classify every sequence (progress printed per row, as in the original).
for (i in seq_len(nrow(sequence_data))) {
  sequence_data$class[i] <- classify_promoter(sequence_data$sequence[i])
  print(i)
}
/sequence_cpg_analysis.R
no_license
shah-rohan/bivalency
R
false
false
1,042
r
# Classify each promoter sequence by CpG-island promoter class, scanning
# every 500 bp window one base at a time:
#   HCP: some window has GC fraction >= 0.55 AND CpG obs/exp >= 0.6
#   ICP: otherwise, some window has CpG obs/exp >= 0.4
#   LCP: no window reaches either threshold
library(tidyverse)
library(stringi)

sequence_data <- read.table("mm10_promoters_sequences_500up_2500down.txt",
                            stringsAsFactors = FALSE)
colnames(sequence_data) <- c("Gene", "sequence")
sequence_data$class <- "ICP"

# Classify a single promoter sequence; returns "HCP", "ICP" or "LCP".
# NOTE(review): assumes nchar(seqtxt) >= 500, as in the original
# (0:(seqlen - 500) counts down for shorter sequences) -- TODO confirm
# input sequences are always >= 500 bp.
classify_promoter <- function(seqtxt) {
  seqlen <- nchar(seqtxt)
  icp_seen <- FALSE
  for (offset in 0:(seqlen - 500)) {
    window <- substr(seqtxt, 1 + offset, 500 + offset)
    c_count <- stri_count(window, fixed = "C")
    g_count <- stri_count(window, fixed = "G")
    # Guard BEFORE dividing (the original computed cpg_oe first, briefly
    # producing NaN/Inf for C- or G-free windows before skipping them).
    if (c_count == 0 || g_count == 0) {
      next
    }
    cpg_count <- stri_count(window, fixed = "CG")
    gc_fraction <- (c_count + g_count) / 500
    cpg_obs_exp <- cpg_count * 500 / (c_count * g_count)
    if (gc_fraction >= 0.55 && cpg_obs_exp >= 0.6) {
      # Strongest class found: no need to scan further windows.
      return("HCP")
    }
    if (cpg_obs_exp >= 0.4) {
      icp_seen <- TRUE
    }
  }
  if (icp_seen) "ICP" else "LCP"
}

# Classify every sequence (progress printed per row, as in the original).
for (i in seq_len(nrow(sequence_data))) {
  sequence_data$class[i] <- classify_promoter(sequence_data$sequence[i])
  print(i)
}
## makeCacheMatrix: wrap an invertible matrix in a closure-based cache.
##
## Returns a list of four accessor functions sharing one environment:
##   set(y)          - replace the stored matrix and drop any cached inverse
##   get()           - return the stored matrix
##   setInvMtx(m)    - store a computed inverse in the cache
##   getInvMtx()     - return the cached inverse, or NULL if not yet computed
makeCacheMatrix <- function(x = matrix()) {
  inv <- NULL
  list(
    set = function(y) {
      x <<- y
      inv <<- NULL  # invalidate the cache whenever the matrix changes
    },
    get = function() x,
    setInvMtx = function(invMtx) inv <<- invMtx,
    getInvMtx = function() inv
  )
}

## cacheSolve: return the inverse of the matrix held by a makeCacheMatrix
## object, computing it with solve() only on a cache miss.
##
## On a cache hit the message "getting cached data" is emitted and the
## stored inverse is returned without recomputation. Extra arguments are
## forwarded to solve().
cacheSolve <- function(x, ...) {
  cached <- x$getInvMtx()
  if (!is.null(cached)) {
    message("getting cached data")
    return(cached)
  }
  result <- solve(x$get(), ...)
  x$setInvMtx(result)
  result
}
/cachematrix.R
no_license
espillier/ProgrammingAssignment2
R
false
false
1,903
r
## makeCacheMatrix: wrap an invertible matrix in a closure-based cache.
##
## Returns a list of four accessor functions sharing one environment:
##   set(y)          - replace the stored matrix and drop any cached inverse
##   get()           - return the stored matrix
##   setInvMtx(m)    - store a computed inverse in the cache
##   getInvMtx()     - return the cached inverse, or NULL if not yet computed
makeCacheMatrix <- function(x = matrix()) {
  inv <- NULL
  list(
    set = function(y) {
      x <<- y
      inv <<- NULL  # invalidate the cache whenever the matrix changes
    },
    get = function() x,
    setInvMtx = function(invMtx) inv <<- invMtx,
    getInvMtx = function() inv
  )
}

## cacheSolve: return the inverse of the matrix held by a makeCacheMatrix
## object, computing it with solve() only on a cache miss.
##
## On a cache hit the message "getting cached data" is emitted and the
## stored inverse is returned without recomputation. Extra arguments are
## forwarded to solve().
cacheSolve <- function(x, ...) {
  cached <- x$getInvMtx()
  if (!is.null(cached)) {
    message("getting cached data")
    return(cached)
  }
  result <- solve(x$get(), ...)
  x$setInvMtx(result)
  result
}
#' Example Data
#'
#' A dataset containing true values and predictions of an unknown
#' quantity in March and April 2020 on
#' the two planets Tatooine and Coruscant. Predictions were generated using
#' different models. The data on Coruscant is just a simple modulation
#' of the values observed on Tatooine.
#'
#' @format A data frame with 104000 rows and 6 variables:
#' \describe{
#'   \item{geography}{the location where values were observed and predicted}
#'   \item{model}{the model used to generate predictions}
#'   \item{sample_nr}{the sample number that identifies a predictive sample
#'   for a specific geography and date}
#'   \item{date}{the date for which the prediction was made and the true
#'   value was observed}
#'   \item{y_pred}{the value predicted for a given date and region}
#'   \item{y_obs}{the true value observed for a given date and region}
#' }
"example_data"
/R/data.R
permissive
FelipeJColon/stackr
R
false
false
897
r
#' Example Data
#'
#' A dataset containing true values and predictions of an unknown
#' quantity in March and April 2020 on
#' the two planets Tatooine and Coruscant. Predictions were generated using
#' different models. The data on Coruscant is just a simple modulation
#' of the values observed on Tatooine.
#'
#' @format A data frame with 104000 rows and 6 variables:
#' \describe{
#'   \item{geography}{the location where values were observed and predicted}
#'   \item{model}{the model used to generate predictions}
#'   \item{sample_nr}{the sample number that identifies a predictive sample
#'   for a specific geography and date}
#'   \item{date}{the date for which the prediction was made and the true
#'   value was observed}
#'   \item{y_pred}{the value predicted for a given date and region}
#'   \item{y_obs}{the true value observed for a given date and region}
#' }
"example_data"
##----------------------------------------------------------------------------
## Title:   Choropleth tematic map
## Author:  Armando Enriquez Z.
## Date:    October 28th, 2014
## Purpose: Create a choropleth tematic map with ggplot tools
##----------------------------------------------------------------------------

## Required packages; install any that are missing with install.packages()
library(ggplot2)       ## graphics
library(sp)            ## spatial objects
library(maps)
library(maptools)
library(foreign)       ## reading foreign files into R
library(mapproj)
library(RColorBrewer)  ## color palettes

## State-level map attribute table
mapDesc <- read.csv("MEX_adm1.csv")
str(mapDesc)

## State-level shapefile, flattened into a plain data frame for ggplot
mapMex <- fortify(readShapePoly("MEX_adm1.shp"))
str(mapMex)

## Life expectancy at birth per state
lifeExp <- read.csv("life.csv", header = TRUE)

## mapMex ids are character and start at "0"; lifeExp ids start at 1.
## Coerce and shift the map ids so both tables share the same key.
mapMex$id <- as.numeric(mapMex$id) + 1
summary(mapMex$id)
summary(lifeExp$id)

## Six states are keyed differently in the two sources
## (Baja, Baja Sur, Coahuila, Colima, Chiapas, Chihuahua);
## remap their ids in a single vectorized assignment.
lifeExp[c(2, 3, 5, 6, 7, 8), 1] <- c(3, 2, 7, 8, 5, 6)

## Bin the continuous life-expectancy variable into quintiles
quant <- quantile(lifeExp$lifeexp, c(0, 0.2, 0.4, 0.6, 0.8, 1))
lifeExp$quant <- cut(lifeExp$lifeexp, quant,
                     labels = c("71.5-74.1", "74.1-74.7", "74.7-75.0",
                                "75.0-75.2", "75.2-75.8"),
                     include.lowest = TRUE)

## Five-class sequential palette (hex codes from colorbrewer2.com)
colors <- c("#FFFFB2", "#FECC5C", "#FD8D3C", "#F03B20", "#BD0026")

## Draw the choropleth
ggplot(lifeExp, aes(map_id = id, fill = quant)) +
  geom_map(map = mapMex, colour = "black") +
  scale_fill_manual(values = colors) +
  expand_limits(x = mapMex$long, y = mapMex$lat) +
  coord_map("polyconic") +
  labs(fill = "Life expectancy 2013\nAges per quintiles") +
  xlab("") +
  ylab("")
/lifeChoropleth.R
no_license
elabuel-o/choropleth-map1
R
false
false
2,673
r
##----------------------------------------------------------------------------
## Title:   Choropleth tematic map
## Author:  Armando Enriquez Z.
## Date:    October 28th, 2014
## Purpose: Create a choropleth tematic map with ggplot tools
##----------------------------------------------------------------------------

## Required packages; install any that are missing with install.packages()
library(ggplot2)       ## graphics
library(sp)            ## spatial objects
library(maps)
library(maptools)
library(foreign)       ## reading foreign files into R
library(mapproj)
library(RColorBrewer)  ## color palettes

## State-level map attribute table
mapDesc <- read.csv("MEX_adm1.csv")
str(mapDesc)

## State-level shapefile, flattened into a plain data frame for ggplot
mapMex <- fortify(readShapePoly("MEX_adm1.shp"))
str(mapMex)

## Life expectancy at birth per state
lifeExp <- read.csv("life.csv", header = TRUE)

## mapMex ids are character and start at "0"; lifeExp ids start at 1.
## Coerce and shift the map ids so both tables share the same key.
mapMex$id <- as.numeric(mapMex$id) + 1
summary(mapMex$id)
summary(lifeExp$id)

## Six states are keyed differently in the two sources
## (Baja, Baja Sur, Coahuila, Colima, Chiapas, Chihuahua);
## remap their ids in a single vectorized assignment.
lifeExp[c(2, 3, 5, 6, 7, 8), 1] <- c(3, 2, 7, 8, 5, 6)

## Bin the continuous life-expectancy variable into quintiles
quant <- quantile(lifeExp$lifeexp, c(0, 0.2, 0.4, 0.6, 0.8, 1))
lifeExp$quant <- cut(lifeExp$lifeexp, quant,
                     labels = c("71.5-74.1", "74.1-74.7", "74.7-75.0",
                                "75.0-75.2", "75.2-75.8"),
                     include.lowest = TRUE)

## Five-class sequential palette (hex codes from colorbrewer2.com)
colors <- c("#FFFFB2", "#FECC5C", "#FD8D3C", "#F03B20", "#BD0026")

## Draw the choropleth
ggplot(lifeExp, aes(map_id = id, fill = quant)) +
  geom_map(map = mapMex, colour = "black") +
  scale_fill_manual(values = colors) +
  expand_limits(x = mapMex$long, y = mapMex$lat) +
  coord_map("polyconic") +
  labs(fill = "Life expectancy 2013\nAges per quintiles") +
  xlab("") +
  ylab("")
# Extracted example code for cheddar's node-property distribution plots.
library(cheddar)

### Name: PlotNPSDistribution
### Title: Plot distributions of node properties
### Aliases: PlotNPSDistribution PlotBDistribution PlotMDistribution
###   PlotNDistribution PlotDegreeDistribution
### Keywords: hplot

### ** Examples

# Load the Tuesday Lake 1984 example community shipped with cheddar
data(TL84)

# Kernel-density plot of the body-mass (M) distribution, default bandwidth
PlotMDistribution(TL84)

# A bandwidth of 3
PlotMDistribution(TL84, density.args=list(bw=3))

# Distribution of node degree in the food web
PlotDegreeDistribution(TL84)
/data/genthat_extracted_code/cheddar/examples/PlotNPSDistribution.Rd.R
no_license
surayaaramli/typeRrh
R
false
false
390
r
# Extracted example code for cheddar's node-property distribution plots.
library(cheddar)

### Name: PlotNPSDistribution
### Title: Plot distributions of node properties
### Aliases: PlotNPSDistribution PlotBDistribution PlotMDistribution
###   PlotNDistribution PlotDegreeDistribution
### Keywords: hplot

### ** Examples

# Load the Tuesday Lake 1984 example community shipped with cheddar
data(TL84)

# Kernel-density plot of the body-mass (M) distribution, default bandwidth
PlotMDistribution(TL84)

# A bandwidth of 3
PlotMDistribution(TL84, density.args=list(bw=3))

# Distribution of node degree in the food web
PlotDegreeDistribution(TL84)
# MSM -----------------------------------------------------------------

#' @title Network Resimulation Module
#'
#' @description Module function for resimulating the main, casual, and one-off
#'              networks for one time step.
#'
#' @inheritParams aging_msm
#'
#' @keywords module msm
#'
#' @export
#'
simnet_msm <- function(dat, at) {

  ## Edges correction: rescale the edges coefficients for the current
  ## population size before any resimulation
  dat <- edges_correct_msm(dat, at)

  ## Main network (network 1)
  nwparam.m <- EpiModel::get_nwparam(dat, network = 1)
  dat <- updatenwp_msm(dat, network = 1)
  dat$el[[1]] <- tergmLite::simulate_network(p = dat$p[[1]],
                                             el = dat$el[[1]],
                                             coef.form = nwparam.m$coef.form,
                                             coef.diss = nwparam.m$coef.diss$coef.adj,
                                             save.changes = TRUE)
  dat$temp$new.edges <- NULL
  # At the first simulated step (at == 2) every edge counts as new;
  # afterwards, new ties are read from the edgelist's "changes" attribute
  # (rows with to == 1 are formations)
  if (at == 2) {
    new.edges.m <- matrix(dat$el[[1]], ncol = 2)
  } else {
    new.edges.m <- attributes(dat$el[[1]])$changes
    new.edges.m <- new.edges.m[new.edges.m[, "to"] == 1, 1:2, drop = FALSE]
  }
  # Store new edges by persistent uid rather than positional index
  dat$temp$new.edges <- matrix(dat$attr$uid[new.edges.m], ncol = 2)

  ## Casual network (network 2), same pattern as the main network
  nwparam.p <- EpiModel::get_nwparam(dat, network = 2)
  dat <- updatenwp_msm(dat, network = 2)
  dat$el[[2]] <- tergmLite::simulate_network(p = dat$p[[2]],
                                             el = dat$el[[2]],
                                             coef.form = nwparam.p$coef.form,
                                             coef.diss = nwparam.p$coef.diss$coef.adj,
                                             save.changes = TRUE)
  if (at == 2) {
    new.edges.p <- matrix(dat$el[[2]], ncol = 2)
  } else {
    new.edges.p <- attributes(dat$el[[2]])$changes
    new.edges.p <- new.edges.p[new.edges.p[, "to"] == 1, 1:2, drop = FALSE]
  }
  dat$temp$new.edges <- rbind(dat$temp$new.edges,
                              matrix(dat$attr$uid[new.edges.p], ncol = 2))

  ## One-off network (network 3): cross-sectional ERGM, no dissolution model
  nwparam.i <- EpiModel::get_nwparam(dat, network = 3)
  dat <- updatenwp_msm(dat, network = 3)
  dat$el[[3]] <- tergmLite::simulate_ergm(p = dat$p[[3]],
                                          el = dat$el[[3]],
                                          coef = nwparam.i$coef.form)

  if (dat$control$save.nwstats == TRUE) {
    dat <- calc_resim_nwstats(dat, at)
  }

  return(dat)
}

# Append per-network summary statistics (edge count, mean degree, proportion
# of nodes with concurrent ties) to dat$stats$nwstats, one row per time step.
calc_resim_nwstats <- function(dat, at) {
  for (nw in 1:3) {
    n <- attr(dat$el[[nw]], "n")
    edges <- nrow(dat$el[[nw]])
    meandeg <- round(edges / n, 3)
    concurrent <- round(mean(get_degree(dat$el[[nw]]) > 1), 3)
    mat <- matrix(c(edges, meandeg, concurrent), ncol = 3, nrow = 1)
    # First step initializes the matrix; later steps append rows
    if (at == 2) {
      dat$stats$nwstats[[nw]] <- mat
      colnames(dat$stats$nwstats[[nw]]) <- c("edges", "meand", "conc")
    }
    if (at > 2) {
      dat$stats$nwstats[[nw]] <- rbind(dat$stats$nwstats[[nw]], mat)
    }
  }
  return(dat)
}

#' @title Adjustment for the Edges Coefficient with Changing Network Size
#'
#' @description Adjusts the edges coefficients in a dynamic network model
#'              to preserve the mean degree.
#'
#' @inheritParams aging_msm
#'
#' @details
#' In HIV/STI modeling, there is typically an assumption that changes in
#' population size do not affect one's number of partners, specified as the
#' mean degree for network models. A person would not have 10 times the number
#' of partners should he move from a city 10 times as large. This module uses
#' the adjustment of Krivitsky et al. to adjust the edges coefficients on the
#' three network models to account for varying population size in order to
#' preserve that mean degree.
#'
#' @return
#' The network model parameters stored in \code{dat$nwparam} are updated for
#' each of the three network models.
#'
#' @references
#' Krivitsky PN, Handcock MS, and Morris M. "Adjusting for network size and
#' composition effects in exponential-family random graph models." Statistical
#' Methodology. 2011; 8.4: 319-339.
#'
#' @keywords module msm
#'
#' @export
#'
edges_correct_msm <- function(dat, at) {

  # Krivitsky offset: shift each edges coefficient by log(old/new) population
  # size so mean degree is preserved as the population grows or shrinks
  old.num <- dat$epi$num[at - 1]
  new.num <- sum(dat$attr$active == 1, na.rm = TRUE)
  adjust <- log(old.num) - log(new.num)

  # Apply the same adjustment to all three networks (main, casual, one-off)
  coef.form.m <- get_nwparam(dat, network = 1)$coef.form
  coef.form.m[1] <- coef.form.m[1] + adjust
  dat$nwparam[[1]]$coef.form <- coef.form.m

  coef.form.p <- get_nwparam(dat, network = 2)$coef.form
  coef.form.p[1] <- coef.form.p[1] + adjust
  dat$nwparam[[2]]$coef.form <- coef.form.p

  coef.form.i <- get_nwparam(dat, network = 3)$coef.form
  coef.form.i[1] <- coef.form.i[1] + adjust
  dat$nwparam[[3]]$coef.form <- coef.form.i

  return(dat)
}

#' @title Update Network Data Structure and Parameters
#'
#' @description Updates the internal data structure containing the main data
#'              passed into the TERGM resimulation algorithm. This step is
#'              necessary with the new tergmLite approach.
#'
#' @param dat Data object created in initialization module.
#' @param network Integer value for network number
#'
#' @keywords module msm
#'
#' @export
#'
updatenwp_msm <- function(dat, network) {

  # Network size and dyad count are shared across all three networks;
  # the hard-coded coef.names checks below pin this function to the exact
  # model formulas it was written for, failing fast on anything else.
  n <- attributes(dat$el[[1]])$n
  maxdyads <- choose(n, 2)

  p <- dat$p[[network]]

  if (network == 1) {
    mf <- p$model.form
    md <- p$model.diss
    mhf <- p$MHproposal.form
    mhd <- p$MHproposal.diss

    if (!identical(mf$coef.names, c("edges", "nodefactor.deg.pers.1",
                                    "nodefactor.deg.pers.2", "absdiff.sqrt.age",
                                    "nodematch.role.class.I",
                                    "nodematch.role.class.R"))) {
      stop("updatenwp_msm will not currently work with this formula, contact SJ")
    }

    ## Update model.form ##

    # edges
    mf$terms[[1]]$maxval <- maxdyads

    # nodefactor("deg.pers"): rebuild the term's packed inputs vector from
    # current casual-network degrees
    dat$attr$deg.pers <- get_degree(dat$el[[2]])
    nodecov <- dat$attr$deg.pers
    u <- sort(unique(nodecov))
    u <- u[-1] # remove base values here
    nodecov <- match(nodecov, u, nomatch = length(u) + 1)
    ui <- seq(along = u)
    inputs <- c(ui, nodecov)
    mf$terms[[2]]$inputs <- c(length(ui), length(mf$terms[[2]]$coef.names),
                              length(inputs), inputs)

    # absdiff("sqrt.age")
    nodecov <- dat$attr$sqrt.age
    power <- 1
    inputs <- c(power, nodecov)
    mf$terms[[3]]$inputs <- c(0, length(mf$terms[[3]]$coef.names),
                              length(inputs), inputs)

    # nodematch("role.class")
    nodecov <- dat$attr$role.class
    u <- sort(unique(nodecov))
    u <- u[1:2] # keep = 1:2
    nodecov <- match(nodecov, u, nomatch = length(u) + 1)
    # give every non-kept node a unique level so they never match each other
    dontmatch <- nodecov == (length(u) + 1)
    nodecov[dontmatch] <- length(u) + (1:sum(dontmatch))
    ui <- seq(along = u)
    inputs <- c(ui, nodecov)
    mf$terms[[4]]$inputs <- c(0, length(mf$terms[[4]]$coef.names),
                              length(inputs), inputs)

    ## combined maxval ##
    mf$maxval[1] <- maxdyads

    ## Update model.diss ##
    md$terms[[1]]$maxval <- maxdyads
    md$maxval <- maxdyads

    ## Update MHproposal.form: resize bound-degree constraint matrices to n ##
    mhf$arguments$constraints$bd$attribs <-
      matrix(rep(mhf$arguments$constraints$bd$attribs[1], n), ncol = 1)
    mhf$arguments$constraints$bd$maxout <-
      matrix(rep(mhf$arguments$constraints$bd$maxout[1], n), ncol = 1)
    mhf$arguments$constraints$bd$maxin <- matrix(rep(n - 1, n), ncol = 1)
    mhf$arguments$constraints$bd$minout <-
      mhf$arguments$constraints$bd$minin <- matrix(rep(0, n), ncol = 1)

    ## Update MHproposal.diss ##
    mhd$arguments$constraints$bd <- mhf$arguments$constraints$bd

    dat$p[[network]] <- list(model.form = mf, model.diss = md,
                             MHproposal.form = mhf, MHproposal.diss = mhd)
  }

  if (network == 2) {
    mf <- p$model.form
    md <- p$model.diss
    mhf <- p$MHproposal.form
    mhd <- p$MHproposal.diss

    if (!identical(mf$coef.names, c("edges", "nodefactor.deg.main.1",
                                    "concurrent", "absdiff.sqrt.age",
                                    "nodematch.role.class.I",
                                    "nodematch.role.class.R"))) {
      stop("updatenwp_msm will not currently work with this formula, contact SJ")
    }

    ## Update model.form ##

    # edges
    mf$terms[[1]]$maxval <- maxdyads

    # nodefactor("deg.main"): degrees taken from the main network
    dat$attr$deg.main <- get_degree(dat$el[[1]])
    nodecov <- dat$attr$deg.main
    u <- sort(unique(nodecov))
    u <- u[-1] # remove base values here
    nodecov <- match(nodecov, u, nomatch = length(u) + 1)
    ui <- seq(along = u)
    inputs <- c(ui, nodecov)
    mf$terms[[2]]$inputs <- c(length(ui), length(mf$terms[[2]]$coef.names),
                              length(inputs), inputs)

    # concurrent
    mf$terms[[3]]$maxval <- n

    # absdiff("sqrt.age")
    nodecov <- dat$attr$sqrt.age
    power <- 1
    inputs <- c(power, nodecov)
    mf$terms[[4]]$inputs <- c(0, length(mf$terms[[4]]$coef.names),
                              length(inputs), inputs)

    # nodematch("role.class")
    nodecov <- dat$attr$role.class
    u <- sort(unique(nodecov))
    u <- u[1:2] # keep = 1:2
    nodecov <- match(nodecov, u, nomatch = length(u) + 1)
    dontmatch <- nodecov == (length(u) + 1)
    nodecov[dontmatch] <- length(u) + (1:sum(dontmatch))
    ui <- seq(along = u)
    inputs <- c(ui, nodecov)
    mf$terms[[5]]$inputs <- c(0, length(mf$terms[[5]]$coef.names),
                              length(inputs), inputs)

    ## combined maxval ##
    mf$maxval[1] <- maxdyads
    mf$maxval[3] <- n

    ## Update model.diss ##
    md$terms[[1]]$maxval <- maxdyads
    md$maxval <- maxdyads

    ## Update MHproposal.form: resize bound-degree constraint matrices to n ##
    mhf$arguments$constraints$bd$attribs <-
      matrix(rep(mhf$arguments$constraints$bd$attribs[1], n), ncol = 1)
    mhf$arguments$constraints$bd$maxout <-
      matrix(rep(mhf$arguments$constraints$bd$maxout[1], n), ncol = 1)
    mhf$arguments$constraints$bd$maxin <- matrix(rep(n - 1, n), ncol = 1)
    mhf$arguments$constraints$bd$minout <-
      mhf$arguments$constraints$bd$minin <- matrix(rep(0, n), ncol = 1)

    ## Update MHproposal.diss ##
    mhd$arguments$constraints$bd <- mhf$arguments$constraints$bd

    dat$p[[network]] <- list(model.form = mf, model.diss = md,
                             MHproposal.form = mhf, MHproposal.diss = mhd)
  }

  if (network == 3) {
    # One-off network is a cross-sectional ERGM: no dissolution model
    mf <- p$model.form
    mhf <- p$MHproposal

    if (!identical(mf$coef.names, c("edges",
                                    "nodefactor.deg.main.deg.pers.0.1",
                                    "nodefactor.deg.main.deg.pers.0.2",
                                    "nodefactor.deg.main.deg.pers.1.0",
                                    "nodefactor.deg.main.deg.pers.1.1",
                                    "nodefactor.deg.main.deg.pers.1.2",
                                    "nodefactor.riskg.1", "nodefactor.riskg.2",
                                    "nodefactor.riskg.4", "nodefactor.riskg.5",
                                    "absdiff.sqrt.age",
                                    "nodematch.role.class.I",
                                    "nodematch.role.class.R"))) {
      stop("updatenwp_msm will not currently work with this formula, contact SJ")
    }

    ## Update model.form ##

    # edges
    mf$terms[[1]]$maxval <- maxdyads

    # nodefactor(c("deg.main", "deg.pers"))
    # current main degree already written in last conditional block
    dat$attr$deg.pers <- get_degree(dat$el[[2]])
    # combined factor levels are "<deg.main>.<deg.pers>" strings
    nodecov <- do.call(paste, c(sapply(c("deg.main", "deg.pers"),
                                       function(oneattr) dat$attr[[oneattr]],
                                       simplify = FALSE), sep = "."))
    u <- sort(unique(nodecov))
    u <- u[-1] # remove base values here
    nodecov <- match(nodecov, u, nomatch = length(u) + 1)
    ui <- seq(along = u)
    inputs <- c(ui, nodecov)
    mf$terms[[2]]$inputs <- c(length(ui), length(mf$terms[[2]]$coef.names),
                              length(inputs), inputs)

    # nodefactor("riskg", base = 3)
    nodecov <- dat$attr$riskg
    u <- sort(unique(nodecov))
    u <- u[-3] # remove base values here
    nodecov <- match(nodecov, u, nomatch = length(u) + 1)
    ui <- seq(along = u)
    inputs <- c(ui, nodecov)
    mf$terms[[3]]$inputs <- c(length(ui), length(mf$terms[[3]]$coef.names),
                              length(inputs), inputs)

    # absdiff("sqrt.age")
    nodecov <- dat$attr$sqrt.age
    power <- 1
    inputs <- c(power, nodecov)
    mf$terms[[4]]$inputs <- c(0, length(mf$terms[[4]]$coef.names),
                              length(inputs), inputs)

    # nodematch("role.class")
    nodecov <- dat$attr$role.class
    u <- sort(unique(nodecov))
    u <- u[1:2] # keep = 1:2
    nodecov <- match(nodecov, u, nomatch = length(u) + 1)
    dontmatch <- nodecov == (length(u) + 1)
    nodecov[dontmatch] <- length(u) + (1:sum(dontmatch))
    ui <- seq(along = u)
    inputs <- c(ui, nodecov)
    mf$terms[[5]]$inputs <- c(0, length(mf$terms[[5]]$coef.names),
                              length(inputs), inputs)

    ## combined maxval ##
    mf$maxval[1] <- maxdyads

    ## Update MHproposal ##
    # no changes

    dat$p[[network]] <- list(model.form = mf, MHproposal = mhf)
  }

  return(dat)
}

# HET -----------------------------------------------------------------

#' @title Network Resimulation Module
#'
#' @description Module function to resimulate the dynamic network forward one
#'              time step conditional on current network structure and vertex
#'              attributes.
#'
#' @inheritParams aging_het
#'
#' @keywords module het
#'
#' @export
#'
simnet_het <- function(dat, at) {

  # Update edges coefficients for the current population size
  dat <- edges_correct_het(dat, at)

  # Update internal ergm data
  dat <- update_nwp_het(dat)

  # Pull network parameters
  nwparam <- get_nwparam(dat)

  # Simulate edgelist forward one time step
  dat$el <- tergmLite::simulate_network(p = dat$p,
                                        el = dat$el,
                                        coef.form = nwparam$coef.form,
                                        coef.diss = nwparam$coef.diss$coef.adj)

  return(dat)
}

# Rebuild the internal ergm/tergm structures (model.form, model.diss, and
# both MH proposals) for the single heterosexual network after the population
# size has changed.
# NOTE(review): maxin is set to n here, while the MSM variant uses n - 1 --
# confirm this asymmetry is intentional.
update_nwp_het <- function(dat) {

  mf <- dat$p$model.form
  md <- dat$p$model.diss
  mhf <- dat$p$MHproposal.form
  mhd <- dat$p$MHproposal.diss

  n <- attributes(dat$el)$n
  maxdyads <- choose(n, 2)

  ## 1. Update model.form ##

  # edges
  # inputs <- c(0, 1, 0) # not changed
  mf$terms[[1]]$maxval <- maxdyads

  # nodematch on the "male" attribute
  nodecov <- dat$attr$male
  u <- sort(unique(nodecov))
  nodecov <- match(nodecov, u, nomatch = length(u) + 1)
  inputs <- nodecov
  mf$terms[[2]]$inputs <- c(0, 1, length(inputs), inputs)

  ## Update combined maxval here
  mf$maxval <- c(maxdyads, Inf)

  ## 2. Update model.diss ##
  md$terms[[1]]$maxval <- maxdyads
  md$maxval <- maxdyads

  ## 3. Update MHproposal.form: resize bound-degree constraints to n ##
  mhf$arguments$constraints$bd$attribs <-
    matrix(rep(mhf$arguments$constraints$bd$attribs[1], n), ncol = 1)
  mhf$arguments$constraints$bd$maxout <-
    matrix(rep(mhf$arguments$constraints$bd$maxout[1], n), ncol = 1)
  mhf$arguments$constraints$bd$maxin <- matrix(rep(n, n), ncol = 1)
  mhf$arguments$constraints$bd$minout <-
    mhf$arguments$constraints$bd$minin <- matrix(rep(0, n), ncol = 1)

  ## 4. Update MHproposal.diss ##
  mhd$arguments$constraints$bd <- mhf$arguments$constraints$bd

  ## 5. Output ##
  p <- list(model.form = mf, model.diss = md,
            MHproposal.form = mhf, MHproposal.diss = mhd)
  dat$p <- p

  return(dat)
}

#' @title Adjustment for the Edges Coefficient with Changing Network Size
#'
#' @description Adjusts the edges coefficients in a dynamic network model
#'              to preserve the mean degree.
#'
#' @inheritParams aging_het
#'
#' @details
#' In HIV/STI modeling, there is typically an assumption that changes in
#' population size do not affect one's number of partners, specified as the
#' mean degree for network models. A person would not have 10 times the number
#' of partners should he move from a city 10 times as large. This module uses
#' the adjustment of Krivitsky et al. to adjust the edges coefficients on the
#' three network models to account for varying population size in order to
#' preserve that mean degree.
#'
#' @return
#' The network model parameters stored in \code{dat$nwparam} are updated.
#'
#' @references
#' Krivitsky PN, Handcock MS, and Morris M. "Adjusting for network size and
#' composition effects in exponential-family random graph models." Statistical
#' Methodology. 2011; 8.4: 319-339.
#'
#' @keywords module het
#'
#' @export
#'
edges_correct_het <- function(dat, at) {

  # Popsize: previous step's total vs currently active nodes
  old.num <- dat$epi$num[at - 1]
  new.num <- sum(dat$attr$active == 1, na.rm = TRUE)

  # New Coefs: Krivitsky offset log(old/new) preserves mean degree
  coef.form <- get_nwparam(dat)$coef.form
  coef.form[1] <- coef.form[1] + log(old.num) - log(new.num)
  dat$nwparam[[1]]$coef.form <- coef.form

  return(dat)
}
/tempR/mod.simnet.R
no_license
dth2/EpiModelHIV_SHAMP
R
false
false
17,211
r
# MSM -----------------------------------------------------------------

#' @title Network Resimulation Module
#'
#' @description Module function for resimulating the main, casual, and one-off
#'              networks for one time step.
#'
#' @inheritParams aging_msm
#'
#' @keywords module msm
#'
#' @export
#'
simnet_msm <- function(dat, at) {

  ## Edges correction: rescale the edges coefficients for the current
  ## population size before any resimulation
  dat <- edges_correct_msm(dat, at)

  ## Main network (network 1)
  nwparam.m <- EpiModel::get_nwparam(dat, network = 1)
  dat <- updatenwp_msm(dat, network = 1)
  dat$el[[1]] <- tergmLite::simulate_network(p = dat$p[[1]],
                                             el = dat$el[[1]],
                                             coef.form = nwparam.m$coef.form,
                                             coef.diss = nwparam.m$coef.diss$coef.adj,
                                             save.changes = TRUE)
  dat$temp$new.edges <- NULL
  # At the first simulated step (at == 2) every edge counts as new;
  # afterwards, new ties are read from the edgelist's "changes" attribute
  # (rows with to == 1 are formations)
  if (at == 2) {
    new.edges.m <- matrix(dat$el[[1]], ncol = 2)
  } else {
    new.edges.m <- attributes(dat$el[[1]])$changes
    new.edges.m <- new.edges.m[new.edges.m[, "to"] == 1, 1:2, drop = FALSE]
  }
  # Store new edges by persistent uid rather than positional index
  dat$temp$new.edges <- matrix(dat$attr$uid[new.edges.m], ncol = 2)

  ## Casual network (network 2), same pattern as the main network
  nwparam.p <- EpiModel::get_nwparam(dat, network = 2)
  dat <- updatenwp_msm(dat, network = 2)
  dat$el[[2]] <- tergmLite::simulate_network(p = dat$p[[2]],
                                             el = dat$el[[2]],
                                             coef.form = nwparam.p$coef.form,
                                             coef.diss = nwparam.p$coef.diss$coef.adj,
                                             save.changes = TRUE)
  if (at == 2) {
    new.edges.p <- matrix(dat$el[[2]], ncol = 2)
  } else {
    new.edges.p <- attributes(dat$el[[2]])$changes
    new.edges.p <- new.edges.p[new.edges.p[, "to"] == 1, 1:2, drop = FALSE]
  }
  dat$temp$new.edges <- rbind(dat$temp$new.edges,
                              matrix(dat$attr$uid[new.edges.p], ncol = 2))

  ## One-off network (network 3): cross-sectional ERGM, no dissolution model
  nwparam.i <- EpiModel::get_nwparam(dat, network = 3)
  dat <- updatenwp_msm(dat, network = 3)
  dat$el[[3]] <- tergmLite::simulate_ergm(p = dat$p[[3]],
                                          el = dat$el[[3]],
                                          coef = nwparam.i$coef.form)

  if (dat$control$save.nwstats == TRUE) {
    dat <- calc_resim_nwstats(dat, at)
  }

  return(dat)
}

# Append per-network summary statistics (edge count, mean degree, proportion
# of nodes with concurrent ties) to dat$stats$nwstats, one row per time step.
calc_resim_nwstats <- function(dat, at) {
  for (nw in 1:3) {
    n <- attr(dat$el[[nw]], "n")
    edges <- nrow(dat$el[[nw]])
    meandeg <- round(edges / n, 3)
    concurrent <- round(mean(get_degree(dat$el[[nw]]) > 1), 3)
    mat <- matrix(c(edges, meandeg, concurrent), ncol = 3, nrow = 1)
    # First step initializes the matrix; later steps append rows
    if (at == 2) {
      dat$stats$nwstats[[nw]] <- mat
      colnames(dat$stats$nwstats[[nw]]) <- c("edges", "meand", "conc")
    }
    if (at > 2) {
      dat$stats$nwstats[[nw]] <- rbind(dat$stats$nwstats[[nw]], mat)
    }
  }
  return(dat)
}

#' @title Adjustment for the Edges Coefficient with Changing Network Size
#'
#' @description Adjusts the edges coefficients in a dynamic network model
#'              to preserve the mean degree.
#'
#' @inheritParams aging_msm
#'
#' @details
#' In HIV/STI modeling, there is typically an assumption that changes in
#' population size do not affect one's number of partners, specified as the
#' mean degree for network models. A person would not have 10 times the number
#' of partners should he move from a city 10 times as large. This module uses
#' the adjustment of Krivitsky et al. to adjust the edges coefficients on the
#' three network models to account for varying population size in order to
#' preserve that mean degree.
#'
#' @return
#' The network model parameters stored in \code{dat$nwparam} are updated for
#' each of the three network models.
#'
#' @references
#' Krivitsky PN, Handcock MS, and Morris M. "Adjusting for network size and
#' composition effects in exponential-family random graph models." Statistical
#' Methodology. 2011; 8.4: 319-339.
#'
#' @keywords module msm
#'
#' @export
#'
edges_correct_msm <- function(dat, at) {

  # Krivitsky offset: shift each edges coefficient by log(old/new) population
  # size so mean degree is preserved as the population grows or shrinks
  old.num <- dat$epi$num[at - 1]
  new.num <- sum(dat$attr$active == 1, na.rm = TRUE)
  adjust <- log(old.num) - log(new.num)

  # Apply the same adjustment to all three networks (main, casual, one-off)
  coef.form.m <- get_nwparam(dat, network = 1)$coef.form
  coef.form.m[1] <- coef.form.m[1] + adjust
  dat$nwparam[[1]]$coef.form <- coef.form.m

  coef.form.p <- get_nwparam(dat, network = 2)$coef.form
  coef.form.p[1] <- coef.form.p[1] + adjust
  dat$nwparam[[2]]$coef.form <- coef.form.p

  coef.form.i <- get_nwparam(dat, network = 3)$coef.form
  coef.form.i[1] <- coef.form.i[1] + adjust
  dat$nwparam[[3]]$coef.form <- coef.form.i

  return(dat)
}

#' @title Update Network Data Structure and Parameters
#'
#' @description Updates the internal data structure containing the main data
#'              passed into the TERGM resimulation algorithm. This step is
#'              necessary with the new tergmLite approach.
#'
#' @param dat Data object created in initialization module.
#' @param network Integer value for network number
#'
#' @keywords module msm
#'
#' @export
#'
updatenwp_msm <- function(dat, network) {

  # Network size and dyad count are shared across all three networks;
  # the hard-coded coef.names checks below pin this function to the exact
  # model formulas it was written for, failing fast on anything else.
  n <- attributes(dat$el[[1]])$n
  maxdyads <- choose(n, 2)

  p <- dat$p[[network]]

  if (network == 1) {
    mf <- p$model.form
    md <- p$model.diss
    mhf <- p$MHproposal.form
    mhd <- p$MHproposal.diss

    if (!identical(mf$coef.names, c("edges", "nodefactor.deg.pers.1",
                                    "nodefactor.deg.pers.2", "absdiff.sqrt.age",
                                    "nodematch.role.class.I",
                                    "nodematch.role.class.R"))) {
      stop("updatenwp_msm will not currently work with this formula, contact SJ")
    }

    ## Update model.form ##

    # edges
    mf$terms[[1]]$maxval <- maxdyads

    # nodefactor("deg.pers"): rebuild the term's packed inputs vector from
    # current casual-network degrees
    dat$attr$deg.pers <- get_degree(dat$el[[2]])
    nodecov <- dat$attr$deg.pers
    u <- sort(unique(nodecov))
    u <- u[-1] # remove base values here
    nodecov <- match(nodecov, u, nomatch = length(u) + 1)
    ui <- seq(along = u)
    inputs <- c(ui, nodecov)
    mf$terms[[2]]$inputs <- c(length(ui), length(mf$terms[[2]]$coef.names),
                              length(inputs), inputs)

    # absdiff("sqrt.age")
    nodecov <- dat$attr$sqrt.age
    power <- 1
    inputs <- c(power, nodecov)
    mf$terms[[3]]$inputs <- c(0, length(mf$terms[[3]]$coef.names),
                              length(inputs), inputs)

    # nodematch("role.class")
    nodecov <- dat$attr$role.class
    u <- sort(unique(nodecov))
    u <- u[1:2] # keep = 1:2
    nodecov <- match(nodecov, u, nomatch = length(u) + 1)
    # give every non-kept node a unique level so they never match each other
    dontmatch <- nodecov == (length(u) + 1)
    nodecov[dontmatch] <- length(u) + (1:sum(dontmatch))
    ui <- seq(along = u)
    inputs <- c(ui, nodecov)
    mf$terms[[4]]$inputs <- c(0, length(mf$terms[[4]]$coef.names),
                              length(inputs), inputs)

    ## combined maxval ##
    mf$maxval[1] <- maxdyads

    ## Update model.diss ##
    md$terms[[1]]$maxval <- maxdyads
    md$maxval <- maxdyads

    ## Update MHproposal.form: resize bound-degree constraint matrices to n ##
    mhf$arguments$constraints$bd$attribs <-
      matrix(rep(mhf$arguments$constraints$bd$attribs[1], n), ncol = 1)
    mhf$arguments$constraints$bd$maxout <-
      matrix(rep(mhf$arguments$constraints$bd$maxout[1], n), ncol = 1)
    mhf$arguments$constraints$bd$maxin <- matrix(rep(n - 1, n), ncol = 1)
    mhf$arguments$constraints$bd$minout <-
      mhf$arguments$constraints$bd$minin <- matrix(rep(0, n), ncol = 1)

    ## Update MHproposal.diss ##
    mhd$arguments$constraints$bd <- mhf$arguments$constraints$bd

    dat$p[[network]] <- list(model.form = mf, model.diss = md,
                             MHproposal.form = mhf, MHproposal.diss = mhd)
  }

  if (network == 2) {
    mf <- p$model.form
    md <- p$model.diss
    mhf <- p$MHproposal.form
    mhd <- p$MHproposal.diss

    if (!identical(mf$coef.names, c("edges", "nodefactor.deg.main.1",
                                    "concurrent", "absdiff.sqrt.age",
                                    "nodematch.role.class.I",
                                    "nodematch.role.class.R"))) {
      stop("updatenwp_msm will not currently work with this formula, contact SJ")
    }

    ## Update model.form ##

    # edges
    mf$terms[[1]]$maxval <- maxdyads

    # nodefactor("deg.main"): degrees taken from the main network
    dat$attr$deg.main <- get_degree(dat$el[[1]])
    nodecov <- dat$attr$deg.main
    u <- sort(unique(nodecov))
    u <- u[-1] # remove base values here
    nodecov <- match(nodecov, u, nomatch = length(u) + 1)
    ui <- seq(along = u)
    inputs <- c(ui, nodecov)
    mf$terms[[2]]$inputs <- c(length(ui), length(mf$terms[[2]]$coef.names),
                              length(inputs), inputs)

    # concurrent
    mf$terms[[3]]$maxval <- n

    # absdiff("sqrt.age")
    nodecov <- dat$attr$sqrt.age
    power <- 1
    inputs <- c(power, nodecov)
    mf$terms[[4]]$inputs <- c(0, length(mf$terms[[4]]$coef.names),
                              length(inputs), inputs)

    # nodematch("role.class")
    nodecov <- dat$attr$role.class
    u <- sort(unique(nodecov))
    u <- u[1:2] # keep = 1:2
    nodecov <- match(nodecov, u, nomatch = length(u) + 1)
    dontmatch <- nodecov == (length(u) + 1)
    nodecov[dontmatch] <- length(u) + (1:sum(dontmatch))
    ui <- seq(along = u)
    inputs <- c(ui, nodecov)
    mf$terms[[5]]$inputs <- c(0, length(mf$terms[[5]]$coef.names),
                              length(inputs), inputs)

    ## combined maxval ##
    mf$maxval[1] <- maxdyads
    mf$maxval[3] <- n

    ## Update model.diss ##
    md$terms[[1]]$maxval <- maxdyads
    md$maxval <- maxdyads

    ## Update MHproposal.form: resize bound-degree constraint matrices to n ##
    mhf$arguments$constraints$bd$attribs <-
      matrix(rep(mhf$arguments$constraints$bd$attribs[1], n), ncol = 1)
    mhf$arguments$constraints$bd$maxout <-
      matrix(rep(mhf$arguments$constraints$bd$maxout[1], n), ncol = 1)
    mhf$arguments$constraints$bd$maxin <- matrix(rep(n - 1, n), ncol = 1)
    mhf$arguments$constraints$bd$minout <-
      mhf$arguments$constraints$bd$minin <- matrix(rep(0, n), ncol = 1)

    ## Update MHproposal.diss ##
    mhd$arguments$constraints$bd <- mhf$arguments$constraints$bd

    dat$p[[network]] <- list(model.form = mf, model.diss = md,
                             MHproposal.form = mhf, MHproposal.diss = mhd)
  }

  if (network == 3) {
    # One-off network is a cross-sectional ERGM: no dissolution model
    mf <- p$model.form
    mhf <- p$MHproposal

    if (!identical(mf$coef.names, c("edges",
                                    "nodefactor.deg.main.deg.pers.0.1",
                                    "nodefactor.deg.main.deg.pers.0.2",
                                    "nodefactor.deg.main.deg.pers.1.0",
                                    "nodefactor.deg.main.deg.pers.1.1",
                                    "nodefactor.deg.main.deg.pers.1.2",
                                    "nodefactor.riskg.1", "nodefactor.riskg.2",
                                    "nodefactor.riskg.4", "nodefactor.riskg.5",
                                    "absdiff.sqrt.age",
                                    "nodematch.role.class.I",
                                    "nodematch.role.class.R"))) {
      stop("updatenwp_msm will not currently work with this formula, contact SJ")
    }

    ## Update model.form ##

    # edges
    mf$terms[[1]]$maxval <- maxdyads

    # nodefactor(c("deg.main", "deg.pers"))
    # current main degree already written in last conditional block
    dat$attr$deg.pers <- get_degree(dat$el[[2]])
    # combined factor levels are "<deg.main>.<deg.pers>" strings
    nodecov <- do.call(paste, c(sapply(c("deg.main", "deg.pers"),
                                       function(oneattr) dat$attr[[oneattr]],
                                       simplify = FALSE), sep = "."))
    u <- sort(unique(nodecov))
    u <- u[-1] # remove base values here
    nodecov <- match(nodecov, u, nomatch = length(u) + 1)
    ui <- seq(along = u)
    inputs <- c(ui, nodecov)
    mf$terms[[2]]$inputs <- c(length(ui), length(mf$terms[[2]]$coef.names),
                              length(inputs), inputs)

    # nodefactor("riskg", base = 3)
    nodecov <- dat$attr$riskg
    u <- sort(unique(nodecov))
    u <- u[-3] # remove base values here
    nodecov <- match(nodecov, u, nomatch = length(u) + 1)
    ui <- seq(along = u)
    inputs <- c(ui, nodecov)
    mf$terms[[3]]$inputs <- c(length(ui), length(mf$terms[[3]]$coef.names),
                              length(inputs), inputs)

    # absdiff("sqrt.age")
    nodecov <- dat$attr$sqrt.age
    power <- 1
    inputs <- c(power, nodecov)
    mf$terms[[4]]$inputs <- c(0, length(mf$terms[[4]]$coef.names),
                              length(inputs), inputs)

    # nodematch("role.class")
    nodecov <- dat$attr$role.class
    u <- sort(unique(nodecov))
    u <- u[1:2] # keep = 1:2
    nodecov <- match(nodecov, u, nomatch = length(u) + 1)
    dontmatch <- nodecov == (length(u) + 1)
    nodecov[dontmatch] <- length(u) + (1:sum(dontmatch))
    ui <- seq(along = u)
    inputs <- c(ui, nodecov)
    mf$terms[[5]]$inputs <- c(0, length(mf$terms[[5]]$coef.names),
                              length(inputs), inputs)

    ## combined maxval ##
    mf$maxval[1] <- maxdyads

    ## Update MHproposal ##
    # no changes

    dat$p[[network]] <- list(model.form = mf, MHproposal = mhf)
  }

  return(dat)
}

# HET -----------------------------------------------------------------

#' @title Network Resimulation Module
#'
#' @description Module function to resimulate the dynamic network forward one
#'              time step conditional on current network structure and vertex
#'              attributes.
#'
#' @inheritParams aging_het
#'
#' @keywords module het
#'
#' @export
#'
simnet_het <- function(dat, at) {

  # Update edges coefficients for the current population size
  dat <- edges_correct_het(dat, at)

  # Update internal ergm data (term inputs, maxvals, proposal constraints)
  dat <- update_nwp_het(dat)

  # Pull network parameters
  nwparam <- get_nwparam(dat)

  # Simulate edgelist one step forward; note the adjusted dissolution
  # coefficient (coef.adj) is used, not the raw one.
  dat$el <- tergmLite::simulate_network(p = dat$p,
                                        el = dat$el,
                                        coef.form = nwparam$coef.form,
                                        coef.diss = nwparam$coef.diss$coef.adj)

  return(dat)
}

# Rebuild the single-network (het) ergm data structure in dat$p for the
# current network size; counterpart of updatenwp_msm for the one-network case.
update_nwp_het <- function(dat) {

  mf <- dat$p$model.form
  md <- dat$p$model.diss
  mhf <- dat$p$MHproposal.form
  mhd <- dat$p$MHproposal.diss

  # Current network size and number of possible undirected dyads.
  n <- attributes(dat$el)$n
  maxdyads <- choose(n, 2)

  ## 1. Update model.form ##

  # edges
  # inputs <- c(0, 1, 0) # not changed
  mf$terms[[1]]$maxval <- maxdyads

  # nodematch on the "male" attribute; inputs packed as
  # c(<0 params>, <1 statistic>, <n inputs>, per-node level codes).
  nodecov <- dat$attr$male
  u <- sort(unique(nodecov))
  nodecov <- match(nodecov, u, nomatch = length(u) + 1)
  inputs <- nodecov
  mf$terms[[2]]$inputs <- c(0, 1, length(inputs), inputs)

  ## Update combined maxval here
  mf$maxval <- c(maxdyads, Inf)

  ## 2. Update model.diss ##
  md$terms[[1]]$maxval <- maxdyads
  md$maxval <- maxdyads

  ## 3. Update MHproposal.form ##
  # Resize bounded-degree constraint matrices to the current n.
  # NOTE(review): maxin is set to n here but to n - 1 in the msm version --
  # confirm whether that difference is intentional.
  mhf$arguments$constraints$bd$attribs <-
    matrix(rep(mhf$arguments$constraints$bd$attribs[1], n), ncol = 1)
  mhf$arguments$constraints$bd$maxout <-
    matrix(rep(mhf$arguments$constraints$bd$maxout[1], n), ncol = 1)
  mhf$arguments$constraints$bd$maxin <- matrix(rep(n, n), ncol = 1)
  mhf$arguments$constraints$bd$minout <-
    mhf$arguments$constraints$bd$minin <- matrix(rep(0, n), ncol = 1)

  ## 4. Update MHproposal.diss ##
  mhd$arguments$constraints$bd <- mhf$arguments$constraints$bd

  ## 5. Output ##
  p <- list(model.form = mf, model.diss = md,
            MHproposal.form = mhf, MHproposal.diss = mhd)
  dat$p <- p

  return(dat)
}


#' @title Adjustment for the Edges Coefficient with Changing Network Size
#'
#' @description Adjusts the edges coefficients in a dynamic network model
#'              to preserve the mean degree.
#'
#' @inheritParams aging_het
#'
#' @details
#' In HIV/STI modeling, there is typically an assumption that changes in
#' population size do not affect one's number of partners, specified as the
#' mean degree for network models. A person would not have 10 times the number
#' of partners should he move from a city 10 times as large. This module uses
#' the adjustment of Krivitsky et al. to adjust the edges coefficients on the
#' three network models to account for varying population size in order to
#' preserve that mean degree.
#'
#' @return
#' The network model parameters stored in \code{dat$nwparam} are updated.
#'
#' @references
#' Krivitsky PN, Handcock MS, and Morris M. "Adjusting for network size and
#' composition effects in exponential-family random graph models." Statistical
#' Methodology. 2011; 8.4: 319-339.
#'
#' @keywords module het
#'
#' @export
#'
edges_correct_het <- function(dat, at) {

  # Popsize before and after this time step
  old.num <- dat$epi$num[at - 1]
  new.num <- sum(dat$attr$active == 1, na.rm = TRUE)

  # New Coefs: shift the edges term by log(old N) - log(new N)
  coef.form <- get_nwparam(dat)$coef.form
  coef.form[1] <- coef.form[1] + log(old.num) - log(new.num)
  dat$nwparam[[1]]$coef.form <- coef.form

  return(dat)
}
library(tidyverse)
library(shiny)

# UI: a heading pair, a numeric sample-size control, and two plot slots.
ui <- fluidPage(
  h2("BoxPlot and Histogram", style = "color:red"),
  h4("by Mahima Palamanda", style = "color:blue"),
  numericInput(inputId = "n", label = "Sample size", value = 25),
  plotOutput(outputId = "boxplot"),
  plotOutput(outputId = "histogramplot")
)

# Server: each output draws a fresh N(0, 1) sample of size input$n.
server <- function(input, output) {
  output$boxplot <- renderPlot({
    boxplot(rnorm(input$n))
  })

  output$histogramplot <- renderPlot({
    hist(rnorm(input$n), breaks = seq(from = -6, to = 6, by = 0.5))
  })
}

shinyApp(ui = ui, server = server)
/Shiny-HW-11.R
no_license
mahima229/Shiny11
R
false
false
544
r
library(tidyverse) library(shiny) ui <-fluidPage( h2("BoxPlot and Histogram", style = "color:red"), h4("by Mahima Palamanda", style ="color:blue"), numericInput(inputId = "n", label = "Sample size", value = 25), plotOutput(outputId = "boxplot"), plotOutput(outputId = "histogramplot") ) server <- function(input, output) { output$boxplot <- renderPlot({ boxplot(rnorm(input$n)) }) output$histogramplot <-renderPlot({ hist(rnorm(input$n),breaks=seq(from=-6,to=6,by=0.5)) }) } shinyApp(ui = ui, server = server)
# Reading, naming and subsetting power consumption data
# (UCI "Individual household electric power consumption" file; ';'-separated,
# header skipped and replaced by explicit column names below)
power <- read.table("household_power_consumption.txt",skip=1,sep=";")
names(power) <- c("Date","Time","Global_active_power","Global_reactive_power","Voltage","Global_intensity","Sub_metering_1","Sub_metering_2","Sub_metering_3")
# Keep only the two target days (dates are still d/m/Y strings at this point)
subpower <- subset(power,power$Date=="1/2/2007" | power$Date =="2/2/2007")

# Transforming the Date and Time vars from characters into objects of type Date and POSIXlt respectively
subpower$Date <- as.Date(subpower$Date, format="%d/%m/%Y")
subpower$Time <- strptime(subpower$Time, format="%H:%M:%S")
# NOTE(review): the two assignments below assume the subset holds exactly 2880
# rows in date order (two full days of one-minute readings, 1440 per day) and
# stamp the date onto each half -- confirm against the raw file before reuse.
subpower[1:1440,"Time"] <- format(subpower[1:1440,"Time"],"2007-02-01 %H:%M:%S")
subpower[1441:2880,"Time"] <- format(subpower[1441:2880,"Time"],"2007-02-02 %H:%M:%S")

#to save in png format
png("plot2.png")

# calling the basic plot function
# (Global_active_power was read as a factor-like column, hence the
# as.numeric(as.character(...)) round-trip)
plot(subpower$Time,as.numeric(as.character(subpower$Global_active_power)),type="l",xlab="",ylab="Global Active Power (kilowatts)")

# annotating graph
title(main="Global Active Power Vs Time")

# close the png device so the file is written to disk
dev.off()
/Plot2.R
no_license
akashggupta/Exporatory-Data-Analysis-Project-1
R
false
false
1,032
r
# Reading, naming and subsetting power consumption data power <- read.table("household_power_consumption.txt",skip=1,sep=";") names(power) <- c("Date","Time","Global_active_power","Global_reactive_power","Voltage","Global_intensity","Sub_metering_1","Sub_metering_2","Sub_metering_3") subpower <- subset(power,power$Date=="1/2/2007" | power$Date =="2/2/2007") # Transforming the Date and Time vars from characters into objects of type Date and POSIXlt respectively subpower$Date <- as.Date(subpower$Date, format="%d/%m/%Y") subpower$Time <- strptime(subpower$Time, format="%H:%M:%S") subpower[1:1440,"Time"] <- format(subpower[1:1440,"Time"],"2007-02-01 %H:%M:%S") subpower[1441:2880,"Time"] <- format(subpower[1441:2880,"Time"],"2007-02-02 %H:%M:%S") #to save in png format png("plot2.png") # calling the basic plot function plot(subpower$Time,as.numeric(as.character(subpower$Global_active_power)),type="l",xlab="",ylab="Global Active Power (kilowatts)") # annotating graph title(main="Global Active Power Vs Time") dev.off()
#' Compute the volume of each component of a level set
#'
#' For every component, walks the linked list of atoms (axis-aligned grid
#' rectangles) belonging to it and sums the rectangle volumes.
#'
#' @param component integer vector; for each component, the index into the
#'   atom list where its chain starts (a pointer of 0 terminates a chain).
#' @param AtomlistAtom integer vector mapping a list position to an atom index
#'   (row of \code{low}/\code{upp}).
#' @param AtomlistNext integer vector of "next" pointers; 0 marks the end of
#'   a chain.
#' @param low,upp matrices (atoms x d) of lower/upper grid coordinates.
#' @param steppi numeric vector of length d; grid step size per dimension.
#' @return A (length(component) x 1) matrix of component volumes; a 0 x 1
#'   matrix when \code{component} is empty.
cvolumbag<-function(component,AtomlistAtom,AtomlistNext,low,upp,steppi)
{
  componum<-length(component)
  volume<-matrix(0,componum,1)
  # seq_len() (not 1:componum) so an empty component vector yields no
  # iterations instead of the erroneous c(1, 0) loop range.
  for (i in seq_len(componum)){
    curvolu<-0
    pointer<-component[i]
    while (pointer>0){
      atto<-AtomlistAtom[pointer]
      # Rectangle volume: product over dimensions of (side length * step),
      # vectorized instead of the original per-dimension loop.
      curvolu<-curvolu+prod((upp[atto,]-low[atto,])*steppi)
      pointer<-AtomlistNext[pointer]
    }
    volume[i]<-curvolu
  }
  return(volume)
}
/R/cvolumbag.R
no_license
cran/denpro
R
false
false
490
r
cvolumbag<-function(component,AtomlistAtom,AtomlistNext,low,upp,steppi) { d<-dim(low)[2] componum<-length(component) volume<-matrix(0,componum,1) for (i in 1:componum){ curvolu<-0 pointer<-component[i] while (pointer>0){ atto<-AtomlistAtom[pointer] vol<-1 for (j in 1:d){ vol<-vol*(upp[atto,j]-low[atto,j])*steppi[j] } curvolu<-curvolu+vol pointer<-AtomlistNext[pointer] } volume[i]<-curvolu } return(volume) }
#' Keep track of the active scenario names and give a closure to check them later
#'
#' @param ...
#' a list of scenarios (probably their names)
#'
#' @return
#' a closure that gets a list of scenarios (probably their names) and returns
#' TRUE/FALSE depending on whether any of them are in the set
#' @export
#'
#' @examples
#' is_there <- sv_set_scenario('a', 'b')
#' print(is_there('a')) # prints TRUE
sv_set_scenario <- function( ... ){

  # captured in the closure's environment; `<-` instead of `=` per R style
  the.scenarios <- c(...)

  cloj.is_scenario <- function(...){
    some.scenarios <- c(...)
    # TRUE when at least one queried scenario is in the active set;
    # any() on an empty query returns FALSE
    any(some.scenarios %in% the.scenarios)
  }

  cloj.is_scenario
}
/R/scenario.R
no_license
shahryareiv/singleverbr
R
false
false
624
r
#' Keep track of the active scenarios name and give a clojure to check them later #' #' @param ... #' a list of scenarios (probably their names) #' #' @return #' a clojure function that gets a list of scenarios (probably their names) and return TRUE/FALSE if they are in the set #' @export #' #' @examples #' is_there <- sv_set_scenario('a', 'b') #' print(is_there('a')) # prints TRUE sv_set_scenario <- function( ... ){ the.scenarios = c(...) cloj.is_scenario <- function(...){ some.scenarios <-c(...) result <- any(some.scenarios %in% the.scenarios) return(result) } return(cloj.is_scenario) }
# Pipeline: extract named sections from 10-Q filings, build per-section
# document-feature matrices, compute cosine similarity between consecutive
# filings of the same ticker, and join the scores back onto the master index.
library("quanteda")
library("dplyr")
library("readr")
library("purrr")
library("stringr")
library("readtext")
library("reshape2")
library("magrittr")
library("ggplot2")
library("gridExtra")

masterIndex <- read_csv("master_index_10Q_withFile.csv")
tickers <- unique(masterIndex$ticker) # use unique(masterIndex) if we wish to scale this across multiple years, or keep tickers.txt updated
StopWordsList <- readLines("../Data/Text Data/StopWordsList.txt")
# 10-Q item numbers to extract from each filing
sections <- c("1", "1A", "2", "3", "4", "5", "6")
file_path <- "../Data/Text Data/10-Q/"
file_type <- ".txt"

# Pull the text of one "Item <section>" out of a filing.
# The filings are pre-processed so that section boundaries are marked with a
# degree sign; the regex grabs everything between "°Item <section>" and the
# next "°". Returns the longest hit, or the sentinel "empty" when the section
# is missing or merely incorporated by reference.
section_extractor <- function(statement, section){
  name <- statement$doc_id
  pattern <- paste0("(?i)°Item ", section, "[^\\w|\\d]", ".*?°")
  section_hits <- str_extract_all(statement, pattern=pattern, simplify=TRUE)
  if (is_empty(section_hits) == TRUE){
    empty_vec <- "empty"
    names(empty_vec) <- paste(name, section, sep = "_")
    print(paste("No hits for section", section, "of filing", name))
    return(empty_vec)
  }
  # keep the hit with the most tokens; ties resolved to the LAST maximal hit
  word_counts <- map_int(section_hits, ntoken)
  max_hit <- which(word_counts == max(word_counts))
  max_filing <- section_hits[[max_hit[length(max_hit)]]]
  # short sections that only say "incorporated by reference" carry no content
  if (max(word_counts) < 250 & str_detect(max_filing, pattern = "(?i)(incorporated by reference)|(incorporated herein by reference)") == TRUE){
    empty_vec <- "empty"
    names(empty_vec) <- paste(name, section, sep = "_")
    print(paste("Section", section, "of filing", name, "incorporates by reference its information"))
    return(empty_vec)
  }
  names(max_filing) <- paste(name, section, sep = "_")
  return(max_filing)
}

# Build a document-feature matrix for one section across a list of filings:
# extract the section from every filing, combine into one corpus, tokenize
# (lowercased, stopwords and punctuation removed), drop documents shorter
# than min_words, and optionally apply log term-frequency weighting.
# NOTE(review): quanteda's tf() is deprecated in newer releases
# (dfm_weight(scheme = "logcount") is the replacement) -- confirm the pinned
# quanteda version before upgrading.
section_dfm <- function(statements_list, section, min_words, tf){
  map(statements_list, section_extractor, section=section) %>%
    map(corpus) %>%
    reduce(`+`) %>%
    dfm(tolower=TRUE, remove=StopWordsList, remove_punct=TRUE) %>%
    dfm_subset(., rowSums(.) > min_words) %>%
    when(tf==TRUE ~ tf(., scheme="log"), ~ .)
}

# Convenience wrapper: one dfm per section (list of dfms).
filing_dfm <- function(sections, filings_list, min_words, tf){
  map(sections, section_dfm, statements_list=filings_list, min_words=min_words, tf=tf)
}

# Flatten a similarity/distance object into a long data frame with one row
# per (previous_filing, filing) pair; the lower triangle is zeroed out so
# each pair appears once. Document names are trimmed at the first ".".
dist_parser <- function(distObj, section){
  melted_frame <- as.matrix(distObj) %>%
    {. * upper.tri(.)} %>%
    melt(varnames = c("previous_filing", "filing"), value.name = paste0("sec", section, "dist"))
  melted_frame$previous_filing %<>% str_extract(pattern = ".*?(?=\\.)")
  melted_frame$filing %<>% str_extract(pattern = ".*?(?=\\.)")
  return(melted_frame)
}

# Similarity scores for every dfm in a list, flattened via dist_parser.
filing_similarity <- function(dfm_list, method){
  map(dfm_list, textstat_simil, method=method) %>%
    map(dist_parser)}

# All filing file names for one ticker, ordered by filing date.
index_filing_filterer <- function(the_ticker, index){
  filter(index, ticker == the_ticker) %>%
    arrange(date) %>%
    pull(filing)
}

# For one ticker: compute per-section cosine similarity between each filing
# and its immediate predecessor, returning one row per consecutive pair.
# Returns an empty frame when the ticker has fewer than two filings.
distance_returns_calculator <- function(the_ticker){
  file_names <- index_filing_filterer(the_ticker, masterIndex)
  if (length(file_names) <= 1){
    empty_list <- data_frame()
    print(paste("Only one filing available for ticker", the_ticker))
    return(empty_list)
  }
  file_locations <- paste0(file_path, file_names, file_type)
  filings_list <- map(file_locations, readtext)
  # one similarity frame per section, joined into a wide frame keyed on the pair
  similarity_list <- map(sections, section_dfm, statements_list=filings_list, min_words=10, tf=TRUE) %>%
    map(textstat_simil, method="cosine") %>%
    map2(sections, dist_parser) %>%
    reduce(left_join, by = c("previous_filing", "filing"))
  # restrict to consecutive (previous, current) pairs only
  prev_current_mapping <- data_frame(previous_filing = file_names[-length(file_names)],
                                     filing = file_names[-1])
  distance_returns_df <- left_join(prev_current_mapping, similarity_list, by = c("previous_filing", "filing"))
  print(paste("Successfully mapped distance scores to financial returns for ticker", the_ticker))
  return(distance_returns_df)}

# Run for every ticker, stack, join onto the index, and persist.
distance_df <- map(tickers, distance_returns_calculator) %>%
  reduce(rbind)
masterIndex %<>% left_join(distance_df, by = "filing")
write.csv(masterIndex, file = "index_distance_10Q.csv", row.names = FALSE)
/Documentation/Text-Distance-Algo-10Q.R
no_license
EricHe98/Financial-Statements-Text-Analysis
R
false
false
3,926
r
library("quanteda") library("dplyr") library("readr") library("purrr") library("stringr") library("readtext") library("reshape2") library("magrittr") library("ggplot2") library("gridExtra") masterIndex <- read_csv("master_index_10Q_withFile.csv") tickers <- unique(masterIndex$ticker) # use unique(masterIndex) if we wish to scale this across multiple years, or keep tickers.txt updated StopWordsList <- readLines("../Data/Text Data/StopWordsList.txt") sections <- c("1", "1A", "2", "3", "4", "5", "6") file_path <- "../Data/Text Data/10-Q/" file_type <- ".txt" section_extractor <- function(statement, section){ name <- statement$doc_id pattern <- paste0("(?i)°Item ", section, "[^\\w|\\d]", ".*?°") section_hits <- str_extract_all(statement, pattern=pattern, simplify=TRUE) if (is_empty(section_hits) == TRUE){ empty_vec <- "empty" names(empty_vec) <- paste(name, section, sep = "_") print(paste("No hits for section", section, "of filing", name)) return(empty_vec) } word_counts <- map_int(section_hits, ntoken) max_hit <- which(word_counts == max(word_counts)) max_filing <- section_hits[[max_hit[length(max_hit)]]] if (max(word_counts) < 250 & str_detect(max_filing, pattern = "(?i)(incorporated by reference)|(incorporated herein by reference)") == TRUE){ empty_vec <- "empty" names(empty_vec) <- paste(name, section, sep = "_") print(paste("Section", section, "of filing", name, "incorporates by reference its information")) return(empty_vec) } names(max_filing) <- paste(name, section, sep = "_") return(max_filing) } section_dfm <- function(statements_list, section, min_words, tf){ map(statements_list, section_extractor, section=section) %>% map(corpus) %>% reduce(`+`) %>% dfm(tolower=TRUE, remove=StopWordsList, remove_punct=TRUE) %>% dfm_subset(., rowSums(.) > min_words) %>% when(tf==TRUE ~ tf(., scheme="log"), ~ .) 
} filing_dfm <- function(sections, filings_list, min_words, tf){ map(sections, section_dfm, statements_list=filings_list, min_words=min_words, tf=tf) } dist_parser <- function(distObj, section){ melted_frame <- as.matrix(distObj) %>% {. * upper.tri(.)} %>% melt(varnames = c("previous_filing", "filing"), value.name = paste0("sec", section, "dist")) melted_frame$previous_filing %<>% str_extract(pattern = ".*?(?=\\.)") melted_frame$filing %<>% str_extract(pattern = ".*?(?=\\.)") return(melted_frame) } filing_similarity <- function(dfm_list, method){ map(dfm_list, textstat_simil, method=method) %>% map(dist_parser)} index_filing_filterer <- function(the_ticker, index){ filter(index, ticker == the_ticker) %>% arrange(date) %>% pull(filing) } distance_returns_calculator <- function(the_ticker){ file_names <- index_filing_filterer(the_ticker, masterIndex) if (length(file_names) <= 1){ empty_list <- data_frame() print(paste("Only one filing available for ticker", the_ticker)) return(empty_list) } file_locations <- paste0(file_path, file_names, file_type) filings_list <- map(file_locations, readtext) similarity_list <- map(sections, section_dfm, statements_list=filings_list, min_words=10, tf=TRUE) %>% map(textstat_simil, method="cosine") %>% map2(sections, dist_parser) %>% reduce(left_join, by = c("previous_filing", "filing")) prev_current_mapping <- data_frame(previous_filing = file_names[-length(file_names)], filing = file_names[-1]) distance_returns_df <- left_join(prev_current_mapping, similarity_list, by = c("previous_filing", "filing")) print(paste("Successfully mapped distance scores to financial returns for ticker", the_ticker)) return(distance_returns_df)} distance_df <- map(tickers, distance_returns_calculator) %>% reduce(rbind) masterIndex %<>% left_join(distance_df, by = "filing") write.csv(masterIndex, file = "index_distance_10Q.csv", row.names = FALSE)
library(bold) ### Name: bold_seqspec ### Title: Get BOLD specimen + sequence data. ### Aliases: bold_seqspec ### ** Examples ## Not run: ##D bold_seqspec(taxon='Osmia') ##D bold_seqspec(taxon='Osmia', format='xml') ##D bold_seqspec(taxon='Osmia', response=TRUE) ##D res <- bold_seqspec(taxon='Osmia', sepfasta=TRUE) ##D res$fasta[1:2] ##D res$fasta['GBAH0293-06'] ##D ##D # records that match a marker name ##D res <- bold_seqspec(taxon="Melanogrammus aeglefinus", marker="COI-5P") ##D ##D # records that match a geographic locality ##D res <- bold_seqspec(taxon="Melanogrammus aeglefinus", geo="Canada") ##D ##D ## curl debugging ##D ### You can do many things, including get verbose output on the curl call, ##D ### and set a timeout ##D head(bold_seqspec(taxon='Osmia', verbose = TRUE)) ##D ## timeout ##D # head(bold_seqspec(taxon='Osmia', timeout_ms = 1)) ## End(Not run)
/data/genthat_extracted_code/bold/examples/bold_seqspec.Rd.R
no_license
surayaaramli/typeRrh
R
false
false
888
r
library(bold) ### Name: bold_seqspec ### Title: Get BOLD specimen + sequence data. ### Aliases: bold_seqspec ### ** Examples ## Not run: ##D bold_seqspec(taxon='Osmia') ##D bold_seqspec(taxon='Osmia', format='xml') ##D bold_seqspec(taxon='Osmia', response=TRUE) ##D res <- bold_seqspec(taxon='Osmia', sepfasta=TRUE) ##D res$fasta[1:2] ##D res$fasta['GBAH0293-06'] ##D ##D # records that match a marker name ##D res <- bold_seqspec(taxon="Melanogrammus aeglefinus", marker="COI-5P") ##D ##D # records that match a geographic locality ##D res <- bold_seqspec(taxon="Melanogrammus aeglefinus", geo="Canada") ##D ##D ## curl debugging ##D ### You can do many things, including get verbose output on the curl call, ##D ### and set a timeout ##D head(bold_seqspec(taxon='Osmia', verbose = TRUE)) ##D ## timeout ##D # head(bold_seqspec(taxon='Osmia', timeout_ms = 1)) ## End(Not run)
# Round half AWAY from zero to n decimal places. Base round() uses banker's
# rounding, which would send values ending in .5 toward the even digit; this
# variant always rounds them outward (0.5 -> 1, -0.5 -> -1).
round2 = function(x, n) {
  sgn <- sign(x)
  shifted <- abs(x) * 10^n
  # nudge by 0.5 plus a machine epsilon so exact .5 values clear the truncation
  shifted <- trunc(shifted + 0.5 + sqrt(.Machine$double.eps))
  sgn * shifted / 10^n
}

# Attach length-measurement metadata to a survey data frame keyed on SPEC
# (species code): LENMEASTYPE (how length was measured) and LNGTCODE (the
# recording unit, 1 = cm, 0.1 = mm). Defaults apply to all rows first and are
# then overwritten for the special-case taxa.
addLenMeasInfo <- function(df = NULL) {
  # defaults: standard length (2) recorded in 1 cm units
  df$LENMEASTYPE <- 2
  df$LNGTCODE <- 1

  # crabs: 7 "Carapace Width", recorded in mm
  is_crab <- (df$SPEC >= 2506 & df$SPEC <= 2547) | df$SPEC == 6006
  df[is_crab, c("LENMEASTYPE", "LNGTCODE")] <- data.frame(7, 0.1)

  # lobsters: 6 "Carapace Length", mm
  is_lobster <- df$SPEC %in% c(2550, 2551, 2552, 2553, 8145)
  df[is_lobster, c("LENMEASTYPE", "LNGTCODE")] <- data.frame(6, 0.1)

  # scallops: 9 "Shell Height", mm
  is_scallop <- df$SPEC %in% c(4320, 4321, 4322, 4324, 4325)
  df[is_scallop, c("LENMEASTYPE", "LNGTCODE")] <- data.frame(9, 0.1)

  # squid: 5 "Mantle Length"; LNGTCODE keeps the default (cm)
  is_squid <- df$SPEC %in% c(4511, 4512, 4514, 4664)
  df[is_squid, "LENMEASTYPE"] <- 5

  # herring lengths are recorded in mm
  df[df$SPEC == 60, "LNGTCODE"] <- 0.1

  df
}

# Derive LNGTCLASS: each fish length (FLEN) rounded UP to the next multiple
# of its recording unit (LNGTCODE).
addLNGTCLASS <- function(df = NULL) {
  df$LNGTCLASS <- NA
  df$LNGTCLASS <- ceiling(df$FLEN / df$LNGTCODE) * df$LNGTCODE
  df
}
/R/Mar_utilities.R
permissive
Maritimes/CanaDatras
R
false
false
1,219
r
round2 = function(x, n) { #this function ensures that values ending in 0.5 are round up to the next integer - not down to zero (R's default) posneg = sign(x) z = abs(x)*10^n z = z + 0.5 + sqrt(.Machine$double.eps) z = trunc(z) z = z/10^n z*posneg } addLenMeasInfo <- function(df = NULL){ #set LENMEASTYPE to standard length (2) for all, then overwrite weirdies #set LNGTCODE to 1cm for all, then overwrite weirdies df$LENMEASTYPE <- 2 df$LNGTCODE <- 1 #crabs - 7 "Carapace Width"; in mm df[(df$SPEC >= 2506 & df$SPEC <= 2547) | df$SPEC == 6006,c("LENMEASTYPE", "LNGTCODE")]<-data.frame(7,0.1) #lobsters - 6 "Carapace Length"; mm df[df$SPEC %in% c(2550,2551,2552,2553,8145),c("LENMEASTYPE", "LNGTCODE")]<-data.frame(6,0.1) #scallops - 9 "Shell Height"; mm df[df$SPEC %in% c(4320,4321,4322,4324,4325),c("LENMEASTYPE", "LNGTCODE")]<-data.frame(9,0.1) #squid - 5 "Mantle Length" df[df$SPEC %in% c(4511,4512,4514,4664),"LENMEASTYPE"]<-5 #and LNGTCODE is default (cm) #herring recorded in mm df[df$SPEC ==60 ,"LNGTCODE"]<-0.1 return(df) } addLNGTCLASS <- function(df=NULL){ df$LNGTCLASS <- NA df$LNGTCLASS<- ceiling(df$FLEN/df$LNGTCODE) * df$LNGTCODE return(df) }
# Produce the four course plots (plot1.png .. plot4.png) from the household
# power consumption data for 1-2 Feb 2007.
# NOTE(review): the input path is a hard-coded absolute Windows path, and
# require() merely returns FALSE if sqldf is missing -- both limit
# portability; confirm before reusing outside the original machine.
plots <- function(){
  # It's assumed that the required file is already contained in the working directory
  # Each plot is saved in a .PNG file

  # Read data file, but only the rows with date='2/2/2007' or date='1/2/2007
  # Convert the column named "Date" to type Date
  require(sqldf)
  testdata <- read.csv.sql("C:\\Users\\ok\\Documents\\R\\household_power_consumption.txt", sql = "select * from file where Date = '2/2/2007' OR Date='1/2/2007' ",sep=";")
  # combine the original Date and Time strings into one POSIXct timestamp
  testdata$Date<-as.POSIXct(paste(testdata$Date, testdata$Time), format = "%d/%m/%Y %T")

  # Plot 1: histogram of global active power
  png("plot1.png", width = 480, height = 480)
  hist(testdata$Global_active_power, col="red", main="Global Active Power", xlab="Global Active Power (kilowatts)", ylab="Frequency")
  # Close connection
  dev.off()

  # Plot 2: global active power over time
  png("plot2.png", width = 480, height = 480)
  plot(testdata$Global_active_power~testdata$Date, type="l", xlab="", ylab="Global Active Power (kilowatts)")
  # Close connection
  dev.off()

  # Plot 3: the three sub-metering series overlaid, with legend
  png("plot3.png", width = 480, height = 480)
  plot(testdata$Date, testdata$Sub_metering_1, type="l", xlab="", ylab="Energy sub metering")
  lines(testdata$Date, testdata$Sub_metering_2, type="l", col="red")
  lines(testdata$Date, testdata$Sub_metering_3, type="l", col="blue")
  # Add a legend to the plot
  legend("topright",col=c("black", "red", "blue"),c("Sub_metering_1","Sub_metering_2","Sub_metering_3"),lty=1)
  # Close connection
  dev.off()

  # Plot 4: 2x2 panel combining the plots above plus voltage and reactive power
  png("plot4.png", width = 480, height = 480)
  # Change settings first to include all for plots into one
  par(mfrow=c(2,2))
  plot(testdata$Global_active_power~testdata$Date, type="l", xlab="", ylab="Global Active Power")
  plot(testdata$Voltage~testdata$Date, type="l", ylab="Voltage", xlab="datetime")
  plot(testdata$Date, testdata$Sub_metering_1, type="l", xlab="", ylab="Energy sub metering")
  lines(testdata$Date, testdata$Sub_metering_2, type="l", col="red")
  lines(testdata$Date, testdata$Sub_metering_3, type="l", col="blue")
  legend("topright",col=c("black", "red", "blue"),c("Sub_metering_1","Sub_metering_2","Sub_metering_3"),lty=1)
  plot(testdata$Global_reactive_power~testdata$Date, type="l", xlab="datetime", ylab="Global_reactive_power")
  # Close connection
  dev.off()
}
/week1_plots.R
no_license
Laik2/ExData_Plotting1
R
false
false
2,289
r
plots <- function(){ # It's assumed that the required file is already contained in the working directory # Each plot is saved in a .PNG file # Read data file, but only the rows with date='2/2/2007' or date='1/2/2007 # Convert the column named "Date" to type Date require(sqldf) testdata <- read.csv.sql("C:\\Users\\ok\\Documents\\R\\household_power_consumption.txt", sql = "select * from file where Date = '2/2/2007' OR Date='1/2/2007' ",sep=";") testdata$Date<-as.POSIXct(paste(testdata$Date, testdata$Time), format = "%d/%m/%Y %T") # Plot 1 png("plot1.png", width = 480, height = 480) hist(testdata$Global_active_power, col="red", main="Global Active Power", xlab="Global Active Power (kilowatts)", ylab="Frequency") # Close connection dev.off() # Plot 2 png("plot2.png", width = 480, height = 480) plot(testdata$Global_active_power~testdata$Date, type="l", xlab="", ylab="Global Active Power (kilowatts)") # Close connection dev.off() # Plot 3 png("plot3.png", width = 480, height = 480) plot(testdata$Date, testdata$Sub_metering_1, type="l", xlab="", ylab="Energy sub metering") lines(testdata$Date, testdata$Sub_metering_2, type="l", col="red") lines(testdata$Date, testdata$Sub_metering_3, type="l", col="blue") # Add a legend to the plot legend("topright",col=c("black", "red", "blue"),c("Sub_metering_1","Sub_metering_2","Sub_metering_3"),lty=1) # Close connection dev.off() # Plot 4 png("plot4.png", width = 480, height = 480) # Change settings first to include all for plots into one par(mfrow=c(2,2)) plot(testdata$Global_active_power~testdata$Date, type="l", xlab="", ylab="Global Active Power") plot(testdata$Voltage~testdata$Date, type="l", ylab="Voltage", xlab="datetime") plot(testdata$Date, testdata$Sub_metering_1, type="l", xlab="", ylab="Energy sub metering") lines(testdata$Date, testdata$Sub_metering_2, type="l", col="red") lines(testdata$Date, testdata$Sub_metering_3, type="l", col="blue") legend("topright",col=c("black", "red", 
"blue"),c("Sub_metering_1","Sub_metering_2","Sub_metering_3"),lty=1) plot(testdata$Global_reactive_power~testdata$Date, type="l", xlab="datetime", ylab="Global_reactive_power") # Close connection dev.off() }
# Module UI function: placeholder into which the server-side dropdown menu
# (rendered below) is injected.
notification_module_output <- function(id){
  ns <- NS(id)
  # NOTE(review): `ns` is created but never applied -- the output ID
  # "notification" is not namespaced (ns("notification")), so this module
  # cannot be instantiated more than once per app; confirm intent.
  dropdownMenuOutput("notification")
}

# Module server function: renders a static warning notification in the
# shinydashboard header dropdown.
notification_module <- function(input, output, session){
  output$notification <- renderMenu({
    dropdownMenu(type = "notifications",
                 badgeStatus = "warning",
                 notificationItem(text = "Sample size is not enough for BF analysis.",
                                  icon = icon("ok", lib = "glyphicon"),
                                  status = "danger"))
  })
}

# Module util functions
# There are no util functions for this module.
/real_time_reporting/modules/notification_module.R
no_license
marton-balazs-kovacs/going_real_time
R
false
false
622
r
# Module UI function notification_module_output <- function(id){ ns <- NS(id) dropdownMenuOutput("notification") } # Module server function notification_module <- function(input, output, session){ output$notification <- renderMenu({ dropdownMenu(type = "notifications", badgeStatus = "warning", notificationItem(text = "Sample size is not enough for BF analysis.", icon = icon("ok", lib = "glyphicon"), status = "danger")) }) } # Module util functions # There are no util functions for this module.
# Min-max normalization: rescale z linearly onto [0, 1].
# A constant vector is returned unchanged to avoid dividing by zero.
unitize_1 <- function(z) {
  lo <- min(z)
  hi <- max(z)
  if ((hi - lo) == 0) return(z)
  (z - lo) / (hi - lo)
}

# Z-score normalization: center on the mean, scale by the standard deviation.
# A constant vector (sd == 0) is returned unchanged.
unitize_2 <- function(z) {
  center <- mean(z)
  spread <- sd(z)
  if (spread == 0) return(z)
  (z - center) / spread
}

# Robust normalization: center on the median, scale by the interquartile
# range. A vector with zero IQR is returned unchanged.
unitize_3 <- function(z) {
  center <- median(z)
  spread <- IQR(z)
  if (spread == 0) return(z)
  (z - center) / spread
}
/R/utils.R
permissive
sevvandi/dobin
R
false
false
487
r
unitize_1 <- function(z) { # min-max normalization - 0 -1 min.z <- min(z) max.z <- max(z) if ((max.z-min.z)==0) return(z) (z - min.z )/(max.z-min.z) } unitize_2 <- function(z) { # Mean and SD normalization mean.z <- mean(z) sd.z <- sd(z) #print(sd.z) if (sd.z==0) return(z) (z - mean.z )/sd.z } unitize_3 <- function(z) { # Median and IQR normalization median.z <- median(z) iqr.z <- IQR(z) if (iqr.z==0) return(z) (z - median.z )/iqr.z }
#' Area under the ROC curve by trapezoidal integration
#'
#' Builds ROC points (1 - specificity, sensitivity), drops pairs with missing
#' values, anchors the curve at (0, 0) and (1, 1), and sums the trapezoid
#' areas between consecutive points.
#'
#' @param sens numeric vector of sensitivities.
#' @param spec numeric vector of specificities, same length as \code{sens}.
#' @return A single numeric value: the estimated AUC.
auc <- function(sens, spec)
{
  .tab.res <- data.frame(se=sens, sp=spec)
  # drop pairs where either coordinate is NA (NA propagates through the sum)
  .tab.res <- .tab.res[!is.na(.tab.res$sp + .tab.res$se),]
  .tab.res$sp1 <- 1-.tab.res$sp
  # order points along the x-axis (1 - specificity), breaking ties by sensitivity
  .tab.res <- .tab.res[order(.tab.res$sp1, .tab.res$se),]
  # anchor the curve: (0,0) is the row c(se=0, sp=1, sp1=0), (1,1) is c(1, 0, 1)
  .tab.res <- rbind(c(0,1,0), .tab.res, c(1,0,1))

  n <- nrow(.tab.res)
  # Trapezoid rule: sum of width * mean height over consecutive point pairs.
  # FIX: the original indexed with se[1:length(se)-1], i.e. se[0:(n-1)],
  # which only worked because R silently drops the zero index; the
  # parenthesized 1:(n - 1) below is the intended (and robust) form.
  return( sum((.tab.res$sp1[2:n] - .tab.res$sp1[1:(n - 1)]) *
                0.5 * (.tab.res$se[2:n] + .tab.res$se[1:(n - 1)])) )
}
/R/auc.R
no_license
cran/RISCA
R
false
false
467
r
auc <- function(sens, spec) { .tab.res <- data.frame(se=sens, sp=spec) .tab.res <- .tab.res[!is.na(.tab.res$sp + .tab.res$se),] .tab.res$sp1 <- 1-.tab.res$sp .tab.res <- .tab.res[order(.tab.res$sp1, .tab.res$se),] .tab.res <- rbind(c(0,1,0), .tab.res, c(1,0,1)) return( sum((.tab.res$sp1[2:length(.tab.res$sp1)] - .tab.res$sp1[1:(length(.tab.res$sp1)-1)]) * 0.5 * (.tab.res$se[2:length(.tab.res$se)]+.tab.res$se[1:length(.tab.res$se)-1])) ) }
# Image segmentation exercise: hierarchical clustering of a flower image,
# k-means on healthy/tumor brain scans (greyscale intensity matrices), and
# hierarchical + k-means clustering of the MovieLens genre matrix.
library(flexclust)  # FIX: as.kcca()/predict() below come from flexclust, never loaded
library(caTools)    # FIX: sample.split() below comes from caTools, never loaded

# ---- Hierarchical clustering of the flower image ----
# FIX: URLs had spaces embedded mid-hostname ("dimensionl ess"), which breaks
# the download; corrected to the real bucket name throughout.
flower <- read.csv("https://storage.googleapis.com/dimensionless/Analytics/flower.csv", header = FALSE)
flower
str(flower)
dim(flower)
flowermatrix <- as.matrix(flower)
flowervector <- as.vector(flowermatrix)
dim(flowermatrix)
distance1 <- dist(flowervector, method = "euclidean")
distance1
clusterintensity <- hclust(distance1, method = "ward.D")
clusterintensity
plot(clusterintensity)
flowercluster <- cutree(clusterintensity, k = 3)
flowercluster
table(flowercluster)
tapply(flowervector, flowercluster, mean)
# reshape the label vector back into image form (50 x 50 pixels)
dim(flowercluster) <- c(50, 50)
# 2nd method to do the same reshape:
mat <- matrix(flowercluster, nrow = 50, ncol = 50, byrow = FALSE)
image(flowercluster, axes = FALSE)
image(flowercluster, axes = FALSE, col = grey(seq(0, 1, length = 256)))
image(flowermatrix, axes = FALSE, col = grey(seq(0, 1, length = 256)))

# ---- K-means on the healthy brain image ----
healthy <- read.csv("https://storage.googleapis.com/dimensionless/Analytics/healthy.csv", header = FALSE)
healthymatrix <- as.matrix(healthy)
dim(healthymatrix)
healthyvector <- as.vector(healthymatrix)
image(healthymatrix, axes = FALSE, col = grey(seq(0, 1, length = 256)))

k <- 5
set.seed(1)  # FIX: was `set.seed = 1`, which creates a variable and never seeds the RNG
KMC <- kmeans(healthyvector, centers = k, iter.max = 100)
KMC
KMC$cluster
KMC$centers
KMC$totss
KMC$withinss
KMC$tot.withinss # sum of all withinss
KMC$size
healthycluster <- KMC$cluster
dim(healthycluster) <- c(566, 646)
image(healthycluster, axes = FALSE, col = rainbow(5))
image(healthymatrix, axes = FALSE, col = grey(seq(0, 1, length = 256)))

par(mfrow = c(1, 2)) # show two images side by side (1 row, 2 columns)
KMC2 <- kmeans(healthyvector, centers = 2, iter.max = 100)
KMC2$tot.withinss

# Scree data: total within-cluster SS for k = 2..10
totwithinss <- NULL
for (k in 2:10) {
  set.seed(1)
  totwithinss[k - 1] <- (kmeans(healthyvector, centers = k, iter.max = 1000))$tot.withinss
}
par(mfrow = c(1, 1))
NumClusters <- seq(2, 10, 1)
plot(NumClusters, totwithinss, type = "b")  # FIX: was undefined `SumWithinss`

# ---- Predict clusters on the tumor image using the healthy-image fit ----
tumor <- read.csv("https://storage.googleapis.com/dimensionless/Analytics/tumor.csv", header = FALSE)
tumormatrix <- as.matrix(tumor)
tumorvector <- as.vector(tumormatrix)
# convert the kmeans fit to a kcca object so predict() works on new data
KMC.kcca <- as.kcca(KMC, healthyvector)
KMC.kcca
KMC.kcca@totaldist
KMC.kcca@clusinfo
tumorClusters1 <- predict(KMC.kcca, newdata = tumorvector)
table(tumorClusters1)
d <- image(tumormatrix, axes = FALSE, col = rainbow(5))
dim(tumormatrix)

# ---- MovieLens genre clustering ----
movies <- read.table("https://storage.googleapis.com/dimensionless/Analytics/u.item.txt", header = FALSE, sep = "|", quote = "")
str(movies)
colnames(movies) <- c("ID", "Title", "ReleaseDate", "VideoReleaseDate", "IMDB",
                      "Unknown", "Action", "Adventure", "Animation", "Children",
                      "Comedy", "Crime", "Documentary", "Drama", "Fantasy",
                      "FilmNoir", "Horror", "Musical", "Mystery", "Romance",
                      "SciFi", "Thriller", "War", "Western")
str(movies)
# drop identifier/date columns, keeping Title plus the 19 genre flags
movies$ID <- NULL
movies$ReleaseDate <- NULL
movies$VideoReleaseDate <- NULL
movies$IMDB <- NULL
class(movies)
str(movies)
movies <- unique(movies)
str(movies)

# Quick Question
table(movies$Comedy)
table(movies$Western)
table(movies$Romance, movies$Drama)

distance <- dist(movies[2:20], method = "euclidean")
str(distance)
summary(distance)

set.seed(50)
spl <- sample.split(movies$Title, SplitRatio = 0.8)
table(spl)
ntrain <- subset(movies, spl == TRUE)   # FIX: TRUE/FALSE instead of reassignable T/F
ntest <- subset(movies, spl == FALSE)

distance3 <- dist(ntrain[2:20], method = "euclidean")
cluster_train <- hclust(distance3, method = "ward.D")
cluster_train
plot(cluster_train)
clustergroup <- cutree(cluster_train, k = 10)
agg_train <- aggregate(ntrain[2:20], list(clustergroup), mean)

# flexclust conversion so the hierarchical fit can predict on held-out rows
hcclust.kcca <- as.kcca(object = cluster_train, k = 10, data = ntrain[2:20])
pred1 <- predict(hcclust.kcca, newdata = ntest[2:20])
verify <- subset(ntest, pred1 == 8)
View(verify)

set.seed(50)
# FIX: `centers = x` referenced an undefined variable; 10 matches the k used
# for the kcca conversion below.
KMC_n <- kmeans(ntrain[2:20], centers = 10, iter.max = 100)
agg_KMC <- aggregate(x = ntrain[2:20], by = list(KMC_n$cluster), mean)
View(agg_KMC)
hcclust.kcca1 <- as.kcca(object = KMC_n, k = 10, ntrain[2:20])
predic_n2 <- predict(hcclust.kcca1, newdata = ntest[2:20])
verify <- subset(ntest, predic_n2 == 8)
View(verify)

NumClusters <- seq(2, 15, 1)
# NOTE(review): the original called plot(NumClusters, , type = "b") with no y
# values; a per-k tot.withinss vector (as computed for the healthy image
# above) is needed here for a meaningful scree plot.
plot(NumClusters, , type = "b")
KMC_n$tot.withinss
# principal component analysis
/Image segmentation.R
no_license
Deva-Vid/R-github
R
false
false
4,011
r
## Image segmentation & clustering exercises --------------------------------
## Hierarchical and k-means clustering of greyscale pixel intensities
## (flower / healthy-brain / tumor images), then hierarchical + k-means
## clustering of the MovieLens genre matrix.
##
## NOTE(review): relies on non-base packages assumed to be attached:
## flexclust (as.kcca, predict on kcca objects) and caTools (sample.split).
## Confirm they are loaded before sourcing.
## Fixed here: wrapped URLs ("dimensionl ess"), `set.seed = 1` which never
## seeded the RNG, undefined `SumWithinss` and `x`.

# ---- Flower image: hierarchical clustering of pixel intensities ----
flower <- read.csv("https://storage.googleapis.com/dimensionless/Analytics/flower.csv",
                   header = FALSE)
flower
str(flower)
dim(flower)

# Flatten the 50 x 50 image to one intensity value per pixel.
flowermatrix <- as.matrix(flower)
flowervector <- as.vector(flowermatrix)
dim(flowermatrix)

# Hierarchical clustering on pairwise pixel-intensity distances.
distance1 <- dist(flowervector, method = "euclidean")
# (not echoing distance1: printing a dist object over 2500 pixels floods
# the console with ~3 million pairwise values)
clusterintensity <- hclust(distance1, method = "ward.D")
clusterintensity
plot(clusterintensity)

# Cut the dendrogram into 3 intensity clusters.
flowercluster <- cutree(clusterintensity, k = 3)
flowercluster
table(flowercluster)
tapply(flowervector, flowercluster, mean)

# Reshape the cluster labels back into the 50 x 50 image grid.
dim(flowercluster) <- c(50, 50)
# 2nd method: build the matrix explicitly.
mat <- matrix(flowercluster, nrow = 50, ncol = 50, byrow = FALSE)
image(flowercluster, axes = FALSE)
image(flowercluster, axes = FALSE, col = grey(seq(0, 1, length = 256)))
image(flowermatrix, axes = FALSE, col = grey(seq(0, 1, length = 256)))

# ---- Healthy brain MRI: k-means segmentation ----
healthy <- read.csv("https://storage.googleapis.com/dimensionless/Analytics/healthy.csv",
                    header = FALSE)
healthymatrix <- as.matrix(healthy)
dim(healthymatrix)
healthyvector <- as.vector(healthymatrix)
image(healthymatrix, axes = FALSE, col = grey(seq(0, 1, length = 256)))

k <- 5
# BUG FIX: the original `set.seed = 1` only created a variable named
# `set.seed`; it never seeded the RNG.  Call the function instead.
set.seed(1)
KMC <- kmeans(healthyvector, centers = k, iter.max = 100)
KMC
KMC$cluster
KMC$centers
KMC$totss
KMC$withinss
KMC$tot.withinss  # sum of all within-cluster sums of squares
KMC$size

# Map cluster labels back onto the 566 x 646 image grid.
healthycluster <- KMC$cluster
dim(healthycluster) <- c(566, 646)
image(healthycluster, axes = FALSE, col = rainbow(5))
image(healthymatrix, axes = FALSE, col = grey(seq(0, 1, length = 256)))
par(mfrow = c(1, 2))  # show plots side by side: 1 row x 2 columns

KMC2 <- kmeans(healthyvector, centers = 2, iter.max = 100)
KMC2$tot.withinss

# Scree plot: total within-cluster SS for k = 2..10.
totwithinss <- NULL
for (k in 2:10) {
  set.seed(1)
  totwithinss[k - 1] <- kmeans(healthyvector, centers = k,
                               iter.max = 1000)$tot.withinss
}
par(mfrow = c(1, 1))
NumClusters <- seq(2, 10, 1)
# BUG FIX: the original plotted the undefined `SumWithinss`; the vector
# computed in the loop above is `totwithinss`.
plot(NumClusters, totwithinss, type = "b")

# ---- Tumor MRI: classify pixels with the healthy-image model ----
tumor <- read.csv("https://storage.googleapis.com/dimensionless/Analytics/tumor.csv",
                  header = FALSE)
tumormatrix <- as.matrix(tumor)
tumorvector <- as.vector(tumormatrix)
# Convert the kmeans fit to a flexclust kcca object so predict() works.
KMC.kcca <- as.kcca(KMC, healthyvector)
KMC.kcca
KMC.kcca@totaldist
KMC.kcca@clusinfo
tumorClusters1 <- predict(KMC.kcca, newdata = tumorvector)
table(tumorClusters1)
d <- image(tumormatrix, axes = FALSE, col = rainbow(5))
dim(tumormatrix)

# ---- MovieLens: clustering films by genre flags ----
movies <- read.table("https://storage.googleapis.com/dimensionless/Analytics/u.item.txt",
                     header = FALSE, sep = "|", quote = "")
str(movies)
colnames(movies) <- c("ID", "Title", "ReleaseDate", "VideoReleaseDate", "IMDB",
                      "Unknown", "Action", "Adventure", "Animation", "Children",
                      "Comedy", "Crime", "Documentary", "Drama", "Fantasy",
                      "FilmNoir", "Horror", "Musical", "Mystery", "Romance",
                      "SciFi", "Thriller", "War", "Western")
str(movies)
# Drop identifier/date columns; keep Title + 19 genre indicator columns.
movies$ID <- NULL
movies$ReleaseDate <- NULL
movies$VideoReleaseDate <- NULL
movies$IMDB <- NULL
class(movies)
movies <- unique(movies)
str(movies)

# Quick question
table(movies$Comedy)
table(movies$Western)
table(movies$Romance, movies$Drama)

distance <- dist(movies[2:20], method = "euclidean")
str(distance)
summary(distance)

# Train/test split on titles (sample.split is from caTools).
set.seed(50)
spl <- sample.split(movies$Title, SplitRatio = 0.8)
table(spl)
ntrain <- subset(movies, spl == TRUE)
ntest <- subset(movies, spl == FALSE)

# Hierarchical clustering of the training genres into 10 groups.
distance3 <- dist(ntrain[2:20], method = "euclidean")
cluster_train <- hclust(distance3, method = "ward.D")
cluster_train
plot(cluster_train)
clustergroup <- cutree(cluster_train, k = 10)
agg_train <- aggregate(ntrain[2:20], list(clustergroup), mean)

# Install the flexclust package for as.kcca().
hcclust.kcca <- as.kcca(object = cluster_train, k = 10, data = ntrain[2:20])
pred1 <- predict(hcclust.kcca, newdata = ntest[2:20])
verify <- subset(ntest, pred1 == 8)
View(verify)

set.seed(50)
# BUG FIX: the original passed the undefined variable `x` as `centers`;
# 10 clusters are used throughout this section.
KMC_n <- kmeans(ntrain[2:20], centers = 10, iter.max = 100)
agg_KMC <- aggregate(x = ntrain[2:20], by = list(KMC_n$cluster), mean)
View(agg_KMC)
hcclust.kcca1 <- as.kcca(object = KMC_n, k = 10, ntrain[2:20])
predic_n2 <- predict(hcclust.kcca1, newdata = ntest[2:20])
verify <- subset(ntest, predic_n2 == 8)
View(verify)

NumClusters <- seq(2, 15, 1)
# NOTE(review): no within-SS vector was ever computed for k = 2..15, so the
# original `plot(NumClusters,,type="b")` had no meaningful y-axis.
# TODO: compute tot.withinss for k in 2:15 before plotting against it.
plot(NumClusters, type = "b")
KMC_n$tot.withinss

# principal component analysis
###########################################################
##############     VIP_NPLSDADietBiom.R     ###############
###########################################################
# Author: Leandro Balzano-Nogueira
# Genetics Institute, University of Florida (Gainesville)
#
# Builds the Dietary Biomarkers 3-D data array
# (136 cases x 43 variables x 5 time points), imputes missing entries,
# and runs NPLS-DA for VIP-based variable selection.
#
# NOTE(review): bestfittedmodel(), Imputemethod(), NPLSDAmod(),
# plotNPLSDAmod() and outcomedummyarray136 come from the sourced
# TEDDYtools/NPLSDA scripts -- this file is not runnable on its own,
# and the heavy steps are intended for an HPC.
###########################################################

# Home directory where all results are going to be contained.
homedir<- "/home/leobalzano/Dropbox (UFL)/TEDDY/Paper1/NatureCommFormat/NCommV1/NCommV1ToShare/SupplementaryData/DietaryBiomarkers/"
setwd(homedir)
getwd()

###########################################################
# Functions:
# Location of the TEDDYtools scripts.  NOTE(review): this bare string is a
# no-op expression kept for reference only.
"/home/leobalzano/Dropbox (UFL)/TEDDY/Paper1/NatureCommFormat/NCommV1/NCommV1ToShare/ScriptsForNComm/Tools/"
# Functions to reformat the data:
source ("/home/leobalzano/Dropbox (UFL)/TEDDY/Paper1/NatureCommFormat/NCommV1/NCommV1ToShare/ScriptsForNComm/Tools/TEDDYtools2.R")
# Functions created to perform the NPLSDA:
source ("/home/leobalzano/Dropbox (UFL)/TEDDY/Paper1/NatureCommFormat/NCommV1/NCommV1ToShare/ScriptsForNComm/Tools/NPLSDAfunctionsApr11.R")

###########################################################
# Data:
# Dietary biomarkers, raw and processed (no header row in either file;
# variable names are in the first data row, handled after transposing).
DBRaw<-read.csv ("/home/leobalzano/Dropbox (UFL)/TEDDY/Paper1/NatureCommFormat/NCommV1/NCommV1ToShare/SupplementaryData/DietaryBiomarkers/DBRaw.csv",header = FALSE)
DBRaw[1:10,1:10]
DBprocessed<-read.csv ("/home/leobalzano/Dropbox (UFL)/TEDDY/Paper1/NatureCommFormat/NCommV1/NCommV1ToShare/SupplementaryData/DietaryBiomarkers/DBprocessed.csv",header = FALSE)
DBprocessed[1:10,1:10]

# Response variable / cohort metadata.
CohortData<-read.csv ("/home/leobalzano/Dropbox (UFL)/TEDDY/Paper1/NatureCommFormat/NCommV1/NCommV1ToShare/SupplementaryData/CohortData.csv",header = TRUE)
CohortData[1:10,]

# List of cases with at least 3 out of 5 time points with data
# (the "Model" set of the cohort).
patients3tps<-data.frame(V1=CohortData$Individual.Id[CohortData$Model.or.Validation=="Model"])
patients3tps

###########################################################
# Libraries:
library("gdata")
require("VennDiagram")

###########################################################
# Dietary biomarker matrix: transpose so rows = samples, cols = variables;
# the first row of the transpose holds the variable names.
DietBiomRaw<-t(DBRaw)
colnames(DietBiomRaw)<- DietBiomRaw[1,]; DietBiomRaw<-DietBiomRaw[-1,]
# transform() coerces the character matrix to a data.frame and converts
# the three key columns to numeric.
DietBiomRaw <- transform(DietBiomRaw, Individual.Id =as.numeric(as.character(Individual.Id)), Age.in.Months = as.numeric(as.character(Age.in.Months)), Time.to.IA = as.numeric(as.character(Time.to.IA)))
DietBiomRaw[1:10,1:10]

# Subset to the 136 pairs with values reported in 3 out of 5 time points.
DietBiomRaw136<- DietBiomRaw[is.element(DietBiomRaw$Individual.Id, patients3tps[,1]),]
dim(DietBiomRaw136)
length(unique(DietBiomRaw136$Individual.Id))

###########################################################
# Reshape to a 3-D structure: one slice per Time.to.IA value.
dim(DietBiomRaw136)  # 276 x 46
colnames(DietBiomRaw136[1:23])
DBX<-DietBiomRaw136[,c(1,4:46)]  # just ID and the 43 variables
DBX12<-DBX[DietBiomRaw136$Time.to.IA == "-12",]; dim(DBX12);dim(DBX)
DBX9<-DBX[DietBiomRaw136$Time.to.IA == "-9",];dim(DBX9)
DBX6<-DBX[DietBiomRaw136$Time.to.IA == "-6",];dim(DBX6)
DBX3<-DBX[DietBiomRaw136$Time.to.IA == "-3",];dim(DBX3)
DBX0<-DBX[DietBiomRaw136$Time.to.IA == "0",];dim(DBX0)

###########################################################
# Merge each time slice against the full case list so every slice has one
# row per case; cases missing at that time point become NA rows.
patients3tps2<-patients3tps
colnames(patients3tps2)<- "Individual.Id"

DBX12total<-merge(patients3tps2,DBX12, by="Individual.Id", all.x = T);dim(DBX12total);dim(patients3tps2);dim(DBX12)
DBX12total[1:5,1:5]
rownames(DBX12total)<- DBX12total[,1]
DBX12total<- as.matrix(DBX12total[,c(-1)])  # drop the ID column, keep it as rownames
dim(DBX12total)

DBX9total<-merge(patients3tps2,DBX9, by="Individual.Id", all.x = T);dim(DBX9total);dim(patients3tps2);dim(DBX9)
DBX9total[1:5,1:5]
rownames(DBX9total)<- DBX9total[,1]
DBX9total<- as.matrix(DBX9total[,c(-1)])
dim(DBX9total)

DBX6total<-merge(patients3tps2,DBX6, by="Individual.Id", all.x = T);dim(DBX6total);dim(patients3tps2);dim(DBX6total)
DBX6total[1:5,1:5]
rownames(DBX6total)<- DBX6total[,1]
DBX6total<- as.matrix(DBX6total[,c(-1)])
dim(DBX6total)

DBX3total<-merge(patients3tps2,DBX3, by="Individual.Id", all.x = T);dim(DBX3total);dim(patients3tps2);dim(DBX3)
DBX3total[1:5,1:5]
rownames(DBX3total)<- DBX3total[,1]
DBX3total<- as.matrix(DBX3total[,c(-1)])
dim(DBX3total)

DBX0total<-merge(patients3tps2,DBX0, by="Individual.Id", all.x = T);dim(DBX0total);dim(patients3tps2);dim(DBX0)
DBX0total[1:5,1:5]
rownames(DBX0total)<- DBX0total[,1]
DBX0total<- as.matrix(DBX0total[,c(-1)])
dim(DBX0total)

# Dimensions are 136*43*5 (cases x variables x time points).
###########################################################
# Stack the five per-time-point matrices into a 3-D array; the third
# dimension is labeled by months-to-IA.
dim(DBX0total)
arrayDBXMarch <- array(data = NA, dim = c(136,43,5),dimnames = list(NULL, NULL, c("-12","-9","-6", "-3", "0")))
arrayDBXMarch
arrayDBXMarch[,,1] <- DBX12total
arrayDBXMarch[,,2] <- DBX9total
arrayDBXMarch[,,3] <- DBX6total
arrayDBXMarch[,,4] <- DBX3total
arrayDBXMarch[,,5] <- DBX0total
# All slices share the same row (case) and column (variable) names.
rownames(arrayDBXMarch)<-rownames(DBX12total)
colnames(arrayDBXMarch)<-colnames (DBX12total)
arrayDBXMarch[1:50,1:5,1]
arrayDBXMarch136<-arrayDBXMarch
dim(arrayDBXMarch136)  # 136 * 43 * 5
# This is how the data array for DB expression was created.

###########################################################
# Imputation (for convenience it must be done on an HPC).
# 1) Determine the best fitted model.
#    centering: 0 = no centering; 1 = by individuals; 2 = by variables;
#    3 = by time.
modelDBXnuevo<-bestfittedmodel (X=arrayDBXMarch136,centering=0)
# The best model was 4,4,3.
# 2) Impute using the best fitted model's factors.
FullarrayGEMARCH136<-Imputemethod(X=arrayDBXMarch136,fac=c(4, 4, 3), conver = 1e-07, max.iter = 1000)
summary(FullarrayGEMARCH136)
dim(FullarrayGEMARCH136)
# 3) NPLSDA (for convenience it must be done on an HPC).
#    NOTE(review): outcomedummyarray136 (dummy-coded outcome) must already
#    exist in the session -- it is not created in this script.
NPLSDAFullarrayGEMARCH136<-NPLSDAmod(XN=FullarrayGEMARCH136, YN=outcomedummyarray136,
outcome.Y=NULL, factors=3, centering=0)
# 4) Plotting.
ploteoNPLSDAFullarrayGEMARCH136<- plotNPLSDAmod (X=NPLSDAFullarrayGEMARCH136, PCs = c(1, 2), labels = NULL, main = substitute(X), cutoff = 20, factors=2, penalty=1)

###########################################################
# Repeat the same procedure for the processed data.
# This process allows selecting Vitamin C, Vitamin D and alpha-tocopherol
# as variables for the final model.
/VIP_NPLSDADietBiom.R
no_license
ConesaLab/TEDDY_Multi-Omics_Integration
R
false
false
6,508
r
###########################################################
##############     VIP_NPLSDADietBiom.R     ###############
###########################################################
# Author: Leandro Balzano-Nogueira
# Genetics Institute, University of Florida (Gainesville)
#
# Builds the Dietary Biomarkers 3-D data array
# (136 cases x 43 variables x 5 time points), imputes missing entries,
# and runs NPLS-DA for VIP-based variable selection.
#
# NOTE(review): bestfittedmodel(), Imputemethod(), NPLSDAmod(),
# plotNPLSDAmod() and outcomedummyarray136 come from the sourced
# TEDDYtools/NPLSDA scripts -- this file is not runnable on its own,
# and the heavy steps are intended for an HPC.
###########################################################

# Home directory where all results are going to be contained.
homedir<- "/home/leobalzano/Dropbox (UFL)/TEDDY/Paper1/NatureCommFormat/NCommV1/NCommV1ToShare/SupplementaryData/DietaryBiomarkers/"
setwd(homedir)
getwd()

###########################################################
# Functions:
# Location of the TEDDYtools scripts.  NOTE(review): this bare string is a
# no-op expression kept for reference only.
"/home/leobalzano/Dropbox (UFL)/TEDDY/Paper1/NatureCommFormat/NCommV1/NCommV1ToShare/ScriptsForNComm/Tools/"
# Functions to reformat the data:
source ("/home/leobalzano/Dropbox (UFL)/TEDDY/Paper1/NatureCommFormat/NCommV1/NCommV1ToShare/ScriptsForNComm/Tools/TEDDYtools2.R")
# Functions created to perform the NPLSDA:
source ("/home/leobalzano/Dropbox (UFL)/TEDDY/Paper1/NatureCommFormat/NCommV1/NCommV1ToShare/ScriptsForNComm/Tools/NPLSDAfunctionsApr11.R")

###########################################################
# Data:
# Dietary biomarkers, raw and processed (no header row in either file;
# variable names are in the first data row, handled after transposing).
DBRaw<-read.csv ("/home/leobalzano/Dropbox (UFL)/TEDDY/Paper1/NatureCommFormat/NCommV1/NCommV1ToShare/SupplementaryData/DietaryBiomarkers/DBRaw.csv",header = FALSE)
DBRaw[1:10,1:10]
DBprocessed<-read.csv ("/home/leobalzano/Dropbox (UFL)/TEDDY/Paper1/NatureCommFormat/NCommV1/NCommV1ToShare/SupplementaryData/DietaryBiomarkers/DBprocessed.csv",header = FALSE)
DBprocessed[1:10,1:10]

# Response variable / cohort metadata.
CohortData<-read.csv ("/home/leobalzano/Dropbox (UFL)/TEDDY/Paper1/NatureCommFormat/NCommV1/NCommV1ToShare/SupplementaryData/CohortData.csv",header = TRUE)
CohortData[1:10,]

# List of cases with at least 3 out of 5 time points with data
# (the "Model" set of the cohort).
patients3tps<-data.frame(V1=CohortData$Individual.Id[CohortData$Model.or.Validation=="Model"])
patients3tps

###########################################################
# Libraries:
library("gdata")
require("VennDiagram")

###########################################################
# Dietary biomarker matrix: transpose so rows = samples, cols = variables;
# the first row of the transpose holds the variable names.
DietBiomRaw<-t(DBRaw)
colnames(DietBiomRaw)<- DietBiomRaw[1,]; DietBiomRaw<-DietBiomRaw[-1,]
# transform() coerces the character matrix to a data.frame and converts
# the three key columns to numeric.
DietBiomRaw <- transform(DietBiomRaw, Individual.Id =as.numeric(as.character(Individual.Id)), Age.in.Months = as.numeric(as.character(Age.in.Months)), Time.to.IA = as.numeric(as.character(Time.to.IA)))
DietBiomRaw[1:10,1:10]

# Subset to the 136 pairs with values reported in 3 out of 5 time points.
DietBiomRaw136<- DietBiomRaw[is.element(DietBiomRaw$Individual.Id, patients3tps[,1]),]
dim(DietBiomRaw136)
length(unique(DietBiomRaw136$Individual.Id))

###########################################################
# Reshape to a 3-D structure: one slice per Time.to.IA value.
dim(DietBiomRaw136)  # 276 x 46
colnames(DietBiomRaw136[1:23])
DBX<-DietBiomRaw136[,c(1,4:46)]  # just ID and the 43 variables
DBX12<-DBX[DietBiomRaw136$Time.to.IA == "-12",]; dim(DBX12);dim(DBX)
DBX9<-DBX[DietBiomRaw136$Time.to.IA == "-9",];dim(DBX9)
DBX6<-DBX[DietBiomRaw136$Time.to.IA == "-6",];dim(DBX6)
DBX3<-DBX[DietBiomRaw136$Time.to.IA == "-3",];dim(DBX3)
DBX0<-DBX[DietBiomRaw136$Time.to.IA == "0",];dim(DBX0)

###########################################################
# Merge each time slice against the full case list so every slice has one
# row per case; cases missing at that time point become NA rows.
patients3tps2<-patients3tps
colnames(patients3tps2)<- "Individual.Id"

DBX12total<-merge(patients3tps2,DBX12, by="Individual.Id", all.x = T);dim(DBX12total);dim(patients3tps2);dim(DBX12)
DBX12total[1:5,1:5]
rownames(DBX12total)<- DBX12total[,1]
DBX12total<- as.matrix(DBX12total[,c(-1)])  # drop the ID column, keep it as rownames
dim(DBX12total)

DBX9total<-merge(patients3tps2,DBX9, by="Individual.Id", all.x = T);dim(DBX9total);dim(patients3tps2);dim(DBX9)
DBX9total[1:5,1:5]
rownames(DBX9total)<- DBX9total[,1]
DBX9total<- as.matrix(DBX9total[,c(-1)])
dim(DBX9total)

DBX6total<-merge(patients3tps2,DBX6, by="Individual.Id", all.x = T);dim(DBX6total);dim(patients3tps2);dim(DBX6total)
DBX6total[1:5,1:5]
rownames(DBX6total)<- DBX6total[,1]
DBX6total<- as.matrix(DBX6total[,c(-1)])
dim(DBX6total)

DBX3total<-merge(patients3tps2,DBX3, by="Individual.Id", all.x = T);dim(DBX3total);dim(patients3tps2);dim(DBX3)
DBX3total[1:5,1:5]
rownames(DBX3total)<- DBX3total[,1]
DBX3total<- as.matrix(DBX3total[,c(-1)])
dim(DBX3total)

DBX0total<-merge(patients3tps2,DBX0, by="Individual.Id", all.x = T);dim(DBX0total);dim(patients3tps2);dim(DBX0)
DBX0total[1:5,1:5]
rownames(DBX0total)<- DBX0total[,1]
DBX0total<- as.matrix(DBX0total[,c(-1)])
dim(DBX0total)

# Dimensions are 136*43*5 (cases x variables x time points).
###########################################################
# Stack the five per-time-point matrices into a 3-D array; the third
# dimension is labeled by months-to-IA.
dim(DBX0total)
arrayDBXMarch <- array(data = NA, dim = c(136,43,5),dimnames = list(NULL, NULL, c("-12","-9","-6", "-3", "0")))
arrayDBXMarch
arrayDBXMarch[,,1] <- DBX12total
arrayDBXMarch[,,2] <- DBX9total
arrayDBXMarch[,,3] <- DBX6total
arrayDBXMarch[,,4] <- DBX3total
arrayDBXMarch[,,5] <- DBX0total
# All slices share the same row (case) and column (variable) names.
rownames(arrayDBXMarch)<-rownames(DBX12total)
colnames(arrayDBXMarch)<-colnames (DBX12total)
arrayDBXMarch[1:50,1:5,1]
arrayDBXMarch136<-arrayDBXMarch
dim(arrayDBXMarch136)  # 136 * 43 * 5
# This is how the data array for DB expression was created.

###########################################################
# Imputation (for convenience it must be done on an HPC).
# 1) Determine the best fitted model.
#    centering: 0 = no centering; 1 = by individuals; 2 = by variables;
#    3 = by time.
modelDBXnuevo<-bestfittedmodel (X=arrayDBXMarch136,centering=0)
# The best model was 4,4,3.
# 2) Impute using the best fitted model's factors.
FullarrayGEMARCH136<-Imputemethod(X=arrayDBXMarch136,fac=c(4, 4, 3), conver = 1e-07, max.iter = 1000)
summary(FullarrayGEMARCH136)
dim(FullarrayGEMARCH136)
# 3) NPLSDA (for convenience it must be done on an HPC).
#    NOTE(review): outcomedummyarray136 (dummy-coded outcome) must already
#    exist in the session -- it is not created in this script.
NPLSDAFullarrayGEMARCH136<-NPLSDAmod(XN=FullarrayGEMARCH136, YN=outcomedummyarray136,
outcome.Y=NULL, factors=3, centering=0)
# 4) Plotting.
ploteoNPLSDAFullarrayGEMARCH136<- plotNPLSDAmod (X=NPLSDAFullarrayGEMARCH136, PCs = c(1, 2), labels = NULL, main = substitute(X), cutoff = 20, factors=2, penalty=1)

###########################################################
# Repeat the same procedure for the processed data.
# This process allows selecting Vitamin C, Vitamin D and alpha-tocopherol
# as variables for the final model.
context("knob")

test_that("default", {
  tagkb <- knobInput(
    inputId = "myKnob",
    label = "Display previous:",
    value = 50,
    min = -100,
    displayPrevious = TRUE,
    fgColor = "#428BCA",
    inputColor = "#428BCA"
  )
  # The widget must be a shiny tag carrying the jquery-knob JS dependency.
  # (expect_is() is deprecated in testthat 3e -- use expect_s3_class().)
  expect_s3_class(tagkb, "shiny.tag")
  expect_length(htmltools::findDependencies(tagkb), 2)
  expect_identical(
    htmltools::findDependencies(tagkb)[[2]]$script,
    c("jquery.knob.min.js")
  )
  # The <input> element (third child) must carry the requested inputId.
  expect_true(htmltools::tagHasAttribute(tagkb$children[[3]], "id"))
  expect_identical(htmltools::tagGetAttribute(tagkb$children[[3]], "id"), "myKnob")
})

test_that("updateKnobInput", {
  # Minimal fake shiny session: records the last message pushed to the
  # client so we can inspect what updateKnobInput() transmits.
  session <- as.environment(list(
    sendInputMessage = function(inputId, message) {
      session$lastInputMessage <- list(id = inputId, message = message)
    },
    sendCustomMessage = function(type, message) {
      session$lastCustomMessage <- list(type = type, message = message)
    },
    sendInsertUI = function(selector, multiple, where, content) {
      session$lastInsertUI <- list(selector = selector, multiple = multiple,
                                   where = where, content = content)
    },
    onFlushed = function(callback, once) {
      list(callback = callback, once = once)
    }
  ))

  updateKnobInput(session = session, inputId = "MY_ID", value = 10)

  # The update must target the right input and carry the new value.
  msgki <- session$lastInputMessage
  expect_length(msgki, 2)
  expect_identical(msgki$id, "MY_ID")
  expect_equal(msgki$message$value, 10)
})
/tests/testthat/test-knob.R
permissive
micahwilhelm/shinyWidgets
R
false
false
1,475
r
context("knob")

test_that("default", {
  tagkb <- knobInput(
    inputId = "myKnob",
    label = "Display previous:",
    value = 50,
    min = -100,
    displayPrevious = TRUE,
    fgColor = "#428BCA",
    inputColor = "#428BCA"
  )
  # The widget must be a shiny tag carrying the jquery-knob JS dependency.
  # (expect_is() is deprecated in testthat 3e -- use expect_s3_class().)
  expect_s3_class(tagkb, "shiny.tag")
  expect_length(htmltools::findDependencies(tagkb), 2)
  expect_identical(
    htmltools::findDependencies(tagkb)[[2]]$script,
    c("jquery.knob.min.js")
  )
  # The <input> element (third child) must carry the requested inputId.
  expect_true(htmltools::tagHasAttribute(tagkb$children[[3]], "id"))
  expect_identical(htmltools::tagGetAttribute(tagkb$children[[3]], "id"), "myKnob")
})

test_that("updateKnobInput", {
  # Minimal fake shiny session: records the last message pushed to the
  # client so we can inspect what updateKnobInput() transmits.
  session <- as.environment(list(
    sendInputMessage = function(inputId, message) {
      session$lastInputMessage <- list(id = inputId, message = message)
    },
    sendCustomMessage = function(type, message) {
      session$lastCustomMessage <- list(type = type, message = message)
    },
    sendInsertUI = function(selector, multiple, where, content) {
      session$lastInsertUI <- list(selector = selector, multiple = multiple,
                                   where = where, content = content)
    },
    onFlushed = function(callback, once) {
      list(callback = callback, once = once)
    }
  ))

  updateKnobInput(session = session, inputId = "MY_ID", value = 10)

  # The update must target the right input and carry the new value.
  msgki <- session$lastInputMessage
  expect_length(msgki, 2)
  expect_identical(msgki$id, "MY_ID")
  expect_equal(msgki$message$value, 10)
})
## plot2.R -- Global Active Power over 1-2 Feb 2007.
## Assumes household_power_consumption.txt sits one directory above this
## script's working directory.

# Read the raw data: ';'-separated, "?" marks missing values.  Reading with
# header = FALSE keeps the original V1 column references below working (the
# header row is excluded by the date filter anyway); na.strings = "?" makes
# the numeric conversion below clean instead of warning on every "?".
data <- read.table("../household_power_consumption.txt", sep = ";",
                   na.strings = "?")

# Keep only the two February days of interest.
febdata <- data[data$V1 == "1/2/2007" | data$V1 == "2/2/2007", ]

# Give the columns their real names.
namesdata <- c("Date", "Time", "Global_active_power", "Global_reactive_power",
               "Voltage", "Global_intensity", "Sub_metering_1",
               "Sub_metering_2", "Sub_metering_3")
names(febdata) <- namesdata

# Columns were read as character/factor (header row in the data); convert
# the plotted column to numeric.
plotdata <- as.numeric(as.character(febdata$Global_active_power))
plotdate <- as.character(febdata$Date)

# Draw straight into the PNG device at the target size: dev.copy() from a
# screen device can rescale the plot and distort text/axis proportions.
png(file = "plot2.png", width = 480, height = 480)
with(febdata, plot(plotdata, type = "l",
                   ylab = "Global Active Power (kilowatts)",
                   xlab = "", xaxt = "n"))
# One tick per day boundary (1440 minutes per day).
axis(1, at = c(1, 1440, 2880), c("Thu", "Fri", "Sat"))
dev.off()
/plot2.R
no_license
vivekdarera/ExData_Plotting1
R
false
false
845
r
## plot2.R -- Global Active Power over 1-2 Feb 2007.
## Assumes household_power_consumption.txt sits one directory above this
## script's working directory.

# Read the raw data: ';'-separated, "?" marks missing values.  Reading with
# header = FALSE keeps the original V1 column references below working (the
# header row is excluded by the date filter anyway); na.strings = "?" makes
# the numeric conversion below clean instead of warning on every "?".
data <- read.table("../household_power_consumption.txt", sep = ";",
                   na.strings = "?")

# Keep only the two February days of interest.
febdata <- data[data$V1 == "1/2/2007" | data$V1 == "2/2/2007", ]

# Give the columns their real names.
namesdata <- c("Date", "Time", "Global_active_power", "Global_reactive_power",
               "Voltage", "Global_intensity", "Sub_metering_1",
               "Sub_metering_2", "Sub_metering_3")
names(febdata) <- namesdata

# Columns were read as character/factor (header row in the data); convert
# the plotted column to numeric.
plotdata <- as.numeric(as.character(febdata$Global_active_power))
plotdate <- as.character(febdata$Date)

# Draw straight into the PNG device at the target size: dev.copy() from a
# screen device can rescale the plot and distort text/axis proportions.
png(file = "plot2.png", width = 480, height = 480)
with(febdata, plot(plotdata, type = "l",
                   ylab = "Global Active Power (kilowatts)",
                   xlab = "", xaxt = "n"))
# One tick per day boundary (1440 minutes per day).
axis(1, at = c(1, 1440, 2880), c("Thu", "Fri", "Sat"))
dev.off()
## Matrix inversion is expensive, so these two functions let a matrix carry
## a cached copy of its inverse: makeCacheMatrix() wraps a matrix in a set
## of closures, and cacheSolve() computes the inverse at most once per
## stored matrix, serving the cached value on later calls.

# Build a special "matrix" wrapper able to remember (cache) its inverse.
# Returns a list of four closures sharing the wrapped matrix `x` and the
# cached inverse via their enclosing environment.
makeCacheMatrix <- function(x = matrix()) {
  cached_inverse <- NULL
  set <- function(value) {
    # Replacing the matrix invalidates any previously cached inverse.
    x <<- value
    cached_inverse <<- NULL
  }
  get <- function() {
    x
  }
  setinverse <- function(value) {
    cached_inverse <<- value
  }
  getinverse <- function() {
    cached_inverse
  }
  list(set = set,
       get = get,
       setinverse = setinverse,
       getinverse = getinverse)
}

# Return the inverse of the special "matrix" produced by makeCacheMatrix(),
# computing it with solve() only on the first call and serving the cached
# copy (with a message) afterwards.  Extra arguments are passed to solve().
cacheSolve <- function(x, ...) {
  cached <- x$getinverse()
  if (is.null(cached)) {
    # Nothing cached yet: invert, store for next time, and return.
    cached <- solve(x$get(), ...)
    x$setinverse(cached)
    cached
  } else {
    message("getting cached data")
    cached
  }
}
/cachematrix.R
no_license
npatta01/ProgrammingAssignment2
R
false
false
1,917
r
## Matrix inversion is expensive, so these two functions let a matrix carry
## a cached copy of its inverse: makeCacheMatrix() wraps a matrix in a set
## of closures, and cacheSolve() computes the inverse at most once per
## stored matrix, serving the cached value on later calls.

# Build a special "matrix" wrapper able to remember (cache) its inverse.
# Returns a list of four closures sharing the wrapped matrix `x` and the
# cached inverse via their enclosing environment.
makeCacheMatrix <- function(x = matrix()) {
  cached_inverse <- NULL
  set <- function(value) {
    # Replacing the matrix invalidates any previously cached inverse.
    x <<- value
    cached_inverse <<- NULL
  }
  get <- function() {
    x
  }
  setinverse <- function(value) {
    cached_inverse <<- value
  }
  getinverse <- function() {
    cached_inverse
  }
  list(set = set,
       get = get,
       setinverse = setinverse,
       getinverse = getinverse)
}

# Return the inverse of the special "matrix" produced by makeCacheMatrix(),
# computing it with solve() only on the first call and serving the cached
# copy (with a message) afterwards.  Extra arguments are passed to solve().
cacheSolve <- function(x, ...) {
  cached <- x$getinverse()
  if (is.null(cached)) {
    # Nothing cached yet: invert, store for next time, and return.
    cached <- solve(x$get(), ...)
    x$setinverse(cached)
    cached
  } else {
    message("getting cached data")
    cached
  }
}
## server.R
## Shiny server logic for the football-league visualization app.
## NOTE(review): the helpers used below (season.values, tier.values,
## date.values, create.heat.map, standings.table.data, create.bump.chart)
## and renderChart2 (rCharts) are defined/loaded elsewhere in the app --
## presumably global.R; confirm before refactoring.
library(shiny)

shinyServer(function(input, output) {

  # Season dropdown, rebuilt whenever the selected country changes;
  # `selected =` preserves the user's current season across rebuilds.
  output$selectSeason <- renderUI({
    selectInput('selectSeason', 'Season:',
                season.values(input$selectCountry),
                selected = input$selectSeason)
  })

  # League (tier) radio buttons; rendered only when the chosen
  # country/season combination actually has tiers.
  output$selectTier <- renderUI({
    if (length(tier.values(input$selectCountry, input$selectSeason)) == 0)
      return()
    radioButtons('selectTier', 'League:', inline = T,
                 tier.values(input$selectCountry, input$selectSeason))
  })

  # League table rendered as a heat map for the selected date.
  output$heatMap <- renderChart2({
    create.heat.map(input$selectCountry, input$selectSeason,
                    input$selectTier, input$selectDate)
  })

  # Date picker bounded by the first/last match dates of the selected
  # league season; defaults to the season's last available date.
  output$selectDate <- renderUI({
    dateInput('selectDate', "Show me what the league looked like on:",
              format = "M dd yyyy",
              min = date.values(input$selectCountry, input$selectSeason,
                                input$selectTier)[[1]],
              max = date.values(input$selectCountry, input$selectSeason,
                                input$selectTier)[[2]],
              value = date.values(input$selectCountry, input$selectSeason,
                                  input$selectTier)[[2]])
  })

  # Standings table as of the selected date; first column widened to 48%.
  output$standingsTable <- renderDataTable(
    standings.table.data(input$selectCountry, input$selectSeason,
                         input$selectTier, input$selectDate),
    options = list(lengthChange = F, autoWidth = F,
                   columnDefs = list(list(width = "48%", targets = 0))))

  # Bump chart of league positions over the season ("story" plot).
  output$storyPlot <- renderChart2({
    create.bump.chart(input$selectCountry, input$selectSeason,
                      input$selectTier, input$selectDate)
  })
})
/server.R
permissive
maxmax65/footy-visualization
R
false
false
2,590
r
## server.R
## Shiny server logic for the football-league visualization app.
## NOTE(review): the helpers used below (season.values, tier.values,
## date.values, create.heat.map, standings.table.data, create.bump.chart)
## and renderChart2 (rCharts) are defined/loaded elsewhere in the app --
## presumably global.R; confirm before refactoring.
library(shiny)

shinyServer(function(input, output) {

  # Season dropdown, rebuilt whenever the selected country changes;
  # `selected =` preserves the user's current season across rebuilds.
  output$selectSeason <- renderUI({
    selectInput('selectSeason', 'Season:',
                season.values(input$selectCountry),
                selected = input$selectSeason)
  })

  # League (tier) radio buttons; rendered only when the chosen
  # country/season combination actually has tiers.
  output$selectTier <- renderUI({
    if (length(tier.values(input$selectCountry, input$selectSeason)) == 0)
      return()
    radioButtons('selectTier', 'League:', inline = T,
                 tier.values(input$selectCountry, input$selectSeason))
  })

  # League table rendered as a heat map for the selected date.
  output$heatMap <- renderChart2({
    create.heat.map(input$selectCountry, input$selectSeason,
                    input$selectTier, input$selectDate)
  })

  # Date picker bounded by the first/last match dates of the selected
  # league season; defaults to the season's last available date.
  output$selectDate <- renderUI({
    dateInput('selectDate', "Show me what the league looked like on:",
              format = "M dd yyyy",
              min = date.values(input$selectCountry, input$selectSeason,
                                input$selectTier)[[1]],
              max = date.values(input$selectCountry, input$selectSeason,
                                input$selectTier)[[2]],
              value = date.values(input$selectCountry, input$selectSeason,
                                  input$selectTier)[[2]])
  })

  # Standings table as of the selected date; first column widened to 48%.
  output$standingsTable <- renderDataTable(
    standings.table.data(input$selectCountry, input$selectSeason,
                         input$selectTier, input$selectDate),
    options = list(lengthChange = F, autoWidth = F,
                   columnDefs = list(list(width = "48%", targets = 0))))

  # Bump chart of league positions over the season ("story" plot).
  output$storyPlot <- renderChart2({
    create.bump.chart(input$selectCountry, input$selectSeason,
                      input$selectTier, input$selectDate)
  })
})
## analysisCorrelation.R (Figure S6)
## Correlation-based hierarchical clustering of human vs mouse
## hematopoietic cell-type expression profiles.
## Requires Seurat v2 (slots accessed with @) and the merged human/mouse
## objects saved in ../analysis/humanmouse.RData.
## NOTE(review): dropped the original `rm(list = ls())` -- wiping the
## global environment inside a script is an anti-pattern; run the script
## in a fresh R session instead.

setwd("/data/gaos2/tmp/mousehuman/FigureS6")
library(Seurat)

load("../analysis/humanmouse.RData")  # provides the `human` and `mouse` Seurat objects

human <- NormalizeData(human)
mouse <- NormalizeData(mouse)

# Gene selection for input to CCA: union of the top 4000 variable genes
# from each species.
human <- FindVariableGenes(human, do.plot = FALSE)
mouse <- FindVariableGenes(mouse, do.plot = FALSE)
g.1 <- head(rownames(human@hvg.info), 4000)
g.2 <- head(rownames(mouse@hvg.info), 4000)
genes.use <- unique(c(g.1, g.2))

human <- ScaleData(human, display.progress = FALSE, genes.use = genes.use)
mouse <- ScaleData(mouse, display.progress = FALSE, genes.use = genes.use)

# Average the scaled expression within each annotated mouse cell type.
mouseData <- as.data.frame(t(mouse@scale.data))
mouseData$celltype <- mouse@meta.data$celltype
mouseClusterData <- aggregate(mouseData, list(mouseData$celltype), mean)
rownames(mouseClusterData) <- paste("mouse", mouseClusterData$Group.1, sep = "_")
# Drop the grouping column (1) and the trailing celltype column.
# NOTE(review): 5832 hard-codes the celltype column's position (implies
# 5830 genes) -- TODO confirm against ncol(mouseData) if inputs change.
mouseClusterData <- mouseClusterData[, c(-1, -5832)]
mouseClusterData <- mouseClusterData[rownames(mouseClusterData) != "mouse_NotypeDefination", ]

# Same per-cell-type averaging for human.
humanData <- as.data.frame(t(human@scale.data))
humanData$celltype <- human@meta.data$celltype
humanClusterData <- aggregate(humanData, list(humanData$celltype), mean)
rownames(humanClusterData) <- paste("human", humanClusterData$Group.1, sep = "_")
humanClusterData <- humanClusterData[, c(-1, -5832)]

# Hierarchical clustering on correlation distance (1 - Pearson r) between
# the combined human + mouse cluster profiles.
data.combined <- rbind(humanClusterData, mouseClusterData)
dist <- 1 - cor(t(data.combined))
clusters <- hclust(as.dist(dist))

png("hcl_mouse_human_cluster.png", width = 1000, height = 1000, res = 150)
plot(clusters, main = "conserved clustering", xlab = "clusters")
dev.off()
/jeerthiliza/mousehuman/FigureS6/analysisCorrelation.R
no_license
shouguog/hematopoiesis
R
false
false
1,504
r
## analysisCorrelation.R (Figure S6)
## Correlation-based hierarchical clustering of human vs mouse
## hematopoietic cell-type expression profiles.
## Requires Seurat v2 (slots accessed with @) and the merged human/mouse
## objects saved in ../analysis/humanmouse.RData.
## NOTE(review): dropped the original `rm(list = ls())` -- wiping the
## global environment inside a script is an anti-pattern; run the script
## in a fresh R session instead.

setwd("/data/gaos2/tmp/mousehuman/FigureS6")
library(Seurat)

load("../analysis/humanmouse.RData")  # provides the `human` and `mouse` Seurat objects

human <- NormalizeData(human)
mouse <- NormalizeData(mouse)

# Gene selection for input to CCA: union of the top 4000 variable genes
# from each species.
human <- FindVariableGenes(human, do.plot = FALSE)
mouse <- FindVariableGenes(mouse, do.plot = FALSE)
g.1 <- head(rownames(human@hvg.info), 4000)
g.2 <- head(rownames(mouse@hvg.info), 4000)
genes.use <- unique(c(g.1, g.2))

human <- ScaleData(human, display.progress = FALSE, genes.use = genes.use)
mouse <- ScaleData(mouse, display.progress = FALSE, genes.use = genes.use)

# Average the scaled expression within each annotated mouse cell type.
mouseData <- as.data.frame(t(mouse@scale.data))
mouseData$celltype <- mouse@meta.data$celltype
mouseClusterData <- aggregate(mouseData, list(mouseData$celltype), mean)
rownames(mouseClusterData) <- paste("mouse", mouseClusterData$Group.1, sep = "_")
# Drop the grouping column (1) and the trailing celltype column.
# NOTE(review): 5832 hard-codes the celltype column's position (implies
# 5830 genes) -- TODO confirm against ncol(mouseData) if inputs change.
mouseClusterData <- mouseClusterData[, c(-1, -5832)]
mouseClusterData <- mouseClusterData[rownames(mouseClusterData) != "mouse_NotypeDefination", ]

# Same per-cell-type averaging for human.
humanData <- as.data.frame(t(human@scale.data))
humanData$celltype <- human@meta.data$celltype
humanClusterData <- aggregate(humanData, list(humanData$celltype), mean)
rownames(humanClusterData) <- paste("human", humanClusterData$Group.1, sep = "_")
humanClusterData <- humanClusterData[, c(-1, -5832)]

# Hierarchical clustering on correlation distance (1 - Pearson r) between
# the combined human + mouse cluster profiles.
data.combined <- rbind(humanClusterData, mouseClusterData)
dist <- 1 - cor(t(data.combined))
clusters <- hclust(as.dist(dist))

png("hcl_mouse_human_cluster.png", width = 1000, height = 1000, res = 150)
plot(clusters, main = "conserved clustering", xlab = "clusters")
dev.off()
% Generated by roxygen2: do not edit by hand % Please edit documentation in R/json.R \name{read_json_arrow} \alias{read_json_arrow} \title{Read a JSON file} \usage{ read_json_arrow( file, col_select = NULL, as_data_frame = TRUE, schema = NULL, ... ) } \arguments{ \item{file}{A character file name or URI, \code{raw} vector, an Arrow input stream, or a \code{FileSystem} with path (\code{SubTreeFileSystem}). If a file name, a memory-mapped Arrow \link{InputStream} will be opened and closed when finished; compression will be detected from the file extension and handled automatically. If an input stream is provided, it will be left open.} \item{col_select}{A character vector of column names to keep, as in the "select" argument to \code{data.table::fread()}, or a \link[tidyselect:eval_select]{tidy selection specification} of columns, as used in \code{dplyr::select()}.} \item{as_data_frame}{Should the function return a \code{data.frame} (default) or an Arrow \link{Table}?} \item{schema}{\link{Schema} that describes the table.} \item{...}{Additional options passed to \code{JsonTableReader$create()}} } \value{ A \code{data.frame}, or a Table if \code{as_data_frame = FALSE}. } \description{ Wrapper around \link{JsonTableReader} to read a newline-delimited JSON (ndjson) file into a data frame or Arrow Table. } \details{ If passed a path, will detect and handle compression from the file extension (e.g. \code{.json.gz}). If \code{schema} is not provided, Arrow data types are inferred from the data: \itemize{ \item JSON null values convert to the \code{\link[=null]{null()}} type, but can fall back to any other type. \item JSON booleans convert to \code{\link[=boolean]{boolean()}}. \item JSON numbers convert to \code{\link[=int64]{int64()}}, falling back to \code{\link[=float64]{float64()}} if a non-integer is encountered. 
\item JSON strings of the kind "YYYY-MM-DD" and "YYYY-MM-DD hh:mm:ss" convert to \code{\link[=timestamp]{timestamp(unit = "s")}}, falling back to \code{\link[=utf8]{utf8()}} if a conversion error occurs. \item JSON arrays convert to a \code{\link[=list_of]{list_of()}} type, and inference proceeds recursively on the JSON arrays' values. \item Nested JSON objects convert to a \code{\link[=struct]{struct()}} type, and inference proceeds recursively on the JSON objects' values. } When \code{as_data_frame = TRUE}, Arrow types are further converted to R types. } \examples{ \dontshow{if (arrow_with_json()) (if (getRversion() >= "3.4") withAutoprint else force)(\{ # examplesIf} tf <- tempfile() on.exit(unlink(tf)) writeLines(' { "hello": 3.5, "world": false, "yo": "thing" } { "hello": 3.25, "world": null } { "hello": 0.0, "world": true, "yo": null } ', tf, useBytes = TRUE) read_json_arrow(tf) \dontshow{\}) # examplesIf} }
/r/man/read_json_arrow.Rd
permissive
andygrove/arrow
R
false
true
2,798
rd
% Generated by roxygen2: do not edit by hand % Please edit documentation in R/json.R \name{read_json_arrow} \alias{read_json_arrow} \title{Read a JSON file} \usage{ read_json_arrow( file, col_select = NULL, as_data_frame = TRUE, schema = NULL, ... ) } \arguments{ \item{file}{A character file name or URI, \code{raw} vector, an Arrow input stream, or a \code{FileSystem} with path (\code{SubTreeFileSystem}). If a file name, a memory-mapped Arrow \link{InputStream} will be opened and closed when finished; compression will be detected from the file extension and handled automatically. If an input stream is provided, it will be left open.} \item{col_select}{A character vector of column names to keep, as in the "select" argument to \code{data.table::fread()}, or a \link[tidyselect:eval_select]{tidy selection specification} of columns, as used in \code{dplyr::select()}.} \item{as_data_frame}{Should the function return a \code{data.frame} (default) or an Arrow \link{Table}?} \item{schema}{\link{Schema} that describes the table.} \item{...}{Additional options passed to \code{JsonTableReader$create()}} } \value{ A \code{data.frame}, or a Table if \code{as_data_frame = FALSE}. } \description{ Wrapper around \link{JsonTableReader} to read a newline-delimited JSON (ndjson) file into a data frame or Arrow Table. } \details{ If passed a path, will detect and handle compression from the file extension (e.g. \code{.json.gz}). If \code{schema} is not provided, Arrow data types are inferred from the data: \itemize{ \item JSON null values convert to the \code{\link[=null]{null()}} type, but can fall back to any other type. \item JSON booleans convert to \code{\link[=boolean]{boolean()}}. \item JSON numbers convert to \code{\link[=int64]{int64()}}, falling back to \code{\link[=float64]{float64()}} if a non-integer is encountered. 
\item JSON strings of the kind "YYYY-MM-DD" and "YYYY-MM-DD hh:mm:ss" convert to \code{\link[=timestamp]{timestamp(unit = "s")}}, falling back to \code{\link[=utf8]{utf8()}} if a conversion error occurs. \item JSON arrays convert to a \code{\link[=list_of]{list_of()}} type, and inference proceeds recursively on the JSON arrays' values. \item Nested JSON objects convert to a \code{\link[=struct]{struct()}} type, and inference proceeds recursively on the JSON objects' values. } When \code{as_data_frame = TRUE}, Arrow types are further converted to R types. } \examples{ \dontshow{if (arrow_with_json()) (if (getRversion() >= "3.4") withAutoprint else force)(\{ # examplesIf} tf <- tempfile() on.exit(unlink(tf)) writeLines(' { "hello": 3.5, "world": false, "yo": "thing" } { "hello": 3.25, "world": null } { "hello": 0.0, "world": true, "yo": null } ', tf, useBytes = TRUE) read_json_arrow(tf) \dontshow{\}) # examplesIf} }
% Generated by roxygen2: do not edit by hand % Please edit documentation in R/de.R \name{de} \alias{de} \title{Finds all decendants of a particular node in a tree.} \usage{ de(x, tree) } \arguments{ \item{x}{node} \item{tree}{interaction tree} } \value{ Returns all the descendant nodes from the node `x` } \description{ This function identifies all nodes in a given tree structure which are descendants of the input node. } \examples{ set.seed(10) dat<- gdataM(1000,2,3,1) tre <- grow.ITR(dat, split.var = 1:4) de('01', tree = tre) # "011" "012" "0121" "0122" }
/man/de.Rd
no_license
menghaomiao/ITR.Forest
R
false
true
596
rd
% Generated by roxygen2: do not edit by hand % Please edit documentation in R/de.R \name{de} \alias{de} \title{Finds all decendants of a particular node in a tree.} \usage{ de(x, tree) } \arguments{ \item{x}{node} \item{tree}{interaction tree} } \value{ Returns all the descendant nodes from the node `x` } \description{ This function identifies all nodes in a given tree structure which are descendants of the input node. } \examples{ set.seed(10) dat<- gdataM(1000,2,3,1) tre <- grow.ITR(dat, split.var = 1:4) de('01', tree = tre) # "011" "012" "0121" "0122" }
## PR 1271 detach("package:base") crashes R. tools::assertError(detach("package:base")) ## invalid 'lib.loc' stopifnot(length(installed.packages("mgcv")) == 0) ## gave a low-level error message ## package.skeleton() with metadata-only code ## work in current (= ./tests/ directory): tmp <- tempfile() writeLines(c('setClass("foo", contains="numeric")', 'setMethod("show", "foo",', ' function(object) cat("I am a \\"foo\\"\\n"))'), tmp) if(file.exists("myTst")) unlink("myTst", recursive=TRUE) package.skeleton("myTst", code_files = tmp)# with a file name warning file.copy(tmp, (tm2 <- paste(tmp,".R", sep=""))) unlink("myTst", recursive=TRUE) op <- options(warn=2) # *NO* "invalid file name" warning {failed in 2.7.[01]}: package.skeleton("myTst", code_files = tm2) options(op) ##_2_ only a class, no generics/methods: writeLines(c('setClass("DocLink",', 'representation(name="character",', ' desc="character"))'), tmp) if(file.exists("myTst2")) unlink("myTst2", recursive=TRUE) package.skeleton("myTst2", code_files = tmp) ##- end_2_ # failed in R 2.11.0 stopifnot(1 == grep("setClass", readLines(list.files("myTst/R", full.names=TRUE))), c("foo-class.Rd","show-methods.Rd") %in% list.files("myTst/man")) ## failed for several reasons in R < 2.7.0 ## ## Part 2: -- build, install, load and "inspect" the package: build.pkg <- function(dir) { stopifnot(dir.exists(dir)) patt <- paste(basename(dir), ".*tar\\.gz$", sep="_") unlink(dir('.', pattern = patt)) Rcmd <- paste(shQuote(file.path(R.home("bin"), "R")), "CMD") r <- tail(system(paste(Rcmd, "build --keep-empty-dirs", shQuote(dir)), intern = TRUE), 3) ## return name of tar file built structure(dir('.', pattern = patt), log3 = r) } build.pkg("myTst") ## clean up any previous attempt (which might have left a 00LOCK) unlink("myLib", recursive = TRUE) dir.create("myLib") install.packages("myTst", lib = "myLib", repos=NULL, type = "source") # with warnings print(installed.packages(lib.loc= "myLib", priority= "NA"))## (PR#13332) 
stopifnot(require("myTst",lib = "myLib")) sm <- findMethods(show, where= as.environment("package:myTst")) stopifnot(names(sm@names) == "foo") unlink("myTst_*") ## getPackageName() for "package:foo": require('methods') library(tools) oo <- options(warn=2) detach("package:tools", unload=TRUE) options(oo) ## gave warning (-> Error) about creating package name ## More building & installing packages ## NB: tests were added here for 2.11.0. ## NB^2: do not do this in the R sources (but in a build != src directory!) ## and this testdir is not installed. if(interactive() && Sys.getenv("USER") == "maechler") Sys.setenv(SRCDIR = normalizePath("~/R/D/r-devel/R/tests")) (pkgSrcPath <- file.path(Sys.getenv("SRCDIR"), "Pkgs"))# e.g., -> "../../R/tests/Pkgs" if(!file_test("-d", pkgSrcPath) && !interactive()) { unlink("myTst", recursive=TRUE) print(proc.time()) q("no") } ## else w/o clause: do.cleanup <- !nzchar(Sys.getenv("R_TESTS_NO_CLEAN")) has.symlink <- (.Platform$OS.type != "windows") ## Installing "on to" a package existing as symlink in the lib.loc ## -- used to fail with misleading error message (#PR 16725): if(has.symlink && dir.create("myLib_2") && file.rename("myLib/myTst", "myLib_2/myTst") && file.symlink("../myLib_2/myTst", "myLib/myTst")) install.packages("myTst", lib = "myLib", repos=NULL, type = "source") ## In R <= 3.3.2 gave error with *misleading* error message: ## ERROR: ‘myTst’ is not a legal package name ## file.copy(pkgSrcPath, tempdir(), recursive = TRUE) - not ok: replaces symlink by copy system(paste('cp -R', shQuote(pkgSrcPath), shQuote(tempdir()))) pkgPath <- file.path(tempdir(), "Pkgs") ## pkgB tests an empty R directory dir.create(file.path(pkgPath, "pkgB", "R"), recursive = TRUE, showWarnings = FALSE) p.lis <- if("Matrix" %in% row.names(installed.packages(.Library))) c("pkgA", "pkgB", "exNSS4") else "exNSS4" pkgApath <- file.path(pkgPath, "pkgA") if("pkgA" %in% p.lis && !dir.exists(d <- pkgApath)) { cat("symlink 'pkgA' does not exist as directory 
",d,"; copying it\n", sep='') file.copy(file.path(pkgPath, "xDir", "pkg"), to = d, recursive=TRUE) ## if even the copy failed (NB: pkgB depends on pkgA) if(!dir.exists(d)) p.lis <- p.lis[!(p.lis %in% c("pkgA", "pkgB"))] } for(p. in p.lis) { cat("building package", p., "...\n") r <- build.pkg(file.path(pkgPath, p.)) cat("installing package", p., "using file", r, "...\n") ## we could install the tar file ... (see build.pkg()'s definition) install.packages(r, lib = "myLib", repos=NULL, type = "source") stopifnot(require(p.,lib = "myLib", character.only=TRUE)) detach(pos = match(p., sub("^package:","", search()))) } (res <- installed.packages(lib.loc = "myLib", priority = "NA")) stopifnot(identical(res[,"Package"], setNames(,sort(c(p.lis, "myTst")))), res[,"LibPath"] == "myLib") ### Specific Tests on our "special" packages: ------------------------------ ## These used to fail because of the sym.link in pkgA if("pkgA" %in% p.lis && dir.exists(pkgApath)) { cat("undoc(pkgA):\n"); print(uA <- tools::undoc(dir = pkgApath)) cat("codoc(pkgA):\n"); print(cA <- tools::codoc(dir = pkgApath)) stopifnot(identical(uA$`code objects`, c("nil", "search")), identical(uA$`data sets`, "nilData")) } ## - Check conflict message. ## - Find objects which are NULL via "::" -- not to be expected often ## we have one in our pkgA, but only if Matrix is present. 
if(dir.exists(file.path("myLib", "pkgA"))) { msgs <- capture.output(require(pkgA, lib="myLib"), type = "message") writeLines(msgs) stopifnot(length(msgs) > 2, length(grep("The following object is masked.*package:base", msgs)) > 0, length(grep("\\bsearch\\b", msgs)) > 0) data(package = "pkgA") # -> nilData stopifnot(is.null( pkgA:: nil), is.null( pkgA::: nil), is.null( pkgA:: nilData)) # <- ## R-devel (pre 3.2.0) wrongly errored for NULL lazy data ## ::: does not apply to data sets: tools::assertError(is.null(pkgA:::nilData)) } ## Check error from invalid logical field in DESCRIPTION: (okA <- dir.exists(pkgApath) && file.exists(DN <- file.path(pkgApath, "DESCRIPTION"))) if(okA) { Dlns <- readLines(DN); i <- grep("^LazyData:", Dlns) Dlns[i] <- paste0(Dlns[i], ",") ## adding a "," writeLines(Dlns, con = DN) if(interactive()) { ## FIXME! Why does this fail, e.g., when run via 'make' ? ## install.packages() should give "the correct" error but we cannot catch it ## One level lower is not much better, needing sink() as capture.output() fails ftf <- file(tf <- tempfile("inst_pkg"), open = "wt") sink(ftf); sink(ftf, type = "message")# "message" should be sufficient tools:::.install_packages(c("--clean", "--library=myLib", pkgApath)) ## ----------------- ---- sink(type="message"); sink()## ; close(ftf); rm(ftf)# end sink() writeLines(paste(" ", msgs <- readLines(tf))) print(err <- grep("^ERROR:", msgs, value=TRUE)) stopifnot(length(err) > 0, grepl("invalid .*LazyData .*DESCRIPTION", err)) } } ## tests here should *NOT* assume recommended packages, ## let alone where they are installed if(dir.exists(file.path("myLib", "exNSS4")) && dir.exists(file.path(.Library, "Matrix"))) { for(ns in c(rev(p.lis), "Matrix")) unloadNamespace(ns) ## Both exNSS4 and Matrix define "atomicVector" *the same*, ## but 'exNSS4' has it extended - and hence *both* are registered in cache -> "conflicts" requireNamespace("exNSS4", lib= "myLib") ## Found in cache, since there is only one definition. 
## Might confuse users. stopifnot(isVirtualClass(getClass("atomicVector"))) requireNamespace("Matrix", lib= .Library) ## Throws an error, because there is ambiguity in the cache, ## and the dynamic search will not find anything, since the packages ## are not attached. tools::assertCondition( acl <- getClass("atomicVector") ) ## Once Matrix is attached, we find a unique definition. library(Matrix) stopifnot(isVirtualClass(getClass("atomicVector"))) } ## clean up rmL <- c("myLib", if(has.symlink) "myLib_2", "myTst", file.path(pkgPath)) if(do.cleanup) { for(nm in rmL) unlink(nm, recursive = TRUE) } else { cat("Not cleaning, i.e., keeping ", paste(rmL, collapse=", "), "\n") } proc.time()
/source/macOS/R-Portable-Mac/tests/reg-packages.R
permissive
romanhaa/Cerebro
R
false
false
8,537
r
## PR 1271 detach("package:base") crashes R. tools::assertError(detach("package:base")) ## invalid 'lib.loc' stopifnot(length(installed.packages("mgcv")) == 0) ## gave a low-level error message ## package.skeleton() with metadata-only code ## work in current (= ./tests/ directory): tmp <- tempfile() writeLines(c('setClass("foo", contains="numeric")', 'setMethod("show", "foo",', ' function(object) cat("I am a \\"foo\\"\\n"))'), tmp) if(file.exists("myTst")) unlink("myTst", recursive=TRUE) package.skeleton("myTst", code_files = tmp)# with a file name warning file.copy(tmp, (tm2 <- paste(tmp,".R", sep=""))) unlink("myTst", recursive=TRUE) op <- options(warn=2) # *NO* "invalid file name" warning {failed in 2.7.[01]}: package.skeleton("myTst", code_files = tm2) options(op) ##_2_ only a class, no generics/methods: writeLines(c('setClass("DocLink",', 'representation(name="character",', ' desc="character"))'), tmp) if(file.exists("myTst2")) unlink("myTst2", recursive=TRUE) package.skeleton("myTst2", code_files = tmp) ##- end_2_ # failed in R 2.11.0 stopifnot(1 == grep("setClass", readLines(list.files("myTst/R", full.names=TRUE))), c("foo-class.Rd","show-methods.Rd") %in% list.files("myTst/man")) ## failed for several reasons in R < 2.7.0 ## ## Part 2: -- build, install, load and "inspect" the package: build.pkg <- function(dir) { stopifnot(dir.exists(dir)) patt <- paste(basename(dir), ".*tar\\.gz$", sep="_") unlink(dir('.', pattern = patt)) Rcmd <- paste(shQuote(file.path(R.home("bin"), "R")), "CMD") r <- tail(system(paste(Rcmd, "build --keep-empty-dirs", shQuote(dir)), intern = TRUE), 3) ## return name of tar file built structure(dir('.', pattern = patt), log3 = r) } build.pkg("myTst") ## clean up any previous attempt (which might have left a 00LOCK) unlink("myLib", recursive = TRUE) dir.create("myLib") install.packages("myTst", lib = "myLib", repos=NULL, type = "source") # with warnings print(installed.packages(lib.loc= "myLib", priority= "NA"))## (PR#13332) 
stopifnot(require("myTst",lib = "myLib")) sm <- findMethods(show, where= as.environment("package:myTst")) stopifnot(names(sm@names) == "foo") unlink("myTst_*") ## getPackageName() for "package:foo": require('methods') library(tools) oo <- options(warn=2) detach("package:tools", unload=TRUE) options(oo) ## gave warning (-> Error) about creating package name ## More building & installing packages ## NB: tests were added here for 2.11.0. ## NB^2: do not do this in the R sources (but in a build != src directory!) ## and this testdir is not installed. if(interactive() && Sys.getenv("USER") == "maechler") Sys.setenv(SRCDIR = normalizePath("~/R/D/r-devel/R/tests")) (pkgSrcPath <- file.path(Sys.getenv("SRCDIR"), "Pkgs"))# e.g., -> "../../R/tests/Pkgs" if(!file_test("-d", pkgSrcPath) && !interactive()) { unlink("myTst", recursive=TRUE) print(proc.time()) q("no") } ## else w/o clause: do.cleanup <- !nzchar(Sys.getenv("R_TESTS_NO_CLEAN")) has.symlink <- (.Platform$OS.type != "windows") ## Installing "on to" a package existing as symlink in the lib.loc ## -- used to fail with misleading error message (#PR 16725): if(has.symlink && dir.create("myLib_2") && file.rename("myLib/myTst", "myLib_2/myTst") && file.symlink("../myLib_2/myTst", "myLib/myTst")) install.packages("myTst", lib = "myLib", repos=NULL, type = "source") ## In R <= 3.3.2 gave error with *misleading* error message: ## ERROR: ‘myTst’ is not a legal package name ## file.copy(pkgSrcPath, tempdir(), recursive = TRUE) - not ok: replaces symlink by copy system(paste('cp -R', shQuote(pkgSrcPath), shQuote(tempdir()))) pkgPath <- file.path(tempdir(), "Pkgs") ## pkgB tests an empty R directory dir.create(file.path(pkgPath, "pkgB", "R"), recursive = TRUE, showWarnings = FALSE) p.lis <- if("Matrix" %in% row.names(installed.packages(.Library))) c("pkgA", "pkgB", "exNSS4") else "exNSS4" pkgApath <- file.path(pkgPath, "pkgA") if("pkgA" %in% p.lis && !dir.exists(d <- pkgApath)) { cat("symlink 'pkgA' does not exist as directory 
",d,"; copying it\n", sep='') file.copy(file.path(pkgPath, "xDir", "pkg"), to = d, recursive=TRUE) ## if even the copy failed (NB: pkgB depends on pkgA) if(!dir.exists(d)) p.lis <- p.lis[!(p.lis %in% c("pkgA", "pkgB"))] } for(p. in p.lis) { cat("building package", p., "...\n") r <- build.pkg(file.path(pkgPath, p.)) cat("installing package", p., "using file", r, "...\n") ## we could install the tar file ... (see build.pkg()'s definition) install.packages(r, lib = "myLib", repos=NULL, type = "source") stopifnot(require(p.,lib = "myLib", character.only=TRUE)) detach(pos = match(p., sub("^package:","", search()))) } (res <- installed.packages(lib.loc = "myLib", priority = "NA")) stopifnot(identical(res[,"Package"], setNames(,sort(c(p.lis, "myTst")))), res[,"LibPath"] == "myLib") ### Specific Tests on our "special" packages: ------------------------------ ## These used to fail because of the sym.link in pkgA if("pkgA" %in% p.lis && dir.exists(pkgApath)) { cat("undoc(pkgA):\n"); print(uA <- tools::undoc(dir = pkgApath)) cat("codoc(pkgA):\n"); print(cA <- tools::codoc(dir = pkgApath)) stopifnot(identical(uA$`code objects`, c("nil", "search")), identical(uA$`data sets`, "nilData")) } ## - Check conflict message. ## - Find objects which are NULL via "::" -- not to be expected often ## we have one in our pkgA, but only if Matrix is present. 
if(dir.exists(file.path("myLib", "pkgA"))) { msgs <- capture.output(require(pkgA, lib="myLib"), type = "message") writeLines(msgs) stopifnot(length(msgs) > 2, length(grep("The following object is masked.*package:base", msgs)) > 0, length(grep("\\bsearch\\b", msgs)) > 0) data(package = "pkgA") # -> nilData stopifnot(is.null( pkgA:: nil), is.null( pkgA::: nil), is.null( pkgA:: nilData)) # <- ## R-devel (pre 3.2.0) wrongly errored for NULL lazy data ## ::: does not apply to data sets: tools::assertError(is.null(pkgA:::nilData)) } ## Check error from invalid logical field in DESCRIPTION: (okA <- dir.exists(pkgApath) && file.exists(DN <- file.path(pkgApath, "DESCRIPTION"))) if(okA) { Dlns <- readLines(DN); i <- grep("^LazyData:", Dlns) Dlns[i] <- paste0(Dlns[i], ",") ## adding a "," writeLines(Dlns, con = DN) if(interactive()) { ## FIXME! Why does this fail, e.g., when run via 'make' ? ## install.packages() should give "the correct" error but we cannot catch it ## One level lower is not much better, needing sink() as capture.output() fails ftf <- file(tf <- tempfile("inst_pkg"), open = "wt") sink(ftf); sink(ftf, type = "message")# "message" should be sufficient tools:::.install_packages(c("--clean", "--library=myLib", pkgApath)) ## ----------------- ---- sink(type="message"); sink()## ; close(ftf); rm(ftf)# end sink() writeLines(paste(" ", msgs <- readLines(tf))) print(err <- grep("^ERROR:", msgs, value=TRUE)) stopifnot(length(err) > 0, grepl("invalid .*LazyData .*DESCRIPTION", err)) } } ## tests here should *NOT* assume recommended packages, ## let alone where they are installed if(dir.exists(file.path("myLib", "exNSS4")) && dir.exists(file.path(.Library, "Matrix"))) { for(ns in c(rev(p.lis), "Matrix")) unloadNamespace(ns) ## Both exNSS4 and Matrix define "atomicVector" *the same*, ## but 'exNSS4' has it extended - and hence *both* are registered in cache -> "conflicts" requireNamespace("exNSS4", lib= "myLib") ## Found in cache, since there is only one definition. 
## Might confuse users. stopifnot(isVirtualClass(getClass("atomicVector"))) requireNamespace("Matrix", lib= .Library) ## Throws an error, because there is ambiguity in the cache, ## and the dynamic search will not find anything, since the packages ## are not attached. tools::assertCondition( acl <- getClass("atomicVector") ) ## Once Matrix is attached, we find a unique definition. library(Matrix) stopifnot(isVirtualClass(getClass("atomicVector"))) } ## clean up rmL <- c("myLib", if(has.symlink) "myLib_2", "myTst", file.path(pkgPath)) if(do.cleanup) { for(nm in rmL) unlink(nm, recursive = TRUE) } else { cat("Not cleaning, i.e., keeping ", paste(rmL, collapse=", "), "\n") } proc.time()
/man/MVA.corplot.Rd
no_license
SeptiawanAjiP/RVAideMemoire
R
false
false
10,948
rd
testlist <- list(m = NULL, repetitions = 0L, in_m = structure(c(2.31584307392677e+77, 9.53818252170339e+295, 1.22809678694366e+146, 4.12396251261199e-221, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0), .Dim = c(10L, 3L))) result <- do.call(CNull:::communities_individual_based_sampling_alpha,testlist) str(result)
/CNull/inst/testfiles/communities_individual_based_sampling_alpha/AFL_communities_individual_based_sampling_alpha/communities_individual_based_sampling_alpha_valgrind_files/1615778939-test.R
no_license
akhikolla/updatedatatype-list2
R
false
false
348
r
testlist <- list(m = NULL, repetitions = 0L, in_m = structure(c(2.31584307392677e+77, 9.53818252170339e+295, 1.22809678694366e+146, 4.12396251261199e-221, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0), .Dim = c(10L, 3L))) result <- do.call(CNull:::communities_individual_based_sampling_alpha,testlist) str(result)
library( magrittr ) ## reading raw tsv StandardCurrencyTypeList <- data.table::fread( file = "data-raw/StandardCurrencyTypeList.tsv", encoding = "UTF-8" ) ## trimming character columns lapply( X = names( x = StandardCurrencyTypeList ), FUN = function( col ) { if( is.character( StandardCurrencyTypeList[[ col ]] ) ) { data.table::set( x = StandardCurrencyTypeList, j = col, value = trimws( x = StandardCurrencyTypeList[[ col ]], which = "both" ) ) } } ) ## removing empty columns data.table::set( x = StandardCurrencyTypeList, j = vapply( X = StandardCurrencyTypeList, FUN = collapse::allNA, FUN.VALUE = TRUE ) %>% which() %>% names(), value = NULL ) ## save package data in the correct format usethis::use_data( StandardCurrencyTypeList, overwrite = TRUE )
/data-raw/StandardCurrencyTypeList.R
no_license
krose/entsoeapi
R
false
false
1,040
r
library( magrittr ) ## reading raw tsv StandardCurrencyTypeList <- data.table::fread( file = "data-raw/StandardCurrencyTypeList.tsv", encoding = "UTF-8" ) ## trimming character columns lapply( X = names( x = StandardCurrencyTypeList ), FUN = function( col ) { if( is.character( StandardCurrencyTypeList[[ col ]] ) ) { data.table::set( x = StandardCurrencyTypeList, j = col, value = trimws( x = StandardCurrencyTypeList[[ col ]], which = "both" ) ) } } ) ## removing empty columns data.table::set( x = StandardCurrencyTypeList, j = vapply( X = StandardCurrencyTypeList, FUN = collapse::allNA, FUN.VALUE = TRUE ) %>% which() %>% names(), value = NULL ) ## save package data in the correct format usethis::use_data( StandardCurrencyTypeList, overwrite = TRUE )
## Working Directory with the zip file. library(dplyr) unzip("exdata-data-household_power_consumption.zip", exdir =".", unzip = "internal", setTimes = TRUE) dir() bigfile <- read.table("household_power_consumption.txt", na.strings = c("?", ""), sep = ";", header = T) tblbigfile <- tbl_df(bigfile) str(tblbigfile) tblbigfile$Date <- as.Date(tblbigfile$Date, format = "%d/%m/%Y") tblbigfile$timetemp <- paste(tblbigfile$Date, tblbigfile$Time) tblbigfile$Time <- strptime(tblbigfile$timetemp, format = "%Y-%m-%d %H:%M:%S") str(bigfiledf) tblbigfile <- tblbigfile[ , 1:9] day1 <- as.Date("2007-02-01") day2 <- as.Date("2007-02-02") twodays <- tblbigfile[tblbigfile$Date >= day1 & tblbigfile$Date <= day2,] ##Construct the plot and save it to a PNG file with a width of 480 pixels and a height of 480 pixels. png(file="plot3.png",width=480,height=480, units = "px") plot(twodays$Time, twodays$Sub_metering_1, ylab = "Energy sub metering", type = "l") lines(twodays$Time,twodays$Sub_metering_2, type = "l", col = "red") lines(twodays$Time,twodays$Sub_metering_3, type = "l", col = "blue") legend("topright", c("Sub_metering_1", "Sub_metering_2", "Sub_metering_3"), lty = c(1,1,1), lwd = c(2.5, 2.5, 2.5), col= c("black", "red", "blue")) dev.off()
/plot3.R
no_license
Mahidur/ExData_Plotting1
R
false
false
1,415
r
## Working Directory with the zip file. library(dplyr) unzip("exdata-data-household_power_consumption.zip", exdir =".", unzip = "internal", setTimes = TRUE) dir() bigfile <- read.table("household_power_consumption.txt", na.strings = c("?", ""), sep = ";", header = T) tblbigfile <- tbl_df(bigfile) str(tblbigfile) tblbigfile$Date <- as.Date(tblbigfile$Date, format = "%d/%m/%Y") tblbigfile$timetemp <- paste(tblbigfile$Date, tblbigfile$Time) tblbigfile$Time <- strptime(tblbigfile$timetemp, format = "%Y-%m-%d %H:%M:%S") str(bigfiledf) tblbigfile <- tblbigfile[ , 1:9] day1 <- as.Date("2007-02-01") day2 <- as.Date("2007-02-02") twodays <- tblbigfile[tblbigfile$Date >= day1 & tblbigfile$Date <= day2,] ##Construct the plot and save it to a PNG file with a width of 480 pixels and a height of 480 pixels. png(file="plot3.png",width=480,height=480, units = "px") plot(twodays$Time, twodays$Sub_metering_1, ylab = "Energy sub metering", type = "l") lines(twodays$Time,twodays$Sub_metering_2, type = "l", col = "red") lines(twodays$Time,twodays$Sub_metering_3, type = "l", col = "blue") legend("topright", c("Sub_metering_1", "Sub_metering_2", "Sub_metering_3"), lty = c(1,1,1), lwd = c(2.5, 2.5, 2.5), col= c("black", "red", "blue")) dev.off()
#' Compute Spans for Ticks on Custom Axes #' #' @param breaks Vector of breaks for a ggplot object #' @param span Proportion of the range of the breaks the ticks should span in each direction #' #' @return Value representing the distance left or right the tick should span from the drawn axis #' #' #' size_ticks <- function(breaks, span = .0125){ (max(breaks, na.rm = TRUE)-min(breaks, na.rm = TRUE)) * span }
/R/size_ticks.R
permissive
ryan-heslin/matador
R
false
false
414
r
#' Compute Spans for Ticks on Custom Axes #' #' @param breaks Vector of breaks for a ggplot object #' @param span Proportion of the range of the breaks the ticks should span in each direction #' #' @return Value representing the distance left or right the tick should span from the drawn axis #' #' #' size_ticks <- function(breaks, span = .0125){ (max(breaks, na.rm = TRUE)-min(breaks, na.rm = TRUE)) * span }
#! /usr/bin/env R library(tidyverse) library(TCGAbiolinks) tcga_ids <- as_tibble(getGDCprojects()) %>% dplyr::select(id) %>% dplyr::filter(grepl("TCGA", id)) %>% dplyr::mutate(id = gsub("TCGA-", "", id)) %>% unlist() %>% unname() ##each time run, some disease types were missed, so rerun twice to get all 32 GDCquery_Maf(tcga_ids, pipelines="mutect2") GDCquery_Maf(tcga_ids, pipelines="mutect2") GDCquery_Maf(tcga_ids, pipelines="mutect2")
/bin/TCGAbiolinks.maf_dl.R
no_license
brucemoran/TCGA_MAF_GeneList-vs-Random
R
false
false
490
r
#! /usr/bin/env R library(tidyverse) library(TCGAbiolinks) tcga_ids <- as_tibble(getGDCprojects()) %>% dplyr::select(id) %>% dplyr::filter(grepl("TCGA", id)) %>% dplyr::mutate(id = gsub("TCGA-", "", id)) %>% unlist() %>% unname() ##each time run, some disease types were missed, so rerun twice to get all 32 GDCquery_Maf(tcga_ids, pipelines="mutect2") GDCquery_Maf(tcga_ids, pipelines="mutect2") GDCquery_Maf(tcga_ids, pipelines="mutect2")
closeAllConnections() rm(list = ls(all = TRUE)) library(stringr) source("C://users/John/Google Drive/R libraries/Libraries.R") source("C://users/John/Google Drive/Mind games/Investment strategies/functions3.R") #Run the analysis monte_carlo_investment()
/main.R
no_license
johnbrussell/investment_strategies
R
false
false
256
r
closeAllConnections() rm(list = ls(all = TRUE)) library(stringr) source("C://users/John/Google Drive/R libraries/Libraries.R") source("C://users/John/Google Drive/Mind games/Investment strategies/functions3.R") #Run the analysis monte_carlo_investment()
library(raster) # Package to handle raster-formatted spatial data library(rasterVis) # The rasterVis package complements the raster package, providing a set of methods for enhanced visualization and interaction # Defines visualisation methods with 'levelplot' library(dismo) # Dismo has the SDM analyses for maxent and support vector machines used by R library(rgeos) # To define circles with a radius around the subsampled points # geos is a geometry engine, need to install package to access these capabilities (such as defining circumfrances) library(rJava) library(rgdal) # Provides access to projection/transformation operations from a different library # Coordinate referancing system** library(sp) # Coordinate referancing system library(ncdf4) # Opens access to read and write on netCDF files library(kernlab) # Required for support vector machines # installed and running BUT UNSURE of function library(grDevices) # For colouring maps library(colorRamps) #Allows easy construction of color palettes #Loading data for project now #Ensure WD is in correct place WILL BE IN NEW PLACE FOR EACH SPECIES setwd("~/Documents/UoY/Dissertation/Common Cockle") locs = read.csv("Cockle_Severn_UTM.csv", header=T, sep = ",") #loading severn files #ALL 9 TO START WITH + MASK2? 
tidal_range<-raster("Severn_unaltered Cockle/tidal_range_masked.tif") subtidal<-raster("Severn_unaltered Cockle/subtidal_masked.tif") min_elev<-raster("Severn_unaltered Cockle/min_elev_masked.tif") max_velocity<-raster("Severn_unaltered Cockle/max_vel_masked.tif") max_elev<-raster("Severn_unaltered Cockle/max_elev_masked.tif") mask_2<-raster("Severn_unaltered Cockle/mask2.tif") depth<-raster("Severn_unaltered Cockle/bathy_masked.tif") dry_always<-raster("Severn_unaltered Cockle/always_dry_masked.tif") intertidal<-raster("Severn_unaltered Cockle/intertidal_masked.tif") avg_velocity<-raster("Severn_unaltered Cockle/av_vel_masked.tif") mask<-depth # Extract depth values to table of species co-ordinates locs_ext=extract(depth, locs[,c("X","Y")]) #this has created a VALUE of depth for each single point as dictated by x&y coordinates from species data #now each species seen has a depth based on its coordinates in the depth raster file we are given!! # Build a data frame of species occurrence data and depth data locs = data.frame(locs, locs_ext) # added locs_ext to the final column in locs file so now coordinates for species can be coupled with their depth in teh same file # Remove points with NA values for depth, i.e. on land locs = subset(locs, !is.na(locs_ext)) e = extent(depth) #subset extracted all values and rows with 'na' from the locs_ext column # WHAT DOES EXTENT DO?! # without using the 'mask' technique above will this still remove all 'land' data above? #what is "e"?? - is it simply giving the 'extent' of the data set in a min and max of x and y? 
# Create sequences of X and Y values to define a grid # this a 1x1 km grid xgrid = seq(e@xmin,e@xmax,1000) ygrid = seq(e@ymin,e@ymax,1000) #"seq()" works by 'from', 'to', 'by incremental step' #generated a sequence from xmin value to xmax value in "e" that increase by 1000 # Identify occurrence points within each grid cell, then draw one at random subs = c() for(i in 1:(length(xgrid)-1)) { for(j in 1:(length(ygrid)-1)) { gridsq = subset(locs, Y > ygrid[j] & Y < ygrid[j+1] & X > xgrid[i] & X < xgrid[i+1]) if(dim(gridsq)[1]>0) { subs = rbind(subs, gridsq[sample(1:dim(gridsq)[1],1 ), ]) } } } dim(locs);dim(subs) # Confirm that you have a smaller dataset than you started with (1st number) #for is an argument that will loop a desired action on a given value in a vector #length will get value the legth of vectors and factors in a defined object ##this a loop going through x values (every 1000m) and at each new x square, looping through all the y's related to that x (and so on for all the x values) #gridsq is a complex way of saying the square is greater than the start of one x/y value and less than the next one after it #rbind & cbind combine/create a matrix by rows (rbind) or columns (cbind) of the two seperate vector sets # Assign correct co-ordinate reference system to subset coordinates <- cbind(subs$X, subs$Y) subs_df <- SpatialPointsDataFrame(coordinates, subs, proj4string=CRS("+proj=utm +zone=30 ellps=WGS84")) #cbind of subs$X and subs$Y created a new data set/matrix called coordinates that only has coordinate data in it! # we create 20,000 random "background points". There are other ways to do this, but start with this. 
# Background (pseudo-absence) points and model input data frames ----

# Draw 20,000 random background points from water cells of the mask
psa <- randomPoints(mask, 20000, ext = e)

# Stack the environmental predictors used by the model
# (always-dry layer deliberately excluded)
env_uk <- stack(depth, subtidal, min_elev)

# Predictor values at the subsampled presence points...
presence_uk <- extract(env_uk, subs_df[, c("X", "Y")])
# ...and at the pseudo-absence points
pseudo_uk <- extract(env_uk, psa)

# Presence data frame: two coordinate columns followed by the predictors
presence_uk <- data.frame(X = subs_df$X, Y = subs_df$Y, presence_uk)

# Pseudo-absence matrix -> SpatialPointsDataFrame with the study CRS.
# NOTE: fixed the proj4 string, which was missing '+' on the ellipsoid
# term ("ellps=WGS84") and was therefore ignored by PROJ.
psapoints <- data.frame(psa)
coordinates <- cbind(psapoints$x, psapoints$y)
psadf <- SpatialPointsDataFrame(
  coordinates, psapoints,
  proj4string = CRS("+proj=utm +zone=30 +ellps=WGS84")
)

# Pseudo-absence data frame: X, Y, then the predictor columns
psadfx <- psadf@coords
colnames(psadfx) <- c("X", "Y")
pseudo_uk <- data.frame(cbind(psadfx, pseudo_uk))

# 5-fold group assignments for presences and pseudo-absences
# (kfold partitions a data set k times for model testing purposes)
group_p <- kfold(presence_uk, 5)
group_a <- kfold(pseudo_uk, 5)

# Preallocate containers for the k-fold loop.
# NOTE: the original used list(5), which creates a length-1 list whose
# only element is the number 5; vector("list", 5) is the intended
# length-5 list of empty slots.
evaluations <- vector("list", 5)
models <- vector("list", 5)

# If exploring other model types (e.g. SVM instead of maxent), swap the
# fitting call in the loop below — inputs may differ, read the docs.
# K-fold cross-validation of the maxent model ----
# You will want to spend a bit of time making predictions on each of the
# 5 sub-models created here, to check you can make decent predictions
# even with missing data.
for (test in 1:5) {
  # Use the kfold groupings to split presences and pseudo-absences into
  # training folds (everything except `test`) and the held-out test fold
  train_p <- presence_uk[group_p != test, c("X", "Y")]
  train_a <- pseudo_uk[group_a != test, c("X", "Y")]
  test_p  <- presence_uk[group_p == test, c("X", "Y")]
  test_a  <- pseudo_uk[group_a == test, c("X", "Y")]

  # Fit maxent on the training points (may take a few moments).
  # NOTE: [[ ]] assignment fixes the "implicit list embedding is
  # deprecated" warning produced by the original single-bracket form,
  # which would fail in future versions of R.
  models[[test]] <- maxent(env_uk, p = train_p, a = train_a)

  # Validate against the held-out fold
  evaluations[[test]] <- evaluate(test_p, test_a, models[[test]], env_uk)
}

# Print the AUC for each of the k folds — ideally > 0.75 for all
cat("K-FOLD AUC: ")
for (test in 1:5) {
  cat(paste0(evaluations[[test]]@auc, ","))
}
cat("\n")

# Assess Spatial Sorting Bias (SSB) using the folds left in scope from
# the final loop iteration; a ratio near 1 indicates little bias
pres_train_me <- train_p
pres_test_me  <- test_p
back_train_me <- train_a
back_test_me  <- test_a
sb <- ssb(pres_test_me, back_test_me, pres_train_me)
sb[, 1] / sb[, 2]

# Adjust for SSB (if present) via distance-based pair-wise sampling,
# then re-check the ratio
i <- pwdSample(pres_test_me, back_test_me, pres_train_me, n = 1, tr = 0.1)
pres_test_pwd_me <- pres_test_me[!is.na(i[, 1]), ]
back_test_pwd_me <- back_test_me[na.omit(as.vector(i)), ]
sb2 <- ssb(pres_test_pwd_me, back_test_pwd_me, pres_train_me)
sb2[1] / sb2[2]

# Final model: maxent fitted on ALL points (no k-fold hold-out) ----
pres_points <- presence_uk[c("X", "Y")]
abs_points  <- pseudo_uk[c("X", "Y")]
model <- maxent(env_uk, p = pres_points, a = abs_points)

# Habitat-suitability prediction over the present-day environment,
# plottable as a raster
pred_PredFull <- predict(model, env_uk)
# Inspect and export the full (present-day) prediction ----
plot(pred_PredFull)

# AUC for the full model; print by typing the object name
evaluate_full <- evaluate(
  presence_uk[c("X", "Y")], pseudo_uk[c("X", "Y")], model, env_uk
)
evaluate_full

# Species-specific sensitivity threshold: suitability at or above this
# value is where one would expect to see the species
message(threshold(evaluate_full)$spec_sens)

# Response curves for the FULL model, to check whether they change
response(model)

# Write the prediction raster to the working directory — useful for QGIS
writeRaster(pred_PredFull, filename = "pred7_me Cockle.tif",
            options = "INTERLEAVE=BAND", overwrite = TRUE)

###########################################################
###########################################################
###########################################################

# Barrage scenario ----
# Load the altered environmental layers for the barrage future
depth_Barrage    <- raster("Future Data Cockle/Barrage/bathy_masked.tif")
min_fs_Barrage   <- raster("Future Data Cockle/Barrage/min_elev_masked.tif")
subtidal_Barrage <- raster("Future Data Cockle/Barrage/subtidal_masked.tif")

# Stack the barrage-scenario layers into one object
env_Barrage <- stack(subtidal_Barrage, depth_Barrage, min_fs_Barrage)

# Project the trained model onto the barrage environment
# (may take a few moments to run)
pred_barrage <- predict(model, env_Barrage, progress = "text")
plot(pred_barrage)
writeRaster(pred_barrage, filename = "pred_barrage Cockle.tif",
            options = "INTERLEAVE=BAND", overwrite = TRUE)
# Response curves are a property of the fitted model, not of this
# scenario prediction, so none are produced here.
# AUC and sensitivity values for the barrage scenario
evaluate_barrage <- evaluate(
  presence_uk[c("X", "Y")], pseudo_uk[c("X", "Y")], model, env_Barrage
)
evaluate_barrage
message(threshold(evaluate_barrage)$spec_sens)

# Cardiff Lagoon scenario ----
# Load the altered environmental layers for the lagoon future
depth_Lagoon    <- raster("Future Data Cockle/Cardiff_Lagoon/bathy_masked.tif")
min_fs_Lagoon   <- raster("Future Data Cockle/Cardiff_Lagoon/min_elev_masked.tif")
subtidal_Lagoon <- raster("Future Data Cockle/Cardiff_Lagoon/subtidal_masked.tif")

# Stack the lagoon-scenario layers into one object
env_Lagoon <- stack(depth_Lagoon, min_fs_Lagoon, subtidal_Lagoon)

# Project the trained model onto the lagoon environment
# (may take a few moments to run)
pred_lagoon <- predict(model, env_Lagoon, progress = "text")
plot(pred_lagoon)
writeRaster(pred_lagoon, filename = "pred_lagoon Cockle.tif",
            options = "INTERLEAVE=BAND", overwrite = TRUE)

# AUC and sensitivity values for the LAGOON scenario
# (the original comment here said "barrage" — a copy-paste slip)
evaluate_lagoon <- evaluate(
  presence_uk[c("X", "Y")], pseudo_uk[c("X", "Y")], model, env_Lagoon
)
evaluate_lagoon
message(threshold(evaluate_lagoon)$spec_sens)
/Future Code Iteration Cockle.R
no_license
laxmack21/Severn-Estuary-SDMS
R
false
false
11,321
r
# Package setup ----
library(raster)     # raster-formatted spatial data
library(rasterVis)  # enhanced raster visualisation (levelplot etc.)
library(dismo)      # SDM analyses: maxent, evaluate, kfold, ssb, ...
library(rgeos)      # geometry engine (circles with a radius around points)
library(rJava)      # Java bridge required by dismo's maxent
library(rgdal)      # projection/transformation operations (CRS handling)
library(sp)         # spatial classes and coordinate reference systems
library(ncdf4)      # read/write netCDF files
library(kernlab)    # support vector machines
library(grDevices)  # colouring maps
library(colorRamps) # easy construction of colour palettes

# Data loading ----
# Working directory differs for each species.
# NOTE(review): setwd() in a script is fragile; an RStudio project or
# here::here() would be more portable. Kept for compatibility.
setwd("~/Documents/UoY/Dissertation/Common Cockle")

# Occurrence records with UTM X/Y coordinate columns.
# header = TRUE spelled out: `T` is a reassignable variable, not a
# reserved word, so relying on it is unsafe.
locs <- read.csv("Cockle_Severn_UTM.csv", header = TRUE, sep = ",")

# Severn raster files load next — all 9 layers plus mask2.
# Load the unaltered Severn environmental raster layers ----
tidal_range  <- raster("Severn_unaltered Cockle/tidal_range_masked.tif")
subtidal     <- raster("Severn_unaltered Cockle/subtidal_masked.tif")
min_elev     <- raster("Severn_unaltered Cockle/min_elev_masked.tif")
max_velocity <- raster("Severn_unaltered Cockle/max_vel_masked.tif")
max_elev     <- raster("Severn_unaltered Cockle/max_elev_masked.tif")
mask_2       <- raster("Severn_unaltered Cockle/mask2.tif")
depth        <- raster("Severn_unaltered Cockle/bathy_masked.tif")
dry_always   <- raster("Severn_unaltered Cockle/always_dry_masked.tif")
intertidal   <- raster("Severn_unaltered Cockle/intertidal_masked.tif")
avg_velocity <- raster("Severn_unaltered Cockle/av_vel_masked.tif")

# Depth doubles as the land/sea mask used later for background sampling
mask <- depth

# Attach a depth value to every occurrence record (by its X/Y coordinates),
# then drop records that fall on land, i.e. where depth is NA
locs_ext <- extract(depth, locs[, c("X", "Y")])
locs <- data.frame(locs, locs_ext)
locs <- subset(locs, !is.na(locs_ext))

# Bounding box (min/max of x and y) of the study area; used to build the
# subsampling grid and to constrain the background points
e <- extent(depth)
# Thin the occurrence data to (at most) one point per 1 x 1 km grid cell ----
# This reduces spatial autocorrelation / sampling bias before model fitting.
# seq(from, to, by): grid lines every 1000 m across the extent `e`.
xgrid <- seq(e@xmin, e@xmax, 1000)
ygrid <- seq(e@ymin, e@ymax, 1000)

# For each occupied cell, keep one randomly chosen record.
# Picks are accumulated in a list and bound once at the end — this fixes
# the original's O(n^2) pattern of growing a data frame with rbind()
# inside a nested loop.
picks <- list()
for (i in seq_len(length(xgrid) - 1)) {
  for (j in seq_len(length(ygrid) - 1)) {
    # Records falling strictly inside grid cell (i, j)
    gridsq <- subset(locs,
                     Y > ygrid[j] & Y < ygrid[j + 1] &
                     X > xgrid[i] & X < xgrid[i + 1])
    n <- nrow(gridsq)
    if (n > 0) {
      picks[[length(picks) + 1]] <- gridsq[sample(n, 1), ]
    }
  }
}
subs <- do.call(rbind, picks)

# Confirm that the subsample (2nd) is smaller than the raw data (1st)
dim(locs); dim(subs)

# Promote the subsample to a SpatialPointsDataFrame with the study CRS.
# NOTE: the original proj4 string was "+proj=utm +zone=30 ellps=WGS84";
# the ellipsoid term was missing its leading '+' and was therefore
# ignored by PROJ. Fixed to +ellps=WGS84.
coordinates <- cbind(subs$X, subs$Y)
subs_df <- SpatialPointsDataFrame(
  coordinates, subs,
  proj4string = CRS("+proj=utm +zone=30 +ellps=WGS84")
)
# Background (pseudo-absence) points and model input data frames ----

# Draw 20,000 random background points from water cells of the mask
psa <- randomPoints(mask, 20000, ext = e)

# Stack the environmental predictors used by the model
# (always-dry layer deliberately excluded)
env_uk <- stack(depth, subtidal, min_elev)

# Predictor values at the subsampled presence points...
presence_uk <- extract(env_uk, subs_df[, c("X", "Y")])
# ...and at the pseudo-absence points
pseudo_uk <- extract(env_uk, psa)

# Presence data frame: two coordinate columns followed by the predictors
presence_uk <- data.frame(X = subs_df$X, Y = subs_df$Y, presence_uk)

# Pseudo-absence matrix -> SpatialPointsDataFrame with the study CRS.
# NOTE: fixed the proj4 string, which was missing '+' on the ellipsoid
# term ("ellps=WGS84") and was therefore ignored by PROJ.
psapoints <- data.frame(psa)
coordinates <- cbind(psapoints$x, psapoints$y)
psadf <- SpatialPointsDataFrame(
  coordinates, psapoints,
  proj4string = CRS("+proj=utm +zone=30 +ellps=WGS84")
)

# Pseudo-absence data frame: X, Y, then the predictor columns
psadfx <- psadf@coords
colnames(psadfx) <- c("X", "Y")
pseudo_uk <- data.frame(cbind(psadfx, pseudo_uk))

# 5-fold group assignments for presences and pseudo-absences
# (kfold partitions a data set k times for model testing purposes)
group_p <- kfold(presence_uk, 5)
group_a <- kfold(pseudo_uk, 5)

# Preallocate containers for the k-fold loop.
# NOTE: the original used list(5), which creates a length-1 list whose
# only element is the number 5; vector("list", 5) is the intended
# length-5 list of empty slots.
evaluations <- vector("list", 5)
models <- vector("list", 5)

# If exploring other model types (e.g. SVM instead of maxent), swap the
# fitting call in the loop below — inputs may differ, read the docs.
# K-fold cross-validation of the maxent model ----
# You will want to spend a bit of time making predictions on each of the
# 5 sub-models created here, to check you can make decent predictions
# even with missing data.
for (test in 1:5) {
  # Use the kfold groupings to split presences and pseudo-absences into
  # training folds (everything except `test`) and the held-out test fold
  train_p <- presence_uk[group_p != test, c("X", "Y")]
  train_a <- pseudo_uk[group_a != test, c("X", "Y")]
  test_p  <- presence_uk[group_p == test, c("X", "Y")]
  test_a  <- pseudo_uk[group_a == test, c("X", "Y")]

  # Fit maxent on the training points (may take a few moments).
  # NOTE: [[ ]] assignment fixes the "implicit list embedding is
  # deprecated" warning produced by the original single-bracket form,
  # which would fail in future versions of R.
  models[[test]] <- maxent(env_uk, p = train_p, a = train_a)

  # Validate against the held-out fold
  evaluations[[test]] <- evaluate(test_p, test_a, models[[test]], env_uk)
}

# Print the AUC for each of the k folds — ideally > 0.75 for all
cat("K-FOLD AUC: ")
for (test in 1:5) {
  cat(paste0(evaluations[[test]]@auc, ","))
}
cat("\n")

# Assess Spatial Sorting Bias (SSB) using the folds left in scope from
# the final loop iteration; a ratio near 1 indicates little bias
pres_train_me <- train_p
pres_test_me  <- test_p
back_train_me <- train_a
back_test_me  <- test_a
sb <- ssb(pres_test_me, back_test_me, pres_train_me)
sb[, 1] / sb[, 2]

# Adjust for SSB (if present) via distance-based pair-wise sampling,
# then re-check the ratio
i <- pwdSample(pres_test_me, back_test_me, pres_train_me, n = 1, tr = 0.1)
pres_test_pwd_me <- pres_test_me[!is.na(i[, 1]), ]
back_test_pwd_me <- back_test_me[na.omit(as.vector(i)), ]
sb2 <- ssb(pres_test_pwd_me, back_test_pwd_me, pres_train_me)
sb2[1] / sb2[2]

# Final model: maxent fitted on ALL points (no k-fold hold-out) ----
pres_points <- presence_uk[c("X", "Y")]
abs_points  <- pseudo_uk[c("X", "Y")]
model <- maxent(env_uk, p = pres_points, a = abs_points)

# Habitat-suitability prediction over the present-day environment,
# plottable as a raster
pred_PredFull <- predict(model, env_uk)
# Inspect and export the full (present-day) prediction ----
plot(pred_PredFull)

# AUC for the full model; print by typing the object name
evaluate_full <- evaluate(
  presence_uk[c("X", "Y")], pseudo_uk[c("X", "Y")], model, env_uk
)
evaluate_full

# Species-specific sensitivity threshold: suitability at or above this
# value is where one would expect to see the species
message(threshold(evaluate_full)$spec_sens)

# Response curves for the FULL model, to check whether they change
response(model)

# Write the prediction raster to the working directory — useful for QGIS
writeRaster(pred_PredFull, filename = "pred7_me Cockle.tif",
            options = "INTERLEAVE=BAND", overwrite = TRUE)

###########################################################
###########################################################
###########################################################

# Barrage scenario ----
# Load the altered environmental layers for the barrage future
depth_Barrage    <- raster("Future Data Cockle/Barrage/bathy_masked.tif")
min_fs_Barrage   <- raster("Future Data Cockle/Barrage/min_elev_masked.tif")
subtidal_Barrage <- raster("Future Data Cockle/Barrage/subtidal_masked.tif")

# Stack the barrage-scenario layers into one object
env_Barrage <- stack(subtidal_Barrage, depth_Barrage, min_fs_Barrage)

# Project the trained model onto the barrage environment
# (may take a few moments to run)
pred_barrage <- predict(model, env_Barrage, progress = "text")
plot(pred_barrage)
writeRaster(pred_barrage, filename = "pred_barrage Cockle.tif",
            options = "INTERLEAVE=BAND", overwrite = TRUE)
# Response curves are a property of the fitted model, not of this
# scenario prediction, so none are produced here.
# AUC and sensitivity values for the barrage scenario
evaluate_barrage <- evaluate(
  presence_uk[c("X", "Y")], pseudo_uk[c("X", "Y")], model, env_Barrage
)
evaluate_barrage
message(threshold(evaluate_barrage)$spec_sens)

# Cardiff Lagoon scenario ----
# Load the altered environmental layers for the lagoon future
depth_Lagoon    <- raster("Future Data Cockle/Cardiff_Lagoon/bathy_masked.tif")
min_fs_Lagoon   <- raster("Future Data Cockle/Cardiff_Lagoon/min_elev_masked.tif")
subtidal_Lagoon <- raster("Future Data Cockle/Cardiff_Lagoon/subtidal_masked.tif")

# Stack the lagoon-scenario layers into one object
env_Lagoon <- stack(depth_Lagoon, min_fs_Lagoon, subtidal_Lagoon)

# Project the trained model onto the lagoon environment
# (may take a few moments to run)
pred_lagoon <- predict(model, env_Lagoon, progress = "text")
plot(pred_lagoon)
writeRaster(pred_lagoon, filename = "pred_lagoon Cockle.tif",
            options = "INTERLEAVE=BAND", overwrite = TRUE)

# AUC and sensitivity values for the LAGOON scenario
# (the original comment here said "barrage" — a copy-paste slip)
evaluate_lagoon <- evaluate(
  presence_uk[c("X", "Y")], pseudo_uk[c("X", "Y")], model, env_Lagoon
)
evaluate_lagoon
message(threshold(evaluate_lagoon)$spec_sens)