blob_id stringlengths 40 40 | directory_id stringlengths 40 40 | path stringlengths 2 327 | content_id stringlengths 40 40 | detected_licenses listlengths 0 91 | license_type stringclasses 2 values | repo_name stringlengths 5 134 | snapshot_id stringlengths 40 40 | revision_id stringlengths 40 40 | branch_name stringclasses 46 values | visit_date timestamp[us]date 2016-08-02 22:44:29 2023-09-06 08:39:28 | revision_date timestamp[us]date 1977-08-08 00:00:00 2023-09-05 12:13:49 | committer_date timestamp[us]date 1977-08-08 00:00:00 2023-09-05 12:13:49 | github_id int64 19.4k 671M ⌀ | star_events_count int64 0 40k | fork_events_count int64 0 32.4k | gha_license_id stringclasses 14 values | gha_event_created_at timestamp[us]date 2012-06-21 16:39:19 2023-09-14 21:52:42 ⌀ | gha_created_at timestamp[us]date 2008-05-25 01:21:32 2023-06-28 13:19:12 ⌀ | gha_language stringclasses 60 values | src_encoding stringclasses 24 values | language stringclasses 1 value | is_vendor bool 2 classes | is_generated bool 2 classes | length_bytes int64 7 9.18M | extension stringclasses 20 values | filename stringlengths 1 141 | content stringlengths 7 9.18M |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
9c3d3e431c59e73e86ca4fd5f8dd4d4f4b51f010 | 426771aafae3b1784b502b7ce868a83f58c2143a | /toh_cs250m.R | 9c1a19609b3d5a188cc65746d61e62194eec5fd7 | [] | no_license | cyborginhas/gis714final | 574b0d1bb57af9a2b17fa2c2017a0546b9e795eb | cdc3b84b1e921b1fdddff41771cde9d82d89247f | refs/heads/main | 2023-04-19T07:32:17.227502 | 2021-05-05T04:44:42 | 2021-05-05T04:44:42 | 364,436,189 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 12,908 | r | toh_cs250m.R | #Load packages ----
# Attach all packages used below (modelling, GIS, plotting, parallel).
# NOTE(review): require() only warns and returns FALSE when a package is
# missing; library() is preferred in scripts because it fails loudly.
require(biomod2)
require(abind)
require(ade4)
require(caret)
require(checkmate)
require(dismo)
require(doParallel)
require(dplyr)
require(earth)
require(ecospat)
require(ENMeval)
require(foreach)
require(foreign)
require(gam)
require(gbm)
require(ggplot2)
require(Hmisc)
require(lattice)
require(MASS)
require(maxnet)
require(mda)
require(mgcv)
require(methods)
require(nnet)
require(parallel)
require(PresenceAbsence)
require(pROC)
require(purrr)
require(randomForest)
require(raster)
require(rasterVis)
require(reshape)
require(rlang)
require(rpart)
require(sp)
require(stats)
require(testthat)
require(tidyr)
require(utils)
require(rgdal)
# NOTE(review): setwd() makes the script machine-specific; every relative
# output path below (saveRDS, write.table, writeRaster, png) resolves here.
setwd("~/Desktop/tohexports/")
#----
# 1. Setup study extent----
# Lower-48 states polygon, reprojected to geographic WGS84 so it matches the
# lat/long occurrence coordinates used below.
usa <- readOGR('~/Google Drive/Shared drives/Data/Vector/USA/us_lower_48_states.gpkg')
usa <- spTransform(usa, CRS('+proj=longlat +datum=WGS84 +no_defs +ellps=WGS84 +towgs84=0,0,0'))
# Study area: 10 mid-Atlantic / Appalachian states plus DC.
borders <- usa[usa$STATE_NAME%in%c('District of Columbia', 'Delaware', 'New Jersey', 'Maryland',
'West Virginia', 'Ohio','Pennsylvania','Virginia', 'New York', 'Kentucky','Tennessee'),]
# 2. Load species data----
#FIA data----
#FIA pts plot
#tiff('fiapts.tiff', units="in", width=8, height=8, res=300, compression = 'lzw')
#plot(aa.fia.pts,pch=21, axes=TRUE,bg='blue', xlim=c(-90,-72))
#plot(borders, add=TRUE)
#dev.off()
#Citizen science data----
# Citizen-science occurrences; columns 4-5 are taken as the coordinate pair
# (assumed Latitude/Longitude -- TODO confirm against the CSV headers).
aa.cs.ib <- read.csv2('~/Google Drive/My Drive/PhD/pops/slf/toh_exports/cs_ib.csv')[, c(4,5)]
aa.cs.oob <- read.csv2('~/Google Drive/My Drive/PhD/pops/slf/toh_exports/cs_oob.csv')[, c(4,5)]
names(aa.cs.ib) <- c('Latitude', 'Longitude')
# Reorder to (Longitude, Latitude): the x,y order SpatialPoints() expects.
aa.cs.ib<-aa.cs.ib[,c(2,1)]
aa.cs.pts <- SpatialPoints(coords = aa.cs.ib)
# Assigning the CRS only labels the points, it does not reproject; this
# assumes the CSV coordinates are already geographic WGS84.
crs(aa.cs.pts) <-crs(borders)
aa.cs.pts <- crop(aa.cs.pts, borders)
plot(aa.cs.pts)
#CS pts plot
#tiff('fiapts.tiff', units="in", width=8, height=8, res=300, compression = 'lzw')
#plot(aa.cs.pts,pch=21, axes=TRUE,bg='salmon', xlim=c(-90,-72))
#plot(borders, add=TRUE)
#dev.off()
#Examine distribution of species data points (FIA vs. CS)----
#Create hex grids of various sizes across study area, pull out areas where fia sampling effort same as cs
# (The whole hex-grid sampling-effort comparison below is retained but
# commented out; it was exploratory and is not needed for the model run.)
#cs_l<-seq(from = 0.2, to = 2, by=0.1)
#list_id<-seq(from=1, to= 19, by=1)
#samps<-as.data.frame(cbind(cs_l,list_id))
#my_list<-list()#list of sample point data derived from individual hex polygons (by res)
#my_list2<-list()#list of hex polygons associated individual hex polygons (by res)
#library(GISTools)
#borders<-gUnion(borders,borders)
#plot(borders)
#for(i in 1:length(samps$cs_l)){
#hex_points <- spsample(borders, type = "hexagonal", cellsize = samps[i,1])
#hex_grid <- HexPoints2SpatialPolygons(hex_points, dx = cs_l[i])
#fia_grid<-as.data.frame(poly.counts(aa.fia.pts, hex_grid))
#cs_grid<-as.data.frame(poly.counts(aa.cs.pts, hex_grid))
#res<-cs_l[i]
#listid<-samps[i,2]
#grid_pts<-cbind(fia_grid,cs_grid, rownames(fia_grid),res,listid)
#names(grid_pts)<-c("fia","cs","id","res","listid")
#grid_pts$diff<-round((grid_pts$fia/grid_pts$cs),2)
#test<-hex_grid[c(grid_pts$id)]
#my_list[[(length(my_list) + 1)]]<-as.data.frame(grid_pts)
#my_list2[[length(my_list2) + 1]]<-hex_grid
#}
#library(plyr)
#my_df<-(ldply(my_list)[,])
#my_df<-my_df[my_df$diff>=0.9 & my_df$diff<=1.1,]
#my_df$sefia<-my_df$fia/my_df$res
#my_df$secs<-my_df$cs/my_df$res
#my_df<-unique(my_df)
#hist(my_df$sefia)
#my_df$hexidn<-as.numeric(sub("ID","",my_df$id))
#my_list3<-list()#extract polygons where sampling effort is even (fia vs. cs) and high (points per area)
#for (i in 1:length(my_df$listid)){
#p<-my_list2[[my_df$listid[i]]][my_df$hexidn[i]]
#my_list3[[length(my_list3) + 1]]<-p
#}
#for (i in 1:length(my_list3)){
#my_list3[[i]]@polygons[[1]]@ID<-paste0(my_df[i,3],"_",my_df[i,2])
#}
#evenhex<-SpatialPolygons(lapply(my_list3, function(x){x@polygons[[1]]}))
#evenhex<-gUnion(evenhex,evenhex)
#evenhex<-crop(evenhex,borders)
#plot(borders)
#plot(evenhex,add=TRUE)
# load the environmental raster layers (could be any supported format by the raster package)----
# Environmental variables extracted from Worldclim
#myExpl_250m <- raster::getData('worldclim', download=T, var='bio', res=10)
# Stack every raster found in the Worldclim directory, then keep layers 1
# and 12 (annual mean temperature and annual precipitation).
biodir <- '~/Google Drive/Shared drives/APHIS Projects/shared resources/data/worldclim1k/US/'
biovars <- stack(lapply(X=list.files(biodir), FUN=function(X){raster(paste(biodir, X, sep=''))}))
biovars <- stack(biovars[[1]],biovars[[12]])
# Expand the study extent by 10% so cropping does not clip edge cells.
e<-extent(borders)
e<-e*1.1
#roads.d <- raster('~/Google Drive/Shared drives/Data/Raster/Regional/roadsD_NE_1k.tif')
#pop<-raster('~/Google Drive/Shared drives/Data/Raster/Regional/popden_NE_1k.tif')
rails<-raster('~/Google Drive/Shared drives/Data/Raster/Regional/railsD_NE_1k.tif')
#canopy<-raster('~/Google Drive/Shared drives/Data/Raster/Regional/canop_NE_1k.tif')
# The 250 m population raster defines the target grid for resampling.
pop<-raster('~/Google Drive/Shared drives/Data/Raster/Regional/popden_NE_250m.tif')
biovars<-crop(biovars,e)
rails<-crop(rails,e)
biovars <- resample(biovars, pop, method='bilinear')
rails <- resample(rails, pop, method='bilinear')
# Final predictor stack: mask and crop to the study states, then rename.
myExpl_250m <- mask(stack(biovars,rails), borders)
myExpl_250m <- crop(myExpl_250m, borders)
myExpl_250m <- stack(myExpl_250m)
names(myExpl_250m)<-c("BIO1","BIO12","rails")
#Plot predictors
#tiff('/~Desktop/myExpl_250m.tiff', units="in", width=8, height=8, res=300, compression = 'lzw')
#par(oma=c(0,0,1,2))
#plot(myExpl_250m[[1]],axes=F)
#plot(evenhex, lwd=2)
#dev.off()
#myExpl_df<-as.data.frame(myExpl_250m)# Check for multicollinearity
#M <- cor(na.omit(myExpl_df))
#tiff('~/Desktop/predcorrplot_evenhex_rem.tiff', units="in", width=8, height=8, res=300, compression = 'lzw')
#corrplot.mixed(M,tl.cex=0.5)#removed biovar6 and roads bc of multicollinearity issues
#dev.off()
#3. Rasterize response data to create presence and pseudoabsences----
#3A. FIA data (NOTE(review): despite this heading, the code below uses the
# citizen-science points aa.cs.pts, not FIA)
# Count occurrences per 250 m cell, then reduce to 0/1 presence. The
# (predictor*0)+1 term propagates the predictor's NA cells into the mask.
aa.cs.ras <- rasterize(x=aa.cs.pts, y=myExpl_250m[[1]], fun='count', background=0);
aa.cs.ras <- (aa.cs.ras*((myExpl_250m[[1]]*0)+1))>0
a2.cs.pts <- rasterToPoints(aa.cs.ras)
myRespName <- 'A_altissima_cs'
myResp <- a2.cs.pts[, 3] # the presence/absences data for our species
# Zero cells become NA so biomod treats them as candidates for random
# pseudo-absence sampling rather than as true absences.
myResp[myResp==0] <- NA # setting 'true absences' to undefined
myRespXY <- a2.cs.pts[, c(1,2)] # the XY coordinates of species data
sum(na.omit(myResp))
# One pseudo-absence set, sized to match the presence count (prevalence 0.5).
myBiomodData.cs250m <- BIOMOD_FormatingData(resp.var = myResp,
expl.var = myExpl_250m,
resp.xy = myRespXY,
resp.name = myRespName,
PA.nb.rep = 1,
PA.strategy = 'random',
PA.nb.absences = sum(myResp, na.rm=T))
plot(myBiomodData.cs250m)
saveRDS(myBiomodData.cs250m, file = "myBiomodData.cs.rds")#3617 presences
#3Ai. Defining Models Options using default options.
myBiomodOption <- BIOMOD_ModelingOptions()
#3Aii. Computing the models
getwd()#outputs placed here
# Fit seven algorithms, 5 evaluation runs each, scored with TSS.
# NOTE(review): DataSplit = 100 calibrates every run on 100% of the data,
# so the 'Testing.data' TSS scores extracted below are effectively
# resubstitution scores -- confirm this is intentional.
myBiomodModelOut.cs250m <- biomod2::BIOMOD_Modeling(myBiomodData.cs250m,
models = c(#'CTA', 'SRE', 'MAXENT.Phillips','MAXENT.Phillips.2'
'GLM',
'GAM',
'MARS',
'FDA',
'GBM',
'RF',
'ANN'),
models.options = myBiomodOption,
NbRunEval = 5,
DataSplit = 100,
Prevalence = 0.5,
VarImport = 3,
models.eval.meth = 'TSS',
SaveObj = TRUE,
rescal.all.models = FALSE,
do.full.models = FALSE,
modeling.id=paste(myRespName,"FirstModeling.cs250m",sep=""))
saveRDS(myBiomodModelOut.cs250m, file = "myBiomodModelOut.cs250m.rds")
myBiomodModelEval.cs250m <- get_evaluations(myBiomodModelOut.cs250m) # get all models evaluation
dimnames(myBiomodModelEval.cs250m) # print the dimnames of this object
allmodels.cs250m.eval<-myBiomodModelEval.cs250m[c('TSS'),"Testing.data",,,] # print the eval scores of all selected models
allmodels.cs250m.eval<-as.data.frame(allmodels.cs250m.eval)
names(allmodels.cs250m.eval)<-"TSS"
models<-rownames(allmodels.cs250m.eval)
res<-250 # resolution tag (m) for the exported tables
db<-"cs" # data-source tag: citizen science
allmodels<-cbind(models,as.data.frame(allmodels.cs250m.eval),res,db)
# NOTE(review): append = TRUE with col.names = TRUE re-writes the header
# on every run and raises a warning; acceptable for ad-hoc logging.
write.table(allmodels,"allmodels.csv", sep=",", row.names = FALSE,col.names = TRUE,append=TRUE)
vars_importance.cs250m <- as.data.frame(get_variables_importance(myBiomodModelOut.cs250m)) # print variable
# Mean importance of each predictor across models/runs.
var_means<-rowMeans((vars_importance.cs250m),na.rm=TRUE)
res<-250
db<-"cs"
pred<-names(var_means)
var_means<-cbind(pred,as.data.frame(var_means),res,db)
#tiff('~/Desktop/tohexports/cs250mmeanvarimp.tiff', units="in", width=8, height=8, res=300, compression = 'lzw')
#barplot(colMeans(vars_imp_means_cs250m), ylab="Mean Variance Importance Score", xlab="Predictors")
#dev.off()
# NOTE(review): rank() applied to a whole data frame ranks all cells
# jointly (negated so rank 1 = most important); if a per-predictor rank
# was intended, verify this output before use.
var_rank<-rank((vars_importance.cs250m)*-1,ties.method="average",na.last=NA)
var_rank<-cbind(pred,as.data.frame(var_rank),res,db)
write.table(var_means,"var_means.csv", row.names = FALSE, col.names = TRUE, append=TRUE, sep=",")#remove roads,canopy, and population
write.table(var_rank,"var_ranks.csv", row.names = FALSE, col.names = TRUE, append=TRUE, sep=",")#remove roads,canopy, and population
#3AiiiEnsembling the models
# Combine all fitted models into mean and TSS-weighted-mean ensembles.
# NOTE(review): eval.metric.quality.threshold = 0.01 admits essentially
# every model (TSS > 0.01); raise it to exclude poorly performing models.
myBiomodEM.cs250m <- BIOMOD_EnsembleModeling(modeling.output = myBiomodModelOut.cs250m,
chosen.models = 'all',
em.by='all',
eval.metric = c('TSS'),
eval.metric.quality.threshold = c(0.01),
prob.mean = T,
prob.cv = F, #don't use
prob.ci = F, #prob.ci.alpha = 0.05,
prob.median = F,
committee.averaging = F,
prob.mean.weight = T,
prob.mean.weight.decay = 'proportional' )
saveRDS(myBiomodEM.cs250m, file = "myBiomodEM.cs250m.rds")
ensmodeleval<-get_evaluations(myBiomodEM.cs250m) # get evaluation scores
# [[2]] selects the second ensemble's score table by position
# (presumably the weighted mean -- TODO confirm the ensemble order).
ensmodeleval<-ensmodeleval[[2]]
stats<-row.names(ensmodeleval)
ensmodeleval<-cbind(stats,as.data.frame(ensmodeleval))
write.table(append = T, sep=",", ensmodeleval, "enseval.csv", row.names = FALSE, col.names = TRUE)
###3Aiv. projection over the globe under current conditions
# Project every individual model onto the 250 m predictor stack.
myBiomodProj.cs250m <- BIOMOD_Projection(modeling.output = myBiomodModelOut.cs250m,
new.env = myExpl_250m,
proj.name = 'current.cs',
selected.models = 'all',
binary.meth = 'TSS',
compress = 'xz',
clamping.mask = F,
output.format = '.grd')
saveRDS(myBiomodProj.cs250m, file = "myBiomodProj.cs250m.rds")
myCurrentProj.cs <- get_predictions(myBiomodProj.cs250m) # if you want to make custom plots, you can also get the projected map
# Forecast the ensemble models over the same projection.
myBiomodEF.cs250m <- BIOMOD_EnsembleForecasting(EM.output = myBiomodEM.cs250m,
projection.output = myBiomodProj.cs250m)
saveRDS(myBiomodEF.cs250m, file = "myBiomodEF.cs250m.rds")
plot(myBiomodEF.cs250m)
# First ensemble layer; biomod2 stores probabilities scaled by 1000, so
# divide to recover values on [0, 1].
pred.out.cs250m <- myBiomodEF.cs250m@proj@val[[1]]
pred.out.cs250m[]<-pred.out.cs250m[]/1000
plot(pred.out.cs250m)
writeRaster(pred.out.cs250m, filename = 'toh_ens_cs250m_wgs.tif', format="GTiff")
# Reproject from WGS84 to Lambert conformal conic (NAD83, metres) for export.
crs<-"+proj=lcc +lat_1=20 +lat_2=60 +lat_0=40 +lon_0=-96 +x_0=0 +y_0=0 +datum=NAD83 +units=m +no_defs"
pred.out.cs250m.lcc<-(projectRaster(pred.out.cs250m,crs=crs))
writeRaster(pred.out.cs250m.lcc, filename = 'toh_ens_cs250m_lcc.tif', format="GTiff")
#3Av. output plot
rpts <- rasterToPoints(pred.out.cs250m.lcc)
rdf <- as.data.frame(rpts)
# NOTE(review): aes(fill = rdf[, 3]) captures the data frame from the
# enclosing environment instead of mapping a column name; it works here
# but is fragile -- consider mapping the column by name instead.
ggsdm <- ggplot() + geom_raster(data=rdf, aes(x=x, y=y, fill=rdf[,3])) +
geom_path(data=borders, aes(x=long, y=lat, group=group), col='white', lwd=1.1, alpha=.3) +
scale_fill_continuous(type='viridis') + theme_void() + theme(legend.position='none')
# Timestamped PNG filename avoids overwriting earlier exports.
png(paste('pred.out.cs.250m.lcc',
gsub(':', '', substr(Sys.time(), 12, 19)), '.png', sep=''),
height=1080, width=2160); plot(ggsdm); dev.off()
|
98098a482a68ab1ad6ff9946cfee38665855ce10 | 71e68ac700c98810a21000184a156852b99fc134 | /R/upliftKNN.R | 214daf27aacfe15d5181e1857f69991cd00c1e2e | [] | no_license | cran/uplift | 5b31835ba3696e4e8993ff0df485a12934ded08c | 95965272e71c312623c95c439fb0b84f95c185b7 | refs/heads/master | 2020-05-16T16:26:12.053179 | 2014-03-17T00:00:00 | 2014-03-17T00:00:00 | 17,700,694 | 13 | 12 | null | null | null | null | UTF-8 | R | false | false | 4,119 | r | upliftKNN.R | ######################################################################
# Uplift K-Nearest Neighbour Regression and Classification
######################################################################
# train = matrix or data frame of training set cases.
# test = matrix or data frame of test set cases. A vector will be interpreted as a row vector for a single case.
# y = a numeric response (must be coded as 0/1 for binary response)
# ct = factor or numeric vector representing the treatment to which each train case is assigned.
# At least 2 groups is required (e.g. treatment and control). Multi-treatments is
# also supported
# k = number of neighbours considered.
# dist.method = the distance to be used in calculating the neighbors. Any method supported in function
# \link{dist} is valid.
# p = the power of the Minkowski distance.
# ties.meth = method to handle ties for the kth neighbor. The default is "min" which uses all
# ties. Alternatives include "max" which uses none if there are ties for the k-th
# nearest neighbor, "random" which selects among the ties randomly and "first"
# which uses the ties in their order in the data.
# agg.method = method to combine responses of the nearest neighbors, defaults to "majority"
# for classification and "mean" for continuous responses
# majority: with ties broken at random
# In detail, the logic of the code follows closely that of the knn and xknnflex packages, the latter
# currently discontinued on CRAN.
### majority vote function
# Most frequent value of `x`; ties between the top-voted levels are broken
# at random. The winning level label is coerced back to numeric, so labels
# are expected to be numeric-like (e.g. 0/1 class codes); non-numeric
# labels would coerce to NA with a warning.
majority <- function(x) {
  x <- as.factor(x)
  n <- nlevels(x)
  # tabulate() counts one vote per observation, replacing the original
  # element-wise loop over the 1:length(x) anti-pattern.
  votes <- tabulate(as.integer(x), nbins = n)
  # Sort levels by votes (descending); a random permutation serves as the
  # secondary sort key, so exact ties are resolved at random.
  as.numeric(levels(x)[order(votes, decreasing = TRUE, sample(seq_len(n), n))[1]])
}
### uplift k-nearest neighbour
# For each test case, the k nearest training neighbours are found separately
# within every treatment group and their responses aggregated, giving one
# predicted outcome per treatment. Returns an nte x n_groups matrix whose
# columns are named after the treatment groups (factor-level order) and
# whose rows follow the test cases. See the parameter notes above.
upliftKNN <- function(train, test, y, ct, k = 1, dist.method = "euclidean", p = 2,
                      ties.meth = "min", agg.method = "mean") {
  ### perform various checks
  train <- as.matrix(train)
  # A bare vector is interpreted as a single test case (one row).
  if(is.null(dim(test))) dim(test) <- c(1, length(test))
  test <- as.matrix(test)
  if(any(is.na(train)) || any(is.na(test)) || any(is.na(y)) || any(is.na(ct)))
    stop("upliftKNN: no missing values are allowed")
  ptr <- ncol(train)
  ntr <- nrow(train)
  fct <- as.factor(ct)
  if (length(unique(fct)) < 2) stop("uplift: ct must have at least 2 distinct values")
  if (!is.numeric(y)) stop("uplift: y must be a numeric variable")
  if (length(y) != ntr) stop("uplift: 'train' and 'y' have different lengths")
  if (length(ct) != ntr) stop("uplift: 'train' and 'ct' have different lengths")
  if(ntr < k) {
    warning(gettextf("uplift: k = %d exceeds number %d of patterns. Reset k = %d.", k, ntr, ntr),
            domain = NA)
    k <- ntr
  }
  if (k < 1)
    stop(gettextf("uplift: k = %d must be at least 1", k), domain = NA)
  nte <- nrow(test)
  if(ncol(test) != ptr) stop("uplift: dims of 'test' and 'train' differ")
  am <- charmatch(tolower(agg.method), c("mean", "majority"))
  if (is.na(am)) stop("uplift: agg.method must be one of 'mean' or 'majority'")
  # FIX: use the canonical name matched above. The original validated the
  # input via charmatch() but then passed the raw string to apply(), so a
  # case variant such as "Mean" passed validation yet failed at
  # aggregation time. ("majority" relies on the helper defined above.)
  agg.method <- c("mean", "majority")[am]
  ### compute the full distance matrix over train and test cases jointly
  x <- rbind(train, test)
  dist. <- as.matrix(dist(x, dist.method, p))
  ### only need the rows for the train data and columns for the test data
  dist. <- as.data.frame(dist.[seq_len(ntr), ntr + seq_len(nte)])
  ### split distance rows and responses by treatment group (same grouping)
  dist.split <- split(dist., fct, drop = TRUE)
  y.split <- split(y, ct, drop = TRUE)
  # Per group: rank training cases by distance for every test case;
  # after t(), rows index test cases and columns index group members.
  # NOTE(review): a group containing a single training case may yield a
  # mis-shaped rank matrix (apply drops to a vector) -- verify before use.
  ranks <- lapply(seq_along(dist.split), function(i) t(apply(dist.split[[i]], 2,
                  function(x) rank(x, ties.method = ties.meth))))
  # Per group and test case: aggregate responses of neighbours with rank <= k.
  agg <- lapply(seq_along(ranks), function(i)
    apply(ranks[[i]], 1, function(x)
      apply(data.frame(y.split[[i]][x <= k]), 2, agg.method)))
  # assemble output: one column per treatment group, named below
  res <- matrix(nrow = nte, ncol = length(dist.split))
  for (i in seq_along(dist.split)) {
    res[, i] <- agg[[i]]
  }
  colnames(res) <- names(dist.split)
  res
}
|
ac1e5f26276bf48cecf03ad8217f05f9a75014cf | 8b7b04400c9e413fca2223be167b560d3825818d | /tests/testthat/test_RCMIP5.R | c73a9971c76fedafa140d8d1abd72d7e31a7c971 | [] | no_license | cran/RCMIP5 | 8d597ed7452cac82926ce421042c31a020799ce9 | 488bafee609f8c2695290476735159de2d29d862 | refs/heads/master | 2021-01-19T01:43:20.061269 | 2016-07-30T18:53:27 | 2016-07-30T18:53:27 | 25,301,608 | 1 | 0 | null | null | null | null | UTF-8 | R | false | false | 3,568 | r | test_RCMIP5.R | # Testing code for the RCMIP5 scripts in 'RCMIP5.R'
# Uses the testthat package
# See http://journal.r-project.org/archive/2011-1/RJournal_2011-1_Wickham.pdf
library(testthat)
# To run this code:
# source("RCMIP5.R")
# library(testthat)
# test_file("tests/testthat/test_RCMIP5.R")
context("RCMIP5")
# Storage backends under test; most tests iterate over both so behaviour
# is identical regardless of how cmip5data holds its values internally.
implementations <- c("data.frame", "array")
test_that("cmip5data print method works", {
    # Printing a cmip5data object should mention "CMIP5" for every storage
    # backend; beyond that there is little to assert except "does not error".
    for (impl in implementations) {
        obj <- cmip5data(2000:2005, loadAs = impl)
        expect_output(print(obj), "CMIP5", info = impl)
    }
})
test_that("cmip5data summary method detects summaries", {
# NOTE(review): only the "data.frame" backend is exercised; the TODO below
# marks the intended loop over all implementations.
i <- "data.frame"
# for(i in implementations) TODO
{
d <- cmip5data(2000:2005, Z=T, loadAs=i)
expect_output(print(summary(d)), "CMIP5")
# Summary should let user know data have been run through stat fn
da <- makeAnnualStat(d)
expect_output(print(summary(da)), "annual summary", info=i)
dm <- makeMonthlyStat(d)
expect_output(print(summary(dm)), "monthly summary", info=i)
dz <- makeZStat(d)
expect_output(print(summary(dz)), "Z summary", info=i)
dg <- makeGlobalStat(d)
expect_output(print(summary(dg)), "spatial summary", info=i)
# Multiple stat functions should be detected (each applied label persists)
dag <- makeGlobalStat(da)
expect_output(print(summary(dag)), "annual summary", info=i)
expect_output(print(summary(dag)), "spatial summary", info=i)
daz <- makeZStat(da)
expect_output(print(summary(daz)), "annual summary", info=i)
expect_output(print(summary(daz)), "Z summary", info=i)
dmg <- makeGlobalStat(dm)
expect_output(print(summary(dmg)), "monthly summary", info=i)
expect_output(print(summary(dmg)), "spatial summary", info=i)
dmz <- makeZStat(dm)
expect_output(print(summary(dmz)), "monthly summary", info=i)
expect_output(print(summary(dmz)), "Z summary", info=i)
# All filter functions should be detected (one dimension at a time)
df <- filterDimensions(d, yearRange=floor(range(d$time)))
expect_output(print(summary(df)), "filtered", info=i)
df <- filterDimensions(d, monthRange=c(1,2))
expect_output(print(summary(df)), "filtered", info=i)
df <- filterDimensions(d, lonRange=range(d$lon))
expect_output(print(summary(df)), "filtered", info=i)
df <- filterDimensions(d, latRange=range(d$lat))
expect_output(print(summary(df)), "filtered", info=i)
df <- filterDimensions(d, ZRange=range(d$Z))
expect_output(print(summary(df)), "filtered", info=i)
}
})
test_that("as.data.frame works", {
    # Conversion must yield a plain data.frame with the canonical column
    # set, regardless of the storage backend.
    for (impl in implementations) {
        converted <- as.data.frame(cmip5data(2000:2002, Z = TRUE, loadAs = impl))
        expect_is(converted, "data.frame", info = impl)
        expect_equal(names(converted), c("lon", "lat", "Z", "time", "value"), info = impl)
    }
})
test_that("as.array works", {
    # With Z the result is lon x lat x Z x time; without Z the singleton Z
    # dimension is dropped unless drop = FALSE is supplied.
    for(i in implementations) {
        arr <- as.array(cmip5data(2000:2002, Z=T))
        expect_is(arr, "array", info=i)
        expect_equal(dim(arr), c(10, 10, 5, 36), info=i)
        # FIX: the original passed a stray `info=i` argument to as.array()
        # itself; `info` belongs on the expectations, not the conversion.
        arr <- as.array(cmip5data(2000:2002))
        expect_is(arr, "array", info=i)
        expect_equal(dim(arr), c(10, 10, 36), info=i)
        arr <- as.array(cmip5data(2000:2002), drop=FALSE)
        expect_is(arr, "array", info=i)
        expect_equal(dim(arr), c(10, 10, 1, 36), info=i)
    }
})
test_that("weighted.mean works", {
    # No expectations have been written yet; skip explicitly so the empty
    # loop is not silently counted as a passing test.
    skip("weighted.mean tests not yet implemented")
    for(i in implementations) {
    }
})
|
3106e1168c48c8f88be71377fa084834431a8176 | 1fdd9cbe860fe7582fd46a3fde14be950d3f8d42 | /man/rescaling_summarise.Rd | 0389c1d58571277bb21b2f55ad52defbbc03b076 | [
"MIT"
] | permissive | WeiquanLuo/stNet | c13b9e9236b1ad8eb8142c1a68b2ce99d520421b | ecb015ea3533d31edcc957e396b3b2004d7a5c40 | refs/heads/master | 2020-09-14T13:26:25.242291 | 2019-11-22T18:38:20 | 2019-11-22T18:38:20 | 223,140,303 | 0 | 0 | null | null | null | null | UTF-8 | R | false | true | 679 | rd | rescaling_summarise.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/rescaling_summarise.R
\name{rescaling_summarise}
\alias{rescaling_summarise}
\title{rescaling the temporal resolution by method}
\usage{
rescaling_summarise(data, rescallingMethod)
}
\arguments{
\item{data}{a list of numeric data}
\item{rescallingMethod}{a string naming a group summary method to apply,
such as min, max, mean, etc. (see: ??dplyr::summarise)}
}
\description{
rescaling the temporal resolution by method
}
\examples{
data <- c(NA,round(runif(n = 20, min = 0, max = 100)))
rescallingMethod <- "max"
rescaling_summarise(data= data, rescallingMethod = rescallingMethod)
}
|
40b0a87bb488f46ce6840202f37ffb8ec71b63ac | 769898772e7225264fd942b2e5a666af3105d3a1 | /man/impute.loess.Rd | 80a5003c90529114311ddc782355128d99cce6c3 | [] | no_license | cran/spatialEco | 3fa4393496453b091c547cc7601a984e54bf2be6 | 22944d790b25451c848d420b61d386471073b1ee | refs/heads/master | 2023-07-08T05:04:12.117110 | 2023-06-30T07:40:02 | 2023-06-30T07:40:02 | 30,218,937 | 5 | 3 | null | null | null | null | UTF-8 | R | false | true | 1,239 | rd | impute.loess.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/impute.loess.R
\name{impute.loess}
\alias{impute.loess}
\title{Impute loess}
\usage{
impute.loess(y, s = 0.2, smooth = FALSE)
}
\arguments{
\item{y}{A vector to impute}
\item{s}{Smoothing parameter (the loess span; larger values give a smoother fit)}
\item{smooth}{(FALSE/TRUE) Smooth data, else only replace NA's}
}
\value{
a vector the same length as x with NA values filled or the
data smoothed (or both)..
}
\description{
Imputes missing data or smooths using Loess regression
}
\details{
Performs a local polynomial regression to smooth data or to
impute NA values. The minimal number of non-NA observations to reliably
impute/smooth values is 6. There is not a reliably way to impute NA's
on the tails of the distributions so if the missing data is in the
first or last position of the vector it will remain NA. Please note
that smooth needs to be TRUE to return a smoothed vector, else only
NA's will be imputed.
}
\examples{
data(cor.data)
d <- cor.data[[1]][,2]
plot(d, type="l")
lines(impute.loess(d, s=0.3, smooth=TRUE), lwd=2, col="red")
# add some NA's
d <- d[1:100]
d[sample(30:70, 5)] <- NA
d
impute.loess(d, s=0.2)
}
\author{
Jeffrey S. Evans <jeffrey_evans<at>tnc.org>
}
|
d7bee71ba1fcf9f39868765eb1370ab98e6ee1d3 | 5a4b23e8a1b0d2ba1b861c358b1d7c5c3907883a | /myrepo-covid19/Predykcja.r | 515faf4eb08c87ee115f6ab7dd4f375c21d25d37 | [] | no_license | juliakoott/myrepo | 924be1395994f01974debbd87fc69567d9c4c874 | 365506fc737d501b1c7cc7ee38aca181a376e98d | refs/heads/master | 2021-05-24T08:31:38.275806 | 2020-09-10T18:30:28 | 2020-09-10T18:30:28 | 253,471,926 | 1 | 2 | null | 2020-05-10T19:25:54 | 2020-04-06T11:03:26 | null | UTF-8 | R | false | false | 5,018 | r | Predykcja.r | #Instalacja paczek
# Package installation (one-off; kept commented out).
#install.packages("data sets")
#require("data sets")
#install.packages("ggplot2")
require("ggplot2")
#install.packages("GGally")
require("GGally")
#install.packages("COVID19")
require("COVID19")
######################################################################
# Argument reference for COVID19::covid19(), copied for convenience:
#ISO | vector of ISO codes to retrieve (alpha-2, alpha-3 or numeric). Each country is identified by one of its ISO codes
#level | integer. Granularity level. 1: country-level data. 2: state-level data. 3: city-level data.
#start | the start date of the period of interest.
#end | the end date of the period of interest.
#vintage | logical. Retrieve the snapshot of the dataset at the end date instead of using the latest version? Default FALSE.
#raw | logical. Skip data cleaning? Default FALSE.
#cache | logical. Memory caching? Significantly improves performance on successive calls. Default TRUE.
# NOTE(review): each demo call below downloads a dataset and discards the
# result (nothing is assigned); they only cost download time.
# Worldwide data by country
covid19()
# Worldwide data by state
covid19(level = 2)
# US data by state
covid19("USA", level = 2)
# Swiss data by state (cantons)
covid19("CHE", level = 2)
# Italian data by state (regions)
covid19("ITA", level = 2)
# Italian and US data by city
#####################################################################
# Load data for Poland (original comment in Polish: "Wczytanie danych dla Polski")
covid_PL = covid19("PL")
#covid_PL = cbind(covid_PL, covid_PL_dni)
# NOTE(review): columns are selected by position; 3/4/6 are taken to be
# deaths / confirmed / recovered respectively -- verify against the
# covid19() column order before relying on these.
covid_PL_dates = covid_PL[,2]
covid_PL_deaths = cbind(covid_PL_dates,covid_PL[,3])
covid_PL_infected = cbind(covid_PL_dates,covid_PL[,4])
covid_PL_recovered = cbind(covid_PL_dates,covid_PL[,6])
y_deaths = data.matrix(covid_PL_deaths[,2])
x_deaths <- as.Date(covid_PL_deaths$date, "%Y/%m/%d")
y_infected = data.matrix(covid_PL_infected[,2])
x_infected <- as.Date(covid_PL_infected$date, "%Y/%m/%d")
y_recovered = data.matrix(covid_PL_recovered[,2])
x_recovered <- as.Date(covid_PL_recovered$date, "%Y/%m/%d")
#####################################################################
# Basic plots for Poland (axis/title strings kept in Polish on purpose:
# "Czas" = time, "l. ofiar" = deaths, "l. zarażonych" = infected,
# "l. wyleczonych" = recovered).
plot(x_deaths,y_deaths,xlab="Czas", ylab="l. ofiar")
title("COVID19 | Polska | zgony")
grid(nx = NULL, ny = NULL, col = "lightgray", lty = "dotted")
plot(x_infected,y_infected,xlab="Czas", ylab="l. zarażonych")
title("COVID19 | Polska | zarażeni")
grid(nx = NULL, ny = NULL, col = "lightgray", lty = "dotted")
plot(x_recovered,y_recovered,xlab="Czas", ylab="l. wyleczonych")
title("COVID19 | Polska | wyleczeni")
grid(nx = NULL, ny = NULL, col = "lightgray", lty = "dotted")
####################################################################
# Prediction (original comment in Polish: "Predykcja")
#https://www.dataquest.io/blog/statistical-learning-for-predictive-modeling-r/
# Keep only days with at least one confirmed case, then replace the date
# column with a simple day index 1..n so lm() fits confirmed ~ day number.
covid_PL = covid_PL[covid_PL[,4] > 0,]
covid_PL_dni = as.data.frame(c(1:nrow(covid_PL[,2])))
names(covid_PL_dni)[1] <- "dni"
covid_PL[,2] = covid_PL_dni
ggpairs(data=covid_PL, columns=c(4,2), title="COVID-19 PL")
# Simple linear model: confirmed cases as a function of the day index.
model <- lm(confirmed ~ date, data = covid_PL)
summary(model)
ggplot(data=covid_PL, aes(model$residuals)) +
geom_histogram(binwidth = 1, color = "black", fill = "purple4") +
theme(panel.background = element_rect(fill = "white"),
axis.line.x=element_line(),
axis.line.y=element_line()) +
ggtitle("Histogram for Model Residuals")
###TO
ggplot(data = covid_PL, aes(x = date, y = confirmed)) +
geom_point() +
stat_smooth(method = "lm", col = "dodgerblue3") +
theme(panel.background = element_rect(fill = "white"),
axis.line.x=element_line(),
axis.line.y=element_line()) +
ggtitle("Linear Model Fitted to Data")
# NOTE(review): this assignment is dead code -- it is immediately
# overwritten by the next line.
predykcja = data.frame(predict(model, covid_PL))
# NOTE(review): nrow(covid_PL_dates) counts rows of the *unfiltered* data,
# while the model's day index was rebuilt after filtering to confirmed > 0;
# the future-day offsets below may be misaligned -- verify.
predykcja = predict(model,newdata = data.frame(date = c((nrow(covid_PL_dates) + 1),(nrow(covid_PL_dates) + 2),(nrow(covid_PL_dates) + 3),(nrow(covid_PL_dates) + 4),(nrow(covid_PL_dates) + 5))))
#######
#Estimate = lm(date ~ confirmed, data = covid_PL)
#logEstimate = lm(date ~ log(confirmed), data = covid_PL)
#plot(covid_PL$confirmed,predict(Estimate),type='l',col='blue')
#lines(covid_PL$confirmed,predict(logEstimate),col='red')
#points(covid_PL$confirmed,covid_PL$date)
######
#http://www.sthda.com/english/articles/40-regression-analysis/162-nonlinear-regression-essentials-in-r-polynomial-and-spline-regression-models/
# Build the model: degree-5 polynomial in the day index.
model2 <- lm(confirmed ~ poly(date,5 , raw = TRUE), data = covid_PL)
# Make predictions (in-sample fit, for plotting against the linear model).
predykcja2 <- predict(model2)
plot(covid_PL$date,predict(model2),type='l',col='blue')
lines(covid_PL$date,predict(model),col='red')
points(covid_PL$date,covid_PL$confirmed)
# Forecast six future days with the polynomial model (same day-index
# caveat as above), then floor each forecast to a whole case count.
predykcja2 = predict(model2,newdata = data.frame(date = c((nrow(covid_PL_dates) + 1),(nrow(covid_PL_dates) + 2),(nrow(covid_PL_dates) + 3),(nrow(covid_PL_dates) + 4),(nrow(covid_PL_dates) + 5),(nrow(covid_PL_dates) + 6))))
p1=as.numeric( floor(predykcja2[1]))
p2=as.numeric( floor(predykcja2[2]))
p3=as.numeric( floor(predykcja2[3]))
p4=as.numeric( floor(predykcja2[4]))
p5=as.numeric( floor(predykcja2[5]))
p6=as.numeric( floor(predykcja2[6]))
|
15b72c30c7285b4775415c8aadbc113339f864f5 | 2646da6beb532a45e05aa0bbf213f6bcd53929d8 | /Iris_Rscript/3mergeTwo_hadoop.R | eba682391223b890af83919956ac36eb40a41591 | [] | no_license | junjunruan/Big-Data-Classification | fd6c5737bb016b1077be903b0056486f5a16f8bb | 66c28189033b0c3a0019175c88bd757213af9a79 | refs/heads/master | 2016-09-06T19:53:21.729117 | 2015-09-28T17:43:10 | 2015-09-28T17:43:10 | 42,276,124 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 1,116 | r | 3mergeTwo_hadoop.R | Sys.setenv(HADOOP_HOME='/usr/local/hadoop')
Sys.setenv(HADOOP_CMD='/usr/local/hadoop/bin/hadoop') # hadoop binary used by rmr2
Sys.setenv(HADOOP_STREAMING='/usr/local/hadoop/share/hadoop/tools/lib/hadoop-streaming-2.2.0.jar') # streaming jar for rmr2 jobs
start.time <- Sys.time() # wall-clock timing of the whole job
# Local copy of the iris data (whitespace-separated).
data <- read.table ("/home/hduser/Documents/R/Iris_output/iris.data.txt", sep = "")
library(rmr2)
library(rhdfs)
hdfs.init()
# Push the data frame into HDFS; the returned handle feeds mapreduce().
data.content <- to.dfs(data)
# Mapper: emits the union of two randomly chosen classes under a single
# constant key, so all output funnels to one reducer.
data.map.fn <- function(k,v){
key <- 1
# v[,5] is taken to be the species label column of iris -- TODO confirm.
class <- unique (v[,5])
# NOTE(review): sample() without set.seed() picks a different class pair
# on every run, so the job output is nondeterministic.
c2 <- sample(class, 2, replace=FALSE)
# NOTE(review): reading per-class files from the *local* filesystem inside
# a mapper assumes the task runs on this node and the files exist there.
m1 <- read.table (paste("/home/hduser/Documents/R/Iris_output/",c2[1],".txt", sep=""))
m2 <- read.table (paste("/home/hduser/Documents/R/Iris_output/",c2[2],".txt", sep=""))
val <- rbind(m1, m2)
keyval(key,val)
}
# Reducer: writes the merged two-class subset to local disk (side effect)
# and returns the key-value pair to the MapReduce framework.
data.reduce.fn <- function(k,v){
  # NOTE(review): writing to the local filesystem inside a reducer assumes
  # the reduce task runs on this node; kept to preserve behaviour.
  write.table(v, "/home/hduser/Documents/R/Iris_output/cc12_hadoop.txt")
  # FIX: keyval() must be the last expression so its value is returned.
  # In the original, keyval() came first and its result was discarded, so
  # the reducer returned write.table()'s invisible NULL instead of the
  # key-value pairs.
  keyval(k, v)
}
# Run the single-round MapReduce job; 'classify' is an HDFS handle to the output.
classify <- mapreduce(input=data.content,
map=data.map.fn,
reduce=data.reduce.fn)
from.dfs(classify) # pull the reducer output back into R (printed, not stored)
end.time <- Sys.time()
time.taken <- end.time - start.time
time.taken #46.25848 secs
|
26846be0932e6fb32ba1ce0f1825740c1029ff93 | 10f4fbcb3fcbe8601a226dd49ab362a0256cba51 | /R/zzz.R | 46fb0d03b2e8acf31f423fb14d4897e41eb876dc | [] | no_license | anders-kolstad/sdmShiny | 55461ce6e9e12edeffe931a5340d8114b6d89521 | e323ac9e4ddd073c80523774f9db781e49eb2843 | refs/heads/master | 2023-01-23T00:39:07.829744 | 2020-12-03T08:35:39 | 2020-12-03T08:35:39 | 255,807,989 | 1 | 0 | null | null | null | null | UTF-8 | R | false | false | 117 | r | zzz.R | .onAttach <- function(libname, pkgname) {
packageStartupMessage("Hello. This is the sdmShiny package. Have fun.")
} |
7de16174e92305e97b1af9821b9164606bf06008 | 4ba706744ab27ff97c2f02655f90c3242b350aa1 | /R/name_model.R | 3df3b65888b9ac7470c851a2f31521d293e669b7 | [] | no_license | PengInGitHub/Kaggle-Titanic | 3d6cd6e66dd72e6425c9c4de5c903d1322243704 | 795d4a6b464a0c0e23ea89b420f3b9d18dca9e98 | refs/heads/master | 2020-03-26T20:34:46.680789 | 2018-08-26T13:56:48 | 2018-08-26T13:56:48 | 145,333,643 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 5,459 | r | name_model.R | #resource
#https://www.kaggle.com/cdeotte/titanic-using-name-only-0-81818
#simple gender model
#if male: 0 else: 1
# NOTE(review): setwd() makes the script non-portable; consider full paths.
setwd("~/go/src/github.com/user/Titanic/data/")
test <- read.csv("test.csv", stringsAsFactors = FALSE)
# Predict survival from sex alone: males 0, females 1; any other value
# stays NA, matching the original subset-assignment behaviour.
# FIX: the working column was misspelled "Survied" in the original; it was
# purely local to this section, so the rename is safe.
test$Survived <- ifelse(test$Sex == 'male', 0,
                        ifelse(test$Sex == 'female', 1, NA))
submit <- data.frame(PassengerId = test$PassengerId, Survived = test$Survived)
write.csv(submit, "gender_model.csv", row.names = FALSE)
#improve gender model
#which males survived and which females perished
train <- read.csv("train.csv", stringsAsFactors = FALSE)
#boys: survival rate about 50%
# NOTE(review): Age has missing values; rows where Age is NA produce NA in
# the logical index here -- verify the table reflects what you expect.
table(train$Survived[train$Sex=='male' & train$Age<16])
#females in class 3: survival rate only about 50%
table(train$Survived[train$Sex=='female'&train$Pclass==3])
#focus on identifying these two sub groups
#get title - man, woman and boy
#in column 'Name', retrieve string from ',' to '.', eliminate space and punctuations
# (regexpr returns the position of the *first* comma/period in each name)
train$Title <- substring(train$Name,regexpr(",",train$Name)+2,regexpr("\\.",train$Name)-1)
male_title <- c("Capt", "Don", "Major", "Col", "Rev", "Dr", "Sir", "Mr", "Jonkheer")
female_title <- c("Mrs", "the Countess", "Dona", "Mme", "Mlle", "Ms", "Miss", "Lady")
boy_title <- c("Master")
# Collapse raw titles into three coarse groups used by the model.
train$Title[train$Title %in% male_title] <- "man"
train$Title[train$Title %in% female_title] <- "woman"
train$Title[train$Title %in% boy_title] <- "boy"
#get surname (everything before the first comma)
train$Surname <- substring(train$Name, 0, regexpr(",", train$Name)-1)
head(train$Surname)
#build feature 'woman-child-group'
#preclude male
train$Surname[train$Title=="man"] <- "noGroup"
train$SurnameFreq <- ave(1:nrow(train), train$Surname, FUN=length)
table(train$Surname)
#preclude single ones
train$Surname[train$SurnameFreq<=1] <- "noGroup"
#calculate survival rate
#ave: Group Average Over Level Combinations of Factors
#usage: ave(a_variable, grouping_variables, func_to_apply_for_each_factor_level_combination)
train$SurviveRate <- ave(train$Survived, train$Surname)
table(train$SurviveRate[train$Title != 'noGroup'])
#some statistics on the woman-child-group
#all perish
all_perished = train$Surname[train$SurviveRate == 0]
unique(all_perished[order(all_perished)])
#all survived
all_survived = train$Surname[train$SurviveRate==1]
unique(all_survived[order(all_survived)])
#sum of this two conditions
#mistake made here, nrow(train[train$SurviveRate == 0])
nrow(train[train$SurviveRate == 0 | train$SurviveRate == 1,])
nrow(train[train$Surname != 'noGroup',])
#124 of 142 woman-child pairs have either all survived or all perished
#test new feature
#cross validation
library(ggplot2)
#adjusted survival rate
train$AdjustedSurvival <- (train$SurviveRate * train$SurnameFreq - train$Survived) / (train$SurnameFreq-1)
table(train$AdjustedSurvival)
all_survived = train$Surname[train$AdjustedSurvival==1]
#apply gender model + new feature
train$Predict = 0
train$Predict[train$Title == 'woman'] = 1
train$Predict[train$Title == 'boy' & train$AdjustedSurvival==1] = 1
train$Predict[train$Title == 'woman' & train$AdjustedSurvival==0] = 0
table(train$Predict, train$Title)
#plot how new feature improves simple gender model
#35 women are correctly predicted as perished
ggplot(train[train$Title=='woman',]) +
geom_jitter(aes(x=Pclass,y=Predict,color=factor(Survived))) +
labs(title="36 female predictions change from gender model on training set") +
labs(x="Pclass",y="New Predictor") +
geom_rect(alpha=0,color="black",aes(xmin=2.5,xmax=3.5,ymin=-0.45,ymax=0.45))
table(train$Survived[train$Title=='woman' & train$Predict==0])
#15 of 16 boys are correctly predicted as survived
ggplot(train[train$Title!='woman',]) +
geom_jitter(aes(x=Title,y=Predict,color=factor(Survived))) +
labs(title="16 male predictions change from gender model on training set") +
labs(x="Title",y="New Predictor") +
geom_rect(alpha=0,color="black",aes(xmin=0.5,xmax=1.5,ymin=0.55,ymax=1.45))
table(train$Survived[train$Title!='woman' & train$Predict==1])
#in overall the new predictor made 36+16=52 corrections from the gender model
#cross validation
#to test if new feature could improve the prediction
#Perform 25 trials of 10-fold cross validation
trials = 25; sum = 0
for (j in 1:trials){
x = sample(1:890); s = 0
for (i in 0:9){
# Engineer "woman-child-groups" from training subset
train$Surname <- substring(train$Name,0,regexpr(",",train$Name)-1)
train$Surname[train$Title=='man'] <- 'noGroup'
train$SurnameFreq <- ave(1:891,train$Surname,FUN=length)
train$Surname[train$SurnameFreq<=1] <- 'noGroup'
train$SurnameSurvival <- NA
# calculate training subset's surname survival rate
train$SurnameSurvival[-x[1:89+i*89]] <- ave(train$Survived[-x[1:89+i*89]],train$Surname[-x[1:89+i*89]])
# calculate testing subset's surname survival rate from training set's rate
for (k in x[1:89+i*89])
train$SurnameSurvival[k] <- train$SurnameSurvival[which(!is.na(train$SurnameSurvival) & train$Surname==train$Surname[k])[1]]
# apply gender model plus new predictor
train$Predict <- 0
train$Predict[train$Title=='woman'] <- 1
train$Predict[train$Title=='boy' & train$SurnameSurvival==1] <- 1
train$Predict[train$Title=='woman' & train$SurnameSurvival==0] <- 0
c = sum(abs(train$Predict[x[1:89+i*89]] - train$Survived[x[1:89+i*89]]))
s = s + c
}
cat( sprintf("Trial %d has 10-fold CV accuracy = %f\n",j,1-s/890))
sum = sum + 1-s/890
}
cat(sprintf("Average 10-fold CV accuracy from %d trials = %f\n",trials,sum/trials))
#submission
|
3cadf9cf6167bd9f895d0cd774ca10c4593a1faa | 8e005c6cf3bfd3d0ec1f1d1dd138565ccfa5af33 | /Musfiq.R | 9b4a35c78164733196a1a69f627c6619a2fd62ac | [] | no_license | fazlerabbi2248/Time-Series-Analysis | 1e04e0444655e09f41d3e4104ed7913f5fbeec97 | 8ae6775a2a07b5df0cf66fd4e582f343c80c65ac | refs/heads/master | 2021-03-11T09:54:18.351106 | 2020-03-18T14:18:20 | 2020-03-18T14:18:20 | 246,521,873 | 1 | 0 | null | null | null | null | UTF-8 | R | false | false | 586 | r | Musfiq.R | musV<- c(253,78,0,0,70,0,90,0,0,202,11,
0,107,191,220,0,69,24,0,0,0,156,0,
0,0,0,0,0,0,0,0,44,112,0,42,
0,0,1,0,116,163,0,0,0,178,0,0,
142,0,0,0,0,0,110,0,302,83,0,133,
0,46,0,0,166,327,215,0,0,0,0,0,
0,0,74)
print(musV)
# Build a monthly time series starting February 2014 from the vector above.
musTs<- ts(musV,start = c(2014,2),frequency =12)
print(musTs)
plot.ts(musTs)
library(TTR)
# Log transform (NOTE(review): musV contains zeros, so log() yields -Inf
# for those months — confirm this is intended before using musLog).
musLog<- log(musTs)
plot(musLog)
# 3-period simple moving average (computed but only the raw series is plotted).
musTsSMA3<- SMA(musTs,n=3)
plot(musTs)
# Classical decomposition into trend / seasonal / random components.
musdecompose<- decompose(musTs)
plot(musdecompose)
# Simple exponential smoothing (no trend, no seasonality).
mushfore<- HoltWinters(musTs,beta = FALSE,gamma = FALSE)
mushfore
mushfore$fitted
plot(mushfore)
|
ae3e01b248eeb1c093aab8a44fbec78f932e8826 | c2788594bd272c37b9e307f0f6087cd9a0cb6e8e | /Graficos.R | f1e62a51674ab4b3c2a1b445088c40aee6a2dd05 | [] | no_license | thalisreboucas/Manipula-o-de-dados-R | dfc3ac589183260681659806c82e04add0f3af0f | a02e4cc509bf6770cfd9af2146fdfd81e7d098f2 | refs/heads/master | 2020-08-05T03:39:39.690330 | 2019-10-02T15:51:24 | 2019-10-02T15:51:24 | 212,380,069 | 1 | 0 | null | null | null | null | ISO-8859-1 | R | false | false | 1,108 | r | Graficos.R | library(tidyverse)
library("ggthemes")
#esquisse()
# NOTE(review): `big` is assumed to be a data frame loaded elsewhere with
# columns Taxes, Level and SPAM — confirm against the loading script.
# histogram with a fixed scale, faceted by company level
ggplot(big) +
  aes(x = Taxes) +
  geom_histogram(bins = 30L, fill = "#617a89") +
  labs(x = "Taxas", y = "Empresas", title = "Histograma das empresas pelas taxas") +
  theme_minimal() +
  facet_wrap(vars(Level))
# boxplot of taxes per company level
ggplot(big) +
  aes(x = Level, y = Taxes, group = Level) +
  geom_boxplot(fill = "#377eb8") +
  labs(x = "empresas", y = "Taxas", title = "Boxplot das empresas em relação as taxas") +
  theme_minimal()
# histogram of taxes by company level, with free scales per facet
ggplot(big) +
  aes(x = Taxes, group = Level) +
  geom_histogram(bins = 30L, fill = "#575c6d") +
  labs(x = "Taxes", y = "Número de empresas", title = "Histograma das Taxas") +
  theme_minimal() +
  facet_wrap(vars(Level), scales = "free")
# distribution of companies per level as a function of SPAM
ggplot(big) +
  aes(x = SPAM, group = Level) +
  geom_bar(fill = "#575c6d") +
  labs(x = "spam", y = "número de empresas", title = "Gráfico de barra ") +
  theme_minimal() +
  facet_wrap(vars(Level), scales = "free")
|
f730e5ec19b524b2459420969f4f1de91cceffe2 | 0500ba15e741ce1c84bfd397f0f3b43af8cb5ffb | /cran/paws.management/man/opsworks_stop_stack.Rd | aefe57969686287614fd094414ffa4e8255d3338 | [
"Apache-2.0"
] | permissive | paws-r/paws | 196d42a2b9aca0e551a51ea5e6f34daca739591b | a689da2aee079391e100060524f6b973130f4e40 | refs/heads/main | 2023-08-18T00:33:48.538539 | 2023-08-09T09:31:24 | 2023-08-09T09:31:24 | 154,419,943 | 293 | 45 | NOASSERTION | 2023-09-14T15:31:32 | 2018-10-24T01:28:47 | R | UTF-8 | R | false | true | 429 | rd | opsworks_stop_stack.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/opsworks_operations.R
\name{opsworks_stop_stack}
\alias{opsworks_stop_stack}
\title{Stops a specified stack}
\usage{
opsworks_stop_stack(StackId)
}
\arguments{
\item{StackId}{[required] The stack ID.}
}
\description{
Stops a specified stack.
See \url{https://www.paws-r-sdk.com/docs/opsworks_stop_stack/} for full documentation.
}
\keyword{internal}
|
8a7e3535ca1e08c670fefb56d545c63700e8820b | bed6e385166a316d7c1c6ab754fec0bf4c505847 | /statistics/statistics-final-words.R | 24439cfd64e9ab55b466b8416c20efff8a493bcb | [] | no_license | zhou-dong/people-to-people | 05b96b2ea9c86a16082235a19eab7b5fb624ef2b | e9376ea6ffa8bbc2ee11d7204b7e18b54396a125 | refs/heads/master | 2021-01-13T02:36:32.113749 | 2015-05-14T05:12:56 | 2015-05-14T05:12:56 | 31,597,206 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 1,443 | r | statistics-final-words.R | library(rmongodb)
# Tally how often each usage-count value occurs among the documents of the
# linkedin.final_skills MongoDB collection, then draw a pie chart of the
# distribution of those counts.
mongo <- mongo.create()
if (!mongo.is.connected(mongo))
  stop("No connection to MongoDB")  # fixed: error() is not a base R function

db_skills <- "linkedin.final_skills"

# Empty query matches every document; project only the "value" field.
query <- mongo.bson.from.buffer(mongo.bson.buffer.create())
fields_buf <- mongo.bson.buffer.create()
mongo.bson.buffer.append(fields_buf, "value", TRUE)
fields <- mongo.bson.from.buffer(fields_buf)

# counts[k] = number of skills whose usage count equals k.
counts <- vector("integer", length = 10000L)

limit_number <- 1000L
all_words_count <- mongo.count(mongo, db_skills)
# Number of pages needed to paginate through the whole collection
# (ceiling() replaces the manual remainder check).
cursor_time <- as.integer(ceiling(all_words_count / limit_number))

for (i in seq_len(cursor_time)) {
  skip_number <- (i - 1L) * limit_number
  cursor <- mongo.find(mongo, ns = db_skills, query = query, fields = fields,
                       limit = limit_number, skip = as.integer(skip_number))
  while (mongo.cursor.next(cursor)) {
    doc <- mongo.bson.to.list(mongo.cursor.value(cursor))
    if (length(doc) == 1)  # document has no "value" field beyond _id
      next
    count <- as.integer(doc[2])
    counts[count] <- counts[count] + 1L
  }
  err <- mongo.cursor.destroy(cursor)
}

mongo.disconnect(mongo)
mongo.destroy(mongo)

# Drop NA slots (possible if a count exceeded the preallocated length)
# and zero slots, keeping only count values that actually occurred.
counts <- counts[!is.na(counts)]
counts <- counts[counts > 0]
print(sum(counts))
# Labels show each slice's percentage of the total, one decimal place.
count_labels <- round(counts / sum(counts) * 100, 1)
pie(counts, main = "Final Keywords", col = rainbow(11), labels = count_labels)
# pie(counts, main="Final Keywords", col=rainbow(length(cars)))
56aa99405a29fc15bd6a48f6fb08c353446b8bb9 | b3111e2321fab38b1639824e3c07951aac692453 | /tests/testthat.R | 8de3878803c1b28e535abf84f705aeabc77a3edc | [] | no_license | paullevchuk/CustSegs | f1a793a223b01f0eb2f90b69c9c4ac4470dfd339 | fca99a2d6fbeea22854da934778cef6d2f7e90dc | refs/heads/master | 2021-01-11T18:27:55.343464 | 2017-01-20T11:09:41 | 2017-01-20T11:09:41 | 79,552,252 | 0 | 0 | null | 2017-01-20T11:06:56 | 2017-01-20T11:06:56 | null | UTF-8 | R | false | false | 60 | r | testthat.R | library(testthat)
# Standard testthat entry point: load the package under test, then run
# every test file in tests/testthat/ for the CustSegs package.
library(CustSegs)
test_check("CustSegs")
|
22a5ca8b8b85db8b236c1265a086a382337fc768 | 70c8284c90085e62df100238bdcc466d6ce7f1d6 | /R_s/VCF-1.r | a2b1949422d0ed67c82294bdc2e06f57f807c611 | [] | no_license | limeng12/My-Rscript | 74285f68ef68be8df960392428858f62122d1484 | 753778c9bf4fb88f094c5fc858a04c1d20e3b21d | refs/heads/master | 2021-05-16T02:58:18.851892 | 2017-05-22T06:34:35 | 2017-05-22T06:34:35 | 40,741,924 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 14,064 | r | VCF-1.r | #library(RJDBC)
library(plyr)
#library(data.table)
# This file mainly handles VCF files: it provides a variety of utilities and is a library file meant to be sourced by other scripts.
#filter by 1000 genome
#main class and method
# S4 class mirroring a VCF file: one slot per fixed VCF column plus the
# FORMAT column and a data.frame holding one genotype column per sample.
# All slots are parallel vectors of equal length (one element per record).
setClass("VCF",
         representation(chr="character",      # CHROM
                        pos="integer",        # POS (1-based position)
                        id="character",       # ID (e.g. rs number)
                        ref="character",      # REF allele(s)
                        alt="character",      # ALT allele(s), comma separated
                        qual="character",     # QUAL (kept as text)
                        filter="character",   # FILTER (e.g. "PASS")
                        info="character",     # INFO "key=value;..." string
                        format="character",   # FORMAT
                        samples="data.frame"  # one column per sample genotype
         ),
         prototype=prototype(chr="",
                             pos=integer(1),
                             id="",
                             ref="",
                             alt="",
                             qual="",
                             filter="",
                             info="",
                             format="",
                             samples=data.frame() )
)
#read the snp from VCF file
#read the snp from VCF file
setGeneric("readFromVCFFile",function(object,filename){})
# Parse a VCF text file into the object's slots.
# Skips "##" meta lines, takes the "#CHROM..." line as column names, then
# splits every remaining line on tabs. Assumes all data rows have exactly
# as many tab-separated fields as the header.
setMethod("readFromVCFFile","VCF",function(object,filename){
  dataFile<-readLines(filename);
  # n ends up as the index of the first data line (one past the header).
  n=1;
  names<-""
  for(i in 1:length(dataFile)){
    n=n+1;
    if(grepl("^##",dataFile[i]))
      next;
    if(grepl("^#",dataFile[i])){
      # header line: "#CHROM POS ID ..." — split into column names
      names<-strsplit(dataFile[i],split="\t")[[1]];
      break;
    }
  }
  # Preallocate a character matrix: one row per data line, one column per field.
  data<-matrix(ncol=length(names),nrow=(length(dataFile)-n+1) );
  colnames(data)<-names;
  dataFile<-dataFile[n:length(dataFile)];
  arr<-strsplit(dataFile,split="\t");
  #cat(class(arr))
  for(i in 1:length(arr))
    data[i,]<-arr[[i]];
  dataf<-as.data.frame(data,stringsAsFactors=F)
  # Delegate slot population to buildFromDataframe().
  object<-buildFromDataframe(object,dataf)
  return(object)
})
#setGeneric("as.data.frame",function(object){})
#setMethod("as.data.frame",signature(x="VCF",row.names="ANY",optional="ANY"),
# Coerce a VCF object to a data.frame by binding all slots column-wise.
# NOTE(review): cbind() here produces auto-generated column names for the
# slot columns (unlike getDataFrame(), which names them) — confirm callers
# do not rely on column names.
setMethod("as.data.frame",signature(x="VCF"),
          function(x,row.names=NULL,optional=NULL){
            result<-cbind(x@chr,x@pos,x@id,x@ref,x@alt,
                          x@qual,x@filter,x@info,x@format,
                          x@samples
            )
            return(result)
          })
setGeneric("buildFromDataframe", function(object, data) {})

# Populate the VCF slots from a data.frame laid out like a VCF body:
# CHROM POS ID REF ALT QUAL FILTER INFO [FORMAT sample1 sample2 ...].
# Columns 1-8 are mandatory; FORMAT and sample columns are optional.
setMethod("buildFromDataframe", "VCF", function(object, data) {
  object@chr    <- data[, 1]
  object@pos    <- as.integer(data[, 2])
  object@id     <- data[, 3]
  object@ref    <- data[, 4]
  object@alt    <- data[, 5]
  object@qual   <- data[, 6]
  object@filter <- data[, 7]
  object@info   <- data[, 8]
  if (ncol(data) < 9) {
    cat("there are no samples here.")
    object@format <- rep("", length(object@info))
    return(object)
  }
  object@format <- data[, 9]
  # Fixed two defects of the original `data[,10:ncol(data)]`:
  #  - ncol == 9 (FORMAT but zero samples) indexed the non-existent column 10;
  #  - a single sample column collapsed to a vector, failing the data.frame
  #    class check of the @samples slot (hence drop = FALSE).
  if (ncol(data) >= 10)
    object@samples <- data[, 10:ncol(data), drop = FALSE]
  object
})
# Accessor: the sample identifiers, i.e. the column names of the genotype table.
setGeneric("getSampleNames", function(object) {})

setMethod("getSampleNames", "VCF", function(object) {
  colnames(object@samples)
})
setGeneric("getDataFrame",function(object){})
# Convert the VCF object back into a named data.frame (one column per
# fixed VCF field), appending the sample genotype columns when present.
setMethod("getDataFrame","VCF",
          function(object){
            dataf<- data.frame(
              chr=object@chr,
              pos=object@pos,
              id=object@id,
              ref=object@ref,
              alt=object@alt,
              qual=object@qual,
              filter=object@filter,
              info=object@info,
              format=object@format
            );
            # length() on a data.frame counts columns, so this appends the
            # samples only when at least one sample column exists.
            if( length(object@samples)>0 )
              dataf<-cbind(dataf,object@samples);
            return(dataf);
          }
)
#setGeneric("[",function(object,index){})
# `[` operator for VCF: vcf[i] returns a new VCF restricted to records i.
# NOTE(review): the j and drop arguments are accepted but ignored.
setMethod("[", signature(x = "VCF", i = "numeric",j="missing",drop="ANY"), function(x,i,j=0,drop=FALSE){
  return(vcfSubset(x,i));
})
#setGeneric("filterBy1000genome",function(object){})
#setMethod("filterBy1000genome","VCF",function(object){
# dataf<-getDataFrame(object);
#
# sqlStr<-paste(
# "select pid from mergevcf INNER JOIN ensembl.variation ON(ensembl.variation.name=mergevcf.id)",
# " where ensembl.variation.validation_status LIKE '%1000Genome%'",sep="");
#
# drv <- JDBC("com.mysql.jdbc.Driver","/home/limeng/Downloads/mysql-connector-java-5.1.31/mysql-connector-java-5.1.31-bin.jar",identifier.quote="`")
# conn<-dbConnect(drv, "jdbc:mysql://111.117.59.63/drugresistant", "limeng", "880108")
#
# pid<-1:nrow(dataf);
# dataf<-cbind(pid,dataf);
# idTable<-data.frame(
# pid<-1:nrow(dataf),
# id<-dataf[,2]
# )
#
# colnames(idTable)<-c("id","pid");
# dbWriteTable(conn, "mergevcf", idTable,overwrite=TRUE)
#
# #dbWriteTable(conn, "mergevcf", dataf, overwrite=TRUE)
# res<-dbSendQuery(conn,sqlStr);
# pid<-dbFetch(res);
# dataf<-dataf[pid,-1];
#
# exceptionStr<-dbGetException(conn);
# cat(exceptionStr);
# mergeno1000Genome <- dbReadTable(conn, "mergeindelfilter1000genome")
# return(buildFromDataframe(object,mergeno1000Genome));
#})
setGeneric("vcfSubset", function(object, subset) {})

# Restrict every parallel slot of a VCF to the records selected by `subset`
# (a logical mask or an index vector). Returns a new VCF object.
setMethod("vcfSubset", "VCF", function(object, subset) {
  object@chr    <- object@chr[subset]
  object@pos    <- as.integer(object@pos[subset])
  object@id     <- object@id[subset]
  object@ref    <- object@ref[subset]
  object@alt    <- object@alt[subset]
  object@qual   <- object@qual[subset]
  object@filter <- object@filter[subset]
  object@info   <- object@info[subset]
  object@format <- object@format[subset]
  # No sample columns: nothing more to subset.
  if (nrow(object@samples) < 1)
    return(object)
  # drop = FALSE fixes a crash with single-sample VCFs: without it the
  # subsetted data.frame collapsed to a vector, which fails the class
  # check when assigned back into the @samples slot.
  object@samples <- object@samples[subset, , drop = FALSE]
  object
})
setGeneric("vcfGetAttribute", function(object, column) {})

# Extract the value of INFO key `column` for every record.
# Matches "column...;" so the value must be terminated by a ';' — a key at
# the very end of the INFO string (no trailing ';') yields "" just as before.
setMethod("vcfGetAttribute", "VCF", function(object, column) {
  pattern <- paste0(column, "[^;]*;")
  m <- regexpr(pattern, object@info)
  starts <- as.integer(m)               # match start, -1 when no match
  lens <- attr(m, "match.length")
  # vapply over seq_along() replaces growing a vector with c(): O(n) instead
  # of O(n^2), and returns character(0) (not garbage) for an empty VCF.
  vapply(seq_along(object@info), function(i) {
    # value sits between "column=" and the trailing ';' of the match;
    # for a non-match (starts = -1) start > stop, so substr() returns "".
    substr(object@info[i], starts[i] + nchar(column) + 1, starts[i] + lens[i] - 2)
  }, character(1))
})
# Keep only the records whose INFO string matches `pattern` (a regex).
setGeneric("vcfFilterByAttr", function(object, pattern) {})

setMethod("vcfFilterByAttr", "VCF", function(object, pattern) {
  vcfSubset(object, grepl(pattern, object@info))
})
setGeneric("vcfGetAttributeFunc", function(object, x) {})

# Like vcfGetAttribute(), but anchors the key with a leading ';' so that
# `x` cannot match as a suffix of another key name.
setMethod("vcfGetAttributeFunc", "VCF", function(object, x) {
  pattern <- paste0(";", x, "[^;]*;")
  # Fixed: the original called regexpr() on a free variable `info` (and
  # substr() on it too) instead of the object's own @info slot.
  m <- regexpr(pattern, object@info)    # m is a vector: one match per record
  starts <- as.integer(m)
  lens <- attr(m, "match.length")
  vapply(seq_along(object@info), function(i) {
    # skip the leading ';' plus "x=" before the value; drop the trailing ';'
    substr(object@info[i], starts[i] + nchar(x) + 2, starts[i] + lens[i] - 2)
  }, character(1))
})
#the snpMatrix 0 stand for don't have a snp here, 1 stand for having a snp here.
setGeneric("getSnpMatrix",function(object){})
# Build a 0/1 indicator data.frame with the same shape as @samples.
# NOTE(review): a cell is 1 when the genotype string contains a '.', which
# in VCF conventionally denotes a MISSING call — confirm this matches the
# encoding intended by the comment above (1 = "has a snp").
setMethod("getSnpMatrix","VCF",
          function(object){
            #snpIndels<-object@samples;
            snpIndels<-apply(object@samples,c(1,2),function(x){return(as.integer(grepl("\\.",x))); } )
            snpIndels<-as.data.frame(snpIndels,row.names=1:nrow(snpIndels))
            #cat(class(snpIndels))
            #snpIndelDis<-cbind(attrs,snpIndels);
            return(snpIndels);
          }
)
setGeneric("distributionInGenes",function(object,column){})
# Per-gene variant counts: group the 0/1 indicator matrix by the value of
# INFO key `column` (e.g. the gene name) and sum within each group, giving
# one row per attribute value and one count column per sample.
setMethod("distributionInGenes","VCF",
          function(object,column){
            attrs<-vcfGetAttribute(object,column);
            #attrs<-as.factor(attrs);
            #snpIndels<-apply(object@samples,c(1,2),function(x){return(as.integer(grepl("\\.",x))); } )
            #snpIndels<-as.data.frame(snpIndels,row.names=1:nrow(snpIndels))
            #cat(class(snpIndels))
            snpIndelDis<-getSnpMatrix(object);
            snpIndelDis<-cbind(attrs,snpIndelDis);
            # aggregate drops the attrs column ([-1]) and sums each sample
            # column within every attribute level.
            snpIndelDis<-aggregate(snpIndelDis[-1],by=list(snpIndelDis$attrs) ,sum)
            return(snpIndelDis);
          }
)
setGeneric("rmSample", function(object, samplename) {})

# Remove one sample's genotype column from the VCF, by name.
setMethod("rmSample", "VCF", function(object, samplename) {
  keep <- colnames(object@samples) != samplename
  # drop = FALSE fixes a crash: when only one sample remained, the result
  # collapsed to a vector and failed the @samples slot's data.frame check.
  object@samples <- object@samples[, keep, drop = FALSE]
  object
})
setGeneric("oderBySample", function(object, samplenames) {})

# Reorder (or subset) the sample columns to the given name order.
# (Generic name keeps the original's "oderBySample" spelling for
# backward compatibility with existing callers.)
setMethod("oderBySample", "VCF", function(object, samplenames) {
  # drop = FALSE: selecting a single sample must stay a data.frame,
  # otherwise the @samples slot assignment fails its class check.
  object@samples <- object@samples[, samplenames, drop = FALSE]
  object
})
setGeneric("rmIndel",function(object){})
# Intended to drop indels, keeping records where some REF allele has length 1.
# NOTE(review): the ALT loop below computes `TRUE && result`, which is a
# no-op — the ALT alleles never influence the verdict. The intended logic
# (perhaps requiring every ALT allele to be length 1 too?) should be
# confirmed before changing it.
setMethod("rmIndel","VCF",function(object){
  #resut<-c()
  result<-apply(cbind(object@ref,object@alt),1,
                function(x){
                  #if((nchar(x[1])==1)&&(nchar(x[2])==1))
                  #  return(TRUE);
                  result<-FALSE;
                  # REF/ALT may list several comma-separated alleles.
                  refAllels<-strsplit(x[1],",")[[1]];
                  altAllels<-strsplit(x[2],",")[[1]];
                  for(i in 1:length(refAllels)){
                    if(nchar(refAllels[i])==1)
                      result<-TRUE;
                  }
                  for(i in 1:length(altAllels)){
                    if(nchar(altAllels[i])>1)
                      result<-TRUE&&result;
                  }
                  return(result);
                });
  return(vcfSubset(object,result));
})
setGeneric("rmSNP", function(object) {})

# Drop single-nucleotide substitutions: keep a record unless both REF and
# ALT are exactly one character long.
setMethod("rmSNP", "VCF", function(object) {
  # Vectorized nchar() replaces the row-wise apply(): identical truth
  # table, O(n), and well-defined (logical(0)) on an empty VCF.
  keep <- !(nchar(object@ref) == 1 & nchar(object@alt) == 1)
  vcfSubset(object, keep)
})
# length(vcf) = number of records (length of the parallel @chr slot).
setMethod("length","VCF",
          function(x){
            return(length(x@chr))
          })
setGeneric("filterVCF", function(object, column) {})

# Keep only the records whose FILTER field equals `column` exactly
# (e.g. filterVCF(vcf, "PASS")).
setMethod("filterVCF", "VCF", function(object, column) {
  # Vectorized == replaces sapply(): same result, and it stays a logical
  # vector (not an empty list) when the VCF has zero records.
  vcfSubset(object, object@filter == column)
})
setGeneric("snpLevelUnparametric",function(object,drug){})
# Non-parametric per-variant screen: for each variant, split the samples'
# drug values by the 0/1 indicator and keep variants whose two groups are
# perfectly separated (all values of one group above all of the other).
# The reported string is "<n1><n2> <boundary1>:<boundary2>" for kept
# variants; "WRONG" marks variants that fail the screen.
setMethod("snpLevelUnparametric","VCF",
          function(object,drug){
            snpIndelDis<-getSnpMatrix(object);
            result<-apply(snpIndelDis,1,
                          function(snp,drug){
                            # Group1: drug values of samples flagged 1;
                            # Group2: drug values of samples flagged 0.
                            Group1<-c()
                            Group2<-c()
                            for(i in 1:length(snp)){
                              if(snp[i]==1)
                                Group1<-c(Group1,drug[i]);
                              if(snp[i]==0)
                                Group2<-c(Group2,drug[i]);
                            }
                            # Require at least two samples on each side.
                            if(length(Group1)<2)
                              return("WRONG");
                            if(length(Group2)<2)
                              return("WRONG");
                            if(min(Group1)>max(Group2))
                              return( paste(length(Group1),length(Group2)," ",max(Group1),":",min(Group2) ,sep="") )
                            if(max(Group1)<min(Group2))
                              return( paste(length(Group1),length(Group2)," ",min(Group1),":",max(Group2) ,sep="") )
                            return("WRONG");
                          },drug );
            targetSnpIndelsMatrix<-snpIndelDis[which(result!="WRONG"),]
            #targetNames<-rownames(targetSnpIndelsMatrix)
            #return(targetSnpIndelsMatrix)
            # Restrict the VCF to the separating variants and report them.
            targetSnpIndel<-vcfSubset(object,which(result!="WRONG"));
            resultOfDrug<-data.frame(
              chr=targetSnpIndel@chr,
              pos=targetSnpIndel@pos,
              ref=targetSnpIndel@ref,
              alt=targetSnpIndel@alt,
              RatioAndBounding=result[which(result!="WRONG")]
            )
            #return(result);
            return(resultOfDrug);
          })
setGeneric("snpGroupAnalysis",function(object,group1,group2){})
# Keep the variants whose 0/1 indicator perfectly separates the two sample
# groups: every value in one group strictly greater than every value in the
# other (i.e. all 1s in one group and all 0s in the other).
setMethod("snpGroupAnalysis","VCF",
          function(object,group1,group2){
            snpIndelDis<-getSnpMatrix(object);
            # drop=FALSE keeps a data.frame even for single-sample groups.
            group1SnpIndel<-snpIndelDis[,group1,drop=FALSE];
            group2SnpIndel<-snpIndelDis[,group2,drop=FALSE];
            # Per-variant extremes within each group.
            minGroup1<-apply(group1SnpIndel,1,min);
            maxGroup1<-apply(group1SnpIndel,1,max);
            minGroup2<-apply(group2SnpIndel,1,min);
            maxGroup2<-apply(group2SnpIndel,1,max);
            sg1<-(minGroup1>maxGroup2)
            sg2<-(minGroup2>maxGroup1)
            result<-sg1|sg2;
            return(vcfSubset(object,result));
          })
setGeneric("geneLevelSpearman", function(object, column, drug) {})

# Rank genes by the absolute Spearman correlation between their per-sample
# variant counts (grouped by INFO key `column`) and the drug-response
# vector `drug`. Returns the grouped counts with a spearmanResult column,
# strongest correlations first.
setMethod("geneLevelSpearman", "VCF", function(object, column, drug) {
  # Fixed: removed a stray `vcfGetAttribute(vcf, "Gene")` call that read a
  # global `vcf` instead of `object`; its result was never used and the
  # call crashed whenever no global `vcf` existed.
  snpDisInGenes <- distributionInGenes(object, column)
  # [-1] skips the group-label column; correlate each gene row with `drug`.
  spearmanResult <- apply(snpDisInGenes[-1], 1,
                          function(x) cor(x, drug, method = "spearman"))
  result <- cbind(snpDisInGenes, spearmanResult)
  result[order(abs(result[ncol(result)]), decreasing = TRUE), ]
})
|
8ca29fabe05b8ab6513f5c26bc4b080c615bbe08 | c21feb043f907de5c6048cbb71c34358082a98c3 | /models/arima/code/may_cleandata.R | c1c0f0e2f0cfbd003b1ab034bb4d545d61fc0496 | [
"MIT"
] | permissive | alan-turing-institute/CROP | 9d344ce6bcff859ae7f87252fd13b3c03067f889 | 649fdc62e61568d1ee7f9b0d931077288993c0ab | refs/heads/main | 2023-05-13T09:21:08.436419 | 2023-04-13T08:56:24 | 2023-04-13T08:56:24 | 224,865,173 | 23 | 2 | MIT | 2023-03-29T09:48:36 | 2019-11-29T14:02:49 | Python | UTF-8 | R | false | false | 6,122 | r | may_cleandata.R | # This script outputs the hourly average temperature, rel hum and energy for the latest data
# It then combines all the data frames using left_join with a time vector called my_time, to be sure the data contains all the timestamps in the last year, even if no data was collected
#
# load bespoke functions
# Drop data-frame rows containing more than `n` missing values.
# With the default n = 0, every row holding any NA is removed.
delete.na <- function(DF, n = 0) {
  na_per_row <- rowSums(is.na(DF))
  DF[na_per_row <= n, ]
}
# Convert a POSIXct timestamp to a decimal hour of day (13:30:00 -> 13.5).
# Fixed: the original divided seconds by 360 instead of 3600, inflating the
# seconds contribution tenfold. Reimplemented on base as.POSIXlt(), so
# lubridate's hour()/minute()/second() accessors are no longer needed.
decimal_hour <- function(my_timestamp) {
  lt <- as.POSIXlt(my_timestamp)
  lt$hour + lt$min / 60 + lt$sec / 3600
}
# Hourly temperature/humidity averages for one sensor.
# trh:          long-format readings with columns name, temperature, humidity,
#               Date/Hour and DatePM/HourPM (prepared by the calling script)
# sensor_index: sensor name to subset on
# my_time:      data.frame of hourly FarmTimestamp values to left-join onto,
#               so missing hours appear as NA rows
# Returns one row per hour in my_time with Temperature/Humidity averaged over
# the xxh45-xxh15 window (the HourPM grouping).
hourly_av_sensor <- function(trh, sensor_index,my_time){
  # subset the data set for the sensor of choice
  trh_sub<- subset(trh, name== sensor_index)
  # Find the mean temp and RH for this new HourPM
  trh_ph <- plyr::ddply(trh_sub, .(DatePM,HourPM),
                        summarise,
                        Temperature=mean(temperature, na.rm = T),
                        Humidity=mean(humidity, na.rm = T))
  # create a timestamp corresponding to this hour and date
  trh_ph$Timestamp <- as.POSIXct(paste(trh_ph$DatePM, trh_ph$HourPM),tz="UTC",format="%Y-%m-%d %H")
  # Drop lines with NAs, representing the temperature at times between h15-h45 min.
  # apply(trh_ph,2, function(x) sum(is.na(x)))
  trh_ph <- delete.na(trh_ph,1)
  # calculate a second hourly average where averaging over the entire hour rather than between xxh45-xxh15.
  trh_ph2 <- plyr::ddply(trh_sub, .(Date,Hour),
                         summarise,
                         Temp_hourav=mean(temperature, na.rm = T),
                         Humid_hourav = mean(humidity, na.rm = T))
  trh_ph2$Timestamp <- as.POSIXct(paste(trh_ph2$Date, trh_ph2$Hour),tz="UTC",format="%Y-%m-%d %H")
  # Attach the whole-hour averages to the PM-window averages by timestamp.
  trh_ph<- left_join(trh_ph,trh_ph2,by=c("Timestamp") )
  trh_ph$FarmTimestamp <- trh_ph$Timestamp
  # Reindex onto the full hourly grid; hours with no data become NA rows.
  trh_ph_all <- left_join(my_time, trh_ph[c("FarmTimestamp","Timestamp","Temperature","Humidity")])
  return(trh_ph_all)
}
# Clean env data ===========
# sort by sensor
# create a copy of env_raw in order to keep the raw file intact and only modify trh
# NOTE(review): env_raw / energy_raw and the hour()/ma() helpers are assumed
# to be provided by the scripts sourced before this one — confirm.
trh <- env_raw
# Create two hour columns, one with truncated hour, one with decimal hour
trh$Hour <- hour(trh$Timestamp2)
trh$HourDec <- decimal_hour(trh$Timestamp2)
# Create new hour column which encompasses 15 min before and after the hour (PM stands for plus minus)
trh$HourPM <- ifelse(abs(trh$Hour-trh$HourDec)<=0.25 ,trh$Hour,NA)
trh$HourPM <- ifelse(abs(trh$Hour+1-trh$HourDec)<=0.25,trh$Hour+1,trh$HourPM)
# add special case for midnight!
trh$HourPM <- ifelse(abs(24-trh$HourDec)<=0.25,0,trh$HourPM)
# Create a date column which corresponds to the rounded hour
trh$Date <- as.Date(trh$Timestamp2)
trh$DatePM <- trh$Date
# Readings just before midnight belong to the next day's hour 0.
trh$DatePM <- as.Date(ifelse(trh$HourPM>=23.5, trh$Date + 1 ,trh$Date))
trh$Timestamp <- as.POSIXct(paste(trh$Date, trh$Hour),tz="UTC",format="%Y-%m-%d %H")
# select the time duration over which you want to find new hourly averages
my_time <- data.frame(FarmTimestamp = seq(from= min(trh$Timestamp)+3600,
                                          to= max(trh$Timestamp),by ="1 hour"))
# function to find hourly average temperature by sensor
#
# Identify each unique sensor
sensor_names <- unique(trh$name)
# Select sensors: "FARM_16B1" "FARM_16B4" "Farm_16B2"
#select_sensors <- sensor_names[c(1,6,9)]
select_sensors <- sensor_names[c(1)]
# Strip the "_T/RH" suffix to get display names.
sensor_names_2<- gsub("_T/RH", "", select_sensors)
#sensor_names_2<- select_sensors
# create list where all sensor data is saved
all_sensors<- vector(mode = "list", length = length(select_sensors))
names(all_sensors) <- sensor_names_2
#hourly_av_sensor(trh ,"FARM_T/RH_16B1",my_time)
# load hourly average temperature and RH in the list
for (xx in 1:length(select_sensors)){
  print(sensor_names_2[xx])
  all_sensors[[xx]] <-hourly_av_sensor(trh ,select_sensors[xx],my_time)
}
# transform the list of data frames into a data frame with ldply
all_sensors_df <- ldply(all_sensors)
#all_sensors_df <- dcast(all_sensors_df, FarmTimestamp+Timestamp ~ .id, value.var = c("Temperature","Humidity"))
#
# reframe the dataframe so that each column is either temperature or humidity of each sensor
all_sensors_df <- melt(all_sensors_df, measure.vars = c("Temperature","Humidity"))
all_sensors_df2 <- dcast(all_sensors_df, FarmTimestamp ~ variable + .id, value.var = c("value"))
saveRDS(all_sensors_df2,"trh_API.RDS")
## Clean energy data -----------------------
ecp <- energy_raw
# One column per sensor_id; half-hourly consumption readings.
ecpp <- dcast(energy_raw, Timestamp2 ~sensor_id,value.var = "electricity_consumption")
names(ecpp) <- c("Timestamp2","EnergyCC","EnergyCP")
ecpp$Hour <- hour(ecpp$Timestamp2)
ecpp$HourDec <- decimal_hour(ecpp$Timestamp2)
# Add column with moving average so that the hourly energy used is the centered average around the hour (because data is half hourly)
ecpp$ECC_ma <- ma(ecpp$EnergyCC,order=2)
ecpp$ECP_ma <- ma(ecpp$EnergyCP,order=2)
names(ecpp)
ecpp$Date <- as.Date(ecpp$Timestamp2)
# Collapse to one row per hour: plain mean plus first centered moving average.
ecp_ph <- ddply(ecpp, .(Date,Hour),
                summarise,
                EnergyCP=mean(EnergyCP, na.rm = T),
                EnergyCP_ma=first(ECP_ma),
                EnergyCC=mean(EnergyCC, na.rm = T),
                EnergyCC_ma=first(ECC_ma))
ecp_ph$Timestamp <- as.POSIXct(paste(ecp_ph$Date, ecp_ph$Hour),tz="UTC",format="%Y-%m-%d %H")
# Assume ECP is in UTC, but time needs to be shifted back an hour
ecp_ph$FarmTimestamp <- as.POSIXct(ecp_ph$Timestamp)
saveRDS(ecp_ph,"ecp_API.RDS")
## Combine data frames ---------
# check how many rows in data frames to ensure using join function the right direction
# nrow(my_time)
# nrow(my_time) -nrow(all_sensors_df2)
# nrow(my_time) -nrow(ecp_ph)
# Join with my_time, which contains every hour in the year (in case of missing data)
t_e1 <- left_join(my_time, all_sensors_df2)
# Join with my_time, which contains every hour in the year (in case of missing data)
t_e2 <- left_join(my_time, ecp_ph)
# Now can join temperature and energy together
t_ee <- left_join(t_e1, t_e2)
## save the cleaned data frame for future use --------------
setwd(daughterfolder)
saveRDS(t_ee,"t_ee_may_208.RDS")
|
d41c8d1b2be62962a26013c5b1a1a090fe386454 | e084949e627ed5b297dbfd46a0c8f897b8e2f63e | /server.R | fcb0036acbcd13f03c9e4aed70a6591fe9016b9d | [] | no_license | fcherance/GoF | 492c6fa984b5ae8c0dbe23d30e104b56378b7193 | a95109dc2c2418486ba157460c9ee98a3f5cba27 | refs/heads/master | 2021-06-01T10:10:26.169235 | 2016-05-19T20:46:02 | 2016-05-19T20:46:02 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 1,580 | r | server.R | # Server Script -----------------------------------------------------------
# Title: ROC curve sensitivity to underlying distribution of classes
# Author: Fabien CHERANCE
# Date: 19 May 2016
# Mantainer: Fabien CHERANCE <fabien.cherance@gmail.com>
# Description: Setting a variaty of spreads between a Low Propensity Class
# and a High Propensity Class, and a variaty of imbalances
# of the original classes.
# We measure the values of AUC as shortcut for the GINI
# as sensitivities of the two parameters.
# Predictions are the "real" propensities, the ones used to generate
# the observations.
#Load the libraris
list.of.packages <- c("shiny", "pROC","ROCR","ggplot2","datasets")
#new.packages <- list.of.packages[!(list.of.packages %in% installed.packages()[,"Package"])]
#if(length(new.packages)) install.packages(new.packages)
lapply(list.of.packages, require, character.only = TRUE)
# Define server logic required to plot various variables against
shinyServer(function(input, output) {
#Reactivity to Spread / parameterizing the Low Propensity level and the balance of classes
formulaText <- reactive({
AUC=matrix(unlist(AUC_VEC(SENSI_SPREAD,N*input$balance*0.01,5000,input$PBL*0.01)),100,1)
GRAPH1=data.frame(xx=1:100,yy=AUC*100)
})
# Generate a plot of the requested
output$Curve <- renderPlot({
GRAPH1=formulaText()
plot(GRAPH1$xx,GRAPH1$yy, type='l', xlab='Spread', ylab='AUC')
})
})
|
198334b25eb64e477a8922f1297b7fe82d5b4d98 | 2bb617584eae4b44305330750da8c37c15be69e0 | /run_analysis.R | 10b704c06b26196eeab619ad797d7e86ff380ed2 | [] | no_license | jsbarretor/Getting-and-Cleaning-Data-Course-Project | 697c1d0e63b09d32e1ecaf58a59667b9427810fd | 6501f40fe30f3857f7bd9a929e54a04df4af18ba | refs/heads/master | 2022-12-04T16:20:32.197264 | 2020-08-10T22:34:54 | 2020-08-10T22:34:54 | 286,590,633 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 3,316 | r | run_analysis.R | # Load libraries
library(dplyr)

# Project directory: every path below is relative to this location.
# NOTE(review): setwd() makes the script machine-specific; prefer running R
# from the project directory instead.
setwd("D:/Coursera/Getting and Cleaning Data/Proyecto")

# URL data for the project:
url.data <- "https://d396qusza40orc.cloudfront.net/getdata%2Fprojectfiles%2FUCI%20HAR%20Dataset.zip"
zip.file <- "getdata_projectfiles_UCI HAR Dataset.zip"

# Download the data only if the archive is not already present, then unzip.
if (!file.exists(zip.file)) {
  download.file(url.data, destfile = zip.file)
}
unzip(zip.file)

# Read files.  Paths are relative to the working directory set above, so
# they no longer repeat the absolute project path eight times.
data.dir <- "UCI HAR Dataset"
features <- read.table(file.path(data.dir, "features.txt"), col.names = c("n", "functions"))
activity.labels <- read.table(file.path(data.dir, "activity_labels.txt"), col.names = c("code", "activity"))
x.test <- read.table(file.path(data.dir, "test", "X_test.txt"), col.names = features$functions)
y.test <- read.table(file.path(data.dir, "test", "y_test.txt"), col.names = "code")
subject.test <- read.table(file.path(data.dir, "test", "subject_test.txt"), col.names = "subject")
x.train <- read.table(file.path(data.dir, "train", "X_train.txt"), col.names = features$functions)
y.train <- read.table(file.path(data.dir, "train", "y_train.txt"), col.names = "code")
subject.train <- read.table(file.path(data.dir, "train", "subject_train.txt"), col.names = "subject")

# 1. Merges the training and the test sets to create one data set
x.data <- rbind(x.train, x.test)
y.data <- rbind(y.train, y.test)
subjects <- rbind(subject.train, subject.test)
all.data <- cbind(x.data, y.data, subjects)

# 2. Extracts only the measurements on the mean and standard deviation for
#    each measurement
tidy.data <- all.data %>% select(subject, code, contains("mean"), contains("std"))

# 3. Uses descriptive activity names to name the activities in the data set
tidy.data$code <- activity.labels[tidy.data$code, 2]

# 4. Appropriately labels the data set with descriptive variable names.
names(tidy.data)[2] <- "activity"
names(tidy.data) <- gsub("Acc", "Accelerometer", names(tidy.data))
names(tidy.data) <- gsub("Gyro", "Gyroscope", names(tidy.data))
names(tidy.data) <- gsub("BodyBody", "Body", names(tidy.data))
names(tidy.data) <- gsub("Mag", "Magnitude", names(tidy.data))
names(tidy.data) <- gsub("^t", "Time", names(tidy.data))
names(tidy.data) <- gsub("^f", "Frequency", names(tidy.data))
names(tidy.data) <- gsub("tBody", "TimeBody", names(tidy.data))
names(tidy.data) <- gsub("-mean()", "Mean", names(tidy.data), ignore.case = TRUE)
names(tidy.data) <- gsub("-std()", "STD", names(tidy.data), ignore.case = TRUE)
names(tidy.data) <- gsub("-freq()", "Frequency", names(tidy.data), ignore.case = TRUE)
names(tidy.data) <- gsub("angle", "Angle", names(tidy.data))
names(tidy.data) <- gsub("gravity", "Gravity", names(tidy.data))

# 5. From the data set in step 4, creates a second, independent tidy data
#    set with the average of each variable for each activity and subject.
#    (funs() is deprecated in dplyr; summarise_all(mean) is equivalent.)
final.tidy.data <- tidy.data %>%
  group_by(subject, activity) %>%
  summarise_all(mean)

# row.names spelled out: the original `row.name=F` relied on partial
# argument matching and the reassignable F constant.
write.table(final.tidy.data, "Final_Tidy_Data.txt", row.names = FALSE)
3c033c8c0307a457211f47e1197828528db9092c | 7cd6da22844e9a9f64d9d69ff6e6194a214d67c7 | /R/scraping/Rcrawler.R | 89a362df9ed708296f9fe6882bc7ee3684e8f19d | [] | no_license | sillasgonzaga/lsdb | 2be0ac54a0efab743063d24e410acdb8c5c26fbc | d4c53141821cfd18d1a4cfa14b59f5fd58959790 | refs/heads/master | 2021-07-07T11:37:16.273334 | 2017-10-05T02:42:55 | 2017-10-05T02:42:55 | 105,339,906 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 206 | r | Rcrawler.R | library(Rcrawler)
# Example target page: https://lsdb.eu/set/194650/sean-tyas-degenerate-radio-episode-049-15-12-15
# Crawl lsdb.eu with 4 workers/connections, storing pages under ./crawler,
# keeping only URLs that match the set pages.
# NOTE(review): "*/set/*" looks like a shell glob, but Rcrawler's
# urlregexfilter expects a regular expression (e.g. "/set/") -- verify the
# filter actually matches anything.
Rcrawler("https://lsdb.eu/", no_cores = 4, no_conn = 4, DIR = "crawler",
urlregexfilter = "*/set/*")
8987ecb3388ef188ebcfbed7712a5bc27d9f6520 | 22ff9556893a8684058c2eef978b049601fd15f3 | /functions/lenique.R | 033a3cd39caef416751d5ff58533f2bcd361f7b8 | [
"MIT"
] | permissive | camkay/edld610_fpr_finalproject | aed8f273396e88ed8ab62292cfca281a8f04d503 | 8dff16842ae84be2d76de4d92137045e7ccecbad | refs/heads/master | 2020-05-07T16:10:04.939463 | 2019-06-13T00:35:27 | 2019-06-13T00:35:27 | 180,670,926 | 0 | 5 | MIT | 2019-06-03T17:01:22 | 2019-04-10T22:07:55 | TeX | UTF-8 | R | false | false | 358 | r | lenique.R | #################################################
#### Custom function 8 (at least 2 required) ####
#################################################
# Count the number of distinct values in a vector -- shorthand for
# `length(unique(x))`.  Any warnings are the ones raised by `unique()` or
# `length()` themselves.
lenique <- function(x) {
  # drop duplicates first, then count what remains
  distinct_vals <- unique(x)
  length(distinct_vals)
}
|
85ad6155643a6bd5b7c7286907398c21dda5b50b | 9d680e799f36291ef0406729e61315b8b3d9d0a1 | /man/get-methods.Rd | b9e6ef7e208ef8b014fcc38850a55ebbbfb9389b | [] | no_license | cran/chemosensors | ffe070d193178a9274c6273fbdea6e256d028550 | b8bf614e42a6b0bea7c4eb5eec14c06f679d17b1 | refs/heads/master | 2021-01-01T16:59:55.106040 | 2014-08-31T00:00:00 | 2014-08-31T00:00:00 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 1,983 | rd | get-methods.Rd | \name{get-methods}
\alias{alpha}
\alias{alpha,SorptionModel-method}
\alias{beta}
\alias{beta,SensorModel-method}
\alias{coef}
\alias{coef,ANY-method}
\alias{coefficients}
\alias{coefficients,SensorArray-method}
\alias{coefficients,SensorDynamics-method}
\alias{coefficients,SensorModel-method}
\alias{coefnames}
\alias{coefnames,SensorNoiseModel-method}
\alias{concUnits}
\alias{concUnits,ANY-method}
\alias{concUnitsInt}
\alias{concUnitsInt,ANY-method}
\alias{enableDyn}
\alias{enableDyn,SensorDynamics-method}
\alias{enableSorption}
\alias{enableSorption,SensorArray-method}
\alias{gases}
\alias{gases,ANY-method}
\alias{gases,missing-method}
\alias{get-methods}
\alias{gind}
\alias{gind,ANY-method}
\alias{gind,missing-method}
\alias{gnames}
\alias{gnames,ANY-method}
\alias{gnames,missing-method}
\alias{idx}
\alias{idx,ANY-method}
\alias{modelName}
\alias{modelName,DriftNoiseModel-method}
\alias{modelName,SensorModel-method}
\alias{ncoef}
\alias{ncoef,SensorDynamics-method}
\alias{ncoef,SensorModel-method}
\alias{ncoef,SensorNoiseModel-method}
\alias{ngases}
\alias{ngases,ANY-method}
\alias{ngases,missing-method}
\alias{nsensors}
\alias{nsensors,ANY-method}
\alias{nsensors,SorptionModel-method}
\alias{num}
\alias{num,ANY-method}
\alias{snames}
\alias{snames,ANY-method}
\alias{tunit}
\alias{tunit,Scenario-method}
\alias{tunit,SensorDynamics-method}
\alias{type}
\alias{type,ConcNoiseModel-method}
\alias{type,DriftNoiseModel-method}
\alias{type,SensorNoiseModel-method}
\title{Get Methods in R package chemosensors.}
\description{
Get Methods in R package chemosensors.
Method alpha.
Method beta.
Method modelName.
Method tunit.
Method enableSorption.
Method enableDyn.
Method num.
Method idx.
Method gases.
Method gind.
Method ngases.
Method gnames.
Method coefficients.
Method coef.
Method nsensors.
Method snames.
Method concUnits.
Method concUnitsInt.
Method type.
Method coefnames.
Method ncoef.
}
|
c1d872f27cafcba7c026fd5a33e88d68f0e9d3f0 | c0fd09facad89d8095ebd50c1c6b1cc9b3b0afad | /src/recommendation_final.R | 1502651db229152c1e41e272fa37f4267faefa3d | [] | no_license | gabby-camara/WhiteSpaces | 9db8cb58baf698493111ebe0de030799b33231e6 | f52b3b454532d4bdc8e3d78b1549dcffe510327e | refs/heads/master | 2020-04-01T17:50:07.350645 | 2018-11-21T09:14:34 | 2018-11-21T09:14:34 | 153,453,644 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 15,969 | r | recommendation_final.R | # Final recommendation algorithms
# --------------------
# data: build the wide client x product matrix fed to the recommenders
# --------------------
# Aggregate log revenue per (sector, segment, entity, product):
# mutate(sum()) inside group_by() plus distinct() collapses duplicate
# product rows into one total per group.
rdf_final = sector_product %>%
select(Client.Sector, Segment, Legal.Entity.Subsidiary, Product, log.Actual) %>%
group_by(Client.Sector, Segment, Legal.Entity.Subsidiary, Product) %>%
mutate(log.Actual = sum(log.Actual)) %>%
distinct()
# Wide format: one column per product, NA where the client does not hold
# the product.  (spread() is superseded by tidyr::pivot_wider().)
rdf_final = spread(rdf_final, Product, log.Actual)
# Unscale predicted ratings back to the revenue scale
# --------------------
# Inverts the 0-10 rescaling applied per product column: rating/10 *
# (max - min) + min recovers the log10 value, and 10^(...) un-logs it.
# rdf: wide data frame; columns 1-3 are identifiers, the remaining columns
#      hold per-product log10 revenue whose column min/max define the
#      rescaling bounds.
# opp: data frame of predicted 0-10 ratings; assumed to have the same
#      product columns, in the same order, as rdf[, -c(1:3)] -- TODO confirm.
# Returns a data frame of un-logged predicted revenue, rounded to 2 dp.
unscale_pred = function(rdf, opp){
unscaled = rdf[, -c(1:3)]
products = colnames(rdf[, -c(1:3)])
# perhaps make 0 == NA?
for(i in 1:length(products)){
product = products[i]
# unscaled[i] = numeric()
# i = 1
# NOTE(review): `min`/`max` shadow the base functions inside this loop;
# rdf[i+3] skips the three identifier columns.
min = min(rdf[i+3], na.rm = TRUE)
max = max(rdf[i+3], na.rm = TRUE)
unscaled[i] = sapply(opp[i], function(x) round((10^((x/10)*(max-min)+min)), 2))
}
return(unscaled)
}
# Round every numeric column of a data frame in place.
# x: data frame
# digits: number of digits to round to
# Returns x with its numeric columns rounded; other columns untouched.
round_df <- function(x, digits) {
  # The original used `sapply(x, mode) == 'numeric'`, but mode() is
  # "numeric" for factors and Dates too, so round() would be applied to
  # (and error on) those columns.  is.numeric() selects exactly the
  # integer/double columns, and vapply() is type-stable for empty frames.
  numeric_columns <- vapply(x, is.numeric, logical(1))
  x[numeric_columns] <- round(x[numeric_columns], digits)
  x
}
# Predictions method: item-based collaborative filtering, Z-score
# normalisation, Euclidean distance, trained on the evaluation scheme `e`
# (defined elsewhere in the project).
# NOTE(review): IBCF_Z_E is not referenced by define_opportunities() below
# (it builds its own recommender with the same parameters), so this object
# appears unused -- confirm before removing.
IBCF_Z_E = Recommender(getData(e, "train"), "IBCF",
param=list(normalize = "Z-score",method="Euclidean"))
# Parameters that were explored during development (kept for reference):
# --------------------
# r_method = IBCF_N_E # pull in best performing algorithm
# distance = 'Euclidian'
# normalise = NULL
# sector = 'Agriculture'
# data = rdf_final
# method = IBCF_Z_E
# function
# --------------------
# Fit an IBCF recommender for one sector and derive whitespace results.
#
# method: currently UNUSED -- see NOTE below; kept for interface
#         compatibility (callers omit it).
# sector: sector label used to filter `data`.
# data:   wide frame; cols 1-3 are Client.Sector / Segment /
#         Legal.Entity.Subsidiary, remaining cols are per-product log
#         revenue (NA = product not held).
# Returns list(opportunities, products_fill):
#   [[1]] character ratings per product, 'GOT IT' where already held,
#         'Not Recommended' where no prediction;
#   [[2]] actual revenue where held, predicted revenue otherwise.
# Relies on recommenderlab, dplyr, scales and the globals unscale_pred,
# round_df and a_log.
define_opportunities = function(method, sector, data){
# get correct data per sector; rescale log values between 0-10
data_sector = data %>% filter(Client.Sector == sector) # unscaled df (log of Actual values)
data_sector[ , -c(1:3)] = data.frame(lapply(data_sector[ , -c(1:3)], function(x) scales::rescale(x, to = c(0, 10)))) # scale between 0-10
# log values of actual revenue stored
rdf_final = data %>% filter(Client.Sector == sector) # unscaled values of actual product holding
#data_sector$Client.Sector = NULL
#data_sector$Segment = NULL
#rownames(data_sector) = data_sector$Legal.Entity.Subsidiary # suggest storing in vector
# convert to correct rmat
rmat = as.matrix(data_sector[ , -c(1,2,3)])
rmat = as(rmat,"realRatingMatrix")
# predict ratings
# NOTE(review): the next assignment overwrites the `method` argument, so
# whatever the caller passes is ignored -- the recommender is always
# IBCF / Z-score / Euclidean.
method = Recommender(rmat, "IBCF",
param=list(normalize = "Z-score", method="Euclidean"))
opportunities = predict(method,
rmat,
type = 'ratings')
# Clamp predictions to the valid 0-10 rating range.
opportunities@data@x[opportunities@data@x[] < 0] = 0
opportunities@data@x[opportunities@data@x[] > 10] = 10
opportunities = as.data.frame(as.matrix(opportunities@data)) # predicted ratings (scaled between 0-10)
# unscaled = unscale_pred(data, opportunities)
unscaled = unscale_pred(rdf_final, opportunities) # unscale opportunities from ratings (0-10) to log of Actual Predicted revenue
# unscaled = round_df(unscaled, 2)
# Fill products_fill: actual (un-logged) revenue where the product is
# held, predicted revenue otherwise.
products_fill = unscaled
total_cols = length(unscaled) # number of products
total_rows = nrow(unscaled) # number of entities in sector
true_false = as.data.frame(is.na(rdf_final[, -c(1:3)]))
for(i in 1:total_rows){
for(j in 1:total_cols){
if (true_false[i, j] == FALSE) {
# `a_log` is a global offset presumably added before log10 upstream --
# TODO confirm its definition.
products_fill[i,j] = (10^rdf_final[i, j + 3]) - a_log # unlog actual values
} else {
products_fill[i, j] = unscaled[i,j]
}
}
}
colnames(products_fill) = colnames(opportunities)
products_fill[is.na(products_fill)] = 'Not Recommended'
# keep only product with a rating > 0
opportunities = opportunities[ , colSums(opportunities) >0]
products_fill = products_fill[colSums(!is.na(products_fill)) > 0] #to determine if necessary
products_fill$Legal.Entity.Subsidiary = rdf_final$Legal.Entity.Subsidiary
products_fill$Segment = rdf_final$Segment
products_fill$Client.Sector = rdf_final$Client.Sector
products_fill = products_fill %>% select(Client.Sector, Segment, Legal.Entity.Subsidiary, everything())
products_fill = as.data.frame(products_fill)
# colnames(opportunities) = colnames(data[, -c(1:3)])
# Format the ratings as whole-number character values plus identifiers.
opportunities = round_df(opportunities, 0)
opportunities$Legal.Entity.Subsidiary = data_sector$Legal.Entity.Subsidiary
opportunities$Segment = data_sector$Segment
opportunities$Client.Sector = data_sector$Client.Sector
opportunities = opportunities %>% select(Client.Sector, Segment, Legal.Entity.Subsidiary, everything()) %>% mutate_if(is.numeric, as.character)
# Mark products the client already holds with 'GOT IT' (non-NA in the
# scaled sector data).
products = colnames(opportunities)
id = match(products, names(data_sector))
whitespaces = data_sector[ , id]
# whitespaces = data_sector
total_cols = length(whitespaces)
total_rows = length(whitespaces$Legal.Entity.Subsidiary)
true_false = as.data.frame(is.na(whitespaces))
for(i in 1:total_rows){
for(j in 4:total_cols){
if(true_false[i, j] == FALSE){
opportunities[i,j] = 'GOT IT'
}
}
}
return(list(opportunities, products_fill))
# return(opportunities)
}
# create list of all sectors
# -------------------
sectors = unique(sector_product$Client.Sector)
# Fit the recommender for every sector.  Each iteration assign()s a global
# object named recommendations.<make.names(sector)> holding a 2-element
# list: [1] rated opportunities, [2] predicted & actual revenue values.
# (NOTE(review): seq_along(sectors) would be safer than 1:length(sectors)
# if `sectors` could ever be empty.)
# ------------------
for(i in 1:length(sectors)){
assign(make.names(paste('recommendations.', sectors[i], sep = "")), define_opportunities(data = rdf_final, sector = sectors[i]))
}
# only opp dataframes
# for(i in 1:length(sectors)){
#   assign(make.names(paste('opportunities.', sectors[i], sep = "")), define_opportunities(data = rdf_final, sector = sectors[i]))
# }
# Merge all per-sector data frames in order to derive the customer view
# -------------------
# Fixed stacking order of the per-sector results.  These are the
# make.names() forms of the sector labels used when the
# recommendations.<sector> objects were assign()ed above.  Keeping one
# vector replaces four hand-maintained 20-item lists that could drift
# out of sync.
sector_suffixes <- c(
  "Agriculture", "Construction", "Diversified", "Financial.Institutions",
  "Healthcare", "Hotel...Leisure", "Industrials", "Manufacturing",
  "Mining...Metals", "Natural.Resources",
  "Non.Banking.Financial.Institutions", "Oil...Gas", "Other",
  "Professional.Services", "Public.Sector", "PUI", "Real.Estate",
  "Retail", "TMT", "Unknown"
)
# Each recommendations.<sector> object is a 2-element list:
#   [1] rated opportunities, [2] actual & predicted revenue values.
# Unpack both into the same per-sector variables the original script
# created one by one (they are rm()'d at the end of the script).
for (suffix in sector_suffixes) {
  rec <- get(paste0("recommendations.", suffix))
  assign(paste0("opportunities.", suffix), as.data.frame(rec[1]))
  assign(paste0("revenue_r.", suffix), as.data.frame(rec[2]))
}
# Bind the rated opportunities of all sectors into one data frame.
client_opp_full <- bind_rows(unname(mget(paste0("opportunities.", sector_suffixes))))
# Bind the revenue values into one data frame (used by the Shiny dashboard).
products_fill <- do.call(rbind, unname(mget(paste0("revenue_r.", sector_suffixes))))
# gather df
#client_opp_full = opportunities.Agriculture
# Restore display product names: data.frame() turned spaces/punctuation
# into dots, so map dots back to spaces, then double spaces back to " - ".
names(client_opp_full) = gsub(x = names(client_opp_full),
pattern = "\\.",
replacement = " ")
names(client_opp_full) = gsub(x = names(client_opp_full),
pattern = "  ",
replacement = " - ")
# Names with parentheses cannot be reconstructed mechanically -- fix the
# two affected products and the identifier columns by hand.
names(client_opp_full)[names(client_opp_full) == 'OVERDRAFTS CHEQUE '] = 'OVERDRAFTS (CHEQUE)'
names(client_opp_full)[names(client_opp_full) == 'LOAN PORTFOLIO CORPORATE '] = 'LOAN PORTFOLIO (CORPORATE)'
names(client_opp_full)[names(client_opp_full) == 'Client Sector'] = 'Client.Sector'
names(client_opp_full)[names(client_opp_full) == 'Legal Entity Subsidiary'] = 'Legal.Entity.Subsidiary'
# overdrafts (cheque); loan portfolio (corporate)
client_opp_full[is.na(client_opp_full)] = 'Not Recommended'
#client_opp_full$Legal.Entity.Subsidiary = as.character(client_opp_full$Legal.Entity.Subsidiary)
#client_opp_full$Product = as.character(client_opp_full$Product)
# Long format: one row per (entity, product), sorted by rating descending.
# NOTE(review): pred.Rating is character here, so the sort is lexical.
client_opp_full = client_opp_full[, -c(1:2)] %>% gather(Product, pred.Rating, -Legal.Entity.Subsidiary) %>% arrange(desc(pred.Rating))
# Gather the revenue & predicted revenue values the same way.
colnames(products_fill) = colnames(rdf_final)
products_fill = products_fill[, -c(1:2)] %>% gather(Product, pred.Rating, -Legal.Entity.Subsidiary) %>% arrange(desc(pred.Rating))
# convert to correct data types
#client_opp_full$Legal.Entity.Subsidiary = as.character(client_opp_full$Legal.Entity.Subsidiary)
#client_opp_full$Product = as.character(client_opp_full$Product)
#products_fill$Legal.Entity.Subsidiary = as.character(products_fill$Legal.Entity.Subsidiary)
#products_fill$Product = as.character(products_fill$Product)
# pull correct product.selection (matched CVP : ensure matched; else dropped in Shiny)
client_opp_full$Product.Selection = product_metadata$Product.Selection[match(client_opp_full$Product,
product_metadata$Product)]
# Add the revenue value for each (entity, product) pair; interaction()
# builds a composite key for the lookup.
# client_opp_full$Revenue = NULL
# colnames= match
client_opp_full$Revenue = products_fill$pred.Rating[match(interaction(client_opp_full$Legal.Entity.Subsidiary, client_opp_full$Product),
interaction(products_fill$Legal.Entity.Subsidiary, products_fill$Product))]
# Tidying: 'Not Recommended' strings become NA under as.numeric() (with a
# coercion warning) and are re-labelled afterwards.
client_opp_full$Revenue = as.numeric(client_opp_full$Revenue)
client_opp_full$Revenue = round(client_opp_full$Revenue, 2) # round values; 2 decimals
client_opp_full$Revenue[is.na(client_opp_full$Revenue)] = 'Not Recommended'
# Final column order for the dashboard view.
client_opp_full = client_opp_full %>% select(Legal.Entity.Subsidiary, Product.Selection, Product, pred.Rating, Revenue)
# client_opp_full = client_opp_full %>% select(Legal.Entity.Subsidiary, Product.Selection, Product, pred.Rating)
# Client metadata (parent, banker, sector) for joins in the dashboard.
# -------------------
metadata = sector_product %>%
select(Parent, Legal.Entity.Subsidiary, Banker, Client.Sector) %>%
unique()
# Clean up: drop the per-sector intermediates now that everything has been
# folded into client_opp_full / products_fill.  A pattern-based rm()
# replaces the two 20-argument calls, removes exactly the same
# revenue_r.* and recommendations.* objects, and cannot drift out of sync
# with the sector list.
rm(list = ls(pattern = "^(revenue_r|recommendations)\\."))
|
c9409ece10e6b40765e331b1644e249a337a59f7 | ee3262b507fab7a58db15d375d9ff05513bce10d | /server.R | 7826e81bdf297ff1edb42460ad8a8058fc5e7668 | [] | no_license | franmarq/Twittersentimentanalysis | eb15799433feb09b6a5a0ccb5fd4e91ffa6bd6a8 | 3c1f0ffad49bfc7d17c9420577e6e847433da978 | refs/heads/master | 2021-04-12T11:23:54.267128 | 2018-03-22T19:13:27 | 2018-03-22T19:28:55 | 126,383,129 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 4,259 | r | server.R | #
# This is the server logic of a Shiny web application. You can run the
# application by clicking 'Run App' above.
#
# Find out more about building applications with Shiny here:
#
# http://shiny.rstudio.com/
#
library(shiny)
library(tm)
library(wordcloud)
library(twitteR)
# Server logic: poll Twitter every minute, classify Spanish tweets as
# positive/negative/neutral by keyword match, and render a sentiment
# table, bar chart and word clouds.
shinyServer(function(input, output, session) {
# WARNING(review): OAuth credentials are hardcoded here; they should be
# revoked and moved to environment variables or a config file outside
# version control.
setup_twitter_oauth(consumer_key = "94PqLCmFU8kdgyZBkTRtvOXgv", consumer_secret = "a9fclQgZYMv2jgQl1JSkUW0P3w2Ya1K462CKjqYtT6dvFj9z9y",access_token= "17401995-4ITOMZvM8BcqKGj5tKswiARrjCO0ioe8wYj972IAs",access_secret="O4sJHrKckN5JyW1VrkICMtoLJpHxz1TT7ll6Upk9Xvh6i")
# Clock display, refreshed every second via invalidateLater().
output$currentTime <- renderText({invalidateLater(1000, session) #Here I will show the current time
paste("Current time is: ",Sys.time())})
# Re-run the whole search/classify/render pipeline every 60 seconds.
observe({
invalidateLater(60000,session)
count_positive = 0
count_negative = 0
count_neutral = 0
positive_text <- vector()
negative_text <- vector()
neutral_text <- vector()
vector_users <- vector()
vector_sentiments <- vector()
tweets_result = ""
# Spanish keyword lexicons: t1 marks a tweet positive, t2 negative.
# These are runtime data -- do not translate.
t1 <- c("bueno", "gracias", "bien","excelente","puntual","honesto","confiable","rapido","seguro","buen servicio","puntualidad",
"amable","calidad","buena atencion", "disponible", "fiesta","prestame","efectivo","puedes","disfruta")
t2 <- c("malo", "pesimo", "mal","terrible","inpuntual","deshonesto","terrible","lento","inseguro","largas","colas","corrupto","dictadura",
"regimen","hambre", "escazes","corrupcion","abuso","presos","protestas","robaron","protestar","justicia",
"comer basura","renuncia","saqueo","dictador","renuncia","sanciones","tirano","hiperinflacion")
# Fetch up to 100 popular Spanish-language tweets for the search box term.
tweets_result = searchTwitter(input$search1,n=100,resultType="popular",lang = "es")
for (tweet in tweets_result){
print(paste(tweet$screenName, ":", tweet$text))
vector_users <- c(vector_users, as.character(tweet$screenName));
# paste(..., collapse = "|") turns each lexicon into one alternation
# regex; positive keywords are checked first, so a tweet matching both
# lists counts as positive.
if (grepl(paste(t1, collapse = "|"), tweet$text, ignore.case = FALSE, perl = TRUE) == TRUE ){
count_positive = count_positive + 1
print("positivo")
vector_sentiments <- c(vector_sentiments, "Positive")
positive_text <- c(positive_text, as.character(tweet$text))
} else if (grepl(paste(t2, collapse = "|"), tweet$text, ignore.case = FALSE,perl = TRUE)) {
count_negative = count_negative + 1
print("negativo")
vector_sentiments <- c(vector_sentiments, "Negative")
negative_text <- c(negative_text, as.character(tweet$text))
} else {
count_neutral = count_neutral + 1
print("neutral")
vector_sentiments <- c(vector_sentiments, "Neutral")
neutral_text <- c(neutral_text, as.character(tweet$text))
}
}
# Per-user sentiment table shown in the UI.
df_users_sentiment <- data.frame(vector_users, vector_sentiments)
output$tweets_table = renderDataTable({
df_users_sentiment
})
# Bar chart of the three sentiment counts; the word clouds are registered
# inside this renderPlot so they refresh together with the counts.
output$distPlot <- renderPlot({
results = data.frame(tweets = c("Positive", "Negative", "Neutral"), numbers = c(count_positive,count_negative,count_neutral))
barplot(results$numbers, names = results$tweets, xlab = "Sentiment", ylab = "Counts", col = c("Green","Red","Blue"))
# wordcloud() errors on empty text, hence the length guards.
if (length(positive_text) > 0){
output$positive_wordcloud <- renderPlot({ wordcloud(paste(positive_text, collapse=" "), min.freq = 0, random.color=TRUE, max.words=100 ,colors=brewer.pal(8, "Dark2"),fixed.asp=TRUE) })
}
if (length(negative_text) > 0) {
output$negative_wordcloud <- renderPlot({ wordcloud(paste(negative_text, collapse=" "), random.color=TRUE, min.freq = 0, max.words=100 ,colors=brewer.pal(8,"Set3"),fixed.asp=TRUE) })
}
if (length(neutral_text) > 0){
output$neutral_wordcloud <- renderPlot({ wordcloud(paste(neutral_text, collapse=" "), min.freq = 0, random.color=TRUE , max.words=50 ,colors=brewer.pal(8, "Dark2"),fixed.asp=TRUE) })
}
})
})
})
a8d767b123b27712243cb4b734835440b20c4ec9 | dce955667845c701e937d7c4714e1c2bd5e17c10 | /R/fctdata_generator.R | 8d9e35d64286243480e8ef02a1fdea34c281220e | [] | no_license | Qrtsaad/CHANGEPOINT | 3b7047e21c4405bce5c51b126a857a1d0cc661da | d2a0e4e44dea6364bd7743f17ca18c3bd04040b9 | refs/heads/main | 2023-07-17T01:20:28.239312 | 2021-08-28T10:07:07 | 2021-08-28T10:07:07 | 359,913,093 | 1 | 0 | null | null | null | null | UTF-8 | R | false | false | 877 | r | fctdata_generator.R | #' Make changepoints
#'
#' @param n an integer giving the series length; must be strictly positive
#'
#' @return a numeric vector of evenly spaced changepoint locations
#'   (starting at 0) for the data_generator function; longer series get
#'   proportionally more changepoints
#' @export
#'
#' @examples
#' make_chpts(100)  # c(0, 50)
make_chpts <- function(n)
{
  # The original accepted n == 0 (guard was `n < 0`) and then crashed in
  # seq(0, -1, by = 0) with a cryptic error; reject all non-positive
  # inputs with a clear message instead.
  if (n <= 0) {
    stop("n must be positive")
  }
  # Step size shrinks (relative to n) as the series gets longer, so the
  # number of segments grows with n.
  if (n <= 100) {
    step <- n / 2
  } else if (n <= 200) {
    step <- n / 4
  } else if (n <= 500) {
    step <- n / 5
  } else if (n <= 1000) {
    step <- n / 10
  } else {
    step <- n / 20
  }
  seq(from = 0, to = n - 1, by = step)
}
#' Make means
#'
#' @param n an integer (number of changepoints); must be non-negative
#'
#' @return an integer vector of n + 1 segment means for the
#'   data_generator function, drawn from 0:10 with no two consecutive
#'   means equal
#' @export
#'
#' @examples
#' make_means(4)
make_means <- function(n)
{
  # The original iterated 1:(n+1), which yields c(1, 0) for n == -1;
  # validate instead of silently producing two values.
  if (n < 0) {
    stop("n must be non-negative")
  }
  # Preallocate instead of growing the result with c() inside the loop.
  res <- integer(n + 1)
  prev <- -1L  # sentinel outside 0:10 so the first draw is unconstrained
  for (i in seq_len(n + 1)) {
    cand <- sample(0:10, 1)
    # Redraw from the full 0:10 range until the mean differs from its
    # predecessor.  (The original redrew from 1:10, silently excluding 0,
    # and its tmp <- 0 start also blocked 0 as the first mean.)
    while (cand == prev) {
      cand <- sample(0:10, 1)
    }
    prev <- cand
    res[i] <- cand
  }
  res
}
7de998d89e7a8de3e90bdb986cdf74e6b83356b7 | 28cafe6fac761bde26ed54f809c0a7a0f23a083f | /man/check_features_targets.Rd | 31a2bd5de872cd0badfdc266ebd76cbf453ed3c7 | [] | no_license | tdhock/penaltyLearning | 20bf238b8052b677ab94ad260b4943258b73cb4a | ecf9f5d5b0d8951a14956783780df15f26740c4c | refs/heads/master | 2021-11-27T00:36:54.241660 | 2021-04-21T21:44:15 | 2021-04-21T21:44:15 | 78,790,677 | 14 | 7 | null | 2021-02-05T18:07:08 | 2017-01-12T21:58:14 | R | UTF-8 | R | false | false | 460 | rd | check_features_targets.Rd | \name{check_features_targets}
\alias{check_features_targets}
\title{check features targets}
\description{stop with an informative error if there is a problem with the
feature or target matrix.}
\usage{check_features_targets(feature.mat,
target.mat)}
\arguments{
\item{feature.mat}{n x p numeric input feature matrix.}
\item{target.mat}{n x 2 matrix of target interval limits.}
}
\value{number of observations/rows.}
\author{Toby Dylan Hocking}
|
65a1fae0eb4ade565de0505f1db0e3ea122f1a24 | cac77f2c9b5a33da7e285fa82a710477814a24c3 | /doc/dual_trajectory.R | bb8d55c12749fd84404ed8bf7c90858a21ffd23e | [] | no_license | haoshu2017/bayestraj | 485598e43234527a810d81511a886ff767d8d4a5 | 96f8c01f23d8b68490c51bc15ff423fe69854539 | refs/heads/master | 2022-04-29T02:56:15.793123 | 2020-04-29T00:42:28 | 2020-04-29T00:42:28 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 3,658 | r | dual_trajectory.R | ## ---- include = FALSE----------------------------------------------------
# Purled vignette code: simulate dual-trajectory data and fit the BayesTraj
# dual trajectory model.  Lines beginning `## ----` are knitr chunk
# headers and must be kept as-is.
knitr::opts_chunk$set(
collapse = TRUE,
comment = "#>"
)
## ----setup---------------------------------------------------------------
library(BayesTraj)
## ------------------------------------------------------------------------
# Simulation settings for the synthetic two-series data set.
N=1000 #number of units
T1=9 #Time periods for Group 1
T2=9 #Time periods for Group 2
pi1=c(0.5,0.2,0.3) #Group 1 membership probabilities
# Transition matrix: row = Series 1 group, column = Series 2 group.
pi1_2=matrix(c(0.3,0.3,0.4,
0.49,0.50,0.01,
0.7,0.2,0.1),
nrow=3,ncol=3,byrow=TRUE)
K1 = length(pi1) #Number of groups in series 1
K2 = dim(pi1_2)[2] #Number of groups in series 2
# Polynomial trajectory coefficients (intercept, linear, quadratic), one
# row per group.
#Coefficients for Series 1
beta1=matrix(c(110,5,-0.5,
111,-2,0.1,
118,3,0.1),nrow=3,ncol=3,byrow=TRUE)
#Coefficients for Series 2
beta2=matrix(c(110,6,-0.6,
111,-3,0.1,
112,2,0.7),nrow=3,ncol=3,byrow=TRUE)
sigma1=2 #standard deviation of Series 1 outcomes
sigma2=4 #standard deviation of Series 2 outcomes
# Seed for a reproducible simulated data set.
set.seed(1)
data = gen_data_dual(N=N,
T1=T1,
T2=T2,
pi1=pi1,
pi2=pi1_2,
beta1=beta1,
beta2=beta2,
sigma1=sigma1,
sigma2=sigma2,
poly = 2) #degree of polynomial
## ------------------------------------------------------------------------
# Design matrices and outcomes for the two series.
X1=data$X1
X2=data$X2
y1=data$Y1
y2=data$Y2
## ------------------------------------------------------------------------
print(head(X1,18))
## ------------------------------------------------------------------------
print(head(y1,18))
## ------------------------------------------------------------------------
# MCMC settings and functional-form matrices (all ones = every polynomial
# term active for every group).
iter = 5000
thin = 1
z1 = matrix(1,nrow=K1,ncol=dim(X1)[2])
z2 = matrix(1,nrow=K2,ncol=dim(X2)[2])
model = dualtraj(X1=X1, #data matrix Series 1
X2=X2, #data matrix Series 2
y1=y1, #outcomes Series 1
y2=y2, #outcomes Series 2
K1=K1, #number of groups Series 1
K2=K2, #number of groups Series 2
z1=z1, #functional form matrix Series 1
z2=z2, #functional form matrix Series 2
iterations=iter, #number of iterations
thin=thin, #thinning
dispIter=1000) #Print a message every 1000 iterations
## ------------------------------------------------------------------------
# Example of a restricted functional form: 0 drops the quadratic term for
# group 3 (shown for illustration; not passed to the fit above).
z1_=matrix(c(1,1,1,
1,1,1,
1,1,0),nrow=3,ncol=3,byrow=TRUE)
## ------------------------------------------------------------------------
# Inspect posterior draws stored on the fitted model object.
head(model$beta1[[1]]) #Series 1 group 1's coefficients
head(model$beta1[[2]]) #Series 1 group 2's coefficients
head(model$beta1[[3]]) #Series 1 group 3's coefficients
head(model$sigma1) #Series 1 variance - NOT THE STANDARD DEVIATION
model$c1[1:6,1:10] #Series 1 unit-level group memberships
head(model$pi1) #Series 1 group-membership probabilities
head(model$pi2) #Series 2 group-membership probabilities
model$pi1_2[1,,] #Transition probabilities from Series 1 Group 1.
## ------------------------------------------------------------------------
burn = 0.9
summary = summary_dual(model,X1,X2,y1,y2,z1,z2,burn)
## ------------------------------------------------------------------------
print(summary$estimates)
## ------------------------------------------------------------------------
print(summary$log.likelihood)
## ------------------------------------------------------------------------
plot(model$beta1[[1]][1000:5000,1],type='l')
|
629073ca685815be2dfa7a2bbfb11b4acecadc98 | 876f1e027ec3bb6325df1cd46aa3e0ada1cb47b4 | /Stochastic_process/HW_4/R_proj/h_dvd_2.R | b588c42d17339475fc44746e34cc20d496a1cb80 | [] | no_license | hazirliver/6_sem | d0c9b91bd54118dd04b6ef1c4617520fb1f423a1 | 32fc28e8279ca6cbf1d16bcf38787f72009da64a | refs/heads/master | 2021-01-04T03:10:53.292658 | 2020-05-19T08:49:24 | 2020-05-19T08:49:24 | 240,349,703 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 2,669 | r | h_dvd_2.R | png(filename = "../img/2d_1_ext.png",
width = 1920, height = 1080,
res = 96 * 2)
plot(pairs.extnd.list[[1]], type = "l", main = "Trajectory 1, h = 0.01",
xlab = "", ylab = "")
dev.off()
png(filename = "../img/2d_6_ext.png",
width = 1920, height = 1080,
res = 96 * 2)
plot(pairs.extnd.list[[6]], type = "l", main = "Trajectory 6, h = 0.01",
xlab = "", ylab = "")
dev.off()
png(filename = "../img/2d_70_ext.png",
width = 1920, height = 1080,
res = 96 * 2)
plot(pairs.extnd.list[[70]], type = "l", main = "Trajectory 70, h = 0.01",
xlab = "", ylab = "")
dev.off()
png(filename = "../img/2d_140_ext.png",
width = 1920, height = 1080,
res = 96 * 2)
plot(pairs.extnd.list[[140]], type = "l", main = "Trajectory 140, h = 0.01",
xlab = "", ylab = "")
dev.off()
# Overlay of trajectory 1 computed with the coarse (h = 0.02, pairs.list)
# and fine (h = 0.01, pairs.extnd.list) step sizes on one figure.
png(filename = "../img/2d_1_1.png",
    width = 1920, height = 1080,
    res = 96 * 2)
plot(pairs.list[[1]], type = "l",
     main = "Trajectory 1, h = 0.02, h = 0.01", xlab = "", ylab = "",
     col = "#82c7ff")
lines(pairs.extnd.list[[1]], type = "l", col = "#ff82a1")
legend("topright", inset = 0.02, legend = c("h = 0.02", "h = 0.01"),
       col = c("#82c7ff", "#ff82a1"), lwd = c(1,1))
dev.off()
# Zoomed-in version: only the first steps of both trajectories, with every
# second fine-step point marked as an open circle ("interm. points").
png(filename = "../img/2d_1_1_reduced.png",
    width = 1920, height = 1080,
    res = 96 * 2.5)
# type = "n" sets up the axes from the fine trajectory without drawing it.
plot(pairs.extnd.list[[1]][1:20,], type = "n", main = "Trajectory 1 reduced, h = 0.02, 0.01 with interm. points",
     xlab = "", ylab = "")
lines(pairs.list[[1]][2:10,], type = "l", col = "#82c7ff",
      lwd = 2)
points(pairs.extnd.list[[1]][seq(4,19,2),], lwd = 1)
lines(pairs.extnd.list[[1]][3:19,], type = "l", col = "#ff82a1",
      lwd = 2)
legend("topleft", inset = 0.02, legend = c("h = 0.02", "h = 0.01", "interm. points"),
       col = c("#82c7ff", "#ff82a1", "black"), lwd = c(2,2,1), lty = c(1,1,NA), pch = c(NA,NA,1))
dev.off()
##################
# Component-wise sample variances of each fine-step (h = 0.01) trajectory:
# one row per trajectory, one column per coordinate.
# NOTE(review): var_foo / sq.var_foo and the mean_vars / mean_sq.vars objects
# used in the comparison table below come from the companion h = 0.02 script;
# this file does not run standalone.
vars.extnd <- t(sapply(pairs.extnd.list, function(x) apply(x, 2, var_foo)))
head(vars.extnd,5)
tail(vars.extnd,5)
# Average variance per coordinate across all trajectories.
mean_vars.extnd <- colMeans(vars.extnd)
mean_vars.extnd
# Same summary using the sq.var_foo estimator.
sq.vars.extnd <- t(sapply(pairs.extnd.list, function(x) apply(x, 2, sq.var_foo)))
head(sq.vars.extnd ,5)
tail(sq.vars.extnd ,5)
mean_sq.vars.extnd <- colMeans(sq.vars.extnd )
mean_sq.vars.extnd
# Side-by-side table of the coarse-step (h = 0.02) and fine-step (h = 0.01)
# summaries.
compare_table <- data.table(mean_vars = mean_vars,
                            mean_vars.extnd = mean_vars.extnd,
                            mean_sq.vars = mean_sq.vars,
                            mean_sq.vars.extnd = mean_sq.vars.extnd)
compare_table
# Ratios of the coarse-step summaries to the fine-step summaries.
compare_table.rel <- data.table(compare_table[,1] / compare_table[,2],
                                compare_table[,3] / compare_table[,4])
compare_table.rel
|
f2684a8bb6c07ac94387f4cee07abfa1489ebb84 | 5bdc4b4c6b2a6ea11a1f020404f496fa8160ef07 | /man/comprSensitivity.Rd | 6607ae646308910b71ba60ab77187af9d0679423 | [] | no_license | Rong0707/survSens | fcc16eef2e57896a9b660f4571722dee483f7fed | 1d25c250cabd28b8d6716dd059ee2c4b473fa47b | refs/heads/master | 2023-06-09T17:15:51.072128 | 2023-05-27T23:47:38 | 2023-05-27T23:47:38 | 255,234,016 | 2 | 0 | null | null | null | null | UTF-8 | R | false | false | 3,140 | rd | comprSensitivity.Rd | \name{comprSensitivity}
\alias{comprSensitivity}
%- Also NEED an '\alias' for EACH other topic documented here.
\title{
Sensitivity analysis of treatment effect to unmeasured confounding with competing risks outcomes.
}
\description{
\code{comprSensitivity} performs a dual-parameter sensitivity analysis of treatment effect to unmeasured confounding in observational studies with competing risks outcomes.
}
\usage{
comprSensitivity(t, d, Z, X, method, zetaT = seq(-2,2,by=0.5),
zetat2 = 0, zetaZ = seq(-2,2,by=0.5), theta = 0.5, B = 50, Bem = 200)
}
\arguments{
\item{t}{survival outcomes with competing risks.}
\item{d}{indicator of occurrence of event, with \code{d == 0} denotes right censoring, \code{d==1} denotes event of interest, \code{d==2} denotes competing risk.}
\item{Z}{indicator of treatment.}
\item{X}{pre-treatment covariates that will be included in the model as measured confounders.}
\item{method}{needs to be one of \code{"stoEM_reg"}, \code{"stoEM_IPW"} and \code{"EM_reg"}.}
\item{zetaT}{range of coefficient of \eqn{U} in the event of interest response model.}
\item{zetat2}{value of coefficient of \eqn{U} in the competing risk response model}
\item{zetaZ}{range of coefficient of \eqn{U} in the treatment model.}
\item{theta}{marginal probability of \eqn{U=1}.}
\item{B}{iteration in the stochastic EM algorithm.}
\item{Bem}{iteration used to estimate the variance-covariance matrix in the EM algorithm.}
}
\details{
This function performs a dual-parameter sensitivity analysis of treatment effect to unmeasured confounding by either drawing simulated potential confounders \eqn{U} from the conditional distribution of \eqn{U} given observed response, treatment and covariates or the Expectation-Maximization algorithm. We assume \eqn{U} is following \eqn{Bernoulli(\pi)} (default 0.5). Given \eqn{Z}, \eqn{X} and \eqn{U}, the hazard rate of the jth type of failure is modeled using the Cox proportional hazards (PH) regression:
\deqn{\lambda_j (t | Z, X, U) = \lambda_{j0} (t) exp( \tau_j Z + X' \beta_j + \zeta_j U).}
Given \eqn{X} and \eqn{U}, \eqn{Z} follows a generalized linear model:
\deqn{P(Z=1 | X, U) = \Phi(X' \beta_z + \zeta_z U).}
}
\value{
\item{tau1}{a data.frame with zetaz, zetat1, zetat2, tau1, tau1.se and t statistic in the event of interest response model.}
\item{tau2}{a data.frame with zetaz, zetat1, zetat2, tau2, tau2.se and t statistic in the competing risks response model.}
}
\references{
Huang, R., Xu, R., & Dulai, P. S. (2019). Sensitivity Analysis of Treatment Effect to Unmeasured Confounding in Observational Studies with Survival and Competing Risks Outcomes. arXiv preprint arXiv:1908.01444.
}
\author{
Rong Huang
}
\examples{
#load the dataset included in the package
data(comprdata)
#stochastic EM with regression
tau.sto = comprSensitivity(comprdata$t, comprdata$d, comprdata$Z, comprdata$X,
"stoEM_reg", zetaT = 0.5, zetaZ = 0.5, B = 3)
#EM with regression
tau.em = comprSensitivity(comprdata$t, comprdata$d, comprdata$Z, comprdata$X,
"EM_reg", zetaT = 0.5, zetaZ = 0.5, Bem = 50)
}
\keyword{sensitivity analysis}
\keyword{competing risks outcomes}
|
a496946a484c79a9871ad453cffd8f079de0d2c5 | 161cd6cee62d2c65ae23c6e9eeec9256abe78634 | /preprocessing/preprocessing.R | 68f9131b11ff0c8552c47133666a968a35abcc61 | [] | no_license | NathanKolbow/MLBPredictions | a74bb6e6ae88abb3e0a7d16efe6c9f6e0838e3be | a801ed11659498251fac46e52619b0cce41a0f58 | refs/heads/master | 2023-01-31T04:34:12.536393 | 2020-12-17T22:43:55 | 2020-12-17T22:43:55 | 310,713,202 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 4,919 | r | preprocessing.R | library(xgboost)
library(tidyverse)
library(caret)
library(fastDummies)
library(tidymodels)
library(magrittr)
library(lme4)
library(tictoc)
library(baseballr)
# reading in data
# Raw 2019 pitch-level Statcast export.
pitches_2019 <- read_csv("project_data.csv")
# Example MLBAM id lookup -- NOTE(review): get_player_id is not used below.
get_player_id <- baseballr::playerid_lookup("Baez", "Javier")
# getting rid of columns that the model won't need.
pitches_2019 %<>%
  select(-c("spin_dir", "spin_rate_deprecated", "break_angle_deprecated", "break_length_deprecated", "game_type", "type",
            "hit_location", "bb_type", "game_year", "hc_x", "hc_y", "tfs_deprecated", "tfs_zulu_deprecated", "umpire", "sv_id",
            "fielder_2", "hit_distance_sc", "launch_speed", "launch_angle", "pitcher_1", "fielder_3", "fielder_4", "fielder_5",
            "fielder_6", "fielder_7", "fielder_8", "fielder_9", "estimated_ba_using_speedangle", "estimated_woba_using_speedangle",
            "woba_value", "woba_denom", "babip_value", "iso_value", "launch_speed_angle", "at_bat_number", "pitch_number",
            "pitch_name", "home_score", "away_score", "fld_score", "post_away_score", "post_home_score",
            "post_fld_score", "if_fielding_alignment", "of_fielding_alignment", "barrel", "pitcher", "batter",
            "events", "des", "home_team", "away_team"))
# Convert the descriptions to strike/ball/hit/hit by pitch factors
# (each detailed Statcast description collapses to one of four outcomes).
pitches_2019$description <- factor(pitches_2019$description,
                                   levels = c("hit_into_play", "called_strike", "swinging_strike_blocked", "ball",
                                              "foul", "swinging_strike", "hit_into_play_score", "hit_into_play_no_out",
                                              "foul_tip", "blocked_ball", "hit_by_pitch", "foul_bunt", "missed_bunt",
                                              "bunt_foul_tip", "pitchout"),
                                   labels = c("hit", "strike", "strike", "ball", "hit", "strike", "hit", "hit",
                                              "hit", "ball", "hit_by_pitch", "strike", "strike", "strike", "ball"))
# Filter out hit-by-pitch incidents
pitches_2019 <- pitches_2019 %>%
  filter(description != "hit_by_pitch")
# turn on_3b/2b/1b into binary (yes/no): NA means the base was empty.
pitches_2019 <- pitches_2019 %>%
  mutate(on_3b_yes_no = ifelse(is.na(on_3b), 0, 1)) %>%
  mutate(on_2b_yes_no = ifelse(is.na(on_2b), 0, 1)) %>%
  mutate(on_1b_yes_no = ifelse(is.na(on_1b), 0, 1)) %>%
  select(-c("on_3b", "on_2b", "on_1b"))
# remove pitch_type: KN, "", EP, FO. and make KC = CU
# (knuckle-curve is folded into curveball; rare/empty pitch types dropped).
pitches_2019$pitch_type[pitches_2019$pitch_type == 'KC'] <- 'CU'
pitches_2019 <- pitches_2019[!(pitches_2019$pitch_type =="KN" |
                                 pitches_2019$pitch_type =="" |
                                 pitches_2019$pitch_type =="EP" |
                                 pitches_2019$pitch_type =="FO"),]
# player look up by mlbam key (Chadwick register shipped with baseballr)
chadiwck_reduced <- chadwick_player_lu_table %>% select (key_mlbam, name_last, name_first)
# remove NA to make df smaller
chadiwck_reduced <- chadiwck_reduced[!is.na(chadiwck_reduced$key_mlbam),]
# getting each catcher's name: fielder_2_1 holds the catcher's MLBAM id.
pitches_2019 <- merge(x = pitches_2019, y = chadiwck_reduced, by.x = "fielder_2_1", by.y = "key_mlbam")
pitches_2019 <- mutate(pitches_2019, catcher_name = paste(name_first, name_last, sep = " "))
pitches_2019 <- subset(pitches_2019, select = -c(name_last, name_first))
#can now remove fielder_2 b/c we have catcher name
pitches_2019 <- pitches_2019 %>%
  select(-c("fielder_2_1"))
#umpires: attach the home-plate umpire for each game.
umpire_ids_game_pk <- read_csv("umpires_ids_game_pk.csv")
names(umpire_ids_game_pk) = c("id", "position", "umpire_name", "game_pk", "game_date")
# Keep home-plate umpires during the 2019 regular season.
# NOTE(review): "2019-9-30" has an unpadded month; if game_date is character
# this is a lexicographic comparison -- confirm game_date is parsed as a Date.
hp_umpires <- umpire_ids_game_pk %>%
  filter(position == "HP") %>%
  filter(game_date < "2019-9-30" & game_date > "2019-03-22") %>%
  select(c("game_pk", "umpire_name"))
# Inner join: pitches from games without an HP umpire record are dropped.
pitches_2019 <- merge(pitches_2019, hp_umpires, by = c("game_pk"))
pitches_2019 %<>% select(-c("game_pk"))
#nrow(pitches_2019) = 380841 before umps
#nrow(pitches_2019) = 378517 - after umps
# player_name in Statcast is the batter; rename for clarity.
pitches_2019 %<>%
  mutate(batter_name = player_name) %>%
  select(-c("player_name"))
# turning strings into factors
# Convert the modeling categorical columns to factors in one pass.
# across(all_of(...)) keeps the column list in one place instead of twelve
# near-identical mutate(col = as.factor(col)) calls; the result is identical.
pitches_2019 %<>%
  mutate(across(all_of(c("pitch_type", "batter_name", "stand", "p_throws",
                         "balls", "strikes", "outs_when_up", "inning",
                         "inning_topbot", "pitcher_name", "catcher_name",
                         "umpire_name")),
                as.factor))
# turning pitch type variable into dummy variables
# (one indicator column per remaining pitch_type level, e.g. pitch_type_FF).
pitches_2019 <- fastDummies::dummy_cols(pitches_2019,select_columns = c("pitch_type"))
# Drop any rows that still contain missing values before export.
pitches_2019 <- pitches_2019[complete.cases(pitches_2019),]
# Model-ready dataset. NOTE(review): write.csv also writes row names as an
# unnamed first column; pass row.names = FALSE if that is unwanted.
write.csv(pitches_2019, "pitches.csv")
|
fa787d9b052f9d291a96a0114c6e06e6394892f5 | 385d3fae6b145716027feae92aefbf02da8c76f2 | /tests/testthat/test-errorHandling.R | e0aa2673fb2998883425b93809e39872108e98a2 | [] | no_license | AnotherSamWilson/ParBayesianOptimization | c818c31e6bd0133db0d89c20abc11e2fce409ba9 | af44dd96f9c0fa1bde3ae9bf3fa53d735bac119f | refs/heads/master | 2022-11-05T17:50:55.350676 | 2022-10-18T14:05:24 | 2022-10-18T14:05:24 | 155,993,502 | 102 | 22 | null | 2022-07-28T05:43:59 | 2018-11-03T14:57:56 | R | UTF-8 | R | false | false | 2,359 | r | test-errorHandling.R | context('errorHandling')
# errorHandling = "continue": the optimizer should tolerate any number of
# objective-function errors and finish all iterations with status "OK".
testthat::test_that(
  "continue"
  , {
    skip_on_cran()
    set.seed(10)
    # Smooth concave score surface with maximum 1000 at (x, y) = (5, -10).
    sf <- function(x,y) 1000 - (x-5)^2 - (y + 10)^2
    # Objective that errors on roughly half of its evaluations.
    FUN <- function(x,y) {
      if (runif(1) > 0.5) stop("You foo'd when you should have bar'd.")
      return(list(Score = sf(x,y)))
    }
    bounds = list(
      x = c(0,15)
      , y = c(-20,100)
    )
    optObj <- bayesOpt(
      FUN
      , bounds
      , initPoints = 3
      , iters.n = 6
      , errorHandling = "continue"
      , verbose = 1
    )
    # Errors are tolerated indefinitely, so the run must end normally.
    expect_equal(
      optObj$stopStatus
      , "OK"
    )
  }
)
# Numeric errorHandling: once FUN has errored more than the given number of
# times (2), the run should stop early with the documented message.
testthat::test_that(
  "Error Limit"
  , {
    skip_on_cran()
    set.seed(10)
    # Same concave test surface as the "continue" test.
    sf <- function(x,y) 1000 - (x-5)^2 - (y + 10)^2
    # Objective that errors on roughly half of its evaluations, so the
    # error limit of 2 is exceeded well before 8 iterations complete.
    FUN <- function(x,y) {
      if (runif(1) > 0.5) stop("You foo'd when you should have bar'd.")
      return(list(Score = sf(x,y)))
    }
    bounds = list(
      x = c(0,15)
      , y = c(-20,100)
    )
    optObj <- bayesOpt(
      FUN
      , bounds
      , initPoints = 3
      , iters.n = 8
      , errorHandling = 2
      , verbose = 1
    )
    # The internal helper formats the early-stop status string.
    expect_equal(
      optObj$stopStatus
      , ParBayesianOptimization:::makeStopEarlyMessage("Errors from FUN exceeded errorHandling limit")
    )
  }
)
# Same error-limit behavior, but with a one-dimensional parameter space to
# exercise the 1D code path.
testthat::test_that(
  "1D Error Handling"
  , {
    skip_on_cran()
    set.seed(14)
    # Concave 1D score surface with maximum 1000 at x = 0.
    sf <- function(x) 1000 - x^2
    # Objective that errors on roughly half of its evaluations.
    FUN <- function(x) {
      if (runif(1) > 0.5) stop("You foo'd when you should have bar'd.")
      return(list(Score = sf(x)))
    }
    bounds = list(
      x = c(-1000,1000)
    )
    optObj <- bayesOpt(
      FUN
      , bounds
      , initPoints = 3
      , iters.n = 8
      , errorHandling = 2
      , verbose = 1
    )
    optObj$scoreSummary
    # Error limit of 2 must trigger the early-stop status message.
    expect_equal(
      optObj$stopStatus
      , ParBayesianOptimization:::makeStopEarlyMessage("Errors from FUN exceeded errorHandling limit")
    )
  }
)
# A FUN whose return-list elements change length between calls (ot is
# sometimes length 1, sometimes length 2) is malformed and must raise an
# error rather than being silently accepted.
testthat::test_that(
  "Malformed FUN Return"
  , {
    skip_on_cran()
    set.seed(14)
    sf <- function(x) 1000 - x^2
    # Returns an extra element "ot" with an inconsistent length ~25% of the time.
    FUN <- function(x) {
      ot <- if (runif(1) > 0.75) c(0,1) else 1
      return(list(Score = sf(x), ot = ot))
    }
    bounds = list(
      x = c(-1000,1000)
    )
    expect_error(
      bayesOpt(
        FUN
        , bounds
        , initPoints = 3
        , iters.n = 8
        , errorHandling = 2
        , verbose = 1
      )
    )
  }
)
|
59b71939725999c1d8c60774b084ed7c6d9e1b66 | 14751e8a07a4f9ee40a1adc5411e7e8a68d13ad1 | /probabilidad/probabilidad.R | ca1f9d2ab738db6ed41a48c1fe407fc2257574f9 | [] | no_license | jrlacalle/material_docente | 4ba380a356059d8f692844bcdd02d774038e8d76 | f7470910c5931b66f981b76875e6e89cd8e477ee | refs/heads/main | 2023-06-04T22:47:04.655249 | 2021-06-23T14:46:48 | 2021-06-23T14:46:48 | 379,634,711 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 348 | r | probabilidad.R | # Definimos el espacio muestral
# Empirical demonstration of the law of large numbers for a fair coin.
# The sample space is encoded numerically: 0 = "cara", 1 = "cruz", so the
# mean of the draws is the observed relative frequency of "cruz".
# (The original first line, factor(c(0,1), levels = c("cara","cruz")),
# produced only NA -- the values 0/1 do not match those labels -- and was
# immediately overwritten; that dead code is removed.)
m <- c(0, 1)

# Draw `n` flips with replacement from `space` and return the observed
# proportion of 1s ("cruz").
prop_cruz <- function(space, n) {
  flips <- sample(space, n, replace = TRUE)
  sum(flips) / length(flips)
}

# The observed proportion converges toward the true probability 0.5 as
# the sample size grows (same sizes as the original script).
for (n in c(5, 25, 100, 1000, 1000000)) {
  print(prop_cruz(m, n))
}
|
fad7106506d9a02b8779c43c353bfc25b5ed89a8 | a069ee82caf0f49641bdb9c1161f1da6a02a6083 | /ExerciseCodes/E6.R | a43fabf6754bf66bff64fc01b5b8743aa7509752 | [] | no_license | nhannguyen2610/R_Statistics | 154fc9ee685e885533737db8b2028f5611ea2e53 | 1233cd76c2278ea50fd65d534e90951dd32c56c0 | refs/heads/master | 2021-05-10T17:03:24.604914 | 2018-02-27T08:27:52 | 2018-02-27T08:27:52 | 118,596,706 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 2,600 | r | E6.R | setwd("~/Desktop/S-S/Aalto/IN Stats/Excercise/ex_18")
# 1b
# Require e1071 package to calculate skewness and kurtosis
library(e1071)
# Bowman-Shenton (Jarque-Bera) normality test statistic:
#   n * (skewness^2 / 6 + kurtosis^2 / 24)
# where n = length(x). Under normality this is asymptotically chi-squared
# with 2 degrees of freedom. Relies on skewness()/kurtosis() from the e1071
# package loaded above.
BS <- function(x) {
  length(x) * (skewness(x)^2 / 6 + kurtosis(x)^2 / 24)
}
# BS statistic for uniform samples of increasing size: the uniform is
# non-normal, so the statistic grows with n.
BS(runif(20))
BS(runif(50))
BS(runif(100))
BS(runif(500))
BS(runif(1000))
#BS-statistic starts to 'explode' (grow large) with relatively high n
#the p-value can be computed as a two-sided tail of the chi-squared(2) law
# NOTE(review): `sample` shadows base::sample here; a different name would
# be safer.
sample <- runif(50)
bs1 <- BS(sample)
pval <- 2*min(pchisq(bs1,2),1-pchisq(bs1,2))
pval
# let's test with a bimodal distribution (mixture of two normals 5 apart):
# the statistic again grows quickly with n.
BS(c(rnorm(10),rnorm(10)+5))
BS(c(rnorm(20),rnorm(20)+5))
BS(c(rnorm(50),rnorm(50)+5))
BS(c(rnorm(100),rnorm(100)+5))
BS(c(rnorm(500),rnorm(500)+5))
# 2a
?qqplot
?qqline
?rchisq
?runif
# prepare data sets: normal, right-skewed (chi-sq), uniform, and a normal
# sample contaminated with a single extreme outlier.
norm.sample <- rnorm(50)
chi.sample <- rchisq(50, df=3)
uni.sample <- runif(50, min = 0, max = 1)
outlier.nsample <- rnorm(50)
outlier.nsample[1] = -15 # add an outlier to the sample
# plot histograms for datasets on one 2x2 panel
par(mfrow=c(2,2))
hist(norm.sample); hist(chi.sample); hist(uni.sample); hist(outlier.nsample)
par(mfrow=c(1,1))
# Calculate the sample means, medians, and the skewness and kurtosis values of the samples.
summary(norm.sample)
summary(chi.sample)
summary(uni.sample)
summary(outlier.nsample)
# Vectorize our computations; collect all data into data frame.
# NOTE(review): `data` shadows the base function data(); consider renaming.
data <- data.frame(norm.sample,chi.sample,uni.sample,outlier.nsample)
apply(data,2,median)
v <- apply(data,2,skewness)
k <- apply(data,2,kurtosis)
# Require tseries package to do Bowman and Shenton test (same as Jarque-Bera test)
library(tseries)
apply(data, 2, jarque.bera.test)
# jarque.bera.test(norm.sample)
# jarque.bera.test(chi.sample)
# jarque.bera.test(uni.sample)
# jarque.bera.test(outlier.nsample)
# Plot rank plots of each sample, and test normality using the
# Shapiro-Wilk normality test.
apply(data, 2, shapiro.test)
par(mfrow=c(2,2))
# Plot qqplots for every sample data (points near the line indicate normality)
qqnorm(norm.sample); qqline(norm.sample)
qqnorm(chi.sample); qqline(chi.sample)
qqnorm(uni.sample); qqline(uni.sample)
qqnorm(outlier.nsample); qqline(outlier.nsample)
par(mfrow=c(1,1))
# Bart rolls one dice 120 times and gets the score one 12 times, two 16
# times, three 20 times, four 17 times, five 22 times, and the score six
# 33 times. Use the Chi-square goodness of fit test to test the fairness of Bart's
# dice.
fre_observe <- c(12,16,20,17,22,33)
barplot(fre_observe)
expected_prop <- c(1,1,1,1,1,1)/6 # or use expected_prop = rep(1/6,6)
chisq.test(x = fre_observe, p = expected_prop, correct = F)
# Use significance level of 3%. The null hypothesis is rejected because p-value = 0.022
|
f306d7116dc00dd3ed4a6f189730df25b5060c65 | 6263ea947bf83a6a7267f42dfccce47f9ee7a6da | /ml/plumb.R | aeff0f3d32117d116b2540e120efa64a1a62a66a | [] | no_license | Is0tope/potluck | f99b3f34e9aec90eb2b77bf71c7de33769838550 | d89aedcc62912133f5674006973ad91240a5b61a | refs/heads/master | 2021-05-09T17:29:45.577755 | 2018-01-28T09:16:26 | 2018-01-28T09:16:26 | 119,140,367 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 85 | r | plumb.R | library(plumber)
# Build a plumber router from the endpoint definitions in api.R.
pr <- plumber::plumb("api.R")
# Serve the API on all interfaces (0.0.0.0) at port 8000; this call blocks
# until the server is stopped.
pr$run(host = '0.0.0.0',port = 8000)
2027e598214ad269085fb0cb0a99f158093b8511 | 253d4a133a8a6e568f30523447358689d182f473 | /man/contour.stars.Rd | 19ee50e0e107d988addf8056eb85e66b0c37cba7 | [
"Apache-2.0"
] | permissive | r-spatial/stars | 081a7fc96089aeb276051107911145d17365de79 | 0e17daf1c49b69f990ec77275e80cfd471210aee | refs/heads/main | 2023-09-04T10:58:52.888008 | 2023-08-19T09:15:37 | 2023-08-19T09:15:37 | 78,360,080 | 489 | 126 | Apache-2.0 | 2023-03-20T09:17:18 | 2017-01-08T17:48:24 | R | UTF-8 | R | false | true | 812 | rd | contour.stars.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/plot.R
\name{contour.stars}
\alias{contour.stars}
\title{plot contours of a stars object}
\usage{
\method{contour}{stars}(x, ...)
}
\arguments{
\item{x}{object of class \code{stars}}
\item{...}{other parameters passed on to \link[graphics]{contour}}
}
\description{
plot contours of a stars object
}
\details{
this uses the R internal contour algorithm, which (by default) plots contours; \link{st_contour} uses the GDAL contour algorithm that returns contours as simple features.
}
\examples{
d = st_dimensions(x = 1:ncol(volcano), y = 1:nrow(volcano))
r = st_as_stars(t(volcano))
r = st_set_dimensions(r, 1, offset = 0, delta = 1)
r = st_set_dimensions(r, 2, offset = 0, delta = -1)
plot(r, reset = FALSE)
contour(r, add = TRUE)
}
|
beed0c2af0f152adcb77ac451fb488a120d301bf | 08013da4e6dd2088d30fb462883f70f7c8a5b224 | /R/surv_prob.R | 41f9cbcb71e61b43734212d6150ec0d13a1c17ae | [] | no_license | apjacobson/parmsurvfit | 312341e5b918bc52ed0007ea89c034e2533ba762 | 131e2bdf3b353adb99b60c09c141af790493d4ea | refs/heads/master | 2020-03-29T10:08:45.079224 | 2018-12-07T04:12:02 | 2018-12-07T04:12:02 | 149,791,202 | 2 | 1 | null | 2018-12-06T19:55:46 | 2018-09-21T16:42:15 | R | UTF-8 | R | false | false | 2,489 | r | surv_prob.R |
#' Survival probability based on parametric distribution
#'
#' Computes probability of survival beyond time t given that the data follows a specified parametric distribution.
#' @param data A dataframe containing a time column and a censor column.
#' @param dist A string name for a distribution that has a corresponding density function and a distribution function.
#' Examples include "norm", "lnorm", "exp", "weibull", "logis", "llogis", "gompertz", etc.
#' @param x A scalar quantity, time at which the probability of survival is computed
#' @param lower.tail Logical; if \code{FALSE} (default), probability is P(T > \code{x}), otherwise, P(T < \code{x}).
#' @param time The string name of the time column of the dataframe. Defaults to "time".
#' @param censor The string name of the censor column of the dataframe. Defaults to "censor". The censor column must be
#' a numeric indicator variable where complete times correspond to a value of 1 and incomplete times correspond to 0.
#' @param by The string name of a grouping variable. If specified, the function prints probability for each group
#' individually along with the overall probability.
#' Variable can contain logical, string, character, or numeric data.
#' @return Invisibly, the probability computed on the full dataset (all levels combined).
#' @examples
#' data("rearrest")
#' surv_prob(rearrest, "lnorm", 110, time = "months")
#' surv_prob(rearrest, "weibull", 90, time = "months", lower.tail = TRUE)
#' @export
surv_prob <- function(data, dist, x, lower.tail = FALSE, time = "time", censor = "censor", by = "") {
  # When a grouping variable is supplied, report each level separately by
  # recursing on the subset (with the grouping column dropped) before the
  # overall probability is printed below.
  if (by != "") {
    data[[by]] <- as.factor(data[[by]])
    for (lvl in levels(data[[by]])) {
      # subset to this level and drop the grouping column
      d2 <- data[data[[by]] == lvl, ]
      d2[[by]] <- NULL
      cat("\nFor level =", lvl, "\n")
      surv_prob(d2, dist, x, lower.tail, time, censor)
      cat("\n")
    }
  }
  # Fit the requested parametric distribution to the (possibly censored) data.
  fit <- fit_data(data, dist, time, censor)
  # Assemble the CDF arguments: the quantile, the fitted parameter estimates,
  # and the tail direction. split() turns the named vector back into the
  # list form required by do.call().
  l <- c(q = x, fit$estimate, lower.tail = lower.tail)
  args <- split(unname(l), names(l))
  # Look up the distribution function ("p" prefix) for the chosen distribution.
  pfunc <- match.fun(paste0("p", dist))
  prob <- do.call(pfunc, args)
  if (by != "") {
    cat("\nFor all levels\n")
  }
  # Print the probability with the appropriate inequality direction.
  if (!lower.tail) {
    cat("P(T > ", x, ") = ", prob, sep = "")
  } else {
    cat("P(T < ", x, ") = ", prob, sep = "")
  }
  # Return the overall probability invisibly so callers can capture it;
  # previously the function returned NULL.
  invisible(prob)
}
|
28f7f00d5854cbbdd401868d36f48faf724ad3b8 | ffdea92d4315e4363dd4ae673a1a6adf82a761b5 | /data/genthat_extracted_code/updog/examples/oracle_plot.Rd.R | fba079bd08fb5bd5749997b8dfcefcab181e5aaf | [] | no_license | surayaaramli/typeRrh | d257ac8905c49123f4ccd4e377ee3dfc84d1636c | 66e6996f31961bc8b9aafe1a6a6098327b66bf71 | refs/heads/master | 2023-05-05T04:05:31.617869 | 2019-04-25T22:10:06 | 2019-04-25T22:10:06 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 365 | r | oracle_plot.Rd.R | library(updog)
### Name: oracle_plot
### Title: Construct an oracle plot from the output of 'oracle_joint'.
### Aliases: oracle_plot
### ** Examples
# NOTE: this is example code extracted from the updog::oracle_plot help page.
ploidy <- 6
# Prior genotype distribution: binomial(ploidy, 0.75) over dosages 0..ploidy.
dist <- stats::dbinom(0:ploidy, ploidy, 0.75)
# Joint distribution of true vs estimated genotypes at depth n = 100.
jd <- oracle_joint(n = 100, ploidy = ploidy, seq = 0.001,
                   bias = 0.7, od = 0.01, dist = dist)
pl <- oracle_plot(jd = jd)
print(pl)
|
0fb0041ac6785b48269524d56dda2ce5003c8272 | 9ac330d47854c44ae239ec22465adb8ce0a24f00 | /backtests/Engine Validation Results/workings.R | b33cbe0b21a3693176a4eb54e862188cc83b74fc | [] | no_license | riazarbi/equity_analysis_trials | 21e40b523dc7531ef4e200867fb3968ae09ac0e1 | 7a4250cdaec29721a08d9329a2d8ce717e65a6b5 | refs/heads/master | 2020-05-30T15:30:03.605347 | 2019-06-02T08:31:01 | 2019-06-02T08:31:01 | 189,820,812 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 6,178 | r | workings.R | # specify directory
# Root folder holding the backtest output files.
direc <- "backtests/Engine Validation Results/"
# load all trial returns (trials 8 through 14)
trials <- as.character(8:14)
trial_returns <- paste(trials, "_returns.feather", sep="")
trial_full_paths <- paste(direc, trial_returns, sep="")
library(feather)
library(magrittr)
library(dplyr)
# Read each trial's return series into its own data frame (trial8..trial14).
# NOTE(review): seq_along(1:length(x)) is just seq_along(x).
for (i in seq_along(1:length(trial_full_paths))) {
  df <- read_feather(trial_full_paths[i])
  assign(paste("trial", i+7, sep = ""), df)
}
rm(df)
# Standardize each trial's return column (column 2 of the feather file) to
# trial_<n>, and derive a trial_<n>_index column.
# NOTE(review): (1 + r) * lag(1 + r) multiplies only two consecutive gross
# returns, not a cumulative wealth index (that would need cumprod(1 + r));
# these *_index columns are not used later in this script -- confirm intent.
trial8 <- trial8 %>% dplyr::rename("trial_8" = !!names(.[2])) %>%
  mutate(trial_8_index = (1+trial_8) * lag(1+trial_8) )
trial9 <- trial9 %>% dplyr::rename("trial_9" = !!names(.[2])) %>%
  mutate(trial_9_index = (1+trial_9) * lag(1+trial_9) )
trial10 <- trial10 %>% dplyr::rename("trial_10" = !!names(.[2])) %>%
  mutate(trial_10_index = (1+trial_10) * lag(1+trial_10) )
trial11 <- trial11 %>% dplyr::rename("trial_11" = !!names(.[2]))%>%
  mutate(trial_11_index = (1+trial_11) * lag(1+trial_11) )
trial12 <- trial12 %>% dplyr::rename("trial_12" = !!names(.[2])) %>%
  mutate(trial_12_index = (1+trial_12) * lag(1+trial_12) )
trial13 <- trial13 %>% dplyr::rename("trial_13" = !!names(.[2])) %>%
  mutate(trial_13_index = (1+trial_13) * lag(1+trial_13) )
trial14 <- trial14 %>% dplyr::rename("trial_14" = !!names(.[2])) %>%
  mutate(trial_14_index = (1+trial_14) * lag(1+trial_14) )
# build a returns matrix
# Full outer join of all trial return series on date.
all_returns <- full_join(trial8, trial9)
all_returns <-full_join(all_returns, trial10)
all_returns <-full_join(all_returns, trial11)
all_returns <-full_join(all_returns, trial12)
all_returns <-full_join(all_returns, trial13)
all_returns <-full_join(all_returns, trial14)
# load price data .Rds from trial 8 temp dir after backtest completes
price_data <- readRDS(paste(direc, "ticker_data.Rds", sep=""))
constituents <- readRDS(paste(direc, "constituent_list.Rds", sep=""))
# compute market returns: for every calendar day, sum the market caps of the
# index constituents in force on that day.
start_date <- min(all_returns$date)
end_date <- max(all_returns$date)
index_cap_seq <- numeric()
# NOTE(review): growing index_cap_seq / market_size with c() inside the
# loops is O(n^2); preallocation or vapply over the dates would be faster.
for (score_date in seq(from=start_date, to=end_date, by="day")) {
  # for() iterates over the underlying numeric values, so restore Date class.
  score_date <- lubridate::as_date(score_date)
  print(score_date)
  # Most recent constituent list dated on or before this day.
  index <- constituents %>%
    dplyr::filter(date <= score_date) %>%
    dplyr::filter(date == max(date)) %>%
    select(ticker)
  market_size <- numeric()
  for (i in seq_along(1:length(index$ticker))) {
    tick_cap <- price_data[[index$ticker[i]]] %>% filter(date ==score_date) %>%
      select(market_cap) %>% pull()
    market_size <- c(market_size, tick_cap)
  }
  index_cap <- sum(market_size)
  index_cap_seq <- c(index_cap_seq, index_cap)
}
# Daily market cap of the index. NOTE(review): this assumes the daily loop
# produced exactly one value per row of all_returns -- verify the dates align.
index_cap_ts <- data.frame(all_returns$date, index_cap_seq)
colnames(index_cap_ts) <- c("date", "market_cap")
# Simple daily market return from the change in total market cap.
index_returns <- index_cap_ts %>%
  mutate(market_return = market_cap / lag(market_cap) - 1) %>%
  select(-market_cap)
all_returns <-full_join(all_returns, index_returns, by = "date")
all_returns <- all_returns %>% tidyr::drop_na()
colnames(all_returns)
# Correlation matrix of the first three trial return series and the market.
correlation <- cor(all_returns %>% dplyr::select(trial_8,
                                                 trial_9,
                                                 trial_10,
                                                 market_return))
# Rename to be in conformance with the labeling reported in my dissertation
colnames(correlation) <- c("Trial A", "Trial B", "Trial C", "Market Return")
rownames(correlation) <- c("Trial A", "Trial B", "Trial C", "Market Return")
saveRDS(correlation, "paper/data/correlation.Rds")
#######################################################################
# Load each trial's portfolio statistics, keeping only date + portfolio value.
trial_stats <- paste(direc, trials, "_portfolio_stats.feather", sep="")
for (i in seq_along(trial_stats)) {
  df <- read_feather(trial_stats[i]) %>% select(date, portfolio_value)
  assign(paste("trial_stats", i + 7, sep = ""), df)
}
rm(df)
# Rename each portfolio_value column (column 2) to a trial-specific name so
# the data frames can be joined side by side on date.
trial8_trial_stats <- trial_stats8 %>% dplyr::rename("trial_8_trial_stats" = !!names(.[2]))
trial9_trial_stats <- trial_stats9 %>% dplyr::rename("trial_9_trial_stats" = !!names(.[2]))
trial10_trial_stats <- trial_stats10 %>% dplyr::rename("trial_10_trial_stats" = !!names(.[2]))
trial11_trial_stats <- trial_stats11 %>% dplyr::rename("trial_11_trial_stats" = !!names(.[2]))
trial12_trial_stats <- trial_stats12 %>% dplyr::rename("trial_12_trial_stats" = !!names(.[2]))
trial13_trial_stats <- trial_stats13 %>% dplyr::rename("trial_13_trial_stats" = !!names(.[2]))
# BUG FIX: this column was "trial14_trial_stats" (missing underscore),
# breaking the trial_<n>_trial_stats naming pattern used by every other
# column and propagating the inconsistent name into the saved RDS.
trial14_trial_stats <- trial_stats14 %>% dplyr::rename("trial_14_trial_stats" = !!names(.[2]))
# build a value matrix: one row per date, one column per trial, plus the
# market cap series computed earlier.
all_values <- full_join(trial8_trial_stats, trial9_trial_stats)
all_values <- full_join(all_values, trial10_trial_stats)
all_values <- full_join(all_values, trial11_trial_stats)
all_values <- full_join(all_values, trial12_trial_stats)
all_values <- full_join(all_values, trial13_trial_stats)
all_values <- full_join(all_values, trial14_trial_stats)
all_values <- full_join(all_values, index_cap_ts)
saveRDS(all_values, "paper/data/all_trial_returns.Rds")
library(xts)
library(dygraphs)
# Interactive chart of the portfolio values for trials 8, 11 and 12
# (labeled A, D, E below).
all_values_xts <- xts(all_values %>%
                        select(trial_8_trial_stats,
                               trial_11_trial_stats,
                               trial_12_trial_stats), order.by=all_values$date)
# Rename to be in conformance with the labeling reported in my dissertation
colnames(all_values_xts) <- c("Trial A", "Trial D", "Trial E")
dygraph(all_values_xts,
        main = "All Trials Normalised Total Returns",
        ylab = "Indexed Value") %>%
  dyLegend(width = 600) %>%
  dyOptions(maxNumberWidth = 20, stackedGraph = FALSE) %>%
  dyRangeSelector %>%
  dyHighlight(highlightSeriesOpts = list(strokeWidth = 3))
# Second chart: trials 12 and 13 (E, F), rebased to 100 at the start of the
# selected window via dyRebase.
all_values_xts <- xts(all_values %>%
                        select(trial_12_trial_stats,
                               trial_13_trial_stats), order.by=all_values$date)
# Rename to be in conformance with the labeling reported in my dissertation
colnames(all_values_xts) <- c("Trial E", "Trial F")
dygraph(all_values_xts,
        main = "All Trials Normalised Total Returns",
        ylab = "Indexed Value") %>%
  dyLegend(width = 600) %>%
  dyOptions(maxNumberWidth = 20, stackedGraph = FALSE) %>%
  dyRangeSelector %>%
  dyRebase(value=100) %>%
  dyHighlight(highlightSeriesOpts = list(strokeWidth = 3))
|
06f699433cd2b37e2a778660b6eee4320aefcbda | a35945446bba341325c243f33acd08267441bf58 | /testing/data-frame-test.R | 60b82edcea46ce82935a3c7a6ec7354a1ee4768a | [] | no_license | MSS-Project/mss-r-code | c8186b0d204a5664c2c5b489e0afb51ef01ba61f | 518bf2c8e5b4583ba4f71a1d5ecee9cef031ca19 | refs/heads/master | 2020-04-05T04:28:48.599571 | 2018-11-07T10:50:07 | 2018-11-07T10:50:07 | 156,553,176 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 1,558 | r | data-frame-test.R | library(funnelR)
###real data test
# Surgical outcomes data; clavien_90 is the Clavien-Dindo grade at 90 days
# (grade 5 = death). All 130 columns are read as character.
df = read.delim("/Users/linjo/Box Sync/Morocco13/Project-MSS/Techstuff/fiche_rectum.csv",sep=",", colClasses = c(rep("character", 130)))
length(unique(df$numdossier))
#45
keeps=c("numdossier","opérateur1","opérateur2","clavien_90","date_sortie")
#keeps=c("opérateur1","clavien_90")
df2 = df[,keeps]
# Deaths per lead surgeon (opérateur1).
deadDf = df2[which(df2$clavien_90==5),]
deadDf2 = data.frame(table(deadDf$opérateur1))
colnames(deadDf2) = c("opérateur1","deaths")
# Total patient count per lead surgeon.
totalDf=data.frame(table(df2$opérateur1))
colnames(totalDf) = c("opérateur1","patientCount")
# Left-style merge so surgeons with zero deaths are kept (NA -> 0 below).
df3 = merge(df2,deadDf2,by=c("opérateur1"),all=T)
#probably need to rethink how I am coming up with total patient counts and deaths since manipulation of data frame is below
df3[is.na(df3$deaths),]$deaths=0
df4 = merge(df3,totalDf,by=c("opérateur1"))
keeps = c("opérateur1","patientCount","deaths")
df5 = df4[keeps]
df5 = unique(df5)
df5$deathRate = df5$deaths/df5$patientCount
# Rename columns to funnelR's expected names.
# NOTE(review): this maps patientCount -> "d" and deaths -> "n"; funnelR's
# fundata() documents d as the numerator (events) and n as the denominator
# (sample size), so these may be swapped -- verify against the funnelR docs.
df6=df5
colnames(df6)[4] = "n2"
colnames(df6)[2] = "d"
colnames(df6)[3] = "n"
# Funnel-plot control limits at 95% / 99.7% around a 8.9% benchmark rate.
dataSet = fundata(input=df6,
                  alpha=0.95,
                  alpha2=0.997,
                  benchmark=0.089,
                  method='approximate',
                  step=1)
testPlot = funplot(input=df6,
                   fundata=dataSet)
testPlot
#opérateur 1 is lead surgeon?
counts=as.data.frame(table(df2))
# Unrelated reshape() exploration on the built-in Indometh dataset.
summary(Indometh)
wide <- reshape(Indometh, v.names = "conc", idvar = "Subject",
                timevar = "time",
                direction = "wide")
wide
|
f03c0b293cf1f5f0decb866e37786463742c9a0e | 977824dce5737a2566cd8f20661ccbea82864211 | /postprocess/frameshift.and.stop.remover.R | fb124848ec27ff2dafe15b45105230d07c65eca6 | [] | no_license | weshorton/tcr_sequencing_tools | 91747dfab43784721652c0a8d4bcb33880ad2cb1 | 42560483a4f2b71edd5c4170c78538af17a1f498 | refs/heads/master | 2021-01-18T04:51:18.419159 | 2016-02-19T20:26:45 | 2016-02-19T20:26:45 | 51,036,205 | 0 | 0 | null | 2016-02-17T18:35:11 | 2016-02-03T22:44:03 | R | UTF-8 | R | false | false | 5,111 | r | frameshift.and.stop.remover.R | # Script for flagging and/or removing records with stop codons and frameshifts
#
# Set the value of the remove.clone.table parameter to indicate whether clonotypes
# containing stop codons and frameshifts should be removed
#
# Script expects as inputs a file generated by MiXCR's "exportClones()" function,
# which has this schema:
#
# Clone count Clone fraction Clonal sequence(s) Clonal sequence quality(s) All V hits All D hits All J hits All C hits All V alignment All D alignment All J alignment All C alignment N. Seq. FR1 Min. qual. FR1 N. Seq. CDR1 Min. qual. CDR1 N. Seq. FR2 Min. qual. FR2 N. Seq. CDR2 Min. qual. CDR2 N. Seq. FR3 Min. qual. FR3 N. Seq. CDR3 Min. qual. CDR3 N. Seq. FR4 Min. qual. FR4 AA. seq. FR1 AA. seq. CDR1 AA. seq. FR2 AA. seq. CDR2 AA. seq. FR3 AA. seq. CDR3 AA. seq. FR4 Best V hit Best J hit
# Ensures that numeric outputs are not expressed in scientific notation
options(scipen=999);
process.frameshifts.stop.codons <- function(input.file.path, remove.clone.table=FALSE) {
# Specify characters to search for here.
# Note that since "grep()" is used to identify records containing these
# special characters, escapes may be required (e.g. "\\*" instead of "*").
special.characters <- c("\\*", "_");
# read in file
clone.table <- read.delim(input.file.path,
check.names=FALSE,
stringsAsFactors=FALSE);
# Append additional columns to data.frame
clone.table$"Contains frameshift or stop codon" <- 0;
clone.table$"Adjusted clone fraction" <- 0;
# identify records containing the special character, and flag them
for(i in 1:length(special.characters)) {
aas <- clone.table$"AA. seq. CDR3";
dirty.clone.indices <- integer();
dirty.clone.indices <- grep(special.characters[i], aas);
# flag them
if(length(dirty.clone.indices) > 0) {
clone.table[dirty.clone.indices,]$"Contains frameshift or stop codon" <- 1;
} # fi
} # for i
# Adjust clone fractions
clean.clone.indices <- which(clone.table$"Contains frameshift or stop codon" == 0);
sum.clean.clones <- sum(clone.table[clean.clone.indices,]$"Clone fraction");
clone.table[clean.clone.indices,]$"Adjusted clone fraction" <- clone.table[clean.clone.indices,]$"Clone fraction" / sum.clean.clones;
# If remove.clone.table is TRUE, then remove the appropriate records
# and copy the values for "Adjusted clone fraction" values to the
# "Clone fraction" column
if(remove.clone.table) {
# remove records
indices.to.remove <- which(clone.table$"Contains frameshift or stop codon" == 1);
if(length(indices.to.remove > 0)) {
clone.table <- clone.table[-indices.to.remove,];
} # fi
# copy values
clone.table$"Clone fraction" <- clone.table$"Adjusted clone fraction";
} # fi
# write output to file
output.file.name <- sub("[.][^.]*$", "", basename(input.file.path));
if(remove.clone.table) {
output.file.name <- paste(output.file.name, "_postprocessed_removed.txt", sep="");
} else {
output.file.name <- paste(output.file.name, "_postprocessed.txt", sep="");
}
output.file.name <- file.path(dirname(input.file.path), output.file.name);
write.table(clone.table,
file=output.file.name,
row.names=FALSE,
sep="\t",
quote=FALSE);
} # process.frameshifts.stop.codons()
# This is the original function, which took as input.file.paths formatted for use in
# VDJTools. Such files had this schema:
#
# #count freq cdr3nt cdr3aa v d j VEnd DStart DEnd JStart
#
#process.frameshifts.stop.codons <- function(input.file.path) {
#
# # Specify characters to search for here.
# # Note that since "grep()" is used to identify records containing these
# # special characters, escapes may be required (e.g. "\\*" instead of "*").
# special.characters <- c("\\*", "_");
#
# # read in file
# clone.table <- read.delim(input.file.path, stringsAsFactors=FALSE);
# # identify records containing the special character
# for(i in 1:length(special.characters)) {
# aas <- clone.table$cdr3aa;
# dirty.clone.indices <- integer();
# dirty.clone.indices <- grep(special.characters[i], aas);
# # process.frameshifts.stop.codons
# if(length(dirty.clone.indices > 0)) {
# clone.table <- clone.table[-dirty.clone.indices,];
# } # fi
# } # for i
# # write output to file
#
# colnames(clone.table)[1] <- "#count";
# output.file.name <- sub("[.][^.]*$", "", basename(input.file.path));
# output.file.name <- paste(output.file.name, "_no_fssc.txt", sep="");
#
# write.table(clone.table,
# file=output.file.name,
# row.names=FALSE,
# sep="\t",
# quote=FALSE);
#
#} # process.frameshifts.stop.codons()
#
|
10d0faf587b16d223e9af7c2a3430baa1368c420 | 2fd3d364b6ffe4718981c63b9d66c2bf6571811a | /Research-tumor,genome/genecompare.R | 9b8eb8a4f2e28e7211f75c5df581b7b0b58fe952 | [] | no_license | simon1405/R | ba11e2b76ef3ad03d762ee461285e9d2591a9142 | 49903c9594a5c0b202523776b15cb71b320dab1d | refs/heads/master | 2020-04-21T20:05:48.656785 | 2019-02-15T14:27:04 | 2019-02-15T14:27:04 | 169,830,991 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 3,254 | r | genecompare.R | gene<-Normalized_geneExpression_HHCsamples
colnames(gene)
x<-read.table("C:/Users/simon/Desktop/Winter Intern/MEDICAL/Liver_ImmGen_results_Ver2_new.txt",header=TRUE,)
dim(x)
dim(gene[,2:163])
colnames(gene)
rownames(x)
newgene<-as.matrix(gene[2:163,])
genename<-gene[,1]
dim(genename)
dim(genecom)
for (i in 1:37582){
colnames(genecom)[i+6]<-genename[i,1]
}
genecom<-cbind(x,newgene)
colnames(genecom)
genecom[79,1]
cor(genecom$)
write.csv(genecom,file="genecombined.csv")
cor.test(as.vector(genecom[2:163,2]),as.vector(genecom[2:163,7]))
genecom[4,4]
library(readr)
genecombined <- read_csv("~/genecombined.csv")
dim(genecombined)
geneN<-genecombined[1:78,2:37589]
geneT<-genecombined[79:162,2:37589]
dim(geneN)
colnames(geneN)
pvalue<-matrix(0,nrow=6,ncol=37582)
cortable<-matrix(0,nrow=6,ncol=37582)
for (i in 1:6){
for (j in 7:37588){
cor1<c()
cor1<-cor.test(as.matrix(geneN[,i]),as.matrix(geneN[,j]))
pvalue[i,j-6]<-cor1$p.value
cortable[i,j-6]<-cor1$estimate
}
}
pvalueN<-pvalue
rsqN<-rsq
pvalueT<-matrix(0,nrow=6,ncol=37582)
cortableT<-matrix(0,nrow=6,ncol=37582)
for (i in 1:6){
for (j in 7:37588){
cor1<c()
cor1<-cor.test(as.matrix(geneN[,i]),as.matrix(geneN[,j]))
pvalueT[i,j-6]<-cor1$p.value
cortableT[i,j-6]<-cor1$estimate
}
}
#genes name
gena<-read.delim("C:/Users/simon/Desktop/Winter Intern/MEDICAL/Affy2.0_GeneChip_annotation_data.txt")
newpvaluen<-t(pvalueN)
newpvaluen<-cbind((colnames(genecombined)[8:37589]),newpvaluen)
colnames(newpvaluen)<-c("ID","NavB","MEmB","CD8T","CD4T","NKcell","Monocyte")
library("dplyr")
dict<-select(gena,ID,Gene.symbol)
newpvaluen<-as.data.frame((newpvaluen))
newpvaluen<-right_join(newpvaluen,dict,by="ID")
#newpvaluen$Gene.symbol[which(newpvaluen$Gene.symbol=="")]<-"not available"
write.csv(newpvaluen,"pvalue with normal samples")
m1st<-read.delim("C:/Users/simon/Desktop/Winter Intern/MEDICAL/node.txt")
firsttry<-as.data.frame(m1st$ID)
colnames(firsttry)<-"Gene.symbol"
result1st<-inner_join(firsttry,newpvaluen,by="Gene.symbol")
#
newpvaluet<-t(pvalueT)
newpvaluet<-cbind((colnames(genecombined)[8:37589]),newpvaluet)
colnames(newpvaluet)<-c("ID","NavB","MEmB","CD8T","CD4T","NKcell","Monocyte")
newpvaluet<-as.data.frame((newpvaluet))
newpvaluet<-right_join(newpvaluet,dict,by="ID")
write.csv(newpvaluet,"pvalue with tumors samples")
tresult1st<-inner_join(firsttry,newpvaluet,by="Gene.symbol")
m2nd<-read.delim("C:/Users/simon/Desktop/Winter Intern/MEDICAL/node2.txt")
firsttry<-as.data.frame(m2nd$Approved.Symbol)
colnames(firsttry)<-"Gene.symbol"
result2nd<-inner_join(firsttry,newpvaluet,by="Gene.symbol")
m3rd<-read.delim("C:/Users/simon/Desktop/Winter Intern/MEDICAL/node3.txt")
firsttry<-as.data.frame(m3rd$Approved.Symbol)
colnames(firsttry)<-"Gene.symbol"
result3rd<-inner_join(firsttry,newpvaluet,by="Gene.symbol")
library("ggplot2")
ggplot(data=newpvaluet,aes(x=c("NavB","MEmB","CD8T","CD4T","NKcell","Monocyte"),y=ID))+geom_tile(aes(fill=pvalueT))
write.csv(tresult1st,"pvalue with tumor samples comparing to Interleukins (IL).csv")
write.csv(result2nd,"pvalue with tumor samples comparing to Interferons (IFN).csv")
write.csv(result3rd,"pvalue with tumor samples comparing to Chemokine ligands (CCL).csv")
|
225ea0d43376482709b8e713927ba420c6294dba | acea526728f55e3d64a7f2c44ad72eecea91d95a | /opdc postcodes.R | 383bb572560e48ea4748606c7cd566eb7a140afb | [] | no_license | seanolondon/R | 3375b49a0b182efac35f5d767fe354d627fee19c | 4c4375857e03c1277c5cce9acc8a57d8b2e8ecc6 | refs/heads/master | 2021-08-22T21:10:47.243036 | 2017-12-01T09:00:41 | 2017-12-01T09:00:41 | 112,718,765 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 1,389 | r | opdc postcodes.R | library(rgeos)
library(sp)
library(rgdal)
pacman::p_load(dplyr, data.table, tidyr)
rm(list=ls())
area_names <- data.frame(OBJECTID = as.factor(c(6,7,5,3,9,11,8,10,12)),
Sub_Area = c("Northern Park Royal",
"Southern Park Royal",
"Victoria RD & Old Oak Lane",
"Main Development Area",
"Harlesden Fringe",
"Rhapsody Court",
"Park Royal Fringe",
"North Kensington Fringe",
"East Acton Fringe"))
####DO SOME GIS####
#Join the large sites point data to ward and MSOA polygons
setwd("N:/jasper/Shapefiles")
opdc <- readOGR(dsn = ".", layer = "opdc_sub_areas")
opdc@data <- left_join(opdc@data, area_names, by="OBJECTID")
head(opdc@data)
postcodes <- readOGR(dsn = "W:/GISDataMapInfo/BaseMapping/Addressing/OSCodePoint/2017_Jan/London.gdb",
layer = "Postcodes_London2017")
proj4string(opdc) <- proj4string(postcodes)
data_join <- cbind(as.data.frame(postcodes), over(postcodes, opdc)) %>%
filter(!is.na(Sub_Area)) %>%
select(POSTCODE, DISTRICT_NAME, Sub_Area)
data_join <- left_join(data_join, area_names, by="OBJECTID")
|
bec340d4a9ad950344d520feea6a6de1bbf4d899 | e1a3abc30055c43440773900ec95ebbbea4815a9 | /R/Task8.R | d4a137ff7607f9a6a7365f0b8f689a395a1b35fe | [] | no_license | lnonell/IntegromicsPractical | ca7dd2316aa208615c81b00a8f50c809db6d7972 | 81a7eff69541782493a438b41a1e8a6eaddbdeba | refs/heads/master | 2021-05-23T12:04:26.230916 | 2020-06-12T12:11:31 | 2020-06-12T12:11:31 | 253,277,526 | 0 | 1 | null | null | null | null | UTF-8 | R | false | false | 8,685 | r | Task8.R | #Task 8 Oscar Paniello Pérez-Hickman
#Purpose: Plot results of MFA, mixOmics, together with rawdata (mRNA, CN and methylation data) and significative correlation data.
#Filter previously each raw data set to get the 10% of the genes with the highest sd. The plot should be informative.
#input: data.frames obtained as output files in tasks 1 to 5. 100 most correlated variables obtained in tasks 6 and 7. Path to store Circos plot
#output: Circos plot in specified format.
task8<-function(df_samples, mrna, cnv, meth, cor.cnv, cor.cnv.sig = 3, cor.meth, cor.meth.sig = 3, sd_mrna = 0.01, sd_cnv = 1, sd_meth = 0.01,path = getwd()){
#library
suppressPackageStartupMessages(library(OmicCircos))
suppressPackageStartupMessages(library(biomaRt))
suppressPackageStartupMessages(library(TxDb.Hsapiens.UCSC.hg38.knownGene))
suppressPackageStartupMessages(library(dplyr))
suppressPackageStartupMessages(library(compare))
suppressPackageStartupMessages(library(TCGAbiolinks))
suppressPackageStartupMessages(library(edgeR))
suppressPackageStartupMessages(library(SummarizedExperiment))
# filter 10% genes by highest sd
cat("\nfilter 10% genes by highest sd.\n")
sd <- apply(mrna,1,sd)
sd10 <- head(sort(sd,decreasing = TRUE), round(nrow(mrna)*sd_mrna))
exp_final <- mrna[names(sd10),]
sd <- apply(cnv,1,sd)
sd10 <- head(sort(sd,decreasing = TRUE), round(nrow(cnv)*sd_cnv))
cnv_final <- cnv[names(sd10),]
sd <- apply(meth,1,sd)
sd10 <- head(sort(sd,decreasing = TRUE), round(nrow(meth)*sd_meth))
meth_final <- meth[names(sd10),]
cat("\nDownloading annotation of human genes.\n")
#Set up an gene annotation template to use
txdb <- TxDb.Hsapiens.UCSC.hg38.knownGene
genes <- genes(txdb)
ensembl <- useMart("ensembl")
mart <- useMart(biomart="ensembl", dataset="hsapiens_gene_ensembl", host="www.ensembl.org")
#obtain chr, start, and HGNC name of all the genes annotated in hg38
annot_df <- getBM(attributes = c("chromosome_name","start_position","end_position","hgnc_symbol"), filters = "entrezgene_id", values = genes$gene_id, mart = mart)
annot_df<- annot_df[annot_df[,4]!="" & annot_df[,1] %in% c(1:22,"X","Y"),] #remove those not annotated
annot_df <- annot_df[order(annot_df[,2]),] #order by start position
genes <- annot_df[order(annot_df[,1]),] #order by chromosome
genes<-genes %>% distinct(hgnc_symbol, .keep_all = TRUE) #we select just one copy of the gene with it starting and end positon
rownames(genes)<-genes$hgnc_symbol
cat("\nChecking the common genes between the df and the annotation.\n")
#Obtain just the positions of the wanted genes in common with the annotation gene list.
#Exp
common.gene.exp <- intersect(row.names(genes), row.names(exp_final))
gene.pos.exp <- genes[common.gene.exp,]
drops <- c("hgnc_symbol")
gene.pos.exp <- gene.pos.exp[ , !(names(gene.pos.exp) %in% drops)]
exp_final <- exp_final[common.gene.exp,]
#CNV
common.gene.cnv <- intersect(row.names(genes), row.names(cnv_final))
gene.pos.cnv <- genes[common.gene.cnv,]
drops <- c("hgnc_symbol")
gene.pos.cnv <- gene.pos.cnv[ , !(names(gene.pos.cnv) %in% drops)]
cnv_final <- cnv_final[common.gene.cnv,]
n_stagei <- sum(df_samples$tumor_stage=='stage i')
n_stageiv <- sum(df_samples$tumor_stage=='stage iv')
cnv_final.stagei <- cnv_final[,c(1:n_stagei)]
cnv_final.stageiv <- cnv_final[,c((n_stagei+1):(n_stagei+n_stageiv))]
cnv.stagei.m <- merge(gene.pos.cnv, cnv_final.stagei, by=0, all=TRUE) # merge by row names (by=0 or by="row.names")
cnv.stagei.m[is.na(cnv.stagei.m)] <- 0 # replace NA values
cnv.stagei <- cnv.stagei.m[,-1]
rownames(cnv.stagei) <- cnv.stagei.m[,1]
cnv.stageiv.m <- merge(gene.pos.cnv, cnv_final.stageiv, by=0, all=TRUE) # merge by row names (by=0 or by="row.names")
cnv.stageiv.m[is.na(cnv.stageiv.m)] <- 0 # replace NA values
cnv.stageiv <- cnv.stageiv.m[,-1]
rownames(cnv.stageiv) <- cnv.stageiv.m[,1]
#Meth
common.gene.meth <- intersect(row.names(genes), row.names(meth_final))
gene.pos.meth <- genes[common.gene.meth,]
drops <- c("hgnc_symbol")
gene.pos.meth <- gene.pos.meth[ , !(names(gene.pos.meth) %in% drops)]
meth_final <- meth_final[common.gene.meth,]
#Correlation exp-cnv
common.gene.corr.cnv <- intersect(row.names(genes), row.names(cor.cnv))
gene.pos.corr.cnv <- genes[common.gene.corr.cnv,]
drops <- c("hgnc_symbol")
gene.pos.corr.cnv <- gene.pos.corr.cnv[ , !(names(gene.pos.corr.cnv) %in% drops)]
#-log10(pvalue) to make a better visual correlation in the circos
logpvalue.cnv.corr <- -1 * log10(cor.cnv[,3])
cor.cnv[,3] <- logpvalue.cnv.corr
#prepare data for the circosplot
pvalue.exp.cnv <- merge(gene.pos.corr.cnv, cor.cnv, by="row.names",all.x=TRUE)
rownames(pvalue.exp.cnv)<-pvalue.exp.cnv[,1]
pvalue.exp.cnv <- pvalue.exp.cnv[,c(-1,-5,-6)]
mask.exp.cnv <- pvalue.exp.cnv[,"pval.adj"]>cor.cnv.sig
sig.pvalue.exp.cnv <- pvalue.exp.cnv[mask.exp.cnv,]
sig.pvalue.exp.cnv[,5] <- rownames(sig.pvalue.exp.cnv)
#Correlation exp-meth
common.gene.corr.meth <- intersect(row.names(genes), row.names(cor.meth))
gene.pos.corr.meth <- genes[common.gene.corr.meth,]
drops <- c("hgnc_symbol")
gene.pos.corr.meth <- gene.pos.corr.meth[ , !(names(gene.pos.corr.meth) %in% drops)]
#-log10(pvalue) to make a better visual correlation in the circos
logpvalue.meth.corr <- -1 * log10(cor.meth[,3])
cor.meth[,3] <- logpvalue.meth.corr
#prepare data for the circosplot
pvalue.exp.meth <- merge(gene.pos.corr.meth, cor.meth, by="row.names",all.x=TRUE)
rownames(pvalue.exp.meth)<-pvalue.exp.meth[,1]
pvalue.exp.meth <- pvalue.exp.meth[,c(-1,-5,-6)]
mask.exp.meth <- pvalue.exp.meth[,"pval.adj"]>cor.meth.sig
sig.pvalue.exp.meth <- pvalue.exp.meth[mask.exp.meth,]
sig.pvalue.exp.meth[,5] <- rownames(sig.pvalue.exp.meth)
circ.exp <- data.frame("chr"=gene.pos.exp$chromosome_name,"Start"=as.integer(gene.pos.exp$start_position),
"End"=as.integer(gene.pos.exp$end_position),log2(exp_final+1),row.names=NULL)
circ.meth <- data.frame("chr"=gene.pos.meth$chromosome_name,"Start"=as.integer(gene.pos.meth$start_position),
"End"=as.integer(gene.pos.meth$end_position),meth_final,row.names=NULL)
cat("\nPlotting the circos as pdf in the working directory.\n")
pdf("circosplot.pdf")
options(stringsAsFactors = FALSE)
par(mar=c(2, 2, 2, 2));
plot(c(1,800), c(1,800), type="n", axes=F, xlab="", ylab="", main="");
circos(R=400, cir="hg19", W=3, type="chr", print.chr.lab=T, scale=T);
circos(R=320, cir="hg19", W=75, mapping=circ.exp, col.v=4, type="heatmap2",B=FALSE, cluster=FALSE, col.bar=F, lwd=0.1, col="blue");
circos(R=240, cir="hg19", W=75, mapping=circ.meth, col.v=4, type="heatmap2",B=FALSE, cluster=FALSE, col.bar=F, lwd=0.1, col="blue")
circos(R=160, cir="hg19", W=75, mapping=cnv.stagei, col.v=4, type="ml3", B=FALSE, lwd=1, cutoff=0);
circos(R=120, cir="hg19", W=75, mapping=cnv.stageiv, col.v=4, type="ml3", B=FALSE, lwd=1, cutoff=0);
colors <- rainbow( 1 , start=0, end = 1, alpha=0.5 )
circos(R=90, cir="hg18", W=40, mapping=pvalue.exp.cnv, col.v=4, type="s", B=FALSE, lwd=0.1, col=colors, cex=0.2)
colors <- rainbow( 1 , start = 0.7, end = 1, alpha=0.5 )
circos(R=90, cir="hg18", W=40, mapping=pvalue.exp.meth, col.v=4, type="s", B=FALSE, lwd=0.1, col=colors, cex=0.2)
if (nrow(sig.pvalue.exp.cnv) > 0) {
circos(R=120, cir="hg18", W=40, mapping=sig.pvalue.exp.cnv, col.v=5, type="label2", B=FALSE, lwd=1, col='red', cex=0.2)
}
if (nrow(sig.pvalue.exp.meth) > 0) {
circos(R=120, cir="hg18", W=40, mapping=sig.pvalue.exp.meth, col.v=5, type="label2", B=FALSE, lwd=1, col='blue', cex=0.2)
}
dev.off()
return()
}
##############
#Testing
##############
task8(LUAD.pts)
task8(KIRC.pts)
task8(HNSC.pts)
task8(STAD.pts)
task8(LUSC.pts)
task8(KICH.pts)
task8(SKCM.pts, mrna = SKCM.exp, cnv = SKCM.cnv, meth = SKCM.meth, cor.cnv = SKCM.cnv.corr, cor.meth = SKCM.meth.corr, path = getwd())
task8(df_samples = KIRP.pts,mrna = KIRP.exp,cnv = KIRP.cnv,meth = KIRP.meth,cor.cnv = KIRP.cnv.corr,cor.meth = KIRP.meth.corr)
task8(df_samples=ESCA.pts, mrna=ESCA.mrna, cnv=ESCA.cn, meth=ESCA.meth, cor.cnv=ESCA.cnv.corr, cor.cnv.sig = 1, cor.meth=ESCA.meth.corr, cor.meth.sig = 3, sd_mrna = 0.01, sd_cnv = 1, sd_meth = 0.01)
df_samples <- ESCA.pts
mrna <- ESCA.mrna
cnv <- ESCA.cn
meth <- ESCA.meth
cor.cnv <- ESCA.cnv.corr
cor.cnv.sig <- 1
cor.meth <- ESCA.meth.corr
cor.meth.sig <- 3
sd_mrna <- 0.01
sd_cnv <- 1
sd_meth <- 0.01
|
7825148864fd3760ebca3bb6ef27f8135152ade3 | fc3dcbfc159fa55cd65866728e94d2ceda703fc6 | /setupdata.R | ef559e5a4cb7ba8d9f9a3c2da20d465d6d83062e | [] | no_license | flanaj/ExData_Plotting1 | 6cd844fe2872955a97095076d0a5b6a15758d672 | bc5c853566b526397ac87bf1db2313e8b1027217 | refs/heads/master | 2022-12-10T23:51:13.757989 | 2020-09-07T19:05:38 | 2020-09-07T19:05:38 | 293,564,086 | 0 | 0 | null | 2020-09-07T15:23:38 | 2020-09-07T15:23:37 | null | UTF-8 | R | false | false | 1,724 | r | setupdata.R | library(dplyr)
#
# download data
#
fname <- "exdata_data_household_power_consumption.zip"
if (!file.exists(fname))
{
url <- "https://d396qusza40orc.cloudfront.net/exdata%2Fdata%2Fhousehold_power_consumption.zip"
download.file(url, fname, method="curl")
}
#
# extract data
#
extractedfname <- "household_power_consumption.txt"
if (!file.exists(extractedfname))
{
unzip(fname)
}
#
# read data from 2/1/2007 and 2/2/2007
#
colNames <- c("Date", "Time", "GlobalActivePower", "GlobalReactivePower", "Voltage", "GlobalIntensity", "SubMetering1", "SubMetering2", "SubMetering3")
dfpcd <- data.frame(matrix(vector(), 0, 9, dimnames=list(c(), colNames)), stringsAsFactors=FALSE)
con <- file(extractedfname, "r")
line <- readLines(con, n=1)
while (length(line) > 0)
{
if (grepl("^(1/2/2007|2/2/2007)", line))
{
record <- strsplit(line, ";")[[1]]
names(record) <- colNames
dfpcd <- rbind(dfpcd, record)
}
line <- readLines(con, n=1)
}
close(con)
names(dfpcd) <- colNames
#
# data conversion
#
dfpcd <- mutate(dfpcd, DateTime=strptime(paste(Date,Time), format="%d/%m/%Y %H:%M:%S"), .after=Time)
dfpcd <- mutate(dfpcd, GlobalActivePower=as.double(GlobalActivePower))
dfpcd <- mutate(dfpcd, GlobalActivePower=as.double(GlobalActivePower))
dfpcd <- mutate(dfpcd, GlobalReactivePower=as.double(GlobalReactivePower))
dfpcd <- mutate(dfpcd, Voltage=as.double(Voltage))
dfpcd <- mutate(dfpcd, GlobalIntensity=as.double(GlobalIntensity))
dfpcd <- mutate(dfpcd, SubMetering1=as.double(SubMetering1))
dfpcd <- mutate(dfpcd, SubMetering2=as.double(SubMetering2))
dfpcd <- mutate(dfpcd, SubMetering3=as.double(SubMetering3))
dfpcd <- select(dfpcd, -c(Date, Time)) |
805c25419f51855fdc6210c1a6d44d8f7831e13b | 125af6bd1dedd45089b3d5c95e0a4691e1fe715b | /man/trajectory_list.Rd | 39ca1c3e037bbc8914f807d3e0ad58afaf61f5dc | [
"MIT"
] | permissive | envhyf/SplitR | b068a2997d20831281a6f210801cc6b0b2a40265 | 386c65067c6ad62bcec442fabe7f93c81826f495 | refs/heads/master | 2021-01-18T10:30:53.412897 | 2015-01-24T10:03:04 | 2015-01-24T10:03:04 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 579 | rd | trajectory_list.Rd | % Generated by roxygen2 (4.1.0): do not edit by hand
% Please edit documentation in R/trajectory_list.R
\name{trajectory_list}
\alias{trajectory_list}
\title{List HYSPLIT trajectory output archive files or folders}
\usage{
trajectory_list(output_folder)
}
\arguments{
\item{output_folder}{the absolute path for the trajectory archive files (UNIX) or folders (Windows) is to be provided.}
}
\value{
data frame with information on trajectory model output data archives
}
\description{
The function lists the HYSPLIT trajectory output files that reside in a specified directory.
}
|
34b12c0b9545b5ccd8651fdffa6c893b8d4705e1 | 53525167102f0b02d3116a8a11b1a9f70e957acb | /plot1.R | 5e2d664ba96ac6419d62b8dac612cea8af0b0b50 | [] | no_license | yihueijiang/ExData_Plotting1 | 81ab2ec0618737946978ef56c1626c668b87ba54 | 183480341f988ce54545abe1bb3749119dc6fff6 | refs/heads/master | 2021-01-21T03:13:41.459305 | 2015-06-07T09:02:13 | 2015-06-07T09:02:13 | 24,881,208 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 583 | r | plot1.R | rm(list=ls())
#read data
data<- read.table(file="c:/Users/Yi-Huei/Desktop/ExData/Project1/household_power_consumption.txt", sep=";",nrows= 2075260, header=TRUE, quote= "", strip.white=TRUE, stringsAsFactors = FALSE, na.strings= "?")
# select subdata with two days from the full data
subdata<- subset(data, (data$Date == "1/2/2007" | data$Date== "2/2/2007"))
# create Plot1
png("c:/Users/Yi-Huei/Desktop/plot1.png", width = 480, height = 480)
hist(subdata$Global_active_power, main="Global Active Power",col='red',ylab= "Frequency", xlab="Global Active Power(kilowatts)")
dev.off() |
52178442af95fc57a248680ecf8ee433587ba448 | 1162f8a2de179f8b93369f7a76a058180e98aa21 | /R/t_test_beta.R | 21a86d69cf18b740241d2c4e099fbb4b493fb8b5 | [] | no_license | elwood-shannon/esfstatistika | 96c99139f211bb14d1aee65e8f4742a9294e66c6 | b5cdb4fbaad627c63263bf35d284597c9b90f768 | refs/heads/master | 2022-12-19T13:57:27.459372 | 2020-09-24T12:29:24 | 2020-09-24T12:29:24 | 250,825,730 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 1,429 | r | t_test_beta.R |
#' \eqn{t}-test beta
#'
#' Funkce pro vypocet testovaciho kriteria a kritickych oboru \eqn{t}-testu pro regresni parametr beta.
#
#
#' @param b Odhad parametru beta
#' @param s Smerodatna odchylka parametru beta.
#' @param n Pocet pozorovani.
#' @param p Pocet parametru v modelu (bez urovonove konstanty).
#' @param alfa Hladina vyznamnosti (je 0.05, pokud neni stanoveno jinak).
#' @param dec Pocet desetinnych mist v konecnem vysledku (je 10, pokud neni stanoveno jinak).
#' @return Vypocita kriticke obory a testovaci kriterium \eqn{t}-testu pro parametr beta.
#' @export
t_test_beta <- function(b, s, n, p, alfa = 0.05, dec = 10){
kriterium <- b / s
o_h1p <- qt(p = 1 - alfa / 2, df = n - p - 1)
o_h1n <- -1 * qt(p = 1 - alfa / 2, df = n - p - 1)
l_h1 <- -1 * qt(p = 1 - alfa, df = n - p - 1)
p_h1 <- qt(p = 1 - alfa, df = n - p - 1)
print('Oboustranna H1 -------------------------------------')
print(paste('W je: (- nekonecno', ',', round(o_h1n, dec), ']', 'u', '[', round(o_h1p, dec), ',', '+ nekonecno)'))
print('Levostranna H1 -------------------------------------')
print(paste('W je: (- nekonecno', ',', round(l_h1, dec), ']'))
print('Pravostranna H1 ------------------------------------')
print(paste('W je:', '[', round(p_h1, dec), ',', '+ nekonecno)'))
print('----------------------------------------------------')
print(paste('Testovaci kriterium je: ', round(kriterium, dec)))
}
|
c9dfa74be076b2f805099e35a115c4f6a7a1f5cb | 0c3758367c95fde408b29ba5bd93a31fb25f213f | /mainMonteCarlos.r | 482ab53b204fed00bf0c49b47c6422077585cdfa | [] | no_license | ccrismancox/JOP_substantiveEffectsInBinaryOutcomePanelModels | fa2ccfda854c8140e6932c3e510f7cfe38266352 | 75bf24f66ed70075f6c2c141f227ab74bce83831 | refs/heads/master | 2020-07-07T01:36:12.285166 | 2019-08-22T17:58:03 | 2019-08-22T17:58:03 | 203,202,224 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 28,495 | r | mainMonteCarlos.r | rm(list=ls())
library(doRNG)
library(doParallel)
library(survival)
library(brglm)
library(lme4)
library(plm)
library(Matrix)
library(spam)
library(ggplot2)
library(reshape2)
library(scales)
library(matrixStats)
library(data.table)
source("sparseFirth.R")
beck.logit <- function(alpha, cml, X, y){
theta <- c(cml, alpha)
XB <- drop(X %*% theta)
return(
-sum(ifelse(y==1,
plogis(XB, log.p=T),
plogis(XB, lower.tail = F, log.p=T))))
}
cl <- makeCluster(30)
registerDoParallel(cl)
set.seed(123456)
n <- c(20, 40, 60, 80, 100)
t <- c(3, 5, 10,25)
g <- c(3.25,2.25)
NT <- expand.grid(n,t,g)
B <- 1000
M <- 50 #cutpoint for numeric issue
results <- list()
# Outer loop: one design cell (N strata, T periods, incidental-parameter
# scale gamma) per row of NT; results[[i]] stores all replicates.
for(i in 1:nrow(NT)){
N <- NT[i, 1]
T <- NT[i, 2]
gamma <- NT[i, 3]
# Stratum-level heterogeneity; mean -4 keeps the binary outcome rare.
alpha <- rnorm(N, mean=-4)
# B parallel replicates; failed replicates are silently dropped
# (.errorhandling = "remove"), each surviving one returns a row of stats.
out <- foreach(b= 1:B, .combine = 'rbind',
.packages = c("brglm", "survival", "lme4", "data.table", "Matrix", "spam", "plm"),
.errorhandling = "remove") %dorng%{
indexes <- expand.grid(1:T,1:N)
c <- rep(alpha, each=T) #this is z in the paper
r <- 0
data1 <- list(y=0)
all.zero <- T
# Redraw the panel until: |cor(X, c)| >= 0.3 (regressor correlated with
# the fixed effects), at least one event occurs, and -- in the
# rare-event gamma only -- some but not all strata are screened out.
while(abs(r)<.3 | sum(data1$y)==0 || all.zero){
X <- rnorm(N*T)+ c
e <- rlogis(N*T) + c*gamma
r <- cor(X,c)
y.linear <- -2*X + e
data1 <- data.table(state=factor(indexes[,2]),
year=indexes[,1],
y.linear=y.linear,
X=X)
data1$y <- ifelse(data1$y.linear>0,1,0)
data1[, sum.y:=sum(y), by=state]
data1$cgamma <- c*gamma
within.y <- mean(data1[, mean(y), by=state]$V1)
# data2 keeps only strata with within-stratum variation in y
# (the usable sample for MLDV / conditional logit / Beck).
data2 <- data1[(sum.y>0 & sum.y<T)]
data1[,state2 := ifelse((sum.y==0), N+1, state)]
data1[,state2 := ifelse((sum.y==T), N+2, state2)]
data1[,state2:=factor(state2)]
data2$state <- droplevels(data2$state)
ca <- c[data1$sum.y>0 & data1$sum.y<T]
prop.all.zero <- 1-length(unique(data2$state))/length(unique(data1$state))
if(gamma == g[1]){
all.zero <- !((prop.all.zero <1) & (prop.all.zero > 0))
}else{
all.zero <- FALSE
}
}
#note: state is already a factor variable (see above)
# Fit the competing estimators, recording elapsed seconds
# (system.time()[3]): MLDV dummy-variable logit (g2), the sparse
# fixed-effects routine from sparseFirth.R (g3), conditional logit (g4),
# correlated random effects with stratum means Z (g5), plain random
# effects (g6), and a linear probability model (g7).
t2 <- system.time({g2 <- try(glm(y~X+(state)-1, family=binomial, data=data2, x=T,y=T))})[3]
t3 <- system.time({g3 <- try(sp.ffe(y~X+(state)-1, data=data1))})[3]
t4 <- system.time({g4 <- try(clogit(y~X+strata(state), data=data1))})[3]
Z <- tapply(data1$X, data1$state, mean) %x% rep(1, T)
t5 <- system.time({g5 <- try(glmer(y~X+(1|state)+Z, data=data1, family =binomial))})[3]
t6 <- system.time({g6 <- try(glmer(y~X+(1|state), data=data1, family =binomial))})[3]
t7 <- system.time({g7 <- try(lm(y~X+state+0,data=data1))})[3]
# Beck-style two step: hold the conditional-ML slope fixed and recover
# the stratum intercepts by minimizing beck.logit (defined above).
if(class(g4)[1]=="clogit"){
t.beck <- system.time({g.beck <- try(optim(par=rep(0, length(unique(data2$state))),
fn=beck.logit,method="BFGS",
cml=g4$coefficients,
X=sparse.model.matrix(~X+state+0, data=data2),
y=data2$y))})[3]
if(class(g.beck)!="try-error" && g.beck$convergence==0){
g.beck <- list(coefficients=c(g4$coefficients, g.beck$par), conv=g.beck$convergence)
}else{
g.beck <- list(coefficients=rep(NA, 1+length(unique(data2$state))))
t.beck <- NA
}
}else{
g.beck <- list(coefficients=rep(NA, 1+length(unique(data2$state))))
t.beck <- NA
}
# Beck's total time includes the first-stage conditional logit fit.
t.beck <- t.beck+t4
newData <- data1[,mean(X), by=state]; setnames(newData, "V1", "X"); newData$Z <- newData$X
newData$state2 <- unique(data1[,list(state, state2)])$state2
newData2 <- data2[,mean(X), by=state]; setnames(newData2, "V1", "X")
X.mem <- sparse.model.matrix(~X+(state)-1, data=newData)
X.mem2 <- sparse.model.matrix(~X+(state)-1, data=newData2)
# For each estimator: discard degenerate fits (error, non-convergence,
# NA or |coef| > M), otherwise compute squared error and bias of fitted
# probabilities vs the true P(y=1), plus per-observation AME terms.
if(class(g2)=="try-error"||!g2$conv||any(is.na(g2$coef))|| abs(g2$coef[1]) > M){
g2 <- list(coef=rep(NA,N+1))
mar2 <- NA; bmar2 <- NA; ame2 <- NA; t2 <- NA
}else{
prob.g2 <- g2$fitted
prob.g2.new <- g2$fitted #if MEM use predict(g2, newdata=newData2, type="response")
mar2 <- mean((prob.g2 -plogis(-2*data2$X + data2$cgamma))^2)
bmar2 <-(mean(prob.g2 -plogis(-2*data2$X + data2$cgamma)))
ame2 <-(prob.g2.new*(1-prob.g2.new)*(coef(g2)[1]))
}
if(class(g3)=="try-error"||!(g3$conv==0) || any(is.na(g3$coefficients))||any(abs(g3$coefficients) > M)){
g3 <- list(coef=rep(NA,N+1))
mar3 <- NA; bmar3 <- NA; ame3 <- NA; t3 <- NA
}else{
g3$coef.table <- NULL
prob.g3 <- g3$fitted
prob.g3.new <- g3$fitted #if MEM use plogis(drop(X.mem %*% g3$coef))
mar3 <- mean((prob.g3 -plogis(-2*data1$X + data1$cgamma))^2)
bmar3 <-(mean(prob.g3 -plogis(-2*data1$X + data1$cgamma)))
ame3 <- (prob.g3.new*(1-prob.g3.new)*(coef(g3)[1]))
}
# NOTE(review): `t4 <- NA` is assigned twice on the next line -- the
# second assignment looks like a typo for another variable; verify.
if(class(g4)=="try-error"||!g4$iter<10|| any(is.na(g4$coef))||any(abs(g4$coef) > M)){
g4 <- list(coef=rep(NA,1)); t4 <- NA; t4 <- NA
}
if(class(g5)=="try-error"||g5@optinfo$conv$opt!=0||anyNA(coef(g5)$state)||abs(unique(coef(g5)$state$X)) > M){
beta.CRE <- NA;alpha.CRE <- rep(NA,N);mar5 <- NA;bmar5 <- NA; ame5 <- NA; t5 <- NA
}else{
beta.CRE <- g5@beta[2]
alpha.CRE <- rowSums(unique(cbind(1, Z)) * as.matrix(coef(g5)$state[,-2]))
if(any(abs(c(beta.CRE, alpha.CRE))> M)){
beta.CRE <- alpha.CRE <- mar5 <- NA;bmar5 <- NA; ame5 <- NA
}else{
mar5 <- mean((predict(g5, type="response") -plogis(-2*data1$X + c*gamma))^2)
bmar5 <-(mean(predict(g5, type="response") -plogis(-2*data1$X + c*gamma)))
ame5 <- ((predict(g5, type="response", newdata=data1)*(1-predict(g5, type="response", newdata=data1))*(coef(g5)$state$X[1])))
}
}
if(class(g6)=="try-error"||g6@optinfo$conv$opt!=0||anyNA(coef(g6)$state)||abs(unique(coef(g6)$state$X)) > M){
beta.RE <- NA;alpha.RE <- rep(NA,N);mar6 <- NA;bmar6 <- NA; ame6 <- NA; t6 <- NA
}else{
beta.RE <- g6@beta[2]
alpha.RE <- as.matrix(coef(g6)$state[,-2])
if(any(abs(c(beta.RE, alpha.RE))> M)){
beta.RE <- alpha.RE <- mar6 <- NA;bmar6 <- NA; ame6 <- NA
}else{
mar6 <- mean((predict(g6, type="response") -plogis(-2*data1$X + data1$cgamma))^2)
bmar6 <-(mean(predict(g6, type="response") -plogis(-2*data1$X + data1$cgamma)))
ame6 <- ((predict(g6, type="response", newdata=data1)*(1-predict(g6, type="response", newdata=data1))*(coef(g6)$state$X[1])))
}
}
if(class(g.beck)=="try-error"|| any(is.na(g.beck$coef))||any(abs(g.beck$coef) > M)){
g.beck <- list(coef=rep(NA, length(unique(data2$state))+1))
mar.beck <- NA; bmar.beck <- NA; ame.beck <- NA; t.beck <- NA
}else{
X.beck <- model.matrix(~X+state-1, data=data2)
p.beck <- plogis(X.beck %*% g.beck$coef)
mar.beck <- mean((p.beck -plogis(-2*data2$X + data2$cgamma))^2)
bmar.beck <-(mean(p.beck -plogis(-2*data2$X + data2$cgamma)))
p.beck.new <- plogis(model.matrix(~X+state-1, data=data2) %*% g.beck$coefficients)
ame.beck <- (p.beck.new*(1-p.beck.new)*(g.beck$coef[1]))
}
if(class(g7)=="try-error" || any(is.na(g7$coef))||any(abs(g7$coef) > M)){
mar7 <- NA; bmar7 <- NA; ame7 <- NA; t7 <- NA
}else{
mar7 <- mean((predict(g7) -plogis(-2*data1$X + c*gamma))^2)
bmar7 <-(mean(predict(g7) -plogis(-2*data1$X + c*gamma)))
ame7 <- coef(g7)[1]
}
# MSE/bias of the estimated fixed effects against the true gamma*alpha;
# MLDV and Beck are compared only on the strata they actually estimate.
keep <- as.numeric(as.character(unique(data1[!(sum.y==0 | sum.y==T)]$state)))
m3 <- mean((gamma*alpha-g3$coef[-1])^2)
m2 <- mean((gamma*alpha[keep]-g2$coef[-1])^2)
m5 <- mean((gamma*alpha-alpha.CRE)^2)
m6 <- mean((gamma*alpha-alpha.RE)^2)
mbeck <- mean((gamma*alpha[keep]-g.beck$coef[-1])^2)
b3 <-(mean(gamma*alpha-g3$coef[-1]))
b2 <-(mean(gamma*alpha[keep]-g2$coef[-1]))
b5 <-(mean(gamma*alpha-alpha.CRE))
b6 <-(mean(gamma*alpha-alpha.RE))
bbeck <-(mean(gamma*alpha[keep]-g.beck$coef[-1]))
# True AME of X under the DGP (slope -2), then collapse each estimator's
# per-observation AME vector to the replicate-level average.
ame.truth <- (plogis(-2*data1$X + data1$cgamma)*(1-plogis(-2*data1$X + data1$cgamma))*-2)
truth.ame <- mean(ame.truth)
ame2a <- sum(ame2)/nrow(data1)
ame2 <- mean(ame2)
ame3 <- mean(ame3)
ame5 <- mean(ame5)
ame6 <- mean(ame6)
ame.beck <- mean(ame.beck)
# One result row per replicate; column names are attached to `out` below.
output <- c(g4$coef[1],
g2$coef[1],
g3$coef[1],
beta.CRE,
beta.RE,
g.beck$coef[1],
m2, m3, m5, m6, mbeck,
b2, b3, b5, b6, bbeck,
mar2, mar3, mar5, mar6, mar7, mar.beck,
bmar2, bmar3, bmar5, bmar6, bmar7, bmar.beck,
ame2, ame2a, ame3, ame5, ame6, ame7, ame.beck, truth.ame,
t4, t2, t3,t5, t6, t7, t.beck,
within.y, prop.all.zero,
r)
output
}
colnames(out) <- c("CML", "MLDV", "FFE", "CRE", "RE", "Beck",
"mse.MLDV","mse.FFE", "mse.CRE", "mse.RE", "mse.beck",
"bias.MLDV", "bias.FFE", "bias.CRE", "bias.RE", "bias.beck",
"pred.MLDV", "pred.FFE","pred.CRE", "pred.RE", "pred.LPM", "pred.beck",
"bias.pred.MLDV", "bias.pred.FFE","bias.pred.CRE", "bias.pred.RE", "bias.pred.LPM", "bias.pred.beck",
"ame2", "ame2a", "ame3", "ame5", "ame6", "ame7", "ame.beck", "ame.truth",
"time.CML", "time.MLDV", "time.FFE", "time.CRE", "time.RE", "time.LPM", "time.beck",
"Prop.1", "Prop.dropped",
"corXc")
out <- as.data.frame(out)
# Per-replicate AME bias and squared error relative to the true AME.
out$bias.mem.MLDV <- out$ame2 - out$ame.truth
out$bias.mem.MLDV2 <- out$ame2a - out$ame.truth
out$bias.mem.FFE <- out$ame3 - out$ame.truth
out$bias.mem.CRE <- out$ame5 - out$ame.truth
out$bias.mem.RE <- out$ame6 - out$ame.truth
out$bias.mem.LPM <- out$ame7 - out$ame.truth
out$bias.mem.beck <- out$ame.beck - out$ame.truth
out$mse.mem.MLDV <- (out$ame2 - out$ame.truth)^2
out$mse.mem.MLDV2 <- (out$ame2a - out$ame.truth)^2
out$mse.mem.FFE <- (out$ame3 - out$ame.truth)^2
out$mse.mem.CRE <- (out$ame5 - out$ame.truth)^2
out$mse.mem.RE <- (out$ame6 - out$ame.truth)^2
out$mse.mem.LPM <- (out$ame7 - out$ame.truth)^2
out$mse.mem.beck <- (out$ame.beck - out$ame.truth)^2
out <- as.matrix(out)
results[[i]] <- out
}
stopCluster(cl)
save.image("MC_main.rdata")
# Restart the session state from the saved image before plotting.
# NOTE(review): rm(list=ls()) inside a script wipes the caller's
# workspace; acceptable for a one-off replication script only.
rm(list=ls())
load("MC_main.rdata")
library(ggplot2)
library(extrafont)
library(matrixStats)
library(reshape2)
library(scales)
library(data.table)
# Per design cell: average event rate (ybar) and average share of strata
# dropped by the all-zero/all-one screen; used as facet labels below.
adInfo <- cbind(NT, do.call(rbind,lapply(results, function(x){return(cbind(mean(x[,"Prop.1"]), mean(x[,"Prop.dropped"])))})))
adInfo <- data.table(adInfo)
adInfo[,Var1:=NULL]
colnames(adInfo) <- c("T", "Rare", "ybar", "dropped")
adInfo[,ybar:= round(mean(ybar),2), by=list(T, Rare)]
adInfo[,dropped:=round(mean(dropped),2)*100, by=list(T, Rare)]
adInfo$Rare <- ifelse(adInfo$Rare==g[1], "Rare event", "Non-rare Event")
adInfo$Rare <- factor(adInfo$Rare, levels=c("Rare event", "Non-rare Event"))
adInfo$T <- factor(paste("T =", adInfo$T), levels=paste("T =", t))
# plotmath label: "% dropped == <value>", parsed by geom_text below.
adInfo$label <- with(adInfo, paste("textstyle('%'~dropped) == ", dropped, sep=""))
results <- lapply(results, as.matrix)
# p1data: RMSE of the slope estimates (true slope is -2, hence the +2).
p1data <- cbind(NT,
do.call(rbind,
lapply(results,
function(x){
sqrt(colVars(x[,c(
"CML", "FFE", "CRE", "MLDV", "RE", "Beck")], na.rm=T) +
(colMeans(x[,c(
"CML", "FFE", "CRE", "MLDV", "RE", "Beck")], na.rm=T)+ 2)^2)})))
colnames(p1data)[colnames(p1data)=="FFE"] <- c("PML")
p1data <- melt(p1data, id.vars = c("Var1", "Var2", "Var3"))
colnames(p1data) <- c("N", "T", "Rare", "Estimator", "RMSE")
p1data$Rare <- ifelse(p1data$Rare==g[1], "Rare event", "Non-rare Event")
p1data$Rare <- factor(p1data$Rare, levels=c("Rare event", "Non-rare Event"))
p1data$T <- factor(paste("T =", p1data$T), levels=paste("T =", t))
# p2data: absolute bias of the slope estimates.
p2data <- cbind(NT, do.call(rbind, lapply(results,
function(x){
abs(colMeans(x[,c( "CML", "FFE", "CRE", "MLDV", "RE", "Beck")]+2, na.rm=T))})))
colnames(p2data)[colnames(p2data)=="FFE"] <- c("PML")
p2data <- melt(p2data, id.vars = c("Var1", "Var2", "Var3"))
colnames(p2data) <- c("N", "T", "Rare", "Estimator", "Bias")
p2data$Rare <- ifelse(p2data$Rare==g[1], "Rare event", "Non-rare Event")
p2data$Rare <- factor(p2data$Rare, levels=c("Rare event", "Non-rare Event"))
p2data$T <- factor(paste("T =", p2data$T), levels=paste("T =", t))
# p3data: error in the estimated fixed effects.
p3data <- cbind(NT, do.call(rbind,
lapply(results,
function(x){ sqrt(colMeans(x[,c(
"mse.FFE", "mse.CRE", "mse.MLDV", "mse.RE", "mse.beck"
)], na.rm=T))})))
colnames(p3data)[4:8] <- c("PML", "CRE", "MLDV", "RE", "Beck")
p3data <- melt(p3data, id.vars = c("Var1", "Var2", "Var3"))
colnames(p3data) <- c("N", "T", "Rare", "Estimator", "RMSE")
# NOTE(review): the values were already square-rooted when p3data was
# built above, so this second sqrt yields the fourth root of the MSE --
# verify this is intentional.
p3data$RMSE <- sqrt(p3data$RMSE)
p3data$Rare <- ifelse(p3data$Rare==g[1], "Rare event", "Non-rare Event")
p3data$Rare <- factor(p3data$Rare, levels=c("Rare event", "Non-rare Event"))
p3data$T <- factor(paste("T =", p3data$T), levels=paste("T =", t))
# p4data: absolute bias of the estimated fixed effects.
p4data <- cbind(NT, do.call(rbind,
lapply(results,
function(x){ abs(colMeans(x[,c( #"bias.firth", "bias.cre",
"bias.FFE", "bias.CRE", "bias.MLDV", "bias.RE", "bias.beck"
)], na.rm=T))})))
colnames(p4data)[4:8] <- c("PML", "CRE", "MLDV", "RE", "Beck")
p4data <- melt(p4data, id.vars = c("Var1", "Var2", "Var3"))
colnames(p4data) <- c("N", "T", "Rare", "Estimator", "Bias")
p4data$Rare <- ifelse(p4data$Rare==g[1], "Rare event", "Non-rare Event")
p4data$Rare <- factor(p4data$Rare, levels=c("Rare event", "Non-rare Event"))
p4data$T <- factor(paste("T =", p4data$T), levels=paste("T =", t))
# p5data: MSE of the fitted probabilities.
p5data <- cbind(NT, do.call(rbind,
lapply(results,
function(x){ colMeans(x[,c(
"pred.FFE", "pred.CRE", "pred.MLDV", "pred.RE", "pred.beck", "pred.LPM"
)], na.rm=T)})))
colnames(p5data)[4:9] <- c("PML", "CRE", "MLDV", "RE", "Beck", "LPM")
p5data <- melt(p5data, id.vars = c("Var1", "Var2", "Var3"))
colnames(p5data) <- c("N", "T", "Rare", "Estimator", "MSE")
p5data$Rare <- ifelse(p5data$Rare==g[1], "Rare event", "Non-rare Event")
p5data$Rare <- factor(p5data$Rare, levels=c("Rare event", "Non-rare Event"))
p5data$T <- factor(paste("T =", p5data$T), levels=paste("T =", t))
# p6data: absolute bias of the fitted probabilities.
p6data <- cbind(NT, do.call(rbind,
lapply(results,
function(x){ abs(colMeans(x[,c(
"bias.pred.FFE", "bias.pred.CRE", "bias.pred.MLDV",
"bias.pred.RE", "bias.pred.beck","bias.pred.LPM"
)], na.rm=T))})))
colnames(p6data)[4:9] <- c("PML", "CRE", "MLDV", "RE", "Beck","LPM")
p6data <- melt(p6data, id.vars = c("Var1", "Var2", "Var3"))
colnames(p6data) <- c("N", "T", "Rare", "Estimator", "Bias")
p6data$Rare <- ifelse(p6data$Rare==g[1], "Rare event", "Non-rare Event")
p6data$Rare <- factor(p6data$Rare, levels=c("Rare event", "Non-rare Event"))
p6data$T <- factor(paste("T =", p6data$T), levels=paste("T =", t))
# p7data: absolute bias of the average marginal effects.
p7data <- cbind(NT, do.call(rbind,
lapply(results,
function(x){ abs(colMeans(x[,c(
"bias.mem.FFE", "bias.mem.CRE",
"bias.mem.MLDV", "bias.mem.RE",
"bias.mem.beck","bias.mem.LPM"
)], na.rm=T))})))
colnames(p7data)[4:9] <- c("PML", "CRE", "MLDV", "RE", "Beck","LPM")
p7data <- melt(p7data, id.vars = c("Var1", "Var2", "Var3"))
colnames(p7data) <- c("N", "T", "Rare", "Estimator", "Bias")
p7data$Rare <- ifelse(p7data$Rare==g[1], "Rare event", "Non-rare Event")
p7data$Rare <- factor(p7data$Rare, levels=c("Rare event", "Non-rare Event"))
p7data$T <- factor(paste("T =", p7data$T), levels=paste("T =", t))
# p8data: MSE of the average marginal effects.
p8data <- cbind(NT, do.call(rbind,
lapply(results,
function(x){colMeans( (x[,c(
"mse.mem.FFE", "mse.mem.CRE",
"mse.mem.MLDV", "mse.mem.RE",
"mse.mem.beck", "mse.mem.LPM"
)]), na.rm=T)})))
colnames(p8data)[4:9] <- c("PML", "CRE", "MLDV", "RE", "Beck", "LPM")
p8data <- melt(p8data, id.vars = c("Var1", "Var2", "Var3"))
colnames(p8data) <- c("N", "T", "Rare", "Estimator", "MSE")
p8data$Rare <- ifelse(p8data$Rare==g[1], "Rare event", "Non-rare Event")
p8data$Rare <- factor(p8data$Rare, levels=c("Rare event", "Non-rare Event"))
p8data$T <- factor(paste("T =", p8data$T), levels=paste("T =", t))
# p9data: average elapsed fitting time per estimator.
p9data <- cbind(NT, do.call(rbind,
lapply(results,
function(x){colMeans( (x[,c(
"time.FFE", "time.CRE",
"time.MLDV", "time.RE",
"time.beck", "time.LPM")]), na.rm=T)})))
colnames(p9data)[4:9] <- c("PML", "CRE", "MLDV", "RE", "Beck", "LPM")
p9data <- melt(p9data, id.vars = c("Var1", "Var2", "Var3"))
colnames(p9data) <- c("N", "T", "Rare", "Estimator", "Time")
p9data$Rare <- ifelse(p9data$Rare==g[1], "Rare event", "Non-rare Event")
p9data$Rare <- factor(p9data$Rare, levels=c("Rare event", "Non-rare Event"))
p9data$T <- factor(paste("T =", p9data$T), levels=paste("T =", t))
# Figures g1-g8: one line per estimator across N, faceted by rarity and
# T; the parsed adInfo$label prints the % of strata dropped per facet.
g1 <- ggplot(p1data[! p1data$Estimator %in% c("RE","Beck", "LPM", "PML"),])+ #RMSE in beta
geom_line(aes(x=N, y=(RMSE), color=Estimator, linetype=Estimator), size=1)+
facet_grid(Rare~T )+
geom_text(aes(x=95,y=Inf, label=label),
data=adInfo, parse=TRUE, vjust=1.1, size=5, hjust=1, check_overlap = TRUE) +
theme_bw(16)+
ylab('RMSE')+
xlab(expression(N))+
scale_linetype_manual(values=c("dotted", "solid", "dashed", "dotdash"))+
scale_color_manual(values=hue_pal()(4)[c(2,1,3,4)])+
theme(legend.position="bottom",
legend.title = element_text(size=18,
face="bold"),
legend.text = element_text(size = 18),
strip.text.y = element_text(size=14),
legend.key.size = unit(.5,"in"))
print(g1)
g2 <- ggplot(p2data[! p2data$Estimator %in% c("RE","Beck", "LPM", "PML"),])+ #bias in beta
geom_line(aes(x=N, y=(Bias), color=Estimator, linetype=Estimator), size=1)+
facet_grid(Rare~T )+
geom_text(aes(x=95,y=Inf, label=label),
data=adInfo, parse=TRUE, vjust=1.1, size=5, hjust=.9, check_overlap = TRUE) +
theme_bw(16)+
ylab(' Absolute Bias')+
xlab(expression(N))+
scale_linetype_manual(values=c("dotted", "solid", "dashed", "dotdash"))+
scale_color_manual(values=hue_pal()(4)[c(2,1,3,4)])+
theme(legend.position="bottom",
legend.title = element_text(size=18,
face="bold"),
legend.text = element_text(size = 18),
strip.text.y = element_text(size=14),
legend.key.size = unit(.5,"in"))
print(g2)
g3 <- ggplot(p3data[! p3data$Estimator %in% c("RE","Beck", "LPM", "PML"),])+ #rmse in c
geom_line(aes(x=N, y=(RMSE), color=Estimator, linetype=Estimator), size=1)+
facet_grid(Rare~T)+
geom_text(aes(x=95,y=Inf, label=label),
data=adInfo, parse=TRUE, vjust=1.1, size=5, hjust=.9, check_overlap = TRUE) +
theme_bw(16)+
ylab('RMSE')+
xlab(expression(N))+
scale_linetype_manual(values=c( "solid", "dashed", "dotdash"))+
scale_color_manual(values=hue_pal()(4)[c(1,3,4)])+
theme(legend.position="bottom",
legend.title = element_text(size=18,
face="bold"),
legend.text = element_text(size = 18),
strip.text.y = element_text(size=14),
legend.key.size = unit(.5,"in"))
print(g3)
g5 <- ggplot(p5data[!p5data$Estimator %in% c("RE","Beck", "LPM", "PML"),])+ #rmse in pr
geom_line(aes(x=N, y=sqrt(MSE), color=Estimator, linetype=Estimator), size=1)+
facet_grid(Rare~T )+
geom_text(aes(x=95,y=Inf, label=label),
data=adInfo, parse=TRUE, vjust=1.1, size=5, hjust=.9, check_overlap = TRUE) +
theme_bw(16)+
ylab('RMSE')+ylim(0,.45)+
xlab(expression(N))+
scale_linetype_manual(values=c( "solid", "dashed", "dotdash"))+
scale_color_manual(values=hue_pal()(4)[c(1,3,4)])+
theme(legend.position="bottom",
legend.title = element_text(size=18,
face="bold"),
legend.text = element_text(size = 18),
strip.text.y = element_text(size=14),
legend.key.size = unit(.5,"in"))
print(g5)
g6 <- ggplot(p6data[!p6data$Estimator %in% c("RE","Beck", "LPM", "PML"),])+ #bias in pr
geom_line(aes(x=N, y=Bias, color=Estimator, linetype=Estimator), size=1)+
facet_grid(Rare~T )+
geom_text(aes(x=95,y=Inf, label=label),
data=adInfo, parse=TRUE, vjust=1.1, size=5, hjust=.9, check_overlap = TRUE) +
theme_bw(16)+
ylab('Absolute Bias')+ylim(0,.3)+
xlab(expression(N))+
scale_linetype_manual(values=c( "solid", "dashed", "dotdash"))+
scale_color_manual(values=hue_pal()(4)[c(1,3,4)])+
theme(legend.position="bottom",
legend.title = element_text(size=18,
face="bold"),
legend.text = element_text(size = 18),
strip.text.y = element_text(size=14),
legend.key.size = unit(.5,"in"))
print(g6)
g7 <- ggplot(p7data[! p7data$Estimator %in% c("RE","Beck", "LPM", "PML"),])+ #bias in marginal
geom_line(aes(x=N, y=Bias, color=Estimator, linetype=Estimator), size=1)+
facet_grid(Rare~T )+
geom_text(aes(x=95,y=Inf, label=label),
data=adInfo, parse=TRUE, vjust=1.1, size=5, hjust=.9, check_overlap = TRUE) +
theme_bw(16)+
ylab('Absolute Bias')+
xlab(expression(N))+
scale_linetype_manual(values=c( "solid", "dashed", "dotdash"))+
scale_color_manual(values=hue_pal()(4)[c(1,3,4)])+
theme(legend.position="bottom",
legend.title = element_text(size=18,
face="bold"),
legend.text = element_text(size = 18),
strip.text.y = element_text(size=14),
legend.key.size = unit(.5,"in"))
print(g7)
g8 <- ggplot(p8data[! p8data$Estimator %in% c("RE","Beck", "LPM", "PML"),])+ #rmse in marginal
geom_line(aes(x=N, y=sqrt(MSE), color=Estimator, linetype=Estimator), size=1)+
facet_grid(Rare~T )+
geom_text(aes(x=95,y=Inf, label=label),
data=adInfo, parse=TRUE, vjust=1.1, size=5, hjust=.9, check_overlap = TRUE) +
theme_bw(16)+
ylab('RMSE')+ylim(0,.45)+
xlab(expression(N))+
scale_linetype_manual(values=c( "solid", "dashed", "dotdash"))+
scale_color_manual(values=hue_pal()(4)[c(1,3,4)])+
theme(legend.position="bottom",
legend.title = element_text(size=18,
face="bold"),
legend.text = element_text(size = 18),
strip.text.y = element_text(size=14),
legend.key.size = unit(.5,"in"))
print(g8)
# Write each figure to a PDF at a common size.
h=7; w=9
pdf(file="Figure1.pdf", height=h, width=w); g1; dev.off()
pdf(file="FigureA1.pdf", height=h, width=w); g2; dev.off()
pdf(file="Figure2.pdf", height=h, width=w); g3; dev.off()
pdf(file="Figure3.pdf", height=h, width=w); g5; dev.off()
pdf(file="FigureA2.pdf", height=h, width=w); g6; dev.off()
pdf(file="FigureA3.pdf", height=h, width=w); g7; dev.off()
pdf(file="Figure4.pdf", height=h, width=w); g8; dev.off()
#### APPENDIX B####
# Summary tables at the largest design cell (N = 100, T = 25), reported
# relative to the CRE column; row names carry the LaTeX labels.
final.table <- rbind(c(p1data[p1data$N==100 & p1data$T== "T = 25" & p1data$Rare=="Non-rare Event",]$RMSE, NA),
c(NA,sqrt(p5data[p5data$N==100 & p5data$T== "T = 25" & p5data$Rare=="Non-rare Event",]$MSE)),
c(NA,sqrt(p8data[p8data$N==100 & p8data$T== "T = 25" & p8data$Rare=="Non-rare Event",]$MSE)))
colnames(final.table) <- c(as.character(p1data[p1data$N==100 & p1data$T== "T = 25" & p1data$Rare=="Non-rare Event",]$Estimator), "LPM")
rownames(final.table) <- c("RMSE in $\\hat\\beta$",
"RMSE in $\\hat{p}$",
"RMSE in AME")
print(round((final.table/final.table[,"CRE"])[,-3], 2))
final.table.rare <- rbind(c(p1data[p1data$N==100 & p1data$T== "T = 25" & p1data$Rare=="Rare event",]$RMSE, NA),
c(NA,sqrt(p5data[p5data$N==100 & p5data$T== "T = 25" & p5data$Rare=="Rare event",]$MSE)),
c(NA,sqrt(p8data[p8data$N==100 & p8data$T== "T = 25" & p8data$Rare=="Rare event",]$MSE)))
colnames(final.table.rare) <- c(as.character(p1data[p1data$N==100 & p1data$T== "T = 25" & p1data$Rare=="Non-rare Event",]$Estimator), "LPM")
rownames(final.table.rare) <- c("RMSE in $\\hat\\beta$",
"RMSE in $\\hat{p}$",
"RMSE in AME")
print(round((final.table.rare/final.table.rare[,"CRE"])[,-3],2))
|
e9843ee4cbca14698c35e6ab5a8b0d1b713c30b8 | 66b5bd8a08bef5dd328a86c465dc7a7036f09143 | /Apps/R/Graphs/performance differences per level.R | 38fa52c3a653ae88cb4b8162f8cb40cf64fe96e3 | [
"MIT"
] | permissive | RGreinacher/bachelor-thesis | 634b1021797ebd88c936ff1f15e84a5d1ea9207a | 60dbc03ce40e3ec42f2538d67a6aabfea6fbbfc8 | refs/heads/master | 2021-01-01T20:37:55.754031 | 2018-07-12T12:24:48 | 2018-07-12T12:24:48 | 98,902,427 | 1 | 0 | null | null | null | null | UTF-8 | R | false | false | 3,811 | r | performance differences per level.R | library(readr)
library(ggplot2)
library(gridExtra)
# Constants: experiment data plus fixed color palettes. The palette keys
# are the German condition labels used on the plots ("kA"/"keine
# Assistenz" = no assistance) and must match the factor levels built
# below, so they are left untranslated.
data_frame <- read_delim("~/Nextcloud/Uni/Bachelorarbeit/Apps/Auswertung/data/data_frame.csv", ";", escape_double = FALSE, trim_ws = TRUE)
# One color per assistance level (none / 10% / 50% / 90%).
color_coding = c(
"keine Assistenz" = "#7A88A5",
"10% Assistenz" = "#BF3865",
"50% Assistenz" = "#E0B558",
"90% Assistenz" = "#93B449")
# Same four colors keyed by "block-level" so each block repeats the
# level palette.
color_coding_block_level = c(
"1-kA" = "#7A88A5",
"2-kA" = "#7A88A5",
"3-kA" = "#7A88A5",
"4-kA" = "#7A88A5",
"1-10" = "#BF3865",
"2-10" = "#BF3865",
"3-10" = "#BF3865",
"4-10" = "#BF3865",
"1-50" = "#E0B558",
"2-50" = "#E0B558",
"3-50" = "#E0B558",
"4-50" = "#E0B558",
"1-90" = "#93B449",
"2-90" = "#93B449",
"3-90" = "#93B449",
"4-90" = "#93B449")
# Box plots of correctness per block and assistance level.
relevant_data <- data.frame(
"condition" = character(),
"block" = numeric(),
"block_condition" = character(),
"correctness" = numeric())
# One row per subject x block: the block's correctness and the assistance
# condition shown in that block ("kA" when no assistance was present).
# NOTE(review): growing a data.frame with rbind() in a double loop is
# O(n^2); harmless at 4 x 66 rows, but preallocate if this scales.
for (block_id in 0:3) {
for (subject_id in 1:66) {
data_row = data_frame[subject_id,]
datum = data_row[[sprintf('block_%d_correctness', block_id)]]
if (data_row[[sprintf('block_%d_assistance_present', block_id)]] == "True") {
condition = data_row$assistance_level
} else {
condition = "kA"
}
relevant_data <- rbind(
relevant_data,
data.frame(
condition = condition,
block = (block_id + 1),
block_condition = sprintf('%d-%s', (block_id + 1), condition),
correctness = datum))
}
}
# Fix the x-axis ordering: block number first, assistance level within.
relevant_data$block_condition <- factor(relevant_data$block_condition, levels = c("1-kA", "1-10", "1-50", "1-90", "2-kA", "2-10", "2-50", "2-90", "3-kA", "3-10", "3-50", "3-90", "4-kA", "4-10", "4-50", "4-90"))
# NOTE(review): chaining ggsave() onto the plot with `+` relies on
# old ggplot2 behavior; call ggsave() separately in current versions.
ggplot(
relevant_data,
aes(
block_condition,
correctness,
colour = block_condition)) +
geom_boxplot() +
labs(
x = "Block Nr. - Stufe der Assistenz",
y = "Median der Richtigkeiten") +
scale_color_manual(
name="Legende",
values=color_coding_block_level) +
theme(legend.position="none") +
ggtitle("Median der Richtigkeiten pro Block, jeweils nach Stufen") +
ggsave(
"box_plot_median_correctness_per_block_and_level.png",
width = 8,
height = 4.5)
# Box plots of correctness pooled across blocks, by assistance level.
relevant_data <- data.frame(
"condition" = character(),
"correctness" = numeric())
# Two rows per subject: mean correctness over the two assisted blocks
# and over the two unassisted blocks. Which blocks were assisted is
# given by block_0_assistance_present (blocks alternate).
for (i in 1:66) {
data_row = data_frame[i,]
if (data_row$block_0_assistance_present == "True") {
correctness_with_assistance = data_row$block_0_correctness + data_row$block_2_correctness
correctness_without_assistance = data_row$block_1_correctness + data_row$block_3_correctness
} else {
correctness_with_assistance = data_row$block_1_correctness + data_row$block_3_correctness
correctness_without_assistance = data_row$block_0_correctness + data_row$block_2_correctness
}
relevant_data <- rbind(
relevant_data,
data.frame(
condition = sprintf("%d%% Assistenz", data_row$assistance_level), # condition / assistance level
correctness = (correctness_with_assistance / 2)))
relevant_data <- rbind(
relevant_data,
data.frame(
condition = "keine Assistenz",
correctness = (correctness_without_assistance / 2)))
}
# Order levels from no assistance to 90% assistance for the x-axis.
relevant_data$condition <- factor(relevant_data$condition, levels = c("keine Assistenz", "10% Assistenz", "50% Assistenz", "90% Assistenz"))
ggplot(
relevant_data,
aes(
condition,
correctness,
colour = condition)) +
geom_boxplot() +
labs(
x = "Stufen der Assistenz",
y = "Median der Richtigkeiten") +
scale_color_manual(
name="Legende",
values=color_coding) +
theme(legend.position="none") +
ggtitle("Median der Richtigkeiten über alle Blöcke nach Stufen") +
ggsave(
"box_plot_median_correctness_per_level.png",
width = 8,
height = 4.5)
|
820341800376192b9429a28e4c3aa87f05743e66 | 29585dff702209dd446c0ab52ceea046c58e384e | /iGasso/R/mlmp.test.R | 1d2f5c1d84cd16d2692084efb33ee7410afb74d2 | [] | no_license | ingted/R-Examples | 825440ce468ce608c4d73e2af4c0a0213b81c0fe | d0917dbaf698cb8bc0789db0c3ab07453016eab9 | refs/heads/master | 2020-04-14T12:29:22.336088 | 2016-07-21T14:01:14 | 2016-07-21T14:01:14 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 1,680 | r | mlmp.test.R | mlmp.test = function(g, y, weights=NULL, stat="score")
{
# Multilocus, multi-phenotype association test. g: n x p genotype
# matrix; y: n x q phenotype matrix; returns an "htest" object.
DNAME = paste(deparse(substitute(g)), "and", deparse(substitute(y)))
y = scale(y) # dimension n x q
n = nrow(y)
q = ncol(y)
A = crossprod(y)
vg = apply(g, 2, var)
g = as.matrix(g[,is.finite(1/vg)], nrow=n) # remove non-informative SNPs
if (is.null(weights)) weights = 1/vg[is.finite(1/vg)] # Not the square root, different from SKAT
else weights = weights[is.finite(1/vg)]
# Apply sqrt(weights) column-wise to the genotype matrix.
g = g %*% diag(sqrt(weights))
Sigma0 = var(g, g)
p = ncol(g)
# Regression coefficients of g on the (scaled) phenotypes, and the
# associated "hat" cross-product matrix.
B.t = solve(A, crossprod(y, g))
H = crossprod(B.t, A %*% B.t)
Sigma = ((n-1)*Sigma0 - H)/(n-1-q) # the unbiased estimate of Sigma
# Each statistic's p-value is a tail probability of a weighted sum of
# chi-squares; davies() is presumably CompQuadForm::davies, loaded by
# the caller -- verify the package is attached before use.
if (stat=="F"){
S = sum(diag(H))/q/sum(diag(Sigma))
names(S) = "F stat"
lmbd = eigen(Sigma, symmetric=TRUE, only.values=TRUE)$values
p.value = davies(0, lambda=c(lmbd, -S*q/(n-1-q)*lmbd), h=rep(c(q, n-1-q), c(p, p)))$Qq
}
else if (stat=="Wald"){
S = sum(diag(H))/q/sum(diag(Sigma))
names(S) = "Wald stat"
lmbd = eigen(Sigma, symmetric=TRUE, only.values=TRUE)$values
p.value = davies(S*q*sum(diag(Sigma)), lambda=lmbd, h=rep(q, p))$Qq
}
else if (stat=="score"){
S = sum(diag(H))/q/sum(diag(Sigma0))
names(S) = "Score stat"
lmbd = eigen(Sigma0, symmetric=TRUE, only.values=TRUE)$values
p.value = davies(S*q*sum(diag(Sigma0)), lambda=lmbd, h=rep(q, p))$Qq
}
PAR = c(p, q)
names(PAR) = c("# loci", "# traits")
structure(list(statistic = S, p.value = p.value, parameter = PAR,
method = "Multilocus multiple phenotypes test of association", data.name = DNAME),
class = "htest")
}
|
13f2bab911d1d0725961749d617d34cf7d5adde4 | d2eafe165be8e006a48de556070b763f3f7b15c1 | /plot1.R | 2607639be3d3635b4343cf9b593cbf6db2510ea2 | [] | no_license | brianblank/courera_ExData_Plotting1 | 4bf628277ac7ed2e94621cf32e5ff0212f7432cf | 6b461a352500091537bb8a4a25d86e9af7b07f1d | refs/heads/master | 2021-01-17T20:49:42.056240 | 2014-11-09T03:10:50 | 2014-11-09T03:10:50 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 1,074 | r | plot1.R | plot1 <- function(directory = ".") {
# Load data for dates Feb 1 2007 and Feb 2 2007
data <- read.csv(sprintf("%s/%s", directory, "household_power_consumption.txt"),
sep=";",
header=FALSE,
colClasses=c(rep("character",2),
rep("numeric",7)),
na.strings="?",
skip=66637,
nrows=2880)
# Load the column headers
names(data) <- read.csv(sprintf("%s/%s", directory, "household_power_consumption.txt"),
sep=";",
header=FALSE,
colClasses=c(rep("character",9)),
nrows=1)
# Reformat the date column to POSIXlt/POSIXt
data <- cbind(datetime=strptime(paste(data$Date,data$Time), "%d/%m/%Y %H:%M:%OS"),
data[,3:9])
# Open the png file
png(file=sprintf("%s/%s", directory, "plot1.png"),
width=480,
height=480)
# Define a single graph in the png file
par(mfrow=c(1,1))
# Create the histogram
hist(data$Global_active_power,
col=c("red"),
xlab="Global Active Power (kilowatts)",
main="Global Active Power")
# Close the file
dev.off()
# Return success
sprintf("%s/plot1.png created.", directory)
}
|
de26c018851fc7783c633298b281398d25b69b91 | ee358af808580e6b1653a015e3427aeabfba81f5 | /run_analysis.R | ea80f01121e93253617a9a6e54e9fa355dc4a4d2 | [] | no_license | marmai123/Getting-and-Cleaning-Data-Course-Project | c801965b01377ea4a0b4d55fe9fe95fa33157ffe | fddffe8ac43695bd545440401af1dabecf1ffefe | refs/heads/main | 2023-06-17T04:47:14.499726 | 2021-07-15T12:02:58 | 2021-07-15T12:02:58 | 377,470,886 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 2,614 | r | run_analysis.R | library(data.table)
library(dplyr)
# Download the UCI HAR dataset into the "data" directory and extract it.
if (!file.exists("data")) {
  dir.create("data")
}
url <- "https://d396qusza40orc.cloudfront.net/getdata%2Fprojectfiles%2FUCI%20HAR%20Dataset.zip"
zip_path <- file.path("data", "mydata.zip")
download.file(url, destfile = zip_path)
# Bug fix: the archive is saved under ./data, so unzip must point there;
# the previous unzip("mydata.zip") looked in the working directory and
# failed. Extraction lands in "./UCI HAR Dataset/", as read below.
unzip(zip_path)
# Load the feature (variable) names; column 1 is just a running index.
features <- read.csv("./UCI HAR Dataset/features.txt", header = FALSE, sep = " ")
features <- features[,2]
# Read the raw measurement matrices plus the activity labels and subject
# ids for both the test and training partitions.
test_data <- read.table("./UCI HAR Dataset/test/X_test.txt")
test_labels <- read.table("./UCI HAR Dataset/test/y_test.txt")
test_subject <- read.table("./UCI HAR Dataset/test/subject_test.txt")
train_data <- read.table("./UCI HAR Dataset/train/X_train.txt")
train_labels <- read.table("./UCI HAR Dataset/train/y_train.txt")
train_subject <- read.table("./UCI HAR Dataset/train/subject_train.txt")
# Add variable names to test and training data
names(test_data) = features
names(train_data) = features
# Append labels and subject to test and training data
test_data$activity <- test_labels[,1]
test_data$subject <- test_subject[,1]
train_data$activity <- train_labels[,1]
train_data$subject <- train_subject[,1]
#### 1. Merge the training and the test sets to create one data set.
data <- rbind(test_data,train_data)
### 2. Extract only the measurements on the mean and standard deviation for each measurement.
# Select relevant variables, Put "subject" and "label" first in table.
data_new <- select(data,c(subject,activity,contains("mean"),contains("std")))
# Remove Angle variables
data_new <- select(data_new, !contains("angle"))
### 3. Use descriptive activity names to name the activities in the data set.
activities <- read.table("./UCI HAR Dataset/activity_labels.txt")
activities <- activities[,2]
# Activity codes are 1..6, so indexing the label vector maps code->name.
data_new$activity <- activities[data_new$activity]
### 4. Appropriately labels the data set with descriptive variable names.
# Expand the abbreviated prefixes/suffixes in the feature names.
names_new <- names(data_new)
names_new <- gsub("Freq","Frequency",names_new)
names_new <- gsub("Acc","Acceleration",names_new)
names_new <- gsub("std","StandardDeviation",names_new)
names_new <- gsub("^t","TimeDomain_",names_new)
names_new <- gsub("^f","FrequencyDomain_",names_new)
names(data_new) <- names_new
### 5. From the data set in step 4, creates a second, independent tidy data set with the average of each variable for each activity and each subject.
tidy_data <- aggregate(data_new[,3:81], by = list(subject = data_new$subject, activity = data_new$activity), FUN = mean)
write.table(tidy_data,"TidyData.txt", row.name=FALSE)
|
c0c0ede9090710bb7d3fc8e34666fcc7a3de6ff3 | 3c12c737fc61f19eed718026f569adab78e3aa41 | /model_selection_sims.R | aa2ba1f2f5475009ec5af59d291d3020de899e2f | [] | no_license | TraciPopejoy/Trend_comparison | 54c73a8357603e897f0e6bf739dcac9d89649038 | fa7f130720fb2e60a8c0b1e878666cb20b7f367d | refs/heads/master | 2022-12-20T03:32:29.311303 | 2020-10-14T21:59:51 | 2020-10-14T21:59:51 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 7,868 | r | model_selection_sims.R | library(bbsBayes)
library(tidyverse)
library(plyr)
library(jagsUI)
library(ggmcmc)
library(beepr)
# Set-up for the Wood Thrush (WOTH) example.
# Load the eBird simulated TRUE trends; explicit colClasses keep the
# id/scenario columns from being coerced.
sim_trends <- read.csv("data/ebird-trends_simulated.csv", skipNul = TRUE,colClasses = c("character","integer",rep("numeric",9)))
# The eBird *estimated* trends would be loaded the same way, but they
# are not needed for model selection.
# Helper functions follow.
# Convert a %-per-year trend (or its CI limit) to the log scale:
# log(1 + x/100).
log_trans <- function(x){
  # log1p is mathematically identical to log((x/100) + 1) but is
  # numerically accurate when the trend is close to zero.
  log1p(x/100)
}
# Convert a credible-interval width into a variance --------------------
# Works for trends on either the %-change or log scale. `CI_p` is the
# coverage of the interval in the data (default analyses use 95% CIs;
# e.g. pass CI_p = 0.8 for an 80% interval).
CI_to_var <- function(lci, #value of lower credible limit
                      uci, #value of upper credible limit
                      CI_p){
  z <- qnorm(1 - (1 - CI_p) / 2)
  half_width_sd <- (uci - lci) / (2 * z)
  half_width_sd^2 #returns the variance
}
for(h in 1:10){
#load the simulated eBird data run through our models
wothsim <- read.csv(paste("data/eBird_WOTH_sim", h, "_spatial_CAR_trends.csv", sep = ""))
#put TRUE trends and MOD trends into one df
#all steps combined - do this for each dataset, then stack dataframes and pivot
#WOTH scenario 1 true trends
#use same code for eBirdTM estimates
sim_trends_true <- sim_trends %>%
mutate(Region = paste(trunc(lat), trunc(lon), sep = "_")) %>% #stratify to region
mutate(betahat = log_trans(abd_ppy_true), #log transform trends and CIs
log_lci = log_trans(abd_ppy_lower),
log_uci = log_trans(abd_ppy_upper)) %>%
mutate(tau.betahat = 1/CI_to_var(lci = log_lci,uci = log_uci, CI_p = 0.8)) %>% #calculate precision
filter(rss_name == "woothr_breeding", #filter for species and scenario
scenario == 1) %>% ################ this is where we could make it a function for all scenarios
group_by(Region) %>%
dplyr::summarize(betahat = mean(betahat),
tau.betahat = mean(tau.betahat)) #
#%>% #summarize by region (stratum)
sim_trends_true <- mutate(sim_trends_true,dataset = rep("TRU", length(sim_trends_true$Region))) #add column with dataset identification
#474 obs
#WOTH scenario 1 estimated trends using our model and ebird data
sim_trends_mod <- wothsim %>%
mutate(dataset = rep("MOD", length(wothsim$Region))) %>% #add column with dataset identification
mutate(betahat = log_trans(Trend), # log transform
log_lci = log_trans(Trend_Q0.025),
log_uci = log_trans(Trend_Q0.975)) %>%
mutate(tau.betahat = 1/CI_to_var(lci = log_lci,uci = log_uci, CI_p = 0.95)) # precision
#436 obs
#stack dataframes, create full dataframe for scenario one with both true trends and estimated trends
sc1 <- rbind.fill(sim_trends_true, sim_trends_mod) %>%
pivot_wider(id_cols = Region, names_from = dataset, values_from = c(betahat,tau.betahat) ) %>%
filter(.,complete.cases(betahat_TRU, betahat_MOD))
#note: for final summaries, will need to transform back from the log scale, but not necessary for model selection
nstrata = nrow(sc1)
betahat = as.matrix(sc1[,c("betahat_TRU","betahat_MOD")])
tau.betahat = as.matrix(sc1[,c("tau.betahat_TRU","tau.betahat_MOD")])
jags_data <- list(nstrata = nstrata,
betahat = betahat,
tau.betahat = tau.betahat)
M1 = lm(data = sc1,
formula = betahat_TRU~betahat_MOD,
weights = tau.betahat_MOD)
### model for comparing trend estimates from two (indexed by e) different analyses, for different regions or strata (indexed by s)
### for this comparison, e==1 are estimates from the BBS and e==2 from eBird
### Model is modified from the grassland bird model from Link and Barker 2010, pg 90, also described in Sauer and Link 2002, Ecology 83:1743-1751
### and includes covariance components and corelation coefficient aspects from Matzke et al. 2017, https://doi.org/10.1525/collabra.78
### in essence, this is a paired t-test style comparison, that accounts for the imprecision in each s*e trend estimate.
### input data compiled in R, consist of nstrata (the number of strata), as well as 2 matrices: tau.betahat and betahat;
###both have nstrata rows and 2 columns
## tau.betahat = estaimtes of the precision of the trend estimates (1/variance)
## betahat = estimates of the trends
## nstrata = number of strata
modl <- "
model{
for (e in 1:2) {
for(s in 1:nstrata) {
betahat[s,e] ~ dnorm(beta[s,e],tau.betahat[s,e]) #betahat = data = trend estimates, tau.betahat = data = precision of trend estimate
} # end of s loop
mu[e] ~ dnorm(0,1) #mean trend for each survey
sd.beta[e] <- 1/sqrt(tau.beta[e])
} #end of e loop (indexing two models being compared)
tau.beta[1] ~ dscaled.gamma(0.001,50)
tau.beta[2] ~ dscaled.gamma(0.01,50)
#### multivariate normal structure for the among-strata between survey variation in trends
for(s in 1:nstrata) {
beta[s,1:2] ~ dmnorm(mu[1:2],ISigma_cov[1:2,1:2])
}
rho ~ dunif(-1,1) #estimated Pearson correlation coefficient
### sigma_cov is the covariance matrix
Sigma_cov[1,1] <- pow(sd.beta[1],2)
Sigma_cov[1,2] <- rho*sd.beta[1]*sd.beta[2]
Sigma_cov[2,1] <- rho*sd.beta[1]*sd.beta[2]
Sigma_cov[2,2] <- pow(sd.beta[2],2)
ISigma_cov[1:2,1:2] <- inverse(Sigma_cov[1:2,1:2])
for(s in 1:nstrata) {
dif[s] <- beta[s,1]-beta[s,2] # dif is a vector of the strata-specific trend differences after accounting for the imprecision of each estimate's trend and the group (survey/monitoring program) structure
} # end of second s-strata loop
m.dif <- mean(dif[])
} # end of model
"
trend_comp = "trend_comparison_correlation.txt"
cat(modl,file = trend_comp)
params <- c("m.dif", #this is the mean difference (trend1 - trend2) it's an estimate of the difference in the average fine-scale trends (like a difference in teh continental estimate accounting for uncertainty of the local trends, but ignoring the abundance-weights)
"Sigma_cov",#covariance matrix
#"dif", # these values could be mapped to show the spatial pattern in the differences between trends
#"beta", #these beta values could be used as posterior estimates of the regional trends aftern accounting for the precision and correlation
"rho", #rho is the correlation coefficient
"mu")
burnInSteps = 5000 #2000 # Number of steps to "burn-in" the samplers. this is sufficient for testing, but you'll want to increase this
nChains = 3 # Number of chains to run.
numSavedSteps=2000 #1000 # Total number of steps in each chain to save. this is sufficient for testing, but you'll want to increase this
thinSteps=10 #10 # Number of steps to "thin" (1=keep every step).
nIter = ceiling( ( (numSavedSteps * thinSteps )+burnInSteps)) # Steps per chain.
start.time <- Sys.time()
out = jagsUI(data = jags_data,
parameters.to.save = params,
n.chains = 3,
n.burnin = burnInSteps,
n.thin = thinSteps,
n.iter = nIter,
parallel = T,
model.file = trend_comp)
beep("mario")
end.time <- Sys.time()
time.taken <- round(end.time - start.time, 2)
time.taken
summr = out$summary #table showing a summary of the posterior distribution and some basic convergence stats for all the monitored parameters
out_ggs = ggs(out$samples)
#ggmcmc(out_ggs,file = "correlation_convergence_summaries.pdf", param_page = 8)
ggmcmc(out_ggs,file = paste("correlation_convergence_summaries_WOTH", h, ".pdf", sep = ""), param_page = 8)
cor <- cor(jags_data$betahat[,1],jags_data$betahat[,2]) # raw Pearson correlation without considering uncertainty
rho <- out$summary["rho",] ## estimated correlation accounting for uncertainty
cor
rho
save(list = c("cor", "rho"), file = paste("WOTH_sim", h, "results.RData", sep = ""))
|
245c70e1224955eee2ac6c5dec60a8e5b3598aee | 563f761040775133293553aaf17c4b8eac01d4ee | /scripts/run_me.R | db2a2a35ac952e4c717d37eb991b3f3cd41d6b67 | [
"MIT"
] | permissive | brendanhcullen/personality-diabetes | d77a30b34f6722833b13891d12585051f3ee606c | 6ac3ce8f5dc34eb03f78429e1f83edc45266beb6 | refs/heads/master | 2023-02-10T10:49:39.990733 | 2021-01-01T19:59:08 | 2021-01-01T19:59:08 | 198,284,368 | 0 | 0 | MIT | 2020-12-17T00:12:01 | 2019-07-22T18:53:46 | R | UTF-8 | R | false | false | 3,877 | r | run_me.R | # This script imports raw SAPA data and performs the following cleaning operations:
# 1) score SPI data
# 2) convert variables to correct type (e.g. factors)
# 3) create composite demographic variables (SES)
# load libraries
library(here)
library(tidyverse)
library(psych)
library(janitor)
library(missMDA)
# Source pre-processing functions and import SPI keys ---------------------
# get_spi_names(), score_spi_5(), score_spi_27() and residualize() used
# below are presumably defined in these sourced scripts -- verify there.
source(here("scripts/0.3_score_spi.R"))
source(here("scripts/0.2_residualize.R"))
source(here("scripts/0.4_impute.R"))
# read in keys for SPI scoring
keys = read.csv(here("data/superKey.csv"), header = TRUE, row.names = 1)
# Import data -------------------------------------------------------------
######################### IMPORT ACTUAL DATASET HERE #########################
# Import toy data ---------------------------------------------------------
######################### REMOVE THIS FOR REAL ANALYSIS #########################
# load in toy dataset
load(here("data/toydata.Rdata"))
# add RID variable to be consistent with real SAPA data
data = toydata %>%
rownames_to_column() %>%
rename(RID = rowname)
rm(toydata)
# Filter data -------------------------------------------------------------
# get SPI names
spi_names = get_spi_names(keys)
spi_5_names = spi_names$spi_5
spi_27_names = spi_names$spi_27
spi_135_names = spi_names$spi_135
all_spi_names = unlist(spi_names, use.names = FALSE)
# min number of responses to SPI-135 items required to be included in analysis
#min_n_valid = 27
min_n_valid = 10 # just for toy data
# count non-missing SPI-135 responses per row, filter, then drop the helper col
data = data %>%
mutate(n_valid_135 = apply(.[,spi_135_names], 1, function(x) sum(!is.na(x)))) %>%
filter(!is.na(diabetes), # only people who responsed to diabetes question
country == "USA", # only USA data
n_valid_135 >= min_n_valid) %>% # only people with at least 27 responses on SPI-135 items
select(-n_valid_135)
# only retain SPI items that are part of the SPI-135
# (reorders columns so the q_ item columns come last)
data = data %>%
select(spi_135_names) %>%
cbind(select(data, -starts_with("q_")), .)
# Score SPI-5 (i.e. Big 5) ------------------------------------------------
spi_5_scores = score_spi_5(data = data, keys = keys)
# add SPI-5 scores to data
data = cbind(select(data, -starts_with("q_")),
spi_5_scores,
select(data, starts_with("q_")))
# Score SPI-27 (using IRT) ------------------------------------------------
path_to_IRT_calibrations = here("data/IRTinfoSPI27.rdata") # specify where IRT calibrations file is saved
spi_27_scores = score_spi_27(data = data,
keys = keys,
path_to_IRT_calibrations = path_to_IRT_calibrations)
# add IRT scores to data
data = cbind(select(data, -starts_with("q_")),
spi_27_scores,
select(data, starts_with("q_")))
# Residualize -------------------------------------------------------------
demographic_vars = c(
"age", # age
"ethnic", # ethnicity
"jobstatus", # current job status
"education", "occPrestige", "occIncomeEst", # self SES
"p1edu", "p1occPrestige", "p1occIncomeEst", # parent 1 SES
"p2edu", "p2occPrestige", "p2occIncomeEst") # parent 2 SES
# convert relevant variables to factors
# NOTE(review): mutate_at() is superseded; across() is the modern equivalent
data = data %>%
mutate_at(c("ethnic", "jobstatus", "education", "p1edu", "p2edu"), as.factor)
VOI = all_spi_names
VTC = demographic_vars
id = "RID"
# extract residuals
residualized_data = residualize(VOI = all_spi_names, VTC = demographic_vars, data = data, id = "RID")
# side-by-side SD comparison of raw vs residualized scores
# (5 Big-5 + 27 SPI-27 + 135 item variables per panel)
rbind(describe(data[,VOI], fast = T),
describe(residualized_data[,VOI], fast = T)) %>%
mutate(g1 = rep(c("raw scores", "residualized scores"), each = length(VOI)),
category = rep(c(rep("Big 5", 5), rep("SPI 27", 27), rep("Items", 135)), 2)) %>%
ggplot(aes(x = -vars, y = sd, fill = category)) +
geom_bar(stat = "identity") +
coord_flip()+
facet_wrap(~g1)
|
a1b6474ca36ac444f30ce4bc0f436b924ed3438d | cd5b4927b7496c6f4f8b322a3041404665185e1e | /man/prop2coord.Rd | cfd3a9df9001c280129000168ae841998581fc94 | [
"MIT"
] | permissive | rintukutum/rdonuts | 2510f19d347dd1d5ced4f4c9b7068335da74ae6f | 3348b930326c95e535b1b71907c4ec4ff67b171f | refs/heads/master | 2021-01-23T18:51:29.628316 | 2017-09-21T04:41:34 | 2017-09-21T04:41:34 | 83,003,411 | 0 | 0 | null | null | null | null | UTF-8 | R | false | true | 257 | rd | prop2coord.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/donut-prop2coord.R
\name{prop2coord}
\alias{prop2coord}
\title{prop2coord}
\usage{
prop2coord(x = 1, y = 1, radius = 1, size = 0.5, fill = "skyblue")
}
\description{
prop2coord
}
|
0c1a05c2280610e3ae7cd2165e34466ece53ceaf | 0599b04224cbd89f4257c1befd8e2e0407fef9cf | /TIBS Wetland Model Plotting Functions v1.R | 26dfeee796c37d5ebf2a9b8fea4c1d729dc2e93d | [] | no_license | LooiXIV/Wetland-Model | 30f85ffc3768a038ec6ae51f70118b0ed5b751df | aaf1040999c914db4611d6c314bbdfe526fcf867 | refs/heads/master | 2020-06-15T06:52:41.469517 | 2017-03-11T02:46:57 | 2017-03-11T02:46:57 | 80,171,826 | 1 | 1 | null | null | null | null | UTF-8 | R | false | false | 4,432 | r | TIBS Wetland Model Plotting Functions v1.R | # df <- data.frame(cbind(1:10, 11:20))
# names(df) <- c("y","x")
#
# par(mar = c(7, 7, 7, 7))
# plot(y ~ x, data = df, ylab = "", xlab = "independent")
# axis(side = 4, at = c(1:10))
# mtext(side = 2, line = 3.5, text = y.lab, cex = mag+.5)
# text(par("usr")[2] + 2.2, 5.5, srt=-90, labels = y.lab,
# xpd = TRUE, cex = mag+0.5)
#####################
# Boxplot function #
#####################
# Draw a monthly boxplot panel and save it as a png under folder.name.
# Box.data     data passed straight to boxplot(); months 1:12 on the x axis
# y.min/y.max  fixed y-axis limits; y.lab  y-axis label text
# means, sds   overlay: monthly means drawn as a line, sds as error bars
# folder.name/file.name  output subdirectory (created if absent) and png name
# YI           panel index 1-4: picks the figure letter ("A".."D"); even YI
#              puts the y axis on the right (side 4), odd on the left (side 2)
# Relies on globals from the calling script: site.wd, p.width, p.length,
# point.types, mag, month.names.  Side effects: dir.create(), setwd() (the
# working directory is NOT restored), and a png device closed before return.
model.boxplots = function(Box.data, y.min, y.max, y.lab,
means, sds,
folder.name, file.name, YI){
# Check to see if the directory was already created
check.folder = list.files(pattern = folder.name)
if (length(check.folder) == 0){
dir.create(folder.name)
}
fig.let = c("A", "B", "C", "D")
# set to new WD
setwd(paste(site.wd, "/", folder.name, sep = ""))
png(filename = file.name,
width = p.width, height = p.length)
if(YI %% 2 == 0){
s.n = 4
mar.vals = c(5, 1, 2, 7)
ang = -90 # NOTE(review): ang is assigned but never used below
} else {
mar.vals = c(5, 7, 2, 1)
s.n = 2
ang = 0
}
par(cex = 1.25, mar = mar.vals)
x.axis = boxplot(Box.data, xaxt = "n", ylim = c(y.min, y.max),
xlab = "", ylab = "", pch = point.types, yaxt = "n")
abline(h = 0)
# plot the means and SD
lines(means, col = 6, type = "b", lwd = 2, pch = point.types)
arrows(c(1:12), means-sds, c(1:12), col = 6, lwd = 2,
means+sds, length=0.05, angle=90, code=3)
# Create new figure labels
y.pos = (y.max+y.min)/2
if(YI %% 2 == 0){
# rotated axis title placed outside the right margin (xpd = TRUE)
text(par("usr")[2] + 1.2, y.pos, srt = -90, labels = y.lab,
xpd = TRUE, cex = mag+0.35)
} else {
text(par("usr")[1] - 1.2, y.pos, srt = 90, labels = y.lab,
xpd = TRUE, cex = mag+0.35)
}
# Axis
y.rang = (y.max-y.min)/5
if(y.rang < 1) {digit = 1} else {digit = 0} # NOTE(review): digit unused here (cf. Nutrient.Month.plot)
y.ticks = seq(y.min, y.max, by = y.rang)
axis(side = s.n, at = y.ticks, labels = y.ticks,
las = 2, cex.axis = mag)
axis(side = 1, at = 1:12, label = month.names, cex.axis = mag)
mtext(side = 1, line = 2.75, text = "Month", cex = mag+.5)
# panel letter near the upper-left corner
text(0.35, c(y.max-(0.05*y.max)), fig.let[YI], cex = mag+1)
dev.off()
graphics.off()
}
##############################
# Nutrient plotting Function #
##############################
# Plot monthly nutrient means (line + error bars) with field observations
# overlaid, saved as a png under folder.nut.name.
# Nut.avg/Nut.sd       modeled monthly means and SDs (months 1:12)
# y.min/y.max/y.lab    axis limits and rotated y-axis label
# FieldN.Data/FieldN.Data.m  field values and their months (points, col 4)
# FieldN.raw.D/FieldN.raw.M  raw field values and months (points, col 4)
# folder.nut.name/file.name  output subdirectory (created if absent), png name
# mag                  text/point magnification
# NOTE(review): reads the global `sw` (panel index set by the caller; even ->
# right-hand axis, and it selects the figure letter) plus globals site.wd,
# p.width, p.length, point.types, month.names.  setwd() is not restored.
Nutrient.Month.plot = function(Nut.avg, Nut.sd, y.min, y.max, y.lab,
FieldN.Data, FieldN.Data.m,
FieldN.raw.D, FieldN.raw.M,
folder.nut.name, file.name, mag){
if(sw %% 2 == 0){
s.n = 4
mar.vals = c(5, 1, 2, 7)
ang = -90 # NOTE(review): ang is assigned but never used
} else {
mar.vals = c(5, 7, 2, 1)
s.n = 2
ang = 0
}
# Check to see if the directory already exists if it does not
# exist then create a new directory
check.folder = list.files(pattern = folder.nut.name)
if(length(check.folder) == 0) {
dir.create(folder.nut.name)
}
fig.let = c("A","B","C", "D", "E", "F")
# set the new work directory
setwd(paste(site.wd, "/", folder.nut.name, sep = ""))
Nut.Max = max(Nut.avg+Nut.sd, FieldN.Data, na.rm = T) # NOTE(review): Nut.Max is never used
png(filename = file.name,
width = p.width, height = p.length)
par(cex = mag, mar = mar.vals)
x.axis = plot(Nut.avg, type = "b", pch = point.types,
xlab = "", ylab = "",
xaxt="n", yaxt = "n", col = "2",
ylim = c(y.min, y.max), lwd = 2)
abline(h = 0)
y.pos = (y.max+y.min)/2
# Create new figure labels
if(sw %% 2 == 0){
text(par("usr")[2] + 1.2, y.pos, srt=-90, labels = y.lab,
xpd = TRUE, cex = mag+0.25)
} else {
text(par("usr")[1] - 1.2, y.pos, srt=90, labels = y.lab,
xpd = TRUE, cex = mag+0.25)
# mtext(side = s.n, line = 3.5, text = y.lab, cex = mag+0.5)
}
y.rang = (y.max-y.min)/5 # NOTE(review): y.rang unused; the tick step is recomputed below
if(y.rang < 1) {digit = 1} else {digit = 0}
mtext(side = 1, line = 2.75, text = "Month", cex = mag+1)
# Axis
y.ticks = seq(y.min, y.max, by = round((y.max-y.min)/5, digits = digit))
axis(side = s.n, at = y.ticks, labels = y.ticks,
las = 2, cex.axis = mag)
axis(side = 1, at = 1:12, label = month.names, cex.axis = mag)
# add raw data
points(FieldN.Data.m, FieldN.Data, col = 4, pch = point.types, cex = mag)
points(FieldN.raw.M, FieldN.raw.D, col = 4, pch = point.types, cex = mag)
arrows(c(1:12),Nut.avg-Nut.sd, c(1:12), Nut.avg+Nut.sd,
length=0.05, angle=90, code=3, lwd = 2)
text(0.9, c(y.max-0.025*y.max), fig.let[sw], cex = mag+1)
dev.off()
graphics.off()
}
# Apply the symmetric inverse square root of x (built from its SVD) to x:
# returns (U diag(d^-1/2) U') %*% x, where x = U diag(d) V'.
# Called with no arguments, prints a usage message and returns the flag 1
# (legacy behavior, kept for compatibility).
whiten2 <- function(x) {
  if (nargs() == 0) {
    print("Usage: x_whitened<-whiten( x ) ")
    return(1)
  }
  decomp <- svd(x)
  inv_sqrt_d <- decomp$d^(-1/2)
  # symmetric transform built from the left singular vectors only
  transform <- decomp$u %*% diag(inv_sqrt_d) %*% t(decomp$u)
  transform %*% x
}
|
5b790322ca6af20e7be37acc44f1ddde09d285dc | de295eb6f94aa692143985f8b89abaa32525584b | /r/load-tdt.R | 570a7e9eee6a7e08c1377fcde9c3804b749b16c5 | [] | no_license | chrisnatali/FinX | ebe7124b85f56eae8d1b17e6d90e03e23c456ed2 | a5a223172cbcf8ea5fd1a7e2a604d6eb48c06187 | refs/heads/master | 2016-09-05T16:58:40.139310 | 2012-03-04T23:37:29 | 2012-03-04T23:37:29 | 3,127,199 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 2,371 | r | load-tdt.R | tdt=read.table("tdt")
# tdt (read above from file "tdt") holds OHLCV rows, grouped by symbol.
names(tdt)=c("sym", "day", "op", "hi", "lo", "cls", "vol")
# row indices where a new symbol starts, and each symbol's run length
sym.new=c(1, which(tdt$sym[-1] != tdt$sym[-nrow(tdt)])+1)
sym.lens=c(diff(sym.new), nrow(tdt)-(sym.new[length(sym.new)]-1))
# i.sym: start row of each row's symbol; o.sym: 0-based offset within the symbol
i.sym=rep(sym.new, sym.lens)
o.sym=unlist(sapply(sym.lens, function(n) {0:(n-1)}))
# day-over-day fractional change in close, computed across the whole table...
chg=c(NA, (tdt$cls[2:nrow(tdt)]-tdt$cls[1:(nrow(tdt)-1)])/tdt$cls[1:(nrow(tdt)-1)])
tdt$i.sym=i.sym
tdt$o.sym=o.sym
tdt$chg=chg
# ...then blanked on each symbol's first row, where it would span two symbols
tdt[tdt$o.sym==0, "chg"]=NA
# 20-day rolling regression of close on time: slope as a % of the window's
# first close, plus R^2.  Windows use global row indices, so windows that
# cross a symbol boundary are blanked afterwards via o.sym < 21.
sl.20=c(rep(NA, nrow(tdt)))
r.sq.20=c(rep(NA, nrow(tdt)))
for (n in 21:nrow(tdt)) {
m = lm(tdt$cls[(n-20):(n-1)] ~ c(1:20))
m.sum = summary(m)
sl.20[n] = coefficients(m)[2]/tdt$cls[n-20]*100
r.sq.20[n] = m.sum$r.squared
}
# m.20=c(rep(NA, 20), sapply(21:nrow(tdt), function(n) { lm(tdt$cls[(n-20):(n-1)]~c(1:20)) }));
# sl.20=c(rep(NA, 20), sapply(21:nrow(tdt), function(n) { m=m.20[n]; coefficients(m)[2]/tdt$cls[n-20]*100 }));
# r.sq.20=c(rep(NA, 20), sapply(21:nrow(tdt), function(n) { m=m.20[n]; m.sum=summary(m); m.sum$r.squared }));
tdt$sl.20=sl.20
tdt$r.sq.20=r.sq.20
tdt[tdt$o.sym<21, "sl.20"]=NA
tdt[tdt$o.sym<21, "r.sq.20"]=NA
# 200-day trailing moving average of close, blanked where it crosses symbols
ma.200=c(rep(NA, 200), sapply(201:nrow(tdt), function(n) { mean(tdt$cls[(n-200):(n-1)]) } ))
tdt$ma.200=ma.200
tdt[tdt$o.sym<201, "ma.200"]=NA
tdt$id=1:nrow(tdt)
day.f=factor(tdt$day)
# QQQ subset and its 50-day rolling slope (same % normalization as sl.20)
qqq=tdt[tdt$sym=="QQQ",]
q.sl.50=c(rep(NA, 50), sapply(51:nrow(qqq), function(n) { m=lm(qqq$cls[(n-50):(n-1)]~c(1:50)); coefficients(m)[2]/qqq$cls[n-50]*100 }));
# Find the outlier tickers (used as a negative filter)
split.syms=unique(tdt$sym[tdt$o.sym != 0 & (tdt$chg > 0.55 | tdt$chg < -0.55)])
# Find the "buy" tickers/days via iterating over tdt by day
# day.ticks=by(tdt, day.f, function(x) { q=x[x$sym=="QQQ",]; if((nrow(q)==1) && !is.na(tdt$ma.200[q$id-1]) && (tdt$cls[q$id-1] > tdt$ma.200[q$id-1]) && (nrow(x) > 2)) { ticks=x[!is.na(x$ma.200) & (x$r.sq.20 > 0.5) & (x$sl.20 > 0.05) & (tdt$chg[x$id-1] < -0.04) & !(x$sym %in% split.syms),]; ticks[sample(1:nrow(ticks), min(10, nrow(ticks))),] } else { x[FALSE,] } })
# NOTE(review): !is.na(x$sl.20) is a vector inside &&; recent R versions
# error on length > 1 conditions -- confirm the intended element is tested
day.ticks=by(tdt, day.f, function(x) { if(nrow(x) > 2 && !is.na(x$sl.20)) { ticks=x[!is.na(x$ma.200) & (x$r.sq.20 > 0.8) & (x$sl.20 > 0.05) & (tdt$chg[x$id-1] < -0.04) & !(x$sym %in% split.syms),]; ticks[sample(1:nrow(ticks), min(10, nrow(ticks))),] } else { x[FALSE,] } })
save.image()
|
0cbcc1cccc1bfc7bc79e4b600dd35fbb21bee82c | 5ffe62a29eec525f568d2e71b854d3ce442e98ef | /Genotyping/Analysis/PRBI/drafts/assemblePRBIgenotypes090713.R | bda096e263da6b37b742fb45a8675f284e28f3d1 | [
"MIT"
] | permissive | pinskylab/PhilippinesAnemonefish2008 | df3ac238f4db30d90cd564f68a23a2bf8eff86f6 | 4b21bc01c6be794181feff95b44f7e0295ecf061 | refs/heads/main | 2023-08-11T06:47:06.704422 | 2021-09-30T17:27:36 | 2021-09-30T17:27:36 | 307,220,641 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 56,663 | r | assemblePRBIgenotypes090713.R | setwd("/Users/mpinsky/Documents/Stanford/Philippines/2008/Genotyping/Genotypes")
#### Read the genotypes ####
# read only columns 2 & 4 (character: sample/marker IDs) and 6 & 7 (numeric
# allele sizes); the other 30 of the 34 columns are skipped via "NULL"
colstoread = c("NULL", "character", "NULL", "character", "NULL", "numeric", "numeric", "NULL", "NULL", "NULL", "NULL", "NULL", "NULL", "NULL", "NULL", "NULL", "NULL", "NULL", "NULL", "NULL", "NULL", "NULL", "NULL", "NULL", "NULL", "NULL", "NULL", "NULL", "NULL", "NULL", "NULL", "NULL", "NULL", "NULL")
afg1 = read.table("AFG1-3_PRBI Genotypes 090703.txt", header = T, sep ="\t", colClasses = colstoread, na.string="?")
afg9 = read.table("AFG9-10_PRBI Genotypes 090621.txt", header = T, sep ="\t", colClasses = colstoread, na.string="?")
afg1.m.mg = read.table("../MattGribble/AFG1.M_PRBI_MG Genotypes 090702.txt", header = T, sep ="\t", colClasses = colstoread, na.string="?")
afg2.m.mg = read.table("../MattGribble/AFG2.M_PRBI_MG Genotypes 090702.txt", header = T, sep ="\t", colClasses = colstoread, na.string="?")
afg3.m.mg = read.table("../MattGribble/AFG3.M_PRBI_MG Genotypes 090702.txt", header = T, sep ="\t", colClasses = colstoread, na.string="?")
afg4_5.m.mg = read.table("../MattGribble/AFG4and5.M_PRBI_MG Genotypes 090702.txt", header = T, sep ="\t", colClasses = colstoread, na.string="?")
afg6.m = read.table("../MattGribble/AFG6-7.M_PRBI_MG Genotypes 090710.txt", header=T, sep="\t", colClasses = colstoread, na.string="?")
afg8.m = read.table("../MattGribble/AFG8.M_PRBI_MG Genotypes 090710.txt", header=T, sep="\t", colClasses = colstoread, na.string="?")
afg9.m = read.table("../MattGribble/AFG9.M_PRBI_MG Genotypes 090710.txt", header=T, sep="\t", colClasses = colstoread, na.string="?")
afg1_3.m.mlp = read.table("AFG1-3.M_PRBI_MLP Genotypes 090703.txt", header = T, sep ="\t", colClasses = colstoread, na.string="?")
afg4_5.m.mlp = read.table("AFG4-5.M_PRBI_MLP Genotypes 090709.txt", header = T, sep ="\t", colClasses = colstoread, na.string="?")
#### Compare MLP vs. MG genotype calls ####
# suffix the allele columns by scorer (.MG vs .MLP) so the tables can merge
colnames = c("Sample", "Marker", "A1", "A2")
colnames(afg1.m.mg) = paste(colnames, c("","",".MG", ".MG"), sep="")
colnames(afg2.m.mg) = paste(colnames, c("","",".MG", ".MG"), sep="")
colnames(afg3.m.mg) = paste(colnames, c("","",".MG", ".MG"), sep="")
colnames(afg4_5.m.mg) = paste(colnames, c("","",".MG", ".MG"), sep="")
colnames(afg1_3.m.mlp) = paste(colnames, c("","",".MLP", ".MLP"), sep="")
colnames(afg4_5.m.mlp) = paste(colnames, c("","",".MLP", ".MLP"), sep="")
# remove the duplicated loci from AFG1-3.M that are also NAs
afg1_3.m.mg = rbind(afg1.m.mg, afg2.m.mg, afg3.m.mg)
afg1_3.m.mg$SampleMarker = paste(afg1_3.m.mg$Sample, afg1_3.m.mg$Marker, sep="")
afg1_3.m.mlp$SampleMarker = paste(afg1_3.m.mlp$Sample, afg1_3.m.mlp$Marker, sep="")
# sort so non-NA calls come first (na.last=TRUE), then keep the first
# record per Sample+Marker -- i.e. the duplicate with a real call wins
i = order(afg1_3.m.mg$A1.MG, afg1_3.m.mg$A2.MG, na.last=TRUE)
afg1_3.m.mg = afg1_3.m.mg[i,]
afg1_3.m.mg = afg1_3.m.mg[!duplicated(afg1_3.m.mg$SampleMarker),]
i = order(afg1_3.m.mlp$A1.MLP, afg1_3.m.mlp$A2.MLP, na.last=TRUE)
afg1_3.m.mlp = afg1_3.m.mlp[i,]
afg1_3.m.mlp = afg1_3.m.mlp[!duplicated(afg1_3.m.mlp$SampleMarker),]
afg1_3.m = merge(afg1_3.m.mg, subset(afg1_3.m.mlp, select=c(-Marker, -Sample)), by="SampleMarker")
afg1_3.m$A1 = NA
afg1_3.m$A2 = NA
afg1_3.m$error = NA
# Row-by-row comparison of the MG and MLP allele calls.  identical() treats
# two NAs as equal, so both-missing genotypes are not flagged as errors.
for(i in 1:length(afg1_3.m$SampleMarker)){
  a = identical(afg1_3.m$A1.MG[i], afg1_3.m$A1.MLP[i])
  b = identical(afg1_3.m$A2.MG[i], afg1_3.m$A2.MLP[i])
  if(a & b){
    # calls agree: record the consensus genotype for both alleles
    afg1_3.m$A1[i] = afg1_3.m$A1.MLP[i]
    afg1_3.m$A2[i] = afg1_3.m$A2.MLP[i] # bug fix: previously re-assigned A1, leaving A2 as NA
    afg1_3.m$error[i] = FALSE
  } else {
    afg1_3.m$error[i] = TRUE
  }
}
# report the disagreement rate and show the conflicting rows
print(paste("Errors: ", sum(afg1_3.m$error), " (", round(sum(afg1_3.m$error)/length(afg1_3.m$error)*100, digits=2), "%)", sep=""))
afg1_3.m[afg1_3.m$error,]
## AFG4-5.M
# same de-duplication procedure as AFG1-3.M above: sort non-NA calls first,
# keep one record per Sample+Marker, then merge the MG and MLP tables
afg4_5.m.mg$SampleMarker = paste(afg4_5.m.mg$Sample, afg4_5.m.mg$Marker, sep="")
afg4_5.m.mlp$SampleMarker = paste(afg4_5.m.mlp$Sample, afg4_5.m.mlp$Marker, sep="")
i = order(afg4_5.m.mg$A1.MG, afg4_5.m.mg$A2.MG, na.last=TRUE)
afg4_5.m.mg = afg4_5.m.mg[i,]
afg4_5.m.mg = afg4_5.m.mg[!duplicated(afg4_5.m.mg$SampleMarker),]
i = order(afg4_5.m.mlp$A1.MLP, afg4_5.m.mlp$A2.MLP, na.last=TRUE)
afg4_5.m.mlp = afg4_5.m.mlp[i,]
afg4_5.m.mlp = afg4_5.m.mlp[!duplicated(afg4_5.m.mlp$SampleMarker),]
afg4_5.m = merge(afg4_5.m.mg, subset(afg4_5.m.mlp, select=c(-Marker, -Sample)), by="SampleMarker")
afg4_5.m$A1 = NA
afg4_5.m$A2 = NA
afg4_5.m$error = NA
# Row-by-row comparison of the MG and MLP allele calls for AFG4-5.M.
# identical() treats two NAs as equal, so both-missing genotypes pass.
for(i in 1:length(afg4_5.m$SampleMarker)){
  a = identical(afg4_5.m$A1.MG[i], afg4_5.m$A1.MLP[i])
  b = identical(afg4_5.m$A2.MG[i], afg4_5.m$A2.MLP[i])
  if(a & b){
    # calls agree: record the consensus genotype for both alleles
    afg4_5.m$A1[i] = afg4_5.m$A1.MLP[i]
    afg4_5.m$A2[i] = afg4_5.m$A2.MLP[i] # bug fix: previously re-assigned A1, leaving A2 as NA
    afg4_5.m$error[i] = FALSE
  } else {
    afg4_5.m$error[i] = TRUE
  }
}
# disagreement rate, with and without the problem locus APY_2
print(paste("Errors: ", sum(afg4_5.m$error), " (", round(sum(afg4_5.m$error)/length(afg4_5.m$error)*100, digits=2), "%)", sep=""))
print(paste("Errors: ", sum(afg4_5.m$error[afg4_5.m$Marker != "APY_2"]), " (", round(sum(afg4_5.m$error[afg4_5.m$Marker != "APY_2"])/length(afg4_5.m$error[afg4_5.m$Marker != "APY_2"])*100, digits=2), "%)", sep=""))
afg4_5.m[afg4_5.m$error,]
afg4_5.m[afg4_5.m$error & afg4_5.m$Marker != "APY_2",]
# choose MLP genotypes as consensus (only after checking by eye!)
afg1.m = afg1_3.m[,c("Sample", "Marker", "A1.MLP", "A2.MLP")]
afg4.m = afg4_5.m[,c("Sample", "Marker", "A1.MLP", "A2.MLP")]
#### Assemble the genotypes ####
# suffix allele columns by source plate so all tables can be merged
colnames = c("Sample", "Marker", "A1", "A2")
colnames(afg1) = paste(colnames, c("","",".AFG1", ".AFG1"), sep="")
colnames(afg9) = paste(colnames, c("","",".AFG9", ".AFG9"), sep="")
colnames(afg1.m) = paste(colnames, c("","",".AFG1.M", ".AFG1.M"), sep="")
colnames(afg4.m) = paste(colnames, c("","",".AFG4.M", ".AFG4.M"), sep="")
colnames(afg6.m) = paste(colnames, c("","",".AFG6.M", ".AFG6.M"), sep="")
colnames(afg8.m) = paste(colnames, c("","",".AFG8.M", ".AFG8.M"), sep="")
colnames(afg9.m) = paste(colnames, c("","",".AFG9.M", ".AFG9.M"), sep="")
# trim to PRBI samples and focal loci
# NOTE(review): after the renames above, afg1$A1 etc. resolve only via
# data-frame $ partial matching against e.g. "A1.AFG1" -- confirm intended
afg1 = afg1[intersect(grep("PRBI",afg1$Sample), which(!is.na(afg1$A1))),] # remove extra loci from AFG1
afg9 = afg9[grep("PRBI",afg9$Sample),]
afg1.m = afg1.m[intersect(grep("PRBI",afg1.m$Sample), which(!is.na(afg1.m$A1))),] # remove extra genotypes from AFG1-3.M
afg4.m = afg4.m[intersect(grep("PRBI",afg4.m$Sample), which(!is.na(afg4.m$A1))),] # remove extra genotypes from AFG4 and 5.M
afg6.m = afg6.m[intersect(grep("PRBI",afg6.m$Sample), which(!is.na(afg6.m$A1))),] # remove extra genotypes from AFG6 and 7.M
afg8.m = afg8.m[intersect(grep("PRBI",afg8.m$Sample), which(!is.na(afg8.m$A1))),] # remove extra genotypes from AFG8.M
afg9.m = afg9.m[intersect(grep("PRBI",afg9.m$Sample), which(!is.na(afg9.m$A1))),] # remove extra genotypes from AFG9.M
#check for duplicated sample/loci combinations
# (interactive eyeball checks; non-empty output means duplicates exist)
which(duplicated(paste(afg1$Sample, afg1$Marker))) # yes
which(duplicated(paste(afg9$Sample, afg9$Marker)))
which(duplicated(paste(afg1.m$Sample, afg1.m$Marker)))
which(duplicated(paste(afg4.m$Sample, afg4.m$Marker)))
which(duplicated(paste(afg6.m$Sample, afg6.m$Marker)))
which(duplicated(paste(afg8.m$Sample, afg8.m$Marker)))
which(duplicated(paste(afg9.m$Sample, afg9.m$Marker)))
# do duplicates in files have identical genotypes?
# equalities should return TRUE if all genos are identical
sum(duplicated(paste(afg1$Sample, afg1$Marker))) == sum(duplicated(paste(afg1$Sample, afg1$Marker, afg1$A1, afg1$A2)))
# trim duplicates from file_ (AFG__) and file_ (AFG__)
afg1 = afg1[!duplicated(paste(afg1$Sample, afg1$Marker)),]
dim(afg1)
dim(afg9)
dim(afg1.m)
dim(afg4.m)
dim(afg6.m)
dim(afg8.m)
dim(afg9.m)
# full outer joins on Sample+Marker accumulate one allele-pair column set
# per source plate into geno (dim() calls are interactive sanity checks)
geno = merge(afg1, afg9, all.x=T, all.y = T, by = c("Sample", "Marker"))
dim(geno)
geno = merge(geno, afg1.m, all.x=T, all.y = T, by = c("Sample", "Marker"))
dim(geno)
geno = merge(geno, afg4.m, all.x=T, all.y = T, by = c("Sample", "Marker"))
dim(geno)
geno = merge(geno, afg6.m, all.x=T, all.y = T, by = c("Sample", "Marker"))
dim(geno)
geno = merge(geno, afg8.m, all.x=T, all.y = T, by = c("Sample", "Marker"))
dim(geno)
geno = merge(geno, afg9.m, all.x=T, all.y = T, by = c("Sample", "Marker"))
dim(geno)
# Fill in homozygotes
# a call with only A1 recorded is a homozygote: copy A1 into the missing A2
# (fixed=T so "A1."/"A2." match the literal per-plate column prefixes)
alleleones = grep("A1.", names(geno), fixed=T)
alleletwos = grep("A2.", names(geno), fixed=T)
for(i in 1:length(alleleones)){
j <- !is.na(geno[,alleleones[i]]) & is.na(geno[,alleletwos[i]]) # find homozygotes
geno[j,alleletwos[i]] <- geno[j,alleleones[i]]
}
# Make sure we list each individual for each locus in geno, remove other loci
# Drop the non-focal loci in one vectorized step.  (The previous per-marker
# geno[-which(...),] loop emptied geno entirely whenever a listed marker had
# zero matching rows, because x[-integer(0), ] selects zero rows.)
remove = c("APY_2", "APY_44", "APY_45", "APY_65")
geno = geno[!(geno$Marker %in% remove),]
markers = unique(geno$Marker)
markers = markers[!is.na(markers)]
geno= geno[geno$Sample!="PRBIContam",] # remove a contaminated well
indivs = unique(geno$Sample)
# For each marker, add placeholder (all-NA) rows for any individual with no
# record at that marker, so every locus lists every individual.
for(i in 1:length(markers)){
  theseindivs = unique(geno$Sample[geno$Marker==markers[i]])
  if(length(theseindivs) < length(indivs)){
    new = setdiff(indivs, theseindivs)
    add = data.frame(Sample = new, Marker = markers[i])
    geno = merge(geno, add, all.x=T, all.y=T)
    dim(geno)
  }
  if(length(theseindivs) > length(indivs)){
    # bug fix: was marker[i] (an undefined object) -- would error if reached
    print(paste("Too many indivs for marker ", markers[i]))
  }
}
# Find consensus genotypes and check for errors
# For every Sample x Marker row: count how many plates produced a call
# (numgenos); if all calls agree, record the consensus alleles; if not,
# flag error = TRUE.  error stays NA when fewer than 2 replicate calls
# exist, since agreement cannot be checked.
geno$error = NA
geno$numgenos = 0
geno$A1consens = NA
geno$A2consens = NA
alleleones = grep("A1.", names(geno), fixed=T)
alleletwos = grep("A2.", names(geno), fixed=T)
for(i in 1:length(geno$Sample)){
checkone = alleleones[!is.na(geno[i,alleleones])] # get allele cols that aren't na
checktwo = alleletwos[!is.na(geno[i,alleletwos])] # get allele cols that aren't na
if(length(checkone) != length(checktwo)){ print(paste("A1 and A2 not equal length", i))}
if(length(checkone)>0 & length(checktwo)>0){
# compare every replicate call against the first one
matchone = all(geno[i,checkone] == geno[i,checkone[1]]) # check for equality
matchtwo = all(geno[i,checktwo] == geno[i,checktwo[1]]) # check for equality
geno$numgenos[i] = length(checkone)
if(matchone & matchtwo){
if(length(checkone)>1){
geno$error[i] = FALSE
}
geno$A1consens[i] = geno[i,checkone[1]]
geno$A2consens[i] = geno[i,checktwo[1]]
}
if(!(matchone & matchtwo)){
geno$error[i] = TRUE
}
}
if((i %% 500) == 0){ print(i)} # progress indicator
}
# Find errors
err=table(geno$error, geno$Marker)
if(dim(err)[1]==1){
err = rbind(err, rep(0, dim(err)[2]))
rownames(err) = c("FALSE", "TRUE")
}
err = rbind(err, (err[1,]+err[2,]))
err = rbind(err,round((err[2,]/err[3,])*100,1))
rownames(err) = c("No Errors", "Errors", "Error %")
t(err)
# Write out errors
i = which(geno$error==T)
write.csv(geno[i,], paste("Genotyping_errors_PRBI", Sys.Date(), ".csv", sep=""))
# Select a subset to redo for errorchecking
redo = data.frame(Sample=character(0), Marker=character(0))
redomarks = markers[markers!="AC1359" & markers!="AC1578" & markers!="APR_Cf29" & markers!="NNG_012"]
for(i in 1:length(redomarks)){
a = err[3, which(colnames(err)==redomarks[i])]
if(a < 20){
redo = rbind(redo, data.frame(Sample=sample(unique(geno$Sample), 20), Marker=redomarks[i]))
}
}
dim(redo)
#write.csv(redo, paste("RedoPRBI_", Sys.Date(), ".csv", sep=""), row.names=FALSE)
# Calc % complete: per-locus counts of missing vs. called consensus genotypes.
# Row 1 = missing (A1consens is NA), row 2 = complete, row 3 = % complete.
i = table(!is.na(geno$A1consens), geno$Marker)
i = rbind(i,round((i[2,]/(i[1,]+i[2,]))*100,1))
rownames(i) = c("Missing", "Complete", "% Complete")
t(i)  # loci as rows for readability
colSums(i[1:2,]) # number of indivs at each locus
# Which indivs are still missing at each locus? Write that out if desired
out = data.frame(Marker = character(0), Sample = character(0))
# Loci still being chased down (hand-picked at time of analysis)
missingmarkers = c("ACH_A11", "ACH_A4", "ACH_B9", "NNG_028")
for(i in 1:length(missingmarkers)){
a = geno[geno$Marker==missingmarkers[i] & is.na(geno$A1consens), c("Marker", "Sample")]
out = rbind(out, a)
}
dim(out)
#write.csv(out, paste("MissingPRBI_", Sys.Date(), ".csv", sep=""), row.names=FALSE)
# Calc # genos missing per individual.
# Row 1 of table i = count of loci with a missing consensus call, per sample.
i = table(!is.na(geno$A1consens), geno$Sample)
max(i[1,])  # worst case: largest number of missing loci for any one sample
# Summary matrix: row 1 = number of loci missing (0-6),
# row 2 = number of samples with that many missing, row 3 = % of samples
j = matrix(data= c(0,1,2,3,4,5,6), nrow=3, ncol=7, byrow=T)
# Bin the per-sample missing counts into 0,1,...,6 using half-open breaks
j[2,] = hist(i[1,], plot=F, breaks = c(-0.1, 0.5, 1.5, 2.5, 3.5, 4.5, 5.5,6.5))$counts
# Percentage of samples: derive the total from the counts themselves
# rather than hard-coding the sample size (was /378), so the table stays
# correct if the number of genotyped samples changes.
j[3,] = round(j[2,]/sum(j[2,])*100)
rownames(j) = c("Num Missing", "Num Samples", "% Samples")
print(j)
# Calc # genos missing per individual (w/out APR_Cf42 and ACH_B9).
# Same summary as above, after dropping the two problem loci.
temp = geno
dim(temp)
# Remove all rows for the two high-error / low-completeness markers
i = c(grep("ACH_B9", temp$Marker), grep("APR_Cf42", temp$Marker))
temp = temp[-i,]
dim(temp)
i = table(!is.na(temp$A1consens), temp$Sample)
max(i[1,])
j = matrix(data= c(0,1,2,3,4,5,6), nrow=3, ncol=7, byrow=T)
j[2,] = hist(i[1,], plot=F, breaks = c(-0.1, 0.5, 1.5, 2.5, 3.5, 4.5, 5.5,6.5))$counts
# Derive the total sample count from the data rather than hard-coding 378
j[3,] = round(j[2,]/sum(j[2,])*100)
rownames(j) = c("Num Missing", "Num Samples", "% Samples")
print(j)
# Fix genotyping errors where calls were fixable
# (none left to fix)
# Check loci for regular alleles (no alleles 1bp apart, should match bins in GeneMapper)
# Print each locus name followed by its sorted set of observed allele sizes.
loci = unique(geno$Marker)
for(locus in loci){
  sel = geno$Marker == locus
  allelelist = sort(unique(c(geno$A1consens[sel], geno$A2consens[sel])))
  cat(c(locus, allelelist))
  cat("\n")
}
###################################################
# Write out whole table: archive the full long-format genotype table,
# date-stamped (this is the file the export sections below read back in).
write.csv(geno, paste("Aclarkii_genotypes_", Sys.Date(), ".csv", sep=""))
###################################################
########################
# Format for GenAlEx 6
# Reshape long genotype table to wide (one row per sample, two columns
# per locus), dropping raw replicate-call columns and QC flags.
dropnames = names(geno)
dropnames = c(dropnames[grep("A[[:digit:]][[:punct:]]", dropnames)], "error", "numgenos")
genowide = reshape(geno, direction="wide", v.names = c("A1consens", "A2consens"), timevar = "Marker", idvar = "Sample", drop=dropnames)
# Strip "consens" from column names: A1consens.LOCUS -> A1.LOCUS
widenames = names(genowide)
widenames = gsub("consens", "", widenames)
names(genowide) = widenames
dim(genowide)
# add population and lat/long info
surveys = read.csv("../../Surveys/GPSSurveys2009-01-08.csv")
locations = read.csv("../../Surveys/Collections2008-11-21.csv")
# Sample IDs are species code + collection ID, e.g. "APCL285"
locations$Sample = paste(locations$Spp, locations$ID, sep="")
genowide = merge(subset(locations, select=c(Sample,SurveyNum,lat,long)), genowide, all.y=T, by="Sample")
genowide = merge(subset(surveys, select=c(SurveyNum,SiteNum,Name,Region,Municipality)), genowide, all.y=T)
dim(genowide)
# sort by region and sample ID
i = order(genowide$Region, genowide$SiteNum, genowide$Sample)
genowide = genowide[i,]
# remove ACH_B9 and APR_Cf42 because of high error and low completeness (3/9/09)
widenames = names(genowide)
i = c(grep("ACH_B9", widenames), grep("APR_Cf42", widenames))
genowide = genowide[,-i]
# Write to file (GenAlEx 6 CSV layout, built line by line)
genalexfile = file(paste("Aclarkii_GenAlEx_", Sys.Date(), ".csv", sep=""))
open(genalexfile, "w")
# Header: num loci, num samples, num pops, # indiv in each pop, # regions, # indivs in each region
outline = c(14, dim(genowide)[1], length(unique(genowide$SiteNum)), table(genowide$SiteNum), length(unique(genowide$Region)), table(genowide$Region))
cat(outline, file=genalexfile, sep=",", append=F)
cat("\n", file=genalexfile, append=T)
# Line 2: title plus population and region labels
outline = c("Aclarkii 3 regions", "", "", names(table(genowide$SiteNum)), "", names(table(genowide$Region)))
cat(outline, file=genalexfile, sep=",", append=T)
cat("\n", file=genalexfile, append=T)
# Line 3: column header. Locus names separated by ",," because each locus
# occupies two allele columns in GenAlEx.
i = grep("A1", names(genowide))
j = gsub("A1.", "", names(genowide)[i], fixed=T)
j = paste(j, collapse=",,")
outline = paste("Sample no.,Pop,",j, ",,,Lat,Long",sep="")
cat(outline, file=genalexfile, append=T)
cat("\n", file=genalexfile, append=T)
# Data: Sample, Pop, cols of genotype data, blank col, Lat, Long
genowide$blank = ""
widenames = names(genowide)
outfile = genowide[,c(grep("Sample",widenames),grep("SiteNum",widenames),grep("A[[:digit:]]",widenames),grep("blank",widenames),grep("lat",widenames), grep("long",widenames))]
# na="0": GenAlEx codes missing alleles as 0
write.table(outfile, file=genalexfile, append=T, quote=F,row.names=F, col.names=F, sep=",", na="0")
close(genalexfile)
###############################################
## Divide by size classes, output to GenAlEx
# Same GenAlEx export as above, but samples grouped into four size-quartile
# classes (used as "populations") instead of geographic sites.
geno = read.csv("Aclarkii_genotypes_2009-03-13.csv", row.names=1)
# Reshape long -> wide (one row per sample, two allele columns per locus)
dropnames = names(geno)
dropnames = c(dropnames[grep("A[[:digit:]][[:punct:]]", dropnames)], "error", "numgenos")
genowide = reshape(geno, direction="wide", v.names = c("A1consens", "A2consens"), timevar = "Marker", idvar = "Sample", drop=dropnames)
widenames = names(genowide)
widenames = gsub("consens", "", widenames)
names(genowide) = widenames
dim(genowide)
# add population and lat/long info
surveys = read.csv("../../Surveys/GPSSurveys2009-01-08.csv")
locations = read.csv("../../Surveys/Collections2008-11-21.csv")
locations$Sample = paste(locations$Spp, locations$ID, sep="")
genowide = merge(subset(locations, select=c(Sample,SurveyNum,Size,lat,long)), genowide, all.y=T, by="Sample")
genowide = merge(subset(surveys, select=c(SurveyNum,SiteNum,Name,Region,Municipality)), genowide, all.y=T)
dim(genowide)
hist(genowide$Size)
# Split body sizes at the quartiles into four classes
# (assumes Size has no NAs; quantile() would error otherwise)
quants = quantile(genowide$Size, probs = c(0.25, 0.5, 0.75))
first = genowide$Size <= quants[1]
second = genowide$Size > quants[1] & genowide$Size <= quants[2]
third = genowide$Size > quants[2] & genowide$Size <= quants[3]
fourth = genowide$Size > quants[3]
sum(first)
sum(second)
sum(third)
sum(fourth)
genowide$sizeclass = NA
genowide$sizeclass[first] = 1
genowide$sizeclass[second] = 2
genowide$sizeclass[third] = 3
genowide$sizeclass[fourth] = 4
# order by sizeclass
i = order(genowide$sizeclass)
genowide = genowide[i,]
# remove ACH_A7, ACH_B9 and APR_Cf42 because out of HWE, high error or low completeness (3/15/09)
widenames = names(genowide)
i = c(grep("ACH_A7", widenames), grep("ACH_B9", widenames), grep("APR_Cf42", widenames))
genowide = genowide[,-i]
# Write to file (GenAlEx layout; "populations" here are the 4 size classes)
genalexfile = file(paste("Aclarkii_sizes_", Sys.Date(), "_GX.csv", sep=""))
open(genalexfile, "w")
# Header: num loci, num samples, num pops, # indiv in each pop, # regions, # indivs in each region
outline = c(13, dim(genowide)[1], length(unique(genowide$sizeclass)), table(genowide$sizeclass))
cat(outline, file=genalexfile, sep=",", append=F)
cat("\n", file=genalexfile, append=T)
outline = c("Aclarkii sizeclasses", "", "", names(table(genowide$sizeclass)))
cat(outline, file=genalexfile, sep=",", append=T)
cat("\n", file=genalexfile, append=T)
# Column header: locus names separated by ",," (two allele columns each)
i = grep("A1", names(genowide))
j = gsub("A1.", "", names(genowide)[i], fixed=T)
j = paste(j, collapse=",,")
outline = paste("Sample no.,Pop,",j, ",,,Lat,Long",sep="")
cat(outline, file=genalexfile, append=T)
cat("\n", file=genalexfile, append=T)
# Data: Sample, Pop, cols of genotype data, blank col, Lat, Long
genowide$blank = ""
widenames = names(genowide)
outfile = genowide[,c(grep("Sample",widenames),grep("sizeclass",widenames),grep("A[[:digit:]]",widenames),grep("blank",widenames),grep("lat",widenames), grep("long",widenames))]
write.table(outfile, file=genalexfile, append=T, quote=F,row.names=F, col.names=F, sep=",", na="0")
close(genalexfile)
###############################################
## Output to MLNE2: All samples (lower quantile vs. larger quantile, four pops as focal, others as source)
# Each column of focalpops is one replicate: a sliding window of four
# site numbers treated as the focal population; all other sites are source.
focalpops = data.frame(a = c(7,8,9,10), b = c(8,9,10,11), c = c(9,10,11,13), d = c(10,11,13,14), e = c(11,13,14,15), f = c(13,14,15,16), g = c(14,15,16,17), h = c(18,19,20,22), i = c(19,20,22,23), j = c(20,22,23,24), k = c(22,23,24,25), l = c(23,24,25,27))
geno = read.csv("Aclarkii_genotypes_2009-03-13.csv", row.names=1)
# reshape to wide
dropnames = names(geno)
dropnames = c(dropnames[grep("A[[:digit:]][[:punct:]]", dropnames)], "error", "numgenos")
genowide = reshape(geno, direction="wide", v.names = c("A1consens", "A2consens"), timevar = "Marker", idvar = "Sample", drop=dropnames)
widenames = names(genowide)
widenames = gsub("consens", "", widenames)
names(genowide) = widenames
dim(genowide)
# remove ACH_A7, ACH_B9 and APR_Cf42 because out of HWE, high error or low completeness (3/15/09)
widenames = names(genowide)
i = c(grep("ACH_A7", widenames), grep("ACH_B9", widenames), grep("APR_Cf42", widenames))
genowide = genowide[,-i]
# add population and lat/long info
surveys = read.csv("../../Surveys/GPSSurveys2009-01-08.csv")
locations = read.csv("../../Surveys/Collections2008-11-21.csv")
locations$Sample = paste(locations$Spp, locations$ID, sep="")
genowide = merge(subset(locations, select=c(Sample,SurveyNum,Size,lat,long)), genowide, all.y=T, by="Sample")
genowide = merge(subset(surveys, select=c(SurveyNum,SiteNum,Name,Region,Municipality)), genowide, all.y=T)
dim(genowide)
# Size-quartile classes; "inclusive" 4th class uses >= so boundary fish count as adults
quants = quantile(genowide$Size, probs = c(0.25, 0.5, 0.75))
first = genowide$Size <= quants[1]
second = genowide$Size > quants[1] & genowide$Size <= quants[2]
third = genowide$Size > quants[2] & genowide$Size <= quants[3]
fourth = genowide$Size > quants[3]
fourth_inclusive = genowide$Size >= quants[3]
#sum(first)
#sum(second)
#sum(third)
#sum(fourth)
genowide$sizeclass = NA
genowide$sizeclass[first] = 1
genowide$sizeclass[second] = 2
genowide$sizeclass[third] = 3
genowide$sizeclass[fourth] = 4
genowide$sizeclassinclusive = NA
genowide$sizeclassinclusive[first] = 1
genowide$sizeclassinclusive[second] = 2
genowide$sizeclassinclusive[third] = 3
genowide$sizeclassinclusive[fourth_inclusive] = 4
# order by sizeclass
i = order(genowide$sizeclass)
genowide = genowide[i,]
# How many adults and juvs in each population group? Use sizeclassinclusive so that we get enough adults
for(rep in 1:(dim(focalpops)[2])){
focal = focalpops[1,rep]
focal2 = focalpops[2,rep]
focal3 = focalpops[3,rep]
focal4 = focalpops[4,rep]
# Adults = top (inclusive) quartile; juveniles = bottom quartile, focal sites only
a = genowide$sizeclassinclusive == 4 & (genowide$SiteNum == focal | genowide$SiteNum == focal2 | genowide$SiteNum == focal3 | genowide$SiteNum == focal4)
b = genowide$sizeclass == 1 & (genowide$SiteNum == focal | genowide$SiteNum == focal2 | genowide$SiteNum == focal3 | genowide$SiteNum == focal4)
print(paste("Rep ", rep, ": Num Adults: ", sum(a), " Num Juvs: ", sum(b), sep=""))
}
# Write all groups to files: one MLNE2 input file per focal-population
# replicate. File format: header lines, then allele-count lines for two
# temporal samples of the focal pop (adults = gen 0, juvs = gen 1), then
# allele counts for the source pool.
for(rep in 1:(dim(focalpops)[2])){
focal = focalpops[1,rep]
focal2 = focalpops[2,rep]
focal3 = focalpops[3,rep]
focal4 = focalpops[4,rep]
# Write to file
mlnefile = file(paste("Aclarkii_", Sys.Date(), "_MLNEfocal", focal, focal2, focal3, focal4, ".csv", sep=""))
open(mlnefile, "w")
# Header: 1 (open pop), then max Ne, then screen indicator, num cpus (0 for all), then num loci,
cat(1, file=mlnefile, sep=",", append=F)
cat("\n", file= mlnefile, append=T)
cat(35000, file=mlnefile, sep=",", append=T)
cat("\n", file= mlnefile, append=T)
cat(3, file=mlnefile, sep=",", append=T)
cat("\n", file= mlnefile, append=T)
cat(0, file=mlnefile, sep=",", append=T)
cat("\n", file= mlnefile, append=T)
cat(13, file=mlnefile, sep=",", append=T)
cat("\n", file= mlnefile, append=T)
# num alleles per locus (only for adults and juvs, sizeclasses 4 or 1)
# Also cache the sorted allele list per locus in `alleles` so the count
# lines below use a consistent allele ordering.
outline = numeric()
cols = grep("A1.", names(genowide))
alleles = vector("list", 13)
a = genowide$sizeclass == 1 | genowide$sizeclassinclusive == 4
for(i in cols){ # for each locus
# A1 column is at i; the matching A2 column is assumed to be at i+1
j = union(genowide[a,i], genowide[a,i+1])
j = j[!is.na(j)]
alleles[match(i,cols)] = list(sort(j))
outline = c(outline, length(j))
}
cat(paste(outline, collapse=","),file=mlnefile, sep=",", append=T)
cat("\n", file= mlnefile, append=T)
# num samples from focal pop
cat(2, file=mlnefile, sep=",", append=T)
cat("\n", file= mlnefile, append=T)
# generations when samples were taken
cat("0,1", file=mlnefile, sep=",", append=T)
cat("\n", file= mlnefile, append=T)
# For adults in focal pops:
# Numbers of copies of each allele, at each locus, from each sample of the focal population
cols = grep("A1.", names(genowide))
a = genowide$sizeclassinclusive == 4 & (genowide$SiteNum == focal | genowide$SiteNum == focal2 | genowide$SiteNum == focal3 | genowide$SiteNum == focal4)
print(paste("Rep ", rep, ": Num Adults: ", sum(a), sep=""))
for(i in cols){ # for each locus
thesealleles = sort(c(genowide[a,i], genowide[a,i+1]))
allalleles = alleles[[match(i,cols)]]
outline = numeric()
for(j in 1:length(allalleles)){
outline = c(outline, sum(thesealleles == allalleles[j]))
}
cat(paste(outline, collapse=","), file=mlnefile, sep=",", append=T)
cat("\n", file= mlnefile, append=T)
}
# For juvs in focal pop
cols = grep("A1.", names(genowide))
a = genowide$sizeclass == 1 & (genowide$SiteNum == focal | genowide$SiteNum == focal2 | genowide$SiteNum == focal3 | genowide$SiteNum == focal4)
for(i in cols){ # for each locus
thesealleles = sort(c(genowide[a,i], genowide[a,i+1]))
allalleles = alleles[[match(i,cols)]]
outline = numeric()
for(j in 1:length(allalleles)){
outline = c(outline, sum(thesealleles == allalleles[j]))
}
cat(paste(outline, collapse=","), file=mlnefile, sep=",", append=T)
cat("\n", file= mlnefile, append=T)
}
# For source pop (use all samples not from focal)
cols = grep("A1.", names(genowide))
a = genowide$SiteNum != focal & genowide$SiteNum != focal2 & genowide$SiteNum != focal3 & genowide$SiteNum != focal4
for(i in cols){ # for each locus
thesealleles = sort(c(genowide[a,i], genowide[a,i+1]))
allalleles = alleles[[match(i,cols)]]
outline = numeric()
for(j in 1:length(allalleles)){
outline = c(outline, sum(thesealleles == allalleles[j]))
}
cat(paste(outline, collapse=","), file=mlnefile, sep=",", append=T)
cat("\n", file= mlnefile, append=T)
}
# number of starting points
cat("1", file=mlnefile, sep=",", append=T)
cat("\n", file= mlnefile, append=T)
close(mlnefile)
}
###############################################
## Output to MLNE2: All samples (TopTwo on anemone >= 8cm vs. lower quartile, some pops as focal, others as source)
# NOTE: focalpops is deliberately reassigned several times below; only the
# last assignment before the loops is used. Run the block for the desired
# grouping (sets of 4, mixed sets, pop-25 sets, pairs, or singles).
# sets of 4
focalpops = list(a = c(7,8,9,10), b = c(8,9,10,11), c = c(9,10,11,13), d = c(10,11,13,14), e = c(11,13,14,15), f = c(13,14,15,16), g = c(14,15,16,17), h = c(18,19,20,22), i = c(19,20,22,23), j = c(20,22,23,24), k = c(22,23,24,25), l = c(23,24,25,27))
prefix="TopTwo"
# sets of 4, 3, 2, and 1 (for testing only)
focalpops = list(a = c(7,8,9,10), b = c(8,9,10,11), c = c(9,10,11,13), d = c(10,11,13,14), e = c(11,13,14,15), f = c(13,14,15,16), g = c(14,15,16,17), h = c(18,19,20,22), i = c(19,20,22,23), j = c(20,22,23,24), k = c(22,23,24,25), l = c(23,24,25,27), m = c(7,8,9), n = c(8,9,10), o=c(9,10,11), p=c(10,11,13), q=c(11,13,14), r=c(13,14,15), s=c(14,15,16), t=c(15,16,17), u=c(18,19,20), v=c(19,20,22), w=c(20,22,23), x=c(22,23,24), y=c(23,24,25), z=c(24,25,27), aa=c(7,8), bb=c(8,9), cc=c(9,10), dd=c(10,11), ee=c(11,13), ff=c(13,14), gg=c(14,15), hh=c(15,16), ii=c(16,17), jj=c(18,19), kk=c(19,20), ll=c(20,22), mm=c(22,23), nn=c(23,24), oo=c(24,25), pp=c(25,27), qq=7, rr=8, ss=9, tt=10, uu=11, vv=13, ww=14, xx=15, yy=16, zz=17, aaa=18, bbb=19, ccc=20, ddd=22, eee=23, fff=24, ggg=25, hhh=27)
# sets of 1,2, and 3, all using pop 25 (determined from sets above)
focalpops = list(a = 25, b = c(24,25), c=c(23,24,25))
prefix="TopTwo"
# sets of 2
focalpops = list(aa=c(7,8), bb=c(8,9), cc=c(9,10), dd=c(10,11), ee=c(11,13), ff=c(13,14), gg=c(14,15), hh=c(15,16), ii=c(16,17), jj=c(18,19), kk=c(19,20), ll=c(20,22), mm=c(22,23), nn=c(23,24), oo=c(24,25), pp=c(25,27))
prefix="TopTwo"
# sets of 1
focalpops = list(qq=7, rr=8, ss=9, tt=10, uu=11, vv=13, ww=14, xx=15, yy=16, zz=17, aaa=18, bbb=19, ccc=20, ddd=22, eee=23, fff=24, ggg=25, hhh=27)
prefix="TopTwo"
NeMax = 10000  # upper bound on Ne passed to MLNE2
setwd("/Users/mpinsky/Documents/Stanford/Philippines/2008/Genotyping/Genotypes")
geno = read.csv("Aclarkii_genotypes_2009-03-13.csv", row.names=1)
# matchall: project helper; from usage below it appears to return the row
# indices of all elements matching any of a set of values -- TODO confirm
source("../Analysis/matchall.R")
# reshape to wide
dropnames = names(geno)
dropnames = c(dropnames[grep("A[[:digit:]][[:punct:]]", dropnames)], "error", "numgenos")
genowide = reshape(geno, direction="wide", v.names = c("A1consens", "A2consens"), timevar = "Marker", idvar = "Sample", drop=dropnames)
widenames = names(genowide)
widenames = gsub("consens", "", widenames)
names(genowide) = widenames
dim(genowide)
# remove ACH_A7, ACH_B9 and APR_Cf42 because out of HWE, high error or low completeness (3/15/09)
widenames = names(genowide)
i = c(grep("ACH_A7", widenames), grep("ACH_B9", widenames), grep("APR_Cf42", widenames))
genowide = genowide[,-i]
## REMOVE APCL285, 286 because identical
i = genowide$Sample == "APCL285" | genowide$Sample == "APCL286"
# BUG FIX: `i` is a logical mask, so `genowide[-i,]` is wrong -- unary minus
# coerces the logicals to 0/-1, which silently dropped row 1 instead of the
# two duplicate samples. Negate the mask to drop the matched rows.
genowide = genowide[!i,]
# add population and lat/long info
surveys = read.csv("../../Surveys/GPSSurveys2009-01-08.csv")
locations = read.csv("../../Surveys/Collections2009-03-26.csv")
locations$Sample = paste(locations$Spp, locations$ID, sep="")
genowide = merge(subset(locations, select=c(Sample,SurveyNum,Size,lat,long, TopTwo)), genowide, all.y=T, by="Sample")
genowide = merge(subset(surveys, select=c(SurveyNum,SiteNum,Name,Region,Municipality)), genowide, all.y=T)
dim(genowide)
# Size-quartile classes (assumes Size has no NAs; quantile() would error)
quants = quantile(genowide$Size, probs = c(0.25, 0.5, 0.75))
first = genowide$Size <= quants[1]
second = genowide$Size > quants[1] & genowide$Size <= quants[2]
third = genowide$Size > quants[2] & genowide$Size <= quants[3]
fourth = genowide$Size > quants[3]
fourth_inclusive = genowide$Size >= quants[3] # the largest fish
genowide$sizeclass = NA
genowide$sizeclass[first] = 1
genowide$sizeclass[second] = 2
genowide$sizeclass[third] = 3
genowide$sizeclass[fourth] = 4 # the largest fish
# order by sizeclass
i = order(genowide$sizeclass)
genowide = genowide[i,]
# How many breeding adults (TopTwo and > 8cm) and juvs (lowest quartile) in each population group?
for(rep in 1:length(focalpops)){
focal = focalpops[[rep]]
sites = matchall(focal, genowide$SiteNum)
a = intersect(which(genowide$TopTwo & genowide$Size >= 8), sites)
b = intersect(which(genowide$sizeclass == 1), sites)
print(paste("Pops ", paste(focal, collapse=","), ": Num Adults: ", length(a), " Num Juvs: ", length(b), sep=""))
}
# Classify every fish as breeding adult ("Ad": one of the two largest on
# its anemone and >= 8 cm) or juvenile ("Juv": lowest size quartile)
genowide$AdJuv = ""
genowide$AdJuv[genowide$TopTwo & genowide$Size >= 8] = "Ad"
genowide$AdJuv[genowide$sizeclass == 1] = "Juv"
# Write all groups to files: one MLNE2 (open population) input file per
# focal group. Adults = generation 0 sample, juveniles = generation 1,
# everything outside the focal sites = source population.
for(rep in 1:length(focalpops)){
focal = focalpops[[rep]]
# Write to file
mlnefile = file(paste("MLNE/Aclarkii_", Sys.Date(), "_MLNEfocal", prefix, paste(focal, collapse=""), ".csv", sep=""))
open(mlnefile, "w")
# Header: 1 (open pop), then max Ne, then screen indicator, num cpus (0 for all), then num loci,
cat(1, file=mlnefile, sep=",", append=F)
cat("\n", file= mlnefile, append=T)
cat(NeMax, file=mlnefile, sep=",", append=T)
cat("\n", file= mlnefile, append=T)
cat(2, file=mlnefile, sep=",", append=T)
cat("\n", file= mlnefile, append=T)
cat(0, file=mlnefile, sep=",", append=T) # number of CPUs
cat("\n", file= mlnefile, append=T)
cat(13, file=mlnefile, sep=",", append=T)
cat("\n", file= mlnefile, append=T)
# num alleles per locus (only for adults and juvs)
# Cache each locus's sorted allele list so count lines share one ordering.
outline = numeric()
cols = grep("A1.", names(genowide))
alleles = vector("list", 13)
a = genowide$AdJuv == "Ad" | genowide$AdJuv == "Juv"
for(i in cols){ # for each locus
# A2 column assumed adjacent (at i+1) to its A1 column
j = union(genowide[a,i], genowide[a,i+1])
j = j[!is.na(j)]
alleles[match(i,cols)] = list(sort(j))
outline = c(outline, length(j))
}
cat(paste(outline, collapse=","),file=mlnefile, sep=",", append=T)
cat("\n", file= mlnefile, append=T)
# num samples from focal pop
cat(2, file=mlnefile, sep=",", append=T)
cat("\n", file= mlnefile, append=T)
# generations when samples were taken
cat("0,1", file=mlnefile, sep=",", append=T)
cat("\n", file= mlnefile, append=T)
# Row indices of all samples from the focal site(s)
sites = matchall(focal, genowide$SiteNum)
# For adults in focal pops:
# Numbers of copies of each allele, at each locus, from each sample of the focal population
cols = grep("A1.", names(genowide))
a = intersect(which(genowide$AdJuv == "Ad"), sites)
print(paste("Pops ", paste(focal, collapse=","), ": Num Adults: ", length(a), sep=""))
for(i in cols){ # for each locus
thesealleles = sort(c(genowide[a,i], genowide[a,i+1]))
allalleles = alleles[[match(i,cols)]]
outline = numeric()
for(j in 1:length(allalleles)){
outline = c(outline, sum(thesealleles == allalleles[j]))
}
cat(paste(outline, collapse=","), file=mlnefile, sep=",", append=T)
cat("\n", file= mlnefile, append=T)
}
# For juvs in focal pop
cols = grep("A1.", names(genowide))
a = intersect(which(genowide$AdJuv == "Juv"), sites)
print(paste("Pops ", paste(focal, collapse=","), ": Num Juvs: ", length(a), sep=""))
for(i in cols){ # for each locus
thesealleles = sort(c(genowide[a,i], genowide[a,i+1]))
allalleles = alleles[[match(i,cols)]]
outline = numeric()
for(j in 1:length(allalleles)){
outline = c(outline, sum(thesealleles == allalleles[j]))
}
cat(paste(outline, collapse=","), file=mlnefile, sep=",", append=T)
cat("\n", file= mlnefile, append=T)
}
# For source pop (use all samples not from focal)
cols = grep("A1.", names(genowide))
a = setdiff(1:length(genowide$SiteNum), sites)
print(paste("Pops ", paste(focal, collapse=","), ": Num Source: ", length(a), sep=""))
for(i in cols){ # for each locus
thesealleles = sort(c(genowide[a,i], genowide[a,i+1]))
allalleles = alleles[[match(i,cols)]]
outline = numeric()
for(j in 1:length(allalleles)){
outline = c(outline, sum(thesealleles == allalleles[j]))
}
cat(paste(outline, collapse=","), file=mlnefile, sep=",", append=T)
cat("\n", file= mlnefile, append=T)
}
# number of starting points
cat("1", file=mlnefile, sep=",", append=T)
cat("\n", file= mlnefile, append=T)
close(mlnefile)
}
###############################################
## Output to MLNE2: Focal pops as closed (TopTwo on anemone >= 8cm vs. lower quartile)
# Same preparation as the open-population section above, but the output
# files declare a closed population (no source sample is written).
# NOTE: focalpops is deliberately reassigned; only the last assignment
# before the loops below is used -- run the block for the desired grouping.
# sets of 4
focalpops = list(a = c(7,8,9,10), b = c(8,9,10,11), c = c(9,10,11,13), d = c(10,11,13,14), e = c(11,13,14,15), f = c(13,14,15,16), g = c(14,15,16,17), h = c(18,19,20,22), i = c(19,20,22,23), j = c(20,22,23,24), k = c(22,23,24,25), l = c(23,24,25,27))
prefix="TopTwo"
# sets of 4, 3, 2, and 1 (for testing only)
focalpops = list(a = c(7,8,9,10), b = c(8,9,10,11), c = c(9,10,11,13), d = c(10,11,13,14), e = c(11,13,14,15), f = c(13,14,15,16), g = c(14,15,16,17), h = c(18,19,20,22), i = c(19,20,22,23), j = c(20,22,23,24), k = c(22,23,24,25), l = c(23,24,25,27), m = c(7,8,9), n = c(8,9,10), o=c(9,10,11), p=c(10,11,13), q=c(11,13,14), r=c(13,14,15), s=c(14,15,16), t=c(15,16,17), u=c(18,19,20), v=c(19,20,22), w=c(20,22,23), x=c(22,23,24), y=c(23,24,25), z=c(24,25,27), aa=c(7,8), bb=c(8,9), cc=c(9,10), dd=c(10,11), ee=c(11,13), ff=c(13,14), gg=c(14,15), hh=c(15,16), ii=c(16,17), jj=c(18,19), kk=c(19,20), ll=c(20,22), mm=c(22,23), nn=c(23,24), oo=c(24,25), pp=c(25,27), qq=7, rr=8, ss=9, tt=10, uu=11, vv=13, ww=14, xx=15, yy=16, zz=17, aaa=18, bbb=19, ccc=20, ddd=22, eee=23, fff=24, ggg=25, hhh=27)
# sets of 1,2, and 3, all using pop 25 (determined from sets above)
focalpops = list(a = 25, b = c(24,25), c=c(23,24,25))
prefix="TopTwo"
# sets of 2
focalpops = list(aa=c(7,8), bb=c(8,9), cc=c(9,10), dd=c(10,11), ee=c(11,13), ff=c(13,14), gg=c(14,15), hh=c(15,16), ii=c(16,17), jj=c(18,19), kk=c(19,20), ll=c(20,22), mm=c(22,23), nn=c(23,24), oo=c(24,25), pp=c(25,27))
prefix="TopTwo"
# sets of 1
focalpops = list(qq=7, rr=8, ss=9, tt=10, uu=11, vv=13, ww=14, xx=15, yy=16, zz=17, aaa=18, bbb=19, ccc=20, ddd=22, eee=23, fff=24, ggg=25, hhh=27)
prefix="TopTwo"
NeMax = 10000  # upper bound on Ne passed to MLNE2
setwd("/Users/mpinsky/Documents/Stanford/Philippines/2008/Genotyping/Genotypes")
geno = read.csv("Aclarkii_genotypes_2009-03-13.csv", row.names=1)
source("../Analysis/matchall.R")
# reshape to wide
dropnames = names(geno)
dropnames = c(dropnames[grep("A[[:digit:]][[:punct:]]", dropnames)], "error", "numgenos")
genowide = reshape(geno, direction="wide", v.names = c("A1consens", "A2consens"), timevar = "Marker", idvar = "Sample", drop=dropnames)
widenames = names(genowide)
widenames = gsub("consens", "", widenames)
names(genowide) = widenames
dim(genowide)
# remove ACH_A7, ACH_B9 and APR_Cf42 because out of HWE, high error or low completeness (3/15/09)
widenames = names(genowide)
i = c(grep("ACH_A7", widenames), grep("ACH_B9", widenames), grep("APR_Cf42", widenames))
genowide = genowide[,-i]
# add population and lat/long info
surveys = read.csv("../../Surveys/GPSSurveys2009-01-08.csv")
locations = read.csv("../../Surveys/Collections2009-03-26.csv")
locations$Sample = paste(locations$Spp, locations$ID, sep="")
genowide = merge(subset(locations, select=c(Sample,SurveyNum,Size,lat,long, TopTwo)), genowide, all.y=T, by="Sample")
genowide = merge(subset(surveys, select=c(SurveyNum,SiteNum,Name,Region,Municipality)), genowide, all.y=T)
dim(genowide)
# Size-quartile classes (assumes Size has no NAs)
quants = quantile(genowide$Size, probs = c(0.25, 0.5, 0.75))
first = genowide$Size <= quants[1]
second = genowide$Size > quants[1] & genowide$Size <= quants[2]
third = genowide$Size > quants[2] & genowide$Size <= quants[3]
fourth = genowide$Size > quants[3]
fourth_inclusive = genowide$Size >= quants[3] # the largest fish
genowide$sizeclass = NA
genowide$sizeclass[first] = 1
genowide$sizeclass[second] = 2
genowide$sizeclass[third] = 3
genowide$sizeclass[fourth] = 4 # the largest fish
# order by sizeclass
i = order(genowide$sizeclass)
genowide = genowide[i,]
# How many breeding adults (TopTwo and > 8cm) and juvs (lowest quartile) in each population group?
for(rep in 1:length(focalpops)){
focal = focalpops[[rep]]
sites = matchall(focal, genowide$SiteNum)
a = intersect(which(genowide$TopTwo & genowide$Size >= 8), sites)
b = intersect(which(genowide$sizeclass == 1), sites)
print(paste("Pops ", paste(focal, collapse=","), ": Num Adults: ", length(a), " Num Juvs: ", length(b), sep=""))
}
# Classify fish as breeding adult ("Ad") or lowest-quartile juvenile ("Juv")
genowide$AdJuv = ""
genowide$AdJuv[genowide$TopTwo & genowide$Size >= 8] = "Ad"
genowide$AdJuv[genowide$sizeclass == 1] = "Juv"
# Write all groups to files: one MLNE2 closed-population input file per
# focal group (adults = generation 0, juveniles = generation 1; alleles
# are restricted to those seen in the focal sites; no source sample).
for(rep in 1:length(focalpops)){
focal = focalpops[[rep]]
# Write to file
mlnefile = file(paste("MLNE/Aclarkii_", Sys.Date(), "_MLNEclosed", prefix, paste(focal, collapse=""), ".csv", sep=""))
open(mlnefile, "w")
# Header: 0 (closed pop), then max Ne, then screen indicator, num cpus (0 for all), then num loci,
cat(0, file=mlnefile, sep=",", append=F)
cat("\n", file= mlnefile, append=T)
cat(NeMax, file=mlnefile, sep=",", append=T)
cat("\n", file= mlnefile, append=T)
cat(2, file=mlnefile, sep=",", append=T)
cat("\n", file= mlnefile, append=T)
cat(1, file=mlnefile, sep=",", append=T) # number of CPUs
cat("\n", file= mlnefile, append=T)
cat(13, file=mlnefile, sep=",", append=T)
cat("\n", file= mlnefile, append=T)
# Row indices of all samples from the focal site(s)
sites = matchall(focal, genowide$SiteNum)
# num alleles per locus (only for adults and juvs in focal pop)
# Cache the sorted allele list per locus so count lines share one ordering.
outline = numeric()
cols = grep("A1.", names(genowide))
alleles = vector("list", 13)
a = intersect(which(genowide$AdJuv == "Ad" | genowide$AdJuv == "Juv"), sites)
for(i in cols){ # for each locus
# A2 column assumed adjacent (at i+1) to its A1 column
j = union(genowide[a,i], genowide[a,i+1])
j = j[!is.na(j)]
alleles[match(i,cols)] = list(sort(j))
outline = c(outline, length(j))
}
cat(paste(outline, collapse=","),file=mlnefile, sep=",", append=T)
cat("\n", file= mlnefile, append=T)
# num samples from focal pop
cat(2, file=mlnefile, sep=",", append=T)
cat("\n", file= mlnefile, append=T)
# generations when samples were taken
cat("0,1", file=mlnefile, sep=",", append=T)
cat("\n", file= mlnefile, append=T)
# For adults in focal pops:
# Numbers of copies of each allele, at each locus, from each sample of the focal population
cols = grep("A1.", names(genowide))
a = intersect(which(genowide$AdJuv == "Ad"), sites)
print(paste("Pops ", paste(focal, collapse=","), ": Num Adults: ", length(a), sep=""))
for(i in cols){ # for each locus
thesealleles = sort(c(genowide[a,i], genowide[a,i+1]))
allalleles = alleles[[match(i,cols)]]
outline = numeric()
for(j in 1:length(allalleles)){
outline = c(outline, sum(thesealleles == allalleles[j]))
}
cat(paste(outline, collapse=","), file=mlnefile, sep=",", append=T)
cat("\n", file= mlnefile, append=T)
}
# For juvs in focal pop
cols = grep("A1.", names(genowide))
a = intersect(which(genowide$AdJuv == "Juv"), sites)
print(paste("Pops ", paste(focal, collapse=","), ": Num Juvs: ", length(a), sep=""))
for(i in cols){ # for each locus
thesealleles = sort(c(genowide[a,i], genowide[a,i+1]))
allalleles = alleles[[match(i,cols)]]
outline = numeric()
for(j in 1:length(allalleles)){
outline = c(outline, sum(thesealleles == allalleles[j]))
}
cat(paste(outline, collapse=","), file=mlnefile, sep=",", append=T)
cat("\n", file= mlnefile, append=T)
}
close(mlnefile)
}
###############################################
## Output for Geneland: genotype matrix plus Lambert-projected coordinates.
geno = read.csv("Aclarkii_genotypes_2009-03-13.csv", row.names=1)
# reshape to wide
dropnames = names(geno)
dropnames = c(dropnames[grep("A[[:digit:]][[:punct:]]", dropnames)], "error", "numgenos")
genowide = reshape(geno, direction="wide", v.names = c("A1consens", "A2consens"), timevar = "Marker", idvar = "Sample", drop=dropnames)
widenames = names(genowide)
widenames = gsub("consens", "", widenames)
names(genowide) = widenames
dim(genowide)
# remove ACH_A7, ACH_B9 and APR_Cf42 because out of HWE, high error or low completeness (3/15/09)
widenames = names(genowide)
i = c(grep("ACH_A7", widenames), grep("ACH_B9", widenames), grep("APR_Cf42", widenames))
genowide = genowide[,-i]
gnlndfile =(paste("Aclarkii_", Sys.Date(), "_Gnlndgen.csv", sep=""))
geofile =(paste("Aclarkii_", Sys.Date(), "_Gnlndgeo.csv", sep=""))
# Write out genetic data (space-separated, no sample IDs or headers)
write.table(subset(genowide, select=c(-Sample)), gnlndfile, sep=" ", row.names=F, col.names=F)
# add lat/long info; keep pre-merge sample order in i to verify below
i = genowide$Sample
locations = read.csv("../../Surveys/Collections2008-11-21.csv")
locations$Sample = paste(locations$Spp, locations$ID, sep="")
genowide = merge(genowide, subset(locations, select=c(Sample,SurveyNum,Size,lat,long)), all.x=T, by="Sample", sort=F)
dim(genowide)
identical(i, genowide$Sample) # must be TRUE: same order of samples
# Write out geographic data in Lambert coordinates
## load packages
require(mapproj)
require(maps)
## check
plot(genowide$long, genowide$lat,type="n",xlab="Lon",ylab="Lat",asp=1)
points(genowide$long, genowide$lat,col=2)
map(resol=0,add=TRUE)
## convert (Lon,Lat) coordinates into Lambert
mapproj.res <- mapproject(x=genowide$long, y=genowide$lat, projection="lambert",
param=c(min(genowide$lat),max(genowide$lat)))
## save planar coordinates as a two-column matrix
coord.lamb <- cbind(mapproj.res$x,mapproj.res$y)
colnames(coord.lamb) = c("X", "Y")
write.table(coord.lamb, geofile, row.names=F, col.names=F)
###############################################
## Output for structure: space-separated genotype file (missing = -99).
geno = read.csv("Aclarkii_genotypes_2009-03-13.csv", row.names=1)
# reshape to wide
dropnames = names(geno)
dropnames = c(dropnames[grep("A[[:digit:]][[:punct:]]", dropnames)], "error", "numgenos")
genowide = reshape(geno, direction="wide", v.names = c("A1consens", "A2consens"), timevar = "Marker", idvar = "Sample", drop=dropnames)
widenames = names(genowide)
widenames = gsub("consens", "", widenames)
names(genowide) = widenames
dim(genowide)
# remove ACH_A7, ACH_B9 and APR_Cf42 because out of HWE, high error or low completeness (3/15/09)
widenames = names(genowide)
i = c(grep("ACH_A7", widenames), grep("ACH_B9", widenames), grep("APR_Cf42", widenames))
genowide = genowide[,-i]
# Write to file
structfile =file(paste("Aclarkii_", Sys.Date(), "_struct.csv", sep=""))
open(structfile, "w")
# Header: loci names (leading space offsets the header over the data cols)
i = grep("A1", names(genowide))
j = gsub("A1.", "", names(genowide)[i], fixed=T)
j = paste(j, collapse=" ")
j = paste(" ",j,sep="")
cat(j, file= structfile, sep=" ", append=F)
cat("\n", file= structfile, append=T)
# Data: Sample, cols of genotype data
widenames = names(genowide)
outfile = genowide[,c(grep("Sample",widenames),grep("A[[:digit:]]",widenames))]
write.table(outfile, file= structfile, append=T, quote=F,row.names=F, col.names=F, sep=" ", na="-99")
close(structfile)
###############################################
## Output for Alleles in Space (Miller 2005 J Hered)
geno = read.csv("Aclarkii_genotypes_2009-03-13.csv", row.names=1)
# reshape to wide
dropnames = names(geno)
dropnames = c(dropnames[grep("A[[:digit:]][[:punct:]]", dropnames)], "error", "numgenos")
genowide = reshape(geno, direction="wide", v.names = c("A1consens", "A2consens"), timevar = "Marker", idvar = "Sample", drop=dropnames)
widenames = names(genowide)
widenames = gsub("consens", "", widenames)
names(genowide) = widenames
dim(genowide)
# remove ACH_A7, ACH_B9 and APR_Cf42 because out of HWE, high error or low completeness (3/15/09)
widenames = names(genowide)
i = c(grep("ACH_A7", widenames), grep("ACH_B9", widenames), grep("APR_Cf42", widenames))
genowide = genowide[,-i]
# Output file names (genetic and geographic data) for Alleles in Space
aisfile =(paste("Aclarkii_", Sys.Date(), "_AISgen.csv", sep=""))
geofile =(paste("Aclarkii_", Sys.Date(), "_AISgeo.csv", sep=""))
# Write out genetic data
# Identify the first-allele ("A1.") columns up front so the locus count in the
# file header can be derived from the data rather than hard-coded (was "13",
# which would silently become wrong if the marker set changed).
cols = grep("A1.", names(genowide))
write.table(length(cols), aisfile, sep="", row.names=F, col.names=F, append=F) # num loci
# collapse the two alleles of each locus with \ ; missing genotypes coded as 0
genowide[is.na(genowide)] = 0
out = as.character(genowide$Sample)
out = cbind(out, paste(genowide[,cols[1]], genowide[,(cols[1]+1)], sep="\\"))
for(i in cols[2:length(cols)]){
  out = cbind(out, paste(genowide[,i], genowide[,(i+1)], sep="\\"))
}
write.table(out, aisfile, sep=", ", row.names=F, col.names=F, append=T, quote=F)
write.table(";", aisfile, sep=", ", row.names=F, col.names=F, append=T, quote=F) # section terminator expected by AIS
# add lat/long info for each genotyped sample
i = genowide$Sample # remember the pre-merge sample order
locations = read.csv("../../Surveys/Collections2008-11-21.csv")
locations$Sample = paste(locations$Spp, locations$ID, sep="")
genowide = merge(genowide, subset(locations, select=c(Sample,SurveyNum,Size,lat,long)), all.x=T, by="Sample", sort=F)
dim(genowide)
# The geographic coordinates written below assume rows are still in the same
# order as the genetic data, so enforce the invariant instead of only printing
# it (the original printed identical(...) and relied on the analyst noticing).
stopifnot(identical(i, genowide$Sample)) # must be TRUE: same order of samples
# Write out geographic data in Lambert coordinates
## load packages
require(mapproj)
require(maps)
## check
plot(genowide$long, genowide$lat,type="n",xlab="Lon",ylab="Lat",asp=1)
points(genowide$long, genowide$lat,col=2)
map(resol=0,add=TRUE)
## convert (Lon,Lat) coordinates into Lambert
mapproj.res <- mapproject(x=genowide$long, y=genowide$lat, projection="lambert",
param=c(min(genowide$lat),max(genowide$lat)))
## save planar coordinates as a two-column matrix
offset=1 # add an arbitrary offset so that all coords are positive
coord.lamb <- cbind(as.character(genowide$Sample), (mapproj.res$x+offset),(mapproj.res$y+offset))
write.table(coord.lamb, geofile, row.names=F, col.names=F, sep=", ", quote=F, append=F)
write.table(";", geofile, sep=", ", row.names=F, col.names=F, append=T, quote=F)
###############################################
## LDNE: Output juvs and adults separately: TopTwo on anemone >= 8cm vs. lower quartile)
## Uses Genepop format
setwd("/Users/mpinsky/Documents/Stanford/Philippines/2008/Genotyping/Genotypes")
geno = read.csv("Aclarkii_genotypes_2009-03-13.csv", row.names=1)
source("../Analysis/matchall.R")
# reshape to wide
dropnames = names(geno)
dropnames = c(dropnames[grep("A[[:digit:]][[:punct:]]", dropnames)], "error", "numgenos")
genowide = reshape(geno, direction="wide", v.names = c("A1consens", "A2consens"), timevar = "Marker", idvar = "Sample", drop=dropnames)
widenames = names(genowide)
widenames = gsub("consens", "", widenames)
names(genowide) = widenames
dim(genowide)
# remove ACH_A7, ACH_B9 and APR_Cf42 because out of HWE, high error or low completeness (3/15/09)
widenames = names(genowide)
i = c(grep("ACH_A7", widenames), grep("ACH_B9", widenames), grep("APR_Cf42", widenames))
genowide = genowide[,-i]
# add population and lat/long info
surveys = read.csv("../../Surveys/GPSSurveys2009-01-08.csv")
locations = read.csv("../../Surveys/Collections2009-03-26.csv")
locations$Sample = paste(locations$Spp, locations$ID, sep="")
genowide = merge(subset(locations, select=c(Sample,SurveyNum,Size,lat,long, TopTwo)), genowide, all.y=T, by="Sample")
genowide = merge(subset(surveys, select=c(SurveyNum,SiteNum,Name,Region,Municipality)), genowide, all.y=T)
dim(genowide)
quants = quantile(genowide$Size, probs = c(0.25, 0.5, 0.75))
first = genowide$Size <= quants[1]
second = genowide$Size > quants[1] & genowide$Size <= quants[2]
third = genowide$Size > quants[2] & genowide$Size <= quants[3]
fourth = genowide$Size > quants[3]
fourth_inclusive = genowide$Size >= quants[3] # the largest fish
genowide$sizeclass = NA
genowide$sizeclass[first] = 1
genowide$sizeclass[second] = 2
genowide$sizeclass[third] = 3
genowide$sizeclass[fourth] = 4 # the largest fish
# order by sizeclass
i = order(genowide$sizeclass)
genowide = genowide[i,]
genowide$AdJuv = ""
genowide$AdJuv[genowide$TopTwo & genowide$Size >= 8] = "Ad"
genowide$AdJuv[genowide$sizeclass == 1] = "Juv"
# How many breeding adults (TopTwo and > 8cm) and juvs (lowest quartile) in each population group?
groups =c("Ad", "Juv")
pops = sort(unique(genowide$SiteNum))
for(j in 1:length(pops)){
a = which(genowide$SiteNum==pops[j] & genowide$AdJuv == "Ad")
b = which(genowide$SiteNum==pops[j] & genowide$AdJuv == "Juv")
print(paste("Pops ", pops[j], ": Num Adults: ", length(a), " Num Juvs: ", length(b), sep=""))
}
# Write all groups to files
cols = grep("A1.", names(genowide))
loci = unlist(strsplit(names(genowide)[cols], "A1.", fixed=T))[seq(2,26, by=2)]
for(i in 1:2){
# Write to file
ldnename = paste("Aclarkii_", Sys.Date(), "_LDNE", groups[i], ".gen", sep="")
ldnefile = file(ldnename)
open(ldnefile, "w")
# Header
cat(paste("Title line: Aclarkii all pops, ", groups[i], "s only", sep=""), file= ldnefile, sep=",", append=F)
cat("\n", file= ldnefile, append=T)
cat(paste(loci, collapse="\n"), file= ldnefile, append=T)
cat("\n", file= ldnefile, append=T)
for(j in 1:length(pops)){
k = which(genowide$SiteNum==pops[j] & genowide$AdJuv == groups[i])
if(length(k)>0){
cat("Pop", file= ldnefile, sep="", append=T)
cat("\n", file= ldnefile, append=T)
# collapse locus alleles with ""
out = paste(" ", rep(pops[j], length(k)), ",", sep="") # name each indiv according to population name
out = cbind(out, paste(formatC(genowide[k,cols[1]], width=3, flag="0"), formatC(genowide[k,(cols[1]+1)], width=3, flag="0"), sep=""))
for(c in cols[2:length(cols)]){
out = cbind(out, paste(formatC(genowide[k,c], width=3, flag="0"), formatC(genowide[k,(c+1)], width=3, flag="0"), sep=""))
}
out[out==" NA NA"] = "000000"
for(c in 1:dim(out)[1]){
cat(paste(out[c,], collapse=" "), file=ldnefile, append=T)
cat("\n", file= ldnefile, append=T)
}
}
}
close(ldnefile)
}
###############################################
## TMVP: Output juvs and adults from one focal pop: TopTwo on anemone >= 8cm vs. lower quartile)
## Uses Genepop format with two populations. Run through Formatomatic to get to TMVP
setwd("/Users/mpinsky/Documents/Stanford/Philippines/2008/Genotyping/Genotypes")
geno = read.csv("Aclarkii_genotypes_2009-03-13.csv", row.names=1)
source("../Analysis/matchall.R")
# reshape to wide
dropnames = names(geno)
dropnames = c(dropnames[grep("A[[:digit:]][[:punct:]]", dropnames)], "error", "numgenos")
genowide = reshape(geno, direction="wide", v.names = c("A1consens", "A2consens"), timevar = "Marker", idvar = "Sample", drop=dropnames)
widenames = names(genowide)
widenames = gsub("consens", "", widenames)
names(genowide) = widenames
dim(genowide)
# remove ACH_A7, ACH_B9 and APR_Cf42 because out of HWE, high error or low completeness (3/15/09)
widenames = names(genowide)
i = c(grep("ACH_A7", widenames), grep("ACH_B9", widenames), grep("APR_Cf42", widenames))
genowide = genowide[,-i]
# add population and lat/long info
surveys = read.csv("../../Surveys/GPSSurveys2009-01-08.csv")
locations = read.csv("../../Surveys/Collections2009-03-26.csv")
locations$Sample = paste(locations$Spp, locations$ID, sep="")
genowide = merge(subset(locations, select=c(Sample,SurveyNum,Size,lat,long, TopTwo)), genowide, all.y=T, by="Sample")
genowide = merge(subset(surveys, select=c(SurveyNum,SiteNum,Name,Region,Municipality)), genowide, all.y=T)
dim(genowide)
quants = quantile(genowide$Size, probs = c(0.25, 0.5, 0.75))
first = genowide$Size <= quants[1]
second = genowide$Size > quants[1] & genowide$Size <= quants[2]
third = genowide$Size > quants[2] & genowide$Size <= quants[3]
fourth = genowide$Size > quants[3]
fourth_inclusive = genowide$Size >= quants[3] # the largest fish
genowide$sizeclass = NA
genowide$sizeclass[first] = 1
genowide$sizeclass[second] = 2
genowide$sizeclass[third] = 3
genowide$sizeclass[fourth] = 4 # the largest fish
# order by sizeclass
i = order(genowide$sizeclass)
genowide = genowide[i,]
genowide$AdJuv = ""
genowide$AdJuv[genowide$TopTwo & genowide$Size >= 8] = "Ad"
genowide$AdJuv[genowide$sizeclass == 1] = "Juv"
# How many breeding adults (TopTwo and > 8cm) and juvs (lowest quartile) in each population group?
groups =c("Ad", "Juv")
pops = sort(unique(genowide$SiteNum))
for(j in 1:length(pops)){
a = which(genowide$SiteNum==pops[j] & genowide$AdJuv == "Ad")
b = which(genowide$SiteNum==pops[j] & genowide$AdJuv == "Juv")
print(paste("Pops ", pops[j], ": Num Adults: ", length(a), " Num Juvs: ", length(b), sep=""))
}
# Write out focal pop in Genepop format (two "populations" = the two
# generations: adults and juveniles of site 'pop')
pop = 25
gens = c(0,1) # generation index written for each group (Ads = 0, Juvs = 1)
cols = grep("A1.", names(genowide)) # first-allele column of each locus
# Locus names: the part after the "A1." prefix; the upper end of the index
# sequence is derived from the number of loci (was hard-coded as 26 = 2 * 13)
loci = unlist(strsplit(names(genowide)[cols], "A1.", fixed=T))[seq(2, 2*length(cols), by=2)]
# Write to file. NOTE: "TVMP" in the file name is kept as-is (even though the
# section is called TMVP) so any downstream scripts that read it still work.
name = paste("Aclarkii_", Sys.Date(), "_TVMP", pop, ".gen", sep="")
con = file(name) # renamed from 'file' to avoid shadowing base::file()
open(con, "w")
# Header: title line, then one locus name per line
cat(paste("Title line: Aclarkii pops, ", pop, " Ads and Juvs", sep=""), file=con, sep=",", append=F)
cat("\n", file=con, append=T)
cat(paste(loci, collapse="\n"), file=con, append=T)
cat("\n", file=con, append=T)
for(i in seq_along(groups)){
  k = which(genowide$SiteNum==pop & genowide$AdJuv == groups[i])
  if(length(k)>0){
    cat("Pop", file=con, sep="", append=T)
    cat("\n", file=con, append=T)
    # collapse the two alleles of each locus into one 6-character field
    # (three digits per allele, zero-padded)
    out = paste(" ", rep(gens[i], length(k)), ",", sep="") # name each indiv according to its generation
    out = cbind(out, paste(formatC(genowide[k,cols[1]], width=3, flag="0"), formatC(genowide[k,(cols[1]+1)], width=3, flag="0"), sep=""))
    for(cc in cols[2:length(cols)]){ # loop variable renamed from 'c' (shadowed base::c)
      out = cbind(out, paste(formatC(genowide[k,cc], width=3, flag="0"), formatC(genowide[k,(cc+1)], width=3, flag="0"), sep=""))
    }
    out[out==" NA NA"] = "000000" # Genepop code for a missing genotype
    for(r in seq_len(nrow(out))){
      cat(paste(out[r,], collapse=" "), file=con, append=T)
      cat("\n", file=con, append=T)
    }
  }
}
close(con)
# NOTE: the original code had a stray unmatched "}" after close(), which made
# the whole script fail to parse; it has been removed.
########################
# Output for create and FaMoz
setwd("/Users/mpinsky/Documents/Stanford/Philippines/2008/Genotyping/Genotypes")
geno = read.csv("Aclarkii_genotypes_2009-03-13.csv", row.names=1)
dropnames = names(geno)
dropnames = c(dropnames[grep("A[[:digit:]][[:punct:]]", dropnames)], "error", "numgenos")
genowide = reshape(geno, direction="wide", v.names = c("A1consens", "A2consens"), timevar = "Marker", idvar = "Sample", drop=dropnames)
widenames = names(genowide)
widenames = gsub("consens", "", widenames)
names(genowide) = widenames
dim(genowide)
# add population info
surveys = read.csv("../../Surveys/GPSSurveys2009-01-08.csv")
locations = read.csv("../../Surveys/Collections2009-03-26.csv")
locations$Sample = paste(locations$Spp, locations$ID, sep="")
genowide = merge(subset(locations, select=c(Sample,SurveyNum,Size,lat,long, TopTwo)), genowide, all.y=T, by="Sample")
genowide = merge(subset(surveys, select=c(SurveyNum,SiteNum,Name,Region,Municipality)), genowide, all.y=T)
dim(genowide)
# remove ACH_A7, ACH_B9 and APR_Cf42 because out of HWE, high error or low completeness (3/15/09)
widenames = names(genowide)
i = c(grep("ACH_A7", widenames), grep("ACH_B9", widenames), grep("APR_Cf42", widenames))
genowide = genowide[,-i]
# add Ad/juv info: Adults are top two on anemone if >=8cm, juvs are everything else
genowide$AdJuv = 0
i = genowide$TopTwo & genowide$Size >= 8
genowide$AdJuv[i] = 0 # Adults
genowide$AdJuv[!i] = 1 # Juvs
# remove pops 18 and 7?
genowide = genowide[genowide$SiteNum != 7 & genowide$SiteNum != 18,]
# Write to file
# file = file(paste("Aclarkii_create_", Sys.Date(), ".csv", sep=""))
file = file(paste("Aclarkii_create_", Sys.Date(), "no18or7.csv", sep=""))
open(file, "w")
i = grep("A1", names(genowide))
j = names(genowide)[i[1]:(i[length(i)]+1)]
j = paste(j, collapse=",")
j = gsub(".", "_", j, fixed=T)
outline = paste("Pop,Indiv,Cohort,",j,sep="")
cat(outline, file=file, append=T)
cat("\n", file=file, append=T)
# Data: Sample, Pop, cols of genotype data, blank col, Lat, Long
genowide$blank = ""
widenames = names(genowide)
outfile = genowide[,c(grep("SiteNum",widenames),grep("Sample",widenames),grep("AdJuv", widenames), grep("A[[:digit:]]",widenames))]
write.table(outfile, file=file, append=T, quote=F,row.names=F, col.names=F, sep=",", na="0")
close(file)
|
96c9d44a04895de43dcd91c17a8b6332a9f496ea | f3bc7e85e05d77ae3ac0a3dd5138a6e005b80efc | /tests/testthat.R | 479810d4aa811382a87ce48b375e498f02d8a90c | [] | no_license | cran/boxoffice | 304c9232089ff5787efc4efce203ac0c83b53264 | f3e4eb63fa8bb56b1d4947857cd25f6961c10e62 | refs/heads/master | 2020-12-25T14:38:43.552547 | 2019-10-26T03:30:02 | 2019-10-26T03:30:02 | 66,137,002 | 1 | 1 | null | null | null | null | UTF-8 | R | false | false | 66 | r | testthat.R | library(testthat)
library(boxoffice)
test_check("boxoffice")
|
33096fdf07b768bb1d408d94e2fa3dda118d00ff | ba0761362e4bc842e41f5d1404fefca97a712abd | /tests/testthat/test-backend-snowflake.R | e73c7e6504500c1c918ea27cf265d6e58ad7ac88 | [
"MIT"
] | permissive | ndiquattro/flaky | 14908a9ab6970e49dadfc7802004e9cf9596f503 | 56683ea32378eb83035c134724732ed59b5c7e4a | refs/heads/master | 2021-01-07T20:36:10.213915 | 2020-11-17T05:21:59 | 2020-11-17T05:21:59 | 241,814,281 | 7 | 1 | NOASSERTION | 2020-11-17T05:22:00 | 2020-02-20T06:52:21 | R | UTF-8 | R | false | false | 779 | r | test-backend-snowflake.R | test_that("custom stringr functions translated correctly", {
trans <- function(x) {
dbplyr::translate_sql(!!rlang::enquo(x), con = dbplyr::simulate_dbi("Snowflake"))
}
expect_equal(trans(str_detect(x, y)), dbplyr::sql("CONTAINS(`x`, `y`)"))
expect_equal(trans(str_detect(x, y, negate = TRUE)), dbplyr::sql("NOT(CONTAINS(`x`, `y`))"))
expect_equal(trans(str_remove_all(x, y)), dbplyr::sql("REGEXP_REPLACE(`x`, `y`)"))
expect_equal(trans(str_starts(x, y)), dbplyr::sql("STARTSWITH(`x`, `y`)"))
expect_equal(trans(str_starts(x, y, negate = TRUE)), dbplyr::sql("NOT(STARTSWITH(`x`, `y`))"))
expect_equal(trans(str_ends(x, y)), dbplyr::sql("ENDSWITH(`x`, `y`)"))
expect_equal(trans(str_ends(x, y, negate = TRUE)), dbplyr::sql("NOT(ENDSWITH(`x`, `y`))"))
})
|
568011399bff84375b8c90e0da581a85d7adf719 | ffdea92d4315e4363dd4ae673a1a6adf82a761b5 | /data/genthat_extracted_code/sensitivity/examples/sobolTIIlo.Rd.R | f5c417487d5de7de5c428c199b77688fb9589448 | [] | no_license | surayaaramli/typeRrh | d257ac8905c49123f4ccd4e377ee3dfc84d1636c | 66e6996f31961bc8b9aafe1a6a6098327b66bf71 | refs/heads/master | 2023-05-05T04:05:31.617869 | 2019-04-25T22:10:06 | 2019-04-25T22:10:06 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 683 | r | sobolTIIlo.Rd.R | library(sensitivity)
### Name: sobolTIIlo
### Title: Liu and Owen Estimation of Total Interaction Indices
### Aliases: sobolTIIlo tell.sobolTIIlo print.sobolTIIlo plot.sobolTIIlo
### plotFG.sobolTIIlo
### Keywords: design
### ** Examples
# Test case : the Ishigami function
# The method requires 2 samples
n <- 1000
X1 <- data.frame(matrix(runif(3 * n, -pi, pi), nrow = n))
X2 <- data.frame(matrix(runif(3 * n, -pi, pi), nrow = n))
# sensitivity analysis (the true values of the scaled TIIs are 0, 0.244, 0)
x <- sobolTIIlo(model = ishigami.fun, X1 = X1, X2 = X2)
print(x)
# plot of tiis and FANOVA graph
plot(x)
## No test:
library(igraph)
plotFG(x)
## End(No test)
|
27ef29558a0e52f38cbd131aaa98cc644518a997 | 83ae362a3e99619cfb980a35d9cabb26400756e7 | /list_matrix_06_10.R | dbedd8797f40ca7e50f3d47243091db3aa38e42d | [] | no_license | rlatjsrb/R_basic | 8e9456692f1e4898a6441fa378ce6012f920e6b5 | c85140356a411c6f27720b1d724c235f16ef396f | refs/heads/main | 2023-05-28T05:48:03.515075 | 2021-06-14T08:33:42 | 2021-06-14T08:33:42 | 375,417,135 | 0 | 0 | null | null | null | null | UHC | R | false | false | 3,436 | r | list_matrix_06_10.R |
R version 4.0.3 (2020-10-10) -- "Bunny-Wunnies Freak Out"
Copyright (C) 2020 The R Foundation for Statistical Computing
Platform: x86_64-w64-mingw32/x64 (64-bit)
R은 자유 소프트웨어이며, 어떠한 형태의 보증없이 배포됩니다.
또한, 일정한 조건하에서 이것을 재배포 할 수 있습니다.
배포와 관련된 상세한 내용은 'license()' 또는 'licence()'을 통하여 확인할 수 있습니다.
R은 많은 기여자들이 참여하는 공동프로젝트입니다.
'contributors()'라고 입력하시면 이에 대한 더 많은 정보를 확인하실 수 있습니다.
그리고, R 또는 R 패키지들을 출판물에 인용하는 방법에 대해서는 'citation()'을 통해 확인하시길 부탁드립니다.
'demo()'를 입력하신다면 몇가지 데모를 보실 수 있으며, 'help()'를 입력하시면 온라인 도움말을 이용하실 수 있습니다.
또한, 'help.start()'의 입력을 통하여 HTML 브라우저에 의한 도움말을 사용하실수 있습니다
R의 종료를 원하시면 'q()'을 입력해주세요.
[이전에 저장한 작업공간을 복구하였습니다]
> my1<-list("kim", "student", "korea", 1.2)
> my1
[[1]]
[1] "kim"
[[2]]
[1] "student"
[[3]]
[1] "korea"
[[4]]
[1] 1.2
> myfavorite<-list(friend='Lee', mynumber=7, myalpha='z')
> myfavorite
$friend
[1] "Lee"
$mynumber
[1] 7
$myalpha
[1] "z"
> myfavorite$myalpha
[1] "z"
> myfavorite$mynum-2
[1] 5
> myfavorite$mysong<-'hello'
> myfavorite
$friend
[1] "Lee"
$mynumber
[1] 7
$myalpha
[1] "z"
$mysong
[1] "hello"
> myfavorite$myfood<-c('chocolate', 'candy', 'cake', 'chicken', 'cola')
> myfavorite
$friend
[1] "Lee"
$mynumber
[1] 7
$myalpha
[1] "z"
$mysong
[1] "hello"
$myfood
[1] "chocolate" "candy" "cake" "chicken" "cola"
> age<-matrix(c(25,33,32,37,27,38),nrow = 2, ncol = 3)
> age
[,1] [,2] [,3]
[1,] 25 32 27
[2,] 33 37 38
> age<-matrix(c(25,33,32,37,27,38),ncol = 3, byrow = TRUE)
> age
[,1] [,2] [,3]
[1,] 25 33 32
[2,] 37 27 38
> info<-matrix(c('177cm','68kg','156cm','57kg','160cm','48kg','175cm','60kg'),ncol=2,byrow=TRUE)
> info
[,1] [,2]
[1,] "177cm" "68kg"
[2,] "156cm" "57kg"
[3,] "160cm" "48kg"
[4,] "175cm" "60kg"
> dimnames(info)<-list(c('1.','2.',3.','4.'),c('height','weight'))
에러: 예상하지 못한 문자열 상수(string constant)입니다. in "dimnames(info)<-list(c('1.','2.',3.','"
> dimnames(info)<-list(c('1.','2.','3.','4.'),c('height','weight'))
> info
height weight
1. "177cm" "68kg"
2. "156cm" "57kg"
3. "160cm" "48kg"
4. "175cm" "60kg"
> cbind(info,c('M','M','F','F'))
height weight
1. "177cm" "68kg" "M"
2. "156cm" "57kg" "M"
3. "160cm" "48kg" "F"
4. "175cm" "60kg" "F"
> info
height weight
1. "177cm" "68kg"
2. "156cm" "57kg"
3. "160cm" "48kg"
4. "175cm" "60kg"
> info<-cbind(info,c('M','M','F','F'))
> info
height weight
1. "177cm" "68kg" "M"
2. "156cm" "57kg" "M"
3. "160cm" "48kg" "F"
4. "175cm" "60kg" "F"
> a1<-c('180cm','70kg','M')
> a2<-c('185cm','68kg','M')
> rbind(info,a1,a2)
height weight
1. "177cm" "68kg" "M"
2. "156cm" "57kg" "M"
3. "160cm" "48kg" "F"
4. "175cm" "60kg" "F"
a1 "180cm" "70kg" "M"
a2 "185cm" "68kg" "M"
> save.image("C:\\Users\\kskyu\\Desktop\\Rscript\\list_matrix_06_10")
>
|
1940e283ec10027ad2c12bd07dc4e51e44a8dab5 | 6ce1b9f3806a01331d1f0f536b85df764a414564 | /tests/testthat.R | e3798af1a5d7e1216e9c137c9cd2c659a975a17d | [
"CC0-1.0",
"LicenseRef-scancode-unknown-license-reference"
] | permissive | ndiquattro/flophouse | 77caaa040176b82d08449b0dab64c7dfe6a478a0 | 235d9f74f0f18461b113471a7f21681cba650bef | refs/heads/master | 2021-08-28T20:37:50.764819 | 2017-12-13T04:56:43 | 2017-12-13T04:56:43 | 113,827,726 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 62 | r | testthat.R | library(testthat)
library(flophouse)
test_check("flophouse")
|
a6ad9a79891507ae6bf7f03c9d44554f1963745a | 64c71251ad5df390e5954c3919d7c1fc10a443bb | /3.Figures.R | b3fe8d0439e87921dc202549362fbc05014e3e34 | [] | no_license | robwschlegel/Trend_Analysis | 767631a11f88b2610f43a4e3a3a1f2f417422b53 | e84da681b67e880d374e9ced12965207cd7f39ea | refs/heads/master | 2021-06-11T13:05:51.885285 | 2017-01-29T12:57:44 | 2017-01-29T12:57:44 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 23,648 | r | 3.Figures.R | ###########################################################################
### "figures.R"
## This script shows the code used to generate the figures for the paper
# 1. Load all packages required for the analyses
# 2. Load all functions etc. found in other scripts
# 3. Load the site list and spatial data used for the map
# 4. Create the map of Africa to be windowed
# 5. Create the map of southern Africa
# 6. Add site list information
# 7. Create figure01
# 8. Load analysis results
# 9.
#############################################################################
#############################################################################
### 1. Load all packages required for the analyses
library(ggplot2)
library(plyr)
library(dplyr)
library(reshape2)
library(tidyr)
library(tibble)
library(doMC); doMC::registerDoMC(cores = 4)
#############################################################################
### 2. Load all functions etc. found in other scripts
source("func/scaleBarFunc.R") # A custom ggplot function that creates a very snazy scale bar
source("setupParams/themes.R") # The ggplot theme used for all figures
#############################################################################
### 3. Load the site list and spatial data used for the map
## The site data (provides 'SACTN_sub2', the retained temperature series)
load("data/SACTN_sub2.Rdata")
## The site list
sites <- read.csv("setupParams/site_list_v4.0.csv")
# Build the same "site/ src" index used in SACTN_sub2 so the two can be matched
sites$index <- as.factor(paste(sites$site, sites$src, sep = "/ "))
# Subset to the 84 time series used
sites_sub <- sites[sites$index %in% levels(SACTN_sub2$index),]
## Coastline of African Continent (provides 'africa_coast')
load("graph/africa_coast.RData")
## Borders of African countries (provides 'africa_borders')
load("graph/africa_borders.Rdata")
## Coastline of Southern Africa (provides 'south_africa_coast')
load("graph/south_africa_coast.RData")
## Province borders (provides 'sa_provinces_new')
load("graph/sa_provinces_new.RData")
# Reduce province border resolution (disabled; kept for reference)
# sa_provinces_new$index <- 1:12 # Reduce it by 92%
# sa_provinces_new <- droplevels(subset(sa_provinces_new, index == 1))
##########################################################################
### 4. Create the map of Africa to be windowed
## The map + SA filled in
# NOTE: the original chain added coord_equal() and then coord_map(); ggplot2
# keeps only the last coordinate system, so coord_equal() was overridden (with
# a "Coordinate system already present" message) and has been removed here.
# The rendered figure is unchanged.
africa <- ggplot(africa_coast, aes(x = lon, y = lat)) + # Main dataset: continental coastline
  theme_bw() + # Black and white base theme
  geom_polygon(aes(group = group), colour = "black", fill = "grey80") + # Draw the coast
  geom_polygon(data = sa_provinces_new, aes(group = group)) + # Fill South Africa via its provinces
  annotate("text", label = "Africa", x = 16.0, y = 15.0, size = 3) + # Continent label size and position
  theme(panel.border = element_rect(colour = "black", size = 0.4), # Border around the inset
        plot.background = element_blank(), # Transparent background
        axis.ticks = element_blank(), # Remove tick marks
        axis.text = element_blank(), # Remove lat/ lon numbers
        axis.title = element_blank(), # Remove lat/ lon labels
        panel.grid.major = element_blank(), # Removes major grid lines
        panel.grid.minor = element_blank()) + # Removes minor grid lines
  coord_map(xlim = c(-20, 53), ylim = c(-36, 38), projection = "mercator") # Constricts view to Africa
africa
##########################################################################
### The map of southern Africa
## Create the base figure
# NOTE: the original chain started with coord_equal(), which ggplot2 replaced
# with the coord_cartesian() call below (only the last coordinate system is
# kept), so it has been removed; the rendered figure is unchanged. If a fixed
# aspect ratio *was* intended, use coord_fixed(xlim = ..., ylim = ...) instead.
SA <- ggplot() + theme_bw() +
  # Landmass
  geom_polygon(data = south_africa_coast, aes(x = lon, y = lat, group = group),
               colour = NA, fill = "grey80") +
  # International borders
  geom_path(data = africa_borders, aes(x = lon, y = lat, group = group),
            size = 1.0, colour = "black") +
  # Thick coastal border
  geom_polygon(data = south_africa_coast, aes(x = lon, y = lat, group = group),
               size = 1.0, colour = "black", fill = NA) +
  # Scale bar (scaleBar() is sourced from func/scaleBarFunc.R)
  scaleBar(lon = 29, lat = -35.8, distanceLon = 200, distanceLat = 20, distanceLegend = 40, dist.unit = "km",
           arrow.length = 90, arrow.distance = 60, arrow.North.size = 5) +
  # Map plotting limits
  coord_cartesian(xlim = c(14.5, 33.5), ylim = c(-27, -36)) +
  theme(axis.title = element_blank()) # Remove lat/ lon labels
SA
##########################################################################
### # 6. Add site list information
# Overlay oceanographic annotations (ocean names, current arrows) and the
# sampling stations on the base map. 'sites' holds all candidate stations;
# 'sites_sub' only the 84 time series retained for the analyses.
map <- SA +
  # Oceans
  annotate("text", label = "Indian\nOcean", x = 32.60, y = -32.9, size = 5.0, angle = 0) +
  annotate("text", label = "Atlantic\nOcean", x = 15.50, y = -32.9, size = 5.0, angle = 0) +
  # Benguela (arrow points north-west along the west coast)
  geom_segment(aes(x = 17.2, y = -32.6, xend = 15.2, yend = -29.5),
               arrow = arrow(length = unit(0.4, "cm")), size = 1.0, colour = "grey50") +
  annotate("text", label = "Benguela", x = 16.1, y = -31.5, size = 4.0, angle = 297) +
  # Agulhas (arrow points south-west along the east coast)
  geom_segment(aes(x = 33, y = -29.5, xend = 29.8, yend = -33.0),
               arrow = arrow(length = unit(0.4, "cm")), size = 1.0, colour = "grey50") +
  annotate("text", label = "Agulhas", x = 31.6, y = -31.6, size = 4.0, angle = 53) +
  # Landmass
  annotate("text", label = "South\nAfrica", x = 24.00, y = -31.00, size = 8, angle = 0) +
  # The unused stations (semi-transparent)
  geom_point(data = sites, aes(lon, lat), colour = "black", size = 4.0, alpha = 0.3) +
  geom_point(data = sites, aes(lon, lat), colour = "white", size = 2.5, alpha = 0.3) +
  # The used stations (opaque; drawn after, so they sit on top)
  geom_point(data = sites_sub, aes(lon, lat), colour = "black", size = 4.0) +
  geom_point(data = sites_sub, aes(lon, lat), colour = "white", size = 2.5) +
  # Legend styling kept for reference but currently disabled:
  # scale_colour_grey(breaks = c("new", "old", "thermo"),
  #                   label = c("new", "old", "thermo")) +
  # guides(shape = guide_legend("Type", override.aes = list(size = 2.5, colour = "black"))) +
  labs(title = NULL, x = NULL, y = NULL) #+
  # theme(legend.key = element_blank())
map
##########################################################################
### 7. Create figure01
## Combine the two maps into one inset figure (a small Africa locator drawn
## inside the full-panel southern Africa map), written straight to PDF.
# viewport() comes from the grid package, which none of the library() calls at
# the top of this script attach; without it the script stops here with
# "could not find function 'viewport'". library() is idempotent, so this is
# harmless if grid is already attached (e.g. by a sourced helper script).
library(grid)
pdf("graph/SA_sites.pdf", width = 8, height = 5, pointsize = 6) # Set PDF dimensions
# NOTE: 'w'/'h' rely on partial matching of viewport()'s width/height arguments
vp1 <- viewport(x = -0.00, y = 0.05, w = 0.25, h = 0.25, just = c("left", "bottom")) # Africa inset
vp2 <- viewport(x = 1.0, y = 1.0, w = 1.00, h = 1.00, just = c("right", "top")) # South Africa (full panel)
print(map, vp = vp2)
print(africa, vp = vp1)
dev.off()
##########################################################################
### 8. Load analysis results
# Each of the two .RData files below provides an object named 'gls_df', which
# is converted to a tibble under its own name and then removed before the next
# load() would silently overwrite it.
load(file = "data/gls_fitted_full_nointerp_natural.RData") # for non-interpolated, full, natural
gls_nat <- as_tibble(gls_df)
glimpse(gls_nat)
rm(gls_df)
# the interpolated, grown data need to be replaced with non-interpolated, grown data...
load(file = "data/gls_fitted_full_nointerp_grown.RData") # for non-interpolated, full, grown (original comment said "interpolated", but the file name says "nointerp")
gls_gro <- as_tibble(gls_df)
glimpse(gls_gro)
rm(gls_df)
# Flattened monthly temperature series used for the boxplot figure below
load("data/SACTN_flat_interp.Rdata")
SACTN_flat <- as_tibble(SACTN_flat)
SACTN_flat # print for a quick visual check
# boxplots ----------------------------------------------------------------
# Per-series boxplots of the detrended temperature anomalies (the data
# themselves remain unchanged); saved as graph/all_plt1.pdf.
plt_box <- ggplot(SACTN_flat, aes(x = index, y = temp, group = index)) +
  geom_boxplot(size = 0.3, outlier.size = 0.5, show.legend = FALSE,
               outlier.shape = 21, notch = TRUE, fill = "grey80", varwidth = TRUE) +
  geom_hline(yintercept = 0, size = 0.8, col = "black", linetype = "dashed") +
  scale_x_discrete(name = "Time series no.", labels = 1:length(levels(SACTN_flat$index))) +
  scale_y_continuous(name = expression(paste("Detrended temperature anomaly (", degree, "C)"))) +
  theme(axis.text.x = element_text(angle = 90, vjust = 0.5, size = 8),
        axis.title = element_text(size = 12))
print(plt_box) # display, as the original top-level piped expression auto-printed
ggsave("graph/all_plt1.pdf", plot = plt_box, width = 8.0, height = 3.25, units = "in")
# data prep for correlation -----------------------------------------------
# One row per (site/src, DT, precision) combination; split by measurement
# precision so the modelled trends can be compared pairwise below.
dat_w <- gls_nat %>%
  unite(fac, site, src, remove = TRUE) %>%
  select(fac, DT, length, DT_model, prec)
x1 <- filter(dat_w, prec == "prec0001") # reference precision (0.001)
x2 <- filter(dat_w, prec == "prec001")
x3 <- filter(dat_w, prec == "prec01")
x4 <- filter(dat_w, prec == "prec05")
# The paired tests below require the same number of observations in each
# subset. The original check used length(), which on a data frame counts
# *columns* (trivially equal here); nrow() checks what was actually intended,
# and all four subsets are now compared, not just the first two.
nrow(x1) == nrow(x2) && nrow(x2) == nrow(x3) && nrow(x3) == nrow(x4) # must be TRUE; proceed...
# t-tests and correlations ------------------------------------------------
# Paired t-tests: does rounding temperatures to a coarser precision shift the
# fitted trend relative to the 0.001 reference? Results are printed to the
# console only, not stored.
t.test(x1$DT_model, x2$DT_model, paired = TRUE) # different?!
t.test(x1$DT_model, x3$DT_model, paired = TRUE) # not different!!
t.test(x1$DT_model, x4$DT_model, paired = TRUE) # different!
# Correlations of the reference trends against the 0.01 and 0.5 precision
# trends; stored for reporting (not referenced again in this visible section).
cor1 <- cor.test(x = x1$DT_model, y = x2$DT_model)
cor2 <- cor.test(x = x1$DT_model, y = x4$DT_model)
# correlation plots -------------------------------------------------------
# Two-panel PDF: modelled trends at the 0.001 reference precision plotted
# against the 0.01 and 0.5 precision trends.
pdf(file = "graph/correlations_new.pdf", width = 6, height = 3)
opar <- par(mfrow = c(1, 2)) # save the graphics state so it can be restored
plot(x1$DT_model, x2$DT_model, pch = ".", col = "black", type = "p",
     xlab = "Precision: 0.001", ylab = "Precision: 0.01")
plot(x1$DT_model, x4$DT_model, pch = ".", col = "black", type = "p",
     xlab = "Precision: 0.001", ylab = "Precision: 0.5")
par(opar) # restore all changed settings (instead of hard-coding mfrow back)
dev.off()
# RMSE --------------------------------------------------------------------
# Root-mean-square error between two equal-length numeric vectors.
#
# Args:
#   vec1: numeric vector, treated as the reference values.
#   vec2: numeric vector of the same length as vec1.
#   na.rm: drop incomplete pairs before averaging? Defaults to FALSE,
#          matching the original behaviour (any NA yields an NA result).
# Returns: a single non-negative number; smaller means closer agreement.
# The explicit length check replaces R's silent recycling, which would
# otherwise produce a misleading result for mismatched inputs.
rmse <- function(vec1, vec2, na.rm = FALSE) {
  stopifnot(is.numeric(vec1), is.numeric(vec2), length(vec1) == length(vec2))
  sqrt(mean((vec1 - vec2)^2, na.rm = na.rm))
}
# assume pred0001 is best, i.e. reference, then...
# RMSE of each coarser-precision trend set against the 0.001 reference;
# values are only printed to the console.
rmse(x1$DT_model, x2$DT_model) # smaller is better
rmse(x1$DT_model, x3$DT_model)
rmse(x1$DT_model, x4$DT_model)
# data prep for plotting --------------------------------------------------
# Natural (un-grown) series at the 0.01 precision level only.
dat <- gls_nat %>%
  filter(prec == "prec001") %>%
  select(site, src, DT, DT_model, se_trend, sd_initial, sd_residual,
         p_trend, length) %>%
  unite(fac, site, src, remove = FALSE)
# Convert the DT labels ("DT000" ... "DT020") to the numeric trends they
# encode (0 ... 0.20): strip the "DT" prefix and divide by 100. This replaces
# five hard-coded recodes and also handles any additional DT levels.
dat$DT <- as.numeric(sub("DT", "", dat$DT, fixed = TRUE)) / 100
# Grown series: same preparation as for 'dat', plus the year_index column
# recording how far each series has been grown.
dat_gro <- gls_gro %>%
  select(site, src, DT, DT_model, se_trend, sd_initial, sd_residual,
         p_trend, length, year_index) %>%
  unite(fac, site, src, remove = FALSE)
# Convert "DT000"..."DT020" labels to numeric trends (0...0.20), as for 'dat':
# strip the "DT" prefix and divide by 100, replacing five hard-coded recodes.
dat_gro$DT <- as.numeric(sub("DT", "", dat_gro$DT, fixed = TRUE)) / 100
# other questions ---------------------------------------------------------
# the relationship between precision and regression (slope) SE?
# the relationship between sd_initial and regression (slope) SE?
# Modelled vs. actual trend, one boxplot per actual-trend level, with the
# jittered points coloured by the initial SD of each series.
dat %>%
  ggplot(aes(x = DT, y = DT_model, group = as.factor(DT))) +
  geom_jitter(aes(col = sd_initial), show.legend = TRUE, width = 0.015, shape = 1) +
  geom_boxplot(fill = "grey20", alpha = 0.35, outlier.colour = NA, size = 0.3) +
  scale_x_continuous(name = expression(paste("Actual trend (", degree, "C/dec)"))) +
  scale_y_continuous(name = expression(paste("Model trend (", degree, "C/dec)")),
                     breaks = c(-0.2, 0, 0.05, 0.1, 0.15, 0.2, 0.4)) +
  scale_colour_distiller(name = "Initial SD", palette = "Spectral") +
  theme(axis.text.x = element_text(angle = 90, vjust = 0.5))
ggsave("graph/all_plt0_no_interp_natural.pdf", plot = last_plot(), width = 7, height = 4.5, units = "in")
# NOTE(review): 'bins' is computed here but never used in the plot below or
# anywhere else in this visible section — it looks like a leftover from an
# earlier, binned version of the figure.
bins <- cut(dat$length, breaks = seq(from = min(dat$length) - 1, to = max(dat$length), length.out = 10),
            right = FALSE, include.lowest = TRUE)
# As the previous figure, but with point size mapped to record length
# (months) instead of colouring by initial SD.
dat %>%
  ggplot(aes(x = DT, y = DT_model, group = as.factor(DT))) +
  geom_boxplot(fill = "white", outlier.colour = NA, size = 0.3) +
  geom_jitter(aes(size = length), show.legend = TRUE, width = 0.025, shape = 1) +
  scale_x_continuous(name = expression(paste("Actual trend (", degree, "C/dec)"))) +
  scale_y_continuous(name = expression(paste("Model trend (", degree, "C/dec)")),
                     breaks = c(-0.2, 0, 0.05, 0.1, 0.15, 0.2, 0.4)) +
  # scale_colour_grey(name = "Time series length (months)", start = 1, end = 0.1, na.value = "red") +
  scale_size_area(name = "Time series length (months)", max_size = 3) +
  theme(axis.text.x = element_text(angle = 90, vjust = 0.5),
        axis.title = element_text(size = 14),
        legend.title = element_text(size = 10))
ggsave("graph/all_plt1_no_interp_natural.pdf", plot = last_plot(), width = 7, height = 4.5, units = "in")
# plotting modelled trend vs. length (natural, no-interp) -----------------
# Fitted trend as a function of record length (months), one panel per
# actual-trend level.
dat %>%
  ggplot(aes(x = length, y = DT_model)) +
  geom_line(col = "black", show.legend = TRUE) +
  scale_x_continuous(name = "Time series length (months)") +
  scale_y_continuous(name = expression(paste("Model trend (", degree, "C)")),
                     limits = c(-0.5, 0.5)) +
  facet_wrap("DT", ncol = 5) +
  theme(axis.text.x = element_text(angle = 90, vjust = 0.5))
ggsave("graph/all_plt2_no_interp_natural.pdf", plot = last_plot(), width = 8, height = 2, units = "in")
# plotting modelled trend vs. length (grown, no-interp) -------------------
# Grown series: one line per site/source ('fac'), coloured by the trend SE,
# showing how the fitted trend changes as each series is grown year by year.
dat_gro %>%
  ggplot(aes(x = year_index, y = DT_model, group = fac)) +
  geom_line(aes(col = se_trend), show.legend = TRUE, alpha = 0.85, size = 0.3) +
  scale_x_continuous(name = "Time series length (years)") +
  scale_y_continuous(name = expression(paste("Model trend (", degree, "C)")),
                     limits = c(-2, 2)) +
  scale_colour_distiller(name = "SE of trend", palette = "Greys") +
  facet_wrap("DT", ncol = 5) +
  theme(axis.text.x = element_text(angle = 90, vjust = 0.5),
        legend.position = "right",
        legend.direction = "vertical",
        axis.title = element_text(size = 14),
        legend.title = element_text(size = 10))
ggsave("graph/all_plt2_no_interp_gro.pdf", plot = last_plot(), width = 8, height = 2.0, units = "in")
# plotting p-value vs. SD (initial) (natural, no-interp) ------------------
dat %>%
ggplot(aes(x = sd_initial, y = p_trend)) +
geom_hline(yintercept = 0.05, col = "red") +
geom_point(aes(size = length), col = "black", shape = 21, stroke = 0.2) +
scale_y_continuous(name = "p-value", limits = c(0, 1)) +
scale_x_continuous(name = expression(paste("Initial SD (", degree, "C)"))) +
scale_size_continuous(name = "Time series length (months)") +
facet_wrap("DT", ncol = 1)
ggsave("graph/all_plt4_no_interp_natural.pdf", plot = last_plot(), width = 5, height = 7,
units = "in")
## NB: "graph/all_plt4_no_interp_natural_coast.pdf" created in "5.Text.R"
# plotting p-value vs. SD (initial) (grown, no-interp) --------------------
dat_gro %>%
ggplot(aes(x = sd_initial, y = p_trend)) +
geom_hline(yintercept = 0.05, col = "red") +
geom_point(aes(size = length), col = "black", shape = 21, stroke = 0.2) +
scale_y_continuous(name = "p-value", limits = c(0, 1)) +
scale_x_continuous(name = expression(paste("Initial SD (", degree, "C)"))) +
scale_size_continuous(name = "Length (months)") +
facet_wrap("DT", ncol = 1)
ggsave("graph/all_plt4_no_interp_gro.pdf", plot = last_plot(), width = 5, height = 7,
units = "in")
# plotting DT/DT_modeled vs. length (natural, no-interp) ------------------
# Ratio of the actual (imposed) trend to the modelled trend; a value of 1
# means the model recovered the imposed trend exactly.
dat %>%
ggplot(aes(x = length, y = abs(DT/DT_model))) +
geom_point(aes(size = se_trend/20, alpha = ((1/se_trend) * 2)), col = "black",
shape = 21, show.legend = TRUE) +
scale_x_continuous(name = "Time series length (months)") +
scale_y_continuous(name = "Actual trend / Model trend", limits = c(-0.1, 1)) +
scale_alpha_continuous(guide = FALSE) +
scale_size_continuous(name = "SE of trend") +
facet_wrap("DT", ncol = 5)
ggsave("graph/all_plt6_no_interp_natural.pdf", plot = last_plot(), width = 8, height = 2.45, units = "in")
# plotting DT/DT_modeled vs. length (grown, no-interp) --------------------
dat_gro %>%
ggplot(aes(x = year_index, y = abs(DT/DT_model))) +
geom_point(aes(size = se_trend/20, alpha = ((1/se_trend) * 2)), col = "black",
shape = 21, show.legend = TRUE) +
# NOTE(review): x is year_index but the axis label says "months" -- confirm
# which unit is intended (the grown plots above label year_index in years).
scale_x_continuous(name = "Time series length (months)") +
scale_y_continuous(name = "Actual trend / Model trend", limits = c(-0.1, 1)) +
scale_alpha_continuous(guide = FALSE) +
scale_size_continuous(name = "SE of trend") +
facet_wrap("DT", ncol = 5)
ggsave("graph/all_plt6_no_interp_gro.pdf", plot = last_plot(), width = 8, height = 2.45, units = "in")
# plotting ts length vs. se_trend (natural, no-interp) --------------------
# The SE of the fitted trend shrinks as the series gets longer.
dat %>%
ggplot(aes(x = length, y = se_trend)) +
geom_line(col = "black", show.legend = TRUE) +
scale_x_continuous(name = "Time series length (months)") +
scale_y_continuous(name = "SE of trend") +
facet_wrap("DT", ncol = 5) +
theme(axis.text.x = element_text(angle = 90, vjust = 0.5))
ggsave("graph/all_plt7_no_interp_natural.pdf", plot = last_plot(), width = 8, height = 2, units = "in")
# plotting ts length vs. se_trend (grown, no-interp) ----------------------
# NOTE(review): dat_gro_DT020 is created here but never used below -- the plot
# filters inline instead.  Also note the mismatch between the label "DT020"
# in this filter and the numeric 0.20 in the next one; only one of the two
# can match the actual type of dat_gro$DT, so one of these filters returns
# zero rows.  Confirm which representation dat_gro uses.
dat_gro_DT020 <- dat_gro %>%
filter(DT == "DT020")
dat_gro %>%
filter(DT == 0.20) %>%
ggplot(aes(x = year_index, y = se_trend, group = fac)) +
geom_line(aes(col = sd_initial), alpha = 0.7, show.legend = TRUE, size = 0.5) +
scale_x_continuous(name = "Time series length (years)") +
scale_y_continuous(name = "SE of trend") +
scale_colour_distiller(name = expression(paste("Initial SD (", degree, "C)")),
direction = 1, palette = "Greys") +
# facet_wrap("DT", ncol = 1) +
theme(axis.text.x = element_text(angle = 90, vjust = 0.5))
ggsave("graph/all_plt7_no_interp_grown.pdf", plot = last_plot(), width = 4, height = 2, units = "in")
# The effect of interpolation on the data ---------------------------------
## Load the data that has percent NA and type columns to add them to the GLS data frames
load("data/SACTN_sub2.Rdata")
# Column 8 holds the percentage of missing values; give it a stable name.
colnames(SACTN_sub2)[8] <- "na_perc"
SACTN_sub2 <- as.data.frame(SACTN_sub2)
## Load the modelled data and add the index and type columns
# The interpolated data
load("data/gls_fitted_full_interp_grown.RData")
gls_df_interp <- gls_df
# "site/ src" composite key, used to look up %NA in SACTN_sub2$index below.
gls_df_interp$index <- as.factor(paste(gls_df_interp$site, gls_df_interp$src, sep = "/ "))
# Console-only count of series with a significant trend (p <= 0.05).
length(gls_df_interp$site[gls_df_interp$p_trend <= 0.05])
# The non-interpolated data
load("data/gls_fitted_full_nointerp_grown.RData")
gls_df_non <- gls_df; rm(gls_df)
length(gls_df_non$site[gls_df_non$p_trend <= 0.05])
test <- gls_df_non[gls_df_non$p_trend <= 0.05,]
## Subset gls_fitted_full_interp_grown results by max(year_index) and Prec0.001 # Or just use a "natural" data frame
# The interpolated data
# Keep, per site/src, only the highest-precision fit at the full series length.
gls_df_interp <- gls_df_interp %>%
group_by(site, src) %>%
filter(prec == "prec0001") %>%
filter(year_index == max(year_index))
# The non-interpolated data
gls_df_non <- gls_df_non %>%
group_by(site, src) %>%
filter(prec == "prec0001") %>%
filter(year_index == max(year_index))
## Add NA% from data_summary2
# The interpolated data
# Look up the %NA for each site/src key; the trailing [1] guards against
# duplicate matches in SACTN_sub2.
gls_df_interp <- gls_df_interp %>%
group_by(index) %>%
mutate(interp_perc = SACTN_sub2$na_perc[SACTN_sub2$index == index][1])
gls_df_interp <- data.frame(gls_df_interp)
# The non-interpolated data
gls_df_non <- gls_df_non %>%
group_by(index) %>%
mutate(na_perc = SACTN_sub2$na_perc[SACTN_sub2$index == index][1])
gls_df_non <- data.frame(gls_df_non)
## "Grow" the limit of NA/ Interp used on the data
# Set limits for missing data
# Each limit keeps only the series whose missing/interpolated fraction falls
# under it; the nested subsets are stacked into one long data frame, tagged
# by miss_limit, for the faceted plots below.
miss_limit <- c(1, 2.5, 5, 7.5, 10, 12.5, 15)
# Grow the interpolated data
gls_df_interp_grow <- data.frame()
for(i in 1:length(miss_limit)){
data1 <- data.frame(gls_df_interp[gls_df_interp$interp_perc <= miss_limit[i],])
data1$miss_limit <- miss_limit[i]
gls_df_interp_grow <- rbind(gls_df_interp_grow, data1)
}; rm(data1)
# Grow the non-interpolated data
gls_df_non_grow <- data.frame()
for(i in 1:length(miss_limit)) {
data1 <- data.frame(gls_df_non[gls_df_non$na_perc <= miss_limit[i],])
data1$miss_limit <- miss_limit[i]
gls_df_non_grow <- rbind(gls_df_non_grow, data1)
}; rm(data1)
## Correct the "DT" column for plotting
# Map the "DTxxx" labels onto their numeric trend values (deg C/dec) with a
# single named-vector lookup, replacing five repeated per-label replacements
# followed by as.numeric().  as.character() makes the lookup safe whether DT
# is stored as character or factor.
# NB: labels absent from the lookup become NA, matching the NA that
# as.numeric() produced for any non-numeric leftover label in the original.
dt_lookup <- c(DT000 = 0, DT005 = 0.05, DT010 = 0.10, DT015 = 0.15, DT020 = 0.20)
# The interpolated data
gls_df_interp_grow$DT <- unname(dt_lookup[as.character(gls_df_interp_grow$DT)])
# The non-interpolated data
gls_df_non_grow$DT <- unname(dt_lookup[as.character(gls_df_non_grow$DT)])
## Graph the relationship between p_trend and missing values
# The interpolated data
# Panels by missing-data limit; miss_limit == 7.5 is dropped from the figure
# (presumably redundant with its neighbours -- TODO confirm).
gls_df_interp_grow %>%
filter(miss_limit != 7.5) %>%
ggplot(aes(x = log(interp_perc), y = p_trend)) +
geom_point(aes(shape = as.factor(DT)), col = "black", stroke = 0.3) +
geom_smooth(aes(fill = as.factor(DT)), method = "lm", size = 0.3, col = "black") +
# geom_smooth(aes(colour = as.factor(DT)), method = "glm", method.args = list(family = "poisson")) +
scale_x_continuous(name = "Log % NA Interpolated") +
scale_y_continuous(name = "p-value") +
scale_fill_grey(name = expression(paste("Trend (", degree, "C/dec)")), start = 0.1, end = 0.9) +
scale_shape_discrete(name = expression(paste("Trend (", degree, "C/dec)"))) +
facet_wrap(~miss_limit, scales = "free_x")
ggsave("graph/interp_NA_perc.pdf", height = 6, width = 10)
# The non-interpolated data
# Quick distribution diagnostics to justify the log transform of na_perc.
library(fitdistrplus)
descdist(gls_df_non_grow$na_perc, boot = 500, graph = TRUE)
plot(hist(gls_df_non_grow$na_perc))
plot(hist(log(gls_df_non_grow$na_perc)))
gls_df_non_grow %>%
filter(miss_limit != 7.5) %>%
ggplot(aes(x = log(na_perc), y = p_trend)) +
geom_point(aes(shape = as.factor(DT)), col = "black", stroke = 0.3) +
geom_smooth(aes(fill = as.factor(DT)), method = "lm", size = 0.3, col = "black") +
# geom_smooth(aes(colour = as.factor(DT)), method = "glm", method.args = list(family = "poisson")) +
scale_x_continuous(name = "Log % NA") +
scale_y_continuous(name = "p-value") +
scale_fill_discrete(name = expression(paste("Trend (", degree, "C/dec)"))) +
scale_shape_discrete(name = expression(paste("Trend (", degree, "C/dec)"))) +
facet_wrap(~miss_limit, scales = "free_x") +
theme(axis.title = element_text(size = 16),
legend.title = element_text(size = 14))
ggsave("graph/non_NA_perc.pdf", height = 6, width = 10)
## Calculate correlation summary between p_trend and missing values
# For each DT x miss_limit cell: mean p-value, correlation between the trend
# p-value and the missing/interpolated fraction, and the p-value of that
# correlation test.
# The interpolated data
# NOTE(review): this block was commented out, but the rbind() below requires
# gls_df_interp_stats (otherwise it errors on an undefined object), so it is
# re-enabled here.  The column indices c(3, 19:22) assume the current column
# layout of gls_df_interp_grow -- verify if columns are added or removed.
gls_df_interp_stats <- gls_df_interp_grow %>%
group_by(DT, miss_limit) %>%
mutate(p_mean = mean(p_trend)) %>%
mutate(r_miss_p = cor(p_trend, interp_perc)) %>%
mutate(p_miss_p = as.numeric(cor.test(p_trend, interp_perc)[3]))
gls_df_interp_stats <- gls_df_interp_stats[c(3,19:22)]
gls_df_interp_stats <- gls_df_interp_stats %>%
unique() %>%
mutate(method = "interp")
# The non-interpolated data
gls_df_non_stats <- gls_df_non_grow %>%
group_by(DT, miss_limit) %>%
mutate(p_mean = mean(p_trend)) %>%
mutate(r_miss_p = cor(p_trend, na_perc)) %>%
mutate(p_miss_p = as.numeric(cor.test(p_trend, na_perc)[3]))
gls_df_non_stats <- gls_df_non_stats[c(3,18:21)]
gls_df_non_stats <- gls_df_non_stats %>%
unique() %>%
mutate(method = "non-interp")
# Combine and Melt for plotting
gls_df_all_stats <- rbind(gls_df_interp_stats, gls_df_non_stats)
# NOTE(review): melt() needs reshape2 (or data.table) attached; neither is
# loaded in this script -- confirm it is attached upstream.
gls_df_all_stats_long <- melt(gls_df_all_stats, id = c("method", "DT", "miss_limit"))
# xtable::xtable(gls_df_interp_stats)
## Compare the two sets of stats visually
ggplot(data = gls_df_all_stats_long, aes(x = miss_limit, y = value)) +# bw_update +
geom_point(aes(colour = DT, linetype = method)) +
geom_line(aes(colour = DT, linetype = method)) +
facet_wrap(~variable, ncol = 1, scales = "free_y")
# NOTE(review): unlike every other ggsave() in this script, the output path
# has no "graph/" prefix -- confirm whether that is intentional.
ggsave("interp_vs_non_stats.pdf", height = 6, width = 10)
|
aecfe4e94be3bce87883212274138237c1f4c7d8 | 7164d4515036f3ebce26f3a0b7f0b3031683aaa1 | /man/createSimmodule.Rd | 62a14098c09049d5dcf84c868978bca0861187e0 | [] | no_license | kcha193/simarioV2 | 0a96193cf09f32acea2287d76687a9e1ee2eb237 | 66c7bfbb3dfd3dbd7d9b95d7d9b84632e6aa5aca | refs/heads/master | 2023-03-15T14:34:46.222056 | 2023-02-26T01:59:06 | 2023-02-26T01:59:06 | 57,259,491 | 5 | 3 | null | null | null | null | UTF-8 | R | false | true | 1,486 | rd | createSimmodule.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/Simmodule.r
\name{createSimmodule}
\alias{createSimmodule}
\title{Creates a Simulation module object.}
\usage{
createSimmodule(name)
}
\arguments{
\item{name}{name of this object}
}
\value{
a list
}
\description{
A simulation module is really the core of a simulation. It contains the code and output for a distinct set
of results generated, eg: health outcomes for years 1 - 10.
It contains the following key elements:
}
\details{
outcomes - a list of all outcome matrices for the Simmodule.
Each Simmodule has a \code{simulateRun} method which transforms the simframe. Typically, transformations will
move variables for micro-units in the simframe through multiple iterations (or time steps).
At the end of each iteration, for outcome variables (as defined in the simframe), the current values
for all micro-units are stored in an outcome matrix.
An outcome matrix contains the set of values for each micro-unit during each iteration.
At the end of each run a set of run stats is calculated for outcomes. A run stat is essentially a function that takes
an outcome matrix and produces an aggregate value for each iteration.
This aggregate value may be a single value (eg: mean), a vector (eg: frequencies, quantiles, summary),
or a matrix (eg: 2 way table).
Run stats are averaged across multiple runs by collateRunStats to get a final simulation result.
}
|
97b3b44696a2f5156a711fe924c3c25f254e5593 | 57eb613a446a89e08918c18e4a2ef5b7904754a3 | /tests/testthat/test-parentage.R | 445cbd03283a4909192ec89b2f7a282968f01807 | [] | no_license | kate-crosby/ProgenyArray | c0fec380460e1d21d16477a05d05361e88e59f70 | 6fde9526f0bcb953251a28473a7c042b40254211 | refs/heads/master | 2020-12-28T23:49:59.252767 | 2015-05-15T23:44:22 | 2015-05-15T23:44:22 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 1,836 | r | test-parentage.R | # parentage.R --
# Copyright (C) 2014 Vince Buffalo <vsbuffalo@gmail.com>
# Distributed under terms of the BSD license.
# Fixed simulation parameters: the regression tests below vary only locus
# count, missingness, and genotyping error, not these dimensions.
# not testing over this parameters space
NPARENT <- 100
NPROGENY <- 100
SELFING <- 0.5
# Format the testthat failure note comparing the expected threshold with the
# observed proportion of correctly assigned parents.
error_message <- function(ex, act) {
  fmt <- "proportion correct below expectations: %0.3f expected, %0.3f actual"
  sprintf(fmt, ex, act)
}
# Run one parentage-inference regression test: simulate a progeny array,
# infer parents, and assert that the proportion of correctly assigned
# parents exceeds a threshold.
#
# NOTE(review): despite its name, `max_error` is the *minimum* acceptable
# proportion of correct calls -- propCorrectParents() returns a proportion
# correct, not an error rate.  The parameter name is kept for call
# compatibility; the misleading local name has been fixed.
#
# Args:
#   max_error: minimum acceptable proportion of correct parentage calls.
#   nloci: number of simulated loci.
#   prop_parent_missing, prop_progeny_missing: missing-genotype fractions.
#   ehet, ehom: heterozygote/homozygote error rates used to simulate data.
#   ehet_inf, ehom_inf: error rates assumed during inference (default: the
#     simulation rates).
test_parentage <- function(max_error, nloci, prop_parent_missing, prop_progeny_missing,
                           ehet, ehom, ehet_inf=ehet, ehom_inf=ehom) {
  # Fixed seed so the simulated array (and the pass/fail outcome) is
  # reproducible across runs.
  set.seed(0)
  shutup <- function(x) suppressMessages(suppressWarnings(x))
  pa <- shutup(SimulatedProgenyArray(nparent=NPARENT, nprogeny=NPROGENY, nloci=nloci, selfing=SELFING,
                                     prop_parent_missing=prop_parent_missing,
                                     prop_progeny_missing=prop_progeny_missing,
                                     ehet=ehet, ehom=ehom))
  pa <- shutup(inferParents(pa, ehet_inf, ehom_inf))
  prop_correct <- propCorrectParents(pa)
  msg <- sprintf("parentage error rate exceeded with loci=%d, error=(%0.2f, %0.2f), missing=(%0.2f, %0.2f)",
                 nloci, ehet, ehom, prop_parent_missing, prop_progeny_missing)
  test_that(msg, {
    expect_true(prop_correct > max_error, error_message(max_error, prop_correct))})
}
## To regression test, we use simulated data and send bounds for errors
context("parentage tests, no error")
# Perfect genotypes: expect at least 90% / 96% of parents called correctly.
test_parentage(0.90, 50, 0, 0, 0, 0)
test_parentage(0.96, 100, 0, 0, 0, 0)
context("parentage tests, with error")
# 500 loci with error = (het=0.5, hom=0.1)
test_parentage(0.85, 500, 0, 0, 0.5, 0.1)
# 1000 loci with error = (het=0.5, hom=0.1)
test_parentage(0.98, 1000, 0, 0, 0.5, 0.1)
# 5000 loci with error = (het=0.5, hom=0.1) and missing = (0.01, 0.1)
test_parentage(0.95, 5000, 0.01, 0.1, 0.5, 0.1)
|
fbe184fecfc87f61a5201c0d9563ff3269ddad65 | e6a115b95ffba2caac921daeb2ef911b504d0142 | /CD109_analyses_with_plot.R | 4c39cb06e5441a1a57a9957c39560b3247eaa86b | [] | no_license | zhsiru/BMD_WES_paper | 46472ab1f8244c15281909191d477f0cf38e2b7c | 16695ca11cba9b355f339c308d0fc8de4bb4bee9 | refs/heads/main | 2023-09-05T23:29:44.397834 | 2021-11-11T20:48:17 | 2021-11-11T20:48:17 | 427,138,669 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 5,820 | r | CD109_analyses_with_plot.R | setwd("C:\\Users\\sirui.zhou\\work\\regeneron-wes")
# NOTE(review): machine-specific hard-coded path (and it overrides another
# setwd() just above) -- prefer running from the project root instead.
setwd("C:\\Users\\Sirui\\Desktop\\WORKS\\regeneron")
# Mineralization measurements for control + five CD109 knock-down cell lines.
dt <- data.table::fread("CD109_5cells_mineral_v2.txt")
# Fix the factor order to the order the cells appear in the file.
dt[, Cell := factor(Cell, levels = unique(Cell))]
levels(dt$Cell)
# Western-blot CD109 protein levels (as a fraction of control).
dt_western <- data.table::fread("CD109_5cells_western.txt")
# Attach each cell line's median scaled CD109 level to the mineralization data.
dt_main <- dplyr::left_join(dt,
dt_western[, .(med_scaled = median(Level_perc)), by = Cell])
# M2 <- lme4::lmer(Mineralization ~ Cell + (1|Exp),
# data=dt)
#
# M2_null <- lme4::lmer(Mineralization ~ 1 + (1|Exp),
# data=dt)
#
# # Test the exposure. I.e cell
# anova(M2, M2_null)
# GLS with compound-symmetry correlation within experiment (alternative to
# the mixed model commented out above); ML fits so the likelihood-ratio
# anova against the null model is valid.
M2.cs <- nlme::gls(Mineralization ~ Cell, data = dt,
corr = nlme::corCompSymm(form = ~ 1 | Exp), method = "ML" )
M2.cs_null <- nlme::gls(Mineralization ~ 1, data = dt,
corr = nlme::corCompSymm(form = ~ 1 | Exp), method = "ML" )
# Likelihood-ratio test for any overall effect of Cell on mineralization.
anova.res <- anova(M2.cs, M2.cs_null)
anova.res
anova.res$`p-value`[2]
# Effect of knock-down/median CD109 expression level on mineralization
M2.cs_level <- nlme::gls(Mineralization ~ med_scaled, data = dt_main,
corr = nlme::corCompSymm(form = ~ 1 | Exp) )
coef(summary(M2.cs_level))
###Plot Sirui###
library(tidyverse)
library(rstatix)
library(ggpubr)
require(gridExtra)
# Per-cell mean mineralization (console check).
group_by(dt_main, Cell) %>% summarize(m = mean(Mineralization))
dt_western$Exp <- as.factor(dt_western$Exp)
dt_western %>%
group_by(Cell) %>%
get_summary_stats(Level_perc, type = "mean_sd")
dt_western$Cell <- factor(dt_western$Cell,
levels = c('control','70A116','72A144','72A124','72A123','70A146'),ordered = TRUE)
# Panel A: CD109 protein level by cell line, with repeated-measures ANOVA
# (within-subject factor = Cell, subject = Exp) in the subtitle.
bxp_w <- ggboxplot(dt_western, x = "Cell", y = "Level_perc", add = "point", fill = "lightblue", alpha=0.7, color="black") +
labs(y= "CD109 Level percentage to control") + ylim(0,1)
res.aov_w <- anova_test(data = dt_western, dv = Level_perc, wid = Exp, within = Cell)
get_anova_table(res.aov_w)
A=bxp_w +
labs(
subtitle = get_test_label(res.aov_w, detailed = F)
) + xlab("")
dt_main$Exp <- as.factor(dt_main$Exp)
dt_main$Cell <- factor(dt_main$Cell,
levels = c('control','70A116','72A144','72A124','72A123','70A146'), ordered = TRUE)
dt_main %>%
group_by(Cell) %>%
get_summary_stats(Mineralization, type = "mean_sd")
# Panel B: mineralization by cell line.
# NOTE(review): the Beta and p-value in the subtitle below are hard-coded
# from the M2.cs_level fit above and will silently drift if the data change.
bxp_m <- ggboxplot(dt_main, x = "Cell", y = "Mineralization", fill = "#f03b20", color="black", alpha = 0.7, add = "point")
B=bxp_m +
labs(
subtitle = expression(paste("Change in CD109 expression level on mineralization: Beta = -1.71, p = 1.8x10"^{"-7"})),
y= "Mineralization per ug CD109") +
theme(text=element_text(size=13,
family="Sans")) + xlab("")
####
# Panel C: per-cell GLS coefficients (effect vs. control) with 95% CIs.
s_m2 <- summary(M2.cs)
coef_dt <- data.table::data.table(param = row.names(coef(s_m2)), coef(s_m2), confint(M2.cs))
# Remove intercept
coef_dt <- coef_dt[grep("intercept", param, ignore.case = T, invert = T)]
# Order by median cd109 level
coef_dt[, Cell := gsub("Cell", "", param)]
coef_dt[, Cell := factor(Cell, levels = c('70A116','72A144','72A124','72A123','70A146'))]
# NOTE(review): the anova p-value in the subtitle is also hard-coded.
C <- ggplot(coef_dt, aes(x = Cell, y = Value, ymin = `2.5 %`, ymax = `97.5 %`)) +
geom_pointrange(size = 0.6, position = position_dodge(width = 0.5)) +
geom_hline(yintercept = 0, linetype = "dashed") +
geom_errorbar(width = 0.2, position = position_dodge(width = 0.5)) +
xlab("Cell") +
ylab("Effect on CD109 mineralization\ncompared to control (95% CI)") +
labs(
subtitle = expression(paste("Anova, p = 6.9x10"^{"-10"}))) + theme_classic() +
theme(text=element_text(size=13,
family="Sans"),
axis.text.x=element_text(size=13, color = "black", hjust=0.5),
axis.text.y=element_text(size=13, color = "black"))
# Combined three-panel figure.
grid.arrange(A, B, C)
###end###
# Plot
# Standalone versions of the coefficient and mineralization figures.
library(ggplot2)
ggplot(coef_dt, aes(x = Cell, y = Value, ymin = `2.5 %`, ymax = `97.5 %`)) +
geom_pointrange(size = 1.5, position = position_dodge(width = 0.5)) +
geom_hline(yintercept = 0, linetype = "dashed") +
geom_errorbar(width = 0.2, position = position_dodge(width = 0.5)) +
xlab("Cell line") +
ylab("Effect on CD109 mineralization\ncompared to control (95% CI)") +
theme_bw() +
theme(plot.title = element_text(size = 11, face = "bold", hjust = 0.5),
text = element_text(size = 12),
axis.title = element_text(face="bold"),
axis.text.y=element_text(size = 10),
axis.text.x=element_text(face = "bold"),
legend.direction = "horizontal",
legend.position = c(0.2,0.9),
legend.title = element_text(face="bold"))
# NOTE(review): `wd` is never defined in this script, so this ggsave() will
# fail unless `wd` exists in the session -- confirm where it is set.
ggsave(file.path(wd, "cell_x_mineralization.pdf"), dpi = "retina", width = 10, height = 5)
plot_dt <- data.table::data.table(dt_main)
# Order the cell lines by their median western-blot CD109 level.
order_var <- dt_western[, .(med_scaled = median(Level_perc)), by = Cell][order(med_scaled)][, .(Cell)]
plot_dt[, Cell_ord := factor(Cell, levels = order_var$Cell)]
# Dashed blue reference line: the control cells' median mineralization.
med_control <- plot_dt[Cell == "control", median(Mineralization)]
ggplot(plot_dt[Cell != "control"], aes(x = Cell_ord, y = Mineralization)) +
geom_boxplot() +
geom_hline(yintercept = med_control, linetype = "dashed", col = "blue") +
xlab("Cell line") +
ylab("CD109 Mineralization") +
theme_bw() +
theme(plot.title = element_text(size = 11, face = "bold", hjust = 0.5),
text = element_text(size = 12),
axis.title = element_text(face="bold"),
axis.text.y=element_text(size = 10),
axis.text.x=element_text(face = "bold"),
legend.direction = "horizontal",
legend.position = c(0.2,0.9),
legend.title = element_text(face="bold"))
# NOTE(review): same undefined `wd` as above.
ggsave(file.path(wd, "level_x_mineralization.pdf"), dpi = "retina", width = 10, height = 5)
|
72b86b83817155dc5f1e067674b3af820156db65 | 184aae15a38a32ca59befbfd8775406e559bd9c5 | /ReadingNotes/ggplot learning.R | b22d80cff4e7437929a3bf22b483a54501746c4c | [] | no_license | WangLiuying/R_scripts | d20703060e74dbbf005acae04066c14bd7a430b4 | 9f873d1a9f656d15c6b24ea712897883a220f14d | refs/heads/master | 2021-06-26T06:56:11.329934 | 2018-05-17T03:41:02 | 2018-05-17T03:41:02 | 96,893,364 | 0 | 1 | null | null | null | null | UTF-8 | R | false | false | 1,662 | r | ggplot learning.R | ##ggplot学习
## ggplot2 learning notes (comments translated from Chinese).
library(ggplot2)
data(mpg)
# Scatterplots: city vs. highway fuel economy.
p <- ggplot(data=mpg,mapping=aes(x=cty,y=hwy))
p +geom_point()
p <- ggplot(data=mpg,mapping=aes(x=cty,y=hwy,colour=factor(year)))
p +geom_point()
p+geom_point()+stat_smooth(method=loess)
p <- ggplot(data=mpg,mapping=aes(x=cty,y=hwy))
p+geom_point(aes(colour=factor(year)))+stat_smooth()
p+geom_point(aes(colour=factor(year)))+stat_smooth()+
scale_color_manual(values=c("lightblue","pink"))
p+geom_point(aes(colour=factor(year),size=displ))+stat_smooth()+
scale_color_manual(values=c("lightblue","pink"))
p+geom_point(aes(colour=factor(year),size=displ),alpha=0.5,position = "jitter")+stat_smooth()+
scale_color_manual(values=c("blue","pink"))+
scale_size_continuous(range=c(1,4))
# Facetting, titles, and legend customisation.
p+geom_point(aes(colour=factor(class),size=displ),alpha=0.6,position = "jitter")+stat_smooth()+
scale_size_continuous(range=c(1,5))+
facet_wrap(~year,ncol=1)+
labs(title="main title",x="xlab",y="ylab")+
guides(color=guide_legend(title="type",override.aes=list(size=10)),
size=guide_legend(title="displ"))
## Histograms
p <- ggplot(mpg,aes(x=hwy))
p+geom_histogram()
p+geom_histogram(aes(fill=factor(year),y=..density..),alpha=0.3,color="black")+
facet_grid(~year)+
stat_density(geom="line",position="identity",size=1.2,aes(color=factor(year)))
## Bar charts
p <- ggplot(mpg,aes(x=class))
p+geom_bar(aes(fill=factor(year)),alpha=0.5,position = "identity")
# Pie chart (a stacked bar chart in polar coordinates)
p <- ggplot(mpg,aes(x=factor(1),fill=factor(class)))+
geom_bar(width=1)
p+coord_polar(theta="y")
## Boxplots and violin plots
p <- ggplot(mpg,aes(class,hwy,fill=class))
p+geom_boxplot()
p+geom_violin(alpha=0.7,width=0.9)+
geom_jitter(shape=3)
##
46f66398e8c0f411976fbd1d709901866509acf2 | 0a072d9ed28ddc9b4256b6c574497078d8dd5360 | /man/list_delete.Rd | 021fddf49feb8b901b55bafe5a42b385bbd543ff | [] | no_license | theoroe3/mailchimpr | c175458f01b20e71bf0d7a5df3f4f35429e7cddf | 848224c13d7b6a5ca114675a9eeeddf62472b691 | refs/heads/master | 2020-12-23T04:58:55.218590 | 2020-01-30T23:24:04 | 2020-01-30T23:24:04 | 237,041,523 | 0 | 0 | null | null | null | null | UTF-8 | R | false | true | 445 | rd | list_delete.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/lists.R
\name{list_delete}
\alias{list_delete}
\title{Delete a list/audience}
\usage{
list_delete(api = NULL, list_id)
}
\arguments{
\item{api}{Character. Your private api key. If api is `NULL`, the environment variable `Sys.getenv("mailchimp_api")` is used.}
\item{list_id}{Character. The ID of a list. See `get_lists()`.}
}
\description{
Delete a list/audience.
}
|
ca58f3290e9d3e46ef4aaa28f15439edffe6c26f | 67615957a9f5d2f74817db4ce219fe10644c0ae0 | /courses/stat587Eng/slides/Inference/I05-Confidence_intervals/Sampling_distribution.R | 11a66397ea6e5da9b6afcd984898c80407843a4e | [] | no_license | jarad/jarad.github.com | 29ed8dc5583a52a57cd26bac252d071a0ff623a9 | 00a2bada3de6d6aa89b4795f52d5b134dd3edfe7 | refs/heads/master | 2023-08-09T10:30:19.203097 | 2023-07-30T14:54:31 | 2023-07-30T14:54:31 | 6,108,556 | 9 | 21 | null | null | null | null | UTF-8 | R | false | false | 4,371 | r | Sampling_distribution.R | ## ----libraries, message=FALSE, warning=FALSE, cache=FALSE------------------------------------------------------
# Purled from an R Markdown lecture; the "## ----name, options----" lines are
# the original knitr chunk headers.
library("plyr")
library("dplyr")
library("tidyr")
library("ggplot2")
## ----set_seed, echo=FALSE--------------------------------------------------------------------------
# Fixed seed so the simulated sampling distributions are reproducible.
set.seed(2)
## ----normal_samples, cache = TRUE------------------------------------------------------------------
# 1000 replicate samples of size n from N(mu, sigma^2), for each n in ns.
mu = 35
sigma = 5
ns = 10*(2:5)
samples = expand.grid(rep = 1:1000,
n = ns,
mu = mu,
sigma = sigma) %>%
dplyr::group_by(rep, n) %>%
do(data.frame(samples = rnorm(.$n, mean = mu, sd = sigma)))
## ----normal_average, dependson = "normal_samples"--------------------------------------------------
# Histogram of sample means overlaid with the exact N(mu, sigma^2/n) density.
d = samples %>%
dplyr::summarize(average = mean(samples),
.groups = "keep") %>%
dplyr::mutate(n = paste("n =", n))
density = expand.grid(x = seq(from = mu-sigma, to = mu+sigma, length = 1001),
n = ns) %>%
dplyr::mutate(density = dnorm(x, mean = mu, sd = sigma/sqrt(n)),
n = paste("n =", n))
ggplot(d, aes(x = average)) +
# NOTE(review): ..density.. is the deprecated notation for after_stat(density)
# in current ggplot2 -- still works, but consider updating.
geom_histogram(aes(y=..density..), binwidth = .1) +
geom_line(data = density, aes(x=x, y = density), color = "red") +
facet_wrap(~n, scales = "free_y") +
labs(title = paste0("Sampling distribution for N(",mu,", ",sigma^2,") average")) +
theme_bw()
## ----t_statistic, dependson = "normal_samples", fig.height=3.7-------------------------------------
# t-statistic per sample, overlaid with the exact t density on n-1 df.
mu = 35
sigma = 5
ns = 10*(2:5)
d = samples %>%
dplyr::summarize(sample_mean = mean(samples),
sample_sd = sd(samples),
t = (sample_mean - mu)/(sample_sd/sqrt(n)),
n = paste("n =", n),
.groups = "keep")
density = expand.grid(x = seq(from = -4, to = 4, length = 1001),
n = ns) %>%
dplyr::mutate(density = dt(x, df = n-1),
n = paste("n =", n))
ggplot(d, aes(x = t)) +
geom_histogram(aes(y=..density..), binwidth = .1) +
geom_line(data = density, aes(x=x, y = density), color = "red") +
facet_wrap(~n, scales = "free_y") +
labs(title = paste0("Sampling distribution of the t-statistic")) +
theme_bw()
## ----binomial_samples, cache = TRUE----------------------------------------------------------------------------
# 1000 binomial draws for each (n, p) combination; phat is the sample
# proportion y/n.  n and p are then relabelled as facet strings.
ns = c(10,100)
ps = c(.5,.8)
samples = expand.grid(rep = 1:1000,
n = ns,
p = ps) %>%
dplyr::group_by(n, p) %>%
dplyr::mutate(y = rbinom(n(), size = n, prob = p),
phat = y/n,
p = paste("p =", p),
n = paste("n =", n))
## ----binomial_proportion, dependson = "binomial_samples", fig.height=3.7---------------------------------------
# Exact binomial pmf on the support of possible proportions, for overlay.
# Note: the `values` column in expand.grid() is immediately replaced by do(),
# which rebuilds it per group at the correct resolution for that group's n.
pmf = expand.grid(n = ns, p = ps, values = (0:max(ns))/max(ns)) %>%
dplyr::group_by(n, p) %>%
do(data.frame(values = (0:max(.$n))/max(.$n))) %>%
dplyr::mutate(
pmf = dbinom(values*n, size = n, prob = p),
p = paste("p =", p),
n = paste("n =", n)) %>%
dplyr::filter(pmf > 0)
ggplot(samples, aes(x = phat)) +
# NOTE(review): ..prop.. is the deprecated notation for after_stat(prop).
geom_bar(aes(y = ..prop..)) +
geom_point(data = pmf, aes(x=values, y = pmf), color = "red") +
facet_grid(n~p, scales = "free_y") +
labs(title = paste0("Sampling distribution for binomial proportion"),
x = "Sample proportion (y/n)",
y = "") +
theme_bw()
## ----dependson = "binomial_samples", fig.height=3.7------------------------------------------------------------
# Normal approximation N(p, p(1-p)/n) to the sampling distribution of phat;
# shown only for n > 30 where the approximation is reasonable.
pmf = expand.grid(n = ns, p = ps,
prop = seq(0,1,length=101)) %>%
dplyr::mutate(
pmf = dnorm(prop, mean = p, sd = sqrt(p*(1-p)/n)),
p = paste("p =", p),
n = paste("n =", n)) %>%
dplyr::filter(n > 30)
# Rescale the bar heights so the empirical distribution is on the density
# scale and therefore comparable with the overlaid normal curve.
ggplot(samples %>%
dplyr::group_by(n,p,phat) %>%
dplyr::summarize(count = n(), .groups = "keep") %>%
dplyr::group_by(n,p) %>%
dplyr::arrange(phat) %>%
dplyr::mutate(height = count / sum(count) / min(diff(phat))),
aes(x = phat, y = height)) +
geom_bar(stat = "identity") +
geom_line(data = pmf, aes(x=prop, y = pmf), color = "red") +
facet_grid(n~p, scales = "free_y") +
labs(title = paste0("Approximate sampling distributions for binomial proportion"),
x = "Sample proportion (y/n)",
y = "") +
theme_bw()
|
746933a6da71ecd7fe2563c9b64676e8f7ab1eb4 | 39d3e308c0faba3b645d51f097320ce787524bb5 | /05 Reproducible research/reproducible lectures.R | 8fb935f5e75068af86c3c892f635eb1806412028 | [] | no_license | ksetdekov/datasciencecoursera | df24e3d45010526689b8f55db63944a0cf4d40b9 | fab28b845d56a824bcf08a3ac04ca69b7e562ad1 | refs/heads/master | 2021-07-15T06:34:21.731826 | 2020-05-14T08:50:30 | 2020-05-14T08:50:30 | 130,076,178 | 0 | 0 | null | 2019-03-27T09:57:17 | 2018-04-18T14:36:34 | HTML | UTF-8 | R | false | false | 2,210 | r | reproducible lectures.R | setwd("C:/Users/ksetd/Dropbox/datascience/datasciencecoursera/05 Reproducible research")
# Spam-classification walkthrough (Coursera Reproducible Research lecture).
library(kernlab)
data(spam)
set.seed(3435)
# Random 50/50 split of the spam data into training and test sets.
trainIndicator=rbinom(dim(spam)[1],size = 1,prob=0.5)
table(trainIndicator)
trainSpam=spam[trainIndicator==1,]
testSpam=spam[trainIndicator==0,]
# Exploratory summaries of the training set.
names(trainSpam)
table(trainSpam$type)
plot(trainSpam$capitalAve~trainSpam$type)
# Conditional-inference tree on all predictors.
library(party)
cfit1 <- ctree(type ~ ., data = trainSpam)
plot(cfit1)
traintypepred <- predict(cfit1, trainSpam)
library(InformationValue)
library(MLmetrics)
# Recode factor predictions and truth as 0/1 (1 = spam) for ROC/Gini.
traintypepred <-ifelse(traintypepred=="spam",1,0)
truth <- ifelse(trainSpam$type=="spam",1,0)
testtypepred <- predict(cfit1, testSpam)
testtypepred <-ifelse(testtypepred=="spam",1,0)
truthtest <- ifelse(testSpam$type=="spam",1,0)
plotROC(traintypepred, truth)
Gini(truth, traintypepred)
plotROC(testtypepred, truthtest)
Gini(truthtest, testtypepred)
# log10(x + 1) transform to tame the heavy right skew before plotting.
plot(log10(trainSpam$capitalAve+1)~trainSpam$type)
plot(log10(trainSpam[,1:4]+1))
#use hclust
# Hierarchical clustering of the 57 predictors (transposed so variables,
# not observations, are clustered); repeated on the log scale.
hClust <- hclust(dist(t(trainSpam[,1:57])))
plot(hClust)
hClustup <- hclust(dist(t(log10(trainSpam[,1:57]+1))))
plot(hClustup)
## statistical model ####
trainSpam$numType = as.numeric(trainSpam$type) - 1
costFunction = function(x, y) sum(x != (y > 0.5))
cvError = rep(NA, 55)
library(boot)
for (i in 1:55) {
lmFormula = reformulate(names(trainSpam)[i], response = "numType")
glmFit = glm(lmFormula, family = "binomial", data = trainSpam)
cvError[i] = cv.glm(trainSpam, glmFit, costFunction, 2)$delta[2]
}
## Which predictor has minimum cross-validated error?
names(trainSpam)[which.min(cvError)]
## Use the best model from the group
predictionModel = glm(numType ~ charDollar, family = "binomial", data = trainSpam)
## Get predictions on the test set
predictionTest = predict(predictionModel, testSpam)
predictedSpam = rep("nonspam", dim(testSpam)[1])
## Classify as `spam' for those with prob > 0.5
predictedSpam[predictionModel$fitted > 0.5] = "spam"
table(predictedSpam, testSpam$type)
traintypepredlect <-ifelse(predictedSpam=="spam",1,0)
(61 + 458)/(1346 + 458 + 61 + 449)
Gini(truthtest, traintypepredlect)
Gini(truthtest, testtypepred)
table( testtypepred,truthtest)
(107+104)/(107+104+1300+803)
## week 3
|
2304bbfea64ec8572491c5ecc2817811d9f48bbd | 0500ba15e741ce1c84bfd397f0f3b43af8cb5ffb | /cran/paws.application.integration/man/swf_describe_domain.Rd | 3a616147096935440fcd9a7989a144a97450b074 | [
"Apache-2.0"
] | permissive | paws-r/paws | 196d42a2b9aca0e551a51ea5e6f34daca739591b | a689da2aee079391e100060524f6b973130f4e40 | refs/heads/main | 2023-08-18T00:33:48.538539 | 2023-08-09T09:31:24 | 2023-08-09T09:31:24 | 154,419,943 | 293 | 45 | NOASSERTION | 2023-09-14T15:31:32 | 2018-10-24T01:28:47 | R | UTF-8 | R | false | true | 554 | rd | swf_describe_domain.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/swf_operations.R
\name{swf_describe_domain}
\alias{swf_describe_domain}
\title{Returns information about the specified domain, including description
and status}
\usage{
swf_describe_domain(name)
}
\arguments{
\item{name}{[required] The name of the domain to describe.}
}
\description{
Returns information about the specified domain, including description and status.
See \url{https://www.paws-r-sdk.com/docs/swf_describe_domain/} for full documentation.
}
\keyword{internal}
|
98d9a97fd8e1825606d44c80adf3d36b9e239c59 | ddbc8d66f3ef3617aacdd4e6b8de5cbb4897fd96 | /maryamCode/generateAllStates.R | f586d41147a00143d0ff0e8cbe083f74fa26986e | [] | no_license | Venlanen/strandsequtils | 7594238693b0496b7104bb6c657a5ff6fc042276 | 21c08fb25810ed3f5570e5956e54fa482cd6b89c | refs/heads/master | 2021-07-16T00:35:56.343268 | 2017-10-18T13:40:02 | 2017-10-18T13:40:02 | 107,122,636 | 0 | 0 | null | 2017-10-16T12:12:54 | 2017-10-16T12:12:54 | null | UTF-8 | R | false | false | 354 | r | generateAllStates.R | #tested
#' generates all binary strings with n 0s and m 1s
#'
#' Starts from the initial state returned by \code{initialState(n, m)} and
#' repeatedly asks \code{getNextState} for the successor until it returns
#' \code{FALSE}, collecting every visited state.
#'
#' @param n the number of 0s
#' @param m the number of 1s
#' @return vector of all generated states
#' @author Maryam Ghareghani
#' @export
#'
allStatus = function(n,m)
{
  allStat = NULL
  status = initialState(n,m)
  # getNextState() signals exhaustion by returning FALSE; identical() makes
  # the termination test robust (the original `status != FALSE` relied on
  # implicit coercion of the state value in the comparison).
  while(!identical(status, FALSE))
  {
    allStat = c(allStat, status)
    status = getNextState(status)
  }
  allStat
}
e76154163ef10acd40f83494b8be944bab5ba062 | 72ab89b96248e135427e31e9c97fb055fdb307b6 | /man/venenos.Rd | 41445c4ab6823fd24b897de56d3fc017d2bb6c0c | [] | no_license | javiercara/DisRegETSII | a0b6ab9f061ec94f72146a6860074f3f635a46dd | 851788b05b98efc819fa0f648699faa41d0181fc | refs/heads/master | 2021-01-20T11:34:18.246895 | 2020-02-03T16:29:04 | 2020-02-03T16:29:04 | 79,790,355 | 0 | 0 | null | null | null | null | UTF-8 | R | false | true | 1,138 | rd | venenos.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/venenos.R
\docType{data}
\name{venenos}
\alias{venenos}
\title{Datos de supervivencia de roedores a los que se les habian suministrado diferentes venenos y antidotos}
\format{Lista con los siguientes campos:
\itemize{
\item ven: tipo de veneno suministrado ('I','II','III')
\item ant: tipo de antidoto suministrado ('A','B','C','D')
\item tiempo: tiempo de supervivencia del roedor (en unidades de 10 horas)
}}
\usage{
data(venenos)
}
\description{
Se pretende combatir los efectos de ciertos agentes toxicos.
Para ello, se analiza el efecto de tres venenos y cuatro antidotos en el tiempo de supervivencia
de unas ratas. Para el experimento se cogieron 48 animales y se asignaron al azar a cada uno
de los doce tratamientos resultantes de combinar venenos y antídotos. A cada rata se le
suministro una cierta cantidad de veneno y despues de un cierto tiempo se les administro el
antidoto. A partir de este momento se cuenta el tiempo de supervivencia del roedor.
}
\examples{
data(venenos)
\donttest{aov(tiempo ~ ven*ant,data=venenos)}
}
\keyword{datasets}
|
2e64ea1b066fbb175f6482c90f6fc07fba17ab50 | 21ee3aa10d114c0fa72a63659d6a6387d2cab2fb | /r.functions/check.introns.F.R | bfdcb73a02940f696ef2ca8191d27dbcca6fab17 | [] | no_license | iaaaka/evo-devo | 27c25bf5e7eb86daa48ab4d8479e7ec29d2310de | 4bbe57c9867a18ba67cb14c80f349ac9228a5408 | refs/heads/master | 2021-06-28T19:20:45.485510 | 2021-02-19T07:57:04 | 2021-02-19T07:57:04 | 224,820,497 | 6 | 4 | null | null | null | null | UTF-8 | R | false | false | 9,465 | r | check.introns.F.R | library(vioplot)
library(reshape)
# Plots the fraction of canonical splice sites as a function of the total
# number of samples an intron was detected in.
#
# x: object with x$introns (data.frame with a logical `canonical` column) and
#    x$sam.cnts (intron x species matrix of per-species sample counts).
# species: column of x$sam.cnts splitting introns into "self" (detected in at
#    least min.sam.no samples of this species) and "!self".
# min.sam.no: minimal sample count for a species to count as "detected".
# more.or.equal.fun: name of a comparison function ('>=' or '==') applied to
#    the number of species an intron is detected in.
plotProportionOfCannonicalSites = function(x,species,min.sam.no,more.or.equal.fun,...){
	# Canonical fraction per sample-count bin, cumulated from the right:
	# y[k] = fraction of canonical sites among introns seen in >= k samples.
	get.freq = function(l,f){
		r = split(l,f)
		r = r[order(as.numeric(names(r)))]
		t = rev(cumsum(rev(sapply(r,length))))
		c = rev(cumsum(rev(sapply(r,sum))))
		list(y=c/t,x=as.numeric(names(t)))
	}
	# Look the comparison function up by name once; match.fun() handles any
	# function (the original .Primitive() call only covers primitives).
	cmp = match.fun(more.or.equal.fun)
	canon.seq = x$introns$canonical
	self = x$sam.cnts[,species] >= min.sam.no
	sp.cnt = apply(x$sam.cnts >= min.sam.no,1,sum)
	sam.cnt = apply(x$sam.cnts,1,sum)
	canon.freq = list()
	sc = ncol(x$sam.cnts)
	for(i in 1:sc){
		ff = self & cmp(sp.cnt,i)
		canon.freq[[i]] = get.freq(canon.seq[ff],sam.cnt[ff])
		ff = !self & cmp(sp.cnt,i)
		if(sum(ff)>0){
			canon.freq[[sc+i]] = get.freq(canon.seq[ff],sam.cnt[ff])
		}
	}
	ylim=range(sapply(canon.freq,function(z){range(z$y)}))
	xlim=range(sapply(canon.freq,function(z){range(z$x)}))
	plot(1,t='n',xlim=xlim,ylim=c(0.1,1),log='',xlab='# samples',ylab='% of canonical sites',...)
	# Blue->red gradient over species counts; translucent copies for "!self".
	col=getPal(c('#0000FF','#FF0000'),sc)
	col=c(col,paste(substr(col[-1],1,7),'60',sep=''))
	for(i in 1:length(canon.freq))
		lines(canon.freq[[i]],lwd=3,col=col[i],lty=(i>sc)+1)
	abline(h=0.95)
	legend('bottomright',col=col,lty=(1:length(col)>sc)+1,legend=c(paste(' self, sp',more.or.equal.fun,1:7,sep=''),paste('!self, sp',more.or.equal.fun,1:6,sep='')),ncol=2)
}
# Violin plots of log10(total sample count + 1) per species combination
# (x$introns$species), plus the number of introns per group as a black line
# on a secondary (right-hand) log axis.
plotIntronSampleCntBySpecies = function(x,...){
# Split log-transformed total counts by species string; order groups by the
# number of species letters in the name, then by total.
lens = split(log10(apply(x$sam.cnts,1,sum)+1),x$introns$species)
nms = gsub('-','',names(lens),fixed = TRUE)
lens = lens[order(-nchar(nms),-sapply(lens,sum))]
ymax=max(unlist(lens))
plot(1,t='n',xaxt='n',yaxt='n',ylim=c(0,ymax),xlim=c(0.5,length(lens)+0.5),...)
axis(1,at=1:length(lens),names(lens),las=2)
# y axis labelled in original (untransformed) sample counts.
lab=10^(0:ymax)
axis(2,at=log10(lab+1),labels = lab)
# Color encodes the number of species in the combination.
col = nchar(gsub('-','',names(lens),fixed = TRUE))+1
for(i in 1:length(lens))
vioplot(lens[[i]],at = i,col=col[i],add=TRUE)
#plot intron count
# Intron counts per group, rescaled into the plotting region and labelled on
# the right-hand axis (axis 4).
cnt = log10(sapply(lens,length))
lab = 10^(0:max(cnt))
at = log10(lab)/max(cnt)*ymax
cnt = cnt/max(cnt)*ymax
points(1:length(cnt),cnt,pch=19,cex=2,type='b')
axis(4,at=at,labels = lab)
mtext('# of introns',side = 4,line = 2)
}
# Writes the coordinates of "good" introns to a tab-separated file.
# An intron qualifies when its maximal per-species sample count reaches
# thr.canon (for canonical GTAG/CTAC sites) or thr.other (for any site).
saveGoodSites = function(d,thr.canon,thr.other,file){
  max.cnt = apply(d$sam.cnts, 1, max)
  canonical = d$intron$seqs %in% c('GTAG', 'CTAC')
  keep = (canonical & max.cnt >= thr.canon) | max.cnt >= thr.other
  cat('Saved', sum(keep), 'introns\n')
  write.table(d$intron[keep, c('chr.id', 'start', 'stop', 'strand')],
              file, sep = '\t', quote = FALSE,
              row.names = FALSE, col.names = FALSE)
}
# Loads the tab-separated output of merge.lo.py into a 'sajr' object.
#
# f: path to the merged liftover table; columns 1-6 describe the intron
#    (coordinates, strand, sequence, "doubled" species codes), columns 7-13
#    hold per-species identities and columns 14-20 per-species sample counts,
#    both in the fixed sp.order used by merge.lo.py.
# species: named character vector (short code -> species name) selecting and
#    ordering the species columns to keep.
loadMergedLiftovered = function(f,species){
# Reverse lookup: species name -> short code, reordered to merge.lo.py order.
species.r = setNames(names(species),species)
sp.order = c('mouse','chicken','rabbit','rat','human','macaque','opossum') #it is order used in merge.lo.py
species.r = species.r[sp.order]
r = read.table(f,sep='\t')
i = r[,1:6]
colnames(i) = c('chr.id','start','stop','strand','seq','doubled.sp')
# Translate numeric species indices in doubled.sp (e.g. "13") into the
# corresponding short species codes.
i$doubled.sp[!is.na(i$doubled.sp)] = sapply(strsplit(as.character(i$doubled.sp[!is.na(i$doubled.sp)]),''),function(x){paste(species.r[as.numeric(x)],collapse='')})
iden = r[,7:13]
s = r[,14:20]
colnames(s) = colnames(iden) = sp.order
s = s[,species]
iden = iden[,species]
# One-letter-per-species presence string; '-' marks species without support.
i$species = apply(s,1,function(x){paste(ifelse(x>0,species.r,'-'),collapse='')})
r = list(introns = i,sam.cnts=s,identity=iden)
class(r) = c('sajr','list')
r
}
# DEPRECATED: aborts immediately via stop() below - different introns can be
# liftovered into the same/different positions, so merge.lo.py (and
# loadMergedLiftovered) must be used instead. Everything after the stop() is
# unreachable and kept only for reference.
loadLiftovered = function(original,species,path='~/iitp.disk/Solexa/ma.disk/mazin/evo.devo/mapping/junctions/',sp.names){
stop("different introns can be liftovered into different positions!! Use merge.lo.py instead")
original = original[,c('seq','sam.cnt','strand')]
# One liftover file per target species; list element name = source prefix.
files = list.files(path,paste(species,'.out',sep=''))
lo = vector('list',length(files))
names(lo) = sapply(strsplit(files,'2',TRUE),'[',1)
for(i in 1:length(files)){
t = read.table(paste(path,files[i],sep='/'),skipNul = TRUE)[,c(1,4,5,7,9)] # skipNul = TRUE to evide 'embedded nul(s) found in input'... have no idea what it means
lo[[i]] = setNames(t[,5],do.call(paste,c(t[,1:4],sep=':')))
}
# Union of all intron ids ("chr:start:stop:strand") across species.
all.ints = unique(c(rownames(original),unlist(lapply(lo,names))))
#seqs = sam.cnts = strands = matrix(nrow=length(all.ints),ncol=length(lo)+1)
#rownames(seqs) = rownames(sam.cnts) = rownames(strands) = all.ints
#colnames(seqs) = colnames(sam.cnts) = colnames(strands) = c(species,sapply(strsplit(names(lo),'To',TRUE),'[',1))
sam.cnts = matrix(nrow=length(all.ints),ncol=length(lo)+1)
rownames(sam.cnts) = all.ints
colnames(sam.cnts) = c(species,sapply(strsplit(names(lo),'To',TRUE),'[',1))
#strands[rownames(original),1] = original[,'strand']
#seqs[rownames(original),1] = original[,'seq']
sam.cnts[rownames(original),1] = original[,'sam.cnt']
lo = lapply(lo,function(x){strsplit(x,':',TRUE)})
# Fill per-species sample counts (field 7 of the colon-separated payload).
for(i in 1:length(lo)){
print(i)
#seqs[names(lo[[i]]),1+i] = sapply(lo[[i]],'[',6)
sam.cnts[names(lo[[i]]),1+i] = as.numeric(sapply(lo[[i]],'[',7))
#strands[names(lo[[i]]),1+i] = sapply(lo[[i]],'[',5)
}
# Rebuild intron coordinates from the ids and assemble the 'sajr' result.
introns = as.data.frame(do.call(rbind,strsplit(all.ints,':',TRUE)))
colnames(introns) = c('chr.id','start','stop','strand')
introns$start = as.integer(introns$start)
introns$stop = as.integer(introns$stop)
sam.cnts[is.na(sam.cnts)] = 0
sp = sp.names[colnames(sam.cnts)]
introns$species = apply(sam.cnts>0,1,function(x){paste(sort(sp[x]),collapse='')})
r = list(introns = introns,sam.cnts=sam.cnts)#,seqs=seqs,strands=strands)
class(r) = c('sajr','list')
r
}
# Writes introns supported by at least min.sam.cnt samples as GFF-like
# tab-separated lines; the attribute column is "species:id:seq:sam.cnt".
printIntronsAsGFF = function(i, min.sam.cnt, species, out){
  ints = i[i$sam.cnt >= min.sam.cnt, ]
  attr.col = paste(species, rownames(ints), ints$seq, ints$sam.cnt, sep = ':')
  gff = cbind(ints$chr.id, '.', '.', ints$start, ints$stop,
              '.', ints$strand, '.', attr.col)
  write.table(gff, file = out, col.names = FALSE, row.names = FALSE,
              quote = FALSE, sep = '\t')
}
# Draws two stacked plots of splice-site composition for one species:
# (1) per sample (rows of x$seq.stat, ordered by canonical fraction) and
# (2) per number of samples an intron was detected in (x$sam.stat).
# Each plot is built from two plotSeqStat() calls: forward-strand classes in
# the upper band (yrange 0.5-1.5) and reverse/minor classes in the lower one.
plotintronStatForSpecies = function(x,species){
# Order samples by their combined GTAG+CTAC (canonical) fraction.
o = order(apply(sweep(x$seq.stat,1,apply(x$seq.stat,1,sum),'/')[,c('GTAG','CTAC')],1,sum))
plotSeqStat(x$seq.stat[o,],xlab='Samples',main=paste(species,'. By samples.',sep=''),ylab='freq',ylim = c(0,1.5),yrange = c(0.5,1.5))
plotSeqStat(x$seq.stat[o,],ord=c('ATAC','GCAG','CTGC','GTAT','others'),add=T,yrange = c(0,0.45))
# Separator between the two frequency bands.
abline(h=0.475)
# Site-type counts by the number of samples each intron was detected in.
t = table(x$sam.stat$sam.cnt,x$sam.stat$seq)
plotSeqStat(t,xlab='# samples',main=paste(species,'. By number of samples where detected.',sep=''),ylab='freq',xlog=TRUE,yrange = c(0.5,1.5),ylim=c(0,1.5),plot.total = T)
plotSeqStat(t,ord=c('ATAC','GCAG','CTGC','GTAT'),xlog=TRUE,add=T,yrange = c(0,0.45))
mtext('# of introns',4,2.3,FALSE)
abline(h=0.475)
}
# Collects junction statistics from per-sample *.splicesites files.
#
# dir2sam: directory containing one ".splicesites" file per sample, with the
#    junction coordinates in columns 1-4.
# merged: merged junction table; columns 1,4,5,7 give coordinates/strand and
#    column 9 a colon-separated payload whose 6th field is the site sequence.
# Returns a list with sam.stat (per-junction table with a sam.cnt column) and
# seq.stat (samples x site-sequence count matrix).
loadJunctionStat = function(dir2sam,merged){
r = read.table(merged,stringsAsFactors = FALSE)[,c(1,4,5,7,9)]
colnames(r) = c('chr.id','start','stop','strand','seq')
r$seq = sapply(strsplit(r$seq,':',fixed = TRUE),'[',6)
# Junction id "chr:start:stop:strand" used to match per-sample files.
rownames(r) = do.call(paste,c(r[,1:4],sep=':'))
r$sam.cnt = 0
sams = list.files(dir2sam,'.splicesites')
unuq.seqs = unique(r$seq)
seqs = matrix(nrow = length(sams),ncol=length(unuq.seqs))
colnames(seqs) = unuq.seqs
rownames(seqs) = sams
for(i in 1:length(sams)){
# Progress indicator (overwrites the same console line).
cat('\r',i,length(sams))
t = read.table(paste(dir2sam,sams[i],sep='/'),stringsAsFactors = FALSE)
t = do.call(paste,c(t[,1:4],sep=':'))
# Count site-sequence occurrences in this sample and bump per-junction
# sample counts for every junction the sample contains.
seqs[i,] = table(factor(r[t,'seq'],levels = unuq.seqs))[unuq.seqs]
r[t,'sam.cnt'] = r[t,'sam.cnt'] + 1
}
list(sam.stat=r,seq.stat = seqs)
}
# Stacked-area plot of splice-site sequence frequencies (GTAG/CTAC/GCAG/CTGC/
# ATAC/GTAT plus 'others') across the rows of t.
#
# t: matrix/table, one row per x position (numeric row names are used as x
#    coordinates when available), one column per site sequence.
# yrange: vertical band of the plot the frequencies are rescaled into, so a
#    second call with add=TRUE can occupy a different band of the same plot.
# xlog: compress the x axis with a logit-like transform instead of linear.
# ord/col: stacking order and fill colors of the sequence classes.
# plot.total: also draw per-row totals as a thick line, labelled on axis 4.
plotSeqStat = function(t,...,add=F,yrange=c(0,1),ylim=NULL,xlog=FALSE,ord=c('ATAC','GCAG','GTAG','others','CTAC','CTGC','GTAT'),col=c('ATAC'='orange','GCAG'='yellow','GTAG'='red','others'='gray','CTAC'='blue','CTGC'='green','GTAT'='cyan'),plot.leg=!add,plot.total=FALSE){
	# x coordinates: numeric row names when they parse, row index otherwise.
	sam.cnts = suppressWarnings(as.numeric(rownames(t)))
	if(sum(is.na(sam.cnts)) > 0)
		sam.cnts = 1:nrow(t)
	# Linearly rescales x from [mn,mx] into the interval `range`.
	scale = function(x,range,mn=min(x),mx=max(x)){
		(x - mn)/(mx-mn)*(range[2]-range[1]) + range[1]
	}
	xfun = xfun. = function(x){x}
	xat = c(1,seq(50,by = 50,to=max(51,nrow(t))))
	if(xlog){
		# Logit-like compression emphasizing both ends of the x range.
		xfun = function(x,to=max(x)+1){
			log((to-1)*x/(to-x))
		}
		# Inverse of xfun (currently unused, kept for reference).
		# BUGFIX: body read `x = exp(y)` with `y` undefined; must be exp(x).
		xfun. = function(x,to=max(x)+1){
			x = exp(x)
			to*x/(to-1+x)
		}
		xat = c(1:3,5,10,20,40)
		xat = c(xat,seq(100,by=100,to = max(101,max(sam.cnts)-max(xat))),max(sam.cnts)-xat+1)
	}
	# Per-row frequencies of the six named site types plus everything else.
	seq.freq = t[,c('GTAG','CTAC','GCAG','CTGC','ATAC','GTAT')]
	seq.freq = cbind(seq.freq,others = apply(t[,!(colnames(t) %in% colnames(seq.freq))],1,sum))
	seq.freq = sweep(seq.freq,1,apply(seq.freq,1,sum),'/')
	seq.freq = seq.freq[,ord]
	col = rev(col[ord])
	# Cumulate from the right to obtain the stacked band boundaries.
	seq.freq = t(apply(seq.freq,1,function(x){rev(cumsum(x))}))
	freq.range = range(0,seq.freq,na.rm=T)
	seq.freq = scale(seq.freq,yrange)
	if(is.null(ylim))
		ylim = c(0,max(apply(seq.freq,1,max)))
	seq.freq = cbind(seq.freq,yrange[1])
	x = xfun(c(sam.cnts,rev(sam.cnts)))
	if(!add){
		plot(1,t='n',xlim=c(0,max(x)*1.2),ylim=ylim,yaxt='n',xaxt='n',...)
		axis(1,at=xfun(xat),xat,las=3)
	}
	# One polygon per sequence class between consecutive band borders.
	for(i in 2:ncol(seq.freq)){
		polygon(x,c(seq.freq[,i-1],rev(seq.freq[,i])),col=col[i-1],border = NA)
	}
	if(plot.leg)
		legend(xfun(max(sam.cnts))*1.01,yrange[2],fill=col,legend=names(col))
	if(plot.total){
		# Row totals on a log scale, rescaled into yrange; labels on axis 4.
		total = log(apply(t,1,sum))
		lab = c(1,3,10,30,100,300,1000,3000,10000,30000,100000,300000,1000000,3000000)
		lab = lab[log(lab)>min(total) & log(lab)<max(total)]
		at = log(lab)
		at = scale(at,yrange,min(total),max(total))
		total = scale(total,yrange)
		lines(xfun(sam.cnts),total,lwd=3)
		axis(4,at,lab,las=2,tck=0.02,mgp=c(1.1,0.1,0))
	}
	# Frequency axis (axis 2), rounded to a sensible number of digits.
	lab = seq(from=freq.range[1],to=freq.range[2],length.out = 5)
	n0 = as.character(lab)
	n0 = max(nchar(n0) - nchar(gsub('0\\.0*','',n0,perl=TRUE)))-1
	lab = unique(round(lab,n0))
	at = scale(lab,yrange)
	axis(2,at=at,labels = lab,las=2,tck=0.02,mgp=c(1.1,0.1,0))
}
|
84f368c0f5f4355a833ec0f5a12d6a7f065e26cf | e59bfb6100c4ef2de8ba99b865f510db5a47cb52 | /scripts/Fishers/sort_combine_threshhold.R | 50b0b16fde50d630a569654c02989dfe28f1f095 | [] | no_license | jmmarzolino/CCII_BOZ | 0330a1a8abe2249e4412ad73d7309d451c6d8f0a | bca16e6fde90c989a7c1b90c9a65cf000d1677c2 | refs/heads/master | 2021-12-11T18:00:35.595392 | 2021-11-30T23:01:15 | 2021-11-30T23:01:15 | 214,494,990 | 1 | 0 | null | null | null | null | UTF-8 | R | false | false | 4,339 | r | sort_combine_threshhold.R | #!/usr/bin/env Rscript
#SBATCH --ntasks=1
#SBATCH --mem=50G
#SBATCH --time=2-00:00:00
#SBATCH --job-name='sort_combine'
#SBATCH --output=/rhome/jmarz001/bigdata/CCII_BOZ/scripts/sort_combine_threshhold.stdout
#SBATCH -p batch

# first run the script `cum_pos.R` with a bunch of fixes for multiple file names and different column numbers
setwd("/bigdata/koeniglab/jmarz001/CCII_BOZ/results")
library(readr)

# One row per SNP: column 1 = chromosome name, column 2 = position within it.
all_freqs <- read_delim("all_freqs","\t", col_names = FALSE, trim_ws = TRUE)
pvals <- read_delim("pvals","\t", col_names = FALSE, trim_ws = TRUE)
files <- list(all_freqs, pvals)
outnames <- c("freqs_cum_pos", "pval_cum_pos")
chromosomes <- c("chr1H","chr2H","chr3H","chr4H","chr5H","chr6H","chr7H")

# For each table, append a genome-wide cumulative position column so that all
# chromosomes can share a single x axis, then write the table sorted by it.
for (p in seq_along(files)){
sample <- data.frame(files[p])
newcol <- ncol(sample)+1

# Cumulative chromosome offsets: each chromosome's maximal position plus the
# lengths of all preceding chromosomes.
# BUGFIX: the position column must be selected with the literal index 2; the
# original used sample[,2] (the position values themselves) as column indices.
len1 <- max(sample[which(sample[,1]==chromosomes[1]),2])
len2 <- max(sample[which(sample[,1]==chromosomes[2]),2]) + len1
len3 <- max(sample[which(sample[,1]==chromosomes[3]),2]) + len2
len4 <- max(sample[which(sample[,1]==chromosomes[4]),2]) + len3
len5 <- max(sample[which(sample[,1]==chromosomes[5]),2]) + len4
len6 <- max(sample[which(sample[,1]==chromosomes[6]),2]) + len5
len7 <- max(sample[which(sample[,1]==chromosomes[7]),2]) + len6

# Shift every position by the offset of the preceding chromosomes.
for (row in 1:nrow(sample)) {
chr_val <- gsub("chr(\\w+)H", "\\1", sample[row,1])
if (chr_val=="1"){
sample[row,newcol] <- sample[row,2]
}
if (chr_val=="2"){
sample[row,newcol] <- sample[row,2] + len1
}
if (chr_val=="3"){
sample[row,newcol] <- sample[row,2] + len2
}
if (chr_val=="4"){
sample[row,newcol] <- sample[row,2] + len3
}
if (chr_val=="5"){
sample[row,newcol] <- sample[row,2] + len4
}
if (chr_val=="6"){
sample[row,newcol] <- sample[row,2] + len5
}
if (chr_val=="7"){
sample[row,newcol] <- sample[row,2] + len6
}}
# sort the data frames by their cumulative count before writing the table out
outsample <- sample[order(sample[,newcol]),]
write.table(outsample, file=outnames[p], quote=F ,sep="\t",row.names=F,col.names=F)
}

freqs_cum_pos <- read_delim("freqs_cum_pos","\t", col_names = FALSE, trim_ws = TRUE)
pval_cum_pos <- read_delim("pval_cum_pos","\t", col_names = FALSE, trim_ws = TRUE)

# combine the two sorted frames, since they were sorted by the same column, they should be in the same position order
# freqs_cum_pos, pvals
colnames(freqs_cum_pos) <- c("chromosome","position","parents","F18","F27", "F28", "F50", "F58", "cum_position")
colnames(pval_cum_pos) <- c("chromosome","position","pvalue","cum_position")
bind_frame <- cbind(freqs_cum_pos, pval_cum_pos)
write.table(bind_frame, file="bind_frame", quote=F ,sep="\t",row.names=F,col.names=T)
merge_frame <- merge(freqs_cum_pos, pval_cum_pos, by="cum_position")
write.table(merge_frame, file="merge_frame", quote=F ,sep="\t",row.names=F,col.names=T)

# did it work? check if the cumulative positions match by looking for where they don't
bind_mismatch <- bind_frame[which(bind_frame$cum_position != bind_frame$cum_position.1),]
# BUGFIX: write the mismatch table just computed (the original referenced an
# undefined variable `mismatch`, which would abort the script here).
write.table(bind_mismatch, file="cum_pos_bind_mismatch", quote=F ,sep="\t",row.names=F,col.names=T)
# NOTE(review): merge() by "cum_position" keeps a single key column, so this
# check is always empty; kept for symmetry with the bind_frame check.
merge_mismatch <- merge_frame[which(merge_frame$cum_position != merge_frame$cum_position.1),]
# BUGFIX: write merge_mismatch (the original wrote merge_frame a second time).
write.table(merge_mismatch, file="cum_pos_merge_mismatch", quote=F ,sep="\t",row.names=F,col.names=T)

# then parse by the p-value
# set the cutoff for p-values at 1%
cutoff <- quantile(merge_frame$pvalue,0.01)
# if p-value is greater than the cutoff value, replace p-value with NA
# subset and save the data that is at or below the cutoff
# BUGFIX: address the pvalue column by name; after the merge, column 3 is
# position.x, so the original blanked positions instead of p-values.
merge_frame[which(merge_frame$pvalue >= cutoff),"pvalue"] <- NA
# convert p-vales into positive values and make 1's into 0's with a -log transform!
merge_frame$transform <- -log10(merge_frame$pvalue)
# save the subsetted table
write.table(merge_frame, file="top_1percent", quote=F ,sep="\t",row.names=F,col.names=T)

# graph the p-values to see whole-genome trends
library(ggplot2)
ggplot(data=merge_frame,aes(x=cum_position, y=transform,color=chromosome.x))+geom_point()+xlab("genome position")+ylab("p-value") + theme_minimal() #+ coord_cartesian(ylim = c(0, max(top_1per$X5))) # c(0, 200)
# save the plot
ggsave("sort_combine_threshhold_graph.pdf")
|
0988124189b7a36027a3218433e8d3ca378542a7 | a2ceebfc20220ad30e394ac39c5b61f6e0ab0d10 | /hPVI_2015_histapp/server.R | cffc47cc2e063cba0a2f29b94f8b5e0afacde121 | [] | no_license | TonyAngelo/MN_Politics | bf3e9f58efbdb8d6217df7db2d37e18fdb2abfaa | f8a33ee9d7d3f6a358dc8349d698326231f7c52c | refs/heads/master | 2021-01-19T14:57:05.728741 | 2015-06-18T13:53:31 | 2015-06-18T13:53:31 | 35,682,330 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 1,288 | r | server.R | # required packages
library(shiny)
library(ggplot2)
library(gridExtra)
# Define server logic required to draw a histogram
# Renders a histogram of hPVI values for the Minnesota legislative chamber
# selected in the UI (input$chamber), with a user-chosen bin width
# (input$width) and an optional per-district rug (input$districts).
shinyServer(function(input, output) {
# create the output plot
output$distPlot <- renderPlot({
# get the data for the selected chamber (keep columns 2:9 of the CSV)
x <- read.csv(paste("./data/mn_",tolower(input$chamber),"_hpvi_2015.csv",sep=""))[,2:9]
# add a color factor for coloring the graph (1 when rpvi > 0, else 0;
# mapped to blue and red respectively by scale_fill_manual below)
x$hpvi_color <- as.factor((x$rpvi>0)*1)
# get the bin breaks: histogram spans hPVI -50..50 at the chosen width
bins <- seq(from = -50, to = 50, by = as.numeric(input$width))
# draw the histogram
h_plot <- ggplot(data=x, aes(rpvi, fill=hpvi_color)) +
geom_histogram(breaks=bins) +
scale_fill_manual(values = c("red", "blue")) +
ylab("Number of Districts") +
xlab("hPVI") +
# ggtitle(paste("2015 Minnesota ",toupper(substring(input$chamber, 1,1)),substring(input$chamber, 2)," hPVI Distribution", sep="")) +
theme(legend.position = "none",
plot.title = element_text(size = rel(2)))
# if the show districts check box is selected add the rug
if(input$districts) {
h_plot + geom_rug()
} else {
h_plot
}
})
})
b3cf6d21f7f76d0ddcf44b04ec7a3d6f7eedaa73 | 1239237523470898ec3dec34427dcfc878d4f937 | /191216_도서관별 연간 대출건수 0인 인기도서.R | 1a536c6f9da2fe5cf724cf7ecbd55a1af5620725 | [] | no_license | swoos91/Library_book_recommend | 589868c82c0cf42802b7923a0f29bbae7d88602b | cdfbf2b9269dfa6883e1eae99cfcf0f1830cc945 | refs/heads/master | 2022-04-17T01:07:46.735420 | 2020-04-17T10:12:16 | 2020-04-17T10:12:16 | 256,232,883 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 1,181 | r | 191216_도서관별 연간 대출건수 0인 인기도서.R | setwd('c:/semi/6조')
load('2018년 성북구 도서관별 대출목록.Rdata')
# (translated) Extract the books that overlap between the "popular books for
# young children" lists and the January book-loan list: first collect the
# titles (서명) from all twelve 2018 monthly popular-loan-book CSVs.
name<-c()
#name<-as.data.frame(name)
for (idx in 1:12) {
temp<-read.csv(paste0('c:\\semi\\6조\\성북구\\인기대출도서_2018-',idx,'.csv'),
skip=13, header=T, stringsAsFactors=F)
temp<-temp$서명
print(length(name))
name<-c(name, temp)
print(length(name))
# Deduplicate titles accumulated across months.
name<-unique(name)
print(length(name))
}
# Library loan lists; sb_lib_list is presumably loaded from the .Rdata above.
# [[1]] = January; 대출건수 = loan count, 도서명 = book title.
lib<-sb_lib_list$성북정보도서관
lib1<-lib[[1]]
lib1_0<-lib1 %>% filter(대출건수==0)
# Popular books that were never borrowed in January.
ext<-c()
for (i in 1:length(lib1_0$도서명)) {
if (lib1_0$도서명[i] %in% name) {
print(lib1_0$도서명[i])
ext<-c(lib1_0$도서명[i],ext)
}
}
# (translated) From the January and December loan lists, re-extract the books
# with zero loans, restricted to the books extracted above.
lib12<-lib[[12]]
lib12_0<-lib12 %>% filter(대출건수==0)
ext1<-c()
for (i in 1:length(lib12_0$도서명)) {
if (lib12_0$도서명[i] %in% ext) {
print(lib12_0$도서명[i])
ext1<-c(lib12_0$도서명[i],ext1)
}
}
849fbac3be27a5fb194da97d1dc9397283123f10 | 27e0aa98574e032b0085ba97f7a17b1d1693ca8d | /cachematrix.R | 2cfac468b23ffd7360a2c5f2d1c7d4054dce852e | [] | no_license | LucilaSchmidt/ProgrammingAssignment2 | 9b13d74b18c95e68eb72e7a86e9b6bdbbd1a9a19 | 7f2b56ee01fd359edb44ff57806c86683039be55 | refs/heads/master | 2021-01-22T13:48:19.875994 | 2014-05-18T23:35:59 | 2014-05-18T23:35:59 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 1,420 | r | cachematrix.R | ## These functions are designed to avoid repiting calculations
## If we know that we are going to need the inverse of a matrix a lot of times
## we can calculate it once, and store the result for the next time that we need it.
## These functions are for that.

## This function creates from a matrix a special type of object (a list of
## closures sharing one environment) that stores:
## 1) A function to set the matrix
## 2) A function to get that matrix
## 3) A function to set the inverse of the matrix
## 4) A function to get it.
makeCacheMatrix <- function(x = matrix()) {
    # Cached inverse: NULL until setInverse() is called, and invalidated
    # (reset to NULL) whenever a new matrix is stored via set().
    inv <- NULL
    set <- function(y) {
        x <<- y
        inv <<- NULL
    }
    get <- function() {x}
    # BUGFIX: the original closed this function body with ']' instead of '}',
    # which is a syntax error that prevented the whole file from parsing.
    setInverse <- function(inverse) {inv <<- inverse}
    getInverse <- function() {inv}
    list(set = set, get = get,
         setInverse = setInverse,
         getInverse = getInverse)
}
## Returns the inverse of the special "cache matrix" object `x`: the inverse
## is computed with solve() on the first call and stored in the object, and
## every later call serves the stored copy instead of recomputing it.
cacheSolve <- function(x, ...) {
    # Serve the cached inverse when one has already been stored.
    cached <- x$getInverse()
    if (!is.null(cached)) {
        message("getting cached data")
        return(cached)
    }
    # First request: invert the underlying matrix and remember the result.
    result <- solve(x$get(), ...)
    x$setInverse(result)
    result
}
|
6b5bc1d382fe481ba19bebdc40b0de0b9391c68a | 853cfd087c68c70d5456b331654199f43802c6cd | /man/beadc.Rd | daf3afacc4d2583ba3e6cffc1682b24102ba2181 | [] | no_license | schalkwyk/wateRmelon | 04a30e26765dff4c16d41b98b2283c026625505c | b674068c40f806d6edfd1d703171eba173f74c8c | refs/heads/master | 2023-06-26T00:48:16.638300 | 2023-06-16T15:30:41 | 2023-06-16T15:30:41 | 108,290,350 | 3 | 3 | null | 2021-10-04T12:31:58 | 2017-10-25T15:38:03 | R | UTF-8 | R | false | false | 733 | rd | beadc.Rd | \name{beadc}
\alias{beadc}
\title{
Calculates the number of samples with bead count <3 for each probe in matrix of bead count values
}
\description{
Calculates the number of samples with bead count <3 for each probe in matrix of bead count values.
}
\usage{
beadc(x)
}
\arguments{
\item{x}{
matrix of bead count values returned by the beadcount function
}
}
\value{
Vector of number of samples with bead count <3 for each probe
}
\references{
[1] Pidsley R, Wong CCY, Volta M, Lunnon K, Mill J, Schalkwyk LC:
A data-driven approach to preprocessing Illumina 450K methylation
array data (submitted)}
\author{
ruth.pidsley@kcl.ac.uk
}
\note{
The beadc function is internal to the pfilter function
}
|
1ab959c95e518cac3e8b1582397b9f017f11a78d | 9254b237ee565c9c33a5cadc44353f2e1d0e5748 | /R/get_corgi.R | dd99ae071b7a72fc6eaa6d00cfe82dde84e2b725 | [] | no_license | bdacunha/randompic | 5bc9de9d671448bc94f5d7d664687fca5bafd30d | f937ff70989f6dae444e04960368f783586e18eb | refs/heads/master | 2021-01-10T14:03:37.212361 | 2015-11-23T23:57:30 | 2015-11-23T23:57:30 | 46,516,582 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 432 | r | get_corgi.R | #' get_corgi
#'
#'Inserts the link of a picture containing corgis depending on the height and width specified
#'
#' @param width numeric
#' @param height numeric
#' @return link
#' @export
#' @examples
#' get_corgi(300, 400)
get_corgi <- function(width, height){
  # Validate that both dimensions are positive numbers.
  stopifnot(is.numeric(width))
  stopifnot(is.numeric(height))
  stopifnot(width > 0)
  stopifnot(height > 0)
  # NOTE(review): the format string below is empty, so this always returns ""
  # and width/height are ignored. A URL template such as
  # "http://<image-service>/%d/%d" appears to be missing - confirm the
  # intended endpoint before using this function.
  sprintf("", width, height)
}
|
547996dddc0e88807015f6364a6e79256e91dd31 | 534402f58f8e00d95b83a79eb1fab5f887d9c13c | /paper-code/f_cmp/j_merging_ratio.R | 539565c3f0647a695f78f3bb159d3e35b57fc822 | [] | no_license | mywanuo/KaMRaT | 79fd064d7db18bc29146f3f249d6227c5c4237be | 4cda92ca1ecc40549ef95354be734216ee8cd9cb | refs/heads/master | 2023-04-22T01:31:03.805036 | 2021-04-21T09:04:00 | 2021-04-21T09:04:00 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 1,803 | r | j_merging_ratio.R | rm(list = ls())
library(stringr)
library(tidyr)
library(magrittr)
library(ggplot2)
# One row per input file: a count and the file name it came from.
nb.summary <- read.table("/home/haoliang.xue/media/ssfa/MEMBERS/haoliang.xue/PRAD_TCGA/merging-first/merging-ratio.tsv", header = F)
colnames(nb.summary) <- c("number", "file")
# Subtract 1 from each count (presumably the header line of each counted
# file - TODO confirm against how merging-ratio.tsv is produced).
nb.summary$number <- nb.summary$number - 1
# Rows whose file path contains "CVMatrices" are k-mer tables, others contigs.
nb.summary$type <- ifelse(str_detect(nb.summary$file, pattern = "CVMatrices"), yes = "kmer", no = "contig")
nb.summary$dataset <- str_extract(nb.summary$file, pattern = "risk|relapse")
nb.summary$train_set <- str_extract(nb.summary$file, pattern = "train[0-9]+")
nb.summary <- nb.summary[, c("dataset", "train_set", "type", "number")]
# One row per dataset/training split, with kmer and contig counts side by side.
nb.summary.wide <- pivot_wider(nb.summary, id_cols = c("dataset", "train_set"), names_from = "type", values_from = "number")
# Reduction ratio = k-mer count / contig count per split.
nb.summary.wide$reduc.ratio <- nb.summary.wide$kmer / nb.summary.wide$contig
# Mean, min and max of the ratio per dataset, merged into one summary frame.
nb.summary.mean <- aggregate(nb.summary.wide$reduc.ratio, by= list(nb.summary.wide$dataset), FUN = mean)
colnames(nb.summary.mean) <- c("dataset", "mean")
nb.summary.min <- aggregate(nb.summary.wide$reduc.ratio, by= list(nb.summary.wide$dataset), FUN = min)
colnames(nb.summary.min) <- c("dataset", "min")
nb.summary.max <- aggregate(nb.summary.wide$reduc.ratio, by= list(nb.summary.wide$dataset), FUN = max)
colnames(nb.summary.max) <- c("dataset", "max")
nb.summary.agg <- merge(nb.summary.mean, nb.summary.max) %>%
merge(nb.summary.min)
# Bar of the mean ratio with min/max error bars, one bar per dataset.
# NOTE(review): chaining `+ ggsave(...)` onto a plot relies on a side effect
# and is deprecated since ggplot2 3.3.0; prefer building the plot and calling
# ggsave() as a separate statement.
ggplot(nb.summary.agg) +
geom_col(aes(x = dataset, y = mean), color = "black", size = 1, fill = "gray") +
geom_errorbar(aes(x = dataset, ymin = min, ymax = max), size = 1, width = 0.5) +
ylab("reduction ratio") +
theme(text = element_text(size = 35, family = "Arial")) +
ggsave("/home/haoliang.xue/media/ssfa/MEMBERS/haoliang.xue/PRAD_TCGA/cmp_res/reduction_ratio.png", width = 8, height = 11)
|
4fcbcd8168bc1c99405900a355e861fedc59a7de | d23a003b3329ddd581ce28b90bbb4ae1dab6ddce | /01_dep_exposure.R | 9bfbbd6ce975f630de3ccddf5f2d9b2ec1f19c50 | [] | no_license | lenamax2355/EHR_Data_Management_and_Analysis | d8930c7d416ff84767690765231a257cf7d5f317 | d908e2ab5bac3d1309c7e3b6e3c9549fc328484d | refs/heads/main | 2023-04-28T02:27:25.293263 | 2021-05-12T05:15:25 | 2021-05-12T05:15:25 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 3,854 | r | 01_dep_exposure.R | ######################################################################
## Title: 01_dep_exposure.R
## Author: Charysse Gibson
## Date created: July 17, 2019
######################################################################
# Purpose: Identify depression exposure
######################################################################
# Inputs: cohort_20190725.fst
# cohort_diag_20190725.fst
# ICD9_ICD10_Depressive Disorders Diagnoses_v2.csv
# outputs: dep_exp_20190819.fst
######################################################################

setwd('A:/')

library(data.table)
library(fst)
library(Hmisc)
library(bit64)

# Prior Depression Definition:
# 2 outpatient, 1 inpatient within 12-months
# Post Depression Definition:
# 2 outpatient, 1 inpatient within 12-months

#-----Dataset & Variables Needed-----
# cohort (patients with oud diag & baseline dates)
# PTID
# FIRST_OUD
# cohort_diag (all diagnoses in cohort)
# PTID
# DATE
# DIAG
# care_type
# SOURCE
# covariate
# dep_diag
# Type (ICD9, ICD10)
# Value (ICD code)
# Covariate
# Description

#-----Inputs-----
## cohort (patients with oud diag & baseline dates)
cohort <-
read_fst('data/created_data/cohort_20190725.fst',
as.data.table = T)
# cohort[,list(.N,unique_pats=uniqueN(PTID))]
# N unique_pats
# 1: 21757 21757
## cohort_diag (all diagnoses in cohort)
cohort_diag <-
read_fst('data/created_data/cohort_diag_20190725.fst',
as.data.table = T)
# cohort_diag[,list(.N,unique_pats=uniqueN(PTID))]
# N unique_pats
# 1: 6784206 21757
# dep diagnoses
dep_diag <-
fread('A:/background_notes/ICD9_ICD10_Depressive Disorders Diagnoses_v2.csv')
# BUGFIX: strip the dots from the depression ICD codes on dep_diag itself;
# the original line referenced `oud_diag` (undefined in this script) and the
# lowercase column name `value` (the CSV column is `Value`), matching the
# identical, correct statement later in this script.
dep_diag[,DIAG:=gsub('\\.','',Value)]
##-----Identify pre-existing depression-----
# 1 - identify dep in cohort
# 2 - one inpatient or two outpatient diagnoses (before first OUD)
# 3 - create exposure indicator variable
# dep diagnoses
# (re-reads the depression code list; duplicates the read a few lines above)
dep_diag <-
fread('A:/background_notes/ICD9_ICD10_Depressive Disorders Diagnoses_v2.csv')
# Strip dots from the ICD codes so they match the DIAG format in cohort_diag.
dep_diag[,DIAG:=gsub('\\.','',Value)]
## identify dep in cohort
# Keyed join: flag every cohort diagnosis whose code is a depression code.
setkey(dep_diag,DIAG)
setkey(cohort_diag,DIAG)
cohort_diag[dep_diag,covariate:='DEP']
# cohort_diag[covariate=='DEP',list(.N,uniqueN(PTID))]
# N V2
# 1: 157881 11910
# cohort_diag[sample(.N,size=.N*.10)]
# one inpatient or two outpatient diagnoses (before first OUD)
cohort_dep <-
cohort_diag[covariate=='DEP']
setkey(cohort_dep,PTID,DATE)
# diff = gap to the patient's next depression diagnosis (0 for the last one).
cohort_dep[,diff:=shift(DATE, fill=first(DATE), type = 'lead')-DATE,by=PTID]
cohort_dep[,diff:=ifelse(DATE==max(DATE),0,diff),by=PTID]
# Qualifying depression: any inpatient diagnosis, or two diagnoses within
# 365 days; FIRST_DEP = earliest qualifying date per patient.
cohort_first_dep <-
cohort_dep[(care_type=='inpatient')|(diff>0)&(diff<=365),list(FIRST_DEP=min(DATE)),by=PTID]
# depression exposure indicator variable
setkey(cohort,PTID)
setkey(cohort_first_dep,PTID)
cohort[cohort_first_dep,FIRST_DEP:=FIRST_DEP]
cohort[,DEP_EXP:=as.factor(ifelse(FIRST_OUD>FIRST_DEP,1,0))]
## DEP_EXP key
# 1 - depression prior to first oud
# 0 - depression diagnosis after first oud
# NA - did not have depression diagnosis
# describe(cohort[,list(DEP_EXP)])
# n missing distinct
# 10451 11306 2
# 11306/21757 = 0.5196488 NA (with no dep diag)
# Value 0 1
# Frequency 5121 5330
# Proportion 0.49 0.51
## recode depression key
cohort[DEP_EXP=='1',PRIOR_DEP:=1]
cohort[DEP_EXP=='0',POST_DEP:=1] #will be removed later
#-----Outputs-----
# cohort with DEP exposure indicator
write.fst(cohort[,list(PTID,FIRST_OUD,PRIOR_DEP,POST_DEP)],
path = paste0('data/created_data/dep_exp_',
gsub('-','',Sys.Date()),'.fst'),
compress = 100)
|
81c3c48340f73cbaff4d86c59ef932bc512b7398 | 0704b673d172dbb69bd5f55fa5eb226e36fb66d6 | /PoonaiAnil_Homework2.r | 3ebde4203df89f6ea6c02fdad496d43f84c2db15 | [] | no_license | DevonARP/STA-3000-Work | f2352f8664648d6053e50363ac0b581d4a36b4e7 | 108f5d595ccfed287c7b6f76914db8f3139f1e0b | refs/heads/master | 2022-11-22T14:57:23.400854 | 2020-07-23T22:21:09 | 2020-07-23T22:21:09 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 588 | r | PoonaiAnil_Homework2.r | Carbs = c(50,60,40)
# Macronutrient grams per meal (Carbs is defined just above this block).
Fat = c(8,30,30)
Protein = c(20,40,40)
dat1 = data.frame(Carbs,Fat,Protein)
rownames(dat1) = c("Breakfast","Lunch","Dinner")
dat1
#1
# Total grams of each macronutrient across the three meals.
p1 = c(sum(dat1$Carbs),sum(dat1$Fat),sum(dat1$Protein))
p1
#2
# Convert grams to calories: 4 kcal/g for carbs and protein, 9 kcal/g for fat.
CCarbs = dat1$Carbs * 4
CFat = dat1$Fat * 9
CProtein = dat1$Protein * 4
dat2 = data.frame(CCarbs,CFat,CProtein)
rownames(dat2) = c("Breakfast","Lunch","Dinner")
dat2
# Calories per meal, then the daily total.
p2 = rowSums(dat2)
p2
p3 = sum(p2)
p3
# He did stay under his goal of 1800 calories.
#3
# Share of total calories contributed by each macronutrient.
c1 = colSums(dat2)
c1
p4 = c1/p3
p4
# He could eat more Carbs and Protein and less Fat.
4ec21925d7a326b52971ee7fa4f9a8563fed6665 | aaee9e9d31a117fc6639973769cf91f7f36bc005 | /pkg/R/substValue.R | c8fdc4c88d3f30dece59b50cc195d8978f954612 | [] | no_license | data-cleaning/editrules | f83c80b73034eeab28d6f4997d3bc6404c33eb11 | 9194adda360ebc3555f6bec0355555fbd94fe1a5 | refs/heads/master | 2020-04-12T13:14:20.517998 | 2018-09-17T08:57:44 | 2018-09-17T08:57:44 | 10,077,571 | 15 | 7 | null | 2015-05-26T19:50:54 | 2013-05-15T11:57:08 | R | UTF-8 | R | false | false | 9,639 | r | substValue.R | #' Replace a variable by a value in a set of edits.
#'
#' @note At the moment, objects of class \code{\link[=disjunct]{editenv}} are converted to \code{list}
#' prior to processing (so no performance is gained there) and reconverted afterwards.
#'
#' @param E \code{\link{editset}}, \code{\link{editmatrix}}, \code{\link{editarray}},
#' \code{\link[=disjunct]{editlist}} or \code{\link[=disjunct]{editenv}}
#' @param var \code{character} with name(s) of variable(s) to substitute
#' @param value vector with value(s) of variable(s)
#' @param ... arguments to be passed to or from other methods
#' @return \code{E}, with variables replaced by values
#' @example ../examples/substValue.R
#' @seealso \code{\link{eliminate}}
#' @export
#' @references
#' Value substitution is extensively described in the package vignettes.
substValue <- function(E, var, value, ...){
    # S3 generic: dispatches on the class of E (editmatrix, editarray,
    # editset, editlist, or editenv -- see the methods below).
    UseMethod("substValue")
}
# Reduce an editmatrix by substituting a variable
#
# Given a set of linear restrictions \eqn{E: {\bf Ax}\odot {\bf b}} with \eqn{\odot\in\{<,\leq,==\}},
# and matrix \eqn{{\bf A}} with columns \eqn{{\bf a}_1,{\bf a}_2,\ldots,{\bf a}_n}.
# Substituting variable \eqn{x_j} with a value \eqn{\tilde{\bf x}_j} means setting \eqn{{\bf a}_j=0}
# and \eqn{{\bf b}={\bf a}_j\tilde{x}_j}.
#
# Note that the resulting \code{\link{editmatrix}} may be inconsistent because of inconsistencies in
# \eqn{\tilde{\bf x}}.
#'
#' @method substValue editmatrix
#' @param reduce \code{logical} should the result be simplified? For \code{\link{editmatrix}} this has the same effect
#' as calling the function \code{\link{reduce}}. For \code{\link{editarray}}, the datamodel of the substituted variable
#' is reduced to a single value, and the variable itself is not removed.
#' @param removeredundant \code{logical}. Should empty rows be removed?
#'
#' @rdname substValue
#' @export
substValue.editmatrix <- function(E, var, value, reduce=FALSE, removeredundant=TRUE, ...){
    # Substitute fixed values for variables in a linear edit matrix E: Ax (op) b.
    # For each substituted variable x_j with value v_j the constant column is
    # updated to b - a_j*v_j, and the coefficient column a_j is zeroed
    # (or dropped entirely when reduce=TRUE).
    stopifnot(length(var)==length(value))
    if (length(var) == 0) return(E)
    v <- match(var, getVars(E), nomatch=0)
    if (any(v==0)){
        warning("Parameter var (", paste(var[v==0], collapse=", "),
                ") is not a variable of editmatrix E")
    }
    # BUGFIX: 'value' must be subset in parallel with 'v'; previously only 'v'
    # was filtered, which made the matrix product below non-conformable
    # whenever some requested variables did not occur in E.
    keep <- v != 0
    v <- v[keep]
    value <- value[keep]
    # Nothing left to substitute: return E unchanged (previously this errored).
    if (length(v) == 0) return(E)
    ib <- ncol(E)
    # typecast of 'value' so it may be passed as list (useful in error localization).
    E[,ib] <- E[ ,ib] - E[ ,v]%*%as.numeric(value)
    if (reduce)
        E <- E[,-v, drop=FALSE]
    else
        E[,v] <- 0
    if (removeredundant) {
        # drop rows that became trivially satisfied by the substitution
        return( E[!isObviouslyRedundant.editmatrix(E),] )
    } else {
        return(E)
    }
}
# Substitute a value in an editarray
#
# For an \code{\link{editarray}}, only rows with \code{<var>:<value>==TRUE} are kept.
# In the kept rows, categories not equal to <value> are set to \code{FALSE}
# If \code{reduce=TRUE}, columns corresponding to categories which are set
# to \code{FALSE} will be removed. Note that the function \code{\link{reduce}}
# has a different effect (it removes complete variables).
#
#' @method substValue editarray
#'
#'
#' @rdname substValue
#'
#' @export
substValue.editarray <- function(E, var, value, reduce=FALSE, ...){
    # Substitute categorical values in an editarray: keep only rows where
    # <var>:<value> is TRUE; categories of the substituted variable other than
    # <value> are either set TRUE (marked irrelevant) or dropped (reduce=TRUE).
    stopifnot(length(var)==length(value))
    if (length(var) == 0) return(E)
    ind <- getInd(E)
    sep=getSep(E)
    A <- getArr(E)
    value <- as.character(value)
    for ( i in seq_along(var) ){
        vr <- var[i]
        vl <- value[i]
        J <- ind[[vr]]           # column indices of variable vr's categories
        ii <- J[vl]              # column of the requested category
        if ( is.null(ii) || is.na(ii) )
            stop(paste("Variable ", vr,"not present in editarray or cannot take value",vl))
        # rows in which the substituted category participates
        # NOTE(review): 'I' is overwritten on every iteration, so the final
        # row filter below reflects only the LAST substituted variable when
        # length(var) > 1 -- confirm this is intended.
        I <- A[,ii]
        if ( reduce ){
            # drop the other categories of vr and rebuild the index
            A <- A[ ,-setdiff(J,ii) ,drop=FALSE]
            ind <- indFromArray(A, sep)
        } else {
            A[,J] <- TRUE
        }
    }
    neweditarray(
        E = A[I,,drop=FALSE],
        ind = ind,
        sep = sep,
        levels = colnames(A)
    )
}
#' Compute index from array part of editarray
#'
#' @param A boolean array
#' @param sep separator
#' @keywords internal
#'
indFromArray <- function(A,sep){
    # Empty array: no variables, empty index.
    if (ncol(A) == 0) return(list())
    # Column names have the form "<variable><sep><category>".
    parts <- strsplit(colnames(A), sep)
    varname <- vapply(parts, function(p) p[1], character(1))
    # Category label is defined only when the name splits into exactly two
    # parts; otherwise it is the empty string.
    category <- vapply(parts,
                       function(p) if (length(p) == 2) p[2] else "",
                       character(1))
    vars <- unique(varname)
    # One entry per variable: its column indices, named by category.
    out <- lapply(vars, function(v){
        idx <- which(varname == v)
        names(idx) <- category[idx]
        idx
    })
    names(out) <- vars
    out
}
# Substitute values in an \code{\link{editset}}
#
# For an \code{\link{editset}}, purely numerical variables are
# substitutes as in an \code{\link{editmatrix}} and categorical
# as in an \code{\link{editarray}}. Numerical variables appearing
# logical constraints are substituted and if truth values can
# be derived these are substituted in the logical constraint.
#
#' @param simplify Simplify editset by moving logical edits containing a single
#' numerical statement to the pure numerical part? (This is mostly for internal purposes
#' and overwriting the default should normally not be necessary for package users).
#'
#' @method substValue editset
#'
#' @rdname substValue
#' @export
substValue.editset <- function(E, var, value, simplify=TRUE, ...){
    # Substitute values for variables throughout an editset: in the pure
    # numerical part (E$num), in the 'condition' attribute, in the numerical
    # then-clauses (E$mixnum), and in the categorical/mixed part (E$mixcat).
    #
    # Technical note. Substituting a dummy variable (e.g. .l1) with TRUE or
    # FALSE amounts to making an assumption about the validity
    # of the condition stated in that dummy. As such, it should not be added
    # to the numerical editmatrix (since that editmatrix is only relevant when the
    # assumed condition is already fulfilled). Instead, the condition is
    # added to the 'condition' attribute of the editset.
    #TODO make it possible to supply value = list(x=1, A="a") which makes
    # substituting values in an editset a lot easier. Especially when we have
    # used localizeErrors and want the solution space by substituting non adapted variables.
    stopifnot(length(var)==length(value))
    if (length(var) == 0) return(E)
    # the nonnumeric case is simple: substitute in the categorical part only
    if ( !is.numeric(value) ){
        E$mixcat <- substValue(E$mixcat,var,value,...)
        # move substituted dummies to "condition"
        id <- var %in% getVars(E,type='dummy')
        if ( any(id) ){
            dvar <- rownames(E$mixnum) %in% var[id]
            v <- as.character(E$mixnum[dvar,])
            # a dummy set to FALSE means the negated condition is assumed
            v[!value[id]] <- invert(v[!value[id]])
            attr(E,"condition") <- c(editmatrix(v),attr(E,"condition"))
            E$mixnum <- E$mixnum[!dvar,]
        }
        if ( simplify ) E <- simplify(E)
        return(E)
    }
    # substitute pure numeric data
    i1 <- var %in% getVars(E$num)
    if ( any(i1) ){ # time-saving condition
        numvar <- var[i1]
        numval <- value[i1]
        innum <- colSums(contains(E$num, numvar )) > 0
        if ( any(innum) )
            E$num <- substValue(E$num, numvar[innum], numval[innum])
    }
    # substitute in condition.
    # BUGFIX: 'var' may be a vector; the previous scalar test
    # `if (var %in% getVars(cnd))` is an error for length > 1 under R >= 4.2.
    # Reduce with any() and substitute only the variables actually occurring
    # in the condition.
    cnd <- condition(E)
    incnd <- var %in% getVars(cnd)
    if ( any(incnd) ) condition(E) <- substValue(cnd, var[incnd], value[incnd], ...)
    # substitute in then-clauses
    i1 <- var %in% getVars(E$mixnum)
    if ( any (i1) ){ # time-saving condition
        mixvar <- var[i1]
        mixval <- value[i1]
        u <- contains(E$mixnum, mixvar)
        inmix <- colSums(u) > 0
        if ( any(inmix) ){
            E$mixnum <- substValue(
                E$mixnum,
                mixvar[inmix],
                mixval[inmix],
                removeredundant=FALSE
            )
            # did substitution yield any certainties?
            cntr <- isContradiction(E$mixnum)
            taut <- isTautology(E$mixnum)
            # dummy variables to be eliminated from mixcat
            lvar <- apply(u[,inmix,drop=FALSE],1,any)
            dvar <- rownames(u)
            dval <- logical(length(taut))
            dval[lvar & cntr] <- FALSE
            dval[lvar & taut] <- TRUE
            isub <- lvar & (cntr | taut)
            E$mixcat <- substValue(E$mixcat, dvar[isub],dval[isub])
        }
    }
    if ( simplify ) E <- simplify(E)
    removeRedundantDummies(E)
}
# Returns which linear edits are obvious contradictions.
# - Accurate to 8 figures.
# - Assumes editmatrix normality
# Flags linear edits that are obvious contradictions (a numerically zero
# coefficient row whose constant term cannot satisfy the operator).
# - Accurate to 8 figures.
# - Assumes editmatrix normality.
isContradiction <- function(E){
    eps <- 1e-8
    op <- getOps(E)
    A_abs <- abs(getA(E))
    # rows whose coefficients are all (numerically) zero
    zero_row <- rowSums(A_abs) < ncol(A_abs) * eps
    rhs <- getb(E)

    out <- logical(nrow(A_abs))
    is_eq <- op == '=='
    is_lt <- op == '<'
    is_le <- !is_eq & !is_lt
    out[is_eq] <- zero_row[is_eq] & abs(rhs[is_eq]) > eps
    out[is_lt] <- zero_row[is_lt] & rhs[is_lt] <= 0
    out[is_le] <- zero_row[is_le] & rhs[is_le] < eps
    out
}
# returns which linear edits are obviously TRUE
# - Accurate to 8 figures
# - Assumes editmatrix normality
# Returns which linear edits are obviously TRUE (a numerically zero
# coefficient row whose constant term satisfies the operator).
# - Accurate to the given tolerance (default 1e-8, matching isContradiction).
# - Assumes editmatrix normality.
isTautology <- function(E, tol = 1e-8){
    # BUGFIX: 'tol' used to be shadowed by an unconditional `tol = 1e-8` on
    # the first line, silently ignoring any caller-supplied tolerance. The
    # parameter is now honored; the effective default is unchanged.
    ops <- getOps(E)
    absA <- abs(getA(E))
    # rows whose coefficients are all (numerically) zero
    nil <- rowSums(absA) < ncol(absA)*tol
    b <- getb(E)
    I <- logical(nrow(absA))
    eq <- ops=='=='
    lt <- ops=='<'
    le <- !eq & !lt
    I[eq] <- nil[eq] & abs(b[eq]) < tol
    I[lt] <- nil[lt] & b[lt] > tol
    I[le] <- nil[le] & b[le] >= -tol
    I
}
#'
#' @method substValue editlist
#' @rdname substValue
#' @export
substValue.editlist <- function(E, var, value, ...){
    # Substitute values in every editset of the list that contains 'var',
    # dropping editsets whose condition becomes infeasible afterwards.
    L <- varTypeAndOccurrence(E,var)
    # varTypeAndOccurrence signals "variable occurs nowhere" with a scalar NA:
    # nothing to substitute, return E untouched.
    if ( length(L) == 1 && is.na(L) ){
       return(E)
    }
    # (removed: unused local `type <- L$type`)
    iRemove <- logical(length(E))
    for ( i in which(L$occurs) ){
        E[[i]] <- substValue(E[[i]],var,value,...)
        # substitution may have made the editset's condition infeasible
        if ( !isFeasible(condition(E[[i]])) ) iRemove[i] <- TRUE
    }
    E[!iRemove]
}
#' @method substValue editenv
#' @rdname substValue
#' @export
substValue.editenv <- function(E,var,value,...){
    # Per the file's note: editenv objects are converted to a list, processed
    # by the editlist method, and converted back to an environment.
    L <- as.list(E)
    # BUGFIX: previously this called `substValue.editlist(E)` -- passing the
    # environment instead of the converted list and dropping var/value
    # entirely, which made every call fail. Forward all arguments on the list.
    L <- substValue.editlist(L, var, value, ...)
    list2env(L)
}
|
c454637e6cea4a27db3644cac50f8506a5bf0aad | 12ab3213a5868d5718701df22de1ca9494adfcf8 | /man/kn_is_exist.Rd | 102dc9366c7682d20d58df2cec65ddfec2da0cd7 | [] | no_license | takewiki/ksmpkg | 93ede44f273fbf9a5b9c93694956ff637676f313 | 3572ae8c79f3167caf68280ce130a005debf92e7 | refs/heads/master | 2022-04-18T01:13:18.947256 | 2020-04-21T00:21:16 | 2020-04-21T00:21:16 | 257,218,206 | 0 | 0 | null | null | null | null | UTF-8 | R | false | true | 428 | rd | kn_is_exist.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/knowledgeNode.R
\name{kn_is_exist}
\alias{kn_is_exist}
\title{判断知识点是否存在}
\usage{
kn_is_exist(kn_name, kc_name, conn = conn_rds("nsic"))
}
\arguments{
\item{kn_name}{知识点名称}
\item{kc_name}{知识分类名称}
\item{conn}{连接}
}
\value{
返回值
}
\description{
判断知识点是否存在
}
\examples{
kn_is_exist()
}
|
b7994e8d2c105df8e674d80456378c72613b4471 | 49853e35e13b3701d794b5b784b30a40e83cb2c4 | /plot2.R | 3d304510e4822d526dc83879a28e55efc96c299a | [] | no_license | VivienBH/PlotProject1 | c43316f62c19e8f6008f89fa207b8f1f88d5f89e | 75b852f1d805bdee4899ffb24fe1a02a348fbb98 | refs/heads/master | 2016-09-06T10:25:56.541996 | 2015-05-10T18:23:20 | 2015-05-10T18:23:20 | 35,382,102 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 551 | r | plot2.R | ## Load data, draw graphic and store in file "plot2.png"
## 2015-5-10
## Set the working directory
#setwd("~/Desktop/Coursera/Exploratory Data Analysis")
## Load tidy data - data set stored in an object named power.consumption
## (getandcleandata.R must define power.consumption with DateTime and
##  Global_active_power columns, as used by the plot below)
source("getandcleandata.R")
## Draw plot: global active power over time as a line chart
plot(power.consumption$DateTime, power.consumption$Global_active_power, xlab = " ", ylab = "Global Active Power (kilowatts)", type = "l")
dev.copy(png, file = "plot2.png", width = 480, height = 480) ## Copy my plot to a PNG file
dev.off() ## Don't forget to close the PNG device! |
1f4bddf2a139c74b4af605d00e30845b8f3047eb | cdc8e477a0b8392984ac7a63f9d608a6cc48d8fd | /man/fpopTree.Rd | 581d68e4c9aa187215cc8a9303a757e5f9397cf3 | [] | no_license | vrunge/fpopTree | e192fcc796b9519a8ef1e41f42d5c2c325c19831 | 3b0da013f236330d380179d512b3ba27030684a5 | refs/heads/main | 2023-03-05T14:37:04.733741 | 2021-02-05T17:00:30 | 2021-02-05T17:00:30 | 330,942,638 | 0 | 0 | null | null | null | null | UTF-8 | R | false | true | 1,337 | rd | fpopTree.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/fpopTree.R
\name{fpopTree}
\alias{fpopTree}
\title{Functional Pruning Optimal Partitioning for data structures in tree}
\usage{
fpopTree(vertex_data, tree, type = "mean", weights = NULL, testMode = FALSE)
}
\arguments{
\item{vertex_data}{vector of data associated to each vertex}
\item{tree}{tree structure encoded in a list}
\item{type}{a string defining the cost model to use: "mean", "variance", "poisson", "exp", "negbin"}
\item{weights}{vector of weights (positive numbers), same size as data}
\item{testMode}{boolean. False by default. Used to debug the code}
}
\value{
a gfpop object = (changepoints, states, forced, parameters, globalCost)
\describe{
\item{\code{changepoints}}{is the vector of changepoints (we give the last element of each segment)}
\item{\code{states}}{is the vector giving the state of each segment}
\item{\code{forced}}{is the vector specifying whether the constraints of the graph are active (=1) or not (=0)}
\item{\code{parameters}}{is the vector of successive parameters of each segment}
\item{\code{globalCost}}{is a number equal to the total loss: the minimal cost for the optimization problem with all penalty values excluded}
}
}
\description{
Functional pruning optimal partitioning with data in a tree structure
}
|
047a97f1468100511dcc67ce92cbb5909e8a288f | 5dda2f287818150aa71603a9f9f42375f6b99c6f | /keepbusy.R | 06d0261d12d63f2f2ea47690a48d7315afd25b38 | [] | no_license | gtritchie/aaa | b6f841c09c14bb5f52894823c6f032b3f41ce59f | 6c6f689a4484ebbd1247082c163ae147bb7024d1 | refs/heads/master | 2023-05-05T14:06:19.955364 | 2023-04-22T19:43:03 | 2023-04-22T19:43:03 | 90,062,045 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 164 | r | keepbusy.R | # random comment
# Loop forever, printing an ever-increasing counter with 1s and 2s pauses
# between prints. Never returns; intended only to keep a session busy.
keepbusy <- function() {
  counter <- 0
  repeat {
    Sys.sleep(1)
    print(counter)
    counter <- counter + 1
    Sys.sleep(2)
    print(counter)
    counter <- counter + 1
  }
}
|
1b3b43b0fcc6e01406fbecd76032d7f24eec7039 | fccda946aeab155ab523e683537e0f8cb5ecac8e | /man/projMembership.Rd | 7fb0581718fe5d0feb49ce4c325c4e54d73d0282 | [] | no_license | lingxuez/SOUPR | d2931a89f46b310e5dc8f0bf2d817d0085b6dd1e | 68358c79a40b82174605fdb249e286bd7fc5eee3 | refs/heads/master | 2021-03-22T05:04:26.405694 | 2018-10-17T02:35:32 | 2018-10-17T02:35:32 | 122,891,238 | 8 | 7 | null | null | null | null | UTF-8 | R | false | true | 336 | rd | projMembership.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/SOUP.R
\name{projMembership}
\alias{projMembership}
\title{Clean up membership matrix}
\usage{
projMembership(theta)
}
\arguments{
\item{theta}{The estimated raw theta}
}
\value{
The cleaned-up membership matrix.
}
\description{
Clean up membership matrix
}
|
e0427078ac613e588959c82942dd4115d2c06754 | 3191f9ae5e1a31cdb3eef7866ea6300ad276db1e | /w2_project.R | 4374f313c050a4a951b552053b537dbc96f19e1a | [] | no_license | adityatibrewal-2803/RepData_PeerAssessment1 | fa2c5490fcf374078d9be82ad292912d09c1cee3 | 64e8b82e9b85d9135e28d3410b2db4a4c33a2c36 | refs/heads/master | 2021-08-07T09:29:20.600324 | 2017-11-08T00:17:10 | 2017-11-08T00:17:10 | 109,666,134 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 4,401 | r | w2_project.R | # Setting the working directory
# NOTE(review): hard-coded setwd() makes the script non-portable -- consider
# relative paths or an R project.
setwd("C:\\00-AdityaTibrewal\\OneDrive\\Study Mtrl\\Data Science\\Coursera\\Reproducible Analysis\\W2\\Peer Project\\RepData_PeerAssessment1")
# Loading libraries
library(dplyr)
library(ggplot2)
# File reading and pre-processing: unzip and read activity.csv, parse dates
activity = read.csv(unzip("activity.zip"), na.strings = "NA")
activity$date = as.Date(activity$date)
# Ask 1 - Steps per day (sum of steps within each date)
totalStepsDayWise = group_by(activity, date) %>%
  summarise(totalSteps = sum(steps))
meanSteps = mean(totalStepsDayWise$totalSteps, na.rm = T)
# Ask 2 - Steps per day (histogram)
hist(totalStepsDayWise$totalSteps, col = "blue", xlab = "Total # of Steps",
     main = "Distribution of Total Steps")
# Ask 3 - Mean and median of the total number of steps taken per day
medianSteps = median(totalStepsDayWise$totalSteps, na.rm = T)
meanSteps = mean(totalStepsDayWise$totalSteps, na.rm = T)
print(paste('Median steps per day: ', medianSteps))
print(paste('Mean steps per day: ', round(meanSteps, 0)))
# Ask 4 - Time series plot of the 5-minute interval (x-axis) and
# the average number of steps taken, averaged across all days (y-axis)
meanStepsMinWise = group_by(activity, interval) %>%
  summarise(meanSteps = mean(steps, na.rm = T))
plot(x = meanStepsMinWise$interval, y = meanStepsMinWise$meanSteps,
     type = "l", xlab = "5-minute intervals",
     ylab = "Mean of steps taken across days",
     main = "Average number of steps taken by 5-minute intervals")
# Ask 5 - Interval with max average steps
meanStepsMinWise[meanStepsMinWise$meanSteps == max(meanStepsMinWise$meanSteps), 1]
# Ask 6 - Count of missing values (rows with NA steps)
nrow(activity[is.na(activity$steps), ])
# Ask 7 and 8 - Imputing missing values for steps
# Each missing value to be replaced with the median of total steps taken
# in that interval
# NOTE(review): the summarised column is named 'meanSteps' but holds the
# per-interval MEDIAN -- the name is misleading; confirm which was intended.
medianStepsMinWise = group_by(activity, interval) %>%
  summarise(meanSteps = median(steps, na.rm = T))
activityCompleted = activity
# Going through the dataset - finding rows with missing values -
# finding median steps for those intervals - imputing in the dataset
for (i in 1:nrow(activityCompleted)) {
  if (is.na(activityCompleted[i, 1])) {
    activityCompleted[i, 1] =
      medianStepsMinWise[medianStepsMinWise$interval == activityCompleted[i, 3], 2]
  }
}
# Ask 8 - Histogram of imputed dataset, side-by-side with the original
totalStepsDayWiseCompleted = group_by(activityCompleted, date) %>%
  summarise(totalSteps = sum(steps))
par(mfrow = c(1, 2))
hist(totalStepsDayWise$totalSteps, col = "blue", xlab = "Total # of Steps",
     main = "Distribution of Total Steps")
hist(totalStepsDayWiseCompleted$totalSteps, col = "blue", xlab = "Total # of Steps",
     main = "Distribution of Total Steps (Imputed)")
dev.off()  # close the plotting device (also resets the par(mfrow) layout)
# Mean and median of total steps after imputation
meanStepsCompleted = mean(totalStepsDayWiseCompleted$totalSteps)
medianStepsCompleted = median(totalStepsDayWiseCompleted$totalSteps)
print(paste('Median steps per day for the imputed data set: ', medianStepsCompleted))
print(paste('Mean steps per day for the imputed data set: ', round(meanStepsCompleted, 0)))
# Comparison of mean and median pre and post imputation
boxplot(totalStepsDayWise$totalSteps, totalStepsDayWiseCompleted$totalSteps,
        names = c("Original Data", "Imputed Data"), col = "grey")
legend(1, meanSteps, c("Mean Original Data"), cex = 0.7)
legend(2, meanStepsCompleted, c("Mean Imputed Data"), cex = 0.7)
abline(h = meanSteps, col = "red")
abline(h = meanStepsCompleted, col = "blue")
# Ask 9 - Splitting into Weekday/ Weekend
# Classify a date as "weekend" (Saturday/Sunday) or "weekday".
#
# Generalized to be vectorized: accepts a single Date or a vector of Dates
# and returns a character vector of the same length, so it still works with
# the sapply() call below and also directly on a whole date column.
# NOTE(review): weekdays() is locale-dependent; this assumes English day names.
getDayType <- function (dayVal) {
  ifelse(weekdays(dayVal) %in% c("Saturday", "Sunday"), "weekend", "weekday")
}
# Label each row of the imputed data set with its day type as a factor.
activityDayType = activityCompleted
activityDayType$dayType = sapply(activityDayType$date, getDayType)
activityDayType$dayType = as.factor(activityDayType$dayType)
# Ask 10 - Average steps at 5-minute intervals split across weekday ~ weekend
activityDayTypeAvg = group_by(activityDayType, interval, dayType) %>%
  summarise(avgSteps = mean(steps, na.rm = T))
# Panel plot: one line chart per day type, stacked vertically.
ggplot(activityDayTypeAvg, aes(x = interval, y = avgSteps)) +
  facet_wrap(~ dayType, nrow = 2) +
  geom_line(col = "blue") +
  labs(x = "Interval in minutes", y = "Average steps",
       title = "Average steps by weekday/ weekend")
337556ca29a3c2bc8b7465bd056ec8ee1e3499d3 | 93d1fcc7758e5e99927be0529fb9d681db71e70c | /R/ma_d_bb.R | 0ca847b8a13374d508766d5da181e52a61b95cd6 | [] | no_license | psychmeta/psychmeta | ef4319169102b43fd87caacd9881014762939e33 | b790fac3f2a4da43ee743d06de51b7005214e279 | refs/heads/master | 2023-08-17T20:42:48.778862 | 2023-08-14T01:22:19 | 2023-08-14T01:22:19 | 100,509,679 | 37 | 15 | null | 2023-08-14T01:06:53 | 2017-08-16T16:23:28 | R | UTF-8 | R | false | false | 15,306 | r | ma_d_bb.R | #' @rdname ma_d
#' @export
#' @import dplyr
#' @aliases ma_d_barebones
ma_d_bb <- ma_d_barebones <- function(d, n1, n2 = rep(NA, length(d)), n_adj = NULL, sample_id = NULL, citekey = NULL,
                                      wt_type = c("n_effective", "sample_size", "inv_var_mean", "inv_var_sample",
                                                  "DL", "HE", "HS", "SJ", "ML", "REML", "EB", "PM"),
                                      correct_bias = TRUE,
                                      moderators = NULL, cat_moderators = TRUE,
                                      moderator_type = c("simple", "hierarchical", "none"),
                                      data = NULL, control = control_psychmeta(), ...){
     # Entry point for a bare-bones meta-analysis of d values: resolves inputs
     # and control options, assembles the effect-size data frame, and delegates
     # the actual computation to .ma_d_bb via ma_wrapper.
     # Save current progress-bar options so they can be restored on exit.
     .dplyr.show_progress <- options()$dplyr.show_progress
     .psychmeta.show_progress <- psychmeta.show_progress <- options()$psychmeta.show_progress
     if(is.null(psychmeta.show_progress)) psychmeta.show_progress <- TRUE
     options(dplyr.show_progress = psychmeta.show_progress)
     # Record pre-existing warnings so only new ones are attached to the output.
     warn_obj1 <- record_warnings()
     call <- match.call()
     wt_type <- match.arg(wt_type, choices = c("n_effective", "sample_size", "inv_var_mean", "inv_var_sample",
                                               "DL", "HE", "HS", "SJ", "ML", "REML", "EB", "PM"))
     moderator_type <- match.arg(moderator_type, choices = c("simple", "hierarchical", "none"))
     # Merge control settings from '...' and the 'control' argument.
     control <- control_psychmeta(.psychmeta_ellipse_args = list(...),
                                  .control_psychmeta_arg = control)
     error_type <- control$error_type
     conf_level <- control$conf_level
     cred_level <- control$cred_level
     conf_method <- control$conf_method
     cred_method <- control$cred_method
     var_unbiased <- control$var_unbiased
     hs_override <- control$hs_override
     # hs_override forces the Hunter-Schmidt analysis settings.
     if(hs_override){
          wt_type <- "sample_size"
          error_type <- "mean"
          correct_bias <- TRUE
          conf_method <- cred_method <- "norm"
          var_unbiased <- FALSE
     }
     # Warn (and take the first element) if scalar arguments were given as vectors.
     correct_bias <- scalar_arg_warning(arg = correct_bias, arg_name = "correct_bias")
     moderator_type <- scalar_arg_warning(arg = moderator_type, arg_name = "moderator_type")
     wt_type <- scalar_arg_warning(arg = wt_type, arg_name = "wt_type")
     error_type <- scalar_arg_warning(arg = error_type, arg_name = "error_type")
     conf_method <- scalar_arg_warning(arg = conf_method, arg_name = "conf_method")
     cred_method <- scalar_arg_warning(arg = cred_method, arg_name = "cred_method")
     conf_level <- interval_warning(interval = conf_level, interval_name = "conf_level", default = .95)
     cred_level <- interval_warning(interval = cred_level, interval_name = "cred_level", default = .8)
     # Rebuild a complete call including defaults for arguments the user omitted,
     # so column names can be matched against 'data' below.
     formal_args <- formals(ma_d_bb)
     formal_args[["..."]] <- NULL
     for(i in names(formal_args)) if(i %in% names(call)) formal_args[[i]] <- NULL
     call_full <- as.call(append(as.list(call), formal_args))
     # When 'data' is supplied, resolve each argument against its columns.
     if(!is.null(data)){
          data <- as.data.frame(data, stringsAsFactors = FALSE)
          d <- match_variables(call = call_full[[match("d", names(call_full))]], arg = d, arg_name = "d", data = data)
          n1 <- match_variables(call = call_full[[match("n1", names(call_full))]], arg = n1, arg_name = "n1", data = data)
          n2 <- match_variables(call = call_full[[match("n2", names(call_full))]], arg = n2, arg_name = "n2", data = data)
          n_adj <- match_variables(call = call_full[[match("n_adj", names(call_full))]], arg = n_adj, arg_name = "n_adj", data = data)
          if(deparse(substitute(sample_id))[1] != "NULL")
               sample_id <- match_variables(call = call_full[[match("sample_id", names(call_full))]], arg = sample_id, arg_name = "sample_id", data = data)
          if(deparse(substitute(citekey))[1] != "NULL")
               citekey <- match_variables(call = call_full[[match("citekey", names(call_full))]], arg = citekey, arg_name = "citekey", data = data)
          if(deparse(substitute(moderators))[1] != "NULL")
               moderators <- match_variables_df({{moderators}}, data = as_tibble(data, .name_repair = "minimal"), name = deparse(substitute(moderators)))
     }
     # Normalize moderator information: names and (for categorical moderators) levels.
     if(!is.null(moderators)){
          if(is.null(dim(moderators))){
               moderators <- as.data.frame(moderators, stringsAsFactors = FALSE)
               colnames(moderators) <- "Moderator"
          }
          moderator_names <- list(all = colnames(moderators),
                                  cat = colnames(moderators)[cat_moderators],
                                  noncat = colnames(moderators)[!cat_moderators])
          moderator_names <- lapply(moderator_names, function(x) if(length(x) == 0){NULL}else{x})
          if(any(cat_moderators)){
               moderator_levels <- lapply(as_tibble(moderators, .name_repair = "minimal")[,cat_moderators], function(x){
                    lvls <- levels(x)
                    if(is.null(lvls)) lvls <- levels(factor(x))
                    lvls
               })
               names(moderator_levels) <- colnames(as_tibble(moderators, .name_repair = "minimal")[,cat_moderators])
          }else{
               moderator_levels <- NULL
          }
          moderators <- as.data.frame(moderators, stringsAsFactors = FALSE)
     }else{
          moderator_names <- list(all = NULL,
                                  cat = NULL,
                                  noncat = NULL)
          moderator_levels <- NULL
     }
     # 'as_worker' signals that this call is a sub-analysis of a larger routine,
     # in which case the finalization step below is skipped.
     additional_args <- list(...)
     as_worker <- additional_args$as_worker
     if(is.null(as_worker)) as_worker <- FALSE
     inputs <- list(wt_type = wt_type, error_type = error_type, correct_bias = correct_bias,
                    conf_level = conf_level, cred_level = cred_level, conf_method = conf_method, cred_method = cred_method,
                    var_unbiased = var_unbiased)
     # Assemble the per-study effect-size data frame passed to .ma_d_bb.
     es_data <- data.frame(d = d, n1 = n1, n2 = n2, stringsAsFactors = FALSE)
     es_data$n_adj <- n_adj
     if(is.null(sample_id)) sample_id <- paste0("Sample #", 1:nrow(es_data))
     if(!is.null(citekey)) es_data <- cbind(citekey = citekey, es_data) %>% mutate(citekey = as.character(citekey))
     es_data <- cbind(sample_id = sample_id, es_data) %>% mutate(sample_id = as.character(sample_id))
     # Run the (possibly moderator-partitioned) meta-analysis.
     out <- ma_wrapper(es_data = es_data, es_type = "d", ma_type = "bb", ma_fun = .ma_d_bb,
                       moderator_matrix = moderators, moderator_type = moderator_type, cat_moderators = cat_moderators,
                       ma_arg_list = list(error_type = error_type, correct_bias = correct_bias, conf_level = conf_level, cred_level = cred_level,
                                          conf_method = conf_method, cred_method = cred_method, var_unbiased = var_unbiased, wt_type = wt_type),
                       presorted_data = additional_args$presorted_data, analysis_id_variables = additional_args$analysis_id_variables,
                       moderator_levels = moderator_levels, moderator_names = moderator_names)
     # Finalize the object (ids, attributes, warnings/fyi notes) unless this
     # call is a worker inside a larger analysis.
     if(!as_worker){
          out <- bind_cols(analysis_id = 1:nrow(out), out)
          attributes(out) <- append(attributes(out), list(call_history = list(call),
                                                          inputs = inputs,
                                                          ma_methods = "bb",
                                                          ma_metric = "d_as_d",
                                                          default_print = "bb",
                                                          warnings = clean_warning(warn_obj1 = warn_obj1, warn_obj2 = record_warnings()),
                                                          fyi = record_fyis(neg_var_res = sum(unlist(map(out$meta_tables, function(x) x$barebones$var_res < 0)), na.rm = TRUE))))
          out <- namelists.ma_psychmeta(ma_obj = out)
     }
     class(out) <- c("ma_psychmeta", class(out))
     # Restore the caller's progress-bar options.
     options(psychmeta.show_progress = .psychmeta.show_progress)
     options(dplyr.show_progress = .dplyr.show_progress)
     return(out)
}
#' Internal function for computing bare-bones meta-analyses of d values
#'
#' @param data Data frame of bare-bones information.
#' @param run_lean If TRUE, the meta-analysis will not generate an escalc object. Meant to speed up bootstrap analyses that do not require supplemental output.
#' @param ma_arg_list List of arguments to be used in the meta-analysis function.
#'
#' @return A list object containing the results of bare-bones meta-analyses of d values.
#'
#' @keywords internal
.ma_d_bb <- function(data, ma_arg_list, run_lean = FALSE){
     # Unpack per-study data.
     sample_id <- data$sample_id
     citekey <- data$citekey
     d <- data$d
     n1 <- data$n1
     n2 <- data$n2
     n_adj <- data$n_adj
     # Unpack analysis options.
     conf_level <- ma_arg_list$conf_level
     cred_level <- ma_arg_list$cred_level
     correct_bias <- ma_arg_list$correct_bias
     wt_type <- ma_arg_list$wt_type
     error_type <- ma_arg_list$error_type
     conf_method <- ma_arg_list$conf_method
     cred_method <- ma_arg_list$cred_method
     var_unbiased <- ma_arg_list$var_unbiased
     ## Determine how to use sample-size information: Use total sample size or subgroup sample sizes?
     if(is.null(n2)) n2 <- rep(NA, length(n1))
     n_vec <- n1
     use_n1_only <- is.na(n2)
     n_vec[!use_n1_only] <- n1[!use_n1_only] + n2[!use_n1_only]
     # n_adj defaults to the total n; NAs in a supplied n_adj fall back to it too.
     if(is.null(n_adj)){
          n_adj <- n_vec
     }else{
          n_adj[is.na(n_adj)] <- n_vec[is.na(n_adj)]
     }
     # Where n_adj overrides the total n, treat the study as n1-only.
     n1[n_vec != n_adj] <- n_adj[n_vec != n_adj]
     use_n1_only[n_vec != n_adj] <- TRUE
     # Split n1-only studies into two equal subgroups.
     n1_i <- n1
     n2_i <- n2
     n1_i[use_n1_only] <- n2_i[use_n1_only] <- n_adj[use_n1_only] / 2
     # Keep the uncorrected d values for the escalc output.
     .d <- d
     if(correct_bias) d <- correct_d_bias(d = d, n = n_vec)
     ## Compute study weights, either in-house or via metafor.
     wt_source <- check_wt_type(wt_type = wt_type)
     if(wt_source == "psychmeta"){
          if(wt_type == "n_effective") wt_vec <- n1_i * n2_i / (n1_i + n2_i)
          if(wt_type == "sample_size") wt_vec <- n_adj
          if(wt_type == "inv_var_mean") wt_vec <- 1 / var_error_d(d = rep(0, length(d)), n1 = n1_i, n2 = n2_i, correct_bias = FALSE)
          if(wt_type == "inv_var_sample") wt_vec <- 1 / var_error_d(d = d, n1 = n1_i, n2 = n2_i, correct_bias = FALSE)
     }
     if(wt_source == "metafor"){
          if(error_type == "mean"){
               # Two-pass: error variance at 0, then at the provisional weighted mean.
               var_e_vec <- var_error_d(d = 0, n1 = n1_i, n2 = n2_i, correct_bias = FALSE)
               var_e_vec <- var_error_d(d = wt_mean(x = d, wt = 1 / var_e_vec), n1 = n1_i, n2 = n2_i, correct_bias = FALSE)
          }
          if(error_type == "sample") var_e_vec <- var_error_d(d = d, n1 = n1_i, n2 = n2_i, correct_bias = FALSE)
          wt_vec <- as.numeric(metafor::weights.rma.uni(metafor::rma(yi = d,
                                                                     vi = var_e_vec,
                                                                     control = list(maxiter = 1000, stepadj = .5), method = wt_type)))
     }
     ## Estimate the weighted mean d value
     mean_d <- wt_mean(x = d, wt = wt_vec)
     ## Estimate sampling error
     if(error_type == "mean") var_e_vec <- var_error_d(d = rep(mean_d, length(d)), n1 = n1_i, n2 = n2_i, correct_bias = FALSE)
     if(error_type == "sample") var_e_vec <- var_error_d(d = d, n1 = n1_i, n2 = n2_i, correct_bias = FALSE)
     var_e <- wt_mean(x = var_e_vec, wt = wt_vec)
     ## Create escalc object (skipped in lean/bootstrap mode)
     if(run_lean){
          escalc_obj <- NULL
     }else{
          escalc_obj <- data.frame(yi = d, vi = var_e_vec,
                                   d = .d,
                                   n1 = n1, n2 = n2, n = n_vec, n_adj = n_adj,
                                   n1_split = n1_i, n2_split = n2_i, stringsAsFactors = FALSE)
          escalc_obj$pi <- data$pi
          # NOTE(review): this branch tests data$pa but assigns escalc_obj$pi --
          # it probably should test data$pi; confirm against upstream psychmeta.
          if(is.null(data$pa)){
               escalc_obj$pi <- n1_i / (n1_i + n2_i)
          }else{
               escalc_obj$pi <- data$pi
          }
          if(is.null(data$pa)){
               escalc_obj$pa <- .5
          }else{
               escalc_obj$pa <- data$pa
          }
          # NOTE(review): this unconditional assignment overrides the .5 default
          # above and deletes the pa column when data$pa is NULL; confirm intent.
          escalc_obj$pa <- data$pa
          escalc_obj$weight <- wt_vec
          escalc_obj$residual <- d - mean_d
          if(!is.null(citekey)) escalc_obj <- cbind(citekey = citekey, escalc_obj)
          if(!is.null(sample_id)) escalc_obj <- cbind(sample_id = sample_id, escalc_obj)
          if(any(colnames(data) == "original_order")) escalc_obj <- cbind(original_order = data$original_order, escalc_obj)
          class(escalc_obj) <- c("escalc", "data.frame")
     }
     ## Estimate the weighted variance of d values
     var_d <- wt_var(x = d, wt = wt_vec, unbiased = var_unbiased)
     ## Compute residual variance
     var_res <- var_d - var_e
     sd_d <- var_d^.5
     sd_e <- var_e^.5
     sd_res <- var_res^.5
     # Negative residual variance yields NaN sd; report 0 instead.
     sd_res[is.na(sd_res)] <- 0
     ## Compute cumulative sample size and cumulative adjusted sample size
     N <- sum(n_vec[!is.na(wt_vec) & !is.na(d)])
     k <- sum(!is.na(wt_vec) & !is.na(d))
     ## Compute uncertainty intervals (single-study analyses get NA variances
     ## and a normal-theory CI based on the sampling-error SD)
     if(k == 1){
          var_d <- sd_d <- NA
          var_res <- sd_res <- NA
          se_d <- sd_e
          ci <- confidence(mean = mean_d, sd = sd_e, k = 1, conf_level = conf_level, conf_method = "norm")
     }else{
          se_d <- sd_d / sqrt(k)
          ci <- confidence(mean = mean_d, sd = var_d^.5, k = k, conf_level = conf_level, conf_method = conf_method)
     }
     cr <- credibility(mean = mean_d, sd = sd_res, cred_level = cred_level, k = k, cred_method = cred_method)
     ci <- setNames(c(ci), colnames(ci))
     cr <- setNames(c(cr), colnames(cr))
     # One-row summary table of the bare-bones results.
     barebones <- as.data.frame(t(c(k = k,
                                    N = N,
                                    mean_d = mean_d,
                                    var_d = var_d,
                                    var_e = var_e,
                                    var_res = var_res,
                                    sd_d = var_d^.5,
                                    se_d = se_d,
                                    sd_e = var_e^.5,
                                    sd_res = sd_res,
                                    ci, cr)), stringsAsFactors = FALSE)
     class(barebones) <- c("ma_table", class(barebones))
     attributes(barebones) <- append(attributes(barebones), list(ma_type = "d_bb"))
     ## Compile results
     list(meta = list(barebones = barebones,
                      individual_correction = NULL,
                      artifact_distribution = NULL),
          escalc = list(barebones = escalc_obj,
                        individual_correction = NULL,
                        artifact_distribution = NULL))
}
#' Internal function for computing bootstrapped bare-bones meta-analyses of d values
#'
#' @param data Data frame of bare-bones information.
#' @param i Vector of indexes to select studies from 'data'.
#' @param ma_arg_list List of arguments to be passed to the meta-analysis function.
#'
#' @return A list object containing the results of bootstrapped bare-bones meta-analyses of d values.
#'
#' @keywords internal
.ma_d_bb_boot <- function(data, i, ma_arg_list){
     # Resample studies according to the bootstrap indices, run a lean
     # bare-bones analysis, and flatten the summary table to a named vector.
     resampled <- data[i,]
     boot_out <- .ma_d_bb(data = resampled, ma_arg_list = ma_arg_list, run_lean = TRUE)
     unlist(boot_out$meta$barebones)
}
|
a49562e91f1f4a6b55ac08a04d966951418d6210 | dc5db82837fc638aee83c3f9d28f9b10b696e977 | /ComplementaryScripts/AnalysisPipeline_mainScript.R | d7db2874821d0fc4d21e289eb7fca95bd7e98c89 | [
"MIT"
] | permissive | Philipgdv/OrthOmics | 043c9bfad7687a63efdb87d3ace63718339abf00 | 8d899531a6278dff53d82d5fe631e1e83cf3b2d2 | refs/heads/master | 2022-04-27T14:11:52.133257 | 2020-03-04T16:43:03 | 2020-03-04T16:43:03 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 3,670 | r | AnalysisPipeline_mainScript.R | #AnalysisPipeline_mainScript
#
#Function that performs differential expression analysis on proteomics (absolute and relative) and RNAseq datasets
#for the datasets in the CHASSY project (fermentations for S. cerevisiae, K. marxianus and Y. lipolytica exposed
#reference, high temperature, low pH and osmotic stress conditions).
#
#The DE hits for all organisms and conditions are mapped to a list of 1:1:1 orthologous genes (from orthoFinder)
#to search for evolutionary conserved stress-adaptation responses at the transcript and protein levels.
#
#An integrated table is also generated in which information of foldchanges at the transcript level together with
#absolute proteomics levels [umol/g protein], Molecular weight of proteins, Sequence lenght and GO terms information
#is put together for each organism.
#
#This script will facilitate all analyses described above, the user only needs to 1) clone the repo and
#2) change the directory name on Line 56 to reflect the location of your cloned directory
#
# Last modified: Ivan Domenzain. 2019-11-27
#
## ---- Dependencies ----------------------------------------------------------
## FIX: install packages only when missing, instead of unconditionally calling
## install.packages() on every run (slow, and it fails in non-interactive
## sessions). Also removed a duplicated library(ggplot2) call.
## NOTE(review): edgeR and limma are Bioconductor packages, so a plain
## install.packages() will not find them on CRAN -- if they are missing,
## install them with BiocManager::install(c("edgeR", "limma")).
for (pkg in c("VennDiagram", "rlist", "devtools", "edgeR", "limma", "tidyverse")) {
  if (!requireNamespace(pkg, quietly = TRUE)) {
    install.packages(pkg)
  }
}
library(rlist)
library(VennDiagram)
library(devtools)
library(ggplot2)
library(limma)
library(edgeR)
library(tidyverse) # Collection of useful R-packages
library(RColorBrewer)
library(ggbiplot)
#====================================DEFINE VARIABLES =======================================
# Organism codes to analyse [sce, kma, yli]
organisms <- c('sce','kma','yli')
# Proteomics dataset that should be used for DE analysis (here: 'XIC';
# alternatives per the original notes include NSAF, Scounts/iBAQ or merged datasets)
dataSource <- 'XIC'
# Differential-expression thresholds
pVal <- 0.01
logPval <- abs(log10(pVal)) # |log10| of the p-value cutoff
log2FC <- 1                 # minimum absolute log2 fold-change
adjustedP <- TRUE           # use multiple-testing adjusted p-values
# Should the initial dataset be normalized by MW of proteins?
normByMW <- TRUE
# Filter type for determining present and non-noisy proteins in the dataset:
# TRUE applies the filter criteria to all conditions, FALSE only to the
# reference condition.
stringent <- TRUE
# Normalization method for DE analysis
normMethod <- 'TMM'
# Directory where the OrthOmics repository was cloned (edit for your machine)
repoPath <- '/Users/ivand/Documents/GitHub/OrthOmics'
# Internal functions path
scriptsPath <- paste(repoPath,'/ComplementaryScripts',sep='')
#================== Data analysis pipeline ====================================
for (organism in organisms){
  cat(paste("Analyzing data for: ", organism,'\n',sep=""))
  #================== 1. Analyze Transcriptomics Data ====================================
  setwd(scriptsPath)
  source('RNAseqAnalysis.R')
  cat(paste("Analyzing RNAseq data for: ", organism,'\n',sep=""))
  RNAseqAnalysis(organism,normMethod,0,logPval,log2FC,adjustedP,repoPath)
  #================== 2. Analyze proteomics Data ====================================
  setwd(scriptsPath)
  source('proteomics_Analysis.R')
  cat(paste("Analyzing proteomics data for: ", organism,'\n',sep=""))
  proteomics_Analysis(organism,dataSource,normByMW,stringent,normMethod,logPval,log2FC,adjustedP,repoPath)
  #================== 3. Create integrative Omics table ==================
  setwd(scriptsPath)
  source('createIntegratedTable.R')
  createIntegratedTable(organism,pVal,log2FC,adjustedP,FALSE,repoPath)
  cat("\014") # clear the console between organisms
}
#================== 4. Map DE genes to 1:1:1 orthologous genes list ==================
setwd(scriptsPath)
source('mapDEgenesToOG.R')
mapDEgenesToOG(organisms,pVal,log2FC,adjustedP,'RNA',repoPath)
|
5365290b1eb5d714b97c32606feed950c7b56ce7 | 2d32eae4ce79f0764e7ee7b00a011bf89e53c3fa | /vignettes/binary_models.R | 089213098fad9c3cbe4231a70bf955c3cb2aab25 | [] | no_license | Ax3man/phylopath | 3285615b8fa6ebaca9a1e688c901df43532864a5 | edec26041a3983915bb6d02b361f3fc147500745 | refs/heads/master | 2023-04-13T03:22:07.610911 | 2023-04-04T17:56:49 | 2023-04-04T17:56:49 | 65,209,538 | 12 | 2 | null | null | null | null | UTF-8 | R | false | false | 1,216 | r | binary_models.R | ## ----define_models, fig.align='center', fig.width=10, fig.height=8, out.height="600px", fig.dpi = 600----
library(phylopath)
models <- define_model_set(
A = c(C~M+D),
B = c(C~D),
C = c(C~D, P~M),
D = c(C~D, M~P, G~P),
E = c(C~D, P~M, G~P),
F = c(C~D, P~M+G),
G = c(C~D, M~P, P~G),
H = c(C~D, M~P),
I = c(C~D, M~M, G~P),
J = c(M~P, G~D),
K = c(P~M, G~D),
L = c(C~M+D, P~M+G),
.common = c(C~P+G)
)
plot_model_set(models, algorithm = 'kk')
## ----fit models----------------------------------------------------------
(cichlids_results <- phylo_path(models, cichlids, cichlids_tree))
## ----get_summary---------------------------------------------------------
(s <- summary(cichlids_results))
plot(s)
## ------------------------------------------------------------------------
best_cichlids <- best(cichlids_results)
## ------------------------------------------------------------------------
best_cichlids
## ------------------------------------------------------------------------
coef_plot(best_cichlids, error_bar = "se", reverse_order = TRUE) + ggplot2::coord_flip()
## ---- fig.align='center', fig.width=8, fig.height=4, out.width="600px", fig.dpi = 300----
plot(best_cichlids)
|
38a1d0f4c2df205eff471ba635383b28735c8237 | 52eb5c068b2f73adb9a2aedc619a7dac58279bcb | /cachematrix.R | fe4a18997dbb86464f7e5837cd6c2c0aa5c1f140 | [] | no_license | bdoherty1420/ProgrammingAssignment2 | ba445511676c69fa0f45e81e7b2fdfc159d51d71 | 0cf3fd2ebec4f1a51256518b45f308c093fe34ca | refs/heads/master | 2021-01-17T20:29:46.428795 | 2015-01-25T11:04:01 | 2015-01-25T11:04:01 | 29,810,490 | 0 | 0 | null | 2015-01-25T10:30:25 | 2015-01-25T10:30:25 | null | UTF-8 | R | false | false | 1,340 | r | cachematrix.R | ## These functions have the ability to cache the inversion of a matrix in order to
## save time running this computation for large matrices
## This function creates as special "matrix" that contains a function to:
## 1. set the value of the matrix 2. get the value of the matrix
## 3. set the value of the inverted matrix 4. get the value of the inverted
## matrix
makeCacheMatrix <- function(x = matrix()) {
  # Build a cache-aware "matrix": a list of four closures that share the
  # matrix `x` and its cached inverse (`inv`, NULL until it is stored).
  inv <- NULL
  set <- function(y) {
    x <<- y
    inv <<- NULL  # replacing the matrix invalidates any cached inverse
  }
  get <- function() x
  setinversion <- function(solve) inv <<- solve
  getinversion <- function() inv
  # Expose the four accessors by name.
  list(
    set = set,
    get = get,
    setinversion = setinversion,
    getinversion = getinversion
  )
}
## This function calculates the inversion of the special "matrix" created
## with the above function. It checks to see if the inversion has been
## calculated, in which case it gets the cached calculation, otherwise
## it calculates and set the inverted matrix
cacheSolve <- function(x, ...) {
  ## Return the inverse of the special "matrix" `x` (built by
  ## makeCacheMatrix). The first call computes the inverse with solve()
  ## and stores it in the cache; later calls return the cached value and
  ## report that they did so. Extra arguments are forwarded to solve().
  cached <- x$getinversion()
  if (is.null(cached)) {
    inv <- solve(x$get(), ...)
    x$setinversion(inv)
    inv
  } else {
    message("getting cached data")
    cached
  }
}
|
8971b776c0aa656280ac3b376fb797baef4ce35c | 20cb60e2bc3d8d8b39b179aecdb1f33797e797ee | /week4/classification.R | b43ba7ad78bf988c36c222a07b2aa644aa165606 | [] | no_license | esm2000/coursework | 2b5aebc55b1ace49fa541c5993ead9202871a648 | ee33e0ca1f1a5a2c80883f48846716204eca7ed3 | refs/heads/master | 2020-06-03T03:14:42.404691 | 2019-07-07T05:56:38 | 2019-07-07T05:56:38 | 191,412,586 | 0 | 0 | null | 2019-06-11T16:46:01 | 2019-06-11T16:46:00 | null | UTF-8 | R | false | false | 3,282 | r | classification.R | library(tidyverse)
library(scales)
library(ElemStatLearn) # spam dataset
library(e1071) # Naive Bayes implementation
library(ROCR) # evaluation metrics
theme_set(theme_bw())
str(spam)
summary(spam)
set.seed(42) # reproducible results
# Clever way to split the data into training and testing data
# Validation being ignored for right now
ndx <- sample(nrow(spam), floor(nrow(spam) * 0.9))
train <- spam[ndx,]
test <- spam[-ndx,]
xTrain <- train[,-58]
yTrain <- train$spam
xTest <- test[,-58]
yTest <- test$spam
# -------------------------------------------------------------
# Naive Bayes no smoothing
# -------------------------------------------------------------
model <- naiveBayes(xTrain, yTrain)
summary(model)
# Length Class Mode
# apriori 2 table numeric
# tables 57 -none- list
# levels 2 -none- character
# isnumeric 57 -none- logical
# call 3 -none- call
df <- data.frame(actual = yTest, pred = predict(model,xTest))
head(df)
# actual pred
# 1 spam spam
# 2 spam spam
# 3 spam spam
# 4 spam spam
# 5 spam spam
# 6 spam spam
table(df)
# accuracy: fraction of correct classifications
df %>%
summarize(acc = mean(pred == actual))
# precision; fraction of positive predictions that are actually true
df %>%
filter(pred == 'spam') %>%
summarize(prec = mean(actual == 'spam'))
# recall: fraction of true examples that we predicted to
# be positive
# aka true positive rate, sensitivity
df %>%
filter(actual == 'spam') %>%
summarize(recall = mean(pred == 'spam'))
# false positive rate: fraction of false examples that we
# predicted to be positive
df %>%
filter(actual == 'email') %>%
summarize(fpr = mean(pred == 'spam'))
# plot histogram of predictied possibilities
# note overconfident predicitons
probs <- data.frame(predict(model, xTest, type = 'raw'))
ggplot(probs, aes(x = spam)) +
geom_histogram(binwidth = 0.01) +
scale_x_continuous(label = percent) +
xlab("Predicted probability of spam") +
ylab('Number of examples')
# check calibration by looking at how often predicted probabilties
# match actual frequencies
# This is most easily done by binning examples by their predicted
# probability of being spam and then counting how often those
# examples actually turn out to be spam
data.frame(predicted=probs[, "spam"], actual=yTest) %>%
group_by(predicted=round(predicted*10)/10) %>%
summarize(num=n(), actual=mean(actual == "spam")) %>%
ggplot(data=., aes(x=predicted, y=actual, size=num)) +
geom_point() +
geom_abline(linetype=2) +
scale_x_continuous(labels=percent, lim=c(0,1)) +
scale_y_continuous(labels=percent, lim=c(0,1)) +
xlab('Predicted probability of spam') +
ylab('Percent that are actually spam')
# we can use the ROCR package to make a plot of the receiver
# operator characteristic (ROC) curve and compute the area
# under the curve (AUC)
# The ROC curve plots the true positive rate (also known as
# recall, sensitivity, or the probability of a false alarm)
# as we chane the threshold on the probability for predicting spam
# create a ROCR object
pred <- prediction(probs[, "spam"], yTest)
# plot ROC curve
perf_nb <- performance(pred, measure='tpr', x.measure='fpr')
plot(perf_nb)
performance(pred, 'auc') |
60f25aedf7b93a9873a429f924b288f5a3944b83 | 8c76eab8a05f535586d342c2d08ba9edd5261933 | /RyS_NBA.R | a8b22eeaa3c630065cbafd11b4686e51aaadbd38 | [] | no_license | martaruedas/practicas_prediccionMR | 295744b4a289a9071f694c50b809076268a467d1 | 208708fe21d28dbc451b77a5132e485150d0cd86 | refs/heads/main | 2023-01-14T17:32:25.302773 | 2020-11-23T21:54:11 | 2020-11-23T21:54:11 | 308,153,193 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 10,849 | r | RyS_NBA.R |
#---------------------------------------------------------------------------------------------------
#------------------------------------ REGRESIÓN Y SALARIOS NBA -------------------------------------
#------------------------------------- PREDICCIÓN (2020/2021) --------------------------------------
#---- 28/10/2020 -------------------------------------------------------- Marta Ruedas Burgos ------
#---------------------------------------------------------------------------------------------------
#### NBA-Predicción ####
#----------------------------------------------------------------------------------------------------
# OBJETIVO: determinar el mejor modelo para predecir el salario de los jugadores de la NBA.
#----------------------------------------------------------------------------------------------------
# Import dataset, nba.csv
library(readr)
library(car) # normality/diagnostic plots and tests used below (qqPlot, crPlots, outlierTest)
library(tidyr) # linear-model helpers
library(tidyverse) # linear-model helpers
nba <- read_csv("nba.csv")
View(nba)
# NBA table made up of 485 players and their respective ratings.
# We present a descriptive analysis.
# The variables in the data are categorical and quantitative.
# The "Salary" column gives the salary; it is the dependent variable and the column this analysis focuses on.
#----------------------------------------------------------------------------------------------------
# Regression model
#----------------------------------------------------------------------------------------------------
vc_nba <- nba %>% select(Salary, NBA_DraftNumber, Age, G, MP, PER) # table of the quantitative variables
# I created a table called vc_nba with the quantitative variables most relevant to the analysis.
#----------------------------------------------------------------------------------------------------
# Linear model
#----------------------------------------------------------------------------------------------------
regresion_nba <- lm(Salary ~., data = vc_nba)
summary(regresion_nba)
# Residuals:
# Min 1Q Median 3Q Max
# -15582460 -3242334 -616497 2478016 23586638
# The regression yields a fitted (mean) value for every individual.
# A p-value below 5% means the variable is relevant in the model;
# above 5% it is not, using a 5% significance level.
coefficients(regresion_nba)
# NOTE(review): the pasted output below lists predictors (TS%, 3PAr, FTr, ...)
# that are not in vc_nba, so it appears to come from an earlier fit on the
# full dataset rather than from this 5-predictor model -- re-run to refresh.
#(Intercept) Age G MP PER `TS%` `3PAr` FTr
#-1595886.922 513255.761 -162996.127 5920.031 -385430.514 -3421384.245 -5725399.371 -506844.390
# `ORB%` `DRB%` `TRB%` `AST%` `STL%` `BLK%` `TOV%` `USG%`
#-1271021.098 -982859.702 2344549.530 -29704.936 -221468.727 198202.813 -10270.630 176357.159
# OWS DWS WS `WS/48` OBPM DBPM BPM VORP
#-2871579.939 -3075987.692 3457772.884 -4061192.205 3026489.950 2284450.996 -2144170.837 433149.376
# Example (age): for each additional year the player earns 513,255 dollars more.
# Example (games): the more games played, the less the player earns.
#----------------------------------------------------------------------------------------------------
# Prediction
#----------------------------------------------------------------------------------------------------
# The goal of regression models is to predict the dependent variable.
prediccion_nba <- predict(regresion_nba, newdata = vc_nba)
summary(prediccion_nba)
# We observe:
# Min. 1st Qu. Median Mean 3rd Qu. Max. NA's
# -3355000 2440174 6103239 6660622 10070237 31091979 2
# The minimum is -3,355,000 and the maximum 31,091,979.
#----------------------------------------------------------------------------------------------------
# Prediction model
# Example with 10 random players
#----------------------------------------------------------------------------------------------------
# First I want to analyse a single player; I chose LeBron James.
# First example: LeBron James
# First we create a salary function for the nba table, called salary_nba_prediccion.
# Second we create the prediction of that model, called prediccion_nba.
salary_nba_prediccion <- function(model, NBA_DraftNumber, Age, G, MP, PER){
  prediccion_nba <- predict(model, data.frame( NBA_DraftNumber, Age, G, MP, PER))
}
# NOTE(review): the function body ends in an assignment, so the predicted
# value is returned invisibly -- this works, but an explicit final
# `prediccion_nba` line would make the return value obvious.
# Third, we plug in the values for LeBron James so the model returns his predicted salary.
SalarioLeBronJames<- salary_nba_prediccion(regresion_nba, 1,
                                           33,
                                           78,
                                           2898,
                                           28.5)
SalarioLeBronJames
# The resulting salary is 22,978,838 dollars -- approximately LeBron James' real salary.
#----------------------------------------------------------------------------------------------------
# Example with 10 random players
#----------------------------------------------------------------------------------------------------
# Set.seed(1234)
set.seed(1234) # with set.seed(1234) the draw never varies; we always get the same "random" result.
# We arrange the ten random players with their names in a table called tabla_aleatoria_10jugadores.
tabla_aleatoria_10jugadores <- nba %>% select(Player, Salary, NBA_DraftNumber, Age, G, MP, PER)
n <- 10
muestra_diez <- sample(1:nrow(tabla_aleatoria_10jugadores), size = n, replace = FALSE)
datos_muestra_diez <- tabla_aleatoria_10jugadores[muestra_diez, ]
datos_muestra_diez
# # A tibble: 10 x 7
# Player Salary NBA_DraftNumber Age G MP PER
# <chr> <dbl> <dbl> <dbl> <dbl> <dbl> <dbl>
# 1 Larry Drew 148318 62 27 10 70 3
# 2 Darren Collison 10000000 21 30 65 1929 18.9
# 3 Denzel Valentine 2186400 14 24 77 2095 12.1
# 4 Miles Plumlee 12500000 26 29 52 888 10.7
# 5 Bismack Biyombo 17000000 7 25 78 1428 13.9
# 6 Ian Mahinmi 16661641 28 31 74 1096 12.9
# 7 Domantas Sabonis 2550000 11 21 70 1711 17.3
# 8 Caleb Swanigan 1465920 26 20 24 165 7.1
# 9 Tim Frazier 2000000 62 27 56 807 11.2
# 10 Kyle Singler 4666500 33 29 12 59 5.9
# Actual salaries shown above.
# With this sample we compute the model's prediction with the following formula:
prediccion_nba <- predict(regresion_nba, datos_muestra_diez)
prediccion_nba
# Prediction based on the variables above.
#1 2 3 4 5 6 7 8 9 10
#3418989 11971318 11179077 2237362 4634581 7712372 9922487 6822006 5173834 5810496
# The result is the set of predicted salaries for datos_muestra_diez (10 random NBA players).
# We can see that the highest salary is the second one, at almost 12 million dollars.
#----------------------------------------------------------------------------------------------------
# Normality of the NBA model
#----------------------------------------------------------------------------------------------------
qqPlot(regresion_nba, labels=row.names(vc_nba), id.method="identify",
       simulate=TRUE, main="Q-Q Plot")
# Graphical comparison of two distributions.
#----------------------------------------------------------------------------------------------------
# Histogram + density + normal curve + rug
#----------------------------------------------------------------------------------------------------
# Plot the studentized residuals of a fitted model as a histogram, overlaying
# a normal curve (blue, solid) and a kernel density estimate (red, dashed),
# plus a rug of the individual residuals.
residplot <- function(fit, nbreaks=10) {
  z <- rstudent(fit)
  hist(z, breaks=nbreaks, freq=FALSE,
       xlab="Studentized Residual",
       main="Distribution of Errors")
  rug(jitter(z), col="brown")
  curve(dnorm(x, mean=mean(z), sd=sd(z)),
        add=TRUE, col="blue", lwd=2)
  lines(density(z)$x, density(z)$y,
        col="red", lwd=2, lty=2)
  legend("topright",
         legend = c( "Normal Curve", "Kernel Density Curve"),
         lty=1:2, col=c("blue","red"), cex=.7)
}
residplot(regresion_nba)
#----------------------------------------------------------------------------------------------------
# Jarque-Bera, Shapiro-Wilk
#----------------------------------------------------------------------------------------------------
# The Shapiro-Wilk test checks whether a sample was generated by a normal distribution.
vResid <- resid(regresion_nba)
shapiro.test(vResid)
# We check whether the residuals of the regression model follow a normal distribution.
# Shapiro-Wilk normality test
# data: vResid
# W = 0.97066, p-value = 2.981e-08
# The p-value is below the 5% significance level, so we conclude the residuals are not normally distributed.
#----------------------------------------------------------------------------------------------------
# Linearity
#----------------------------------------------------------------------------------------------------
crPlots(regresion_nba)
# Component + residual plots: fitted values are plotted against the predictors;
# if there are no linearity problems the points fall along a straight line.
# In this case they do not, indicating linearity problems.
#----------------------------------------------------------------------------------------------------
# Outliers
#----------------------------------------------------------------------------------------------------
outlierTest(regresion_nba)
# We look at the extreme values in our data.
#rstudent unadjusted p-value Bonferroni p
#328 4.311181 1.9732e-05 0.0095698
#114 3.994462 7.5059e-05 0.0364040
# This means the players in rows 328 and 114 are extreme values within the regression.
# If we remove them from the table, our prediction model will be more precise.
#----------------------------------------------------------------------------------------------------
# Save as an html file
#----------------------------------------------------------------------------------------------------
install.packages("XQuartz")
# NOTE(review): "XQuartz" is not an R package (it is the macOS X11 display
# server, installed outside of R), so this install.packages() call cannot
# succeed; XQuartz has to be installed from xquartz.org instead.
# When saving the file as html I get an error telling me I need the XQuartz package.
# I tried to download XQuartz for R from several sources, but for whatever reason I cannot convert this file to html.
|
5a29db8026704f9a40835ac085f22198d9c82049 | 6f21d1b8365183e5708be7e71c003a0d2fc0d3b8 | /man/CueCountingExample.Rd | 1d5f801ee129289e4d18888d3f606300128e6019 | [] | no_license | cran/Distance | d2700127a013bf8bc5b0d362f6c2f04aacf22020 | 4755df1c9a2ae2e7225b140097449f2db5ccb554 | refs/heads/master | 2023-07-24T23:16:56.004912 | 2023-07-17T11:30:02 | 2023-07-17T12:56:55 | 17,678,843 | 0 | 1 | null | null | null | null | UTF-8 | R | false | true | 1,951 | rd | CueCountingExample.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/dsdata.R
\docType{data}
\name{CueCountingExample}
\alias{CueCountingExample}
\alias{CueCountingExample_units}
\title{Cue counts of whale blows}
\format{
A \code{data.frame} with 109 rows and 15 variables.
\itemize{
\item \code{Region.Label} stratum labels
\item \code{Area} size (km^2) of each stratum
\item \code{Sample.Label} transect labels
\item \code{Cue.rate} rate of blows per animal per hour
\item \code{Cue.rate.SE} variability in cue rate
\item \code{Cue.rate.df} degrees of freedom (number of animals sampled for cues)
\item \code{object} object ID
\item \code{distance} perpendicular distance (km)
\item \code{Sample.Fraction} proportion of full circle scanned (radians)
\item \code{Sample.Fraction.SE} variability in sampling fraction (0)
\item \code{Search.time} Duration of scanning effort (hr)
\item \code{bss} Beaufort sea state
\item \code{sp} Species detected (all observations W in these data)
\item \code{size} Number of animals in group (all 1 in these data)
\item \code{Study.Area} study area name
}
}
\description{
Cues are treated as an indirect count, requiring the use of multipliers.
}
\details{
Because whale blows disappear instantaneously, there is no need to measure a
decay rate. However a cue production rate (blows per individual per unit
time) is required, as is a measure of variability of that rate.
}
\note{
There are two other nuances in this survey. Even though the survey
is taking place on a moving ship, effort is measured as amount of time
scanning for blows. In some instances, it is not possible for the observer
to scan the sea all around them as view may be restricted by the ship's
superstructure. Here a \verb{sampling fraction} multiplier is employed to deal
with restricted vision. Units of measure of \code{cue.rate} and \code{Search.time}
must be equal.
}
\keyword{datasets}
|
42db6aea00c1623abc44bf3705fa734c178ea02b | 11d8fbce4f2c9a45a1fa68d0a00919d6a063cba7 | /Data-Science/3_Getting_and_Cleaning_Data/Lecture/Week_1/Examples/Reading Local Files.R | 48e5384baaf2bc9fc0ff1a4be464f75d39ada322 | [
"LicenseRef-scancode-unknown-license-reference",
"MIT"
] | permissive | shanky0507/Coursera-John-Hopkins | e03c9c8234ff634a047aaf0aea9912c8b96358bd | 1b555bf9d3aaef3bfdd5d4bcd0e3b5c0e734cf66 | refs/heads/master | 2020-08-12T06:42:10.942297 | 2019-10-12T20:08:52 | 2019-10-12T20:08:52 | 214,708,337 | 0 | 0 | MIT | 2019-10-12T20:06:29 | 2019-10-12T20:06:28 | null | UTF-8 | R | false | false | 392 | r | Reading Local Files.R |
# Download the Baltimore fixed-speed-camera CSV and read it two ways
# (read.table with explicit sep/header vs. the read.csv convenience wrapper).
if (!file.exists("data")) {
  dir.create("data")
}
fileUrl <- "https://data.baltimorecity.gov/api/views/dz54-2aru/rows.csv?accessType=DOWNLOAD"
# BUG FIX: the file was downloaded to the working directory ("cameras.csv")
# but then read from "./data/cameras.csv"; download into ./data so the reads
# below can find it.
download.file(fileUrl, destfile = "./data/cameras.csv")
dateDownloaded <- date() # record when this snapshot was taken
dateDownloaded
cameraData <- read.table("./data/cameras.csv", sep = ",", header = TRUE)
head(cameraData)
# read.csv() is read.table() with sep = "," and header = TRUE preset
cameraData <- read.csv("./data/cameras.csv")
head(cameraData)
|
e4f180a339ba5a346afbe6acef2574ad90822f7d | 7917fc0a7108a994bf39359385fb5728d189c182 | /cran/paws.security.identity/man/iam_delete_server_certificate.Rd | 16da89c42b51b46ca57e1e75a3f433336664e933 | [
"Apache-2.0"
] | permissive | TWarczak/paws | b59300a5c41e374542a80aba223f84e1e2538bec | e70532e3e245286452e97e3286b5decce5c4eb90 | refs/heads/main | 2023-07-06T21:51:31.572720 | 2021-08-06T02:08:53 | 2021-08-06T02:08:53 | 396,131,582 | 1 | 0 | NOASSERTION | 2021-08-14T21:11:04 | 2021-08-14T21:11:04 | null | UTF-8 | R | false | true | 1,884 | rd | iam_delete_server_certificate.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/iam_operations.R
\name{iam_delete_server_certificate}
\alias{iam_delete_server_certificate}
\title{Deletes the specified server certificate}
\usage{
iam_delete_server_certificate(ServerCertificateName)
}
\arguments{
\item{ServerCertificateName}{[required] The name of the server certificate you want to delete.
This parameter allows (through its \href{https://en.wikipedia.org/wiki/Regex}{regex pattern}) a string of characters
consisting of upper and lowercase alphanumeric characters with no
spaces. You can also include any of the following characters: _+=,.@-}
}
\value{
An empty list.
}
\description{
Deletes the specified server certificate.
For more information about working with server certificates, see
\href{https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_server-certs.html}{Working with Server Certificates}
in the \emph{IAM User Guide}. This topic also includes a list of AWS services
that can use the server certificates that you manage with IAM.
If you are using a server certificate with Elastic Load Balancing,
deleting the certificate could have implications for your application.
If Elastic Load Balancing doesn't detect the deletion of bound
certificates, it may continue to use the certificates. This could cause
Elastic Load Balancing to stop accepting traffic. We recommend that you
remove the reference to the certificate from Elastic Load Balancing
before using this command to delete the certificate. For more
information, go to
\href{https://docs.aws.amazon.com/elasticloadbalancing/2012-06-01/APIReference/API_DeleteLoadBalancerListeners.html}{DeleteLoadBalancerListeners}
in the \emph{Elastic Load Balancing API Reference}.
}
\section{Request syntax}{
\preformatted{svc$delete_server_certificate(
ServerCertificateName = "string"
)
}
}
\keyword{internal}
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.