blob_id
stringlengths 40
40
| directory_id
stringlengths 40
40
| path
stringlengths 2
327
| content_id
stringlengths 40
40
| detected_licenses
listlengths 0
91
| license_type
stringclasses 2
values | repo_name
stringlengths 5
134
| snapshot_id
stringlengths 40
40
| revision_id
stringlengths 40
40
| branch_name
stringclasses 46
values | visit_date
timestamp[us]date 2016-08-02 22:44:29
2023-09-06 08:39:28
| revision_date
timestamp[us]date 1977-08-08 00:00:00
2023-09-05 12:13:49
| committer_date
timestamp[us]date 1977-08-08 00:00:00
2023-09-05 12:13:49
| github_id
int64 19.4k
671M
⌀ | star_events_count
int64 0
40k
| fork_events_count
int64 0
32.4k
| gha_license_id
stringclasses 14
values | gha_event_created_at
timestamp[us]date 2012-06-21 16:39:19
2023-09-14 21:52:42
⌀ | gha_created_at
timestamp[us]date 2008-05-25 01:21:32
2023-06-28 13:19:12
⌀ | gha_language
stringclasses 60
values | src_encoding
stringclasses 24
values | language
stringclasses 1
value | is_vendor
bool 2
classes | is_generated
bool 2
classes | length_bytes
int64 7
9.18M
| extension
stringclasses 20
values | filename
stringlengths 1
141
| content
stringlengths 7
9.18M
|
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
4df62ca6f8a05acf202097df8931f002b05a1927
|
2b65f4d37c38539f7147496fad3026f96187eea2
|
/datatextfiles/smdata.R
|
f39c914952cb7df74a29f15fd9515691d41324cf
|
[] |
no_license
|
meenasirisha145/rinaction
|
b9f70007c76411bb5fad55918a9153db07af14ac
|
ac12afed86de156eedaf5f98223dcefa77773de9
|
refs/heads/master
| 2021-01-22T21:12:48.291432
| 2017-10-30T18:10:40
| 2017-10-30T18:10:40
| 100,679,599
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 466
|
r
|
smdata.R
|
# Student-marks exploration script: interactively loads a CSV, summarises a
# few mark columns, derives each student's age from date of birth, and
# tabulates category/gender counts.
# NOTE: needs an interactive session because of file.choose().
# FIX: removed `attach(data)` — it was never detached, pollutes the search
# path, and was unused (every later reference is explicit via `data$`).

# Read the raw data; keep strings as character, not factors.
data <- read.csv(file = file.choose(), stringsAsFactors = FALSE)
data
str(data)
names(data)

# Column summaries.
sum(data$cpp)        # total of the 'cpp' marks column
mean(data$cpp)       # average 'cpp' mark
length(data$sname)   # number of students
data$pdp > 50        # logical vector: scored above 50 in 'pdp'

# Parse date of birth strings like "01-Jan-99" into Date objects.
data$dob <- as.Date(data$dob, format = "%d-%b-%y")
str(data$dob)

# Age in weeks since birth, then rounded up to whole years.
x <- difftime(Sys.Date(), data$dob, units = "weeks")
x
# 52.5 weeks/year roughly accounts for leap years; the outer parentheses
# make the assignment auto-print its result.
(data$age <- ceiling(as.numeric(x) / 52.5))
head(data$age)
str(data)

# First five COLUMNS (not rows) for students over 25 / over 20 —
# single-bracket [1:5] on a data.frame selects columns.
data[data$age > 25, ][1:5]
data[data$age > 20, ][1:5]

# Frequency tables by category, gender, and both.
table(data$cat)
table(data$gender)
table(data$cat, data$gender)
data$java > 80       # logical vector: scored above 80 in 'java'
|
517941ef3c850328a5861683814af2e3b265e9f6
|
ef4eb23543224c14f4cae67190d1f82bd881a4a4
|
/IDESSA/comparison_machineLearning/additionalScripts/aggregation/compareVerificationScoresForAggregation.R
|
1d1f77e3c5024ac4a0e96e6d77d5d3e02c97ccb5
|
[] |
no_license
|
environmentalinformatics-marburg/magic
|
33ed410de55a1ba6ff943090207b99b1a852a3ef
|
b45cf66f0f9aa94c7f11e84d2c559040be0a1cfb
|
refs/heads/master
| 2022-05-27T06:40:23.443801
| 2022-05-05T12:55:28
| 2022-05-05T12:55:28
| 9,035,494
| 6
| 7
| null | null | null | null |
UTF-8
|
R
| false
| false
| 4,337
|
r
|
compareVerificationScoresForAggregation.R
|
#compare verification scores for aggregation
#
# Loads per-model verification scores (MAE, ME, RMSE, Rsq) from .Rdata files,
# reshapes them into one long data.frame, draws a faceted boxplot
# (SCORE x TIME) to PDF, and overlays a grey zero-reference line on the ME
# panels via grid viewports.
#
# NOTE(review): paths are hard-coded to the author's machine. The script
# expects the loaded .Rdata files to provide objects named MAE_day_24,
# ME_day_3, RSQ_inb_3, etc., and relies on the sourced helper script to make
# ggplot2 available — confirm before reusing elsewhere.
library(proto)
library(grid)
datapath=("/media/hanna/ubt_kdata_0005/pub_rapidminer/aggregation/")
Fileslist=list.files(datapath,pattern=".Rdata",full.names=T)
Fileslist_short=list.files(datapath,full.names=F,glob2rx(c("*Rdata","day")))
# Defines geom_boxplot_noOutliers(), a boxplot geom without outlier points.
source("/home/hanna/Documents/Projects/IDESSA/Precipitation/1_comparisonML/additionalScripts/scriptsForPublication/geom_boxplot_noOutliers.R")
# Load every .Rdata file and assign its object to a variable named after the
# file with the trailing ".Rdata" (6 characters) stripped.
for (i in 1:length(Fileslist)){
assign(substr(Fileslist_short[i],1,nchar(Fileslist_short[i])-6),get(load(Fileslist[i])))
}
# Long-format table for the 24h aggregation: one row per score value,
# labelled with score name (MAE/ME/RMSE/Rsq), model name, and period.
day24=data.frame("VALUE"=c(unlist(MAE_day_24),unlist(ME_day_24),unlist(RMSE_day_24),unlist(RSQ_day_24)),
"SCORE"=c(rep("MAE",length(MAE_day_24[[1]])*4),rep("ME",length(MAE_day_24[[1]])*4),
rep("RMSE",length(MAE_day_24[[1]])*4),rep("Rsq",length(MAE_day_24[[1]])*4)),
"MODEL"=rep(toupper(names(MAE_day_24)),rep(length(unlist(MAE_day_24))/4,4)),
"TIME"=rep("24h",length(unlist(MAE_day_24))*4))
# Same reshaping for the 3-hourly daytime scores.
day3=data.frame("VALUE"=c(unlist(MAE_day_3),unlist(ME_day_3),unlist(RMSE_day_3),unlist(RSQ_day_3)),
"SCORE"=c(rep("MAE",length(MAE_day_3[[1]])*4),rep("ME",length(MAE_day_3[[1]])*4),
rep("RMSE",length(MAE_day_3[[1]])*4),rep("Rsq",length(MAE_day_3[[1]])*4)),
"MODEL"=rep(toupper(names(MAE_day_3)),rep(length(unlist(MAE_day_3))/4,4)),
"TIME"=rep("DAY 3h",length(unlist(MAE_day_3))*4))
# ... and for the 3-hourly "in between" scores.
inb3=data.frame("VALUE"=c(unlist(MAE_inb_3),unlist(ME_inb_3),unlist(RMSE_inb_3),unlist(RSQ_inb_3)),
"SCORE"=c(rep("MAE",length(MAE_inb_3[[1]])*4),rep("ME",length(MAE_inb_3[[1]])*4),
rep("RMSE",length(MAE_inb_3[[1]])*4),rep("Rsq",length(MAE_inb_3[[1]])*4)),
"MODEL"=rep(toupper(names(MAE_inb_3)),rep(length(unlist(MAE_inb_3))/4,4)),
"TIME"=rep("INB 3h",length(unlist(MAE_inb_3))*4))
# ... and for the 3-hourly nighttime scores.
night3=data.frame("VALUE"=c(unlist(MAE_night_3),unlist(ME_night_3),unlist(RMSE_night_3),unlist(RSQ_night_3)),
"SCORE"=c(rep("MAE",length(MAE_night_3[[1]])*4),rep("ME",length(MAE_night_3[[1]])*4),
rep("RMSE",length(MAE_night_3[[1]])*4),rep("Rsq",length(MAE_night_3[[1]])*4)),
"MODEL"=rep(toupper(names(MAE_night_3)),rep(length(unlist(MAE_night_3))/4,4)),
"TIME"=rep("NIGHT 3h",length(unlist(MAE_night_3))*4))
# Stack all four aggregation periods; fix the model display order.
aggregatedData=rbind(day3,inb3,night3,day24)
aggregatedData$MODEL=factor(aggregatedData$MODEL,levels=toupper(names(MAE_inb_3)))
# Boxplots without outlier points, one panel per SCORE x TIME combination,
# free y scales so each score uses its own range.
bp.RAINOUT <- ggplot(aggregatedData, aes(x = MODEL, y = VALUE))+
# geom_boxplot_noOutliers(aes(fill =MODEL),outlier.size = NA) + #use colors?
geom_boxplot_noOutliers(outlier.size = NA) +
theme_bw() +
facet_grid(SCORE ~ TIME,scales = "free")+
# scale_fill_manual(values = c("RF" = " lightcyan2", "NNET" = "lightblue","AVNNET" = "lightcyan3", "SVM" = "lightsteelblue"))+
xlab("") + ylab("")+
theme(legend.title = element_text(size=16, face="bold"),
legend.text = element_text(size = 16),
legend.key.size=unit(1,"cm"),
strip.text.y = element_text(size = 16),
strip.text.x = element_text(size = 16),
axis.text=element_text(size=14),
panel.margin = unit(0.5, "lines"),
panel.background = element_rect(fill = NA, colour = NA),
plot.background = element_rect(fill = NA, colour = NA))#+)
pdf(paste0(datapath,"/bp.Aggregation.pdf"),width=14,height=14)
#print(bp.RAINOUT)
#dev.off()
#with line for ME
print(bp.RAINOUT)
# Fraction of panel height where y = 0 falls, taken from panel range 5.
# NOTE(review): the [[5]] index and the `$panel$ranges` accessor are tied to
# a specific (old) ggplot2 version's build structure — verify on upgrade.
y_at <- 1 - ggplot_build(bp.RAINOUT)$panel$ranges[[5]]$y.range[2] /
(ggplot_build(bp.RAINOUT)$panel$ranges[[5]]$y.range[2] -
(ggplot_build(bp.RAINOUT)$panel$ranges[[5]]$y.range[1])
#current.vpTree()
# Draw a grey horizontal zero line inside each ME panel; viewport names are
# specific to this ggplot2/gtable layout.
seekViewport(name = "panel.6-10-6-10")
#grid.rect(gp=gpar(fill="black"))
grid.lines(y = y_at, gp = gpar(lty = 1, lwd = 3,col="grey"))
upViewport(0)
seekViewport(name = "panel.6-4-6-4")
#grid.rect(gp=gpar(fill="black"))
grid.lines(y = y_at, gp = gpar(lty = 1, lwd = 3,col="grey"))
upViewport(0)
seekViewport(name = "panel.6-6-6-6")
grid.lines(y = y_at, gp = gpar(lty = 1, lwd = 3,col="grey"))
upViewport(0)
seekViewport(name = "panel.6-8-6-8")
grid.lines(y = y_at, gp = gpar(lty = 1, lwd = 3,col="grey"))
upViewport(0)
print(bp.RAINOUT, newpage = FALSE)
dev.off()
|
36fa6477ec9f40858be7269c3c47ad718c8ac263
|
74fec5abcb297a6415a5233bce1b6b647c7cc505
|
/4/1_FigS2_TAbS1_Fig3.R
|
e3bf39b95751da8192d3f467cdc595b7aaa30e5d
|
[] |
no_license
|
GiovanniCarapezza/KING-REX
|
f6e311d13bdb604e86614aec7860fb0391644d58
|
f06a8e0eec4da0a784b0e7846882830d24a19993
|
refs/heads/master
| 2020-04-21T12:46:29.726979
| 2019-02-07T15:02:41
| 2019-02-07T15:02:41
| 169,574,440
| 0
| 1
| null | null | null | null |
UTF-8
|
R
| false
| false
| 7,011
|
r
|
1_FigS2_TAbS1_Fig3.R
|
library(stringi)
library(pheatmap)
library(RColorBrewer)
# Figure S2 / Table S1 / Figure 3 for the KING-REX dilution experiment.
# To execute this script you must first run the script in folder 2
# (it produces the normalized count tables read below).
rm(list = ls())
# substitute your path to the results folder
path_to_results="~/Scrivania/KING-REX data and scripts for BMC2"
# set working directory to this source file location
setwd(paste(path_to_results,"/Results/4",sep=""))
# path to the normalized counts produced by script 2
path_input_data=paste(sep="",path_to_results,"/Results/2/normalized_counts_KING-REX")
# output folder (created below if missing)
path_output=paste("output",sep="")
if(!dir.exists(path_output)){dir.create(path_output,recursive = T)}
# read kinase list, sorted by kinase name (rownames)
kin_list=read.table(paste(path_to_results,"/Results/1/kinase_list_and_info.txt",sep=""),sep="\t",stringsAsFactors=F,row.names = 1,header = T)
kin_list=kin_list[sort(rownames(kin_list)),]
# read the sample annotation file (Sample -> Tissue)
input=read.table(paste(path_to_results,"/Results/annotations_KING-REX.txt",sep=""),sep="\t",stringsAsFactors=F,header = T)
# load normalized counts, both log2 and linear scale
kingrex_genes=read.table(file=paste(path_input_data,"normalized_counts_log2.txt",sep="/"),sep="\t",header=T,row.names=1)
kingrex_genes_not_log2=read.table(file=paste(path_input_data,"normalized_counts.txt",sep="/"),sep="\t",header=T,row.names=1)
# single-column annotation data.frame: rownames = sample, V1 = tissue
annotation<-as.data.frame(cbind(input$Tissue),row.names=input$Sample,stringsAsFactors = F)
# select the dilution-series samples: pure KARPAS299 and U118MG plus the
# KU/UK mixture samples
indexes=c(which(stri_startswith_fixed(rownames(annotation),"KAR")),
which(stri_startswith_fixed(rownames(annotation),"U118")),
which(stri_startswith_fixed(rownames(annotation),"KU")),
which(stri_startswith_fixed(rownames(annotation),"UK")))
annotation<-as.data.frame(cbind(input$Tissue[indexes]),row.names=input$Sample[indexes],stringsAsFactors = F)
# colors for the tissue annotation bar of the heatmap
ann_colors=list(
V1=c(Mix="blue",
NERVOUS_SYSTEM="purple",LYMPHOMA="lightblue")
)
# keep only the selected samples in both count tables
kingrex_genes=kingrex_genes[,colnames(kingrex_genes) %in% rownames(annotation)]
kingrex_genes_not_log2=kingrex_genes_not_log2[,colnames(kingrex_genes_not_log2) %in% rownames(annotation)]
# distance matrix of figure S2 on dilutions (dist() default: euclidean over
# the log2 counts, samples as observations via t())
distscn <- dist(t(kingrex_genes))
mat <- as.matrix(distscn)
# blue color ramp, reversed below so darker = more similar
hmcol <- colorRampPalette(brewer.pal(9, "Blues"))(255)
# scale image size with the number of samples
wd = 35*nrow(annotation)
jpeg(paste(path_output,"FigS2_distance_matrix_dilutions.jpeg",sep="/"),width = wd+400,height = wd+150)
pheatmap(mat,fontsize = 24,cellwidth = 20,cellheight = 20,
clustering_distance_rows=distscn,
annotation_col=annotation,
annotation_colors = ann_colors,
clustering_distance_cols=distscn,
col = rev(hmcol))
dev.off()
### Table S1 A: genes OFF in KARPAS299 (mean log2 < 1 across replicates)
### but expressed in U118MG — tracked across the dilution series
hmcol <- colorRampPalette(brewer.pal(9, "Blues"))(255)
sel_0=apply(kingrex_genes[,c("KARPAS299_A","KARPAS299_B")],MARGIN = 1,mean)<1
U118MG=kingrex_genes[sel_0,]
# keep genes with mean expression > 5 in the U118MG replicates
sel_1=apply(U118MG[,c("U118MG_A","U118MG_B")],MARGIN = 1,mean)>5
U118MG=U118MG[sel_1,]
# order by expression in U118MG replicate A, highest first
U118MG=U118MG[order(U118MG$U118MG_A,decreasing = T),]
# column order: pure U118MG -> mixtures -> pure KARPAS299
U118MG=U118MG[,c("U118MG_A","U118MG_B",
"UK8712_A","UK8712_B",
"UK7525_A","UK7525_B",
"KU50_A","KU50_B",
"KU7525_A","KU7525_B",
"KU8712_A","KU8712_B",
"KARPAS299_A","KARPAS299_B")]
#heatmap sized to the number of genes/samples
hg = 30*nrow(U118MG)+100
wd = 30*ncol(U118MG)+100
jpeg(paste(path_output,"TabS1_A.jpeg",sep="/"),width = wd,height = hg)
# NOTE(review): `distanza` is never defined in this script. The call works
# only because cluster_rows/cluster_cols are F, so R's lazy evaluation never
# touches the clustering_distance_* arguments — enabling clustering here
# would error. Confirm intent / define distanza before changing.
pheatmap(U118MG,show_rownames = T,
color = hmcol,
clustering_distance_rows = distanza,
clustering_distance_cols = distanza,
scale="none",
display_numbers = round(U118MG,1),
fontsize_number=10,
border_color="white",
number_color="black",
cellwidth =22,
cellheight = 22,
cluster_rows=F,
cluster_cols=F)
dev.off()
### Table S1 B: mirror analysis — genes OFF in U118MG (mean log2 < 1)
### but expressed in KARPAS299
sel_0=apply(kingrex_genes[,c("U118MG_A","U118MG_B")],MARGIN = 1,mean)<1
KARPAS=kingrex_genes[sel_0,]
## keep genes with mean expression > 5 in the KARPAS299 replicates
sel_1=apply(KARPAS[,c("KARPAS299_A","KARPAS299_B")],MARGIN = 1,mean)>5
KARPAS=KARPAS[sel_1,]
# order by expression in KARPAS299 replicate A, highest first
KARPAS=KARPAS[order(KARPAS$KARPAS299_A,decreasing = T),]
# column order: pure KARPAS299 -> mixtures -> pure U118MG
KARPAS=KARPAS[,c("KARPAS299_A","KARPAS299_B",
"KU8712_A","KU8712_B",
"KU7525_A","KU7525_B",
"KU50_A","KU50_B",
"UK7525_A","UK7525_B",
"UK8712_A","UK8712_B",
"U118MG_A","U118MG_B")]
# heatmap sized to the number of genes/samples
hg = 30*nrow(KARPAS)+100
wd = 30*ncol(KARPAS)+100
jpeg(paste(path_output,"TabS1_B.jpeg",sep="/"),width = wd,height = hg)
# NOTE(review): `distanza` is undefined (see Table S1 A heatmap) — only
# unevaluated because clustering is disabled.
pheatmap(KARPAS,show_rownames = T,
color = hmcol,
clustering_distance_rows = distanza,
clustering_distance_cols = distanza,
scale="none",
display_numbers = round(KARPAS,1),
fontsize_number=10,
border_color="white",
number_color="black",
cellwidth =22,
cellheight = 22,
cluster_rows=F,
cluster_cols=F)
dev.off()
# write both gene tables to tab-separated text files
write.table(x = U118MG,file = paste(path_output,"TabS1_A.txt",sep="/"),append = F,quote = F,sep = "\t",row.names = T,col.names = T)
write.table(x = KARPAS,file = paste(path_output,"TabS1_B.txt",sep="/"),append = F,quote = F,sep = "\t",row.names = T,col.names = T)
### R2 / Figure 3: theoretical versus measured gene expression values
# Scatterplot of b against a with a fitted regression line, annotated with
# the R^2 of lm(b ~ a). x, y and mn are the axis labels and plot title.
# Axis limits are fixed to [0, 17.5] to match the figure in the paper.
R2=function(a,b,x,y,mn){
  fit <- lm(b ~ a)
  plot(a, b,
       pch = 20, col = "blue",
       xlab = x, ylab = y, main = mn,
       cex.main = 2, cex.axis = 2, cex.lab = 1.6,
       xlim = c(0, 17.5), ylim = c(0, 17.5))
  abline(fit, col = "red")
  fit_summary <- summary(fit)
  # "R^2:" label and its rounded value, placed near the lower right of the data
  text(x = max(a) - 2, y = min(b) + 0.2,
       expression(paste("", R^2, ": ", sep = "")), cex = 2)
  text(x = max(a) - 0.8, y = min(b) + 0.1,
       paste(round(fit_summary$r.squared, 2)), cex = 2)
}
# Theoretical expression value for a mixed sample: the weighted average of
# the two pure-sample expression vectors, where perc1 is the fraction of
# sample `a` and the remainder (1 - perc1) the fraction of sample `b`.
calcola_expr=function(a,b,perc1){
  a * perc1 + b * (1 - perc1)
}
# mean of measured values across the A/B replicates for each sample
KARPAS299=rowMeans(kingrex_genes_not_log2[,c("KARPAS299_A","KARPAS299_B")])
U118MG=rowMeans(kingrex_genes_not_log2[,c("U118MG_A","U118MG_B")])
KU8712=rowMeans(kingrex_genes_not_log2[,c("KU8712_A","KU8712_B")])
KU7525=rowMeans(kingrex_genes_not_log2[,c("KU7525_A","KU7525_B")])
KU50=rowMeans(kingrex_genes_not_log2[,c("KU50_A","KU50_B")])
UK7525=rowMeans(kingrex_genes_not_log2[,c("UK7525_A","UK7525_B")])
UK8712=rowMeans(kingrex_genes_not_log2[,c("UK8712_A","UK8712_B")])
# log2(value + 1) of the measured mixture values, concatenated over mixtures
x=c(log(x=(KU8712+1),base=2),
log(x=(KU7525+1),base=2),
log(x=(KU50+1),base=2),
log(x=(UK7525+1),base=2),
log(x=(UK8712+1),base=2))
# theoretical values from the pure samples at the matching mixing fractions
# (0.875/0.75/0.5/0.25/0.125 KARPAS299), then the same log2 transform —
# order must match the measured vector x above
y=c(log(x=(calcola_expr(KARPAS299,U118MG,0.875)+1),base=2),
log(x=(calcola_expr(KARPAS299,U118MG,0.75)+1),base=2),
log(x=(calcola_expr(KARPAS299,U118MG,0.5)+1),base=2),
log(x=(calcola_expr(KARPAS299,U118MG,0.25)+1),base=2),
log(x=(calcola_expr(KARPAS299,U118MG,0.125)+1),base=2))
# scatterplot with regression line and R^2 annotation, saved as Figure 3
s1="Measured"
s2="Theoretical"
mn = "Measured vs. Theoretical"
jpeg(filename = paste(path_output,"/Fig3_",s1,"_",s2,"_scatterplot.jpeg",sep=""),width = 700,height = 600)
R2(x,y,s1,s2,mn)
dev.off()
|
fa51f94c91bcc8445e238c466e429e2da3842693
|
4d2aa86f49e4525f8c03d3c8f01fbd171071c511
|
/script_matrix.r
|
f8f2c21f1d81bde95967555f546fed0bcc5c1edc
|
[
"MIT"
] |
permissive
|
armbrustlab/ssPopModel_respiration
|
53b3fd349d35a4556edbdf2968d786173e63655c
|
95cfc1bc2620e005cadde5bf51dd0bbb783cd56c
|
refs/heads/master
| 2020-03-16T20:41:43.963963
| 2018-08-08T22:32:28
| 2018-08-08T22:32:28
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,391
|
r
|
script_matrix.r
|
# Merge ssPopModel sensitivity-test outputs into one table (DF), one row per
# model run, keyed by size class and time step (dt). The matrices built
# below (create.matrix) reshape each fitted parameter for plotting.
library(ssPopModel)
library(gplots)
#John
# Hard-coded paths to the sensitivity-test data and the git checkout.
path.to.data <- "~/ssPopModel_sensitivity_test/ssPopModel_sensitivity_test_data"
path.to.git.repository <- "~/ssPopModel_sensitivity_test"
setwd(path.to.git.repository)
## MERGE MODEL OUTPUT (dt)
# File names are parsed below as "_"-separated fields: field 1 = origin,
# field 3 = size class, field 4 = dt. NOTE(review): inferred from the
# indexing — confirm against the actual output file names.
list.output <- list.files("output", "size",full.names=T)
DF <- NULL
for(path.distribution in list.output){
#path.distribution <- list.output[1]
print(path.distribution)
# load() is expected to provide the `model1` object used below
load(path.distribution)
file.name <- unlist(list(strsplit(basename(path.distribution), "_")))
origin <- file.name[1]
size <- as.numeric(file.name[3])
dt <- as.numeric(file.name[4])
# if(origin == "biomass"){
# params <- model2[,2][[1]]
# gr <- model2[,2][[2]]
# }
if(origin == "size"){
params <- model1[,2][[1]]
gr <- model1[,2][[2]]
}
# one row per run: origin, size, dt, fitted parameters, total growth rate
df <- data.frame(origin, size, dt, params,gr=sum(gr,na.rm=T))
DF <- data.frame(rbind(DF, df))
}
# A few of the outputs were from old runs, this gets rid of them
DF <- na.omit(DF)
# # Some of the GR's came out negative
# DF <- DF[(DF$gr >= 0),]
# Building the matrix to plot
# GR
# Get the number of unique size classes and dt's — these globals are used
# by create.matrix() for the matrix dimensions and dimnames
size <- sort(unique(DF$size))
dt <- sort(unique(DF$dt))
# Spread one numeric column of DF into a length(dt) x length(size) matrix.
#
# DF rows must be ordered size-major (all dt values for the first size
# class, then all dt values for the next), matching the order the merge
# loop above accumulated them in. Negative entries are clamped to 0
# ("makes negative entries 0" in the original).
#
# Relies on the globals `dt` and `size` computed above for the matrix
# dimensions and dimnames.
#
# DF    : data.frame with one row per (size, dt) model run
# index : position of the column in DF to reshape
# Returns a numeric matrix with rownames = dt and colnames = size.
#
# FIX: removed the unused `name` variable (the caller recomputes it),
# stopped repurposing the `index` argument as a cell counter, stopped
# shadowing base::matrix, and added an explicit length check where the
# original would have crashed on NA comparison for short input.
create.matrix <- function(DF, index) {
  parameter <- colnames(DF)[index]
  n_cells <- length(dt) * length(size)
  stopifnot(nrow(DF) >= n_cells)
  # clamp negative entries (artifacts of failed/old runs) to 0
  vals <- pmax(DF[[parameter]][seq_len(n_cells)], 0)
  # column-major fill reproduces the original size-outer / dt-inner loop
  out <- matrix(vals, nrow = length(dt), ncol = length(size))
  rownames(out) <- dt
  colnames(out) <- size
  out
}
# creates a matrix for each parameter: columns 4:9 of DF (the fitted
# parameters plus gr) each become a "<column>.matrix" object in the global
# environment via assign()
for (i in c(4:9)) {
matrix <- create.matrix(DF, i)
name <- paste0(colnames(DF)[i], ".matrix")
# rownames(matrix) <- dt
# colnames(matrix) <- size
assign(name, matrix)
}
# 3x2 grid of image() heatmaps, one per parameter matrix (dt on x, size on y)
par(mfrow=c(3,2),pty='m')
image(gmax.matrix, xlab="dt", ylab="size")
image(dmax.matrix)
image(b.matrix)
image(E_star.matrix)
image(resnorm.matrix)
image(gr.matrix)
|
ec8d774b205c79ecb37e4c0e75a3f24d02d57839
|
8b24d2c0f284e48e74495be11c6ba7f2bf8578aa
|
/global.R
|
152d6b31f294d3d84c9f8ae2988ca2e86fb96228
|
[] |
no_license
|
Janzeero-PhD/Chornobyl-forest-dynamics
|
792452199162b3815e57f110bf7f23774a622176
|
28900cc98508f45f3a3662a66ef6ee6fc8724139
|
refs/heads/main
| 2023-02-04T11:39:55.904255
| 2020-12-27T13:29:18
| 2020-12-27T13:29:18
| 319,792,995
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,680
|
r
|
global.R
|
# Shiny app globals for the Chornobyl Exclusion Zone forest-dynamics app:
# loads the forest-mask rasters (1986-2016), the zone border shapefile, the
# disturbance year/agent rasters, and builds the color palettes for leaflet
# legends. Sourced once at app startup; all objects defined here are shared
# by ui.R / server.R.
library(tidyverse)
library(raster)
library(sp)
library(shiny)
library(shinydashboard)
library(leaflet)
library(viridis)
library(rgdal)
### FOREST MASK DYNAMICS FILES
# raster files of forest mask dynamics, one per decade
Landsat.RF.2016.fact <- raster('rasters/RF_mask_2016.tif')
Landsat.RF.2006.fact <- raster('rasters/RF_mask_2006.tif')
Landsat.RF.1996.fact <- raster('rasters/RF_mask_1996.tif')
Landsat.RF.1986.fact <- raster('rasters/RF_mask_1986.tif')
# shape layer: ChEZ (exclusion zone) border line
ChEZ.border <- readOGR("CheZ_border_line.shp",
layer = "CheZ_border_line")
# reproject to EPSG:4326 (WGS84) so the layer can be drawn on the web map
ChEZ.border <- spTransform(ChEZ.border,
CRS("+init=epsg:4326"))
### DISTURBANCE DYNAMICS FILES
# year-of-disturbance raster; ratify() marks it as categorical
yod.map <- raster('rasters/NBR_years_filtered.tif')
#yod.map <- aggregate(yod.map, 2, fun = 'modal')
#writeRaster(yod.map, 'NBR_years_filtered.tif', format = 'GTiff', overwrite = T)
yod.map <- ratify(yod.map)
# disturbance-agent raster (categorical)
agents.map <- raster("rasters/RF_agents_filtered.tif")
#agents.map <- aggregate(agents.map, 2, fun = 'modal')
#writeRaster(agents.map, 'RF_agents_filtered.tif', format = 'GTiff', overwrite = T)
agents.map <- ratify(agents.map)
# attach Ukrainian agent labels (biotic, felling, fires, windthrow)
agents.levels <- levels(agents.map)[[1]]
agents.levels$agents_ua <- c('біотичні', "рубки", "пожежі", "буреломи")
levels(agents.map) <- agents.levels
### palettes for legends:
# one fixed color per disturbance agent, in level order
agents_pal <- c('#4dac26', '#7b3294', '#d7191c', '#2c7bb6')
pal_agents <- colorFactor(
agents_pal,
values(agents.map),
reverse = FALSE,
na.color = 'transparent'
)
# continuous viridis palette over the disturbance years
pal_yod <- colorNumeric(palette = "viridis", values(yod.map), na.color = 'transparent')
|
da1e210fed594350599024173d098adcb0624da2
|
28cee2095ea67fe87a0ad4d70ffac60a4e105717
|
/Lec12_3_SVM Regression Example.R
|
3674e2c933802d379bb8becf79acf75c20ae1f06
|
[] |
no_license
|
lydia126/11_R_SVM
|
45c61ce24fb92aceadee9249a3e12e0e6a4da1ff
|
9376750039349d86fc71db94e82fb8f29a5f89e4
|
refs/heads/main
| 2023-06-12T03:41:35.687057
| 2021-07-05T07:04:52
| 2021-07-05T07:04:52
| 383,046,063
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,514
|
r
|
Lec12_3_SVM Regression Example.R
|
# SVM regression examples with the e1071 package.
# Part 1: Boston housing (MASS::Boston), medv ~ all predictors.
# Part 2: Paldang reservoir chlorophyll-a — requires "Paldang.csv" in the
#         working directory.
# FIX: install.packages("forecast") was called unconditionally, re-downloading
# the package on every run; now guarded with requireNamespace(). Also pass
# lwd as a number instead of the string "1".

# Required libraries
library(e1071)
library(caret)

# Load data and partition (80% train / 20% test; seeded for reproducibility)
boston = MASS::Boston
set.seed(123)
indexes = createDataPartition(boston$medv, p = .8, list = FALSE)
train = boston[indexes, ]
test = boston[-indexes, ]

# SVM regression (e1071 default: eps-regression)
model_reg = svm(medv~., data=train)
print(model_reg)
pred = predict(model_reg, test)

# Observed test values as red points, predictions as a blue line, by index
x=1:length(test$medv)
plot(x, test$medv, pch=18, col="red")
lines(x, pred, lwd=1, col="blue")

# accuracy check: MAE, RMSE and traditional R-squared on the test set
mae = MAE(test$medv, pred)
rmse = RMSE(test$medv, pred)
r2 = R2(test$medv, pred, form = "traditional")
cat(" MAE:", mae, "\n", "RMSE:", rmse, "\n", "R-squared:", r2)

# Baseline comparison: plain linear regression accuracy via forecast::accuracy
if (!requireNamespace("forecast", quietly = TRUE)) {
  install.packages("forecast")
}
library(forecast)
lm1 <- lm(medv~., data=train)
accuracy(lm1)

# paldang Chl-a (NOTE: this 67/33 partition is not re-seeded, so the split
# differs between runs)
paldang <- read.csv("Paldang.csv")
indexes = createDataPartition(paldang$Chla, p = .67, list = FALSE)
train = paldang[indexes, ]
test = paldang[-indexes, ]

# SVM regression on chlorophyll-a
model_reg = svm(Chla~., data=train)
print(model_reg)
pred = predict(model_reg, test)
x=1:length(test$Chla)
plot(x, test$Chla, pch=18, col="red")
lines(x, pred, lwd=1, col="blue")

# accuracy check
mae = MAE(test$Chla, pred)
rmse = RMSE(test$Chla, pred)
r2 = R2(test$Chla, pred, form = "traditional")
cat(" MAE:", mae, "\n", "RMSE:", rmse, "\n", "R-squared:", r2)
|
e64a1fff73afcb3b8c02638404a58ae3cae71079
|
ba3e1a64b6a56469bb89fcb697acf5cbb51a9aca
|
/bigdata_ai_study/chapter_6/11.R
|
30331a82f11f0ebaa1f49cc54ed1626c668a4a2b
|
[] |
no_license
|
himitery/dankook
|
c5a2df9c989a31c4470212e7f6165ea4292f0b62
|
9f5208c7d9f2001794c67f4f27f45490ea260d4d
|
refs/heads/master
| 2023-06-08T21:23:22.394774
| 2021-06-27T20:00:39
| 2021-06-27T20:00:39
| 330,082,010
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 228
|
r
|
11.R
|
# Scatterplot of the built-in Orange dataset: trunk circumference against
# age, with a distinct plotting symbol and color for each of the five trees.

# Number of distinct trees (auto-printed at top level)
length(unique(Orange$Tree))

# Keep just the two numeric columns we plot
data <- Orange[, c("age", "circumference")]

# Factor codes 1..5 identify each tree; they index both pch and color
point <- as.numeric(Orange[["Tree"]])
color <- c("red", "orange", "green", "blue", "pink")

plot(data, pch = point, col = color[point])
|
1097d2cd619e272ad38dfe4b77700f877e5ea572
|
f950be564a79a3e16db02320d555002c9f343783
|
/4.2_RichardsonExtrapolation.R
|
f84ae22e17b979d2f9002e164e8a15981af27c0c
|
[] |
no_license
|
felix-leo/math311
|
0c3c59bfe2ef44897ac2bcf9c027ae9a9d9332e2
|
d3345b8c00338c73cabf402baff012f01a340848
|
refs/heads/master
| 2020-05-03T08:27:25.930800
| 2019-05-02T21:08:15
| 2019-05-02T21:08:15
| 178,526,937
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 910
|
r
|
4.2_RichardsonExtrapolation.R
|
############################################################################
# this is formula works for the even power of h.
# which is O^(2j)
############################################################################
# Richardson extrapolation table for f'(x0).
#
# Builds an n x n lower-triangular table: column 1 holds three-point central
# difference estimates (Eq. 4.5) with steps h, h/2, h/4, ...; each further
# column j refines column j-1, cancelling the next even-power error term
# (valid because the central difference error expands in even powers of h).
# tabl[n, n] is the most accurate estimate; the upper triangle stays NA.
#
# x0 : point at which to approximate the derivative
# h  : initial step size
# f  : function to differentiate
# n  : number of extrapolation levels (n >= 1)
#
# FIX: the original ran `for (i in 2:n)` unconditionally, so n == 1 made the
# loop iterate over c(2, 1) and write out of bounds; now guarded.
richardson = function (x0, h, f, n) {
  # Side effect kept from the original: raise the session's print precision
  # so the caller sees all extrapolated digits when printing the table.
  options(digits=12)
  # Three Point Central Difference (Eq. 4.5)
  df = function(x, h) { (f(x + h) - f(x - h)) / (2 * h) }
  tabl = matrix(NA_real_, n, n)
  tabl[1, 1] = df(x0, h)
  h = h / 2
  if (n >= 2) {
    for (i in 2:n) {
      tabl[i, 1] = df(x0, h)
      for (j in 2:i) {
        # Richardson update: cancel the O(h^(2(j-1))) error term
        tabl[i, j] = tabl[i, j-1] + (tabl[i, j-1] - tabl[i-1, j-1]) / (4^(j-1) - 1)
      }
      h = h / 2
    }
  }
  return(tabl)
}
# ************************************
# Example: f(x) = x + e^x, so f'(0) = 1 + e^0 = 2 exactly.
f=function(x) {x + exp(x)}
# 3-level table starting at h = 0.4; the diagonal converges to 2
# (recorded output below).
richardson(0, 0.4, f, 3)
# [,1] [,2] [,3]
# [1,] 2.02688081451 NA NA
# [2,] 2.00668001271 1.9999464121 NA
# [3,] 2.00166750020 1.9999966627 2.00000001274
# ************************************
|
0d2f038e8c15b2693334da10504cfcdf4280c990
|
95757c0b14be3bdacbea624e1cecd93a3aed926d
|
/man/find_incomplete_censuses.Rd
|
07bacbb72984f2c06908aa628904d8e6b030fb45
|
[
"MIT"
] |
permissive
|
emchristensen/portalr
|
f21a815af8c34c2d9c323aa6473c33e81fed3c33
|
50ac5db9ba493c13555f27263b2c67e7e87f7ee6
|
refs/heads/master
| 2021-07-24T21:41:41.572332
| 2018-10-06T23:28:41
| 2018-10-06T23:28:41
| 107,721,556
| 0
| 0
|
MIT
| 2018-07-13T18:14:25
| 2017-10-20T20:06:43
|
R
|
UTF-8
|
R
| false
| true
| 747
|
rd
|
find_incomplete_censuses.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/data_processing.R
\name{find_incomplete_censuses}
\alias{find_incomplete_censuses}
\title{Period code for incomplete censuses}
\usage{
find_incomplete_censuses(trapping_table, min_plots, min_traps)
}
\arguments{
\item{trapping_table}{Data table that contains sampled column (1 for sampled, 0 for unsampled)}
\item{min_plots}{minimum number of plots in a census for a census to count as sampled}
\item{min_traps}{minimum number of traps on a plot for a census to count as sampled}
}
\value{
Data.table of period codes when not all plots were trapped.
}
\description{
Determines incomplete censuses by finding dates when some plots were trapped, but others were not.
}
|
3628f12a84f8e26c39540712f6fe908050832274
|
1fbce482fa0cd8117e0a80e38323c5eb5f67ca7a
|
/R/annoByGene.R
|
49a92942c2f3f78b2ccddd470b5eaa2b5a794d8d
|
[] |
no_license
|
bioinfo16/RIPAT
|
adf1ef88a37e033d3b4961272d8846370bb685c4
|
4e736b60e9bc2695a67ba13e9a50ed56c9c4d38a
|
refs/heads/master
| 2021-02-06T21:47:10.233559
| 2020-10-13T06:09:25
| 2020-10-13T06:09:25
| 243,133,840
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 13,732
|
r
|
annoByGene.R
|
#' @title Annotate integration sites by genes and TSSs.
#'
#' @description
#' Annotate vector integration sites by gene data.
#'
#' @usage
#' annoByGene(hits, ran_hits = NULL,
#' mapTool = 'blast',
#' organism = 'GRCh37',
#' interval = 5000,
#' range = c(-20000, 20000),
#' outPath = getwd(),
#' outFileName = paste0('RIPAT', round(unclass(Sys.time()))))
#'
#' @param hits GR object. This object made by \code{\link{makeExpSet}} function.
#' @param ran_hits GR object or list. This object is output of \code{\link{makeRanSet}} function.
#' @param mapTool Character. Function uses two types of object\cr from BLAST and BLAT.
#' Default is 'blast'. If you want to use BLAT result, use 'blat'.
#' @param organism Character. This function can run by two versions of organisms\cr
#' such as GRCh37, GRCh38 (Human). Default is 'GRCh37'.
#' @param interval Integer. This number means interval number\cr for
#' distribution analysis. Default is 5000.
#' @param range Integer array. The range of highlight region for analysis.\cr
#' Default range is \code{c(-20000, 20000)}.
#' @param outPath String. Plots are saved in this path. \cr Default value is R home directory.
#' @param outFileName Character. Attached ID to the result file name.
#'
#' @return Return a result list that is made up of insertion and distribution result tables\cr and GenomicRange object of Gene and TSS data.
#'
#' @examples
#' data(blast_obj); data(gene_exam_db); data(tss_exam_db)
#' saveRDS(gene_exam_db,
#' paste0(system.file("extdata", package = 'RIPAT'),
#' '/GRCh37_gene.rds'))
#' saveRDS(tss_exam_db,
#' paste0(system.file("extdata", package = 'RIPAT'),
#' '/GRCh37_TSS.rds'))
#'
#' blast_gene = annoByGene(hits = blast_obj, ran_hits = NULL,
#' outFileName = 'blast_res')
#'
#' @export
annoByGene = function(hits, ran_hits = NULL, mapTool = 'blast', organism = 'GRCh37', interval = 5000, range = c(-20000, 20000), outPath = getwd(), outFileName = paste0('RIPAT', round(unclass(Sys.time())))){
message('----- Annotate integration sites. (Time : ', date(), ')')
message('- Validate options')
if(length(which(c('blast', 'blat') %in% mapTool)) == 0){
stop("[ERROR] Please confirm the alignment tool name.\n----- This process is halted. (Time : ", date(), ")\n")
}
if(length(which(c('GRCh37', 'GRCh38') %in% organism)) == 0){
stop("[ERROR] Please use GRCh37/GRCh38 data.\n----- This process is halted. (Time : ", date(), ")\n")
}
if(stringr::str_ends(outPath, pattern = '/')){outPath = stringr::word(outPath, start = 1, end = nchar(outPath), sep = '')}
if(range[1] - range[2] >= 0){
stop("[ERROR] Please check distribution range.\n----- This process is halted. (Time : ", date(), ")\n")
} else {ranges = seq(from = range[1], to = range[2], by = interval)}
message('- OK!')
message('- Load the dataset.')
dbPath = system.file("extdata", package = "RIPAT")
dbFile = paste0('/', organism, '_gene.rds')
dataTable = data.frame(readRDS(paste0(dbPath, dbFile)), stringsAsFactors = FALSE)
dataTable = cbind(dataTable, 'g_len' = abs(dataTable$end - dataTable$start) + 1)
dbFile2 = paste0('/', organism, '_TSS.rds')
dataTable_tss = data.frame(readRDS(paste0(dbPath, dbFile2)), stringsAsFactors = FALSE)
gr_genes = GenomicRanges::makeGRangesFromDataFrame(dataTable, keep.extra.columns = TRUE, ignore.strand = FALSE)
gr_tss = GenomicRanges::makeGRangesFromDataFrame(dataTable_tss, keep.extra.columns = TRUE, ignore.strand = FALSE)
message('- OK!')
message('- Find integration sites located in genes.')
inside_gene_only = as.data.frame(GenomicRanges::findOverlaps(hits[[1]], gr_genes, type = 'any', ignore.strand = TRUE), stringsAsFactors = FALSE)
only_res_que = data.frame(hits[[1]][inside_gene_only$queryHits,], stringsAsFactors = FALSE)[,c(1:3,6:8)]
only_res_sub = dataTable[inside_gene_only$subjectHits,]
inside_tab = unique(cbind(only_res_que, only_res_sub)[,c(5,1:4,6:14)])
names(inside_tab)[c(7:14)] = c('Target_gene', 'Target_ensembl_id', 'Target_gene_type', 'Target_chr', 'Target_start', 'Target_end', 'Target_strand', 'Target_gene_length')
message('- OK!')
message('- Calculate distance.')
strp = dataTable[which(dataTable$strand == '+'),]; strn = dataTable[which(dataTable$strand == '-'),]
only_hits_tab = data.frame(hits[[1]], stringsAsFactors = FALSE)
dist_only = lapply(c(1:nrow(only_hits_tab)), function(a){
x = only_hits_tab$start[a]; y = only_hits_tab$query[a]; z = only_hits_tab$seqnames[a]
cal1p = strp$start-x; cal2p = strp$end-x; cal1n = strn$end-x; cal2n = strn$start-x
cal1_ip = intersect(which(cal1p <= abs(range[1])), which(cal1p > 0)); cal2_ip = intersect(which(abs(cal2p) <= range[2]), which(cal2p < 0))
cal1_in = intersect(which(cal1n >= range[1]), which(cal1n < 0)); cal2_in = intersect(which(cal2n <= range[2]), which(cal2n > 0))
dat = data.frame(rbind(strp[c(cal1_ip, cal2_ip),], strn[c(cal1_in, cal2_in),]), dist = c(-c(cal1p[cal1_ip], cal2p[cal2_ip]), cal1n[cal1_in], cal2n[cal2_in]))
dat = unique(data.frame('query' = rep(y, nrow(dat)), dat))
dat = dat[which(dat$chr == z),]
dat = dat[order(abs(dat$dist), decreasing = FALSE),][1,]
cal_tss = dataTable_tss$start-x
cal_i1_tss = intersect(which(cal_tss <= abs(range[1])), which(cal_tss > 0))
cal_i2_tss = intersect(which(abs(cal_tss) <= range[2]), which(cal_tss < 0))
dat_tss = cbind(dataTable_tss[c(cal_i1_tss, cal_i2_tss),], dist = -c(cal_tss[cal_i1_tss], cal_tss[cal_i2_tss]))
dat_tss = unique(data.frame('query' = rep(y, nrow(dat_tss)), dat_tss))
dat_tss = dat_tss[which(dat_tss$chr == z),]
dat_tss = dat_tss[order(abs(dat_tss$dist), decreasing = FALSE),][1,]
return(list(dat, dat_tss))
})
hist_only_g = hist(unlist(lapply(dist_only, function(x){x[[1]]$dist/1000})), breaks = ranges/1000, plot = FALSE)
hist_only_t = hist(unlist(lapply(dist_only, function(x){x[[2]]$dist/1000})), breaks = ranges/1000, plot = FALSE)
message('- OK!')
if(!is.null(ran_hits)){
message('- Do random set analysis.')
randomSize = length(ran_hits); gr_random = ran_hits
random_set = data.frame(c(1:randomSize), data.frame(ran_hits), stringsAsFactors = FALSE)[,c(1:3)]; names(random_set) = c('Random', 'Random_chr', 'Random_pos')
inside_gene_ran = as.data.frame(GenomicRanges::findOverlaps(gr_random, gr_genes, type = 'any', ignore.strand = TRUE), stringsAsFactors = FALSE)
a = as.data.frame(gr_random[inside_gene_ran$queryHits,], stringsAsFactors = FALSE)
b = dataTable[inside_gene_ran$subjectHits,]
inside_ran_tab = unique(cbind(a,b)[,c(1,2,3,6:13)])
names(inside_ran_tab) = c('q_chr', 'q_start', 'q_end', 'symbol', 'ensembl_id', 'gene_type', 'chr', 'start', 'end', 'strand', 'percentage_gc_content')
dist_ran = lapply(c(1:nrow(random_set)), function(a){
x = as.numeric(random_set$Random_pos[a]); y = random_set$Random[a]; z = random_set$Random_chr[a]
cal1p = strp$start-x; cal2p = strp$end-x; cal1n = strn$end-x; cal2n = strn$start-x
cal1_ip = intersect(which(cal1p <= abs(range[1])), which(cal1p > 0)); cal2_ip = intersect(which(abs(cal2p) <= range[2]), which(cal2p < 0))
cal1_in = intersect(which(cal1n >= range[1]), which(cal1n < 0)); cal2_in = intersect(which(cal2n <= range[2]), which(cal2n > 0))
dat = data.frame(rbind(strp[c(cal1_ip, cal2_ip),], strn[c(cal1_in, cal2_in),]), dist = c(-c(cal1p[cal1_ip], cal2p[cal2_ip]), cal1n[cal1_in], cal2n[cal2_in]))
dat = unique(data.frame('query' = rep(y, nrow(dat)), dat))
dat = dat[which(dat$chr == z),]
dat = dat[order(abs(dat$dist), decreasing = FALSE),][1,]
cal_tss = dataTable_tss$start-x
cal_i1_tss = intersect(which(cal_tss <= abs(range[1])), which(cal_tss > 0))
cal_i2_tss = intersect(which(abs(cal_tss) <= range[2]), which(cal_tss < 0))
dat_tss = cbind(dataTable_tss[c(cal_i1_tss, cal_i2_tss),], dist = c(cal_tss[cal_i1_tss], cal_tss[cal_i2_tss]))
dat_tss = unique(data.frame('query' = rep(y, nrow(dat_tss)), dat_tss))
dat_tss = dat_tss[which(dat_tss$chr == z),]
dat_tss = dat_tss[order(abs(dat_tss$dist), decreasing = FALSE),][1,]
return(list(dat, dat_tss))
})
all_dist_ran = unlist(lapply(dist_ran, function(x){x[[1]]$dist})); all_dist_t_ran = unlist(lapply(dist_ran, function(x){x[[2]]$dist}))
hist_obj_ran = hist(all_dist_ran, plot = TRUE, breaks = ranges); hist_t_obj_ran = hist(all_dist_t_ran, plot = T, breaks = ranges)
message('- OK!')
} else {message('[WARN] Skip random set analysis.')}
message('- Draw histograms.')
all_dist_only = unlist(lapply(dist_only, function(x){x[[1]]$dist}))
all_dist_t_only = unlist(lapply(dist_only, function(x){x[[2]]$dist}))
hist_obj = hist(all_dist_only, plot = FALSE, breaks = ranges)
hist_t_obj = hist(all_dist_t_only, plot = FALSE, breaks = ranges)
g_dist = list('Decided' = lapply(dist_only, function(x){x[[1]]})); t_dist = list('Decided' = lapply(dist_only, function(x){x[[2]]}))
if(!is.null(ran_hits)){
count_site = hist_obj$counts; count_site_ran = hist_obj_ran$counts
count_t_site = hist_t_obj$counts; count_t_site_ran = hist_t_obj_ran$counts
count_all = nrow(only_hits_tab)
count_data = data.frame('Range' = factor(rep(ranges[ranges != 0]/1000, 2), levels = ranges[ranges != 0]/1000), 'Group' = c(rep('Observed', length(count_site)), rep('Random', length(count_site_ran))), 'Count' = c(count_site, count_site_ran), 'Freq' = c(count_site/count_all, count_site_ran/randomSize))
count_t_data = data.frame('Range' = factor(rep(ranges[ranges != 0]/1000, 2), levels = ranges[ranges != 0]/1000), 'Group' = c(rep('Observed', length(count_t_site)), rep('Random', length(count_t_site_ran))), 'Count' = c(count_t_site, count_t_site_ran), 'Freq' = c(count_t_site/count_all, count_t_site_ran/randomSize))
} else {
count_site = hist_obj$counts; count_t_site = hist_t_obj$counts
count_all = nrow(only_hits_tab)
count_data = data.frame('Range' = factor(ranges[ranges != 0]/1000, levels = ranges[ranges != 0]/1000), 'Group' = rep('Observed', length(count_site)), 'Count' = count_site, 'Freq' = count_site/count_all)
count_t_data = data.frame('Range' = factor(ranges[ranges != 0]/1000, levels = ranges[ranges != 0]/1000), 'Group' = rep('Observed', length(count_t_site)), 'Count' = count_t_site, 'Freq' = count_t_site/count_all)
}
grDevices::png(paste0(outPath, '/', outFileName, '_distribution_gene_', organism, '.png'), width = 1200, height = 750)
g_plot = ggplot2::ggplot(count_data) + ggplot2::geom_bar(ggplot2::aes(x = Range, y = Freq, fill = Group), stat = "identity", position = "dodge", width = 0.5) +
ggplot2::lims(y = c(0, max(count_data$Freq)*1.5)) + ggplot2::ggtitle(label = "Random distribution (Gene)") +
ggplot2::xlab('Intervals (Kbs)') + ggplot2::ylab("Ratio of Integration Events") + ggplot2::scale_fill_manual(values = c('mediumspringgreen', 'mediumpurple')) +
ggplot2::theme(panel.background = ggplot2::element_rect(fill="white", colour = "white"), panel.grid.major = ggplot2::element_line(size = 0.5, linetype = 'dotted', colour = 'black'),
axis.line = ggplot2::element_line(colour = "darkgrey"), legend.title = ggplot2::element_blank(),
legend.key.size = ggplot2::unit(0.7, "cm"), plot.title = ggplot2::element_text(hjust = 0.5, face = "bold", size = 20),
legend.text = ggplot2::element_text(size = 18), axis.text = ggplot2::element_text(size = 17),
axis.title = ggplot2::element_text(size = 18))
print(g_plot)
grDevices::dev.off()
grDevices::png(paste0(outPath, '/', outFileName, '_distribution_tss_', organism, '.png'), width = 1200, height = 750)
t_plot = ggplot2::ggplot(count_t_data) + ggplot2::geom_bar(ggplot2::aes(x = Range, y = Freq, fill = Group), stat = "identity", position = "dodge", width = 0.5) +
ggplot2::lims(y = c(0, max(count_t_data$Freq)*1.5)) + ggplot2::ggtitle(label = "Random distribution (TSS)") +
ggplot2::xlab('Intervals (Kbs)') + ggplot2::ylab("Ratio of Integration Events") + ggplot2::scale_fill_manual(values = c('mediumspringgreen', 'mediumpurple')) +
ggplot2::theme(panel.background = ggplot2::element_rect(fill="white", colour = "white"), panel.grid.major = ggplot2::element_line(size = 0.5, linetype = 'dotted', colour = 'black'),
axis.line = ggplot2::element_line(colour = "darkgrey"), legend.title = ggplot2::element_blank(),
legend.key.size = ggplot2::unit(0.7, "cm"), plot.title = ggplot2::element_text(hjust = 0.5, face = "bold", size = 20),
legend.text = ggplot2::element_text(size = 18), axis.text = ggplot2::element_text(size = 17),
axis.title = ggplot2::element_text(size = 18))
print(t_plot)
grDevices::dev.off()
message('- OK!')
result_list = list(inside_tab, g_dist, count_data, gr_genes, t_dist, count_t_data, gr_tss, organism)
names(result_list) = c('Gene_inside', 'Gene_distribution', 'Gene_plot_data', 'Gene_data', 'TSS_distribution', 'TSS_plot_data', 'TSS_data', 'Target_ver')
if(!is.null(ran_hits)){
result_list = c(result_list, list(inside_ran_tab, dist_ran))
names(result_list)[9:10] = c('Gene_inside_ran', 'Random_distribution')
}
result_list = c(result_list, list('Gene_hist_plot' = g_plot, 'TSS_hist_plot' = t_plot))
message('----- Finish. (Time : ', date(), ')')
return(result_list)
}
|
65234f76fb0d80abd0f16d44d6309e587c673777
|
1a54b7ce5ee44d81f6f4caa0f688cdd864144f48
|
/MBA-ECaaP/Rscript/Market_Basket_Analysis.R
|
74e416889fb432bd8598465a180f59870326a0af
|
[] |
no_license
|
ahyeek/atoz-dev
|
c7de53f621d99d1432595e09cd0508bb3b192923
|
4142ad6d5eb117de6a00b6d714ec1df5dced63f2
|
refs/heads/master
| 2020-03-24T00:51:24.154925
| 2018-08-26T06:22:47
| 2018-08-26T06:22:47
| 142,311,321
| 0
| 1
| null | null | null | null |
UTF-8
|
R
| false
| false
| 4,742
|
r
|
Market_Basket_Analysis.R
|
#install.packages("arules")
#install.packages("arulesViz")
#install.packages("plyr", dependencies = TRUE)

# Monthly market-basket analysis of product-category transactions:
# recodes category abbreviations, summarizes sales and repeat customers,
# then mines association rules with the apriori algorithm.
# Load the libraries
library(arules)
library(arulesViz)
library(plyr)

# Import product data by month
df_monthly <- read.csv("14-Jun_prod.csv")
str(df_monthly)

# Map single-letter category codes to descriptive labels.
# (Replaces nine copy-pasted levels()/assignment stanzas with one
# data-driven loop; behavior is identical: each label is appended to the
# factor levels and the matching rows are recoded.)
category_labels <- c(
  "CB" = "Clearing & breakdown",
  "I"  = "Ink",
  "O"  = "Others",
  "OE" = "Office Equipments",
  "P"  = "Printer",
  "R"  = "Ribbon",
  "S"  = "Stationary",
  "TE" = "Technology",
  "TO" = "Toner"
)
for (code in names(category_labels)) {
  lbl <- category_labels[[code]]
  levels(df_monthly$category) <- c(levels(df_monthly$category), lbl)
  df_monthly$category[df_monthly$category == code] <- lbl
}

# Sort data by email
df_sorted <- df_monthly[order(df_monthly$email),]
# Convert item description to categorical format
df_sorted$category <- as.factor(df_sorted$category)
str(df_sorted)

# Check top and least-sold product categories of the month
user_purchase <- count(df_sorted, 'category')
user_purchase <- arrange(user_purchase, desc(freq))
user_purchase["Mth_yr"] <- "2014-06"
write.csv(user_purchase,"14-Jun_prdsales.csv", row.names = TRUE)

# Check repeating customers (number of transactions per email)
rep_cust <- data.frame(df_sorted$email)
colnames(rep_cust)[1] <- "email"
repeat_customer <- ddply(rep_cust,.(email),nrow)
rep_customer <- arrange(repeat_customer, desc(V1))
colnames(rep_customer)[2] <- "No. of Transaction"
rep_customer["Mth_yr"] <- "2014-06"
write.csv(rep_customer,"14-Jun_repcust.csv", row.names = TRUE)

# Merge related information: all categories bought by the same user in the
# same month become one comma-separated basket string.
df_itemList <- ddply(df_monthly,c("email","mth_yr"),
                     function(df1)paste(df1$category,
                                        collapse = ","))
# Remove email and mth_yr from data
df_itemList$email <- NULL
df_itemList$mth_yr <- NULL
# Rename column headers for ease of use
colnames(df_itemList) <- c("itemList")
# Generate csv file of basket data.
# BUG FIX: the original wrote "14-Jul_ItemList.csv" here but read
# "14-Jun_ItemList.csv" back below, so the transactions step consumed a
# stale (or missing) file. Use the same 14-Jun name consistently.
write.csv(df_itemList,"14-Jun_ItemList.csv", row.names = TRUE)

#------------------------------------------------Association Rules----------------------------
# Convert itemlist data to basket transaction data
txn = read.transactions(file="14-Jun_ItemList.csv", rm.duplicates= TRUE, format="basket",sep=",",cols=1)
# Remove double quotes in item labels introduced by the CSV round-trip
txn@itemInfo$labels <- gsub("\"","",txn@itemInfo$labels)
# Visualize, in descending order, which products are involved in this month's transactions
itemFrequencyPlot(txn,topN=10,type="absolute")

# Get the rules using the apriori algorithm
# NOTE(review): supp = 1 means "present in 100% of transactions"; this first
# pass is exploratory and is redefined with realistic thresholds below.
rules <- apriori(txn, parameter = list(supp = 1, conf = 0.8))
# Sort rules
rules<-sort(rules, by="confidence", decreasing=TRUE)
# Decrease support level and cap rule length to avoid long rules
rules <- apriori(txn, parameter = list(supp = 0.001, conf = 0.8,maxlen=3))
# Avoid redundancy of generated rules: drop rules that are subsets of others
subset.matrix <- is.subset(rules, rules)
subset.matrix[lower.tri(subset.matrix, diag=T)] <- NA
redundant <- colSums(subset.matrix, na.rm=T) >= 1
rules.pruned <- rules[!redundant]
rules<-rules.pruned

# What are customers likely to have bought WITH office equipment (rhs fixed)?
rules<-apriori(data=txn, parameter=list(supp=0.001,conf = 0.08),
               appearance = list(default="lhs",rhs="Office Equipments"),
               control = list(verbose=F))
rules<-sort(rules, decreasing=TRUE,by="confidence")
# And what do buyers of office equipment go on to buy (lhs fixed)?
# Lower confidence/support are used so rules are still generated.
rules<-apriori(data=txn, parameter=list(supp=0.001,conf = 0.08,minlen=2),
               appearance = list(default="rhs",lhs="Office Equipments"),
               control = list(verbose=F))
rules<-sort(rules, decreasing=TRUE,by="confidence")

# Plot the rule graph and save it to PNG.
# NOTE(review): the 13-Dec filename looks inconsistent with the 14-Jun data
# used above -- confirm the intended output month.
png(filename="13-Dec_MBA_O.png")
plot(rules,method="graph",interactive=TRUE,shading=NA)
dev.off()
|
01c504d02c9bf6f1c342e528c3f782fd621deabf
|
6f1a492fce742c33d7f6c5ad377c6f771418496a
|
/insurance_prediction/insurance_linear_regression.R
|
a83c8ec2049012f3d46e4e92838209277061b769
|
[] |
no_license
|
Reet1992/Linear_Regression-problems
|
6632cd6843aa60fbc7948de758cc8e45f2d22a32
|
8c6695f26f79a95061c32fda2120f156b1b95491
|
refs/heads/master
| 2021-07-14T19:37:45.662968
| 2020-10-25T12:08:56
| 2020-10-25T12:08:56
| 219,558,679
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,640
|
r
|
insurance_linear_regression.R
|
# Linear regression of medical insurance charges on demographic predictors.
# NOTE(review): setwd() with an absolute path is fragile -- kept for
# compatibility with the original workflow.
setwd("E:/R_Datasets/")
df <- read.csv(file = 'insurance.csv')
head(df)

# Encode categorical predictors as numeric codes.
# BUG FIX: since R 4.0 read.csv() returns character columns, so the
# original as.numeric(df$smoker) produced NAs; wrap in factor() first.
df$smoker <- as.numeric(factor(df$smoker))
df$region <- as.numeric(factor(df$region))
df$age <- as.numeric(df$age)
df$children <- as.numeric(df$children)

df_new <- df
# Drop column 2 (sex) and mean-center BMI.
df2 <- df_new[ -c(2) ]
df2$bmi <- df2$bmi - mean(df2$bmi)

#### Correlation Calculation #####
# cor() match.arg()s the method, so only the first ("pearson") is used.
cor(df2, method = c("pearson","kendall","spearman"))
summary(cor(df2, method = c("pearson","kendall","spearman")))

#install.packages("corrplot")   # install interactively, not inside the script
library(corrplot)
col <- colorRampPalette(c("blue", "white", "red"))(20)
# BUG FIX: heatmap() requires a matrix (square when symm = TRUE); passing the
# raw data frame errored. Plot the correlation matrix, matching the intent of
# the correlation section above.
heatmap(cor(df2), col = col, symm = TRUE)
library(ggcorrplot)

#### Train/test data split ####
# Random sample indexes: 90% train, 10% test.
train_index <- sample(1:nrow(df2), 0.90 * nrow(df2))
test_index <- setdiff(1:nrow(df2), train_index)

# Build X_train, y_train, X_test, y_test.
# NOTE(review): df2 has only 6 columns, so the -15 column index is out of
# range and silently ignored -- all columns are kept (behavior preserved).
X_train <- df2[train_index, -15]
y_train <- df2[train_index, "charges"]
X_test <- df2[test_index, -15]
y_test <- df2[test_index, "charges"]
x2_train <- X_train[1:5]
y2_train <- X_train[6]
X2_test <- X_test[1:5]
y2_test <- X_test[6]

### Linear Regression #####
# BUG FIX: the original fitted lm(y2_train$charges ~ x2_train$age + ...),
# which embeds the training vectors in the formula; predict(model_lm, X_test)
# then ignores newdata and returns training fits. Fitting with a data-frame
# formula makes prediction on X_test work as intended.
model_lm <- lm(charges ~ age + bmi + children + smoker + region, data = X_train)
plot(model_lm, pch = 20, col = "blue")
abline(model_lm)  # NOTE(review): abline() uses only the first two coefficients of a multiple regression
summary(model_lm)

### Plot residuals ####
plot(model_lm$residuals, pch = 16, col = "red")
print(model_lm$coefficients)
print(model_lm$rank)
print(model_lm$fitted.values)
AIC(model_lm)
BIC(model_lm)

#### Prediction and accuracy #####
y_pred <- predict(model_lm, X_test)
actuals_preds <- data.frame(cbind(actuals=y_test, predicteds=y_pred))

## Correlation between actuals and predictions ####
correlation_accuracy <- cor(actuals_preds)
print(correlation_accuracy)
head(actuals_preds)

#### Min-max accuracy: mean of row-wise min/max ratios #######
min_max_accuracy <- mean(apply(actuals_preds, 1, min) / apply(actuals_preds, 1, max))
print(min_max_accuracy)

#### Distribution of residuals ###
# BUG FIX: hist() has no `color` argument (it was silently swallowed); use `col`.
hist(model_lm$residuals, col = "grey")
summary(model_lm)

# Refit excluding suspected outlier rows.
# BUG FIX: the original subset expression
#   subset=(1:length(height)!=(50,60,70,80,80,100))
# was invalid R syntax and referenced an undefined `height`. Express the
# exclusion of those row indices with %in% instead.
model_lm <- lm(charges ~ age + bmi + children + smoker + region,
               data = X_train,
               subset = !(seq_len(nrow(X_train)) %in% c(50, 60, 70, 80, 100)))
plot(model_lm, pch = 20, col = "blue")
abline(model_lm)
summary(model_lm)

#### Improvement of residuals: log-transform toward normality ######
resids = model_lm$residuals
trans <- log10(resids-1.0001*min(resids))  # shift so the argument of log10 is strictly positive
qqnorm(trans)
qqline(trans)
plot(trans, pch = 20, col = "blue")
hist(trans, col = "grey")
|
a4f4122ddbb807def4c91a35ef2ed5c6c9f42ca7
|
379b340f7c20203f133a9ff69ccaa87fafba3083
|
/scripts/Figs_2c_4b_5b_recruitment_projection.R
|
de0e09af1747cab700f4a3b04a06f45b4bfaca5c
|
[
"CC0-1.0",
"LicenseRef-scancode-public-domain"
] |
permissive
|
mikelitzow/fish-FAR
|
74a0aca31780c17450128f450a0ccbdd5be4bd77
|
de1ff085152716e1288852a4227f3ff4d3982b78
|
refs/heads/main
| 2023-04-18T00:24:12.937219
| 2021-09-29T00:01:20
| 2021-09-29T00:01:20
| 321,418,403
| 1
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 17,790
|
r
|
Figs_2c_4b_5b_recruitment_projection.R
|
## predict the response of cod and pollock recruitment to FAR values
## in the observational record and for CMIP projections
## FAR-modeled recruitment fit for cod is Fig. 2c
## FAR-modeled recruitment fit for pollock is Fig. 4b
## predictions for both spp. conditioned on CMIP projections is Fig. 5b
## FAR = Fraction of Attributable Risk; estimates come from previously
## fitted brms models loaded from ./output/ (not refit here).
library(rstan)
library(brms)
library(bayesplot)
library(tidyverse)
library(reshape2)
source("./scripts/stan_utils.R")
# colorblind-friendly palette used for the combined projection figure
cb <- c("#999999", "#E69F00", "#56B4E9", "#009E73", "#F0E442", "#0072B2", "#D55E00", "#CC79A7")
theme_set(theme_bw())
## load data --------------------
# stock assessment model estimates
recr <- read.csv("./data/cod_pollock_assessment_2020_SAFEs.csv")
## note that we're only using FAR as SSB isn't part of projections
# 2017-2020 recruitment estimates are poorly supported by data; discard
recr$codR0.2020[recr$year >= 2017] <- NA
# keep year + cod recruitment; sc.log.codR0 = scaled log recruitment anomaly
recr <- recr %>%
  filter(year >= 1977) %>%
  select(year, codR0.2020) %>%
  mutate(sc.log.codR0=as.vector(scale(log(codR0.2020))))
# annual recruitment estimates from seines - load brms object
recr_2_zinb <- readRDS("./output/recr_2_zinb.rds")
seine <- conditional_effects(recr_2_zinb, effect = "year_fac", re_formula = NA,
                             probs = c(0.025, 0.975))
print(seine)
seine$year_fac
# seine-based recruitment index, 2006-2020 (one estimate per year level)
seine.r <- data.frame(year=2006:2020,
                      seine.R=seine$year_fac$estimate__)
obs.dat <- left_join(recr, seine.r)
# log and scale seine estimates
# then predict 2017-2020 recruitment from seine values
# (fills the assessment years discarded above using the seine~assessment
# regression fit on overlapping years)
obs.dat$sc.log.seine.R <- as.vector(scale(log(obs.dat$seine.R)))
mod <- lm(sc.log.codR0 ~ sc.log.seine.R, data=obs.dat, na.action = "na.omit")
summary(mod)
new.dat <- data.frame(sc.log.seine.R = obs.dat$sc.log.seine.R[obs.dat$year %in% 2017:2020])
obs.dat$sc.log.codR0[obs.dat$year %in% 2017:2020] <- predict(mod, newdata = new.dat)
# finally, add FAR estimates for 1977:2020 - load brms object
obs_far_fixef <- readRDS("./output/obs_far_fixef.rds")
obs.FAR <- conditional_effects(obs_far_fixef, probs = c(0.025, 0.975))
# one FAR estimate (with SE) per year
obs.FAR <- obs.FAR$year_fac %>%
  mutate(year = as.numeric(as.character(year_fac))) %>%
  select(year, estimate__, se__)
names(obs.FAR)[2:3] <- c("FAR", "FAR.SE")
obs.dat <- left_join(obs.dat, obs.FAR)
## finally, load projected FAR values through 2046 - load brms object
mod_far_fixef <- readRDS("./output/mod_far_fixef.rds")
proj.far <- conditional_effects(mod_far_fixef, probs = c(0.025, 0.975))
print(proj.far)
proj.dat <- proj.far$year_fac %>%
  mutate(year = as.numeric(as.character(year_fac))) %>%
  filter(year >= 2006) %>% # limit to RCP8.5 projections (no historical observations)
  select(year, estimate__, se__)
names(proj.dat)[2:3] <- c("FAR", "FAR.SE")
## proj.dat is the dataframe for use as new.data when predicting
## model recruitment as a function of FAR
# R1: smooth (GAM-style) fit of cod log-recruitment anomaly on FAR,
# ignoring measurement error in FAR. Used for Fig. 2c.
R1 <- brm(sc.log.codR0 ~ s(FAR, k = 4),
          data = obs.dat,
          save_pars = save_pars(latent = TRUE),
          cores = 4, iter = 4000, chains = 4,
          control = list(adapt_delta = 0.999, max_treedepth = 12))
saveRDS(R1, file = "output/cod_R_FAR_obs.rds")
summary(R1)
names(R1$fit)
# reload the saved fit (allows rerunning from here without refitting)
R1 <- readRDS("./output/cod_R_FAR_obs.rds")
bayes_R2(R1)
summary(R1)
plot(conditional_effects(R1), points = TRUE)
# MCMC diagnostics (helpers from stan_utils.R)
check_hmc_diagnostics(R1$fit)
neff_lowest(R1$fit)
rhat_highest(R1$fit)
# quick look at results
ce1s_1 <- conditional_effects(R1)
check <- ce1s_1$FAR %>%
  arrange(desc(FAR))
check
## try adding uncertainty in FAR
# R2: linear fit with measurement error on FAR via me(FAR, FAR.SE);
# used below for the CMIP projections.
R2 <- brm(sc.log.codR0 ~ me(FAR, FAR.SE),
          data = obs.dat,
          save_pars = save_pars(latent = TRUE),
          cores = 4, iter = 4000, chains = 4,
          control = list(adapt_delta = 0.99, max_treedepth = 12))
saveRDS(R2, file = "output/cod_R_FAR_w_SE.rds")
summary(R2)
names(R2$fit)
R2 <- readRDS("./output/cod_R_FAR_w_SE.rds")
plot(conditional_effects(R2), points = TRUE)
check_hmc_diagnostics(R2$fit)
neff_lowest(R2$fit)
rhat_highest(R2$fit)
## Predicted effects ---------------------------------------
R1 <- readRDS("./output/cod_R_FAR_obs.rds")
## 95% CI
ce1s_1 <- conditional_effects(R1, effect = "FAR", re_formula = NA,
                              probs = c(0.025, 0.975))
## 90% CI
ce1s_2 <- conditional_effects(R1, effect = "FAR", re_formula = NA,
                              probs = c(0.05, 0.95))
## 80% CI
ce1s_3 <- conditional_effects(R1, effect = "FAR", re_formula = NA,
                              probs = c(0.1, 0.9))
# assemble one frame with nested 95/90/80% credible ribbons
dat_ce <- ce1s_1$FAR
dat_ce[["upper_95"]] <- dat_ce[["upper__"]]
dat_ce[["lower_95"]] <- dat_ce[["lower__"]]
dat_ce[["upper_90"]] <- ce1s_2$FAR[["upper__"]]
dat_ce[["lower_90"]] <- ce1s_2$FAR[["lower__"]]
dat_ce[["upper_80"]] <- ce1s_3$FAR[["upper__"]]
dat_ce[["lower_80"]] <- ce1s_3$FAR[["lower__"]]
## and years for plot (partial-residual points labeled by year)
cod.sub <- conditional_effects(R1, effect = "FAR")
cod.sub <- lapply(cod.sub, attributes)$FAR$points
cod.sub$year <- 1977:2020
# jitter x and y for plot (seed fixed so the figure is reproducible)
f <- 135 # set jitter factor
set.seed(22)
cod.sub$x.jitter <- jitter(cod.sub$FAR, factor=f)
cod.sub$y.jitter <- jitter(cod.sub$resp__, factor=f)
# Fig. 2c: cod recruitment anomaly vs FAR with nested credible ribbons
fig.2c <- ggplot(dat_ce) +
  aes(x = effect1__, y = estimate__) +
  geom_ribbon(aes(ymin = lower_95, ymax = upper_95), fill = "grey90") +
  geom_ribbon(aes(ymin = lower_90, ymax = upper_90), fill = "grey85") +
  geom_ribbon(aes(ymin = lower_80, ymax = upper_80), fill = "grey80") +
  geom_line(size = 1, color = "red3") +
  geom_hline(yintercept = 0, size = 0.2) +
  geom_text(data = cod.sub,
            aes(x = x.jitter, y = y.jitter, label = year), color = "grey40", size = 3) +
  labs(x = "Fraction of Attributable Risk (FAR)", y = "Log recruitment anomaly") +
  theme_bw()
print(fig.2c)
ggsave("./figs/fig.2c.png", width = 4, height = 3)
# predict for CMIP projections!
# Draw posterior predictions of cod recruitment for each projection year,
# propagating FAR uncertainty through the measurement-error model R2.
post <- data.frame()
for(i in 2006:2046){
  # i <- 2006
  newdata <- data.frame(FAR=proj.dat$FAR[proj.dat$year==i],
                        FAR.SE=proj.dat$FAR.SE[proj.dat$year==i])
  xx <- data.frame(year = i,
                   posterior = posterior_epred(R2, newdata = newdata, re_formula = NA, resp = "sc.log.codR0"))
  post <- rbind(post, xx)
}
# bin projection years into decades for summarizing
post$decade <- ifelse(post$year %in% 2010:2019, "2010s",
                      ifelse(post$year %in% 2020:2029, "2020s",
                             ifelse(post$year %in% 2030:2039, "2030s",
                                    ifelse(post$year <=2009, "2000s", "2040s"))))
# and add historical predictions (conditioned on observed FAR, 1977-2019)
histor <- data.frame()
for(i in 1977:2019){
  # i <- 2006
  newdata <- data.frame(FAR=obs.FAR$FAR[obs.FAR$year==i],
                        FAR.SE=obs.FAR$FAR.SE[obs.FAR$year==i])
  xx <- data.frame(year = i,
                   posterior = posterior_epred(R2, newdata = newdata, re_formula = NA, resp = "sc.log.codR0"))
  histor <- rbind(histor, xx)
}
histor$decade <- "Historical"
## combine and plot with CIs (median and 95% interval per decade)
pred.recr <- rbind(post, histor) %>%
  dplyr::group_by(decade) %>%
  dplyr::summarise(median=median(posterior),
                   LCI=quantile(posterior, probs = 0.025),
                   UCI=quantile(posterior, probs = 0.975)) %>%
  dplyr::filter(decade %in% c("2020s", "2030s", "2040s", "Historical"))
# manual ordering so "Historical" plots first
pred.recr$order <- c(2,3,4,1)
pred.recr$decade <- reorder(pred.recr$decade, pred.recr$order)
cod.project.R <- ggplot(pred.recr, aes(decade, median)) +
  geom_point(size=2) +
  geom_errorbar(aes(ymin=LCI, ymax=UCI), width=0.2) +
  ylab("Log recruitment anomaly") +
  theme(axis.title.x = element_blank())
cod.project.R
ggsave("./figs/hist-projected_cod_R_with_FAR_uncertainty.png", width = 3, height = 3)
## now pollock! -----------------------
## load data --------------------
# stock assessment model estimates
recr <- read.csv("./data/cod_pollock_assessment_2020_SAFEs.csv")
# pollock: keep recruitment + SSB; sc.log.pollR0 = scaled log recruitment anomaly
poll.obs.dat <- recr %>%
  filter(year %in% 1970:2019) %>%
  select(year, pollR0.2020, poll.SSB.2020) %>%
  mutate(sc.log.pollR0=as.vector(scale(log(pollR0.2020))))
# attach observed FAR estimates (same brms object used for cod above)
obs_far_fixef <- readRDS("./output/obs_far_fixef.rds")
obs.FAR <- conditional_effects(obs_far_fixef, probs = c(0.025, 0.975))
obs.FAR <- obs.FAR$year_fac %>%
  mutate(year = as.numeric(as.character(year_fac))) %>%
  select(year, estimate__, se__)
names(obs.FAR)[2:3] <- c("FAR", "FAR.SE")
poll.obs.dat <- left_join(poll.obs.dat, obs.FAR)
names(poll.obs.dat)[3] <- "SSB"
## finally, load projected FAR values through 2046 - load brms object
mod_far_fixef <- readRDS("./output/mod_far_fixef.rds")
proj.far <- conditional_effects(mod_far_fixef, probs = c(0.025, 0.975))
print(proj.far)
proj.dat <- proj.far$year_fac %>%
  mutate(year = as.numeric(as.character(year_fac))) %>%
  filter(year >= 2006) %>% # limit to RCP8.5 projections (no historical observations)
  select(year, estimate__, se__)
names(proj.dat)[2:3] <- c("FAR", "FAR.SE")
## proj.dat is the dataframe for use as new.data when predicting
## model recruitment as a function of FAR
# poll.R1: smooth fit of pollock recruitment on FAR (no measurement error);
# used for Fig. 4b.
poll.R1 <- brm(sc.log.pollR0 ~ s(FAR, k = 5),
               data = poll.obs.dat,
               seed = 1234,
               save_pars = save_pars(all = TRUE),
               cores = 4, iter = 4000, chains = 4,
               control = list(adapt_delta = 0.999, max_treedepth = 12))
poll.R1  <- add_criterion(poll.R1, c("loo", "bayes_R2"),
                          moment_match = TRUE, reloo = FALSE,
                          cores = 4, k_threshold = 0.7)
saveRDS(poll.R1, file = "output/poll_R1_FAR_obs.rds")
summary(poll.R1)
bayes_R2(poll.R1)
names(poll.R1$fit)
plot(conditional_effects(poll.R1), points = TRUE)
check_hmc_diagnostics(poll.R1$fit)
neff_lowest(poll.R1$fit)
rhat_highest(poll.R1$fit)
## add SSB
# model selection: does adding spawning stock biomass improve the fit?
poll.R1s <- brm(sc.log.pollR0 ~ s(FAR, k = 5) + s(SSB, k = 5),
                data = poll.obs.dat,
                seed = 1234,
                save_pars = save_pars(all = TRUE),
                cores = 4, iter = 4000, chains = 4,
                control = list(adapt_delta = 0.999, max_treedepth = 12))
poll.R1s  <- add_criterion(poll.R1s, c("loo", "bayes_R2"),
                           moment_match = TRUE, reloo = TRUE,
                           cores = 4, k_threshold = 0.7)
saveRDS(poll.R1s, file = "output/poll_R1s_FAR_obs.rds")
summary(poll.R1s)
names(poll.R1s$fit)
plot(conditional_effects(poll.R1s), points = TRUE)
check_hmc_diagnostics(poll.R1s$fit)
neff_lowest(poll.R1s$fit)
rhat_highest(poll.R1s$fit)
## model selection -----------------------------
# compare FAR-only vs FAR+SSB by LOO
poll.R1 <- readRDS("./output/poll_R1_FAR_obs.rds")
poll.R1s <- readRDS("./output/poll_R1s_FAR_obs.rds")
loo(poll.R1, poll.R1s)
## plot R1 for Fig. 4b ------------------------------------
poll.R1 <- readRDS("./output/poll_R1_FAR_obs.rds")
## far predictions ##
## 95% CI
ce1s_1 <- conditional_effects(poll.R1, effect = "FAR", re_formula = NA,
                              probs = c(0.025, 0.975))
## 90% CI
ce1s_2 <- conditional_effects(poll.R1, effect = "FAR", re_formula = NA,
                              probs = c(0.05, 0.95))
## 80% CI
ce1s_3 <- conditional_effects(poll.R1, effect = "FAR", re_formula = NA,
                              probs = c(0.1, 0.9))
# assemble nested 95/90/80% ribbons (same pattern as Fig. 2c above)
dat_ce <- ce1s_1$FAR
dat_ce[["upper_95"]] <- dat_ce[["upper__"]]
dat_ce[["lower_95"]] <- dat_ce[["lower__"]]
dat_ce[["upper_90"]] <- ce1s_2$FAR[["upper__"]]
dat_ce[["lower_90"]] <- ce1s_2$FAR[["lower__"]]
dat_ce[["upper_80"]] <- ce1s_3$FAR[["upper__"]]
dat_ce[["lower_80"]] <- ce1s_3$FAR[["lower__"]]
# Fig. 4b: pollock recruitment anomaly vs FAR, year-labeled observations
fig.4b <- ggplot(dat_ce) +
  aes(x = effect1__, y = estimate__) +
  geom_ribbon(aes(ymin = lower_95, ymax = upper_95), fill = "grey90") +
  geom_ribbon(aes(ymin = lower_90, ymax = upper_90), fill = "grey85") +
  geom_ribbon(aes(ymin = lower_80, ymax = upper_80), fill = "grey80") +
  geom_line(size = 1, color = "red3") +
  geom_hline(yintercept = 0, size = 0.2) +
  labs(x = "Fraction of Attributable Risk (FAR)", y = "Log recruitment anomaly") +
  theme_bw()+
  geom_text(data = poll.obs.dat,
            aes(x = FAR, y = sc.log.pollR0, label = year), color = "grey40", size = 3)
print(fig.4b)
## try adding uncertainty in FAR -----------------------------------------
# poll.R2: quadratic polynomial in FAR with measurement error
poll.R2 <- brm(sc.log.pollR0 ~ 1 + me(FAR, FAR.SE) + I(me(FAR, FAR.SE)^2),
               data = poll.obs.dat,
               save_pars = save_pars(latent = TRUE),
               cores = 4, iter = 6000, chains = 4,
               control = list(adapt_delta = 0.999, max_treedepth = 16))
saveRDS(poll.R2, file = "output/poll_R_FAR_w_SE.rds")
summary(poll.R2)
names(poll.R2$fit)
# poll.R3: cubic polynomial alternative; compared against poll.R2 by LOO below
poll.R3 <- brm(sc.log.pollR0 ~ 1 + me(FAR, FAR.SE) + I(me(FAR, FAR.SE)^2) + I(me(FAR, FAR.SE)^3),
               data = poll.obs.dat,
               save_pars = save_pars(latent = TRUE),
               cores = 4, iter = 6000, chains = 4,
               control = list(adapt_delta = 0.999, max_treedepth = 16))
saveRDS(poll.R3, file = "output/poll_R_FAR_w_SE_cubic.rds")
summary(poll.R3)
names(poll.R3$fit)
poll.R2 <- readRDS("./output/poll_R_FAR_w_SE.rds")
poll.R3 <- readRDS("./output/poll_R_FAR_w_SE_cubic.rds")
loo(poll.R2, poll.R3)
## Predict R2 manually (conditional curve with measurement error ~ 0)
pred_far <- data.frame(FAR = seq(min(poll.obs.dat$FAR), max(poll.obs.dat$FAR), length.out = 100),
                       FAR.SE = 0.00001) ## set measurement error to zero
pred_full <- posterior_epred(poll.R2, newdata = pred_far)
pred <- data.frame(estimate = apply(pred_full, 2, mean),
                   upper = apply(pred_full, 2, quantile, probs = 0.975),
                   lower = apply(pred_full, 2, quantile, probs = 0.025))
pred_df <- cbind(pred_far, pred)
# diagnostic plot of the fitted curve over the observations
g <- ggplot(pred_df) +
  geom_ribbon(aes(x = FAR, ymin = lower, ymax = upper), fill = "grey90") +
  geom_line(aes(x = FAR, y = estimate), color = "red3") +
  geom_point(data = poll.obs.dat, aes(x = FAR, y = sc.log.pollR0), color = "grey25")
## geom_smooth(data = poll.obs.dat, aes(x = FAR, y = sc.log.pollR0), method = "lm",
##             formula = y ~ x + I(x^2), se = FALSE, color = "blue") +
## geom_segment(data = poll.obs.dat, aes(y = sc.log.pollR0, yend = sc.log.pollR0,
##                                       x = FAR - FAR.SE, xend = FAR + FAR.SE))
print(g)
check_hmc_diagnostics(poll.R2$fit)
neff_lowest(poll.R2$fit)
rhat_highest(poll.R2$fit)
## predict for CMIP projections!---------------------------
# Draw posterior predictions of pollock recruitment for each projection
# year, propagating FAR uncertainty through the cubic measurement-error
# model poll.R3 (selected over poll.R2 by LOO above).
poll.post <- data.frame()
for(i in 2006:2046){
  # i <- 2006
  newdata <- data.frame(FAR=proj.dat$FAR[proj.dat$year==i],
                        FAR.SE=proj.dat$FAR.SE[proj.dat$year==i])
  xx <- data.frame(year = i,
                   posterior = posterior_epred(poll.R3, newdata = newdata, re_formula = NA, resp = "sc.log.pollR0"))
  poll.post <- rbind(poll.post, xx)
}
# bin projection years into decades for summarizing
poll.post$decade <- ifelse(poll.post$year %in% 2010:2019, "2010s",
                           ifelse(poll.post$year %in% 2020:2029, "2020s",
                                  ifelse(poll.post$year %in% 2030:2039, "2030s",
                                         ifelse(poll.post$year <=2009, "2000s", "2040s"))))
# and add historical predictions (conditioned on observed FAR, 1977-2019)
poll.histor <- data.frame()
for(i in 1977:2019){
  # i <- 2006
  newdata <- data.frame(FAR=obs.FAR$FAR[obs.FAR$year==i],
                        FAR.SE=obs.FAR$FAR.SE[obs.FAR$year==i])
  # BUG FIX: the original passed resp = "sc.log.codR0" (the COD response
  # name, copy-pasted from the cod section); the pollock model's response
  # is sc.log.pollR0, matching the forward-projection loop above.
  xx <- data.frame(year = i,
                   posterior = posterior_epred(poll.R3, newdata = newdata, re_formula = NA, resp = "sc.log.pollR0"))
  poll.histor <- rbind(poll.histor, xx)
}
poll.histor$decade <- "Historical"
## combine and plot with CIs (median and 95% interval per decade)
poll.pred.recr <- rbind(poll.post, poll.histor) %>%
  dplyr::group_by(decade) %>%
  dplyr::summarise(median=median(posterior),
                   LCI=quantile(posterior, probs = 0.025),
                   UCI=quantile(posterior, probs = 0.975)) %>%
  dplyr::filter(decade %in% c("2020s", "2030s", "2040s", "Historical"))
# manual ordering so "Historical" plots first
poll.pred.recr$order <- c(2,3,4,1)
poll.pred.recr$decade <- reorder(poll.pred.recr$decade, poll.pred.recr$order)
ggplot(poll.pred.recr, aes(decade, median)) +
  geom_point(size=2) +
  geom_errorbar(aes(ymin=LCI, ymax=UCI), width=0.2) +
  ylab("Log recruitment anomaly") +
  theme(axis.title.x = element_blank())
ggsave("./figs/hist-projected_poll_R_with_FAR_uncertainty.png", width = 3, height = 3)
# combine projections into a single plot (cod + pollock; Fig. 5b)
pred.recr$species <- "cod"
poll.pred.recr$species <- "pollock"
all.plot <- rbind(pred.recr, poll.pred.recr)
fig.5b <- ggplot(all.plot, aes(decade, median, color=species)) +
  geom_point(size=2, position=position_dodge(width=0.5)) +
  geom_errorbar(aes(ymin=LCI, ymax=UCI), width=0.2, position=position_dodge(width=0.5)) +
  geom_hline(yintercept = 0, size = 0.2) +
  ylab("Log recruitment anomaly") +
  theme(axis.title.x = element_blank(),
        legend.title = element_blank(),
        legend.position = c(0.8, 0.8)) +
  scale_color_manual(values=cb[c(2,4)])
fig.5b
ggsave("./figs/hist-projected_poll_cod_ R.png", width = 3, height = 3)
## this is Fig. 5b in the draft
## calculate change in medians ------------------------------------
# Historical vs 2020s median anomalies, one column per species
summ.dat <- all.plot %>%
  filter(decade %in% c("2020s", "Historical")) %>%
  select(species, decade, median) %>%
  pivot_wider(values_from = median, names_from = species)
# go back to original data frames to back-calculate to original units!
# (invert the scale(log(R0)) transform by regressing log R0 on the scaled
# anomaly, then exponentiating predictions)
ggplot(obs.dat, aes(sc.log.codR0, codR0.2020)) +
  geom_point()
cod.mod <- lm(log(codR0.2020) ~ sc.log.codR0, data=obs.dat, na.action = "na.exclude")
summary(cod.mod)
check <- exp(predict(cod.mod))
plot(check, obs.dat$codR0.2020) # right!
new.dat <- data.frame(sc.log.codR0 = summ.dat$cod)
summ.dat$raw.cod <- exp(predict(cod.mod, newdata = new.dat))
# and pollock
poll.mod <- lm(log(pollR0.2020) ~ sc.log.pollR0, data=poll.obs.dat, na.action = "na.exclude")
summary(poll.mod)
check <- exp(predict(poll.mod))
plot(check, poll.obs.dat$pollR0.2020) # right!
new.dat <- data.frame(sc.log.pollR0 = summ.dat$pollock)
summ.dat$raw.poll <- exp(predict(poll.mod, newdata = new.dat))
# proportional change from Historical (row 2) to 2020s (row 1), raw units
cod.change <- (summ.dat$raw.cod[2]-summ.dat$raw.cod[1])/summ.dat$raw.cod[2]
poll.change <- (summ.dat$raw.poll[2]-summ.dat$raw.poll[1])/summ.dat$raw.poll[2]
|
2c28fdb4e81272456e6d0441942820ad0c038aea
|
f89c50f72976d4dea4e068bdfa3b61e2b4d1e5f1
|
/R_analyses/Homelessness and Housing/2_Join-PUMS_Cut-Percentiles.R
|
a679ec39e58807ffc90537ecc22ffe9c03f5abc9
|
[
"MIT"
] |
permissive
|
ahla-brfoley/abundanthousingla
|
5e48ef959531b3347dec1964b62699055fb9239c
|
d4f6d7c44a8633a83251af80a156a07654b78609
|
refs/heads/master
| 2021-03-10T09:08:00.356810
| 2020-08-12T18:52:13
| 2020-08-12T18:52:13
| 246,441,489
| 0
| 0
|
MIT
| 2020-05-15T18:56:50
| 2020-03-11T00:56:14
|
Rebol
|
UTF-8
|
R
| false
| false
| 2,213
|
r
|
2_Join-PUMS_Cut-Percentiles.R
|
# Join ACS PUMS microdata files by year, restrict to the LA Continuum of
# Care (CoC) PUMAs, and compute weighted rent percentiles and population
# counts.
library(plyr)
library(data.table)
library(dplyr)
library(Hmisc)

files <- list.files(pattern="*.csv")

# Reads one microdata file named "[Year].csv" and prepends the year as a
# column. BUG FIX: the original called stringr::str_extract, but stringr is
# never loaded by this script; use base-R regmatches()/regexpr() with the
# same pattern (first run of 4 alphanumeric characters) instead.
read_csv_w_year <- function(x) {
  out <- read.csv(x)
  yr <- as.numeric(regmatches(x, regexpr("[A-Za-z0-9]{4}", x)))
  cbind(year=yr, out)
}

# NOTE(review): both frames are built from the same file list; presumably the
# household- and person-level runs use different working directories -- confirm.
combined_files <- rbind.fill(lapply(files, read_csv_w_year))
combined_files_people <- rbind.fill(lapply(files, read_csv_w_year))

# Areas (PUMAs) to include/exclude in CoC
pre2011pumas <- c(301:307)
post2011pumas <- c(301:308)

# 2011 and pre- use 2000 Census PUMAs; later years use 2010 PUMAs.
coc <- subset(combined_files, (combined_files$year <= 2011 & combined_files$PUMA %in% pre2011pumas) | (combined_files$year > 2011 & combined_files$PUMA %in% post2011pumas))
# for people-level data (instead of households)
cocppl <- subset(combined_files_people, (combined_files_people$year <= 2011 & combined_files_people$PUMA %in% pre2011pumas) | (combined_files_people$year > 2011 & combined_files_people$PUMA %in% post2011pumas))

# Weighted percentile of tgt_col by year: accumulate the survey weights in
# sorted order and pick the row whose cumulative weight is closest to
# pctile * total weight.
percentiles <- function(df, year_col, tgt_col, weight_col, pctile){
  df %>% dplyr::filter(!is.na(get(tgt_col))) %>% dplyr::group_by(get(year_col)) %>%
    arrange(get(tgt_col)) %>%
    dplyr::mutate(cum_wt = cumsum(get(weight_col))) %>%
    dplyr::mutate(max_wt = max(cum_wt, na.rm = TRUE)) %>%
    dplyr::mutate(pct_dist = abs(cum_wt - (pctile*max_wt))) %>%
    dplyr::filter(pct_dist == min(pct_dist)) %>% arrange(get(year_col)) %>% ungroup(get(year_col)) %>%
    select(year_col, tgt_col)
}

# Bind three percentile tables side by side (year column from x, value
# columns from y and z). BUG FIX: defined BEFORE its first use below; the
# original defined it after calling it, which errors in a top-to-bottom run.
format_for_excel <- function(x,y,z)
{
  cbind(x,y[,2],z[,2])
}

# Median, 10th and 20th percentile gross rent (GRNTP) weighted by WGTP
View(format_for_excel(percentiles(coc,"year","GRNTP","WGTP",0.5),percentiles(coc,"year","GRNTP","WGTP",0.1),percentiles(coc,"year","GRNTP","WGTP",0.2)))
percentiles(coc,"year","GRNTP","WGTP",0.2)

# get adult population (person weights PWGTP, ages 18+)
View(cocppl[cocppl$AGEP >= 18,] %>% dplyr::group_by(year) %>% dplyr::summarize(sum = sum(PWGTP)))
# get number of market units (occupied, or vacant for-rent/for-sale)
View(coc[is.na(coc$VACS) | coc$VACS == 1 | coc$VACS == 3,] %>% dplyr::group_by(year) %>% dplyr::summarize(sum = sum(WGTP)))
|
0e845624f3e26f37eb320314d0b8a562be8ff7c5
|
56b32941415e9abe063d6e52754b665bf95c8d6a
|
/R-Portable/App/R-Portable/library/rjson/unittests/test.list.r
|
85482eee081edd647ce54a31bc07ba3ec8138060
|
[
"MIT",
"LicenseRef-scancode-unknown-license-reference",
"GPL-2.0-only"
] |
permissive
|
voltek62/seo-viz-install
|
37ed82a014fc36e192d9a5e5aed7bd45327c8ff3
|
e7c63f4e2e4acebc1556912887ecd6a12b4458a0
|
refs/heads/master
| 2020-05-23T08:59:32.933837
| 2017-03-12T22:00:01
| 2017-03-12T22:00:01
| 84,758,190
| 1
| 0
|
MIT
| 2019-10-13T20:51:49
| 2017-03-12T21:20:14
|
C++
|
UTF-8
|
R
| false
| false
| 1,101
|
r
|
test.list.r
|
# RUnit fixture hooks; no per-test setup or teardown is needed for this suite.
.setUp <- function() {}
.tearDown <- function() {}
# Tests for fromJSON's handling of JSON objects, which map to named R lists.
test.list <- function()
{
    # An empty object parses to an empty list.
    checkIdentical( fromJSON( "{}" ), list() )

    # Each malformed object must raise a parse error.
    malformed <- c( "{", "{a:5}", "{\"a:5}", "{\"a\":", "{\"a\":5", "{\"a\":}", "{123:false}", "{\"a\":unquoted}" )
    for( snippet in malformed ) {
        parsed <- try( fromJSON( snippet ), silent = TRUE )
        checkTrue( inherits( parsed, "try-error" ) )
    }

    # Simple key/value pairs become named list elements.
    checkIdentical( fromJSON( "{\"a\":5}" ), list( a = 5 ) )
    checkIdentical( fromJSON( "{\"a\":5,\"b\":10}" ), list( a = 5, b = 10 ) )

    # A nested array of booleans becomes a logical vector element.
    expected <- list( a = 5, b = 10, clap = c(TRUE,FALSE,FALSE) )
    parsed <- fromJSON( "{\"a\":5,\"b\":10, \"clap\":[true,false,false]}" )
    checkIdentical( parsed, expected )
    checkIdentical( parsed[["clap"]], expected[["clap"]] )
}
# Nested JSON arrays map to nested R lists.
test.nestedlist <- function()
{
    parsed <- fromJSON( "[\"a\", [\"b\", \"c\"] ]" )
    expected <- list( "a", c( "b", "c" ) )
    checkIdentical( parsed, expected )
    checkIdentical( parsed[[2]], expected[[2]] )
}
# A trailing comma inside an object is a parse error.
test.bad.list <- function()
{
    checkException( fromJSON( "{\"a\": 123,}" ) )
}
|
cf65c96166dec1bbbe7117065c94cd84bd67fde7
|
db4521bb6991d88738ebf2e72b875abb8b3886b1
|
/dinucleotides.R
|
6cafe12199cf8769cc3fc5e98b56ad95031c618e
|
[] |
no_license
|
Hashem72/Some_R_Codes
|
5692e282a8ba9a0e673beb75f8a9a76f6a416353
|
6cc938fa4b97a3eb34245ecc68d13dd4bf2ee8fa
|
refs/heads/master
| 2020-12-24T13:18:10.456066
| 2013-08-13T14:25:21
| 2013-08-13T14:25:21
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 5,213
|
r
|
dinucleotides.R
|
# Read dinucleotide frequency tables for real tags and randomly generated tags.
#
# Args:
#   dinucloetide.Freq.File: path to the frequency table from real tags
#     (parameter name keeps the original misspelling for caller compatibility).
#   dinucleotide.Freq.Random.Tags.File: path to the table from random tags.
# Returns:
#   A list with elements `real` and `random`, each a data.frame as read by
#   read.table().
get.dinucleotide.frequencies <- function(dinucloetide.Freq.File, dinucleotide.Freq.Random.Tags.File){
  results = list()
  freq.From.Real.Tags = read.table(file = dinucloetide.Freq.File)
  freq.Frome.Random.Tags = read.table(file = dinucleotide.Freq.Random.Tags.File)
  results$real = freq.From.Real.Tags
  # Fixed: the element was misspelled "randome"; downstream code accesses
  # $random and only worked through $'s partial matching on lists.
  results$random = freq.Frome.Random.Tags
  results
}#get.dinucleotide.frequencies#
# The 16 dinucleotides, in the row order used for labeling the matrices below.
dinucleotides = c("aa", "ac", "ag", "at", "ca", "cc", "cg", "ct", "ga", "gc", "gg", "gt", "ta", "tc", "tg", "tt")
# Load real-tag vs random-tag dinucleotide frequencies for one data set;
# uncomment exactly one get.dinucleotide.frequencies() call to switch between
# UW / Duke data and cell lines. All variants reuse the Gm12878 random-tag
# background file.
#UW DATA
#Gm12878
dinucleotide.freq.data = get.dinucleotide.frequencies("/nfs/th_group/hk3/UW_DNaseI_HS/Gm12878_For_Paper_Analysis/wgEncodeUwDnaseGm12878Alnrep1_chr22_dinucleotides_frequency.txt", "/nfs/th_group/hk3/UW_DNaseI_HS/Gm12878_For_Paper_Analysis/dinucleotides_frequency_from_Gm12878_randomly_generated_tags.txt")
#dinucleotide.freq.data = get.dinucleotide.frequencies("/nfs/th_group/hk3/UW_DNaseI_HS/K562_For_Paper_Analysis/wgEncodeUwDnaseK562Alnrep1_chr22_dinucleotides_frequency.txt", "/nfs/th_group/hk3/UW_DNaseI_HS/Gm12878_For_Paper_Analysis/dinucleotides_frequency_from_Gm12878_randomly_generated_tags.txt")
#dinucleotide.freq.data = get.dinucleotide.frequencies("/nfs/th_group/hk3/UW_DNaseI_HS/H1hesc_For_Paper_Analysis/wgEncodeUwDnaseH1hescAlnrep1_chr22_dinucleotides_frequency.txt", "/nfs/th_group/hk3/UW_DNaseI_HS/Gm12878_For_Paper_Analysis/dinucleotides_frequency_from_Gm12878_randomly_generated_tags.txt")
#dinucleotide.freq.data = get.dinucleotide.frequencies("/nfs/th_group/hk3/UW_DNaseI_HS/Hsmm_For_Paper_Analysis/wgEncodeUwDnaseHsmmAlnrep1_chr22_dinucleotides_frequency.txt", "/nfs/th_group/hk3/UW_DNaseI_HS/Gm12878_For_Paper_Analysis/dinucleotides_frequency_from_Gm12878_randomly_generated_tags.txt")
#dinucleotide.freq.data = get.dinucleotide.frequencies("/nfs/th_group/hk3/UW_DNaseI_HS/Helas3_For_Paper_Analysis/wgEncodeUwDnaseHelas3Alnrep1_chr22_dinucleotides_frequency.txt", "/nfs/th_group/hk3/UW_DNaseI_HS/Gm12878_For_Paper_Analysis/dinucleotides_frequency_from_Gm12878_randomly_generated_tags.txt")
# DUKE DATA
#dinucleotide.freq.data = get.dinucleotide.frequencies("/nfs/th_group/hk3/Duke_DNaseI_HS/Gm12878_For_Paper_Analysis/wgEncodeOpenChromDnaseGm12878Alnrep1_chr22_dinucleotides_frequency.txt", "/nfs/th_group/hk3/Duke_DNaseI_HS/Gm12878_For_Paper_Analysis/dinucleotides_frequency_from_Gm12878_randomly_generated_tags.txt")
#dinucleotide.freq.data = get.dinucleotide.frequencies("/nfs/th_group/hk3/Duke_DNaseI_HS/K562_For_Paper_Analysis/wgEncodeOpenChromDnaseK562Alnrep1_chr22_dinucleotides_frequency.txt", "/nfs/th_group/hk3/Duke_DNaseI_HS/Gm12878_For_Paper_Analysis/dinucleotides_frequency_from_Gm12878_randomly_generated_tags.txt")
#dinucleotide.freq.data = get.dinucleotide.frequencies("/nfs/th_group/hk3/Duke_DNaseI_HS/H1hesc_For_Paper_Analysis/wgEncodeOpenChromDnaseH1hescAlnrep1_chr22_dinucleotides_frequency.txt", "/nfs/th_group/hk3/Duke_DNaseI_HS/Gm12878_For_Paper_Analysis/dinucleotides_frequency_from_Gm12878_randomly_generated_tags.txt")
#dinucleotide.freq.data = get.dinucleotide.frequencies("/nfs/th_group/hk3/Duke_DNaseI_HS/Hsmm_For_Paper_Analysis/wgEncodeOpenChromDnaseHsmmAlnrep1_chr22_dinucleotides_frequency.txt", "/nfs/th_group/hk3/Duke_DNaseI_HS/Gm12878_For_Paper_Analysis/dinucleotides_frequency_from_Gm12878_randomly_generated_tags.txt")
#dinucleotide.freq.data = get.dinucleotide.frequencies("/nfs/th_group/hk3/Duke_DNaseI_HS/Helas3_For_Paper_Analysis/wgEncodeOpenChromDnaseHelas3Alnrep1_chr22_dinucleotides_frequency.txt", "/nfs/th_group/hk3/Duke_DNaseI_HS/Gm12878_For_Paper_Analysis/dinucleotides_frequency_from_Gm12878_randomly_generated_tags.txt")
DINUC.REAL.TAGS = dinucleotide.freq.data$real
# NOTE(review): the upstream list element may be spelled "randome"; $random
# still resolves it through R's partial matching on lists.
DINUC.RANDOM.TAGS = dinucleotide.freq.data$random
# Normalize observed dinucleotide frequencies by the random-tag background.
DINUC.NORMALIZED = DINUC.REAL.TAGS/DINUC.RANDOM.TAGS
DINUC.NORMALIZED.MATRIX = as.matrix(DINUC.NORMALIZED)
# Transpose so rows are the 16 dinucleotides and columns are sequence positions.
DINUC.NORMALIZED.MATRIX.T = t(DINUC.NORMALIZED.MATRIX)
rownames(DINUC.NORMALIZED.MATRIX.T) = dinucleotides
#heatmap.2(DINUC.NORMALIZED.MATRIX.T, col=redgreen(75), scale="row", key=TRUE, density.info="none", trace="none",cexCol=0.9, dendrogram = "row", Colv = FALSE, colsep = c(10,30))
# Row indices of the A/T-rich and C/G-rich dinucleotide groups.
AA.AT.TA.TC.INDICES = c(1, 4, 13, 14)
CC.CG.GC.GG.INDICES = c(6, 7, 10, 11)
# Mean log-normalized frequency per position for each group (last column, 56,
# dropped).
plot(colMeans( log(DINUC.NORMALIZED.MATRIX.T)[AA.AT.TA.TC.INDICES,-56]), col ='red', lwd =3, type = 'l', ylab= 'dinucleotid frequency', xlab= 'sequence position', main = "UW:Gm12878", ylim = c(-1, 1) )
lines( colMeans(log(DINUC.NORMALIZED.MATRIX.T)[CC.CG.GC.GG.INDICES,-56]) , lwd = 3, col='blue')
abline(v=10, lty = 1, col='grey')
abline(v=46, lty = 1, col='grey')
# Fixed: the object was referenced as DINUC_NORMALIZED (underscores), which
# was never defined -- the data frame created above is DINUC.NORMALIZED.
plot(DINUC.NORMALIZED[, 1], col = 'blue', pch=1, lty=1, lwd =2, type='b', ylab = "dinucleotides freq", main = "dinucleotide freqs", ylim= c(0,3))
# Fixed: seq(2:16) evaluates to 1:15 (seq_along semantics), not 2:16.
for(x in 2:16){
# `title` is computed but unused (the plot title above is fixed); kept as-is.
title = paste("normalized frequency of ", dinucleotides[x], sep=" ")
lines(DINUC.NORMALIZED[, x], col = x, pch=1, lty=1, lwd =2, type='b', ylab = "dinucleotides freq")
}
# One panel per dinucleotide in a 4x4 grid.
par(mfrow = c(4,4))
for(x in seq_len(16)){
plot(DINUC.NORMALIZED.MATRIX.T[x,], main =dinucleotides[x], lwd =2, col = "blue", type = 'l', ylab =paste("freq of", dinucleotides[x], sep=" "), xlab= "seq position" )
}
|
2c1bd51dd26155db9b8ddeac49b86322f58ed9de
|
2d44d0b3d4beedbf4169438d0c577edf339b3952
|
/vardisc/ui.R
|
b771230555c2724a03fdb761677c6b6b064f9178
|
[] |
no_license
|
joseanglez/Shinyapps
|
473dbb4178e698f2987ffc075b9c977692a3c69a
|
6b3cc0a085ec5d688edca88151d26f0405dbb22d
|
refs/heads/master
| 2019-08-23T21:36:16.334174
| 2018-01-10T11:57:26
| 2018-01-10T11:57:26
| 116,840,659
| 0
| 1
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,371
|
r
|
ui.R
|
Sys.setlocale("LC_ALL", "es_ES.UTF-8") #to be sure that accents in text will be allowed in plots
library(shiny)
# Shared objects (e.g. credit1, used in tab 1) come from this sourced file.
source('../credit.r')
# Define UI for slider demo application. All labels are textOutput()s
# rendered server-side -- presumably so the eng/spa buttons below can switch
# the interface language; confirm against server.R.
shinyUI(fluidPage(h3(textOutput("titulo")),
# Language switcher (UK / Spain flag buttons) pinned to the top-right corner.
absolutePanel(
top = 0, right = 0, width=80,
fluidRow(actionButton("eng", img(src='uk-icon.png'), style="margin:0;padding:0;"), actionButton("spa", img(src='Spain-icon.png'), style="margin:0;padding:0;"))
),
tabsetPanel(type = "tabs",
# Tab 1: explanatory text + clickable plot (clicks reported as input$P) +
# an HTML table; "reset" clears the selection (label from server).
tabPanel(textOutput("tab1"),
fluidRow(
column(3, br(),
p(textOutput("par1")),
p(textOutput("par2")),
uiOutput("par3"),
credit1
),
column(4,
plotOutput("fig", clickId='P'),
actionButton("reset", textOutput("null"))
),
column(3, br(),
p(textOutput("par4")),
htmlOutput("tab")
)
)
),
# Tab 2: simulation controls ("lanza"/roll button) and a frequency plot,
# with a "cero"/zero button to reset counts.
tabPanel(textOutput("tab2"),
fluidRow(
column(3, br(),
p(textOutput("par5")),
p(textOutput("par6")),
p(textOutput("par7")),
actionButton("lanza", textOutput("roll"))
),
column(4,
plotOutput("freq"),
actionButton("cero", textOutput("zero"))
)
)
),
# Tab 3: explanatory text and a plot of running means.
tabPanel(textOutput("tab3"),
fluidRow(
column(3, br(),
p(textOutput("par8")),
p(textOutput("par9"))
),
column(4,
plotOutput("medias")
)
)
)
)
))
|
71b9c9f7bc0fa6e154886bcebd39f138c84f8694
|
d72ddca638703a741bc03182f7ed740a230785b8
|
/plot4.R
|
075769bf73e4af4675417055e8287479b35db31b
|
[] |
no_license
|
mmartinez2176/ExploratoryDataAnalysisProj2
|
c653ce718b011b1bc51670b0c1e13b2f8e669fc1
|
2984afda5a27070bb30537e58f19e3e4cfaa3a11
|
refs/heads/master
| 2021-01-10T06:43:47.275382
| 2016-03-27T03:58:01
| 2016-03-27T03:58:01
| 54,810,211
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,762
|
r
|
plot4.R
|
#######################################
# Exploratory Data Analysis
# Project 2
# Author Martin Martinez
# This R Script contains the resolution for the Exploratory Data Analysis
# This script is broken into smaller section
#######################################
## Section A. load Library
library(data.table)
library(dplyr)
library(ggplot2)
#######################################
## Section B. getting the data
## set the working dir.
myWorkingDir <- "/home/mmartine/Documents/Education/Coursera/04. Exploratory-Data-Analysis/Course Project/Project 2"
# Fail fast with a real error (the original printed a message and then called
# stop() with no message, producing an empty error) if the project directory
# is missing, then switch into it.
if (!file.exists(myWorkingDir)) {
  stop("Cannot find the project directory")
}
setwd(myWorkingDir)
# Validate that both input files exist. The original checked each file twice
# (once with file.exists() and once with `%in% dir()`); the duplicate checks
# tested exactly the same condition and were removed.
if (!file.exists("summarySCC_PM25.rds")) {
  stop("Cannot find summarySCC_PM25 file")
}
if (!file.exists("Source_Classification_Code.rds")) {
  stop("Cannot find Source_Classification_Code file")
}
## get the Data set loading the file.
## This first line will likely take a few seconds. Be patient!
NEI <- readRDS("summarySCC_PM25.rds")
SCC <- readRDS("Source_Classification_Code.rds")
##Assignment
##You must address the following questions and tasks in your exploratory analysis.
## For each question/task you will need to make a single plot. Unless specified,
## you can use any plotting system in R to make your plot.
## QUESTION 4
##Across the United States, how have emissions from coal combustion-related sources changed from 1999–2008?
## Code: R Programming file plot4.R
## Plot: plot4.png
## Join emissions (NEI) to source classifications (SCC) so sources can be
## filtered by their human-readable Short.Name.
mergedData <- merge(NEI, SCC, by="SCC")
## Total PM2.5 emitted per year by sources whose short name mentions "coal"
## (case-insensitive).
totalYearlyEmissions <- mergedData %>%
  filter(grepl("coal",Short.Name,ignore.case=TRUE)) %>%
  group_by(year) %>%
  summarise(Emissions=sum(Emissions))
png("plot4.png", width=640, height=480)
graph <- ggplot(totalYearlyEmissions, aes(factor(year), Emissions))
graph <- graph +
  geom_bar(stat="identity") +
  xlab("Year") +
  ylab("Total PM(2.5) Emission") +
  # Fixed: the title said 1998-2008, but the assignment question covers
  # 1999-2008 (the NEI years are 1999, 2002, 2005, 2008).
  ggtitle("Total PM emission from Coal Combustion-related 1999-2008")
print(graph)
dev.off()
## Answer Question 4:
## As you can see from plot4.png, coal combustion related PM25 emissions have decreased from 1999-2008
|
5f243b31cd822421c8398787e1ca7a4f841e2b84
|
db6e0fc29d4e1cf3a888571ae48488cad6fb3f26
|
/man/addOpacitySlider.Rd
|
a7c1b8f119cf09cce2c6a0f938001f935a059aa5
|
[] |
no_license
|
cran/leaflet.opacity
|
412275fccca09cb6254d4d7649abc6fe93c7e343
|
a83f3a5ece9288ed4c5078ff0f8b123e48897505
|
refs/heads/master
| 2020-04-08T20:17:12.276520
| 2018-11-29T15:00:10
| 2018-11-29T15:00:10
| 159,692,421
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 850
|
rd
|
addOpacitySlider.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/controls.R
\name{addOpacitySlider}
\alias{addOpacitySlider}
\title{Add opacity slider control to map}
\usage{
addOpacitySlider(map, layerId)
}
\arguments{
\item{map}{Leaflet map}
\item{layerId}{Opacity layer}
}
\description{
Add opacity slider control to map
}
\examples{
# Libraries
library(leaflet)
library(leaflet.opacity)
library(raster)
# Create artificial layer
r <- raster(xmn = -2.8, xmx = -2.79, ymn = 54.04, ymx = 54.05, nrows = 30, ncols = 30)
values(r) <- matrix(1:900, nrow(r), ncol(r), byrow = TRUE)
crs(r) <- CRS("+init=epsg:4326")
# Create leaflet map with opacity slider
leaflet() \%>\%
addTiles() \%>\%
addRasterImage(r, layerId = "raster") \%>\%
addOpacitySlider(layerId = "raster")
}
\author{
Marc Becker
}
|
a7fd8518852519dff7a7d90b03a975321a8a7bc6
|
476d21edd227c3af80fc490b9b109e9574000714
|
/code/global/import_data.R
|
7d1e7fe8e006c32e6aba02b1f25b0340280e06e1
|
[] |
no_license
|
leeshu-rt/DroughtOps_beta
|
e2c865587be9c3122aefb041d773e3ac6270bce4
|
f225d18984dd8bf61ab2b8ae5354256d75fa6d72
|
refs/heads/main
| 2023-08-17T05:45:36.708749
| 2021-09-16T20:13:16
| 2021-09-16T20:13:16
| 407,278,159
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 37,311
|
r
|
import_data.R
|
# *****************************************************************************
# DESCRIPTION
# *****************************************************************************
# This script is run by global.R
# It imports time series (ts) data.
# The path to the local files with time series data is defined in global.R
# The paths to the automatically accessed online data are given below
# *****************************************************************************
# INPUTS
# *****************************************************************************
# gages_daily.csv - file listing USGS stream gages we use for daily data
#
#------------------------------------------------------------------------------
# DAILY FLOW DATA
# Can be obtained from 2 sources.
# The OPTION switch is set in global.R:
# OPTION 1: read file directly from NWIS
# (autoread_dailyflows == 1)
# OPTION 2: read local file, /input/ts/current/flows_daily_cfs.csv
# (autoread_dailyflows == 0)
# For autoread option, if date >= June 1, start date is Jan 1;
# end date is yesterday.
#------------------------------------------------------------------------------
# HOURLY FLOW DATA
# Can be obtained from 2 sources:
# (start date is user-selected and end date is today)
#
# The OPTION switch is set in global.R:
# OPTION 1: read file directly from USGS's NWIS websites
# (autoread_hourlyflows == 1)
# OPTION 2: read local file, /input/ts/current/flows_hourly_cfs.csv
# (autoread_hourlyflows == 0)
#------------------------------------------------------------------------------
# WITHDRAWAL DATA
# Can be obtained from 2 sources:
# (start date is user-selected and end date is 15 days into the future)
#
# The OPTION switch is set in global.R:
# OPTION 1: read file directly from Data Portal
# (autoread_hourlywithdrawals == 1)
# OPTION 2: read local file, /input/ts/current/flows_hourly_cfs.csv
# (autoread_hourlywithdrawals == 0)
#------------------------------------------------------------------------------
# RESERVOIR STORAGE DATA
# Can be obtained from 2 sources:
# (start date is user-selected and end date is current day)
#
# The OPTION switch is set in global.R:
# OPTION 1: read file directly from Data Portal
# (autoread_dailystorage == 1)
# OPTION 2: read local file, /input/ts/current/flows_hourly_cfs.csv
# (autoread_dailystorage == 0)
#------------------------------------------------------------------------------
# LFFS DATA
# Can be read from two locations
# The OPTION switch is set in global.R:
# OPTION 1: read file directly from Data Portal
# (autoread_dailyflows == 1)
# OPTION 2: read local file, /input/ts/current/flows_daily_cfs.csv
# (autoread_dailyflows == 0)
#------------------------------------------------------------------------------
# STATE DROUGHT STATUS
# - time series of gw, precip, etc indices for MD, VA
# - read from state_drought_status.csv
# - this is currently a dummy file from 2018 DREX
#------------------------------------------------------------------------------
# Fake reservoir ops dfs, e.g., drex2018_output_sen.csv
# - used to initialize the res.ts.df's until I decide how to handle this
# *****************************************************************************
# OUTPUTS
# *****************************************************************************
# flows.daily.cfs.df0 - processed by /data_processing/process_daily_flows.R
# flows.hourly.cfs.df0 - processed by /data_processing/process_hourly_flows.R
# withdrawals.hourly.mgd.df0 - really withdrawals right now
# - used to create potomac.data.df in potomac_flows_init.R
# - used in sim_main_func in call to simulation_func
# - used in sim_add_days_func in call to simulation_func
# lffs.hourly.cfs.df0
# state.drought.df
# - used in state_status_ts_init.R
# - used in state_indices_update_func.R
# sen.ts.df00, pat.ts.df00, ..., from date_start to date_end (from parameters)
# *****************************************************************************
#------------------------------------------------------------------------------
#------------------------------------------------------------------------------
# PRELIMINARIES
#------------------------------------------------------------------------------
#------------------------------------------------------------------------------
# Read list of daily flow gages: id, location, description --------------------
# - e.g. 1638500, por, Potomac River at Point of Rocks
# Load the daily-gage parameter table (gage_id, location, area, recession k,
# description). fread drops the leading zero-preserving quoting upstream, so
# the USGS 8-digit id is rebuilt by prepending "0".
gages_daily <- data.table::fread(paste0(parameters_path, "gages_daily.csv"),
                                 header = TRUE,
                                 colClasses = c('gage_id' = 'character',
                                                'location' = 'character',
                                                'area_mi2' = 'numeric',
                                                'k' = 'numeric',
                                                'description' = 'character'),
                                 data.table = FALSE) %>%
  mutate(gage_id = paste0("0", gage_id))
# Column names for the daily flow files: "date" followed by gage locations.
list_gages_daily_locations <- c("date", gages_daily$location)
list_gages_daily_ids <- gages_daily$gage_id
# Number of gage columns in the daily flow files (everything but "date").
n_gages_daily <- length(list_gages_daily_locations) - 1
# Location names as a list (equivalent to dropping the leading "date" entry).
gages_daily_locations <- as.list(gages_daily$location)
# First and last day of the current year.
date_dec31 <- lubridate::ceiling_date(date_today0, unit = "year") - 1
date_jan1 <- lubridate::floor_date(date_today0, unit = "year")
# Today's month/day/year as zero-padded strings (used to build URLs below).
today_month <- substring(date_today0, first = 6, last = 7)
today_day <- substring(date_today0, first = 9, last = 10)
today_year <- substring(date_today0, first = 1, last = 4)
#------------------------------------------------------------------------------
#------------------------------------------------------------------------------
# DAILY FLOW DATA
# This will be appended to historical dailies, currently ending 2020-12-31
#------------------------------------------------------------------------------
#------------------------------------------------------------------------------
# Set switch (this has been moved to global)-----------------------------------
# autoread_dailyflows <- 1 # automatic data retrieval from NWIS
# autoread_dailyflows <- 0 # read data from file in local directory
#------------------------------------------------------------------------------
# DAILY FLOW OPTION 1 - AUTOMATIC DATA RETRIEVAL
# - read daily flow data automatically from Data Portal
# - start date is January 1 of the current year
#------------------------------------------------------------------------------
if(autoread_dailyflows == 1) {
# Window: Jan 1 of the current year through yesterday.
start_date_string <- paste(year(date_today0), "-01-01", sep="")
end_date_string <- paste(date_today0 - 1)
# Daily mean flow (param 00060, stat 00003) for all daily gages; the
# relevant returned fields are: site_no, Date, X_00060_00003.
flows_daily_long_cfs_df0 <- dataRetrieval::readNWISdv(
siteNumbers = list_gages_daily_ids,
parameterCd = "00060",
startDate = start_date_string,
endDate = end_date_string,
statCd = "00003") %>%
mutate(date_time = as.Date(Date), flow_cfs = X_00060_00003) %>%
select(date_time, site_no, flow_cfs)
# Attach the human-readable location name for each gage id.
flows_daily_long_cfs_df <- left_join(flows_daily_long_cfs_df0,
gages_daily,
by = c("site_no" = "gage_id")) %>%
select(date_time, location, flow_cfs)
# Long -> wide: one column per gage location, one row per day.
flows.daily.cfs.df0 <- flows_daily_long_cfs_df %>%
pivot_wider(names_from = location, values_from = flow_cfs)
}
#------------------------------------------------------------------------------
# DAILY FLOW OPTION 2 - READ DATA FROM FILE IN LOCAL DIRECTORY
# - read daily flow data from file residing in /input/ts/current/
# - file name is flows_daily_cfs.csv
# - code set up so that these time series should begin on Jan 1 of current year
# - can create this file by hitting the "Write output time series" button
# on the sidebar to the left, then copying from /output and pasting
# into /input/ts/current/.
# - OR, daily data can be downloaded from CO-OP's Data Portal
# - link for manual download is https://icprbcoop.org/drupal4/icprb/flow-data
# - name appropriately then save the file to /input/ts/current/
#------------------------------------------------------------------------------
if(autoread_dailyflows == 0) {
# Read the local data table (manually downloaded from the Data Portal);
# columns are renamed to the gage locations from gages_daily.csv.
flows.daily.cfs.df0 <- data.table::fread(
paste(ts_path, "flows_daily_cfs.csv", sep = ""),
header = TRUE,
stringsAsFactors = FALSE,
colClasses = c("character", rep("numeric", n_gages_daily)), # force numeric
col.names = list_gages_daily_locations, # 1st column is "date"
na.strings = c("eqp", "Ice", "Bkw", "", "#N/A", "NA", -999999),
data.table = FALSE) %>%
dplyr::mutate(date_time = as.Date(date)) %>%
select(-date) %>%
# drop rows whose date failed to parse
filter(!is.na(date_time)) %>%
select(date_time, everything()) %>%
arrange(date_time)
}
print("finished importing daily flows")
#------------------------------------------------------------------------------
#------------------------------------------------------------------------------
# HOURLY FLOW DATA:
#------------------------------------------------------------------------------
#------------------------------------------------------------------------------
# Set switch (this has been moved to global)-----------------------------------
# autoread_hourlyflows <- 1 # automatic data retrieval from USGS NWIS
# autoread_hourlyflows <- 0 # read data from file in local directory
#------------------------------------------------------------------------------
# HOURLY FLOW OPTION 1 - AUTOMATIC DATA RETRIEVAL
# - read hourly data automatically from NWIS using package, dataRetrieval
#------------------------------------------------------------------------------
if(autoread_hourlyflows == 1) {
# define the desired gages---------------------------------------------------
gages_hourly_names <- c("lfalls",
"seneca",
"goose",
"monoc_jug",
"por",
"luke",
"kitzmiller",
"barnum",
"bloomington",
"barton")
gages_hourly_ids <- c("01646500",
"01645000",
"01644000",
"01643000",
"01638500",
"01598500",
"01595500",
"01595800",
"01597500",
"01596500")
# Lookup table mapping USGS site numbers to short location names.
gages_hourly <- data.frame(gage_id = gages_hourly_ids,
location = gages_hourly_names)
# n_gages_hourly <- length(gages_hourly_ids)
# set desired number of past days--------------------------------------------
n_past_days <- 90
# start_date <- as.POSIXct(date_today0) - lubridate::days(n_past_days)
# start_datetime <- time_now0 - lubridate::days(n_past_days)
# start_datetime <- lubridate::with_tz(start_datetime, "EST")
# # round to nearest hour in order to use dataRetrieval
# start_datetime <- lubridate::floor_date(start_datetime, unit = "hours")
#
# # Create dummy df of dates and hours-----------------------------------------
# temp0 <- start_datetime - lubridate::hours(1) # first step back 1 hour
# flows.hourly.empty.df <- data.frame(date_time = temp0) %>%
# add_row(date_time = seq.POSIXt(start_datetime,
# by = "hour",
# length.out = n_past_days*24))
# download hourly flows into a df--------------------------------------------
# - uses the USGS dataRetrieval package; instantaneous values (param 00060)
# - timestamps are returned in America/New_York local time
# the relevant fields are: site_no, dateTime, X_00060_00000:
flows_rt_long_cfs_df0 <- dataRetrieval::readNWISuv(
# siteNumbers = gages_hourly_ids,
siteNumbers = gages_hourly_ids,
parameterCd = "00060",
# startDate = start_date,
startDate = date_today0 - n_past_days,
endDate = date_today0,
tz = "America/New_York"
) %>%
mutate(date_time = dateTime, flow_cfs = X_00060_00000) %>%
select(date_time, site_no, flow_cfs)
# Attach short location names, then go long -> wide (one column per gage).
flows_rt_long_cfs_df <- left_join(flows_rt_long_cfs_df0,
gages_hourly,
by = c("site_no" = "gage_id")) %>%
select(date_time, location, flow_cfs)
flows_rt_cfs_df <- flows_rt_long_cfs_df %>%
pivot_wider(names_from = location, values_from = flow_cfs)
# Collapse sub-hourly instantaneous readings to one value per hour by
# rounding each timestamp to the nearest hour and averaging within the hour.
flows.hourly.cfs.df0 <- flows_rt_cfs_df %>%
mutate(date_hour = lubridate::round_date(date_time,
unit = "hour")) %>%
select(-date_time) %>%
group_by(date_hour) %>%
summarise(across(where(is.numeric), ~mean(.x, na.rm = TRUE))) %>%
rename(date_time = date_hour) %>%
ungroup()
# flows.hourly.cfs.df0 <- get_hourly_flows_func(gage_nos = gages_hourly_nos,
# gage_names = gages_hourly_names,
# flows_empty = flows.hourly.empty.df,
# start_date = date_today0 -
# lubridate::days(n_past_days),
# end_date = date_today0,
# usgs_param = "00060")
# Trim off a day of beginning rows to get rid of NA's------------------------
# (this needs to be improved - ripe for creating a bug!)
# flows.hourly.cfs.df0 <- tail(flows.hourly.cfs.df0, (n_past_days - 1)*24)
}
#------------------------------------------------------------------------------
# HOURLY FLOW OPTION 2 - READ DATA FROM FILE IN LOCAL DIRECTORY
# - flow data file resides in /input/ts/current/
# - file name is flows_hourly_cfs.csv
# - hourly data can be downloaded from CO-OP's Data Portal
# - link for manual download is https://icprbcoop.org/drupal4/icprb/flow-data
# - grab last few days of data only (or memory error!)
# - name appropriately then save the file to /input/ts/current/
# - can create a longer time series by pasting new data into existing file
#------------------------------------------------------------------------------
if(autoread_hourlyflows == 0) {
# Read the local data table (manually downloaded from the Data Portal).
# (need to convert date times to POSIXct for hourly's)
flows.hourly.cfs.df0 <- data.table::fread(
paste(ts_path, "flows_hourly_cfs.csv", sep = ""),
header = TRUE,
stringsAsFactors = FALSE,
colClasses = c('date' = 'character'),
# col.names = list_gages_daily_locations, # 1st column is "date"
na.strings = c("eqp", "Ice", "Bkw", "", "#N/A", "NA", -999999),
data.table = FALSE) %>%
dplyr::mutate(date_time = as.POSIXct(date, tz = "EST")) %>%
select(-date) %>%
arrange(date_time) %>%
filter(!is.na(date_time)) %>% # sometime these are sneaking in
head(-1) %>% # the last record is sometimes missing most data
select(date_time, everything())
}
print("finished importing hourly flows")
#------------------------------------------------------------------------------
#------------------------------------------------------------------------------
# WITHDRAWAL DATA
#------------------------------------------------------------------------------
#------------------------------------------------------------------------------
# Set switch (this has been moved to global)-----------------------------------
# autoread_hourlywithdrawals <- 1 # automatic data retrieval from Data Portal
# autoread_hourlywithdrawals <- 0 # read data from file in local directory
# a temporary need until the time series becomes available
# Broad Run discharge (MGD); placeholder until its time series is available.
discharge_broadrun <- 5 # MGD
# might be temporary - FW Central SA demand - water purchased from WA
d_fw_c <- 10 # MGD
#------------------------------------------------------------------------------
# WITHDRAWAL OPTION 1 - AUTOMATIC DATA RETRIEVAL
# - read hourly withdrawal data automatically from Data Portal
#------------------------------------------------------------------------------
if(autoread_hourlywithdrawals == 1) {
# read the online table -----------------------------------------------------
# Apr-2021: col names on line 16, data begins line 17 -- hence skip = 16 with
# header = FALSE; column names are assigned explicitly below.
withdrawals.hourly.mgd.df0 <- data.table::fread(
"https://icprbcoop.org/drupal4/products/wma_withdrawals.csv",
skip = 16,
header = FALSE,
stringsAsFactors = FALSE,
# colClasses = c("character", rep("numeric", 6)), # force cols 2-6 numeric
na.strings = c("", "#N/A", "NA", -999999),
data.table = FALSE)
# Supplier/intake withdrawal columns -- presumably FW = Fairfax Water,
# WA = Washington Aqueduct (GF/LF = Great/Little Falls), LW = Loudoun Water,
# OC = Occoquan, PA = Patuxent, BR = Broad Run; confirm against the Data
# Portal product documentation.
names(withdrawals.hourly.mgd.df0) <- c("DateTime",
"FW_POT",
"WSSC_POT",
"WA_GF",
"WA_LF",
"LW_POT",
"LW_FW",
"FW_OC",
"WSSC_PA",
"LW_BR")
}
#------------------------------------------------------------------------------
# WITHDRAWAL OPTION 2 - READ DATA FROM FILE IN LOCAL DIRECTORY
# - withdrawal data file resides in /input/ts/current/
# - file name is wma_withdrawals.csv
# - data can be downloaded from CO-OP's Data Portal via the "Products" tab
# - data is expressed as hourly
# - data extends from 30 days in past to 15 days in future
# - name appropriately then save the file to /input/ts/current/
# - can create a longer time series by pasting new data into existing file
#------------------------------------------------------------------------------
if(autoread_hourlywithdrawals == 0) {
  # The local file is the same Data Portal product read in option 1: column
  # names sit on line 16 and data begin on line 17, so after skip = 16 the
  # next line is data. Fixed: header = TRUE here (inconsistent with option 1)
  # silently consumed the first data row as a header that the names()
  # assignment below then overwrote anyway.
  withdrawals.hourly.mgd.df0 <- data.table::fread(
    paste(ts_path, "wma_withdrawals.csv", sep = ""),
    skip = 16,
    header = FALSE,
    stringsAsFactors = FALSE,
    # colClasses = c("character", rep("numeric", 6)), # force cols 2-6 numeric
    na.strings = c("", "#N/A", "NA", -999999),
    data.table = FALSE)
  # Same column layout as the online product (see option 1 above).
  names(withdrawals.hourly.mgd.df0) <- c("DateTime",
                                         "FW_POT",
                                         "WSSC_POT",
                                         "WA_GF",
                                         "WA_LF",
                                         "LW_POT",
                                         "LW_FW",
                                         "FW_OC",
                                         "WSSC_PA",
                                         "LW_BR")
}
print("finished importing withdrawals")
#------------------------------------------------------------------------------
#------------------------------------------------------------------------------
# DAILY RESERVOIR STORAGE DATA
#------------------------------------------------------------------------------
#------------------------------------------------------------------------------
# Set switch (this has been moved to global)-----------------------------------
# autoread_dailystorage <- 1 # automatic data retrieval from Data Portal
# autoread_dailystorage <- 0 # read data from file in local directory
#------------------------------------------------------------------------------
# DAILY STORAGE OPTION 1 - AUTOMATIC DATA RETRIEVAL
# - read daily storage data automatically from Data Portal
#------------------------------------------------------------------------------
# &startdate=07%2F12%2F2021&enddate=07%2F19%2F2021&format=csv&submit=Submit
if(autoread_dailystorage == 1) {
# read the online data ------------------------------------------------------
# Paste together the Data Portal URL for daily usable storage of the local
# reservoirs (Patuxent, Little Seneca, Occoquan); the North Branch
# reservoirs (JRR, Savage) are pulled from NWIS further below.
url_dailystor0 <- paste("https://icprbcoop.org/drupal4/icprb/data-view?",
"patuxent_reservoirs_current_usable_storage_wssc=patuxent_reservoirs_current_usable_storage_wssc",
"&little_seneca_reservoir_current_usable_storage_wssc=little_seneca_reservoir_current_usable_storage_wssc",
"&occoquan_reservoir_current_usable_storage=occoquan_reservoir_current_usable_storage",
# "&jrr_current_usable_storage=jrr_current_usable_storage",
# "&jrr_current_usable_ws_storage=jrr_current_usable_ws_storage",
# "&sr_current_usable_storage=sr_current_usable_storage",
sep = "")
# Query window: June 1 of the current year through today (URL-encoded
# mm/dd/yyyy, "%2F" = "/").
year_temp <- today_year
day_first <- "01"
month_first <- "06"
start_date_string <- paste("&startdate=", month_first, "%2F",
day_first, "%2F",
year_temp, "&enddate=", sep="")
url_dailystor <- paste(url_dailystor0, start_date_string,
today_month, "%2F",
today_day, "%2F",
today_year, "&format=csv&submit=Submit",
sep="")
# the name storage.daily.bg.df0 is used in the legacy sim code (line 566)
storage_local_daily_bg_df <- data.table::fread(
url_dailystor,
skip = 3,
header = FALSE,
stringsAsFactors = FALSE,
# colClasses = c("character", rep("numeric", 6)), # force cols 2-6 numeric
na.strings = c("", "#N/A", "NA", -999999),
data.table = FALSE)
names(storage_local_daily_bg_df) <- c("date_time",
"patuxent",
"seneca",
"occoquan")
# North Branch reservoir storage from NWIS (param 00054, reservoir storage).
# The factor 0.3259/1000 presumably converts acre-feet to billions of
# gallons (1 ac-ft ~ 0.3259 MG) -- TODO confirm units.
storage_nbr_df <- dataRetrieval::readNWISuv(
siteNumbers = c("01595790", "01597490"),
parameterCd = "00054",
startDate = paste0(year_temp, "-06-01"),
endDate = date_today0,
tz = "America/New_York"
) %>%
mutate(stor_mg = X_00054_00000*0.3259/1000, date_time = dateTime) %>%
select(date_time, site_no, stor_mg) %>%
pivot_wider(names_from = site_no, names_prefix = "X",
values_from = stor_mg) %>%
# NOTE(review): jrr_ws (JRR water-supply share) is hard-coded at 13.1;
# confirm whether this should track an actual time series.
mutate(jrr_total = X01595790, jrr_ws = 13.1, savage = X01597490,
hour = lubridate::hour(date_time),
minute = lubridate::minute(date_time),
date = lubridate::date(date_time))
# just grab value at 8 AM for daily data df
storage_nbr_daily_df <- storage_nbr_df %>%
filter(hour == 8 & minute == 0) %>%
select(date, jrr_total, jrr_ws, savage) %>%
rename(date_time = date)
}
#------------------------------------------------------------------------------
# DAILY STORAGE OPTION 2 - READ DATA FROM FILE IN LOCAL DIRECTORY
# - daily storage data file resides in /input/ts/current/
# - file name is wma_storage.csv
# - data can be downloaded from CO-OP's Data Portal via the "Products" tab
# - data is expressed as daily
# - data extends from June 1 through the current day
# - name appropriately then save the file to /input/ts/current/
# - can create a longer time series by pasting new data into existing file
#------------------------------------------------------------------------------
if(autoread_dailystorage == 0) {
# Read the locally saved daily-storage table (input/ts/current/wma_storage.csv).
# After skipping the 3-line header the columns are: date plus the six
# reservoir storages named in the names() call below.
storage_daily_bg_df0 <- data.table::fread(
paste(ts_path, "wma_storage.csv", sep = ""),
skip = 3,
header = FALSE,
stringsAsFactors = FALSE,
# colClasses = c("character", rep("numeric", 6)), # force cols 2-6 numeric
na.strings = c("", "#N/A", "NA", -999999),
data.table = FALSE)
names(storage_daily_bg_df0) <- c("date_time",
"patuxent",
"seneca",
"occoquan",
"jrr_total",
"jrr_ws",
"savage")
}
print("finished importing reservoir storages")
#------------------------------------------------------------------------------
#------------------------------------------------------------------------------
# DAILY STORAGE DATA
#------------------------------------------------------------------------------
#------------------------------------------------------------------------------
# Set switch (this has been moved to global)-----------------------------------
# autoread_lffs <- 1 # automatic data retrieval from Data Portal
# autoread_lffs <- 0 # read data from file in local directory
#------------------------------------------------------------------------------
# LFFS OPTION 1 - AUTOMATIC DATA RETRIEVAL
# - read LFFS data automatically from Data Portal
#------------------------------------------------------------------------------
if(autoread_lffs == 1) {
# Read LFFS LFalls online data ------------------------------------------------
# Hourly LFFS-forecast flows at Little Falls; the file has a 25-line header
# followed by six numeric columns (date/time parts plus flow).
lffs.hourly.cfs.all.df0 <- data.table::fread(
# paste(ts_path, "PM7_4820_0001.flow", sep = ""),
# from cooplinux1:
"http://icprbcoop.org/dss_data_exchange/PM7_4820_0001.flow",
# from cooplinux2 - a backup source if FEWS Live is not working
# "http://icprbcoop.org/dss_data_exchange/PM7_4820_0001.flow_s2",
skip = 25,
header = FALSE,
stringsAsFactors = FALSE,
colClasses = c(rep("numeric", 6)), # force cols to numeric
col.names = c("year", "month", "day", "minute", "second", "lfalls_lffs"),
# na.strings = c("eqp", "Ice", "Bkw", "", "#N/A", "NA", -999999),
data.table = FALSE)
}
#------------------------------------------------------------------------------
# LFFS OPTION 2 - READ DATA FROM FILE IN LOCAL DIRECTORY
# - LFFS data file resides in /input/ts/current/
# - file name is PM7_4820_0001.flow
# - data can be downloaded from CO-OP's Data Portal
# - http://icprbcoop.org/dss_data_exchange/PM7_4820_0001.flow"
# - name appropriately then save the file to /input/ts/current/
#------------------------------------------------------------------------------
if (autoread_lffs == 0) {
  # Read the locally saved LFFS forecast file, input/ts/current/PM7_4820_0001.flow:
  # a 25-line header followed by six numeric columns (date/time parts + flow).
  lffs.hourly.cfs.all.df0 <- data.table::fread(
    paste0(ts_path, "PM7_4820_0001.flow"),
    skip = 25,
    header = FALSE,
    stringsAsFactors = FALSE,
    colClasses = c(rep("numeric", 6)),  # every column is numeric
    col.names = c("year", "month", "day", "minute", "second", "lfalls_lffs"),
    data.table = FALSE
  )
}
print("finished creating lffs.hourly.cfs.all.df0")
#------------------------------------------------------------------------------
#------------------------------------------------------------------------------
# Import time series representing state drought status.
# - temporarily just use time series from 2018drex.
#------------------------------------------------------------------------------
#------------------------------------------------------------------------------
#
# State drought-status indicator series (temporary: reuses the 2018 DREX file),
# clipped to the simulation window [date_start, date_end].
state.drought.df <- data.table::fread(
  paste0(ts_path, "state_drought_status.csv"),
  data.table = FALSE
) %>%
  dplyr::mutate(date_time = as.Date(date_time)) %>%
  dplyr::select(
    date_time,
    gw_va_shen, p_va_shen, sw_va_shen, r_va_shen,
    gw_va_nova, p_va_nova, sw_va_nova, r_va_nova,
    gw_md_cent, p_md_cent, sw_md_cent, r_md_cent,
    gw_md_west, p_md_west, sw_md_west, r_md_west,
    region_md_cent, region_md_west
  ) %>%
  dplyr::filter(date_time >= date_start, date_time <= date_end)
#------------------------------------------------------------------------------
#------------------------------------------------------------------------------
# Import reservoir storage data
#------------------------------------------------------------------------------
#------------------------------------------------------------------------------
# Set switch (move this to global?)--------------------------------------------
# autoread_resstorage <- 1 # automatic data retrieval from Data Portal
autoread_resstorage <- 0 # read data from file in local directory
#------------------------------------------------------------------------------
# STORAGE OPTION 1 - AUTOMATIC DATA RETRIEVAL
# - read daily flow data automatically from Data Portal
# - start date is January 1 of the current year
#------------------------------------------------------------------------------
if(autoread_resstorage == 1) {
# paste together the url for Data Portal's storage data-------------------
# https://icprbcoop.org/drupal4/icprb/data-view?patuxent_reservoirs_current_usable_storage_wssc=patuxent_reservoirs_current_usable_storage_wssc&little_seneca_reservoir_current_usable_storage_wssc=little_seneca_reservoir_current_usable_storage_wssc&occoquan_reservoir_current_usable_storage=occoquan_reservoir_current_usable_storage&startdate=01%2F01%2F2020&enddate=11%2F10%2F2020&format=csv&submit=Submit
url_storage0 <- "https://icprbcoop.org/drupal4/icprb/data-view?"
urlplus1 <- "patuxent_reservoirs_current_usable_storage_wssc=patuxent_reservoirs_current_usable_storage_wssc"
urlplus2 <- "&little_seneca_reservoir_current_usable_storage_wssc=little_seneca_reservoir_current_usable_storage_wssc"
urlplus3 <- "&occoquan_reservoir_current_usable_storage=occoquan_reservoir_current_usable_storage"
# NOTE(review): start date is hard-coded to 01/01/2020, not Jan 1 of the
# current year as the header comment says -- confirm which is intended.
url_storagedaily <- paste(url_storage0,
urlplus1, urlplus2, urlplus3,
"&startdate=01%2F01%2F2020&enddate=",
today_month, "%2F",
today_day, "%2F",
today_year, "&format=csv&submit=Submit",
sep="")
# read the online data table-------------------------------------------------
storage.daily.bg.df0 <- data.table::fread(
url_storagedaily,
skip = 1,
header = FALSE,
col.names = c("date_time", "stor_pat", "stor_sen", "stor_occ"),
stringsAsFactors = FALSE,
na.strings = c("", "#N/A", -999999, "NA"),
data.table = FALSE) %>%
dplyr::mutate(date_time = as.Date(date_time)) %>%
filter(!is.na(date_time)) %>%
select(date_time, everything()) %>%
arrange(date_time)
# Now read N Br reservoir data ------------------------------------------------
# Jennings Randolph (01595790) and Savage (01597490) lake levels from NWIS.
start_date_resstor <- as.POSIXct("2020-01-01")
end_date_resstor <- as.POSIXct(date_today0)
# NOTE(review): parameterCd is numeric here (62615) but passed as the string
# "00054" elsewhere in this script; confirm the coercion is intended.
storage_level_jrr_rt <- dataRetrieval::readNWISuv(siteNumbers = "01595790",
parameterCd = 62615,
startDate = start_date_resstor,
endDate = end_date_resstor,
tz = "EST" # time zone is Eastern Standard Time
)
storage_level_sav_rt <- dataRetrieval::readNWISuv(siteNumbers = "01597490",
parameterCd = 62615,
startDate = start_date_resstor,
endDate = end_date_resstor,
tz = "EST" # time zone is Eastern Standard Time
)
}
#------------------------------------------------------------------------------
# STORAGE OPTION 2 - READ DATA FROM FILE IN LOCAL DIRECTORY
# - read daily flow data from file residing in /input/ts/current/
# - file name is flows_daily_cfs.csv
# - code set up so that these time series should begin on Jan 1 of current year
# - daily data can be downloaded from CO-OP's Data Portal
# - link for manual download is https://icprbcoop.org/drupal4/icprb/flow-data
# - name appropriately then save the file to /input/ts/current/
#------------------------------------------------------------------------------
if(autoread_resstorage == 0) {
# read the local data table--------------------------------------------------
# input/ts/current/storage_daily_bg.csv: one header line, then
# date, Patuxent, Seneca, Occoquan daily storages.
storage.daily.bg.df0 <- data.table::fread(
paste(ts_path, "storage_daily_bg.csv", sep = ""),
skip = 1,
header = FALSE,
col.names = c("date_time", "stor_pat", "stor_sen", "stor_occ"),
# colClasses = c("Date", "numeric", "numeric", "numeric"),
stringsAsFactors = FALSE,
na.strings = c("", "#N/A", -999999),
data.table = FALSE) %>%
# Coerce types explicitly (fread may read mixed columns as character),
# drop rows with unparseable dates, and sort chronologically.
dplyr::mutate(date_time = as.Date(date_time),
stor_pat = as.numeric(stor_pat),
stor_sen = as.numeric(stor_sen),
stor_occ = as.numeric(stor_occ)) %>%
filter(!is.na(date_time)) %>%
select(date_time, everything()) %>%
arrange(date_time)
}
print("Finished importing reservoir storages")
#------------------------------------------------------------------------------
#------------------------------------------------------------------------------
# Import reservoir dfs - this is a temporary fix to get us going in 2019
# - just importing tables outputted by 2018drex to serve as temporary reservoir data frames
# - these will be used to initialize the res dfs from date_start to date_today0 (ie today())
#------------------------------------------------------------------------------
#------------------------------------------------------------------------------
# print("date_today0")
# print(date_today0)
# Initialize the four reservoir data frames (temporary fix: reuse 2018 DREX
# output). The four CSVs share one format, so read them with a single local
# helper instead of four copy-pasted pipelines.
#   res_code: reservoir file stub ("sen", "jrr", "occ", "pat")
#   returns:  data frame from input/ts/current/drex2018_output_<res_code>.csv,
#             with date_time parsed as Date and clipped to
#             [date_start, date_end]
read_drex2018_output <- function(res_code) {
  data.table::fread(paste(ts_path, "drex2018_output_", res_code, ".csv", sep = ""),
                    data.table = FALSE) %>%
    dplyr::mutate(date_time = as.Date(date_time)) %>%
    dplyr::filter(date_time <= date_end,
                  date_time >= date_start)
}
sen.ts.df00 <- read_drex2018_output("sen")
jrr.ts.df00 <- read_drex2018_output("jrr")
occ.ts.df00 <- read_drex2018_output("occ")
pat.ts.df00 <- read_drex2018_output("pat")
#----------------------------------------shapefile load----------------------------------
# read map shapefiles in ---------------------
# Luke - CS July 2020: the lines below cause errors,
# but the variables are never used so I'm commenting out
# clipcentral = readOGR(dsn=map_path, layer = "clipcentral")
# western_dslv = readOGR(dsn=map_path, layer = "western_dslv")
#transform map shapefiles ---------------------
# Luke - CS July 2020: the lines below cause errors,
# but the variables are never used so I'm commenting out
# clipcentral_t <- spTransform(clipcentral, CRS("+init=epsg:4326"))
# western_region_t <- spTransform(western_dslv, CRS("+init=epsg:4326"))
#----------------------------------------------------------------------------------------
#----------------------drought maps updating---------------------------------------------
# Luke - these functions seem to be broken - are hanging up
# calls function to get the latest version of the maryland drought map
# md_drought_map = md_drought_map_func(date_today0)
#####this has been set to
#this pulls directly from the url
# NOTE(review): this URL is frozen to the static 2020-08-31 image; confirm
# whether it should point at a "latest map" resource instead.
md_drought_map <- 'https://mde.maryland.gov/programs/Water/droughtinformation/Currentconditions/PublishingImages/DroughtGraphsStarting2019jan31/Drought2020-08-31.png'
#change input to below to pull the map from input directory
#readPNG("input/MD_droughtmap_temp.png")
#calls function to get the latest version of the virginia drought map
#---toggle
##for day to day
# va_drought_map = va_drought_map_func()
#va_drought_map = readPNG("input/VA_droughtmap_temp.png")
# this works in Aug 2021 - CS:
va_drought_map <-'https://deq1.bse.vt.edu/drought/state/images/maps/virginia_drought.png'
##to publish
# project.dir <- rprojroot::find_rstudio_root_file()
# va_drought_map = file.path(project.dir,'/global/images/va_drought_placeholder.png')
#---
#----------------------------------------------------------------------------------------
# #------------------------------
# #load in test data for ten day
# ten_day.df <- data.table::fread(file.path(ts_path, "ten_day_test/ten_day_test.csv", sep=""),
# data.table = FALSE)
#
# #------------------------------
# #load in data for demands from Sarah's Drupal site (if site is up, otherwise do nothing)
# if(url.exists("https://icprbcoop.org/drupal4/products/coop_pot_withdrawals.csv" == TRUE))
# {demands_raw.df <- data.table::fread("https://icprbcoop.org/drupal4/products/coop_pot_withdrawals.csv",
# data.table = FALSE)}
|
478de2c109f400b2ef85b678e1a458dfe34edc43
|
6b7286e14b3830fd9cbe57bd49056a68babab052
|
/normalityTest.R
|
6c9cd023b162bfbc10fb025fe398b47e497b6263
|
[] |
no_license
|
briannoogin/Neural-Network-Science-Fair-Research
|
2c450b9889b8a1517db7ea6f6150cd0c93b30f8c
|
8232bc6abea8d670736ac2d0d21d04fb10842694
|
refs/heads/master
| 2021-09-14T19:26:14.727054
| 2018-05-17T22:59:17
| 2018-05-17T22:59:17
| 67,271,686
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,796
|
r
|
normalityTest.R
|
## Mardia's multivariate normality test (skewness and kurtosis statistics).
##
## Args:
##   data:   numeric matrix / data frame; rows = observations, cols = variables
##   cov:    if TRUE use the ML (divide-by-n) covariance; else the unbiased one
##   qqplot: if TRUE also print a chi-square Q-Q plot of the squared
##           Mahalanobis distances (requires ggplot2)
## Returns: an S4 "mardia" object (class defined elsewhere) holding the
##          statistics, their p-values, and the input data frame.
mardiaTest <-
function (data, cov = TRUE, qqplot = FALSE)
{
dataframe=as.data.frame(data)
dname <- deparse(substitute(data))
data <- as.matrix(data)
n <- dim(data)[1]
p <- dim(data)[2]
data.org <- data
# Center the columns (scale = FALSE keeps variances untouched).
data <- scale(data, scale = FALSE)
if (cov) {
S <- ((n - 1)/n) * cov(data)
}
else {
S <- cov(data)
}
# n x n matrix of generalized inner products; its diagonal holds the squared
# Mahalanobis distances of the centered observations (used for the Q-Q plot).
D <- data %*% solve(S) %*% t(data)
g1p <- sum(D^3)/n^2
g2p <- sum(diag((D^2)))/n
# Degrees of freedom for the chi-square skewness tests.
df <- p * (p + 1) * (p + 2)/6
# Small-sample correction factor applied to the skewness statistic.
k <- (p + 1) * (n + 1) * (n + 3)/(n * ((n + 1) * (p + 1) -
6))
small.skew <- n * k * g1p/6
skew <- n * g1p/6
kurt <- (g2p - p * (p + 2)) * sqrt(n/(8 * p * (p + 2)))
p.skew <- pchisq(skew, df, lower.tail = FALSE)
p.small <- pchisq(small.skew, df, lower.tail = FALSE)
# Two-sided normal p-value for the kurtosis z-statistic.
p.kurt <- 2 * (1 - pnorm(abs(kurt)))
if (qqplot) {
d <- diag(D)
r <- rank(d)
chi2q <- qchisq((r - 0.5)/n, p)
# NOTE(review): `df` (degrees of freedom above) is shadowed here by a
# data.frame. Harmless today -- the scalar's last use is the pchisq calls
# above -- but renaming this would prevent a latent bug.
df = data.frame(d, chi2q)
qqPlotPrint = ggplot(df, aes(d, chi2q),environment = environment())+geom_point(shape=16, size=4)+
geom_abline(intercept =0, slope =1,color="black",size=2)+
xlab("Squared Mahalanobis Distance")+
ylab("Chi-Square Quantile")+
ggtitle("Chi-Square Q-Q Plot")+
theme(plot.title = element_text(lineheight=.8, face="bold"))
print(qqPlotPrint)
}
result <- new("mardia", g1p = g1p, chi.skew = skew, p.value.skew = p.skew,
chi.small.skew = small.skew, p.value.small = p.small, g2p = g2p,
z.kurtosis = kurt, p.value.kurt = p.kurt, dname = dname, dataframe = dataframe)
result
}
# Load the spreadsheet, then run Mardia's multivariate normality test on it.
mydata = read.xls("breastCancerKaggle.xlsx")
# Bug fix: the test was previously called on the undefined symbol
# `breastCancerKaggle`; it must operate on the data just read into `mydata`.
y <- mardiaTest(mydata, cov = TRUE, qqplot = FALSE)
|
cb2dbad79803a0c2cb1905c48ddae5bf9fd85436
|
36853a83c9c333787682d1c1d585a2bb08c3c725
|
/ROI/Distance_To_Cortex.R
|
daed01bde1af19e64615d12b0b80d563d8e741ca
|
[] |
no_license
|
muschellij2/CT_Pipeline
|
ea075a40ac0799b7a9ea2efe3b60f16e70a346cc
|
9bf95e6f62bfd4d61a1084ed66d2af4155641dc2
|
refs/heads/master
| 2021-01-22T02:39:51.559886
| 2019-09-04T19:48:00
| 2019-09-04T19:48:00
| 11,985,496
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 3,458
|
r
|
Distance_To_Cortex.R
|
#################################
# Regressions with % of ROI
# Author: John Muschelli
#################################
# Setup: load imaging packages, resolve machine-specific paths, and build the
# table of input files (one row per scan) used by the loop below.
rm(list=ls()) # NOTE(review): clears the workspace; acceptable in a standalone
              # batch script but hostile when sourced interactively
library(oro.nifti)
library(plyr)
library(scales)
library(RColorBrewer)
library(data.table)
library(cttools)
library(fslr)
library(ggplot2)
library(grid)
library(data.table) # NOTE(review): duplicate of library(data.table) above
# Default (local macOS) paths; overridden below for the cluster user.
homedir = "/Applications"
rootdir = "/Volumes/DATA_LOCAL/Image_Processing"
if (Sys.info()[["user"]] %in% "jmuschel") {
homedir = "~"
rootdir = "/dexter/disk2/smart/stroke_ct/ident"
}
progdir = file.path(rootdir, "programs")
# source(file.path(progdir, "convert_DICOM.R"))
# source(file.path(progdir, "fslhd.R"))
basedir = file.path(rootdir, "Registration")
tempdir = file.path(rootdir, "Template")
atlasdir = file.path(tempdir, "atlases")
outdir = file.path(basedir, "results")
# Loads `df`, whose columns include the file paths used below.
load(file=file.path(rootdir, "Registration",
"Registration_Image_Names.Rda"))
df = df[, c("raw", "roi.nii", "ss", "copydir")]
# The listed image files are gzipped on disk.
df$raw = paste0(df$raw, ".gz")
df$roi.nii = paste0(df$roi.nii, ".gz")
df$ss = paste0(df$ss, ".gz")
irow = 4 # leftover from interactive debugging; the loop reassigns irow
df$dists = NA
# winds <- -1:1
# indices <- permutations(length(winds), 3, v = winds,
# repeats.allowed=TRUE)
# indices = indices[!apply(indices, 1, function(x) all(x == 0)),]
# For each scan: compute the minimum distance from any ROI boundary voxel to
# the brain-mask boundary, storing the result (in cm) in df$dists[irow].
for (irow in seq(nrow(df))){
# raw = readNIfTI(df$raw[irow], reorient=FALSE)
img = readNIfTI(df$raw[irow], reorient=FALSE)
ss = readNIfTI(df$ss[irow], reorient=FALSE)
# Erode the skull-stripped mask with a 1-voxel box kernel; mask minus its
# erosion leaves only the 1-voxel-thick boundary shell.
ero = fslerode(df$ss[irow], kopts = "-kernel box 1",
retimg=TRUE)
diff = ss - ero
diff.img = ss
diff.img@.Data = diff
diff.img = cal_img(diff.img)
stopifnot( all(diff.img %in% c(0, 1)))
ind = which(diff.img > 0, arr.ind=TRUE)
# Same boundary-shell trick for the ROI image.
roi = readNIfTI(df$roi.nii[irow], reorient=FALSE)
ero.roi = fslerode(df$roi.nii[irow], kopts = "-kernel box 1",
retimg=TRUE)
diff.roi = diff.img
diff.roi@.Data = roi - ero.roi
allroi.ind = roi.ind = which(diff.roi > 0, arr.ind=TRUE)
vdim = voxdim(roi)
#### row-wise matrix mulitplication
# Scale voxel indices by the voxel dimensions -> physical coordinates (mm).
ind = t(t(ind)*vdim)
roi.ind = t(t(roi.ind)*vdim)
# All pairwise squared distances via |a - b|^2 = |a|^2 + |b|^2 - 2 a.b,
# computed with one cross-product instead of an explicit double loop
# (the commented-out spot check below verifies the identity).
ind2 = rowSums(ind^2)
roi.ind2 = rowSums(roi.ind^2)
mid = -2* tcrossprod(ind, roi.ind)
res = t(t(mid) + roi.ind2)
res = res + ind2
# checki = sample(nrow(ind), size=1)
# checkj = sample(nrow(roi.ind), size=1)
# check = res[checki, checkj]
# r = roi.ind[checkj,]
# i = ind[checki,]
# d = sum((r-i)^2)
# stopifnot(abs(d - check) < 1e-10)
res = sqrt(res)
### res is in mm, convert to cm
res = res / 10
mres = min(res)
# Locate the closest (surface voxel, ROI voxel) pair(s); a tolerance is used
# because of floating-point rounding.
min.ind = which(abs(res- mres) < 1e-10, arr.ind=TRUE)
# NOTE(review): min.ind rows are (row, col) positions in the distance matrix,
# not voxel coordinates, so indexing roi with them looks suspect -- confirm
# (the overlay plot that consumes roi is commented out below).
roi[min.ind] = 100
index = allroi.ind[min.ind[,2], ]
index = index[1,]
stub = nii.stub(basename(df$raw[irow]))
pngname = file.path(df$copydir[irow], "results",
paste0(stub, "_min_dist.png"))
print(paste0("Pngname is "))
print(pngname)
# png(pngname)
# mask.overlay(img, roi, col.y=c("blue", "red"),
# xyz= round(index))
# dev.off()
# ind = data.frame(which(ss > 0, arr.ind=TRUE))
# l = lapply(indices, function(x) x + ind)
# ind.df = rbindlist(l)
# setkey(ind.df)
# print(nrow(ind.df))
# unique(ind.df)
# ind.df = unique(ind.df)
# dind = data.table(ind)
# setkey(dind)
# dind[, inbrain := 1]
# d = merge(ind.df, dind, all=TRUE)
# d = d[ which(is.na(inbrain)), ]
# mat = ss
# mat@.Data[is.na(mat) | !is.na(mat)] = NA
# dd = as.matrix(d)[, c("dim1", "dim2", "dim3")]
# mat@.Data[dd] = 1
# mat = cal_img(mat)
df$dists[irow] = mres
print(irow)
# mask.overlay(ss, mat)
}
# Persist the per-scan minimum distances alongside the file table.
save(df, file=file.path(rootdir, "Registration",
"Minimum_Distance_To_Cortex.Rda"))
|
95f4e68bebcce17a3be595529e9511dc5224ee9f
|
f9c1b6c61da4f75fd3eb9cf5c778372c0bd72b6e
|
/02extractGroundByPmfExample.R
|
10197db2287393cf731833e273a69e7667ce08e3
|
[] |
no_license
|
caiohamamura/salca_processing_R
|
1435f20cb1db8b5d4e64d66f08a51f12cf140cd3
|
cf10591ea6811f50ecb36de37bb090c2f9dc4f76
|
refs/heads/master
| 2020-09-21T22:06:05.423659
| 2019-11-30T02:35:06
| 2019-11-30T02:35:06
| 224,948,870
| 1
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 850
|
r
|
02extractGroundByPmfExample.R
|
# Extract ground points from LAS tiles with a progressive morphological
# filter (PMF), in parallel, writing "<name>.0_g.las" files to ../processed.
if (!require("pacman")) {
  install.packages("pacman")
}
p_load("lidR", "doParallel")
# Work from the catalog directory (path differs between Windows and Linux).
if (Sys.info()[["sysname"]] == "Windows") {
  setwd("U:/Datastore/CSCE/geos/groups/3d_env/data/visitors/2019.caio_hamamura/R/catalog")
} else {
  setwd("~/myspace/R/catalog")
}
outPath = file.path(getwd(), "../processed")
dir.create(outPath, showWarnings = FALSE)  # FALSE, not F (F is reassignable)
files = list.files(".")
registerDoParallel(cores=6)
# Iterate over `files` directly: the previous files[1:length(files)] breaks
# when the directory is empty (1:0 gives c(1, 0)), and the loose `f = files[1]`
# before the loop was dead code (foreach rebinds f).
foreach(f = files) %dopar%
{
  las = lidR::readLAS(f)
  # PMF parameters: window sizes 3,6,9,12 with matching height thresholds.
  ws = seq(3,12, 3)
  th = seq(0.1, 1.5, length.out = length(ws))
  groundLas = lidR::lasground(las, pmf(ws, th))
  # fixed = TRUE: match the literal ".0.las" suffix; as a regex the dots were
  # wildcards and could rewrite unintended file names.
  outFilePath = file.path(outPath, sub(".0.las", ".0_g.las", f, fixed = TRUE))
  # Keep only ground returns (classification 2).
  output = lasfilter(las, groundLas$Classification == 2)
  writeLAS(output, outFilePath)
  rm(las, groundLas, output)
}
|
932342bee47bd06a140d7531c87f1415d15fae72
|
c0cfaa038f1fd21835d947f2fc39fcb42ea3eab2
|
/CryptoShiny/tests/testthat/test-Tsp_data.R
|
96432cd6855dda27bfd6fa850ba29a233d579aef
|
[
"MIT"
] |
permissive
|
fernandopf/ThinkRProject
|
8a9c2ecbeb8a5634fdbb9ae6ca4a561187460d23
|
81f6ece8b1ff60c80209b2bd4360c030df439af7
|
refs/heads/master
| 2020-04-02T14:26:28.223749
| 2018-12-14T19:01:12
| 2018-12-14T19:01:12
| 154,524,744
| 2
| 3
| null | 2018-11-28T15:29:48
| 2018-10-24T15:27:38
| null |
UTF-8
|
R
| false
| false
| 290
|
r
|
test-Tsp_data.R
|
test_that("Type of elements dataset:", {
  # Network-dependent test: skip cleanly when offline.
  skip_if_not(curl::has_internet(), message = "no internet")
  # Feed Tsp_data() the current time as a rounded epoch timestamp and check
  # the column types of what comes back.
  now_epoch <- round(as.numeric(as.POSIXct(Sys.time(), format="%Y/%m/%d")))
  dataset <- Tsp_data(now_epoch)
  expect_is(dataset$time, "integer")
  expect_is(dataset$body, "factor")
})
|
13f8269afb03ad2344c2063d7fde413b0823ec79
|
e36529495a25c8dd13099a7a5d138e9e9f44add0
|
/R/rectangle.gate.R
|
5e17f8937694f074a6a6fa87e5757c247567aec1
|
[] |
no_license
|
sheng-liu/SeqViz
|
911ba3a49b153cd142d4e7685adfc6a96d7f4ca9
|
a4edd8d35226b18a1bde61bf14adafcb8d4978b3
|
refs/heads/master
| 2020-12-24T16:08:08.131651
| 2015-01-04T07:23:45
| 2015-01-04T07:23:45
| 27,841,414
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 3,713
|
r
|
rectangle.gate.R
|
## rectangle gate is also on mouse move
## Interactive rectangle-gating tool: opens an x11 window, lets the user drag
## out a rectangle on the active frame's scatter plot (left-drag to draw,
## right-click to finish), then applies the resulting rectangleGate, splits
## the active frame by it, and registers the children in the hierarchy view.
## NOTE(review): `appspace[...]` is indexed with bare symbols throughout;
## this appears to rely on custom [ / [<- methods with non-standard
## evaluation on an application-state container -- confirm.
rectangle.gate=function(action,window){
print("Rectangel Gate")
select.channels()
##--------------------------------------------------------------------------
## mouse events
#xWindow=X11()
x11(width=4,height=4)
plot.new()
flowPlot(x=appspace[active.SeqFrame],
plotParameters=c(appspace[channelX],appspace[channelY]))
## Mouse-down: a two-button press ends the event loop; otherwise redraw the
## plot and record the drag's starting corner (in user coordinates).
mousedown <- function(buttons, x, y) {
if(length(buttons)==2) "Done"
else {
flowPlot(x=appspace[active.SeqFrame],
plotParameters=c(appspace[channelX],appspace[channelY]))
x.cord=grconvertX(x,from="ndc",to="user")
y.cord=grconvertY(y,from="ndc",to="user")
#cat("Buttons ", paste(buttons, collapse=" "), " at ", x, y, "\n")
#cat("convert ", paste(buttons, collapse=" "), " at ", x.cord, y.cord, "\n")
appspace[x.cord.ini]=x.cord
appspace[y.cord.ini]=y.cord
NULL
}
}
## Mouse-move: redraw and rubber-band the red rectangle from the stored
## start corner to the current pointer, recording the end corner.
mousemove=function(buttons,x,y){
flowPlot(x=appspace[active.SeqFrame],
plotParameters=c(appspace[channelX],appspace[channelY]))
x.cord=grconvertX(x,from="ndc",to="user")
y.cord=grconvertY(y,from="ndc",to="user")
rect(xleft=appspace[x.cord.ini],xright=x.cord,
ybottom=appspace[y.cord.ini],ytop=y.cord,
border="red")
#density=10,angle=45
appspace[x.cord.end]=x.cord
appspace[y.cord.end]=y.cord
#cat("moves ", paste(buttons, collapse=" "), " at ", x, y, "\n")
#cat("convert ", paste(buttons, collapse=" "), " at ", x.cord, y.cord, "\n")
NULL
}
# keybd <- function(key) {
# cat("Key <", key, ">\n", sep = "")
# # if (key=="ctrl-Q") "Done"
#
# # if (key=="ctrl-S") dev.copy2pdf(file = "table.2.pdf")
# if (key=="ctrl-S") {
# Save_PDF(window=xWindow)
# "Done"
# }
# }
## Blocks until mousedown returns "Done" (i.e. a two-button/right press).
getGraphicsEvent(
"\nLeft click and drag to draw gate \nRight click to finish\n\nClick SavePDF to save current gating window\nClose the current gating window before open a new one\n",
onMouseDown = mousedown,
onMouseMove = mousemove
#onKeybd = keybd
)
##--------------------------------------------------------------------------
## data
# use matrix as input, can also use list
## Build the 2x2 gate-boundary matrix (columns named by the two channels).
param=cbind(c(appspace[x.cord.ini],appspace[y.cord.ini]),
c(appspace[x.cord.end],appspace[y.cord.end]))
colnames(param)=c(appspace[channelX],appspace[channelY])
rectGate=rectangleGate(.gate=param,filterID="rectangleGate")
rectGate.filter=filter(appspace[active.SeqFrame],rectGate)
# use a list is better
appspace[filterBox]=rectGate.filter
print(summary(rectGate.filter))
cat("\n")
## Split the active frame into in-gate/out-of-gate children, naming each
## child by its GUID keyword.
rectGate.split=split(appspace[active.SeqFrame],rectGate)
child.node.name=sapply(rectGate.split,
function(frames){keyword(frames)$GUID})
## add the veggi name for now
for (i in 1:length(child.node.name))
keyword(rectGate.split[[i]])$VEGGI.NAME=child.node.name[i]
# put result seqframes into SeqFrame.list
sapply(seq_along(child.node.name),function(i){
appspace[SeqFrame.list][[child.node.name[i]]]=rectGate.split[[i]]})
## Register the new children in the tree view of the frame hierarchy.
insert.node(
node.name=child.node.name,tree.view=appspace[SeqFrame.hierachy.view],method="insert")
}
|
91b05548c2c417b591ef600b66eb45eb654ad18d
|
bc6fc11685a58ac09ae8cfc286ec3eee68729324
|
/010-rbase-two-days/output/purl/54-presenting.R
|
6826abf4ff81b87d9040b8a95ab09cc79597d3d2
|
[] |
no_license
|
quantide/qtraining
|
2803fe93446931c43b70ecd7ed644f01c79ece85
|
846043fcc8207da7b5dbf5bd9a7d4b94111d5132
|
refs/heads/master
| 2021-09-15T10:57:47.797783
| 2018-05-30T09:13:43
| 2018-05-30T09:13:43
| 56,151,061
| 1
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,761
|
r
|
54-presenting.R
|
## ----first, include=TRUE, purl=TRUE, message=FALSE-----------------------
# NOTE: this file is purl() output from an Rmd document; re-knitting the
# source will overwrite edits made here.
# library() (rather than require()) fails fast when a package is missing.
library(dplyr)
library(ggplot2)
library(qdata)
data(bands)
## ----presenting_gridArrange, message=FALSE, warning=FALSE----------------
# Base boxplot of ink_pct by press_type; axis text suppressed on purpose.
# (Typo fix: axis label read "Continuos".)
gp0 <- ggplot(data=bands, aes(x=press_type, y=ink_pct)) +
geom_boxplot(fill="#3690c0") +
ylab("Continuous variable") + xlab("Grouping variable") +
theme(
axis.text.x = element_blank(), axis.text.y = element_blank()
)
# Two variants (second flipped) arranged side by side.
gp1 <- gp0 + theme(axis.ticks.x = element_blank())
gp2 <- gp0 + theme(axis.ticks.y = element_blank()) + coord_flip()
gp <- gridExtra::grid.arrange(gp1, gp2, ncol=2)
## ----presenting_recall, message=FALSE, warning=FALSE---------------------
gp
plot(gp)
## ----presenting_save_pdf, message=FALSE, warning=FALSE, results='hide'----
pdf("gp1.pdf")
plot(gp)
dev.off()
## ----presenting_save_cairopdf, message=FALSE, warning=FALSE, results='hide'----
cairo_pdf("gp2.pdf")
plot(gp)
dev.off()
## ----presenting_save_svg, message=FALSE, warning=FALSE, results='hide'----
svg("gp.svg")
plot(gp)
dev.off()
## ----presenting_save_pdf_width, message=FALSE, warning=FALSE, results='hide'----
cairo_pdf("gp3.pdf", width=12, height=8)
plot(gp)
dev.off()
## ----presenting_ggsave1, message=FALSE, warning=FALSE, results='hide'----
# ggsave() with no plot argument saves the last ggplot displayed.
ggsave("boxplot1.pdf", width = 20, height = 20, units = "cm")
## ----presenting_ggsave2, message=FALSE, warning=FALSE, results='hide'----
ggsave("boxplot2.pdf", plot=gp1, width = 20, height = 20, units = "cm")
## ----presenting_save_png, message=FALSE, warning=FALSE, results='hide'----
png("gp.png", width=1200, height=800, res=300)
plot(gp)
dev.off()
## ----presenting_ggsave_png, message=FALSE, warning=FALSE, results='hide'----
ggsave("boxplot.png", width = 4, height = 3, dpi = 300)
|
90eb82a8233cc5d1dcea2c2a4418a7f8a45c4afe
|
6a21a808a668533db92472b1e1adbe59dd37517e
|
/R/dev/tripDevel.R
|
5ccb88499517864f4c14f0b55f9766c5e34e4f9a
|
[] |
no_license
|
mdsumner/mdsutils
|
7198548e9059750a026a102409b8c88e3b39e7ea
|
f162246b5944050853ecb991e545eae7e3b833d2
|
refs/heads/master
| 2021-01-19T05:57:32.736249
| 2018-01-22T21:08:27
| 2018-01-22T21:08:27
| 11,871,571
| 2
| 1
| null | 2018-01-22T21:08:28
| 2013-08-04T00:34:16
|
R
|
UTF-8
|
R
| false
| false
| 11,803
|
r
|
tripDevel.R
|
#setClass("trip", contains = c("TimeOrderedRecords", "SpatialPointsDataFrame"))
setClass("trip", contains = "SpatialPointsDataFrame")
## Validity function for "trip": checks that coordinate columns 3-4 hold a
## well-ordered (time, id) pair per track. Invalid objects yield a message
## string (the setValidity convention).
## NOTE(review): the stop() calls below abort instead of returning a message
## string; mixing the two styles is inconsistent -- confirm which is wanted.
validtordata <- function(object) {
if (!is(object@data, "data.frame"))
stop("only data frames supported for data slots")
coords <- coordinates(object)
if (dim(coords)[2] < 4) stop("coordinates must contain at least four columns")
## assume 3rd and 4th columns for now
tid <- as.data.frame(coords[,3:4])
#tid <- as.data.frame(object@data[ , object@TOR.columns])
if (length(tid) == 0)
stop("timeIDs cannot have zero length")
if (nrow(tid) < 1)
stop("no timeIDs set: too few rows")
if (ncol(tid) < 2)
stop("no timeIDs set: too few columns")
## NOTE(review): the next two lines are both duplicate-record checks via
## different coercions; confirm both are really needed.
if (any(duplicated(as.data.frame(object)))) stop("duplicated records within data")
if (any(duplicated(as(object, "data.frame")))) stop("duplicated records within data")
time <- tid[,1]
## force this for now
## NOTE(review): forcing the class here means the is(time, "POSIXt") check
## below can never fail -- it is effectively dead code.
class(time) <- c("POSIXt", "POSIXct")
id <- tid[, 2]
TORlevs <- levels(factor(id))
if (!is(time, "POSIXt")) stop("trip only handles dates and times as POSIXt objects")
bad <- c(is.na(time), !is.finite(time), is.na(id), !is.finite(id))
if (any(bad)) return("time and/or id data contains missing or non finite values")
## Within each id, date-times must be strictly increasing.
d <- unlist(tapply(time, id, diff))
if (any(d < 0)) return("date-times not in order within id")
if (any(d == 0)) return("date-times contain duplicates within id")
short <- which(unlist(tapply(time, id, length)) < 3)
## maybe trip enforces this
if (length(short)>0) {
mess <- "\n less than 3 locations for ids:\n"
mess <- paste(mess, paste(TORlevs[short], collapse = ","), sep = "")
return(mess)
}
return(TRUE)
}
setValidity("trip", validtordata)
as.data.frame.trip <- function(x, ...) as.data.frame(as(x, "SpatialPointsDataFrame"), ...)
## Define the "trip" generic only if another package has not already done so.
if (!isGeneric("trip"))
setGeneric("trip", function(obj, TORnames)
standardGeneric("trip"))
## Construct a "trip" from a SpatialPointsDataFrame plus the names of its
## date-time and id columns (TORnames = c(time, id)).
trip <- function(obj, TORnames) {
  if (!is(obj, "SpatialPointsDataFrame")) {
    stop("trip only supports SpatialPointsDataFrame")
  }
  ## Drop unused factor levels in the id column before validation runs.
  id_name <- TORnames[2]
  if (is.factor(obj[[id_name]])) {
    obj[[id_name]] <- factor(obj[[id_name]])
  }
  new("trip", obj, TimeOrderedRecords(TORnames))
}
## removed as this was causing recursion in 2.8.0
#setMethod("trip", signature(obj = "SpatialPointsDataFrame", TORnames = "ANY"), trip)
## Direct construction when a TimeOrderedRecords object is supplied.
setMethod("trip", signature(obj = "ANY", TORnames = "TimeOrderedRecords"),
function(obj, TORnames) {
new("trip", obj, TORnames)
})
## Re-wrap an existing trip with new time/id column metadata.
setMethod("trip", signature(obj = "trip", TORnames = "TimeOrderedRecords"),
function(obj, TORnames) {
new("trip", as(obj, "SpatialPointsDataFrame"), TORnames)
})
## Fallback: strip the trip down to its SpatialPointsDataFrame and rebuild.
setMethod("trip", signature(obj = "trip", TORnames = "ANY"),
function(obj, TORnames) {
##trip.default(as(obj, "SpatialPointsDataFrame"), TORnames)
trip(as(obj, "SpatialPointsDataFrame"), TORnames)
})
#setMethod("trip", signature(obj = "ANY", col.nms = "TimeOrderedRecords"),
# function(obj, col.nms) {
# trip(obj, col.nms)
# })
#setMethod("trip", signature(obj = "SpatialPointsDataFrame", col.nms = "character"),
#function(obj, col.nms) new("trip", obj, TimeOrderedRecords(col.nms))
#)
## works already:
## coordinates - not replace
## print
## show
## plot
## summary
## "[" - not replace
## doesn't work already
## dim
## as.data.frame
## names - and replace
## points
## text
## subset
#"[[", "$"
## split
## S3 versions
dim.trip <- function(x) dim(as(x, "SpatialPointsDataFrame"))
#as.data.frame.trip <- function(x, ...) as.data.frame(as(x, "SpatialPointsDataFrame"), ...)
#"[[.trip" = function(x, ...) as(x, "SpatialPointsDataFrame")[[...]]
## not needed - by global "Spatial" method
#setMethod("[[", c("trip", "ANY", "missing"), function(x, i, j, ...)
# x@data[[i]]
# )
## Column replacement via [[<-: round-trips through the
## SpatialPointsDataFrame and the trip() constructor so the result is
## re-validated (and stays a valid trip).
setReplaceMethod("[[", c("trip", "ANY", "missing", "ANY"),
function(x, i, j, value) {
tor <- getTORnames(x)
x <- as(x, "SpatialPointsDataFrame")
x[[i]] <- value
trip(x, tor)
}
)
## not needed - by global "Spatial" method
#setMethod("$", c("trip", "character"),
# function(x, name) x@data[[name]]
#)
## Replacement method for x$name <- value: performed on the underlying
## SpatialPointsDataFrame and re-wrapped as a trip so validity is enforced
## on the returned object.
setReplaceMethod("$", c("trip", "character", "ANY"),
                 function(x, name, value) {
                   tor.cols <- getTORnames(x)
                   spdf <- as(x, "SpatialPointsDataFrame")
                   spdf[[name]] <- value
                   trip(spdf, tor.cols)
                 }
)
#"[[<-.trip" = function(x, i, j, value) {
#
# tor <- getTORnames(x)
# x <- as(x, "SpatialPointsDataFrame")
# x[[i]] <- value
# trip(x, tor)
#}
#"$.trip" = function(x, name) x@data[[name]]
#"$<-.trip" = function(x, i, value) {
# tor <- getTORnames(x)
# x <- as(x, "SpatialPointsDataFrame")
# x[[i]] <- value
# trip(x, tor)
# }
##setMethod("names", "trip", function(x) names(as(x, "SpatialPointsDataFrame")))
##setMethod("names", "trip", function(x) names(x@data))
## S3 names() support: column names come from the underlying
## SpatialPointsDataFrame.
names.trip <- function(x) {
  names(as(x, "SpatialPointsDataFrame"))
}
## On assignment, rename the data slot and keep the TOR metadata in step.
## NOTE(review): this assigns the *full* name vector to TOR.columns, which
## is only correct if the trip-defining columns come first -- confirm.
"names<-.trip" <- function(x, value) {
  names(x@data) <- value
  x@TOR.columns <- value
  x
}
## graphics helpers: draw the locations as points or text labels
setMethod("points", "trip",
          function(x, ...) points(as(x, "SpatialPointsDataFrame"), ...))
setMethod("text", "trip",
          function(x, ...) text(as(x, "SpatialPointsDataFrame"), ...))
#setMethod("split", "SpatialPointsDataFrame", split.data.frame)
#subset.trip <- function(x, subset, select, drop = FALSE, ...) {
## S3 subset() for trip.  Subsetting can invalidate a trip (time/ID
## columns dropped, or too few locations left per ID); in those cases the
## result degrades gracefully to a SpatialPointsDataFrame with a message.
subset.trip <- function(x, ...) {
  #spdf <- subset(as(x, "SpatialPointsDataFrame"), subset, select, drop = drop, ...)
  spdf <- subset(as(x, "SpatialPointsDataFrame"), ...)
  tor <- getTORnames(x)
  ## drop unused ID levels so the per-trip count below is accurate
  if ( is.factor(spdf[[tor[2]]])) spdf[[tor[2]]] <- factor(spdf[[tor[2]]])
  if (any(is.na(match(tor, names(spdf))))) {
    cat("trip-defining Date or ID columns dropped, reverting to SpatialPointsDataFrame\n\n")
    return(spdf)
  } else if (any(tapply(spdf[[tor[1]]], spdf[[tor[2]]], length) < 3)){
    ## a valid trip needs at least 3 locations per ID
    cat("subset loses too many locations, reverting to SpatialPointsDataFrame\n\n")
    return(spdf)
  } else {
    return(trip(spdf, tor))
  }
}
## "[" indexing: subset the underlying points, then restore the trip class
## only if the result is still a valid trip (both TOR columns present and
## at least 3 locations per ID); otherwise fall back to the plain
## SpatialPointsDataFrame.  Mirrors subset.trip.
setMethod("[", "trip",
          function(x, i, j, ... , drop = TRUE) {
            spdf <- as(x, "SpatialPointsDataFrame")[i, j, ..., drop = drop]
            tor <- getTORnames(x)
            ## drop unused ID levels so the per-trip count below is accurate
            if ( is.factor(spdf[[tor[2]]])) spdf[[tor[2]]] <- factor(spdf[[tor[2]]])
            if (any(is.na(match(tor, names(spdf))))) {
              cat("trip-defining Date or ID columns dropped, reverting to SpatialPointsDataFrame\n\n")
              return(spdf)
            } else if (any(tapply(spdf[[tor[1]]], spdf[[tor[2]]], length) < 3)){
              cat("subset loses too many locations, reverting to SpatialPointsDataFrame\n\n")
              return(spdf)
            } else {
              return(trip(spdf, tor))
            }
          }
)
## register the S3 subset worker as the S4 method
setMethod("subset", "trip", subset.trip)
#### summary and print
## summary() workhorse: collects per-trip start/end times, record counts
## and durations, plus the spatial summary of the underlying points.
summary.tordata <- function(object, ...) {
  obj <- list(spdf = summary(as(object, "SpatialPointsDataFrame")))
  ## method or not here?
  time <- object[[object@TOR.columns[1]]]
  ids <- object[[object@TOR.columns[2]]]
  ## tapply() strips the date-time class; adding the epoch back restores it
  ## (assumes times are seconds since 1970-01-01 GMT -- TODO confirm)
  tmins <- tapply(time, ids, min) + ISOdatetime(1970, 1, 1, 0, 0,0, tz = "GMT")
  tmaxs <- tapply(time, ids, max) + ISOdatetime(1970, 1, 1, 0, 0,0, tz = "GMT")
  nlocs <- tapply(time, ids, length)
  obj[["class"]] <- class(object)
  obj[["tmins"]] <- tmins
  obj[["tmaxs"]] <- tmaxs
  obj[["tripID"]] <- levels(factor(ids))
  obj[["nRecords"]] <- nlocs
  obj[["TORnames"]] <- getTORnames(object)
  ## duration formatted per trip, e.g. "2.5 days"
  obj[["tripDuration"]] <- tapply(time, ids, function(x) {x <- format(diff(range(x)))})
  class(obj) <- "summary.tordata"
  #invisible(obj)
  obj
}
## print method for the summary object: one-row-per-trip table of counts,
## start/end times and durations, followed by the Spatial summary.
print.summary.tordata <- function(x, ...) {
  dsumm <- data.frame(tripID = x$tripID, No.Records = x$nRecords, startTime = x$tmins, endTime = x$tmaxs, tripDuration = x$tripDuration)
  ## annotate headers with the actual ID and DateTime column names
  names(dsumm)[1] <- paste(names(dsumm)[1], " (\"", x[["TORnames"]][2], "\")", sep = "")
  names(dsumm)[3] <- paste(names(dsumm)[3], " (\"", x[["TORnames"]][1], "\")", sep = "")
  names(dsumm)[4] <- paste(names(dsumm)[4], " (\"", x[["TORnames"]][1], "\")", sep = "")
  rownames(dsumm) <- 1:nrow(dsumm)
  #dsumm <- as.data.frame(lapply(dsumm, as.character))
  cat(paste("\nObject of class ", x[["class"]], "\n", sep = ""))
  print(format(dsumm, ...))
  cat(paste("\nDerived from Spatial data:\n\n", sep = ""))
  print(x$spdf)
  cat("\n")
}
## print/show method: the per-trip summary table followed by a listing of
## the data columns, with the trip-defining DateTime and ID columns flagged.
print.trip <- function(x, ...) {
  xs <- summary(x)
  dsumm <- data.frame(tripID = xs$tripID, No.Records = xs$nRecords, startTime = xs$tmins, endTime = xs$tmaxs, tripDuration = xs$tripDuration)
  ## annotate headers with the actual ID and DateTime column names
  names(dsumm)[1] <- paste(names(dsumm)[1], " (\"", xs[["TORnames"]][2], "\")", sep = "")
  names(dsumm)[3] <- paste(names(dsumm)[3], " (\"", xs[["TORnames"]][1], "\")", sep = "")
  names(dsumm)[4] <- paste(names(dsumm)[4], " (\"", xs[["TORnames"]][1], "\")", sep = "")
  rownames(dsumm) <- 1:nrow(dsumm)
  #dsumm <- as.data.frame(lapply(dsumm, as.character))
  cat(paste("\nObject of class ", xs[["class"]], "\n", sep = ""))
  print(format(dsumm, ...))
  cat("\n")
  nms <- names(x)
  #names(nms) <- names(x)
  #nms[[xs[["TORnames"]][1]]] <- paste(nms[[xs[["TORnames"]][1]]], "*trip DateTime*")
  #nms[[xs[["TORnames"]][2]]] <- paste(nms[[xs[["TORnames"]][2]]], "#trip ID#")
  ## first element of class() per column (e.g. "POSIXct" rather than the
  ## full c("POSIXct", "POSIXt") vector)
  clss <- unlist(lapply(as.data.frame(x@data), function(x) class(x)[1]))
  #names(clss) <- names(x)
  #clss[[xs[["TORnames"]][1]]] <- paste(clss[[xs[["TORnames"]][1]]], "*trip DateTime*")
  #clss[[xs[["TORnames"]][2]]] <- paste(clss[[xs[["TORnames"]][2]]], "#trip ID#")
  sdf <- data.frame(data.columns = nms, data.class = clss)
  ## third (blank-named) column marks the two trip-defining columns
  sdf[[" "]] <- rep("", nrow(sdf))
  sdf[[" "]][which(names(x) == xs[["TORnames"]][1])] <- "**trip DateTime**"
  sdf[[" "]][which(names(x) == xs[["TORnames"]][2])] <- "**trip ID**      "
  #sdf$hideme <- factor(sdf$hideme)
  #names(sdf)[3] <- ""
  row.names(sdf) <- 1:nrow(sdf)
  print(sdf)
  cat("\n")
}
## register the S3 workers above as the S4 summary/show methods
setMethod("summary", "trip", summary.tordata)
setMethod("show", "trip", function(object) print.trip(object))
#setMethod("print", "trip",function(x, ...) print(as(x, "SpatialPointsDataFrame")))
#setMethod("spTransform", signature(x = "trip", "CRS"),
# function (x, CRSobj, ...)
# {
# xSP <- spTransform(as(x, "SpatialPointsDataFrame"), CRSobj, ...)
# xDF <- x@data
# res <- SpatialPointsDataFrame(coords = coordinates(xSP),
# data = xDF, coords.nrs = numeric(0), proj4string = CRS(proj4string(xSP)))
# trip(res, getTORnames(x))
# }
#)
## Draw one polyline per trip ID, each in a distinct hue.
## Fixes: seq_len()/seq_along() instead of the unsafe 1:n idiom (1:0
## misbehaves when empty), and length.out spelled out instead of the
## partially-matched `length =` argument of seq().
setMethod("lines", signature(x = "trip"),
          function(x, ...) {
            tor <- getTORnames(x)
            ## row indices grouped by trip ID
            lx <- split(seq_len(nrow(x)), x[[tor[2]]])
            coords <- coordinates(x)
            ## one colour per trip, spread over half the hue wheel
            col <- hsv(seq(0, 0.5, length.out = length(lx)))
            for (i in seq_along(lx)) {
              lines(coords[lx[[i]], ], col = col[i], ...)
            }
          }
)
## plot() with no y argument: plot the bare point locations
setMethod("plot", signature(x = "trip", y = "missing"),
          function(x, y, ...) {
            plot(as(x, "SpatialPoints"), ...)
          })
#setMethod("plot", signature(x = "trip", y = "character"),
# function(x, y, ...) {
# plot(coordinates(x), col = x[[y]], ...)
#})
## Recenter longitudes for unprojected data: points west of Greenwich
## (longitude < 0) are shifted by +360, moving the map seam from the
## dateline to Greenwich; the CRS gains the "+over" flag when known.
## Errors on projected data; warns (and proceeds) when the CRS is unknown.
recenter.trip <- function(obj) {
  proj <- is.projected(obj)
  if (is.na(proj)) {
    warning("unknown coordinate reference system: assuming longlat")
  }
  if (!is.na(proj) & proj) {
    stop("cannot recenter projected coordinate reference system")
  }
  projargs <- CRS(proj4string(obj))
  crds <- coordinates(obj)
  westward <- crds[, 1] < 0
  if (all(westward)) {
    ## every point is west of Greenwich: shift wholesale
    crds[, 1] <- crds[, 1] + 360
    if (!is.na(proj)) projargs <- CRS(paste(proj4string(obj), "+over"))
  } else if (any(westward)) {
    ## mixed case: shift only the western points
    crds[, 1] <- ifelse(westward, crds[, 1] + 360, crds[, 1])
    if (!is.na(proj)) projargs <- CRS(paste(proj4string(obj), "+over"))
  }
  ## rebuild the points with the shifted coordinates and re-wrap as a trip
  trip(new("SpatialPointsDataFrame", SpatialPoints(crds, projargs),
           data = obj@data, coords.nrs = obj@coords.nrs),
       obj@TOR.columns)
}
setMethod("recenter", "trip", recenter.trip)
|
a8f3fc819405db0a473c1b136de4c7820e66aa41
|
10d70e74a76627e40671c013887099c3f99ffbd4
|
/R/StabilityChangesByLinearRegression.R
|
e40f4f17b8fdf81148dd38ad4c635a7f779ea296
|
[] |
no_license
|
sethiyap/FungalTranscriptStability
|
5d1224c5efc3802bcb559c2875fc2549614ee969
|
16dc0c8a4c8a6f7d96991fe0ed81870ddb2d7cf2
|
refs/heads/master
| 2023-02-12T08:41:23.250096
| 2021-01-13T13:43:31
| 2021-01-13T13:43:31
| 284,257,832
| 1
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 3,988
|
r
|
StabilityChangesByLinearRegression.R
|
## NOTE(review): interactive leftovers -- these read the two input tables
## from the system clipboard (macOS `pbpaste`) whenever this file is
## sourced, which will fail/hang in non-interactive use.  TODO: move to an
## example or remove.
tbl_rna <- readr::read_delim(pipe("pbpaste"), delim="\t", col_names = TRUE)
tbl_pol2 <- readr::read_delim(pipe("pbpaste"), delim="\t", col_names = TRUE)
#' Classify transcript stability changes by linear regression
#'
#' Compares RNA log2 fold-changes against Pol II occupancy log2
#' fold-changes for actively transcribed genes (signal at or above the
#' 85th percentile in either condition).  Both fold-changes are converted
#' to percentile ranks, the RNA rank is regressed on the Pol II rank, and
#' genes with |studentised residual| >= 1 are called "stabilized" (above
#' the trend) or "destabilized" (below); all others are "no-change".
#'
#' Fix: `lower.tail = F` replaced with `FALSE` (`F` is a reassignable
#' binding, not a reserved word).
#'
#' @param tbl_rna Data frame/tibble: gene IDs in column 1, RNA signal for
#'   condition A and condition B in columns 2 and 3.
#' @param tbl_pol2 Data frame/tibble with the same layout for Pol II.
#' @param plot_label Title used on both diagnostic plots.
#' @return Tibble with per-gene LFCs, percentile ranks, studentised
#'   residual, p-value and stability category.  Side effects: prints the
#'   joined table, two ggplot diagnostics and a category count table.
StabilityChangesByLinearRegression <- function(tbl_rna, tbl_pol2, plot_label="sample"){
  ## --- RNA log2 fold-change (col 3 / col 2); Inf from zero counts -> 0
  column1 <- tbl_rna %>% colnames() %>% .[1]
  tbl_rna <- tbl_rna %>%
    dplyr::rename(Gene= dplyr::all_of(column1))
  rna_LFC <- tbl_rna %>%
    dplyr::mutate(LFC_RNA=log2(.[[3]]/.[[2]]),
                  LFC_RNA=ifelse(is.infinite(LFC_RNA), 0, LFC_RNA)) %>%
    replace(is.na(.), 0)
  ## --- Pol II: keep genes transcribed above the 85th percentile in
  ## either condition, then compute the Pol II log2 fold-change
  column2 <- tbl_pol2 %>% colnames() %>% .[1]
  tbl_pol2 <- tbl_pol2 %>%
    dplyr::rename(Gene= dplyr::all_of(column2))
  pol2_transcribing <- tbl_pol2 %>%
    dplyr::filter(.[[2]] >= quantile(.[[2]], 0.85) |
                    .[[3]] >= quantile(.[[3]], 0.85)) %>%
    dplyr::mutate(LFC_Pol2=log2(.[[3]]/.[[2]]))
  ## --- join (genes missing from RNA table get 0) and rank both LFCs
  pol2_rna <- dplyr::left_join(pol2_transcribing,rna_LFC) %>%
    replace(is.na(.), 0) %>%
    dplyr::mutate(percentile_rna = dplyr::percent_rank(LFC_RNA),
                  percentile_pol2=dplyr::percent_rank(LFC_Pol2))
  ## --- linear fit of RNA rank on Pol II rank; studentised residuals
  ## flag genes that deviate from the transcription-driven trend
  mod <- lm(percentile_rna~percentile_pol2, data=pol2_rna)
  pol2_rna_studentised <- pol2_rna %>%
    dplyr::select(c(Gene, LFC_RNA, LFC_Pol2, percentile_rna, percentile_pol2)) %>%
    dplyr::mutate(studentised=rstudent(mod),
                  ## one-sided normal p-value on |residual|
                  pvalue=pnorm(abs(studentised),sd = sd(studentised),lower.tail = FALSE),
                  category=dplyr::if_else(studentised>=1, "stabilized",
                                          dplyr::if_else(studentised<= -1, "destabilized", "no-change")))
  print(pol2_rna)
  #-- Plot Studentised Residual
  gg_st <- ggplot2::ggplot(pol2_rna_studentised,ggplot2::aes(x=1:nrow(pol2_rna_studentised),studentised,colour = category))+
    ggplot2::geom_point(size=2.5,alpha=0.7)+ggplot2::geom_hline(yintercept = c(-1,1),lwd=1.1)+
    ggplot2::theme_bw()+
    ggplot2::scale_color_manual(values=c("cyan3", "blue", "salmon"))+
    ggplot2::ggtitle(plot_label)+
    ggplot2::ylab("Studentised residual")+ggplot2::xlab("No. of Genes")+
    ggplot2::theme( axis.text = ggplot2::element_text(color="black",size=12),
                    axis.title.y=ggplot2::element_text(face="bold", color="black",size=14),
                    axis.title.x=ggplot2::element_text(face="bold", color="black",size=14),
                    legend.text=ggplot2::element_text(face="bold", color="black",size=12))
  print(gg_st)
  ## --- LFC scatter with Pearson correlation annotated in the corner
  cors <- pol2_rna_studentised %>%
    dplyr::summarise(cor=round(cor(LFC_Pol2, LFC_RNA), 2), max=max(c(LFC_Pol2, LFC_RNA)), min=min(c(LFC_Pol2, LFC_RNA)))
  max_lim <- max(cors$max)
  min_lim <- min(cors$min)
  x_cor <- max_lim - 1
  y_cor <- min_lim + 1
  gg_lfc <- ggplot2::ggplot(pol2_rna_studentised,ggplot2::aes(x=LFC_Pol2,LFC_RNA))+
    ggplot2::geom_point(size=2.5,alpha=0.7,ggplot2::aes(color=category))+
    ggplot2::ggtitle(plot_label)+
    ggplot2::scale_color_manual( values=c("cyan3", "blue", "salmon"))+
    ggplot2::xlim(min_lim, max_lim)+
    ggplot2::ylim(min_lim, max_lim)+
    ggplot2::theme_bw()+
    ggplot2::theme(axis.text = ggplot2::element_text(color="black",size=12),
                   axis.title.y=ggplot2::element_text(face="bold", color="black",size=14),
                   axis.title.x=ggplot2::element_text(face="bold", color="black",size=14),
                   legend.text=ggplot2::element_text(face="bold", color="black",size=12))+
    ggplot2::geom_text(data=cors, ggplot2::aes(label=paste("r=", cor, sep="")), x=x_cor, y=y_cor)
  print(gg_lfc)
  print(table(pol2_rna_studentised$category))
  return(pol2_rna_studentised)
}
|
0343dd195a0de568d6f7e5dc7e8cf32040ffc108
|
7bfb89889de933a645435b664fa343abad2e17e8
|
/man/prepare_priors_and_scores.Rd
|
43f4af455450701c28d5bb92cd1b85cc0e5098b0
|
[
"MIT"
] |
permissive
|
iiasa/mapspam2globiom
|
ab4d49c0e02e36c96c2df1531c13c2381ea40d9c
|
e8680fbd5d94b698a96e1a76d00fe6d9cd99f5f5
|
refs/heads/master
| 2023-06-05T20:20:21.249753
| 2021-06-23T08:06:21
| 2021-06-23T08:06:21
| 245,864,011
| 3
| 2
| null | null | null | null |
UTF-8
|
R
| false
| true
| 386
|
rd
|
prepare_priors_and_scores.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/prepare_priors_and_scores.R
\name{prepare_priors_and_scores}
\alias{prepare_priors_and_scores}
\title{Prepares priors and scores for all farming systems, crops and grid cells}
\usage{
prepare_priors_and_scores(param)
}
\description{
Prepares priors and scores for all farming systems, crops and grid cells
}
|
a1d3ccd3bbd2cbe642605cf5aab47e8a30c2880d
|
2a3ede958b2987423fb97d10c3d8a0d81be2b52e
|
/R/saveRawVms.R
|
4ff08d6ab071817520f813bdaa1ba86912f5ba60
|
[] |
no_license
|
vmsbase/R-vmsbase
|
b4b5db3de1d4e2f0c8095294016eba8e2d4c7caa
|
08b632c626e98ee13299d205ac080a204b1c4af2
|
refs/heads/master
| 2021-01-18T21:48:43.567092
| 2020-05-15T07:48:50
| 2020-05-15T07:48:50
| 15,307,509
| 3
| 6
| null | null | null | null |
UTF-8
|
R
| false
| false
| 14,314
|
r
|
saveRawVms.R
|
#' Raw VMS editing function
#'
#' \code{saveRawVms} implements the routines that convert raw values
#' to standard data.
#'
#' @param rawfile The raw VMS dataset: a list whose \code{data} element is
#'   a data frame of raw pings.
#' @param widget The widget list that contains the editing infos: column
#'   selections (positions 1-25) and the date format (position 26).
#'
#' @return The function returns the standardized VMS data: a data frame
#'   with columns I_NCEE, LAT, LON, DATE, SPE and HEA, with invalid pings
#'   removed (progress is reported on the console).
#'
#' @usage saveRawVms(rawfile, widget)
#'
#' @seealso \code{\link{gui_vms_editraw}}
saveRawVms <- function(rawfile, widget) {
  ## --- helpers (factor out the 8 copy-pasted NA-removal stanzas) -----
  ## Drop, from both the raw data and the output skeleton, every row whose
  ## raw column `col_name` is NA, reporting the count on the console.
  drop_na_pings <- function(raw, vms, col_name, what) {
    idx <- which(colnames(raw) == col_name)
    bad <- which(is.na(raw[, idx]))
    cat("\n - ", length(bad), " NAs found in ", what, "...", sep = "")
    if (length(bad) > 0) {
      raw <- raw[-bad, ]
      vms <- vms[-bad, ]
    }
    list(raw = raw, vms = vms)
  }
  ## Drop rows whose computed coordinate lies outside [lo, hi], reporting
  ## the count; also returns the filtered coordinate vector.
  drop_out_of_range <- function(raw, vms, values, lo, hi, what) {
    bad <- which(values < lo | values > hi)
    if (length(bad) > 0) {
      values <- values[-bad]
      vms <- vms[-bad, ]
      raw <- raw[-bad, ]
    }
    cat("\n - ", length(bad), " ", what, " out of range ( ", lo, " / ", hi, " )", sep = "")
    list(raw = raw, vms = vms, values = values)
  }
  ## Index of a raw-data column by name (recomputed after row removals).
  col_idx <- function(raw, col_name) which(colnames(raw) == col_name)

  ## --- unpack the widget selections ----------------------------------
  widget <- gsub("\"", "", as.character(widget))
  vessIdSel <- widget[1]
  latModeSel <- widget[2]
  latDeg <- widget[3]
  latMin <- widget[4]
  latSec <- widget[5]
  latDir <- widget[6]
  latDec <- widget[7]
  lonModeSel <- widget[8]
  lonDeg <- widget[9]
  lonMin <- widget[10]
  lonSec <- widget[11]
  lonDir <- widget[12]
  lonDec <- widget[13]
  timeMode <- widget[14]
  timeUtc <- widget[15]
  timeDate2a <- widget[16]
  timeDate2b <- widget[17]
  timeDate <- widget[18]
  timeHour <- widget[19]
  timeMinute <- widget[20]
  timeSecond <- widget[21]
  speedModeSel <- widget[22]
  speedCol <- widget[23]
  headModeSel <- widget[24]
  headCol <- widget[25]
  ## chron date/time parsing format
  if (widget[26] == "DD/MM/YYYY") {
    data_frm <- c(dates = "d/m/y", times = "h:m:s")
  } else {
    data_frm <- c(dates = "m/d/y", times = "h:m:s")
  }
  numlines <- length(rawfile$data[, 1])
  cat("\n\n --- Raw Pings Editing Started! ---\n",
    "\nProcessing ", numlines, " raw pings...\n",
    sep = ""
  )
  ## output skeleton, one row per raw ping; rows are removed in lock-step
  ## with rawfile$data as invalid pings are found
  vmsdata <- data.frame(
    "I_NCEE" = numeric(numlines),
    "LAT" = numeric(numlines),
    "LON" = numeric(numlines),
    "DATE" = numeric(numlines),
    "SPE" = numeric(numlines),
    "HEA" = numeric(numlines)
  )
  vmsdata[, "I_NCEE"] <- rawfile$data[, col_idx(rawfile$data, vessIdSel)]

  ## --- latitude -------------------------------------------------------
  if (latModeSel == "sex") {
    ## sexagesimal mode: degrees / minutes / seconds / direction columns
    for (spec in list(
      c(latDeg, "latitude degrees"),
      c(latMin, "latitude minutes"),
      c(latSec, "latitude seconds"),
      c(latDir, "latitude direction")
    )) {
      cleaned <- drop_na_pings(rawfile$data, vmsdata, spec[1], spec[2])
      rawfile$data <- cleaned$raw
      vmsdata <- cleaned$vms
    }
    pre_lat <- latsex2dec(
      rawfile$data[, col_idx(rawfile$data, latDeg)],
      rawfile$data[, col_idx(rawfile$data, latMin)],
      rawfile$data[, col_idx(rawfile$data, latSec)],
      rawfile$data[, col_idx(rawfile$data, latDir)]
    )
    checked <- drop_out_of_range(rawfile$data, vmsdata, pre_lat, -90, 90, "Latitudes")
    rawfile$data <- checked$raw
    vmsdata <- checked$vms
    vmsdata[, "LAT"] <- checked$values
  }
  if (latModeSel == "dec") {
    ## decimal-degrees mode: a single latitude column
    cleaned <- drop_na_pings(rawfile$data, vmsdata, latDec, "decimal latitude")
    rawfile$data <- cleaned$raw
    vmsdata <- cleaned$vms
    pre_lat <- rawfile$data[, col_idx(rawfile$data, latDec)]
    checked <- drop_out_of_range(rawfile$data, vmsdata, pre_lat, -90, 90, "Latitudes")
    rawfile$data <- checked$raw
    vmsdata <- checked$vms
    vmsdata[, "LAT"] <- checked$values
  }

  ## --- longitude (same scheme, range -180..180) -----------------------
  if (lonModeSel == "sex") {
    for (spec in list(
      c(lonDeg, "longitude degrees"),
      c(lonMin, "longitude minutes"),
      c(lonSec, "longitude seconds"),
      c(lonDir, "longitude direction")
    )) {
      cleaned <- drop_na_pings(rawfile$data, vmsdata, spec[1], spec[2])
      rawfile$data <- cleaned$raw
      vmsdata <- cleaned$vms
    }
    pre_lon <- lonsex2dec(
      rawfile$data[, col_idx(rawfile$data, lonDeg)],
      rawfile$data[, col_idx(rawfile$data, lonMin)],
      rawfile$data[, col_idx(rawfile$data, lonSec)],
      rawfile$data[, col_idx(rawfile$data, lonDir)]
    )
    checked <- drop_out_of_range(rawfile$data, vmsdata, pre_lon, -180, 180, "Longitudes")
    rawfile$data <- checked$raw
    vmsdata <- checked$vms
    vmsdata[, "LON"] <- checked$values
  }
  if (lonModeSel == "dec") {
    cleaned <- drop_na_pings(rawfile$data, vmsdata, lonDec, "decimal longitude")
    rawfile$data <- cleaned$raw
    vmsdata <- cleaned$vms
    pre_lon <- rawfile$data[, col_idx(rawfile$data, lonDec)]
    checked <- drop_out_of_range(rawfile$data, vmsdata, pre_lon, -180, 180, "Longitudes")
    rawfile$data <- checked$raw
    vmsdata <- checked$vms
    vmsdata[, "LON"] <- checked$values
  }

  ## --- date-time ------------------------------------------------------
  if (timeMode == "UTC") {
    ## already a single numeric timestamp column
    cleaned <- drop_na_pings(rawfile$data, vmsdata, timeUtc, "UTC times")
    rawfile$data <- cleaned$raw
    vmsdata <- cleaned$vms
    vmsdata[, "DATE"] <- rawfile$data[, col_idx(rawfile$data, timeUtc)]
  }
  if (timeMode == "Date + Time") {
    ## NOTE: times are checked before dates, matching the historical
    ## console-output order
    cleaned <- drop_na_pings(rawfile$data, vmsdata, timeDate2b, "times")
    rawfile$data <- cleaned$raw
    vmsdata <- cleaned$vms
    cleaned <- drop_na_pings(rawfile$data, vmsdata, timeDate2a, "dates")
    rawfile$data <- cleaned$raw
    vmsdata <- cleaned$vms
    vmsdata[, "DATE"] <- as.numeric(chron(
      as.character(rawfile$data[, col_idx(rawfile$data, timeDate2a)]),
      as.character(rawfile$data[, col_idx(rawfile$data, timeDate2b)]),
      data_frm
    ))
  }
  if (timeMode == "Date + H M S") {
    for (spec in list(
      c(timeDate, "dates"),
      c(timeHour, "hours"),
      c(timeMinute, "minutes"),
      c(timeSecond, "seconds")
    )) {
      cleaned <- drop_na_pings(rawfile$data, vmsdata, spec[1], spec[2])
      rawfile$data <- cleaned$raw
      vmsdata <- cleaned$vms
    }
    time <- paste(rawfile$data[, col_idx(rawfile$data, timeHour)],
      rawfile$data[, col_idx(rawfile$data, timeMinute)],
      rawfile$data[, col_idx(rawfile$data, timeSecond)],
      sep = ":"
    )
    vmsdata[, "DATE"] <- as.numeric(chron(
      as.character(rawfile$data[, col_idx(rawfile$data, timeDate)]),
      as.character(time),
      data_frm
    ))
  }
  ## rows whose date/time chron could not parse come back as NA
  tole_vda <- which(is.na(vmsdata[, "DATE"]))
  cat("\n - ", length(tole_vda), " dates found with bad format...", sep = "")
  if (length(tole_vda) > 0) {
    rawfile$data <- rawfile$data[-tole_vda, ]
    vmsdata <- vmsdata[-tole_vda, ]
  }

  ## --- speed (stored in km/h) ----------------------------------------
  if (speedModeSel == "Knots") {
    cleaned <- drop_na_pings(rawfile$data, vmsdata, speedCol, "knots speed")
    rawfile$data <- cleaned$raw
    vmsdata <- cleaned$vms
    vmsdata[, "SPE"] <- kno2kmh(rawfile$data[, col_idx(rawfile$data, speedCol)])
  }
  if (speedModeSel == "Km/h") {
    cleaned <- drop_na_pings(rawfile$data, vmsdata, speedCol, "km/h speed")
    rawfile$data <- cleaned$raw
    vmsdata <- cleaned$vms
    vmsdata[, "SPE"] <- rawfile$data[, col_idx(rawfile$data, speedCol)]
  }

  ## --- heading (stored in degrees) -----------------------------------
  if (headModeSel == "Rad") {
    cleaned <- drop_na_pings(rawfile$data, vmsdata, headCol, "radiants heading")
    rawfile$data <- cleaned$raw
    vmsdata <- cleaned$vms
    vmsdata[, "HEA"] <- rad2deg(rawfile$data[, col_idx(rawfile$data, headCol)])
  }
  if (headModeSel == "Deg") {
    cleaned <- drop_na_pings(rawfile$data, vmsdata, headCol, "degrees heading")
    rawfile$data <- cleaned$raw
    vmsdata <- cleaned$vms
    vmsdata[, "HEA"] <- rawfile$data[, col_idx(rawfile$data, headCol)]
  }

  cat("\nRemoved ", round((100 / numlines) * (numlines - nrow(vmsdata)), 2), "% of data, that is ", numlines - nrow(vmsdata), " pings\n",
    "\n\n --- Raw Pings Editing Complete! ---\n\n",
    sep = ""
  )
  return(vmsdata)
}
|
050709895ebcd5b9500f7377b5615bab0ba4b427
|
612142888df00ae67f0d537028da5656ca8b7b47
|
/man/nki7g.Rd
|
0c58b5ee4e4ba8bc5aa0b937d8339306d5597024
|
[] |
no_license
|
bhklab/survcomp
|
9241f71cdf1e51011988a85b54d7084a5e9283b0
|
3dada4303665b0816f6b5198cadd96cde7e53943
|
refs/heads/master
| 2022-03-22T16:38:34.391885
| 2021-12-14T19:29:21
| 2021-12-14T19:29:21
| 1,322,184
| 2
| 8
| null | 2022-03-09T22:25:41
| 2011-02-02T22:32:03
|
C++
|
UTF-8
|
R
| false
| false
| 2,879
|
rd
|
nki7g.Rd
|
\name{nki7g}
\alias{nki7g}
\docType{data}
\title{Subset of NKI dataset containing gene expression, annotations and clinical data.}
\description{
This dataset contains a subset of the gene expression, annotations and clinical data from the NKI datasets (see section details). The subset contains the seven genes introduced by Desmedt et al. 2008}
\format{
ExpressionSet with 7 features and 337 samples, containing:
\itemize{
\item \code{exprs(nki7g)}: Matrix containing gene expressions as measured by Agilent technology (dual-channel, oligonucleotides).
\item \code{fData(nki7g)}: AnnotatedDataFrame containing annotations of Agilent microarray platform.
\item \code{pData(nki7g)}: AnnotatedDataFrame containing Clinical information of the breast cancer patients whose tumors were hybridized.
\item \code{experimentalData(nki7g)}: MIAME object containing information about the dataset.
\item \code{annotation(nki7g)}: Name of the agilent chip.
}
}
\details{
This dataset represents a subset of the study published by van't Veer et al. 2002 and van de Vijver et al. 2002. The subset contains the genes AURKA (also known as STK6, STK7, or STK15), PLAU (also known as UPA), STAT1, VEGF, CASP3, ESR1, and ERBB2, as introduced by Desmedt et al. 2008. The seven genes represent the proliferation, tumor invasion/metastasis, immune response, angiogenesis, apoptosis phenotypes, and the ER and HER2 signaling, respectively.
}
\source{
\bold{nki:}
\url{http://www.rii.com/publications/2002/vantveer.html}
}
\references{
Laura J. van't Veer, Hongyue Dai, Marc J. van de Vijver, Yudong D. He, Augustinus A.M. Hart, Mao Mao, Hans L. Peterse, Karin van der Kooy, Matthew J. Marton, Anke T. Witteveen, George J. Schreiber, Ron M. Kerkhoven, Chris Roberts, Peter S. Linsley, Rene Bernards and Stephen H. Friend (2002) "Gene expression profiling predicts clinical outcome of breast cancer", \emph{Nature}, \bold{415}:530-536
M. J. van de Vijver, Y. D. He, L. van't Veer, H. Dai, A. M. Hart, D. W. Voskuil, G. J. Schreiber, J. L. Peterse, C. Roberts, M. J. Marton, M. Parrish, D. Atsma, A. Witteveen, A. Glas, L. Delahaye, T. van der Velde, H. Bartelink, S. Rodenhuis, E. T. Rutgers, S. H. Friend and R. Bernards (2002) "A Gene Expression Signature as a Predictor of Survival in Breast Cancer", \emph{New England Journal of Medicine}, \bold{347}(25):1999-2009
}
\examples{
## load Biobase package
library(Biobase)
## load the dataset
data(breastCancerData)
## show the first 5 columns of the expression data
exprs(nki7g)[ ,1:5]
## show the first 6 rows of the phenotype data
head(pData(nki7g))
## show the feature names (the seven signature genes)
featureNames(nki7g)
## show the experiment data summary
experimentData(nki7g)
## show the used platform
annotation(nki7g)
## show the abstract for this dataset
abstract(nki7g)
}
\keyword{datasets}
|
291a2ca33b011dadce4efb2cae2c6daeee4670fe
|
7afbb148ec11b3105aaead6bdd900f847e49eb18
|
/tests/testthat/test-poly.R
|
65727bc738f59a8d199f178c94b373537c4ed51d
|
[
"MIT"
] |
permissive
|
tidymodels/recipes
|
88135cc131b4ff538a670d956cf6622fa8440639
|
eb12d1818397ad8780fdfd13ea14d0839fbb44bd
|
refs/heads/main
| 2023-08-15T18:12:46.038289
| 2023-08-11T12:32:05
| 2023-08-11T12:32:05
| 76,614,863
| 383
| 123
|
NOASSERTION
| 2023-08-26T13:43:51
| 2016-12-16T02:40:24
|
R
|
UTF-8
|
R
| false
| false
| 6,272
|
r
|
test-poly.R
|
library(testthat)
library(recipes)
skip_if_not_installed("modeldata")
data(biomass, package = "modeldata")
## split the biomass data into the train/test partitions used throughout
biomass_tr <- biomass[biomass$dataset == "Training", ]
biomass_te <- biomass[biomass$dataset == "Testing", ]
## base recipe shared by several tests below
rec <- recipe(HHV ~ carbon + hydrogen + oxygen + nitrogen + sulfur,
  data = biomass_tr
)
test_that("correct basis functions", {
  ## degree-2 orthogonal polynomials for two predictors
  with_poly <- rec %>%
    step_poly(carbon, hydrogen, id = "")
  ## tidy() output should be identical before and after prep()
  exp_tidy_un <- tibble(
    terms = c("carbon", "hydrogen"),
    degree = rep(2L, 2),
    id = ""
  )
  expect_equal(exp_tidy_un, tidy(with_poly, number = 1))
  with_poly <- prep(with_poly, training = biomass_tr, verbose = FALSE)
  expect_equal(exp_tidy_un, tidy(with_poly, number = 1))
  with_poly_pred_tr <- bake(with_poly, new_data = biomass_tr)
  with_poly_pred_te <- bake(with_poly, new_data = biomass_te)
  ## reference values straight from stats::poly(); test-set values must
  ## come from predict() on the *training* basis
  carbon_poly_tr_exp <- poly(biomass_tr$carbon, degree = 2)
  hydrogen_poly_tr_exp <- poly(biomass_tr$hydrogen, degree = 2)
  carbon_poly_te_exp <- predict(carbon_poly_tr_exp, biomass_te$carbon)
  hydrogen_poly_te_exp <- predict(hydrogen_poly_tr_exp, biomass_te$hydrogen)
  ## extract the baked columns for each predictor as plain matrices
  carbon_poly_tr_res <- as.matrix(with_poly_pred_tr[, grep("carbon", names(with_poly_pred_tr))])
  colnames(carbon_poly_tr_res) <- NULL
  hydrogen_poly_tr_res <- as.matrix(with_poly_pred_tr[, grep("hydrogen", names(with_poly_pred_tr))])
  colnames(hydrogen_poly_tr_res) <- NULL
  carbon_poly_te_res <- as.matrix(with_poly_pred_te[, grep("carbon", names(with_poly_pred_te))])
  colnames(carbon_poly_te_res) <- 1:ncol(carbon_poly_te_res)
  hydrogen_poly_te_res <- as.matrix(with_poly_pred_te[, grep("hydrogen", names(with_poly_pred_te))])
  colnames(hydrogen_poly_te_res) <- 1:ncol(hydrogen_poly_te_res)
  ## remove attributes
  carbon_poly_tr_exp <- matrix(carbon_poly_tr_exp, ncol = 2)
  carbon_poly_te_exp <- matrix(carbon_poly_te_exp, ncol = 2)
  hydrogen_poly_tr_exp <- matrix(hydrogen_poly_tr_exp, ncol = 2)
  hydrogen_poly_te_exp <- matrix(hydrogen_poly_te_exp, ncol = 2)
  dimnames(carbon_poly_tr_res) <- NULL
  dimnames(carbon_poly_te_res) <- NULL
  dimnames(hydrogen_poly_tr_res) <- NULL
  dimnames(hydrogen_poly_te_res) <- NULL
  ## baked values must match stats::poly() exactly, train and test
  expect_equal(carbon_poly_tr_res, carbon_poly_tr_exp)
  expect_equal(carbon_poly_te_res, carbon_poly_te_exp)
  expect_equal(hydrogen_poly_tr_res, hydrogen_poly_tr_exp)
  expect_equal(hydrogen_poly_te_res, hydrogen_poly_te_exp)
})
test_that("check_name() is used", {
  ## baking would create `mpg_poly_1`, which already exists -> prep errors
  dat <- mtcars
  dat$mpg_poly_1 <- dat$mpg
  rec <- recipe(~ ., data = dat) %>%
    step_poly(mpg)
  expect_snapshot(
    error = TRUE,
    prep(rec, training = dat)
  )
})
test_that("tunable", {
  rec <-
    recipe(~., data = iris) %>%
    step_poly(all_predictors())
  ## the step should expose exactly one tunable parameter: `degree`
  rec_param <- tunable.step_poly(rec$steps[[1]])
  expect_equal(rec_param$name, c("degree"))
  expect_true(all(rec_param$source == "recipe"))
  expect_true(is.list(rec_param$call_info))
  expect_equal(nrow(rec_param), 1)
  expect_equal(
    names(rec_param),
    c("name", "call_info", "source", "component", "component_id")
  )
})
test_that("old option argument", {
  ## `degree` passed through the deprecated `options` list should still
  ## work, emitting the deprecation message
  expect_message(
    baked <-
      recipe(~., data = iris) %>%
      step_poly(Sepal.Width, options = list(degree = 3)) %>%
      prep() %>%
      bake(new_data = NULL),
    "The `degree` argument is now a main argument"
  )
  ## the degree-3 expansion replaces Sepal.Width with three poly columns
  expected_cols <- c(
    "Sepal.Length", "Petal.Length", "Petal.Width", "Species",
    "Sepal.Width_poly_1", "Sepal.Width_poly_2", "Sepal.Width_poly_3"
  )
  expect_equal(names(baked), expected_cols)
})
# Infrastructure ---------------------------------------------------------------
test_that("bake method errors when needed non-standard role columns are missing", {
  ## carbon/hydrogen get a non-standard role that is declared unnecessary
  ## at bake time, yet step_poly still needs them -> bake must error
  with_poly <- rec %>%
    step_poly(carbon, hydrogen, id = "") %>%
    update_role(carbon, hydrogen, new_role = "potato") %>%
    update_role_requirements(role = "potato", bake = FALSE)
  exp_tidy_un <- tibble(
    terms = c("carbon", "hydrogen"),
    degree = rep(2L, 2),
    id = ""
  )
  with_poly <- prep(with_poly, training = biomass_tr, verbose = FALSE)
  ## third column (carbon) is dropped from new_data
  expect_error(bake(with_poly, new_data = biomass_tr[, c(-3)]),
               class = "new_data_missing_column")
})
test_that("empty printing", {
  # Snapshot the print method for a step_poly() with an empty selection,
  # both before and after prep(). Snapshots record the deparsed code, so
  # keep the expressions stable.
  rec <- recipe(mpg ~ ., mtcars)
  rec <- step_poly(rec)
  expect_snapshot(rec)
  rec <- prep(rec, mtcars)
  expect_snapshot(rec)
})
test_that("empty selection prep/bake is a no-op", {
  # A step_poly() that selects no columns must leave the baked data
  # untouched.
  plain <- recipe(mpg ~ ., mtcars)
  with_step <- step_poly(plain)

  plain <- prep(plain, mtcars)
  with_step <- prep(with_step, mtcars)

  expect_identical(bake(plain, mtcars), bake(with_step, mtcars))
})
test_that("empty selection tidy method works", {
  rec <- step_poly(recipe(mpg ~ ., mtcars))

  # For an empty selection, tidy() yields a zero-row tibble with the step's
  # standard columns, both before and after prep().
  expected <- tibble(terms = character(), degree = integer(), id = character())

  expect_identical(tidy(rec, number = 1), expected)

  rec <- prep(rec, mtcars)
  expect_identical(tidy(rec, number = 1), expected)
})
test_that("keep_original_cols works", {
  poly_cols <- c("mpg_poly_1", "mpg_poly_2")

  # keep_original_cols = FALSE: only the derived columns remain.
  dropped <- recipe(~ mpg, mtcars) %>%
    step_poly(all_predictors(), keep_original_cols = FALSE) %>%
    prep() %>%
    bake(new_data = NULL)
  expect_equal(colnames(dropped), poly_cols)

  # keep_original_cols = TRUE: the input column is retained alongside them.
  kept <- recipe(~ mpg, mtcars) %>%
    step_poly(all_predictors(), keep_original_cols = TRUE) %>%
    prep() %>%
    bake(new_data = NULL)
  expect_equal(colnames(kept), c("mpg", poly_cols))
})
test_that("keep_original_cols - can prep recipes with it missing", {
  rec <- recipe(~ mpg, mtcars) %>%
    step_poly(all_predictors())
  # Simulate a step created by an older recipes version, before the
  # keep_original_cols field existed.
  rec$steps[[1]]$keep_original_cols <- NULL
  # prep() should emit a (snapshotted) message rather than fail...
  expect_snapshot(
    rec <- prep(rec)
  )
  # ...and bake() should then run without error (expect_error(..., NA)).
  expect_error(
    bake(rec, new_data = mtcars),
    NA
  )
})
test_that("printing", {
  # Pin the print output of an unprepped and a prepped recipe via snapshots.
  # `biomass_tr` comes from the shared setup earlier in this file.
  rec <- recipe(HHV ~ carbon + hydrogen + oxygen + nitrogen + sulfur,
    data = biomass_tr) %>%
    step_poly(carbon, hydrogen)
  expect_snapshot(print(rec))
  expect_snapshot(prep(rec))
})
test_that("tunable is setup to work with extract_parameter_set_dials", {
  skip_if_not_installed("dials")

  tune_rec <- recipe(~., data = mtcars) %>%
    step_poly(
      all_predictors(),
      degree = hardhat::tune()
    )

  # The tunable `degree` argument must surface as exactly one dials
  # parameter.
  param_set <- extract_parameter_set_dials(tune_rec)
  expect_s3_class(param_set, "parameters")
  expect_identical(nrow(param_set), 1L)
})
|
6ae77123a799702036dc443c51fdfdac553929ad
|
17064425f85aa3d26d5c755851baf3d73f141c9f
|
/rcode/Hypercubes.R
|
5e0b0b5a4dd72e0865e88c884552b2d5ce6c921b
|
[] |
no_license
|
coco90417/PM520
|
2a56dab0745a6b9a88e533aaa699894a69b347ff
|
0fbcd89748bcca2cd0abc1106bd01f2a094db263
|
refs/heads/master
| 2021-01-21T21:43:00.677594
| 2016-04-17T19:44:35
| 2016-04-17T19:44:35
| 56,454,512
| 0
| 1
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,682
|
r
|
Hypercubes.R
|
# Monte Carlo estimate of the fraction of a unit hypercube's volume lying
# within 0.1 of its surface, for dimensions 1..MaxNumberOfDimensions.
set.seed(329878)  # fix the RNG seed so the estimates are reproducible

NumberOfTrials <- 50000
MaxNumberOfDimensions <- 10  # we will go up to this many dimensions

# Preallocate the per-dimension counts instead of growing the vector.
TotalCountNearSurface <- numeric(MaxNumberOfDimensions)

for (j in seq_len(MaxNumberOfDimensions)) {
  HowManyCloseToSurface <- 0  # count of sampled points near the surface

  for (i in seq_len(NumberOfTrials)) {
    # Pick a point uniformly at random in the j-dimensional unit cube.
    # (One runif(j) call per trial, preserving the original RNG stream.)
    MyPoint <- runif(j, 0, 1)

    # The point is within 0.1 of the surface iff any coordinate is < 0.1
    # or > 0.9 (equivalent to max(.) > 0.9 || min(.) < 0.1).
    if (any(MyPoint > 0.9 | MyPoint < 0.1)) {
      HowManyCloseToSurface <- HowManyCloseToSurface + 1
    }
  }

  # HowManyCloseToSurface / NumberOfTrials estimates the proportion of the
  # cube's volume within 0.1 of its surface.
  TotalCountNearSurface[j] <- HowManyCloseToSurface
  cat("\nEstimatedProb.:", TotalCountNearSurface[j] / NumberOfTrials)
}

# Estimated near-surface proportion (y-axis) against dimension (x-axis).
plot(TotalCountNearSurface / NumberOfTrials)

# Exact answer: the interior cube has side 0.8, so the near-surface
# proportion in d dimensions is 1 - 0.8^d (vectorized, no loop needed).
ExpectedAnswer <- 1 - 0.8^seq_len(MaxNumberOfDimensions)

# Superimpose the exact curve on the Monte Carlo estimates.
lines(ExpectedAnswer)

# As the dimension grows this proportion tends to 1: in high dimensions
# almost all of the cube's volume lies close to its surface.
|
84e7a14659adc0e43a7a234b8b5889677996df29
|
287add902a548b978254b03f571f5e127d325e88
|
/R/moreLibs_m.R
|
53e57a01072ea5142a6579ac486f6afe9d400629
|
[] |
no_license
|
Auburngrads/publicLibs
|
e36884552220fcf859d28ef5cc16d26baeb23f65
|
804efbb6bc80f5053712e375a09d2d46ce2f61a6
|
refs/heads/master
| 2021-01-17T02:44:58.943620
| 2020-07-20T00:32:03
| 2020-07-20T00:32:03
| 58,672,156
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 9,133
|
r
|
moreLibs_m.R
|
#' Get the url for each state on public-libraries.org
#' @import datasets
#' @importFrom httr GET content
#' @importFrom XML htmlTreeParse xpathSApply xmlValue
#' @param url Character string; the public-libraries.org index page to
#'   scrape. (Fixed: the old docs described a nonexistent `state` argument.)
#' @return A data frame with one row per US state: `state_name`,
#'   `state_abb` (lower case) and the state's library-listing `url`.
state_lib_urls <- function(url = 'http://www.public-libraries.org/') {
  # Parse HTML tags from page tree and extract anchor tag values
  get_request <- httr::GET(url)
  get_content <- httr::content(get_request, encoding = 'UTF-8')
  parsed_html <- XML::htmlTreeParse(get_content, useInternalNodes = TRUE)
  raw_anchors <- XML::xpathSApply(parsed_html, "//a", XML::xmlValue)

  # Keep only anchors whose (lower cased, space-stripped) text matches a
  # US state name. Assumes the page lists the 50 states in the same order
  # as datasets::state.name -- TODO confirm against the live page.
  fmt_anchors <- tolower(gsub(' ', '', raw_anchors))
  state_names <- tolower(gsub(' ', '', datasets::state.name))
  anchors <- fmt_anchors[fmt_anchors %in% state_names]

  base_url <- 'http://library.public-libraries.org'
  state_urls <- paste(file.path(base_url, anchors, tolower(state.abb)),
                      '.html',
                      sep = '')

  # BUG FIX: the original used `state_abb <- tolower(state.abb)` inside
  # data.frame(), which produced a mangled column name and leaked a
  # variable into the function environment; `=` names the column properly.
  zout <- data.frame(state_name = state.name,
                     state_abb = tolower(state.abb),
                     url = state_urls,
                     stringsAsFactors = FALSE)
  return(zout)
}
#' Get the url for a local libraries in a state on public-libraries.org
#' @param state Character string for state name or two-letter abbreviation
#' @param base_url Root url for local libraries on public-libraries.org
#' @import datasets
city_lib_urls <- function(state = 'AL',
                          base_url = 'http://library.public-libraries.org')
{
  # Normalise the query and build a lookup of (abbreviation, full name),
  # both lower cased with spaces removed.
  state <- gsub(' ', '', tolower(state))
  lookup <- data.frame(gsub(' ', '', tolower(state.abb)),
                       gsub(' ', '', tolower(state.name)),
                       stringsAsFactors = FALSE)

  # First match of `state` against either column identifies the state row.
  row_id <- unlist(sapply(lookup, FUN = function(x) grep(state, x)))[[1]]
  st_name <- lookup[row_id, 2]
  st_abb <- lookup[row_id, 1]

  # Fetch the state's page and pull every anchor's text out of the HTML.
  state_url <- paste(file.path(base_url, st_name, st_abb), '.html', sep = '')
  resp <- httr::GET(state_url)
  html <- httr::content(resp, encoding = 'UTF-8')
  doc <- XML::htmlTreeParse(html, useInternalNodes = TRUE)
  anchors <- XML::xpathSApply(doc, "//a", XML::xmlValue)

  # Keep the "<City> Libraries" anchors, strip punctuation and the label,
  # then drop the leading and trailing navigation entries.
  cities <- anchors[grep('Libraries', anchors)]
  cities <- gsub("[[:punct:]]", '', cities)
  cities <- qdap::mgsub(c("Libraries", " "), '', cities)
  cities <- cities[2:(length(cities) - 2)]

  city_urls <- paste('http://',
                     st_name, '.public-libraries.org/library/',
                     st_abb, '/', tolower(cities),
                     '.html',
                     sep = '')

  data.frame(state_name = st_name,
             state_abb = st_abb,
             city = tolower(cities),
             city_url = city_urls,
             stringsAsFactors = FALSE)
}
lib_urls <- function(state_name,
                     state_abb,
                     city)
{
  # Work with lower-cased identifiers throughout.
  state_name <- tolower(state_name)
  state_abb <- tolower(state_abb)
  city <- tolower(city)

  # Fetch the city's page and extract every anchor's text.
  page_url <- paste('http://', state_name, '.public-libraries.org/library/',
                    state_abb, '/', city,
                    '.html',
                    sep = '')
  resp <- httr::GET(page_url)
  html <- httr::content(resp, encoding = 'UTF-8')
  doc <- XML::htmlTreeParse(html, useInternalNodes = TRUE)
  anchors <- XML::xpathSApply(doc, "//a", XML::xmlValue)

  # Library names sit between the last ">>Read more" link and the
  # "<state> libraries" footer link.
  first <- max(grep('>>Read more', anchors)) + 1
  last <- grep(paste0(state_name, ' libraries'), tolower(anchors)) - 1
  libs <- anchors[first:last]
  libs <- gsub("[[:punct:]]", '', libs)
  libs <- gsub(" ", '', libs)

  urls <- paste('http://library.public-libraries.org/',
                state_name, '/',
                city, '/',
                tolower(libs),
                '.html',
                sep = '')

  data.frame(state_name = state_name,
             state_abb = state_abb,
             city = tolower(city),
             lib = libs,
             lib_url = urls,
             stringsAsFactors = FALSE)
}
# Fetch a single library's page and return its table text split into lines
# (a list of character vectors, one per HTML table).
lib_data <- function(lib_url) {
  get_request <- httr::GET(lib_url)
  get_content <- httr::content(get_request, encoding = 'UTF-8')
  parsed_html <- XML::htmlTreeParse(get_content, useInternalNodes = TRUE)

  # Table cells carrying the statistic labels, identified by their
  # background image.
  raw_tables <- XML::xpathSApply(
    parsed_html,
    '//td[@background="http://www.public-libraries.org/images/back.gif"]',
    XML::xmlValue)

  # BUG FIX: the original referenced the undefined `tl3` and used `tl4`
  # before it was assigned; both now refer to the objects created above.
  tl4 <- gsub(' ', '', raw_tables)
  tl4 <- tl4[-c(1, length(tl4))]

  # NOTE(review): `name` is computed but never used or returned; presumably
  # the library's display name -- confirm the intended return shape.
  name <- XML::xpathSApply(parsed_html, '//td[@bgcolor="#6e7b8a"]',
                           XML::xmlValue)

  # Flatten every table's text into trimmed lines, one character vector per
  # table; this is the last expression and therefore the return value.
  tl5 <- XML::xpathSApply(parsed_html, "//table", XML::xmlValue)
  tl6 <- gsub(' ', '', tl5)
  tl7 <- strsplit(tl6, '\\n')
  lapply(tl7, FUN = unlist)
}
# ---------------------------------------------------------------------------
# NOTE(review): fragment -- this loop is the body of a per-state scraping
# function whose opening (defining `i`, `st3`, `st4`, `mat`, `smalls`, and
# matching the final `}` below) is not visible in this chunk. Code left
# byte-identical; comments only.
# ---------------------------------------------------------------------------
for(j in 1:length(st4)) { # length(st4)
  # Build the URL of city j's library listing for state i.
  url.lib <- paste(c(tolower(state.name[i]),
                     '.public-libraries.org/library/',
                     datasets::state.abb[i],'/',
                     st4[j],
                     '.html'),
                   collapse = '')
  dl1 <- httr::GET(url.lib)
  dl2 <- httr::content(dl1, encoding = 'UTF-8')
  doc.lib = XML::htmlTreeParse(dl2, useInternalNodes=T)
  llib <- XML::xpathSApply(doc.lib,"//a",XML::xmlValue)
  # Drop the first 9 and last 8 anchors (site navigation); magic offsets
  # presumably match the page template -- verify against the live site.
  llib2 <- llib[-c(1:9)]
  lll <- length(llib2)
  llib3 <- llib2[-c((lll-7):lll)]
  llib4 <- gsub('/', '', llib3) # Library Name
  # Strip spaces and punctuation to form the URL slug for each library.
  llib5 <- gsub(' ', '', llib4)
  llib5 <- gsub("'", '', llib5)
  llib5 <- gsub('\\.', '', llib5)
  llib5 <- gsub('-', '', llib5)
  for(l in 1:length(llib5)) {
    # Fetch library l's detail page.
    the.lib <- paste(c('library.public-libraries.org/',
                       state.name[i],'/',
                       st4[j],'/',llib5[l],'.html'),
                     collapse = '')
    tl1 <- httr::GET(the.lib)
    tl2 <- httr::content(tl1, encoding = 'UTF-8')
    tl3 <- XML::htmlTreeParse(tl2, useInternalNodes=T)
    # Statistic labels (cells with the back.gif background), whitespace
    # stripped; first and last entries are template chrome.
    tl4 <- XML::xpathSApply(tl3,'//td[@background="http://www.public-libraries.org/images/back.gif"]',XML::xmlValue); tl4 <- gsub(' ', '', tl4)
    name <- XML::xpathSApply(tl3,'//td[@bgcolor="#6e7b8a"]',XML::xmlValue)
    tl4 <- tl4[-c(1,length(tl4))]
    # Split every table's text into lines.
    tl5 <- XML::xpathSApply(tl3,"//table",XML::xmlValue)
    tl6 <- gsub(' ', '', tl5)
    tl7 <- strsplit(tl6, '\\n')
    tl8 <- lapply(tl7, FUN = unlist)
    if(length(tl8) < 30) {
      # Too few tables: page lacks the statistics layout; record it as
      # excluded rather than parse it.
      little <- data.frame(library = llib4[l],
                           City = st3[j],
                           State = state.name[i])
      smalls <- plyr::rbind.fill(smalls, little)
    } else {
      # Locate the tables whose content matches exactly one known label;
      # the table of interest follows each (hence the +1).
      tl9 <- unlist(lapply(tl8, FUN = function(x) sum(x%in%lapply(tl4, FUN = function(x) x))))
      tl0 <- which(tl9==1)+1
      # Hand-repair two tables whose label/value cells are misaligned in
      # the template (magic indices -- confirm against the live page).
      tl8[tl0[4]][[1]][1] <- paste(c(tl8[tl0[4]][[1]][1:2]), collapse = ' ')
      tl8[tl0[4]][[1]] <- tl8[tl0[4]][[1]][-2]
      tl8[tl0[5]][[1]] <- tl8[tl0[5]][[1]][-c(1,12)]
      # Reshape each table into two columns (label, value) and stack them.
      tla <- lapply(tl8[tl0], FUN=function(x) matrix(unlist(x), ncol=2, byrow = T))
      tlb <- lapply(tla, FUN = function(x) {matrix(x, ncol = 2, byrow = F)})
      tlc <- rbind(tlb[[1]], tlb[[2]], tlb[[3]],tlb[[4]], tlb[[5]], tlb[[6]])
      tld <- t(tlc)
      colnames(tld) <- c('Relationship','Operated','Area','Population','Visits','Hours','Libraries', 'Branches', 'Bookmobiles','Books','Audio','Video','Subscriptions','Circulation','Loans Provided','Loans Received','ALA Librarians','FT Librarians','Other Full Time','Total Full Time','Children Circulation','Children Attendance','Income-local','Income-state','Income-federal','Income-other','Income-total','Salaries','Benefits','Expenditures-staff','Expenditures-collection','Expenditures-operating-other','Expenditures-operating-total','Expenditures-capital', 'Expenditures-electronic materials','Expenditures-electronic access','Materials electronic format','Access to electronic services','Access to the internet', 'Staff terminals', 'Public Terminals','Weekly Elect Resources')
      # Keep the value row, strip $ and thousands separators, and coerce
      # the numeric statistic columns.
      tle <- t(data.frame(tld[-1,], stringsAsFactors = F))
      tlf <- gsub('\\$', '',tle)
      tlf <- gsub(',', '',tlf)
      tlf[,c(4:37,40:42)] <- as.numeric(tlf[,c(4:37,40:42)])
      # Prefix identifying columns and append to the accumulator `mat`.
      tlg <- data.frame(llib4[l], st3[j],state.name[i],tlf, stringsAsFactors = F)
      colnames(tlg) <- c('Library', 'City', 'State', colnames(tlf))
      mat <- plyr::rbind.fill(mat, tlg)
    }
  }
}
# Coerce the numeric statistic columns for the full state, then package the
# results and persist them as both text and compressed .rda.
mat[,c(7:40,43:45)] <- lapply(X = mat[,c(7:40,43:45)], FUN = function(x) { as.numeric(x) })
zzz <- list()
zzz$libs <- mat
zzz$excluded <- smalls
txt.name <- paste(c('inst/','extdata3/',state.abb[i],'_Libs2.txt'),collapse = '')
write.table(zzz$libs, file = txt.name, row.names = F)
rda.name <- paste(c(state.abb[i],'_Libs2'),collapse = '')
assign(rda.name, zzz$libs, envir = environment())
save(list = rda.name,
     file = paste(c("data/",tolower(rda.name),'.rda'), collapse = ''),
     compress = 'xz',
     compression_level = 9)
invisible(zzz)
# NOTE(review): this closing brace pairs with the enclosing function whose
# definition precedes this fragment.
}
|
fea17b1e920f5f440988b19bb63b0e42c7aae3a0
|
ff005c8dbf2731fe08de8577dccb306b0eb01590
|
/movie_critics/moduleChangeTheme.R
|
c87482b7f8ed0085a773b869f3cf112f14fe3ecf
|
[
"Apache-2.0"
] |
permissive
|
tim-heuwinkel/r-visualizations
|
706065409448a5fa579795a7591c5353a5dfeb39
|
53439ffdf5c9ca7c77e873ce214e0a8d1f23b223
|
refs/heads/main
| 2023-07-31T18:16:29.984495
| 2021-09-11T23:27:46
| 2021-09-11T23:27:46
| 393,496,475
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,126
|
r
|
moduleChangeTheme.R
|
library(dashboardthemes)
library(shiny)
library(shinydashboard)
# Code below taken from: https://github.com/nik01010/dashboardThemeSwitcher
# Ui functions ------------------------------------------------------------
# Dropdown UI for picking a dashboard theme; pairs with serverChangeTheme().
uiChangeThemeDropdown <- function(dropDownLabel = "Change Theme", defaultTheme = "grey_dark")
{
  # Theme ids understood by dashboardthemes, keyed by display label.
  themeChoices <- c(
    "Grey dark" = "grey_dark",
    "Purple gradient" = "purple_gradient"
  )

  # Namespace the input id so it matches the "moduleChangeTheme" module.
  ns <- NS("moduleChangeTheme")

  tagList(
    selectizeInput(
      inputId = ns("dbxChangeTheme"),
      label = dropDownLabel,
      choices = themeChoices,
      selected = defaultTheme
    )
  )
}
# Placeholder that serverChangeTheme() fills with the active theme's CSS.
uiChangeThemeOutput <- function()
{
  ns <- NS("moduleChangeTheme")
  tagList(uiOutput(ns("uiChangeTheme")))
}
# Server functions --------------------------------------------------------
# Server half of the theme-switcher module: re-renders the theme CSS
# whenever the dropdown selection changes.
serverChangeTheme <- function(input, output, session)
{
  observeEvent(input$dbxChangeTheme, {
    output$uiChangeTheme <- renderUI({
      shinyDashboardThemes(theme = input$dbxChangeTheme)
    })
  })
}
|
9817723555d710e52bcdc04d031564883408dac3
|
c508332ca0b0f2c8c02ae1cf83ccf34b8e08c9d4
|
/GameDay2/helpers.R
|
3efbc30a12562cb3a4270bb4734af5219f289da6
|
[] |
no_license
|
dnordgren/KiattenMittons
|
5d9d932ef91a85bc150bc57b0288c29d4b553fb3
|
647462f0f39fa1d39d7664d4006a0112b7bc6ad6
|
refs/heads/master
| 2021-03-27T15:43:10.102770
| 2015-04-28T00:44:23
| 2015-04-28T00:44:23
| 30,683,866
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 706
|
r
|
helpers.R
|
library(data.table)
library(xlsx)
# Closure factory: read the configured data directory once, then return a
# reader for vote files located in that directory.
get_data <- (function() {
  source('app_config.R', local = TRUE)
  data_dir <- get_config('data_dir')

  function(fname, rank_flip = FALSE) {
    # Load the vote table; the first column is assumed to be `movie`
    # (it is used as the data.table key below).
    votes <- fread(paste0(data_dir, '/', fname))

    if (rank_flip) {
      # Invert every non-key column in place: value v becomes (nrow - v).
      flip_cols <- colnames(votes)[-1]
      n <- nrow(votes)
      votes[, (flip_cols) := lapply(.SD, function(v) { n - v }),
            .SDcols = flip_cols]
    }

    setkey(votes, 'movie')
    votes
  }
})()
# Write a round's movie table to "Round<round>_<team_name>.xlsx", with one
# sheet named after the round.
to_excel <- function(round, movies) {
  source('app_config.R', local = TRUE)
  team <- get_config('team_name')

  out_file <- sprintf('Round%s_%s.xlsx', round, team)
  write.xlsx(x = movies, file = out_file,
             sheetName = sprintf('Round%s', round), row.names = FALSE)
}
|
5cf72921c29a70268c75762e3760a55743757f2e
|
9157f22cbe21c896b7a69d74504f70ea6e365305
|
/man/buy.Rd
|
c2b40514c16bd7c56bd3af7b8f65a9f179d910b4
|
[
"LicenseRef-scancode-warranty-disclaimer",
"MIT"
] |
permissive
|
weigao10/keyboardpadpls
|
e746872a73afa429e5907851e87b63fd8d3f47c1
|
48b8fd885120c9ea000f11c96260cbce3bac638d
|
refs/heads/master
| 2023-03-17T14:53:44.525443
| 2020-08-11T23:53:57
| 2020-08-11T23:53:57
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 890
|
rd
|
buy.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/nom.R
\name{buy}
\alias{buy}
\title{Click the earliest available time}
\usage{
buy(n_tries = 100, timeout_after = 30 * 60, end_earlier = TRUE, sleep_time = 1)
}
\arguments{
\item{n_tries}{Number of times to try refreshing before giving up.}
\item{timeout_after}{Number of seconds to try refreshing before giving up.}
\item{end_earlier}{If true, the loop will end after either \code{n_tries} is reached or \code{timeout_after} is reached. If false, both conditions must be met.}
\item{sleep_time}{Average number of seconds to sleep in between refreshes. Exact sleep time is randomly chosen from a uniform distribution of \code{sleep_time - sleep_time/2} to \code{sleep_time + sleep_time/2}.}
}
\value{
}
\description{
If no times are available, keep refreshing.
}
\examples{
\dontrun{
buy(sleep_time = 2)
}
}
|
48c70d490ee9987135573e6fbf661c430ca423fd
|
9bdef83f28b070321ba27709d2c7ec028474b5c3
|
/R/visualization/plot3D.R
|
4b4d12037d26a981c3b6edb4815baae3638bad48
|
[] |
no_license
|
antagomir/scripts
|
8e39ce00521792aca1a8169bfda0fc744d78c285
|
c0833f15c9ae35b1fd8b215e050d51475862846f
|
refs/heads/master
| 2023-08-10T13:33:30.093782
| 2023-05-29T08:19:56
| 2023-05-29T08:19:56
| 7,307,443
| 10
| 15
| null | 2023-07-19T12:36:45
| 2012-12-24T13:17:03
|
HTML
|
UTF-8
|
R
| false
| false
| 1,016
|
r
|
plot3D.R
|
#https://stat.ethz.ch/pipermail/r-help/2008-May/161202.html
# 3D scatter plot with rgl: normalises the data into a unit cube, optionally
# draws axes with pretty tick labels, then renders one sphere per point.
rgl.plot3d <- function(z, x, y, cols="red", axes=T, new=T)
{
  # Normalise each axis to [0, 1]; keep the raw ranges for tick placement.
  x_rng <- range(x)
  y_rng <- range(y)
  z_rng <- range(z)
  x_unit <- (x - x_rng[1]) / (x_rng[2] - x_rng[1])
  y_unit <- (y - y_rng[1]) / (y_rng[2] - y_rng[1])
  z_unit <- (z - z_rng[1]) / (z_rng[2] - z_rng[1])

  if (new) rgl.clear()

  if (axes) {
    # Tick labels in data units, positioned in unit-cube coordinates.
    x_ticks <- pretty(x)
    y_ticks <- pretty(y)
    z_ticks <- pretty(z)
    x_at <- (x_ticks - x_rng[1]) / (x_rng[2] - x_rng[1])
    y_at <- (y_ticks - y_rng[1]) / (y_rng[2] - y_rng[1])
    z_at <- (z_ticks - z_rng[1]) / (z_rng[2] - z_rng[1])

    # Axis lines along three edges of the cube, slightly overdrawn.
    rgl.lines(c(0, 1.1), 0, 0)
    rgl.lines(0, c(0, 1.1), 0)
    rgl.lines(0, 0, c(0, 1.1))

    # Tick labels, offset just outside the cube.
    rgl.texts(x_at, -.05, -.05, x_ticks)
    rgl.texts(-.05, y_at, -.05, y_ticks)
    rgl.texts(-.05, -.05, z_at, z_ticks)

    # Axis titles taken from the caller's expressions.
    rgl.texts(c(0.5, -.15, -.15), c(-.15, .5, -.15), c(-.15, -.15, .5),
              c(deparse(substitute(x)), deparse(substitute(y)),
                deparse(substitute(z))))
  }

  rgl.spheres(x_unit, y_unit, z_unit, .01, color = cols)
}
# Example usage of rgl.plot3d() defined above.
library(rgl)
data(iris)
# First principal components of the four iris measurements (scaled).
iris.pc<-prcomp(iris[,1:4],scale=T)
# NOTE(review): rgl.plot3d's signature is (z, x, y, ...), so the first
# positional argument lands on the z axis -- confirm this ordering is
# intended for the demo.
rgl.plot3d(iris.pc$x[,1],iris.pc$x[,2],iris.pc$x[,3])
# Same plot, one colour per species; `col` partially matches the `cols`
# argument of rgl.plot3d().
rgl.plot3d(iris.pc$x[,1],iris.pc$x[,2],iris.pc$x[,3],col=unclass(iris[,5])+1)
|
889112f318e81e1b90d78857cd6298f67c3f4175
|
7756510ce32073ce2ba906ee12b6d93b7383f475
|
/WindRose - FsaB.R
|
5f524b8c454e348fd760b4ac74cc9638fc04693e
|
[] |
no_license
|
stringstar315/EPD_R
|
313bda5e1e8c890c2b02d1fb8a245a3e1635813e
|
f06e32dcb7ec086f198eddd98975227394b7e909
|
refs/heads/master
| 2021-01-18T18:17:57.772178
| 2016-09-19T10:05:29
| 2016-09-19T10:05:29
| 61,719,526
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,306
|
r
|
WindRose - FsaB.R
|
## Plot and compare SO2 wind roses (polarFreq) for two periods:
## 1. 2014.7 to 2015.6
## 2. 2015.7 to 2016.6
## Relies on default.R for data loading (it defines m1, dir_Wind,
## dir_Master, LoadWind, ...).
airYear = 2014:2016
pollutant.pick = c("date", "site", "so2_ug")
logEnable = T
source('default.R')
wStation.pick = c("GI")
w1 = LoadWind(dir_Wind, dir_Master, 2011:2016, wStation.pick)
## Match airData and AQHIdata and windData
m2 = merge.data.frame(m1, w1[[1]], by = "date", all.x = T)
# BUG FIX: the original `for (j in 2:length(wStation.pick))` evaluated to
# 2:1 when only one station is picked (as here), indexing w1[[2]] out of
# bounds. seq_along(...)[-1] is empty for a single station.
for (j in seq_along(wStation.pick)[-1]) {
  m2 = merge.data.frame(m2, w1[[j]], by = "date", all.x = T)
}
glimpse(m2)
m3 = m2 %>% filter(site == 'KC_HK')
# Columns 6/7 are renamed to the ws/wd names openair expects --
# NOTE(review): positional renaming; confirm against the merged layout.
names(m3)[6] = 'ws'
names(m3)[7] = 'wd'
# Shared plot settings (removed a duplicated `fontsizePlot = 24`).
colPlot = c('darkgreen', 'palegreen','#FFFEE0', 'orange','red')
fontsizePlot = 24
yearPlot = 2012:2014
brkVec = c(seq(0,60,5))
legTxt = "SO2 (ug/m3)"
## Period 1: July 2014 - June 2015
#plotData = selectByDate(m3, year = 2010:2014, month = 7:9)
plotData = selectByDate(m3, start = '1/7/2014', end = '30/6/2015')
# NOTE(review): writes a file literally named "2014" (no extension).
write.csv(plotData, '2014')
p1 = polarFreq(plotData, grid = 5, annotate = F,
               pollutant = 'so2_ug', offset = 0, breaks = brkVec,
               angle = 10, statistic = "mean", cols = colPlot, min.bin = 1,
               par.settings=list(fontsize=list(text=fontsizePlot)), mis.col = "transparent",
               ws.int = 2)
# Symmetric axis limits and a square aspect keep the rose circular.
wdLIM = 24
p1$plot$y.limits = c(-wdLIM, wdLIM)
p1$plot$x.limits = c(-wdLIM, wdLIM)
p1$plot$aspect.ratio = 1
p1$plot$legend$right$args$key$footer = legTxt
p1$plot$legend$right$args$key$header = ""
p1$plot
# Annotate the wind-speed axis at 45 degrees.
xpos = wdLIM*0.7
ypos = xpos*0.75
trellis.last.object() + layer(ltext(xpos, ypos, srt = 45,
                                    expression(Wind ~ Speed ~ "("~m~s^-1~")" ), cex = 0.65))
## Period 2: July 2015 - June 2016 (same settings as above)
plotData = selectByDate(m3, start = '1/7/2015', end = '30/6/2016')
p1 = polarFreq(plotData, grid = 5, annotate = F,
               pollutant = 'so2_ug', offset = 0, breaks = brkVec,
               angle = 10, statistic = "mean", cols = colPlot, min.bin = 1,
               par.settings=list(fontsize=list(text=fontsizePlot)), mis.col = "transparent",
               ws.int = 2)
wdLIM = 24
p1$plot$y.limits = c(-wdLIM, wdLIM)
p1$plot$x.limits = c(-wdLIM, wdLIM)
p1$plot$aspect.ratio = 1
p1$plot$legend$right$args$key$footer = legTxt
p1$plot$legend$right$args$key$header = ""
p1$plot
xpos = wdLIM*0.7
ypos = xpos*0.75
trellis.last.object() + layer(ltext(xpos, ypos, srt = 45,
                                    expression(Wind ~ Speed ~ "("~m~s^-1~")" ), cex = 0.65))
#########end######
|
16905c97357623c00fd941534b3743c9747f0b0f
|
3ff0771c80378e261b9f58f9dcefb3b8b892b253
|
/man/pseudoLog10.Rd
|
eeb7ddc3c586c9343899e095d6622b9fcacfd50d
|
[] |
no_license
|
const-ae/proDD_old
|
4f008542b7aeeca8ec05f0c6f14abe4ecc50a813
|
4176aea1b337d46299cddd5f5e38cc9642e171f8
|
refs/heads/master
| 2021-08-16T22:06:09.033883
| 2017-11-20T11:23:11
| 2017-11-20T11:23:11
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 618
|
rd
|
pseudoLog10.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/helper_functions.R
\name{pseudoLog10}
\alias{pseudoLog10}
\title{Transform values with a function that is very close to log10 for
values in the range 1 to infinity, but has the properties that
pseudoLog10(0) = 0 and pseudoLog10(-x) = -pseudoLog10(x)}
\usage{
pseudoLog10(x)
}
\arguments{
\item{x}{a numeric vector}
}
\description{
Transforms values with a function that is very close to log10 for values
in the range 1 to infinity, but has the properties that pseudoLog10(0) = 0
and pseudoLog10(-x) = -pseudoLog10(x)
}
|
65d024461bb6e21ef80f647b3ae4824b49d471a2
|
8853ae897c2737ca95401f215ba9b8d505c81b0a
|
/project/code/analysis/messingaround_tm.R
|
b1097995d6ae86f7f02e02675dbc93731df8e327
|
[
"BSD-2-Clause"
] |
permissive
|
cycomachead/info290
|
fe563cb9b12f477c1176b68fa11d778893c993c1
|
694361cfa755daec24c773e15d5bc965411d4caf
|
refs/heads/master
| 2020-07-04T15:22:00.330663
| 2015-05-14T12:27:24
| 2015-05-14T12:27:24
| 30,524,205
| 2
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,614
|
r
|
messingaround_tm.R
|
# Exploratory topic-modelling script (tm/topicmodels) over beer-review text.
# NOTE(review): several objects used below (`topic.model`, `input`,
# `all.beers.names.only`, `word.freqs`, `words_everything`) are not defined
# in this file -- presumably created interactively or in another script.
library('topicmodels')
library('tm')
library('SnowballC')
# Warm-up: fit a 2-topic correlated topic model on 20 AP documents.
data("AssociatedPress", package = "topicmodels")
ctm <- CTM(AssociatedPress[1:20,], k = 2)
AssociatedPress[1:20,]
class (topic.model)
# Load the beer word-frequency data and collect words seen more than once.
load(file.path(input, all.beers.names.only[1]))
current.words.l8 <- Reduce(function(agg, df) union(agg, df$word[df$count > 1]), word.freqs, NULL) # count > 1 is a simple (though probably overly aggressive) way to remove weird words
# Round-trip the word list through a text file and build a corpus from it.
write(words_everything, file="words_everything.txt")
textdoc_data=read.table("words_everything.txt")
termFreq(textdoc_data)
class(textdoc_data)
corpus <- Corpus(VectorSource(textdoc_data))
# Document-term matrix with stemming, stopword removal and cleanup.
JSS_dtm <- DocumentTermMatrix(corpus,control = list(stemming = TRUE,
stopwords = TRUE, minWordLength = 3,removeNumbers = TRUE, removePunctuation = TRUE))
dim (JSS_dtm)
class(JSS_dtm)
#[1] "DocumentTermMatrix" "simple_triplet_matrix"
# NOTE(review): this assignment shadows the CTM() function with its result.
CTM = CTM(JSS_dtm, k = 30, control = list(seed = 2010,
var = list(tol = 10^-4), em = list(tol = 10^-3)))
# Fit four topic models with k topics under a fixed seed: two LDA variants
# (variational EM, with alpha estimated vs fixed), Gibbs-sampled LDA, and a
# correlated topic model.
k <- 104
SEED <- 2010
jss_TM <- list(
VEM = LDA(JSS_dtm, k = k, control = list(seed = SEED)),
VEM_fixed = LDA(JSS_dtm, k = k, control = list(estimate.alpha = FALSE,
seed = SEED)),
Gibbs = LDA(JSS_dtm, k = k, method = "Gibbs", control = list(
seed = SEED, burnin = 1000, thin = 100, iter = 1000)),
CTM = CTM(JSS_dtm, k = k, control = list(seed = SEED,
var = list(tol = 10^-4), em = list(tol = 10^-3))))
# Compare the estimated alpha of the two VEM fits.
sapply(jss_TM[1:2], slot, "alpha")
# VEM VEM_fixed
#625.5746889 0.4807692
#These are the alpha scores; the lower the alpha, the higher is the percentage of
#documents which are assigned to one single topic with a high probability
# Mean entropy of the per-document topic distributions for each model.
sapply(jss_TM, function(x) mean(apply(posterior(x)$topics,
1, function(z) - sum(z * log(z)))))
#VEM VEM_fixed Gibbs CTM
#4.644391 4.644383 4.519253 4.644379
#Higher values indicate topic distributions are spread out more evenly
# Most likely topic per document, and the top terms per topic.
Topic <- topics(jss_TM[["VEM"]], 1)
#estimated topic
#The five most frequent terms for each topic are obtained by
Terms <- terms(jss_TM[["VEM"]], 5)
Terms[, 1:5]
#Topic 1 Topic 2 Topic 3 Topic 4 Topic 5
#[1,] "abv" "overal" "overal" "this" "overal"
#[2,] "overal" "abv" "pour" "abv" "this"
#[3,] "drinkabl" "drinkabl" "pack" "pour" "pour"
#[4,] "pack" "pack" "bottl" "drinkabl" "this"
#[5,] "bottl" "pour" "drinkabl" "bottl" "drinkabl"
|
8e8a12952b357f76e4c37d6d7f4c34301b08c5ac
|
adae033f51b175132ddb350b136b0636b54159b2
|
/plot4.R
|
7adc08a5820bc7d04f1a6abd81dd4a0b515c482a
|
[] |
no_license
|
jvondie/ExData_Plotting1
|
cc29e8d5729fc7e8171523ae38fefed4e58f924d
|
4b1853ba4f89921740e459fbb34dd40b8e88f3b0
|
refs/heads/master
| 2020-12-26T03:00:00.347597
| 2016-03-14T20:35:03
| 2016-03-14T20:35:03
| 53,168,233
| 0
| 0
| null | 2016-03-04T22:00:17
| 2016-03-04T22:00:17
| null |
UTF-8
|
R
| false
| false
| 1,567
|
r
|
plot4.R
|
# Plot 4: 2x2 panel of household power consumption for 2007-02-01/02.
# Download and unzip the data set only if it is not already present.
if(!file.exists("household_power_consumption.txt")) {
  temp <- tempfile()
  download.file("http://d396qusza40orc.cloudfront.net/exdata%2Fdata%2Fhousehold_power_consumption.zip",temp)
  file <- unzip(temp)
  unlink(temp)
}

HHPWR <- read.table("household_power_consumption.txt", sep=";", header=T)

# Keep only the two days of interest, then build a POSIXct timestamp.
twoday <- HHPWR[(HHPWR$Date == "1/2/2007")| (HHPWR$Date=="2/2/2007"),]
twoday$Date <- as.Date(twoday$Date, format="%d/%m/%Y")
# BUG FIX: the original passed a stray '"%d/%m/%Y %H:%M:%S"' argument to
# transform(), which silently added a junk constant column. paste(Date,
# Time) is already in the default "%Y-%m-%d H:M:S" form as.POSIXct parses.
twoday <- transform(twoday, timestamp=as.POSIXct(paste(Date, Time)))

# Columns were read with "?" missing markers, so coerce via character to
# get numerics with NA where readings are missing.
twoday$Sub_metering_1 <- as.numeric(as.character(twoday$Sub_metering_1))
twoday$Sub_metering_2 <- as.numeric(as.character(twoday$Sub_metering_2))
twoday$Sub_metering_3 <- as.numeric(as.character(twoday$Sub_metering_3))

# 2x2 panel: active power, voltage, sub-metering, reactive power.
par(mfrow=c(2,2))
plot(twoday$timestamp, as.numeric(as.character(twoday$Global_active_power)),type="l", xlab = "", ylab ="GLobal Active Power (kilowatts)")
plot(twoday$timestamp, as.numeric(as.character(twoday$Voltage)),type="l", xlab = "datetime", ylab ="Voltage")
plot(twoday$timestamp,twoday$Sub_metering_1, type="l", xlab="", ylab="Energy sub metering")
lines(twoday$timestamp,twoday$Sub_metering_2,col="red")
lines(twoday$timestamp,twoday$Sub_metering_3,col="blue")
legend("topright", col=c("black","red","blue"), c("Sub_metering_1 ","Sub_metering_2 ", "Sub_metering_3 "),lty=c(1,1), lwd=c(1,1))
plot(twoday$timestamp,twoday$Global_reactive_power, type="l", xlab="datetime", ylab="Global_reactive_power")

# Copy the on-screen panel to PNG, then close the PNG device.
dev.copy(png, file="plot4.png", width=480, height = 480)
dev.off()
|
ec4a8a93f619de5bf816f0b9f1d46c1f93f94379
|
da725622bc962b639e1eb6df535b433e4366bcc5
|
/shinyOccupationAndEducation/ui.R
|
500aecfa98f57ba69d2465aae3f8e38e340c4d83
|
[] |
no_license
|
bekahdevore/rKW
|
5649a24e803b88aa51a3e64020b232a23bd459fa
|
970dcf8dc93d4ec0e5e6a79552e27ddc0f850b91
|
refs/heads/master
| 2020-04-15T12:41:49.567456
| 2017-07-25T16:29:31
| 2017-07-25T16:29:31
| 63,880,311
| 0
| 1
| null | null | null | null |
UTF-8
|
R
| false
| false
| 421
|
r
|
ui.R
|
# This is the user-interface definition of a Shiny web application.
# You can find out more about building applications with Shiny here:
#
# http://shiny.rstudio.com
#
library(shiny)
shinyUI(
  fluidPage(
    # Application title shown at the top of the page.
    titlePanel("Occupation and Education Louisville MSA"),
    # Main panel holding the server-rendered data table.
    mainPanel(
      dataTableOutput("dataTable")
    )
  )
)
|
e30ba71390dbfd0dbb769ca7519c4809515a1942
|
0f0c82255b1fe732c2fccdf5d0132c7b6d13603b
|
/plot3.R
|
0797d041906955a3526daf1b2862e083a608a767
|
[] |
no_license
|
dutchminator/ExData_Plotting1
|
833637532c674e4f7610565bb105791af84ca55a
|
e1b3ecd61357aef46222338b64424c1debb12c2a
|
refs/heads/master
| 2021-01-12T12:32:40.584602
| 2016-11-01T15:53:52
| 2016-11-01T15:53:52
| 72,544,217
| 0
| 0
| null | 2016-11-01T14:30:08
| 2016-11-01T14:30:08
| null |
UTF-8
|
R
| false
| false
| 1,408
|
r
|
plot3.R
|
### Preparation
setwd("~/ExploratoryDataAnalysis/Exploratory Data Analysis/Week 1")
library(dplyr)
library(lubridate)

### 0. Properly read the source files
url <- "https://d396qusza40orc.cloudfront.net/exdata%2Fdata%2Fhousehold_power_consumption.zip"
# Download and unzip only when the extracted file is not already present
# (the original re-downloaded the 20MB archive on every run).
if (!file.exists("household_power_consumption.txt")) {
  download.file(url, destfile = "Dataset.zip")
  unzip("Dataset.zip")
}
# tbl_df() is defunct in current dplyr; as_tibble() is the replacement.
data <- as_tibble(read.csv("household_power_consumption.txt", header = TRUE, sep = ";"))

### 1. Subset to only two days: 2007-02-01 and 2007-02-02
# Transform the Date column into a proper Date type
data <- mutate(data, Date = dmy(Date))
# Filter to the selected two-day period
period <- interval(ymd("2007-02-01"), ymd("2007-02-02"))
data <- filter(data, Date %within% period)
rm(period) # Cleanup
# Give columns a proper data type. mutate_each()/funs() are defunct in
# current dplyr; across() is the supported equivalent of the original
# "everything but Date to character, then everything but Date/Time to
# numeric" conversion.
data <- data %>%
  mutate(across(-Date, as.character)) %>%
  mutate(across(-c(Date, Time), as.numeric))
# Introduce combined datetime column
data <- mutate(data, datetime = ymd_hms(paste(Date, Time)))

### Plots
# Plot3: the three energy sub-metering series over the two-day window.
with(data, plot(datetime, Sub_metering_1, type = "l", xlab = "", ylab = "Energy sub metering", col = "black"))
with(data, lines(datetime, Sub_metering_2, col = "red"))
with(data, lines(datetime, Sub_metering_3, col = "blue"))
legend("topright", lty = 1, lwd = 1, legend = c("Sub_metering_1", "Sub_metering_2", "Sub_metering_3"), col = c("black", "red", "blue"))
dev.copy(png, file = "plot3.png") # write to png, default is 480x480 on white background
dev.off()
|
fa363cf5d25673abc224de01c63b0402f30adfc4
|
260a135f52304bd38e787ac89da3caad9ebbbb30
|
/tests/test-transpose.R
|
bb5b910dab2386afc8982b82d0a0ecde493bd81f
|
[] |
no_license
|
bryanyang0528/FeatureHashing
|
1acc0cc02b5a6646b50aa2628d099292fd6b09a8
|
86d13a65279f337007ea0552c6b91dc808d57099
|
refs/heads/master
| 2021-01-16T22:39:08.515892
| 2015-02-07T16:45:47
| 2015-02-07T16:45:47
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 347
|
r
|
test-transpose.R
|
# Run the transpose check only when RUnit is available (legitimate use of
# require() for an optional test dependency).
if (require(RUnit)) {
  library(methods)
  library(FeatureHashing)

  # m1: hashed model matrix with default settings; m2: the same matrix with
  # the fourth positional flag FALSE -- presumably the transpose option, per
  # this file's name -- so m2 should equal t(m1).
  m1 <- as(hashed.model.matrix(~ ., CO2, 2^5), "dgCMatrix")
  # Use TRUE/FALSE rather than the reassignable F alias.
  m2 <- as(hashed.model.matrix(~ ., CO2, 2^5, FALSE), "dgCMatrix")
  m1.1 <- as(m1, "matrix")
  m2.1 <- as(m2, "matrix")

  checkTrue(all(t(m1.1) == m2.1),
            "The transpose argument produces incorrect result")
}
|
db32a2a4c440ac99fda441209345cf163f4ac7aa
|
a1e0d63b32fc0c8ffed6beb6f9b9bb84ca72849f
|
/R/sizelimit_route.R
|
9730d9001fafc6ab1ed74d54df13424c06c6a76e
|
[] |
no_license
|
cran/routr
|
bdb84c9c56fa6546debbb98061f08fc1f9bdc2fb
|
c828c7b2eeecdd424ffb75847c785389f23d77aa
|
refs/heads/master
| 2022-08-30T18:25:19.148300
| 2022-08-19T12:40:05
| 2022-08-19T12:40:05
| 101,089,466
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,878
|
r
|
sizelimit_route.R
|
#' Limit the size of requests
#'
#' This route is meant for being called prior to retrieving of the request body.
#' It inspects the `Content-Length` header and determines if the request should
#' be allowed to proceed. The limit can be made variable by supplying a function
#' to the `limit` argument returning a numeric. If the `Content-Length` header
#' is missing and the limit is not `Inf` the response will be set to
#' `411 - Length Required` (with an infinite limit a missing header is
#' accepted). If the header exists but exceeds the limit the
#' response will be set to `413 - Request Entity Too Large`. Otherwise the route
#' will return `TRUE` and leave the response unchanged.
#'
#' @param limit Either a numeric or a function returning a numeric when called
#' with the request
#'
#' @return `TRUE` if the request are allowed to proceed, or `FALSE` if it should
#' be terminated
#'
#' @importFrom assertthat assert_that has_args
#' @export
#'
#' @family Route constructors
#'
#' @examples
#' limit_route <- sizelimit_route() # Default 5Mb limit
#' rook <- fiery::fake_request('http://www.example.com', 'post',
#'   headers = list(Content_Length = 30*1024^2))
#' req <- reqres::Request$new(rook)
#' limit_route$dispatch(req)
#' req$respond()
#'
sizelimit_route <- function(limit = 5*1024^2) {
  assert_that(
    is.numeric(limit) ||
      (is.function(limit) && has_args(limit, 'request', TRUE))
  )
  route <- Route$new()
  route$add_handler('all', '*', function(request, response, keys, ...) {
    # Resolve a functional limit against the current request.
    if (is.function(limit)) {
      limit <- limit(request)
      assert_that(is.numeric(limit))
    }
    req_length <- request$get_header('Content-Length')
    if (is.null(req_length)) {
      # BUG FIX: previously a missing header combined with limit == Inf fell
      # through to `as.numeric(NULL) > limit`, a zero-length condition that
      # errors. With no effective limit, a missing header is acceptable.
      if (limit < Inf) {
        response$status_with_text(411L)
        FALSE
      } else {
        TRUE
      }
    } else if (as.numeric(req_length) > limit) {
      response$status_with_text(413L)
      FALSE
    } else {
      TRUE
    }
  })
  route
}
|
84e2372ba7c6910e9014c502758b17c8fc78c03d
|
2e5bcb3c8028ea4bd4735c4856fef7d6e46b5a89
|
/man/AffineSnpPlm.Rd
|
766909f40b602bf7b9dc8592f6613df9312de759
|
[] |
no_license
|
HenrikBengtsson/aroma.affymetrix
|
a185d1ef3fb2d9ee233845c0ae04736542bb277d
|
b6bf76f3bb49474428d0bf5b627f5a17101fd2ed
|
refs/heads/master
| 2023-04-09T13:18:19.693935
| 2022-07-18T10:52:06
| 2022-07-18T10:52:06
| 20,847,056
| 9
| 4
| null | 2018-04-06T22:26:33
| 2014-06-15T03:10:59
|
R
|
UTF-8
|
R
| false
| false
| 3,247
|
rd
|
AffineSnpPlm.Rd
|
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% Do not modify this file since it was automatically generated from:
%
% AffineSnpPlm.R
%
% by the Rdoc compiler part of the R.oo package.
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
\name{AffineSnpPlm}
\docType{class}
\alias{AffineSnpPlm}
\title{The AffineSnpPlm class}
\description{
Package: aroma.affymetrix \cr
\bold{Class AffineSnpPlm}\cr
\code{\link[R.oo]{Object}}\cr
\code{~~|}\cr
\code{~~+--}\code{\link[aroma.core]{ParametersInterface}}\cr
\code{~~~~~~~|}\cr
\code{~~~~~~~+--}\code{\link[aroma.affymetrix]{Model}}\cr
\code{~~~~~~~~~~~~|}\cr
\code{~~~~~~~~~~~~+--}\code{\link[aroma.affymetrix]{UnitModel}}\cr
\code{~~~~~~~~~~~~~~~~~|}\cr
\code{~~~~~~~~~~~~~~~~~+--}\code{\link[aroma.affymetrix]{MultiArrayUnitModel}}\cr
\code{~~~~~~~~~~~~~~~~~~~~~~|}\cr
\code{~~~~~~~~~~~~~~~~~~~~~~+--}\code{\link[aroma.affymetrix]{ProbeLevelModel}}\cr
\code{~~~~~~~~~~~~~~~~~~~~~~~~~~~|}\cr
\code{~~~~~~~~~~~~~~~~~~~~~~~~~~~+--}\code{\link[aroma.affymetrix]{AffinePlm}}\cr
\code{~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~|}\cr
\code{~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~+--}\code{\link[aroma.affymetrix]{SnpPlm}}\cr
\code{~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~|}\cr
\code{~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~+--}\emph{\code{AffineSnpPlm}}\cr
\bold{Directly known subclasses:}\cr
\emph{\link[aroma.affymetrix]{AffineCnPlm}}\cr
public abstract static class \bold{AffineSnpPlm}\cr
extends \link[aroma.affymetrix]{SnpPlm}\cr
}
\usage{
AffineSnpPlm(..., mergeStrands=FALSE)
}
\arguments{
\item{...}{Arguments passed to \code{\link{AffinePlm}}.}
\item{mergeStrands}{If \code{\link[base:logical]{TRUE}}, the sense and the anti-sense strands are
fitted together, otherwise separately.}
}
\section{Fields and Methods}{
\bold{Methods:}\cr
\emph{No methods defined}.
\bold{Methods inherited from SnpPlm}:\cr
getCellIndices, getChipEffectSet, getMergeStrands, getParameters, getProbeAffinityFile, setMergeStrands
\bold{Methods inherited from AffinePlm}:\cr
getAsteriskTags, getFitUnitGroupFunction, getProbeAffinityFile
\bold{Methods inherited from ProbeLevelModel}:\cr
calculateResidualSet, calculateWeights, fit, getAsteriskTags, getCalculateResidualsFunction, getChipEffectSet, getProbeAffinityFile, getResidualSet, getRootPath, getWeightsSet
\bold{Methods inherited from MultiArrayUnitModel}:\cr
getListOfPriors, setListOfPriors, validate
\bold{Methods inherited from UnitModel}:\cr
findUnitsTodo, getAsteriskTags, getFitSingleCellUnitFunction, getParameters
\bold{Methods inherited from Model}:\cr
as.character, fit, getAlias, getAsteriskTags, getDataSet, getFullName, getName, getPath, getRootPath, getTags, setAlias, setTags
\bold{Methods inherited from ParametersInterface}:\cr
getParameterSets, getParameters, getParametersAsString
\bold{Methods inherited from Object}:\cr
$, $<-, [[, [[<-, as.character, attach, attachLocally, clearCache, clearLookupCache, clone, detach, equals, extend, finalize, getEnvironment, getFieldModifier, getFieldModifiers, getFields, getInstantiationTime, getStaticInstance, hasField, hashCode, ll, load, names, objectSize, print, save, asThis
}
\author{Henrik Bengtsson}
\keyword{classes}
|
e25a749fbd295ce9e1535558ae4581b7dc822794
|
3dceafc09ae75114e193d24a7af14abaf684e84f
|
/best.R
|
21d36642ae39100f388685d4730d48aa5d308fea
|
[] |
no_license
|
codeBehindMe/ProgAssig3
|
298aabdf1193f9d99743d304a7a0487f57b99756
|
d4a6f91b98e08025d3c3aa154d6ca19070efed88
|
refs/heads/master
| 2021-01-15T11:18:35.120812
| 2015-05-20T06:17:16
| 2015-05-20T06:17:16
| 35,912,466
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,515
|
r
|
best.R
|
# Find the "best" (lowest 30-day mortality) hospital in a state for a
# particular outcome, using the Hospital Compare data set.
#
# Args:
#   state:   2-letter abbreviated state name (e.g. "TX").
#   outcome: one of "heart attack", "heart failure", "pneumonia".
# Returns: the name of the hospital with the lowest mortality rate for
#   that outcome in that state (ties broken alphabetically by name).
best <- function(state, outcome) {
  # NOTE(review): hard-coded absolute paths make this non-portable; kept to
  # preserve the original behavior, but consider passing the data directory
  # as an argument instead of calling setwd() inside a function.
  setwd("C:/Users/HAZARDAZ/Documents/Data Science/Assignment 3/rprog-data-ProgAssignment3-data")
  # Read everything as character; the mortality column is converted below.
  df_outcome <- read.csv("outcome-of-care-measures.csv", colClasses = "character")
  # Validate the state argument (column 7 holds the state abbreviation).
  # Vectorized %in% replaces the original row-by-row for loop.
  if (!(state %in% df_outcome[, 7])) stop("invalid state")
  # Map each supported outcome to its 30-day mortality-rate column number.
  c_outcomes <- list("heart attack" = 11, "heart failure" = 17, "pneumonia" = 23)
  if (!(outcome %in% names(c_outcomes))) stop("invalid outcome")
  v_colOfInt <- c_outcomes[[outcome]]
  # Keep only hospital name (col 2), state (col 7) and the chosen rate column.
  df_outOfInt <- subset(df_outcome, State == state, select = c(2, 7, v_colOfInt))
  # Convert the rate to numeric; "Not Available" becomes NA (with a warning,
  # as in the original code).
  df_outOfInt[, 3] <- as.numeric(df_outOfInt[, 3])
  # Drop incomplete rows, then sort by rate (ascending) and hospital name
  # (ascending) so ties are broken alphabetically.
  df_outOfInt <- df_outOfInt[complete.cases(df_outOfInt), ]
  df_outOfInt <- df_outOfInt[order(df_outOfInt[, 3], df_outOfInt[, 1]), ]
  top <- as.vector(df_outOfInt[1, 1])
  # Restore the project working directory (original behavior preserved).
  setwd("C:/Users/HAZARDAZ/Documents/Data Science/Assignment 3/rprog-data-ProgAssignment3-data/ProgAssig3")
  return(top)
}
|
45a9a3dc2c951cae5612f355353850161f1d6ada
|
4b1ead961f8ff8cfe1860f19e1389ac6fcbcf85c
|
/man/is.paths.Rd
|
bd693c529e2965b504d69e3131e9ffd3c363a751
|
[] |
no_license
|
bsmity13/ADePTR
|
b301275e04ae2aa2f1372cdf73e6acd1d3b20a2b
|
a843c6bc7359dd5349c852ffc6cfbb9017be453b
|
refs/heads/master
| 2020-04-19T06:25:25.175586
| 2019-11-08T17:44:56
| 2019-11-08T17:44:56
| 168,017,224
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 359
|
rd
|
is.paths.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/s5_interpolate_animate.R
\name{is.paths}
\alias{is.paths}
\title{Check if object is of class \code{*_paths}.}
\usage{
is.paths(x)
}
\description{
Convenience function that checks is object is of class \code{*_paths}.
Currently must be either \code{str_paths} or \code{lc_paths}.
}
|
1aff57fe4142278828f8b5ee7c71ea626c972cff
|
0dfb4f501eaaf6d78ea7c7b9efd2ef17337b4cc6
|
/Plotting Stuff.R
|
8c7df1701ded00fe9151491dc402d5a0ef584f80
|
[
"BSD-2-Clause"
] |
permissive
|
hiendn/SCOTS-MAMM
|
cc9ceb3e6e30c930c9b8b35155544dd30e7fa49d
|
5f3d8516528a7568ab87ce3b5a422eff6542be38
|
refs/heads/master
| 2020-04-09T13:33:54.110781
| 2016-06-02T05:48:50
| 2016-06-02T05:48:50
| 60,233,700
| 0
| 1
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,463
|
r
|
Plotting Stuff.R
|
## Copyright Hien Duy Nguyen - University of Queensland 2016/06/02
## Analysis/plotting script. Depends on objects created elsewhere:
## MASK (512x512 0/1 matrix), YY (list of time series, one per masked pixel),
## TAU (posterior membership matrix), TABLE, YMEAN -- none defined here;
## this script must be run after the script that builds them.
## Step 1: walk the mask in column order; for each masked pixel, resample any
## values below -5.5 from the remaining values until none are left below -5.5.
COUNT <- 0
for (ii in 1:512) {
for (jj in 1:512) {
if (MASK[ii,jj]==1) {
COUNT <- COUNT + 1
while (min(YY[[COUNT]])< -5.5) {
YY[[COUNT]][which(YY[[COUNT]]< -5.5)] <- YY[[COUNT]][sample(which(YY[[COUNT]] >= -5.5),length(which(YY[[COUNT]]< -5.5)))]
}
}
}
}
## Step 2: hard-assign each pixel to the cluster with the largest TAU weight,
## then plot per-cluster median and 2.5%/97.5% quantile bands over time
## (time points 10..490) in a 3x6 panel layout.
CLUSTERING <- apply(TAU,1,which.max)
par(mfrow=c(3,6))
for (gg in 1:17)
{
WHICH <- which(CLUSTERING==gg)
HOLDER <- matrix(NA,length(WHICH),481)
for (ii in 1:length(WHICH)) {
HOLDER[ii,] <- YY[[WHICH[ii]]][10:490]
}
MED <- apply(HOLDER,2,quantile,0.5)
UPPER <- apply(HOLDER,2,quantile,0.975)
LOWER <- apply(HOLDER,2,quantile,0.025)
UNLIST <- unlist(HOLDER)
plot(seq(min(UNLIST),max(UNLIST),length.out=500),main=paste('Cluster',gg),ylab='Y',xlab='',type='n')
lines(10:490,MED,col='black')
lines(10:490,UPPER,col='blue')
lines(10:490,LOWER,col='blue')
}
## Step 3: heat-map of TABLE over a triangular (g, p) region; cells with
## ii + jj > 26 are left NA so only the lower triangle is drawn.
library(fields)
TABLE_PLOT <- matrix(NA,25,20)
for (ii in 1:25) {
for (jj in 1:20) {
if (ii + jj <= 26) {
TABLE_PLOT[ii,jj] <- TABLE[ii,jj]
}
}
}
image.plot(1:25,1:20,TABLE_PLOT,xlab='g',ylab='p',col=tim.colors(10000))
# Highlight the selected (g, p) = (17, 3) point on the heat map.
points(17,3,pch=20,col='white',cex=2)
## Put CLUSTER Back Into original Image
## Step 4: scatter the per-pixel means (YMEAN) back into the 512x512 grid,
## using the same mask traversal order as step 1, and plot the image.
COUNT <- 0
SMOOTH <- matrix(NA,512,512)
for (ii in 1:512) {
for (jj in 1:512) {
if (MASK[ii,jj]==1) {
COUNT <- COUNT + 1
SMOOTH[ii,jj] <- YMEAN[COUNT]
}
}
}
image.plot(SMOOTH)
|
39fc98efd94f58fad9603d7e49220e8ce01387ec
|
51ea7fcb2ca10bdf710edf31a1f3be5d17619931
|
/R/test.R
|
bd407a2a9717d50b8b80c44153b850ac50406cc4
|
[] |
no_license
|
jacobkap/crimedatatool_helper
|
534e65182ae69c536938c8552448494b82b421ef
|
ca1ffade6c3f3c1173ed608ef40e4e4c3f729fba
|
refs/heads/master
| 2023-04-08T08:19:53.891205
| 2023-04-04T21:44:49
| 2023-04-04T21:44:49
| 134,924,347
| 1
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,084
|
r
|
test.R
|
# Grab a single agency's records for interactive testing.
# Assumes hate_crimes is loaded in the workspace -- not defined in this file.
data <- hate_crimes[hate_crimes$ORI %in% "AK0010100", ]
# For each year that already appears in `data`, add rows for any missing
# months, zero-filling all count columns and carrying the year's population
# value onto the new rows.
#
# Relies on two helpers defined elsewhere in this project:
#   dummy_rows_missing_years() -- presumably inserts placeholder rows;
#   make_all_0()               -- presumably zeroes a column.
# TODO(review): confirm their exact contracts against their definitions.
add_missing_months_for_years_available <- function(data) {
# Remember which years were present before padding, so we can drop any
# dummy rows that fall outside them.
original_years <- unique(lubridate::year(data$year))
data$year <- as.character(data$year)
data <- dummy_rows_missing_years(data, type = "month") %>%
dplyr::mutate(data_year = ymd(year),
data_year = lubridate::year(data_year)) %>%
dplyr::filter(data_year %in% original_years) %>%
dplyr::arrange(desc(year))
# Zero every column except the identifying/bookkeeping ones listed below.
data <-
data %>%
dplyr::mutate_at(vars(-one_of("year",
"agency",
"state",
"ORI",
"population",
"data_year")),
make_all_0)
# Copy each year's (single, non-NA) population value onto all of that
# year's rows, including the freshly added dummy months.
for (selected_year in original_years) {
population_val <- unique(data$population[data$data_year %in% selected_year])
population_val <- population_val[!is.na(population_val)]
data$population[data$data_year %in% selected_year] <- population_val
}
# Drop the temporary helper column and restore Date-typed years.
data$data_year <- NULL
data$year <- ymd(data$year)
return(data)
}
# Exploratory check: sum every "*total" column of hate_crimes by year and
# eyeball the yearly totals for a set of bias-motivation categories
# (spot-checking when each category starts appearing in the data).
z <-
hate_crimes %>%
dplyr::select(year,
matches("total")) %>%
dplyr::group_by(year) %>%
dplyr::summarize_all(sum)
z <- as.data.frame(z)
head(z)
# Print year alongside each category's yearly total for manual inspection.
cbind(z$year, z$anti_arab_total)
cbind(z$year, z$anti_transgender_total)
cbind(z$year, z$anti_hispanic_total)
cbind(z$year, z$anti_not_hispanic_total)
cbind(z$year, z$anti_male_homosexual_gay_total)
cbind(z$year, z$anti_female_homosexual_lesbian_total)
cbind(z$year, z$anti_transgender_total)
cbind(z$year, z$anti_bisexual_total)
cbind(z$year, z$anti_gender_non_conforming_total)
cbind(z$year, z$anti_homosexual_gay_and_lesbian_total)
cbind(z$year, z$anti_lesbian_gay_bisexual_or_transgender_mixed_group_lgbt_total)
# Side-by-side comparison of the related sexual-orientation categories.
cbind(z$year,
z$anti_male_homosexual_gay_total,
z$anti_female_homosexual_lesbian_total,
z$anti_homosexual_gay_and_lesbian_total,
z$anti_lesbian_gay_bisexual_or_transgender_mixed_group_lgbt_total)
|
c7be56a59739c47d7ccb356b81d8d548319684c6
|
ffdea92d4315e4363dd4ae673a1a6adf82a761b5
|
/data/genthat_extracted_code/urltools/examples/param_remove.Rd.R
|
2e542a564102984c312b5051619605bdbe77f38f
|
[] |
no_license
|
surayaaramli/typeRrh
|
d257ac8905c49123f4ccd4e377ee3dfc84d1636c
|
66e6996f31961bc8b9aafe1a6a6098327b66bf71
|
refs/heads/master
| 2023-05-05T04:05:31.617869
| 2019-04-25T22:10:06
| 2019-04-25T22:10:06
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 320
|
r
|
param_remove.Rd.R
|
# Extracted example for urltools::param_remove (auto-generated example file).
library(urltools)
### Name: param_remove
### Title: Remove key-value pairs from query strings
### Aliases: param_remove
### ** Examples
# Remove multiple parameters from a URL
# Strips the "action" and "format" query-string keys from the URL.
param_remove(urls = "https://en.wikipedia.org/wiki/api.php?action=list&type=query&format=json",
keys = c("action","format"))
|
5323f66368e83f77a8b3fc22501920e5a0059c40
|
518edc72134df5ede965e4fc2bfcaa37791456c9
|
/file1.R
|
e8b66683d5ea53667c5882155982b283db22eb03
|
[] |
no_license
|
swati1596/analytics1
|
ea991446df91a3be2ae33c34790cb96ed4bc1efb
|
10065136b83154b77a10a453a85ffb052f8a9352
|
refs/heads/master
| 2020-03-26T16:20:20.053151
| 2018-08-17T11:03:15
| 2018-08-17T11:03:15
| 145,095,199
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 104
|
r
|
file1.R
|
# Minimal scratch script: print the built-in mtcars dataset.
mtcars
# The two lines below are shell (git) commands, not R code; as written they
# were a syntax error when this file was sourced. They belong in a terminal,
# so they are kept here only as comments:
# git config --global user.email "dash.swati36@gmail.com"
# git config --global user.name "swati1596"
|
c7002eaa0c32548c4ae9997dbaa2d1289a65179a
|
01974b6e1b34adc1f2ce9b0530d78dc3def3e553
|
/tests/testthat/test-thisfile.R
|
4f48ab80eb891709b148e24014a2048668b68afe
|
[] |
no_license
|
krlmlr/kimisc
|
e518fcd318ae1a86fa23e2a4713847efbf85f011
|
d85e30b10b5baf411f93ca70dd4210e26010211d
|
refs/heads/master
| 2021-01-23T03:27:30.024493
| 2017-12-18T19:46:27
| 2017-12-18T19:46:27
| 7,799,871
| 16
| 1
| null | 2017-10-16T19:49:11
| 2013-01-24T14:48:17
|
R
|
UTF-8
|
R
| false
| false
| 379
|
r
|
test-thisfile.R
|
context("thisfile")
# Sourcing the helper script should report the script's own path.
test_that("thisfile works with source", {
  sourced <- source("scripts/thisfile.R")
  reported_path <- sourced$value
  expect_true(grepl("thisfile.R$", reported_path))
})
# Runs the helper script through Rscript and checks the path it prints.
# Currently skipped because of R CMD check incompatibility.
test_that("thisfile works with Rscript", {
  skip("Doesn't seem to work for R CMD check")
  conn <- pipe("Rscript scripts/thisfile-cat.R")
  on.exit(close(conn))
  output <- readLines(conn)
  expect_equal("scripts/thisfile-cat.R", output)
})
|
6afbf0b5d8483b80783bd45c5c17f7b39ff584e6
|
9aafde089eb3d8bba05aec912e61fbd9fb84bd49
|
/codeml_files/newick_trees_processed/1842_37/rinput.R
|
80f2e992e6fcceb9fb1e28c2134845b9b9ac3c9c
|
[] |
no_license
|
DaniBoo/cyanobacteria_project
|
6a816bb0ccf285842b61bfd3612c176f5877a1fb
|
be08ff723284b0c38f9c758d3e250c664bbfbf3b
|
refs/heads/master
| 2021-01-25T05:28:00.686474
| 2013-03-23T15:09:39
| 2013-03-23T15:09:39
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 137
|
r
|
rinput.R
|
# Read a phylogenetic tree (Newick format), remove its root, and write the
# unrooted tree back out -- preprocessing for a codeml (PAML) run.
library(ape)
testtree <- read.tree("1842_37.txt")
unrooted_tr <- unroot(testtree)
write.tree(unrooted_tr, file="1842_37_unrooted.txt")
|
22ec3b6f70823b931c46e8cec08c8a39f502feb2
|
d576b8f5fa3845727f4bc80bb8ce465e9ccb25a7
|
/man/IntelligenceData.Rd
|
2d01b80f4e637f84c38790ad82fd345a0ca7b270
|
[] |
no_license
|
jorgetendeiro/PerFit
|
6164c0abe12a4a1384ad7dec33b1dba8cc91686f
|
c741acec820cb05f694b5fa9f57bf16b83a26c51
|
refs/heads/master
| 2021-10-23T04:49:20.743552
| 2021-10-15T07:18:34
| 2021-10-15T07:18:34
| 235,191,533
| 1
| 2
| null | null | null | null |
UTF-8
|
R
| false
| false
| 705
|
rd
|
IntelligenceData.Rd
|
\name{Intelligence data}
\alias{IntelligenceData}
\docType{data}
\title{Intelligence data (number completion)}
\description{
The data are dichotomous scores of a Dutch intelligence test on number completion (Dutch: "Cijferreeksen", Drenth and Hoolwerf, 1970). The file consists of archival data that were collected in a high-stakes personnel selection context around 1990.
}
\usage{data(IntelligenceData)}
\format{A 1000x26 matrix of dichotomous item scores.}
\references{
Drenth, P. J. D., and Hoolwerf, G. (1970) \emph{Numerieke aanleg test - Cijferreeksen (NAT-Cijferreeksen)} [Numerical ability test]. Amsterdam: The Netherlands.
}
\examples{data(IntelligenceData)}
\keyword{datasets}
|
f6bd06f244a625dc5f3ce15886fb9dc09279da49
|
007c469e385ed6aad68fb99822ec183d47c6ff1f
|
/man/custom_stopwords.Rd
|
514c1f7eb0b83538c402d3d9ce83b990573dc1fa
|
[] |
no_license
|
libiner/petro.One
|
9f7eb7af91a97c68fe4a93fef67901e75c0fb010
|
fc44acdb0ca01891064cfd6f855e80cd9347f88d
|
refs/heads/master
| 2020-05-30T23:47:10.566906
| 2019-01-13T06:30:59
| 2019-01-13T06:30:59
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 630
|
rd
|
custom_stopwords.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/petro.One-package.r
\docType{data}
\name{custom_stopwords}
\alias{custom_stopwords}
\title{Default custom stop words}
\format{An object of class \code{NULL} of length 0.}
\usage{
custom_stopwords
}
\description{
This is a minimal dataset of custom stopwords.
You can supply your own stopwords by editing the file stopwords.txt under
`extdata` and then importing it.
The provided dataset is a basic way to start and eliminate
common words from the paper titles during classification.
Dataset: stopwords.rda
Source: stopwords.txt
}
\keyword{datasets}
|
0a3e6c6530e7e9a32cb3b925eacc70c204a7228c
|
2c643c04d34af216f18c951f6019af74cd70cdff
|
/cachematrix.R
|
c3ba6d6562489ce26a8b12a51d6cf257f72f122f
|
[] |
no_license
|
gutidaniel/ProgrammingAssignment2
|
fa0351f5b4307da1290961567b63074041967209
|
de72a732aebdc1233855d2932f7627bd98408c98
|
refs/heads/master
| 2020-12-01T01:11:39.270474
| 2016-02-29T17:02:28
| 2016-02-29T17:02:28
| 52,615,130
| 0
| 0
| null | 2016-02-26T15:54:59
| 2016-02-26T15:54:58
| null |
UTF-8
|
R
| false
| false
| 980
|
r
|
cachematrix.R
|
# Construct a caching wrapper around a matrix. Returns a list of four
# accessor functions:
#   set(y)          -- replace the stored matrix and drop any cached inverse
#   get()           -- return the stored matrix
#   setinverse(inv) -- store a computed inverse in the cache
#   getinverse()    -- return the cached inverse (NULL if not yet computed)
makeCacheMatrix <- function(x = matrix()) {
  cached_inverse <- NULL
  set <- function(y) {
    # Replacing the matrix invalidates the cached inverse.
    x <<- y
    cached_inverse <<- NULL
  }
  get <- function() x
  setinverse <- function(inverse) cached_inverse <<- inverse
  getinverse <- function() cached_inverse
  list(set = set,
       get = get,
       setinverse = setinverse,
       getinverse = getinverse)
}
# Compute the inverse of the special "matrix" built by makeCacheMatrix.
# If the inverse is already cached, emit a message and return it without
# recomputing; otherwise invert with solve() (forwarding ...) and store
# the result in the cache before returning it.
cacheSolve <- function(x, ...) {
  ## Return a matrix that is the inverse of 'x'
  cached <- x$getinverse()
  if (!is.null(cached)) {
    message("getting cached data")
    return(cached)
  }
  inv <- solve(x$get(), ...)
  x$setinverse(inv)
  inv
}
|
bb7c62eb8aceae920dcd034163c1cd6494693940
|
ffdea92d4315e4363dd4ae673a1a6adf82a761b5
|
/data/genthat_extracted_code/NTS/examples/tvARFiSm.Rd.R
|
9abfa5711d0af198eb8be9b68f60ad2de807b20d
|
[] |
no_license
|
surayaaramli/typeRrh
|
d257ac8905c49123f4ccd4e377ee3dfc84d1636c
|
66e6996f31961bc8b9aafe1a6a6098327b66bf71
|
refs/heads/master
| 2023-05-05T04:05:31.617869
| 2019-04-25T22:10:06
| 2019-04-25T22:10:06
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 306
|
r
|
tvARFiSm.Rd.R
|
# Extracted example for NTS::tvARFiSm (auto-generated example file).
library(NTS)
### Name: tvARFiSm
### Title: Filtering and Smoothing for Time-Varying AR Models
### Aliases: tvARFiSm
### ** Examples
# Simulate a length-50 series from a time-varying AR(1) whose coefficient
# itself follows an AR(1) with mean drift. No RNG seed is set, so output
# differs between runs.
t=50
x=rnorm(t)
phi1=matrix(0.4,t,1)
for (i in 2:t){
phi1[i]=0.7*phi1[i-1]+rnorm(1,0,0.1)
x[i]=phi1[i]*x[i-1]+rnorm(1)
}
# Fit a tvAR(1) model, then run filtering/smoothing on the fitted parameters.
est=tvAR(x,1)
tvARFiSm(x,1,FALSE,est$par)
|
1ba2babda0f1522a53018de26edac4eac405a11f
|
c6365a23a0f38ff83110da166444fda0ec8a2112
|
/poolOld.R
|
1a73f65889668a752eb171b80eace0d769e4ff89
|
[] |
no_license
|
keurcien/pooled-samples
|
a92fcabf0a14ece71f11d9c537daa69252651317
|
80b228f3300a123904c4e00615c6b8675807ad58
|
refs/heads/master
| 2020-06-10T16:38:26.264231
| 2016-12-16T08:22:19
| 2016-12-16T08:22:19
| 75,933,837
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,073
|
r
|
poolOld.R
|
library(pcadapt)
library(robust)
library(MASS)
# Genome-scan statistic for pooled samples, built on PCA loadings.
#
# Args:
#   input:   nPOP x nSNP matrix of pooled allele frequencies (pops in rows).
#   K:       number of principal components; defaults to nPOP - 1.
#   min.maf: SNPs with minor allele frequency below this are masked (NA).
# Returns: a 'pcadapt'-classed list with maf, stat, gif, chi2.stat, pvalues.
# Depends on corpca() and compute.pval(), defined elsewhere in this project.
pool.old <- function(input,K,min.maf=0.05){
nPOP <- nrow(input)
nSNP <- ncol(input)
if (missing(K)){
K <- nPOP-1
}
res <- corpca(input,K)
# Per-SNP minor allele frequency; mask loadings of low-MAF SNPs.
freq <- apply(input,2,FUN=function(x){mean(x,na.rm=TRUE)})
res$maf <- as.vector(pmin(freq,1-freq))
res$loadings[res$maf<min.maf] <- NA
res$stat <- array(NA,dim=nSNP)
# SNPs whose loadings contain no NA (only these get a statistic when K > 1).
finite.list <- which(!is.na(apply(abs(res$loadings),1,sum)))
if (K>1){
# Robust Mahalanobis distances of the loadings (pairwise GK estimator).
res$stat[finite.list] <- as.vector(robust::covRob(res$loadings,na.action=na.omit,estim="pairwiseGK")$dist)
} else {
# One-dimensional case: squared standardized z-scores via robust location/scale.
onedcov <- as.vector(MASS::cov.rob(res$loadings[finite.list,1]))
res$stat <- (res$zscores[,1]-onedcov$center)^2/onedcov$cov[1]
}
# Genomic inflation factor: rescale so the median matches a chi2(K) median.
res$gif <- median(res$stat,na.rm=TRUE)/qchisq(0.5,df=K)
res$chi2.stat <- res$stat/res$gif
res$pvalues <- compute.pval(res$chi2.stat,K,method="mahalanobis")
class(res) <- 'pcadapt'
attr(res,"K") <- K
return(res)
}
# Coverage-corrected variant of pool.old: frequencies are standardized by
# their binomial standard error sqrt(f(1-f)/n) using per-(pop, SNP) read
# coverage before computing robust Mahalanobis distances.
#
# Args:
#   data:         nPOP x nSNP matrix of pooled allele frequencies.
#   K:            number of principal components (no default here).
#   min.maf:      SNPs below this minor allele frequency are masked (NA).
#   cover.matrix: nPOP x nSNP read-coverage matrix; NULL default, but the
#                 body indexes it unconditionally -- TODO(review): confirm
#                 callers always supply it.
# Returns: a 'pcadapt'-classed list (see pool.old).
pool.old.corrected = function(data,K,min.maf,cover.matrix=NULL){
nSNP <- ncol(data)
nPOP <- nrow(data)
# New procedure
# Build the z-score matrix: f / se when the standard error is finite and
# positive, otherwise fall back to the raw frequency.
z.matrix <- array(0,dim=c(nPOP,nSNP))
for (k in 1:nSNP){
for (n in 1:nPOP){
n_i <- cover.matrix[n,k]
f_i <- data[n,k]
se <- sqrt(abs(f_i*(1-f_i))/n_i)
if ((!is.na(se)) && (se > 0)){
z.matrix[n,k] <- f_i/se
} else {
z.matrix[n,k] <- f_i
}
}
}
# End new procedure
res <- corpca(data=data,K=K)
# Mask low-MAF SNPs in both the loadings and the z-score matrix.
freq <- apply(data,2,FUN=function(x){mean(x,na.rm=TRUE)})
res$maf <- as.vector(pmin(freq,1-freq))
res$loadings[res$maf<min.maf] <- NA
z.matrix[,res$maf<min.maf] <- NA
res$stat <- array(NA,dim=nSNP)
finite.list <- which(!is.na(apply(abs(z.matrix),2,sum)))
# Robust Mahalanobis distances of the per-SNP z-score vectors. Note: unlike
# pool.old, no genomic-inflation rescaling is applied here.
res$stat <- as.vector(robust::covRob(t(z.matrix),na.action=na.omit,estim = "pairwiseGK")$dist)
res$chi2.stat <- res$stat
# Compute p-values
res$pvalues <- compute.pval(res$chi2.stat,K,method="mahalanobis")
class(res) <- 'pcadapt'
attr(res,"K") <- K
attr(res,"method") <- "mahalanobis"
attr(res,"data.type") <- "pool"
attr(res,"min.maf") <- min.maf
return(res)
}
|
a87e0295f990dafe6be57eca99236b8a3dff9bcd
|
d10466f9976f0f5cc2d93e0f0cd21a0a9e13c7b6
|
/EDA/demographics/00_crossswalk_geo.R
|
27a2b84bee06d659630e51a27156cb81c94e9613
|
[] |
no_license
|
nmmarquez/hispanicACS
|
4685987433a0aefa6c8240ca532c98395765f45c
|
cd6cc94fd1f0c711edc93986fbaee2f580b43ece
|
refs/heads/master
| 2020-05-04T22:50:19.702698
| 2020-03-12T02:36:07
| 2020-03-12T02:36:07
| 179,524,372
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 854
|
r
|
00_crossswalk_geo.R
|
# Build a PUMA -> MIGPUMA crosswalk by spatially intersecting the 2010 PUMA
# shapes with the 2010 migration-PUMA shapes, then save the lookup table.
# NOTE(review): rm(list = ls()) in a script wipes the user's workspace --
# generally discouraged; kept as-is.
rm(list=ls())
library(tidyverse)
library(sf)
# Residence PUMAs, excluding Puerto Rico (72), Alaska (02) and Hawaii (15).
cPUMA <- read_sf("./data/ipums_puma_2010/ipums_puma_2010.dbf") %>%
filter(!(STATEFIP %in% c("72", "02", "15"))) %>%
st_set_precision(1e5)
# Migration PUMAs with the same state exclusions (3-digit codes here).
migPUMA <- read_sf(
"./data/ipums_migpuma_pwpuma_2010/ipums_migpuma_pwpuma_2010.dbf") %>%
filter(!(MIGPLAC %in% c("072", "002", "015"))) %>%
st_set_precision(1e5)
# Pairwise geometric intersection of the two layers.
test <- st_intersection(cPUMA, migPUMA)
savecp <- which(sapply(test[1:nrow(cPUMA)], function(v) 1 %in% v))
# Keep only intersections that are actual polygons (drops slivers such as
# shared-boundary lines/points).
validShapes <- sapply(
lapply(test$geometry, class),
function(v) ("POLYGON" %in% v) | ("MULTIPOLYGON" %in% v))
cwPUMA <- test[validShapes,]
# Sanity check to make sure we found a match everywhere
nrow(cwPUMA) == nrow(cPUMA)
all(cPUMA$PUMA %in% cwPUMA$PUMA)
# Persist just the crosswalk columns.
cwPUMA %>%
as_tibble() %>%
select(PUMA, MIGPUMA, STATEFIP) %>%
saveRDS("./data/puma2migpuma.RDS")
|
afcac39fce2ac43354367a560c256b33631a9484
|
2398deea4262568f03448241eb1858f033c506b2
|
/cachematrix.R
|
3b06d8f1a5cf8798acdff9109f23f6b9f2c3b7c6
|
[] |
no_license
|
btok/ProgrammingAssignment2
|
cf13259da87dae9905009e6c1c47026d95334f6e
|
8030dc84086227c9142dbf57d5eceb388212dacb
|
refs/heads/master
| 2021-01-22T00:45:41.065944
| 2015-07-20T13:49:36
| 2015-07-20T13:49:36
| 39,378,426
| 0
| 0
| null | 2015-07-20T10:46:10
| 2015-07-20T10:46:10
| null |
UTF-8
|
R
| false
| false
| 1,503
|
r
|
cachematrix.R
|
## Creates a special "matrix": a list of closures sharing one environment,
## used to cache the expensive inverse computation. The returned functions:
##   set(newMatrix) -- store a new matrix and invalidate the cached inverse
##   get()          -- return the stored matrix
##   setInverse(i)  -- cache an inverse computed elsewhere (cacheSolve)
##   getInverse()   -- return the cached inverse, or NULL if none yet
makeCacheMatrix <- function(x = matrix()) {
  inverse_cache <- NULL
  set <- function(newMatrix) {
    # A new matrix makes any previously cached inverse stale.
    x <<- newMatrix
    inverse_cache <<- NULL
  }
  get <- function() { x }
  setInverse <- function(inverse) { inverse_cache <<- inverse }
  getInverse <- function() { inverse_cache }
  list(set = set,
       get = get,
       setInverse = setInverse,
       getInverse = getInverse)
}
## Calculates the inverse of the special "matrix" from makeCacheMatrix.
## Returns the cached inverse when one exists; otherwise computes it with
## solve(), stores it via setInverse(), and returns it.
cacheSolve <- function(x, ...) {
  ## Return a matrix that is the inverse of 'x'
  inv <- x$getInverse()
  if (is.null(inv)) {
    ## No cached value yet -- compute, cache, and return the inverse.
    inv <- solve(x$get())
    x$setInverse(inv)
  }
  inv
}
|
1a3fda5bf734b1006de3acf0e63a1d72ecb01596
|
d96cdb76ebd6d7eab32994c61cc742ca8a7ea40d
|
/tests/test_all.R
|
b15a941125f6bae975822bed4bda1fe741c5c3a9
|
[] |
no_license
|
jakobbossek/Rargs
|
429ca046c96e94b0a272aed48c81a2e9e151e992
|
6b2a6857f184c506af1406dbeaef8e412386b19f
|
refs/heads/master
| 2020-03-30T16:42:05.911452
| 2015-04-15T14:43:49
| 2015-04-15T14:43:49
| 27,809,944
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 39
|
r
|
test_all.R
|
# Test-suite entry point: discover and run all testthat tests for the
# "Rargs" package (executed by R CMD check from the tests/ directory).
library(testthat)
test_check("Rargs")
|
d88588bcc2b516e02706548b26e0ce2696f84dbf
|
8074543de45fe6641641036ee40f5a3247ead8d6
|
/man/hltest.Rd
|
d7269055b9890d294f391b876ae1789021a04f36
|
[] |
no_license
|
gnattino/largesamplehl
|
7b0034b14e10787a2968f0ea52f9a225fcda6d36
|
f2cc63d417e7758b7e297ec7fa21961a78949400
|
refs/heads/master
| 2021-06-12T10:03:10.925398
| 2021-03-14T10:47:25
| 2021-03-14T10:47:25
| 158,947,112
| 2
| 1
| null | null | null | null |
UTF-8
|
R
| false
| true
| 6,431
|
rd
|
hltest.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/hltest_functions.R
\name{hltest}
\alias{hltest}
\alias{hltest.numeric}
\alias{hltest.glm}
\title{Modified Hosmer-Lemeshow Test for Large Samples}
\usage{
hltest(...)
\method{hltest}{numeric}(y, prob, G = 10, outsample = FALSE,
epsilon0 = NULL, conf.level = 0.95, citype = "one.sided",
cimethod = ifelse(citype == "one.sided", NULL, "symmetric"), ...)
\method{hltest}{glm}(glmObject, ...)
}
\arguments{
\item{...}{Additional arguments (ignored).}
\item{y, prob}{Numeric vectors with binary responses and predicted probabilities to be evaluated.
The vectors must have equal length. Missing values are dropped.}
\item{G}{Number of groups to be used in the Hosmer-Lemeshow statistic. By default, \code{G=10}}
\item{outsample}{A boolean specifying whether the model has been fit on the data provided
(\code{outsample=FALSE}, default) or if the model has been developed on an external sample
(\code{outsample=TRUE}). The distribution of the Hosmer-Lemeshow
statistic is assumed to have \code{G-2} and \code{G} degrees of freedom if \code{outsample=FALSE} and
\code{outsample=TRUE}, respectively.}
\item{epsilon0}{Value of the parameter epsilon0, which characterizes the models to be considered as
acceptable in terms of goodness of fit. By default (NULL), epsilon0 is set to the value of epsilon expected from a model attaining a
p-value of the traditional Hosmer-Lemeshow test of 0.05 in a sample of one million observations.
The case \code{epsilon0=0} corresponds to the traditional Hosmer-Lemeshow test. See the section
"Details" for further information.}
\item{conf.level}{Confidence level for the confidence interval of epsilon. Equal to \code{.95}
by default.}
\item{citype}{Type of confidence interval of epsilon to be computed: one-sided
(\code{citype="one.sided"}, default) or two-sided
(\code{citype="two.sided"}).}
\item{cimethod}{Method to be used to compute the two-sided confidence interval:
symmetric (\code{cimethod="symmetric"}, default) or central
(\code{cimethod="central"}). See section "Details" for further information.}
\item{glmObject}{In alternative to the vectors \code{y} and \code{prob}, it is possible to
provide the \code{glm} object with the model to be evaluated.}
}
\value{
A list of class \code{htest} containing the following components:
\describe{
\item{null.value}{The value of epsilon0 used in the test.}
\item{statistic}{The value of the Hosmer-Lemeshow statistic.}
\item{p.value}{The p-value of the test.}
\item{parameter}{A vector with the parameters of the noncentral chi-squared distribution used to
compute the p-value: degrees of freedom (\code{dof}) and noncentrality
parameter (\code{lambda}).}
\item{lambdaHat}{The estimate of noncentrality parameter lambda.}
\item{estimate}{The estimate of epsilon.}
\item{conf.int}{The confidence interval of epsilon.}
}
}
\description{
\code{hltest} implements a goodness-of-fit test to assess the goodness of fit of
logistic regression models in large samples.
}
\details{
The modification of the Hosmer-Lemeshow test evaluates the hypotheses:
H0: epsilon <= epsilon0 vs. Ha: epsilon > epsilon0,
where epsilon is a parameter that measures the goodness of fit of a model. This parameter is based on a
standardization of the noncentrality parameter that characterizes the distribution
of the Hosmer-Lemeshow statistic. The case epsilon=0 corresponds to a model with perfect fit.
Because the null hypothesis of the traditional Hosmer-Lemeshow test is the condition of perfect fit,
it can be interpreted as a test for H0: epsilon = 0 vs. Ha: epsilon > 0. Therefore, the
traditional Hosmer-Lemeshow test can be performed by setting the argument \code{epsilon0=0}.
If epsilon0>0, the implemented test evaluates whether the fit of a model is "acceptable", albeit not perfect.
The value of epsilon0 defines what is meant for "acceptable" in terms of goodness of fit.
By default, epsilon0 is the value of epsilon expected from a model attaining a
p-value of the traditional Hosmer-Lemeshow test of 0.05 in a sample of one million observations.
In other words, the test assesses whether the fit of a model is worse than
the fit of a model that would be considered as borderline-significant (i.e., attaining a p-value of 0.05)
in a sample of one million observations.
The function also estimates the parameter epsilon and constructs its confidence interval.
The confidence interval of this parameter is based on the confidence interval of the
noncentrality parameter that characterizes the distribution
of the Hosmer-Lemeshow statistic, which is noncentral chi-squared. Two types of
two-sided confidence intervals are implemented: symmetric (default) and central.
See Kent and Hainsworth (1995) for further details.
References:
Kent, J. T., & Hainsworth, T. J. (1995). Confidence intervals for the noncentral chi-squared distribution. Journal of Statistical Planning and Inference, 46(2), 147–159.
Nattino, G., Pennell, M. L., & Lemeshow, S.. Assessing the Goodness of fit of Logistic Regression Models in Large Samples: A Modification of the Hosmer-Lemeshow Test. In preparation.
}
\section{Methods (by class)}{
\itemize{
\item \code{numeric}: Method for vectors of responses and predicted probabilities.
\item \code{glm}: Method for result of \code{glm} fit.
}}
\examples{
#Generate fake data with two variables: one continuous and one binary.
set.seed(1234)
dat <- data.frame(x1 = rnorm(5e5),
x2 = rbinom(5e5, size=1, prob=.5))
#The true probabilities of the response depend on a negligible interaction
dat$prob <- 1/(1+exp(-(-1 + dat$x1 + dat$x2 + 0.05*dat$x1*dat$x2)))
dat$y <- rbinom(5e5, size = 1, prob = dat$prob)
#Fit an acceptable model (does not include the negligible interaction)
model <- glm(y ~ x1 + x2, data = dat, family = binomial(link="logit"))
#Check: predicted probabilities are very close to true probabilities
dat$phat <- predict(model, type = "response")
boxplot(abs(dat$prob-dat$phat))
#Traditional Hosmer-Lemeshow test: reject H0
hltest(model, epsilon0 = 0)
#Modified Hosmer-Lemeshow test: fail to reject H0
hltest(model)
#Same output with vectors of responses and predicted probabilities
hltest(y=dat$y, prob=dat$phat)
}
|
10e84ed75be281ad95b744c510ec7ba87252050d
|
31d92549c391af529867cc3ead586b5583b6bb22
|
/plot5.R
|
d26e5b5b569cc88939d5a20c15429231c793bb84
|
[] |
no_license
|
ZubeirSiddiqui/exploratory_data_analysis_project2
|
81202d4409e2dcb2261425d4c2a6c4725a59e3b6
|
30a4a7426ef43bd215e4cb0a970394410d225ae8
|
refs/heads/master
| 2021-01-23T04:58:59.999706
| 2017-03-26T20:40:18
| 2017-03-26T20:40:18
| 86,262,979
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 922
|
r
|
plot5.R
|
## Course assignment plot 5: total PM2.5 motor-vehicle emissions in
## Baltimore City, MD (fips 24510) by year, written to plot5.png.
## set working directory
## NOTE(review): hard-coded absolute path; non-portable.
setwd("C:/Users/zubeir/Desktop/DataScience/assignment6")
## Add library ggplot2 to plot ggplot2 later
library(ggplot2)
## Read in data from summarySCC_PM25.rds file, if object NEI does not exist
## (caching guard: skips the slow read when NEI is already in the workspace)
if(!exists("NEI")){
NEI <- readRDS("summarySCC_PM25.rds")
}
## Perform subset of data where Baltimore City, Maryland (fips == "24510")
## and source type is ON-ROAD (motor vehicles)
subsetNEI <- NEI[NEI$fips=="24510" & NEI$type=="ON-ROAD", ]
## Aggregate total emission by year
aggregatedTotalByYear <- aggregate(Emissions ~ year, subsetNEI, sum)
## Plot data
png("plot5.png", width=840, height=480)
gg_plot <- ggplot(aggregatedTotalByYear, aes(factor(year), Emissions))
gg_plot <- gg_plot + geom_bar(stat="identity") +
xlab("year") +
ylab(expression('Total PM'[2.5]*" Emissions")) +
ggtitle('Total Emissions from motor vehicle (type = ON-ROAD) in Baltimore City, Maryland (fips = "24510") from 1999 to 2008')
print(gg_plot)
# Close the PNG device so the file is flushed to disk.
dev.off()
|
67a99ee0472545fb639ded1872969d3e37716e15
|
dda334048e5b6a6a9bfde44c5c995bc059db86ab
|
/R/indata.transformation.R
|
027af02c749e095a1ad0d59a753ed62a7e5283cf
|
[] |
no_license
|
MariaNikoghosyan/oposSOM
|
71e4e1245930767f006199f843e5b078d10cd090
|
b66dc23593f8aa85ba4ab54d9f25b661a0ea3117
|
refs/heads/master
| 2020-07-01T01:58:39.209042
| 2019-11-08T18:16:05
| 2019-11-08T18:16:05
| 201,011,410
| 0
| 0
| null | 2019-08-07T08:52:31
| 2019-08-07T08:52:29
| null |
UTF-8
|
R
| false
| false
| 15,959
|
r
|
indata.transformation.R
|
# Transform the raw genotype matrix `indata` (two-letter genotype strings such
# as "AG", one SNP per row, one sample per column) into a numeric dosage
# matrix, according to `preferences$indata.transformation`:
#   * 'global.minor.major.alleles' - minor/major alleles annotated via
#     Ensembl/biomaRt; encoding 0 = homozygous major, 1 = heterozygous,
#     2 = homozygous minor.
#   * 'minor.major.alleles'        - minor/major alleles derived from the
#     sample allele frequencies of `indata` itself; same 0/1/2 encoding.
#   * 'disease.assocoated.alleles' - disease-associated vs. neutral alleles
#     looked up in a local GWAS catalogue file; 2 = homozygous risk allele.
# Side effects: overwrites the package-environment globals `indata` and
# `primary.indata` (and, per branch, `minor.major.alleles` /
# `disease.alleles`) via `<<-`.
# NOTE(review): depends on `preferences`, `env`, `util.call()` and
# `pipeline.change.complementary.nucleotide()` defined elsewhere in the
# package — behavior of those helpers is assumed, not visible here.
pipeline.indata.transformation <- function()
{
  # --- Branch 1: global minor/major allele encoding via Ensembl -------------
  if(preferences$indata.transformation == 'global.minor.major.alleles')
  {
    # Verify biomaRt availability; may downgrade preferences on failure.
    util.call(pipeline.BiomartAvailabilityForIndataTransformation, env)
    if(preferences$indata.transformation == 'global.minor.major.alleles')
    {
      # Query Ensembl for allele / minor-allele annotation of the input SNPs;
      # a failed query silently leaves biomart.table NULL.
      biomart.table <- NULL
      try({
        mart <- useMart(biomart=preferences$database.biomart.snps, host=preferences$database.host)
        mart <- useDataset(preferences$database.dataset.snps, mart=mart)
        #query = c("refsnp_id","chr_name","ensembl_gene_stable_id")[ which( c("refsnp_id","chr_name","ensembl_gene_stable_id") %in% listAttributes(mart)[,1] ) ][1:2]
        suppressWarnings({ biomart.table <-
          getBM(c('refsnp_id', 'allele','minor_allele', 'chr_name'),
                preferences$database.id.type.snp,
                rownames(indata),
                mart, checkFilters=FALSE) })
      }, silent=TRUE)
      # Keep only SNPs mapped to the standard chromosomes 1-22, X, Y.
      biomart.table <- biomart.table[which(biomart.table$chr_name %in% c(1:22, 'X', 'Y')),]
      # Collect indices of multi-allelic SNPs (allele field with >1 "/").
      # NOTE(review): `ind` is seeded with 0 so that `-ind` subsetting is a
      # no-op on the 0 and only drops rows when multi-allelic SNPs were found.
      ind <- c(0)
      for(i in 1:nrow(biomart.table))
      {
        if(lengths(regmatches(biomart.table$allele[i], gregexpr("/", biomart.table$allele[i]))) > 1)
        {
          ind <- c(ind,i)
        }
      }
      if(length(ind) > 1)
      {
        biomart.table <- biomart.table[-ind,]
      }
      # Separate the two alleles of the "A/G"-style annotation into columns.
      biomart.table$allele_1 <- NA
      biomart.table$allele_2 <- NA
      for (i in 1:nrow(biomart.table))
      {
        k <- stringr::str_split(biomart.table$allele[i], "/",n = Inf, simplify = FALSE)[[1]]
        biomart.table$allele_1[i] <- k[1]
        biomart.table$allele_2[i] <- k[2]
      }
      # Discard rows whose alleles are not plain single nucleotides.
      biomart.table <- biomart.table[which(biomart.table$allele_1 == 'A' |
                                             biomart.table$allele_1 == 'T' |
                                             biomart.table$allele_1 == 'G' |
                                             biomart.table$allele_1 == 'C'),]
      biomart.table <- biomart.table[which(biomart.table$allele_2 == 'A' |
                                             biomart.table$allele_2 == 'T' |
                                             biomart.table$allele_2 == 'G' |
                                             biomart.table$allele_2 == 'C'),]
      biomart.table <- biomart.table[which(biomart.table$minor_allele == 'A' |
                                             biomart.table$minor_allele == 'T' |
                                             biomart.table$minor_allele == 'G' |
                                             biomart.table$minor_allele == 'C'),]
      # The major allele is whichever of allele_1/allele_2 is not the minor.
      biomart.table$major_allele <- NA
      for (i in 1:nrow(biomart.table))
      {
        if(biomart.table$minor_allele[i] == biomart.table$allele_1[i])
        {
          biomart.table$major_allele[i] <- biomart.table$allele_2[i]
        }else
        {
          biomart.table$major_allele[i] <- biomart.table$allele_1[i]
        }
      }
      # Restrict (and reorder) indata to the annotated SNPs.
      indata <<- indata[biomart.table$refsnp_id,]
      # Per SNP, the set of distinct nucleotide letters observed in indata.
      indata_alleles <- apply(indata, 1,FUN = function(y)
      {
        y <- as.character(sapply(y, function(x)
        {
          x <- strsplit(as.character(x),split = "")[[1]]
          return(x)
        }))
        return(y)
      })
      indata_alleles <- t(indata_alleles)
      indata_alleles <- apply(indata_alleles, 1,unique)
      indata_alleles <- t(indata_alleles)
      indata_alleles <- setNames(split(indata_alleles, seq(nrow(indata_alleles))), rownames(indata_alleles))
      # Map Ensembl's minor/major alleles onto the strand used by indata,
      # flipping to the complementary base where needed.
      biomart.table$indata_minor_allele <- NA
      biomart.table$indata_major_allele <- NA
      for (i in 1:nrow(biomart.table))
      {
        #minor allele
        if(biomart.table$minor_allele[i] == indata_alleles[which(names(indata_alleles) %in% biomart.table$refsnp_id[i])][[1]][1] |
           biomart.table$minor_allele[i] == indata_alleles[which(names(indata_alleles) %in% biomart.table$refsnp_id[i])][[1]][2])
        {
          biomart.table$indata_minor_allele[i] <- biomart.table$minor_allele[i]
        }else
        {
          biomart.table$indata_minor_allele[i] <- pipeline.change.complementary.nucleotide(biomart.table$minor_allele[i])
        }
        #major allele
        if(biomart.table$major_allele[i] == indata_alleles[which(names(indata_alleles) %in% biomart.table$refsnp_id[i])][[1]][1] |
           biomart.table$major_allele[i] == indata_alleles[which(names(indata_alleles) %in% biomart.table$refsnp_id[i])][[1]][2])
        {
          biomart.table$indata_major_allele[i] <- biomart.table$major_allele[i]
        }else
        {
          biomart.table$indata_major_allele[i] <- pipeline.change.complementary.nucleotide(biomart.table$major_allele[i])
        }
      }
      # Re-key the annotation by SNP id for quick lookup in the loop below.
      biomart.table <- split(biomart.table[,c('minor_allele','major_allele', 'indata_minor_allele','indata_major_allele')], biomart.table$refsnp_id)
      # Recode each genotype string: 1 = heterozygous, 2 = homozygous minor,
      # 0 = homozygous major (and 0 as the fallback for anything else).
      # (The break() calls after return() are unreachable and have no effect.)
      for (i in 1:nrow(indata))
      {
        indata[i,] <<- sapply(indata[i,], FUN = function(x)
        {
          x <- strsplit(as.character(x),split = "")[[1]]
          if(length(unique(x)) > 1)
          {
            x <- 1
            return(x)
            break()
          }
          if(unique(x) == biomart.table[which(names(biomart.table) %in% rownames(indata)[i])][[1]]$indata_minor_allele)
          {
            x <- 2
            return(x)
            break()
          }
          if(unique(x) == biomart.table[which(names(biomart.table) %in% rownames(indata)[i])][[1]]$indata_major_allele)
          {
            x <- 0
          }
          else
          {
            x <- 0
            return(x)
            break()
          }
        })
      }
    }
    # Preserve the transformed matrix as the primary input copy.
    primary.indata <<- indata
  }
  # --- Branch 2: minor/major alleles derived from sample frequencies -------
  if(preferences$indata.transformation == 'minor.major.alleles') # calculate minor and major aleles
  {
    # Global table of per-SNP minor/major alleles and their frequencies.
    minor.major.alleles <<- as.data.frame(matrix(NA, nrow = nrow(indata), ncol = 5))
    colnames(minor.major.alleles) <<- c('SNP_ID', 'Minor.allele', 'Minor.allele.frequency', 'Major.allele', 'Major.allele.frequency')
    minor.major.alleles$SNP_ID <<- rownames(indata)
    # One row per SNP, all single nucleotide letters observed across samples.
    alleles <-apply(indata, 1,FUN = function(y)
    {
      y <- as.character(sapply(y, function(x)
      {
        x <- strsplit(as.character(x),split = "")[[1]]
        return(x)
      }))
    })
    alleles <- t(alleles)
    #alleles <- t(alleles)
    # Determine minor (less frequent) and major allele per SNP; on a tie the
    # first table() entry is arbitrarily taken as the minor allele.
    # NOTE(review): frequencies are divided by ncol(alleles) = 2 * n_samples
    # worth of letters per row? Verify the intended denominator.
    for (i in 1:nrow(alleles))
    {
      if(table(alleles[i,])[1] != table(alleles[i,])[2])
      {
        minor.major.alleles$Minor.allele[i] <<- names(which(table(alleles[i,]) == min(table(alleles[i,]))))
        minor.major.alleles$Minor.allele.frequency[i] <<- as.numeric(table(alleles[i,])[which(table(alleles[i,]) == min(table(alleles[i,])))]) / as.numeric(ncol(alleles))
        minor.major.alleles$Major.allele[i] <<- names(which(table(alleles[i,]) == max(table(alleles[i,]))))
        minor.major.alleles$Major.allele.frequency[i] <<- as.numeric(table(alleles[i,])[which(table(alleles[i,]) == max(table(alleles[i,])))]) / as.numeric(ncol(alleles))
      } else
      {
        minor.major.alleles$Minor.allele[i] <<- names(table(alleles[i,]))[1]
        minor.major.alleles$Minor.allele.frequency[i] <<- as.numeric(table(alleles[i,])[1]) / as.numeric(ncol(alleles))
        minor.major.alleles$Major.allele[i] <<- names(table(alleles[i,]))[2]
        minor.major.alleles$Major.allele.frequency[i] <<- as.numeric(table(alleles[i,])[2]) / as.numeric(ncol(alleles))
      }
    }
    # Same 0/1/2 recode as in branch 1, but against the sample-derived table.
    for (i in 1:nrow(indata))
    {
      indata[i,] <<- sapply(indata[i,], function(x)
      {
        x <- strsplit(as.character(x),split = "")[[1]]
        if(length(unique(x)) > 1)
        {
          x <- 1
          return(x)
          break()
        }
        if(unique(x) == minor.major.alleles$Minor.allele[i])
        {
          x <- 2
          return(x)
          break()
        }
        if(unique(x) == minor.major.alleles$Major.allele[i])
        {
          x <- 0
        }
        else
        {
          x <- 0
          return(x)
          break()
        }
      })
    }
    primary.indata <<- indata
  }
  # --- Branch 3: disease-associated allele encoding from a GWAS catalogue ---
  if(preferences$indata.transformation == 'disease.assocoated.alleles')
  {
    # Load the (locally shipped) GWAS catalogue; keep only the SNP id and the
    # risk/neutral allele columns, drop duplicate rows, and restrict both the
    # catalogue and indata to their common SNPs.
    gwas <- read.table('data/gwas_catalogue.csv', sep = '\t', header = T, as.is = T)
    gwas_slim <- gwas[,c("SNPS","RISK_ALLELE_starnd_1", 'neutral_allele_strand_1')]
    gwas_slim <- gwas_slim[!duplicated(gwas_slim),]
    gwas_slim <- gwas_slim[which(gwas_slim$SNPS %in% rownames(indata)),]
    indata <<- indata[which(rownames(indata) %in% gwas_slim$SNPS),]
    ## define indata alleles and corresponding disease associated alleles
    indata_alleles <- apply(indata, 1,FUN = function(y)
    {
      y <- as.character(sapply(y, function(x)
      {
        x <- strsplit(as.character(x),split = "")[[1]]
        return(x)
      }))
      return(y)
    })
    indata_alleles <- t(indata_alleles)
    indata_alleles <- apply(indata_alleles, 1,unique)
    indata_alleles <- t(indata_alleles)
    indata_alleles <- setNames(split(indata_alleles, seq(nrow(indata_alleles))), rownames(indata_alleles))
    gwas_alleles <- split(gwas_slim[,c('RISK_ALLELE_starnd_1','neutral_allele_strand_1')], gwas_slim$SNPS)
    # Build the global disease.alleles table: one row per (SNP, association);
    # SNPs with several catalogue entries get suffixed SNP_uniq_IDs ("rsX_1").
    disease.alleles <<- as.data.frame(matrix(NA, nrow = 0, ncol = 8))
    colnames(disease.alleles) <<- c('SNP_ID','SNP_uniq_ID','indata_allele_1','indata_allele_2','disease_associated_allele', 'neutral_allele',
                                    'disease_associated_allele_in_indata', 'neutral_allele_in_indata')
    for (i in 1:length(indata_alleles))
    {
      snp <- as.data.frame(matrix(NA, nrow = nrow(gwas_alleles[names(indata_alleles[i])][[1]]), ncol = ncol(disease.alleles)))
      names(snp) <- colnames(disease.alleles)
      for (j in 1:nrow(gwas_alleles[names(indata_alleles[i])][[1]]))
      {
        if(j < 2)
        {
          # First catalogue entry for this SNP: uniq id equals the SNP id.
          snp[j,c('SNP_ID','SNP_uniq_ID')] <- names(indata_alleles[i])
          snp[j,c('indata_allele_1', 'indata_allele_2')] <- indata_alleles[i][[1]]
          snp[j,'disease_associated_allele'] <- gwas_alleles[names(indata_alleles[i])][[1]][['RISK_ALLELE_starnd_1']][j]
          snp[j,'neutral_allele'] <- gwas_alleles[names(indata_alleles[i])][[1]][['neutral_allele_strand_1']][j]
          # Translate catalogue alleles onto the indata strand (complement
          # them when they do not match either observed allele).
          if(snp[j,'disease_associated_allele'] == snp[j,'indata_allele_1'] | snp[j,'disease_associated_allele'] == snp[j,'indata_allele_2'])
          {
            snp[j,'disease_associated_allele_in_indata'] <- snp[j,'disease_associated_allele']
          }else
          {
            snp[j,'disease_associated_allele_in_indata'] <-pipeline.change.complementary.nucleotide(snp[j,'disease_associated_allele'])
          }
          if(snp[j,'neutral_allele'] == snp[j,'indata_allele_1'] | snp[j,'neutral_allele'] == snp[j,'indata_allele_2'])
          {
            snp[j,'neutral_allele_in_indata'] <- snp[j,'neutral_allele']
          }else
          {
            snp[j,'neutral_allele_in_indata'] <-pipeline.change.complementary.nucleotide(snp[j,'neutral_allele'])
          }
        }else
        {
          # Further catalogue entries: same logic, suffixed unique id.
          snp[j,'SNP_ID'] <- names(indata_alleles[i])
          snp[j,'SNP_uniq_ID'] <- paste(names(indata_alleles[i]), j-1, sep = '_')
          snp[j,c('indata_allele_1', 'indata_allele_2')] <- indata_alleles[i][[1]]
          snp[j,'disease_associated_allele'] <- gwas_alleles[names(indata_alleles[i])][[1]][['RISK_ALLELE_starnd_1']][j]
          snp[j,'neutral_allele'] <- gwas_alleles[names(indata_alleles[i])][[1]][['neutral_allele_strand_1']][j]
          if(snp[j,'disease_associated_allele'] == snp[j,'indata_allele_1'] | snp[j,'disease_associated_allele'] == snp[j,'indata_allele_2'])
          {
            snp[j,'disease_associated_allele_in_indata'] <- snp[j,'disease_associated_allele']
          }else
          {
            snp[j,'disease_associated_allele_in_indata'] <-pipeline.change.complementary.nucleotide(snp[j,'disease_associated_allele'])
          }
          if(snp[j,'neutral_allele'] == snp[j,'indata_allele_1'] | snp[j,'neutral_allele'] == snp[j,'indata_allele_2'])
          {
            snp[j,'neutral_allele_in_indata'] <- snp[j,'neutral_allele']
          }else
          {
            snp[j,'neutral_allele_in_indata'] <-pipeline.change.complementary.nucleotide(snp[j,'neutral_allele'])
          }
        }
      }
      disease.alleles <<- rbind(disease.alleles, snp)
    }
    ################
    # Drop SNPs whose observed alleles cannot be reconciled (directly or via
    # the complementary strand) with the catalogue's risk/neutral alleles.
    #not sure if it is necessary
    snps_withour_disease_alleles <- c('')
    for (i in 1:nrow(disease.alleles))
    {
      if((disease.alleles$indata_allele_1[i] == disease.alleles$disease_associated_allele[i] |
          disease.alleles$indata_allele_1[i] == disease.alleles$neutral_allele[i] |
          pipeline.change.complementary.nucleotide(disease.alleles$indata_allele_1[i])== disease.alleles$disease_associated_allele[i] |
          pipeline.change.complementary.nucleotide(disease.alleles$indata_allele_1[i]) == disease.alleles$neutral_allele[i])&
         (disease.alleles$indata_allele_2[i] == disease.alleles$disease_associated_allele[i] |
          disease.alleles$indata_allele_2[i] == disease.alleles$neutral_allele[i] |
          pipeline.change.complementary.nucleotide(disease.alleles$indata_allele_2[i]) == disease.alleles$disease_associated_allele[i] |
          pipeline.change.complementary.nucleotide(disease.alleles$indata_allele_2[i]) == disease.alleles$neutral_allele[i] ))
      {}else
      {
        snps_withour_disease_alleles <- c(snps_withour_disease_alleles, disease.alleles$SNP_ID[i])
      }
    }
    # NOTE(review): `!=` against a whole vector recycles element-wise;
    # `!(… %in% snps_withour_disease_alleles)` was presumably intended — verify.
    disease.alleles <<- disease.alleles[which(disease.alleles$SNP_ID != snps_withour_disease_alleles),]
    #################
    # disease.alleles <<- split(disease.alleles[,
    #                                           c("SNP_uniq_ID",
    #                                             "indata_allele_1",
    #                                             "indata_allele_2",
    #                                             "disease_accosoated_allele",
    #                                             "neutral_allele")], disease.alleles$SNP_ID)
    #################################
    # Final numeric matrix: one row per (SNP, association) pair; encoding
    # 2 = homozygous risk allele, 1 = heterozygous, 0 = otherwise.
    indata_numeric_genotypes <- as.data.frame(matrix(NA, nrow = nrow(disease.alleles), ncol = ncol(indata)))
    colnames(indata_numeric_genotypes) <- colnames(indata)
    rownames(indata_numeric_genotypes) <- disease.alleles$SNP_uniq_ID
    for (i in 1:nrow(indata_numeric_genotypes))
    {
      k <- disease.alleles[which(disease.alleles$SNP_uniq_ID %in% rownames(indata_numeric_genotypes)[i]),]
      indata_numeric_genotypes[i,] <- sapply(indata[which(rownames(indata) %in% k$SNP_ID),], FUN = function(x)
      {
        x <- strsplit(as.character(x),split = "")[[1]]
        if(length(unique(x)) > 1)
        {
          x <- 1
          return(x)
          break()
        }
        if(unique(x) == k$disease_associated_allele_in_indata)
        {
          x <- 2
          return(x)
          break()
        }
        if(unique(x) == k$neutral_allele_in_indata)
        {
          x <- 0
        }
        else
        {
          x <- 0
          return(x)
          break()
        }
      })
    }
    indata <<- indata_numeric_genotypes
    primary.indata <<- indata_numeric_genotypes
  }
}
|
d7464fd80d3e5b5bd81fea1ce5ebb35bf2bb07c7
|
efc680d4e454d5c330aae260f84b2cfb877fc6b8
|
/01_run_ERSST.R
|
ce9b3775b92e4705622cafa35d01a8b880f0d9d8
|
[] |
no_license
|
lizllanos/CPT_R
|
c86c6bfdc37e55f811df31ed2cdbcf81219ad2dc
|
ccdad0f5c6fe200dfaf5383bf7f182fd7e4b392c
|
refs/heads/master
| 2021-09-25T01:35:09.402757
| 2018-10-16T16:27:05
| 2018-10-16T16:27:05
| 108,312,174
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 3,885
|
r
|
01_run_ERSST.R
|
#' Run the Climate Predictability Tool (CPT) in batch mode for one
#' predictor/predictand file pair.
#'
#' Fills a fixed template of CPT menu answers with the concrete input paths
#' and the per-file output paths (all derived from `output` by suffix),
#' writes the resulting script to `path_run` and executes it with `system()`.
#' Windows-only: the answers are piped into `CPT_batch.exe`.
#'
#' @param x        Path to the predictor (ERSST) input file.
#' @param y        Path to the predictand (stations) input file.
#' @param i_fores  Forecast menu answer fed to CPT (numeric).
#' @param path_run Path of the batch (.bat) file to write and execute.
#' @param output   Prefix used to name every CPT output file.
#' @return The exit status returned by `system()`.
run_cpt <- function(x, y, i_fores, path_run, output){
  # Map each %placeholder% of the template to its concrete value.  Output
  # file names are the `output` prefix plus a product-specific suffix
  # (cross-validated scores, retroactive "_rt" scores, CCA modes, forecasts).
  # This replaces the original copy-pasted gsub() chain, which also contained
  # a duplicated %path_y_serie% substitution.
  replacements <- c(
    "%path_x%"                     = x,
    "%path_y%"                     = y,
    "%i_for%"                      = i_fores,
    "%path_GI%"                    = paste0(output, "_goodness_index.txt"),
    "%path_pear%"                  = paste0(output, "_pearson.txt"),
    "%path_2afc%"                  = paste0(output, "_2afc.txt"),
    "%path_prob%"                  = paste0(output, "_prob.txt"),
    "%path_cc%"                    = paste0(output, "_modos_cc.txt"),
    "%path_x_load%"                = paste0(output, "_x_load.txt"),
    "%path_x_serie%"               = paste0(output, "_x_serie.txt"),
    "%path_y_load%"                = paste0(output, "_y_load.txt"),
    "%path_y_serie%"               = paste0(output, "_y_serie.txt"),
    "%path_det_forecast%"          = paste0(output, "_det_forecast.txt"),
    "%path_det_forecast_limit%"    = paste0(output, "_det_forecast_limit.txt"),
    "%path_roc_a%"                 = paste0(output, "_roc_a.txt"),
    "%path_roc_b%"                 = paste0(output, "_roc_b.txt"),
    "%path_prob_rt%"               = paste0(output, "_prob_rt.txt"),
    "%path_det_forecast_rt%"       = paste0(output, "_det_forecast_rt.txt"),
    "%path_det_forecast_limit_rt%" = paste0(output, "_det_forecast_limit_rt.txt"),
    "%path_pear_rt%"               = paste0(output, "_pearson_rt.txt"),
    "%path_2afc_rt%"               = paste0(output, "_2afc_rt.txt"),
    "%path_roc_a_rt%"              = paste0(output, "_roc_a_rt.txt"),
    "%path_roc_b_rt%"              = paste0(output, "_roc_b_rt.txt")
  )
  # Template of CPT menu answers (do not edit the numeric answers lightly:
  # they encode the whole interactive CPT session).
  cmd <- "@echo off
(
echo 611
echo 545
echo 1
echo %path_x%
echo 30
echo -30
echo 0
echo 359
echo 1
echo 10
echo 2
echo %path_y%
echo 17
echo 13
echo -90
echo -83
echo 1
echo 10
echo 1
echo 5
echo 9
echo 1
echo 213
echo %i_for%
echo 3
echo 3
echo 532
echo 1981
echo 2015
echo N
echo 2
echo 7
echo 35
echo 554
echo 2
echo 541
echo 112
echo %path_GI%
echo 312
echo 25
echo 1
echo 451
echo 454
echo 455
echo 413
echo 1
echo %path_pear%
echo 413
echo 3
echo %path_2afc%
echo 413
echo 10
echo %path_roc_b%
echo 413
echo 11
echo %path_roc_a%
echo 111
echo 501
echo %path_prob%
echo 111
echo 411
echo %path_x_load%
echo 111
echo 412
echo %path_x_serie%
echo 111
echo 421
echo %path_y_load%
echo 111
echo 422
echo %path_y_serie%
echo 111
echo 511
echo %path_det_forecast%
echo 111
echo 513
echo %path_det_forecast_limit%
echo 111
echo 203
echo %path_prob_rt%
echo 111
echo 202
echo %path_det_forecast_rt%
echo 111
echo 204
echo %path_det_forecast_limit_rt%
echo 401
echo %path_cc%
echo 0
echo 423
echo 1
echo %path_pear_rt%
echo 423
echo 3
echo %path_2afc_rt%
echo 423
echo 10
echo %path_roc_b_rt%
echo 423
echo 11
echo %path_roc_a_rt%
echo 0
) | CPT_batch.exe"
  # Substitute every placeholder.  fixed = TRUE makes both the pattern and
  # the replacement literal, so paths cannot be misinterpreted as regexes.
  for (ph in names(replacements)) {
    cmd <- gsub(ph, replacements[[ph]], cmd, fixed = TRUE)
  }
  # Write the finished batch script and run it.
  write(cmd, path_run)
  system(path_run, ignore.stdout = TRUE, show.output.on.console = TRUE)
}
# Root folder holding the CPT input/output directory tree.
main_dir <- "C:/Users/dagudelo/Desktop/CPT"
# Predictor (ERSST) file names; the leading two characters of each name
# encode the forecast menu answer extracted into i_fores below.
names_x <- list.files(paste0(main_dir,"/input/ERSST"))
# One output prefix per predictor file (extension stripped).
output <- paste0(main_dir,"/output/",substring(names_x,1,nchar(names_x)-4))
# Full paths of the predictor files and of the station (predictand) files.
# NOTE(review): x and y are paired positionally — assumes both directories
# list matching files in the same order.
x <- list.files(paste0(main_dir,"/input/ERSST"),full.names = T)
y <- list.files(paste0(main_dir,"/input/stations"),full.names = T)
# First two characters of each file name -> numeric CPT menu answer.
i_fores <- sapply(names_x,function(x) as.numeric(substring(x,1,2)))
# One .bat file per run, written next to main_dir.
path_run <- paste0(main_dir,"/",substring(names_x,1,nchar(names_x)-4),".bat")
# Execute run_cpt once per predictor/predictand pair.
Map(run_cpt,x,y,i_fores,path_run,output)
|
84ab97259530ebdc76129f4e44e4c5486a49e45a
|
a79570312203ffe1c78a0d5063571a59ca8729d7
|
/code.R
|
cfdb51f0ef8a7ae25c9e04db0c13220adc2ce55e
|
[] |
no_license
|
iffotiadis/Clustering-Customer-Segmentation-
|
d233a0113eee37423b17e295a7333fc895e650ae
|
c5bce7fcc556a6c3a76a407bf0b9c511265647f3
|
refs/heads/master
| 2020-03-17T11:30:25.694460
| 2018-05-15T18:06:42
| 2018-05-15T18:06:42
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 3,840
|
r
|
code.R
|
I downloaded this wholesale customer dataset from UCI Machine Learning Repository. The data set refers to clients of a wholesale distributor. It includes the annual spending in monetary units on diverse product categories.
My goal today is to use various clustering techniques to segment customers. Clustering is an unsupervised learning algorithm that tries to cluster data based on their similarity. Thus, there is no outcome to be predicted, and the algorithm just tries to find patterns in the data.
#This is the head and structure of the original data
customer <- read.csv('C:/Ioannis/Downloads/Wholesale.csv')
head(customer)
str(customer)
K-Means Clustering
#Prepare the data for analysis. Remove the missing value and remove "Channel" and "Region" columns because they are not useful for #clustering.
customer1<- customer
customer1<- na.omit(customer1)
customer1$Channel <- NULL
customer1$Region <- NULL
#Standardize the variables.
customer1 <- scale(customer1)
#Determine number of clusters.
wss <- (nrow(customer1)-1)*sum(apply(customer1,2,var))
for (i in 2:15) wss[i] <- sum(kmeans(customer1,
centers=i)$withinss)
plot(1:15, wss, type="b", xlab="Number of Clusters",
ylab="Within groups sum of squares")
#The correct choice of k is often ambiguous, but from the above plot, I am going to try my cluster analysis with 6 clusters .
#Fit the model and print out the cluster means.
fit <- kmeans(customer1, 6) # fit the model
aggregate(customer1,by=list(fit$cluster),FUN=mean) # get cluster means
customer1 <- data.frame(customer1, fit$cluster) #append cluster assignment
#Plotting the results.
library(cluster)
clusplot(customer1, fit$cluster, color=TRUE, shade=TRUE, labels=2, lines=0)
#Interpretation of the results: With my analysis, more than 70% of information about the multivariate data is captured by this plot of #component 1 and 2.
#Outlier detection with K-Means
#First, the data are partitioned into k groups by assigning them to the closest cluster centers, as follows:
customer2 <- customer[, 3:8]
kmeans.result <- kmeans(customer2, centers=6)
kmeans.result$centers
#Then calculate the distance between each object and its cluster center, and pick those with largest distances as outliers.
kmeans.result$cluster # print out cluster IDs
centers <- kmeans.result$centers[kmeans.result$cluster, ]
distances <- sqrt(rowSums((customer2 - centers)^2)) # calculate distances
outliers <- order(distances, decreasing=T)[1:5] # pick up top 5 distances
print(outliers)
#These are the outliers. Let me make it more meaningful.
print(customer2[outliers,])
#Much better!
#Hierarchical Clustering
#First draw a sample of 40 records from the customer data, so that the clustering plot will not be over crowded. Same as before, variables #Region and Channel are removed from the data. After that, I apply hierarchical clustering to the data.
idx <- sample(1:dim(customer)[1], 40)
customerSample <- customer[idx,]
customerSample$Region <- NULL
customerSample$Channel <- NULL
#There is a wide range of hierarchical clustering methods; I heard Ward's method is a good approach, so try it out.
d <- dist(customerSample, method = "euclidean") # distance matrix
fit <- hclust(d, method="ward")
plot(fit) # display dendogram
groups <- cutree(fit, k=6) # cut tree into 6 clusters
# draw dendrogram with red borders around the 6 clusters
rect.hclust(fit, k=6, border="red")
#Let me try to interpret: At the bottom, I start with 40 data points, each assigned to separate clusters, two closest clusters are then #merged till I have just one cluster at the top. The height in the dendrogram at which two clusters are merged represents the distance #between two clusters in the data space. The decision of the number of clusters that can best depict different groups can be chosen by #observing the dendrogram.
|
bda137d173d686c113adcd4c01f94b774c3cd519
|
06493ebc67611baf4e2c2f536feffc1d583a1803
|
/man/as_df.Rd
|
8c6443f2491e50cad455aa347840cf4442a1d7d0
|
[] |
no_license
|
mdelrio1/Momocs
|
96bd5fc0673bac53a8b259b95252198f009cd239
|
00da8cc6cdd09bb4f65385fa287453f08b9348ee
|
refs/heads/master
| 2020-03-27T13:28:17.850600
| 2018-08-14T14:01:32
| 2018-08-14T14:01:32
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 1,043
|
rd
|
as_df.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/babel-bridges.R
\name{as_df}
\alias{as_df}
\alias{as_df.Coo}
\alias{as_df.Coe}
\alias{as_df.TraCoe}
\alias{as_df.PCA}
\alias{as_df.LDA}
\title{Converts Momocs objects to data.frames}
\usage{
as_df(x)
\method{as_df}{Coo}(x)
\method{as_df}{Coe}(x)
\method{as_df}{TraCoe}(x)
\method{as_df}{PCA}(x)
\method{as_df}{LDA}(x)
}
\arguments{
\item{x}{an object, typically a Momocs object}
}
\value{
a \code{data.frame}
}
\description{
Used in particular for compatibility with the \code{tidyverse}
}
\examples{
# smaller Out
lite_bot <- bot \%>\% slice(c(1, 2, 21, 22)) \%>\% coo_sample(12)
# Coo object
lite_bot \%>\% as_df \%>\% head
# Coe object
lite_bot \%>\% efourier(2) \%>\% as_df \%>\% head
# PCA object
lite_bot \%>\% efourier(2) \%>\% PCA \%>\% as_df \%>\% head
# LDA object
lite_bot \%>\% efourier(2) \%>\% PCA \%>\% LDA(~type) \%>\% as_df \%>\% head
}
\seealso{
Other bridges functions: \code{\link{bridges}},
\code{\link{complex}}, \code{\link{export}}
}
|
c6232e7255e0473620950877e02f0a52c53a57a3
|
0500ba15e741ce1c84bfd397f0f3b43af8cb5ffb
|
/cran/paws.database/man/memorydb_describe_parameters.Rd
|
cd7bd17b6ade230f30088003a40376f18e245dee
|
[
"Apache-2.0"
] |
permissive
|
paws-r/paws
|
196d42a2b9aca0e551a51ea5e6f34daca739591b
|
a689da2aee079391e100060524f6b973130f4e40
|
refs/heads/main
| 2023-08-18T00:33:48.538539
| 2023-08-09T09:31:24
| 2023-08-09T09:31:24
| 154,419,943
| 293
| 45
|
NOASSERTION
| 2023-09-14T15:31:32
| 2018-10-24T01:28:47
|
R
|
UTF-8
|
R
| false
| true
| 1,242
|
rd
|
memorydb_describe_parameters.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/memorydb_operations.R
\name{memorydb_describe_parameters}
\alias{memorydb_describe_parameters}
\title{Returns the detailed parameter list for a particular parameter group}
\usage{
memorydb_describe_parameters(
ParameterGroupName,
MaxResults = NULL,
NextToken = NULL
)
}
\arguments{
\item{ParameterGroupName}{[required] The name of a specific parameter group to return details for.}
\item{MaxResults}{The maximum number of records to include in the response. If more
records exist than the specified MaxResults value, a token is included
in the response so that the remaining results can be retrieved.}
\item{NextToken}{An optional argument to pass in case the total number of records exceeds
the value of MaxResults. If nextToken is returned, there are more
results available. The value of nextToken is a unique pagination token
for each page. Make the call again using the returned token to retrieve
the next page. Keep all other arguments unchanged.}
}
\description{
Returns the detailed parameter list for a particular parameter group.
See \url{https://www.paws-r-sdk.com/docs/memorydb_describe_parameters/} for full documentation.
}
\keyword{internal}
|
7db9586f473bd3d360976d52f3c1931e4cca2fab
|
5b1e58ad4a366882be5a6c0380d3fe26e41f1c74
|
/app.R
|
5d569ec2ed02c999652b57072f572bb8350f6a60
|
[] |
no_license
|
vedangmehta/STAT597_week_10
|
be6571a5d1a4d5a8eb8c6a7dbca52810a3b756e1
|
f7b95b683655dd43b4131c68923c626140693086
|
refs/heads/master
| 2020-03-13T11:29:28.143366
| 2018-04-26T05:18:18
| 2018-04-26T05:18:18
| 131,102,486
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 5,020
|
r
|
app.R
|
library(shiny)
library(tidyverse)
library(tidycensus)
# UI definition: a sidebar (state selector + plot-type radio buttons) next to
# a main panel holding the rendered map (and a currently unused table slot).
ui <- fluidPage(sidebarLayout(
  sidebarPanel(
    # Drop-down to choose the state whose counties will be mapped.
    # Default value is New Jersey.
    selectInput(
      inputId = "state",
      label = "State",
      choices = c(
        'Alaska',
        'Alabama',
        'Arkansas',
        'Arizona',
        'California',
        'Colorado',
        'Connecticut',
        'District of Columbia',
        'Delaware',
        'Florida',
        'Georgia',
        'Hawaii',
        'Iowa',
        'Idaho',
        'Illinois',
        'Indiana',
        'Kansas',
        'Kentucky',
        'Louisiana',
        'Massachusetts',
        'Maryland',
        'Maine',
        'Michigan',
        'Minnesota',
        'Missouri',
        'Mississippi',
        'Montana',
        'North Carolina',
        'North Dakota',
        'Nebraska',
        'New Hampshire',
        'New Jersey',
        'New Mexico',
        'Nevada',
        'New York',
        'Ohio',
        'Oklahoma',
        'Oregon',
        'Pennsylvania',
        'Rhode Island',
        'South Carolina',
        'South Dakota',
        'Tennessee',
        'Texas',
        'Utah',
        'Virginia',
        'Vermont',
        'Washington',
        'Wisconsin',
        'West Virginia',
        'Wyoming'
      ),
      selected = "New Jersey"
    ),
    # Radio buttons to choose which ACS-derived quantity to map.
    # Default value is "Median Household Income".
    radioButtons(
      inputId = "plotType",
      label = "What to plot?",
      choices = c(
        "Median Household Income",
        "Median Gross Rent",
        "Ratio of Median Gross Rent to Median Household Income"
      ),
      selected = "Median Household Income"
    )
  ),
  # Main panel: the county map plus a table output placeholder
  # (NOTE(review): no output$results is defined in the server — the table
  # slot stays empty).
  mainPanel(plotOutput("main_plot"),
            tableOutput("results"))
),
titlePanel(" "))
# Server: fetches county-level ACS data (tidycensus::get_acs, which requires
# a Census API key and network access) for the selected state and renders a
# choropleth of income, rent, or their ratio.
server <- function(input, output, session) {
  output$main_plot <- renderPlot({
    # Current UI selections
    state <- input$state
    plot_type <- input$plotType
    if (plot_type == "Median Household Income") {
      # ACS variable B19013_001: median household income, with geometry for
      # mapping via geom_sf()
      df_median_income <-
        get_acs(
          geography = "county",
          variables = c(medincome = "B19013_001"),
          state = state,
          geometry = TRUE
        )
      # Choropleth of median income by county (green scale)
      df_median_income %>%
        ggplot(aes(fill = estimate, color = estimate)) +
        geom_sf() + scale_fill_gradient(low = "white", high = "#004414") + scale_color_gradient(low = "white", high = "#004414") +
        ggtitle("Median Household Income") + guides(
          fill = guide_legend(title = "Median Household Income"),
          colour = guide_legend(title = "Median Household Income")
        ) + theme_bw()
    }
    else if (plot_type == "Median Gross Rent") {
      # ACS variable B25064_001: median gross rent
      df_median_rent <-
        get_acs(
          geography = "county",
          variables = c(medrent = "B25064_001"),
          state = state,
          geometry = TRUE
        )
      # Choropleth of median rent by county (blue scale)
      df_median_rent %>%
        ggplot(aes(fill = estimate, color = estimate)) +
        geom_sf() + scale_fill_gradient(low = "white", high = "#001344") + scale_color_gradient(low = "white", high = "#001344") +
        ggtitle("Median Gross Rent") + guides(
          fill = guide_legend(title = "Median Gross Rent"),
          colour = guide_legend(title = "Median Gross Rent")
        ) + theme_bw()
    }
    else{
      # Ratio plot: fetch both variables, join on county FIPS, and map
      # rent / income
      df_median_income <-
        get_acs(
          geography = "county",
          variables = c(medincome = "B19013_001"),
          state = state,
          geometry = TRUE
        )
      df_median_rent <-
        get_acs(
          geography = "county",
          variables = c(medrent = "B25064_001"),
          state = state,
          geometry = TRUE
        )
      # Rename the `estimate` columns so they survive the join
      colnames(df_median_income)[4] <- "Income"
      colnames(df_median_rent)[4] <- "Rent"
      # Join the two tables by county id (GEOID)
      df <-
        inner_join(as.data.frame(df_median_income),
                   as.data.frame(df_median_rent),
                   by = "GEOID")
      # Rent-to-income ratio per county
      df %>% mutate(Ratio = Rent / Income) -> df_ratio
      # NOTE(review): Ratio is copied back by position; this assumes
      # inner_join preserved the row order of df_median_income — verify.
      df_median_income$Ratio <- df_ratio$Ratio
      # Choropleth of the ratio (purple scale)
      df_median_income %>% ggplot(aes(fill = Ratio, color = Ratio)) +
        geom_sf() + scale_fill_gradient(low = "white", high = "#440042") + scale_color_gradient(low = "white", high = "#440042") +
        ggtitle("Ratio of Median Gross Rent to Median Household Income") + guides(
          fill = guide_legend(title = "Ratio of Median Gross Rent to Median Household Income"),
          colour = guide_legend(title = "Ratio of Median Gross Rent to Median Household Income")
        ) + theme_bw()
    }
  })
}
shinyApp(ui = ui, server = server)
|
63b42526747ea256dedb01434f0c97d71588656c
|
7204e3bfeea08327b4bda576b082b9dd5e254046
|
/buildpkg/.todolist.R
|
6f34453149374a62512baaecd004ac90a5261436
|
[] |
no_license
|
EpidemiologyDVM/duke-rnet-quick
|
fd2717c3146b2b4a38af494820bd28b1033a3d80
|
b979af770522aff93d736c0d0c4a9f0f7b17c0a7
|
refs/heads/main
| 2023-05-01T16:26:25.930895
| 2021-05-26T16:35:46
| 2021-05-26T16:35:46
| 371,102,574
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 343
|
r
|
.todolist.R
|
#To Do list:
# * Add threshold default/option for L1Selection for threshold selection
# * Complete vignettes
# * Create new package for creating heatmaps from lists of igraph objects.
# * add vertex names to layout matrix (possibly add this to igraph clone and create pull request)
# * add edge.subset and network arguments to heatmap function
|
df6434ad41afb63e43ad3add4c510f16c95e9dca
|
29585dff702209dd446c0ab52ceea046c58e384e
|
/EbayesThresh/R/ebayesthresh.R
|
60e4ef9c26eb0be3d0e819f56eb07ff0f9137c17
|
[] |
no_license
|
ingted/R-Examples
|
825440ce468ce608c4d73e2af4c0a0213b81c0fe
|
d0917dbaf698cb8bc0789db0c3ab07453016eab9
|
refs/heads/master
| 2020-04-14T12:29:22.336088
| 2016-07-21T14:01:14
| 2016-07-21T14:01:14
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,085
|
r
|
ebayesthresh.R
|
"ebayesthresh" <-
function(x, prior = "laplace", a = 0.5, bayesfac = FALSE, sdev = NA, verbose = FALSE,
	threshrule = "median")
{
	# Empirical Bayes thresholding of a data vector.
	#
	# Given a vector of data x, find the marginal maximum likelihood (MML)
	# estimator of the mixing weight w, and apply the chosen thresholding
	# rule using this weight.
	#
	# Arguments:
	#   x          numeric data vector.
	#   prior      prior family; only the first letter is inspected
	#              ("l" = laplace, "c" = cauchy, "m..." skips thresholds).
	#   a          scale factor of the laplace prior; if the prior is laplace
	#              and a = NA, the scale factor is also found by MML.
	#   bayesfac   for "hard"/"soft" rules, use the Bayes-factor threshold
	#              (TRUE) or the posterior-median threshold (FALSE).
	#   sdev       standard deviation of the data; if NA, it is estimated
	#              with mad(x, center = 0).
	#   verbose    if TRUE, return a list of diagnostics (including muhat);
	#              if FALSE, return only muhat.
	#   threshrule one of "median", "mean", "hard", "soft", "none"; with
	#              "none" only the parameters are worked out.
	#
	# find the standard deviation if necessary and estimate the parameters
	if(is.na(sdev)) sdev <- mad(x, center = 0)
	x <- x/sdev
	pr <- substring(prior, 1, 1)
	# NOTE: scalar && / || replace the original vectorized & / | inside
	# these scalar if() conditions (R >= 4.3 errors on length > 1 inputs).
	if((pr == "l") && is.na(a)) {
		pp <- wandafromx(x)
		w <- pp$w
		a <- pp$a
	}
	else w <- wfromx(x, prior = prior, a = a)
	# Thresholds are needed for non-"m..." priors, or whenever diagnostics
	# are requested.
	if(pr != "m" || verbose) {
		tt <- tfromw(w, prior = prior, bayesfac = bayesfac, a = a)
		tcor <- sdev * tt
	}
	if(threshrule == "median")
		muhat <- postmed(x, w, prior = prior, a = a)
	if(threshrule == "mean")
		muhat <- postmean(x, w, prior = prior, a = a)
	if(threshrule == "hard")
		muhat <- threshld(x, tt)
	if(threshrule == "soft")
		muhat <- threshld(x, tt, hard = FALSE)
	if(threshrule == "none") muhat <- NA
	#
	# Now return desired output, rescaled back to the original scale
	#
	muhat <- sdev * muhat
	if(!verbose)
		return(muhat)
	retlist <- list(muhat = muhat, x = x, threshold.sdevscale = tt,
		threshold.origscale = tcor, prior = prior, w = w, a = a,
		bayesfac = bayesfac, sdev = sdev, threshrule = threshrule)
	# Drop components that are not meaningful: the scale "a" for the cauchy
	# prior, and muhat when no thresholding was performed.  Removal is by
	# name instead of the fragile positional indices of the original.
	if(pr == "c")
		retlist <- retlist[names(retlist) != "a"]
	if(threshrule == "none")
		retlist <- retlist[names(retlist) != "muhat"]
	return(retlist)
}
|
75ac6d60a61df05f61ed55f197a636fd87fca038
|
c722e2f386fb7a59e62b1a3f8af961caf1c6404a
|
/R/sampler.R
|
1baa194b295a26e5c8557060b648f81134e1464c
|
[] |
no_license
|
jhmarcus/flashier
|
62f21d4c58285fe859af579aead1bcc87930453a
|
4e02017d0ecbb72e9c765958fa4cb4ce628af0d6
|
refs/heads/master
| 2020-05-21T02:04:50.219432
| 2019-02-22T22:30:50
| 2019-02-22T22:30:50
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,499
|
r
|
sampler.R
|
F.sampler <- function(flash) {
  # Returns a closure that draws posterior samples from the fitted flash
  # object.  The closure takes nsamp and returns a list of length nsamp;
  # each element is a per-dimension list of column-bound factor draws.
  #
  # Beware of unfulfilled promise leak.
  force(flash)
  return(function(nsamp) {
    # Get samples as list of dimensions with sublists of factors.
    samp <- rapply(all.post.samplers(flash),
                   function(f) do.call(f, list(nsamp = nsamp)),
                   how = "list")
    # Re-organize the list so that each element corresponds to a single
    # sample.  seq_len() replaces the original 1:n, which misbehaves
    # (yields c(1, 0)) when nsamp or the dimension count is zero.
    return(lapply(seq_len(nsamp), function(trial) {
      lapply(seq_len(get.dim(flash)),
             function(n) do.call(cbind,
                                 lapply(samp[[n]], function(k) k[trial, ])))
    }))
  })
}
all.post.samplers <- function(flash) {
  # Builds one posterior sampler per (dimension, factor) pair: the outer
  # list runs over dimensions, the inner over factors.  seq_len() replaces
  # the original 1:n, which misbehaves for empty fits (n == 0).
  return(lapply(seq_len(get.dim(flash)),
                function(n) lapply(seq_len(get.n.factors(flash)),
                                   function(k) post.sampler(flash, k, n))))
}
post.sampler <- function(flash, k, n) {
  # Returns a function that draws nsamp posterior samples of factor k
  # along dimension n of the flash fit.
  fctr <- extract.factor(flash, k)  # renamed: "factor" shadows base::factor
  if (is.zero(fctr)) {
    # A zeroed-out factor: every draw is a matrix of zeros.
    return(function(nsamp) {
      matrix(0, nrow = nsamp, ncol = get.dims(flash)[n])
    })
  }
  if (all.fixed(fctr, n)) {
    # Fully fixed loadings along this dimension: each draw repeats the
    # fixed values row-wise.
    return(function(nsamp) {
      matrix(get.fix.vals(flash, k),
             nrow = nsamp,
             ncol = get.dims(flash)[n],
             byrow = TRUE)
    })
  }
  # Otherwise delegate to the EBNM solver's posterior sampler.
  solve.ebnm(fctr, n, flash, return.sampler = TRUE)$post_sampler
}
|
98c5f9c112ed2d81748bcc385ba440cd6c4b07fa
|
f89eab046bf6363a5c30e0e9a13b15c0c2255a71
|
/RLM_2016_Indicator_Species_Analysis_2018.R
|
e111e6a7d4fe2dec73ed22d45bf262d7cb45f517
|
[] |
no_license
|
mallorymrice/Poc_2016
|
c2e6e8c61afc1bcd25e1315e3dd80aec75b13504
|
8a911dbfe3b9cc7cdae15d5dd45e3c9b678bde05
|
refs/heads/master
| 2020-04-23T05:27:40.821250
| 2019-08-15T20:30:08
| 2019-08-15T20:30:08
| 170,940,827
| 1
| 2
| null | null | null | null |
UTF-8
|
R
| false
| false
| 3,313
|
r
|
RLM_2016_Indicator_Species_Analysis_2018.R
|
#####################################################
##  Indicator Species Analysis for POC_2016  ##
##  November 20, 2018
##  By Rebecca Maher
####################################################
# NOTE(review): the original script began with rm(list = ls()), which wipes
# the user's entire workspace as a side effect; removed as unsafe practice.
library("phyloseq")
library("data.table")
library("plyr")
library("dplyr")
library("ggplot2")
library("reshape2")
library("indicspecies")
library("ggnetwork")
library("ape")
# Load the rarefied OTU table with mapping file with physiological data.
# load() restores the saved object (assumed to be named `qd`) into the
# workspace.  The original assigned load()'s return value -- a character
# vector of object NAMES -- to qd, clobbering the loaded data.
load("~/data/RLM_2016_phyloseq_object.RData")
# Subset phyloseq object to only include samples with a healing rate.
# (The comparison is against the literal string "NA" -- presumably missing
# healing rates are stored as text; confirm against the sample metadata.)
qd <- prune_samples(sample_data(qd)$healing != "NA", qd)
# Make a binary (presence/absence) table
scarred <- as.data.frame(t(otu_table(qd)))
scar <- as.data.frame(ifelse(scarred > 0, 1, 0))
mean(rowSums(scar))  # mean OTU richness per sample
# With the full table
binary <- as.data.frame(t(otu_table(qd)))
binary <- as.data.frame(ifelse(binary > 0, 1, 0))
# Named vector of nutrient treatment per sample, used as the cluster factor
v <- sample_data(qd)$nutrient
names(v) <- rownames(sample_data(qd))
# Run indicator species analysis
vals <- multipatt(scar, cluster = v, func = "r.g", control = how(nperm = 999))
summary(vals, indvalcomp = TRUE)
vals1 <- signassoc(scar, U = NULL, cluster = v, mode = 1, control = how(nperm = 999))
# NOTE(review): hard-coded absolute path; this `isa` is shadowed by the
# character vector defined below and appears unused.
isa <- read.csv(file = "/Users/Becca/Documents/isa.csv")
####################################################################################
# Figure: dot plot of indicator-OTU relative abundances
qd <- transform_sample_counts(qd, function(x) x / sum(x))  # turn into relative abundances
### Re-extract relative abundance data as community structure and taxonomic annotation
comm <- as.data.frame(as(object = phyloseq::otu_table(qd), Class = "matrix"))
tax <- as.data.frame(as(object = phyloseq::tax_table(qd), Class = "matrix"))
meta <- as.data.frame(as(object = phyloseq::sample_data(qd), Class = "matrix"))
data <- cbind(tax, comm)
### Melt the data.  Column indices below are positional (taxonomy ranks in
### 1-7, samples from column 8) -- verify if the taxonomy table changes.
data$otuid <- rownames(data)
data_melt <- melt(data[, c(8:36)], id = c("otuid"))  # melt by Family and all samples
tax_melt <- data[, c(3:5, 36)]
tax_melt$otuid <- rownames(tax_melt)
new_data_melt <- merge(data_melt, tax_melt)
meta_t <- meta[, c(1, 3)]
colnames(meta_t)[1] <- "variable"
temp <- merge(new_data_melt, meta_t)
# Indicator OTUs identified by the analysis above
isa <- c("321405","552866","1116027","267843","New.CleanUp.ReferenceOTU235","New.CleanUp.ReferenceOTU252",
         "916277","509548","New.CleanUp.ReferenceOTU19","706432","256116")
temp <- temp[temp$otuid %in% isa, ]
temp$Family <- gsub("f__", "", temp$Family)
# Column 6 is presumed to hold Family; OTUs without a family annotation are
# labelled at the class level instead -- confirm the column position.
temp[, 6][is.na(temp[, 6])] <- "Class: Alphaproteobacteria"
temp$otuid <- as.factor(temp$otuid)
levels(temp$otuid)
temp$otuid <- factor(temp$otuid, levels = c("321405","552866","1116027","267843","New.CleanUp.ReferenceOTU235","New.CleanUp.ReferenceOTU252",
                                            "916277","509548","New.CleanUp.ReferenceOTU19","706432","256116"))
# plot
colorblind_palette <- c("#999999", "#E69F00", "#56B4E9", "#ffffff", "#009E73", "#660066", "#FFFF00", "#0072B2", "#CC3300", "#CC79A7", "#000000")
p <- ggplot(temp, aes(variable, otuid)) +
  geom_point(aes(size = value, fill = Family), shape = 21) +
  facet_grid(. ~ nutrient, scales = "free", space = "free", switch = "y") +
  theme_facet() +  # NOTE(review): theme_facet() is not a ggplot2 export -- confirm where it is defined
  scale_fill_manual(values = colorblind_palette) +
  guides(fill = guide_legend(override.aes = list(size = 4))) +
  labs(fill = "Family", size = "Relative abundance")
p
|
dce834b0a4546ac5ef015eb8aae4066fe4b96250
|
0c3ad13ceb982ddff3c51818ce8e5d8807de9313
|
/R/day01/r5.R
|
40adbd0375d8cfdc2ed1a2e6bc4a4318234ddd61
|
[] |
no_license
|
HanHyunwoo/TIL
|
befc9ecae77d227832179b49a37248780558697e
|
cd3c49270ca20f3cb9f841b929490d2e9193bafd
|
refs/heads/master
| 2021-05-25T12:06:30.157595
| 2018-12-09T13:00:46
| 2018-12-09T13:00:46
| 127,377,631
| 5
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,598
|
r
|
r5.R
|
# Basic vector / matrix practice script.
# Comments translated from Korean to English; lines that errored in the
# original (noted inline) have been corrected.
c1 <- c(1:5)
print(c1)  # original printed `cs` before it existed
c2 <- c(6:10)
print(c2)
cs <- union(c1, c2)
print(cs)
cs <- setequal(c1, c2)  # FALSE: the two sets differ
print(cs)
5 %in% cs
5 - cs  # logical FALSE coerces to 0, so this is 5
result <- c(1:3) %in% cs
print(length(result))
c3 <- seq(1, 1000, 5)
m1 <- matrix(c(1, 2, 3, 4, 5, 6, 7, 8, 9), nrow = 3)
m2 <- matrix(c(1, 2, 3, 4, 5, 6, 7, 8, 9), ncol = 3, byrow = TRUE)
colnames(m1) <- c('c1', 'c2', 'c3')
rownames(m1) <- c('r1', 'r2', 'r3')  # original used `<=` (a comparison), not assignment
m1[1, 2]
m1[1, ]        # take row 1
m1[-2, ]       # everything except row 2
m1[c(1, 3), ]
m1[c(1:3), ]   # rows 1 through 3
m1[c(1, 2), 3]         # rows 1-2, column 3 only
m1[c(1, 2), c(2, 3)]   # rows 1-2, columns 2-3 only
m1['r2', ]     # original `m1('r2',)` used parentheses; indexing needs brackets
m1[c('r2', 'r3'), 'c3']
m1 * 5
m1 * m1
nrow(m1)  # number of rows
ncol(m1)  # number of columns
ccl <- m1[, 1]  # a single column drops to a 1-D vector
m1[, 1]
m5 <- matrix(c(80, 90, 70, 100, 80, 99, 78, 72, 90, 78, 82, 78, 99, 89, 78, 90), ncol = 4, byrow = TRUE)  # without byrow the data would fill column-wise
colnames(m5) <- c('ko', 'en', 'si', 'ma')
rownames(m5) <- c('kim', 'lee', 'hong', 'jang')
d <- dim(m5)  # dimensions of the object
students <- rowMeans(m5)  # per-student averages
subjects <- colMeans(m5)  # per-subject averages
avg <- ncol(m5)
m1[c(1:3), ]  # original indexed rows 1:4 of a 3-row matrix (out of bounds)
# original: result1 <- m5(mean(c(1:4),)) -- a matrix cannot be called; the
# intended per-row / per-column means follow:
result1 <- mean(m5[1, ])
result2 <- mean(m5[2, ])
result3 <- mean(m5[3, ])
result4 <- mean(m5[4, ])
result5 <- mean(m5[, 1])
result6 <- mean(m5[, 2])
result7 <- mean(m5[, 3])
result8 <- mean(m5[, 4])
apply(m5, 1, mean)  # same as result1..result4 in one call
apply(m5, 2, mean)  # same as result5..result8 in one call
# (A stray pair of backticks -- a parse error -- and `m5[, mean(c(1:4))]`,
# whose fractional index 2.5 silently truncates to column 2, were removed.)
|
e7668526938d32fdfad37679a0a39858f57f111a
|
ffdea92d4315e4363dd4ae673a1a6adf82a761b5
|
/data/genthat_extracted_code/umx/examples/umx_rename.Rd.R
|
2c24ba08b93004616352b97b7e0874207124863b
|
[] |
no_license
|
surayaaramli/typeRrh
|
d257ac8905c49123f4ccd4e377ee3dfc84d1636c
|
66e6996f31961bc8b9aafe1a6a6098327b66bf71
|
refs/heads/master
| 2023-05-05T04:05:31.617869
| 2019-04-25T22:10:06
| 2019-04-25T22:10:06
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 578
|
r
|
umx_rename.Rd.R
|
# Extracted examples for umx::umx_rename (renaming data.frame columns).
library(umx)
### Name: umx_rename
### Title: umx_rename
### Aliases: umx_rename
### ** Examples
# Re-name "cyl" to "cylinder"
x = mtcars
x = umx_rename(x, replace = c(cyl = "cylinder"))
# alternate style: give the old name and the replacement separately
x = umx_rename(x, old = c("disp"), replace = c("displacement"))
umx_check_names("displacement", data = x, die = TRUE)
# This will warn that "disp" does not exist (anymore)
x = umx_rename(x, old = c("disp"), replace = c("displacement"))
x = umx_rename(x, grep = "lacement", replace = "") # using grep to revert to disp
umx_names(x, "^d") # all names beginning with a d
|
c723a944840cd2426237db5461442031ff257708
|
661bc0fec7cf236dae9a7c0d1c90661e33f8f996
|
/extractDASSmetrics.R
|
9e2f228f51a5de6311e18a59d11f3270a8cebcb4
|
[] |
no_license
|
kinleyid/LimeSurveysDataExtraction
|
1048b541529d55916186d1c3af4efa59195bf306
|
b0574312402cb5608ec605f97edff29322e59c5b
|
refs/heads/master
| 2020-04-28T23:33:31.675912
| 2019-03-14T18:38:28
| 2019-03-14T18:38:28
| 175,659,168
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,400
|
r
|
extractDASSmetrics.R
|
extract.DASS.metrics <- function(full.path) {
  # Computes DASS-42 subscale scores from a CSV export of survey responses.
  #
  # full.path: path to a CSV whose column names embed "SQnnn" item markers
  #            (one column per questionnaire item, scored 0-3).
  # Returns a data.frame with one row per respondent and columns
  # "depression", "anxiety", "stress".  Missing items are counted as 0.
  data <- read.table(full.path,
                     header = TRUE,     # original used the unsafe T alias
                     sep = ',',
                     quote = '"')
  # Standard DASS-42 item assignments for each subscale.
  depression.questions <- c(3, 5, 10, 13, 16, 17, 21, 24, 26, 31, 34, 37, 38, 42)
  anxiety.questions <- c(2, 4, 7, 9, 15, 19, 20, 23, 25, 28, 30, 36, 40, 41)
  stress.questions <- c(1, 6, 8, 11, 12, 14, 18, 22, 27, 29, 32, 33, 35, 39)
  all.sets <- list(depression.questions,
                   anxiety.questions,
                   stress.questions)
  set.names <- c('depression',
                 'anxiety',
                 'stress')
  output.data <- data.frame(matrix(nrow = nrow(data), ncol = length(all.sets)))
  colnames(output.data) <- set.names
  for (curr.set.idx in seq_along(all.sets)) {  # seq_along replaces 1:length()
    curr.sums <- array(0, dim = nrow(data))
    curr.totals <- array(0, dim = nrow(data))  # max attainable score given missingness
    for (q.n in all.sets[[curr.set.idx]]) {
      # Locate the response column whose name contains the zero-padded item id.
      curr.resps <- data[, grep(sprintf('SQ%03d', q.n), names(data), value = TRUE)]
      curr.totals[!is.na(curr.resps)] <- curr.totals[!is.na(curr.resps)] + 3
      curr.resps[is.na(curr.resps)] <- 0  # treat missing items as 0
      curr.sums <- curr.sums + curr.resps
    }
    # curr.sums <- curr.sums / curr.totals # Normalize by missing data
    output.data[, set.names[curr.set.idx]] <- curr.sums
  }
  return(output.data)
}
|
ef5a741842d0e1b9b73f82f0ea7158c238b73fa6
|
a5de76ef9638938b93ac6046bdf5862e0435a19c
|
/home/www/app.R
|
0d67175c9fb75ca1a9c41c247ca90efde211977a
|
[] |
no_license
|
bolivarez9193/Gene-Web-App
|
33f7168ae509be57cbfcf4357b07bc23b8c7a1e4
|
1ee3416357f91d518afb059d7827c062ac5630fc
|
refs/heads/master
| 2021-01-17T09:51:29.818651
| 2016-05-14T03:21:50
| 2016-05-14T03:21:50
| 55,119,657
| 0
| 1
| null | 2016-04-13T05:17:13
| 2016-03-31T04:13:35
|
R
|
UTF-8
|
R
| false
| false
| 68
|
r
|
app.R
|
#setwd("C:/Users/bolivarez9193/Documents/GUI_Code")
#runApp('/main')
|
89162f2c57d46c4e677b44e9dce5a7d48d58c196
|
deb8293b706ba213c330d43ee2e547227c5365a2
|
/BSCOV/man/BSCOV.Rd
|
f596b85e588c4a2af2b4ebac1dfbe868bf37e42c
|
[] |
no_license
|
markov10000/BSCOV
|
c6c9acea1bb091e16435159c781548fa0c5d8ddb
|
87cc5e2c914b0b3528274a9585a2aa1ac9dfae00
|
refs/heads/master
| 2023-08-23T17:54:23.949831
| 2021-10-23T22:14:09
| 2021-10-23T22:14:09
| 277,444,752
| 5
| 1
| null | null | null | null |
UTF-8
|
R
| false
| true
| 3,748
|
rd
|
BSCOV.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/BSCOV.R
\name{BSCOV}
\alias{BSCOV}
\title{Using Wild Sparsified Binary Segmentation to find change points in the second-order structure of both factors and idiosyncratic errors.}
\usage{
BSCOV(
x,
normalisation = FALSE,
r = NULL,
bn.op = 2,
sig.lev = 1,
dw = NULL,
rule = NULL,
SN.op = 0,
refact.op = 0,
common.norm = 2,
idio.norm = 2,
common.norm.thr = 0,
idio.norm.thr = Inf,
norm.op = FALSE,
SSIC.slack = c(1/2, 1/2),
max.q = NULL,
WBS = 1,
SBS = 1,
M = 500,
input.op = 1,
idio.diag = FALSE,
do.parallel = TRUE,
no.proc = 2,
setseed = 0
)
}
\arguments{
\item{x}{input time series matrix, with each row representing a time series}
\item{normalisation}{if \eqn{normalisation=TRUE}, normalized the input data}
\item{r}{the number of factors, if \eqn{r=NULL}, screening over a range of factor number candidates is performed as described in the paper}
\item{bn.op}{an index number for the information criterion-based estimator of Bai and Ng (2002) for the number of factors}
\item{sig.lev}{not used now}
\item{dw}{trims off the interval of consideration in the binary segmentation algorithm and determines the minimum length of a stationary segment; if dw=NULL, a default value is chosen as described in the Appendix of Barigozzi, Cho & Fryzlewicz (2016)}
\item{rule}{the depth of a binary tree for change-point analysis, see the Appendix of Barigozzi, Cho & Fryzlewicz (2016)}
\item{SN.op}{if \eqn{SN.op==1} Self Normalized; \eqn{SN.op==2} mean of absolute; \eqn{SN.op==3} \eqn{mad(abs(diff(x)-mad(diff(x))))} ...}
\item{refact.op}{if \eqn{refact.op=TRUE}, redo the PCA on each segment after the breaks in the factors are found}
\item{common.norm}{norm used in the CUSUM statistics aggregation of factors}
\item{idio.norm}{norm used in the CUSUM statistics aggregation of idiosyncratic errors}
\item{common.norm.thr}{threshold used in the CUSUM statistics of factors}
\item{idio.norm.thr}{threshold used in the CUSUM statistics of idiosyncratic errors}
\item{norm.op}{if \eqn{idio.diag=TRUE} the threshold is multiplied by \eqn{((e-s+1)/T)^0.5}}
\item{SSIC.slack}{positive constant used in the SSIC}
\item{max.q}{the maximum number of factors, if \eqn{max.q=NULL}, a default value is chosen as described in the paper}
\item{WBS}{Wild Binary Segmentation}
\item{SBS}{Sparsified Binary Segmentation}
\item{M}{the number of random intervals}
\item{input.op}{options for data transformation. if \eqn{input.op=1}, second moments, if \eqn{input.op=2}, square of difference and square of sum, if \eqn{input.op=3}, wavelet transformation}
\item{idio.diag}{if \eqn{idio.diag=TRUE}, only the diagonal wavelet-transform is employed in order to generate the panel of statistics from the idiosyncratic components}
\item{do.parallel}{if \eqn{do.parallel=TRUE}, a set of copies of R running in parallel are created and used for bootstrap procedure}
\item{no.proc}{sets the number of processes to be spawned when do.parallel=TRUE}
\item{setseed}{sets the random seed}
}
\value{
change points
\itemize{
\item{"gfm"}{information of factor model}
\item{"para"}{Parameters used in the algorithm}
\item{"hat.vep"}{estimate of idiosyncratic components}
\item{"common.seg.res"}{information of common component breaks}
\item{"idio.seg.res"}{information of idiosyncratic component breaks}
\item{"common.est.cps"}{location of common component breaks}
\item{"idio.est.cps"}{location of idiosyncratic component breaks}
}
}
\description{
Using Wild Sparsified Binary Segmentation to find change points in the second-order structure of both factors and idiosyncratic errors.
}
\author{
Yu-Ning Li \email{yl3409@york.ac.uk}
}
|
8e49fd84c0a0cdc3ba51fece7422f44d98517ce9
|
65c14e4ae3f3f2a31979b6b1b4925e09a3d4a9c3
|
/code/explore.R
|
fd978edd59b7fc798e369fa56b4ee08d8e883692
|
[] |
no_license
|
michaellevy/obesity-networks
|
157c97b03248e72a3ff1e1dd152e740b65181eac
|
88bf228cd31559ac0569c8b6d9829da1f9dd0ca4
|
refs/heads/master
| 2021-01-09T06:50:10.875760
| 2016-12-29T22:08:33
| 2016-12-29T22:08:33
| 67,002,138
| 0
| 1
| null | null | null | null |
UTF-8
|
R
| false
| false
| 39,400
|
r
|
explore.R
|
# --- Setup: packages, data, network construction, community detection -------
# (Assignments converted from `=` to the conventional `<-`; behavior unchanged.)
library(dplyr)
library(statnet)
library(ggplot2)
library(stargazer)
el <- read.csv("data/Edgelist_8.8.csv")
att <- read.csv("data/NodeAttributes2012_8.8.csv")
set.seed(510)
# statnet's edgelist import functions are awful. Let's use igraph's instead,
# and immediately convert to statnet with the intergraph package.
# We read the vertex attributes in here too.
n <- intergraph::asNetwork(igraph::graph.data.frame(el, vertices = att))
# Let's try to detect communities algorithmically so we don't have to do it subjectively.
communities <- igraph::cluster_edge_betweenness(intergraph::asIgraph(n))
# Get the community of each vertex and assign as vertex attribute:
n %v% "ebCommunity" <- as.integer(igraph::membership(communities))
vCols <- RColorBrewer::brewer.pal(max(n %v% "ebCommunity"), "Accent") # Nice colors for plot
vCols <- vCols[n %v% "ebCommunity"]
png("results/netPlot.png", height = 800, width = 800)
plot(n
     , displaylabels = TRUE
     , vertex.col = vCols
     # , vertex.cex = log(degree(n, cmode = "indegree") + 1) + .5
     , vertex.cex = ((n %v% "bmi.percentile")^-1 * 25)^-1
     , label.cex = 1.2
     , label.pos = 6
     , vertex.sides = c(4, 50)[as.factor(n %v% "Ethnicity")]
     , main = "Nodes are colored by edge-betweenness community membership.\nShape reflects ethnicity and size BMI percentile"
)
dev.off()
# Looking at this, I think perhaps we want differential homophily in communities 1 and 2 and none elsewhere.
# Implement this in model 18.
# Or, make everyone not in 1 or 2 in a 3rd community and have uniform homophily across all three.
# Model comparison (see 17.8 vs 18.1) favors the former.
# While I've got my head around it:
## nodematch("comm12", diff = FALSE) makes three communities, all share a homophily effect
## nodematch("ebCommunity", diff = FALSE, keep = 1:2) makes three communities, the first two share a homophily effect
n %v% "comm12" <- n %v% "ebCommunity"
set.vertex.attribute(n, "comm12", 3, which(n %v% "ebCommunity" > 2)) # Combine communities 3-7
# plot(n, vertex.col = "comm12", vertex.cex = 3) # Make sure that worked right
# Set unobserved ties (from non-participants) to NA.
# Unfortunately, clustering algorithms don't handle missingness, so they have to be done with these assumed to be 0s
n[n %v% "net.data" == 0, ] <- NA
# Examine distribution of bmi%ile and weight statuses
ggplot(att, aes(x = bmi.percentile)) +
  geom_density(adjust = .2, fill = "gray") +
  geom_rug(sides = "b", aes(color = weight.status.3),
           data = data.frame(bmi.percentile = att$bmi.percentile + rnorm(nrow(att), 0, .5),
                             weight.status.3 = att$weight.status.3)) +
  theme(legend.position = c(0, 1), legend.justification = c(0, 1))
ggsave("results/BMI-distribution.png", width = 5, height = 4)
# Looks like the two-class division is reasonable. Overweight/obese divides similar girls.
# Let's try to fit some ERGMs
# --- Model search m1-m17.8: structural terms, then BMI / weight-status terms
# (Assignments converted from `=` to the conventional `<-`; behavior unchanged.)
m1 <- ergm(n ~ edges + mutual)
m2 <- ergm(n ~ edges + mutual + gwidegree(.5, fixed = TRUE))
m2.5 <- ergm(n ~ edges + mutual + gwodegree(.5, fixed = TRUE))
m3 <- ergm(n ~ edges + mutual + gwidegree(.5, fixed = TRUE) + gwodegree(.5, fixed = TRUE))
m4 <- ergm(n ~ edges + mutual +
             gwidegree(.5, fixed = TRUE) + gwodegree(.5, fixed = TRUE) +
             nodeicov("bmi.percentile") + nodeocov("bmi.percentile"))
m4.5 <- ergm(n ~ edges + mutual +
               nodeicov("bmi.percentile") + nodeocov("bmi.percentile"))
stargazer(m1, m2, m2.5, m3, m4, m4.5, type = "text")
# Strong evidence for reciprocity and a popularity effect.
# gwod controls for differential nominating, not clear whether it matters
# May be a positive relationship between bmi%ile and popularity, but it's collinear with a straight popularity effect (gwid)
# No differential nomination tendency with bmi%ile
# Let's add homophily by bmi%ile
m5 <- ergm(n ~ edges + mutual +
             gwidegree(.5, fixed = TRUE) + gwodegree(.5, fixed = TRUE) +
             nodeicov("bmi.percentile") + nodeocov("bmi.percentile") +
             absdiff("bmi.percentile"))
# Sampler didn't mix.
m6 <- ergm(n ~ edges + mutual +
             nodeicov("bmi.percentile") + nodeocov("bmi.percentile") +
             absdiff("bmi.percentile"))
m6.5 <- ergm(n ~ edges + mutual + gwidegree(.5, fixed = TRUE) +
               nodeicov("bmi.percentile") + nodeocov("bmi.percentile") +
               absdiff("bmi.percentile"))
stargazer(m6, m6.5, type = "text")
m7 <- ergm(n ~ edges + mutual + gwidegree(.5, fixed = TRUE) + gwesp(.25, fixed = TRUE) +
             nodeicov("bmi.percentile") + nodeocov("bmi.percentile") +
             absdiff("bmi.percentile"))
# Unconverged, singular Hessian
# Try categorical weight status instead of bmi%ile
m8 <- ergm(n ~ edges + mutual + gwidegree(.5, fixed = TRUE) +
             nodeifactor("weight.status.2") + nodeofactor("weight.status.2"))
# Add uniform homophily
m9 <- ergm(n ~ edges + mutual + gwidegree(.5, fixed = TRUE) +
             nodeifactor("weight.status.2") + nodeofactor("weight.status.2") +
             nodematch("weight.status.2", diff = FALSE))
# And differential homophily
m10 <- ergm(n ~ edges + mutual + gwidegree(.5, fixed = TRUE) +
              nodeifactor("weight.status.2") + nodeofactor("weight.status.2") +
              nodematch("weight.status.2", diff = TRUE))
# mcmc.diagnostics(m10) # Poor convergence. Overspecified, I think.
# How about combining the node-in and node-out factors?
m10.1 <- ergm(n ~ edges + mutual + gwidegree(.5, fixed = TRUE) +
                nodefactor("weight.status.2") +
                nodematch("weight.status.2", diff = TRUE))
# Poor convergence
# mcmc.diagnostics(m10.1) # Not sure why this is performing poorly. Must be overspecd
gof10.1 <- gof(m10.1)
par(mfrow = c(2, 2))
plot(gof10.1) # Not getting transitivity.
summary(m10.1)
m11 <- ergm(n ~ edges + mutual + gwidegree(.5, fixed = TRUE) +
              gwesp(.1, fixed = TRUE) +
              nodefactor("weight.status.2") +
              nodematch("weight.status.2", diff = TRUE))
# MCMC not mixing. Maybe the nodefactor and gwi popularity effects are colinear. Try ditching gwid
m12 <- ergm(n ~ edges + mutual + gwesp(.1, fixed = TRUE) +
              nodeifactor("weight.status.2") + nodeofactor("weight.status.2") +
              nodematch("weight.status.2", diff = TRUE),
            control = control.ergm(MCMC.samplesize = 1e4))
# Really is computationally singular. Don't see colinearity in params though.
summary(m12)
m12$covar
plot(gof(m12))
# Maybe with three weight categories?
m13 <- ergm(n ~ edges + mutual + gwidegree(.5, fixed = TRUE) +
              nodefactor("weight.status.3") +
              nodematch("weight.status.3", diff = FALSE))
summary(m13)
m14 <- ergm(n ~ edges + mutual + gwidegree(.5, fixed = TRUE) +
              gwesp(.2, fixed = TRUE) +
              nodefactor("weight.status.3") +
              nodematch("weight.status.3", diff = FALSE))
# Nope
m13.1 <- ergm(n ~ edges + mutual + gwidegree(.5, fixed = TRUE) +
                nodefactor("weight.status.3") +
                nodematch("weight.status.3", diff = TRUE))
# Aha! Can get differential homophily, just not with GWESP.
gof13.1 <- gof(m13.1)
plot(gof13.1) # Really missing those ESPs though.
m14.1 <- ergm(n ~ edges + mutual + gwidegree(.5, fixed = TRUE) +
                gwesp(.2, fixed = TRUE) +
                nodefactor("weight.status.3") +
                nodematch("weight.status.3", diff = TRUE))
# Of course, nope.
m15 <- ergm(n ~ edges + mutual + gwidegree(.5, fixed = TRUE) +
              gwesp(0, fixed = TRUE) +
              nodefactor("weight.status.3") +
              nodematch("weight.status.3", diff = TRUE))
# nope
m16 <- ergm(n ~ edges + mutual + gwidegree(.5, fixed = TRUE) +
              nodematch("comm12", diff = TRUE) +
              nodefactor("weight.status.2") +
              nodematch("weight.status.2", diff = TRUE))
# nope
m16.1 <- ergm(n ~ edges + mutual +
                nodematch("comm12", diff = TRUE) +
                nodefactor("weight.status.2") +
                nodematch("weight.status.2", diff = TRUE))
# nope
m17 <- ergm(n ~ edges + mutual + gwidegree(.5, fixed = TRUE) +
              gwesp(.2, fixed = TRUE) +
              nodematch("comm12", diff = TRUE) +
              nodefactor("weight.status.2") +
              nodematch("weight.status.2", diff = TRUE))
# nope
gof17 <- gof(m17)
par(mfrow = c(2,2))
plot(gof17)
mcmc.diagnostics(m17)
m17.1 <- ergm(n ~ edges + mutual + gwidegree(.5, fixed = TRUE) +
                gwesp(.2, fixed = TRUE) +
                nodematch("comm12", diff = TRUE) +
                nodefactor("weight.status.2") +
                nodematch("weight.status.2", diff = FALSE))
m17.2 <- ergm(n ~ edges + mutual + gwidegree(.5, fixed = TRUE) +
                gwesp(.2, fixed = TRUE) +
                nodematch("comm12", diff = FALSE) +
                nodefactor("weight.status.2") +
                nodematch("weight.status.2", diff = TRUE))
# nope
m17.3 <- ergm(n ~ edges + mutual + gwidegree(.5, fixed = TRUE) +
                gwesp(.2, fixed = TRUE) +
                nodematch("comm12", diff = FALSE) +
                nodematch("weight.status.2", diff = TRUE))
summary(m17.3)
gof17.3 <- gof(m17.3)
plot(gof17.3)
m17.4 <- ergm(n ~ edges + mutual + gwidegree(.5, fixed = TRUE) +
                gwesp(.2, fixed = TRUE) +
                nodematch("comm12", diff = TRUE) +
                nodematch("weight.status.2", diff = TRUE))
# nope. weird: no warning, but nope.
m17.5 <- ergm(n ~ edges + mutual +
                gwidegree(.5, fixed = TRUE) +
                gwodegree(.5, fixed = TRUE) +
                gwesp(.2, fixed = TRUE) +
                nodematch("comm12", diff = FALSE) +
                nodematch("weight.status.2", diff = TRUE))
summary(m17.5)
gof17.5 <- gof(m17.5)
plot(gof17.5)
m17.6 <- ergm(n ~ edges + mutual + isolates +
                gwidegree(.5, fixed = TRUE) +
                gwodegree(.5, fixed = TRUE) +
                gwesp(.2, fixed = TRUE) +
                nodematch("comm12", diff = FALSE) +
                nodematch("weight.status.2", diff = TRUE))
summary(m17.6)
gof17.6 <- gof(m17.6)
plot(gof17.6)
m17.7 <- ergm(n ~ edges + mutual + isolates +
                gwidegree(.5, fixed = TRUE) +
                gwodegree(.5, fixed = TRUE) +
                gwesp(.5, fixed = TRUE) +
                nodematch("comm12", diff = TRUE) +
                nodematch("weight.status.2", diff = TRUE))
summary(m17.7)
# nope
summary(n ~ edges + nodefactor("Ethnicity") + nodematch("Ethnicity", TRUE))
table(n %v% "Ethnicity", n %v% "comm12")
m17.8 <- ergm(n ~ edges + mutual + isolates +
                gwidegree(.5, fixed = TRUE) +
                gwodegree(.5, fixed = TRUE) +
                gwesp(.2, fixed = TRUE) +
                nodematch("Ethnicity", diff = FALSE) +
                nodematch("comm12", diff = FALSE) +
                nodematch("weight.status.2", diff = TRUE))
summary(m17.8)
gof17.8 <- gof(m17.8)
plot(gof17.8)
# load("models/ergmsGOFsetc.RData") # Everything to this point
things <- structure(lapply(ls(), get), names = ls())
models <- things[sapply(things, class) == "ergm"]
# Keep just the models that converged:
models <- models[sapply(models, function(m) !all(is.na(m[["est.cov"]])))]
# Order by model-name number
models <- models[order(as.numeric(sapply(names(models), function(x) substr(x, 2, nchar(x)))))]
stargazer(models, type = "text", column.labels = names(models))
# m17.8 minimizes A/BIC. Good! Has differential homophily by weight status, but no node-factors
summary(m17.8)
m18 = ergm(n ~ edges + mutual + isolates +
gwidegree(.5, fixed = TRUE) +
gwodegree(.5, fixed = TRUE) +
gwesp(.2, fixed = TRUE) +
nodematch("Ethnicity", diff = FALSE) +
nodematch("ebCommunity", diff = TRUE, keep = 1:2) +
nodematch("weight.status.2", diff = TRUE))
# nope
m18.1 = ergm(n ~ edges + mutual + isolates +
gwidegree(.5, fixed = TRUE) +
gwodegree(.5, fixed = TRUE) +
gwesp(.2, fixed = TRUE) +
nodematch("Ethnicity", diff = FALSE) +
nodematch("ebCommunity", diff = FALSE, keep = 1:2) +
nodematch("weight.status.2", diff = TRUE))
summary(m18.1)
stargazer(m17.8, m18.1, type = "text")
# xIC slightly prefers not having homophily in the "third community"
# Want:
## bump gwesp alpha to get 2-esps
## add nodefactor for weight status
## same for ethnicity
## Keep isolates?
## Need endo communities?
## Add physical activity
m19 = ergm(n ~ edges + mutual + isolates +
gwidegree(.5, fixed = TRUE) +
gwodegree(.5, fixed = TRUE) +
gwesp(.5, fixed = TRUE) +
nodematch("Ethnicity", diff = FALSE) +
nodematch("comm12", diff = FALSE) +
nodematch("weight.status.2", diff = TRUE))
gof19 = gof(m19)
par(mfrow = c(2, 2))
plot(gof19) # Really helps with the 1 vs 2 ESPs. Gets the upper distribution too. Nice.
stargazer(m17.8, m18.1, m19, type = "text")
par(mfcol = c(2, 4))
plot(gof17.8)
plot(gof19)
# Hmm, bigger gwesp-alpha helps with ESPs but seems to hurt with degree dists. And xIC favors smaller.
m20 = ergm(n ~ edges + mutual +
gwidegree(.5, fixed = TRUE) +
gwodegree(.5, fixed = TRUE) +
gwesp(.5, fixed = TRUE) +
nodematch("Ethnicity", diff = FALSE) +
nodematch("comm12", diff = FALSE) +
nodematch("weight.status.2", diff = TRUE))
stargazer(m17.8, m19, m20, type = "text")
m17.9 = ergm(n ~ edges + mutual +
gwidegree(.5, fixed = TRUE) +
gwodegree(.5, fixed = TRUE) +
gwesp(.2, fixed = TRUE) +
nodematch("Ethnicity", diff = FALSE) +
nodematch("comm12", diff = FALSE) +
nodematch("weight.status.2", diff = TRUE))
m21 = ergm(n ~ edges + mutual +
gwidegree(.5, fixed = TRUE) +
gwodegree(.5, fixed = TRUE) +
gwesp(.5, fixed = TRUE) +
nodematch("Ethnicity", diff = FALSE) +
nodematch("ebCommunity", diff = FALSE, keep = 1:2) +
nodematch("weight.status.2", diff = TRUE))
m22 = ergm(n ~ edges + mutual +
gwidegree(.5, fixed = TRUE) +
gwodegree(.5, fixed = TRUE) +
gwesp(.5, fixed = TRUE) +
nodematch("Ethnicity", diff = FALSE) +
nodematch("ebCommunity", diff = TRUE, keep = 1:2) +
nodematch("weight.status.2", diff = TRUE))
# nope
m23 = ergm(n ~ edges + mutual +
gwidegree(.5, fixed = TRUE) +
gwodegree(.5, fixed = TRUE) +
gwesp(.2, fixed = TRUE) +
nodematch("Ethnicity", diff = FALSE) +
nodematch("comm12", diff = TRUE) +
nodematch("weight.status.2", diff = TRUE))
# nope
stargazer(m17.8, m18.1, m19, m20, m21, type = "text")
# Okay, those are small xIC differences. I like nodematch("ebCommunity", diff = FALSE, keep = 1:2):
# Found two major clusters, let there be one homophily force acting within but not outside them.
# How about GWESP alpha and isoaltes for that:
m24 = ergm(n ~ edges + mutual + isolates +
gwidegree(.5, fixed = TRUE) +
gwodegree(.5, fixed = TRUE) +
gwesp(.5, fixed = TRUE) +
nodematch("Ethnicity", diff = FALSE) +
nodematch("ebCommunity", diff = FALSE, keep = 1:2) +
nodematch("weight.status.2", diff = TRUE))
stargazer(m18.1, m21, m24, type = "text")
par(mfrow = c(2, 2))
gof21 = gof(m21)
plot(gof21)
m25 = ergm(n ~ edges + mutual + isolates +
gwidegree(1, fixed = TRUE) +
gwodegree(.5, fixed = TRUE) +
gwesp(.5, fixed = TRUE) +
nodematch("Ethnicity", diff = FALSE) +
nodematch("ebCommunity", diff = FALSE, keep = 1:2) +
nodematch("weight.status.2", diff = TRUE))
gof25 = gof(m25)
plot(gof25) # Increasing gwd-decay and adding isolates doesn't help degdist or xIC
stargazer(m21, m25, type = "text")
# Probe weight-status and ethnicity MAIN effects on top of the homophily terms.
m26 = ergm(n ~ edges + mutual +
             gwidegree(.5, fixed = TRUE) +
             gwodegree(.5, fixed = TRUE) +
             gwesp(.5, fixed = TRUE) +
             nodefactor("weight.status.2") +
             nodematch("Ethnicity", diff = FALSE) +
             nodematch("ebCommunity", diff = FALSE, keep = 1:2) +
             nodematch("weight.status.2", diff = TRUE))
# nope. computationally singular. weight status main effect seems to be redundent
# m27: weight status as a continuous covariate (BMI percentile) instead.
m27 = ergm(n ~ edges + mutual +
             gwidegree(.5, fixed = TRUE) +
             gwodegree(.5, fixed = TRUE) +
             gwesp(.5, fixed = TRUE) +
             nodecov("bmi.percentile") +
             nodematch("Ethnicity", diff = FALSE) +
             nodematch("ebCommunity", diff = FALSE, keep = 1:2) +
             nodematch("weight.status.2", diff = TRUE))
stargazer(m21, m27, type = "text")
# So besides the overspecification problem above, there doesn't seem to be any
# main effect of weight status (here as bmi%ile) and including it doesn't improve
# the model per xIC.
m28 = ergm(n ~ edges + mutual +
             gwidegree(.5, fixed = TRUE) +
             gwodegree(.5, fixed = TRUE) +
             gwesp(.5, fixed = TRUE) +
             nodefactor("Ethnicity") +
             nodematch("Ethnicity", diff = FALSE) +
             nodematch("ebCommunity", diff = FALSE, keep = 1:2) +
             nodematch("weight.status.2", diff = TRUE))
stargazer(m21, m28, type = "text")
# Maybe Latina girls form more friends, but it's not strong and xIC slightly prefers without it
# m29: same as m28 but dropping the endogenous-community homophily term.
m29 = ergm(n ~ edges + mutual +
             gwidegree(.5, fixed = TRUE) +
             gwodegree(.5, fixed = TRUE) +
             gwesp(.5, fixed = TRUE) +
             nodefactor("Ethnicity") +
             nodematch("Ethnicity", diff = FALSE) +
             # nodematch("ebCommunity", diff = FALSE, keep = 1:2) +
             nodematch("weight.status.2", diff = TRUE))
stargazer(m21, m28, m29, type = "text")
# Ha! xiC likes having endo-defined communities, for obvious reasons, and ethnicity doesn't replace them.
# m21 still looks the best, though a few others are in the same neighborhood.
# Oh, what about age homophily?
# Age effects: m21.1 adds age homophily (absdiff) to the m21 specification.
m21.1 = ergm(n ~ edges + mutual +
             gwidegree(.5, fixed = TRUE) +
             gwodegree(.5, fixed = TRUE) +
             gwesp(.5, fixed = TRUE) +
             nodematch("Ethnicity", diff = FALSE) +
             nodematch("ebCommunity", diff = FALSE, keep = 1:2) +
             nodematch("weight.status.2", diff = TRUE) +
             absdiff("T1Age"))
stargazer(m21, m21.1, type = 'text') # Yeah, age homophily is important. Good.
# To what extent are the communities age separation?
ggplot(data.frame(com = n %v% "ebCommunity",
                  age = n %v% "T1Age") %>%
         filter(com < 3),
       aes(x = as.factor(com), y = age)) +
  geom_boxplot(fill = "gray") +
  xlab("Edge-betweenness commmunity membership")
ggsave("results/communities-vs-age.png", height = 4, width = 5)
# To a large extent. Does xIC prefer just age homophily with endo-communities?
# No: It likes having both. There is overlap of age in the communities and they have separate effects.
# m21.2: age homophily WITHOUT the community term.
m21.2 = ergm(n ~ edges + mutual +
             gwidegree(.5, fixed = TRUE) +
             gwodegree(.5, fixed = TRUE) +
             gwesp(.5, fixed = TRUE) +
             nodematch("Ethnicity", diff = FALSE) +
             # nodematch("ebCommunity", diff = FALSE, keep = 1:2) +
             nodematch("weight.status.2", diff = TRUE) +
             absdiff("T1Age"))
# What about a main effect of age?
m21.3 = ergm(n ~ edges + mutual +
             gwidegree(.5, fixed = TRUE) +
             gwodegree(.5, fixed = TRUE) +
             gwesp(.5, fixed = TRUE) +
             nodematch("Ethnicity", diff = FALSE) +
             # nodematch("ebCommunity", diff = FALSE, keep = 1:2) +
             nodematch("weight.status.2", diff = TRUE) +
             nodecov("T1Age") +
             absdiff("T1Age"))
# m21.4: age main effect + age homophily + community homophily.
m21.4 = ergm(n ~ edges + mutual +
             gwidegree(.5, fixed = TRUE) +
             gwodegree(.5, fixed = TRUE) +
             gwesp(.5, fixed = TRUE) +
             nodematch("Ethnicity", diff = FALSE) +
             nodematch("ebCommunity", diff = FALSE, keep = 1:2) +
             nodematch("weight.status.2", diff = TRUE) +
             nodecov("T1Age") +
             absdiff("T1Age"))
stargazer(m21, m21.1, m21.2, m21.3, m21.4, type = "text")
# m21.4 is winner. Main effect of age and age-homophily beyond the community structure.
# AHA! And there was masking: Revealed a weight-status homophily effect!
# Let's make sure we can't have a weight main effect in there too.
m21.5 = ergm(n ~ edges + mutual +
             gwidegree(.5, fixed = TRUE) +
             gwodegree(.5, fixed = TRUE) +
             gwesp(.5, fixed = TRUE) +
             nodematch("Ethnicity", diff = FALSE) +
             nodematch("ebCommunity", diff = FALSE, keep = 1:2) +
             nodefactor("weight.status.2") +
             nodematch("weight.status.2", diff = TRUE) +
             nodecov("T1Age") +
             absdiff("T1Age"))
# nope, we can't.
# What about these camp activity teams?
# Team homophily: m21.6 adds a single (uniform) team-homophily term to m21.4.
m21.6 = ergm(n ~ edges + mutual +
             gwidegree(.5, fixed = TRUE) +
             gwodegree(.5, fixed = TRUE) +
             gwesp(.5, fixed = TRUE) +
             nodematch("Ethnicity", diff = FALSE) +
             nodematch("ebCommunity", diff = FALSE, keep = 1:2) +
             nodematch("weight.status.2", diff = TRUE) +
             nodecov("T1Age") +
             absdiff("T1Age") +
             nodematch("team", diff = FALSE))
# m21.7: differential team homophily (teams 2-4), dropping endo communities.
m21.7 = ergm(n ~ edges + mutual +
             gwidegree(.5, fixed = TRUE) +
             gwodegree(.5, fixed = TRUE) +
             gwesp(.5, fixed = TRUE) +
             nodematch("Ethnicity", diff = FALSE) +
             # nodematch("ebCommunity", diff = FALSE, keep = 1:2) +
             nodematch("weight.status.2", diff = TRUE) +
             nodecov("T1Age") +
             absdiff("T1Age") +
             nodematch("team", diff = TRUE, keep = 2:4))
summary(n ~ nodematch("team", diff = TRUE)) # No ties in team=3 so exclude that
# from the differential team-homophily estimate. This is unfortunate beacuse we'd
# like to keep that negative effect but fixing it to -Inf is harsh and wrecks xIC.
# Anyway, overspecified with differential homophily for the other three teams.
# Tried it without endo communities; still over-specd. So, stick with one
# team-homophily effect (m21.6).
stargazer(m21.4, m21.7, m21.6, type = "text")
# Add exercise and dietary habits
summary(n %v% "TotalPAChange") # Wow -- that's some variance! Nice.
# m30-m34: main effects of physical-activity measures (change, T2 total,
# T2 moderate/vigorous, T1 total, and T1 + change together).
m30 = ergm(n ~ edges + mutual +
             gwidegree(.5, fixed = TRUE) +
             gwodegree(.5, fixed = TRUE) +
             gwesp(.5, fixed = TRUE) +
             nodematch("Ethnicity", diff = FALSE) +
             nodematch("ebCommunity", diff = FALSE, keep = 1:2) +
             nodematch("weight.status.2", diff = TRUE) +
             nodecov("T1Age") +
             absdiff("T1Age") +
             nodematch("team", diff = FALSE) +
             nodecov("TotalPAChange"))
m31 = ergm(n ~ edges + mutual +
             gwidegree(.5, fixed = TRUE) +
             gwodegree(.5, fixed = TRUE) +
             gwesp(.5, fixed = TRUE) +
             nodematch("Ethnicity", diff = FALSE) +
             nodematch("ebCommunity", diff = FALSE, keep = 1:2) +
             nodematch("weight.status.2", diff = TRUE) +
             nodecov("T1Age") +
             absdiff("T1Age") +
             nodematch("team", diff = FALSE) +
             nodecov("TotalPA2"))
m32 = ergm(n ~ edges + mutual +
             gwidegree(.5, fixed = TRUE) +
             gwodegree(.5, fixed = TRUE) +
             gwesp(.5, fixed = TRUE) +
             nodematch("Ethnicity", diff = FALSE) +
             nodematch("ebCommunity", diff = FALSE, keep = 1:2) +
             nodematch("weight.status.2", diff = TRUE) +
             nodecov("T1Age") +
             absdiff("T1Age") +
             nodematch("team", diff = FALSE) +
             nodecov("MVPA2"))
m33 = ergm(n ~ edges + mutual +
             gwidegree(.5, fixed = TRUE) +
             gwodegree(.5, fixed = TRUE) +
             gwesp(.5, fixed = TRUE) +
             nodematch("Ethnicity", diff = FALSE) +
             nodematch("ebCommunity", diff = FALSE, keep = 1:2) +
             nodematch("weight.status.2", diff = TRUE) +
             nodecov("T1Age") +
             absdiff("T1Age") +
             nodematch("team", diff = FALSE) +
             nodecov("TotalPA1"))
m34 = ergm(n ~ edges + mutual +
             gwidegree(.5, fixed = TRUE) +
             gwodegree(.5, fixed = TRUE) +
             gwesp(.5, fixed = TRUE) +
             nodematch("Ethnicity", diff = FALSE) +
             nodematch("ebCommunity", diff = FALSE, keep = 1:2) +
             nodematch("weight.status.2", diff = TRUE) +
             nodecov("T1Age") +
             absdiff("T1Age") +
             nodematch("team", diff = FALSE) +
             nodecov("TotalPA1") +
             nodecov("TotalPAChange"))
stargazer(m21.4, m21.6, m30, m31, m32, m33, m34, type = "text")
# Total exercise, vigerous exercise, and change in exercise don't seem to matter much as main effects.
# What about homophily?
# m30.1-m34.1: same physical-activity measures as m30-m34, but as homophily
# (absdiff) terms instead of main effects.
m30.1 = ergm(n ~ edges + mutual +
             gwidegree(.5, fixed = TRUE) +
             gwodegree(.5, fixed = TRUE) +
             gwesp(.5, fixed = TRUE) +
             nodematch("Ethnicity", diff = FALSE) +
             nodematch("ebCommunity", diff = FALSE, keep = 1:2) +
             nodematch("weight.status.2", diff = TRUE) +
             nodecov("T1Age") +
             absdiff("T1Age") +
             nodematch("team", diff = FALSE) +
             absdiff("TotalPAChange"))
m31.1 = ergm(n ~ edges + mutual +
             gwidegree(.5, fixed = TRUE) +
             gwodegree(.5, fixed = TRUE) +
             gwesp(.5, fixed = TRUE) +
             nodematch("Ethnicity", diff = FALSE) +
             nodematch("ebCommunity", diff = FALSE, keep = 1:2) +
             nodematch("weight.status.2", diff = TRUE) +
             nodecov("T1Age") +
             absdiff("T1Age") +
             nodematch("team", diff = FALSE) +
             absdiff("TotalPA2"))
m32.1 = ergm(n ~ edges + mutual +
             gwidegree(.5, fixed = TRUE) +
             gwodegree(.5, fixed = TRUE) +
             gwesp(.5, fixed = TRUE) +
             nodematch("Ethnicity", diff = FALSE) +
             nodematch("ebCommunity", diff = FALSE, keep = 1:2) +
             nodematch("weight.status.2", diff = TRUE) +
             nodecov("T1Age") +
             absdiff("T1Age") +
             nodematch("team", diff = FALSE) +
             absdiff("MVPA2"))
m33.1 = ergm(n ~ edges + mutual +
             gwidegree(.5, fixed = TRUE) +
             gwodegree(.5, fixed = TRUE) +
             gwesp(.5, fixed = TRUE) +
             nodematch("Ethnicity", diff = FALSE) +
             nodematch("ebCommunity", diff = FALSE, keep = 1:2) +
             nodematch("weight.status.2", diff = TRUE) +
             nodecov("T1Age") +
             absdiff("T1Age") +
             nodematch("team", diff = FALSE) +
             absdiff("TotalPA1"))
m34.1 = ergm(n ~ edges + mutual +
             gwidegree(.5, fixed = TRUE) +
             gwodegree(.5, fixed = TRUE) +
             gwesp(.5, fixed = TRUE) +
             nodematch("Ethnicity", diff = FALSE) +
             nodematch("ebCommunity", diff = FALSE, keep = 1:2) +
             nodematch("weight.status.2", diff = TRUE) +
             nodecov("T1Age") +
             absdiff("T1Age") +
             nodematch("team", diff = FALSE) +
             absdiff("TotalPA1") +
             absdiff("TotalPAChange"))
stargazer(m30, m31, m32, m33, m34, m30.1, m31.1, m32.1, m33.1, m34.1, type = "text")
# No homophily there; tiny effects are just main effects recast. Just to be extra sure:
# m35: MVPA2 main effect AND homophily together.
m35 = ergm(n ~ edges + mutual +
             gwidegree(.5, fixed = TRUE) +
             gwodegree(.5, fixed = TRUE) +
             gwesp(.5, fixed = TRUE) +
             nodematch("Ethnicity", diff = FALSE) +
             nodematch("ebCommunity", diff = FALSE, keep = 1:2) +
             nodematch("weight.status.2", diff = TRUE) +
             nodecov("T1Age") +
             absdiff("T1Age") +
             nodematch("team", diff = FALSE) +
             nodecov("MVPA2") +
             absdiff("MVPA2"))
summary(m35) # Nothing there.
# What about diet?
# Quick exploratory scatterplots of the diet variables (fruit/veg vs unhealthy
# snack servings at T1/T2 and their changes).
dev.off()
par(mfrow = c(2, 2))
plot(n %v% "fvservt1", n %v% "usservt1")
plot(n %v% "fvservt2", n %v% "usservt2")
plot(n %v% "fvservt1", n %v% "fvservt2")
plot(n %v% "FVChange", n %v% "USChange")
# That outlier who was eating 12 unhealthy snacks a day at t1 might need to be dealt with, that point has a lot of leverage
# m36-m38: main effects of unhealthy snacks, healthy snacks, and both.
m36 = ergm(n ~ edges + mutual +
             gwidegree(.5, fixed = TRUE) +
             gwodegree(.5, fixed = TRUE) +
             gwesp(.5, fixed = TRUE) +
             nodematch("Ethnicity", diff = FALSE) +
             nodematch("ebCommunity", diff = FALSE, keep = 1:2) +
             nodematch("weight.status.2", diff = TRUE) +
             nodecov("T1Age") +
             absdiff("T1Age") +
             nodematch("team", diff = FALSE) +
             nodecov("usservt1")
)
m37 = ergm(n ~ edges + mutual +
             gwidegree(.5, fixed = TRUE) +
             gwodegree(.5, fixed = TRUE) +
             gwesp(.5, fixed = TRUE) +
             nodematch("Ethnicity", diff = FALSE) +
             nodematch("ebCommunity", diff = FALSE, keep = 1:2) +
             nodematch("weight.status.2", diff = TRUE) +
             nodecov("T1Age") +
             absdiff("T1Age") +
             nodematch("team", diff = FALSE) +
             nodecov("fvservt1")
)
m38 = ergm(n ~ edges + mutual +
             gwidegree(.5, fixed = TRUE) +
             gwodegree(.5, fixed = TRUE) +
             gwesp(.5, fixed = TRUE) +
             nodematch("Ethnicity", diff = FALSE) +
             nodematch("ebCommunity", diff = FALSE, keep = 1:2) +
             nodematch("weight.status.2", diff = TRUE) +
             nodecov("T1Age") +
             absdiff("T1Age") +
             nodematch("team", diff = FALSE) +
             nodecov("fvservt1") +
             nodecov("usservt1")
)
stargazer(m21.6, m36, m37, m38, type = "text")
# Maybe a little evidence for unhealthy eaters being a little more popular, but unclear
# Homophily in eating?
m39 = ergm(n ~ edges + mutual +
             gwidegree(.5, fixed = TRUE) +
             gwodegree(.5, fixed = TRUE) +
             gwesp(.5, fixed = TRUE) +
             nodematch("Ethnicity", diff = FALSE) +
             nodematch("ebCommunity", diff = FALSE, keep = 1:2) +
             nodematch("weight.status.2", diff = TRUE) +
             nodecov("T1Age") +
             absdiff("T1Age") +
             nodematch("team", diff = FALSE) +
             nodecov("usservt1") +
             absdiff("usservt1")
)
stargazer(m36, m39, type = "text")
# Hmm, minor evidence for homophily in unhealthy snacks.
m40 = ergm(n ~ edges + mutual +
             gwidegree(.5, fixed = TRUE) +
             gwodegree(.5, fixed = TRUE) +
             gwesp(.5, fixed = TRUE) +
             nodematch("Ethnicity", diff = FALSE) +
             nodematch("ebCommunity", diff = FALSE, keep = 1:2) +
             nodematch("weight.status.2", diff = TRUE) +
             nodecov("T1Age") +
             absdiff("T1Age") +
             nodematch("team", diff = FALSE) +
             nodecov("fvservt1") +
             absdiff("fvservt1") +
             nodecov("usservt1") +
             absdiff("usservt1")
)
stargazer(m36, m39, m40, type = "text")
# Similar story for healthy snacks, weaker but with similar (im)precision
# What if we combined them and called it the net healthy snack score?
# Derived nodal attribute: net healthy snacking = healthy minus unhealthy
# servings at T1. (Assigned onto the network via the %v% replacement form.)
n %v% "netSnackT1" = n %v% "fvservt1" - n %v% "usservt1"
m41 = ergm(n ~ edges + mutual +
             gwidegree(.5, fixed = TRUE) +
             gwodegree(.5, fixed = TRUE) +
             gwesp(.5, fixed = TRUE) +
             nodematch("Ethnicity", diff = FALSE) +
             nodematch("ebCommunity", diff = FALSE, keep = 1:2) +
             nodematch("weight.status.2", diff = TRUE) +
             nodecov("T1Age") +
             absdiff("T1Age") +
             nodematch("team", diff = FALSE) +
             nodecov("netSnackT1") +
             absdiff("netSnackT1")
)
stargazer(m21.6, m41, type = "text")
summary(m41)
# Similar story, prefered by xIC, but effect is still imprecise. May just be noisy effect.
# What about dietary change?
m42 = ergm(n ~ edges + mutual +
             gwidegree(.5, fixed = TRUE) +
             gwodegree(.5, fixed = TRUE) +
             gwesp(.5, fixed = TRUE) +
             nodematch("Ethnicity", diff = FALSE) +
             nodematch("ebCommunity", diff = FALSE, keep = 1:2) +
             nodematch("weight.status.2", diff = TRUE) +
             nodecov("T1Age") +
             absdiff("T1Age") +
             nodematch("team", diff = FALSE) +
             nodecov("netSnackT1") +
             absdiff("netSnackT1") +
             nodecov("USChange") +
             absdiff("USChange")
)
stargazer(m41, m42, type = "text")
# Interesting: Adding change in diet during camp clarified the homophily of dietary
# choices and overweight-homophily effects (but both only slightly). Can we use just
# unhealthy snacks instead of my created variable?
m43 = ergm(n ~ edges + mutual +
             gwidegree(.5, fixed = TRUE) +
             gwodegree(.5, fixed = TRUE) +
             gwesp(.5, fixed = TRUE) +
             nodematch("Ethnicity", diff = FALSE) +
             nodematch("ebCommunity", diff = FALSE, keep = 1:2) +
             nodematch("weight.status.2", diff = TRUE) +
             nodecov("T1Age") +
             absdiff("T1Age") +
             nodematch("team", diff = FALSE) +
             nodecov("usservt1") +
             absdiff("usservt1") +
             nodecov("USChange") +
             absdiff("USChange")
)
stargazer(m41, m42, m43, type = "text")
# It doesn't work as well. So, what about constructing a net-change statistic?
# Net snack score at T2, and its within-camp change.
n %v% "netSnackT2" = n %v% "fvservt2" - n %v% "usservt2"
n %v% "netSnackChange" = n %v% "netSnackT2" - n %v% "netSnackT1"
m44 = ergm(n ~ edges + mutual +
             gwidegree(.5, fixed = TRUE) +
             gwodegree(.5, fixed = TRUE) +
             gwesp(.5, fixed = TRUE) +
             nodematch("Ethnicity", diff = FALSE) +
             nodematch("ebCommunity", diff = FALSE, keep = 1:2) +
             nodematch("weight.status.2", diff = TRUE) +
             nodecov("T1Age") +
             absdiff("T1Age") +
             nodematch("team", diff = FALSE) +
             nodecov("netSnackT1") +
             absdiff("netSnackT1") +
             nodecov("netSnackChange") +
             absdiff("netSnackChange")
)
m45 = ergm(n ~ edges + mutual +
             gwidegree(.5, fixed = TRUE) +
             gwodegree(.5, fixed = TRUE) +
             gwesp(.5, fixed = TRUE) +
             nodematch("Ethnicity", diff = FALSE) +
             nodematch("ebCommunity", diff = FALSE, keep = 1:2) +
             nodematch("weight.status.2", diff = TRUE) +
             nodecov("T1Age") +
             absdiff("T1Age") +
             nodematch("team", diff = FALSE) +
             nodecov("netSnackChange") +
             absdiff("netSnackChange")
)
stargazer(m21.6, m39, m41, m42, m43, m44, m45, type = "text")
# Let's try adding physical activity back in on top of that:
# m46-m50: physical-activity terms layered on the netSnackChange spec.
m46 = ergm(n ~ edges + mutual +
             gwidegree(.5, fixed = TRUE) +
             gwodegree(.5, fixed = TRUE) +
             gwesp(.5, fixed = TRUE) +
             nodematch("Ethnicity", diff = FALSE) +
             nodematch("ebCommunity", diff = FALSE, keep = 1:2) +
             nodematch("weight.status.2", diff = TRUE) +
             nodecov("T1Age") +
             absdiff("T1Age") +
             nodematch("team", diff = FALSE) +
             nodecov("netSnackChange") +
             absdiff("netSnackChange") +
             nodecov("MVPA2") +
             absdiff("MVPA2")
)
summary(m46)
m47 = ergm(n ~ edges + mutual +
             gwidegree(.5, fixed = TRUE) +
             gwodegree(.5, fixed = TRUE) +
             gwesp(.5, fixed = TRUE) +
             nodematch("Ethnicity", diff = FALSE) +
             nodematch("ebCommunity", diff = FALSE, keep = 1:2) +
             nodematch("weight.status.2", diff = TRUE) +
             nodecov("T1Age") +
             absdiff("T1Age") +
             nodematch("team", diff = FALSE) +
             nodecov("netSnackChange") +
             absdiff("netSnackChange") +
             nodecov("TotalPAChange") +
             absdiff("TotalPAChange")
)
m48 = ergm(n ~ edges + mutual +
             gwidegree(.5, fixed = TRUE) +
             gwodegree(.5, fixed = TRUE) +
             gwesp(.5, fixed = TRUE) +
             nodematch("Ethnicity", diff = FALSE) +
             nodematch("ebCommunity", diff = FALSE, keep = 1:2) +
             nodematch("weight.status.2", diff = TRUE) +
             nodecov("T1Age") +
             absdiff("T1Age") +
             nodematch("team", diff = FALSE) +
             nodecov("netSnackChange") +
             absdiff("netSnackChange") +
             nodecov("MVPA1") +
             absdiff("MVPA1") +
             nodecov("MVPAChange") +
             absdiff("MVPAChange")
)
m49 = ergm(n ~ edges + mutual +
             gwidegree(.5, fixed = TRUE) +
             gwodegree(.5, fixed = TRUE) +
             gwesp(.5, fixed = TRUE) +
             nodematch("Ethnicity", diff = FALSE) +
             nodematch("ebCommunity", diff = FALSE, keep = 1:2) +
             nodematch("weight.status.2", diff = TRUE) +
             nodecov("T1Age") +
             absdiff("T1Age") +
             nodematch("team", diff = FALSE) +
             nodecov("netSnackChange") +
             absdiff("netSnackChange") +
             nodecov("MVPAChange") +
             absdiff("MVPAChange")
)
m50 = ergm(n ~ edges + mutual +
             gwidegree(.5, fixed = TRUE) +
             gwodegree(.5, fixed = TRUE) +
             gwesp(.5, fixed = TRUE) +
             nodematch("Ethnicity", diff = FALSE) +
             nodematch("ebCommunity", diff = FALSE, keep = 1:2) +
             nodematch("weight.status.2", diff = TRUE) +
             nodecov("T1Age") +
             absdiff("T1Age") +
             nodematch("team", diff = FALSE) +
             nodecov("netSnackChange") +
             absdiff("netSnackChange") +
             absdiff("MVPAChange")
)
stargazer(m45, m46, m47, m48, m49, m50, type = "text")
stargazer(m41, m45, m44, type = "text")
# Persist the whole session; the load() right after is redundant in a single
# run but lets the script resume from here later.
save.image("models/manyModels.RData")
load("models/manyModels.RData")
# Tidy the chosen model (m50), reorder rows, drop the statistic column, and
# attach conventional significance stars.
dd = broom::tidy(m50)[c(1:7, 10:12, 8:9, 13:15), -4]
dd = mutate(dd, sig = ifelse(p.value < .01, "***",
                             ifelse(p.value < .05, "**",
                                    ifelse(p.value < .1, "*", ""))))
# NOTE(review): output filename looks like a typo ("ergmSummay" vs
# "ergmSummary") -- left unchanged because other tooling may reference it.
knitr::knit(text = knitr::kable(dd, format = "html", digits = 3),
            output = "ergmSummay.html")
|
fc7e67f241e1d428a0711e3c0cfc789001ce8b0a
|
105714934f50b0daec0496fdc335cd9021fcc319
|
/script-notes.R
|
f57988dd04453b0987da98e7c727192f0f027a60
|
[
"MIT"
] |
permissive
|
danmarrod/rstudio
|
08f8d8a7d92bcf7894bd9c3cb39f2643ca87b0db
|
777ce9872c0afecd091c594e38f5b05044955ecf
|
refs/heads/main
| 2023-08-24T13:11:20.375649
| 2021-09-16T12:42:23
| 2021-09-16T12:42:23
| 404,712,473
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 26,260
|
r
|
script-notes.R
|
# ------------------------------------------------------------------
# LOAD THE DATA
# ------------------------------------------------------------------
# Prepare the workspace and reset loaded objects.
# NOTE(review): setwd()/rm(list = ls()) in a script are discouraged (they
# clobber the caller's session), but the relative read.csv() path below
# depends on this working directory, so they are kept.
getwd()
setwd("E:/FID/rstudio/")
getwd()
ls()
rm(list = ls())
ls()
# Load the dataset. `header` is spelled out in full (the original relied on
# partial argument matching with `head = TRUE`).
marks <- read.csv("./data/marks.csv", sep=",", header = TRUE)
# Inspect the dataset
head(marks)
dim(marks)
str(marks)
# ------------------------------------------------------------------
# DATA CLEANING
# ------------------------------------------------------------------
# Any incomplete rows?
any(!complete.cases(marks))
# Missing values per variable. Counted BEFORE zero-filling (the original
# counted after, so the result was always 0) and with base vapply() instead
# of purrr::map_dbl() on the undefined object `datos` (purrr is not loaded
# until library(tidyverse) further down).
vapply(marks, function(x) sum(is.na(x)), integer(1))
# Replace missing values with 0
marks[is.na(marks)] <- 0
# Round numeric columns to 3 decimals.
# NOTE(review): requires dplyr, which is only loaded later via
# library(tidyverse) -- confirm execution order if running top to bottom.
marks <- marks %>% mutate(across(where(is.numeric), round, 3))
# Drop the anonymised student-id column.
# NOTE(review): the circular-barplot section below still selects idStudent
# from `marks`, which this line removes -- verify before running that section.
marks$idStudent <- NULL
filter_marks <- marks
# Convert to Pass/Fail based on the T4 mark
# filter_marks_del$T4 <- factor(filter_marks_del$T4 > 0.25, labels = c("Aprobado", "suspenso"))
# ------------------------------------------------------------------
# DATASET VISUALISATION
# ------------------------------------------------------------------
# Load the main libraries.
# NOTE(review): install.packages() inside a script re-installs on every run;
# usually run once interactively instead.
install.packages("tidyverse")
library(ggplot2)
library(tidyverse)
library(caret)
# Correlation matrix of numeric columns for proactive ("SI") students.
filter_marks %>%
  filter(Flow == "SI") %>%
  select_if(is.numeric) %>%
  cor() %>%
  corrplot::corrplot()
# Density of each metric (gather() is superseded by pivot_longer(), kept as-is).
filter_marks %>%
  select(Professor,T1, T2, T3,T4, Total_T, Total_D, Deliveries, Extras, Total_TED, Marks, Flow) %>%
  gather(metric, value) %>%
  ggplot(aes(value, fill = metric)) +
  geom_density(show.legend = FALSE) +
  facet_wrap(~ metric, scales = "free")
# Pairwise plot of the partial results T1-T4 (a bit cluttered).
install.packages("AppliedPredictiveModeling")
library(AppliedPredictiveModeling)
transparentTheme(trans = .4)
featurePlot(x = filter_marks[, 3:6],
            y = filter_marks$Flow,
            plot = "pairs",
            auto.key = list(columns = 2))
# Mean group-work marks (Deliveries + Extras) per group, as a polar bar chart.
data <- filter_marks
data_group <- data %>% group_by(idGroup) %>%
  summarize(group_mark = mean(Deliveries + Extras), )
# (The trailing comma above is tolerated by dplyr's dynamic dots.)
ggplot(data_group, aes(x=as.factor(idGroup), y=group_mark)) +
  geom_bar(stat="identity", fill=alpha("blue", 0.3)) +
  ylim(-3,8) +
  theme_minimal() +
  # Strip axes/grid and use negative margins so only the polar bars remain.
  theme(
    axis.text = element_blank(),
    axis.title = element_blank(),
    panel.grid = element_blank(),
    plot.margin = unit(rep(-2,4), "cm")
  ) +
  coord_polar(start = 0)
# Final marks per student, grouped by team: circular barplot with empty
# spacer bars between groups (r-graph-gallery pattern).
# NOTE(review): `marks$idStudent` is removed in the cleaning section above --
# confirm this section runs before that deletion, or keep the column.
data <- marks %>% select(idStudent, idGroup, Marks) %>% rename(individual=idStudent, group=idGroup, value=Marks)
data <- data %>% arrange(group, value)
# BUG FIX: the spacer logic below relies on nlevels()/levels(), which return
# 0/NULL for a numeric column, silently adding no gaps. Convert the group id
# to a factor so empty bars are actually inserted between groups.
data$group <- as.factor(data$group)
empty_bar <- 4
# Add `empty_bar` NA rows per group to create visual gaps.
to_add <- data.frame( matrix(NA, empty_bar*nlevels(data$group), ncol(data)) )
colnames(to_add) <- colnames(data)
to_add$group <- rep(levels(data$group), each=empty_bar)
data <- rbind(data, to_add)
data <- data %>% arrange(group)
data$id <- seq(1, nrow(data))
# Compute per-bar label angle/justification so labels follow the circle.
label_data <- data
number_of_bar <- nrow(label_data)
angle <- 90 - 360 * (label_data$id-0.5) /number_of_bar
label_data$hjust <- ifelse( angle < -90, 1, 0)
label_data$angle <- ifelse(angle < -90, angle+180, angle)
ggplot(data, aes(x=as.factor(id), y=value, fill=as.factor(group))) +
  geom_bar(stat="identity", alpha=0.5) +
  ylim(-3,10) +
  theme_minimal() +
  theme(
    legend.position = "none",
    axis.text = element_blank(),
    axis.title = element_blank(),
    panel.grid = element_blank(),
    plot.margin = unit(rep(-1,4), "cm")
  ) +
  coord_polar() +
  geom_text(data=label_data, aes(x=id, y=value+10, label=individual, hjust=hjust), color="black", fontface="bold",alpha=0.6, size=2.5, angle= label_data$angle, inherit.aes = FALSE )
# Density of final marks.
data <- filter_marks
ggplot(data, aes(x = Marks, fill = 'Alumnos')) +
  geom_histogram(bins = 50, aes(y = ..density..)) +
  geom_density(alpha = 0.3) +
  ggtitle("Densidad en calificaciones") + theme_bw()
# Final marks split by proactive ("Flow") status: histogram then boxplot.
ggplot(data, aes(Marks)) +
  geom_histogram(binwidth=5, color="gray", aes(fill=Flow)) + xlab("Calificaciones") +
  ylab("Alumnos proactivos") +
  ggtitle("Calificaciones por alumnos proactivos")
ggplot(data, aes(Flow, Marks, color = Flow )) +
  geom_boxplot() +
  ggtitle("Calificaciones por alumnos proactivos") +
  xlab("Alumnos proactivos") +
  ylab("Calificaciones")
# Marks by professor as a 2-D bin heatmap.
ggplot(data) +
  geom_bin2d(aes(x=Marks, y=Professor)) +
  xlab("CALIFICACIONES") + ylab("PROFESORES") +
  ggtitle("Calificaciones por Profesor")
# Marks by group, ordered by median.
ggplot(data, aes(x = reorder(idGroup, Marks, FUN = median), y = Marks, col = idGroup)) +
  geom_boxplot() +
  coord_flip() +
  theme_bw()
# Lollipop chart of marks per professor.
ggplot(data, aes(x=Professor, y=Marks)) +
  geom_segment( aes(x=Professor, xend=Professor, y=0, yend=Marks), color="grey") +
  geom_point( color="orange", size=4) +
  theme_light() +
  theme(
    panel.grid.major.x = element_blank(),
    panel.border = element_blank(),
    axis.ticks.x = element_blank()
  ) +
  xlab("Profesor") +
  ylab("Calificación") +
  ggtitle("Calificaciones por profesor")
# Marks by professor and student type (stat name 'Identity' is resolved by
# ggplot2, though lowercase 'identity' is conventional).
ggplot(data = data)+
  geom_bar(mapping = aes(x = Flow,Marks,fill = factor(Professor)),stat = 'Identity',position = 'dodge') +
  ggtitle("Calificaciones por Profesor y Tipo") +
  xlab("Alumnos proactivos") +
  ylab("Calificaciones")
# Marks by group: mean group-work mark per (group, professor).
data_group <- data %>% group_by(idGroup, Professor) %>%
  summarize(group_mark = mean(Deliveries + Extras))
# Mean marks of all assessable items as a spider/radar chart.
# NOTE(review): install.packages() in a script re-installs every run.
install.packages("fmsb")
library("fmsb")
# BUG FIX: removed the leftover lines `data[1,] = c(0.0)` / `data[2,] = c(1.5)`
# -- they corrupted rows of the previous `data` object for no effect (data is
# rebuilt immediately below, and the radar min/max rows are added via rbind).
data <- marks %>%
  select(T1, T2, T3, T4, D2, D3, D4, D5) %>%
  rename(Code=T1, Github=T2, Sonar=T3, iTop=T4, Delivery2=D2, Delivery3=D3, Delivery4=D4, Delivery5=D5) %>%
  summarise_all(list(mean))
# fmsb::radarchart expects row 1 = axis maxima, row 2 = axis minima.
data <- rbind(rep(1.5,8) , rep(0.0,8) , data)
radarchart( data, axistype=1,
            pcol=rgb(0.2,0.5,0.5,0.9) , pfcol=rgb(0.2,0.5,0.5,0.5) , plwd=4 ,
            cglcol="grey", cglty=1, axislabcol="grey", caxislabels=seq(0,1.6,0.4), cglwd=0.8,
            vlcex=0.8
)
# ---------------------------------------------------------------------------------------------
# TEST 1: PREDICT THE VALUES OF THE `Flow` CLASS
# Binary classification
# ---------------------------------------------------------------------------------------------
# Preprocess the dataset: fill missing values and keep only the needed columns.
regVar <- c("idGroup","Professor","T1", "T2", "T3","T4", "Total_T", "Total_D", "Deliveries", "Extras", "Total_TED", "Marks", "Flow")
marks[is.na(marks)] <- 0
filter_marks <- marks[, regVar]
str(filter_marks)
# Split into training/testing with caret. No separate validation set:
# cross-validation is used instead (LOOCV would also be viable given the size).
library(caret)
set.seed(825)
# Stratified 80/20 split on the outcome.
inTraining <- createDataPartition(filter_marks$Flow, p = .80, list = FALSE)
training <- filter_marks[inTraining,]
testing <- filter_marks[-inTraining,]
# Students are graded per professor, so grouped folds may be of interest:
# 4 folds grouped by professor (left unused below; see `index` in the control).
group_folds <- groupKFold(training$Professor, k = 4)
# Resampling control:
# - class probabilities enabled for ROC analysis
# - 10-fold cross-validation (resamples() requires CV rather than LOOCV)
# BUG FIX: method must be lowercase "cv" (caret's documented value), and
# trainControl's argument is `verboseIter`, not `verbose` (which raised an
# unused-argument error).
myControl_clas <- trainControl(
  #index = group_folds,
  method = "cv",
  number = 10,
  summaryFunction = twoClassSummary,
  classProbs = TRUE,
  savePredictions = TRUE,
  verboseIter = FALSE
)
# First algorithm: plain logistic regression.
model_class_glm <- train(Flow~ ., training,
                         method="glm",
                         trControl=myControl_clas)
print(model_class_glm)
model_class_glm
# Second algorithm: elastic net -- useful with many predictors and few rows.
model_class_glmnet <- train(Flow~ ., training,
                            method = "glmnet",
                            trControl = myControl_clas
)
print(model_class_glmnet)
plot(model_class_glmnet)
# Third algorithm: gradient-boosted trees.
model_class_xgbTree <- train(Flow~ ., training,
                             method = "xgbTree",
                             trControl=myControl_clas)
print(model_class_xgbTree)
plot(model_class_xgbTree)
# Fourth algorithm: naive Bayes with caret's default tuning grid.
model_class_nbayes <- train(Flow~ ., training,
                            method = "naive_bayes",
                            trControl=myControl_clas)
# Inspect the defaults chosen by train().
print(model_class_nbayes)
plot(model_class_nbayes)
# Custom tuning grid so the optimal hyperparameters are searched explicitly.
nb_grid <- expand.grid(usekernel = c(TRUE, FALSE),
                       laplace = c(0, 0.5, 1),
                       adjust = c(0.75, 1, 1.25, 1.5))
model_class_nbayes_tun <- train(Flow~ ., training,
                                method = "naive_bayes",
                                usepoisson = TRUE,
                                tuneGrid = nb_grid,
                                trControl=myControl_clas)
print(model_class_nbayes_tun)
plot(model_class_nbayes_tun)
# Como último paso hacemos una comparativa de modelos con la función resample de Caret
model_list <- list(
glm = model_class_glm,
glmnet = model_class_glmnet,
xgbTree = model_class_xgbTree,
nbayes = model_class_nbayes_tun
)
resamps <- resamples(model_list)
# Error: LOOCV is not compatible with `resamples()` since only one resampling estimate is available.
# Pasamos trainControl a CV para poder utilizar la función
summary(resamps, metric="ROC")
# Visualizamos los resultados de la comparativa
bwplot(resamps, metric = "ROC")
dotplot(resamps, metric="ROC")
xyplot(resamps, what = "BlandAltman")
splom(resamps)
# Como último paso, y en base al mejor modelo realizamos la predicción para ver el rendimiento real
prediction <- predict(model_class_nbayes_tun, testing, type = "prob")
summary(prediction)
probs <- prediction[,2]
probs
install.packages("ROCR")
library(ROCR)
# Make a prediction object: pred
pred <- prediction(probs, testing$Flow)
# Make a performance object: perf
perf <- performance(pred, "tpr", "fpr")
#Plot this curve.Buen resultado, objetivo siempre minimizar FPR, maximizar TPR
plot(perf)
# Valor de AUC o área bajo la curva
perf_auc <- performance(pred, "auc")
print(perf_auc@y.values[[1]])
# Exploramos la densidad
trellis.par.set(caretTheme())
densityplot(model_class_nbayes_tun, pch = "|")
# TODO: Visualizamos que tal bueno ha sido con la matriz de confusión
cm <- confusionMatrix(model_class_nbayes_tun)
# Draw a 2x2 confusion matrix with base graphics: a coloured matrix panel on
# top (green = correct cells, red = errors, intensity scaled by count) and a
# details panel below with per-class and overall statistics.
#
# Args:
#   cm: a caret::confusionMatrix object with $table (2x2), $byClass, $overall.
# Side effects: draws on the current graphics device; no return value.
draw_confusion_matrix <- function(cm) {
  total <- sum(cm$table)
  res <- as.numeric(cm$table)
  # Generate color gradients. Palettes come from RColorBrewer.
  greenPalette <- c("#F7FCF5","#E5F5E0","#C7E9C0","#A1D99B","#74C476","#41AB5D","#238B45","#006D2C","#00441B")
  redPalette <- c("#FFF5F0","#FEE0D2","#FCBBA1","#FC9272","#FB6A4A","#EF3B2C","#CB181D","#A50F15","#67000D")
  # Map a cell count to a colour; larger counts get darker shades.
  getColor <- function (greenOrRed = "green", amount = 0) {
    if (amount == 0)
      return("#FFFFFF")
    palette <- greenPalette
    if (greenOrRed == "red")
      palette <- redPalette
    colorRampPalette(palette)(100)[10 + ceiling(90 * amount / total)]
  }
  # set the basic layout: matrix panel twice the height of the details panel
  layout(matrix(c(1,1,2)))
  par(mar=c(2,2,2,2))
  plot(c(100, 345), c(300, 450), type = "n", xlab="", ylab="", xaxt='n', yaxt='n')
  title('CONFUSION MATRIX', cex.main=2)
  # create the matrix (res is column-major: res[1]=TP cell, res[2]/res[3]=errors, res[4]=TN cell)
  classes = colnames(cm$table)
  rect(150, 430, 240, 370, col=getColor("green", res[1]))
  text(195, 435, classes[1], cex=1.2)
  rect(250, 430, 340, 370, col=getColor("red", res[3]))
  text(295, 435, classes[2], cex=1.2)
  text(125, 370, 'Predicted', cex=1.3, srt=90, font=2)
  text(245, 450, 'Actual', cex=1.3, font=2)
  rect(150, 305, 240, 365, col=getColor("red", res[2]))
  rect(250, 305, 340, 365, col=getColor("green", res[4]))
  text(140, 400, classes[1], cex=1.2, srt=90)
  text(140, 335, classes[2], cex=1.2, srt=90)
  # add in the cm results
  text(195, 400, res[1], cex=1.6, font=2, col='white')
  text(195, 335, res[2], cex=1.6, font=2, col='white')
  text(295, 400, res[3], cex=1.6, font=2, col='white')
  text(295, 335, res[4], cex=1.6, font=2, col='white')
  # add in the specifics (sensitivity, specificity, precision, recall, F1)
  plot(c(100, 0), c(100, 0), type = "n", xlab="", ylab="", main = "DETAILS", xaxt='n', yaxt='n')
  text(10, 85, names(cm$byClass[1]), cex=1.2, font=2)
  text(10, 70, round(as.numeric(cm$byClass[1]), 3), cex=1.2)
  text(30, 85, names(cm$byClass[2]), cex=1.2, font=2)
  text(30, 70, round(as.numeric(cm$byClass[2]), 3), cex=1.2)
  text(50, 85, names(cm$byClass[5]), cex=1.2, font=2)
  text(50, 70, round(as.numeric(cm$byClass[5]), 3), cex=1.2)
  text(70, 85, names(cm$byClass[6]), cex=1.2, font=2)
  text(70, 70, round(as.numeric(cm$byClass[6]), 3), cex=1.2)
  text(90, 85, names(cm$byClass[7]), cex=1.2, font=2)
  text(90, 70, round(as.numeric(cm$byClass[7]), 3), cex=1.2)
  # add in the accuracy information
  text(30, 35, names(cm$overall[1]), cex=1.5, font=2)
  text(30, 20, round(as.numeric(cm$overall[1]), 3), cex=1.4)
  text(70, 35, names(cm$overall[2]), cex=1.5, font=2)
  text(70, 20, round(as.numeric(cm$overall[2]), 3), cex=1.4)
}
draw_confusion_matrix(cm)
# ------------------------------------------------------------------------------------
# TEST 2: Predict the final grade (Marks) from the iTop partial grade (T4)
# Simple linear regression
# ------------------------------------------------------------------------------------
# Keep only the T4 and Marks columns (positions 7 and 28 per the author).
filter_marks <- marks[,c(7,28), drop=FALSE]
colnames(filter_marks) <- c('Nota iTop', 'Nota final')
colnames(filter_marks)
plot(filter_marks)
# A linear trend is observed.
# BUG FIX: after the rename above there is no `Marks` column, so the original
# `lm(filter_marks$Marks ~ ., ...)` regressed on NULL and failed. Model the
# renamed response directly (backticks because the name contains a space);
# `~ .` then uses the remaining column ('Nota iTop') as the predictor.
lm_filter_marks <- lm(`Nota final` ~ ., data=filter_marks)
lm_filter_marks$coefficients
final_marks_new <- filter_marks[1,]
predict(lm_filter_marks, final_marks_new)
abline(lm_filter_marks$coefficients, col = "red")
# Compute RMSE (root of the mean squared residual)
final_marks_est <- predict(lm_filter_marks)
# BUG FIX: same renamed-column issue as above -- read the response by its
# current name instead of the removed `Marks`.
res <- filter_marks[["Nota final"]] - final_marks_est
rmse <- sqrt(mean(res^2))
print(rmse)
# R^2: performance is poor, very close to zero.
summary(lm_filter_marks)$r.squared
# ------------------------------------------------------------------------
# TEST 3: PREDICT T4 FROM T1, T2, T3
# Multivariate linear regression
# ------------------------------------------------------------------------
# Keep only the columns of interest (T1-T4, positions 4:7).
filter_marks <- marks[,(4:7), drop=FALSE]
# Check visually for a linear trend.
plot(filter_marks)
# If a linear trend is visible, fit lm; otherwise (or with many variables)
# one usually fits lm plus a non-parametric model and compares metrics.
plot(T4 ~ T1, filter_marks)
plot(T4 ~ T2, filter_marks)
plot(T4 ~ T3, filter_marks)
# No visible linear trend in any of them.
# At this point one would transform for linearity or use non-parametric methods.
lm_filter_marks <- lm(T4 ~ ., data=filter_marks)
summary(lm_filter_marks)$r.squared
# R^2 is very close to 0, so this model is very poor.
# A good model should show NO pattern in residuals vs fitted values:
plot(lm_filter_marks$fitted.values, lm_filter_marks$residuals,
     xlab = "Fitted values", ylab = "Residuals")
# And the residual quantiles should follow the normal Q-Q line:
qqnorm(lm_filter_marks$residuals, ylab = "Residual Quantiles")
# TODO: the residual plots look odd given how poor the model is -- investigate.
library(caret)
# En este caso al no ser predicción binaria no solicitamos ROC
myControl_clas <- trainControl(
method = "cv",
number = 10,
summaryFunction = multiClassSummary,
classProbs = TRUE, # IMPORTANT!
savePredictions = TRUE,
verbose = FALSE
)
# Eliminamos filas con valores NA porque da error
filter_marks_del <- filter_marks[-c(186:188), ]
filter_marks_del <- filter_marks_del[-c(63), ]
# nos pide el paquete e1071
install.packages("e1071")
library(e1071)
model_clas_glm <- train(T4 ~ ., filter_marks_del, method="bayesglm", trControl=myControl_clas)
# se produce un error porque el valor a predecir debe ser binario: glm models can only use 2-class outcomes
print(model_clas_glm)
# Transformamos en Aprobado/Suspenso en función de la nota de T4
filter_marks_del$T4 <- factor(filter_marks_del$T4 > 0.25, labels = c("Aprobado", "suspenso"))
# Calculamos si están balanceados. No lo están mucho.
table(filter_marks_del$T4)
# Visualise some of the data with lattice/caret featurePlot.
install.packages("mlbench")
library(mlbench)
library(caret)
regVar <- c("T1", "T2", "T3")
str(filter_marks_del[, regVar])
# Tune the lattice theme for the scatter/smooth panels.
theme1 <- trellis.par.get()
theme1$plot.symbol$col = rgb(.2, .2, .2, .4)
theme1$plot.symbol$pch = 16
theme1$plot.line$col = rgb(1, 0, 0, .7)
theme1$plot.line$lwd <- 2
trellis.par.set(theme1)
# NOTE(review): this plots filter_marks (not filter_marks_del) -- presumably
# intentional to show all rows, but verify.
featurePlot(x = filter_marks[, regVar],
            y = filter_marks$T4,
            plot = "scatter",
            type = c("p", "smooth"),
            span = .5,
            layout = c(3, 1))
# Not all data renders correctly. The problem is how the data is loaded: with
# nulls present the plot stops drawing. Loading with read.csv (instead of
# read.csv2) marks nulls as NA and then it renders fine.
#----------------------------------------------
# EXPERIMENT 4: SAME ANALYSIS, DONE A DIFFERENT WAY
#-----------------------------------------------
# First inspect the pairwise relationships to check the linearity between
# the variables.
install.packages("psych")
install.packages("GGally")
library(psych)
library(GGally)
# Select the term marks T1..T4.
filter_marks <- marks %>% select(T1, T2, T3,T4)
# Distribution of each variable via histograms, plus pairwise correlations.
multi.hist(x = filter_marks, dcol = c("blue", "red"), dlty = c("dotted", "solid"),
           main = "")
ggpairs(filter_marks, lower = list(continuous = "smooth"),
        diag = list(continuous = "barDiag"), axisLabels = "none")
# Fit the linear model: predict T4 from the other term marks.
model_ln <- lm(T4 ~ ., data = filter_marks )
summary(model_ln)
# R^2 = 0.056 -> poor model.
# Now analyse whether the final results (Marks) can be predicted from
# T1, T2, T3, T4.
filter_marks <- marks %>% select(T1, T2, T3,T4, Marks)
multi.hist(x = filter_marks, dcol = c("blue", "red"), dlty = c("dotted", "solid"),
           main = "")
ggpairs(filter_marks, lower = list(continuous = "smooth"),
        diag = list(continuous = "barDiag"), axisLabels = "none")
model_ln <- lm(Marks ~ ., data = filter_marks )
summary(model_ln)
# R^2 = 0.5785 -> BETTER THAN BEFORE.
# ------------------------------------------------------------------------
# EXPERIMENT 5: NON-PARAMETRIC REGRESSION with kNN
# ------------------------------------------------------------------------
library(caret)
# Repeated 10-fold cross-validation (5 repeats) over a grid of k values.
# (particiones = folds, repeticiones = repeats, hiperparametros = k grid.)
particiones <- 10
repeticiones <- 5
hiperparametros <- data.frame(k = c(1, 2, 5, 10, 15, 20, 30, 50))
# Pre-generate one seed vector per resample so results are reproducible
# even when training runs in parallel.
set.seed(123)
seeds <- vector(mode = "list", length = (particiones * repeticiones) + 1)
for (i in 1:(particiones * repeticiones)) {
  seeds[[i]] <- sample.int(1000, nrow(hiperparametros))
}
seeds[[(particiones * repeticiones) + 1]] <- sample.int(1000, 1)
control_train <- trainControl(method = "repeatedcv", number = particiones,
                              repeats = repeticiones, seeds = seeds,
                              returnResamp = "final", verboseIter = FALSE,
                              allowParallel = TRUE)
set.seed(342)
# Case 1: predict T4 from T1..T3.
filter_marks <- marks %>% select(T1, T2, T3,T4)
knn_model <- train(T4 ~ ., data = filter_marks,
                   method = "knn",
                   tuneGrid = hiperparametros,
                   metric = "RMSE",
                   trControl = control_train)
knn_model
plot(knn_model, type = 'l', lwd = 2)
# Case 2: predict the final Marks from T1..T4.
filter_marks <- marks %>% select(T1, T2, T3, T4, Marks)
knn_model <- train(Marks ~ ., data = filter_marks,
                   method = "knn",
                   tuneGrid = hiperparametros,
                   metric = "RMSE",
                   trControl = control_train)
knn_model
plot(knn_model, type = 'l', lwd = 2)
#pred <- predict(knn_model, training)
#RMSE(pred, filter_marks$Marks)
# ------------------------------------------------------------------------
# EXPERIMENT 6: UNSUPERVISED ANALYSIS
# Clustering
# ------------------------------------------------------------------------
# Load the libraries used in this section.
library(tidyverse)
library(caret)
# Reload the complete data set with the default settings.
marks <- read.csv("./data/marks-v1.0.csv", sep=",", head = TRUE)
# Prepare the data set: replace null values with 0 and keep only the
# columns needed for this particular problem.
regVar <- c("T1", "T2", "T3","T4", "Total_T", "Total_D", "Deliveries", "Extras")
marks[is.na(marks)] <- 0
cluster_marks <- marks[, regVar]
# Round the numeric columns to three decimal places.
# Alternative (changes column types): cluster_marks <- format(round(cluster_marks, 3), nsmall = 3)
# TODO: the command below does not pad when there are fewer than 3
# decimals, as in T1.
cluster_marks <- cluster_marks %>% mutate(across(where(is.numeric), round, 3))
# Inspect the prepared data set.
head(cluster_marks)
dim(cluster_marks)
str(cluster_marks)
# -----------------------------------
# Clustering with K-MEANS
# -----------------------------------
# The magnitudes vary between variables, so scale them to normalise.
datos <- scale(cluster_marks)
install.packages("factoextra")
library(factoextra)
# Elbow (within-sum-of-squares) plot to choose the number of clusters.
fviz_nbclust(x = datos, FUNcluster = kmeans, method = "wss", k.max = 15,
             diss = get_dist(datos, method = "euclidean"), nstart = 50)
# K = 4 or 5 gives good results.
# The factoextra package can visualise the resulting groupings. With more
# than 2 variables it automatically runs a PCA and plots the first two
# principal components.
set.seed(123)
km_clusters <- kmeans(x = datos, centers = 5, nstart = 50)
# factoextra uses the data frame's row names as observation identifiers,
# which allows labels to be added to the plots.
fviz_cluster(object = km_clusters, data = datos, show.clust.cent = TRUE,
             ellipse.type = "euclid", star.plot = TRUE, repel = TRUE) +
  labs(title = "Resultados clustering K-means") +
  theme_bw() +
  theme(legend.position = "none")
# -----------------------------------------------
# Clustering with K-MEDOIDS (PAM)
# -----------------------------------------------
# More robust than k-means, especially with noise. Requires K up front.
library(cluster)
# Outliers are suspected, so Manhattan distance is used as the similarity
# measure.
fviz_nbclust(x = datos, FUNcluster = pam, method = "wss", k.max = 15,
             diss = dist(datos, method = "manhattan"))
# Good results are again obtained with K = 5.
set.seed(123)
pam_clusters <- pam(x = datos, k = 5, metric = "manhattan")
pam_clusters
# The object returned by pam() contains, among other information, the
# observations selected as medoids ($medoids) and the cluster assigned to
# each observation ($clustering).
fviz_cluster(object = pam_clusters, data = datos, ellipse.type = "t",
             repel = TRUE) +
  theme_bw() +
  labs(title = "Resultados clustering PAM") +
  theme(legend.position = "none")
# --------------------------------------------
# VALIDATE the effectiveness of both algorithms
# --------------------------------------------
# eclust() and fviz_silhouette() from factoextra give the silhouette
# coefficients easily; eclust()'s FUNcluster argument selects the
# clustering algorithm (similar to CARET's method argument).
# KMEANS
km_clusters <- eclust(x = datos, FUNcluster = "kmeans", k = 5, seed = 123,
                      hc_metric = "euclidean", nstart = 50, graph = FALSE)
fviz_silhouette(sil.obj = km_clusters, print.summary = TRUE, palette = "jco",
                ggtheme = theme_classic())
# Mean silhouette per cluster
km_clusters$silinfo$clus.avg.widths
# Silhouette coefficient for each observation
head(km_clusters$silinfo$widths)
# PAM
pam_clusters <- eclust(x = datos, FUNcluster = "pam", k = 5, seed = 123,
                       hc_metric = "manhattan", nstart = 50, graph = FALSE)
fviz_silhouette(sil.obj = pam_clusters, print.summary = TRUE, palette = "jco",
                ggtheme = theme_classic())
# Mean silhouette per cluster
pam_clusters$silinfo$clus.avg.widths
# Silhouette coefficient for each observation
head(pam_clusters$silinfo$widths)
# COMPARE ALGORITHMS WITH CLVALID (stability + internal validation over
# K = 2..6 for hierarchical, k-means and PAM).
install.packages("clValid")
library("clValid")
resamps <- clValid(
  obj = datos,
  nClust = 2:6,
  clMethods = c("hierarchical", "kmeans", "pam"),
  validation = c("stability", "internal")
)
summary(resamps)
|
d2383c1749e8c44b30e69f41b1b46adf2497ac44
|
03e42f65dc15b43f9b31846aaf8a4a8c2324fee9
|
/get_gamma_hyperparameters.R
|
c9a5e511cb62cb1181e9cb8ea6fcf43c7ce80ad1
|
[] |
no_license
|
Klemens46/Technology-Assessment-Model--GUI-console-adaptable
|
d4f05277577f0c1b39f55dfd9ef6ffe3245f013d
|
1587b1f7dd37eca77ce564ebd97f49ab7a3bbd4c
|
refs/heads/master
| 2021-10-11T23:37:02.975219
| 2019-01-30T17:17:48
| 2019-01-30T17:17:48
| 168,208,955
| 1
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,752
|
r
|
get_gamma_hyperparameters.R
|
# Name of script: get_gamma_hyperparameters.R
# Purpose: A script that creates two functions to quickly calculate
# the Gamma distribution hyper-parameters shape and rate (2nd and 3rd
# arguments of the Gamma functions as used in the R software)
# from a mean and a standard deviation (SD) or a mean and a
# relative standard deviation (RSD).
# This script allows to quickly parameterize a Gamma
# distribution and if needed automatically re-parameterize it
# when the mean and/or the SD is changed.
# Author(s): Klemens Wallner (who is grateful to Benoit Kudinga who helped
# to make this script more clear).
# Note(s): 1) a = alpha = shape, b = beta = rate , mean = shape / rate
# 2) The gamma distribution can be parameterized
# using either the shape and rate (as here) or the
# shape and scale parameters, where the scale = 1 / rate and
# rate = 1 / scale.
# For example 'pgamma(1,3,2)' is equivalent to
# 'pgamma(1,3, scale=0.5)'.
# Function 1
# Get the Gamma hyper-parameters from a mean and a SD.
#
# For a Gamma distribution: mean = shape / rate and
# variance = shape / rate^2, hence rate = mean / SD^2 and
# shape = mean * rate.
#
# @param mean Desired mean of the Gamma distribution; must be > 0.
# @param SD   Desired standard deviation; must be > 0.
# @return Numeric vector c(shape, rate), i.e. the 2nd and 3rd arguments
#   of R's *gamma() functions (e.g. rgamma(n, shape, rate)).
Get.GHP <- function(mean, SD) {
  # A Gamma distribution only exists for positive mean and SD; fail fast
  # instead of silently returning negative/NaN hyper-parameters.
  stopifnot(is.numeric(mean), is.numeric(SD), mean > 0, SD > 0)
  b <- mean / (SD ^ 2)  # rate (beta)
  a <- mean * b         # shape (alpha)
  c(a, b)
}
# For example: to draw 5 samples from a Gamma distribution with a mean
# of 60 and a SD of 3 type:
# H <- Get.GHP(60, 3)
# samples <- rgamma(5, H[1], H[2])
# Function 2
# Get the Gamma hyper-parameters from a mean and a RSD in percent
# (default RSD is 10%).
#
# The relative standard deviation is first converted to an absolute SD
# (mean * RSD / 100); the shape/rate pair then follows the same algebra
# as Get.GHP(): rate = mean / SD^2, shape = mean * rate.
#
# @param mean Desired mean of the Gamma distribution.
# @param RSD  Relative standard deviation, in percent (default 10).
# @return Numeric vector c(shape, rate).
Get.GHP.RSD <- function(mean, RSD=10) {
  sd_abs <- mean * (RSD / 100)   # absolute SD implied by the RSD
  rate <- mean / (sd_abs ^ 2)    # beta
  shape <- mean * rate           # alpha
  c(shape, rate)
}
# For example: to draw 5 samples from a Gamma distribution with a mean
# of 60 and a RSD of 7% type:
# H <- Get.GHP.RSD(60, 7)
# samples <- rgamma(5, H[1], H[2])
|
847e653ccdc18a34ec5f1cb211a0bbc1e1ba47eb
|
d94c3c03a9efc6b256b5a589b837acb6b6cd5a42
|
/xkcd.R
|
8bf2776bd6f4b2291bd614a9670591d6a172fd64
|
[] |
no_license
|
BarryHaworth/Webrip
|
f127b2a081279063673dbd29734ce13ea268ac5a
|
d09bd21dde3280e6ef9d792a9572da211fdeaaec
|
refs/heads/master
| 2023-08-31T17:57:04.532050
| 2023-08-21T21:23:10
| 2023-08-21T21:23:10
| 201,607,135
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,699
|
r
|
xkcd.R
|
# Download the images from XKCD
# Page links of form https://xkcd.com/2628/
# Updated to check what has been downloaded already and download to the latest file
library(rvest)
library(dplyr)
library(stringr)
library(xml2)
# Generous download timeout (seconds) for slow connections.
options(timeout= 4000000)
print("Program started")
timestamp()
PROJECT_DIR <- "c:/R/Webrip"
FILE_DIR <- "c:/R/Webrip/XKCD"
dir.create(FILE_DIR,showWarnings = FALSE)
# for testing
#d = start
#url <- paste0('https://xkcd.com/',d)
##Reading the HTML code from the website
#webpage <- read_html(url)
#image <- html_nodes(webpage,'img')
#img_link <- image[grep("//imgs.xkcd.com/comics/",image)]
#link <- xml_attrs(img_link)[[1]][1]
#name <- paste0(formatC(i,3,flag="0"),"_",word(link,-1,sep='/'))
# Defaults; both are recomputed below.
start = 1
stop = 2628 # Latest comic as at 06/06/2022
# Resume point: number of the last already-downloaded file, plus one.
# NOTE(review): file names are written with formatC(i, 3, flag = "0")
# (3-digit padding for comics < 1000), but substr(last_file, 1, 4) takes
# 4 characters - for a name like "099_foo.png" this yields "099_" and
# as.numeric() returns NA. This only works once 4-digit comics exist;
# confirm the intended padding width.
dir <- list.files(FILE_DIR)
last_file <- tail(dir,1)
start <- as.numeric(substr(last_file,1,4))+1
# Latest comic number, scraped from the link on the xkcd front page.
url <- paste0('https://xkcd.com/')
webpage <- read_html(url)
links <- html_nodes(webpage,'a')
latest_link <- tail(links[grep("https://xkcd.com/",links)],1)
latest <- xml_attrs(latest_link)[[1]][1]
stop <- as.numeric(word(latest,-1,sep='/'))
#Download the images
for (i in start:stop){
  url <- paste0('https://xkcd.com/',i)
  #Reading the HTML code from the website
  webpage <- read_html(url)
  image <- html_nodes(webpage,'img')
  img_link <- image[grep("//imgs.xkcd.com/comics/",image)]
  # tryCatch with an empty error handler: pages with no standard comic
  # image (e.g. interactive comics) are silently skipped on purpose.
  tryCatch({link <- xml_attrs(img_link)[[1]][1]
  link <- paste0("https:",link)
  name <- paste0(formatC(i,3,flag="0"),"_",word(link,-1,sep='/'))
  print(paste("Downloading file number",i,name))
  download.file(link,paste0(FILE_DIR,"/",name),quiet=TRUE, mode="wb")},error=function(e){})
}
|
b92b5e2bb3f67009449617fbba70d287a6292ab4
|
b5d8e170fb18ea9c6f0d4f71421bb3a8a479ec7f
|
/LANDSAT/getSpatialData_LANDSAT.R
|
c427bef6f0889ad663284a7880feabea090ffba7
|
[] |
no_license
|
brownag/sANDREWbox
|
8afa6707e7796f146cda62fb291ef23908ca287b
|
27e25464921bfdcd2c45f3df4ebbb74056f1da98
|
refs/heads/master
| 2023-06-21T22:55:01.482301
| 2023-06-09T22:38:34
| 2023-06-09T22:38:34
| 95,600,286
| 2
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 755
|
r
|
getSpatialData_LANDSAT.R
|
## Download Landsat records for the CA649 survey area via getSpatialData.
## Load packages
library(getSpatialData)
library(sf)
# Area of interest: CA649 boundary shapefile.
aoi <- rgdal::readOGR("E:/CA649/CA649_b.shp")
set_aoi(aoi)
time_range <- c("2018-06-01", "2018-12-31")
# SECURITY(review): USGS credentials are hard-coded in plain text here.
# They should be moved out of the script (e.g. read from an environment
# variable or keyring) and the exposed password rotated.
login_USGS("br0wn.andrewg", password = "thisisaverylongpassword1")
set_archive("E:/CA649/LANDSAT")
product_names <- getLandsat_names()
## query for records for your AOI, time range and product
query <- getLandsat_query(time_range = time_range, name = product_names[4])
## preview a record
getLandsat_preview(query[3,])
#print available levels for a record
query[5,]
## download record 5 with level "l1" (will direct to AWS automaticaly)
# NOTE(review): despite the comment above, record 3 (query[3,]) is what is
# actually downloaded and ordered - confirm which record was intended.
files <- getLandsat_data(records = query[3,], level = "l1", source = "auto")
#espa-andrew.g.brown@ca.usda.gov-07022019-135831-039
# NOTE(review): the trailing comma in espa_order(query[3,],) passes a
# missing second argument - confirm espa_order() accepts that.
espa_order(query[3,],)
|
e6c7975ecfc8c8f87eb4eab371bba8d0c05643f8
|
60005a00a8636e8597d66b8a7658d1f409df5606
|
/workshops/regression/demo-plots.R
|
d36bf49131cfd0b2d875dc10a35a74b80f8e8cc4
|
[
"LicenseRef-scancode-unknown-license-reference",
"MIT"
] |
permissive
|
ajstamm/titanic
|
9198c7123208d1d9d6ef4c770c716539158b7caf
|
3e817ad19b4e2bdd0d97343d414376e527bd331e
|
refs/heads/gh-pages
| 2022-07-20T16:11:04.371587
| 2022-07-14T19:19:51
| 2022-07-14T19:19:51
| 159,426,128
| 1
| 1
|
NOASSERTION
| 2022-06-01T12:25:47
| 2018-11-28T01:42:34
|
HTML
|
UTF-8
|
R
| false
| false
| 4,243
|
r
|
demo-plots.R
|
## #############################################################################
## Demo Plots
##
## - Plots to demonstrate Base R's graphics abilities.
##   http://choens.github.io/titanic/workshops/regression/import-passengers.R
## - All examples use the passengers data set (loaded by the script above).
##
## #############################################################################

## LEARNING OBJECTIVES ---------------------------------------------------------
## - R has many graphical packages.
##     - ggplot2 (publishing to, everywhere)
##     - ggvis (publishing to web)
##     - lattice (publishing to PDF)
##     - plotly (publishing to web)
##     - htmlwidgets (publishing to web)
## - Today, we use Base R

## BASE R GRAPHICS -------------------------------------------------------------
## - Encourage interactive use
## - Easy to remember commands
## - Good defaults!
## - Thank You: John W. Tukey

## OPENING DISCUSSION ----------------------------------------------------------
## Plotting functions we will use today:
## - plot()    You've already used this command.
## - barplot()
## - boxplot()
## - hist()

## AGE -------------------------------------------------------------------------
## Remember:
mean(passengers$age)

## Only one variable? X axis is row number. Can look like a trend.
plot(passengers$age)

## Use sample() to shuffle the rows and avoid the apparent trend.
plot(sample(passengers$age))

## AGE BY PASSENGER CLASS ------------------------------------------------------
## First Class passengers were the oldest group.
## Third Class was the youngest AND the most diverse.
## X/Y Axis Order matters!
plot(x=passengers$passenger_class, y=passengers$age)

## REMEMBER FACTORS ------------------------------------------------------------
## - Superficially similar to character/string variables.
## - Tells R a variable is a categorical variable.
## - Factors have different defaults in plot()

## AGE BY PASSENGER CLASS (FACTOR) ---------------------------------------------
## - If we tell R that passengers$passenger_class is a categorical variable,
##   the scatter plot becomes a box plot!
## - The box spans the interquartile range (1st to 3rd quartile), with the
##   median drawn as a line inside it.
## - The whiskers extend to the most extreme points within 1.5 times the
##   interquartile range.
## - Third-class (steerage) was the youngest (and the most diverse) group.
plot(as.factor(passengers$passenger_class),
     passengers$age
     )

## BARPLOTS ---------------------------------------------------------------------
## You can't create a barplot with raw data. It needs a table.
## The barplot is a good way to visualize categorical data.

# Our passenger data has more males than females
table(passengers$sex)

# Creates a table called tbl_sex
tbl_sex <- table(passengers$sex)
tbl_sex

## PASSENGER SEX ---------------------------------------------------------------
## Input is a table object.
barplot(tbl_sex)

## Simple commands, building on one another.
barplot(table(passengers$sex))

## STOP! TEACHABLE MOMENT ------------------------------------------------------
## - SAS/SPSS: Powerful, independent commands.
## - R: Simple commands work together.
## - Both methods have advantages.
## - One reason R is so modular.

## YOUR TURN! ------------------------------------------------------------------
## Learn how to use the prop.table command.
## Use this command to build a table showing the
## proportions of males and females in passengers.
?prop.table

## YOUR TURN! ------------------------------------------------------------------
## Now use prop.table to build a proportional barplot.

## GET SOME HELP! --------------------------------------------------------------
## - Eventually, we all need some help.
## - stackoverflow.com
## - google.com
## - Let's practice finding help.

## YOUR TURN! ------------------------------------------------------------------
## - Download the exploratory-plots.R and advanced-plots.R files.
##   - https://choens.github.io/titanic/workshops/regression/exploratory-plots.R
##   - https://choens.github.io/titanic/workshops/regression/advanced-plots.R
## - Will show you more plotting and graphing commands and techniques.
## - There is sooooo much we could discuss . . . and only three hours to do so.
## - Remember, these materials are available after this workshops.
|
a1093a904ab3abfead8fe13c335adb123f20a48d
|
465038dc0030ea330afd3e8ff599a511dd0bd336
|
/msk_stuff/vcf_compare.R
|
793231d7a61c9d20c57434b10d742372963ab739
|
[] |
no_license
|
vyellapa/misc
|
693f3631e6ba9a102d2cb819754b686262c6c713
|
d0bf18071c10f1e7c99f386a55f474c3b155c5bd
|
refs/heads/master
| 2021-06-29T21:43:14.946440
| 2021-06-18T01:07:00
| 2021-06-18T01:07:00
| 239,605,752
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 6,632
|
r
|
vcf_compare.R
|
# Compare two CaVEMan VCF call sets and keep high-confidence PASS variants
# (ASRD > 0.9) from each, keyed by chrom:pos, for downstream read-count
# comparison with bam-readcount.
library(ggplot2)
library("stringr")

# Command-line arguments: <cn2 vcf> <cn5 vcf> <bam>
args <- commandArgs(TRUE)
cn2vcf <- args[1]
cn5vcf <- args[2]
bam5 <- args[3]
# NOTE(review): the same BAM is reused for the CN2-unique read counts;
# confirm a separate CN2 BAM was not intended here.
bam2 <- bam5
#cn2vcf="~/Desktop/SOHN_163/one_offs/SNP/E-H-109099-T2-1-D1-1_vs_E-H-109099-N1-1-D1-1.caveman.muts.annot.vcf.gz"
#cn5vcf="~/Desktop/SOHN_163/one_offs/SNP/E-H-109099-T1-1-D1-1_vs_E-H-109099-N1-1-D1-1.caveman.muts.annot.vcf.gz"

CN2 <- read.table(cn2vcf, header=F, sep="\t", stringsAsFactors = F)
CN5 <- read.table(cn5vcf, header=F, sep="\t", stringsAsFactors = F)

# ASRD score is embedded in the INFO column (V8) as "ASRD=<number>".
pat <- "ASRD=[0-9,.]+"

# Build a chrom:pos key and a numeric ASRD column, then keep PASS calls
# with ASRD > 0.9.
CN2$m <- paste(sep=":", CN2$V1, CN2$V2)
CN2$ASRD <- gsub("ASRD=", "", str_extract(CN2$V8, pat))
CN2$ASRD <- as.numeric(as.character(CN2$ASRD))
cn2p <- CN2[which(CN2$V7=="PASS" & CN2$ASRD>0.9), ]

CN5$m <- paste(sep=":", CN5$V1, CN5$V2)
CN5$ASRD <- gsub("ASRD=", "", str_extract(CN5$V8, pat))
CN5$ASRD <- as.numeric(as.character(CN5$ASRD))
# Bug fix: the original subset CN2 here (copy-paste error), producing
# cn5p from CN2's rows selected with CN5's filter mask.
cn5p <- CN5[which(CN5$V7=="PASS" & CN5$ASRD>0.9), ]
# Partition the filtered calls by the chrom:pos key into: shared calls,
# CN2-only calls, and CN5-only calls.
common=cn5p[(cn5p$m %in% cn2p$m),]
# Keep chrom, pos (duplicated as start/end), ref, alt - the region-list
# format expected by bam-readcount's -l option.
common=common[,c(1,2,2,4,5)]
uniq_cn2=cn2p[!(cn2p$m %in% cn5p$m),]
uniq_cn2=uniq_cn2[,c(1,2,2,4,5)]
uniq_cn5=cn5p[!(cn5p$m %in% cn2p$m),]
uniq_cn5=uniq_cn5[,c(1,2,2,4,5)]
# Write the site lists consumed by bam-readcount below.
write.table(common,file="common.in", append=FALSE, quote=F,sep="\t", eol="\n", row.names=F, col.names=F)
write.table(uniq_cn2,file="uniq_cn2.in", append=FALSE, quote=F,sep="\t", eol="\n", row.names=F, col.names=F)
write.table(uniq_cn5,file="uniq_cn5.in", append=FALSE, quote=F,sep="\t", eol="\n", row.names=F, col.names=F)
# Run bam-readcount for each site list, then paste the per-base metrics
# next to the site columns (keeping the first 15 columns) into *.out1.
command=paste(sep=" ","bam-readcount","-f","~/local/resources/hs37d5.fa","-l","common.in",bam5, "> common.out;paste common.in common.out |cut -f1-15 > common.out1")
system(command)
command=paste(sep=" ","bam-readcount","-f","~/local/resources/hs37d5.fa","-l","uniq_cn2.in",bam2, "> common.out;paste uniq_cn2.in common.out |cut -f1-15 > uniq_cn2.out1")
system(command)
command=paste(sep=" ","bam-readcount","-f","~/local/resources/hs37d5.fa","-l","uniq_cn5.in",bam5, "> common.out;paste uniq_cn5.in common.out |cut -f1-15 > uniq_cn5.out1")
system(command)
# Read back the annotated read-count tables and tag each row with its
# category (shared vs unique to each call set).
# NOTE(review): the *.out1 files were written to the working directory
# above but are read from ~/Desktop here - confirm the intended location.
u.cn2=read.table("~/Desktop/uniq_cn2.out1",header=F,sep="\t",stringsAsFactors = F)
u.cn5=read.table("~/Desktop/uniq_cn5.out1",header=F,sep="\t",stringsAsFactors = F)
shared=read.table("~/Desktop/common.out1",header=F,sep="\t",stringsAsFactors = F)
u.cn2$STATUS="UNIQ_CN2"
u.cn5$STATUS="UNIQ_CN5"
shared$STATUS="SHARED"
all.rc=rbind(u.cn2,u.cn5,shared)
#Find out the REF base and readcount
# Columns V11..V14 hold the colon-separated per-base (A/C/G/T) metric
# strings from bam-readcount; V11 (the A column) is the default.
all.rc$REF=all.rc$V11
all.rc[all.rc$V4=="C",]$REF=all.rc[all.rc$V4=="C",]$V12
all.rc[all.rc$V4=="G",]$REF=all.rc[all.rc$V4=="G",]$V13
all.rc[all.rc$V4=="T",]$REF=all.rc[all.rc$V4=="T",]$V14
#Find out the ALT base and readcount
all.rc$ALT=all.rc$V11
all.rc[all.rc$V5=="C",]$ALT=all.rc[all.rc$V5=="C",]$V12
all.rc[all.rc$V5=="G",]$ALT=all.rc[all.rc$V5=="G",]$V13
all.rc[all.rc$V5=="T",]$ALT=all.rc[all.rc$V5=="T",]$V14
# Keep site columns plus STATUS/REF/ALT, split the colon-separated REF and
# ALT metric strings into individual columns, and bind everything together.
all.rc=all.rc[,c(1,2,4,5,9,16,17,18)]
r=as.data.frame(do.call('rbind',strsplit(as.character(all.rc$REF), ':', fixed=TRUE)))
a=as.data.frame(do.call('rbind',strsplit(as.character(all.rc$ALT), ':', fixed=TRUE)))
all.rc=all.rc[,c(1,2,3,4,5,6)]
all.rc=cbind(all.rc,r,a)
colnames(all.rc)=c("CHR","POS","R1","A1","DP","CATEGORY","REF","COUNT","MAPQ","BASEQ","SE_MAPQ","POS_STRAND_READS","NEG_STRAND_READS","POS_AS_FRAC","MM_AS_FRAC","MMQS","NUM_Q2","AVG_DIST_Q2","CLIPPED_LEN","DIST_3P","ALT1","ALT_COUNT","ALT_MAPQ","ALT_BASEQ","ALT_SE_MAPQ","ALT_POS_STRAND_READS","ALT_NEG_STRAND_READS","ALT_POS_AS_FRAC","ALT_MM_AS_FRAC","ALT_MMQS","ALT_NUM_Q2","ALT_AVG_DIST_Q2","ALT_CLIPPED_LEN","ALT_DIST_3P")
# Derived metrics: strand balance of ALT reads, variant allele fraction,
# and the REF-vs-ALT mismatch-quality-sum difference.
all.rc$STRANDEDNESS=apply(all.rc,1,function(x) {p=as.numeric(as.character(x["ALT_POS_STRAND_READS"]));n=as.numeric(as.character(x["ALT_NEG_STRAND_READS"]));m=min(p,n)/(p+n); return(m)})
all.rc$VAF=apply(all.rc,1,function(x) {p=as.numeric(as.character(x["COUNT"]));n=as.numeric(as.character(x["ALT_COUNT"]));m=n/(p+n); return(m)})
all.rc$MMQS_DIFF=abs(as.numeric(as.character(all.rc$ALT_MMQS))-as.numeric(as.character(all.rc$MMQS)))
# Density plots of the read-count metrics split by category, each saved to
# its own PDF.
# NOTE(review): these plots draw from `a` and facet on SAMPLE, but `a`
# (built above) only holds the raw colon-split ALT columns and has no
# CATEGORY/SAMPLE columns; all.rc has CATEGORY but no SAMPLE. The data
# source was presumably meant to be all.rc with a sample identifier added -
# confirm before trusting these figures.
p=ggplot(a,aes(ALT_COUNT,fill=CATEGORY))+geom_density(linetype="blank",alpha=0.6)+facet_wrap(~SAMPLE)+coord_cartesian(xlim=c(0,150),ylim=c(0,0.15))+theme_bw()
ggsave(p, file="ALT_COUNT.pdf", width=6, height=4)
p=ggplot(a,aes(ALT_MAPQ,fill=CATEGORY))+geom_density(linetype="blank",alpha=0.6)+facet_wrap(~SAMPLE)+theme_bw()+coord_cartesian(xlim=c(45,60),ylim=c(0,2))
ggsave(p, file="ALT_MAPQ.pdf", width=6, height=4)
p=ggplot(a,aes(ALT_BASEQ,fill=CATEGORY))+geom_density(linetype="blank",alpha=0.6)+facet_wrap(~SAMPLE)+theme_bw()+coord_cartesian(ylim=c(0,0.4))
ggsave(p, file="ALT_BASEQ.pdf", width=6, height=4)
p=ggplot(a,aes(STRANDEDNESS,fill=CATEGORY))+geom_density(linetype="blank",alpha=0.6)+facet_wrap(~SAMPLE)+theme_bw()
ggsave(p, file="STRANDEDNESS.pdf", width=6, height=4)
p=ggplot(a,aes(ALT_POS_AS_FRAC,fill=CATEGORY))+geom_density(linetype="blank",alpha=0.6)+facet_wrap(~SAMPLE)+theme_bw()+geom_vline(xintercept = 0.01)
ggsave(p, file="ALT_POS_AS_FRAC.pdf", width=6, height=4)
p=ggplot(a,aes(ALT_MM_AS_FRAC,fill=CATEGORY))+geom_density(linetype="blank",alpha=0.6)+facet_wrap(~SAMPLE)+theme_bw()+coord_cartesian(ylim=c(0,300),xlim=c(0.01,0.065))
ggsave(p, file="ALT_MM_AS_FRAC.pdf", width=6, height=4)
# Bug fix: this ALT_MMQS plot was saved as "STRANDEDNESS.pdf", clobbering
# the strandedness figure.
p=ggplot(a,aes(ALT_MMQS,fill=CATEGORY))+geom_density(linetype="blank",alpha=0.6)+facet_wrap(~SAMPLE)+theme_bw()+coord_cartesian(xlim=c(0,120),ylim=c(0,0.2))
ggsave(p, file="ALT_MMQS.pdf", width=6, height=4)
# Bug fix: this ALT_AVG_DIST_Q2 plot was saved as "ALT_MMQS.pdf".
p=ggplot(a,aes(ALT_AVG_DIST_Q2,fill=CATEGORY))+geom_density(linetype="blank",alpha=0.6)+facet_wrap(~SAMPLE)+theme_bw()
ggsave(p, file="ALT_AVG_DIST_Q2.pdf", width=6, height=4)
p=ggplot(a,aes(VAF,fill=CATEGORY))+geom_density(linetype="blank",alpha=0.6)+facet_wrap(~SAMPLE)+theme_bw()
ggsave(p, file="VAF.pdf", width=6, height=4)
p=ggplot(a,aes(MMQS_DIFF,fill=CATEGORY))+geom_density(linetype="blank",alpha=0.6)+facet_wrap(~SAMPLE)+theme_bw()+coord_cartesian(xlim=c(0,100),ylim=c(0,0.15))
ggsave(p, file="MMQS_DIFF.pdf", width=6, height=4)
# Bug fix: the next three plots were not assigned to p, so ggsave(p, ...)
# re-saved the previous plot under the new file name.
p=ggplot(a,aes(ALT_CLIPPED_LEN,fill=CATEGORY))+geom_density(linetype="blank",alpha=0.6)+facet_wrap(~SAMPLE)+theme_bw()+coord_cartesian(xlim=c(125,150),ylim=c(0,0.75))
ggsave(p, file="ALT_CLIPPED_LEN.pdf", width=6, height=4)
p=ggplot(a,aes(ALT_DIST_3P,fill=CATEGORY))+geom_density(linetype="blank",alpha=0.6)+facet_wrap(~SAMPLE)+theme_bw()
ggsave(p, file="ALT_DIST_3P.pdf", width=6, height=4)
#ggplot(a,aes(MP,fill=CATEGORY))+geom_density(linetype="blank",alpha=0.6)+facet_wrap(~SAMPLE)+theme_bw()+coord_cartesian(xlim=c(0.80,1),ylim=c(0,25))
#ggsave(p, file="STRANDEDNESS.pdf", width=6, height=4)
p=ggplot(a,aes(DP,fill=CATEGORY))+geom_density(linetype="blank",alpha=0.6)+facet_wrap(~SAMPLE)+theme_bw()+coord_cartesian(xlim=c(0,750),ylim=c(0,0.015))
ggsave(p, file="DP.pdf", width=6, height=4)
|
056ceda135fc64416f8d3ee65cc2369f21a500d0
|
b2f61fde194bfcb362b2266da124138efd27d867
|
/code/dcnf-ankit-optimized/Results/QBFLIB-2018/A1/Database/Jordan-Kaiser/reduction-finding-full-set-params-k1c3n4/exquery_query49_1344n/exquery_query49_1344n.R
|
7ff850991cd346e88d4e78add34c8a45f48ac9d5
|
[] |
no_license
|
arey0pushpa/dcnf-autarky
|
e95fddba85c035e8b229f5fe9ac540b692a4d5c0
|
a6c9a52236af11d7f7e165a4b25b32c538da1c98
|
refs/heads/master
| 2021-06-09T00:56:32.937250
| 2021-02-19T15:15:23
| 2021-02-19T15:15:23
| 136,440,042
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 71
|
r
|
exquery_query49_1344n.R
|
f7a4dfe290260b48e87c3378e2557772 exquery_query49_1344n.qdimacs 510 1245
|
f2a970d19d802a4f3ae37e1bf88f7addc9510441
|
ffdea92d4315e4363dd4ae673a1a6adf82a761b5
|
/data/genthat_extracted_code/gmp/examples/operators_bigq.Rd.R
|
46842023fffe4e4b34318f907dbda1d54063291d
|
[] |
no_license
|
surayaaramli/typeRrh
|
d257ac8905c49123f4ccd4e377ee3dfc84d1636c
|
66e6996f31961bc8b9aafe1a6a6098327b66bf71
|
refs/heads/master
| 2023-05-05T04:05:31.617869
| 2019-04-25T22:10:06
| 2019-04-25T22:10:06
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 331
|
r
|
operators_bigq.Rd.R
|
# Example script extracted from the gmp package's Rd documentation:
# basic arithmetic operators for arbitrary-precision rationals (bigq).
library(gmp)
### Name: Bigq operators
### Title: Basic arithmetic operators for large rationals
### Aliases: add.bigq +.bigq sub.bigq -.bigq mul.bigq *.bigq div.bigq
###   /.bigq pow.bigq ^.bigq abs.bigq
### Keywords: arith
### ** Examples
## 1/3 + 1 = 4/3 :
as.bigq(1,3) + 1
# Exact rational power: r^3 must equal repeated multiplication.
r <- as.bigq(12, 47)
stopifnot(r ^ 3 == r*r*r)
|
bfe9409a3c547561fadbd3b0f4e3ffaffaf2e5da
|
f28140b6953997b812afc1fda2da566d456ca517
|
/plot1.R
|
b967631c8aabbd22a2737626fa6c3b5b8c7cb138
|
[] |
no_license
|
paadanfel/ExData_Plotting1
|
2e547890bf358b4b1ef184c197de8d7a088a2583
|
a357a9b4cad14b2ec519e3d1bb9bdce7c2d9f7c0
|
refs/heads/master
| 2020-08-29T04:48:47.113506
| 2019-10-28T00:11:04
| 2019-10-28T00:11:04
| 217,933,046
| 0
| 0
| null | 2019-10-27T23:57:04
| 2019-10-27T23:57:03
| null |
UTF-8
|
R
| false
| false
| 214
|
r
|
plot1.R
|
# Plot 1: histogram of Global Active Power, saved as a 480x480 PNG.
get_data() #get data
# NOTE(review): get_data() is defined elsewhere and its return value is
# discarded; the code below relies on it creating a `data` object as a
# side effect (otherwise `data` resolves to the base function and the
# subsetting below fails) - confirm.
png(filename = "plot1.png",height = 480, width = 480)
hist(data$Global_active_power, col = "red", main = "Global Active Power", xlab = "Global Active Power (kilowatts)")
dev.off()
|
4337638f995a5c55717cdd5a0dd376841e0eb3b7
|
2e11399edb62739978e18210edeebb277e712fba
|
/data/table.R
|
7a9f7e0d6e3b49d18ecbca350967b262e91561b5
|
[] |
no_license
|
lter/lterwg-emergent
|
02b819204d60b69eb869fc4aa752fde6e961bb26
|
8bf0d8d940aab9948da47036fbaa889d053cb550
|
refs/heads/master
| 2023-04-28T11:22:38.398091
| 2023-04-19T18:32:06
| 2023-04-19T18:32:06
| 236,833,596
| 4
| 1
| null | 2020-10-29T19:56:32
| 2020-01-28T20:24:31
|
HTML
|
UTF-8
|
R
| false
| false
| 3,052
|
r
|
table.R
|
# Shiny app for the NEON data showcasing a filterable table of the data.
# Authors: Dvir Blander and Katrina Newcomer
# shiny drives the app; dplyr/readr handle the data; DT renders the
# datatable. plotly/ggplot2/shinyWidgets are loaded for the wider app.
library(plotly)
library(ggplot2)
library(shiny)
library(dplyr)
library(readr)
library(DT)
library(shinyWidgets)

# Note: the files live in the GitHub "NeonFiles" folder; set it as the
# working directory (Session -> Set Working Directory -> Choose Directory).
soilFieldChem <- read.csv(file = 'soilFieldChem.csv')

# Unresolved git merge conflict resolved in favour of the HEAD branch
# (grassland/forest subsets). The other branch instead dropped columns
# 72:113 via soilFieldChem <- soilFieldChem[-c(72:113)]; restore that
# line if the narrower table is wanted.
grass <- soilFieldChem[grep('grassland|Grassland', soilFieldChem$nlcdClass), ]
forest <- soilFieldChem[grep('forest|Forest', soilFieldChem$nlcdClass), ]

# Distinct, sorted values of a column for use as selectInput choices.
# (Since R 4.0 read.csv returns character columns, so the original
# levels(...) calls returned NULL and the dropdowns came up empty; this
# works for both character and factor columns.)
choice_values <- function(x) sort(unique(as.character(x)))

# The original nested a tabPanel directly inside sidebarLayout and left
# mainPanel outside it, so sidebarLayout() was missing its mainPanel
# argument and errored at render time; restructured to the standard
# sidebarPanel/mainPanel pair.
ui <- fluidPage(
  titlePanel("Neon Data Table"),
  sidebarLayout(
    position = "left",
    sidebarPanel(
      selectInput("selection1", label = h3("Select nlcdClass"),
                  choices = c("choose" = "", choice_values(soilFieldChem$nlcdClass)), selected = 'mixedForest'),
      selectInput("selection2", label = h3("Select siteID"),
                  choices = c("choose" = "", choice_values(soilFieldChem$siteID)), selected = 'BART'),
      selectInput("selection4", label = h3("Select biophysicalCriteria"),
                  choices = c("choose" = "", choice_values(soilFieldChem$biophysicalCriteria)), selected = 'OK - no known exceptions'),
      selectInput("selection5", label = h3("Select sampleTiming"),
                  choices = c("choose" = "", choice_values(soilFieldChem$sampleTiming)), selected = 'peakGreenness')
    ),
    mainPanel(DT::dataTableOutput("table"))
  )
)

server <- function(input, output) {
  # Reactive subset of the data driven by the four dropdowns.
  tab <- reactive({
    soilFieldChem %>%
      filter(nlcdClass == input$selection1) %>%
      filter(siteID == input$selection2) %>%
      filter(biophysicalCriteria == input$selection4) %>%
      filter(sampleTiming == input$selection5)
  })
  output$table <- DT::renderDataTable({
    tab()
  })
}

# Create the Shiny app object from the UI/server pair.
shinyApp(ui = ui, server = server)
|
b0d3d2307c74cf8e74e44c1c681cd87160af64ec
|
fd7434084d6e02b60b765fe6473d0b9bcefe735a
|
/actions/install-deps/test_install-deps.R
|
b436070d12533d296083dbf8f30ccedf69342dd4
|
[
"MIT"
] |
permissive
|
kiwiroy/ghactions
|
adb9104287b6c6fe487cda46c9db97496d5d36f8
|
2ef1c090f8d49037e2b1d14bf2f122a6ce720b02
|
refs/heads/master
| 2020-07-31T21:19:27.299917
| 2019-09-24T20:37:40
| 2019-09-24T20:42:41
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 539
|
r
|
test_install-deps.R
|
context("install-deps")

# Resolve the path to the entrypoint script: inside the action's Docker
# container it is mounted at the filesystem root; locally it sits next to
# this test file.
# TODO: duplicated across test files - extract into a helper function.
if (file.exists("/.dockerenv")) {
  cmd_path <- "/entrypoint.R"
} else {
  cmd_path <- paste0(getwd(), "/", "entrypoint.R")
}

test_that(desc = "package from good DESCRIPTION is installed", code = {
  # setwd() invisibly returns the previous directory; restore it even when
  # an expectation fails. (The original ran an unconditional setwd("..") at
  # the end, which was skipped whenever a failing expectation aborted the
  # block, leaving the working directory changed for later tests.)
  old_wd <- setwd("good")
  on.exit(setwd(old_wd), add = TRUE)
  # Run the action's entrypoint, which installs the DESCRIPTION deps into
  # the library pointed at by R_LIBS_WORKFLOW.
  system2(command = cmd_path, stdout = FALSE, stderr = FALSE)
  expect_equal(
    object = dir(Sys.getenv("R_LIBS_WORKFLOW")),
    expected = "mnormt"
  )
  expect_silent(object = library(mnormt, lib.loc = Sys.getenv("R_LIBS_WORKFLOW")))
})
|
cc5e6a899ea8b401701350a74f560a2b94f9d953
|
8f984222e808c6f15301bf415f7902850185cf83
|
/ui.R
|
cdb8e37c00192658d66cf192c6306b335654ea6c
|
[] |
no_license
|
alimojiz1/ddp_assignment
|
71ee15e8d3f3abda5f873d1215e70141884b9dfd
|
a95681beb89557d2f8ca15bf44002483a269ab58
|
refs/heads/master
| 2021-01-20T08:14:03.669895
| 2017-05-03T06:36:04
| 2017-05-03T06:36:04
| 90,116,281
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,055
|
r
|
ui.R
|
# Shiny UI: Dengue cases in Malaysia, 2012-2015.
# Two controls (year and plot type) drive a data table tab and a plot tab.
library(shiny)
# Choices for the two dropdowns below.
data_years <- c("2012", "2013","2014", "2015")
data_graf <- c("By Year", "Top 10 States", "Comparison By 4 States")
# The page expression itself is the UI object sourced by Shiny.
fluidPage(
  pageWithSidebar(
    headerPanel("Dengue Cases in Malaysia from year 2012 -2015"),
    sidebarPanel(
      helpText("Please choose the year to see the sample of dengue data by year",
               "in a table form in DISPLAY DENGUE TABLE tab."),
      selectInput("datayear", label = "Select year", choices = data_years),
      helpText("Please click PLOT DENGUE CASES tab to see the graph.",
               "The plot display the general trend of dengue outbreak",
               "in Malaysia throughout the years from the dataset."),
      selectInput("graf1", label = "Select plot type", choices = data_graf),
      br(),
      a(href = "https://github.com/Yus2015/DevDataProducts.git", "The Shiny apps Source code")
    ),
    # Outputs "data_table" and "data_plot" are rendered by the server side.
    mainPanel(
      tabsetPanel(
        tabPanel("DISPLAY DENGUE TABLE", tableOutput("data_table")),
        tabPanel("PLOT DENGUE CASES", plotOutput("data_plot")))
    )
  )
)
|
33243f540b6001ce42b053c7030d7d85df1dfb8c
|
403f786c7c85fa551326d1e077bc895fea26e7c9
|
/man/configure_environment.Rd
|
029bba8924536ca24153dd7b7cd54665e5dfb523
|
[
"Apache-2.0"
] |
permissive
|
rstudio/reticulate
|
81528f898d3a8938433d2d6723cedc22bab06ecb
|
083552cefe51fe61441679870349b6c757d6ab48
|
refs/heads/main
| 2023-08-22T01:41:52.850907
| 2023-08-21T16:19:42
| 2023-08-21T16:19:42
| 81,120,794
| 1,672
| 399
|
Apache-2.0
| 2023-09-13T20:35:47
| 2017-02-06T18:59:46
|
R
|
UTF-8
|
R
| false
| true
| 1,999
|
rd
|
configure_environment.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/python-packages.R
\name{configure_environment}
\alias{configure_environment}
\title{Configure a Python Environment}
\usage{
configure_environment(package = NULL, force = FALSE)
}
\arguments{
\item{package}{The name of a package to configure. When \code{NULL}, \code{reticulate}
will instead look at all loaded packages and discover their associated
Python requirements.}
\item{force}{Boolean; force configuration of the Python environment? Note
that \code{configure_environment()} is a no-op within non-interactive \R
sessions. Use this if you require automatic environment configuration, e.g.
when testing a package on a continuous integration service.}
}
\description{
Configure a Python environment, satisfying the Python dependencies of any
loaded \R packages.
}
\details{
Normally, this function should only be used by package authors, who want
to ensure that their package dependencies are installed in the active
Python environment. For example:
\if{html}{\out{<div class="sourceCode">}}\preformatted{.onLoad <- function(libname, pkgname) \{
reticulate::configure_environment(pkgname)
\}
}\if{html}{\out{</div>}}
If the Python session has not yet been initialized, or if the user is not
using the default Miniconda Python installation, no action will be taken.
Otherwise, \code{reticulate} will take this as a signal to install any required
Python dependencies into the user's Python environment.
If you'd like to disable \code{reticulate}'s auto-configure behavior altogether,
you can set the environment variable:
\if{html}{\out{<div class="sourceCode">}}\preformatted{RETICULATE_AUTOCONFIGURE = FALSE
}\if{html}{\out{</div>}}
e.g. in your \verb{~/.Renviron} or similar.
Note that, in the case where the Python session has not yet been initialized,
\code{reticulate} will automatically ensure your required Python dependencies
are installed after the Python session is initialized (when appropriate).
}
|
ed7eda917623ba784042d5d18ac77db8f8773be1
|
17200de75fee60dfda419b6b467973dab08c9d85
|
/R/r04.R
|
ff0b40458908a0c87632f6317cc5e41e53e62f12
|
[] |
no_license
|
saltandlight/TIL
|
de3568f3b4d1d892983b6025bcd453a4dc3b7f04
|
ae75f506efd231eb88e5a115410c8a760c0a15ca
|
refs/heads/master
| 2022-12-27T15:07:22.485769
| 2020-07-27T08:22:50
| 2020-07-27T08:22:50
| 187,792,504
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 835
|
r
|
r04.R
|
# Read the per-student score table; subject columns KO/EN/MA/SI sit in
# columns 3:6.  NOTE(review): file is assumed to be EUC-KR encoded -- confirm.
student <- read.csv("student2.csv",
                    header = TRUE,
                    encoding = "EUC-KR")
str(student)
student
View(student)

# Per-student average over the four subject columns.
# Fix: rowMeans() replaces the original eight hard-coded mean() calls
# (one per row), so this now works for any number of students, not just 8.
student$AVGDATA <- rowMeans(student[, 3:6])
student

# Student name (column 2) together with the freshly added average (column 7)
newst <- student[, c(2, 7)]
newst

# Subject scores with an appended row of per-subject means
newst2 <- student[, 3:6]
newst2 <- rbind(newst2, colMeans(newst2))
newst2

# One-row data frame of per-subject means, computed on newst2 *after* the
# means row was appended (matching the original order of operations).
# Fix: the original passed the means positionally to as.data.frame(), where
# the 2nd..4th values were silently consumed as row.names/optional arguments.
newst3 <- data.frame(KO = mean(newst2$KO),
                     EN = mean(newst2$EN),
                     MA = mean(newst2$MA),
                     SI = mean(newst2$SI))
newst3
|
df6048f837905f50c204bd0766464a223e4200ee
|
d2eda24acceb35dc11263d2fa47421c812c8f9f6
|
/man/coastline3.Rd
|
59fdf68ec51575c7de34b6bc75daae20c42c2a45
|
[] |
no_license
|
tbrycekelly/TheSource
|
3ddfb6d5df7eef119a6333a6a02dcddad6fb51f0
|
461d97f6a259b18a29b62d9f7bce99eed5c175b5
|
refs/heads/master
| 2023-08-24T05:05:11.773442
| 2023-08-12T20:23:51
| 2023-08-12T20:23:51
| 209,631,718
| 5
| 1
| null | null | null | null |
UTF-8
|
R
| false
| true
| 526
|
rd
|
coastline3.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/data.r
\docType{data}
\name{coastline3}
\alias{coastline3}
\title{World Coastline Data (L3)
This is a coarse resolution coastline, compiled from the global hierarchical coastline data product.}
\format{
An object of class \code{list} of length 2.
}
\source{
NOAA
}
\usage{
coastline3
}
\description{
World Coastline Data (L3)
This is a coarse resolution coastline, compiled from the global hierarchical coastline data product.
}
\keyword{datasets}
|
e38adc4aab56e738f5382eb81c09bab4a3ba8e03
|
44091b4bc11842b1ed02cd8b765fddfe9c8c6202
|
/temp_files/generate spatial data for server.R
|
2ea460fe78915d972555b4aeab65a60a89651c8e
|
[] |
no_license
|
anarosner/testing
|
aa2bb896a807308acccce943e90e7a66020d3ec7
|
d110884d052f12b5ccf48ca3d0ce6a2e8776be51
|
refs/heads/master
| 2021-01-25T05:21:36.250905
| 2020-02-15T14:21:15
| 2020-02-15T14:21:15
| 7,007,489
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 4,867
|
r
|
generate spatial data for server.R
|
#!/usr/bin/env Rscript
# Exploratory/one-off script: loads NHDPlus (National Hydrography Dataset
# Plus) flowlines, catchments and attribute tables for region 01
# (New England), writes out one shapefile + KML pair per HUC8, and begins
# (but does not finish) assigning catchments to HUC10 watersheds.
#
# NOTE(review): paths are hard-coded to a local Windows layout (C:/ALR/...)
# -- the script is not portable as written.
library(sp)
library(rgdal)
library(rgeos)
library(maptools)
# Load NHDplus Data
# Base directory (change depending on file structure wherever this is being run)
basedir<-"C:/ALR/SpatialData"
# basedir<-"/home/ana/testing"
# basedir<-"C:/Documents/__USGS"
##### Define projections
# Projection used by NHD and NHDplus (geographic NAD83)
proj4.NHD<-"+proj=longlat +ellps=GRS80 +datum=NAD83 +no_defs"
#Projection used by NHDplus for gridded data
# proj4.NHDplusgrid
# Projection used by MassGIS
# proj4.MAstateplane<-"+proj=lcc +lat_1=41.71666666666667 +lat_2=42.68333333333333 +lat_0=41 +lon_0=-71.5 +x_0=200000 +y_0=750000 +ellps=GRS80 +units=m +no_defs"
# HUC8 polygons; the system.time() wrappers throughout are rough load-time logging
setwd("C:/ALR/GeneratedSpatialData/FilesByHUC10_3")
system.time(HUC_8<-readShapePoly("HUC_8",proj4string=CRS(proj4.NHD)))
### load NHDPlus shapefiles and attribute dbf files
#Flowlines
setwd(paste0(basedir,"/NHDPlus/NHDPlusV21_NE_01_NHDSnapshot_01/NHDPlusNE/NHDPlus01/NHDSnapshot/Hydrography"))
system.time(NEFlow.line<-readShapeLines("NHDFlowline",proj4string=CRS(proj4.NHD)))
#Catchments
setwd(paste0(basedir,"/NHDplus/NHDPlusV21_NE_01_NHDPlusCatchment_01/NHDPlusNE/NHDPlus01/NHDPlusCatchment"))
system.time(NECatch.shape<-readShapePoly("Catchment",proj4string=CRS(proj4.NHD)))
# setwd("C:/ALR/GeneratedSpatialData/FilesByHUC10")
# system.time(catchmentwHUC<-readShapePoly("catchmentwHUC",proj4string=CRS(proj4.NHD)))
# huc10<-unique(as.character(catchmentwHUC$HUC_10))
# huc10
#Attributes tables
# NOTE(review): read.dbf() is not provided by any of the packages loaded
# above -- it needs library(foreign) (or shapefiles); confirm which was intended.
setwd(paste0(basedir,"/NHDplus/NHDPlusV21_NE_01_NHDPlusAttributes_01/NHDPlusNE/NHDPlus01/NHDPlusAttributes"))
system.time(plusflow<-read.dbf("PlusFlow.dbf"))
system.time(plusflowlineVAA<-read.dbf("PlusFlowlineVAA.dbf"))
names(plusflowlineVAA)
# Columns 1 and 4 -- ComID and StreamOrde (per the $StreamOrde use below).
# NOTE(review): the name `order` shadows base::order (the function is still
# found for calls, but this is confusing) -- consider renaming.
order<-plusflowlineVAA[,c(1,4)]
# Keep features of stream order >= 6 ("large" rivers)
large<-order[order$StreamOrde>=6 & !is.na(order$StreamOrde),]
# large<-large[!is.na(large$StreamOrde),]
# Interactive inspection of the result
large[1:22,]
dim(large)
large[280:310,]
is.na(large[280:310,])
setwd("C:/ALR/GitHub/testing/spatial_data")
write.csv(large$ComID, file="large_features.csv", row.names=FALSE)
# NOTE(review): `plusflow` is overwritten twice below; only the
# LargeFeatures.csv read survives -- the first line looks like a leftover.
system.time(plusflow<-read.csv("PlusFlow.csv"))
system.time(plusflow<-read.csv("LargeFeatures.csv"))
### Merge w/ VAA table to get attributes
# NOTE(review): c(1,4,1) selects column 1 twice -- the duplicated column is
# probably unintended.
plusdata<-plusflowlineVAA[,c(1,4,1)]
# Keep an untouched copy in case the in-place edits below go wrong
bkup<-NEFlow.line
NEFlow.line@data[c(1:nrow(NEFlow.line@data)),ncol(NEFlow.line)+1]<-c(1:nrow(NEFlow.line@data)) #add column to sort by later
# NOTE(review): hard-coded column index 15 assumes a fixed attribute layout
colnames(NEFlow.line@data)[15]<-"sortby"
# NOTE(review): merge() is called on the Spatial object itself but the
# result is assigned into @data; merging NEFlow.line@data directly is the
# conventional pattern -- verify the result has the expected columns.
NEFlow.line@data<-merge(NEFlow.line, plusdata, by.x="COMID", by.y="ComID", all.x=TRUE, sort=FALSE)
# Restore the original feature order via the sortby column added above
NEFlow.line@data<-NEFlow.line@data[order(NEFlow.line$sortby),]
rm(list=c("plusdata"))
#add centroid
NECatch.shape$lat<-0
NECatch.shape$long<-0
# NOTE(review): centroids are computed but never copied into the lat/long
# columns initialized just above -- unfinished?
Centroids<-gCentroid(NECatch.shape, byid=TRUE, id = "FEATUREID")
# NHDFlow.line<-NEFlow.line
# NHDCatch.shape<-NECatch.shape
#HUC 8 outlines
#by first 8 digits of reachcode in Flowlines
huc8<-unique(substr(NEFlow.line$REACHCODE,1,8))
unique(substr(NEFlow.line$REACHCODE,1,4))
huc8[34:35]
setwd("C:/ALR/GeneratedSpatialData/FilesByHUC10_3")
# Export one shapefile + KML pair of catchments and flowlines per HUC8
# for (i in 34:35) {
for (i in 1:length(huc8)) {
  print(paste("iteration: ",i,"huc:",huc8[i]))
  huc8.line<-NEFlow.line[substr(NEFlow.line$REACHCODE,1,8)==huc8[i],] #HUC8
  huc8.shape<-NECatch.shape[NECatch.shape$FEATUREID %in% huc8.line$COMID,]
  print(paste("number of catchments:",length(huc8.shape)))
  writeOGR(huc8.shape, ".", layer=paste0(huc8[i],"Catchments"), driver="ESRI Shapefile")
  writeOGR(huc8.line, ".", layer=paste0(huc8[i],"Flowlines"), driver="ESRI Shapefile")
  writeOGR(huc8.shape, paste0(huc8[i],"Catchments.kml"), layer="NHDCatchments", driver="KML",dataset_options=c("NameField=FEATUREID"))
  writeOGR(huc8.line, paste0(huc8[i],"Flowlines.kml"), layer="NHDplusFlowlines", driver="KML",dataset_options=c("NameField=COMID","DescriptionField=GNIS_NAME"))
}
#set up new blank spatialpolygonsdataframe w/ structure of NECatch.shape
oldcatch<-NECatch.shape
# NOTE(review): NHDCatch.shape only exists in the commented-out assignment
# above -- as written this line fails; probably meant NECatch.shape.
newcatch<-NHDCatch.shape[1,]
newcatch@data
# names(newcatch@data)
newcatch@data[,"HUC_10"]<-1
# Drop the seed row so newcatch is an empty frame with the right structure
newcatch<-newcatch[-1,]
newcatch@data
# names(newcatch@data)
featureids<-oldcatch$FEATUREID
setwd("C:/ALR/GeneratedSpatialData/dissolved_hucs")
huc10.shape<-readShapePoly("huc10",proj4string=CRS(proj4.NHD))
length(huc10.shape)
# Assign catchments to HUC10 polygons.  NOTE(review): this loop is clearly
# unfinished -- `i` is immediately overwritten and the indices are
# inconsistent; do not rely on it.
for (i in 1:2) {
  i<-345
  i<-10
  # Which catchments does this HUC10 polygon cover?
  # NOTE(review): gCovers() without byid=TRUE returns a single logical, not
  # a per-feature vector -- byid=TRUE is probably needed for this index.
  catch_index<-gCovers(huc10.shape[i,],NECatch.shape)
  #catch_index<-over(huc10.shape[i,],NECatch.shape)
  huc_index<-over(NECatch.shape[catch_index,],huc10.shape)
  # huc10.shape@polygons[[i]]@ID
  plot(huc10.shape[huc_index,])
  plot(NECatch.shape[catch_index,],add=T)
  currentcatch<-newcatch
  currentcatch_index<-length(currentcatch)+1
  # NOTE(review): `newcatch_index` is never defined (only
  # currentcatch_index above) -- this line errors as written.
  newcatch[newcatch_index,]<-oldcatch[catch_index,]
  newcatch[newcatch_index,"HUC_10"]
  # oldcatch<-oldcatch[-oldcatch$FEATUREID==current,]
  # print(paste("old",length(oldcatch),"new",length(newcatch)))
}
|
4cbe8d319c066f182015f7fe17281ae5e546be16
|
80cfa0fd4f5b3d95e8799ef2b4c9271d00c58432
|
/validate/scf_income_plot.R
|
de4d07cf8a7706e87858cb9879b0fcd6ad6617d8
|
[] |
no_license
|
davidsovich/auto_abs
|
aaf4054ad0bab7f6b0e368294246933def35cae8
|
21614a07bec4cde645de63332a296bc0054ec1b5
|
refs/heads/master
| 2023-02-20T21:36:20.903361
| 2021-01-20T16:28:30
| 2021-01-20T16:28:30
| 331,362,887
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 5,977
|
r
|
scf_income_plot.R
|
# ---- Description --------------------------------------------------------------------------------
# This program plots the income distribution in the Reg AB II and the SCF
# (Survey of Consumer Finances) data.
# ---- Preliminaries ------------------------------------------------------------------------------
# Load libraries
# NOTE(review): readxl and lfe are loaded but not used anywhere in this
# script -- candidates for removal.
library(tidyverse)
library(ggplot2)
library(readxl)
library(tidyr)
library(lfe)
# Load functions
source("./assist/functions.R")
# Load constants
source("./assist/constants.R")
# ---- Load data ----------------------------------------------------------------------------------
# Combined Reg AB II / SCF income data; col_types = "didd" fixes the four
# columns as (double, integer, double, double).
plot_df = read_csv(
  file = "../data/validation/scf_income.csv",
  col_types = "didd"
)
# ---- Wrangle data -------------------------------------------------------------------------------
plot_df = plot_df %>%
  # Restrict the sample to incomes of at most $250,000
  filter(
    income <= 250000
  ) %>%
  mutate(
    # Rows with a missing survey weight default to weight 1
    wgt = ifelse(
      is.na(wgt),
      1,
      wgt
    ),
    # Auto-loan indicator.  NOTE(review): X437 is presumably the SCF
    # vehicle-loan variable and NA rows are the Reg AB II records (auto
    # loans by construction) -- confirm against the data dictionary.
    auto = ifelse(
      is.na(X437) | X437 == 1,
      1,
      0
    ),
    # Human-readable series label used in the overlay-plot legends
    series = case_when(
      user_data == 1 ~ "Reg AB II",
      TRUE ~ "SCF"
    )
  )
# ---- Plot income distribution conditional on loan -----------------------------------------------
# Reg AB II borrower incomes (solid gray bars) overlaid with SCF households
# holding an auto loan (auto == 1; hollow black-outlined bars, survey
# weighted).  Both histograms are on the density scale with the same bin
# count so the two distributions are directly comparable.
#
# Fixes: (1) removed the trailing comma after the last theme() argument,
# which passes an empty argument into theme(...) and errors at runtime;
# (2) the figure is rendered with print() so the PDF is written even when
# this script is source()d rather than run at top level.
gg_inc = ggplot(
  data = filter(plot_df, user_data == 1)
) +
  geom_histogram(
    aes(
      x = income,
      # NOTE(review): ..density.. is superseded by after_stat(density) in
      # current ggplot2; kept for compatibility with older versions.
      y = ..density..
    ),
    position = "identity",
    color = "darkgray",
    fill = "darkgray",
    bins = 48
  ) +
  geom_histogram(
    data = filter(plot_df, user_data == 0, auto == 1),
    aes(
      x = income,
      y = ..density..,
      weight = wgt
    ),
    position = "identity",
    color = "black",
    fill = "white",
    bins = 48,
    alpha = 0
  ) +
  # NOTE(review): no fill aesthetic is mapped in this plot (fill is a
  # constant in each layer), so this scale has no visible effect.
  scale_fill_manual(
    name = "",
    values = c("darkgray", "white")
  ) +
  theme_bw() +
  theme(
    axis.text.x = element_text(
      size = 12
    ),
    axis.text.y = element_text(
      size = 12,
      angle = 45
    ),
    panel.grid.major = element_blank(),
    panel.grid.minor = element_blank()
  ) +
  labs(
    x = "Income",
    y = "Density"
  )
# ---- Export the plot ----------------------------------------------------------------------------
# Write the figure to PDF and save the ggplot object for reuse downstream
cairo_pdf(
  filename = "../output/figures/validation/scf_income_plot.pdf"
)
print(gg_inc)
dev.off()
save(
  gg_inc,
  file = "../output/objects/validation/gg_inc.Rda"
)
# ---- Plot alternate income distribution conditional on loan -------------------------------------
# Alternate version of the auto-loan income comparison: both series
# (Reg AB II and SCF, restricted to auto == 1) are drawn by a single
# histogram layer with fill/color mapped to `series`, producing a legend
# and semi-transparent overlapping bars.
gg_ainc = ggplot(
  data = filter(plot_df, auto == 1)
) +
  geom_histogram(
    aes(
      x = income,
      # NOTE(review): ..density.. is superseded by after_stat(density) in
      # current ggplot2; kept for compatibility with older versions.
      y = ..density..,
      fill = series,
      color = series,
      weight = wgt
    ),
    position = "identity",
    bins = 48,
    alpha = 0.5
  ) +
  # Series colors: gray fill for "Reg AB II", white for "SCF"
  # (alphabetical factor order), both outlined in black.
  scale_fill_manual(
    values = c("darkgray", "white")
  ) +
  scale_color_manual(
    values = c("black", "black")
  ) +
  theme_bw() +
  theme(
    axis.text.x = element_text(
      size = 12
    ),
    axis.text.y = element_text(
      size = 12,
      angle = 45
    ),
    panel.grid.major = element_blank(),
    panel.grid.minor = element_blank(),
    legend.position = "top",
    legend.title = element_blank()
  ) +
  labs(
    x = "Income",
    y = "Density"
  )
# ---- Export the plot ----------------------------------------------------------------------------
# NOTE(review): the bare `gg_ainc` below relies on top-level auto-printing;
# wrap it in print() if this script is ever source()d.
cairo_pdf(
  filename = "../output/figures/validation/scf_alternate_income_plot.pdf",
)
gg_ainc
dev.off()
# Save the ggplot object for reuse downstream
save(
  gg_ainc,
  file = "../output/objects/validation/gg_ainc.Rda"
)
# ---- Plot unconditional income distribution -----------------------------------------------------
# Same overlay as gg_inc but without conditioning the SCF sample on holding
# an auto loan: Reg AB II borrowers (solid gray) vs. all SCF households
# (hollow black outline, survey weighted), both on the density scale.
#
# Fixes: (1) removed the trailing comma after the last theme() argument,
# which passes an empty argument into theme(...) and errors at runtime;
# (2) the figure is rendered with print() so the PDF is written even when
# this script is source()d rather than run at top level.
gg_uinc = ggplot(
  data = filter(plot_df, user_data == 1)
) +
  geom_histogram(
    aes(
      x = income,
      # NOTE(review): ..density.. is superseded by after_stat(density) in
      # current ggplot2; kept for compatibility with older versions.
      y = ..density..
    ),
    position = "identity",
    color = "darkgray",
    fill = "darkgray",
    bins = 48
  ) +
  geom_histogram(
    data = filter(plot_df, user_data == 0),
    aes(
      x = income,
      y = ..density..,
      weight = wgt
    ),
    position = "identity",
    color = "black",
    fill = "white",
    bins = 48,
    alpha = 0
  ) +
  # NOTE(review): no fill aesthetic is mapped in this plot (fill is a
  # constant in each layer), so this scale has no visible effect.
  scale_fill_manual(
    name = "",
    values = c("darkgray", "white")
  ) +
  theme_bw() +
  theme(
    axis.text.x = element_text(
      size = 12
    ),
    axis.text.y = element_text(
      size = 12,
      angle = 45
    ),
    panel.grid.major = element_blank(),
    panel.grid.minor = element_blank()
  ) +
  labs(
    x = "Income",
    y = "Density"
  )
# ---- Export the plot ----------------------------------------------------------------------------
# Write the figure to PDF and save the ggplot object for reuse downstream
cairo_pdf(
  filename = "../output/figures/validation/scf_income_unconditional_plot.pdf"
)
print(gg_uinc)
dev.off()
save(
  gg_uinc,
  file = "../output/objects/validation/gg_uinc.Rda"
)
# ---- Plot unconditional alternate income distribution -------------------------------------------
# Alternate version of the unconditional comparison: the full sample (no
# auto-loan restriction) drawn by a single histogram layer with fill/color
# mapped to `series`, producing a legend and semi-transparent bars.
gg_uainc = ggplot(
  data = plot_df
) +
  geom_histogram(
    aes(
      x = income,
      # NOTE(review): ..density.. is superseded by after_stat(density) in
      # current ggplot2; kept for compatibility with older versions.
      y = ..density..,
      fill = series,
      color = series,
      weight = wgt
    ),
    position = "identity",
    bins = 48,
    alpha = 0.5
  ) +
  # Series colors: gray fill for "Reg AB II", white for "SCF"
  # (alphabetical factor order), both outlined in black.
  scale_fill_manual(
    values = c("darkgray", "white")
  ) +
  scale_color_manual(
    values = c("black", "black")
  ) +
  theme_bw() +
  theme(
    axis.text.x = element_text(
      size = 12
    ),
    axis.text.y = element_text(
      size = 12,
      angle = 45
    ),
    panel.grid.major = element_blank(),
    panel.grid.minor = element_blank(),
    legend.position = "top",
    legend.title = element_blank()
  ) +
  labs(
    x = "Income",
    y = "Density"
  )
# ---- Export the plot ----------------------------------------------------------------------------
# NOTE(review): the bare `gg_uainc` below relies on top-level auto-printing;
# wrap it in print() if this script is ever source()d.
cairo_pdf(
  filename = "../output/figures/validation/scf_alternate_income_unconditional_plot.pdf",
)
gg_uainc
dev.off()
# Save the ggplot object for reuse downstream
save(
  gg_uainc,
  file = "../output/objects/validation/gg_uainc.Rda"
)
|
ab8116ea1286f69bc876f38893bf1d42d733bfb9
|
53bceac9eb1fe79a59e7f555fa611d98257bc3dd
|
/R/closedp.mX.R
|
a855ce33623d61d0aa9e67ef17db46af75e0097a
|
[] |
no_license
|
cran/Rcapture
|
6d8b78bf364e183283593eeae3b16a35ed924cdb
|
b536c826402072e23db2772e61e4e48d64bddb4d
|
refs/heads/master
| 2022-06-06T19:11:38.710140
| 2022-05-04T12:20:06
| 2022-05-04T12:20:06
| 17,693,081
| 1
| 1
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,490
|
r
|
closedp.mX.R
|
"closedp.mX" <- function(X,dfreq=FALSE,mX,mname="Customized model")
{
X<-as.matrix(X)
t <- ifelse(dfreq,dim(X)[2]-1,dim(X)[2])
#####################################################################################################################################
# Validation des arguments fournis en entree
# Argument dfreq
if(!is.logical(dfreq)||!isTRUE(all.equal(length(dfreq),1))) stop("'dfreq' must be a logical object of length 1")
# Argument X
if (dfreq)
{
if (any(X[,1:t]!=1&X[,1:t]!=0)) stop("every columns of 'X' but the last one must contain only zeros and ones")
if (any((X[,t+1]%%1)!=0)) stop("the last column of 'X' must contain capture history frequencies, therefore integers")
} else {
if(any(X!=1&X!=0)) stop("'X' must contain only zeros and ones")
}
# Argument mX
mX<-as.matrix(mX)
if (!isTRUE(all.equal(2^t-1,dim(mX)[1]))) stop("'mX' must have 2^t-1 rows")
# Argument mname
if(!is.character(mname)) stop("'mname' must be a character string specifying the model's name")
#####################################################################################################################################
Y <- histfreq.t(X,dfreq=dfreq)
anaM <- glm(Y~mX,family=poisson)
NM <- sum(Y)+exp(anaM$coef[1])
varcovM <- summary(anaM)$cov.unscaled
erreurtypeM <- sqrt(exp(anaM$coef[1])+(exp(2*anaM$coef[1]))*varcovM[1,1])
M <- matrix(c(NM,erreurtypeM,anaM$dev,anaM$df.residual,anaM$aic),nrow=1)
# Preparation des sorties
dimnames(M) <- list(mname,c("abundance","stderr","deviance","df","AIC"))
ans <- list(n=sum(Y),results=M,glm=anaM)
class(ans) <- "closedp.custom"
ans
}
# S3 print method for "closedp.custom" objects.
# Shows the number of captured units followed by the results table rounded
# to display precision, then returns the input invisibly so the object can
# continue to flow through assignments and pipes.
print.closedp.custom <- function(x, ...) {
  cat("\nNumber of captured units:", x$n, "\n\n")
  cat("Abundance estimation and model fit:\n")

  # Round each column group to its display precision:
  # abundance/stderr (cols 1-2) -> 1 decimal, df (col 4) -> integer,
  # deviance/AIC (cols 3 and 5) -> 3 decimals.
  tbl <- x$results
  for (spec in list(list(c(1, 2), 1), list(4, 0), list(c(3, 5), 3))) {
    tbl[, spec[[1]]] <- round(tbl[, spec[[1]]], spec[[2]])
  }

  print.default(tbl, print.gap = 2, quote = FALSE, right = TRUE)
  cat("\n")
  invisible(x)
}
# S3 boxplot method for "closedp.custom" objects.
#
# Draws a boxplot of the Pearson residuals of the fitted Poisson GLM,
# i.e. (observed - fitted) / sqrt(fitted), and invisibly returns the usual
# boxplot statistics list.
#
# Fix: the original accepted `...` but never forwarded it, so graphical
# parameters passed by the caller (col, ylab, main, ...) were silently
# ignored.  They are now forwarded to boxplot(); the historical title is
# kept as the default and can be overridden via main=.
boxplot.closedp.custom <- function(x, ...) {
  # Pearson residuals of the Poisson loglinear fit
  pres <- (x$glm$y - fitted(x$glm)) / sqrt(fitted(x$glm))
  args <- list(x = pres, ...)
  if (is.null(args$main)) {
    args$main <- "Boxplot of Pearson Residuals for the customized model"
  }
  do.call(boxplot, args)
}
|
9491854bf2001ff560c43d01aa3e22453f2c5abb
|
6ec650d8565f4b68a3a2c23bd9c39bbad4e1006a
|
/R/proust_datasets.R
|
90d483c615d2fc39defb3dc31fe397c42bf1595f
|
[] |
no_license
|
ColinFay/proustr
|
9de427aa8f69eb527b6e8d2f12b046473bc9308b
|
ff1cd4bad45701e97ae85f894dbcab1bff9f15de
|
refs/heads/master
| 2021-01-24T08:32:11.157401
| 2019-02-05T13:17:02
| 2019-02-05T13:17:02
| 93,385,515
| 26
| 4
| null | null | null | null |
UTF-8
|
R
| false
| false
| 542
|
r
|
proust_datasets.R
|
#' Characters from "À la recherche du temps perdu"
#'
#' A dataset containing Marcel Proust's characters from "À la recherche du temps perdu" and
#' their frequency in each book.
#' This dataset was downloaded from the proust-personnages website.
#'
#' @source \url{http://proust-personnages.fr/?page_id=10254}
#'
#' @format A tibble with the characters' names (and, presumably, their
#'   per-book frequencies as described above -- confirm against the built
#'   object).
"proust_char"
#' Stopwords
#'
#' Stopwords from the stopwords-iso project (JSON distribution), for use
#' when filtering out common function words.
#'
#' @source \url{https://raw.githubusercontent.com/stopwords-iso/stopwords-iso/master/stopwords-iso.json}
#'
#' @format A tibble
"stop_words"
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.