blob_id
stringlengths 40
40
| directory_id
stringlengths 40
40
| path
stringlengths 2
327
| content_id
stringlengths 40
40
| detected_licenses
listlengths 0
91
| license_type
stringclasses 2
values | repo_name
stringlengths 5
134
| snapshot_id
stringlengths 40
40
| revision_id
stringlengths 40
40
| branch_name
stringclasses 46
values | visit_date
timestamp[us]date 2016-08-02 22:44:29
2023-09-06 08:39:28
| revision_date
timestamp[us]date 1977-08-08 00:00:00
2023-09-05 12:13:49
| committer_date
timestamp[us]date 1977-08-08 00:00:00
2023-09-05 12:13:49
| github_id
int64 19.4k
671M
โ | star_events_count
int64 0
40k
| fork_events_count
int64 0
32.4k
| gha_license_id
stringclasses 14
values | gha_event_created_at
timestamp[us]date 2012-06-21 16:39:19
2023-09-14 21:52:42
โ | gha_created_at
timestamp[us]date 2008-05-25 01:21:32
2023-06-28 13:19:12
โ | gha_language
stringclasses 60
values | src_encoding
stringclasses 24
values | language
stringclasses 1
value | is_vendor
bool 2
classes | is_generated
bool 2
classes | length_bytes
int64 7
9.18M
| extension
stringclasses 20
values | filename
stringlengths 1
141
| content
stringlengths 7
9.18M
|
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
9a1fab03b036ce268f0a7931ad42b2950a87bf63
|
237bcbdc6b09c57b251191471359eeefb8014410
|
/letter_Prokopenko/06-Aquilla_9_23_2021_compare_variants.R
|
c672f6be251aa3a62788ffd5c34f09cf7fd9ec71
|
[] |
no_license
|
achalneupane/rcodes
|
d2055b03ca70fcd687440e6262037507407ec7a5
|
98cbc1b65d85bbb6913eeffad62ad15ab9d2451a
|
refs/heads/master
| 2022-10-02T20:35:18.444003
| 2022-09-09T20:53:03
| 2022-09-09T20:53:03
| 106,714,514
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 22,355
|
r
|
06-Aquilla_9_23_2021_compare_variants.R
|
#############################
## Single variant analysis ##
#############################
## Compares single-variant association results from this study (familial FBAT,
## unrelated replication, and their fixed-effect meta-analysis) against the
## Prokopenko et al. published variants, merging all tables on a chr:pos key.
# Family (FBAT): rare-variant association results for the familial dataset
NEUP_FAMILIAL_FBAT_ANNO <- read.delim("/40/AD/AD_Seq_Data/05.-Analyses/06-Aquilla_202101/09-Tanzi-replication/01-familial/03-PLINK-QC-files/FBAT/FBAT_rare_variant_analysis_results.csv", header = T, sep = "\t", stringsAsFactors = FALSE)
# Quick tally of variants below the suggestive threshold (printed, not stored)
sum(NEUP_FAMILIAL_FBAT_ANNO$P < 0.0005, na.rm = T)
# Prefix every column so the source table remains identifiable after merging
colnames(NEUP_FAMILIAL_FBAT_ANNO) <- paste0("NEUP_FAMILIAL_", colnames(NEUP_FAMILIAL_FBAT_ANNO))
# sort
NEUP_FAMILIAL_FBAT_ANNO <- NEUP_FAMILIAL_FBAT_ANNO[order(NEUP_FAMILIAL_FBAT_ANNO$NEUP_FAMILIAL_P),]
# Keep nominally significant variants only (P < 0.05)
NEUP_FAMILIAL_FBAT_ANNO <- NEUP_FAMILIAL_FBAT_ANNO[NEUP_FAMILIAL_FBAT_ANNO$NEUP_FAMILIAL_P < 0.05,]
# write.table(NEUP_FAMILIAL_FBAT_ANNO, "Single_variant_analysis_familial_data_results_p_0.05.csv", sep =",", col.names = T, quote = F, row.names = FALSE)
# Merge key "chr:pos": split Marker on ":" and re-join its first two fields
NEUP_FAMILIAL_FBAT_ANNO$ID <- do.call(paste, c(read.table(text = NEUP_FAMILIAL_FBAT_ANNO$NEUP_FAMILIAL_Marker, sep = ":")[1:2], sep = ":"))
# Replication: unrelated-dataset (Firth fallback) single-variant results
NEUP_UNRELATED_ANNO <- read.delim("/100/AD/AD_Seq_Data/05.-Analyses/06-Aquilla_202101/01-Aquilla-preQC/06-Aquilla_202101-b/03-plink-QC-files/Firth-Fallback_replication_study_results.txt", header = T, sep = "\t", stringsAsFactors = FALSE)
sum(NEUP_UNRELATED_ANNO$P < 0.0005, na.rm = T)
colnames(NEUP_UNRELATED_ANNO) <- paste0("NEUP_UNRELATED_", colnames(NEUP_UNRELATED_ANNO))
# Drop rows with missing P before sorting
NEUP_UNRELATED_ANNO <- NEUP_UNRELATED_ANNO[!is.na(NEUP_UNRELATED_ANNO$NEUP_UNRELATED_P),]
NEUP_UNRELATED_ANNO <- NEUP_UNRELATED_ANNO[order(NEUP_UNRELATED_ANNO$NEUP_UNRELATED_P),]
# NEUP_UNRELATED_ANNO <- NEUP_UNRELATED_ANNO[NEUP_UNRELATED_ANNO$NEUP_UNRELATED_P < 0.05,]
# write.table(NEUP_UNRELATED_ANNO, "Single_variant_analysis_unrelated_data_results_p_0.05.csv", sep =",", col.names = T, quote = F, row.names = FALSE)
# Same chr:pos key, built from the SNP column of this table
NEUP_UNRELATED_ANNO$ID <- do.call(paste, c(read.table(text = NEUP_UNRELATED_ANNO$NEUP_UNRELATED_SNP, sep = ":")[1:2], sep = ":"))
# Meta-analysis of two datasets
Fixed_Effect_Meta_analysis_result <- read.delim("/100/AD/AD_Seq_Data/05.-Analyses/06-Aquilla_202101/09-Tanzi-replication/01-familial/03-PLINK-QC-files/fixed_effect_meta-analysis/Fixed_effect_Meta_analysis_results.txt", header = T, sep = "\t", stringsAsFactors = FALSE)
colnames(Fixed_Effect_Meta_analysis_result) <- paste0("NEUP_META_", colnames(Fixed_Effect_Meta_analysis_result))
Fixed_Effect_Meta_analysis_result <- Fixed_Effect_Meta_analysis_result[order(Fixed_Effect_Meta_analysis_result$NEUP_META_P),]
# Fixed_Effect_Meta_analysis_result <- Fixed_Effect_Meta_analysis_result[Fixed_Effect_Meta_analysis_result$NEUP_META_P < 0.05,]
# write.table(Fixed_Effect_Meta_analysis_result, "Single_variant_META_analysis_results_p_0.05.csv", sep =",", col.names = T, quote = F, row.names = FALSE)
Fixed_Effect_Meta_analysis_result$ID <- do.call(paste, c(read.table(text = Fixed_Effect_Meta_analysis_result$NEUP_META_SNP, sep = ":")[1:2], sep = ":"))
## First filtering paradigm: P< 0.0005 on familial data and P< 0.05 on unrelated data
# Published Prokopenko variants (P < 5e-4 in the NIMH/NIA/ADSP families)
singlevar_Prokopenko_p0.0005 <- read.table("https://raw.githubusercontent.com/achalneupane/data/master/Rare_variants_showing_association_at_P_5e-04_in_the_NIMH_NIA_ADSP_AD_families.csv", sep =",", header = TRUE)
singlevar_Prokopenko_p0.0005$Prokopenko_Nearest_protein.coding.gene <- as.character(singlevar_Prokopenko_p0.0005$Prokopenko_Nearest_protein.coding.gene)
# chr:pos key for merging against our ID columns
singlevar_Prokopenko_p0.0005$key <- paste(singlevar_Prokopenko_p0.0005$Prokopenko_Chromosome, singlevar_Prokopenko_p0.0005$Prokopenko_Position, sep = ":")
# Suffix columns with the filtering paradigm, then rename the key to "ID"
colnames(singlevar_Prokopenko_p0.0005) <- paste(colnames(singlevar_Prokopenko_p0.0005), "filter0.0005", sep = "_")
colnames(singlevar_Prokopenko_p0.0005) [colnames(singlevar_Prokopenko_p0.0005) == "key_filter0.0005"] <- "ID"
## Second filtering paradigm: P< 0.05 on familial data and Pmeta < 0.0005
######################### Comparison ##########################
singlevar_Prokopenko_p0.05 <- read.table("https://raw.githubusercontent.com/achalneupane/data/master/Rare_variants_showing_association_in_Prokopenko_paper_P0.05.csv", sep =",", header = TRUE)
singlevar_Prokopenko_p0.05$Prokopenko_Overlapping_GREAT_associated_genes <- as.character(singlevar_Prokopenko_p0.05$Prokopenko_Overlapping_GREAT_associated_genes)
singlevar_Prokopenko_p0.05$key <- paste(singlevar_Prokopenko_p0.05$Prokopenko_Chromosome, singlevar_Prokopenko_p0.05$Prokopenko_Position, sep = ":")
colnames(singlevar_Prokopenko_p0.05) <- paste(colnames(singlevar_Prokopenko_p0.05), "filter0.05", sep = "_")
colnames(singlevar_Prokopenko_p0.05) [colnames(singlevar_Prokopenko_p0.05) == "key_filter0.05"] <- "ID"
# match by SNP position
# Left-join every table onto the familial results by the chr:pos "ID" key
Merged_single_Var_by_variantNAME <- Reduce(function(x,y) merge(x,y,by="ID",all.x= TRUE) ,list(NEUP_FAMILIAL_FBAT_ANNO,NEUP_UNRELATED_ANNO,Fixed_Effect_Meta_analysis_result, singlevar_Prokopenko_p0.0005, singlevar_Prokopenko_p0.05))
write.table(Merged_single_Var_by_variantNAME, "/40/AD/AD_Seq_Data/05.-Analyses/06-Aquilla_202101/09-Tanzi-replication/01-familial/03-PLINK-QC-files/fixed_effect_meta-analysis/Single_variant_analysis_results_table_matched_by_variants.csv", sep ="\t", col.names = T, quote = F, row.names = FALSE)
## Now, match by geneNAME
## Collapse each table to one best (lowest-P) row per gene, then left-join all
## tables on geneNAME. Tables are filtered to P < 0.05 first to keep the merged
## result manageable.
NEUP_FAMILIAL_FBAT_ANNO$geneNAME <- NEUP_FAMILIAL_FBAT_ANNO$NEUP_FAMILIAL_gene
NEUP_FAMILIAL_FBAT_ANNO <- NEUP_FAMILIAL_FBAT_ANNO[order(NEUP_FAMILIAL_FBAT_ANNO$NEUP_FAMILIAL_P, NEUP_FAMILIAL_FBAT_ANNO$geneNAME),]
NEUP_UNRELATED_ANNO$geneNAME <- NEUP_UNRELATED_ANNO$NEUP_UNRELATED_gene
## I am only keeping variants with P< 0.05. I am also picking the lowest pvalue for genes that are common in all. Otherwise, the merged table would be too big
# NEUP_UNRELATED_ANNO <- NEUP_UNRELATED_ANNO[order(NEUP_UNRELATED_ANNO$NEUP_UNRELATED_P, NEUP_UNRELATED_ANNO$geneNAME),]
NEUP_UNRELATED_ANNO <- NEUP_UNRELATED_ANNO[NEUP_UNRELATED_ANNO$NEUP_UNRELATED_P < 0.05,]
# Drop rows with an empty gene annotation so they cannot match each other
NEUP_FAMILIAL_FBAT_ANNO <- NEUP_FAMILIAL_FBAT_ANNO[NEUP_FAMILIAL_FBAT_ANNO$geneNAME != "",]
# TT <- by(NEUP_UNRELATED_ANNO, NEUP_UNRELATED_ANNO$geneNAME, function(x) x[which.min(x$NEUP_UNRELATED_P), ] )
library(dplyr)
# One row per gene: the variant with the smallest unrelated-dataset P
NEUP_UNRELATED_ANNO <- NEUP_UNRELATED_ANNO %>%
group_by(geneNAME) %>%
slice(which.min(NEUP_UNRELATED_P))
Fixed_Effect_Meta_analysis_result$geneNAME <- Fixed_Effect_Meta_analysis_result$NEUP_META_gene
## I am only keeping variants with P< 0.05. I am also picking the lowest pvalue for genes that are common in all. Otherwise, the merged table would be too big
# Fixed_Effect_Meta_analysis_result <- Fixed_Effect_Meta_analysis_result[order(Fixed_Effect_Meta_analysis_result$NEUP_META_P, Fixed_Effect_Meta_analysis_result$geneNAME),]
Fixed_Effect_Meta_analysis_result <- Fixed_Effect_Meta_analysis_result[Fixed_Effect_Meta_analysis_result$NEUP_META_P < 0.05,]
# One row per gene: the variant with the smallest meta-analysis P
Fixed_Effect_Meta_analysis_result <- Fixed_Effect_Meta_analysis_result %>%
group_by(geneNAME) %>%
slice(which.min(NEUP_META_P))
# genes sorted by Lowest Discovery_P-value
singlevar_Prokopenko_p0.0005$geneNAME <- singlevar_Prokopenko_p0.0005$Prokopenko_Nearest_protein.coding.gene_filter0.0005
singlevar_Prokopenko_p0.0005 <- singlevar_Prokopenko_p0.0005[order(singlevar_Prokopenko_p0.0005$Prokopenko_Discovery_P.value_filter0.0005, singlevar_Prokopenko_p0.0005$geneNAME),]
singlevar_Prokopenko_p0.0005 <- singlevar_Prokopenko_p0.0005 %>%
group_by(geneNAME) %>%
slice(which.min(Prokopenko_Discovery_P.value_filter0.0005))
# genes sorted by lowest Meta_P-value
# NOTE(review): column name here uses "protein_coding.gene" while the
# p0.0005 table above used "protein.coding.gene" -- confirm it matches the
# actual CSV header for the p0.05 file, otherwise this assigns NULL.
singlevar_Prokopenko_p0.05$geneNAME <- as.character(singlevar_Prokopenko_p0.05$Prokopenko_Nearest_protein_coding.gene_filter0.05)
singlevar_Prokopenko_p0.05 <- singlevar_Prokopenko_p0.05[order(singlevar_Prokopenko_p0.05$Prokopenko_META_P_value_filter0.05, singlevar_Prokopenko_p0.05$geneNAME),]
# NOTE(review): the sort above uses META_P but the slice below picks the row
# with the minimum DISC_P -- verify which P-value was intended for selection.
singlevar_Prokopenko_p0.05 <- singlevar_Prokopenko_p0.05 %>%
group_by(geneNAME) %>%
slice(which.min(Prokopenko_DISC_P_value_filter0.05))
# First we merge all tables from our Analysis
NEUP_MERGED_SINGLE_VAR <- Reduce(function(x,y) merge(x,y,by="geneNAME",all.x= TRUE) ,list(NEUP_FAMILIAL_FBAT_ANNO,NEUP_UNRELATED_ANNO,Fixed_Effect_Meta_analysis_result))
# Then merge tables from Prokopenko (two filtering paradigms)
PROKOPENKO_MERGED_SINGLE_VAR <- Reduce(function(x,y) merge(x,y,by="geneNAME",all.x= TRUE) ,list(singlevar_Prokopenko_p0.0005, singlevar_Prokopenko_p0.05))
# Now merge Both tables
Merged_single_Var_by_geneNAME <- Reduce(function(x,y) merge(x,y,by="geneNAME",all.x= TRUE) ,list(NEUP_MERGED_SINGLE_VAR, PROKOPENKO_MERGED_SINGLE_VAR))
## sort by lowest to highest P-value and gene name
Merged_single_Var_by_geneNAME <- Merged_single_Var_by_geneNAME[order(Merged_single_Var_by_geneNAME$NEUP_FAMILIAL_P, Merged_single_Var_by_geneNAME$geneNAME),]
write.table(Merged_single_Var_by_geneNAME, "/40/AD/AD_Seq_Data/05.-Analyses/06-Aquilla_202101/09-Tanzi-replication/01-familial/03-PLINK-QC-files/fixed_effect_meta-analysis/Single_variant_analysis_results_table_matched_by_geneNAME.csv", sep ="\t", col.names = T, quote = F, row.names = FALSE)
####################################################################################
########################
## Spatial clustering ##
########################
## Loads the familial spatial-clustering results and the unrelated-dataset
## gene-based SKAT results for the same 4145 clusters, and pairs them by
## cluster ID ahead of the Fisher meta-analysis below.
# NOTE(review): setwd() here makes later relative write.table() paths depend
# on this directory; prefer absolute paths if this script is reorganised.
setwd("/100/AD/AD_Seq_Data/05.-Analyses/06-Aquilla_202101/01-Aquilla-preQC/06-Aquilla_202101-b/03-plink-QC-files/")
# Family (Spatial)
NEUP_DISCOVERY_SPATIAL <- read.delim("/40/AD/AD_Seq_Data/05.-Analyses/06-Aquilla_202101/09-Tanzi-replication/01-familial/03-PLINK-QC-files/FBAT/Spatial_clustering_analysis_results.csv", header = T, sep = "\t", stringsAsFactors = FALSE)
sum(NEUP_DISCOVERY_SPATIAL$P < 0.01, na.rm = T) # 529
sum(NEUP_DISCOVERY_SPATIAL$P < 0.05, na.rm = T) #4145
# Prefix columns so the origin of each column survives the later cbind
colnames(NEUP_DISCOVERY_SPATIAL) <- paste0("NEUP_SPATIAL_FAMILIAL_", colnames(NEUP_DISCOVERY_SPATIAL))
# sort by P-value
NEUP_DISCOVERY_SPATIAL <- NEUP_DISCOVERY_SPATIAL[order(NEUP_DISCOVERY_SPATIAL$NEUP_SPATIAL_FAMILIAL_P),]
sum(NEUP_DISCOVERY_SPATIAL$NEUP_SPATIAL_FAMILIAL_P < 0.05)
# Keep nominally significant clusters (P < 0.05)
NEUP_DISCOVERY_SPATIAL <- NEUP_DISCOVERY_SPATIAL[NEUP_DISCOVERY_SPATIAL$NEUP_SPATIAL_FAMILIAL_P < 0.05,]
# Written relative to the setwd() directory above
write.table(NEUP_DISCOVERY_SPATIAL, "Spatial_clustering_analysis_familial_data_results_p_0.05.csv", sep =",", col.names = T, quote = F, row.names = FALSE)
# Now, select regions with P< 0.05
# (already filtered above, so this subset is a no-op copy of the same rows)
NEUP_FAMILIAL_Cluster.4145.P0.05 <- NEUP_DISCOVERY_SPATIAL[NEUP_DISCOVERY_SPATIAL$NEUP_SPATIAL_FAMILIAL_P < 0.05,]
## Results from gene-based analysis on 4145 regions of unrelated dataset
NEUP_UNRELATED_SKAT_C_4145.CLUSTER <- read.delim("/100/AD/AD_Seq_Data/05.-Analyses/06-Aquilla_202101/01-Aquilla-preQC/06-Aquilla_202101-b/03-plink-QC-files/02-gene-based/geneset-SPATIALCLUSTER_SKAT_SKAT_C-GENOME.ASSOC", header = T, sep = "\t", stringsAsFactors = FALSE)
colnames(NEUP_UNRELATED_SKAT_C_4145.CLUSTER) <- paste0("NEUP_SPATIAL_UNRELATED_", colnames(NEUP_UNRELATED_SKAT_C_4145.CLUSTER))
# Cluster IDs in the SKAT output use "_" where the familial table uses ":"
NEUP_UNRELATED_SKAT_C_4145.CLUSTER$NEUP_SPATIAL_UNRELATED_CLUSTER_ID <- gsub("_", ":", NEUP_UNRELATED_SKAT_C_4145.CLUSTER$NEUP_SPATIAL_UNRELATED_GENE)
# Merge the results from FAMILIAL and UNRELATED datasets by matching cluster ID
# (match() keeps the familial row order; unmatched clusters yield NA columns)
SPATIAL_CLUSTER_RESULT_FAMILIAL_UNRELATED.4145 <- cbind(NEUP_FAMILIAL_Cluster.4145.P0.05, NEUP_UNRELATED_SKAT_C_4145.CLUSTER[match(NEUP_FAMILIAL_Cluster.4145.P0.05$NEUP_SPATIAL_FAMILIAL_ClID, NEUP_UNRELATED_SKAT_C_4145.CLUSTER$NEUP_SPATIAL_UNRELATED_CLUSTER_ID ),])
SPATIAL_CLUSTER_RESULT_FAMILIAL_UNRELATED.4145$geneNAME <- SPATIAL_CLUSTER_RESULT_FAMILIAL_UNRELATED.4145$NEUP_SPATIAL_FAMILIAL_gene
# Perform Fisher's combined probability test: meta-analyse the familial
# spatial-clustering P with the unrelated SKAT_C P for each cluster.
# Implemented with base R instead of metap::sumlog(), which the original code
# called without ever loading the metap package. Fisher's method for k
# p-values: X2 = -2 * sum(log(p)) ~ chi-square with 2k df under H0; k = 2 here,
# so df = 4 and the combined p is the upper tail of that chi-square.
# Vectorized over all rows; an NA in either input propagates to NA, matching
# the old loop which skipped NA rows and left "" that as.numeric() turned
# into NA.
SPATIAL_CLUSTER_RESULT_FAMILIAL_UNRELATED.4145$NEUP_SPATIAL_META_P <- pchisq(
  -2 * (log(SPATIAL_CLUSTER_RESULT_FAMILIAL_UNRELATED.4145$NEUP_SPATIAL_FAMILIAL_P) +
          log(SPATIAL_CLUSTER_RESULT_FAMILIAL_UNRELATED.4145$NEUP_SPATIAL_UNRELATED_SKAT_C)),
  df = 4, lower.tail = FALSE
)
# sum(SPATIAL_CLUSTER_RESULT_FAMILIAL_UNRELATED.4145$NEUP_SPATIAL_FAMILIAL_P < 0.0005)
# sum(SPATIAL_CLUSTER_RESULT_FAMILIAL_UNRELATED.4145$NEUP_SPATIAL_META_P < 0.0005)
# Now compare these with Prokopenko's result
# Published spatial-clustering genes, then split into the two filtering
# paradigms used in the paper before joining onto our combined results.
PROKOPENKO_SPATIAL <- read.table("/40/AD/AD_Seq_Data/05.-Analyses/06-Aquilla_202101/09-Tanzi-replication/01-familial/03-PLINK-QC-files/Tanzi_spatial_clustering.csv", header = T, sep = ",")
PROKOPENKO_SPATIAL$PROKOPENKO_GENE <- as.character(PROKOPENKO_SPATIAL$PROKOPENKO_GENE)
PROKOPENKO_SPATIAL$geneNAME <- PROKOPENKO_SPATIAL$PROKOPENKO_GENE
# MERGED_SPATIAL_CLUSTER_ANALYSIS <- Reduce(function(x,y) merge(x,y,by="geneNAME",all= TRUE) ,list(SPATIAL_CLUSTER_RESULT_FAMILIAL_UNRELATED.4145,PROKOPENKO_SPATIAL))
# PROKOPENKO_SPATIAL_PARADIGM1 <- MERGED_SPATIAL_CLUSTER_ANALYSIS[(MERGED_SPATIAL_CLUSTER_ANALYSIS$PROKOPENKO_DISC_P < 0.0005 & MERGED_SPATIAL_CLUSTER_ANALYSIS$PROKOPENKO_META_P < 0.0005),]
# PROKOPENKO_SPATIAL_PARADIGM2 <- PROKOPENKO_SPATIAL[PROKOPENKO_SPATIAL$PROKOPENKO_META_P < 0.00005 & PROKOPENKO_SPATIAL$PROKOPENKO_DISC_P < 0.05,]
# Paradigm 1: discovery P < 5e-4, meta P < 5e-4, replication P < 0.05
PROKOPENKO_SPATIAL_PARADIGM1 <- PROKOPENKO_SPATIAL[(PROKOPENKO_SPATIAL$PROKOPENKO_DISC_P < 0.0005 & PROKOPENKO_SPATIAL$PROKOPENKO_META_P < 0.0005 & PROKOPENKO_SPATIAL$PROKOPENKO_REP_P < 0.05),]
# Suffix all non-key columns so the two paradigms stay distinct after merging
colnames(PROKOPENKO_SPATIAL_PARADIGM1)[colnames(PROKOPENKO_SPATIAL_PARADIGM1) != "geneNAME"] <- paste0(colnames(PROKOPENKO_SPATIAL_PARADIGM1)[colnames(PROKOPENKO_SPATIAL_PARADIGM1) != "geneNAME"], ".PARADIGM1")
# Paradigm 2: meta P < 5e-5, discovery P < 0.05, replication P < 0.05
PROKOPENKO_SPATIAL_PARADIGM2 <- PROKOPENKO_SPATIAL[(PROKOPENKO_SPATIAL$PROKOPENKO_META_P < 0.00005 & PROKOPENKO_SPATIAL$PROKOPENKO_DISC_P < 0.05 & PROKOPENKO_SPATIAL$PROKOPENKO_REP_P < 0.05),]
colnames(PROKOPENKO_SPATIAL_PARADIGM2)[colnames(PROKOPENKO_SPATIAL_PARADIGM2) != "geneNAME"] <- paste0(colnames(PROKOPENKO_SPATIAL_PARADIGM2)[colnames(PROKOPENKO_SPATIAL_PARADIGM2) != "geneNAME"], ".PARADIGM2")
# Full outer join on geneNAME (all = TRUE keeps genes unique to either side)
MERGED_SPATIAL_CLUSTER_ANALYSIS <- Reduce(function(x,y) merge(x,y,by="geneNAME",all= TRUE) ,list(SPATIAL_CLUSTER_RESULT_FAMILIAL_UNRELATED.4145,PROKOPENKO_SPATIAL_PARADIGM1, PROKOPENKO_SPATIAL_PARADIGM2))
write.table(MERGED_SPATIAL_CLUSTER_ANALYSIS, "/40/AD/AD_Seq_Data/05.-Analyses/06-Aquilla_202101/09-Tanzi-replication/01-familial/03-PLINK-QC-files/Fisher_combined_meta_analysis_for_spatial_clustering/Spatial_clustering_analysis_results_for_4145_clusters.csv", sep ="\t", col.names = T, quote = F, row.names = FALSE)
###########################################################################
###########################################################################
########################## END !!!!!!!!!!!!!###############################
## NOTE(review): everything below the END banner appears to be leftover code
## from an earlier version of this analysis; several column names used below
## do not exist under the prefixes applied above.
# Replication (gene-based SKAT RC)
# MAF
NEUP_REPLICATION_SKAT_C_MAF <- read.delim("/100/AD/AD_Seq_Data/05.-Analyses/06-Aquilla_202101/01-Aquilla-preQC/06-Aquilla_202101-b/03-plink-QC-files/02-gene-based/geneset-MAF1PEXAC_SKAT_SKAT_C-GENOME.ASSOC", header = T, sep = "\t", stringsAsFactors = FALSE)
sum(NEUP_REPLICATION_SKAT_C_MAF$SKAT_C < 0.0005, na.rm = T)
colnames(NEUP_REPLICATION_SKAT_C_MAF) <- paste0("NEUP_SKATC_MAF_UNRELATED_", colnames(NEUP_REPLICATION_SKAT_C_MAF))
# sort by P-value
NEUP_REPLICATION_SKAT_C_MAF <- NEUP_REPLICATION_SKAT_C_MAF[order(NEUP_REPLICATION_SKAT_C_MAF$NEUP_SKATC_MAF_UNRELATED_SKAT_C),]
# Keep genes with SKAT_C P < 0.01; written relative to the setwd() directory
NEUP_REPLICATION_SKAT_C_MAF <- NEUP_REPLICATION_SKAT_C_MAF[NEUP_REPLICATION_SKAT_C_MAF$NEUP_SKATC_MAF_UNRELATED_SKAT_C < 0.01,]
write.table(NEUP_REPLICATION_SKAT_C_MAF, "GENE_BASED_analysis_MAF0.01_Unrelated_data_results_p_0.01.csv", sep =",", col.names = T, quote = F, row.names = FALSE)
# CADD
NEUP_REPLICATION_SKAT_C_CADD <- read.delim("/100/AD/AD_Seq_Data/05.-Analyses/06-Aquilla_202101/01-Aquilla-preQC/06-Aquilla_202101-b/03-plink-QC-files/02-gene-based/geneset-CADD20_SKAT_SKAT_C-GENOME.ASSOC", header = T, sep = "\t", stringsAsFactors = FALSE)
sum(NEUP_REPLICATION_SKAT_C_CADD$SKAT_C < 0.01, na.rm = T)
colnames(NEUP_REPLICATION_SKAT_C_CADD) <- paste0("NEUP_SKAT_CADD_UNRELATED_", colnames(NEUP_REPLICATION_SKAT_C_CADD))
NEUP_REPLICATION_SKAT_C_CADD <- NEUP_REPLICATION_SKAT_C_CADD[order(NEUP_REPLICATION_SKAT_C_CADD$NEUP_SKAT_CADD_UNRELATED_GENE, NEUP_REPLICATION_SKAT_C_CADD$NEUP_SKAT_CADD_UNRELATED_SKAT_C),]
NEUP_REPLICATION_SKAT_C_CADD$NEUP_SKATC_CADD_UNRELATED_GENE_NAME <- NEUP_REPLICATION_SKAT_C_CADD$NEUP_SKAT_CADD_UNRELATED_GENE
# sort by P-value
NEUP_REPLICATION_SKAT_C_CADD <- NEUP_REPLICATION_SKAT_C_CADD[order(NEUP_REPLICATION_SKAT_C_CADD$NEUP_SKAT_CADD_UNRELATED_SKAT_C),]
NEUP_REPLICATION_SKAT_C_CADD <- NEUP_REPLICATION_SKAT_C_CADD[NEUP_REPLICATION_SKAT_C_CADD$NEUP_SKAT_CADD_UNRELATED_SKAT_C < 0.01,]
write.table(NEUP_REPLICATION_SKAT_C_CADD, "GENE_BASED_analysis_CADD20_Unrelated_data_results_p_0.01.csv", sep =",", col.names = T, quote = F, row.names = FALSE)
######################### Comparison ##########################
# NOTE(review): columns NEUP_SPATIAL_DISCgene, NEUP_SKATC_MAF_REPGENE and
# NEUP_SKAT_CADD_REPGENE do not exist under the prefixes created above
# (NEUP_SPATIAL_FAMILIAL_*, NEUP_SKATC_MAF_UNRELATED_*,
# NEUP_SKAT_CADD_UNRELATED_*) -- these lines look like stale code from an
# earlier naming scheme and will fail or return 0; verify before reuse.
sum(NEUP_DISCOVERY_SPATIAL$NEUP_SPATIAL_DISCgene %in% NEUP_REPLICATION_SKAT_C_MAF$NEUP_SKATC_MAF_REPGENE)
sum(NEUP_DISCOVERY_SPATIAL$NEUP_SPATIAL_DISCgene %in% NEUP_REPLICATION_SKAT_C_CADD$NEUP_SKAT_CADD_REPGENE)
(m1 <- merge(NEUP_DISCOVERY_SPATIAL, NEUP_REPLICATION_SKAT_C_MAF, by.x = "NEUP_SPATIAL_DISCgene", by.y = "NEUP_SKATC_MAF_REPGENE", all.x = TRUE))
# m2 is the dataframe that has spatial clustering analysis results of familial data
# merged with the (top p value) corresponding genes in CADD and MAF
(m2 <- merge(m1, NEUP_REPLICATION_SKAT_C_CADD, by.x = "NEUP_SPATIAL_DISCgene", by.y = "NEUP_SKAT_CADD_REPGENE", all.x = TRUE))
# Now compare these with Prokopenko's result
# Re-reads the same Prokopenko spatial table, this time from GitHub
PROKOPENKO_SPATIAL <- read.table("https://raw.githubusercontent.com/achalneupane/data/master/Tanzi_spatial_clustering.csv", header = T, sep = ",")
PROKOPENKO_SPATIAL$PROKOPENKO_GENE <- as.character(PROKOPENKO_SPATIAL$PROKOPENKO_GENE)
sum(m2$NEUP_SPATIAL_DISCgene %in% PROKOPENKO_SPATIAL$PROKOPENKO_GENE)
unique(PROKOPENKO_SPATIAL$PROKOPENKO_GENE)
sum(unique(PROKOPENKO_SPATIAL$PROKOPENKO_GENE) %in% m2$NEUP_SPATIAL_DISCgene)
###########################################################################################
########################## MATCH VARIANTS FROM SPATIAL clustering #########################
###########################################################################################
## NOTE(review): this section uses `singlevar_Tanzi` and `LOGISTIC_ANNO`,
## neither of which is created anywhere in this script -- they must come from
## an earlier interactive session or a previous script version. Verify those
## objects exist before running this section.
Spatial_replicated_Tanzi <- read.table("Spatial_replicated_Tanzi.csv", sep =",", header = TRUE)
singlevar_Tanzi$Nearest.protein.coding.gene <- as.character(singlevar_Tanzi$Prokopenko_Nearest_protein.coding.gene)
# sum(LOGISTIC_ANNO$gene %in% singlevar_Tanzi$Nearest.protein.coding.gene)
# Attach the Tanzi gene/consequence annotation to each variant by gene name
LOGISTIC_ANNO <- cbind(LOGISTIC_ANNO, singlevar_Tanzi[match(LOGISTIC_ANNO$gene, singlevar_Tanzi$Nearest.protein.coding.gene), c("Nearest.protein.coding.gene", "VEP.consequence")])
# Variants whose gene matched a Tanzi gene (non-NA after the match above)
REPLICATED_singled_var <- LOGISTIC_ANNO[!is.na(LOGISTIC_ANNO$Nearest.protein.coding.gene),]
# Hard-coded column names; assumes LOGISTIC_ANNO has exactly 18 columns in
# this order before the two Tanzi columns -- TODO confirm against the source
colnames(REPLICATED_singled_var) <- c("Marker", "Allele", "afreq", "fam_size", "S-E(S)", "Var(S)", "Z", "P", "CHROM", "POS", "ID", "REF", "ALT", "key", "consequence", "gene", "type", "region", "TANZI_Nearest.protein.coding.gene", "TANZI_VEP.consequence")
write.table(REPLICATED_singled_var, "/40/AD/AD_Seq_Data/05.-Analyses/06-Aquilla_202101/09-Tanzi-replication/01-familial/03-PLINK-QC-files/FBAT/Replicated_single_variant_genes.csv", sep ="\t", col.names = T, quote = F, row.names = FALSE)
## below suggestive significance
singlevar_Tanzi$key <- paste(singlevar_Tanzi$Chromosome, singlevar_Tanzi$Position, sep =":")
REPLICATED_singled_var_below_suggestive <- REPLICATED_singled_var[REPLICATED_singled_var$P < 0.05,]
write.table(REPLICATED_singled_var_below_suggestive, "/40/AD/AD_Seq_Data/05.-Analyses/06-Aquilla_202101/09-Tanzi-replication/01-familial/03-PLINK-QC-files/FBAT/REPLICATED_singled_var_below_suggestive.csv", sep ="\t", col.names = T, quote = F, row.names = FALSE)
# Collect rows whose chr:pos key matches a Tanzi key.
# NOTE(review): grepl() does SUBSTRING matching, so key "1:123" also matches
# "1:1234" and "11:123" contains "1:123" -- exact comparison (== or %in%)
# would avoid false positives; grown row-by-row (fine for small inputs).
SUGGESTIVE_SINGLE_VAR <- {}
for (i in 1:nrow(singlevar_Tanzi)){
SUGGESTIVE_SINGLE_VAR_tmp <- REPLICATED_singled_var_below_suggestive[grepl(singlevar_Tanzi$key[i], REPLICATED_singled_var_below_suggestive$key),]
SUGGESTIVE_SINGLE_VAR <- rbind.data.frame(SUGGESTIVE_SINGLE_VAR, SUGGESTIVE_SINGLE_VAR_tmp)
}
library("GenomicRanges")
## Find all variants within +/-100 kb of each Tanzi variant using interval
## overlap, then pull the matching annotated rows out of LOGISTIC_ANNO.
# Query ranges: one zero-width range per variant at its genomic position
q=GRanges(seqnames=LOGISTIC_ANNO$`#CHROM`,
ranges=IRanges(start = LOGISTIC_ANNO$POS, end = LOGISTIC_ANNO$POS)
)
q
# For each Tanzi variant, keep our variants falling in a 200 kb window
# centred on it; tag each hit with the Tanzi variant it is near.
# NOTE(review): accumulators grown inside loops (rbind per iteration) --
# acceptable for small n, but preallocating or lapply+do.call(rbind) scales
# better.
ALL_overlappingVAR <- {}
for (i in 1:nrow(singlevar_Tanzi)){
gr=GRanges(seqnames=singlevar_Tanzi$Prokopenko_Chromosome[i],
ranges=IRanges(start = singlevar_Tanzi$Prokopenko_Position[i]-100000, end = singlevar_Tanzi$Prokopenko_Position[i]+100000))
overlappingVAR <- subsetByOverlaps(q, gr)
overlappingVAR <- as.data.frame(overlappingVAR)
if(nrow(overlappingVAR) != 0){
overlappingVAR$TANZI_var <- paste(singlevar_Tanzi$Chromosome[i], singlevar_Tanzi$Position[i], sep =":")
ALL_overlappingVAR <- rbind.data.frame(ALL_overlappingVAR, overlappingVAR)
}
}
# chr:pos key for looking the overlap hits back up in LOGISTIC_ANNO
ALL_overlappingVAR$key <- paste(ALL_overlappingVAR$seqnames, ALL_overlappingVAR$start, sep = ":")
# NOTE(review): as above, grepl() substring-matches keys, so prefix keys can
# over-match (e.g. "1:123" matches "1:1234"); exact equality would be safer.
hundreadKB_SV <- {}
for (i in 1:nrow(ALL_overlappingVAR)){
hundreadKB_SV_tmp <- LOGISTIC_ANNO [grepl(ALL_overlappingVAR$key[i], LOGISTIC_ANNO$key),]
hundreadKB_SV_tmp$TANZI_var <- ALL_overlappingVAR$TANZI_var[i]
hundreadKB_SV <- rbind.data.frame(hundreadKB_SV, hundreadKB_SV_tmp)
}
write.table(hundreadKB_SV, "/40/AD/AD_Seq_Data/05.-Analyses/06-Aquilla_202101/09-Tanzi-replication/01-familial/03-PLINK-QC-files/FBAT/Replicated_single_variant_genes_within_100KB.csv", sep ="\t", col.names = T, quote = F, row.names = FALSE)
## below suggestive significance
hundreadKB_SV$key2 <- paste(hundreadKB_SV$`#CHROM`, hundreadKB_SV$POS, sep =":")
hundreadKB_SV$P <- as.numeric(hundreadKB_SV$P)
hundreadKB_SV_below_suggestive <- hundreadKB_SV[hundreadKB_SV$P < 0.05,]
write.table(hundreadKB_SV_below_suggestive, "/40/AD/AD_Seq_Data/05.-Analyses/06-Aquilla_202101/09-Tanzi-replication/01-familial/03-PLINK-QC-files/FBAT/hundreadKB_SV_below_suggestive.csv", sep ="\t", col.names = T, quote = F, row.names = FALSE)
# Of the 100 kb-window variants with P < 0.05, keep those whose key matches
# a Tanzi key (same substring-matching caveat as above)
SUGGESTIVE_SINGLE_VAR_100KB <- {}
for (i in 1:nrow(singlevar_Tanzi)){
SUGGESTIVE_SINGLE_VAR_100KB_tmp <- hundreadKB_SV_below_suggestive[grepl(singlevar_Tanzi$key[i], hundreadKB_SV_below_suggestive$key2),]
SUGGESTIVE_SINGLE_VAR_100KB <- rbind.data.frame(SUGGESTIVE_SINGLE_VAR_100KB, SUGGESTIVE_SINGLE_VAR_100KB_tmp)
}
|
b0cdc307a880283f009a0137cbdfd0f07fefff3a
|
b599e97542c6df5add3e4b53586097705b10ce74
|
/man/micro_nz.Rd
|
b8b5841f9fb8e1e57ce4f7a44227bbf9cbbd8024
|
[] |
no_license
|
ajijohn/NicheMapR
|
09c435107b9e4aa0fd5b7982510a65e76680f1ed
|
98386659036cc55df840df7339af519a766b88d2
|
refs/heads/master
| 2021-01-09T05:34:23.544532
| 2017-01-28T07:29:16
| 2017-01-28T07:29:16
| 80,757,456
| 0
| 1
| null | 2017-02-02T18:51:23
| 2017-02-02T18:51:23
| null |
UTF-8
|
R
| false
| true
| 6,168
|
rd
|
micro_nz.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/micro_nz.R
\name{micro_nz}
\alias{micro_nz}
\title{New Zealand implementation of the microclimate model.}
\usage{
micro_nz(loc = "Melbourne, Australia", timeinterval = 365, ystart = 1990, yfinish = 1990, soiltype = 4,
REFL = 0.15, slope = 0, aspect = 0, DEP = c(0., 2.5, 5., 10., 15, 20, 30, 50, 100, 200), minshade = 0, maxshade = 90,
Usrhyt = 0.01, ...)
% NOTE(review): default loc is an Australian city in this New Zealand help page -- confirm the actual default in R/micro_nz.R.
}
\arguments{
\item{loc}{Either a longitude and latitude (decimal degrees) or a place name to search for on Google Earth}
\item{timeinterval}{The number of time intervals to generate predictions for over a year (must be 12 <= x <=365)}
\item{ystart}{First year to run}
\item{yfinish}{Last year to run}
\item{soiltype}{Soil type: Rock = 0, sand = 1, loamy sand = 2, sandy loam = 3, loam = 4, silt loam = 5, sandy clay loam = 6, clay loam = 7, silt clay loam = 8, sandy clay = 9, silty clay = 10, clay = 11, user-defined = 12, based on Campbell and Norman 1990 Table 9.1.}
\item{REFL}{Soil solar reflectance, decimal \%}
\item{slope}{Slope in degrees}
\item{aspect}{Aspect in degrees (0 = north)}
\item{DEP}{Soil depths at which calculations are to be made (cm), must be 10 values starting from 0, and more closely spaced near the surface}
\item{minshade}{Minimum shade level to use (\%)}
\item{maxshade}{Maximum shade level to use (\%)}
\item{Usrhyt}{Local height (m) at which air temperature, wind speed and humidity are to be computed for organism of interest}
\item{...}{Additional arguments, see Details}
}
\value{
metout The above ground micrometeorological conditions under the minimum specified shade
shadmet The above ground micrometeorological conditions under the maximum specified shade
soil Hourly predictions of the soil temperatures under the minimum specified shade
shadsoil Hourly predictions of the soil temperatures under the maximum specified shade
soilmoist Hourly predictions of the soil moisture under the minimum specified shade
shadmoist Hourly predictions of the soil moisture under the maximum specified shade
soilpot Hourly predictions of the soil water potential under the minimum specified shade
shadpot Hourly predictions of the soil water potential under the maximum specified shade
humid Hourly predictions of the soil humidity under the minimum specified shade
shadhumid Hourly predictions of the soil humidity under the maximum specified shade
}
\description{
An implementation of the Niche Mapper microclimate model that uses the AWAP daily weather database
% NOTE(review): AWAP is the Australian climate database; confirm the correct weather source for the New Zealand implementation.
}
\examples{
micro<-micro_nz() # run the model with default location and settings
metout<-as.data.frame(micro$metout) # above ground microclimatic conditions, min shade
shadmet<-as.data.frame(micro$shadmet) # above ground microclimatic conditions, max shade
soil<-as.data.frame(micro$soil) # soil temperatures, minimum shade
shadsoil<-as.data.frame(micro$shadsoil) # soil temperatures, maximum shade
# append dates
days<-rep(seq(1,12),24)
days<-days[order(days)]
dates<-days+metout$TIME/60/24-1 # dates for hourly output
dates2<-seq(1,12,1) # dates for daily output
plotmetout<-cbind(dates,metout)
plotsoil<-cbind(dates,soil)
plotshadmet<-cbind(dates,shadmet)
plotshadsoil<-cbind(dates,shadsoil)
minshade<-micro$minshade
maxshade<-micro$maxshade
# plotting above-ground conditions in minimum shade
with(plotmetout,{plot(TALOC ~ dates,xlab = "Date and Time", ylab = "Air Temperature (deg C)"
, type = "l",main=paste("air temperature, ",minshade,"\% shade",sep=""))})
with(plotmetout,{points(TAREF ~ dates,xlab = "Date and Time", ylab = "Air Temperature (deg C)"
, type = "l",lty=2,col='blue')})
with(plotmetout,{plot(RHLOC ~ dates,xlab = "Date and Time", ylab = "Relative Humidity (\%)"
, type = "l",ylim=c(0,100),main=paste("humidity, ",minshade,"\% shade",sep=""))})
with(plotmetout,{points(RH ~ dates,xlab = "Date and Time", ylab = "Relative Humidity (\%)"
, type = "l",col='blue',lty=2,ylim=c(0,100))})
with(plotmetout,{plot(TSKYC ~ dates,xlab = "Date and Time", ylab = "Sky Temperature (deg C)"
, type = "l",main=paste("sky temperature, ",minshade,"\% shade",sep=""))})
with(plotmetout,{plot(VREF ~ dates,xlab = "Date and Time", ylab = "Wind Speed (m/s)"
, type = "l",main="wind speed")})
with(plotmetout,{points(VLOC ~ dates,xlab = "Date and Time", ylab = "Wind Speed (m/s)"
, type = "l",lty=2,col='blue')})
with(plotmetout,{plot(ZEN ~ dates,xlab = "Date and Time", ylab = "Zenith Angle of Sun (deg)"
, type = "l",main="solar angle, sun")})
with(plotmetout,{plot(SOLR ~ dates,xlab = "Date and Time", ylab = "Solar Radiation (W/m2)"
, type = "l",main="solar radiation")})
# plotting soil temperature for minimum shade
for(i in 1:10){
if(i==1){
plot(plotsoil[,i+3]~plotsoil[,1],xlab = "Date and Time", ylab = "Soil Temperature (deg C)"
,col=i,type = "l",main=paste("soil temperature ",minshade,"\% shade",sep=""))
}else{
points(plotsoil[,i+3]~plotsoil[,1],xlab = "Date and Time", ylab = "Soil Temperature
(deg C)",col=i,type = "l")
}
}
# plotting above-ground conditions in maximum shade
with(plotshadmet,{plot(TALOC ~ dates,xlab = "Date and Time", ylab = "Air Temperature (deg C)"
, type = "l",main="air temperature, sun")})
with(plotshadmet,{points(TAREF ~ dates,xlab = "Date and Time", ylab = "Air Temperature (deg C)"
, type = "l",lty=2,col='blue')})
with(plotshadmet,{plot(RHLOC ~ dates,xlab = "Date and Time", ylab = "Relative Humidity (\%)"
, type = "l",ylim=c(0,100),main="humidity, shade")})
with(plotshadmet,{points(RH ~ dates,xlab = "Date and Time", ylab = "Relative Humidity (\%)"
, type = "l",col='blue',lty=2,ylim=c(0,100))})
with(plotshadmet,{plot(TSKYC ~ dates,xlab = "Date and Time", ylab = "Sky Temperature (deg C)",
type = "l",main="sky temperature, shade")})
# plotting soil temperature for maximum shade
for(i in 1:10){
if(i==1){
plot(plotshadsoil[,i+3]~plotshadsoil[,1],xlab = "Date and Time", ylab = "Soil Temperature
(deg C)",col=i,type = "l",main=paste("soil temperature ",maxshade,"\% shade",sep=""))
}else{
points(plotshadsoil[,i+3]~plotshadsoil[,1],xlab = "Date and Time", ylab = "Soil Temperature
(deg C)",col=i,type = "l")
}
}
}
|
2a5c68f7cdc81c014b842ae48962cf4a6dfefd43
|
cf3d35a51ca24a2434826c81730c18904ceab1db
|
/surfacePlots.R
|
759afca741b3fd4a010369ea373c281bbea3c2c9
|
[] |
no_license
|
Hardervidertsie/DILI_screen_paper
|
2953c4715da122810a31d167dd9c2c7b0d1bcec2
|
a09aa51ace36ec4284d5b96c0181e2f3722d9a10
|
refs/heads/master
| 2021-03-27T08:27:04.852256
| 2017-06-19T14:38:54
| 2017-06-19T14:38:54
| 74,959,948
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,365
|
r
|
surfacePlots.R
|
## surfacePlots.R -- scratch script: testing rsm for fitting and displaying
## surface plots of dose/time response data.
## (The two lines above and the reference URLs below were previously bare,
## uncommented text, which made the whole file a syntax error.)
## References:
##   https://cran.r-project.org/web/packages/rsm/vignettes/rsm-plots.pdf
##   https://cran.r-project.org/web/packages/rsm/vignettes/rsm.pdf
library(rsm)  # error immediately (rather than warn) if rsm is unavailable
# Mean response per timeID/dose for one treatment + reporter fingerprint.
# NOTE(review): `combined.resp` is assumed to be a data.table created
# elsewhere (its [i, j, by] syntax requires data.table) -- confirm.
CR1 <- combined.resp[ treatment %in% "mercaptopurine" & fingerprints %in% "GFP_pos.2m_Srxn1", ]
CR1 <- CR1[, mean(value), by = c("timeID","dose_uM")]
CR1 <- CR1[ timeID < 17]
setnames(CR1, "V1", "value")
# Cubic polynomial response-surface fit, perspective plot and ANOVA summary
CR1.lmP <- lm(value ~ poly(dose_uM * timeID, degree= 3), data = CR1)
persp(CR1.lmP, dose_uM ~timeID, zlab = "response", zlim= c(0,1))
anova(CR1.lmP)
# NOTE(review): fitted on the same CR1 data as the model above -- this was
# presumably meant to use a DMSO-control subset; the model comparison below
# is meaningless until that is fixed.
CR1.lmP.dmso <- lm(value ~ poly(dose_uM * timeID, degree= 3), data = CR1)
persp(CR1.lmP.dmso, dose_uM ~timeID, zlab = "response", zlim= c(0,1))
anova(CR1.lmP, CR1.lmP.dmso)
#
# Placeholder colour matrix for the planned p-value colouring (unused so far)
col.matrix <- matrix(runif(80), nrow = 16, ncol = 5)
#CR1 <- CR1[, list( timeID, dose_uM, value )]
pdf("test.pdf", height = 60, width = 60)
par(mfrow = c(20,20))
# text()  # NOTE(review): bare text() errors (no coordinates given); disabled
dev.off()
# conclusion:
# model the time courses per dose
# make a grid: one matrix per compound-replicate
# make a matrix of t-test values between the replicates
# compute the mean of the replicate mean responses
# plot with 'persp' and colour by the p-values
# Template perspective plot for the planned grid.
# NOTE(review): `z` is never defined in this script; this call fails until a
# 59x59 elevation matrix `z` is supplied.
persp(seq(10, 300, 5), seq(10, 300, 5), z, phi = 45, theta = 45,
xlab = "X Coordinate (feet)", ylab = "Y Coordinate (feet)",
main = "Surface elevation data"
)
|
461eca2567536934adf135cd6b7f29482d385fda
|
0a906cf8b1b7da2aea87de958e3662870df49727
|
/biwavelet/inst/testfiles/rcpp_row_quantile/libFuzzer_rcpp_row_quantile/rcpp_row_quantile_valgrind_files/1610554442-test.R
|
7f8dfa57d1bb7f93d07b047f7076cc4c8998628f
|
[] |
no_license
|
akhikolla/updated-only-Issues
|
a85c887f0e1aae8a8dc358717d55b21678d04660
|
7d74489dfc7ddfec3955ae7891f15e920cad2e0c
|
refs/heads/master
| 2023-04-13T08:22:15.699449
| 2021-04-21T16:25:35
| 2021-04-21T16:25:35
| 360,232,775
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 402
|
r
|
1610554442-test.R
|
testlist <- list(data = structure(c(3.17466821391751e-319, 0, 2.8396262443943e+238, 2.8396262443943e+238, 2.8396262443943e+238, 2.8396262443943e+238, 2.8396262443943e+238, 2.83962624009443e+238, 4.06493636881578e-259, 1.06559867695611e-255, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0), .Dim = c(10L, 3L)), q = 0)
result <- do.call(biwavelet:::rcpp_row_quantile,testlist)
str(result)
|
ea3aeeb08c89f4be23dc115ca0e1d5eed8c2f2ab
|
ffdea92d4315e4363dd4ae673a1a6adf82a761b5
|
/data/genthat_extracted_code/hIRT/examples/coef_item.Rd.R
|
e882a0ba5161715347aef8f8a9978b2409e5f32c
|
[] |
no_license
|
surayaaramli/typeRrh
|
d257ac8905c49123f4ccd4e377ee3dfc84d1636c
|
66e6996f31961bc8b9aafe1a6a6098327b66bf71
|
refs/heads/master
| 2023-05-05T04:05:31.617869
| 2019-04-25T22:10:06
| 2019-04-25T22:10:06
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 358
|
r
|
coef_item.Rd.R
|
library(hIRT)
### Name: coef_item
### Title: Extracting Estimates of Item Parameters from Hierarchical IRT
### Models.
### Aliases: coef_item coef_item.hgrm coef_item.hltm
### ** Examples
y <- nes_econ2008[, -(1:3)]
x <- model.matrix( ~ party * educ, nes_econ2008)
z <- model.matrix( ~ party, nes_econ2008)
nes_m1 <- hgrm(y, x, z)
coef_item(nes_m1)
|
170dc5b1e1281c7dfc75bb3f85dda8d18643ae9c
|
b391a00661c6b5368b6406ede4d93c8a96913417
|
/setup.R
|
f8b8200648653a5f8ef3a54b76bccddf0a9f8e4f
|
[] |
no_license
|
normhcho/capstone
|
ad06769b211176f0dc48003434d55825bbc2c04d
|
ef7adbdbe0db813158e1c5d6b390d8cd6c9f8d0e
|
refs/heads/master
| 2020-03-28T02:53:43.772473
| 2018-09-06T02:46:19
| 2018-09-06T02:46:19
| 147,605,479
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 3,489
|
r
|
setup.R
|
# Loading the libraries
library(tm);library(quanteda);library(stringi);library(stringr);library(data.table);library(dplyr)
blogs <- readLines(file("./en_US.blogs.txt"), encoding = "UTF-8", skipNul = TRUE)
blogs <- iconv(blogs, from = "latin1", to = "UTF-8", sub="")
news <- readLines(file("./en_US.news.txt"), encoding = "UTF-8", skipNul = TRUE)
news <- iconv(news, from = "latin1", to = "UTF-8", sub="")
twitter <- readLines(file("./en_US.twitter.txt"), encoding = "UTF-8",
skipNul = TRUE)
twitter <- iconv(twitter, from = "latin1", to = "UTF-8", sub="")
corpus <- c(blogs,news,twitter)
rm(blogs);rm(news);rm(twitter)
set.seed(410)
sample <- sample(corpus, length(corpus)*0.33333)
badwords <-VectorSource(readLines("./badwords.txt"))
sample <- Corpus(VectorSource(sample))
sample <- tm_map(sample, stripWhitespace)
sample <- tm_map(sample, tolower)
sample <- tm_map(sample, removeNumbers)
#sample <- tm_map(sample, removeWords, stopwords("english"))
sample <- tm_map(sample, removePunctuation)
sample <- tm_map(sample, removeWords, badwords)
sample <- tm_map(sample, PlainTextDocument)
sample <- tm_map(sample, function(x) gsub("[^0-9A-Za-z///' ]", "", x))
myCorpus <- corpus(sample)
rm(badwords)
tok<-tokens(myCorpus,remove_numbers = TRUE, remove_punct = TRUE, remove_symbols = TRUE)
#mydf1<-dfm(tok,ngrams=1,concatenator = " " )
#mydf1<-dfm_trim(mydf1, min_termfreq = 50, min_docfreq = 50)
#df1 <- data.table(Content = featnames(mydf1), freq = colSums(mydf1),tip = ""
#df1<-df1[order(df1$freq,decreasing=TRUE),]
#Creating bigram table
mydf2<-dfm(tok,ngrams=2,concatenator = " " )
mydf2<-dfm_trim(mydf2, min_termfreq = 10, min_docfreq = 2)
df2 <- data.table(Content = featnames(mydf2), freq = colSums(mydf2),tip = sub("^\\s*((?:\\S+\\s+){0}\\S+).*", "\\1", featnames(mydf2)))
df2<-df2[order(df2$freq,decreasing=TRUE),]
rm(mydf2)
#Creating trigram table
mydf3<-dfm(tok,ngrams=3,concatenator = " " )
mydf3<-dfm_trim(mydf3, min_termfreq = 8, min_docfreq = 2)
df3 <- data.table(Content = featnames(mydf3), freq = colSums(mydf3), tip = sub("^\\s*((?:\\S+\\s+){1}\\S+).*", "\\1",featnames(mydf3)))
df3<-df3[order(df3$freq,decreasing=TRUE),]
rm(mydf3)
#Creating quadgram table
mydf4<-dfm(tok,ngrams=4,concatenator = " " )
mydf4<-dfm_trim(mydf4, min_termfreq = 6, min_docfreq = 2)
df4 <- data.table(Content = featnames(mydf4), freq = colSums(mydf4), tip = sub("^\\s*((?:\\S+\\s+){2}\\S+).*", "\\1",featnames(mydf4))
)
df4<-df4[order(df4$freq,decreasing=TRUE),]
rm(mydf4)
#Creating quintgram table
mydf5<-dfm(tok,ngrams=5,concatenator = " " )
mydf5<-dfm_trim(mydf5, min_termfreq = 4, min_docfreq = 2)
df5 <- data.table(Content = featnames(mydf5), freq = colSums(mydf5), tip = sub("^\\s*((?:\\S+\\s+){3}\\S+).*", "\\1",featnames(mydf5))
)
df5<-df5[order(df5$ freq,decreasing=TRUE),]
rm(mydf5)
#Creating sextgram table, might be overkill
mydf6<-dfm(tok,ngrams=6,concatenator = " " )
mydf6<-dfm_trim(mydf6, min_termfreq = 2, min_docfreq = 2)
df6 <- data.table(Content = featnames(mydf6), freq = colSums(mydf6), tip = sub("^\\s*((?:\\S+\\s+){4}\\S+).*", "\\1",featnames(mydf6))
)
df6<-df6[order(df5$ freq,decreasing=TRUE),]
rm(mydf6)
#combining the ngram data tables
df <- rbind(df2,df3,df4,df5,df6)
# adding the number of words from the ngrams, used for reference, not needed to run the function
df$words <- sapply(df$Content, function(x) length(unlist(strsplit(as.character(x), "\\W+"))))
save(df, file="capstone_ngram.RData")
rm(myCorpus)
rm(tok)
rm(corpus)
|
b2e9d003e327db9715c363b8207e1957a5bbbeb1
|
7912e18deeebf6d99b1c35bf9c7e83f49da0fc8b
|
/man/omeka_key.Rd
|
bb88c064e8ccd96d5ef02e3ccefbffbbdb5d2e2f
|
[] |
no_license
|
giocomai/omekaR
|
8fd2b6683dba53f1e52ab618f3b9d9cc958f7ef6
|
e7eee70c7ee2cecc5e5988fd2c196da7f5c3efc2
|
refs/heads/master
| 2020-07-04T06:09:00.856328
| 2014-11-22T19:51:39
| 2014-11-22T19:51:39
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 707
|
rd
|
omeka_key.Rd
|
% Generated by roxygen2 (4.0.2): do not edit by hand
\name{omeka_key}
\alias{omeka_key}
\title{Get or set the Omeka API key}
\usage{
omeka_key(key = NULL)
}
\arguments{
\item{key}{The Omeka API key to the site that you are using.}
}
\value{
The current Omeka API key, or NULL if none is set.
}
\description{
Pass an Omeka API key to this function to set the Omeka API key for the rest
of your script. Call this function without an argument to get the currently
set endpoint. If you do not set an Omeka API key, then this function returns
NULL and the API will be accessed without passing along a key. You can set
your API key as the \code{OMEKA_KEY} system environment variable.
}
\examples{
omeka_key()
}
|
245aaa686dd655c3c9bce106d8cb6eb9ae7492af
|
fed9f3581739dbb8e2b82565ad31b948e17910eb
|
/pierwszy.R
|
568e01270e3b07fbbf926326d597feb741fbf32a
|
[] |
no_license
|
Omuza/pjatkr
|
30a5fbccafd7e10276b8f0d821a4007ae9ed3d02
|
181748c360ba78fa6ffc6736a04d94f2bb5d7aa6
|
refs/heads/main
| 2023-01-07T11:10:51.944081
| 2020-11-08T10:03:18
| 2020-11-08T10:03:18
| 311,009,190
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 363
|
r
|
pierwszy.R
|
library(devtools)
library(httr)
library(jsonlite)
endpoint<-"https://api.openweathermap.org/data/2.5/weather?q=Lublin&units=metric&appid=ccd2c7f8b414cadf0c4383ce0a541dc2"
getWeather<-GET(endpoint)
weatherText<-content(getWeather, "text")
weatherJson<-fromJSON(weatherText, flatten=TRUE)
weatherDF<-as.data.frame(weatherJson)
View(weatherDF)
print(weatherDF)
|
604eec29bd41dc4e689c096d43234ab84cb189a6
|
673e813b89de8f8ccffe671c6b6070026abbc53d
|
/R/MyPrimers_taqman.R
|
3000a74972bf4a0b04df5c9e062f5810dfe8d57d
|
[] |
no_license
|
jpromeror/EventPointer
|
4eaa1f3a6bc653e72afef317517eec42dff41627
|
aa24e3a15c6bdbd7b6c950b962b3d24c3eb80950
|
refs/heads/master
| 2023-05-25T16:48:24.853661
| 2023-05-15T11:14:22
| 2023-05-15T11:14:22
| 80,099,260
| 4
| 0
| null | 2022-11-28T11:24:50
| 2017-01-26T09:01:24
|
R
|
UTF-8
|
R
| false
| false
| 358
|
r
|
MyPrimers_taqman.R
|
#' Data frame with primers design for taqman PCR
#'
#'
#' @name MyPrimers_taqman
#'
#' @return MyPrimers_taqman object contains a data.frame with
#' the information of the design primers for taqman
#' PCR.
#'
#' @format A \code{data.frame} object displays the relative
#' information for primers design for taqman PCR
#'
#' @usage data(MyPrimers_taqman)
NULL
|
1d321c793a49a2d16a5185122ef47af4c4e2bbf5
|
fbd13aa34e784ccae3bdd238cfdcb12ac915470a
|
/man/RhttpdApp-class.Rd
|
94b32860d4a791e4d11958ea8e9a235fa590da05
|
[] |
no_license
|
cran/Rook
|
676dd56d2d2b0a44f872dca1d5f44e4c8ad78e8e
|
933db81fbecf2f5e7a630a130242f13131ea68a9
|
refs/heads/master
| 2022-11-10T00:38:07.380159
| 2022-11-07T07:50:19
| 2022-11-07T07:50:19
| 17,693,393
| 0
| 1
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,683
|
rd
|
RhttpdApp-class.Rd
|
\name{RhttpdApp-class}
\Rdversion{1.1}
\docType{class}
\alias{RhttpdApp-class}
\alias{RhttpdApp}
\title{Class \code{RhttpdApp}}
\description{
Creates a Rook application ready to add to an \code{\link{Rhttpd}} server.
}
\details{
The internal web server allows dispatching to user-defined closures
located in tools:::.httpd.handlers.env. For instance, if a handler named
'foo' is placed there, then the url path to that handler is /custom/foo.
\code{RhttpdApp} along with \code{\link{Rhttpd}} hide these details by
allowing a user to create application objects specifying only their name
and the application. There is currently a limit of 63 characters
or less for application names.
NOTE: When a file is given as the value of the \code{app} argument
to \code{new()}, it is monitored for timestamp changes. If a change
occurs in the modification time as returned by \code{\link[base]{file.info}},
then the file is sourced prior to handling subsequent requests.
}
\seealso{
\code{\link{Rhttpd}}.
}
\examples{
s <- Rhttpd$new()
s$add(RhttpdApp$new(
name='summary',
app=system.file('exampleApps/summary.R',package='Rook')
))
\dontrun{
s$start(quiet=TRUE)
s$browse(1)
}
s$remove(all=TRUE)
# Stops the server but doesn't uninstall the app
\dontrun{
s$stop()
}
s$remove(all=TRUE)
rm(s)
}
\keyword{classes}
\section{Methods}{
\describe{
\item{\code{new(app, name)}:}{ Creates an object of class \code{RhttpdApp}. Argument \code{app} can be any \code{\link{Rook}} aware object or it can be a location to a file whose source creates a Rook aware object. That object must be named either \code{'app'} or the value of \code{name}. \code{name} is a character vector.}
}
}
|
32711b86f92509c64c8f4ed7a0a8c524219d3d1f
|
22865e39c7c66cd740bc43313e4b2279f7d75d7f
|
/R/base.R
|
3eff32e4fab0599590d794769e65c17b7c4ea24c
|
[] |
no_license
|
cran/thematic
|
b545fc6a1ee118e22a8f8c7e66aa37571c04b655
|
004f263db643a1cc1bffc6e2d374d24942faa239
|
refs/heads/master
| 2023-08-18T15:27:49.716782
| 2023-08-11T16:30:02
| 2023-08-11T18:30:29
| 334,224,013
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,453
|
r
|
base.R
|
base_palette_set <- function(theme = .globals$theme) {
base_palette_restore()
codes <- theme$qualitative
.globals$base_palette <- if (isTRUE(is.na(codes))) {
attempt_palette()
} else {
attempt_palette(codes)
}
}
base_palette_restore <- function() {
if (is.null(.globals$base_palette)) return()
attempt_palette(.globals$base_palette)
rm("base_palette", envir = .globals)
}
base_params_set <- function(theme = .globals$theme) {
base_params_restore()
params <- list()
bg <- theme$bg
if (length(bg)) {
params <- c(params, attempt_par(bg = bg))
}
fg <- theme$fg
if (length(fg)) {
params <- c(params, attempt_par(
fg = fg,
col.axis = fg,
col.lab = fg,
col.main = fg,
col.sub = fg
))
}
font <- theme$font
if (length(font$family)) {
params <- c(params, attempt_par(
family = font$family,
cex.axis = font$scale,
cex.lab = font$scale,
cex.main = font$scale * 1.2,
cex.sub = font$scale
))
}
.globals$base_params <- params
}
base_params_restore <- function() {
if (is.null(.globals$base_params)) return()
do.call(attempt_par, .globals$base_params)
rm("base_params", envir = .globals)
}
attempt_par <- function(...) {
attempt_(par(...))
}
attempt_palette <- function(...) {
attempt_(palette(...))
}
attempt_ <- function(expr) {
if (is_null_device()) {
attempt_with_new_device(expr)
} else {
force(expr)
}
}
|
21aa39e154d751e27708eeb8745a50d1265c9f04
|
818dd3954e873a4dcb8251d8f5f896591942ead7
|
/Mouse/ClassicalPhenotypes/FV3/QTLanalysis.R
|
6c6d587b4fa2700506e66208247fdff2837cdf07
|
[] |
no_license
|
DannyArends/HU-Berlin
|
92cefa16dcaa1fe16e58620b92e41805ebef11b5
|
16394f34583e3ef13a460d339c9543cd0e7223b1
|
refs/heads/master
| 2023-04-28T07:19:38.039132
| 2023-04-27T15:29:29
| 2023-04-27T15:29:29
| 20,514,898
| 3
| 1
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,982
|
r
|
QTLanalysis.R
|
# QTL analysis
#
# copyright (c) 2014-2020 - Brockmann group - HU Berlin, Danny Arends
# last modified Juli, 2014
# first written March, 2009
#
library(qtl)
setwd("D:/Edrive/Mouse/ClassicalPhenotypes/FV3")
# Analyse the whole F2 cross
cross <- read.cross("csv", file="cross_F2.csv",genotypes=c("A","H","B"), na.strings="NA")
cross <- jittermap(cross)
cross$pheno <- cbind(cross$pheno, FATDLEAN = cross$pheno[,"FAT70"] / cross$pheno[,"LEAN70"])
sex <- as.numeric(cross$pheno[,"Sex"])
season <- as.numeric(cross$pheno[,"sea"])
futter <- as.numeric(cross$pheno[,"Futter"])
littersize <- as.numeric(cross$pheno[,"pupsize"])
resFATDLEANFUTTER <- scanone(cross, pheno.col="FATDLEAN", addcovar = cbind(sex, season, littersize, futter), intcovar = futter)
resFATDLEAN <- scanone(cross, pheno.col="FATDLEAN", addcovar = cbind(sex, season, littersize, futter))
resFUTTER <- scanone(cross, pheno.col="Futter", addcovar = cbind(sex, season, littersize))
plot(resFUTTER, resFATDLEAN, resFATDLEANFUTTER, main="Fat/Lean = Sex + Season + Futter + G + G:Futter")
# Analyse the different parts (NF versus FF)
crossFF <- read.cross("csv", file="cross_F2_FF.csv",genotypes=c("A","H","B"), na.strings="NA")
crossNF <- read.cross("csv", file="cross_F2_NF.csv",genotypes=c("A","H","B"), na.strings="NA")
crossNF <- calc.genoprob(crossNF)
crossNF$pheno <- cbind(crossNF$pheno, FATDLEAN = crossNF$pheno[,"FAT70"] / crossNF$pheno[,"LEAN70"])
genotypes <- pull.geno(fill.geno(crossNF))
phenotype <- crossNF$pheno[,"FATDLEAN"]
sex <- as.numeric(crossNF$pheno[,"Sex"])
season <- as.numeric(crossNF$pheno[,"sea"])
littersize <- as.numeric(crossNF$pheno[,"WG21"])
resFATDLEAN <- scanone(crossNF, pheno.col="FATDLEAN", addcovar=cbind(sex, season, littersize))
topmarker <- genotypes[,rownames(resFATDLEAN[which.max(resFATDLEAN[,3]),])]
genotypes <- genotypes[which(topmarker!=1),]
phenotype <- phenotype[which(topmarker!=1)]
lods <- apply(genotypes, 2, function(genotype){ return(-log10(anova(lm(phenotype ~ as.factor(genotype)))[[5]][1])) })
plot(lods, t='l')
crossNF$pheno <- cbind(crossNF$pheno, FATDCW = crossNF$pheno[,"FAT70"] / crossNF$pheno[,"CW"])
resFAT <- scanone(crossNF, pheno.col="FAT70", addcovar=cbind(sex, season, littersize))
plot(resFAT, resFATDLEAN, col=c("green", "blue"))
RESvar <- lm(crossNF$pheno[,"FATDLEAN"] ~ sex + season + littersize + genotypes[,which(resFATDLEAN[,3] > 10)][,1])
phenoresiduals <- rep(NA,nrow(crossNF$pheno))
phenoresiduals[as.numeric(names(RESvar$residuals))] <- RESvar$residuals
crossNF$pheno <- cbind(crossNF$pheno, FATDLEANres = phenoresiduals)
resFATDLEANres <- scanone(crossNF, pheno.col="FATDLEANres")
plot(resFATDLEAN, resFATDLEANres, col=c("blue", "black"))
SSexp <- anova(lm(crossNF$pheno[,"FATDLEANres"] ~ as.factor(pull.geno(crossNF)[,"rs3151604"])))[,"Sum Sq"]
SStotal <- sum((crossNF$pheno[,"FATDLEANres"] - mean(crossNF$pheno[,"FATDLEANres"],na.rm=TRUE))^2,na.rm=TRUE)
SSexp/SStotal
|
f7ef30ec3880c13fce02825ee83485d5f0ba35b8
|
9d600582590ce8b61b9f3a0b76a209fffa60215c
|
/netTime1000SamplingDistribution.R
|
dcc62cf25136039f9e20e039fbebfd27b5607434
|
[] |
no_license
|
amlanbanerjee/pathway_to_statistics
|
f43bd5e6394860962d9134a43e7fe030de121816
|
e997f1cc2d9aa406a57e0f19fb205fc11bfb0b1f
|
refs/heads/main
| 2023-06-14T21:34:55.673148
| 2021-07-09T14:59:38
| 2021-07-09T14:59:38
| 378,179,236
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 882
|
r
|
netTime1000SamplingDistribution.R
|
library(openintro)
data(COL)
myPNG('netTime1000SamplingDistribution.png', 500, 400,
mar = c(4, 4, 1, 1),
mgp = c(2.7,0.7,0))
set.seed(5)
means <- c()
for (i in 1:1000) {
temp <- sample(nrow(run10), 100)
means[i] <- mean(run10$time[temp], na.rm=TRUE)
}
plot(0, 0,
type = 'n',
xlim = c(70, 125),
ylim = c(0, 145),
xlab = 'Sample mean',
ylab = 'Frequency',
axes = FALSE)
m <- mean(run10$time, na.rm = TRUE)
s <- sd(run10$time, na.rm = TRUE)/10
histPlot(means, col = COL[1], breaks = 25, add = TRUE)
abline(h = 0)
axis(1, at = seq(0, 200, 10))
axis(1, at = seq(0, 200, 10) + 5, rep("", 21), tcl = -0.15)
axis(2, at = c(0, 50, 100, 150))
text(112, 75,
paste("The distribution of sample means,",
"shown here, is much narrower than",
"the distribution of raw observations.",
sep = "\n"))
dev.off()
|
fa72cf2bbf93a15ec66300c05f5d8c682bf53ba0
|
fdf19c5e406df9d9f52409a18bd77e4b120eb87f
|
/man/limma_stats_fun.Rd
|
b1b88fc1eceefe944325c4ca7a534b027d10b325
|
[
"MIT"
] |
permissive
|
MassDynamics/lfq_processing
|
d2325cabb0d50779d9ea5beb8596fefb815e0bca
|
5480744fbdfc4aea014dec6589e86b3dc2b0f632
|
refs/heads/main
| 2023-05-12T02:28:48.982754
| 2023-05-04T03:28:00
| 2023-05-04T03:28:00
| 341,805,035
| 4
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 625
|
rd
|
limma_stats_fun.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/limma_stats_fun.R
\name{limma_stats_fun}
\alias{limma_stats_fun}
\title{This function performs the differential expression analysis with limma including all pairwise comparisons
using the condition provided}
\usage{
limma_stats_fun(
ID_type,
int_type,
condition_col_name,
run_id_col_name,
rep_col_name,
funDT,
pairwise.comp = NULL,
all.comparisons = TRUE,
fix_distr = FALSE
)
}
\description{
This function performs the differential expression analysis with limma including all pairwise comparisons
using the condition provided
}
|
4d21eb8f884c31158f5451de11df50cd299fa643
|
c870f9319784e1c3f36b827cc08a3103816c4674
|
/IDS 572_Assignment 1_Group code.R
|
6fa863bef1aee864abe73862be93cf7bee6e06af
|
[] |
no_license
|
adriankennyb/assignment_1
|
33f201bc30ceab23f4ecfe3761548d7790c33036
|
4a3985c70182dd2e14ddc977b0218ac7bddefb8b
|
refs/heads/master
| 2023-08-13T23:04:56.868672
| 2021-10-01T23:34:18
| 2021-10-01T23:34:18
| 410,384,355
| 0
| 1
| null | null | null | null |
UTF-8
|
R
| false
| false
| 10,243
|
r
|
IDS 572_Assignment 1_Group code.R
|
##IDS 572 - Assignment 1A
##Authors: Jinrong Qiu, Adrian Blamires, Mike Gannon
##Due date September 25, 2021
lcdf <- read.csv("~/Desktop/School/IDS 572/Assignment 1/lcData100K.csv")
library('tidyverse')
library('lubridate')
library('rpart')
library('dplyr')
library('knitr')
library('ggplot2')
library(pacman)
library(tidyr)
glimpse(lcdf)
summary(lcdf)
##Question 2I
##Proportion of Defaults
lcdf %>% group_by(loan_status) %>% tally() %>% mutate(percent=n/sum(n)*100)
##Proportion of Defaults at grade level
lcdf %>% group_by(grade,loan_status) %>% tally() %>% mutate(percent=n/sum(n)*100)
##Proportion of Default/Fully Paid at grade/subgrade level
Q2i<- lcdf %>% group_by(grade,sub_grade,loan_status) %>% tally() %>% mutate(percent=n/sum(n)*100)
View(Q2i)
##Default rate increases as the grade level decreases (A to G). This relationship is consistent
##with sub grade too. This makes sense since the grade is related to overall risk of the loan. Riskier loans are
##associated with higher rates of default
##Question 2II
##Number of Loans in each grade
lcdf %>% group_by(grade) %>% tally() %>% mutate(percent=n/sum(n)*100)
##Loan amounts (Total, avg, stdev, min, max) by loan grade
lcdf %>% group_by(grade) %>% summarize(TotalLoanAmt=sum(funded_amnt),AvgLoanAmt=mean(funded_amnt),stdevLoanAmt=sd(funded_amnt),MinLoanAmt=min(funded_amnt),MaxLoanAmt=max(funded_amnt))
##Loan amounts (Total, avg, stdev, min, max) by loan grade and sub grade
Q2ii_Amount<-lcdf %>% group_by(grade,sub_grade) %>% summarize(TotalLoanAmt=sum(funded_amnt),AvgLoanAmt=mean(funded_amnt),stdevLoanAmt=sd(funded_amnt),MinLoanAmt=min(funded_amnt),MaxLoanAmt=max(funded_amnt))
View(Q2ii_Amount)
##interest rates (avg, stdev, min,max) by loan grade
lcdf %>% group_by(grade) %>% summarize(Avginterestrate=mean(int_rate),stdevinterest=sd(int_rate),Mininterstrate=min(int_rate),Maxinterestrate=max(int_rate))
##interest rates (avg, stde, min, max) by loan grade and sub grade
Q2ii_Interestrate <-lcdf %>% group_by(grade, sub_grade) %>% summarize(Avginterestrate=mean(int_rate),stdevinterest=sd(int_rate),Mininterstrate=min(int_rate),Maxinterestrate=max(int_rate))
View(Q2ii_Interestrate)
##Generally the amount funded decreases as loan grade gets worse and interest rates increase as
##loan grades/sub-grades get worse. Stdev in interest rates and funded amount increases as the loan grades get worse
##This is consistent with what woudl be expected since higher risk loans need to have a higher potential return to the investor. Therefore there would be more support for investors to
##invest in less risky loans, and those that are risky shoudl have a higher interest rate.
##Question2III
lcdf$annRet <- ((lcdf$total_pymnt -lcdf$funded_amnt)/lcdf$funded_amnt)*(12/36)*100
head(lcdf[, c("last_pymnt_d", "issue_d")])
lcdf$last_pymnt_d<-paste(sep = "",lcdf$last_pymnt_d, "-01")
head(lcdf[, c("last_pymnt_d", "issue_d")])
lcdf$last_pymnt_d<-parse_date_time(lcdf$last_pymnt_d,"myd")
head(lcdf[, c("last_pymnt_d", "issue_d")])
lcdf$actualTerm <- ifelse(lcdf$loan_status=="Fully Paid", as.duration(lcdf$issue_d %--% lcdf$last_pymnt_d)/dyears(1), 3)
lcdf$actualReturn <- ifelse(lcdf$actualTerm>0, ((lcdf$total_pymnt -lcdf$funded_amnt)/lcdf$funded_amnt)*(1/lcdf$actualTerm)*100, 0)
lcdf %>% select(loan_status, int_rate, funded_amnt, total_pymnt, annRet, actualTerm, actualReturn) %>% head()
boxplot(lcdf$actualTerm~lcdf$grade, data=lcdf, xlab("Loan Grade"), ylab("ActualTerm"))
lcdf%>%group_by(grade)%>%summarize(AvgTerm=mean(lcdf$actualTerm), MinTerm=min(lcdf$actualTerm), MaxTerm=max(lcdf$actualTerm))
summary(lcdf$actualTerm)
##Question2IV
lcdf %>% group_by(sub_grade, loan_status) %>% summarise(nLoans=n(), avgIntRate=mean(int_rate), avgLoanAmt=mean(loan_amnt), avgActRet = mean(actualReturn), avgActTerm=mean(actualTerm))
View(Q2Iv <-lcdf %>% group_by(sub_grade, loan_status) %>% summarise(nLoans=n(), avgIntRate=mean(int_rate), avgLoanAmt=mean(loan_amnt), avgActRet = mean(actualReturn), avgActTerm=mean(actualTerm)))
##Question2V
lcdf %>% group_by(purpose) %>% summarise(nLoans=n(), defaults=sum(loan_status=="Charged Off"), defaultRate=defaults/nLoans, avgLoanAmt=mean(loan_amnt))
table(lcdf$purpose, lcdf$grade)
##Question2VI
lcdf %>% group_by(emp_length) %>% summarise(nLoans=n(), defaults=sum(loan_status=="Charged Off"), defaultRate=defaults/nLoans, avgIntRate=mean(int_rate), avgLoanAmt=mean(loan_amnt), avgActRet=mean(actualReturn),avgActTerm=mean(actualTerm))
lcdf$emp_length <- factor(lcdf$emp_length, levels=c("n/a", "< 1 year","1 year","2 years", "3 years" , "4 years", "5 years", "6 years", "7 years" , "8 years", "9 years", "10+ years" ))
lcdf %>% group_by(emp_length) %>% summarise(nLoans=n(), defaults=sum(loan_status=="Charged Off"), defaultRate=defaults/nLoans, avgIntRate=mean(int_rate), avgLoanAmt=mean(loan_amnt), avgActRet=mean(actualReturn),avgActTerm=mean(actualTerm))
lcdf %>% group_by(loan_status) %>% summarise(AnnualIncome=mean(annual_inc))
##Question2VII
#New Variable - DTI after loan origination
Monthly_Income <-lcdf$annual_inc/12
Monthly_Debt_Beforeloan <- Monthly_Income*lcdf$dti
lcdf$DTI_AfterLoan <- round(((Monthly_Debt_Beforeloan+lcdf$installment)/Monthly_Income),2)
Q2VIIA <- lcdf %>% select(c(DTI_AfterLoan,grade,loan_status))
Q2VIIA %>% group_by(grade,loan_status) %>% summarize(AvgDTI_AfterLoan=mean(DTI_AfterLoan),MedianDTI_AfterLoan=median(DTI_AfterLoan),stdev=sd(DTI_AfterLoan), Min=min(DTI_AfterLoan), Max=max(DTI_AfterLoan))
summary(lcdf$DTI_AfterLoan)
boxplot(lcdf$DTI_AfterLoan~lcdf$grade,lcdf,ylab=("DTI After Loan"),xlab = "Loan Grade")
#New Variable - Expected Interest as Percent of Annual Income
expected_interest <- lcdf$installment*36-lcdf$loan_amnt
lcdf$expint_perincome <-round(((expected_interest/lcdf$annual_inc)*100),2)
lcdf %>% group_by(grade,loan_status) %>% summarize(AVGexpint_perincome=mean(expint_perincome),Medianexpint_perincome=median(expint_perincome),stdev=sd(expint_perincome),Min=min(expint_perincome),Max=max(expint_perincome))
boxplot(lcdf$expint_perincome~lcdf$grade,lcdf,ylab = ("Expected Interest Per Income"),xlab = ("Loan Grade"))
View(filter(lcdf, lcdf$expint_perincome<0))
##New Variable - Percent of accounts still open
lcdf$per_accounts_open <-round((lcdf$open_acc/lcdf$total_acc)*100,2)
lcdf %>% group_by(grade,loan_status) %>% summarize(AVGPercentOpenAcc=mean(per_accounts_open),MedianPercentOpenAcc=median(per_accounts_open),stdev=sd(per_accounts_open),Min=min(per_accounts_open),Max=max(per_accounts_open))
boxplot(lcdf$per_accounts_open~lcdf$grade,lcdf,ylab = ("Percent of Accounts Open"), xlab = ("Loan Grade"))
##Question 2C - Missing Values
lcdf <- lcdf %>% select_if(function(x){!all(is.na(x))})
names(lcdf)[colSums(is.na(lcdf))>0]
colMeans(is.na(lcdf))
colMeans(is.na(lcdf))[colMeans(is.na(lcdf))>0]
nm<-names(lcdf)[colMeans(is.na(lcdf))>0.6]
lcdf <- lcdf %>% select(-nm)
colMeans(is.na(lcdf))[colMeans(is.na(lcdf))>0]
nm<- names(lcdf)[colSums(is.na(lcdf))>0]
summary(lcdf[, nm])
lcx<-lcdf[, c(nm)]
colMeans(is.na(lcx))[colMeans(is.na(lcx))>0]
lcx<- lcx %>% replace_na(list(mths_since_last_delinq = 500))
#For revol_util, suppose we want to replace the misisng values by the median
lcx<- lcx %>% replace_na(list(revol_util=median(lcx$revol_util, na.rm=TRUE)))
lcx$revol_util
summary(lcx[, nm])
lcdf<- lcdf %>% replace_na(list(mths_since_last_delinq=500, revol_util=median(lcdf$revol_util, na.rm=TRUE), bc_open_to_buy=median(lcdf$bc_open_to_buy, na.rm=TRUE), mo_sin_old_il_acct=1000, mths_since_recent_bc=1000, mths_since_recent_inq=50, num_tl_120dpd_2m = median(lcdf$num_tl_120dpd_2m, na.rm=TRUE),percent_bc_gt_75 = median(lcdf$percent_bc_gt_75, na.rm=TRUE), bc_util=median(lcdf$bc_util, na.rm=TRUE) ))
##Question 3 - Removing Leakage Variables
lcdf2 <- lcdf %>% select(-c("loan_amnt",delinq_2yrs,inq_last_6mths,revol_bal,revol_util,total_rec_late_fee,recoveries,collection_recovery_fee,collections_12_mths_ex_med,acc_now_delinq,tot_cur_bal,tot_coll_amt,acc_open_past_24mths,avg_cur_bal,bc_open_to_buy,chargeoff_within_12_mths,delinq_amnt,mo_sin_rcnt_rev_tl_op,mo_sin_rcnt_tl,mths_since_recent_bc,mths_since_recent_inq,num_actv_bc_tl,num_actv_rev_tl,num_tl_120dpd_2m,num_tl_30dpd,num_tl_90g_dpd_24m,num_tl_op_past_12m,pct_tl_nvr_dlq,term,emp_title,issue_d,pymnt_plan,purpose,zip_code,addr_state,earliest_cr_line,out_prncp,out_prncp_inv,total_pymnt,total_pymnt_inv,total_rec_prncp,total_rec_int,last_pymnt_d,last_pymnt_amnt,last_credit_pull_d,policy_code,application_type,hardship_flag,disbursement_method,debt_settlement_flag,annRet,actualTerm,actualReturn))
View(lcdf2)
##Question 4 - univariate analysis
library(pROC)
auc(response=lcdf2$loan_status, lcdf2$loan_amnt)
auc(response=lcdf2$loan_status, as.numeric(lcdf2$emp_length))
aucsNum<-sapply(lcdf2 %>% select_if(is.numeric), auc, response=lcdf2$loan_status)
auc(response=lcdf2$loan_status, as.numeric(lcdf2$emp_length))
aucAll<- sapply(lcdf2 %>% mutate_if(is.factor, as.numeric) %>% select_if(is.numeric), auc, response=lcdf2$loan_status)
library(broom)
tidy(aucAll[aucAll > 0.54]) %>% View() #TO determine which variables have auc > 0.54
tidy(aucAll[aucAll >=0.55 & aucAll < 0.59]) %>% View() #TO determine which variables have auc between 0.54 and 0.59
TRNPROP = 0.7
nr<-nrow(lcdf2)
trnIndex<- sample(1:nr, size = round(TRNPROP * nr), replace=FALSE)
lcdfTrn <- lcdf2[trnIndex, ]
lcdfTst <- lcdf2[-trnIndex, ]
lcdf2$emp_length <- factor(lcdf2$emp_length, levels=c("n/a", "< 1 year","1 year","2 years", "3 years" , "4 years", "5 years", "6 years", "7 years" , "8 years", "9 years", "10+ years" ))
library(rpart)
lcdfTrn$loan_status <- factor(lcdfTrn$loan_status, levels=c("Fully Paid", "Charged Off"))
lcDT1$variable.importance
lcDT1 <- rpart(loan_status ~., data=lcdfTrn, method="class", parms = list(split = "information"), control = rpart.control(cp=0.0001, minsplit = 10))
#Do we want to prune the tree -- check for performance with different cp levels
printcp(lcDT1)
lcDT1p<- prune.rpart(lcDT1, cp=0.001)
printcp(lcDT1p)
summary(lcDT1)
library(rattle)
library(rpart.plot)
library(RColorBrewer)
|
8989ffea60641629b0e808f0f229cc390046805d
|
e3da04f20a57e0b11677faff78e700dbe05793ed
|
/NEX2018_08/Project/R/data_analysis.R
|
9e2b5e896b3f161d462028997dcb918fb6248aa4
|
[] |
no_license
|
salisaresama/NEX
|
73417608e9e700febbb53e1771b29adef076a902
|
2a55e12754ac39521080c5f610903b65aa85e8e8
|
refs/heads/master
| 2020-03-30T19:53:03.807727
| 2019-02-25T20:30:03
| 2019-02-25T20:30:03
| 151,563,825
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 7,858
|
r
|
data_analysis.R
|
# Clear the environment
rm(list = ls())
# Load libraries
PACKAGES <- c('FrF2', 'lattice', 'pid',
'tidyverse', 'nortest', 'lmtest', 'caret')
lapply(PACKAGES, require, character.only = TRUE)
rm(PACKAGES)
# Set up the working directory
setwd(paste0(getwd(), '/NEX2018_08/Project/R'))
# Load source files
source('functions/DataPreparation.R')
source('functions/CreateMultiBoxPlot.R')
source('functions/MapColNames.R')
source('functions/PlotAllInteractions.R')
################################################
########## LOAD DATA ##########
################################################
# Load the dataset
data_all <- loadData(FILE = '../Data/experimental_data.csv')
# Create a data frame with original values
# Separate center points
MAIN_IDX <- seq(1, 64)
data_noncent <- data_all[MAIN_IDX, ]
data_center <- data_all[-MAIN_IDX, ]
# Remove unnecessary data and variables from the environment
rm(list = setdiff(ls(), c('data_noncent', 'data_center', 'data_all', lsf.str())))
################################################
########## BASIC VISUAL ANALYSIS ##########
################################################
#Creating of boxplots
boxplot_all_vars <- createMultiBoxPlot(
df = data,
OUT_PATH = "figures/",
PLOT_NAME = "boxplot_all_vars",
PRINT_PLOT = FALSE
)
################################################
############### EFFECTS ###################
################################################
#one-way ANOVA test that tests if variables from the data_noncent
#have same mean values or not. Output values of the test are
#significant variable for this dataset
data_noncent.aov_simple <- aov(measurement ~ ., data = data_noncent)
#summary of the test
summary(data_noncent.aov_simple)
#Mean excess plots. These plots show dependency between mean
#values of different variables and measurements
MEPlot(data_noncent.aov_simple)
################################################
#############   INTERACTIONS    ################
################################################
# Plot and save every pairwise interaction against the measurement.
plotAllInteractions(df = data_noncent,
                    RESPONSE_NAME = 'measurement',
                    OUT_PATH = 'figures/')
# Convert the six factor columns into an FrF2 design object so pid/FrF2
# diagnostics (DanielPlot, paretoPlot) can be applied.
# Idiom fixes: seq(1:6) is just 1:6; spell out FALSE instead of F.
data_design <- data2design(data_noncent[, 1:6], quantitative = rep(FALSE, 6))
data_design <- add.response(data_design, data_noncent$measurement)
class(data_design)
# Daniel (half-normal) plot: labelled effects are the significant ones.
data_noncent.aov_allint <- aov(measurement ~ mass*distance*filling*hand*vision*stance,
                               data = data_noncent)
summary(data_noncent.aov_allint)
# Fix: the original called DanielPlot() three times per QQ diagnostic, which
# recomputes the effects and redraws the Daniel plot each time; call it once
# and reuse the returned coordinates.
dp_allint <- DanielPlot(data_noncent.aov_allint)
qqplot(dp_allint$x, dp_allint$y)
qqline(dp_allint$y)
# Same diagnostic restricted to main effects and two-way interactions.
data_noncent.aov_doubleint <- aov(measurement ~ (mass + distance + filling + hand + vision + stance)^2,
                                  data = data_noncent)
summary(data_noncent.aov_doubleint)
dp_doubleint <- DanielPlot(data_noncent.aov_doubleint)
qqplot(dp_doubleint$x, dp_doubleint$y)
qqline(dp_doubleint$y)
# Pareto chart of effect sizes: taller bars = more significant terms.
paretoPlot(data_noncent.aov_allint)
################################################
################    ANOVA    ###################
################################################
# Candidate model with the interaction terms suggested by the Daniel and
# Pareto plots (intercept removed with `- 1`).
prefinal.aov <- aov(lm(measurement ~ distance + mass:distance:filling:stance +
                         mass:distance:hand:stance + filling:hand:vision:stance +
                         mass:distance:filling:vision:stance + distance:hand:stance +
                         mass:distance:stance + distance:hand:vision:stance +
                         distance:filling + mass:hand + distance:filling:vision +
                         hand:stance + vision:stance + distance:vision:stance +
                         distance:filling:stance - 1, data = data_noncent))
summary(prefinal.aov)
# Final model.
# Fix: the original called lm.default(), which does not exist — stats::lm is
# not an S3 generic and exports no lm.default method — so this line errored.
# Plain lm() is the correct call.
final.aov <- aov(lm(formula = measurement ~ distance +
                      distance:stance:vision - 1, data = data_noncent))
summary(final.aov)
# Rejected alternative kept for reference:
#final.aov <- aov(lm(measurement ~ distance + distance:filling:stance - 1, data = data_noncent))
#summary(final.aov)
################################################
#############   CENTER POINTS   ################
################################################
# Box plots of mass and distance including the centre-point runs.
# createSingleBoxPlot is presumably defined in CreateMultiBoxPlot.R — confirm.
b1 <- createSingleBoxPlot(mapColNames(data_all, "mass"), 1, 7, "Mass, [g]", "Measurement, [mm]",
  "Mass with Center Points",
  PRINT_PLOT = TRUE
)
b2 <- createSingleBoxPlot(mapColNames(data_all, "distance"), 2, 7, "Distance, [m]", "Measurement, [mm]",
  "Distance with Center Points",
  PRINT_PLOT = TRUE
)
# Arrange the two panels side by side and write them to disk.
plot_final <- grid.arrange(b1, b2, ncol = 2)
ggsave(
  filename = "figures/boxplot_center_points.png",
  plot = plot_final,
  width = 170, height = 115, units = "mm"
)
# Linear model of measurement on mass and distance, fitted on ALL runs
# (including centre points), without an intercept.
center.lm <- lm(measurement ~ mass + distance - 1, data = data_all)
summary(center.lm)
# ANOVA table for the same fit.
center.aov <- aov(center.lm)
summary(center.aov)
################################################
##########   LINEAR REGRESSION    ##############
################################################
# Map the factor levels of mass and distance to their numeric values so a
# continuous regression can be fitted.
data_all_num <- mapColNames(data_all, c('mass', 'distance'))
data_all_num <- data_all_num %>%
  mutate(
    mass = as.numeric(as.character(mass)),
    distance = as.numeric(as.character(distance))
  )
# Final linear model (no intercept).
final.lm_num <- lm(measurement ~ mass + distance - 1, data = data_all_num)
summary(final.lm_num)
# Normality tests on the residuals (Lilliefors and Shapiro-Wilk).
lillie.test(residuals(final.lm_num))
shapiro.test(residuals(final.lm_num))
# Heteroscedasticity check (Breusch-Pagan).
bptest(final.lm_num)
# Box-Cox transformation of the response (caret::BoxCoxTrans).
bc_transf <- BoxCoxTrans(data_all_num$measurement)
data_all_num$measurement_bc <- predict(bc_transf, data_all_num$measurement)
# Standard 2x2 diagnostic plots for the untransformed fit.
# NOTE(review): par() is changed without restoring the old settings.
par(mfrow = c(2, 2))
plot(final.lm_num)
# Refit on the Box-Cox-transformed response.
final_bc.lm_num <- lm(measurement_bc ~ mass + distance - 1, data = data_all_num)
summary(final_bc.lm_num)
# Repeat the residual normality tests on the transformed fit.
lillie.test(residuals(final_bc.lm_num))
shapiro.test(residuals(final_bc.lm_num))
# Repeat the heteroscedasticity check.
bptest(final_bc.lm_num)
# Diagnostic plots for the transformed fit.
par(mfrow = c(2, 2))
plot(final_bc.lm_num)
################################################
############    CONTOUR PLOT    ################
################################################
# Visualise the fitted response surface over a mass x distance grid.
# (pid::contourPlot(final.lm_num, N = 25) was a rejected alternative.)
# Build the full 200 x 200 factorial grid of predictor values.
prediction_grid <- expand.grid(
  mass = seq(40, 110, length.out = 200),
  distance = seq(2, 6, length.out = 200)
)
# Predict the measurement at every grid point and attach it.
predictions <- predict(final.lm_num, prediction_grid)
prediction_grid$measurement <- predictions
# Keep the original variable name for any downstream use.
new_data <- prediction_grid
# Filled raster with white contour lines every 20 units.
contour_plot <- ggplot(new_data, aes(mass, distance, z = measurement)) +
  geom_raster(aes(fill = measurement)) +
  geom_contour(colour = "white", binwidth = 20) +
  labs(title = 'Contour Plot for Mass and Distance',
       fill = 'Measurement') +
  xlab('Mass') +
  ylab('Distance')
|
a1f77136cfb9c1dc41210743a2eaf25a9f0cbeda
|
acabe441d5bd5391ff0812169275c67128978c39
|
/tests/testthat/test_validate_templates.R
|
9434c340227e28e464f509eaeff43f5c6eb26a2f
|
[
"MIT"
] |
permissive
|
Ashley-LW/EMLassemblyline
|
65d448ce6ee760f06904326ca2f3b9f4e475a85e
|
a37bc32c1feffa4f8a5ae88f158457fd05d4a86e
|
refs/heads/master
| 2022-12-10T17:30:00.850619
| 2020-09-08T23:02:38
| 2020-09-08T23:02:38
| 292,932,246
| 0
| 0
|
MIT
| 2020-09-04T19:38:35
| 2020-09-04T19:38:34
| null |
UTF-8
|
R
| false
| false
| 13,450
|
r
|
test_validate_templates.R
|
context('Validate templates')
library(EMLassemblyline)
# abstract --------------------------------------------------------------------
testthat::test_that("abstract", {
  # Build the template/data argument list from the bundled example package.
  args <- template_arguments(
    path = system.file(
      '/examples/pkg_260/metadata_templates',
      package = 'EMLassemblyline'),
    data.path = system.file(
      '/examples/pkg_260/data_objects',
      package = 'EMLassemblyline'),
    data.table = c("decomp.csv", "nitrogen.csv"),
    other.entity = c("ancillary_data.zip", "processing_and_analysis.R"))$x
  # Removing the abstract template should produce a recommendation warning.
  args$template$abstract.txt <- NULL
  expect_warning(
    validate_templates("make_eml", args),
    regexp = "An abstract is recommended.")
})
# attributes.txt --------------------------------------------------------------
# Exercises validate_templates() checks on the table-attributes templates:
# presence, attributeName completeness/naming, definitions, classes, units,
# datetime format strings, and missing-value codes. Each sub-case copies the
# pristine argument list `x` into `x1`, corrupts one thing, and asserts the
# expected error/warning.
testthat::test_that("attributes.txt", {
  # Parameterize
  # NOTE(review): attr_tmp is assigned but not used in this test — confirm
  # whether it can be dropped.
  attr_tmp <- read_template_attributes()
  x <- template_arguments(
    path = system.file(
      '/examples/pkg_260/metadata_templates',
      package = 'EMLassemblyline'),
    data.path = system.file(
      '/examples/pkg_260/data_objects',
      package = 'EMLassemblyline'),
    data.table = c("decomp.csv", "nitrogen.csv"),
    other.entity = c("ancillary_data.zip", "processing_and_analysis.R"))$x
  # Baseline: unmodified templates validate cleanly and pass through unchanged.
  x1 <- x
  expect_equivalent(validate_templates("make_eml", x1), x1)
  # attributes.txt - attributes.txt should be present for each data table
  x1 <- x
  x1$template$attributes_decomp.txt <- NULL
  expect_warning(
    validate_templates("make_eml", x1),
    regexp = "is missing attributes metadata.")
  # attributeName - All table columns are listed as attributeName
  x1 <- x
  x1$template$attributes_decomp.txt$content <- x1$template$attributes_decomp.txt$content[1:2, ]
  x1$template$attributes_nitrogen.txt$content <- x1$template$attributes_nitrogen.txt$content[1:2, ]
  expect_error(validate_templates("make_eml", x1))
  # attributeName - Names follow best practices
  # (inject spaces and '%' into the column names of one table)
  x1 <- x
  n <- stringr::str_replace(names(x1$data.table$decomp.csv$content), "_", " ")
  n <- stringr::str_replace(n, "t", "%")
  names(x1$data.table$decomp.csv$content) <- n
  x1$template$attributes_decomp.txt$content$attributeName <- n
  expect_warning(validate_templates("make_eml", x1))
  # definition- Each attribute has a definition
  x1 <- x
  x1$template$attributes_decomp.txt$content$attributeDefinition[1] <- ""
  x1$template$attributes_nitrogen.txt$content$attributeDefinition[1] <- ""
  expect_error(validate_templates("make_eml", x1))
  # class - Each attribute has a class
  x1 <- x
  x1$template$attributes_decomp.txt$content$class[1] <- ""
  x1$template$attributes_nitrogen.txt$content$class[1] <- ""
  expect_error(validate_templates("make_eml", x1))
  # class - Each class is numeric, Date, character, or categorical
  x1 <- x
  x1$template$attributes_decomp.txt$content$class[1] <- "dateagorical"
  x1$template$attributes_nitrogen.txt$content$class[1] <- "numerecter"
  expect_error(validate_templates("make_eml", x1))
  # class - Each Date class has a dateTimeformatString
  x1 <- x
  x1$template$attributes_decomp.txt$content$dateTimeFormatString[
    tolower(x1$template$attributes_decomp.txt$content$class) == "date"
    ] <- ""
  expect_error(validate_templates("make_eml", x1))
  # class - Attributes specified by the user as numeric should contain no
  # characters other than listed under missingValueCode of the table
  # attributes template.
  # Expected recovery: validate_templates() downgrades the class to
  # "character" and blanks the unit.
  x1 <- x
  use_i <- x1$template$attributes_decomp.txt$content$class == "numeric"
  x1$data.table$decomp.csv$content[[
    x1$template$attributes_decomp.txt$content$attributeName[use_i]
    ]][1:5] <- "non_numeric_values"
  expect_warning(validate_templates("make_eml", x1))
  x1 <- suppressWarnings(validate_templates("make_eml", x1))
  expect_true(
    x1$template$attributes_decomp.txt$content$class[use_i] == "character")
  expect_true(
    x1$template$attributes_decomp.txt$content$unit[use_i] == "")
  # Same recovery check across every numeric column of the second table.
  x1 <- x
  use_i <- x1$template$attributes_nitrogen.txt$content$class == "numeric"
  for (i in which(use_i)) {
    x1$data.table$nitrogen.csv$content[[
      x1$template$attributes_nitrogen.txt$content$attributeName[i]
      ]][1:5] <- "non_numeric_values"
  }
  expect_warning(validate_templates("make_eml", x1))
  x1 <- suppressWarnings(validate_templates("make_eml", x1))
  for (i in which(use_i)) {
    expect_true(
      x1$template$attributes_nitrogen.txt$content$class[i] == "character")
    expect_true(
      x1$template$attributes_nitrogen.txt$content$unit[i] == "")
  }
  # unit - Numeric classed attributes have units
  x1 <- x
  x1$template$attributes_decomp.txt$content$unit[6] <- ""
  expect_error(validate_templates("make_eml", x1))
  # unit - Units should be from the dictionary or defined in custom_units.txt
  x1 <- x
  x1$template$attributes_nitrogen.txt$content$unit[5] <- "an_undefined_unit"
  x1$template$attributes_nitrogen.txt$content$unit[6] <- "another_undefined_unit"
  expect_error(validate_templates("make_eml", x1))
  # ... but defining those units in custom_units.txt makes them valid again.
  x1 <- x
  x1$template$custom_units.txt$content[nrow(x1$template$custom_units.txt$content)+1, ] <- c(
    "an_undefined_unit", "of some type", "with some parent SI", "a multiplier",
    "and a description")
  x1$template$custom_units.txt$content[nrow(x1$template$custom_units.txt$content)+1, ] <- c(
    "another_undefined_unit", "of some type", "with some parent SI",
    "a multiplier", "and a description")
  expect_equivalent(validate_templates("make_eml", x1), x1)
  # dateTimeFormatString- Remaining dateTimeFormatString prompts have been removed
  x1 <- x
  x1$template$attributes_decomp.txt$content$dateTimeFormatString[1] <-
    "!Add datetime specifier here!"
  x1$template$attributes_nitrogen.txt$content$dateTimeFormatString[1] <-
    "!Add datetime specifier here!"
  expect_error(validate_templates("make_eml", x1))
  # missingValueCode - Each missingValueCode has a missingValueCodeExplanation
  x1 <- x
  x1$template$attributes_decomp.txt$content$missingValueCodeExplanation[1] <- ""
  x1$template$attributes_nitrogen.txt$content$missingValueCodeExplanation[1] <- ""
  expect_error(validate_templates("make_eml", x1))
  # missingValueCode - Each missingValueCode only has 1 entry per column
  x1 <- x
  x1$template$attributes_decomp.txt$content$missingValueCode[1] <- "NA, -99999"
  x1$template$attributes_nitrogen.txt$content$missingValueCode[1] <- "NA -99999"
  expect_error(validate_templates("make_eml", x1))
  # missingValueCodeExplanation - Each missingValueCodeExplanation has a
  # non-blank missingValueCode
  x1 <- x
  x1$template$attributes_decomp.txt$content$missingValueCode[1] <- ""
  x1$template$attributes_nitrogen.txt$content$missingValueCode[1] <- ""
  expect_error(validate_templates("make_eml", x1))
})
# catvars.txt -----------------------------------------------------------------
# Checks on the categorical-variables templates: they must exist when any
# attribute is classed "categorical", and every code must carry a definition.
testthat::test_that("Categorical variables", {
  # Parameterize
  attr_tmp <- read_template_attributes()
  x <- template_arguments(
    path = system.file(
      '/examples/pkg_260/metadata_templates',
      package = 'EMLassemblyline'),
    data.path = system.file(
      '/examples/pkg_260/data_objects',
      package = 'EMLassemblyline'),
    data.table = c("decomp.csv", "nitrogen.csv"),
    other.entity = c("ancillary_data.zip", "processing_and_analysis.R"))$x
  # Baseline: pristine templates validate and pass through unchanged.
  x1 <- x
  expect_equal(validate_templates("make_eml", x1), x1)
  # TODO: catvars.txt - Required when table attributes are listed as
  # "categorical"
  x1 <- x
  x1$template$attributes_decomp.txt$content$class[1] <- "categorical"
  x1$template$attributes_nitrogen.txt$content$class[1] <- "categorical"
  x1$template$catvars_decomp.txt <- NULL
  x1$template$catvars_nitrogen.txt <- NULL
  expect_error(validate_templates("make_eml", x1))
  # TODO: codes - All codes require definition
  # Locate every catvars_* template by the regex stored in the template
  # attributes table.
  use_i <- seq(
    length(names(x$template)))[
      stringr::str_detect(
        names(x$template),
        attr_tmp$regexpr[attr_tmp$template_name == "catvars"])]
  x1 <- x
  # Blank two definitions per catvars template.
  # NOTE(review): runif() here is unseeded, so the blanked rows differ
  # between runs — consider set.seed() for reproducibility.
  for (i in use_i) {
    x1$template[[i]]$content$definition[round(runif(2, 1, nrow(x1$template[[i]]$content)))] <- ""
  }
  expect_error(validate_templates("make_eml", x1))
})
# geographic_coverage ---------------------------------------------------------
# Checks on the geographic coverage template: complete bounding coordinates
# and decimal-degree formatting.
testthat::test_that("geographic_coverage", {
  # Parameterize
  # NOTE(review): attr_tmp is assigned but not used in this test — confirm
  # whether it can be dropped.
  attr_tmp <- read_template_attributes()
  x <- template_arguments(
    system.file(
      '/examples/pkg_260/metadata_templates',
      package = 'EMLassemblyline'))$x
  # Baseline: pristine template validates and passes through unchanged.
  x1 <- x
  expect_equal(validate_templates("make_eml", x1), x1)
  # TODO: geographicDescription - Each entry requires a north, south, east, and west
  # bounding coordinate
  x1 <- x
  x1$template$geographic_coverage.txt$content$northBoundingCoordinate[1] <- ""
  x1$template$geographic_coverage.txt$content$southBoundingCoordinate[2] <- ""
  expect_error(validate_templates("make_eml", x1))
  # TODO: coordinates - Decimal degree is expected
  x1 <- x
  x1$template$geographic_coverage.txt$content$northBoundingCoordinate[1] <- "45 23'"
  x1$template$geographic_coverage.txt$content$southBoundingCoordinate[2] <- "23 degrees 23 minutes"
  expect_error(validate_templates("make_eml", x1))
})
# intellectual_rights ---------------------------------------------------------
testthat::test_that("intellectual_rights", {
  # Build the template/data argument list from the bundled example package.
  args <- template_arguments(
    path = system.file(
      '/examples/pkg_260/metadata_templates',
      package = 'EMLassemblyline'),
    data.path = system.file(
      '/examples/pkg_260/data_objects',
      package = 'EMLassemblyline'),
    data.table = c("decomp.csv", "nitrogen.csv"),
    other.entity = c("ancillary_data.zip", "processing_and_analysis.R"))$x
  # Removing the license template should produce a recommendation warning.
  args$template$intellectual_rights.txt <- NULL
  expect_warning(
    validate_templates("make_eml", args),
    regexp = "An intellectual rights license is recommended.")
})
# keywords --------------------------------------------------------------------
testthat::test_that("keywords", {
  # Build the template/data argument list from the bundled example package.
  args <- template_arguments(
    path = system.file(
      '/examples/pkg_260/metadata_templates',
      package = 'EMLassemblyline'),
    data.path = system.file(
      '/examples/pkg_260/data_objects',
      package = 'EMLassemblyline'),
    data.table = c("decomp.csv", "nitrogen.csv"),
    other.entity = c("ancillary_data.zip", "processing_and_analysis.R"))$x
  # Removing the keywords template should produce a recommendation warning.
  args$template$keywords.txt <- NULL
  expect_warning(
    validate_templates("make_eml", args),
    regexp = "Keywords are recommended.")
})
# methods ---------------------------------------------------------------------
testthat::test_that("methods", {
  # Build the template/data argument list from the bundled example package.
  args <- template_arguments(
    path = system.file(
      '/examples/pkg_260/metadata_templates',
      package = 'EMLassemblyline'),
    data.path = system.file(
      '/examples/pkg_260/data_objects',
      package = 'EMLassemblyline'),
    data.table = c("decomp.csv", "nitrogen.csv"),
    other.entity = c("ancillary_data.zip", "processing_and_analysis.R"))$x
  # Removing the methods template should produce a recommendation warning.
  args$template$methods.txt <- NULL
  expect_warning(
    validate_templates("make_eml", args),
    regexp = "Methods are recommended.")
})
# personnel -------------------------------------------------------------------
# Checks on the personnel template: it must exist, list at least one creator
# and one contact, and give every person a role.
testthat::test_that("personnel", {
  # Parameterize
  # NOTE(review): attr_tmp is assigned but not used in this test — confirm
  # whether it can be dropped.
  attr_tmp <- read_template_attributes()
  x <- template_arguments(
    system.file(
      '/examples/pkg_260/metadata_templates',
      package = 'EMLassemblyline'))$x
  # Baseline: pristine template validates and passes through unchanged.
  x1 <- x
  expect_equal(validate_templates("make_eml", x1), x1)
  # Missing
  x1 <- x
  x1$template$personnel.txt <- NULL
  expect_warning(
    validate_templates("make_eml", x1),
    regexp = "Personnel are required \\(i.e. creator, contact, etc.\\).")
  # role - At least one creator and contact is listed
  # (rename the role to a nonsense value so neither pattern matches)
  x1 <- x
  x1$template$personnel.txt$content$role[
    stringr::str_detect(
      x1$template$personnel.txt$content$role,
      "contact")] <- "creontact"
  expect_warning(
    validate_templates("make_eml", x1),
    regexp = "A contact is required.")
  x1 <- x
  x1$template$personnel.txt$content$role[
    stringr::str_detect(
      x1$template$personnel.txt$content$role,
      "creator")] <- "creontact"
  expect_warning(
    validate_templates("make_eml", x1),
    regexp = "A creator is required.")
  # role - All personnel have roles
  x1 <- x
  x1$template$personnel.txt$content$role[
    stringr::str_detect(
      x1$template$personnel.txt$content$role,
      "PI|pi")] <- ""
  expect_warning(
    validate_templates("make_eml", x1),
    regexp = paste0(
      "(Each person must have a 'role'.)|(A principal investigator and ",
      "project info are recommended.)"))
})
# remove_empty_templates() ----------------------------------------------------
# remove_empty_templates() should drop any template whose content is NULL,
# and drop the template element entirely when everything is empty.
testthat::test_that("remove_empty_templates()", {
  x <- template_arguments(
    path = system.file(
      '/examples/templates',
      package = 'EMLassemblyline'))$x
  # Emptying any single template's content should cause that template to be
  # removed.  Fix: iterate with seq_along() rather than 1:length(x$template),
  # which would yield c(1, 0) and mis-iterate if the list were ever empty.
  for (i in seq_along(x$template)) {
    x1 <- x
    n <- names(x1$template[i])
    x1$template[[i]]$content <- NULL
    x1 <- remove_empty_templates(x1)
    expect_true(!any(stringr::str_detect(names(x1$template), n)))
  }
  # A fully empty argument set ends with no template element at all.
  # (Idiom fix: TRUE instead of the reassignable shorthand T.)
  x <- template_arguments(empty = TRUE)$x
  x <- remove_empty_templates(x)
  expect_true(is.null(x$template))
})
|
8ef9d630a1bb2a8a5b8336d146080c3919210cd3
|
8223dbd59aa177f0d7e5c91934a4aa83ef1f1e89
|
/caret_example.R
|
438f5181cd04eb155ae837b20230303a64f22f5c
|
[] |
no_license
|
Jong-Min-Moon/GMC-CSVM
|
352f21b3f2d3ea92168cfa098ff46550c1c8bed6
|
0bbe3e3c3a3f26ac34182f7dfbfdc4c815624154
|
refs/heads/master
| 2023-05-15T08:59:52.071397
| 2021-06-09T19:23:03
| 2021-06-09T19:23:03
| 347,348,245
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,897
|
r
|
caret_example.R
|
getwd()
# NOTE(review): caret functions (createDataPartition, train, trainControl,
# confusionMatrix) are used below but caret is never loaded in this file —
# presumably wsvm.r loads it; confirm.
source("wsvm.r")
## 1. prepare data
set.seed(1)
data(iris)
# Keep only two species so the problem is binary.
iris.binary <- iris[iris$Species != "setosa", ]
# Partition into training (75 %) and test (25 %) sets.
idx.training <- createDataPartition(iris.binary$Species, p = .75, list = FALSE)
training <- iris.binary[idx.training, ]
testing <- iris.binary[-idx.training, ]
# Build a type vector flagging each training sample as majority ("maj"),
# minority ("min") or synthetic ("syn").
label <- training[, ncol(training)]
# Fix: use seq_along() instead of 1:length(label) (safe-sequence idiom).
index.full <- seq_along(label)
type <- seq_along(label)
index.syn <- sample(index.full, 30)
type[label == "virginica"] <- "maj"
type[label == "versicolor"] <- "min"
type[index.syn] <- "syn"
# Encode the factor label as -1 (versicolor) / +1 (virginica) for SVM fitting.
y.values <- -1 * (label == "versicolor") + 1 * (label == "virginica")
y <- as.factor(y.values)
# Bind the type vector to the predictors.
x <- cbind(training[, -ncol(training)], type)
##2. start hyperparameter tuning
set.seed(2021)
# 5-fold cross-validation, repeated 10 times.
fitControl <- trainControl(method = "repeatedcv",
                           number = 5, #5-fold cv
                           repeats = 10) # repeated 10 times
# Tune the custom weighted-SVM method (weighted.svm is presumably defined in
# wsvm.r) over a 20-point tuning grid, with predictors centred and scaled.
cv.fit <- train(x, y,
                 method = weighted.svm,
                 preProc = c("center", "scale"),
                 tuneLength = 20,
                 trControl = fitControl,
                 three.weights = list(maj = 1, min = 1, syn = 1)
)
cv.fit
# Evaluate the final model on the held-out test data, applying the same
# centre/scale pre-processing that was fitted on the training set.
final.model <- cv.fit$finalModel
final.model.scaler <- cv.fit$preProcess
testing.scaled <- predict(final.model.scaler , testing)
testing.X <- testing.scaled[,-ncol(testing.scaled)]
testing.Y <- testing.scaled[,ncol(testing.scaled)]
# Recode the test labels with the same -1/+1 scheme used for training.
testing.Y <- as.factor(-1 * (testing.Y == "versicolor") + 1 * (testing.Y == "virginica"))
pred <-wsvm.predict(testing.X, final.model)$predicted.Y
testing.Y
confusionMatrix(pred, testing.Y) #confusion matrix
|
e60b30b8977b4ac7905cc55067341663a84e4157
|
ee3c321939e7d8899fed042575057c368b6c59e3
|
/Project/Code/dus_model_building.R
|
4a3c83c90fe895d2e710bbbfc7d79d447ec7a56f
|
[] |
no_license
|
KBicks/CMEECourseWork
|
f30e29440077e95c5c87836a2ecc8194d4ee658a
|
8cff4651f1ce27ff731137255646e3baccf11387
|
refs/heads/master
| 2020-03-31T10:54:53.862344
| 2019-08-29T12:04:49
| 2019-08-29T12:04:49
| 152,155,192
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 34,180
|
r
|
dus_model_building.R
|
#!/usr/bin/env Rscript
# Author: Katie Bickerton k.bickerton18@imperial.ac.uk
# Script: dus_model_building.R
# Desc: Building and comparing models for various shark response variables.
# Arguments: None
# Date: 20 May 2019
# NOTE(review): rm(list=ls()) in a script is discouraged; prefer a fresh
# session.  Likewise require() fails silently — library() would fail fast.
rm(list=ls())
graphics.off()
# required packages
require(tidyverse)
require(lme4)
require(car)
require(MASS)
require(merTools)
require(MuMIn)
# load datasets: shark attributes, per-day movements, and detection tables
# (25 m threshold and day/night split); the 10 m and 50 m variants are
# commented out.
att <- read.csv("../Data/shark_attributes.csv", header = TRUE)
mov <- read.csv("../Data/movements_perday.csv", header = TRUE)
#det10 <- read.csv("../Data/detection_perday10.csv", header = TRUE)
det25 <- read.csv("../Data/detection_perday25.csv", header = TRUE)
#det50 <- read.csv("../Data/detection_perday50.csv", header = TRUE)
detdn <- read.csv("../Data/detection_daynight.csv", header = TRUE)
# Return the most frequent value in `x` (the statistical mode).
# Ties are broken in favour of the value that appears first in `x`.
factor_mode <- function(x){
  distinct_vals <- unique(x)
  counts <- tabulate(match(x, distinct_vals))
  distinct_vals[which.max(counts)]
}
# Normalise attribute column names: "day"/"night" -> "Day"/"Night" so the
# paste0() lookups below match.
colnames(att) <- gsub("day", "Day", colnames(att))
colnames(att) <- gsub("night", "Night", colnames(att))
# Reshape wide per-season columns into long format: one row per shark per
# season, with generic MCP_area / core_KUD / Network_Density / RI columns.
# NOTE(review): grows `seasons` by rbind in a loop — fine for 4 seasons, but
# bind_rows over a list would scale better.
season_names <- c("Spring", "Summer", "Autumn","Winter")
seasons <- data.frame()
for(x in season_names){
  a <- att %>% dplyr::select(Transmitter.Name, Species, Sex, FL, Migratory.Status,
                             paste0("MCP_area_season_",x), paste0("core_KUD_season_",x),
                             paste0("Network_Density_", x), paste0(x,"RI"))
  # Columns 6:9 are the season-specific metrics selected above.
  colnames(a)[6:9] <- c("MCP_area","core_KUD","Network_Density","RI")
  a$Season <- rep(x, length(a$Transmitter.Name))
  seasons <- rbind(seasons, a)
}
# subset for time of day
# Same long-format reshape as above, but over the Day/Night column variants.
day_times <- c("Day", "Night")
daynight <- data.frame()
for(x in day_times){
  a <- att %>% dplyr::select(Transmitter.Name, Species, Sex, FL, Migratory.Status,
                             paste0("MCP_area_",x), paste0("core_KUD_",x),
                             paste0("Network_Density_", x), paste0(x,"RI"))
  # Columns 6:9 are the time-of-day-specific metrics selected above.
  colnames(a)[6:9] <- c("MCP_area","core_KUD","Network_Density","RI")
  a$Daynight <- rep(x, length(a$Transmitter.Name))
  daynight <- rbind(daynight, a)
}
# subset for season x time of day for network analysis
# Only Network_Density exists at this resolution, so just that one metric is
# reshaped; the combined "Day_Spring"-style label is then split back into
# separate daynight and Season columns.
time_names <- c("Day_Spring", "Day_Summer", "Day_Autumn","Day_Winter", "Night_Spring", "Night_Summer", "Night_Autumn","Night_Winter")
times <- data.frame()
for(x in time_names){
  a <- att %>% dplyr::select(Transmitter.Name, Species, Sex, FL, Migratory.Status,
                             paste0("Network_Density_", x))
  colnames(a)[6] <- "Network_Density"
  a$Time <- rep(x, length(a$Transmitter.Name))
  times <- rbind(times, a)
}
# NOTE(review): column is named "daynight" here but "Daynight" in the
# day/night subset above — confirm downstream code expects this.
times <- separate(times, Time, c("daynight","Season"), sep = "_")
# Restrict every long table to dusky sharks only.
dus_seasons <- seasons[seasons$Species == "Dusky",]
dus_daynight <- daynight[daynight$Species == "Dusky",]
dus_times <- times[times$Species == "Dusky",]
##### CONVEX POLYGONS MODEL - ALL #####
# ## Season subset
# # removing missing values from dataframe and setting to own variable
# dus_MCP_se <- dus_seasons
# dus_MCP_se <- dus_MCP_se[dus_MCP_se$Sex != "U",]
# dus_MCP_se <- dus_MCP_se[!is.na(dus_MCP_se$Migratory.Status),]
# dus_MCP_se <- dus_MCP_se[!is.na(dus_MCP_se$MCP_area),]
# dus_MCP_se$Season <- factor(dus_MCP_se$Season, levels = c("Summer","Autumn", "Winter", "Spring"))
# # deciding distribution for model
# par(mfrow = c(2,2))
# # normal distribution
# qqp(dus_MCP_se$MCP_area, "norm")
# # log normal distribution
# qqp(dus_MCP_se$MCP_area, "lnorm")
# # negative binomial
# # generates required parameter estimates
# nbinom <- fitdistr(round(dus_MCP_se$MCP_area), "Negative Binomial")
# qqp(dus_MCP_se$MCP_area, "nbinom", size = nbinom$estimate[[1]], mu = nbinom$estimate[[2]])
# # poisson
# poisson <- fitdistr(round(dus_MCP_se$MCP_area), "Poisson")
# qqp(dus_MCP_se$MCP_area, "pois", lambda = poisson$estimate)
# # best fit is normal - none are brilliant fits - checked against reduced seasons
# # Linear mixed models
# # null model
# MCP_glmm_null <- lmer(MCP_area ~ 1 + (1|Transmitter.Name), data = dus_MCP_se, REML = FALSE)
# # summary(MCP_glmm_null)
# MCP_lm_null <- lm(MCP_area ~1, data = dus_MCP_se)
# # anova(MCP_glmm_null, MCP_lm_null)
# # single variable models
# MCP_glmm_sex <- lmer(MCP_area ~ Sex + (1|Transmitter.Name), data = dus_MCP_se, REML = FALSE)
# # summary(MCP_glmm_sex)
# # anova(MCP_glmm_null, MCP_glmm_sex)
# MCP_glmm_sea <- lmer(MCP_area ~ Season + (1|Transmitter.Name), data = dus_MCP_se, REML = FALSE)
# # summary(MCP_glmm_sea)
# # anova(MCP_glmm_null, MCP_glmm_sea)
# MCP_glmm_sp <- lmer(MCP_area ~ Migratory.Status + (1|Transmitter.Name), data = dus_MCP_se, REML = FALSE)
# # summary(MCP_glmm_sp)
# # anova(MCP_glmm_null, MCP_glmm_sp)
# ## Time of day subset
# # removing missing values from dataframe and setting to own variable
# dus_MCP_dn <- dus_daynight
# dus_MCP_dn <- dus_MCP_dn[dus_MCP_dn$Sex != "U",]
# dus_MCP_dn <- dus_MCP_dn[!is.na(dus_MCP_dn$Migratory.Status),]
# dus_MCP_dn <- dus_MCP_dn[!is.na(dus_MCP_dn$MCP_area),]
# # null model
# MCP_glmm_null <- lmer(MCP_area ~ 1 + (1|Transmitter.Name), data = dus_MCP_dn, REML = FALSE)
# # summary(MCP_glmm_null)
# MCP_lm_null <- lm(MCP_area ~1, data = dus_MCP_dn)
# # anova(MCP_glmm_null, MCP_lm_null)
# # single variable models
# MCP_glmm_sex <- lmer(MCP_area ~ Sex + (1|Transmitter.Name), data = dus_MCP_dn, REML = FALSE)
# # summary(MCP_glmm_sex)
# # anova(MCP_glmm_null, MCP_glmm_sex)
# MCP_glmm_dn <- lmer(MCP_area ~ Daynight + (1|Transmitter.Name), data = dus_MCP_dn, REML = FALSE)
# # summary(MCP_glmm_dn)
# # anova(MCP_glmm_null, MCP_glmm_dn)
# MCP_glmm_mig <- lmer(MCP_area ~ Migratory.Status + (1|Transmitter.Name), data = dus_MCP_dn, REML = FALSE)
# # summary(MCP_glmm_mig)
# # anova(MCP_glmm_null, MCP_glmm_mig)
##### CORE KERNEL DENSITY MODEL - ALL #####
# Seasonal core-KUD models: drop unknown-sex sharks and rows missing
# migratory status or core_KUD, then fit Gamma GLMMs with shark identity
# (Transmitter.Name) as a random intercept.
dus_KUD_se <- dus_seasons
dus_KUD_se <- dus_KUD_se[dus_KUD_se$Sex != "U",]
dus_KUD_se <- dus_KUD_se[!is.na(dus_KUD_se$Migratory.Status),]
dus_KUD_se <- dus_KUD_se[!is.na(dus_KUD_se$core_KUD),]
# Reorder so Summer is the reference level.
dus_KUD_se$Season <- factor(dus_KUD_se$Season, levels = c("Summer",
                                                          "Autumn", "Winter", "Spring"))
# # deciding distribution for model
# par(mfrow = c(2,3))
# # normal distribution
# qqp(dus_KUD_se$core_KUD, "norm")
# # log normal distribution
# qqp(dus_KUD_se$core_KUD, "lnorm")
# # negative binomial
# # generates required parameter estimates
# nbinom <- fitdistr(round(dus_KUD_se$core_KUD), "Negative Binomial")
# qqp(dus_KUD_se$core_KUD, "nbinom", size = nbinom$estimate[[1]],
#mu = nbinom$estimate[[2]])
# # poisson
# poisson <- fitdistr(round(dus_KUD_se$core_KUD), "Poisson")
# qqp(dus_KUD_se$core_KUD, "pois", lambda = poisson$estimate)
# # gamma
# gamma <- fitdistr(dus_KUD_se$core_KUD, "gamma")
# qqp(dus_KUD_se$core_KUD, "gamma", shape = gamma$estimate[[1]],
#rate = gamma$estimate[[2]])
# best fit is gamma - normal distribution also close
# Null model: random intercept only, compared against a fixed-effects-only lm.
KUD_glmm_null <- glmer(core_KUD ~ 1 + (1|Transmitter.Name), data = dus_KUD_se,
                       family = Gamma)
# summary(KUD_glmm_null)
KUD_lm_null <- lm(core_KUD ~ 1, data = dus_KUD_se)
# anova(KUD_glmm_null, KUD_lm_null)
# Single-predictor models: sex, season, and migratory status.
KUD_glmm_sx <- glmer(core_KUD ~ Sex + (1|Transmitter.Name), data = dus_KUD_se,
                     family = Gamma)
# summary(KUD_glmm_sx)
# anova(KUD_glmm_null, KUD_glmm_sx)
KUD_glmm_se <- glmer(core_KUD ~ Season + (1|Transmitter.Name), data = dus_KUD_se,
                     family = Gamma)
# summary(KUD_glmm_se)
# anova(KUD_glmm_null, KUD_glmm_se)
KUD_glmm_mig <- glmer(core_KUD ~ Migratory.Status + (1|Transmitter.Name),
                      data = dus_KUD_se, family = Gamma)
# summary(KUD_glmm_mig)
# anova(KUD_glmm_null, KUD_glmm_mig)
# AIC comparison across the seasonal candidate models.
dus_kud_aic_se <- AIC(KUD_glmm_null, KUD_glmm_sx, KUD_glmm_se, KUD_glmm_mig)
## Time of day subset
# removing missing values from dataframe and setting to own variable
dus_KUD_dn <- dus_daynight
dus_KUD_dn <- dus_KUD_dn[dus_KUD_dn$Sex != "U",]
dus_KUD_dn <- dus_KUD_dn[!is.na(dus_KUD_dn$Migratory.Status),]
dus_KUD_dn <- dus_KUD_dn[!is.na(dus_KUD_dn$core_KUD),]
# Linear mixed models
# null model
KUD_glmm_null <- glmer(core_KUD ~ 1 + (1|Transmitter.Name), data = dus_KUD_dn,
family = Gamma)
# summary(KUD_glmm_null)
KUD_lm_null <- lm(core_KUD ~ 1, data = dus_KUD_dn)
# anova(KUD_glmm_null, KUD_lm_null)
# single variable models
KUD_glmm_sx <- glmer(core_KUD ~ Sex + (1|Transmitter.Name), data = dus_KUD_dn,
family = Gamma)
# summary(KUD_glmm_sx)
# anova(KUD_glmm_null, KUD_glmm_sx)
KUD_glmm_dn <- glmer(core_KUD ~ Daynight + (1|Transmitter.Name), data = dus_KUD_dn,
family = Gamma)
# summary(KUD_glmm_dn)
# anova(KUD_glmm_null, KUD_glmm_dn)
KUD_glmm_mig <- glmer(core_KUD ~ Migratory.Status + (1|Transmitter.Name),
data = dus_KUD_dn, family = Gamma)
# summary(KUD_glmm_mig)
# anova(KUD_glmm_null, KUD_glmm_mig)
dus_kud_aic_dn <- AIC(KUD_glmm_null, KUD_glmm_sx, KUD_glmm_dn, KUD_glmm_mig)
##### NETWORK DENSITY MODELS - ALL #####
# GLMMs of acoustic-network density. The response is modelled with a
# gaussian family and log link (lognormal-like, per the commented-out
# distribution checks below), with a per-individual random intercept.
# seasons species subsets
dus_net_se <- dus_seasons
dus_net_se <- dus_net_se[dus_net_se$Sex != "U",]
dus_net_se <- dus_net_se[!is.na(dus_net_se$Migratory.Status),]
dus_net_se <- dus_net_se[!is.na(dus_net_se$Network_Density),]
dus_net_se$Season <- factor(dus_net_se$Season,
levels = c("Summer","Autumn", "Winter", "Spring"))
# deciding distribution for model
# par(mfrow = c(1,3))
# # normal distribution
# qqp(dus_net_se$Network_Density, "norm")
# # log normal distribution
# qqp(dus_net_se$Network_Density, "lnorm")
# # gamma
# gamma <- fitdistr(dus_net_se$Network_Density, "gamma")
# qqp(dus_net_se$Network_Density, "gamma", shape = gamma$estimate[[1]], rate = gamma$estimate[[2]])
# # best fit is log normal
# Linear mixed models
# null model
dus_net_glmm_null <- glmer(Network_Density ~ 1 + (1|Transmitter.Name),
data = dus_net_se, family = gaussian(link = "log"))
# summary(dus_net_glmm_null)
# r.squaredGLMM(dus_net_glmm_null)
dus_net_lm_null <- lm(log(Network_Density) ~ 1, data = dus_net_se)
# anova(dus_net_glmm_null, dus_net_lm_null)
# single variable models
dus_net_glmm_sx <- glmer(Network_Density ~ Sex + (1|Transmitter.Name),
data = dus_net_se, family = gaussian(link = "log"))
# summary(dus_net_glmm_sx)
# anova(dus_net_glmm_null, dus_net_glmm_sx)
# r.squaredGLMM(dus_net_glmm_sx)
dus_net_glmm_se <- glmer(Network_Density ~ Season + (1|Transmitter.Name),
data = dus_net_se, family = gaussian(link = "log"))
# summary(dus_net_glmm_se)
# anova(dus_net_glmm_null, dus_net_glmm_se)
# r.squaredGLMM(dus_net_glmm_se)
dus_net_glmm_mig <- glmer(Network_Density ~ Migratory.Status + (1|Transmitter.Name),
data = dus_net_se, family = gaussian(link = "log"))
# summary(dus_net_glmm_mig)
# anova(dus_net_glmm_null, dus_net_glmm_mig)
# r.squaredGLMM(dus_net_glmm_mig)
# NOTE(review): unlike the other AIC tables in this script, the null model
# is omitted from this comparison - confirm intentional.
dus_net_aic_se <- AIC(dus_net_glmm_sx, dus_net_glmm_se, dus_net_glmm_mig)
### Model prediction - seasonal variation
# # generate test data
# n <- c(as.character(factor_mode(dus_net_se$Transmitter.Name)),
#        as.character(factor_mode(dus_net_se$Migratory.Status)),
#        as.character(factor_mode(dus_net_se$Sex)),
#        mean(dus_net_se$FL),
#        as.character(factor_mode(dus_net_se$Migratory.Status)),
#        mean(dus_net_se$MCP_area), mean(dus_net_se$core_KUD),
#        mean(dus_net_se$Network_Density), mean(dus_net_se$RI))
# dus_net_se_newdata <- data.frame(cbind(rbind(n,n,n,n), as.character(unique(dus_net_se$Season))))
# colnames(dus_net_se_newdata) <- names(dus_net_se)
# # predicted model values
# PI <- predictInterval(dus_net_glmm_se, newdata = dus_net_se_newdata, level = 0.95,
#                       n.sims = 1000, stat = "median", type = "probability", include.resid.var = TRUE)
# # joined with sample dataset
# dus_net_se_pred <- cbind(dus_net_se_newdata, PI)
# dus_net_se_pred$Season <- factor(dus_net_se_pred$Season, levels = c("Summer","Autumn", "Winter", "Spring"))
# # plot of seasonal trend
# ggplot(dus_net_se, aes(Season, Network_Density, fill = Migratory.Status)) + geom_boxplot() +
#   #geom_line(data = dus_net_se_pred, aes(as.numeric(Season), fit)) +
#   theme_bw() + ylab("Network Density") + xlab("Season")
# ggplot(dus_net_se, aes(Season, Network_Density)) + geom_boxplot() +
#   #geom_line(data = dus_net_se_pred, aes(as.numeric(Season), fit)) +
#   theme_bw() + ylab("Network Density") + xlab("Season")
# time of day subset
dus_net_dn <- dus_daynight
dus_net_dn <- dus_net_dn[dus_net_dn$Sex != "U",]
dus_net_dn <- dus_net_dn[!is.na(dus_net_dn$Migratory.Status),]
dus_net_dn <- dus_net_dn[!is.na(dus_net_dn$Network_Density),]
# null model
dus_net_glmm_null <- glmer(Network_Density ~ 1 + (1|Transmitter.Name),
data = dus_net_dn, family = gaussian(link = "log"))
# summary(dus_net_glmm_null)
# r.squaredGLMM(dus_net_glmm_null)
dus_net_lm_null <- lm(log(Network_Density) ~ 1, data = dus_net_dn)
# anova(dus_net_glmm_null, dus_net_lm_null)
# single variable models
dus_net_glmm_sx <- glmer(Network_Density ~ Sex + (1|Transmitter.Name),
data = dus_net_dn, family = gaussian(link = "log"))
# summary(dus_net_glmm_sx)
# anova(dus_net_glmm_null, dus_net_glmm_sx)
dus_net_glmm_dn <- glmer(Network_Density ~ Daynight + (1|Transmitter.Name),
data = dus_net_dn, family = gaussian(link = "log"))
# summary(dus_net_glmm_dn)
# anova(dus_net_glmm_null, dus_net_glmm_dn)
# r.squaredGLMM(dus_net_glmm_dn)
dus_net_glmm_mig <- glmer(Network_Density ~ Migratory.Status + (1|Transmitter.Name),
data = dus_net_dn, family = gaussian(link = "log"))
# summary(dus_net_glmm_mig)
# anova(dus_net_glmm_null, dus_net_glmm_mig)
dus_net_aic_dn <- AIC(dus_net_glmm_null, dus_net_glmm_sx, dus_net_glmm_dn,
dus_net_glmm_mig)
# ### Model prediction - variation between times of day
# # generate test data
# n <- c(as.character(factor_mode(dus_net_dn$Transmitter.Name)),
#        as.character(factor_mode(dus_net_dn$Sex)),
#        as.character(factor_mode(dus_net_dn$Migratory.Status)),
#        mean(dus_net_dn$FL),
#        as.character(factor_mode(dus_net_dn$Migratory.Status)),
#        mean(dus_net_dn$MCP_area), mean(dus_net_dn$core_KUD),
#        mean(dus_net_dn$Network_Density), mean(dus_net_dn$RI))
# dus_net_dn_newdata <- data.frame(cbind(rbind(n,n), as.character(unique(dus_net_dn$Daynight))))
# colnames(dus_net_dn_newdata) <- c("Transmitter.Name", "Sex", "Migratory.Status", "FL", "Migratory.Status",
#                                   "MCP_area", "core_KUD", "Network_Density",
#                                   "RI", "Daynight")
# # predicted model values
# PI <- predictInterval(dus_net_glmm_dn, newdata = dus_net_dn_newdata, level = 0.95,
#                       n.sims = 1000, stat = "median", type = "probability", include.resid.var = TRUE)
# # joined with sample dataset
# dus_net_dn_pred <- cbind(dus_net_dn_newdata, PI)
# # plot of seasonal trend
# ggplot(dus_net_dn, aes(Daynight, Network_Density)) + geom_boxplot() +
#   #geom_line(data = dus_net_se_pred, aes(as.numeric(Season), fit)) +
#   theme_bw() + ylab("Network Density") + xlab("Time of Day")
# ggplot(dus_net_dn, aes(Daynight, Network_Density, fill = Migratory.Status)) + geom_boxplot() +
#   #geom_line(data = dus_net_se_pred, aes(as.numeric(Season), fit)) +
#   theme_bw() + ylab("Network Density") + xlab("Time of Day")
### Season and time of day combined subsets
# seasons species subsets
net_ti <- dus_times
net_ti <- net_ti[net_ti$Sex != "U",]
net_ti <- net_ti[!is.na(net_ti$Network_Density),]
net_ti <- net_ti[!is.na(net_ti$Migratory.Status),]
net_ti$Season <- factor(net_ti$Season,
levels = c("Summer","Autumn", "Winter", "Spring"))
# Linear mixed models
# null model
net_glmm_null <- glmer(Network_Density ~ 1 + (1|Transmitter.Name), data = net_ti,
family = gaussian(link = "log"))
# summary(net_glmm_null)
# r.squaredGLMM(net_glmm_null)
net_lm_null <- lm(log(Network_Density) ~ 1, data = net_ti)
# anova(net_glmm_null, net_lm_null)
# single variable models
net_glmm_sx <- glmer(Network_Density ~ Sex + (1|Transmitter.Name), data = net_ti,
family = gaussian(link = "log"))
# summary(net_glmm_sx)
# anova(net_glmm_null, net_glmm_sx)
net_glmm_se <- glmer(Network_Density ~ Season + (1|Transmitter.Name), data = net_ti,
family = gaussian(link = "log"))
# summary(net_glmm_se)
# anova(net_glmm_null, net_glmm_se)
# NOTE(review): dus_times appears to use a lowercase 'daynight' column
# (cf. 'Daynight' in dus_daynight) - confirm the column name.
net_glmm_dn <- glmer(Network_Density ~ daynight + (1|Transmitter.Name),
data = net_ti, family = gaussian(link = "log"))
# summary(net_glmm_dn)
# anova(net_glmm_null, net_glmm_dn)
net_glmm_mig <- glmer(Network_Density ~ Migratory.Status + (1|Transmitter.Name),
data = net_ti, family = gaussian(link = "log"))
# summary(net_glmm_mig)
# anova(net_glmm_null, net_glmm_dn)
# additive season + time-of-day model
net_glmm_dnse <- glmer(Network_Density ~ daynight + Season + (1|Transmitter.Name),
data = net_ti, family = gaussian(link = "log"))
# summary(net_glmm_dnse)
# anova(net_glmm_null, net_glmm_dnse)
net_aic_ti <- AIC(net_glmm_null, net_lm_null, net_glmm_sx, net_glmm_se, net_glmm_dn,
net_glmm_dnse, net_glmm_mig)
##### RESIDENCY INDEX MODELS - ALL #####
# GLMMs of residency index (RI), gaussian family with log link, per-individual
# random intercept; seasonal candidate set first, then day/night.
# seasons species subsets
dus_ri_se <- dus_seasons
dus_ri_se <- dus_ri_se[dus_ri_se$Sex != "U",]
dus_ri_se <- dus_ri_se[!is.na(dus_ri_se$Migratory.Status),]
dus_ri_se <- dus_ri_se[!is.na(dus_ri_se$RI),]
dus_ri_se$Season <- factor(dus_ri_se$Season,
levels = c("Summer","Autumn", "Winter", "Spring"))
# # deciding distribution for model
# par(mfrow = c(1,3))
# # normal distribution
# qqp(dus_ri_se$RI, "norm")
# # log normal distribution
# qqp(dus_ri_se$RI, "lnorm")
# # gamma
# gamma <- fitdistr(dus_ri_se$RI, "gamma")
# qqp(dus_ri_se$RI, "gamma", shape = gamma$estimate[[1]],
#rate = gamma$estimate[[2]])
# # best fit is log normal
# Linear mixed models
# null model
dus_ri_glmm_null <- glmer(RI ~ 1 + (1|Transmitter.Name), data = dus_ri_se,
family = gaussian(link = "log"))
# summary(dus_ri_glmm_null)
# r.squaredGLMM(dus_ri_glmm_null)
ri_lm_null <- lm(log(RI) ~ 1, data = dus_ri_se)
# anova(dus_ri_glmm_null, ri_lm_null)
# single variable models
dus_ri_glmm_sx <- glmer(RI ~ Sex + (1|Transmitter.Name), data = dus_ri_se,
family = gaussian(link = "log"))
# summary(dus_ri_glmm_sx)
# r.squaredGLMM(dus_ri_glmm_sx)
# anova(dus_ri_glmm_null, dus_ri_glmm_sx)
dus_ri_glmm_se <- glmer(RI ~ Season + (1|Transmitter.Name), data = dus_ri_se,
family = gaussian(link = "log"))
# summary(dus_ri_glmm_se)
# r.squaredGLMM(dus_ri_glmm_se)
# anova(dus_ri_glmm_null, dus_ri_glmm_se)
dus_ri_glmm_mig <- glmer(RI ~ Migratory.Status + (1|Transmitter.Name) ,
data = dus_ri_se, family = gaussian(link = "log"))
# summary(dus_ri_glmm_mig)
# r.squaredGLMM(dus_ri_glmm_mig)
# anova(dus_ri_glmm_null, dus_ri_glmm_mig)
dus_ri_aic_se <- AIC(dus_ri_glmm_null, dus_ri_glmm_sx, dus_ri_glmm_se,
dus_ri_glmm_mig)
### Model prediction - seasonal variation
# # generate test data
# n <- c(as.character(factor_mode(dus_ri_se$Transmitter.Name)),
#        as.character(factor_mode(dus_ri_se$Migratory.Status)),
#        as.character(factor_mode(dus_ri_se$Sex)),
#        mean(dus_ri_se$FL),
#        as.character(factor_mode(dus_ri_se$Migratory.Status)),
#        mean(dus_ri_se$MCP_area), mean(dus_ri_se$core_KUD),
#        mean(dus_ri_se$Network_Density), mean(dus_ri_se$RI))
# dus_ri_se_newdata <- data.frame(cbind(rbind(n,n,n,n),
#                                       as.character(unique(dus_ri_se$Season))))
# colnames(dus_ri_se_newdata) <- names(dus_ri_se)
# # predicted model values
# PI <- predictInterval(dus_ri_glmm_se, newdata = dus_ri_se_newdata, level = 0.95,
#                       n.sims = 1000, stat = "median", type = "probability",
#                       include.resid.var = TRUE)
# # joined with sample dataset
# dus_ri_se_pred <- cbind(dus_ri_se_newdata, PI)
# dus_ri_se_pred$Season <- factor(dus_ri_se_pred$Season,
#                                 levels = c("Summer","Autumn", "Winter", "Spring"))
# # plot of seasonal trend
# ggplot(dus_ri_se, aes(Season, log(RI), fill = Migratory.Status)) + geom_boxplot() +
#   #geom_line(data = dus_ri_se_pred, aes(as.numeric(Season), fit)) +
#   theme_bw() + ylab("RI") + xlab("Season")
# ggplot(dus_ri_se, aes(Season, log(RI))) + geom_boxplot() +
#   #geom_line(data = dus_ri_se_pred, aes(as.numeric(Season), fit)) +
#   theme_bw() + ylab("RI") + xlab("Season")
# ggplot(dus_ri_se, aes(Migratory.Status, log(RI), fill = Season)) + geom_boxplot() +
#   #geom_line(data = dus_ri_se_pred, aes(as.numeric(Season), fit)) +
#   theme_bw() + ylab("RI") + xlab("Migratory.Status")
# time of day subset
dus_ri_dn <- dus_daynight
dus_ri_dn <- dus_ri_dn[dus_ri_dn$Sex != "U",]
dus_ri_dn <- dus_ri_dn[!is.na(dus_ri_dn$Migratory.Status),]
# recode zero residency values to NA (then dropped below) - presumably so
# the log-link fit and the lm(log(RI)) comparison are well defined
dus_ri_dn$RI[dus_ri_dn$RI == 0] <- NA
dus_ri_dn <- dus_ri_dn[!is.na(dus_ri_dn$RI),]
dus_ri_dn$Daynight <- as.factor(dus_ri_dn$Daynight)
# null model
dus_ri_glmm_null <- glmer(RI ~ 1 + (1|Transmitter.Name), data = dus_ri_dn,
family = gaussian(link = "log"))
# summary(dus_ri_glmm_null)
ri_lm_null <- lm(log(RI) ~ 1, data = dus_ri_dn)
# anova(dus_ri_glmm_null, ri_lm_null)
# single variable models
dus_ri_glmm_sx <- glmer(RI ~ Sex + (1|Transmitter.Name), data = dus_ri_dn,
family = gaussian(link = "log"))
# summary(dus_ri_glmm_sx)
# anova(dus_ri_glmm_null, dus_ri_glmm_sx)
dus_ri_glmm_dn <- glmer(RI ~ Daynight + (1|Transmitter.Name), data = dus_ri_dn,
family = gaussian(link = "log"))
# summary(dus_ri_glmm_dn)
# r.squaredGLMM(dus_ri_glmm_dn)
# anova(dus_ri_glmm_null, dus_ri_glmm_dn)
# NOTE(review): this model alone adds a (1|Daynight) random intercept on
# top of the fixed Migratory.Status effect - confirm intended.
dus_ri_glmm_mig <- glmer(RI ~ Migratory.Status + (1|Transmitter.Name) +
(1|Daynight), data = dus_ri_dn, family = gaussian(link = "log"))
# summary(dus_ri_glmm_mig)
# anova(dus_ri_glmm_null, dus_ri_glmm_mig)
dus_ri_aic_dn <- AIC(dus_ri_glmm_null, dus_ri_glmm_sx, dus_ri_glmm_dn,
dus_ri_glmm_mig)
# ### Model prediction - variation between times of day
# # generate test data
# n <- c(as.character(factor_mode(dus_ri_dn$Transmitter.Name)),
#        as.character(factor_mode(dus_ri_dn$Sex)),
#        as.character(factor_mode(dus_ri_dn$Migratory.Status)),
#        mean(dus_ri_dn$FL),
#        as.character(factor_mode(dus_ri_dn$Migratory.Status)),
#        mean(dus_ri_dn$MCP_area), mean(dus_ri_dn$core_KUD),
#        mean(dus_ri_dn$Network_Density), mean(dus_ri_dn$RI))
# dus_ri_dn_newdata <- data.frame(cbind(rbind(n,n), as.character(unique(dus_ri_dn$Daynight))))
# colnames(dus_ri_dn_newdata) <- c("Transmitter.Name", "Sex", "Migratory.Status", "FL", "Migratory.Status",
#                                  "MCP_area", "core_KUD", "Network_Density",
#                                  "RI", "Daynight")
# # predicted model values
# PI <- predictInterval(dus_ri_glmm_dn, newdata = dus_ri_dn_newdata, level = 0.95,
#                       n.sims = 1000, stat = "median", type = "probability", include.resid.var = TRUE)
# # joined with sample dataset
# dus_ri_dn_pred <- cbind(dus_ri_dn_newdata, PI)
# # plot of seasonal trend
# ggplot(dus_ri_dn, aes(Daynight, log(RI), fill = Migratory.Status)) + geom_boxplot() +
#   #geom_line(data = dus_ri_se_pred, aes(as.numeric(Season), fit)) +
#   theme_bw() + ylab("RI") + xlab("Time of Day")
# ggplot(dus_ri_dn, aes(Migratory.Status, log(RI), fill = Daynight)) + geom_boxplot() +
#   #geom_line(data = dus_ri_se_pred, aes(as.numeric(Season), fit)) +
#   theme_bw() + ylab("RI") + xlab("Migratory Status")
##### DETECTION RATE MODELS FOR 25M DEPTH BAND - ALL #####
# Build the dusky-shark subset for the 25 m depth-band detection models:
# drop unknown-sex fish and rows missing migratory status, then keep only
# transmitters detected on >= 5 distinct days and depth bands with >= 5 rows.
all_det25 <- det25
# remove unknown-sex individuals
all_det25 <- all_det25[all_det25$Sex != "U",]
dus_det25 <- subset(all_det25, Species == "Dusky")
dus_det25 <- dus_det25[!is.na(dus_det25$Migratory.Status),]
# number of distinct detection days per transmitter (vectorised replacement
# for the original grow-with-append() loop)
days_per_tag <- tapply(as.character(dus_det25$Date),
                       as.character(dus_det25$Transmitter.Name),
                       function(d) length(unique(d)))
codes <- names(days_per_tag)[days_per_tag >= 5]
dus_det25 <- dus_det25[dus_det25$Transmitter.Name %in% codes,]
# keep only depth bands with at least 5 observations (rows with a missing
# band, if any, are dropped here rather than counted)
band_n <- table(as.character(dus_det25$Depth_band25))
depths <- names(band_n)[band_n >= 5]
dus_det25 <- dus_det25[dus_det25$Depth_band25 %in% depths,]
# order the season levels so Summer is the reference level
dus_det25$Season <- factor(dus_det25$Season,
                           levels = c("Summer","Autumn", "Winter", "Spring"))
# Detection-rate GLMMs across 25 m depth bands (uses dus_det25 built above);
# gaussian family with log link, per-individual random intercept.
# deciding distribution for model
# par(mfrow = c(2,3))
# # normal distribution
# qqp(dus_det25$No_Det, "norm")
# # log normal distribution
# qqp(dus_det25$No_Det, "lnorm")
# # negative binomial
# # generates required parameter estimates
# nbinom <- fitdistr(round(dus_det25$No_Det), "Negative Binomial")
# qqp(dus_det25$No_Det, "nbinom", size = nbinom$estimate[[1]],
#mu = nbinom$estimate[[2]])
# # poisson
# poisson <- fitdistr(round(dus_det25$No_Det), "Poisson")
# qqp(dus_det25$No_Det, "pois", lambda = poisson$estimate)
# # gamma
# gamma <- fitdistr(dus_det25$No_Det, "gamma")
# qqp(dus_det25$No_Det, "gamma", shape = gamma$estimate[[1]],
#rate = gamma$estimate[[2]])
# # best fit is log normal
# Linear mixed models
# null model
det25_glmm_null <- glmer(No_Det ~ 1 + (1|Transmitter.Name),
data = dus_det25, family = gaussian(link = "log"))
# summary(det25_glmm_null)
# r.squaredGLMM(det25_glmm_null)
det25_lm_null <- lm(log(No_Det) ~ 1, data = dus_det25)
# summary(det25_lm_null)
# single variable models
det25_glmm_sx <- glmer(No_Det ~ Sex + (1|Transmitter.Name),
data = dus_det25, family = gaussian(link = "log"))
# summary(det25_glmm_sx)
# r.squaredGLMM(det25_glmm_sx)
# anova(det25_glmm_null, det25_glmm_sx)
det25_glmm_se <- glmer(No_Det ~ Season + (1|Transmitter.Name),
data = dus_det25, family = gaussian(link = "log"))
# summary(det25_glmm_se)
# r.squaredGLMM(det25_glmm_se)
# anova(det25_glmm_null, det25_glmm_se)
det25_glmm_mig <- glmer(No_Det ~ Migratory.Status + (1|Transmitter.Name),
data = dus_det25, family = gaussian(link = "log"))
# summary(det25_glmm_mig)
# r.squaredGLMM(det25_glmm_mig)
# anova(det25_glmm_null, det25_glmm_mig)
# depth band treated as a categorical fixed effect
det25_glmm_dep <- glmer(No_Det ~ as.factor(Depth_band25) + (1|Transmitter.Name),
data = dus_det25, family = gaussian(link = "log"))
# summary(det25_glmm_dep)
# r.squaredGLMM(det25_glmm_dep)
# anova(det25_glmm_null, det25_glmm_dep)
dus_det25_aic <- AIC(det25_glmm_null, det25_glmm_sx, det25_glmm_se,det25_glmm_mig,
det25_glmm_dep)
### Model prediction - variation in detections by season
# # generate test data
# n <- c(as.character(factor_mode(dus_det25$Transmitter.Name)),
#        as.character(factor_mode(dus_det25$Date)),
#        as.character(factor_mode(dus_det25$Sex)),
#        as.character(factor_mode(dus_det25$Migratory.Status)),
#        mean(dus_det25$Depth_band25), mean(dus_det25$No_Det),
#        as.character(factor_mode(dus_det25$Migratory.Status)))
# dus_det25_newdata <- data.frame(cbind(rbind(n,n,n,n), as.character(unique(dus_det25$Season))))
# colnames(dus_det25_newdata) <- c("Transmitter.Name","Date","Sex","Migratory.Status","Depth_band25",
#                                  "No_Det","Migratory.Status","Season")
# # predicted model values
# PI <- predictInterval(det25_glmm_se, newdata = dus_det25_newdata, level = 0.95,
#                       n.sims = 1000, stat = "median", type = "probability", include.resid.var = TRUE)
# # joined with sample dataset
# dus_det25_pred <- cbind(dus_det25_newdata, PI)
# dus_det25_pred$Season <- factor(dus_det25_pred$Season, levels = c("Summer","Autumn", "Winter", "Spring"))
# # plot of seasonal trend
# ggplot(dus_det25, aes(as.factor(Depth_band25), log(No_Det), fill = Season)) + geom_boxplot() +
#   #geom_line(data = dus_det25_pred, aes(as.numeric(Season), fit)) +
#   theme_bw() + ylab("No Detections") + xlab("Season")
# ggplot(dus_det25, aes(Season, log(No_Det))) + geom_boxplot() +
#   geom_line(data = dus_det25_pred, aes(as.numeric(Season), log(fit))) +
#   theme_bw() + ylab("No Detections") + xlab("Season")
### Model prediction - variation in number of detections over each depth
# generate test data
# n <- c(as.character(factor_mode(dus_det25$Transmitter.Name)),
#        as.character(factor_mode(dus_det25$Date)),
#        as.character(factor_mode(dus_det25$Sex)),
#        as.character(factor_mode(dus_det25$Season)),
#        as.character(factor_mode(dus_det25$Migratory.Status)),
#        mean(dus_det25$No_Det),
#        as.character(factor_mode(dus_det25$Migratory.Status)))
# dus_det25_newdata <- data.frame(cbind(rbind(n,n,n,n,n), unique(dus_det25$Depth_band25)))
# colnames(dus_det25_newdata) <- c("Transmitter.Name","Date","Sex","Season","Migratory.Status",
#                                  "No_Det","Migratory.Status","Depth_band25")
# # predicted model values
# PI <- predictInterval(det25_glmm_dep, newdata = dus_det25_newdata, level = 0.95,
#                       n.sims = 1000, stat = "median", type = "probability", include.resid.var = TRUE)
# # joined with sample dataset
# dus_det25_pred <- cbind(dus_det25_newdata, PI)
# dus_det25_pred$Depth_band25 <- factor(dus_det25_pred$Depth_band25, levels = c("25","50", "75", "100", "125","150","175"))
# # plot of depth trend
# ggplot(dus_det25, aes(as.factor(Depth_band25), log(No_Det))) + geom_boxplot() + theme_bw()
# ggplot(dus_det25, aes(as.factor(Depth_band25), log(No_Det), fill = Season)) + geom_boxplot() + theme_bw()
# # depths with model values
# ggplot(dus_det25, aes(as.numeric(as.character(Depth_band25)), log(No_Det))) + geom_point() +
#   theme_bw() + ylab("No Detections") + xlab("Depth (m)") +
#   geom_line(data = dus_det25_pred, aes(y = log(fit)), colour = "blue") +
#   geom_line(data = dus_det25_pred, aes(y = log(upr)), colour = "red", linetype = "dashed") +
#   geom_line(data = dus_det25_pred, aes(y = log(lwr)), colour = "red", linetype = "dashed")
##### DETECTION RATE MODELS FOR 25M DEPTH BAND - TIME OF DAY #####
# Day/night version of the detection-rate subset: same filtering rules as
# the seasonal block above (drop unknown sex / missing migratory status,
# keep transmitters detected on >= 5 distinct days and depth bands with
# >= 5 observations).
all_detdn <- detdn
# remove unknown-sex individuals
all_detdn <- all_detdn[all_detdn$Sex != "U",]
dus_detdn <- all_detdn[all_detdn$Species == "Dusky",]
dus_detdn <- dus_detdn[!is.na(dus_detdn$Migratory.Status),]
# distinct detection days per transmitter (vectorised replacement for the
# original grow-with-append() loop)
days_per_tag <- tapply(as.character(dus_detdn$Date),
                       as.character(dus_detdn$Transmitter.Name),
                       function(d) length(unique(d)))
codes <- names(days_per_tag)[days_per_tag >= 5]
dus_detdn <- dus_detdn[dus_detdn$Transmitter.Name %in% codes,]
# keep only depth bands with at least 5 observations
band_n <- table(as.character(dus_detdn$Depth_band25))
depths <- names(band_n)[band_n >= 5]
dus_detdn <- dus_detdn[dus_detdn$Depth_band25 %in% depths,]
# Day/night detection-rate GLMMs (uses dus_detdn built above); gaussian
# family with log link, per-individual random intercept.
# # deciding distribution for model
# par(mfrow = c(2,3))
# # normal distribution
# qqp(dus_detdn$No_Det, "norm")
# # log normal distribution
# qqp(dus_detdn$No_Det, "lnorm")
# # negative binomial
# # generates required parameter estimates
# nbinom <- fitdistr(round(dus_detdn$No_Det), "Negative Binomial")
# qqp(dus_detdn$No_Det, "nbinom", size = nbinom$estimate[[1]],
#mu = nbinom$estimate[[2]])
# # poisson
# poisson <- fitdistr(round(dus_detdn$No_Det), "Poisson")
# qqp(dus_detdn$No_Det, "pois", lambda = poisson$estimate)
# # gamma
# gamma <- fitdistr(dus_detdn$No_Det, "gamma")
# qqp(dus_detdn$No_Det, "gamma", shape = gamma$estimate[[1]],
#rate = gamma$estimate[[2]])
# # best fit is log normal
# Linear mixed models
# null model
detdn_glmm_null <- glmer(No_Det ~ 1 + (1|Transmitter.Name), data = dus_detdn,
family = gaussian(link = "log"))
# summary(detdn_glmm_null)
detdn_lm_null <- lm(log(No_Det) ~ 1, data = dus_detdn)
# single variable models
detdn_glmm_sx <- glmer(No_Det ~ Sex + (1|Transmitter.Name), data = dus_detdn,
family = gaussian(link = "log"))
# summary(detdn_glmm_sx)
# anova(detdn_glmm_null, detdn_glmm_sx)
# NOTE(review): detdn appears to use a lowercase 'daynight' column here.
detdn_glmm_dn <- glmer(No_Det ~ daynight + (1|Transmitter.Name), data = dus_detdn,
family = gaussian(link = "log"))
# summary(detdn_glmm_dn)
# r.squaredGLMM(detdn_glmm_dn)
# anova(detdn_glmm_null, detdn_glmm_dn)
detdn_glmm_mig <- glmer(No_Det ~ Migratory.Status + (1|Transmitter.Name),
data = dus_detdn, family = gaussian(link = "log"))
# summary(detdn_glmm_mig)
# anova(detdn_glmm_null, detdn_glmm_mig)
# depth band treated as a categorical fixed effect
detdn_glmm_dep <- glmer(No_Det ~ as.factor(Depth_band25) + (1|Transmitter.Name),
data = dus_detdn, family = gaussian(link = "log"))
# summary(detdn_glmm_dep)
# anova(detdn_glmm_null, detdn_glmm_dep)
dus_detdn_aic <- AIC(detdn_glmm_null, detdn_glmm_sx, detdn_glmm_dn, detdn_glmm_mig,
detdn_glmm_dep)
### Model prediction - variation in number of detections over each depth
# # generate test data
# n <- c(as.character(factor_mode(dus_detdn$Transmitter.Name)),
#        as.character(factor_mode(dus_detdn$Date)),
#        as.character(factor_mode(dus_detdn$Sex)),
#        as.character(factor_mode(dus_detdn$daynight)),
#        as.character(factor_mode(dus_detdn$Migratory.Status)),
#        mean(dus_detdn$No_Det),
#        as.character(factor_mode(dus_detdn$Migratory.Status)))
# dus_detdn_newdata <- data.frame(cbind(rbind(n,n,n,n,n,n), unique(dus_detdn$Depth_band25)))
# colnames(dus_detdn_newdata) <- c("Transmitter.Name","Date","Sex","daynight","Migratory.Status",
#                                  "No_Det","Migratory.Status","Depth_band25")
# # predicted model values
# PI <- predictInterval(detdn_glmm_dep, newdata = dus_detdn_newdata, level = 0.95,
#                       n.sims = 1000, stat = "median", type = "probability", include.resid.var = TRUE)
# # joined with sample dataset
# dus_detdn_pred <- cbind(dus_detdn_newdata, PI)
# dus_detdn_pred$Depth_band25 <- factor(dus_detdn_pred$Depth_band25, levels = c("25","50", "75", "100", "125","150","175"))
# # plot of depth trend
# ggplot(dus_detdn, aes(as.factor(Depth_band25), log(No_Det))) + geom_boxplot() + theme_bw()
# ggplot(dus_detdn, aes(as.factor(Depth_band25), log(No_Det), fill = daynight)) + geom_boxplot() + theme_bw()
|
3a4a8001e46d5829a59259c352d7c84f8daa5a5a
|
851527c2a9663b2aa83462409e66f3da90583f5a
|
/R/get_num_scale.r
|
1c06dcb0cdafaf8d37fd26a0fd210179f09165e2
|
[] |
no_license
|
gastonstat/plspm
|
36511de2c95df73a010fb23847ef84f7ab680b56
|
bd21cb153021aed6ac6ea51ecbd0b856495b2a16
|
refs/heads/master
| 2022-05-05T19:36:11.853523
| 2022-03-27T00:25:31
| 2022-03-27T00:25:31
| 13,868,484
| 48
| 33
| null | 2016-12-13T07:37:39
| 2013-10-25T18:17:15
|
R
|
UTF-8
|
R
| false
| false
| 756
|
r
|
get_num_scale.r
|
#' @title Non-Metric Numerical Scale
#'
#' @details
#' Internal function. \code{get_num_scale} is called by \code{plspm}.
#'
#' @note
#' Scales a matrix X so that mean(X[,j]) = 0 and the *population* variance
#' of each column is 1, which means sum(X[,j]^2) = n.
#' With missing data, sum(X[,j]^2, na.rm = TRUE) equals the number of
#' available elements in that column.
#'
#' @param X a matrix (or object coercible to one) to be scaled
#' @return scaled matrix (dimnames are intentionally not restored)
#' @keywords internal
#' @template internals
#' @export
get_num_scale <- function(X) {
  X <- as.matrix(X)
  X_scaled <- matrix(0, nrow(X), ncol(X))
  for (j in seq_len(ncol(X))) {
    # number of available (non-NA) values in column j, computed once
    # (the original recomputed length(na.omit(...)) twice per column)
    n_obs <- length(na.omit(X[, j]))
    # scale() divides by the sample sd (n - 1); this factor rescales the
    # column to population variance 1, i.e. sum of squares = n_obs
    correction <- sqrt(n_obs / (n_obs - 1))
    X_scaled[, j] <- scale(X[, j]) * correction
  }
  X_scaled
}
|
44a9393a0067e5ac2bf787e31f3ed53871f096ad
|
c7aeb17eef74157237662a8b92fefa21a116a15b
|
/cachematrix.R
|
9d1758567d769489f20943f4e202db00e6e405d3
|
[] |
no_license
|
abhinavgaikwad/ProgrammingAssignment2
|
a8db3755e26a63b454aa2d505a04230686ba4d80
|
2e8f7c0909a84218d908a7aa80e300ca7f10a7d4
|
refs/heads/master
| 2022-12-17T00:25:12.851387
| 2020-08-02T23:44:32
| 2020-08-02T23:44:32
| 284,554,043
| 0
| 0
| null | 2020-08-02T22:40:14
| 2020-08-02T22:40:13
| null |
UTF-8
|
R
| false
| false
| 1,098
|
r
|
cachematrix.R
|
## Matrix inversion can be expensive, so instead of recomputing an inverse
## every time it is needed we cache it. makeCacheMatrix wraps a matrix in a
## closure that stores the matrix together with its (lazily computed)
## inverse, and exposes getter/setter functions for both.
makeCacheMatrix <- function(x = matrix()) {
  cached_inverse <- NULL
  # Replace the stored matrix and invalidate any stale cached inverse.
  set <- function(y) {
    x <<- y
    cached_inverse <<- NULL
  }
  # Return the stored matrix.
  get <- function() x
  # Store / retrieve the cached inverse.
  setinverse <- function(inverse) cached_inverse <<- inverse
  getinverse <- function() cached_inverse
  list(set = set, get = get,
       setinverse = setinverse, getinverse = getinverse)
}
## Compute (or fetch) the inverse of the special "matrix" object produced
## by makeCacheMatrix. If the inverse was computed before it is returned
## straight from the cache; otherwise it is computed with solve() and
## stored for subsequent calls.
cacheSolve <- function(x, ...) {
  ## Return a matrix that is the inverse of 'x'
  inv <- x$getinverse()
  if (is.null(inv)) {
    # cache miss: compute the inverse and remember it
    inv <- solve(x$get(), ...)
    x$setinverse(inv)
  } else {
    message("getting the cached data")
  }
  inv
}
|
67695275f18a89bb8808863f64d836d8ae194d70
|
e80ffb7bfb546b42354e29dd9d0c2633e3743ca9
|
/R/fn_writesample.R
|
5e1ac26e8d11091ae71f7acaa543ae854fe07283
|
[] |
no_license
|
shearwavesplitter/MFASTR
|
5c417f2499dcbb1df8e56786106e8ebdaa7eeb5e
|
a533f527cd6a4d2472ff7305f63b7f7c85467ceb
|
refs/heads/master
| 2021-01-21T06:55:13.317019
| 2020-02-17T14:43:59
| 2020-02-17T14:43:59
| 84,286,263
| 3
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 864
|
r
|
fn_writesample.R
|
#' @title Sample data
#' @description Writes out MFAST sample data as SAC files into \code{path}
#' @param path Path to the folder the sample SAC files are written into
#' @param type "normal" or "verylocal" sample data
#' @export
#' @examples
#' # Write out MFAST sample events
#' write_sample("~/mfast/sample_data/raw_data")
#'
#' # Write out MFAST verylocal sample events
#' write_sample("~/mfast/sample_data/raw_data",type="verylocal")
write_sample <- function(path, type = "normal") {
  # Move into the output folder, but restore the caller's working
  # directory on exit (the original left it permanently changed).
  old_wd <- setwd(path)
  on.exit(setwd(old_wd), add = TRUE)
  if (type == "normal") {
    for (i in seq_along(sample_normal)) {
      write <- sample_normal[[i]][[1]]
      writename <- sample_normal[[i]][[1]]$fn
      sm.write1sac(write, writename)
    }
  } else if (type == "verylocal") {
    for (i in seq_along(sample_verylocal)) {
      write <- sample_verylocal[[i]][[1]]
      writename <- sample_verylocal[[i]][[1]]$fn
      sm.write1sac(write, writename)
    }
  } else {
    # unknown type: keep the original best-effort behaviour (report, no error)
    print("type not found. Use verylocal or normal")
  }
}
|
810af3f0bc5488e7509b5e69f494bae7ebcfa0f7
|
ca3d8712ef173397eb9b6e52d73a9e136827e868
|
/Rscripts/quant/meth-quant2.R
|
8ba67df3f5bac4e774d066bbc1b21060336a3d19
|
[] |
no_license
|
hurrialice/metnet
|
700953672643a20c1274d43cf6594accdb343631
|
6276d66dd0f042481229867f26348d88214e4c55
|
refs/heads/master
| 2021-01-23T20:31:47.180568
| 2018-01-19T04:23:57
| 2018-01-19T04:23:57
| 102,867,318
| 1
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,447
|
r
|
meth-quant2.R
|
library(rtracklayer)
library(readr)
library(dplyr)
library(tibble)
library(GenomicFeatures)
library(GenomicRanges)
# Read the RMBase hg19 m6A site table; keep well-supported sites
# (non-missing score2, supportNum > 10) and rename columns to the
# seqname/start/end/transcript_id names GRanges construction expects.
# NOTE: `rm` temporarily shadows base::rm as a data object; the later call
# rm(rm, rm0) still works because R looks up functions separately.
rm <- read_table2('RMBase_hg19_all_m6A_site.txt')
rm0 <- rm %>% filter(!is.na(score2)) %>% filter(supportNum > 10) %>%
dplyr::select(chromosome, modStart, modEnd, modName, strand,
supportNum, pubmedIds, geneName, geneType) %>%
dplyr::rename(transcript_id = modName, gene_id = geneName,
start = modStart, end = modEnd, seqname = chromosome)
# each site is represented as an 'exon' feature for GRanges purposes
rm0$type <- 'exon'
rmdf <- DataFrame(rm0)
rmgr <- makeGRangesFromDataFrame(rmdf, keep.extra.columns = TRUE)
rm(rm, rm0)
# persist the filtered m6A site GRanges for reuse
write_rds(rmgr, 'mgr18w.rds')
# read FPKM values.
# d: sample table (one row per SRA run); msites: vector of m6A site names
d <- read_rds('srr_withgtfs.rds')
msites <- read_rds('msites.rds')
home_path <- "/home/qingzhang/meth-qing/stringtie-meths/"
# Read one StringTie GTF and return a two-column tibble of transcript-level
# FPKM values, with transcript_id renamed to modName so it can be matched
# against the m6A site names.
#
# file_to_read: path to a StringTie .gtf file
# returns: tibble with columns modName, FPKM (one row per transcript)
gtf2df <- function(file_to_read){
  gr <- rtracklayer::import(file_to_read, 'gtf')
  # as.tibble() is deprecated; return the pipeline result directly instead
  # of the original invisible assignment to a local `df`
  tibble::as_tibble(mcols(gr)) %>%
    dplyr::filter(type == 'transcript') %>%
    dplyr::select(transcript_id, FPKM) %>%
    dplyr::rename(modName = transcript_id)
}
# initialize with a container
# Preallocated samples x m6A-sites FPKM matrix; rows are SRA run ids,
# columns are site (modName) identifiers.
mc <- matrix(nrow = nrow(d), ncol = length(msites),
dimnames = list(d$sra_acc, msites))
for (i in seq(nrow(d))){
print(i)
srr_id <- d$sra_acc[i]
file_to_read <- paste0(home_path, d$wecall[i], '.gtf')
df <- gtf2df(file_to_read)
print(df)
# align this sample's FPKM values to the matrix columns by site name;
# sites absent from the GTF are left as NA by match()
mc[srr_id,] <- df$FPKM[match(colnames(mc), df$modName)]
}
write_rds(mc, 'test_m_raw.rds')
|
a03c19f6b0d468e35d037fd73c551003b47658f6
|
e349aed6d2373331d16f07dd89e4a13c21f56d88
|
/R.scripts/PEER.Apr.UK.git.R
|
ac820448d8e777376e39d1fba85a2b01315fd483
|
[] |
no_license
|
dcjohnson23/dcj_publications
|
e60e1ac3d2c010e95c1d9645934fd357aee8d46e
|
3be944426fa6cee59be03dde4ab8a3b9f8b9f9cf
|
refs/heads/master
| 2020-07-31T08:37:09.233198
| 2019-10-08T10:18:28
| 2019-10-08T10:18:28
| 210,547,346
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,097
|
r
|
PEER.Apr.UK.git.R
|
# Fit a PEER factor model to UK eQTL expression data (433 samples):
# 100 hidden factors plus the known GWAS covariates, then write out the
# inferred factors, weights, precisions and residuals, and plot residuals.
library(peer)
setwd("/johnson/PEER/")
# expression matrix: genes x samples (tab-separated, gene ids as rownames)
expr = read.csv("/johnson/PEER/UK.eQTL.gcrma.433.ComBat.Entrez.txt", row.names=1, sep="\t", header=TRUE)
covars = read.csv("/johnson/PEER/gwas.433.eQTL.csv", row.names=1, sep=",", header=TRUE )
# PEER expects samples in rows, so transpose the expression matrix
texpr <- t(expr)
dim(texpr)
model = PEER()
PEER_setPhenoMean(model,as.matrix(texpr))
dim(PEER_getPhenoMean(model))
# infer 100 hidden confounding factors
PEER_setNk(model,100)
PEER_getNk(model)
# include the known covariates alongside the hidden factors
PEER_setCovariates(model, as.matrix(covars))
PEER_update(model)
# posterior estimates: factors (X), weights (W), precisions (Alpha), residuals
texpr.factors = PEER_getX(model)
weights = PEER_getW(model)
precision = PEER_getAlpha(model)
residuals = PEER_getResiduals(model)
dim(texpr.factors)
# transpose factors back to factors x samples and label by sample
peer.covars <- t(texpr.factors)
colnames(peer.covars) <- colnames(expr)
write.table(peer.covars, "UKpeerco.mar.433.covars.csv", quote=F, sep=",")
write.table(weights, "UKpeerco.mar.433.weights.csv", quote=F, sep=",")
write.table(precision, "UKpeerco.mar.433.precision.csv", quote=F, sep=",")
write.table(residuals, "UKpeerco.mar.433.residuals.csv", quote=F, sep=",")
# residual diagnostic plots in both PNG and PDF form
png("UK.433.residuals.png",height=800,width=800)
plot(residuals)
dev.off()
pdf("UK.433.residuals.pdf")
plot(residuals)
dev.off()
|
bc30c5b4c446b49cfde59da8b3704b7a367419ec
|
3e6d6f143b0dd56472b70b4238203ea02db2e97c
|
/CarterHill_PrinciplesOfEconometrics/Chapter9_StationaryTimeSeries/exercise9.5.R
|
ddd63c19a4ee8d612b5268edce41763fa3d8e83f
|
[] |
no_license
|
statisticallyfit/REconometrics
|
80cee48277e3995df5219ee6515f673e97d1c875
|
b61455988f6040ef9ef2d0c36cee241299459461
|
refs/heads/master
| 2021-01-23T01:13:22.758614
| 2018-11-23T07:57:30
| 2018-11-23T07:57:30
| 92,862,217
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 391
|
r
|
exercise9.5.R
|
setwd("/datascience/projects/statisticallyfit/github/learningprogramming/R/RStats/learneconometrics/CarterHill_PrinciplesOfEconometrics/Chapter9_TimeSeries")
# QUESTION 9.5 (correlogram for 5.a)
# NOTE(review): read.dta() comes from the 'foreign' package and autoplot()
# presumably from ggfortify/forecast - confirm they are loaded earlier.
growth <- read.dta("growth47.dta")
growth
# quarterly series starting in 1947
growth.ts <- ts(growth, start=1947, frequency = 4)
# shift the series by one quarter (lag of -1)
growth.ts <- lag(growth.ts, -1)
growth.ts
autoplot(growth.ts)
# correlogram: plot the autocorrelation function of the series
autoplot(acf(growth.ts, plot = FALSE))
|
f4429605fcf05cb9570adf6e6b64badce54d2f8e
|
e26db52a0aa8c04aeba460cbb673da1911cd11e1
|
/figure_Rscripts/makeFigS2.R
|
abc793513d58f0337d19212bdf5bd06a386b7567
|
[] |
no_license
|
bdesanctis/mode-of-divergence
|
f108cd13622b7f8cd53518603c47d13847882424
|
f31efd047aa18cef71d3a912dd71111d9de9aeb9
|
refs/heads/main
| 2023-04-11T19:49:46.169050
| 2023-01-10T11:57:27
| 2023-01-10T11:57:27
| 533,055,768
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 4,719
|
r
|
makeFigS2.R
|
library(Hmisc)
library(RColorBrewer)
library(scales)
# Simulation-scenario flags used to label/select the processed results.
f <- 123 #this flag indicates that dominance coefficients are drawn from a 'realistic' distribution where large effect mutations have more extreme coefficients
mut <- 'perTrait'
overD <- TRUE
M <- 1
D <- 1000
n <- 20
scenarios <- 'drift'
# paths and statistics plotted in figure S2 (see getstat/getylab below)
paths <- c('M','m','f')
stats <- c('a','d','ad')
# loads `a` - presumably a list of simulation results, each carrying a
# $params element (see the sapply below) - TODO confirm contents
load("procSim.RData")
params <- as.data.frame(t(sapply(a, '[[','params')))
#initiate colour palette
palpaired <- brewer.pal(12,name='Paired')
palpaired[11] <- brewer.pal(11,name="BrBG")[4]
cols <- matrix(palpaired,ncol=2,byrow=T)
# Extract one statistic's trajectory from a per-replicate result matrix.
#
# x:    numeric matrix with D rows and columns named like "Ma"/"ma",
#       "Md"/"md", "Mad"/"mad" (the 'M' and 'm' path statistics).
# diff: for path == 'f' only -- TRUE returns the difference m - M,
#       FALSE returns the ratio M / m.
# term: statistic suffix ('a', 'd' or 'ad').
# path: 'M', 'm', or 'f' (compare the two paths).
# sub:  if TRUE, subsample every 20th divergence step (rows 1, 21, 41, ...).
# maxd: last divergence step to consider (defaults to the global D).
getstat <- function(x, diff = FALSE, term, path, sub = TRUE, maxd = D) {
  rows <- seq_len(maxd)
  if (sub) {
    rows <- rows[rows %% 20 == 1]
  }
  if (path == 'f') {
    # Comparison panel: difference or ratio of the two paths.
    if (diff) {
      return(x[rows, paste0('m', term)] - x[rows, paste0('M', term)])
    } else {
      return(x[rows, paste0('M', term)] / x[rows, paste0('m', term)])
    }
  } else {
    # Single-path panel: return the requested column directly.
    return(x[rows, paste0(path, term)])
  }
}
# Build the panel title (a plotmath expression) for a given path/statistic
# combination.  Letters (A)-(I) index the 3x3 figure layout; for path 'f'
# the label shows a difference (diff = TRUE) or a ratio (diff = FALSE).
getylab <- function(path, stat,diff=F){
  if(path=='m'){
    # m-path panels: second column of the figure grid.
    if(stat=='a') return(expression(paste('(B) ',italic(m),'(',bold(A),',',bold(A),')')))
    if(stat=='d') return(expression(paste('(E) ',italic(m),'(',bold(Delta),',',bold(Delta),')')))
    if(stat =='ad') return(expression(paste('(H) ',italic(m),'(',bold(A),',',bold(Delta),')')))
  }else if(path=='M'){
    # M-path panels: first column.
    if(stat=='a') return(expression(paste('(A) ',italic(M),'(',bold(A),',',bold(A),')')))
    if(stat=='d') return(expression(paste('(D) ',italic(M),'(',bold(Delta),',',bold(Delta),')')))
    if(stat =='ad') return(expression(paste('(G) ',italic(M),'(',bold(A),',',bold(Delta),')')))
  }else if(path=='f' & diff==T){
    # Comparison panels, difference form: m(...) - M(...).
    if(stat=='a') return(expression(paste('(C) ',italic(m),'(',bold(A),',',bold(A),') - ',italic(M),'(',bold(A),',',bold(A),')')))
    if(stat=='d') return(expression(paste('(F) ',italic(m),'(',bold(Delta),',',bold(Delta),') - ',italic(M),'(',bold(Delta),',',bold(Delta),')')))
    if(stat =='ad') return(expression(paste('(I) ',italic(m),'(',bold(A),',',bold(Delta),') - ',italic(M),'(',bold(A),',',bold(Delta),')')))
  }else if(path=='f' & diff==F){
    # Comparison panels, ratio form: M(...) / m(...).
    if(stat=='a') return(expression(paste('(C) ',italic(M),'(',bold(A),',',bold(A),') / ',italic(m),'(',bold(A),',',bold(A),')')))
    if(stat=='d') return(expression(paste('(F) ',italic(M),'(',bold(Delta),',',bold(Delta),') / ',italic(m),'(',bold(Delta),',',bold(Delta),')')))
    if(stat =='ad') return(expression(paste('(I) ',italic(M),'(',bold(A),',',bold(Delta),') / ',italic(m),'(',bold(A),',',bold(Delta),')')))
  }
}
# Draw the 3x3 grid of divergence-trajectory panels (path x statistic).
#
# a:      list of processed simulation runs; each element carries $params
#         (incl. $P1ancestral) and $res, a list of per-replicate result
#         matrices consumed by getstat().
# cols:   2-column colour matrix; column 2 supplies line colours, row
#         chosen by whether P1 is the ancestral population.
# sc, lm: accepted for backward compatibility; not used in this function.
# shaded: if TRUE, draw a +/- 2 SD envelope around each mean curve.
# sub:    subsample every 20th divergence step (forwarded to getstat()).
# D:      total number of divergence steps; n is unused here.
# diff:   forwarded to getstat()/getylab() for the 'f' panels (difference
#         vs ratio).  NOTE(review): the original body referenced a free
#         variable `diff`, which resolved to base::diff() and made those
#         calls error unless a global `diff` happened to exist; it is now
#         an explicit argument.  Default FALSE (ratio) -- confirm against
#         the published figure.
dplotall <- function(a, cols, sc=0, shaded=FALSE, lm=FALSE, sub=FALSE, D, n, paths, stats, diff=FALSE){
  # x positions must match the rows that getstat() returns.
  if(sub){
    xs <- c(1:D)[which((c(1:D) %% 20) == 1 )]
  }else{
    xs <- c(1:D)
  }
  for(i in seq_along(paths)){
    for(j in seq_along(stats)){
      # First pass over all runs to fix common y limits for this panel.
      dat <- lapply(a, FUN=function(x) {sapply(x$res, FUN=getstat, term=stats[j], path=paths[i], diff=diff, sub=sub, maxd=D)})
      means <- sapply(dat, FUN=rowMeans)
      if (stats[j]=='ad') means <- -means # needs to be flipped if P2 ancestral instead of P1
      sds <- sapply(dat, FUN=function(x) {apply(x, 1, sd)*2})
      ymin <- min(means-sds); ymax <- max(means+sds)
      for(w in seq_along(a)){
        # Line colour keyed on whether P1 is the ancestral population.
        mycol <- cols[1+(a[[w]]$params$P1ancestral==TRUE),2]
        dat <- sapply(a[[w]]$res, FUN=getstat, term=stats[j], path=paths[i], diff=diff, sub=sub, maxd=D)
        means <- rowMeans(dat)
        if (stats[j]=='ad') means <- -means
        sds <- apply(dat, 1, sd)*2
        if(w==1){
          # Empty panel frame, title, zero line, and (first panel) legend.
          plot(0,type='n',
               ylim=c(ymin,ymax),
               xlim=c(1,D),
               xlab='Divergence (D)',
               ylab='',
               main='')
          mtext(side=3,adj=0, getylab(paths[i],stats[j],diff=diff),cex=1,padj=-.5)
          if(stats[j]=='ad' | paths[i]=='f' & stats[j]=='d') abline(h=0, lty=3, lwd=2)
          if(i==1 & j==1) {
            legend('topleft',
                   fill=cols[1:2,2],
                   c(expression('Similar N'['e']),expression('P1 much lower N'['e'])),
                   bty='n',cex=1.2,border=FALSE)
          }
        }
        if(shaded) polygon(c(xs,rev(xs)),c(means+sds,rev(means-sds)),col=alpha(mycol,.1),border=FALSE)
        lines(xs,means, col=mycol,
              lty=1,
              lwd=2)
      }
    }
  }
}
# Open a 10x8 in device (quartz: macOS only) with a 3x3 panel layout.
quartz(width=10,height=8)
layout(matrix(1:9,ncol=3))
# Select the runs matching the parameter settings chosen in the preamble.
w <- which(params$n==n &
             params$sc %in% scenarios &
             params$mutmodel==mut &
             params$M==M &
             params$overD == overD &
             params$f == f)
# Reversed so legend ordering matches drawing order; rows 1 and 3 of the
# palette distinguish the two Ne scenarios.
dplotall(a=a[rev(w)], cols=cols[c(1,3),],shaded=T,sub=T,D=D,paths=paths,stats=stats)
dev.copy2pdf(file='FigS2.pdf',
             width=10, height=8)
|
ee712d48b83f83a96e3c91c5ef3f8b531718f796
|
1a1e0b5c8362d2694f3eeb6337735a35edb76de7
|
/Rdatascience/15 - factors.R
|
6ca3da5f6362bf0096fd92dcb05fc7520229a521
|
[] |
no_license
|
daifengqi/TidyverseStyle
|
804adbfdb34f74c6d28aa59c38c25f50681c6236
|
ed1e2d949d40fb15644bef1ce65552cf53d3f6d9
|
refs/heads/master
| 2022-03-04T17:45:32.121907
| 2019-08-13T12:23:18
| 2019-08-13T12:23:18
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,196
|
r
|
15 - factors.R
|
library(dplyr)    # %>%, count(), filter(), group_by(), summarise(), mutate()
library(forcats)
library(ggplot2)
library(readr)
# forcats: a package for dealing with factors.
# NOTE(review): the pipe and verbs below (%>%, count, group_by, summarise,
# mutate, filter) come from dplyr, which must be attached for this script
# to run.
# Create factors
month_levels <- c(
  "Jan", "Feb", "Mar", "Apr", "May", "Jun",
  "Jul", "Aug", "Sep", "Oct", "Nov", "Dec"
)
x1 <- c("Dec", "Apr", "Jan", "Mar")
y1 <- factor(x1, levels = month_levels)
sort(y1)  # sorts in level (calendar) order, not alphabetically
# "Jam" is a deliberate typo: base factor() would silently produce NA,
# readr::parse_factor() warns about values outside the levels.
x2 <- c("Dec", "Apr", "Jam", "Mar")
y2 <- parse_factor(x2, levels = month_levels)
# General social survey (forcats::gss_cat)
# example
gss_cat %>%
  count(race)
# modifying factor level
relig_summary <- gss_cat %>%
  group_by(relig) %>%
  summarise(
    age = mean(age, na.rm = TRUE),
    tvhours = mean(tvhours, na.rm = TRUE),
    n = n()
  )
# these two results are different: the second reorders religions by tvhours
ggplot(relig_summary, aes(tvhours, relig)) + geom_point()
ggplot(relig_summary, aes(tvhours, fct_reorder(relig, tvhours))) +
  geom_point()
# A more recommended way: reorder inside mutate(), keep aes() simple
relig_summary %>%
  mutate(relig = fct_reorder(relig, tvhours)) %>%
  ggplot(aes(tvhours, relig)) +
  geom_point()
# example: proportion of each marital status by age
by_age <- gss_cat %>%
  filter(!is.na(age)) %>%
  count(age, marital) %>%
  group_by(age) %>%
  mutate(prop = n / sum(n))
# fct_reorder2 orders the legend by the right-most values of the lines
ggplot(by_age, aes(age, prop, colour = fct_reorder2(marital, age, prop))) +
  geom_line() +
  labs(colour = "marital")
|
36362300a0cf52a5d3fe03067fca576f31559998
|
6e07af032188f52b9241bf568d98b159543ec8eb
|
/ProjectMindmap/DocumentationDatasetT-Total.R
|
a233886055df1be18cc0f230eae071d40446cb52
|
[] |
no_license
|
aumath-advancedr2019/PhaseTypeGenetics
|
1919f79f93421786ccc10c69056972f3f9ee9534
|
aa66f445b2515232ff4718a7bedef58242f08658
|
refs/heads/master
| 2020-08-01T15:21:43.487579
| 2019-11-28T09:13:14
| 2019-11-28T09:13:14
| 211,032,468
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 332
|
r
|
DocumentationDatasetT-Total.R
|
#' The total branch length for a sample size of n = 5, 10, 20, 50 and 100.
#'
#' A dataset containing the initial distributions and subintensity rate
#' matrices of the continuous phase-type representation of the total
#' branch length (T_Total) for sample sizes n in {5, 10, 20, 50, 100}.
#'
#' @format A list containing 5 objects of type \code{contphasetype},
#'   one for each sample size.
#'
"T_Total"
|
60a2cf167c00e0a3d561386364adfae41a5435cf
|
4570d4339e498fa8caaaad6db7296704562d0532
|
/webinars/Predictive_Modeling.R
|
ad3e688229cfa86542211bff3308114a49196457
|
[
"Apache-2.0",
"LicenseRef-scancode-warranty-disclaimer"
] |
permissive
|
sassoftware/sas-viya-programming
|
81e024035a2fec55a17006672fd15069dcdfc8a5
|
947f16955fc7e94b73b5aa5a59010e90abd11130
|
refs/heads/master
| 2023-05-24T22:21:07.696235
| 2023-05-12T19:26:58
| 2023-05-12T19:26:58
| 62,091,838
| 146
| 154
|
Apache-2.0
| 2023-03-25T01:24:58
| 2016-06-27T22:11:06
|
Jupyter Notebook
|
UTF-8
|
R
| false
| false
| 8,046
|
r
|
Predictive_Modeling.R
|
#
# Copyright SAS Institute
#
# Licensed under the Apache License, Version 2.0 (the License);
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Loading the required SWAT package and other R libraries necessary
library(swat)
library(ggplot2)
library(reshape2)
library(xgboost)
library(caret)
library(dplyr)
library(pROC)
library(e1071)
library(ROCR)
library(pmml)
library(randomForest)
library(caret)
# Connect to CAS server using appropriate credentials
s = CAS()
# Create a CAS library called lg pointing to the defined directory
# Need to specify the srctype as path, otherwise it defaults to HDFS
cas.table.addCaslib(s,
                    name = "lg",
                    description = "Looking glass data",
                    dataSource = list(srcType="path"),
                    path = "/viyafiles/tmp"
)
# Load the data into the in-memory CAS server as table "castbl"
data = cas.read.csv(s,
                    "C:/Users/Looking_glass.csv",
                    casOut=list(name="castbl", caslib="lg", replace=TRUE)
)
# Invoke the overloaded R functions to view the head and summary of the input table
print(head(data))
print(summary(data))
# Check for any missingness in the data (NMiss per column)
dist_tabl = cas.simple.distinct(data)$Distinct[,c('Column','NMiss')]
print(dist_tabl)
dist_tabl = as.data.frame(dist_tabl)
# NOTE(review): `sub` shadows base::sub() -- works, but rename if extended.
sub = subset(dist_tabl, dist_tabl$NMiss != 0)
imp_cols = sub$Column
# Print the names of the columns to be imputed
print(imp_cols)
# Impute the missing values (median for continuous, mode for nominal),
# writing the result back over "castbl"
cas.dataPreprocess.impute(data,
                          methodContinuous = 'MEDIAN',
                          methodNominal = 'MODE',
                          inputs = imp_cols,
                          copyAllVars = TRUE,
                          casOut = list(name = 'castbl', replace = TRUE)
)
# Split the data into training and validation and view the partitioned table
loadActionSet(s,"sampling")
# Simple random sampling: 30% flagged via the _PartInd_ indicator column
cas.sampling.srs( s,
                  table = list(name="castbl", caslib="lg"),
                  samppct = 30,
                  seed = 123456,
                  partind = TRUE,
                  output = list(casOut = list(name = "sampled_castbl", replace = T, caslib="lg"), copyVars = 'ALL')
)
# Check for frequency distribution of partitioned data
cas.simple.freq(s,table="sampled_castbl", inputs="_PartInd_")
# Partition data into train (70%) and validation (30%) based on _PartInd_
train = defCasTable(s, tablename = "sampled_castbl", where = " _PartInd_ = 0 ")
val = defCasTable(s, tablename = "sampled_castbl", where = " _PartInd_ = 1 ")
# Create the appropriate input and target variables
info = cas.table.columnInfo(s, table = train)
colinfo = info$ColumnInfo
## nominal variables are: region, upsell_xsell
# NOTE(review): hard-coded positional column indices -- fragile if the CSV
# schema changes; verify positions 7/8/9/15/18 against the actual table.
nominals = colinfo$Column[c(7,8)]
intervals = colinfo$Column[c(-7,-8,-9,-15,-18)]
target = colinfo$Column[8]
inputs = colinfo$Column[c(-8,-9,-15,-18)]
# Build a gradient-boosted tree (GB) model for predictive classification
loadActionSet(s, "decisionTree")
model = cas.decisionTree.gbtreeTrain(
  s,
  casOut=list(caslib="lg",name="gb_model",replace=T),
  # saveState writes an analytic store (astore) for later scoring/export
  saveState = list(caslib="lg", name="R_SWAT_GB", replace=T),
  inputs = inputs,
  nominals = nominals,
  target = target,
  table = train
)
# View the model info
print(model)
# Promote the astore to a global caslib so other sessions can use it
cas.table.promote(s, caslib="lg", name="R_SWAT_GB", targetCaslib="casuser")
# Score the model on the validation ("test") partition
out = cas.decisionTree.gbtreeScore (
  s,
  modelTable = list(name="gb_model", caslib="lg"),
  table = val,
  encodeName = TRUE,
  assessonerow = TRUE,
  casOut = list(name="scored_data", caslib="lg", replace=T),
  copyVars = target
)
# View the scored results
cas.table.fetch(s,table="scored_data")
# Train a local R model for comparison.
# NOTE(review): despite the xgboost import at the top of the file, the model
# actually fitted below is a randomForest, not XGBoost.
# First, convert the train and test CAS tables to R data frames for training the R model
train_cas_df = to.casDataFrame(train)
train_df = to.data.frame(train_cas_df)
val_cas_df = to.casDataFrame(val)
val_df = to.data.frame(val_cas_df)
# In R, we need to do the data pre-processing explicitly: make the target a
# factor and rename the imputed (IMP_-prefixed) columns back to their
# original names so train/score columns line up.
train_df$upsell_xsell = as.factor(train_df$upsell_xsell)
val_df$upsell_xsell = as.factor(val_df$upsell_xsell)
train_df$days_openwrkorders = train_df$IMP_days_openwrkorders
train_df$ever_days_over_plan = train_df$IMP_ever_days_over_plan
val_df$days_openwrkorders = val_df$IMP_days_openwrkorders
val_df$ever_days_over_plan = val_df$IMP_ever_days_over_plan
train_df$IMP_days_openwrkorders = NULL
train_df$IMP_ever_days_over_plan = NULL
val_df$IMP_days_openwrkorders = NULL
val_df$IMP_ever_days_over_plan = NULL
# Train a RF model on a hard-coded column subset.
# NOTE(review): verify indices c(3,8,9,10,11,12,14) against the data frame.
rf_model <- randomForest(upsell_xsell ~ . , ntree=2, mtry=5, data=train_df[,c(3,8,9,10,11,12,14)], importance=TRUE)
# Make class-probability predictions on the validation data
pred <- predict(rf_model, val_df[,c(3,8,9,10,11,12,14)], type="prob")
# Evaluate the performance of SAS and R models
## Assessing the performance metric of SAS-GB model
loadActionSet(s,"percentile")
# ROC table over probability cutoffs in steps of 0.05 for event class "1"
tmp = cas.percentile.assess(
  s,
  cutStep = 0.05,
  event = "1",
  inputs = "P_upsell_xsell1",
  nBins = 20,
  response = target,
  table = "scored_data"
)$ROCInfo
roc_df = data.frame(tmp)
print(head(roc_df))
# Display the confusion matrix for cutoff threshold at 0.5
cutoff = subset(roc_df, CutOff == 0.5)
tn = cutoff$TN
fn = cutoff$FN
tp = cutoff$TP
fp = cutoff$FP
# Rows = actual class, columns = predicted class.
a = c(tn,fn)
p = c(fp,tp)
mat = data.frame(a,p)
colnames(mat) = c("Pred:0","Pred:1")
rownames(mat) = c("Actual:0","Actual:1")
mat = as.matrix(mat)
print(mat)
# Print the accuracy and misclassification rates for the model
accuracy = cutoff$ACC
mis = cutoff$MISCEVENT
print(paste("Misclassification rate is",mis))
print(paste("Accuracy is",accuracy))
## Assessing the performance metric of R-RF model
# Create a confusion matrix for cutoff threshold at 0.5
conf.matrix = table(val_df$upsell_xsell, as.numeric(pred[,2]>0.5))
rownames(conf.matrix) = paste("Actual", rownames(conf.matrix), sep = ":")
colnames(conf.matrix) = paste("Pred", colnames(conf.matrix), sep = ":")
# Print the accuracy and misclassification rates for the model
# NOTE(review): compares a 0/1 numeric to a factor -- works only if the
# factor levels are "0"/"1"; confirm against the data.
err = mean(as.numeric(pred[,2] > 0.5) != val_df$upsell_xsell)
print(paste("Misclassification rate is",err))
print(paste("Accuracy is",1-err))
# Plot ROC curves for both the models using standard R plotting functions
# SAS model: FPR/TPR come straight out of the percentile.assess ROC table.
FPR_SAS = roc_df['FPR']
TPR_SAS = roc_df['Sensitivity']
# R model: build a ROCR prediction from the RF class-1 probabilities and the
# true labels of the validation partition.
# BUG FIX: the original passed an undefined object `test_labels`; the true
# labels live in val_df$upsell_xsell (the validation split scored above).
pred1 = prediction(pred[,2], val_df$upsell_xsell)
perf1 = performance( pred1, "tpr", "fpr" )
FPR_R = perf1@x.values[[1]]
TPR_R = perf1@y.values[[1]]
roc_df2 = data.frame(FPR = FPR_R, TPR = TPR_R)
# Overlay both ROC curves (SAS GB in blue, R RF in red).
# NOTE(review): the second geom_line maps FPR_R/TPR_R from the global
# environment even though data = roc_df2 is supplied; it works because the
# vectors are identical, but mapping roc_df2$FPR/TPR would be cleaner.
ggplot() +
  geom_line(
    data = roc_df[c('FPR', 'Sensitivity')],
    aes(x = as.numeric(FPR), y = as.numeric(Sensitivity),color = "SAS"),
  ) +
  geom_line(
    data = roc_df2,
    aes(x = as.numeric(FPR_R), y = as.numeric(TPR_R),color = "R_RF"),
  ) +
  scale_color_manual(
    name = "Colors",
    values = c("SAS" = "blue", "R_RF" = "red")
  ) +
  xlab('False Positive Rate') + ylab('True Positive Rate')
# Generating PMML code to export R model to Model Manager
rf.pmml = pmml(rf_model)
format(object.size(rf.pmml))
savePMML(rf.pmml, "C:/Users/neveng/rf.xml", version=4.2 )
# Terminate the CAS session
cas.session.endSession(s)
|
898cd516bc9aa2fe5d9c7d891a660f507af6ac68
|
9397a453a4b9c4ddd9988235fe4a8ee6720358c1
|
/Process_WholeNL/Metrics_calc_lidRv203.R
|
9c16a8c59dcd106f40054ee5e9f1b77afef7e2de
|
[] |
no_license
|
komazsofi/myPhD_escience_analysis
|
33e61a145a1e1c13c646ecb092081182113dbce3
|
5f0ecdd05e7eaeb7fce30f0c28e0728642164dbc
|
refs/heads/master
| 2021-06-04T21:39:58.115874
| 2020-06-18T12:59:53
| 2020-06-18T12:59:53
| 119,659,750
| 0
| 1
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,529
|
r
|
Metrics_calc_lidRv203.R
|
library(lidR)
library(e1071)
# reinstall previous lidR version: require(devtools), install_version("lidR", version = "2.0.3", repos = "http://cran.us.r-project.org")
#Global settings
workdir="D:/Sync/_Amsterdam/10_ProcessWholeNL/Test/normalized_neibased/"
#workdir="D:/Koma/ProcessWholeNL/TileGroup_10/norm/"
setwd(workdir)
chunksize=2500   # catalog processing chunk size (m)
resolution=10    # output raster resolution (m); note grid_metrics below hard-codes res=10
groupid=10       # tile-group id used in output file names
cores=2
# Allow raster to keep large rasters in memory
rasterOptions(maxmemory = 200000000000)
# Set up the LAScatalog over the normalized tiles
ctg <- catalog(workdir)
opt_chunk_size(ctg) <- chunksize
opt_cores(ctg) <- cores
opt_output_files(ctg) <- ""
# Calc metrics related to both ground and vegetation points
# (LAStools-style filter: keep classes 1 = unclassified/veg, 2 = ground)
opt_filter(ctg) <- "-keep_class 1 2"
# Per-cell metrics over ground + vegetation returns.
# Z: normalized height, I: intensity, R: return number,
# Classification: ASPRS class code (1 and 2 kept by the filter above).
# The *dense metrics are the percentage of all returns that are class-1
# points inside each height band; pulsepen is the ground-return percentage.
myMetrics = function(Z,I,R,Classification)
{
  metrics = list(
    isd=sd(I),         # intensity standard deviation
    echomean=mean(R),  # mean return number
    lb1dense=(length(Z[Classification==1 & Z<1])/length(Z))*100,
    l12dense=(length(Z[Classification==1 & Z>1 & Z<2])/length(Z))*100,
    # BUG FIX: the 2-3 m band previously reused the 1-2 m limits
    # (Z>1 & Z<2), duplicating that layer and leaving 2-3 m unmeasured.
    l23dense=(length(Z[Classification==1 & Z>2 & Z<3])/length(Z))*100,
    l34dense=(length(Z[Classification==1 & Z>3 & Z<4])/length(Z))*100,
    l45dense=(length(Z[Classification==1 & Z>4 & Z<5])/length(Z))*100,
    l510dense=(length(Z[Classification==1 & Z>5 & Z<10])/length(Z))*100,
    l1015dense=(length(Z[Classification==1 & Z>10 & Z<15])/length(Z))*100,
    l1520dense=(length(Z[Classification==1 & Z>15 & Z<20])/length(Z))*100,
    la20dense=(length(Z[Classification==1 & Z>20])/length(Z))*100,
    pulsepen=(length(Z[Classification==2])/length(Z))*100
  )
  return(metrics)
}
# Rasterize the metrics at 10 m over the whole catalog
Metrics = grid_metrics(ctg, myMetrics(Z,Intensity,ReturnNumber,Classification), res=10)
# Dutch RD New (Amersfoort) projection
proj4string(Metrics ) <- CRS("+proj=sterea +lat_0=52.15616055555555 +lon_0=5.38763888888889 +k=0.9999079 +x_0=155000 +y_0=463000 +ellps=bessel +units=m +no_defs")
writeRaster(Metrics ,paste("Metrics_",groupid,".tif",sep=""),overwrite=TRUE)
# Calc vegetation related metrics (class-1 returns only)
opt_filter(ctg) <- "-keep_class 1"
# Vegetation-structure metrics for one grid cell, computed from the
# normalized heights (Z) of the vegetation (class-1) returns.
myVegMetrics = function(Z)
{
  # e1071 supplies skewness(); loaded inside so catalog workers have it.
  library(e1071)
  understorey <- Z[Z < 3]  # returns below 3 m
  list(
    h95p        = quantile(Z, 0.95),        # upper-canopy height proxy
    hsd         = sd(Z),
    hsd_b3      = sd(understorey),          # height spread below 3 m
    hskew       = skewness(Z),
    hskew_b3    = skewness(understorey),
    h25p        = quantile(Z, 0.25),
    h50p        = quantile(Z, 0.50),
    h75p        = quantile(Z, 0.75),
    # percentage of returns above the cell's mean height
    nofretamean = (length(Z[Z > mean(Z)]) / length(Z)) * 100
  )
}
# Rasterize the vegetation metrics at 10 m, project to Dutch RD New, write out
VegMetrics = grid_metrics(ctg, myVegMetrics(Z), res=10)
proj4string(VegMetrics) <- CRS("+proj=sterea +lat_0=52.15616055555555 +lon_0=5.38763888888889 +k=0.9999079 +x_0=155000 +y_0=463000 +ellps=bessel +units=m +no_defs")
writeRaster(VegMetrics,paste("VegMetrics_",groupid,".tif",sep=""),overwrite=TRUE)
|
5434de99fce83ebe664d0f59570e402049832334
|
c4c3992f17e63560bb98d7df0438072aab9f1c0a
|
/hw3/logistic_regression.R
|
a99f99cc31fd3a73359d3a59e80a6b608f5367e2
|
[] |
no_license
|
geluxp/CS584-Machine-Learning
|
7ecf615db557de3ef39c5d6739d8e21cf1c858e3
|
f3b4615d70fbd3b7a2906e36f7701815e8f218e3
|
refs/heads/master
| 2016-09-09T21:55:57.754815
| 2015-03-24T22:15:44
| 2015-03-24T22:15:44
| 32,828,424
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 4,807
|
r
|
logistic_regression.R
|
######################
####logistical regression 2 class
######################
##clear the memory
# NOTE(review): rm(list = ls()) in a script wipes the user's workspace --
# an anti-pattern, kept as-is.
rm(list = ls())
require(cvTools) ##load cross validation package
#Load data: two scores plus a binary admission label per row
data <- read.csv("D:/KUN_MEI_ASS3/DATA/data.csv")
#Create plot of the two scores coloured by class label
plot(data$score.1,data$score.2,col=as.factor(data$label),xlab="Score-1",ylab="Score-2")
#Predictor variables
X <- as.matrix(data[,c(1,2)])
#Add ones to X (intercept column)
X <- cbind(rep(1,nrow(X)),X)
kk=1
# Degree-2 feature map: all products x_i * x_j for 1 <= i <= j <= 3
# over the 3 design columns (intercept included) -> 6 columns.
Xnew=matrix(0,nrow=nrow(X),ncol=6)
for (ii in 1:3){
  for (jj in ii:3)
  {Xnew[,kk]=X[,ii]*X[,jj]
  kk=kk+1}
}
#Response variable
Y <- as.matrix(data$label)
# Drop the constant first column (1*1) and attach the label
data1<-data.frame(Xnew[,-1],Y)
# Sigmoid (logistic) function: maps any real z into (0, 1); vectorised over z.
sigmoid <- function(z) {
  1 / (1 + exp(-z))
}
#Cost Function
# Average negative log-likelihood (cross-entropy) of logistic regression.
# NOTE(review): reads the design matrix X and labels Y from the calling
# environment rather than taking them as arguments -- the CV loops below
# depend on this by reassigning the globals before each optim() call.
cost <- function(theta)
{
  m <- nrow(X)
  g <- sigmoid(X%*%theta)
  J <- (1/m)*sum((-Y*log(g)) - ((1-Y)*log(1-g)))
  return(J)
}
#Initial theta (all zeros)
initial_theta <- rep(0,ncol(X))
#Cost at initial theta
cost(initial_theta)
# Derive theta by minimising the cost with optim
# (NOTE: optim's default method here is Nelder-Mead, not gradient descent)
theta_optim <- optim(par=initial_theta,fn=cost)
#set theta
theta <- theta_optim$par
#cost at optimal value of the theta
theta_optim$value
# probability of the positive class for a student with scores (45, 85)
prob <- sigmoid(t(c(1,45,85))%*%theta)
################################
##initialization confusion matrices
################################
confusionMatrixDefault <- matrix(0,2,2) # creating a confusion matrix computing by R package
confusionMatrix <- matrix(0,2,2) # creating a confusion matrix computing by implemented function
#####################
## main loop, no feature combination (raw scores only)
####################
k <- 10 #10 folds
folds <- cvFolds(nrow(data), K = k, type = "interleaved") #using CVTools
for(i in 1:k){
  testdata <- subset(data, folds$which == i)
  traindata <- subset(data, folds$which != i)
  #Predictor variables
  X <- as.matrix(traindata[,c(1,2)])
  #Add ones to X
  X <- cbind(rep(1,nrow(X)),X)
  #Response variable
  # NOTE(review): X and Y are globals read by cost(); reassigning them here
  # is what makes each fold's fit use its own training data.
  Y <- as.matrix(traindata$label)
  #Intial theta
  initial_theta <- rep(0,ncol(X))
  #Cost at inital theta
  # cost(initial_theta)
  # Derive theta using gradient descent using optim function
  theta_optim <- optim(par=initial_theta,fn=cost)
  #set theta
  theta <- theta_optim$par
  #cost at optimal value of the theta
  theta_optim$value
  Xtest<-as.matrix(testdata[,c(1,2)])
  Xtest<-cbind(rep(1,nrow(Xtest)),Xtest)
  prob <- sigmoid(Xtest%*%theta)
  # Threshold the predicted probabilities at 0.5
  yhatEstimate<-vector()
  for (j in 1:length(prob)){
    if(prob[j]<0.5)
      yhatEstimate<-c(yhatEstimate,0)
    else
      yhatEstimate<-c(yhatEstimate,1)
  }
  ## computing the yhat using the premade R package LDA
  # model <- lda(V1 ~ ., data = traindata, prior = c(m1,m2)/m)
  # pred <- predict(model, testdata)
  # yhat <- apply(pred$posterior,1,which.max)
  #############################################################
  ## using a cumulation method to store the confusionMatrix.
  ##############################################################
  # confusionMatrixDefault <- confusionMatrixDefault + table(yhat,testdata[,1])
  confusionMatrix <- confusionMatrix + table(yhatEstimate,testdata[,3])
}
#####################
## main loop for non-linear combinations (degree-2 features in data1)
####################
k <- 10 #10 folds
folds <- cvFolds(nrow(data1), K = k, type = "interleaved") #using CVTools
# NOTE(review): k is reset to 1 here, so only the first fold actually runs
# -- this looks like a debugging leftover; remove to restore 10-fold CV.
k=1
for(i in 1:k){
  testdata <- subset(data1, folds$which == i)
  traindata <- subset(data1, folds$which != i)
  #Predictor variables (columns 1 and 5 of the expanded feature set)
  X <- as.matrix(traindata[,c(1,5)])
  #Add ones to X
  X <- cbind(rep(1,nrow(X)),X)
  #Response variable (globals X/Y are read by cost(), as in the loop above)
  Y <- as.matrix(traindata$label)
  #Intial theta
  initial_theta <- rep(0,ncol(X))
  #Cost at inital theta
  # cost(initial_theta)
  # Derive theta using gradient descent using optim function
  theta_optim <- optim(par=initial_theta,fn=cost)
  #set theta
  theta <- theta_optim$par
  #cost at optimal value of the theta
  theta_optim$value
  Xtest<-as.matrix(testdata[,c(1,5)])
  Xtest<-cbind(rep(1,nrow(Xtest)),Xtest)
  prob <- sigmoid(Xtest%*%theta)
  # Threshold the predicted probabilities at 0.5
  yhatEstimate<-vector()
  for (j in 1:length(prob)){
    if(prob[j]<0.5)
      yhatEstimate<-c(yhatEstimate,0)
    else
      yhatEstimate<-c(yhatEstimate,1)
  }
  ## computing the yhat using the premade R package LDA
  # model <- lda(V1 ~ ., data = traindata, prior = c(m1,m2)/m)
  # pred <- predict(model, testdata)
  # yhat <- apply(pred$posterior,1,which.max)
  #############################################################
  ## using a cumulation method to store the confusionMatrix.
  ##############################################################
  # confusionMatrixDefault <- confusionMatrixDefault + table(yhat,testdata[,1])
  confusionMatrix <- confusionMatrix + table(yhatEstimate,testdata[,6])
}
confusionMatrix
|
149e4c3ae343efef0ecd26a528cf04f57b3ec3b9
|
02e0dd12cd6473d11312f5cd27d655b3e1174cb1
|
/time-varying/MGWG_time-varying_20190927.R
|
d855297a3a524ed0e5410885fd3ca9544cdc27dd
|
[] |
no_license
|
ices-eg/wg_MGWG
|
754b9c93e0d4f31aa2183cbe7279a391e0d36be4
|
b22e911b7449ffb9c2137d785a44537012fd2201
|
refs/heads/master
| 2023-05-27T12:03:25.345328
| 2023-05-25T19:06:37
| 2023-05-25T19:06:37
| 104,081,883
| 6
| 6
| null | 2019-10-07T20:14:42
| 2017-09-19T13:56:56
|
R
|
UTF-8
|
R
| false
| false
| 1,314
|
r
|
MGWG_time-varying_20190927.R
|
#### Libraries
library("ggplot2")
#### Read in files
# Find every "...tab1.csv" results table below the working directory and read
# them all into a named list (names = file paths).
files <- dir(pattern = "tab1\\.csv", recursive = TRUE, full.names = TRUE)
results <- sapply(files, read.csv, header = TRUE, simplify = FALSE)
results <- lapply(results, function(x) {
  # Normalise the first column to "Year" and any Fbar<ages> column to "Fbar".
  colnames(x)[1] <- "Year"
  colnames(x) <- gsub("^Fbar[0-9a-z\\.]+", "Fbar", colnames(x))
  # NOTE(review): recovers the current element's name by peeking at the
  # lapply counter in the parent frame -- fragile; Map() over names(results)
  # would be more robust.
  aa <- names(results)[parent.frame()$i[]]
  x[, "name"] <- aa
  # Directory layout is .../<spp>/<model>/<file>.
  x[, "spp"] <- basename(dirname(dirname(aa)))
  x[, "model"] <- basename(dirname(aa))
  # tv = time-varying run unless the path contains "constant".
  x[, "tv"] <- !grepl("constant", aa)
  # NOTE(review): browser() is a debugging leftover -- it drops into an
  # interactive debugger when no F column is found.
  if(sum(grepl("^F.", colnames(x))) ==0) browser()
  x[, "Fbar"] <- x[, grep("^F.", colnames(x))]
  return(x)
})
fbar <- do.call("rbind",
                lapply(results, "[", c("Year", "Fbar", "name", "spp", "model", "tv")))
#### Make plots
# ggplot(fbar[grepl("SAM", fbar[,"model"]), ], aes(x = Year, y = Fbar)) +
#   geom_point(aes(col = spp, pch = tv)) +
#   scale_shape_manual(values = c(21, 19))
# Fbar by year for SAM fits (GOM haddock excluded), coloured/shaped by the
# time-varying flag and faceted by species.
g <- ggplot(fbar[grepl("SAM", fbar[,"model"]) & fbar[, "spp"] != "GOMhaddock", ], aes(x = Year, y = Fbar)) +
  geom_point(aes(col = tv, pch = tv), cex = 2) +
  scale_shape_manual(values = c(21, 19)) +
  scale_colour_manual(values = c("red", "green")) +
  ylab("Average fishing intensity for ages of interest") +
  facet_wrap(spp ~ .) + theme_bw()
ggsave(file.path("time-varying", "MGWG_time-varying_20190927.jpeg"))
|
eab1a3bac7eb6bd86427381df6ef378fb36fbd02
|
b1e1a193db8d4647a2ae1566724beebcfbc2c167
|
/index/data/observational/scripts/6_pheno_file.R
|
2d1df2bfe8b92ba19f96ae0c8fb2d5db901c82f7
|
[] |
no_license
|
mattlee821/000_thesis
|
166cef4616ad70ea47a6d558c77c8c4ec0a021b3
|
867c9f08daea61ecca7aa73e660d5001d1315a1b
|
refs/heads/master
| 2022-05-08T08:26:24.394209
| 2022-04-07T09:30:51
| 2022-04-07T09:30:51
| 229,047,207
| 1
| 1
| null | null | null | null |
UTF-8
|
R
| false
| false
| 7,362
|
r
|
6_pheno_file.R
|
# packages ====
library(data.table)
library(tidyr)
library(dplyr)
# Build per-age-group phenotype files ====
# For each ALSPAC age group: merge body composition, confounders and the
# (QC'd and raw) metabolite tables on the participant id (aln_qlet), keep
# only individuals with metabolite data, and write the qc/raw phenofiles.
#
# NOTE(review): the input file stems are irregular for the adult group
# ("adult_body_composition" but "adults_confounders"), so every stem is
# passed explicitly rather than derived from a single group name.
# The per-group complete-case counts computed by the original script were
# never printed or saved, so they are not reproduced here.
base_dir <- "index/data/observational/data"

make_phenofile <- function(bc_stem, conf_stem, qc_group, raw_stem, out_stem) {
  # Read the four inputs for this group.
  body_comp <- read.table(file.path(base_dir, "body_composition", paste0(bc_stem, ".txt")),
                          header = TRUE, sep = "\t")
  confounders <- read.table(file.path(base_dir, "confounders", paste0(conf_stem, ".txt")),
                            header = TRUE, sep = "\t")
  qc <- read.table(file.path(base_dir, "metabolomics/data_prep/final/qc", qc_group,
                             "MetaboQC_release/qc_data",
                             paste0("ALSPAC_", qc_group, "_2020_06_18_QCd_metabolite_data.txt")),
                   header = TRUE, sep = "\t")
  raw <- read.table(file.path(base_dir, "metabolomics/data_prep/final", paste0(raw_stem, ".txt")),
                    header = TRUE, sep = "\t")
  # The QC table stores the id in its rownames; promote it to column 1.
  qc <- setDT(qc, keep.rownames = TRUE)[]
  colnames(qc)[1] <- "aln_qlet"
  # Merge on participant id.
  data <- left_join(body_comp, confounders, by = "aln_qlet")
  data_qc <- left_join(data, qc, by = "aln_qlet")
  data_raw <- left_join(data, raw, by = "aln_qlet")
  # Keep only individuals with metabolite data (the first metabolite column
  # is non-missing exactly when the join matched).
  data_qc <- drop_na(data_qc, xxlvldlp)
  data_raw <- drop_na(data_raw, XXL.VLDL.P)
  # Write the phenofiles.
  write.table(data_qc, file.path(base_dir, "analysis", paste0(out_stem, "_qc_phenofile.txt")),
              row.names = FALSE, col.names = TRUE, quote = FALSE, sep = "\t")
  write.table(data_raw, file.path(base_dir, "analysis", paste0(out_stem, "_raw_phenofile.txt")),
              row.names = FALSE, col.names = TRUE, quote = FALSE, sep = "\t")
  invisible(NULL)
}

# children ====
make_phenofile("children_body_composition", "children_confounders",
               "children", "children_metabolites", "children")
# adolescents ====
make_phenofile("adolescents_body_composition", "adolescents_confounders",
               "adolescents", "adolescents_metabolites", "adolescents")
# young_adults ====
make_phenofile("young_adults_body_composition", "young_adults_confounders",
               "young_adults", "young_adults_metabolites", "young_adults")
# adults ==== (note the irregular singular/plural stems)
make_phenofile("adult_body_composition", "adults_confounders",
               "adults", "adult_metabolites", "adult")
rm(list=ls())
# qc numbers ====
# Record the sample size and column count of each QC'd metabolite dataset.
children <- read.table("index/data/observational/data/metabolomics/data_prep/final/qc/children/MetaboQC_release/qc_data/ALSPAC_children_2020_06_18_QCd_metabolite_data.txt", header = T, sep = "\t")
adolescents <- read.table("index/data/observational/data/metabolomics/data_prep/final/qc/adolescents/MetaboQC_release/qc_data/ALSPAC_adolescents_2020_06_18_QCd_metabolite_data.txt", header = T, sep = "\t")
young_adults <- read.table("index/data/observational/data/metabolomics/data_prep/final/qc/young_adults/MetaboQC_release/qc_data/ALSPAC_young_adults_2020_06_18_QCd_metabolite_data.txt", header = T, sep = "\t")
adults <- read.table("index/data/observational/data/metabolomics/data_prep/final/qc/adults/MetaboQC_release/qc_data/ALSPAC_adults_2020_06_18_QCd_metabolite_data.txt", header = T, sep = "\t")
# N = rows (participants); Metabolites = ncol, i.e. every column in the
# QC'd table (includes any non-metabolite columns present).
ALSPAC_QC_N <- data.frame(Group = c("Children", "Adolescents", "Young_adults", "Adults"),
                          N = c(nrow(children), nrow(adolescents), nrow(young_adults), nrow(adults)),
                          Metabolites = c(ncol(children), ncol(adolescents), ncol(young_adults), ncol(adults)))
write.table(ALSPAC_QC_N, "index/data/observational/data/metabolomics/data_prep/final/qc/ALSPAC_QC_N.txt",
            row.names = FALSE, col.names = TRUE, quote = FALSE, sep = "\t")
rm(list=ls())
|
4805a83940faae32d6077ab4c1a85f9f69c6faa1
|
c6367467b1ff97c12439e7f3c2a309900223a798
|
/data/data_carpentry/merge_mcd14ml_with_gldas2.1.R
|
72a71b03e9ad94f1c21486e4eab5577fd51eef8a
|
[] |
no_license
|
mikoontz/nighttime-fire-effects
|
8302bb1133a9a549de43a1ef451160839930380f
|
3a1908c7749e8c010f5967674b80185fbcb921f4
|
refs/heads/master
| 2021-07-22T06:51:21.428062
| 2020-06-22T18:52:12
| 2020-06-22T18:52:12
| 188,514,883
| 1
| 2
| null | null | null | null |
UTF-8
|
R
| false
| false
| 3,708
|
r
|
merge_mcd14ml_with_gldas2.1.R
|
# Purpose: climate per active fire detection
# Conditional on there being an active fire detection, what is the climate?
library(tidyverse)
library(sf)
library(data.table)
library(tdigest)
library(lubridate)
library(tmap)
library(mgcv)
library(gganimate)
library(viridis)
# Load one year of MODIS active fire detections (MCD14ML) joined with
# GLDAS 2.1 climate variables: read the local cache if present, otherwise
# fetch the CSV from S3, optionally caching it when download = TRUE.
# Returns a data.table, or NULL if the year is unavailable remotely.
get_mcd14mlGLDAS <- function(year, download = FALSE) {
  if(file.exists(paste0("data/data_output/mcd14ml_gldas21/mcd14ml_with_gldas_climate_variables_", year, ".csv"))) {
    # Cached copy on disk.
    (afd_thisYear <- data.table::fread(paste0("data/data_output/mcd14ml_gldas21/mcd14ml_with_gldas_climate_variables_", year, ".csv")))
  } else {
    # Remote fetch; try() so a missing year becomes NULL instead of an error.
    (afd_thisYear <- try(data.table::fread(paste0("https://earthlab-mkoontz.s3-us-west-2.amazonaws.com/mcd14ml-with-gldas2.1-climate-variables/mcd14ml_with_gldas_climate_variables_", year, ".csv"))))
    if ("try-error" %in% class(afd_thisYear)) {
      afd_thisYear <- NULL
    } else {
      if (download) {
        if (!dir.exists("data/data_output/mcd14ml_gldas21")) {
          dir.create("data/data_output/mcd14ml_gldas21")
        }
        data.table::fwrite(x = afd_thisYear,
                           file = paste0("data/data_output/mcd14ml_gldas21/mcd14ml_with_gldas_climate_variables_", year, ".csv"))
      }
    }
  }
  print(year)  # progress indicator for the lapply() over years
  return(afd_thisYear)
}
# Pull every available year 2000-2019
afd <- lapply(2000:2019, get_mcd14mlGLDAS)
# Keep only years that loaded and carry a TYPE column, then stack them
afd_filtered <-
  afd[which(purrr::map_lgl(afd, .f = function(x) {return(!is.null(x) & !is.null(x$TYPE))}))] %>%
  data.table::rbindlist()
# TYPE == 0 (presumed vegetation fires) with CONFIDENCE > 10
afd_filtered <- afd_filtered[TYPE == 0 & CONFIDENCE > 10]
afd_filtered[, .geo := NULL]
# ACQ_TIME is encoded as HHMM; split into hour and minute
afd_filtered[, acq_hour := floor(ACQ_TIME / 100)]
afd_filtered[, acq_min := ((ACQ_TIME / 100) - acq_hour) * 100]
# ACQ_DATE is epoch milliseconds; add the time-of-day offset
afd_filtered[, acq_datetime := as.POSIXct((ACQ_DATE / 1000) + (acq_hour * 3600) + (acq_min * 60),
                                          origin = "1970-01-01",
                                          tz = "America/Los_Angeles")]
# solar_offset: longitude in hours (15 degrees per hour)
afd_filtered[, `:=`(acq_year = year(acq_datetime),
                    acq_month = month(acq_datetime),
                    acq_day = day(acq_datetime),
                    solar_offset = LONGITUDE / 15,
                    hemisphere = ifelse(LATITUDE >= 0, yes = "Northern hemisphere", no = "Southern hemisphere"))]
afd_filtered[, acq_datetime_local := acq_datetime + as.duration(solar_offset * 60 * 60)]
# Local solar day-of-year and decimal hour (wrapped into [0, 24))
afd_filtered[, `:=`(local_doy = lubridate::yday(acq_datetime_local),
                    local_hour_decmin = ((acq_hour) + (acq_min / 60) + solar_offset + 24) %% 24)]
afd_filtered[, `:=`(local_solar_hour_decmin_round = round(local_hour_decmin),
                    local_solar_hour_decmin_round0.5 = round(local_hour_decmin * 2) / 2)]
# Solar geometry: hour angle (h), latitude in radians (phi), declination
# (delta), then solar elevation angle in degrees.
# https://en.wikipedia.org/wiki/Solar_zenith_angle
# https://en.wikipedia.org/wiki/Position_of_the_Sun#Declination_of_the_Sun_as_seen_from_Earth
# https://en.wikipedia.org/wiki/Hour_angle
afd_filtered[, `:=`(h = (local_hour_decmin - 12) * 15 * pi / 180,
                    phi = LATITUDE * pi / 180,
                    delta = -asin(0.39779 * cos(pi / 180 * (0.98565 * (local_doy + 10) + 360 / pi * 0.0167 * sin(pi / 180 * (0.98565 * (local_doy - 2)))))))]
afd_filtered[, solar_elev_ang := (asin(sin(phi)*sin(delta) + cos(phi)*cos(delta)*cos(h))) * 180 / pi]
# Sanity summaries: solar elevation ranges and sign fractions by the
# product's DAYNIGHT flag
afd_filtered[, .(min_solar_ang = min(solar_elev_ang)), by = (DAYNIGHT)]
afd_filtered[, .(max_solar_ang = max(solar_elev_ang)), by = (DAYNIGHT)]
afd_filtered[, .(pct_solar_ang_gt_0 = length(which(solar_elev_ang > 0)) / length(solar_elev_ang)), by = (DAYNIGHT)]
afd_filtered[, .(pct_solar_ang_lt_0 = length(which(solar_elev_ang < 0)) / length(solar_elev_ang)), by = (DAYNIGHT)]
fwrite(afd_filtered, "data/data_output/mcd14ml_gldas21/mcd14ml_with_gldas_climate_variables.csv")
|
25f2d4d0c8786a6f8ac62a6f7ef6a35b04b35bbc
|
acd9ba63d2780db1550acc7e460f73e5750af052
|
/code/plot4.R
|
8794086d76e83ab588740f288b66605d16e78cba
|
[] |
no_license
|
sayy85/ExData_Plotting1
|
2398bfc6fb839c0843f44f554a85718916b611c8
|
eeb965971a23b8281f36bd9b2d319b2165f14e2f
|
refs/heads/master
| 2020-12-25T12:28:37.519995
| 2014-08-12T06:49:51
| 2014-08-12T06:49:51
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 702
|
r
|
plot4.R
|
# Generate plot4.png: a 2x2 panel of household power-consumption series
# (global active power, voltage, energy sub-metering, global reactive power).
plot4 <-function() {
  #Read data
  # NOTE(review): relies on EDA_P1_ReadData.R (not shown) defining
  # ReadData(), which presumably returns the data frame used below.
  source('EDA_P1_ReadData.R')
  D<-ReadData()
  #Plot the data and save the plot as png
  png('plot4.png',width=480, height=480,unit='px')
  par(mfrow=c(2,2))  # 2x2 grid of panels
  with(D,{
    plot(Time,Global_active_power,type='l',ylab='Global Active Power',xlab='')
    plot(Time,Voltage,type='l',ylab='Voltage',xlab='datetime')
    # Three sub-metering series overlaid on one panel
    plot(Time,Sub_metering_1,type='l',ylab='Energy sub metering',xlab='')
    points(Time,Sub_metering_2,type='l',col='red')
    points(Time,Sub_metering_3,type='l',col='blue')
    legend('topright',col=c('black','red','blue'),lty=c(1,1),legend=c(names(D[,7:9])),bty='n')
    plot(Time,Global_reactive_power,type='l',xlab='datetime')
  })
  dev.off()
}
|
4bb92b3aca370b1610239eeb724590e1943acfe7
|
993ae53cf1cdbe68427ae5ddfd2845dec78c12e3
|
/Rplot4.R
|
1febf1128e7055c16c794362d6de8b7b577afdb9
|
[] |
no_license
|
HighSpiRitsdx/Emission
|
d3612050ef6f7ac83c2a4f316982500073eac2f4
|
bf54db134790b61ad0de8a579bf42280ea5a566a
|
refs/heads/master
| 2021-01-05T06:17:43.149479
| 2020-02-16T16:56:21
| 2020-02-16T16:56:21
| 240,912,296
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 427
|
r
|
Rplot4.R
|
## Load the NEI emissions data and the source classification table.
NEI <- readRDS("summarySCC_PM25.rds")
SCC <- readRDS("Source_Classification_Code.rds")
library(dplyr)
library(ggplot2)
## Plot 4: yearly total emissions from coal-related sources.
coal <- SCC %>% filter(grepl("Coal", Short.Name))
NEI_coal <- NEI %>% filter(SCC %in% coal$SCC) %>% group_by(year)
plot_data4 <- NEI_coal %>% summarise(Emissions = sum(Emissions))
p <- ggplot(plot_data4, aes(x = year, y = Emissions))
p + geom_line() + labs(title = "Coal related emisssion")
|
9419d6048fa3eb8f8d14853337538f33abecfbab
|
79aa1613f924627f22e4bc60238bd65b3566d61e
|
/man/SP500.Rd
|
f23f1cd012a2abc0ef4375de33611ed09c7a3bb2
|
[
"MIT"
] |
permissive
|
tsurf101/olpsR
|
4d027e7e57bb7650ccd32cd99178464aa6fe6ac4
|
56cdb47725eb1348223355b0c83a42d9f229c1af
|
refs/heads/master
| 2021-05-17T17:33:51.727673
| 2020-03-28T21:36:42
| 2020-03-28T21:36:42
| 250,898,378
| 0
| 0
|
NOASSERTION
| 2020-03-28T21:36:09
| 2020-03-28T21:36:09
| null |
UTF-8
|
R
| false
| false
| 2,256
|
rd
|
SP500.Rd
|
% Generated by roxygen2 (4.1.1): do not edit by hand
% Please edit documentation in R/data_SP500.R
\docType{data}
\name{SP500}
\alias{SP500}
\title{SP500 daily returns}
\format{A data frame with 1276 observations on the following 25 stocks.}
\source{
MSN Money, according to \url{http://www.cs.technion.ac.il/~rani/portfolios}
}
\usage{
data(SP500)
}
\description{
The dataset contains daily returns of the 25 largest market capitalization
stocks from the S&P 500 index (as of April 2003) from 1998-01-02 until 2003-01-31,
that is 1276 trading days.
Returns are calculated as closing price divided by the closing price
of the previous day (price relative).
The dataset was used, amongst others, for the analysis of \code{Anticor} algorithm by Borodin et al.
for example.
}
\details{
The following stocks are included:
\describe{
\item{\code{ge}}{General Electric Company (273 Bil)}
\item{\code{msft}}{Microsoft Corporation (259 Bil)}
\item{\code{walmart}}{Wal-Mart Stores (234 Bil)}
\item{\code{exxon}}{Exxon Mobil Corporation (230 Bil)}
\item{\code{pfizer}}{Pfizer Inc (194 Bil)}
\item{\code{citi}}{Citigroup Inc. (192 Bil)}
\item{\code{jnj}}{Johnson & Johnson (170 Bil)}
\item{\code{aig}}{American International Group (138 Bil)}
\item{\code{ibm}}{International Business Machines Corporation (136 Bil)}
\item{\code{merck}}{Merck & Co., Inc. (124 Bil)}
\item{\code{pg}}{Procter & Gamble Company (115 Bil)}
\item{\code{intel}}{Intel Corporation (110 Bil)}
\item{\code{bac}}{Bank of America Corporation (107 Bil)}
\item{\code{coke}}{Coca-Cola Company (102 Bil)}
\item{\code{cisco}}{Cisco Systems, Inc. (94 Bil)}
\item{\code{verizon}}{Verizon Communications Inc. (93 Bil)}
\item{\code{wfc}}{Wells Fargo & Company (78 Bil)}
\item{\code{amgen}}{Amgen Inc. (75 Bil)}
\item{\code{dell}}{Dell Computer Corporation (73 Bil)}
\item{\code{pepsi}}{PepsiCo, Inc. (69 Bil)}
\item{\code{sbc}}{SBC Communications Inc. (69 Bil)}
\item{\code{fannie}}{Fannie Mae S&P (68 Bil)}
\item{\code{chvron}}{ChevronTexaco Corporation (68 Bil)}
\item{\code{viacom}}{Viacom Inc'b' (66 Bil)}
\item{\code{lilly}}{Eli Lilly and Company (66 Bil)}
}
}
\references{
Borodin, A.; El-Yaniv, R. & Gogan, V.
Can we learn to beat the best stock, 2004
}
\keyword{datasets}
|
e7b95802d3ee90887bd33bec7902602d8037e3d1
|
a2b50507ed58c753f4f48e0e90b4537bd675706e
|
/tidy.R
|
8134230daeb9cb6b8ee8094468d787eb3bb90fd5
|
[
"MIT"
] |
permissive
|
datarian/SeelandSettlements
|
53f47b8ac5f73942f12fbea2ca0d9c2a7353be55
|
2e2a31fea9b950746cf2649bc6ca0e59d99d4e0f
|
refs/heads/master
| 2021-06-11T04:28:14.133021
| 2019-11-30T08:54:07
| 2019-11-30T08:54:07
| 128,356,597
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,772
|
r
|
tidy.R
|
library(dplyr)
library(sp)
dated_woods_raw <- read.csv("./data/belab.csv",
header = T,
stringsAsFactors = F,
sep = ";")
wood_list_raw <- read.csv("./data/Gesamtholzliste_Bielersee.csv",
na.strings = c("-","---", "----"), header=T,
stringsAsFactors = F, sep=";",
fileEncoding="utf8") # %>% enc2utf8()
wood_list <- wood_list_raw %>%
dplyr::select(Gemeinde,Flur,DNr,Qf,Fo,xLK,yLK,xLK95,yLK95) %>%
dplyr::rename(Nr = DNr) %>%
dplyr::mutate(Nr = as.integer(Nr), Qf = as.integer(Qf), Fo = as.integer(Fo)) %>%
dplyr::mutate(xLK = as.numeric(xLK), yLK = as.numeric(yLK), xLK95 = as.numeric(xLK95), yLK95 = as.numeric(yLK95)) %>%
dplyr::filter(!is.na(Nr))
LK95 <- wood_list %>%
dplyr::filter(!is.na(xLK95))
LK95nrs <- LK95$Nr
LK03 <- wood_list %>%
dplyr::filter(!is.na(xLK)) %>%
dplyr::filter(!Nr %in% LK95nrs)
dated_woods <- dated_woods_raw %>%
dplyr::filter(!is.na(Dat)) %>%
dplyr::select(Nr,Dat,Sp_Dat, Wk_Dat,WK,Sp,Anz,Ma,Art,Titel) %>%
dplyr::mutate(wood_type = case_when(
!is.na(Sp_Dat) ~ "Sp",
!is.na(Wk_Dat) ~ "Wk",
TRUE ~ "Ke"
)) %>%
dplyr::mutate(Dat = case_when(
!is.na(Sp_Dat) ~ Sp_Dat,
!is.na(Wk_Dat) ~ Wk_Dat,
TRUE ~ Dat
)) %>%
dplyr::mutate(Titel = as.character(Titel)) %>%
dplyr::select(-one_of(c('Sp_Dat', 'Wk_Dat')))
dated_woods$WK <- as.integer(dated_woods$WK)
#levels(dated_woods$WK) <- sub("^>[0-9]{1,3}", "",levels(dated_woods$WK))
combined_coordsLK03_dates <- dplyr::left_join(dated_woods,LK03,by="Nr") %>% filter(!is.na(yLK))
combined_coordsLK95_dates <- dplyr::left_join(dated_woods,LK95,by="Nr") %>% filter(!is.na(yLK95))
# Initialize a spatial points object with the CH95 projection
coordsLK95 <- SpatialPoints(cbind(combined_coordsLK95_dates$xLK95,combined_coordsLK95_dates$yLK95), proj4string = CRS("+init=epsg:2056"))
coordsLK03 <- SpatialPoints(cbind(combined_coordsLK03_dates$xLK,combined_coordsLK03_dates$yLK), proj4string = CRS("+init=epsg:21781"))
#Transform to WGS84:
coords95 <- spTransform(coordsLK95, CRS("+init=epsg:4326"))
coords03 <- spTransform(coordsLK03, CRS("+init=epsg:4326"))
# Combine to a spdataframe together with the attributes
spatial_data_LK95 <- SpatialPointsDataFrame(coords95,
combined_coordsLK95_dates[,-c(13,14,15,16)])
spatial_data_LK03 <- SpatialPointsDataFrame(coords03,
combined_coordsLK03_dates[,-c(13,14,15,16)])
spatial_data <- rbind(spatial_data_LK95,spatial_data_LK03)
saveRDS(spatial_data,file="./PrehistoricSeeland/data/woods_sp.Rds")
|
00ed61d059d34270a49b97e07ad821355dee8919
|
5fc8ac6bd470f9b0d64b17bd4dbce914667d9b28
|
/man/factorpart.Rd
|
bbaa1fa2b5d79fee85aea0daed1a95052f9d2be9
|
[] |
no_license
|
cancer-genetics-utu/heatmapGen2
|
2571134a4f31d2a3d2f50acb4514896bb3401195
|
e05237550098f47fec12764e5d90184e1c89c220
|
refs/heads/master
| 2022-08-01T08:15:28.928071
| 2020-06-02T22:43:47
| 2020-06-02T22:43:47
| 268,923,956
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,986
|
rd
|
factorpart.Rd
|
\name{factorpart}
\alias{factorpart}
%- Also NEED an '\alias' for EACH other topic documented here.
\title{
Utility
}
\description{
Builds a plotting component that renders a factor as a one-dimensional
colored strip (one color per level, \code{na.color} for missing values),
returning the drawing function together with its display size, the factor,
and the color mapping.
}
\usage{
factorpart(fct, col = NULL, label = NULL, cex = 1, vertical = TRUE, width = lcm(1), na.color = "gray80", palettefn = rainbow, ...)
}
%- maybe also 'usage' for other objects documented here.
\arguments{
\item{fct}{
%% ~~Describe \code{fct} here~~
}
\item{col}{
%% ~~Describe \code{col} here~~
}
\item{label}{
%% ~~Describe \code{label} here~~
}
\item{cex}{
%% ~~Describe \code{cex} here~~
}
\item{vertical}{
%% ~~Describe \code{vertical} here~~
}
\item{width}{
%% ~~Describe \code{width} here~~
}
\item{na.color}{
%% ~~Describe \code{na.color} here~~
}
\item{palettefn}{
%% ~~Describe \code{palettefn} here~~
}
\item{\dots}{
%% ~~Describe \code{\dots} here~~
}
}
\details{
%% ~~ If necessary, more details than the description above ~~
}
\value{
%% ~Describe the value returned
%% If it is a LIST, use
%% \item{comp1 }{Description of 'comp1'}
%% \item{comp2 }{Description of 'comp2'}
%% ...
}
\references{
%% ~put references to the literature/web site here ~
}
\author{
%% ~~who you are~~
}
\note{
%% ~~further notes~~
}
%% ~Make other sections like Warning with \section{Warning }{....} ~
\seealso{
%% ~~objects to See Also as \code{\link{help}}, ~~~
}
\examples{
##---- Should be DIRECTLY executable !! ----
##-- ==> Define data, use random,
##-- or do help(data=index) for the standard data sets.
## The function is currently defined as
function (fct, col = NULL, label = NULL, cex = 1, vertical = TRUE,
width = lcm(1), na.color = "gray80", palettefn = rainbow,
...)
{
fct <- as.factor(fct)
if (is.null(col)) {
col <- palettefn(length(levels(fct)))
}
labels <- levels(fct)
factorfct <- function(zoomx = NULL, zoomy = NULL) {
img <- if (vertical)
matrix(as.numeric(fct), nr = 1)
else matrix(as.numeric(fct), nc = 1)
xlim <- if (!is.null(zoomx))
zoomx
else c(0.5, nrow(img) + 0.5)
ylim <- if (!is.null(zoomy))
zoomy
else c(0.5, ncol(img) + 0.5)
image(1:nrow(img), 1:ncol(img), img, xaxt = "n", yaxt = "n",
bty = "n", xlim = xlim, ylim = ylim, col = col, ...)
if (!is.null(na.color) && any(is.na(img))) {
image(1:nrow(img), 1:ncol(img), ifelse(is.na(img),
1, NA), axes = FALSE, xlab = "", ylab = "", col = na.color,
add = TRUE)
}
box()
labelfct(vertical = vertical, r.cex = cex, c.cex = cex,
label = label)
}
list(FUN = factorfct, height = width, width = width, fct = fct,
col = col)
}
}
% Add one or more standard keywords, see file 'KEYWORDS' in the
% R documentation directory.
\keyword{ ~kwd1 }
\keyword{ ~kwd2 }% __ONLY ONE__ keyword per line
|
62afcb6f9af727f421abbd8e474ba7d319263127
|
ffdea92d4315e4363dd4ae673a1a6adf82a761b5
|
/data/genthat_extracted_code/photobiologyFilters/examples/rosco.Rd.R
|
fa771d852fa8cda4f631121e3548131ad4700175
|
[] |
no_license
|
surayaaramli/typeRrh
|
d257ac8905c49123f4ccd4e377ee3dfc84d1636c
|
66e6996f31961bc8b9aafe1a6a6098327b66bf71
|
refs/heads/master
| 2023-05-05T04:05:31.617869
| 2019-04-25T22:10:06
| 2019-04-25T22:10:06
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 186
|
r
|
rosco.Rd.R
|
library(photobiologyFilters)
### Name: rosco
### Title: Filter spectra data for Rosco theatrical filters or 'gels'
### Aliases: rosco
### Keywords: datasets
### ** Examples
rosco
|
2fdfaeceed3f79e9b5161761bb829abc61d75d54
|
ea3caf9ccef3eca8bf5707f182401ceca55c6d03
|
/cleanup credit data.R
|
edf34456f2e715f76d0d8052327451f5db96146e
|
[] |
no_license
|
somu2k16/practice_R
|
c6566f02094eade9784db9c3f3c55c3382e08ad1
|
44e91e7bc81d710644a8180555d513a9c8fb74c0
|
refs/heads/master
| 2020-08-16T04:39:22.958529
| 2019-10-21T04:51:05
| 2019-10-21T04:51:05
| 215,456,030
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 948
|
r
|
cleanup credit data.R
|
# Clean up the Lending Club loan data: drop columns with >= 5% missing
# values, then inspect a few of the remaining columns.
library(dplyr)
# NOTE(review): setwd() in a script is fragile -- prefer relative paths
# or an RStudio project. Kept to preserve behavior.
setwd('C:/Eminent')
# Treat '.' and empty strings as missing on read.
loandata = read.csv('LoanStats3a.csv', na.strings = c('.','') )
View(loandata)
dim(loandata)
str(loandata)
# Percentage of missing values per column (equivalent to the original
# sum(is.na(df) * 100) / length(df), written idiomatically).
result_na = sapply(loandata, function(df) {100 * mean(is.na(df))})
result_na = round(result_na, digits = 2)
class(result_na)
# Move the column names out of the row names into a proper column.
result_na_df = as.data.frame(result_na)
View(result_na_df)
result_na_df$column = row.names(result_na_df)
row.names(result_na_df) = NULL
result_na_df = result_na_df[ , c("column", "result_na")]
str(result_na_df)
# Sort by missingness, descending. Use the bare column name inside the
# dplyr verb rather than result_na_df$result_na.
result_na_df = result_na_df %>% arrange( desc(result_na) )
View(result_na_df)
# Keep only columns with less than 5% missing values.
result_na_df = result_na_df[result_na_df$result_na < 5,]
View(result_na_df)
loandata = loandata[ , result_na_df$column]
dim(loandata)
View(loandata)
# Quick looks at individual columns.
table(loandata$id)
length(loandata$id[2])
loandata$id[2]
unique(loandata$pub_rec_bankruptcies)
table(loandata$pub_rec_bankruptcies)
|
362490617031bc60a4c13f589f8aed3f566f6bbd
|
53f05350c45a0b1471560d49c4921d6f9128bd58
|
/ui.R
|
d2625fbec4de9e6966361a9efa6c25e9779ea0f5
|
[] |
no_license
|
KellyHu/BST260
|
961c1480d2e12708bd171d766c0bdec40ad8dc9b
|
15c1860e7e0efe7d33fe4d7ae7105f398a9225a8
|
refs/heads/master
| 2020-04-09T05:21:44.148104
| 2018-12-16T16:36:16
| 2018-12-16T16:36:16
| 160,061,399
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 497
|
r
|
ui.R
|
# Packages used by the app.
library(wordcloud)
library(tidyverse)
library(stringr)
library(tidytext)
library(shiny)
library(rsconnect)
# Sidebar: pick a text variable and the minimum word frequency.
# (food_health is expected to be defined in global.R.)
controls <- sidebarPanel(
  selectInput("selection", "Choose a variable:",
              choices = food_health),
  hr(),
  sliderInput("freq",
              "Minimum Frequency:",
              min = 1, max = 20, value = 5))
# UI definition: sidebar controls plus the rendered word cloud.
u <- shinyUI(fluidPage(
  titlePanel("Word Cloud for text variables in our dataset"),
  sidebarLayout(
    controls,
    mainPanel(plotOutput("plot")))))
|
40c0c50235dd1d52a80619ba939c5ef9214f7f58
|
74f962b3643898bb4185a7fb84a6b69cf93c72eb
|
/man/dot-perma_cc_folder_pref.Rd
|
a3cb68dfc2dbbba4d6092762ded4f4797191fece
|
[
"MIT"
] |
permissive
|
QualitativeDataRepository/archivr
|
4afecc346132c1a32aee030b7fb701bd2b7557d2
|
f7e10573b3c66a79fb82296d5eff4840d7e6d59e
|
refs/heads/master
| 2022-02-26T23:39:45.222070
| 2022-02-09T18:46:20
| 2022-02-09T18:46:20
| 161,853,679
| 5
| 2
|
NOASSERTION
| 2022-02-08T17:16:05
| 2018-12-15T00:08:33
|
R
|
UTF-8
|
R
| false
| true
| 378
|
rd
|
dot-perma_cc_folder_pref.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/apitools.R
\docType{data}
\name{.perma_cc_folder_pref}
\alias{.perma_cc_folder_pref}
\title{Global var for the folder preference for perma.cc}
\format{
An object of class \code{character} of length 1.
}
\usage{
.perma_cc_folder_pref
}
\description{
Global var for the folder preference for perma.cc
}
\keyword{datasets}
|
be62c5b921838d07754d202d1e97ea32ec33e123
|
4709e6dfacdf7bdc5bc1001be4d2879c38cd7344
|
/global.R
|
fba951645f3aed2c73c646b01153e8580e45e3a7
|
[] |
no_license
|
mcarmonabaez/crime_mexico_city
|
d993237ac9ccebc35c15183feff8481c15d36920
|
ccfcb33dd88203a95f64291370777b234a483d75
|
refs/heads/master
| 2021-05-02T16:19:26.212373
| 2018-02-07T22:32:06
| 2018-02-07T22:32:06
| 120,673,937
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 4,148
|
r
|
global.R
|
# load packages -----------------------------------------------------------
# Install (if missing) and attach the base set of packages plus any
# extras requested by the caller. Returns the current search path so the
# caller can confirm what got attached.
cargar_paquetes <- function(paquetes_extra = NULL){
  pkgs <- c("tidyverse", "stringr", "lubridate", paquetes_extra)
  missing_pkgs <- setdiff(pkgs, rownames(installed.packages()))
  if (length(missing_pkgs) > 0) {
    install.packages(missing_pkgs)
  }
  lapply(pkgs, require, character.only = TRUE)
  return(search())
}
cargar_paquetes(c("shiny", "shinydashboard", "plotly", "DCluster",
                  "jsonlite", "RCurl", "rgdal", "rgeos", "ggmap",
                  "scales", "geojsonio", "downloader", "spdep",
                  "viridis", "maptools", "rvest", "stringi", "leaflet"))
#
# library("shiny")
# library("shinydashboard"); library("plotly"); library("DCluster")
# library("jsonlite"); library("RCurl"); library("rgdal")
# library("rgeos"); library("ggmap"); library("scales")
# library("geojsonio"); library("downloader"); library("spdep")
# library("viridis"); library("maptools"); library("rvest");
# library("stringi"); library("leaflet")
# library("tidyverse"); library("stringr"); library("lubridate")
# data --------------------------------------------------------------------
# Lookup table mapping English weekday names to Spanish.
# NOTE(review): the Spanish names appear mojibake-encoded in this file --
# verify the file's encoding (should be UTF-8).
cat_dias <- data.frame(day = c("Tuesday", "Wednesday", "Thursday", "Friday",
                               "Saturday", "Sunday", "Monday" ),
                       dia_semana = c("Martes", "Miรฉrcoles", "Jueves",
                                      "Viernes", "Sรกbado", "Domingo", "Lunes"))
# Download the crime data archive and read the two CSVs it contains.
temp <- tempfile()
download.file("http://data.diegovalle.net/hoyodecrimen/cuadrantes.csv.zip",temp)
# Crime-level records: derive the hour and the (Spanish) weekday name.
crimenes <- read_csv(unz(temp,"clean-data/crime-lat-long.csv")) %>%
  mutate(hora = as.numeric(str_sub(string = as.character(hour), 1, 2)),
         day = weekdays(date)) %>%
  left_join(cat_dias, by = "day")
cuadrantes <- read_csv(unz(temp,"clean-data/cuadrantes-hoyodecrimen.csv"))
unlink(temp)
# map data ----------------------------------------------------------------
# Fetch the quadrant and sector polygons (GeoJSON) from the API.
tmp_cuadrantes <- tempfile("cuads", fileext = ".json")
download.file("https://hoyodecrimen.com/api/v1/cuadrantes/geojson", tmp_cuadrantes)
cuadrantes_json <- rgdal::readOGR(tmp_cuadrantes, "OGRGeoJSON", verbose = FALSE)
tmp_sectores <- tempfile("secs", fileext = ".json")
download.file("https://hoyodecrimen.com/api/v1/sectores/geojson", tmp_sectores)
sectores_json <- rgdal::readOGR(tmp_sectores, "OGRGeoJSON", verbose = FALSE)
# Crime counts per sector / per quadrant for the current period.
crime_sectors <- fromJSON("https://hoyodecrimen.com/api/v1/sectores/all/crimes/all/period")$rows
# fortify the data for ggplot2 and join the counts onto the polygons.
fsectors <- fortify(sectores_json, region = "sector")
sector_mapa <- left_join(fsectors, crime_sectors, by = c("id" = "sector"))
crime_cuadrantes <- fromJSON("https://hoyodecrimen.com/api/v1/cuadrantes/all/crimes/all/period")$rows
fcuadrantes <- fortify(cuadrantes_json, region = "cuadrante")
cuadrante_mapa <- left_join(fcuadrantes, crime_cuadrantes, by = c("id" = "cuadrante"))
# Map center coordinates (mean of all polygon vertices).
long_media <- mean(cuadrante_mapa$long)
lat_media <- mean(cuadrante_mapa$lat)
# helpers -----------------------------------------------------------------
# One representative label coordinate per sector: the most frequently
# occurring vertex position for each sector id.
coordenadas_labels <- sector_mapa %>%
  dplyr::select(id, long, lat, population) %>%
  unique %>%
  group_by(long, lat) %>%
  mutate(n = n()) %>%
  ungroup() %>%
  arrange(id, desc(n)) %>%
  group_by(id, population) %>%
  summarise(lat = first(lat),
            long = first(long))
# Aggregated counts by sector, by sector+year, and by month.
cuenta_sector <- cuadrantes %>%
  group_by(sector, crime) %>%
  summarise(total = sum(count)) %>%
  ungroup()
cuenta_sector_year <- cuadrantes %>%
  group_by(year, sector, crime) %>%
  summarise(total = sum(count)) %>%
  ungroup()
cuenta_mes <- cuadrantes %>%
  group_by(year, date, crime) %>%
  summarise(total = sum(count)) %>%
  ungroup()
# Distinct years present in the data (used by the UI).
year <- unique(cuenta_sector_year$year)
# Monthly counts per municipality and crime type.
# NOTE(review): na.rm = T should be spelled na.rm = TRUE (T is reassignable).
res_cuadrantes <- cuadrantes %>%
  group_by(year, date, municipio, crime) %>%
  summarise(count = sum(count, na.rm = T) ) %>%
  ungroup()
str(res_cuadrantes)
# Overall monthly totals across all municipalities and crime types.
res_cuadrantes_tot <- cuadrantes %>%
  group_by(year, date) %>%
  summarise(count = sum(count, na.rm = T) ) %>%
  ungroup()
# Sorted choice lists for the UI: crime types and municipalities.
tipo_crimen <- sort(unique(crimenes$crime))
muns <- sort(unique(res_cuadrantes$municipio))
|
568d29d1263bca2b8324634964d5a7783ccb9ac2
|
e75480f25015787abfcea357570c6dcae1954fe1
|
/Lecture_1_20160604.R
|
f9c9f07d4e583db0a5cd4e5df6cc50b851ecd57d
|
[] |
no_license
|
poplock100/NYC_Data_Science
|
2f113a2e83c17fc41996cccc0669b3b05af60e00
|
184437f556e0f9d0033c683cbf607e1fccd61d17
|
refs/heads/master
| 2021-01-17T08:27:03.471781
| 2016-07-16T16:39:40
| 2016-07-16T16:39:40
| 63,492,328
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 12,477
|
r
|
Lecture_1_20160604.R
|
setwd("c:/Users/Ali/NycDataScience/Lecture_1")
5-
1
a <- 1+1
b <- 2
c <- 3
plot(1:10, 2:11)
### install.packages("ggplot2")
library(ggplot2)
### Basic R
### Arithmetic
1+1*3
### Numerical and string vectors (atomic vector)
c(0,1,1,2,3,9)
c("Hello, World!", "I am an R user")
1:6
### Can't combine numeric and string into same vector:
"1"+1
### Error in "1" + 1 : non-numeric argument to binary operator
x<-c(1,"d",5)
print(x)
### This will convert the numerics to strings so that it is all the same class.
### 1. Each atomic vector stores its values as a one-dimensional vector,
### and each atomic vector can ONLY store ONE type of data.
### 2. So, R coerces data types.
### Example:
c(TRUE,1,"A")
### [1] "TRUE" "1" "A"
### But, if logical and numeric:
c(TRUE,1)
### [1] 1 1
### So, converted TRUE to 1, because in R, TRUE means 1.
c(FALSE, 1)
### [1] 0 1
### Vector addition
c(1,2,3,4) + c(3,4,5,6)
###[1] 4 6 8 10
### 1+3 = 4, 2+4 = 6, etc.
### Adding numbers in 1st position together, then numbers in 2nd position together, etc.
c(1,2,3,4) + c(1,2)
### [1] 2 4 4 6
### 1+1 = 2, 2+2 = 4, 3+1 = 4, 4+2 = 6
### Adding first position to first (1+1) and second to second (2+2),
### But then repeating first position of second vector and adding to third (3+1) and
### second position to 4th (4+2)
c(1,2,3,4) + c(1,2,3)
### Warning message:
### In c(1, 2, 3, 4) + c(1, 2, 3) :
### longer object length is not a multiple of shorter object length
### 1st vector has 4, second has 3. So, it's recycling 1st element of 2nd vector
### but it's letting you know that it wasn't 1:1, or it didn't recycle all the way through
### Teacher note:
### R's Vector Recycling:
### If you give R two vectors of unequal length, R will repeat the shorter one
### as long as the longer vector, and then do the math.
### This isn't a permanent change; the shorter vector will be its original length
### If the length of the shorter vector does not divide evenly into the longer one,
### R will return a warning message, but will still do the calculation.
### This behavior is known as vector recycling.
c(1,2)/c(1,2,3)
### [1] 1.0000000 1.0000000 0.3333333
### Warning message:
### In c(1, 2)/c(1, 2, 3) :
### longer object length is not a multiple of shorter object length
### Comparison
c(1,2,3,4)>c(1,2,1,2)
### [1] FALSE FALSE TRUE TRUE
c(1,2,3,4) <= c(1,5)
### [1] TRUE TRUE FALSE TRUE
### Better ways to write lines 86 and 89:
(1:4)>(1:2)
### [1] FALSE FALSE TRUE TRUE
(1:4) <= c(1,5)
### [1] TRUE TRUE FALSE TRUE
(((10+2)*3)-6)/3
### 10 is x.
### Result will always be x.
### Same calculation with sequence 1:4.
x<- 30:90
y <- function(x){
(((x+2)*3)-6)/3
}
y(x)
(((x+2)*3)-6)/3
### Index operator: []
x <- c(5:8)
x[2]
### [1] 6
x[1:3]
### [1] 5 6 7
x[2]; x[2:4]
### [1] 6
### [1] 6 7 8
x[c(1,2,4)]
### [1] 5 6 8
x[-4]
### [1] 5 6 7
### So, it doesn't show the 4th element
x[c(-4,2)]
### Error in x[c(-4, 2)] : only 0's may be mixed with negative subscripts
x[c(-4,-1)]
### [1] 6 7
x[x>6]
### [1] 7 8
###Mathematical Functions.
### Calculate the square root of the sequence 1:4
x <- c(1:4)
sqrt(x)
### [1] 1.000000 1.414214 1.732051 2.000000
### Data frames
city <- c("New York", "San Francisco", "Chicago", "Houston", "Los Angeles")
age <- c(23, 43, 51, 32, 60)
sex <- c("F", "M", "F", "F", "M")
people <- data.frame(city, age, sex)
people
### city age sex
### 1 New York 23 F
### 2 San Francisco 43 M
### 3 Chicago 51 F
### 4 Houston 32 F
### 5 Los Angeles 60 M
###If vectors are not same length:
age1 <- c(23, 43)
people1 <- data.frame(sex, city, age1)
people1
### Error in data.frame(city, age1) :
### arguments imply differing number of rows: 5, 2
### Using the $ symbol to extract a column
people$age; people$sex
### [1] 23 43 51 32 60
### [1] F M F F M
### Levels: F M
people$age > 30 #Conditioned samples extracted from column
### [1] FALSE TRUE TRUE TRUE TRUE
people$city[people$age > 30] # Conditioning across variables
### [1] San Francisco Chicago Houston Los Angeles
### Levels: Chicago Houston Los Angeles New York San Francisco
people[people$age>30] # For dataframe, ALWAYS need to tell it rows & columns
### No, here so not asking for columns and then get this error:
### Error in `[.data.frame`(people, people$age > 30) :
### undefined columns selected
people[people$age>30, ]
### city age sex
### 2 San Francisco 43 M
### 3 Chicago 51 F
### 4 Houston 32 F
### 5 Los Angeles 60 M
### Data frames store data as a sequence of columns.
### Each column can be a different data type.
### Every column in a data frame must be the SAME LENGTH.
people[people$age>30,-1]
### age sex
### 2 43 M
### 3 51 F
### 4 32 F
### 5 60 M
inspections <- read.csv("data/BrooklynInspectionResults.csv", header = TRUE)
inspections[c(66, 70, 71, 72), -2] ### This says: return rows 66,70,71,72, and remove 2nd column.
###NOTE: in environment there is still full dataset We just printed out the above.
class(inspections)
### [1] "data.frame"
class(inspections$VIOLATION.CODE)
### [1] "factor"
### Extract the restaurants surveyed.
restaurants <- inspections$DBA
class(restaurants)
### [1] "factor"
### restaurants is a factor, not a data frame.
###Count the number of unique restaurants in the data set
restaurant_set <- unique(restaurants)
length(restaurant_set)
### [1] 4651
###Another way to count unique restaurants; this way doesn't create a new vector
length(unique(restaurants))
### [1] 4651
class(restaurant_set)
### [1] "factor"
### So, restaurant set is a vector, not a data frame.
dim(restaurants)
### NULL (can't give number of rows and number of columns)
dim(inspections)
### [1] 32221 6 (32,221 rows, 6 columns)
### Limit the data to only those entries with critical violations
inspections <- inspections[inspections$CRITICAL.FLAG == "Critical", ]
### Means, "reduce inspections to keep only rows where CRITICAL.FLAG = "Critical", and all columns.
### So now, inspections has 17,344 rows instead of 32,221
### To install data frames from other languages (SPSS, STATA)
library(foreign)
read.foreign...
### Exporting R Data to a Local File
write.table(people, file = "write/people.csv", sep=",")
### Same thing as:
write.csv(people, file = "write/people2.csv")
### If using "write.csv", don't need to say what the separator is.
### Also, using "write.csv" lines columns up properly when opening in Excel, where
### using "write.table" shifts headers over to the left (adds row number with regular header,
### so header for now second column is in 1st one over the row number).
###Lists
### Lists are most felxible because can have elements of DIFFERENT types and DIFFERENT lengths.
### Kind of like a closet.
### people.list <- list(AgeOfIndividual = age, Location = city, Gender = sex)
### people.list
### $AgeOfIndividual
### [1] 23 43 51 32 60
### $Location
### [1] "New York" "San Francisco" "Chicago" "Houston" "Los Angeles"
### $Gender
### [1] "F" "M" "F" "F" "M"
# Note:
### Lists are like atomic vectors because they group data into a one-dimensional set.
### However, lists do not group together individual values;
### Lists group together R objects, such as atomic vectors, data frames, and other lists.
### Putting dataframe people into this list.
people.list$tabular.data <- people
people.list
### $AgeOfIndividual
### [1] 23 43 51 32 60
### $Location
### [1] "New York" "San Francisco" "Chicago" "Houston" "Los Angeles"
### $Gender
### [1] "F" "M" "F" "F" "M"
### $tabular.data
### city age sex
### 1 New York 23 F
### 2 San Francisco 43 M
### 3 Chicago 51 F
### 4 Houston 32 F
### 5 Los Angeles 60 M
### So, sort of a way to put a bunch of info to set aside.
### Extracting part of list.
people.list$Location
### [1] "New York" "San Francisco" "Chicago" "Houston" "Los Angeles"
### Can use a double index operator to extract elements of a list.
### Example: to extract the last data element, you could do the following:
### Because only one dimension
people.list[[length(people.list)]]
### city age sex
### 1 New York 23 F
### 2 San Francisco 43 M
### 3 Chicago 51 F
### 4 Houston 32 F
### 5 Los Angeles 60 M
people.list[[length(people.list)-1]]
### [1] "F" "M" "F" "F" "M"
### Above two use length() to measure list.
### Use [[]] because list is closet, element is box. So,
### first []opens list (i.e. people.list), and second [] opens box (i.e. tabular data)
### city age sex
### 2 San Francisco 43 M
### 3 Chicago 51 F
### 4 Houston 32 F
### 5 Los Angeles 60 M
people.list[[length(people.list)]][age>30, ]
### This says, "in people.list, give me the last element (length(people.list), but only when age > 30".
### So if have conditional parameter, need to have it after list, not within.
### If using element name, use "$.
### If using position name, use "[[]]":
people.list$Location
### [1] "New York" "San Francisco" "Chicago" "Houston" "Los Angeles"
people.list[[2]]
### [1] "New York" "San Francisco" "Chicago" "Houston" "Los Angeles"
### Can extract data frame, etc from lists:
people2 <- data.frame(people.list[[length(people.list)]])
people2
### city age sex
### 1 New York 23 F
### 2 San Francisco 43 M
### 3 Chicago 51 F
### 4 Houston 32 F
### 5 Los Angeles 60 M
class(people2)
### [1] "data.frame"
### Exercise: create list with personal info, name, gender and age in separate elements:
personal.info <- list(name = "Alison", age = 47, gender = "F")
personal.info
### $name
### [1] "Alison"
### $age
### [1] 47
### $gender
### [1] "F"
### For any one object, we can use the class() function to print its class(es).
class(people)
### [1] "data.frame
### Can use attributes() function to print its properties.
attributes(people)
### $names
### [1] "city" "age" "sex"
### $row.names
### [1] 1 2 3 4 5
### $class
### [1] "data.frame"
### str() can be used to understand an object's class, attributes and sample data.
str(people)
### 'data.frame': 5 obs. of 3 variables:
### $ city: Factor w/ 5 levels "Chicago","Houston",..: 4 5 1 2 3
### $ age : num 23 43 51 32 60
### $ sex : Factor w/ 2 levels "F","M": 1 2 1 1 2
### Exercise: find into on people.list
class(people.list)
### [1] "list"
names(people.list)
### [1] "AgeOfIndividual" "Location" "Gender" "tabular.data"
attributes(people.list)
### $names
### [1] "AgeOfIndividual" "Location" "Gender" "tabular.data"
str(people.list)
### List of 4
### $ AgeOfIndividual: num [1:5] 23 43 51 32 60
### $ Location : chr [1:5] "New York" "San Francisco" "Chicago" "Houston" ...
### $ Gender : chr [1:5] "F" "M" "F" "F" ...
### $ tabular.data :'data.frame': 5 obs. of 3 variables:
### ..$ city: Factor w/ 5 levels "Chicago","Houston",..: 4 5 1 2 3
### ..$ age : num [1:5] 23 43 51 32 60
### ..$ sex : Factor w/ 2 levels "F","M": 1 2 1 1 2
####### MODELS
#A sample model y is a function of variables x1 to xn
### y ~ x1 + x2 + x3 + ... + xn
### For example, we can plot the relationship between distance and speed in the cars
### data set with the following function:
#install.packages("lattice")
library(lattice)
xyplot(dist ~ speed, data=cars)
### Can also run regression and save it in a variable:
model <- lm(dist ~ speed, data = cars)
model
### Call:
### lm(formula = dist ~ speed, data = cars)
### Coefficients:
### (Intercept) speed
### -17.579 3.932 lm(formula = dist ~ speed, data = cars)
### So, if we put it into a variable, it turns it into a list. If we call the variable
### it will give the info, but won't run the plot.
### But, class will be "lm".
class(model)
### [1] "lm"
summary(model)
xyplot(dist ~ speed, data = cars, type = c("p", "r"))
### Here, p = points, r = regression
|
7f96fae83160024e1108049d6582807e6b33e20c
|
82ea05843ae51c2a3a920d2de95b0dfac534ee82
|
/size.R
|
07cca6d386abe2377a07ce58e1ead94f69a9263e
|
[
"MIT"
] |
permissive
|
TestingEquivalence/PowerLawR
|
b15b797b57c6f473f90abf79a59980025f822aea
|
a92022f072f2f2787d9bfd029d86d7ba5911accb
|
refs/heads/master
| 2021-11-08T19:17:39.225981
| 2021-11-05T14:36:55
| 2021-11-05T14:36:55
| 199,118,799
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,249
|
r
|
size.R
|
# Draw one multinomial sample (reproducibly seeded by the sample index i)
# and run the test named in parameter$test on it.
# Returns the test result, or NA for an unrecognized test name.
fullToss <- function(i, parameter) {
  # Distinct, reproducible seed per sample index.
  set.seed(i * 1000000)
  counts <- rmultinom(n = 1, size = parameter$n, prob = parameter$p)
  # switch() on a character EXPR matches exactly, mirroring the original
  # chain of == comparisons; the trailing NA is the fall-through value.
  switch(parameter$test,
         asymptotic = asymptotic_test(alpha = parameter$alpha,
                                      frequency = counts,
                                      kmin = parameter$kmin,
                                      tol = parameter$tol),
         bootstrap = bootstrap_test(alpha = parameter$alpha,
                                    frequency = counts,
                                    kmin = parameter$kmin,
                                    nSimulation = parameter$nSimulation,
                                    tol = parameter$tol),
         MLE = powerLawMLE(counts, kmin = parameter$kmin,
                           kmax = parameter$kmax, 1, 3),
         NA)
}
# Simulate parameter$nSamples tests under a discrete power-law null and
# return the per-sample results (one entry per simulated sample).
#
# Fixes over the original:
#  - the cluster is now released via on.exit(), so it is not leaked if
#    parSapply() errors partway through;
#  - seq_len() replaces c(1:n), which would yield c(1, 0) for n == 0.
sizeAtPowerLaw <- function(parameter){
  # Rescale the support bounds to a unit scale before computing the
  # discrete power-law density.
  parameter$kmin = parameter$kmin / parameter$scale
  parameter$kmax = parameter$kmax / parameter$scale
  parameter$scale = 1
  parameter$p = powerLawDensity(beta = parameter$beta,
                                kmin = parameter$kmin,
                                kmax = parameter$kmax)
  i = seq_len(parameter$nSamples)
  # Run the simulations in parallel; guarantee cluster cleanup even if
  # the parallel apply fails.
  cl = getCluster()
  on.exit(stopCluster(cl), add = TRUE)
  v = parSapply(cl, i, fullToss, parameter)
  return(v)
}
|
8d826949a6208d285adb25dbf825ef4f9fd6a8eb
|
8bd7b647bef8c7b720f1eefb9cf1bfa7f8275b1e
|
/R/constrppmn.R
|
0a879caa9fbe272e62d9de5fe6a1381c3dbd710d
|
[] |
no_license
|
cran/polyapost
|
d1680946acb5a18d25b1ea08599df6efd1624ab0
|
395012a4062a89193e93c569538f4bbf9e1f6d6b
|
refs/heads/master
| 2021-10-13T05:27:37.756110
| 2021-10-07T16:00:02
| 2021-10-07T16:00:02
| 17,698,639
| 0
| 1
| null | null | null | null |
UTF-8
|
R
| false
| false
| 517
|
r
|
constrppmn.R
|
# Metropolis-Hastings sampling over the subset of the simplex defined by
#   A1 p = b1,  A2 p <= b2,  A3 p >= b3
# where the Ai are matrices and the bi vectors of nonnegative reals.
# The ">=" system is folded into the "<=" one (A3 p >= b3 <=> -A3 p <= -b3)
# before delegating to polyaest().
constrppmn <- function(A1, A2, A3, b1, b2, b3, initsol, reps, ysamp, burnin)
{
  checkconstr(A1, A2, A3, b1, b2, b3)
  if (is.null(A3)) {
    combined.A <- A2
    combined.b <- b2
  } else {
    combined.A <- rbind(A2, -A3)
    combined.b <- c(b2, -b3)
  }
  polyaest(A1, combined.A, combined.b, initsol, reps, ysamp, burnin)
}
|
0e326e041a12bfcb18125fd8f735915dfa89c784
|
331635a7ffc237ebc34722d6eb2ae69e0b82c3a2
|
/20181116-pong/graphs.R
|
9dfde3055fd44a79ba745ba43736c35b5c4d2aa3
|
[] |
no_license
|
brmcdonnell/riddlers
|
22192dd07288be99f5fcd659d68be9a65a32f3f7
|
674cb31637ed88f29f37078de1945d367895a2ca
|
refs/heads/master
| 2022-04-20T08:36:00.402698
| 2020-03-28T20:44:54
| 2020-03-28T20:44:54
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 555
|
r
|
graphs.R
|
# Plot the expected number of pong rounds vs N, with a +/- 1 SD ribbon and a
# linear reference fit, and save the figure to ggpong.png.
library(ggplot2)

pong <- read.csv("pong-counts.csv")

# FIX: use `<-` and `$` instead of `=` with string-indexed column assignment
# (idiomatic R; the computed columns are identical).
# Ribbon bounds: mean +/- one standard deviation.
pong$err_high <- pong$means + pong$stds
pong$err_low <- pong$means - pong$stds

p <- ggplot(pong) +
  theme_bw() +
  geom_ribbon(aes(x = N, ymin = err_low, ymax = err_high), fill = "blue", alpha = 0.1) +
  # Linear trend line for comparison against the observed means.
  geom_smooth(aes(x = N, y = means),
              method = "lm", se = FALSE,
              color = "indianred", size = 0.3) +
  geom_line(aes(x = N, y = means), color = "darkblue") +
  labs(x = "N", y = "Expected Number Rounds")
p

ggsave(plot = p, filename = "ggpong.png", dpi = 300, width = 8, height = 5, units = "in")
|
9c2f5ba4eb5cc680f8c9dab4ec6c1192a8e4316d
|
e99fcd77f0cea82f21f48edece5a974b995c2bdf
|
/src/models/kingdoms/XGBoost/evaluate_model.R
|
a6cbb5f4f7ef5d536d9594b4074e7a5b3997b907
|
[
"MIT"
] |
permissive
|
Bohdan-Khomtchouk/codon-usage
|
ef62bfeabf82bcb8533c5b2ebcaf4ed59bf636f8
|
8922418b42d931aea66f7ebcc70d4a84d91faee2
|
refs/heads/master
| 2023-02-18T14:37:22.349674
| 2023-02-06T17:10:11
| 2023-02-06T17:10:11
| 307,536,175
| 2
| 2
| null | null | null | null |
UTF-8
|
R
| false
| false
| 4,384
|
r
|
evaluate_model.R
|
## ----------------------------------------------------------------------------------- ##
## Program to evaluate XGB models using test data
##
## Loads a fitted XGBoost model and a held-out test set, then reports
## one-vs-all confusion statistics, micro/macro F1, precision, recall,
## accuracy and AUC.
## ----------------------------------------------------------------------------------- ##
library(caret)
library(mltools)
library(data.table)
# NOTE(review): setwd() inside a script is machine-specific; adapt per user.
setwd("~/dev/cuAI/CUTG_ML_paper_datasets/") #yours will be different
# load() is expected to define `xgb.model` and `myInput_test` in the global
# environment -- TODO confirm the object names stored in the .RData files.
load(file = "models/kingdoms/XGBoost/xgb_model.RData") #yours will be different
load(file = "processed_data/1e3_5kingdoms/test_1e3_5kingdoms.RData") #yours will be different
model <- xgb.model
myInput_test_labels <- myInput_test$Kingdom #Kingdom labels only
myInput_test_preds <- predict(model, newdata = myInput_test)
# Build one caret confusionMatrix per class, each treating that class as the
# "positive" one (one-vs-all scheme).
cm <- vector("list", length(levels(myInput_test_labels)))
for (i in seq_along(cm)) {
  positive.class <- levels(myInput_test_labels)[i]
  # in the i-th iteration, use the i-th class as the positive class
  cm[[i]] <- confusionMatrix(myInput_test_preds, myInput_test_labels,
                             positive = positive.class)
}
metrics <- c("Precision", "Recall")
# print(cm[[1]]$byClass[, metrics])
# One-vs-all confusion counts per class.
#
# cm: list of objects each carrying a square $table (contingency matrix with
#     class names on rows/cols) and a $positive class label.
# Returns a data frame with one row per class and columns tp, fp, fn.
# (True negatives are not well-defined in the one-vs-all setting.)
get.conf.stats <- function(cm) {
  per.class <- lapply(cm, function(entry) {
    pos <- entry$positive
    others <- colnames(entry$table) != pos
    c(tp = entry$table[pos, pos],
      fp = sum(entry$table[pos, others]),
      fn = sum(entry$table[others, pos]))
  })
  counts <- do.call(rbind, per.class)
  rownames(counts) <- vapply(cm, function(entry) entry$positive, character(1))
  as.data.frame(counts)
}
# Micro-averaged precision pooled over all one-vs-all classes: TP / (TP + FP).
# cm: list of per-class caret confusionMatrix objects built above.
get.precision <- function(cm) {
  cm.summary <- get.conf.stats(cm)
  tp <- sum(cm.summary$tp)
  fp <- sum(cm.summary$fp)
  # (the unused `fn` total computed by the original has been dropped)
  tp / (tp + fp)
}
pr <- get.precision(cm)
# Micro-averaged recall pooled over all one-vs-all classes: TP / (TP + FN).
# cm: list of per-class caret confusionMatrix objects built above.
get.recall <- function(cm) {
  cm.summary <- get.conf.stats(cm)
  tp <- sum(cm.summary$tp)
  fn <- sum(cm.summary$fn)
  # (the unused `fp` total computed by the original has been dropped)
  tp / (tp + fn)
}
re <- get.recall(cm)
# Micro F1: harmonic mean of the micro-averaged precision and recall,
# computed from the pooled one-vs-all tp/fp/fn counts.
get.micro.f1 <- function(cm) {
  totals <- colSums(get.conf.stats(cm))
  precision <- totals[["tp"]] / (totals[["tp"]] + totals[["fp"]])
  recall <- totals[["tp"]] / (totals[["tp"]] + totals[["fn"]])
  2 * ((precision * recall) / (precision + recall))
}
micro.f1 <- get.micro.f1(cm)
# print(paste0("Micro F1 is: ", round(micro.f1, 5)))
##### Macro F1
# Macro F1: harmonic mean of the class-averaged recall and precision.
# caret repeats the full per-class byClass table in every confusionMatrix,
# so the first list entry is sufficient.
get.macro.f1 <- function(cm) {
  by.class <- cm[[1]]$byClass
  avg.recall <- sum(by.class[, "Recall"]) / nrow(by.class)
  avg.precision <- sum(by.class[, "Precision"]) / nrow(by.class)
  2 * ((avg.recall * avg.precision) / (avg.recall + avg.precision))
}
# Macro-averaged F1 for the whole model (uses the shared per-class table).
macro.f1 <- get.macro.f1(cm)
######## Accuracy
# Plain accuracy: fraction of predictions matching the reference labels.
calculate.accuracy <- function(predictions, ref.labels) {
  return(length(which(predictions == ref.labels)) / length(ref.labels))
}
# Weighted accuracy: per-class accuracies combined with caller-supplied
# weights (one non-negative weight per level of `ref.labels`, summing to 1).
calculate.w.accuracy <- function(predictions, ref.labels, weights) {
  lvls <- levels(ref.labels)
  if (length(weights) != length(lvls)) {
    stop("Number of weights should agree with the number of classes.")
  }
  # FIX: compare the weight total with a numeric tolerance instead of `!=`,
  # which rejects valid inputs such as rep(1/3, 3) due to float rounding.
  if (!isTRUE(all.equal(sum(weights), 1))) {
    stop("Weights do not sum to 1")
  }
  accs <- lapply(lvls, function(x) {
    idx <- which(ref.labels == x)
    return(calculate.accuracy(predictions[idx], ref.labels[idx]))
  })
  # BUG FIX: the weights were validated but never applied -- the original
  # returned the unweighted mean of the per-class accuracies.
  acc <- sum(weights * unlist(accs))
  return(acc)
}
# Overall accuracy on the held-out test set.
acc <- calculate.accuracy(myInput_test_preds, myInput_test_labels)
print(paste0("Accuracy is: ", round(acc, 2)))
# Micro-averaged AUC via a multi-class ROC.
# NOTE(review): multi_roc() appears to come from the multiROC package, which
# is never attached in this script -- confirm it is loaded elsewhere. Also
# note that predict() here drops the first column (newdata = myInput_test[, -1])
# unlike the earlier class-prediction call.
calculate.AUC <- function(myInput_test){
  test.labels <- data.table(as.factor(myInput_test$Kingdom))
  test.labels <-one_hot(test.labels)
  model.preds <- as.data.frame(predict(model, newdata = myInput_test[, -1], type = "prob"))
  test.labels <- cbind(test.labels, model.preds)
  # Hard-coded 5-kingdom column layout expected by multi_roc().
  colnames(test.labels) <- c("archaea_true", "bacteria_true", "eukaryote_true", "virus_true", "bacteriophage_true", "archaea_pred_xgb", "bacteria_pred_xgb", "eukaryote_pred_xgb", "virus_pred_xgb", "bacteriophage_pred_xgb")
  model.roc <- multi_roc(test.labels)
  return(round(model.roc$AUC$xgb$micro, 4))
}
auc <- calculate.AUC(myInput_test)
# Final report, rounded to a common number of digits.
round_digits <- 4
print(cm[[1]]$byClass[, metrics])
print(paste0("Micro F1 is: ", round(micro.f1, round_digits)))
print(paste0("Macro F1 is: ", round(macro.f1, round_digits)))
print(paste0("Precision is: ", round(pr, round_digits)))
print(paste0("Recall is: ", round(re, round_digits)))
print(paste0("Accuracy is: ", round(acc, round_digits)))
print(paste0("Model AUC is: ", round(auc, round_digits)))
|
72c92e767a4dfd2e686e91b95443239761a42942
|
88e976cd1f2d50f5fa30129df589bc00d6bee3ac
|
/R/Crossover.R
|
3e57bd5b04bb61364190a8341bad8e15362e2f2b
|
[] |
no_license
|
yanrongmu/GA
|
d132cb495af9c3f280ee5bbda04db17572bbeef2
|
56376a54b207493932f01bc59fa5e6860e83b0d9
|
refs/heads/master
| 2021-08-29T20:47:19.801998
| 2017-12-14T23:58:10
| 2017-12-14T23:58:10
| 113,373,948
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,191
|
r
|
Crossover.R
|
############ This is the Crossover function #########
# Produce the kid population from pairs of parents via single-point crossover.
#
# Parents: list with matrices Parent1 and Parent2 (one mating pair per row,
#          one gene per column).
# Returns a matrix of 2 * nrow(Parent1) children: for each pair, one child
# takes the prefix from Parent1 and the suffix from Parent2, the other the
# reverse.
Crossover <- function(Parents) {
  mother <- Parents$Parent1
  father <- Parents$Parent2
  n.pairs <- nrow(mother)
  n.genes <- ncol(mother)
  # Exactly one crossover event per pair, strictly inside the chromosome
  # (positions 2 .. n.genes - 1, never 0 or n.genes).
  cut.points <- sample(x = 2:(n.genes - 1), size = n.pairs, replace = TRUE)
  # from.mother[i, j] is 1 when gene j of pair i comes from Parent1, i.e.
  # when j lies at or before the pair's crossover point; otherwise 0.
  from.mother <- outer(cut.points, seq_len(n.genes), ">=") * 1
  first.kids <- mother * from.mother + father * (1 - from.mother)
  second.kids <- father * from.mother + mother * (1 - from.mother)
  rbind(first.kids, second.kids)
}
|
851dcc3d7ddf34c477e9e399b591da2adc9b21af
|
fb3c0532801f4e30484e0e45eac2f320fe926fef
|
/R_code/2_TOM_Signed_10samples_spearman.R
|
5e8abc1a6285e8f991ef2c0adca5063f5b1c7b9c
|
[] |
no_license
|
cyntsc/meta-data-arabidopsis
|
588a7250c2351b2cec98dc5d8a825e84b05147cc
|
6f3f729ae183b9ca9bc1454c84a40244aa19c49d
|
refs/heads/master
| 2023-03-17T07:30:35.525545
| 2021-03-18T01:05:41
| 2021-03-18T01:05:41
| 348,137,458
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 13,456
|
r
|
2_TOM_Signed_10samples_spearman.R
|
##########################################################
###
### Goal: WGCNA Std / Signed Ntw with dynamic cut off (spearman)
###
### Method: TOM calculation: adjacency ( SIGNED NTW / spearman corr)
###
###
### Made by: Cynthia Soto
### Date: January 29, 2021 / Last update: xxxxx
###
### This is a PhD project associated at CICY Mx with the technical support (HPC) of
### Dr.Childs Lab at MSU.
###
### DATA ASSUMPIONS:
### 1) Dataset is composed by 10 samples of A. Thaliana: ** I N F E C T E D **
### 2) Row RNASeq data comes from SRA-NCBI
### Data were cleaned, aligned with STAR and quantified with HTSeq-count Tool
### 4) Data were tested with several quality tests before to include in the expression matrix
### FastQC / HTSeq-qa / Stats: Central-Measurements & descriptive stat
### 5) 100% ceros across all samples were removed
### Reference: http://pklab.med.harvard.edu/scw2014/WGCNA.html
##########################################################
#getwd(); ## setwd('../../data/run/cyntsc');
# NOTE(review): clearing the workspace from inside a script is discouraged;
# kept for backward compatibility with the original workflow.
rm(list = ls());
#install.packages(c("dynamicTreeCut", "cluster", "flashClust", "Hmisc", "reshape", "foreach", "doParallel") )
#source("http://bioconductor.org/biocLite.R")
#biocLite("impute")
library(tidyverse)
## This library contains all that is needed to use hierarchical clustering
library(dynamicTreeCut)
library(flashClust)
library(WGCNA);
## Hmisc needs lattice, survival and Formula; it lets you calculate percentiles and more.
## BUG FIX: library() attaches a single package per call. The original
## library(lattice, survival, Formula) passed "survival" and "Formula" to the
## `help` and `pos` arguments instead of loading them.
library(lattice);
library(survival);
library(Formula);
library(Hmisc);
## The here package lets you set the top level of your project folder as "here"
## and specify where things live relative to that location.
library(here);
here();
here::here(); # Top level dir: /data/run/cyntsc/Project_athal_wgcna
## Allow multi-threads (also set in the bash environment)
allowWGCNAThreads();
ALLOW_WGCNA_THREADS=12;
## Initial variables
options(stringsAsFactors = FALSE);
enableWGCNAThreads();
## Load data
here("files", "data", "all_log2_tidy.csv");
## 20 samples are included
athalData3 <- read.csv(here("data", "all_log2_tidy.csv"), header=TRUE, row.names='Genes', sep='\t')
dim(athalData3);
names(athalData3);
## get means for variables in data frame athalData3, excluding missing values
## (bare expressions below print when the script is run interactively)
sapply(athalData3, mean, na.rm=TRUE)
sapply(athalData3, range, na.rm=TRUE)
sapply(athalData3, sd, na.rm=TRUE)
sapply(athalData3, quantile, na.rm=TRUE)
summary(athalData3)
## Samples of interest are filtered: 2 samples are removed because they do not match the expected % of read alignment.
athalData3 = subset(athalData3, select = -c(Ss30,Ss30.1)); # 24326 genes (rows) x 19 samples (cols)
athalData3 = subset(athalData3, select =c(9:18));
dim(athalData3); ## 10 samples are kept after applying these filters.
class(athalData3); #Check data object type: is a data.frame
stats_infected = describe(athalData3) ### It is a generalization of SAS UNIVARIATE
stats_infected;
## WGCNA requires genes be given in the columns ***********************************************
## double check evaluation to avoid the error caused by empty correlation or NANs
## this code builds a matrix with all genes that are zero everywhere
athalData3 == 0
rowSums(athalData3 == 0) #sum the number of ceros per genes
rowSums(athalData3 == 0) >=10
sum(rowSums(athalData3 == 0) >=10)
athal_withceros=athalData3[rowSums(athalData3 == 0) >=10, ] #delete rows (genes) that sum cero
## this code keeps only genes with fewer than 4 zero counts across samples
rowSums(athalData3 == 0) < 4
sum(rowSums(athalData3 == 0) < 4)
athalData3=athalData3[rowSums(athalData3 == 0) < 4, ] #delete rows (genes) that sum cero
dim(athalData3)
sum(rowSums(athalData3 == 0) >= 4)
## Pull the names and transpose data (WGCNA wants samples in rows, genes in columns)
gene.names=rownames(athalData3)
#gene.names
athalData3=as.data.frame(t(athalData3))
datExpr=athalData3
any(is.na(datExpr))
#####################################################################################################################
##
## https://rdrr.io/cran/WGCNA/man/pickSoftThreshold.html
## Choosing a soft-threshold to fit a scale-free topology to the network
## pickSoftThreshold function offers an analysis of scale free topology for soft-thresholding
## signed ntw preserve the natural continuity of the correlation (+ / -), contrary to what an unsigned ntw does
## Argument type determines whether a correlation (type one of "unsigned", "signed", "signed hybrid"), or a distance network (type equal "distance") will be calculated
## In correlation networks the adjacency is constructed from correlations (values between -1 and 1, with high numbers meaning high similarity). In distance networks, the adjacency is constructed from distances (non-negative values, high values mean low similarity).
##
## For similarity (corr) methods available are: "spearman, pearson and kendall"
## For distance (dist) methods available are: "euclidean", "maximum", "manhattan", "canberra", "binary" or "minkowski"
#####################################################################################################################
# Candidate soft-thresholding powers: 1..10 then even values 12..20.
powers = c(c(1:10), seq(from = 12, to=20, by=2));
sft=pickSoftThreshold(
  datExpr,
  dataIsExpr = TRUE,
  #RsquaredCut = 0.85, # desired minimum scale free topology fitting index R^2.
  powerVector = powers,
  corFnc = cor, # cor: Fast calculations of Pearson correlation
  corOptions = list(use = 'p', method = 'spearman'), # pairwise-complete Spearman correlation
  networkType = "signed"); # "unsigned", "signed", "signed hybrid", "distance"
#warnings()
#sft
##Plot the results *************************************************************************************************
sizeGrWindow(9, 7)
par(mfrow = c(1,2));
cex1 = 0.9;
# Scale-free topology fit index as a function of the soft-thresholding power
plot(sft$fitIndices[,1], -sign(sft$fitIndices[,3])*sft$fitIndices[,2],xlab="Soft Threshold (power)",ylab="Scale Free Topology Model Fit, signed R^2 (pearson)",type="n", main = paste("Scale independence. 10 samples."));
text(sft$fitIndices[,1], -sign(sft$fitIndices[,3])*sft$fitIndices[,2],labels=powers,cex=cex1,col="red");
# Red line corresponds to using an R^2 cut-off
abline(h= 0.5150,col="red")
#abline(h=-0.90,col="blue")
# Mean connectivity as a function of the soft-thresholding power
plot(sft$fitIndices[,1], sft$fitIndices[,5],xlab="Soft Threshold (power)",ylab="Mean Connectivity", type="n",main = paste("Mean connectivity"))
text(sft$fitIndices[,1], sft$fitIndices[,5], labels=powers, cex=cex1,col="red")
abline(h=1460,col="blue");
# Power chosen from the plots above for the signed network.
softPower = 14;
###########################################################################
#
# Generating adjacency and TOM similarity matrices based on the selected softpower
#
###########################################################################
#calculate the adjacency matrix
adj= adjacency(datExpr,type = "signed", power = softPower);
dim(adj)
adj[1:100]
#CSC: some validation to avoid the error caused by NANs. Be careful to check your data before you continue
any(is.na(adj)) #check for any NANs
sum(is.na(adj)) #sum the number of NANs
adj[1:100]
any(is.infinite(adj))
any(is.null(adj))
# na.omit(datExpr) helps in omitting NA
#turn adjacency matrix into topological overlap to minimize the effects of noise and spurious associations
#TOM=TOMsimilarityFromExpr(na.omit(datExpr),networkType = "signed", TOMType = "signed", power = softPower);
TOM=TOMsimilarityFromExpr(datExpr,
                          networkType = "signed",
                          TOMType = "signed",
                          power = softPower);
any(is.na(TOM)) #CSC
TOM[1:100]
dim(TOM)
##Pull genes names to the TOM matrix
SubGeneNames=gene.names
colnames(TOM)=rownames(TOM)=SubGeneNames;
# Dissimilarity used for clustering: 1 - topological overlap.
dissTOM=1-TOM
###########################################################################
#
# Module detection
#
###########################################################################
#hierarchical clustering of the genes based on the TOM dissimilarity measure **** ERROR **** NA/NaN/Inf in foreign function call (arg 11)
geneTree = flashClust(as.dist(dissTOM),method="average");
# Plot the results
sizeGrWindow(9, 12)
#plot the resulting clustering tree (dendrogram)
plot(geneTree, xlab="Gene clusters", sub="",cex=0.3)
# Set the minimum module size
minModuleSize = 20;
## Module identification using dynamic tree cut *******************************
## Function for pruning of Hierarchical Clustering Dendrograms
dynamicMods = cutreeDynamic(dendro = geneTree,
                            method="tree",
                            minClusterSize = minModuleSize);
#dynamicMods = cutreeDynamic(dendro = geneTree,
#                           distM = dissTOM,
#                           method="hybrid",
#                           deepSplit = 2,
#                           pamRespectsDendro = TRUE, #the PAM stage will respect the dendrogram in the sense that objects and small clusters will only be assigned to clusters that belong to the same branch that the objects or small clusters being
#                           minClusterSize = minModuleSize,
#                           verbose=1);
## when cutHeight not given, for method=="tree" it defaults to 0.99, for method=="hybrid" it defaults to 99% of the range between the 5th percentile and the maximum of the joining heights on the dendrogram
##gives the module labels and the size of each module. Label 0 is reserved for unassigned genes
sort(table(dynamicMods), decreasing = TRUE)
##Plot the module assignment under the dendrogram; note: The grey color is reserved for unassigned genes
dynamicColors = labels2colors(dynamicMods)
sort(table(dynamicColors), decreasing = TRUE)
plotDendroAndColors(geneTree,
                    dynamicColors, "Dynamic Tree Cut",
                    dendroLabels = FALSE,
                    hang = 0.03,
                    addGuide = TRUE,
                    guideHang = 0.05,
                    main = "Gene dendrogram and module colors")
##discard the unassigned genes, and focus on the rest **********************************
restGenes= (dynamicColors != "grey") #restGenes= (dynamicColors == "mediumpurple2")
length(restGenes)
###########################################################################
#
##Calculation of the topological overlap matrix
#
###########################################################################
# TOM dissimilarity restricted to the assigned (non-grey) genes.
diss1=1-TOMsimilarityFromExpr(datExpr[,restGenes],
                              # corType = "bicor", #"pearson" and "bicor"
                              # networkType = "signed",
                              power = softPower)
colnames(diss1) =rownames(diss1) =SubGeneNames[restGenes]
hier1=flashClust(as.dist(diss1), method="average" ) #flashClust is the same as hclust but faster
plotDendroAndColors(hier1,
                    dynamicColors[restGenes], "Dynamic Tree Cut",
                    dendroLabels = FALSE,
                    hang = 0.03,
                    addGuide = TRUE,
                    guideHang = 0.05,
                    main = "Gene dendrogram and module colors (cor.type=bicor)")
##consult the module's color codes
standardColors(); #with NULL all (approx. 450) colors will be returned
##set the DIAGONAL of the dissimilarity to NA ******************************
diag(diss1) = NA;
##Visualize the TOM plot. Raise the dissimilarity matrix to the power of 4 to bring out the module structure
sizeGrWindow(7,7)
TOMplot(diss1,
        hier1,
        as.character(dynamicColors[restGenes]));
###########################################################################
#
## Extract modules
#
###########################################################################
module_colors= setdiff(unique(dynamicColors), "grey")
#module_colors= (unique(dynamicColors), "mediumpurple2")
##module_colors
# Write one gene-list file per module, named module_<color>.txt.
for (color in module_colors){
  module=SubGeneNames[which(dynamicColors==color)]
  write.table(module, paste("module_",color, ".txt",sep=""), sep="\t", row.names=FALSE, col.names=FALSE,quote=FALSE)
}
#module
# Order columns by module color and scale each gene by its maximum for the heatmap.
module.order <- unlist(tapply(1:ncol(datExpr),as.factor(dynamicColors),I))
m<-t(t(datExpr[,module.order])/apply(datExpr[,module.order],2,max))
heatmap(t(m),
        zlim=c(0,1),
        col=gray.colors(100),
        Rowv=NA,
        Colv=NA,
        labRow=NA,
        scale="none",
        RowSideColors=dynamicColors[module.order])
#We can now look at the module gene listings and try to interpret their functions .. for instance using http://amigo.geneontology.org/rte
###########################################################################
#
# Quantify module similarity by eigengene correlation.
# Eigengenes: Module representatives
#
###########################################################################
MEList = moduleEigengenes(datExpr, colors = dynamicColors)
MEs = MEList$eigengenes
plotEigengeneNetworks(MEs, "", marDendro = c(0,4,1,2), marHeatmap = c(3,4,1,2))
|
7edb21c3333332df9e2cf6fe9061c169999e49b0
|
12533f4f5685c6a5b75ab91b918bd0677a332395
|
/data_migration.R
|
e5ac81a821b8c32706baa4982ff41fb16b9b62b3
|
[] |
no_license
|
Starryz/VW-Summer-Internship
|
7b3f9786f9ac80ae48006883aebb61b16eefbeb9
|
d093b9bf5aa2588727441a5515431e274c5a46d0
|
refs/heads/master
| 2020-06-24T04:48:37.540158
| 2019-08-08T21:07:44
| 2019-08-08T21:07:44
| 198,852,841
| 2
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 12,237
|
r
|
data_migration.R
|
library(readxl)
library(tidyverse)
library(stringi)
library(readr)
# help match --------------
# Lookup tables used to attach Salesforce organization IDs (by official name
# or by alias) and Zenn IDs to the grant records below.
## for commit
extract_c <- read_csv("/Volumes/GoogleDrive/My Drive/Sustainable_Vision/salesforce_examples/Organization_extract.csv") %>%
  rename("GRANTED_INSTITUTION__C" = "NAME") %>%
  select(-ORGANIZATION_ALIAS_NAME__C)
extract_alias_c <- read_csv("/Volumes/GoogleDrive/My Drive/Sustainable_Vision/salesforce_examples/Organization_extract.csv") %>%
  rename("GRANTED_INSTITUTION__C" = "ORGANIZATION_ALIAS_NAME__C") %>%
  select(-NAME)
## for proposal
extract_p <- read_csv("/Volumes/GoogleDrive/My Drive/Sustainable_Vision/salesforce_examples/Organization_extract.csv") %>%
  rename("APPLYING_INSTITUTION_NAME__C" = "NAME") %>%
  select(-ORGANIZATION_ALIAS_NAME__C)
extract_alias_p <- read_csv("/Volumes/GoogleDrive/My Drive/Sustainable_Vision/salesforce_examples/Organization_extract.csv") %>%
  rename("APPLYING_INSTITUTION_NAME__C" = "ORGANIZATION_ALIAS_NAME__C") %>%
  select(-NAME)
## match Zenn ID and team name
match <- read_excel("~/Desktop/Sustainable_Vision/sustainable_vision_grants_2013_proposals.xlsx",
                    col_types = c("numeric", "text", "text",
                                  "text", "text", "numeric", "text",
                                  "text", "text", "text", "text", "text",
                                  "text", "text", "numeric", "text",
                                  "numeric", "text", "text", "text",
                                  "numeric", "text", "text", "text",
                                  "numeric", "text", "text", "numeric",
                                  "text", "text", "numeric", "text",
                                  "text", "text", "text", "text")) %>%
  select(`Zenn ID`, `Grant Title`, `Institution Name`)
match_c <- match %>%
  rename("GRANTED_INSTITUTION__C" = "Institution Name") %>%
  select(-`Grant Title`)
match_p <- match %>%
  rename("NAME" = "Grant Title") %>%
  select(-`Institution Name`)
# commits ------------------
# Build the commit (funded-grant) load files for 2013 and 2012.
commits_2013 <- read_excel("~/Desktop/Sustainable_Vision/sustainable_vision_grants_2013_proposals.xlsx") %>%
  filter(`Application Status` == "funded") %>%
  rename(
    "GRANT_ID__C" = "External Proposal ID",
    "GRANTED_INSTITUTION__C" = "Institution Name",
    "AMOUNT_DISBURSED__C" = "Amount Disbursed",
    "PROGRAM__C" = "Type"
  ) %>%
  mutate(
    "DISBURSEMENT_REQUEST_AMOUNT__C" = as.double(AMOUNT_DISBURSED__C),
    "GRANT_STATUS__C" = stri_trans_totitle(`Application Status`),
    "AWARD_LETTER_SENT__C" = as.Date(`Grant Letter Sent`),
    "AWARD_LETTER_SIGNED__C" = as.Date(`Grant Letter Signed`),
    "GRANT_START_DATE__C" = as.Date(`Actual Period Begin`),
    "GRANT_END_DATE__C" = as.Date(`Actual Period End`),
    "PAYMENT_STATUS__C" = "Paid",
    "AMOUNT_APPROVED__C" = as.double(`Amount Approved`)
  ) %>%
  # NOTE(review): AMOUNT_APPROVED__C is listed twice in this select();
  # harmless (dplyr keeps one copy) but presumably unintentional.
  select(
    AMOUNT_APPROVED__C, `GRANT_ID__C`, `AMOUNT_APPROVED__C`, `AWARD_LETTER_SENT__C`, `AWARD_LETTER_SIGNED__C`,
    `PAYMENT_STATUS__C`, `GRANT_START_DATE__C`, `PROGRAM__C`, `GRANT_END_DATE__C`,
    `GRANT_STATUS__C`, `GRANTED_INSTITUTION__C`, `DISBURSEMENT_REQUEST_AMOUNT__C`
  ) %>%
  # Attach organization IDs, trying the official name first, then the alias.
  left_join(extract_c) %>%
  left_join(extract_alias_c, by = "GRANTED_INSTITUTION__C") %>%
  mutate(ID = coalesce(ID.x, ID.y)) %>%
  select(-ID.x, -ID.y) %>%
  left_join(match_c) %>%
  write_csv("new/commits_2013.csv")
commits_2012 <- read_excel("~/Desktop/Sustainable_Vision/sustainable_vision_grants_2012_proposals.xlsx") %>%
  filter(`Application Status` == "funded") %>%
  rename(
    "GRANT_ID__C" = "External Proposal ID",
    "GRANTED_INSTITUTION__C" = "Institution Name",
    "AMOUNT_DISBURSED__C" = "Amount Disbursed",
    "PROGRAM__C" = "Type"
  ) %>%
  mutate(
    "GRANT_STATUS__C" = stri_trans_totitle(`Application Status`),
    "DISBURSEMENT_REQUEST_AMOUNT__C" = as.double(AMOUNT_DISBURSED__C),
    "AWARD_LETTER_SENT__C" = as.Date(`Grant Letter Sent`),
    "AWARD_LETTER_SIGNED__C" = as.Date(`Grant Letter Signed`),
    "GRANT_START_DATE__C" = as.Date(`Actual Period Begin`),
    "GRANT_END_DATE__C" = as.Date(`Actual Period End`),
    "PAYMENT_STATUS__C" = "Paid",
    "AMOUNT_APPROVED__C" = as.double(`Amount Approved`)
  ) %>%
  select(
    `GRANT_ID__C`, `AMOUNT_APPROVED__C`, `AWARD_LETTER_SENT__C`, `AWARD_LETTER_SIGNED__C`,
    `PAYMENT_STATUS__C`, `GRANT_START_DATE__C`, `PROGRAM__C`, `GRANT_END_DATE__C`,
    `GRANT_STATUS__C`, `GRANTED_INSTITUTION__C`, `DISBURSEMENT_REQUEST_AMOUNT__C`
  ) %>%
  write_csv("new/commits_2012.csv")
# proposal -------------------------------
# Build the proposal load files for 2013 and 2012; the 2013 frame is also
# reused below when building team and membership records.
proposal_2013 <- read_excel("~/Desktop/Sustainable_Vision/sustainable_vision_grants_2013_proposals.xlsx") %>%
  rename(
    "NAME" = "Grant Title",
    #"APPLYING_INSTITUTION_NAME__C" = "Institution Name",
    "PROGRAM_COHORT_RECORD_TYPE__C" = "Type",
    "PROJECT_DESCRIPTION_PROPOSAL_ABSTRACT__C" = "Proposal Summary",
    "EXTERNAL_PROPOSAL_ID__C" = "External Proposal ID"
  ) %>%
  mutate(
    "RECORDTYPEID" = "01239000000Ap02AAC",
    "STATUS__C" = stri_trans_totitle(`Application Status`),
    "PROPOSAL_NAME_LONG_VERSION__C" = as.character(NAME),
    "DATE_CREATED__C" = as.Date(`Date Created`),
    "DATE_SUBMITTED__C" = as.Date(`Date Application Submitted`),
    "GRANT_PERIOD_END__C" = as.Date(`Actual Period End`),
    "GRANT_PERIOD_START__C" = as.Date(`Actual Period Begin`),
    "AMOUNT_REQUESTED__C" = as.double(`Amount Requested`),
    "ZENN_ID__C" = as.double(`Zenn ID`),
    "AWARD_AMOUNT__C" = as.double(`Amount Approved`),
    "PROGRAM_COHORT__C" = "a2C39000002zYt4EAE",
    "PROPOSAL_FUNDER__C" = "The Lemelson Foundation",
    # Normalize known institution-name variants to their Salesforce spelling.
    "APPLYING_INSTITUTION_NAME__C" = ifelse(`Institution Name` == "University of Tennessee, Knoxville", "The University of Tennessee",
                                            ifelse(`Institution Name` == "Cogswell Polytechnical College", "Cogswell College",
                                                   ifelse(`Institution Name` == "Arizona State University at the Tempe Campus", "Arizona State University", `Institution Name`)))
  ) %>%
  select(
    NAME, RECORDTYPEID, AMOUNT_REQUESTED__C, PROPOSAL_NAME_LONG_VERSION__C, APPLYING_INSTITUTION_NAME__C,
    AWARD_AMOUNT__C, DATE_CREATED__C, DATE_SUBMITTED__C, GRANT_PERIOD_END__C,
    GRANT_PERIOD_START__C, PROGRAM_COHORT_RECORD_TYPE__C,
    PROJECT_DESCRIPTION_PROPOSAL_ABSTRACT__C, ZENN_ID__C, STATUS__C,
    EXTERNAL_PROPOSAL_ID__C, PROGRAM_COHORT__C
  ) %>%
  left_join(extract_p) %>%
  left_join(extract_alias_p, by = "APPLYING_INSTITUTION_NAME__C") %>%
  mutate(ID = coalesce(ID.x, ID.y)) %>%
  select(-ID.x, -ID.y) %>%
  rename("APPLYING_INSTITUTION__C" = "ID") %>%
  left_join(match_p) %>%
  select( - `Zenn ID`)
# Strip control characters from abstracts so the CSV loads cleanly.
proposal_2013$PROJECT_DESCRIPTION_PROPOSAL_ABSTRACT__C <- str_replace_all(proposal_2013$PROJECT_DESCRIPTION_PROPOSAL_ABSTRACT__C, "[:cntrl:]", " ")
proposal_2013 %>% write_csv("new/proposal_2013.csv")
# NOTE(review): the next line is a stray expression whose result is discarded
# (looks like a leftover regex experiment) -- candidate for removal.
str_remove_all("\u0093systems\u0094", "[[\\[u]+[0-9]*]]")
proposal_2012 <- read_excel("~/Desktop/Sustainable_Vision/sustainable_vision_grants_2012_proposals.xlsx") %>%
  rename(
    "NAME" = "Grant Title",
    "APPLYING_INSTITUTION_NAME__C" = "Institution Name",
    "PROGRAM_COHORT_RECORD_TYPE__C" = "Type",
    "PROJECT_DESCRIPTION_PROPOSAL_ABSTRACT__C" = "Proposal Summary",
    "EXTERNAL_PROPOSAL_ID__C" = "External Proposal ID"
  ) %>%
  mutate(
    "STATUS__C" = stri_trans_totitle(`Application Status`),
    "PROPOSAL_NAME_LONG_VERSION__C" = as.character(NAME),
    "DATE_CREATED__C" = as.Date(`Date Created`),
    "DATE_SUBMITTED__C" = as.Date(`Date Application Submitted`),
    "GRANT_PERIOD_END__C" = as.Date(`Actual Period End`),
    "GRANT_PERIOD_START__C" = as.Date(`Actual Period Begin`),
    "PROGRAM__C" = as.character(PROGRAM_COHORT_RECORD_TYPE__C),
    "AMOUNT_REQUESTED__C" = as.double(`Amount Requested`),
    "ZENN_ID__C" = as.double(`Zenn ID`),
    "AWARD_AMOUNT__C" = as.double(`Amount Approved`),
    "PROGRAM_COHORT__C" = "a2C39000002zYtNEAU"
  ) %>%
  select(
    NAME, AMOUNT_REQUESTED__C, PROPOSAL_NAME_LONG_VERSION__C, APPLYING_INSTITUTION_NAME__C,
    AWARD_AMOUNT__C, DATE_CREATED__C, DATE_SUBMITTED__C, GRANT_PERIOD_END__C,
    GRANT_PERIOD_START__C, PROGRAM_COHORT_RECORD_TYPE__C,
    PROJECT_DESCRIPTION_PROPOSAL_ABSTRACT__C, ZENN_ID__C, STATUS__C, PROGRAM__C,
    EXTERNAL_PROPOSAL_ID__C, PROGRAM_COHORT__C
  ) %>%
  write_csv("new/proposal_2012.csv")
## needs to change proposal summary
# team --------------------------
# Build the team load files; ALIAS__C only carries names too long for the
# standard 80-character Salesforce name field.
team_2013 <- read_excel("~/Desktop/Sustainable_Vision/sustainable_vision_grants_2013_proposals.xlsx") %>%
  rename(
    "NAME" = "Grant Title"
  ) %>%
  mutate(
    "RECORDTYPEID" = "a2639000000E4XIAA0",
    "ALIAS__C" = ifelse(nchar(NAME) > 80, NAME, "")
  ) %>%
  select(
    NAME, RECORDTYPEID, ALIAS__C
  ) %>%
  left_join(match_p) %>%
  write_csv("new/team_2013.csv")
team_2012 <- read_excel("~/Desktop/Sustainable_Vision/sustainable_vision_grants_2012_proposals.xlsx") %>%
  rename(
    "NAME" = "Grant Title"
  ) %>%
  mutate(
    "END_DATE_OF_FIRST_PROGRAM_COMPLETED__C" = as.Date(`Actual Period End`),
    "PROPOSAL_TOTAL_FUNDED_AWARD_AMOUNT__C" = as.double(`Amount Approved`)
  ) %>%
  select(
    NAME, PROPOSAL_TOTAL_FUNDED_AWARD_AMOUNT__C, END_DATE_OF_FIRST_PROGRAM_COMPLETED__C
  ) %>%
  left_join(match_p) %>%
  write_csv("new/team_2012.csv")
# membership -----------------------------
# Build the 2013 membership load file: one row per (team, advisor) pairing.

# Team-level dates and proposal status, keyed by team name.
membership_2013_1a <- read_excel("~/Desktop/Sustainable_Vision/sustainable_vision_grants_2013_proposals.xlsx") %>%
  mutate(
    "START_DATE__C" = as.Date(`Actual Period Begin`),
    "END_DATE__C" = as.Date(`Actual Period End`)
  ) %>%
  rename(
    "TEAM_NAME_TEXT_ONLY_HIDDEN__C" = "Grant Title",
    "PROPOSAL_STATUS__C" = "Application Status"
  ) %>%
  select(
    START_DATE__C, END_DATE__C, TEAM_NAME_TEXT_ONLY_HIDDEN__C, PROPOSAL_STATUS__C
  )
# BUG FIX: the original left a trailing `%>%` after the select() above, which
# piped the data frame into the next assignment and made the script fail.

# Zenn IDs keyed by team name, taken from the proposal load frame.
membership_2013_1b <- proposal_2013 %>%
  select(NAME, ZENN_ID__C) %>%
  rename(
    "TEAM_NAME_TEXT_ONLY_HIDDEN__C" = "NAME"
  )
membership_2013_2 <- merge(membership_2013_1a, membership_2013_1b) # add ZENN ID
advisors <- read_excel("~/Desktop/Sustainable_Vision/sustainable_vision_grants_2013_advisors.xlsx") %>%
  rename(
    "ZENN_ID__C" = "Zenn ID"
  )
# Join advisors onto their teams (by Zenn ID) and shape the Salesforce columns.
membership_2013 <- merge(membership_2013_2, advisors) %>%
  mutate(
    "START_DATE__C" = as.Date(`START_DATE__C`),
    "END_DATE__C" = as.Date(`END_DATE__C`),
    "PROGRAM_TYPE_FORMULA__C" = "Sustainable Vision"
  ) %>%
  unite(
    "FULL_NAME__C", c(`First Name`, `Last Name`), sep = " ", remove = FALSE
  ) %>%
  rename(
    "ROLE__C" = "Team Role",
    "EMAIL_FORMULA__C" = Email,
    "PHONE_FORMULA__C" = `Telephone 1`,
    "FIRST_NAME__C" = `First Name`,
    "LAST_NAME__C" = `Last Name`,
    "ORGANIZATION__C" = Organization
  ) %>%
  select(
    ROLE__C, START_DATE__C, END_DATE__C, FULL_NAME__C, EMAIL_FORMULA__C,
    PHONE_FORMULA__C, PROGRAM_TYPE_FORMULA__C, FIRST_NAME__C, ORGANIZATION__C,
    PROPOSAL_STATUS__C, LAST_NAME__C, TEAM_NAME_TEXT_ONLY_HIDDEN__C
  ) %>%
  write_csv("new/member_2013.csv")
## note: status needs capitalization - stri_trans_totitle()
# task -------------------------------------------------
# Build the 2013 task (post-award note) load file and map each note's author
# to their Salesforce owner ID.
task_2013 <- read_excel("/Volumes/GoogleDrive/My Drive/Sustainable_Vision/sustainable_vision_grants_2013_post_award_notes.xlsx",
                        col_types = c("numeric", "text", "text",
                                      "text")) %>%
  left_join(match) %>%
  rename("WHATID" = "Zenn ID",
         "DESCRIPTION" = "Note") %>%
  mutate(STATUS = "Completed",
         PRIORITY = "Normal",
         TYPE = "Internal Note",
         TASKSUBTYPE = "Call",
         ACTIVITYDATE = as.Date(`Created Date`),
         # Map each note author to their Salesforce user ID; unknown authors
         # fall through to the final default ID.
         OWNER = ifelse(`Created by` == "Brenna Breeding", "00539000005UlQaAAK",
                        ifelse(`Created by` == "Michael Norton", "00539000004pukIAAQ",
                               ifelse(`Created by` == "Patricia Boynton", "00570000001K3bpAAC",
                                      ifelse(`Created by` == "Rachel Agoglia", "00570000003QASWAA4",
                                             "00570000004VlXPAA0"))
                        )
         )
  ) %>%
  select(
    WHATID, ACTIVITYDATE, `Created by`, DESCRIPTION, TYPE, STATUS, PRIORITY, OWNER
  )
# commit
# BUG FIX: write_csv() was originally called with only the file path, so the
# data argument was missing and the task table was never written out.
write_csv(task_2013, "new/task_2013.csv")
|
c9908d534cb36e72bd1e7e3840f6f71cb8d905ec
|
ff11015a34c325891bac121e9b7432c119b33888
|
/R/RedshiftSQL.r
|
08a3d5ac02a3ac73f8e2e7aa0ed24df661cd3c90
|
[] |
no_license
|
mtreadwell/RRedshiftSQL
|
6c52b3e360fc3c6aab9775ac30c13c20723f103b
|
53f422f3d27b0bebbf2dd1c42d67991181da6f01
|
refs/heads/master
| 2021-01-10T23:20:41.145135
| 2016-10-11T13:27:52
| 2016-10-11T13:27:52
| 70,595,593
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,253
|
r
|
RedshiftSQL.r
|
#' @import DBI
#' @import RPostgreSQL
NULL
# Name of the backing driver package (kept for reference/legacy lookups).
.PostgreSQLPkgName <- "RPostgreSQL"
# RedshiftSQLDriver is a thin S4 subclass of RPostgreSQL's PostgreSQLDriver;
# it exists so Redshift-specific methods can dispatch on it.
setClass('RedshiftSQLDriver', contains = getClassDef('PostgreSQLDriver', package = 'RPostgreSQL'))
# Coercion carries the underlying driver Id over, so the wrapped driver
# remains a valid handle.
setAs('PostgreSQLDriver', 'RedshiftSQLDriver',
      def = function(from) methods::new('RedshiftSQLDriver', Id = methods::as(from, 'integer')))
#' Instantiate a Redshift client
#'
#' This function creates and initializes a PostgreSQL client with class
#' RedshiftSQLDriver which is simply a superclass of PostgreSQLDriver
#'
#' @export
#' @examples
#' \dontrun{
#' con <- dbConnect(RedshiftSQL(), user="u", password = "p", host="h", dbname="n", port = "5439")
#' query <- dbSendQuery(con, "SELECT * FROM table")
#' }
RedshiftSQL <- function() {
  # Create the underlying PostgreSQL driver and re-class it as the
  # RedshiftSQLDriver subclass before returning it.
  methods::as(RPostgreSQL::PostgreSQL(), 'RedshiftSQLDriver')
}
# Connection class: a thin subclass of RPostgreSQL's PostgreSQLConnection.
setClass('RedshiftSQLConnection', contains = getClassDef('PostgreSQLConnection', package = 'RPostgreSQL'))
# NOTE(review): unlike the driver coercion above, this coercion constructs a
# fresh object without copying any slots (e.g. the connection Id) from `from`
# -- confirm this is intentional and that callers never rely on the coerced
# object's connection identity.
setAs('PostgreSQLConnection', 'RedshiftSQLConnection',
def = function(from) methods::new('RedshiftSQLConnection'))
# dbConnect() on the Redshift driver delegates to redshiftsqlNewConnection(),
# which is defined elsewhere in this package.
setMethod("dbConnect", "RedshiftSQLDriver",
def = function(drv, ...) redshiftsqlNewConnection(drv, ...),
valueClass = "RedshiftSQLConnection"
)
|
93c200e9b2913bce62f82faebc9b28088ea9fea5
|
1fc6cdf2b36678fa0096015640ab9e6f14d7aefc
|
/R/cpgBoxplots.R
|
ad2679bc61debc6692272fdfd1ba945d77d7fc48
|
[] |
no_license
|
clark-lab-robot/Repitools_bioc
|
e36b4a9912f8fe3c34ab592a02069afe860a6afa
|
b838a8fd34b2ecc41dd86276bd470bfdae53d544
|
refs/heads/master
| 2021-01-01T18:37:22.034108
| 2014-04-15T01:49:48
| 2014-04-15T01:49:48
| 2,335,128
| 2
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 7,367
|
r
|
cpgBoxplots.R
|
setGeneric("cpgBoxplots", function(this, ...){standardGeneric("cpgBoxplots")})
# Shared worker for both cpgBoxplots methods: draws one boxplot panel per
# probe G+C content value, with probes binned by CpG density on the x-axis.
#   dm          - matrix of (log2) intensities, one column per sample
#   bins        - factor assigning each probe to a CpG-density bin
#   gcContent   - G+C counts to loop over (one panel each)
#   calcDiff    - TRUE: plot sample1 - sample2; FALSE: plot both samples side by side
#   pdfFile     - if non-NULL, plots go to this PDF with layout `mfrow`
#   gcCount     - per-probe G+C count, matched against gcContent
#   cb          - per-probe base info; only its length is used (for the % label)
# Returns (invisibly) the list of boxplot objects drawn.
.cpgBoxplots <- function(dm, bins, gcContent, nBins, calcDiff, pdfFile, mfrow, col, ylim, gcCount, cb, sampleNames)
{
if(calcDiff){
title1 <- paste( col, paste(sampleNames,collapse="-"), sep="=" )
}else{
title1 <- paste( paste(col,sampleNames,sep="="), collapse="," )
}
if( !is.null(pdfFile) ) {
pdf(pdfFile,width=10,height=10)
par(mfrow=mfrow)
}
actualNBins <- length( levels(bins) )
# - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
# Loop through G+C contents and
# - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
bp <- NULL
count <- 0
for(j in gcContent) {
w <- which( gcCount==j)
if( length(w)==0 )
next
title2 <- paste("[Probe G+C =",j,"]","Percentage of Probes:",round(length(w)/length(cb)*100,2))
main <- paste(title1,title2,sep="\n")
if (calcDiff) {
count <- count+1
bp[[count]] <- boxplot( dm[w,1]-dm[w,2] ~ bins[w], xlim=c(0,actualNBins),ylim=ylim,col=col,main=main,las=2,cex.axis=.8,cex.main=.9)
# NOTE(review): this assigns a single name to the whole list each iteration;
# once `bp` has more than one element the remaining names become NA --
# verify whether per-element names were intended.
names(bp) <- paste(colnames(dm), collapse="-")
} else {
count <- count+1
bp[[count]] <- boxplot( dm[w,1] ~ bins[w], at=(1:actualNBins)/2, boxwex=.4,xlim=c(0,actualNBins),ylim=ylim,col=col[1],main=main,las=2,cex.axis=.8,cex.main=.9)
count <- count+1
bp[[count]] <- boxplot( dm[w,2] ~ bins[w], at=((actualNBins+1):(actualNBins*2))/2,boxwex=.4, main="",col=col[2],add=TRUE,las=2,cex.axis=.8)
# NOTE(review): same length-mismatch concern as above once count > 2.
names(bp) <- colnames(dm)
}
}
if( !is.null(pdfFile) )
dev.off()
invisible(bp)
}
# Bin the values of `u` into `nBins` equal-count (quantile) bins.
# The outermost breaks are nudged by 1e-9 so that min(u) and max(u) fall
# strictly inside the first and last interval (cut() uses open lower bounds).
.createBins <- function(u, nBins) {
  breaks <- quantile(u, prob = seq(0, 1, length.out = nBins + 1))
  breaks[1] <- breaks[1] - .000000001
  breaks[length(breaks)] <- breaks[length(breaks)] + .000000001
  cut(u, breaks = breaks)
}
# cpgBoxplots method for AffymetrixCelSet objects (aroma.affymetrix).
# Reads PM probe intensities for a pair of samples, computes per-probe G+C
# counts and CpG densities from annotation files, then delegates plotting
# to .cpgBoxplots().
setMethod("cpgBoxplots", "AffymetrixCelSet", function(this, samples=c(1,2), subsetChrs="chr[1-5]", gcContent=7:18,
calcDiff=FALSE, verbose=FALSE, nBins=40, pdfFile=NULL,
ylim=if (calcDiff) c(-5,6) else c(4,15),
col=if (calcDiff) "salmon" else c("lightgreen","lightblue"),
mfrow=if (!is.null(pdfFile)) c(2,2) else c(1,1)) {
# - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
# Validate arguments
# - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
# Argument 'verbose':
verbose <- Arguments$getVerbose(verbose);
if (verbose) {
pushState(verbose);
on.exit(popState(verbose));
}
if( length(samples) != 2 )
stop("Can only do boxplots on a pair of samples.")
if(calcDiff && length(col) != 1)
stop("calcDiff=TRUE, but length(col) != 1.")
if(!calcDiff && length(col) != 2)
stop("calcDiff=FALSE, but length(col) != 2.")
if( max(samples) > nbrOfArrays(this) )
stop("'samples' is out of range.")
cdf <- getCdf(this)
mainCdf <- getMainCdf(cdf)
# Restrict to the requested chromosomes (regex), or all units if NULL.
if (is.null(subsetChrs))
units <- seq_len(nbrOfUnits(cdf))
else
units <- indexOf(cdf,subsetChrs)
if( length(units) == 0 )
stop("'units' is length 0. Specify an appropriate 'subsetChrs' argument.")
# - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
# Read indices
# - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
verbose && enter(verbose, sprintf("Reading indices for %d units (unique CDF) ",length(units)));
indices <- getCellIndices(cdf,units=units,stratifyBy="pm",verbose=verbose)
indices <- unlist(indices,use.names=FALSE)
verbose && exit(verbose);
verbose && enter(verbose, sprintf("Reading indices for %d units (main CDF) ",length(units)));
mainIndices <- getCellIndices(mainCdf,units=units,stratifyBy="pm",verbose=verbose)
mainIndices <- unlist(mainIndices,use.names=FALSE)
verbose && exit(verbose);
# - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
# Counting bases
# - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
verbose && enter(verbose, sprintf("Counting bases for %d probes",length(mainIndices)));
acs <- AromaCellSequenceFile$byChipType(getChipType(mainCdf))
cb <- countBases(acs,cells=mainIndices)
gcCount <- rowSums( cb[,c("C","G")] )
verbose && exit(verbose);
cs <- extract(this,samples)
sampleNames <- getNames(cs)
# - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
# Reading data
# - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
verbose && enter(verbose, "Reading intensity data");
dm <- extractMatrix(cs,cells=indices,verbose=verbose)
dm <- log2(dm)
verbose && exit(verbose);
# - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
# Reading CpG density data
# - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
verbose && enter(verbose, "Reading and binning Cpg data");
acc <- AromaCellCpgFile$byChipType(getChipType(cdf))
cpgDens <- acc[indices,1,drop=TRUE]
bins <- .createBins(cpgDens, nBins)
verbose && exit(verbose);
.cpgBoxplots(dm, bins, gcContent, nBins, calcDiff, pdfFile, mfrow, col, ylim, gcCount, cb, sampleNames)
}
)
# cpgBoxplots method for a plain intensity matrix plus an NDF annotation
# table (NimbleGen-style). Computes G+C counts from probe sequences and CpG
# densities via cpgDensityCalc(), then delegates plotting to .cpgBoxplots().
setMethod("cpgBoxplots", "matrix", function(this, ndfTable = NULL, organism, samples=c(1,2), subsetChrs="chr[1-5]", gcContent=7:18,
calcDiff=FALSE, verbose=FALSE, nBins=40, pdfFile=NULL,
ylim=if (calcDiff) c(-5,6) else c(4,15),
col=if (calcDiff) "salmon" else c("lightgreen","lightblue"),
mfrow=if (!is.null(pdfFile)) c(2,2) else c(1,1)) {
if(is.null(ndfTable))
stop("Probe positions not given.")
# - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
# Validate arguments
# - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
# Argument 'verbose':
verbose <- Arguments$getVerbose(verbose);
if (verbose) {
pushState(verbose);
on.exit(popState(verbose));
}
if( length(samples) != 2 )
stop("Can only do boxplots on a pair of samples.")
if(calcDiff && length(col) != 1)
stop("calcDiff=TRUE, but length(col) != 1.")
if(!calcDiff && length(col) != 2)
stop("calcDiff=FALSE, but length(col) != 2.")
if( max(samples) > ncol(this) )
stop("'samples' is out of range.")
if (is.null(subsetChrs))
usefulProbeIndices <- 1:nrow(ndfTable)
else
usefulProbeIndices <- grep(subsetChrs, ndfTable$chr)
if( length(usefulProbeIndices) == 0 )
stop("'units' is length 0. Specify an appropriate 'subsetChrs' argument.")
# - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
# Counting bases
# - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
verbose && enter(verbose, sprintf("Counting bases for %d probes",length(usefulProbeIndices)))
ndfTable <- ndfTable[usefulProbeIndices, ]
# NOTE(review): gregexpr() returns -1 (length 1) when a sequence contains no
# C or G, so such probes would be counted as having one C/G -- confirm.
gcCount <- sapply(gregexpr("[CG]", ndfTable$sequence), length)
# NOTE(review): length() of a character element is always 1; this looks like
# it was meant to be nchar(ndfTable$sequence). Only cb's length is used by
# .cpgBoxplots(), so the plotted percentage label is unaffected -- verify.
cb <- sapply(ndfTable$sequence, length)
verbose && exit(verbose)
# Bin probes by CpG density in a 300 bp window.
densities <- cpgDensityCalc(ndfTable, 300, organism = organism)
bins <- .createBins(densities, nBins)
sampleNames <- colnames(this)[samples]
.cpgBoxplots(this[usefulProbeIndices, samples], bins, gcContent, nBins, calcDiff, pdfFile, mfrow, col, ylim, gcCount, cb, sampleNames)
}
)
|
eb77ad35a53a8442a465061f00b49916d001f06a
|
d204c97fae0f1a3b5a907e59a8b24c649f9c05c0
|
/8_anova_and_glm.R
|
bf7eb336ea7bc05067b9137194a41d20e1d95b4b
|
[
"CC0-1.0",
"LicenseRef-scancode-public-domain"
] |
permissive
|
brasilbrasil/IUCN_threat_analysis
|
cafdeefc8fda707862efbd695d7e954b68f03949
|
c783b645a687d9fcb1bfec6a8fe4a38df9807d48
|
refs/heads/master
| 2021-01-18T23:06:54.409887
| 2016-11-03T00:50:34
| 2016-11-03T00:50:34
| 72,684,676
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 5,451
|
r
|
8_anova_and_glm.R
|
rm(list = ls()) #remove all past worksheet variables
# NOTE(review): rm(list = ls()) wipes the calling session's workspace and is
# generally discouraged in shared scripts; prefer running in a fresh session.
library(reshape2)
library(ggplot2)
##run this code after merging all IUCN data
# Pop every active output sink so console output is fully restored.
sink.reset <- function() {
  while (sink.number() > 0) {
    sink(NULL)
  }
}
#wd="C:/Users/Kaipo Dye/Dropbox/PICCC/Kaipo vulnerability and multiple threats/IUCN_test_analysis_results20160621/"
#wd="D:/Dropbox/current work/contracting shared folders/Kaipo vulnerability and multiple threats/IUCN_test_analysis_results20160621/"
wd="D:/Dropbox/current work/IUCN_threat_publication/IUCN_test_analysis_results20160621/"
setwd(wd)
# Species table: one row per species, with threat counts and categories.
all_data_combined = read.csv(paste0("results/all_data_combined_onlySppWThreatInfo",".csv"), header = T, row.names = NULL, check.names = FALSE)
#all_data_combined_onlySppWThreatInfo have at least 1 threat (or else test will not be of augmentation)
#all_data_combined=all_data_combined[all_data_combined$n_nonCC_threat!=0,] <-- line removed(must have at least 1 threat)
dependents="n_nonCC_threat"
data=all_data_combined
#0ne way anova
independents=c("CC_threat") ##n_nCC_threats ~ CC_threat/Red.List.status/Kingdom
int <- aov(as.formula(paste0(dependents, " ~ ", independents[1])), data=data)
aov_summary=summary(int)
aov_summary
plot(int)
# Capture the 1-way ANOVA results to a text file via sink().
# NOTE(review): bare expressions like `int` below rely on auto-printing,
# which does not happen under source() with default print.eval -- run via
# Rscript or wrap in print() to be safe.
con=file(paste0("results/","n_nonCC_threats_dep_vs_AOV_cc_threat.txt"), open="wt")
sink(con)
cat('these are the results for the 1-way anova test for independent variables ', independents, " and dependent ",dependents,"\n")
int
cat("\n", "\n", '1-way anova sumary below:', "\n")
aov_summary
# NOTE(review): this message says "2-way" inside the 1-way section -- confirm.
cat('2-way anova test results done', "\n")
sink.reset()
close(con)
#two way anova (Kingdom/IUCN Status)
independents=c("CC_threat", "Red.List.status")
int2 <- aov(as.formula(paste0(dependents, " ~ ", independents[1],"*", independents[2])), data=data)
aov_summary2=summary(int2)
plot(int2)
con1=file(paste0("results/","n_nonCC_threats_dep_vs_AOV_cc_threat+IUCN_Status_interactive.txt"), open="wt")
sink(con1)
cat('these are the results for the 2-way anova test for independent variables ', independents, " and dependent ",dependents,"\n")
int2
cat("\n", "\n", '2-way anova sumary below:', "\n")
aov_summary2
cat('2-way anova test results done', "\n")
sink.reset()
close(con1)
# Second 2-way ANOVA: CC threat crossed with Kingdom.
independents=c("CC_threat", "Kingdom")
int3 <- aov(as.formula(paste0(dependents, " ~ ", independents[1],"*", independents[2])), data=data)
aov_summary3=summary(int3)
plot(int3)
con3=file(paste0("results/","n_nonCC_threats_dep_vs_AOV_cc_threat+Kingdom_interactive.txt"), open="wt")
sink(con3)
cat('these are the results for the 2-way anova test for independent variables ', independents, " and dependent ",dependents,"\n")
int3
cat("\n", "\n", '2-way anova sumary below:', "\n")
aov_summary3
cat('2-way anova test results done', "\n")
sink.reset()
close(con3)
#three way anova (with and without interaction)
independents=c("CC_threat", "Red.List.status", "Kingdom")
aov.with <- aov(as.formula(paste0(dependents, " ~ ", independents[1],"*", independents[2], "*", independents[3])), data=data)
#int <- aov(all_data_combined$n_threats ~ all_data_combined$CC_threat*all_data_combined$status)
summary(aov.with)
plot(aov.with)
#Investigate each interaction (cc_threat,Red.List.Status)
TukeyHSD(aov.with, conf.level=.99)
independents=c("CC_threat", "Red.List.status", "Kingdom")
aov.wout <- aov(as.formula(paste0(dependents, " ~ ", independents[1],"+", independents[2], "+", independents[3])), data=data)
#int <- aov(all_data_combined$n_threats ~ all_data_combined$CC_threat*all_data_combined$status)
summary(aov.wout)
#Chi-square test for interactivity legitimacy
anova(aov.with,aov.wout,test="Chi")
Chi=anova(aov.with,aov.wout,test="Chi")
Chi
Chi_summary=summary(Chi)
plot(Chi)
con5=file(paste0("results/","n_nonCC_threats_AOV_cc_threat_Chi.txt"), open="wt")
sink(con5)
cat('these are the results for the Chi-square interactivity test',"\n")
Chi
cat("\n", "\n", 'Chi-square sumary below:', "\n")
Chi_summary
cat('Chi-square test results done', "\n")
sink.reset()
close(con5)
#1 factor GLM models
#glm and poisson family
#poison since dependent variable is count data
independents=c("CC_threat") ##n_nCC_threats ~ CC_threat/Red.List.status/Kingdom
glmFit=glm(as.formula(paste0(dependents, " ~ ", independents[1])), data=data, family=poisson())
summary(glmFit)
plot(glmFit)
#2 factor GLM models
#glm and poisson family
#poison since dependent variable is count data
# FIX: the original left `independents` with a single element here but then
# used independents[2], which is NA and produces a broken formula.
independents=c("CC_threat", "Red.List.status") ## n_nCC_threats ~ CC_threat*Red.List.status
glmFit=glm(as.formula(paste0(dependents, " ~ ", independents[1],"*", independents[2])), data=data, family=poisson())
summary(glmFit)
plot(glmFit)
#3 factor GLM models
#glm and poisson family (with and without interaction)
#poison since dependent variable is count data
independents=c("CC_threat", "Red.List.status", "Kingdom")
glmFitW=glm(as.formula(paste0(dependents, " ~ ", independents[1],"*", independents[2],"*", independents[3])), data=data, family=poisson())
summary(glmFitW)
plot(glmFitW)
#no interactions
glmFitWO=glm(as.formula(paste0(dependents, " ~ ", independents[1]," + ", independents[2]," + ", independents[3])), data=data, family=poisson())
summary(glmFitWO)
# FIX: was `plot((-)glmFitWO)`, which is a syntax error.
plot(glmFitWO)
#CHI-square Interactivity test (Independence test)
anova(glmFitW,glmFitWO,test="Chi")
#Multi-FACTORIAL ANOVA
aov.out=aov(n_nonCC_threat ~ CC_threat*Red.List.status*Kingdom,data=all_data_combined)
summary(aov.out)
TukeyHSD(aov.out, conf.level=.99)
library(boot)
glm.diag=glm.diag(glmFitW)
# FIX: diagnostics were computed for glmFitW but plotted against glmFit;
# plot them for the model they belong to.
glm.diag.plots(glmFitW,glm.diag)
|
c8c7669d8b8d64b03c0ad992130c34673d19ca5c
|
452eec9695d9de8774598261862d670bb10aac6c
|
/HW_4.R
|
2392c23ddbedba712bcdbef44df61ac8c1727188
|
[] |
no_license
|
thangtrinh273/Statistical_Learning_With_R
|
9e635e4e260abf708018d64760208e318d84a1ed
|
2f4b14f2b5276ac2209ef5bd2ccdd11c0c41e297
|
refs/heads/master
| 2022-12-31T04:36:49.018626
| 2020-10-19T10:11:18
| 2020-10-19T10:11:18
| 305,336,298
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 3,274
|
r
|
HW_4.R
|
install.packages("foreign")
library(foreign) #necessary to be able to read .dta files
#reading the .dta file into R
# NOTE(review): both assignments below populate `program`; the second (URL)
# overwrites the first, so the local-file read is effectively dead code and
# the script requires network access as written.
program <-read.dta("C:/Users/Kitteh/Dropbox/R/hsbdemo (1).dta")
#OR
program <-read.dta("https://stats.idre.ucla.edu/stat/data/hsbdemo.dta")
#sampling the file for tutorial purposes
subsample <- program[50:150, ]
#1  Cross-tabulate program choice by socio-economic status.
install.packages("RcmdrMisc")
library(RcmdrMisc) #needed for colPercents/rowPercents function
table1 <- table(subsample$ses, subsample$prog)
table1
rowPercents(table1)
colPercents(table1)
#2  Collapse program into academic vs non-academic and compute odds ratios.
mosaicplot(table1, main="Program choice vs. socio-economic status")
subsample$academic <- NA
# NOTE(review): levels<- on a non-factor NA column is a no-op once character
# values are assigned below; the ordered() call at the end is what actually
# establishes the factor levels.
levels(subsample$academic)<- c("non-academic","academic")
subsample$academic[subsample$prog == "academic"] <- "academic"
subsample$academic[subsample$prog == "general"] <- "non-academic"
subsample$academic[subsample$prog == "vocation"] <- "non-academic"
subsample$academic <- ordered(subsample$academic, levels= c("non-academic","academic"))
table2 <- table(subsample$ses, subsample$academic)
table2
library(vcd) #needed for loddsratio function
loddsratio(table2, log=FALSE) #for odds ratio
loddsratio(table2) #for log odds ratio
#3  Chi-square test of independence plus residuals (observed - expected).
chisq.test(table1)
chisq.test(table1)$expected
table1 - chisq.test(table1)$expected
#4  Same test, stratified by sex.
Fsample <- subset(subsample, female=="female")
Msample <- subset(subsample, female=="male")
tableF <- table(Fsample$ses, Fsample$prog)
chisq.test(tableF)
chisq.test(tableF)$expected
tableF - chisq.test(tableF)$expected
tableM <- table(Msample$ses, Msample$prog)
chisq.test(tableM)
chisq.test(tableM)$expected
tableM - chisq.test(tableM)$expected
mosaicplot(tableF)
mosaicplot(tableM)
#5  Multinomial logistic regression with Wald tests on the coefficients.
install.packages("nnet")
library(nnet) #needed for multinom function
model1<- multinom(prog~female+ses+schtyp+read+write+math+science
+honors+awards,data=subsample, trace=FALSE)
summary(model1)
Wald <- summary(model1, cor=FALSE, Wald=TRUE)$Wald.ratios
p <- (1 - pnorm(abs(Wald), 0, 1))*2
p
p < 0.05
#6  Backward stepwise selection by AIC, with fit statistics.
library(RcmdrMisc)
model2<-stepwise(model1, "backward", criterion="AIC")
summary(model2)
BIC(model2)
logLik(model2)
#7  Predicted probabilities on a small grid of covariate combinations.
#the following dataframe is only an example for a situation in which the final model from Q6 had three predictors: ses, schtyp and math
#always double-check the dataframe to make sure all the rows are unique!
dframe <- data.frame(ses=rep(c("low", "middle", "high"),each = 2),
math = rep(mean(program$math), 6),
schtyp=rep(c("public", "private"), each=1))
predict(model2, newdata=dframe, "probs")
#8  Mean predicted probabilities per SES level over a range of math scores.
#the following dataframe is only an example for a situation in which the final model from Q6 had three predictors: ses, schtyp and math
#always double-check the dataframe to make sure all the rows are unique!
dframe2 <- data.frame(ses = rep(c("low", "middle", "high"),each=51),
math = rep(c(30:80),6),
schtyp=rep(c("public", "private"),each=51))
dframe3 <-predict(model2, dframe2, "probs")
dframe4 <- cbind(dframe2, dframe3) #we have to calculate means for every level of SES, that is why we bind those two dataframes
by(dframe4[,4:6], dframe4$ses, colMeans)
|
e048196ad2bb28b28025afec43e5dadec182a582
|
ffdea92d4315e4363dd4ae673a1a6adf82a761b5
|
/data/genthat_extracted_code/sparseHessianFD/examples/sparseHessianFD.Rd.R
|
31ee19a4bd0ea642bc66498bb9c1964b16d24020
|
[] |
no_license
|
surayaaramli/typeRrh
|
d257ac8905c49123f4ccd4e377ee3dfc84d1636c
|
66e6996f31961bc8b9aafe1a6a6098327b66bf71
|
refs/heads/master
| 2023-05-05T04:05:31.617869
| 2019-04-25T22:10:06
| 2019-04-25T22:10:06
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 926
|
r
|
sparseHessianFD.Rd.R
|
library(sparseHessianFD)
### Name: sparseHessianFD
### Title: sparseHessianFD
### Aliases: sparseHessianFD
### ** Examples
## Log posterior density of hierarchical binary choice model. See vignette.
set.seed(123)
# NOTE(review): data("binary_small") is loaded but the code indexes an object
# named `binary` -- presumably the dataset provides it under that name; confirm.
data("binary_small")
N <- length(binary[["Y"]])
k <- NROW(binary[["X"]])
T <- binary[["T"]]
# Random parameter vector: one k-vector per subject plus population-level block.
P <- rnorm((N+1)*k)
priors <- list(inv.Sigma = rWishart(1,k+5,diag(k))[,,1],
inv.Omega = diag(k))
# Exact Hessian used as the ground truth; its lower triangle defines the
# sparsity pattern handed to sparseHessianFD.
true.hess <- binary.hess(P, binary, priors)
pattern <- Matrix.to.Coord(Matrix::tril(true.hess))
str(pattern)
obj <- sparseHessianFD(P, fn=binary.f, gr=binary.grad,
rows=pattern[["rows"]], cols=pattern[["cols"]],
data=binary, priors=priors)
# Finite-difference Hessian should match the analytic one.
hs <- obj$hessian(P)
all.equal(hs, true.hess)
f <- obj$fn(P) ## obj function
df <- obj$gr(P) ## gradient
fdf <- obj$fngr(P) ## list of obj function and gradient
fdfhs <- obj$fngrhs(P) ## list of obj function, gradient and Hessian.
|
0d0fd166137939849bdeed560714cea3264500a1
|
2bb14f19653e09a4b02007ce8780d1602d7d0c20
|
/scripts/find-tail-modifications
|
d37a241877ab26de38f834203476f59e8528bbdf
|
[] |
no_license
|
klmr/poly-u
|
a1c0123b98bbb895bc5c07ff5def0764b92217b8
|
81a2ffad7b973ae7d43970c44563a39318285026
|
refs/heads/master
| 2021-07-13T06:51:54.881479
| 2017-09-26T11:07:20
| 2017-09-26T11:07:20
| 63,482,171
| 1
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,776
|
find-tail-modifications
|
#!/usr/bin/env Rscript
sys = modules::import('klmr/sys')
"Extract the first (non-A) tail modification (if any) and its length, and
writes the resulting table to the standard output"
sys$run({
args = sys$cmd$parse(arg('taginfo', 'input taginfo file'))
io = modules::import('ebi-predocs/ebits/io')
modules::import_package('dplyr', attach = TRUE)
# '_' marks a missing modification in the taginfo file and is read as NA.
taginfo = io$read_table(args$taginfo, header = TRUE, na.strings = '_') %>%
mutate(Source = ifelse(grepl('ORV', Gene, fixed = TRUE), Gene, 'host'))
# The first version of this used `rle`, but was much too slow and memory
# intensive.
# For each modification string, return its first character and the length of
# the leading run of that character (i.e. the first run length).
modifications = function(seq) {
# Length of the leading run of the first character; 0 for the empty string.
first_length = function (str) {
if (! nzchar(str))
return(0L)
first_char = substr(str, 1L, 1L)
pos = 2L
# substr() past the end returns "", which never equals first_char,
# so this loop always terminates.
while (substr(str, pos, pos) == first_char) {
pos = pos + 1L
}
pos - 1L
}
data_frame(ModLength = vapply(seq, first_length, integer(1)),
Mod = substr(seq, 1L, 1L))
}
# Previously, `N` tail modifications were converted to *no* tail
# modifications but this is inaccurate: `N` marks the presence of an
# unknown tail modification rather than its absence. Confounding these
# values would skew the analysis. We thus now remove these uninterpretable
# values.
tailinfo = taginfo %>%
do(modifications(.$Mod)) %>%
bind_cols(select(taginfo, -Mod), .) %>%
filter(Mod != 'N') %>%
# For the host cell, only consider polyadenylated genes; the rest is due
# to degradataion and/or fragmentation.
filter(Source != 'host' | `pA length` > 0)
io$write_table(tailinfo, stdout(), sep = '\t')
})
# vim: ft=r
|
|
61e883141af33f9fb64dfde5d2675b67fbca9dd2
|
b3921db7e6ac213db389b4f2f5c4cb19e32a3411
|
/WANG2021/wbsip.R
|
21df159b7174c92138711e9e01faacf6c0deace3
|
[] |
no_license
|
12ramsake/MVT-WBS-RankCUSUM
|
cebb8c84aeec47c57d816b3281baca5cfd326a2b
|
e227f96fbf8ac752d78c6f755b71d79647297199
|
refs/heads/master
| 2023-08-09T08:13:50.768287
| 2023-07-19T20:02:00
| 2023-07-19T20:02:00
| 195,120,113
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 3,291
|
r
|
wbsip.R
|
# Draw M random candidate intervals from `indices`, returned as an M x 2
# matrix of sorted endpoints. Rows whose endpoints are adjacent (gap of
# exactly 1) are redrawn recursively, so every returned interval spans at
# least two positions.
getIntervals <- function(indices, M) {
  ints <- t(replicate(M, sort(sample(indices, 2))))
  tooShort <- (ints[, 2] - ints[, 1]) == 1
  if (any(tooShort)) {
    # Resample only the offending rows.
    ints[tooShort, ] <- getIntervals(indices, sum(tooShort))
  }
  ints
}
#
# checkIfSubInterval<-function(sub,super){
# return(sub[1]>=super[1]&&sub[2]<=super[2])
# }
# CUSUM-type second-moment contrast for a split point t within (s, e]:
# the weighted difference between the Gram matrices of rows (s+1)..t and
# rows (t+1)..e of `data`. A vector input is treated as a one-column matrix.
St = function(t, s, e, data) {
  if (is.null(dim(data)))
    data = matrix(data, ncol = 1)
  left = data[(s + 1):t, ]
  right = data[(t + 1):e, ]
  wLeft = sqrt((e - t) / ((e - s) * (t - s)))
  wRight = sqrt((t - s) / ((e - t) * (e - s)))
  wLeft * (t(left) %*% left) - wRight * (t(right) %*% right)
}
# Leading principal direction of the St() statistic over interval
# (alpha, beta]. The candidate split maximising the spectral norm of St()
# is located first; the first eigenvector of St() at that split is returned.
# If the interval is too short (<= 2*p*log(n) + 1), a zero vector of length
# p is returned instead.
PC1=function(Xt,alpha,beta){
p=ncol(Xt)
n=nrow(Xt)
if((beta-alpha)>2*p*log(n)+1){
# Candidate splits keep at least p*log(n) points on each side.
t_vals=ceiling(alpha+p*log(n)):floor(beta-p*log(n))
dm=t_vals[which.max(sapply(t_vals,function(t){norm(St(t,alpha,beta,Xt),type="2")} ))]
um=eigen(St(dm,alpha,beta,Xt))$vectors[,1]
}
else{um=rep(0,p)}
return(um)
}
# Compute the leading principal direction for every candidate interval.
#
#   Wt        : matrix of observations (odd-indexed half of the data),
#               one row per time point
#   intervals : M x 2 matrix of interval endpoints
#
# Returns an M x ncol(Wt) matrix whose i-th row is PC1() evaluated on the
# i-th interval (a zero row when the interval is too short; see PC1).
PC = function(Wt, intervals) {
  M = nrow(intervals)
  # FIX: preallocate instead of growing with rbind() inside the loop
  # (quadratic copying); seq_len() also handles M == 0 safely where the
  # original 1:M would iterate over c(1, 0).
  ums = matrix(0, nrow = M, ncol = ncol(Wt))
  for (i in seq_len(M)) {
    ums[i, ] = PC1(Wt, intervals[i, 1], intervals[i, 2])
  }
  ums
}
#let the set of intervals be a matrix with 2 columns
# Wild Binary Segmentation with Independent Projections: recursively locates
# covariance change points in (s, e]. The data are split into two halves
# (odd rows Wt for estimating projection directions, even rows Xt for
# testing); projected squared scores Ytu are scanned with St() within each
# random interval, and the best candidate is kept when its statistic
# exceeds the threshold `tau`. Returns a matrix with one row per detected
# change point: (position on the original time scale, statistic value),
# or NULL when nothing exceeds the threshold.
WBSIP<-function(data,s,e,intervals,tau){
# sig.level=sig.level/2
# threshold=qBB(1-sig.level)$root
p=ncol(data)
n=nrow(data)
# Sample splitting: directions from odd rows, test statistics from even rows.
Wt=data[seq(1,nrow(data),by=2),]
Xt=data[seq(2,nrow(data),by=2),]
M=nrow(intervals)
#u has M rows
u=PC(Wt,intervals)
#
# s=floor(s/2)
# e=floor(e/2)
# intervals2=floor(intervals)/2
#M by n
# Squared projections of the test half onto each interval's direction.
Ytu=u%*%t(Xt)
Ytu=Ytu^2
if((e-s)<(2*p*log(n/2)+1))
return(NULL)
else{
#intervals contained in s,e
# Mes<-which(apply(intervals2,1,checkIfSubInterval,super=c(s,e)))
# Clip each interval to (s, e) and keep only those still long enough.
left_endpoint=sapply(intervals[,1],function(x){max(x,s)})
right_endpoint=sapply(intervals[,2],function(x){min(x,e)})
Mes=which((right_endpoint-left_endpoint)>=(2*log(n/2)+1))
if(length(Mes)>1){
am=rep(-1,M)
bm=rep(-1,M)
for(j in Mes){
t_vals=ceiling(left_endpoint[j]+log(n/2)):floor(right_endpoint[j]-log(n/2))
candidate_ys<-sapply(t_vals,function(t){abs(St(t,left_endpoint[j],right_endpoint[j],Ytu[j,]))} )
mm=which.max(candidate_ys)
bm[j]=t_vals[mm[1]]
am[j]=candidate_ys[mm[1]]
}
m=which.max(am)
if(am[m[1]]>tau){
# sig.level=sig.level/2
# Change point found: record it (doubling maps back to the original
# index scale after the odd/even split) and recurse on both sides.
return(rbind(c(bm[m[1]]*2,am[m[1]]),
WBSIP(data,s,bm[m[1]],intervals,tau),
WBSIP(data,bm[m[1]]+1,e,intervals,tau)))
}
else
return(NULL)
}
else
return(NULL)
}
}
# --- Simulation driver --------------------------------------------------
# FIX: the original called getIntervals() and big_enough() before `n` and
# `p` were defined, which errors in a fresh session; parameters are now
# defined first.
p=2
n=120
kappa=norm(diag(rep(9,2)),type="2")
B=5
Delta=0.075*n
tau=Delta*kappa*n^.4
intervals=getIntervals(0:floor(n/2),100)
#check if the interval is big enough
big_enough=function(i){(i[2]-i[1])>(2*p*log(n/2)+1)}
# drop = FALSE keeps `intervals` a matrix even if only one row survives.
intervals=intervals[apply(intervals, 1, big_enough), , drop = FALSE]
# First half: unit-variance noise; second half: sd = 10 (one variance change).
data=rbind(replicate(p,rnorm(n/2)),replicate(p,rnorm(n/2,0,10)))
WBSIP(data,p*log(n/2)+1,n/2-p*log(n/2)+1,intervals,20)
|
638ea0edac29f3b7db702fdc5710c49729c863f2
|
a780373151d932f841e17eed14614b949cc248b6
|
/Data_Cleaning_Scripts_DMX_Linkages/Ichthyoplankton_DMX_Linkages.R
|
26f2d666d5afa227973e8c78996c2299344baf64
|
[] |
no_license
|
NCEAS/dmx-linkages
|
56816c309aaa08277670faacec3ecabafcf08a52
|
d79983fbfba8cb86280da0c93a64c2cccb1c866f
|
refs/heads/master
| 2020-12-25T17:14:32.804002
| 2016-09-22T21:06:19
| 2016-09-22T21:06:19
| 39,415,949
| 1
| 1
| null | null | null | null |
UTF-8
|
R
| false
| false
| 836
|
r
|
Ichthyoplankton_DMX_Linkages.R
|
###########################################################
##### Data Cleaning Script - DMX Linkages
##### Ichthyoplankton Data (Arrowtooth, Pollock, Halibut)
###########################################################
## load packages (order matters)
library(httr)
library(plyr)
library(dplyr)
library(XML)
library(curl)
library(rvest)
library(tidyr)
library(stringr)
## Steps for data cleaning:
## 1) read in data
## 2) format to annual estimates (2 column dataframe with cols=Year,spEstimate)
#############
# Data are from Janet Duffy-Anderson, from the EcoFOCI sampling program in & southwest of Shelikof Strait
# Download the CSV from Google Drive (requires network access) and parse it
# from the response body.
URL_Ich <- "https://drive.google.com/uc?export=download&id=0B1XbkXxdfD7ualZkTUsyemluYzg"
IchGet <- GET(URL_Ich)
Ich1 <- content(IchGet, as='text')
Ich_df <- read.csv(file=textConnection(Ich1),stringsAsFactors=FALSE)
|
fb50b8e4e5aa5f0282abbdea00537979a3a309bc
|
8a475be4061006487320a7c7e094a8a50453fd70
|
/FDA2/lab04.R
|
68eeee18d42575b6322394af03f2f0acec0aa97d
|
[] |
no_license
|
smeds1/Learning
|
e4f7afd7753fc139b33ba566f4beb154efd84a0d
|
05340df2184f2aca48800039b4ba15b09b6034e6
|
refs/heads/master
| 2020-03-24T01:48:24.083293
| 2019-01-03T23:30:41
| 2019-01-03T23:30:41
| 142,351,929
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 418
|
r
|
lab04.R
|
#Sam Smedinghoff
#7/30/18
#Week 4 - Lab 4
library(SDSFoundations)
acl <- AustinCityLimits
#Question 1
# Chi-square goodness-of-fit: are the four genres equally represented?
tabgen <- table(acl$Genre)
expgen <- c(.25, .25, .25, .25)
chisq.test(tabgen,p=expgen)$expected
chisq.test(tabgen,p=expgen)
#Question 2
# Chi-square test of independence: genre vs. having >100k Twitter followers.
tabGenTwitter <- table(acl$Genre,acl$Twitter.100k)
prop.table(tabGenTwitter,margin=1)
chisq.test(tabGenTwitter)$expected
# NOTE(review): `correct=F` disables Yates' continuity correction; prefer
# spelling out FALSE rather than the reassignable shorthand F.
chisq.test(tabGenTwitter,correct=F)
|
7a5817927e23a36bc1418d4e10ea26c4b66380e4
|
ee73739bd3314929cd44aa98b6b364f23e72691f
|
/data-raw/DATASET.R
|
520539ab8c2c0502777f7a0b48dc22b99c6778a1
|
[] |
no_license
|
irinagain/mixedCCA
|
8ee85144bcf7bff615990f244506c37770147d37
|
4c2b63f754582e57654893e50484c42a0f32cdb4
|
refs/heads/master
| 2022-09-23T22:50:00.459426
| 2022-09-09T21:19:33
| 2022-09-09T21:19:33
| 140,593,736
| 20
| 9
| null | 2022-09-09T21:15:52
| 2018-07-11T15:19:15
|
R
|
UTF-8
|
R
| false
| false
| 4,841
|
r
|
DATASET.R
|
## code to prepare `DATASET` dataset goes here
# usethis::use_data("DATASET")
# grid is revised with coarser grid on August 20, 2020.
############################################################################################
# For multilinear interpolation approximation for bridge Inverse
############################################################################################
############################################################################################
# For TC case
############################################################################################
load("~/Dropbox/TAMU/Irina/mixedCCAfast/R1_PrecomputedResults/tc_0804.Rda")
# grid values that used to create precomputed values.
# d1 <- log10(seq(1, 10^0.99, length = 50))
# tau <- seq(-0.99, 0.99, by = 0.01) # "by" increased from 0.005 to 0.01.
# FIX: the original built this interpolant twice (once as
# value/grid/interp_multilin and again as TCvalue/TCipolgrid/TCipol).
# Only the TC* objects are referenced below and in use_data(), so the
# duplicate computation has been removed.
# create input values for ipol
TCvalue <- matrix(unlist(gridTCinv), ncol = length(d1), byrow = FALSE)
# create grid input for ipol
TCipolgrid <- list(tau, d1)
# interpolation
TCipol <- chebpol::ipol(TCvalue, grid = TCipolgrid, method = "multilin")
############################################################################################
# For TT case
############################################################################################
load("~/Dropbox/TAMU/Irina/mixedCCAfast/R1_PrecomputedResults/tt_0804.Rda")
# grid values that used to create precomputed values.
# d2 <- log10(seq(1, 10^0.99, length = 50))
# tau <- seq(-0.99, 0.99, by = 0.01) # "by" increased from 0.005 to 0.01.
# Unpack the precomputed list into a 3-D array indexed (tau, d1, d2).
# NOTE(review): this assumes gridTTinv is ordered d1-major, i.e. element
# length(d2)*(i-1)+j corresponds to (d1[i], d2[j]) -- confirm against the
# script that produced the .Rda files.
TTvalue <- array(NA, dim = c(length(tau), length(d1), length(d2)))
for (i in 1:length(d1)){
for ( j in 1:length(d2)){
for ( k in 1:length(tau)){
TTvalue[k, i, j] <- gridTTinv[[length(d2)*(i - 1) + j]][k]
}
}
}
# create grid input for ipol
TTipolgrid <- list(tau, d1, d2)
# interpolation.
TTipol <- chebpol::ipol(TTvalue, grid = TTipolgrid, method = "multilin")
############################################################################################
# For TB case
############################################################################################
load("~/Dropbox/TAMU/Irina/mixedCCAfast/R1_PrecomputedResults/tb_0817.Rda")
# grid values that used to create precomputed values
# d1 <- log10(seq(1, 10^0.99, length = 50))
# d2 <- seq(0.01, 0.99, length.out = 50)
# tau1 <- c(seq(-0.5, -0.1, by = 0.007), seq(-0.095, -0.001, by = 0.005))
# tau <- c(tau1, 0, rev(-tau1))
# Same d1-major unpacking convention as the TT case above.
TBvalue <- array(NA, dim = c(length(tau), length(d1), length(d2)))
for (i in 1:length(d1)){
for ( j in 1:length(d2)){
for ( k in 1:length(tau)){
TBvalue[k, i, j] <- gridTBinv[[length(d2)*(i - 1) + j]][k]
}
}
}
# create grid input for ipol
TBipolgrid <- list(tau, d1, d2)
# interpolation.
TBipol <- chebpol::ipol(TBvalue, grid = TBipolgrid, method = "multilin")
############################################################################################
# For BC case
############################################################################################
load("~/Dropbox/TAMU/Irina/mixedCCAfast/R1_PrecomputedResults/bc_0817.Rda")
# grid values that used to create precomputed values
# d1 <- seq(0.01, 0.99, length.out = 50)
# tau1 <- c(seq(-0.5, -0.1, by = 0.007), seq(-0.095, -0.001, by = 0.005))
# tau <- c(tau1, 0, rev(-tau1))
# create input values for ipol
BCvalue <- matrix(unlist(gridBCinv), ncol = length(d1), byrow = FALSE)
# create grid input for ipol
BCipolgrid <- list(tau, d1)
# interpolation
BCipol <- chebpol::ipol(BCvalue, grid = BCipolgrid, method = "multilin")
############################################################################################
# For BB case
############################################################################################
load("~/Dropbox/TAMU/Irina/mixedCCAfast/R1_PrecomputedResults/bb_0817.Rda")
# grid values that used to create precomputed values
# d1 <- d2 <- seq(0.01, 0.99, length.out = 50)
# tau1 <- c(seq(-0.5, -0.1, by = 0.007), seq(-0.095, -0.001, by = 0.005))
# tau <- c(tau1, 0, rev(-tau1))
# Same d1-major unpacking convention as the TT case above.
BBvalue <- array(NA, dim = c(length(tau), length(d1), length(d2)))
for (i in 1:length(d1)){
for ( j in 1:length(d2)){
for ( k in 1:length(tau)){
BBvalue[k, i, j] <- gridBBinv[[length(d2)*(i - 1) + j]][k]
}
}
}
# create grid input for ipol
BBipolgrid <- list(tau, d1, d2)
# interpolation.
BBipol <- chebpol::ipol(BBvalue, grid = BBipolgrid, method = "multilin")
# Bundle all interpolants as internal package data (R/sysdata.rda).
usethis::use_data(TCipol, TTipol, TBipol, BCipol, BBipol, internal = TRUE, overwrite = TRUE, compress = "xz")
|
553b65d71633d6be42fef47037479394d29136e7
|
b9cddf1a01a484252b3e09f52cdf5450ed79ca7d
|
/R/bayes.R
|
aad742ba4b3a907f0dc25fa65f16a06efba1b169
|
[] |
no_license
|
alvarolemos/machinelearning
|
1b436f0bcd954d1cb8ba8ab81b4a2a35787215e6
|
51d1a613bf73ad241a6e75701aaadbf3fd418e26
|
refs/heads/master
| 2021-07-21T16:40:53.510873
| 2017-11-01T11:19:48
| 2017-11-01T11:19:48
| 103,881,107
| 3
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,467
|
r
|
bayes.R
|
library(foreach)
fitBayesian <- function(X, y) {
  # Fits a Naive Bayes model.
  #
  # Args:
  #   X: Sample matrix/data frame, one row per sample, one column per feature
  #   y: Label vector, one label per sample
  #
  # Returns:
  #   A list indexed by label; each entry holds, for that label:
  #   - means: per-feature mean vector
  #   - covMatrix: covariance matrix
  #   - priorProb: prior probability (class frequency)
  fitted <- list()
  for (cls in sort(unique(y))) {
    idx <- which(y == cls)
    classSamples <- as.matrix(X[idx, ])
    fitted[[cls]] <- list(
      means = apply(classSamples, 2, mean),
      covMatrix = cov(classSamples),
      priorProb = length(idx) / length(y)
    )
  }
  fitted
}
predictBayesian <- function(model, X) {
  # Classifies samples based on Bayes' Formula.
  #
  # Args:
  #   model: A bayesian model, trained by the fitBayesian function
  #   X: samples to be classified, one per row
  #
  # Returns:
  #   A vector of predicted class positions (index of the most probable label).
  posterior <- predictProbs(model, X)
  as.vector(apply(posterior, 1, which.max))
}
predictProbs <- function(model, X) {
  # Calculates (unnormalized) posterior probabilities using Bayes' Formula.
  #
  # Args:
  #   model: A bayesian model, trained by the fitBayesian function
  #   X: samples to have their posterior probabilities calculated, one per row
  #
  # Returns:
  #   A matrix with one row per sample and one column per label.
  #
  # FIX: as.matrix(X) is hoisted out of the loop (it was recomputed once per
  # label) and the result matrix is preallocated instead of being grown
  # column-by-column with cbind().
  Xm <- as.matrix(X)
  probs <- matrix(0, nrow = nrow(Xm), ncol = length(model))
  for (j in seq_along(model)) {
    probs[, j] <- apply(Xm, 1, calcPosteriorProb, model[[j]])
  }
  probs
}
calcPosteriorProb <- function(x, model) {
  # Calculates the unnormalized posterior probability for one sample:
  # likelihood (multivariate normal density) times the class prior.
  #
  # Args:
  #   x: Sample (numeric vector)
  #   model: Per-label parameters: means, covMatrix, priorProb
  #
  # Returns:
  #   The posterior probability (up to the normalizing constant).
  pdfnvar(x, model$means, model$covMatrix) * model$priorProb
}
pdfnvar <- function(x, m, K) {
  # Evaluates the multi-variate normal distribution density at a sample.
  #
  # Args:
  #   x: Sample (numeric vector of length n)
  #   m: Distribution's mean vector
  #   K: Distribution's covariance matrix (n x n, positive definite)
  #
  # Returns:
  #   A 1x1 matrix holding the density value.
  n <- length(x)
  dev <- x - m
  # solve(K, dev) solves the linear system directly, which is cheaper and
  # numerically more stable than forming the explicit inverse solve(K).
  (1 / (sqrt((2 * pi) ^ n * (det(K))))) * exp(-0.5 * (t(dev) %*% solve(K, dev)))
}
|
7232424c81d934e1b1b09fd42a2ab3e72f6f180c
|
123d808ae2e00215090b17956968b617fb6e0e2a
|
/pwl.R
|
4a06ec4be60ff4b0bb4d441a24331e73d55d5639
|
[] |
no_license
|
tudou2015/pwl
|
2f79c6e4348a1cba061a6cb0e81330a836949728
|
8e7e25b88c1b2a39333795c175d2d0d6538e16b2
|
refs/heads/master
| 2020-06-11T06:48:04.321503
| 2016-06-20T14:51:55
| 2016-06-20T14:51:55
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,061
|
r
|
pwl.R
|
# Find piecewise linear approximation function for the given data and number of breakpoints (bp)
# Length (l) specifies the minimum lenght of the segments (i.e. length between breakpoints)
# class "pwl" consists of the locations of the breakpoints, coefficients of the equations,
# fitted values, residuals, and mse.
# The function returns a list of pwl as result.
# Relies on findoneBP(), calculateSSRMatrix(), findBP() and getequations(),
# which are defined elsewhere in this package.
pwl <- function(data, noOfBP, l, error, maxBP, ...){
# check if l is given
if(missing(l)) stop("Please specify mininum length between break points:\n length is to be at least 1", call. = FALSE)
# check if either no of BP or error is given
if(missing(noOfBP)&& missing(error) && missing(maxBP)) stop("Please specify either number of desired breakpoints or error", call. = FALSE)
# first sort the given dataset
data <- data[sort.list(data[,1]),]
# check if the length and no of BP given will fit into the given data
size <- nrow(data)
maxBP.allowed <- (size/l) -1
# NOTE(review): noOfBP is used unconditionally here and below, so calling
# pwl() with only `error` or `maxBP` (which the check above permits) will
# fail with a missing-argument error -- the error-driven path appears
# unimplemented; confirm intended behaviour.
if(noOfBP > maxBP.allowed) stop("The data set is not big enought to fit the number of breakpoints given.\n Either lower the number of desired break points or distance between break points", call. = FALSE)
# Default result; overwritten by every branch below.
result <- list(minssr=0, minmse=0, BP=c())
#Use this if noOfBP is given..
# If there is only one breakpoint, we don't need to calculate the MSE matrix
# MSE matrix is used when there are more than 1 BP and/or when error is given
# and no. of BP is unknown
if(noOfBP == 1){
result <- findoneBP(data, l)
result$minmse <- result$minssr/nrow(data)
}else{
ssrMatrix <- calculateSSRMatrix(data)
print("SSRMatrix done!")
result <- findBP(ssrMatrix, noOfBP, l, 1, nrow(data))
print(paste0("BP found, BP = ", result$BP))
BP <- result$BP
result$minmse <- result$minssr/nrow(data)
# Map breakpoint row indices back to x-coordinates.
result$BP <- data[BP,1]
}
piecewise <- getequations(data, result$BP)
piecewise$mse <- result$minmse
class(piecewise) <- "pwl"
#allpwl <- list(piecewise)
piecewise
#allpwl
}
|
7c7ca20e9caef3899ee0eb252bc67aca0469caeb
|
50e8d7f49c8ce112ce0d7d625d3d89728f45cb64
|
/drunk/nv.R
|
d9c052d3ad3608403bb3374c0c1c05dfdc97e3e9
|
[] |
no_license
|
MarcosGrzeca/R-testes
|
dfbaa3786362a631ccc43239990f919957d8b32e
|
15449cd4dade39d425c24274f744bd84cbde6650
|
refs/heads/master
| 2020-04-16T03:34:40.009107
| 2017-08-02T00:14:39
| 2017-08-02T00:14:39
| 68,065,310
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,252
|
r
|
nv.R
|
library(tools)
PATH_FIT <- "resultados/nv/fit.Rda"
PATH_PRED <- "resultados/nv/pred.Rda"
PATH_IMAGE <- "resultados/nv/nv.RData"
load("rda/alemao_base_completa.Rda")
print("Naive Bayes")
library(caret)
trainAlgoritmo <- function(dadosP) {
  # Fits a naive Bayes classifier (caret method "nb") predicting `alc`
  # from every other column of `dadosP`, using 10-fold cross-validation.
  # Returns the caret `train` fit object.
  predictors <- subset(dadosP, select = -c(alc))
  cv_control <- trainControl(method = "cv", number = 10)
  train(x = predictors,
        y = dadosP$alc,
        method = "nb",
        trControl = cv_control)
}
#source(file_path_as_absolute("classificador_default.R"))
source(file_path_as_absolute("aspectos.R"))
load(PATH_FIT)
importantes(fit)
teste <- function() {
  # Trains the naive Bayes model on the global `dadosFinal`, saves the fit and
  # predictions to the PATH_* files, prints a confusion table and the
  # unweighted average recall (UAR), and snapshots the workspace.
  # Side effects only: writes PATH_FIT, PATH_PRED and PATH_IMAGE.
  library(caret)
  if (!require("doMC")) {
    install.packages("doMC")
  }
  library(doMC)
  registerDoMC(4)
  print("Treinando")
  fit <- trainAlgoritmo(dadosFinal)
  save(fit, file = PATH_FIT)
  load(PATH_FIT)
  fit
  print("Prevendo")
  bh_pred <- predict(fit, dadosFinal)
  save(bh_pred, file = PATH_PRED)
  print("Resultados")
  a <- table(bh_pred, dadosFinal$alc)
  a
  # Per-class recall: row = predicted, column = actual.
  uarA <- a[1, 1] / (a[1, 1] + a[2, 1])
  uarNA <- a[2, 2] / (a[2, 2] + a[1, 2])
  # FIX: was `uarNA == "NaN"` (numeric-to-string comparison); use is.nan()
  # to guard the 0/0 case when no samples fall in the second class.
  if (is.nan(uarNA)) {
    uarNA <- 0
  }
  uar <- (uarA + uarNA) / 2
  uar
  save.image(file = PATH_IMAGE)
}
#stopCluster(cl)
|
d90dc5e50479938ece29c7f51c0e70b042a42662
|
ffdea92d4315e4363dd4ae673a1a6adf82a761b5
|
/data/genthat_extracted_code/TAR/examples/LS.lognorm.Rd.R
|
f0a35688b063d431f3f393d35b2803a04300ae88
|
[] |
no_license
|
surayaaramli/typeRrh
|
d257ac8905c49123f4ccd4e377ee3dfc84d1636c
|
66e6996f31961bc8b9aafe1a6a6098327b66bf71
|
refs/heads/master
| 2023-05-05T04:05:31.617869
| 2019-04-25T22:10:06
| 2019-04-25T22:10:06
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 389
|
r
|
LS.lognorm.Rd.R
|
library(TAR)
### Name: LS.lognorm
### Title: Estimate a log-normal TAR model using Least Square method given
###   the structural parameters.
### Aliases: LS.lognorm
### ** Examples
# Simulate the threshold process Z as a Gaussian AR(1) series.
Z<-arima.sim(n=500,list(ar=c(0.5)))
# Structural parameters for the TAR routines below.
# NOTE(review): following TAR-package conventions these appear to be:
# l = number of regimes, r = threshold value, K = AR order per regime,
# theta = regime coefficients (one row per regime), H = regime scales --
# confirm against the package documentation.
l <- 2
r <- 0
K <- c(2,1)
theta <- matrix(c(1,0.5,-0.3,-0.5,-0.7,NA),nrow=l)
H <- c(1, 1.3)
# Simulate the log-normal TAR series driven by Z, plot it, then recover the
# parameters by least squares.
X <- simu.tar.lognorm(Z,l,r,K,theta,H)
ts.plot(X)
LS.lognorm(Z,X,l,r,K)
|
9ce19ca276209ac27e61b81767e37c11378f8299
|
d771ff12fe4ede6e33699704efa371a2f33cdfaa
|
/R/demo.sum.R
|
435f0f53294c2baabf075eaf43fd37ccbce184a3
|
[
"MIT"
] |
permissive
|
ImmuneDynamics/Spectre
|
aee033979ca6a032b49ede718792c72bc6491db5
|
250fe9ca3050a4d09b42d687fe3f8f9514a9b3bf
|
refs/heads/master
| 2023-08-23T14:06:40.859152
| 2023-04-27T00:31:30
| 2023-04-27T00:31:30
| 306,186,694
| 52
| 17
|
MIT
| 2023-08-06T01:26:31
| 2020-10-22T01:07:51
|
HTML
|
UTF-8
|
R
| false
| false
| 434
|
r
|
demo.sum.R
|
#' demo.sum - Demo summary dataset with features/measurements (columns) x samples (rows)
#'
#' @docType data
#'
#' @usage demo.sum
#'
#' @format Demo summary dataset with features/measurements (columns) x samples (rows).
#'
#' @author Thomas M Ashhurst, \email{thomas.ashhurst@@sydney.edu.au}
#'
#' @source Thomas M Ashhurst.
#'
#' @references \url{https://github.com/ImmuneDynamics/Spectre}.
#'
#' @examples
#' demo.sum
#'
"demo.sum"
|
acc4cdbabf96ebaec5dcd5c1b1ae2abd8cad7a39
|
67b44263a5b0a57c302845412d653594b611e41e
|
/man/topmod.plot.wordcloud.Rd
|
9a8221aa1cce1f565e14607ee9d45b9f39e2654a
|
[
"MIT"
] |
permissive
|
arturochian/corpus-tools
|
6f562a4b79ca3982663fac37a9791eb346ede5a9
|
1e3bd46e6b227b86e308b41c687c93ac8c077f79
|
refs/heads/master
| 2021-01-18T03:12:00.054654
| 2015-02-02T12:46:18
| 2015-02-02T12:46:18
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 392
|
rd
|
topmod.plot.wordcloud.Rd
|
% Generated by roxygen2 (4.0.1): do not edit by hand
\name{topmod.plot.wordcloud}
\alias{topmod.plot.wordcloud}
\title{Plot wordcloud for LDA topic}
\usage{
topmod.plot.wordcloud(m, topic_nr)
}
\arguments{
\item{m}{The output of \code{\link{LDA}}}
\item{topic_nr}{The index of the topic (1 to K)}
}
\value{
Nothing, just plots
}
\description{
Plots a wordcloud of the top words per topic
}
|
35109db245b1930df5c28f6ddf041f2412ec4a4d
|
b59d7569583dc3eb4894d5165f432166b1bd740d
|
/Simulation/Code/Main.R
|
fd36f6a47800ea1bd62e2dd16ecf06a5062a92c2
|
[] |
no_license
|
boyiguo1/MOTEF-Supplementary
|
60b033ee08e39aa6f91b54440708ea9ac81949a7
|
5a130faf2e3ae7ed80141c4b309955584c1265f6
|
refs/heads/master
| 2023-02-28T15:54:04.039571
| 2021-02-10T02:30:19
| 2021-02-10T02:30:19
| 232,177,507
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 5,105
|
r
|
Main.R
|
args=(commandArgs(TRUE))
ind <- diag
AR <- function(x){
  # Builds the AR(1) correlation matrix with rho = 0.8:
  # entry (i, j) equals 0.8^|i - j|.
  #
  # Args:
  #   x: Matrix dimension (number of time points), a non-negative integer.
  #
  # Returns:
  #   An x-by-x numeric matrix.
  #
  # FIX: `1:x` yields c(1, 0) when x == 0; seq_len() handles the
  # degenerate case correctly (returns a 0x0 matrix).
  t <- seq_len(x)
  return(0.8^abs(outer(t,t, "-")))
}
if(length(args)==0){
print("No arguments supplied.")
}else{
for(i in 1:length(args)){
eval(parse(text=args[[i]]))
}
}
# Required Library
library(MOTE.RF)
library(glmnet)
library(tidyverse)
library(randomForest)
pi <- 0.5 # pi is the ratio between treatment groups
it <- Sys.getenv('SLURM_ARRAY_TASK_ID') %>% as.numeric
paste0("This is Run", it, "\n") %>% cat
set.seed(it)
B <- create.B(p, intercept=T)
Z <- create.Z(p, q)
sim.dat <- sim_MOTE_data(n.train=n.train, n.test=n.test, p=p, q=q, ratio=pi,
trt.f = c("Linear", "Polynomial", "Box")[trt.f],
link.f = c("Linear", "Polynomial")[link.f],
cov.mat = sigma(p),
B=B, Z = Z)
# Organize data by standardize
train.dat <- sim.dat$train
x.b <- train.dat$x.b
x.e <- train.dat$x.e
y.b <- train.dat$y.b
y.e <- train.dat$y.e
treat <- train.dat$trt
test.dat <- sim.dat$test
test.x.b <- test.dat$x.b
true.trt.diff <- test.dat$y.e.2 - test.dat$y.e.1
###### MOTEF Model #####
MOTE.fit.time <- system.time({
RF.mdl <- MOTE(x.b = x.b, x.e = x.e, # Fit MOTEF
treat = treat,
y.b = y.b, y.e = y.e,
num.trees = 200,
num.random.splits = 10,
num.threads = 1,
oob.error = FALSE,
seed = as.numeric(it),
verbose=F)})
MOTE.size <- object.size(RF.mdl)
MOTE.predict.time <- system.time({
RF.mdl.trt.diff <- predict(RF.mdl, test.x.b)
})
###### l1 penalized model #####
# prepare fitting data
dat <- data.frame(x.b, Treat = treat)
f <- as.formula(~(.-Treat)*Treat)
susan.x <- model.matrix(f, dat)
l1.fit.time <- system.time({
cv.res <- cv.glmnet(susan.x, y.e, family="mgaussian", standardize=T, intercept=T)
glm.res <- glmnet(susan.x, y.e, family="mgaussian", lambda = cv.res$lambda.min, intercept=T)
})
l1.size <- object.size(glm.res)
test.treat <- data.frame(test.x.b,
Treat=rep(levels(treat)[1],n.test) %>% factor(levels = levels(treat)))
test.untreat <- data.frame(test.x.b,
Treat=rep(levels(treat)[2],n.test) %>% factor(levels = levels(treat)))
x.test.treat <- model.matrix(f, test.treat)
x.test.untreat <- model.matrix(f, test.untreat)
l1.predict.time <- system.time({
susan.treat.pred <- predict(glm.res, x.test.treat)
susan.untreat.pred <- predict(glm.res, x.test.untreat)
susan.treat.diff <- (susan.untreat.pred - susan.treat.pred) %>% data.frame
})
###### Marginal RF #####
test.treat <- data.frame(test.x.b,
Treat=rep(levels(treat)[1],n.test) %>% factor(levels = levels(treat)))
test.untreat <- data.frame(test.x.b,
Treat=rep(levels(treat)[2],n.test) %>% factor(levels = levels(treat)))
x.test.treat <- model.matrix(f, test.treat)
x.test.untreat <- model.matrix(f, test.untreat)
RF.fit.time <- 0
RF.pred.time <- 0
RF.size <- 0
margin.RF <- matrix(NA, nrow = n.test, ncol = q)
for(c in 1:q){
y1 <- y.e[,c]
tmp.fit.time <- system.time({
mod <- randomForest(x = susan.x, y = y1, ntree=200)
})
RF.size <- RF.size + object.size(mod)
RF.fit.time <- RF.fit.time + tmp.fit.time
tmp.pred.time <- system.time({
y1.treat1 <- predict(mod, newdata = x.test.treat)
y1.treat2 <- predict(mod, newdata = x.test.untreat)
ret <- y1.treat2 - y1.treat1
})
RF.pred.time <- RF.pred.time + tmp.pred.time
margin.RF[,c] <- ret
}
###### Summarize Simulation Results #####
# Save Prediction Error
sim.res <- data.frame(
run = it,
RF.mdl.MSE = rowSums((RF.mdl.trt.diff$predictions - true.trt.diff)^2) %>% mean,
RF.mdl.MSE.sd = rowSums((RF.mdl.trt.diff$predictions - true.trt.diff)^2) %>% sd,
susan.MSE = rowSums((susan.treat.diff - true.trt.diff)^2) %>% mean,
susan.MSE.sd = rowSums((susan.treat.diff - true.trt.diff)^2) %>% sd,
margin.RF.MSE = rowSums((margin.RF - true.trt.diff)^2) %>% mean,
margin.RF.MSE.sd = rowSums((margin.RF - true.trt.diff)^2) %>% sd
)
job_name <- Sys.getenv('SLURM_JOB_NAME')
saveRDS(sim.res,
paste0("/data/user/boyiguo1/MOTE/Res/", job_name,"/it_",it,".rds"))
# Save Running Time & Space
run.time <- data.frame(run = it,
MOTE_fit = MOTE.fit.time %>% data.matrix() %>% t,
MOTE_pred = MOTE.predict.time %>% data.matrix() %>% t,
MOTE_size = MOTE.size %>% data.matrix() %>% t,
l1_fit = l1.predict.time %>% data.matrix() %>% t,
l1_pred = l1.predict.time %>% data.matrix() %>% t,
l1_size = l1.size %>% data.matrix() %>% t,
RF_fit = RF.fit.time %>% data.matrix() %>% t,
RF_pred = RF.pred.time %>% data.matrix() %>% t,
RF_size = RF.size %>% data.matrix() %>% t,
)
saveRDS(run.time,
paste0("/data/user/boyiguo1/MOTE/RunTime/", job_name,"/it_",it,".rds"))
|
d88407a4f1f2c68b6ddb938961b4198db7386cca
|
d70a9ad5ef249f67be746186fe2b24ed9a833dc8
|
/models/height/mono/Avg_heights.R
|
623fa5a1ab5c57eea1ffe1e928ce98a2fac9881d
|
[
"BSD-3-Clause"
] |
permissive
|
cct-datascience/rangeland-restore
|
637fbd5ac72b356c1e666992668d8c8c570d9ad1
|
93567756223058481005408e970109f3a63162ff
|
refs/heads/master
| 2023-04-07T13:10:05.278701
| 2022-11-02T00:56:58
| 2022-11-02T00:56:58
| 376,160,215
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 5,683
|
r
|
Avg_heights.R
|
# Average heights per plot
# modeled as a normal distribution
library(rjags)
load.module('dic')
library(mcmcplots)
library(postjags)
library(ggplot2)
library(dplyr)
# Read in data
load("../../../cleaned_data/cover_mono.Rdata") # cover_mono
dat <- cover_mono %>%
mutate(species = factor(species, levels = c("ELTR", "POSE", "POFE", "VUMI", "ELEL")))
# Plot
hist(dat$height)
summary(dat$height)
dat %>%
ggplot(aes(x = species, y = height)) +
geom_jitter(aes(color = grazing)) +
facet_grid(cols = vars(seed_rate),
rows = vars(seed_coat))
# model matrix
X <- model.matrix( ~ (species + seed_rate + grazing + seed_coat)^2, data = dat)
colnames(X)
# standard deviation among paddocks and blocks
log(sd(tapply(dat$height, dat$block, FUN = mean)))
# Assemble model inputs
datlist <- list(height = dat$height,
N = nrow(dat),
POSE = X[,2],
POFE = X[,3],
VUMI = X[,4],
ELEL = X[,5],
high = X[,6],
fall = X[,7],
spring = X[,8],
coated = X[,9],
nL = ncol(X) - 1, # number of offset levels
block = as.numeric(dat$block),
Nb = length(unique(dat$block)),
Ab = 5) # stand deviation among paddocks and blocks
# likely intercept value
base <- dat %>%
filter(grazing == "ungrazed",
species == "ELTR",
seed_rate == "low",
seed_coat == "UC")
hist(base$height, breaks = 30)
median(base$height)
# generate random initials
inits <- function(){
  # Draws one set of random starting values for a JAGS chain:
  # intercept, regression coefficients, and two precision terms.
  # NOTE(review): relies on the global model matrix `X` (ncol(X) - 1
  # beta coefficients); `X` must exist in the calling environment.
  list(alpha = rnorm(1, 0, 10),
       beta = rnorm(ncol(X) - 1, 0, 10),
       tau.Eps = runif(1, 0, 1),
       tau = runif(1, 0, 1))
}
initslist <- list(inits(), inits(), inits())
# Or, use previous starting values + set seed
load("inits/inits.Rdata")# saved.state, second element is inits
initslist <- list(append(saved.state[[2]][[1]], list(.RNG.name = array("base::Super-Duper"), .RNG.seed = array(13))),
append(saved.state[[2]][[2]], list(.RNG.name = array("base::Wichmann-Hill"), .RNG.seed = array(89))),
append(saved.state[[2]][[3]], list(.RNG.name = array("base::Mersenne-Twister"), .RNG.seed = array(18))))
# model
jm <- jags.model(file = "Avg_heights_norm.jags",
inits = initslist,
n.chains = 3,
data = datlist)
# update(jm, 10000)
# params to monitor
params <- c("deviance", "Dsum", # evaluate fit
"alpha", "beta", # parameters
"tau.Eps", "sig.eps", "tau", "sig", # precision/variance terms
"alpha.star", "eps.star", # identifiable intercept and random effects
"int_Beta", # monitored effect combinations
"m.ELEL.low.ungrazed.uncoated", "m.ELEL.low.ungrazed.coated", "m.ELEL.low.fall.uncoated", "m.ELEL.low.fall.coated", "m.ELEL.low.spring.uncoated", "m.ELEL.low.spring.coated",
"m.ELEL.high.ungrazed.uncoated", "m.ELEL.high.ungrazed.coated", "m.ELEL.high.fall.uncoated", "m.ELEL.high.fall.coated", "m.ELEL.high.spring.uncoated", "m.ELEL.high.spring.coated",
"m.VUMI.low.ungrazed.uncoated", "m.VUMI.low.ungrazed.coated", "m.VUMI.low.fall.uncoated", "m.VUMI.low.fall.coated", "m.VUMI.low.spring.uncoated", "m.VUMI.low.spring.coated",
"m.VUMI.high.ungrazed.uncoated", "m.VUMI.high.ungrazed.coated", "m.VUMI.high.fall.uncoated", "m.VUMI.high.fall.coated" , "m.VUMI.high.spring.uncoated", "m.VUMI.high.spring.coated",
"m.POFE.low.ungrazed.uncoated", "m.POFE.low.ungrazed.coated", "m.POFE.low.fall.uncoated", "m.POFE.low.fall.coated", "m.POFE.low.spring.uncoated", "m.POFE.low.spring.coated",
"m.POFE.high.ungrazed.uncoated", "m.POFE.high.ungrazed.coated", "m.POFE.high.fall.uncoated", "m.POFE.high.fall.coated", "m.POFE.high.spring.uncoated", "m.POFE.high.spring.coated",
"m.POSE.low.ungrazed.uncoated", "m.POSE.low.ungrazed.coated", "m.POSE.low.fall.uncoated", "m.POSE.low.fall.coated", "m.POSE.low.spring.uncoated", "m.POSE.low.spring.coated",
"m.POSE.high.ungrazed.uncoated", "m.POSE.high.ungrazed.coated", "m.POSE.high.fall.uncoated", "m.POSE.high.fall.coated", "m.POSE.high.spring.uncoated", "m.POSE.high.spring.coated",
"m.ELTR.low.ungrazed.uncoated", "m.ELTR.low.ungrazed.coated", "m.ELTR.low.fall.uncoated", "m.ELTR.low.fall.coated", "m.ELTR.low.spring.uncoated", "m.ELTR.low.spring.coated",
"m.ELTR.high.ungrazed.uncoated", "m.ELTR.high.ungrazed.coated", "m.ELTR.high.fall.uncoated", "m.ELTR.high.fall.coated", "m.ELTR.high.spring.uncoated", "m.ELTR.high.spring.coated"
)
coda.out <- coda.samples(jm, variable.names = params,
n.iter = 15000, thin = 5)
# plot chains
mcmcplot(coda.out, parms = c("deviance", "Dsum", "beta",
"alpha.star", "eps.star", "sig.eps",
"sig"))
caterplot(coda.out, parms = "beta", reorder = FALSE)
caterplot(coda.out, parms = "eps.star", reorder = FALSE)
# dic samples
dic.out <- dic.samples(jm, n.iter = 5000)
dic.out
# convergence?
gel <- gelman.diag(coda.out, multivariate = FALSE)
gel
# If not converged, restart model from final iterations
# newinits <- initfind(coda.out)
# newinits[[1]]
# saved.state <- removevars(newinits, variables = c(1, 3, 5:68))
# saved.state[[1]]
# save(saved.state, file = "inits/inits.Rdata")
save(coda.out, file = "coda/coda.Rdata")
# Model fit
params <- c("height.rep") #monitor replicated data
coda.rep <- coda.samples(jm, variable.names = params,
n.iter = 15000, thin = 5)
save(coda.rep, file = "coda/coda_rep.Rdata")
|
32f253d9a6893da5087c98b3958b930bad609b1d
|
ffdea92d4315e4363dd4ae673a1a6adf82a761b5
|
/data/genthat_extracted_code/kmconfband/examples/noe.Rd.R
|
0bf0925cac20822acd8148b3d4effffefda50439
|
[] |
no_license
|
surayaaramli/typeRrh
|
d257ac8905c49123f4ccd4e377ee3dfc84d1636c
|
66e6996f31961bc8b9aafe1a6a6098327b66bf71
|
refs/heads/master
| 2023-05-05T04:05:31.617869
| 2019-04-25T22:10:06
| 2019-04-25T22:10:06
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 468
|
r
|
noe.Rd.R
|
library(kmconfband)
### Name: noe
### Title: Noe Recursions for the Exact Coverage Probability of a
###   Nonparametric Confidence Band for the Survivor Function
### Aliases: noe
### ** Examples
## A check of the Noe recursion calculations. This result is cited in
## Jager and Wellner's 2005 technical report, Table 1, p. 13.
## The correct value is 0.95
# a and b are the lower/upper band boundaries for n = 4 observations;
# noe() returns the exact simultaneous coverage probability (~0.95 here).
a<-c(0.001340,0.028958,0.114653,0.335379)
b<-c(0.664621,0.885347,0.971042,0.998660)
print(noe(4,a,b))
|
cd3eea0cdac67c0c6b001cb4c7423867934ef629
|
1943642c50bfbc19a8e6cfa8953fb5f34cca6d83
|
/R/metrics.R
|
e767d47348c4109c231b481a8651ca74aea939f8
|
[] |
no_license
|
diegomattozo/RDisc
|
7e19151efb361eec780ea767647d469f5e004810
|
bfa5146bddb5c36be6745f802aa1e6344455029b
|
refs/heads/master
| 2022-04-25T16:02:51.245025
| 2020-04-28T02:34:44
| 2020-04-28T02:34:44
| 259,181,853
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 62
|
r
|
metrics.R
|
chi_square <- function(x, y) {
  # Pearson's chi-squared test statistic for the association between x and y.
  # Returns the named statistic ("X-squared") from stats::chisq.test().
  test_out <- chisq.test(x, y)
  test_out[["statistic"]]
}
|
f161b0c3952930c3997a1649491e6c7f89233254
|
0986b0e01c2b07b18ed039705c897908e266bdd5
|
/units/0_R_Tutorial/assignment_ggplot.r
|
41a70bbc2017e348057028b67888d670a27ddce3
|
[] |
no_license
|
mtaylor-semo/438
|
8b74e6c092c7c0338dd28b5cefe35f6a55147433
|
aab07b32495297a59108d9c13cd29ff9ec3824d3
|
refs/heads/main
| 2023-07-06T14:55:25.774861
| 2023-06-21T21:36:04
| 2023-06-21T21:36:04
| 92,411,601
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,192
|
r
|
assignment_ggplot.r
|
## Code to create the Alberta Climate Plot in the
## R Tutorial Assignment. Note the use of the
## code to add a degree symbol to the Y axis, which
## is given as an extra credit challenge to the students.
#setwd('biogeo')
library(tidyverse)
#climate <- read.csv('http://mtaylor4.semo.edu/~goby/biogeo/climatedata.csv')
climate <- read_csv('tutorial_climate_data.csv')
#attach(climate)
# plot(MAT ~ MAP, xlab='Mean annual precipitation (mm)', ylab = 'Mean annual temperature', pch=c(21,22,25)[Species], bg=rainbow(8)[Ecosys], xlim=c(0,3500), ylim=c(-5,10))
# text(700, 4, 'Grassland', font=3, pos=4)
# text(1900,8, 'Redcedar', font=3, pos=4)
# text(700, -2.5, 'Larch', font=3, pos=4)
#
# legend(3100, 1, legend=c('A','B','C','D','E','F','G','H'), pch=c(25,22,21,22,22,21,25,25), col='black', pt.bg=rainbow(8))
# Scatter of mean annual temperature vs. precipitation, with point shape by
# species and color/fill by ecosystem, plus ecosystem labels.
climate %>% ggplot(aes(x = MAP, y = MAT)) +
  geom_point(aes(shape = Species,
                 color = Ecosys,
                 fill = Ecosys)) +
  scale_shape_manual(values = c(21, 22, 24)) +
  scale_color_brewer(palette = "Dark2",
                     aesthetics = c("color", "fill"),
                     guide = "none") +
  theme_minimal() +
  labs(x = "Mean Annual Precipitation (mm)",
       # FIX: degree sign was mojibake ("ยฐ") from a mis-encoded "°".
       y = "Mean Annual Temperature (°C)") +
  guides(shape = guide_legend(override.aes = list(fill = "black"))) +
  geom_text(aes(x = 700, y = 4, label = "Grassland"),
            hjust = "left",
            size = 5) +
  geom_text(aes(x = 2100, y = 8, label = "Redcedar"),
            hjust = "left",
            size = 5) +
  geom_text(aes(x = 1800, y = 0, label = "Larch"),
            hjust = "left",
            size = 5)
# op <- par(cex=1, ps=12, family='serif', mar=c(5,5,3,3))
# plot(MAT ~ MAP, xlab='Mean annual precipitation (mm)', ylab = expression(paste('Mean annual temperature (',degree,'C)')), pch=c(21,22,25)[Species], bg=rainbow(8)[Ecosys], xlim=c(0,3500), ylim=c(-5,10))
# text(700, 4, 'Grassland', font=3, pos=4)
# text(1900,8, 'Redcedar', font=3, pos=4)
# text(700, -2.5, 'Larch', font=3, pos=4)
#
# #legend(2400, 2, legend=c('A','B','C','D','E','F','G','H'), pch=c(25,22,21,22,22,21,25,25), col='black', pt.bg=rainbow(8))
#
# par(op)
#
# detach(climate)
|
1ef07b52a0c44a5e51052de6de614a0c8e265b5d
|
146f3eb628a803d9bb3a37e81b792e0032fc50a8
|
/docs/R/functions.R
|
c676f62811c20de7c59b864ca1ff83f362ed949c
|
[] |
no_license
|
brychan-manry/AstraZeneca-openFDA-Case-Study
|
31b2ed03530f6d01cd3fcd33008f59c6ee3aad2f
|
15666032fa3f729d7195843df0de303f2ee47c7d
|
refs/heads/master
| 2022-04-02T23:42:27.679039
| 2020-01-14T04:16:56
| 2020-01-14T04:16:56
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 4,888
|
r
|
functions.R
|
#==============================================================================================================#
# This file contains the general functions for used for generating queries, interacting with openFDA, and
# creating the various plots and tables in the report
#
# Note: given the time limitations these are still very much a WIP
#==============================================================================================================#
# ========= LIBRARY DEPENDENCIES =========#
library(httr)
library(jsonlite)
library(ggplot2)
# ========= FUNCTIONS =========#
createQuery = function(queryItemList, rootURL = 'https://api.fda.gov/drug/event.json?api_key=umOJyfx1udGdQjn1XtT1XGasUjSbgiDIvsJ47jiB&'){
  #============================================#'
  # Description: creates a query string using named elements in a list
  # Inputs:  queryItemList: Named list of query items available for openFDA
  #                         (NULL elements are dropped)
  #          rootURL: root URL for openFDA query (default is FAERS)
  # Outputs: character string URL
  #
  # SECURITY NOTE(review): the default rootURL embeds a hard-coded API key;
  # consider reading it from an environment variable instead.
  #============================================#'
  stopifnot(inherits(queryItemList, "list"))
  #TODO: add check for valid query terms e.g., search, count, limit, skip
  # Drop NULL entries, then build the "name=value" pairs in one vectorized
  # step (avoids growing a vector with c() inside a loop).
  keep <- !vapply(queryItemList, is.null, logical(1))
  queryItems <- paste(names(queryItemList)[keep],
                      unlist(queryItemList[keep], use.names = FALSE),
                      sep = "=")
  # Combine into single url string
  paste0(rootURL, paste0(queryItems, collapse = "&"))
}
getResults = function(q, excludeMeta = TRUE, verbose = FALSE){
  #============================================#'
  # Description: Submits request to openFDA and returns the results
  # Inputs:  q: a complete query URL (character), or a named list which is
  #             converted to a URL via createQuery()
  #          excludeMeta (boolean): If true only results portion of JSON will be returned
  #          verbose (boolean): prints additional information while running
  # Outputs: list object created by jsonlite from returned openFDA JSON
  #
  # NOTE(review): non-200 responses are not handled yet (see TODO below);
  # jsonlite::fromJSON will likely error on an HTML/error payload.
  #============================================#'
  if(!class(q) %in% c("list", "character")){
    stop("Query must be character or list")
  } else if(class(q) == "list"){
    q = createQuery(q)
  }
  # Added this to show progress when stiching together queries using skip
  if(verbose){
    cat('\n\n')
    cat(q)
  }
  response = GET(q)
  #TODO: error handling if status code != 200 e.g, request failed
  if(response$status_code == 200 & verbose) cat("\n SUCCESS!!!")
  # Extract the raw JSON text, then parse it into R lists/data frames.
  response = content(response, as = "text")
  response = fromJSON(response)
  if(excludeMeta) response = response$results
  return(response)
}
loadCategoryMap = function(mapName, dir = "./data/category_maps"){
  #============================================#'
  # Description: Some openFDA fields use numeric values for categorical fields.
  #              This function loads the lookup CSV whose filename matches
  #              `mapName` (used as a regex pattern against files in `dir`).
  # Inputs:  mapName (character): name of field with categorical values
  #          dir (character): path to directory with lookup csv files
  # Outputs: data.frame with a numeric code column and a character label column
  #============================================#'
  candidates <- list.files(dir, pattern = mapName)
  if(length(candidates) == 0) stop("No Matching Category Map Found!")
  if(length(candidates) > 1) stop("Multiple Matches Found Please Be More Specific!")
  read.csv(file.path(dir, candidates), colClasses = c("numeric", "character"))
}
mapCategoryValues = function(df, n){
  #============================================#'
  # Description: Merges category mapping to data.frame, replacing the numeric
  #              codes in `term` with their text descriptions.
  # Inputs:  df (data.frame): usually the result of a "count" query. Must have "term" as a field
  #          n (character): name of field with categorical values (passed to
  #             loadCategoryMap() to locate the lookup CSV)
  # Outputs: data frame with the same columns as `df`, but `term` now holds
  #          the mapped text labels
  # NOTE(review): merge() keeps only rows whose code appears in the lookup
  # table (inner join), and may reorder rows by `term`.
  #============================================#'
  valueMap = loadCategoryMap(mapName = n)
  grab_names = names(df)
  # Merge on the numeric code, drop the code column, then rename the joined
  # label column back to "term" and restore the original column order.
  df = merge(df, valueMap, by = "term")[ , -1]
  names(df)[ncol(df)] = "term"
  return(df[grab_names])
}
countPlot = function(plotData, axis_label, plot_title = NULL, mapCategories = NULL, returnPlotObject = FALSE){
  #============================================#'
  # Description: Creates a horizontal bar plot for openFDA count query results
  # Inputs:  plotData (data.frame): must contain `term` and `count` columns;
  #            an optional `compare` column triggers dodged grouped bars
  #          axis_label (character): label for the term axis
  #          plot_title (character): optional plot title
  #          mapCategories (character): optional field name; when given, term
  #            codes are mapped to labels via mapCategoryValues()
  #          returnPlotObject (boolean): if TRUE return the ggplot object,
  #            otherwise draw it
  # Outputs: ggplot2 plot (returned invisibly-via-return or drawn)
  #============================================#'
  if(!is.null(mapCategories)) plotData = mapCategoryValues(plotData, mapCategories)
  plotData$term = toupper(plotData$term)
  # Bars are ordered by count; `compare`, when present, colors grouped bars.
  if("compare" %in% names(plotData)){
    p = ggplot(plotData, aes(reorder(term, count), count, fill = compare))
  } else {
    p = ggplot(plotData, aes(reorder(term, count), count))
  }
  p = p +
    geom_bar(stat = "Identity", position = 'dodge') +
    xlab(axis_label) +
    coord_flip()
  if(!is.null(plot_title)) p = p + ggtitle(plot_title)
  if(returnPlotObject) return(p)
  plot(p)
}
|
4a7a9d48193b73f5036e058e3cdc275c55fa8d91
|
4b237cef2143657587c85e657696777b8a0b0a81
|
/1.Duc_Le_EDA.R
|
44b633aebf1e2281c86b9ab0b2b9de493a8fc052
|
[] |
no_license
|
dukele35/credit_evaluation
|
297b30d745b03c89b5730fad3798c72efe723d36
|
668a08564cde11f9566e90623813f6e9a9ac209d
|
refs/heads/master
| 2022-11-22T22:23:59.331661
| 2020-07-28T22:55:40
| 2020-07-28T22:55:40
| 282,753,630
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 14,773
|
r
|
1.Duc_Le_EDA.R
|
##### 1. data preparation #####
# 1.1. list the excel file's sheets
library(tidyverse)
library(readxl)
excel_sheets('Credit_Risk6_final.xlsx')
# 1.2. load the dataset
# 1.2.1 dataframe df1 from 'Training_Data' sheet
df1 <- read_excel('Credit_Risk6_final.xlsx', sheet = 'Training_Data')
View(df1)
nrow(df1)
ncol(df1)
str(df1)
# 1.2.2 dataframe df2 from 'Scoring_Data' sheet
df2 <- read_excel('Credit_Risk6_final.xlsx', sheet = 'Scoring_Data')
View(df2)
nrow(df2)
ncol(df2)
str(df2)
# 1.3. change columns' names
# df1
colnames(df1)
names(df1)[names(df1) == 'ID'] <- 'id'
names(df1)[names(df1) == 'Checking Acct'] <- 'checking'
names(df1)[names(df1) == 'Credit History'] <- 'history'
names(df1)[names(df1) == 'Loan Reason'] <- 'loan_reason'
names(df1)[names(df1) == 'Savings Acct'] <- 'saving'
names(df1)[names(df1) == 'Employment'] <- 'employment'
names(df1)[names(df1) == 'Personal Status'] <- 'status'
names(df1)[names(df1) == 'Housing'] <- 'housing'
names(df1)[names(df1) == 'Job Type'] <- 'job'
names(df1)[names(df1) == 'Foreign National'] <- 'foreign'
names(df1)[names(df1) == 'Months since Checking Acct opened'] <- 'months'
names(df1)[names(df1) == 'Residence Time (In current district)'] <- 'residence'
names(df1)[names(df1) == 'Age'] <- 'age'
names(df1)[names(df1) == 'Credit Standing'] <- 'credit'
# df2
colnames(df2)
names(df2)[names(df2) == 'ID'] <- 'id'
names(df2)[names(df2) == 'Checking Acct'] <- 'checking'
names(df2)[names(df2) == 'Credit History'] <- 'history'
names(df2)[names(df2) == 'Loan Reason'] <- 'loan_reason'
names(df2)[names(df2) == 'Savings Acct'] <- 'saving'
names(df2)[names(df2) == 'Employment'] <- 'employment'
names(df2)[names(df2) == 'Personal Status'] <- 'status'
names(df2)[names(df2) == 'Housing'] <- 'housing'
names(df2)[names(df2) == 'Job Type'] <- 'job'
names(df2)[names(df2) == 'Foreign National'] <- 'foreign'
names(df2)[names(df2) == 'Months since Checking Acct opened'] <- 'months'
names(df2)[names(df2) == 'Residence Time'] <- 'residence'
names(df2)[names(df2) == 'Age'] <- 'age'
# 1.4 checking duplications and overlaps
# 1.4.1 checking column id
# building a function to check whether a vector is consecutive or not
check_consecutive <- function(x){
  # Reports whether the values of x increase by exactly 1 at every step.
  # Prints a verdict; for non-consecutive input also prints the positions
  # where the step is not 1.
  gaps <- diff(x)
  if (all(gaps == 1)) {
    print('This is consecutive')
  } else {
    print('This is not consecutive')
    print('Positions are not consecutive')
    print(which(gaps != 1))
  }
}
check_consecutive(df1$id)
check_consecutive(df2$id)
# 1.4.1a IDs vs credit - checking the normality of the 'credit' entry
# percentage of the good credit
prop_good <- prop.table(table(df1$credit))[[2]]
prop_good
# percentage of the bad credit
prop_bad <- prop.table(table(df1$credit))[[1]]
prop_bad
# getting the lengths of consecutive credits which are similar
consecutive_credit <- rle(df1$credit)
credit_freq <- consecutive_credit[[1]]
credit_value <- consecutive_credit[[2]]
# identify the ids positions where consecutive similar credits start
id_start <- c(1, cumsum(consecutive_credit[[1]]) + 1)
id_start <- id_start[1:length(id_start)-1]
# identify the ids positions where consecutive similar credits end
id_end <- id_start + consecutive_credit[[1]] - 1
# calculate chance of events associated with consecutive similar credits
chance <- c()
for(i in 1:length(id_start)){
if(credit_value[i] == "Good"){
chance <- c(chance, prop_good^credit_freq[i]*100)
}else{
chance <- c(chance, prop_bad^credit_freq[i]*100)
}
}
# creating a dataframe for suspicious entries
# i.e. number of consecutive credit entries are greater than 6
frame_a <- data.frame(id_start, id_end, credit_freq, credit_value, chance)
colnames(frame_a) <- c('id_start', 'id_end', 'frequency', 'value', 'chance_in_percentage')
good_entry_limit <- frame_a %>%
filter(value == 'Good') %>%
filter(frequency >= 6)
bad_entry_limit <- frame_a %>%
filter(value == 'Bad') %>%
filter(frequency >= 4)
suspicious_entry <- bind_rows(good_entry_limit, bad_entry_limit)
suspicious_entry <- suspicious_entry %>% arrange(id_start)
suspicious_entry
# 1.4.2 check all columns apart from the ids
# 1.4.2.1 regardless of the ids column, checking duplications in the dataset df1
a <- duplicated(df1[,2:14])
# a vector containing rows are duplicated with other rows in the dataset df1
row_dups <- df1$id[a]
# the number of duplications in the dataset df1
cat('Number of duplications in the dataset df1:',length(row_dups))
# print all the ids having similar rows in the dataset df1
for(j in row_dups){
for(i in 1:nrow(df1)){
if(j != i){
if(identical(df1[j,2:14], df1[i, 2:14])){
cat('\n In the dataset df1, the id number',df1[j,1]$id, 'is having similar row to the id number',df1[i,1]$id)
}
}
}
}
# for example, id 470 has similar row to id 7
df1[df1$id == 470,]
df1[df1$id == 7,]
# 1.4.2.2 regardless of the ids column, checking duplications in the dataset df2
b <- duplicated(df2[,2:13])
b # there is no duplication in the dataset df2
# 1.4.2.3 checking any overlap between df1 & df2 regardless of the ids columns
# i.e. checking whether any new observations are from the past observations
for(i in 1:nrow(df2)){
for(j in 1:nrow(df1)){
if(identical(df2[i,2:13], df1[j,2:13])){
cat('\nThe id', df2[i,1]$id,'in dataset df2 is having similar row to the id', df1[j,1]$id, 'in dataset df1')
}
}
}
# for example, id 782 in dataset df2 is similar to the id 607 in dataset df1
df2[df2$id == 782,]
df1[df1$id == 607,]
# for example, id 783 in dataset df2 is similar to the id 603 in dataset df1
df2[df2$id == 783,]
df1[df1$id == 603,]
# checking whether these df1's ids are in duplicated rows which was checked in the step 1.4.2.1 above, i.e. row_dups
c(607,603) %in% row_dups
# 1.4.3 removing duplications in dataset df1
df1 <- df1[-row_dups,]
str(df1)
# 1.5. checking categorical variables
# 1.5.1. factorise columns whose classes are characters
# df1
str(df1)
for(i in 2:ncol(df1)){
if(is.character(df1[[i]])){
df1[[i]] <- as.factor(df1[[i]])
}
}
str(df1)
# df2
str(df2)
for(i in 2:ncol(df2)){
if(is.character(df2[[i]])){
df2[[i]] <- as.factor(df2[[i]])
}
}
str(df2)
# 1.5.2. changing level's name from '0Balance' to 'No Balance' in the checking column
# df1
levels(df1$checking)[levels(df1$checking) == '0Balance'] <- 'No Balance'
levels(df1$checking)
# df2
levels(df2$checking)[levels(df2$checking) == '0Balance'] <- 'No Balance'
levels(df2$checking)
# 1.5.3 checking missing values for each categorical variable
for(i in 2:ncol(df1)){
if(is.factor(df1[[i]])){
cat('\n','This is the column', names(df1[i]), " - column's position",i, '\n')
cat('No. of missing values', sum(is.na(df1[[i]])), '\n')
}
}
# 1.5.4. investigating different values of each categorical variable
for(i in 2:ncol(df1)){
if(is.factor(df1[[i]])){
cat('\n', 'This is the column', names(df1[i]), " - column's position", i, '\n')
cat('Number of factors -', length(levels(df1[[i]])), '\n')
print(levels(df1[[i]]))
}
}
# 1.6. checking numeric columns
# 1.6.1 checking missing values
for(i in 2:ncol(df1)){
if(is.numeric(df1[[i]])){
cat('\n','This is the column', names(df1[i]), " - column's position",i, '\n')
cat('No. of missing values', sum(is.na(df1[[i]])), '\n')
}
}
# 1.7 dealing with missing values
# instruction: https://cran.r-project.org/web/packages/mice/mice.pdf
# install.packages('mice')
library(mice)
# 1.7.1 visualising the missing-data matrix
md.pattern(df1)
# 1.7.2 imputing the missing data
impute <- mice(df1[,2:14], m=5, seed = 696) # m: Number of multiple imputations
print(impute) # for catogerical variables having missing values, i.e. employment, status & housing, multinomial logistic regression is applied
# printing imputed values which are grouped in 5 imputations
impute$imp$employment
impute$imp$status
impute$imp$housing
# complete data
df1 <- bind_cols(as.data.frame(df1$id), complete(impute, 1))
names(df1)[names(df1) == 'df1$id'] <- 'id'
str(df1)
# # 1.8 export csv files from dataframes df1 & df2
# write.csv(df1, 'df1.csv', row.names = FALSE)
# write.csv(df2, 'df2.csv', row.names = FALSE)
##### a.EDA #####
# a.0. setting positions of numeric variables and categorical variables
nume_pos <- c(11,12,13)
cate_pos <- c(2,3,4,5,6,7,8,9,10,14)
# a.1 investigating correlations among variables
# a.1.1 correlations among numeric variables
# a.1.1.1 create a dataframe having numeric variables
df1numeric <- df1[,nume_pos]
str(df1numeric)
# a.1.1.2 building correlation matrix among variables
cor.mat <- cor(df1numeric, use = 'complete.obs')
# a.1.1.3 building p-value matrix among variables
cor.mtest <- function(mat, ...) {
  # Pairwise correlation-test p-values for the columns of `mat`.
  #
  # Args:
  #   mat: numeric matrix (or coercible); columns are variables.
  #   ...: extra arguments forwarded to cor.test().
  #
  # Returns:
  #   Symmetric n x n matrix of p-values with zeros on the diagonal and
  #   dimnames taken from the column names of `mat`.
  #
  # FIX: the original `1:(n - 1)` loops over c(1, 0) when n < 2 and then
  # indexes non-existent columns; seq_len() skips the loop cleanly instead.
  mat <- as.matrix(mat)
  n <- ncol(mat)
  p.mat <- matrix(NA, n, n)
  diag(p.mat) <- 0
  for (i in seq_len(n - 1)) {
    for (j in seq(i + 1, n)) {
      tmp <- cor.test(mat[, i], mat[, j], ...)
      # Fill both triangles so the result is symmetric.
      p.mat[i, j] <- p.mat[j, i] <- tmp$p.value
    }
  }
  colnames(p.mat) <- rownames(p.mat) <- colnames(mat)
  p.mat
}
p.mat <- cor.mtest(df1numeric)
# a.1.1.4 plotting correlation matrix
# install.packages("corrplot")
# instruction: http://www.sthda.com/english/wiki/visualize-correlation-matrix-using-correlogram
library(corrplot)
col1 <- colorRampPalette(c("#083c5d",'white', "#d98310"))
corrplot(cor.mat,
method = 'ellipse',
# choosing lower half of the corr. plot
# type = 'lower',
# add the correlation
addCoef.col = 'black', number.cex = 0.7,
# changing the axis's color
tl.col="black",
tl.srt=0,
# dropping the correlation between a variabe with itself
diag=F,
# grey coloring the cells with insignificant level of p-value being greater than 0.01 - NB. view full screen
p.mat = p.mat, sig.level = 0.01, pch.col = 'grey91', pch.cex = 11, pch = 19,
col = col1(100))
# a.1.2 correlations among categorical variables - chi-squared test
# install.packages('greybox')
# instruction: https://rdrr.io/cran/greybox/man/cramer.html
library(greybox)
# a.1.2.1 create a dataframe having categorical variables
df1cate <- df1[,cate_pos]
View(df1cate)
# a.1.2.2 create chi-square matrix & corresponding p-value matrix
chi_elements <- c()
pchi_elements <- c()
for(i in 1:length(df1cate)){
for(j in 1:length(df1cate)){
chi <- cramer(df1cate[[i]], df1cate[[j]], use = 'complete.obs')
chi_elements <- c(chi_elements, chi$value)
pchi_elements <- c(pchi_elements, chi$p.value)
}
}
chi.mat <- matrix(chi_elements,
nrow = length(df1cate),
dimnames = list(names(df1cate), names(df1cate)))
pchi.mat <- matrix(pchi_elements,
nrow = length(df1cate),
dimnames = list(names(df1cate), names(df1cate)))
View(chi.mat)
View(pchi.mat)
# a.1.2.3 plotting chi-square matrix
col1 <- colorRampPalette(c("#083c5d",'white', "#d98310"))
corrplot(chi.mat,
method = 'ellipse',
# choosing lower half of the corr. plot
type = 'lower',
# add the correlation
addCoef.col = 'black', number.cex = 0.7,
# changing the axis's color
tl.col="black",
tl.srt=45,
  # dropping the correlation of a variable with itself
diag=F,
# grey coloring the cells with insignificant level of p-value being greater than 0.01 - NB. view full screen
p.mat = pchi.mat, sig.level = 0.01, pch.col = 'grey91', pch.cex = 7.6, pch = 19,
col = col1(100))
# a.1.3 intraclass correlations between categorical variables vs numerical variables (ANOVA)
# a.1.3.1 create intraclass coefficient matrix and corresponding p-value matrix
# instruction: https://cran.r-project.org/web/packages/ICC/ICC.pdf
# install.packages('ICC')
library(ICC)
intra_elements <- c()
p.intra_elements <- c()
for(i in nume_pos){
for(j in cate_pos){
# create a vector of intraclass coefficients
ano <- ICCest(df1[[j]], df1[[i]], alpha = 0.01)
intra_elements <- c(intra_elements, ano$ICC)
# create a vector of p-values
anova_test <- aov(df1[[i]] ~ df1[[j]])
p.intra <- summary(anova_test)[[1]][["Pr(>F)"]][1]
p.intra_elements <- c(p.intra_elements, p.intra)
}
}
# View(intra_elements)
# View(p.intra_elements)
intra.mat <- matrix(intra_elements,
nrow = length(nume_pos),
byrow = TRUE,
dimnames = list(names(df1numeric), names(df1cate)))
p.intra.mat <- matrix(p.intra_elements,
nrow = length(nume_pos),
byrow = TRUE,
dimnames = list(names(df1numeric), names(df1cate)))
View(intra.mat)
View(p.intra.mat)
# a.2 visualisations
# a.2.1 univariate visualisations
# a.2.1.1 barplots for categorical variables
# One barplot per factor column of df1, with the count printed above each bar.
# Missing values, when present, are shown as an explicit '"missing"' bar.
# The original duplicated the whole plotting code across a with-NA and a
# without-NA branch; table(useNA = 'ifany') counts NA only when present,
# so a single code path now covers both cases identically.
for(i in 2:ncol(df1)){
  # choose the categorical variables only
  if(is.factor(df1[[i]])){
    df <- as.data.frame(table(df1[[i]], useNA = 'ifany'))
    if(any(is.na(df[[1]]))){
      # relabel the NA level so it gets a readable axis label
      levels(df[[1]]) <- c(levels(df[[1]]), '"missing"')
      df[[1]][is.na(df[[1]])] <- '"missing"'
    }
    bp <- barplot(df[[2]],
                  names.arg = df[[1]],
                  las = 1,
                  border = FALSE,
                  ylim = c(0, max(df[[2]]) + 100),
                  main = names(df1[i]),
                  col = '#083c5d',
                  cex.names = 1)
    # print the count just above each bar
    text(bp, df[[2]] + 20, labels = df[[2]], cex = 1, col = 'black')
  }
}
# a.2.1.2 histograms for numeric variables
par(mfrow=c(1,3))
for(i in 2:ncol(df1)){
if(is.numeric(df1[[i]])){
hist(df1[[i]], main = names(df1[i]), xlab = names(df1[i]),
col = '#083c5d',
ylim = c(0, 350))
}
}
par(mfrow=c(1,1))
# a.2.1.3 boxplots for numeric variables
for(i in 2:ncol(df1)){
if(is.numeric(df1[[i]])){
plt <- ggplot(df1, aes(x=1, y=df1[[i]])) +
geom_boxplot(fill = '#d98310', alpha = 1) +
labs(y = names(df1[i]), title = paste(names(df1[i]), 'boxplot'))
print(plt)
}
}
# a.2.1.4 density plots for numeric variables
for(i in 2:ncol(df1)){
if(is.numeric(df1[[i]])){
plt <- ggplot(df1, aes(x=df1[[i]])) +
geom_density(color="black", fill="#d98310") +
labs(x = names(df1[i]), title = paste(names(df1[i]), 'density plot'))
print(plt)
}
}
# a.2.2 bivariate visualisations
# a.2.3 multivariate visualisations
|
1c4ac4f55a6b25556434f265e8e94e608e6b13f1
|
17e405a893b652f1da704f0b8c1b4669d8a2dc72
|
/plotting_lms.R
|
5f6ad55f98d8c79e7306d38c20c6d5160012c276
|
[] |
no_license
|
eflynn90/L_and_L
|
b59fdfb5e4ffb78199e796c14642ecd9e8abc404
|
13777cc04bad354ac06ffd30088bcd398ec88c67
|
refs/heads/main
| 2023-06-29T17:52:10.341120
| 2021-07-30T15:00:25
| 2021-07-30T15:00:25
| 391,095,596
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,419
|
r
|
plotting_lms.R
|
#!/usr/bin/Rscript
##
## EDF 7/30/21
##
## Teaching script: visualise an eQTL — the association between genotype at
## variant chr10_26434585_G_A_b38 and APBB1IP expression — then fit and plot
## the corresponding linear models.
library(dplyr)
library(ggplot2)
# NOTE(review): setwd() in a script is fragile; assumes the data file lives
# in ~/Downloads
setwd("~/Downloads/")
## Read in expression and genotype data
# expected columns (from the names used below): ABPP1IP_expr (expression)
# and chr10_26434585_G_A_b38 (genotype) — presumably 0/1/2 allele dosage;
# TODO confirm against the data file
expr_gt = read.table("APBB1IPexpr_gt.txt",
header=TRUE,sep='\t')
names(expr_gt)
head(expr_gt)
## Plot expression and genotype data
# raw scatter: expression vs genotype
expr_gt %>%
ggplot(aes(chr10_26434585_G_A_b38, ABPP1IP_expr)) +
geom_point()
# log10 scale, genotype as a factor so each genotype class gets a boxplot
expr_gt %>%
ggplot(aes(as.factor(chr10_26434585_G_A_b38),
log10(ABPP1IP_expr))) +
geom_point() +
geom_boxplot()
## Calculate linear model on expression and genotype data
summary(lm(data = expr_gt,
ABPP1IP_expr ~ chr10_26434585_G_A_b38))
# log-transformed response: typically a better fit for expression data
summary(lm(data = expr_gt,
log10(ABPP1IP_expr) ~ chr10_26434585_G_A_b38))
# keep the log-scale fit so its coefficients can be drawn below
lm_var = summary(lm(data = expr_gt,
log10(ABPP1IP_expr) ~ chr10_26434585_G_A_b38))
lm_var$coefficients
## Optional: plot data with linear regression line
# jitter the genotype axis slightly so overlapping points are visible
expr_gt %>%
ggplot(aes(chr10_26434585_G_A_b38,
log10(ABPP1IP_expr))) +
geom_point(position=position_jitter(width=.2)) +
geom_abline(slope = lm_var$coefficients[2,1],
intercept = lm_var$coefficients[1,1])
# boxplot version: the intercept is shifted down by one slope unit because
# the first factor level is drawn at x = 1 rather than x = 0
expr_gt %>%
ggplot(aes(as.factor(chr10_26434585_G_A_b38),
log10(ABPP1IP_expr))) +
geom_point() +
geom_boxplot() +
geom_abline(slope = lm_var$coefficients[2,1],
intercept = lm_var$coefficients[1,1] -
lm_var$coefficients[2,1])
|
e6bf24cd1d1d6e8397c288fb0ace410fd679945b
|
a39fca6eb72a004709de0baf2e824d4282e4f14d
|
/R/index.freqDD.R
|
456039a4d895582c1ae1b760d47e0b5a62f0e327
|
[] |
no_license
|
SantanderMetGroup/R_VALUE
|
7258fba9d0ec6a67382e1f5c8b7d6d768fb4f86d
|
86042cac384205e832eb8ae9dc918cd3d4202d2a
|
refs/heads/devel
| 2023-07-07T19:34:31.848619
| 2023-06-21T16:46:39
| 2023-06-21T16:46:39
| 15,336,613
| 7
| 1
| null | 2021-11-23T16:59:05
| 2013-12-20T11:17:15
|
R
|
UTF-8
|
R
| false
| false
| 611
|
r
|
index.freqDD.R
|
#' @title Dry-dry probability
#' @description Function to compute the dry-dry transition probability: the
#'   proportion of sub-threshold (dry) days that are followed by another dry day.
#' @author Neyko Neykov \email{neyko.neykov@@meteo.bg}, J. Bedia, D. San-Mart\'in, S. Herrera
#' @param ts A vector containing the data
#' @param threshold A float number defining the threshold considered. Default to 1.
#' @return A float number corresponding to the dry-dry transition probability,
#'   or \code{NA} when the series has fewer than 2 observations.
#' @export
index.freqDD <- function(ts, threshold = 1) {
  n <- length(ts)
  # A transition needs two consecutive days; the original 1:(n - 1) indexing
  # misbehaved for n < 2 because 1:0 evaluates to c(1, 0).
  if (n < 2) {
    return(NA_real_)
  }
  dryToday <- as.numeric(ts[seq_len(n - 1)] < threshold)
  dryTomorrow <- as.numeric(ts[2:n] < threshold)
  # NA days propagate through the product and are dropped by na.rm = TRUE
  mean(dryToday * dryTomorrow, na.rm = TRUE)
}
|
afb4ee99f43c816a1819f8af2a590e00811295e5
|
7a95abd73d1ab9826e7f2bd7762f31c98bd0274f
|
/netrankr/inst/testfiles/checkPairs/libFuzzer_checkPairs/checkPairs_valgrind_files/1612798799-test.R
|
5ca1eabba11792905833e713e6c518772e708d33
|
[] |
no_license
|
akhikolla/updatedatatype-list3
|
536d4e126d14ffb84bb655b8551ed5bc9b16d2c5
|
d1505cabc5bea8badb599bf1ed44efad5306636c
|
refs/heads/master
| 2023-03-25T09:44:15.112369
| 2021-03-20T15:57:10
| 2021-03-20T15:57:10
| 349,770,001
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 386
|
r
|
1612798799-test.R
|
# Fuzzer-generated regression input for netrankr:::checkPairs (valgrind
# memory-check harness): x holds 41 doubles spanning extreme magnitudes
# (including a subnormal and a run of zeros), y is deliberately empty.
testlist <- list(x = c(3.07839225763261e+169, 9.07657702144378e+223, 3.87069807020594e+233, 2.14899131997207e+233, 9.2637000607593e+25, 8.90389806611905e+252, 3.59535147836283e+246, 8.79670844719638e-313, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0), y = numeric(0))
result <- do.call(netrankr:::checkPairs,testlist)
# print the structure of whatever checkPairs returned
str(result)
|
1f93f57a0ee79abf2c2649066822c3b1ca07b504
|
cb352086ed786306eabffc2bb30d294301193d3b
|
/R_codes/codes_utiles/Analysis_functions.R
|
3da05dee355010cc220260e99952340bcd75c154
|
[] |
no_license
|
MathisDeronzier/mod-lisation-Little_Washita-
|
0cda2bdcb992e9588910dd2c0ce4dd46b50fdca6
|
0076db9bbe9682248d37f5629450bdbb6406af37
|
refs/heads/master
| 2023-08-22T14:24:19.960181
| 2021-09-27T16:24:16
| 2021-09-27T16:24:16
| 397,234,994
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 3,189
|
r
|
Analysis_functions.R
|
# Returns the lengths of all consecutive dry (zero-precipitation) spells
# found in the series.
#
# Fixes two defects of the original loop implementation:
#  - a drought lasting until the end of the series was never flushed into
#    the result (the run was only recorded when rain followed it);
#  - a series with no drought returned c(0) instead of an empty vector,
#    because n_drought[1:0] indexes with c(1, 0).
#
# pr_serie: numeric vector of precipitation values (0 == dry day)
# returns:  integer vector with one entry per dry spell (possibly empty)
freq_without_rain <- function(pr_serie){
  # run-length encoding groups consecutive equal values; keep the TRUE runs
  runs <- rle(pr_serie == 0)
  runs$lengths[runs$values]
}
#Fonction affichant les débits.
#Fonction regardant les queues des quantiles
#La classique rmse
# Root mean squared error between predicted and observed values.
rmse <- function(predicted, real){
  residuals <- predicted - real
  sqrt(sum(residuals^2) / length(residuals))
}
### On crée ici la librairie permettant de faire CDFt
### sur un tableau glissant
# TRUE when `year` is a leap year in the Gregorian calendar.
bis <- function(year){
  if (year %% 400 == 0) {
    return(TRUE)
  }
  year %% 4 == 0 && year %% 100 != 0
}
# Vector of days per month for `year`; February has 29 days in leap years,
# as decided by the sibling bis() helper.
nbj_y <- function(year){
  days <- c(31, 28, 31, 30, 31, 30, 31, 31, 30, 31, 30, 31)
  if (bis(year)) {
    days[2] <- 29
  }
  days
}
# Converts a day-of-year number `n` (leap-year month lengths: February = 29)
# into the triple c(0, month, day_within_month).
n_to_day <- function(n){
  month_days <- c(31, 29, 31, 30, 31, 30, 31, 31, 30, 31, 30, 31)
  totals <- cumsum(month_days)
  # first month whose cumulative day count reaches n
  month <- which(totals >= n)[1]
  days_before <- c(0, totals)[month]
  c(0, month, n - days_before)
}
# Converts a date given as c(<ignored>, month, day) into its day-of-year
# number, using leap-year month lengths (February = 29), the inverse of
# n_to_day above.
#
# Fix: the original accumulated with `for (i in 1:(date[2] - 1))`, which for
# January (month 1) iterates over c(1, 0); indexing nb_j with 0 yields
# numeric(0), collapsing the accumulator, so the function returned numeric(0)
# instead of the day. seq_len(0) correctly produces an empty selection.
day_to_n <- function(date){
  month_days <- c(31, 29, 31, 30, 31, 30, 31, 31, 30, 31, 30, 31)
  sum(month_days[seq_len(date[2] - 1)]) + date[3]
}
# Returns the number of days between two dates of the form c(year, month, day),
# ignoring the year component. Non-leap month lengths are used here —
# NOTE(review): inconsistent with nbj_y/n_to_day above, which assume 29 days
# in February; confirm which convention is intended.
dist<-function(date1,date2){
  # coerced to a matrix so date1[3] works for vector and 1-row inputs alike
  date1<-as.matrix(date1)
  nb_j<-c(31,28,31,30,31,30,31,31,30,31,30,31)
  d1<-date1[3]
  d2<-date2[3]
  m1<-date1[2]
  m2<-date2[2]
  # same month: plain difference of the day numbers
  if(m1==m2){
    return(abs(d1-d2))
  }
  # swap so (m1, d1) carries the earlier month; only one direction below
  if (m1>m2){
    m3<-m2
    d3<-d2
    m2<-m1
    d2<-d1
    m1<-m3
    d1<-d3
  }
  s<-0
  if (m2-m1>6){
    # months more than 6 apart: appears to take the shorter, wrap-around
    # path across the year boundary, summing month lengths modulo 12 —
    # NOTE(review): verify this circular-distance behaviour is intended
    for (i in 1:(12+m1-m2)){
      s=s+nb_j[(m2+i-2)%%12+1]
    }
    return(s+d1-d2)
  }
  else{
    # otherwise sum the month lengths lying between the two dates directly
    for (i in 1:(m2-m1)){
      s<-s+nb_j[m1+i-1]
    }
    return(s+d2-d1)
  }
}
###################### Fonctions utiles pour CDFt ##############################
# For each element of X, counts how many elements of Y are <= that value
# (the rank of X[i] within Y; applied to (X, X) it yields the ranks of X,
# and it is also used for the Cramer-von-Mises statistic).
# Equivalent to the original two-pointer sweep over the sorted vectors:
# findInterval(x, sorted_y) is exactly the count of sorted_y values <= x.
rangs <- function(X, Y){
  as.numeric(findInterval(X, sort(Y)))
}
# Dense-ranks the values of `serie`: equal values share a rank and the rank
# increases by 1 at each new distinct value, in ascending order.
# Side effect: prints one cat() debug line per element.
rangs2<-function(serie){
  ord<-order(serie)
  n<-length(serie)
  # smallest value processed so far; used to detect a new distinct value
  minloc<-serie[ord[1]]
  rangs_serie<-rep(0,n)
  rang<-1
  for (i in 1:n){
    if(minloc<serie[ord[i]]){
      # strictly larger value: open a new rank
      rang<-rang+1
      minloc<-serie[ord[i]]
      rangs_serie[ord[i]]<-rang
      cat("i=",i," rang=", rang, " serie[ord[",i,"]]=", serie[ord[i]], "\n", sep="")
    }
    else{
      # tie with the current value: reuse the current rank
      rangs_serie[ord[i]]<-rang
      cat("i=",i," rang=", rang, " serie[ord[",i,"]]=", serie[ord[i]], "\n", sep="")
    }
  }
  return(rangs_serie)
}
# Draws a random sub-sample of the rows of `series`, keeping each row
# independently with probability 1/4 (used to predict the mean from the
# one obtained on the sub-sample).
#
# Fix: the original computed the number of rows from the global `pr_series`
# instead of the `series` argument, failing (or sampling a mask of the
# wrong length) whenever the two differed.
sous_part <- function(series){
  p <- 1/4
  n <- nrow(series)
  keep <- rbinom(n, 1, p)
  series[which(keep == 1), ]
}
# Centres X and divides by its sample variance.
# NOTE(review): division by var(X) rather than sd(X) is kept as-is to
# preserve behaviour, but confirm this scaling is intended.
norm <- function(X){
  centred <- X - mean(X)
  centred / var(X)
}
|
f2e50df907d0f14c4f6ee14ee2e722af2a92ae9b
|
53f6608a8f31d2aa39fae0e899b144c98818ff54
|
/man/OSDexamples.Rd
|
63ab5ddc0fefe9ee38ecdd3ee0d24ed67dba7341
|
[] |
no_license
|
ncss-tech/sharpshootR
|
4b585bb1b1313d24b0c6428182a5b91095355a6c
|
1e062e3a4cdf1ea0b37827f9b16279ddd06d4d4a
|
refs/heads/master
| 2023-08-20T06:16:35.757711
| 2023-08-08T19:11:52
| 2023-08-08T19:11:52
| 54,595,545
| 18
| 6
| null | 2023-02-24T21:00:28
| 2016-03-23T21:52:31
|
R
|
UTF-8
|
R
| false
| true
| 425
|
rd
|
OSDexamples.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/data-documentation.R
\docType{data}
\name{OSDexamples}
\alias{OSDexamples}
\title{Example output from soilDB::fetchOSD()}
\format{
An object of class \code{list} of length 17.
}
\usage{
data(OSDexamples)
}
\description{
These example data are used to test various functions in this package when network access may be limited.
}
\keyword{datasets}
|
7e272ea80c837b829e745b58d1de39554e4f200c
|
5c30f03837e69425bddea5dc49f1f192576329ba
|
/Develp_Rproduct/App2/server.R
|
6d3332cf04cc97cd11711ef0f84ebbb40f672c2c
|
[] |
no_license
|
ofialko/Data-Science-Johns-Hopkins-University
|
3da23b2a3f7535bddb63da44d0ce550828e4ea58
|
9eb3155c8070c1ce31fc7c6e3ce9c641e72ff946
|
refs/heads/master
| 2020-03-12T19:57:02.613658
| 2018-06-28T04:04:09
| 2018-06-28T04:04:09
| 130,795,057
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,509
|
r
|
server.R
|
library(shiny)
# Shiny server: interactively compare two models of horsepower (hp) as a
# function of fuel economy (mpg) on mtcars. A slider (input$sliderMPG)
# selects an mpg value; both models' predictions at that value are plotted
# and rendered as text.
shinyServer(function(input, output) {
  # hinge ("hockey-stick") term: excess mpg above 20, zero below
  mtcars$mpgsp <- ifelse(mtcars$mpg - 20 >0,mtcars$mpg-20,0)
  # model1: simple linear fit; model2 adds the hinge term, giving a
  # piecewise-linear fit with a knot at mpg = 20
  model1 <- lm(hp~mpg,data=mtcars)
  model2 <- lm(hp~mpgsp+mpg,data = mtcars)
  # reactive predictions, re-evaluated whenever the slider moves
  model1pred <- reactive({
    mpgInput <- input$sliderMPG
    predict(model1,newdata = data.frame(mpg=mpgInput))
  })
  model2pred <- reactive({
    mpgInput <- input$sliderMPG
    # the hinge covariate must be recomputed for the new mpg value
    predict(model2,newdata = data.frame(mpg=mpgInput,
                                        mpgsp = ifelse(mpgInput-20>0,
                                                       mpgInput -20,0)))
  })
  output$plot1 <- renderPlot({
    mpgInput <- input$sliderMPG
    # base scatter of the raw data
    plot(mtcars$mpg,mtcars$hp,xlab = 'Miles per gallon',ylab = 'Horsepower',
         bty='n',pch=16,xlim = c(10,35),ylim = c(50,350))
    # each fitted model is overlaid only when its checkbox is ticked
    if(input$showmodel1){
      abline(model1,col='red',lwd=2)
    }
    if(input$showmodel2){
      # model2 is piecewise linear, so draw its predictions over a grid
      model2lines <- predict(model2,newdata = data.frame(
        mpg = 10:35,mpgsp = ifelse(10:35-20 >0,10:35 -20,0)
      ))
      lines(10:35,model2lines,col='blue',lwd=2)
    }
    legend(25,250,c('Model1 prediction','Model2 prediction'),pch=16,
           col=c('red','blue'),bty='n',cex=1.2)
    # highlight the predictions at the slider-selected mpg
    points(mpgInput,model1pred(),col='red',pch=16,cex=2)
    points(mpgInput,model2pred(),col='red',pch=16,cex=2)
  })
  # numeric read-outs of both predictions
  output$pred1 <- renderText({
    model1pred()
  })
  output$pred2 <- renderText({
    model2pred()
  })
})
|
26bc181dec266fb44627e8e84adda2261dc5aaf8
|
565a032b6072a5f4902a53bdc76cd80338a240aa
|
/Annie_Brinza_week_4_phase_2.R
|
d80fad647841a4a7b85e1953d456b290d2551c65
|
[] |
no_license
|
abrinz/DataScienceAccelerator
|
e2c8846a74cf556af97b6057491b072950cd27e6
|
c42d3884b7da8444033af295eaccad7ee801a4ad
|
refs/heads/master
| 2020-04-02T14:36:30.900223
| 2019-02-04T16:46:41
| 2019-02-04T16:46:41
| 154,531,430
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 7,428
|
r
|
Annie_Brinza_week_4_phase_2.R
|
library(tidyverse)
library(nycflights13)
library(maps)
library(fueleconomy)
library(forcats)
##############################################################
# 13.4.6 #1-4
##############################################################
#1
# Compute the average delay by destination, then join on the airports data frame so you can show the spatial distribution of delays. Hereโs an easy way to draw a map of the United States:
#
# airports %>%
# semi_join(flights, c("faa" = "dest")) %>%
# ggplot(aes(lon, lat)) +
# borders("state") +
# geom_point() +
# coord_quickmap()
# (Don't worry if you don't understand what semi_join() does — you'll learn about it next.)
#
# You might want to use the size or colour of the points to display the average delay for each airport.
averageDelay <- flights %>% group_by(dest) %>% summarise(avgDelay = mean(arr_delay,na.rm = TRUE))
delayAirports <- inner_join(averageDelay,airports, c("dest" = "faa"))
delayAirports %>%
semi_join(flights, c("dest" = "dest")) %>%
ggplot(aes(lon, lat)) +
borders("state") +
geom_point(aes(colour = avgDelay)) +
coord_quickmap()
#2
#Add the location of the origin and destination (i.e. the lat and lon) to flights.
relevantColumns<- airports %>% select(faa,lat,lon)
addingDest <- flights %>% inner_join(relevantColumns,c("dest" = "faa")) %>% select(year:dest,dest_lat = "lat", dest_lon = "lon",air_time:time_hour)
allTogetherNow <- addingDest %>% inner_join(relevantColumns,c("origin" = "faa")) %>% select(year:origin,origin_lat = "lat", origin_lon = "lon",dest:time_hour)
#3
#Is there a relationship between the age of a plane and its delays?
planes_flights <- inner_join(flights,planes,by = "tailnum")
ageCalc <- planes_flights %>% mutate(age = year.x - year.y)
ageDelay <- ageCalc %>% group_by(age) %>% summarise(avgDelay = mean(arr_delay+dep_delay,na.rm = TRUE))
ageDelay %>% ggplot(aes(age,avgDelay)) + geom_line()
#No
#4
#What weather conditions make it more likely to see a delay?
flights_weather <- inner_join(flights,weather,c("year","month","day","hour","origin"))
precipitation_delay <- flights_weather %>% group_by(precip) %>% summarise(delay = mean(arr_delay,na.rm = TRUE))
ggplot(precipitation_delay,aes(precip,delay)) + geom_line()
#It looks like any amount of precipitation causes a delay, but there's not really a strong trend of how much precipitation causes a big delay
##############################################################
# 13.5.1 #1-6
##############################################################
#1
#What does it mean for a flight to have a missing tailnum?
#What do the tail numbers that donโt have a matching record in planes have in common? (Hint: one variable explains ~90% of the problems.)
flights_wo_match <- flights %>% anti_join(planes,by = "tailnum")
flights_wo_match %>% group_by(carrier) %>% count(carrier,sort = TRUE)
#It's only two carriers that are the majority of the cases - AA and MQ
#2
#Filter flights to only show flights with planes that have flown at least 100 flights.
flights100 <- flights %>% group_by(tailnum) %>% count() %>% filter(n >= 100)
semi_join(flights,flights100,by = "tailnum")
#3
#Combine fueleconomy::vehicles and fueleconomy::common to find only the records for the most common models.
head(fueleconomy::vehicles)
head(fueleconomy::common)
common_models <- semi_join(fueleconomy::common, fueleconomy::vehicles, c("make","model"))
common_models
#4
#Find the 48 hours (over the course of the whole year) that have the worst delays.
#Cross-reference it with the weather data. Can you see any patterns?
worstDelays <- flights %>% group_by(month,day,hour) %>% summarise(avg_delay = mean(arr_delay,na.rm = TRUE)) %>% arrange(desc(avg_delay))
worstDelays48 <- worstDelays[1:48,]
delays_weather <- flights %>% inner_join(worstDelays,c("month","day","hour")) %>% left_join(weather,c("month","day","hour","year","origin"))
delays_weather %>% group_by(precip) %>% summarise(total_delay = sum(avg_delay)) %>% ggplot(aes(precip,total_delay)) + geom_line()
#It looks like there's a higher total delay when there's only a bit of precipitation, which is odd
delays_weather %>% group_by(temp) %>% summarise(total_delay = sum(avg_delay)) %>% ggplot(aes(temp,total_delay)) + geom_line()
#Temperature doesn't look like there's really a pattern
#5
#What does anti_join(flights, airports, by = c("dest" = "faa")) tell you?
anti_join(flights, airports, by = c("dest" = "faa"))
#It shows only the flights that have a destination that isn't listed in the airports data
#What does anti_join(airports, flights, by = c("faa" = "dest")) tell you?
anti_join(airports, flights, by = c("faa" = "dest"))
#It shows the airports that flights didn't fly to in 2013
#6
#You might expect that there's an implicit relationship between plane and airline, because each plane is flown by a single airline.
#Confirm or reject this hypothesis using the tools you've learned above.
names(flights)
flights %>%select(tailnum,carrier) %>%distinct(tailnum,carrier) %>% group_by(tailnum) %>% count() %>% filter(n > 1)
#There are several tailnums with multiple carriers - probably because planes can be sold between carriers
##############################################################
# 15.3.1 #1-3
##############################################################
#1
#Explore the distribution of rincome (reported income). What makes the default bar chart hard to understand? How could you improve the plot?
gss_cat %>% count(rincome)
ggplot(gss_cat,aes(rincome)) + geom_bar()
#The labels overlap - it could be fixed by switching the axes or making the labels vertical instead of horizontal
#2
#What is the most common relig in this survey? Whatโs the most common partyid?
gss_cat %>% count(relig,sort=TRUE)
#Protestant is the most common religion
gss_cat %>% count(partyid,sort=TRUE)
#Independent is the most common partyid
#3
#Which relig does denom (denomination) apply to? How can you find out with a table? How can you find out with a visualisation?
gss_cat %>% group_by(relig) %>% count(denom,sort = TRUE)
#It applies to Protestant. See above for figuring out with a table
gss_cat %>% count(relig,denom) %>% ggplot(aes(x=relig,y=denom,size=n)) + geom_point()
##############################################################
# 15.4.1 #1-3
##############################################################
#1
#There are some suspiciously high numbers in tvhours. Is the mean a good summary?
summary(gss_cat["tvhours"])
gss_cat %>%
ggplot() +
geom_density(aes(tvhours))
#I think it could be - the mean appears to fall right in the middle of the densest part of the plot and it is close to the median
#2
#For each factor in gss_cat identify whether the order of the levels is arbitrary or principled.
head(gss_cat)
#So marital, race, rincome, partyid, relig, and denom are factors
levels(gss_cat$marital) #Seems arbitrary to me - there's no quantitative way to order these
levels(gss_cat$race) #Arbitrary
levels(gss_cat$rincome) #Principled
levels(gss_cat$partyid) #Principled - ordered from right to left on the political spectrum
levels(gss_cat$relig) #Arbitrary - if it was principled, the different types of Christian would be near each other
levels(gss_cat$denom) #Principled - denominations with similar names are near each other
#3
#Why did moving โNot applicableโ to the front of the levels move it to the bottom of the plot?
#It gives "NA" the value of 1
|
33bb6961ec2317b5e9e39a74338627e37955c3ac
|
ffdea92d4315e4363dd4ae673a1a6adf82a761b5
|
/data/genthat_extracted_code/datapack/examples/parseSystemMetadata.Rd.R
|
afca40083be7b16f5cbcbc883b9663d37e0a220d
|
[] |
no_license
|
surayaaramli/typeRrh
|
d257ac8905c49123f4ccd4e377ee3dfc84d1636c
|
66e6996f31961bc8b9aafe1a6a6098327b66bf71
|
refs/heads/master
| 2023-05-05T04:05:31.617869
| 2019-04-25T22:10:06
| 2019-04-25T22:10:06
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 443
|
r
|
parseSystemMetadata.Rd.R
|
library(datapack)
### Name: parseSystemMetadata
### Title: Parse an external XML document and populate a SystemMetadata
### object with the parsed data
### Aliases: parseSystemMetadata parseSystemMetadata,SystemMetadata-method
### ** Examples
# Parse the example system-metadata XML shipped with datapack and load it
# into a fresh SystemMetadata S4 object.
library(XML)
doc <- xmlParseDoc(system.file("testfiles/sysmeta.xml", package="datapack"), asText=FALSE)
sysmeta <- new("SystemMetadata")
# parseSystemMetadata fills the object's slots from the XML root node
sysmeta <- parseSystemMetadata(sysmeta, xmlRoot(doc))
|
7084842d99b8b89219638e26ebdc31f2c6f05b5c
|
72d9009d19e92b721d5cc0e8f8045e1145921130
|
/robustreg/R/robustRegH.R
|
2934a241e4571b33398a7096bc24d6e911a8bff6
|
[] |
no_license
|
akhikolla/TestedPackages-NoIssues
|
be46c49c0836b3f0cf60e247087089868adf7a62
|
eb8d498cc132def615c090941bc172e17fdce267
|
refs/heads/master
| 2023-03-01T09:10:17.227119
| 2021-01-25T19:44:44
| 2021-01-25T19:44:44
| 332,027,727
| 1
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,434
|
r
|
robustRegH.R
|
# Robust linear regression using Huber's psi function, fitted by IRLS
# (iteratively re-weighted least squares).
#
# formula, data : model specification, as for lm()
# tune          : Huber tuning constant (default 1.345); rescaled below when
#                 the bounded-influence variant is selected
# m             : TRUE -> plain M-estimation; FALSE -> bounded-influence
#                 weighting based on hat values
# max.it, tol   : iteration cap and convergence tolerance on the absolute
#                 change of the coefficient vector
# anova.table   : when TRUE and the fit converged, prints an ANOVA-style
#                 table plus coefficient standard errors and t-tests
#
# Returns (invisibly) list(coefficients, weights, mse); all three are NULL
# when the algorithm fails to converge within max.it iterations.
#
# Depends on compiled helpers fit_rcpp(), mad_rcpp() and psiHuber_rcpp()
# defined elsewhere in this package, and on Diagonal() (Matrix package).
robustRegH<-function(formula,data,tune=1.345,m=TRUE,max.it=1000,tol=1e-5,anova.table=FALSE){
  # original pure-R psi function, kept for reference (superseded by psiHuber_rcpp)
  #psiHuber<-function(r,c){
  #middle<-abs(r)<=c
  #high<- r>c
  #low<-r<(-c)
  #h<-middle*r + high*c + low*-c
  #return(h)}
  # bi == TRUE selects the bounded-influence branch
  bi<-FALSE
  if(m==FALSE){bi<-TRUE}
  # design matrix, response, and an OLS fit for the starting coefficients
  modelFrame=model.frame(formula,data)
  X=model.matrix(formula,data)
  y=model.extract(modelFrame,"response")
  model=lm.fit(X,y)
  b=model$coefficients
  n<-length(y)
  p<-length(b)
  if(bi){
    # rescale the tuning constant and build leverage-based downweights
    tune<-(tune*sqrt(2*p*n))/(n-2*p)
    hii<-lm.influence(model)$hat
    # NOTE(review): `pi` shadows the base constant inside this function
    pi<-(1-hii)/sqrt(hii)}
  convergence<-FALSE
  # IRLS loop: residuals -> robust scale -> Huber weights -> weighted LS
  for(i in 1:max.it){
    b_temp<-b
    r<-y-fit_rcpp(X,b) #replaced r<-y-X%*%b
    # robust scale estimate (MAD-based, per the replaced R code)
    s<-mad_rcpp(r) #replaced s<-median(abs(r-median(r)))/.6745
    #rstar<-ifelse(m,r/s,r/(s*pi))
    if(m){rstar<-(r/s)}else{rstar<-r/(s*pi)}
    # Huber weights: psi(r*)/r*
    psiH<-psiHuber_rcpp(rstar,tune)
    w<-psiH/rstar
    b<-lm.wfit(x=X,y=y,w=w[,1])$coefficients
    # convergence is only checked after a few burn-in iterations
    if(i>4){
      if(sum(abs(b-b_temp))<tol){
        cat("\nRobust Regression with Huber Function\n")
        cat("Convergence achieved after:",i,"iterations\n")
        convergence<-TRUE
        break}}
  }
  #if(convergence==FALSE){b<-NULL;w<-NULL}
  #MSE Calc
  if(convergence){
    # weighted mean of y, fitted values, and weighted residual SS
    ybarw<-sum(y*w)/sum(w)
    ytild=fit_rcpp(X,b) #replaced ytild<-X%*%b
    sserr<-sum(w*(y-ytild)^2)
    dfr<-length(b)-1
    dferr<-length(y)-dfr-1
    mse<-sserr/dferr}
  else{
    b<-NULL;w<-NULL;mse<-NULL}
  if(convergence && anova.table){
    # derivative of Huber's psi: 1 inside [-c, c], 0 outside
    derivPsiHuber<-function(r,c){
      true<-abs(r)<=c
      false<-(r<c*-1 || r>c)
      dph<-true*1 +false*0
      return(dph)
    }
    # rounding helpers for the printed table
    r3<-function(x){return(round(x,3))}
    r2<-function(x){return(round(x,2))}
    ssreg<-sum(w*(ytild-ybarw)^2)
    sstot<-sum(w*(y-ybarw)^2)
    dftot<-length(y)-1
    msr<-ssreg/dfr
    # robust estimate of the coefficient sampling variance
    sbsq<-(s^2*(n^2/(n-length(b)))*sum(psiH^2))/sum(derivPsiHuber(rstar,tune))^2
    F<-msr/sbsq
    # (X'WX)^-1 diagonal gives the per-coefficient variance factors
    W<-Diagonal(x=w[1:length(w),])
    c<-diag(x=solve(a=t(X)%*%W%*%X))
    sec<-sqrt(sbsq*c)
    t<-b/sec
    # two-sided p-values from the t distribution with n - p df
    pval<-sapply(t, function(t) if(t>0){2*(1-pt(q=t,df=n-p))}else{2*(1-pt(q=t,df=n-p,lower.tail = FALSE))})
    cat("source ","\t","SS","\t","\t","df","\t","MS","\t","\t","F","\n")
    cat("model ","\t",r2(ssreg),"\t",dfr,"\t",r2(msr),"\t",r2(F),"\n")
    cat("error ","\t",r2(sserr),"\t",dferr,"\n")
    cat("tot ","\t",r2(sstot),"\t",dftot,"\n")
    cat("rsquared","\t",r3(ssreg/sstot),"\n")
    cat("mse ","\t",mse,"\n\n")
    cat("Coefficients:\n")
    estimates<-cbind(b,sec,r2(t),round(pval,5))
    colnames(estimates)<-c("estimate","std error","t value","p value")
    print(estimates)
  }
  object=list("coefficients"=b,"weights"=w, "mse"=mse)
  return(invisible(object))
}
|
5c3a0b773dce5120a0804954fc645820f970d187
|
523efae75822b2211dd231513bdc0bc86d908a48
|
/mampcg/R/generateMAMPCG.R
|
511a52fd56f6e6f42f98124fb0dff4394220a535
|
[] |
no_license
|
mgomez-olmedo/mampcg-paperVersion
|
603db292625bb5fbe6395db64b76621bb492629c
|
c727a5d0e7b67ac87832a0e8d204c78ff22d308e
|
refs/heads/master
| 2021-01-10T06:30:47.865416
| 2015-10-26T22:46:32
| 2015-10-26T22:46:32
| 44,955,875
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 34,975
|
r
|
generateMAMPCG.R
|
library(bnlearn)
library(parallel)
#'###################### SECTION 1: auxiliar functions #########################
#'#############################################################
#' gets a new id for a model analyzing a given folder. If networks
#' folder contains models artificial1....artificial10 the new model
#' will be artificial11
#' arguments:
#' @param baseFileName reference name
#' @param folder to analyze
#' @return new file name
#'#############################################################
getNewFileName <- function(baseFileName, pathNet){
  # every file in pathNet whose name matches the base name
  candidates <- list.files(pathNet, pattern=baseFileName, include.dirs=FALSE)
  # track the largest numeric suffix seen so far
  maxId <- 0
  for (candidate in candidates){
    # strip the extension, then read the trailing numeric id from the stem
    stem <- strsplit(candidate,'[.]')[[1]][1]
    id <- as.numeric(substring(stem, nchar(baseFileName)+1, nchar(stem)))
    if (id > maxId){
      maxId <- id
    }
  }
  # next free name: base name plus (largest id + 1); returned invisibly,
  # matching the original's trailing assignment
  invisible(paste0(baseFileName, maxId+1))
}
#'#############################################################
#' creates a directed edge between two nodes
#' arguments:
#' @param errorNode a extreme of the new edge
#' @param node another extreme of the new edge
#' @param edges edges defining the model
#' @param leftDeco deco for left node
#' @param rightDeco deco for right node
#' @return new list of edges after the addition
#'##############################################################
createEdge <- function(errorNode, node, edges, leftDeco, rightDeco){
  # append one row describing the new edge and its two end decorations
  newRow <- nrow(edges) + 1
  edges[newRow, ] <- c(errorNode, node, leftDeco, rightDeco)
  # return the extended edge set
  edges
}
#'##############################################################
#' deletes a row from the data frame
#' arguments:
#' @param from first node
#' @param to second node
#' @param edges edges defining the model
#' @return new list of edges after removal
#'##############################################################
# Removes the edge between `from` and `to` (either orientation) from the
# edge data frame and renumbers the remaining rows.
#
# Fixes: when the edge was absent, which() returned integer(0) and
# edges[-integer(0), ] selected ZERO rows, silently discarding the whole
# edge set, after which rownames(edges) <- 1:0 errored. The frame is now
# returned untouched in that case, and seq_len() is used so renumbering is
# also safe when the deletion empties the frame.
deleteEdge <- function(from, to, edges){
  # rows matching the edge in either direction
  index <- which((edges$from == from & edges$to == to) |
                   (edges$to == from & edges$from == to))
  if (length(index) > 0){
    edges <- edges[-index, ]
    rownames(edges) <- seq_len(nrow(edges))
  }
  return(edges)
}
#'#############################################################
#' deletes an edge selected at random between those belonging
#' to a given path
#' arguments:
#' @param path path to consider
#' @param edges set of edges to analyze
#' @return new list of edges after deletion
#'#############################################################
# Removes one edge, chosen uniformly at random, from a cycle described by
# `path` (an ordered vector of node names); the edge between the last and
# the first node is also a candidate (wrap-around). Relies on the sibling
# deleteEdge() defined above.
deleteEdgeAtRandom <- function(path, edges){
  # removes the cycle by deleting a single edge
  pathLength <- length(path)
  # select a random edge index between 1 and pathLength
  # NOTE(review): assumes the path has at least 2 nodes; with a single node
  # there is no edge to delete
  index <- sample(1:pathLength,1)
  # identify the two endpoints of the chosen edge
  from <- path[[index]]
  if (index < pathLength){
    to <- path[[index+1]]
  }
  else{
    # wrap around: close the cycle back to the first node
    to <- path[[1]]
  }
  # delete the edge and return the reduced edge set
  edges <- deleteEdge(from, to, edges)
}
#'#############################################################
#' checks if there is an undirected edge between two nodes
#' arguments:
#' @param nodeA first node
#' @param nodeB second node
#' @param edges edges to consider
#' @return flag boolean value
#'#############################################################
checkUndirectedEdge <- function(nodeA, nodeB, edges){
  # an undirected edge carries "none" on both ends, in either orientation
  matches <- edges[((edges$from == nodeA & edges$to == nodeB &
  edges$left == "none" & edges$right == "none") |
  (edges$from == nodeB & edges$to == nodeA &
  edges$left == "none" & edges$right == "none")), ]
  # TRUE when at least one such row exists
  return(!is.null(matches) && nrow(matches) > 0)
}
#'##############################################################
#' gets undirected edges for a given node
#' arguments:
#' @param node node to consider
#' @param edges edges where to look for
#' @return list of edges containing node
#'##############################################################
getUndirectedEdges <- function(node, edges){
  # rows that touch node on either end
  touchesNode <- edges$from == node | edges$to == node
  # rows whose two ends carry no arrow head (undirected)
  isUndirected <- edges$left == "none" & edges$right == "none"
  edges[touchesNode & isUndirected, ]
}
#'#############################################################
#' get the nodes involved in a pair of edges, being node
#' the common node between them
#' arguments:
#' @param node common node between the edges
#' @param edge1 first edge
#' @param edge2 second edge
#' @return a list with nodes involved in both edges A - B - C
#'#############################################################
getNodesInEdges <- function(node, edge1, edge2){
  # the shared node is B; A and C are the far ends of each edge,
  # giving the A - B - C configuration
  list(A = getOppositeNode(node, edge1),
  B = node,
  C = getOppositeNode(node, edge2))
}
#'##############################################################
#' get the opposite node to the one passed as first argument in the
#' edge passed as second argument
# arguments:
#' @param node reference node
#' @param edge edge to consider
#' @return node
#'##############################################################
getOppositeNode <- function(node, edge){
  # return whichever endpoint of the edge is not `node`; when node
  # matches neither endpoint, edge$from is returned (as the original did)
  if (node == edge$from) edge$to else edge$from
}
#'##############################################################
#' get the spouses for a given node
#' arguments:
#' @param node reference node
#' @param edges edges to analyze
#' @return list of nodes (spouses)
#'##############################################################
getSpousesForNode <- function(node, edges){
  # spouses are the far endpoints of bidirected (arrow-arrow) edges
  bidirected <- edges[((edges$from == node | edges$to == node) &
  edges$left == "arrow" & edges$right == "arrow"), ]
  spouses <- list()
  if (!is.null(bidirected) && nrow(bidirected) > 0){
    for (row in seq_len(nrow(bidirected))){
      # prepend the opposite endpoint of each bidirected edge
      spouses <- c(getOppositeNode(node, bidirected[row, ]), spouses)
    }
  }
  # drop duplicates before returning
  unique(spouses)
}
#'##############################################################
#' get spouses for every node
#' arguments:
#' @param edges edges to consider
#' @return list of spouses for each node
#'##############################################################
getSpouses <- function(edges){
  # every node mentioned on either side of any edge
  nodes <- unique(c(edges$from, edges$to))
  # one spouse list per node, in node order
  lapply(nodes, getSpousesForNode, edges)
}
#'##############################################################
#' gets the neighnbours of node
#' arguments:
#' @param node node of reference
#' @param edges edges to analyze
#' @return list of neighbours
#'##############################################################
getNeighbours <- function(node, edges){
  neighbours <- list()
  # any edge touching node contributes its far endpoint, regardless of
  # edge type or direction; duplicates are intentionally kept
  incident <- edges[(edges$from == node | edges$to == node), ]
  if (!is.null(incident) && nrow(incident) > 0){
    for (k in seq_len(nrow(incident))){
      neighbours <- c(getOppositeNode(node, incident[k, ]), neighbours)
    }
  }
  return(neighbours)
}
#'##############################################################
#' gets the neighnbours of node but taking into account the direction.
#' if A -> B then B is neighbour of A, but A is not neighbour of B
#' arguments:
#' @param node reference node
#' @param edges edges to analyze
#' @return list of neighbours
#'##############################################################
getNeighboursWithDirections <- function(node, edges){
# initializes neighbours
neighbours <- list()
# get the edges related to node
edgesForNode <- edges[(edges$from == node | edges$to == node), ]
# exclude edges with arrow in node side, i.e. drop pure directed edges
# pointing INTO node (other -> node): following such an edge from node
# would go against its direction, so the far endpoint is unreachable.
# Undirected and bidirected edges, and edges leaving node, are kept.
edgesForNode <- edgesForNode[!(((edgesForNode$to == node) & (edgesForNode$right == "arrow") &
(edgesForNode$left == "none")) |
((edgesForNode$from == node) & (edgesForNode$left == "arrow") &
(edgesForNode$right == "none"))),]
# gets all the nodes in edges and removes node
# (no unique() is applied here; repeated neighbours are kept)
if (!is.null(edgesForNode) & nrow(edgesForNode) != 0){
for(i in 1:nrow(edgesForNode)){
neighbours <- c(getOppositeNode(node, edgesForNode[i,]), neighbours)
}
}
# return neighbours
return(neighbours)
}
#'##############################################################
#' gets the neighnbours of node but taking into accounto only
#' undirected edges
#' arguments:
#' @param node reference node
#' @param edges edges to analyze
#' @return list of neighbours
#'##############################################################
getNeighboursWithUndirectedEdges <- function(node, edges){
  neighbours <- list()
  # restrict to undirected edges incident to node
  undirected <- getUndirectedEdges(node, edges)
  if (!is.null(undirected) && nrow(undirected) > 0){
    for (k in seq_len(nrow(undirected))){
      # collect the far endpoint of every undirected edge
      neighbours <- c(getOppositeNode(node, undirected[k, ]), neighbours)
    }
  }
  return(neighbours)
}
#'##############################################################
#' method for getting the path between two nodes
#' arguments:
#' @param from start node
#' @param to destination node
#' @param visited flag of boolean values to control visited nodes
#' @param edges edges to analyze
#' @param neighboursFunction function to use for neighbours detection
#' @return list with three entries: boolean flag, list of visited
#' nodes and list of non visited edges
#'##############################################################
getPath <- function(from, to=from, visited, edges, neighboursFunction){
# depth-first search for a path from `from` to `to`. Each traversed
# edge is deleted from the working copy of `edges`, which prevents
# revisiting it and so guarantees termination on cyclic graphs. The
# neighbour relation is pluggable via `neighboursFunction`, so the same
# routine detects directed reachability or undirected paths.
# inializes the result
result <- list(flag=FALSE, path=visited, edges=edges)
# base case: if to belongs to visited, return true
if (any(visited == to)){
result$flag <- TRUE
}
else{
# inductive case: get neigbours of from
neighbours <- neighboursFunction(from, edges)
# gets the list of nodes to visit
toVisit <- setdiff(neighbours, visited)
# consider every node to visit
if(length(toVisit) != 0){
for(i in 1:length(toVisit)){
# sort nodes in lexicographical order
# NOTE(review): re-sorting on every iteration is redundant (the
# content does not change inside the loop) but harmless; it fixes
# a deterministic visiting order before indexing with i
toVisit <- sort(unlist(toVisit))
# select the node
nodeToVisit <- toVisit[i]
# removes the edge between nodeToVisit and from
edges <- deleteEdge(from, nodeToVisit, edges)
# makes a recursive call, extending the visited path
result <- getPath(nodeToVisit, to, c(nodeToVisit, visited), edges, neighboursFunction)
# if result is true, breaks the loop because the path
# was found
if (result$flag == TRUE){
break
}
}
}
}
# return result (flag, visited path, remaining edges)
return(result)
}
#'####################### SECTION 2: MAMP functions #########################
#'##############################################################
#' check condition1 for MAMPCG
#' arguments:
#' @param edges set of edges of the model
#' @return list with: boolean flag (true if the list of edges had
#' to be changed) and list of resultant edges
#'##############################################################
checkCondition1 <- function(edges){
# Fix-point loop: for every directed edge x -> y, there must be no
# directed path back from y to x (that would close a directed cycle).
# Any such cycle found is broken by deleting one of its edges at
# random, and the scan restarts until no cycle remains.
changed <- FALSE
continue <- TRUE
while(continue){
# get edges none - arrow (pure directed edges, either orientation)
candidateEdges <- edges[((edges$left == "none" & edges$right == "arrow") |
(edges$left == "arrow" & edges$right == "none")), ]
# change continue value; only a deletion below re-triggers the loop
continue <- FALSE
# checks the paths for every node
if (nrow(candidateEdges) != 0){
for(i in 1:nrow(candidateEdges)){
# gets the edge
edge <- candidateEdges[i,]
# gets node from and to, normalising orientation: the arrow side
# is the destination (nodeTo)
if (edge$left == "none"){
nodeFrom <- edge$from
nodeTo <- edge$to
}
else{
nodeFrom <- edge$to
nodeTo <- edge$from
}
# search a direction-respecting path from nodeTo back to nodeFrom
result <- getPath(nodeTo, nodeFrom, list(nodeTo), edges, getNeighboursWithDirections)
# if there is a path and the first edge is none - arrow
# the condition 1 must be applied
if (result$flag == TRUE){
# removes an edge randomly selected from the offending path
edges <- deleteEdgeAtRandom(result$path, edges)
# repeats the loop
continue <- TRUE
# sets changed to TRUE
changed <- TRUE
# breaks the for (candidateEdges is stale after the deletion)
break
}
}
}
}
return(list(changed=changed, edges=edges))
}
#'##############################################################
#' check condition2 for MAMPCG
#' arguments:
#' @param edges edges defining the model
#' @return list with: boolean flag (true if the list of edges had
#' to be changed) and list of resultant edges
#'##############################################################
checkCondition2 <- function(edges){
# Fix-point loop: for every bidirected edge x <-> y there must be no
# path of purely undirected edges between x and y. Any offending path
# is broken by deleting one of its edges at random; the scan restarts
# until the condition holds.
continue <- TRUE
changed <- FALSE
# detection loop
while(continue){
# get edges arrow - arrow (bidirected edges)
candidateEdges <- edges[(edges$left == "arrow" & edges$right == "arrow"), ]
# change continue value; only a deletion below re-triggers the loop
continue <- FALSE
# checks the paths for every node
if (nrow(candidateEdges) != 0){
for(i in 1:nrow(candidateEdges)){
# gets the edge
edge <- candidateEdges[i,]
# gets node from and to
nodeFrom <- edge$from
nodeTo <- edge$to
# check if there is a path from nodeFrom to nodeTo with undirected edges
result <- getPath(nodeTo, nodeFrom, list(nodeTo), edges, getNeighboursWithUndirectedEdges)
# if there is a path and the first edge is none - arrow
# the condition 1 must be applied
if (result$flag == TRUE){
# removes an edge randomly selected from the offending path
edges <- deleteEdgeAtRandom(result$path, edges)
# repeats the loop
continue <- TRUE
# sets changed to TRUE
changed <- TRUE
# breaks the for (candidateEdges is stale after the deletion)
break
}
}
}
}
return(list(changed=changed, edges=edges))
}
#'##############################################################
#' check condition3 for MAMPCG
#' arguments:
#' @param edges edges defining the model
#' @return list with: boolean flag (true if the list of edges had
#' to be changed) and list of resultant edges
#'##############################################################
checkCondition3 <- function(edges){
  # Fix-point loop: for every node B with at least two undirected
  # neighbours A - B - C, if B also has a spouse (a bidirected edge),
  # then A and C must themselves be joined by an undirected edge.
  # Missing A - C edges are added until the condition holds.
  changed <- FALSE
  continue <- TRUE
  while(continue){
    # assume stable; any edge addition below re-triggers the loop
    continue <- FALSE
    # gets all the nodes in the current edge set
    nodes <- unique(c(edges$from, edges$to))
    # considers every node as the middle node B
    for(i in 1:length(nodes)){
      node <- nodes[i]
      # gets undirected edges incident to B
      edgesForNode <- getUndirectedEdges(node, edges)
      # work only if there are at least two such edges
      if (!is.null(edgesForNode) & nrow(edgesForNode) >= 2){
        # considers every pair of undirected edges around B
        for(j in 1:(nrow(edgesForNode)-1)){
          for(k in (j+1):nrow(edgesForNode)){
            edge1 <- edgesForNode[j,]
            edge2 <- edgesForNode[k,]
            # gets the nodes involved in these edges: A - B - C
            nodesInEdges <- getNodesInEdges(node, edge1, edge2)
            # gets B node spouses
            bSpouses <- getSpousesForNode(node, edges)
            # if this set is not empty, then there must be an
            # undirected edge between A and C
            if(!is.null(bSpouses) & length(bSpouses) != 0){
              # check the link between A and C
              flag <- checkUndirectedEdge(nodesInEdges$A, nodesInEdges$C, edges)
              # if flag is false, then adds an edge between A and C
              if (flag == FALSE){
                edges[(nrow(edges)+1),] <- c(from=nodesInEdges$A, to=nodesInEdges$C,
                left="none", right="none")
                # re-scan from scratch with the enlarged edge set
                continue <- TRUE
                # BUG FIX: the original never set `changed`, so the
                # caller (checkMAMPCG) could stop iterating even though
                # this condition modified the graph and conditions 1/2
                # might need to be re-checked
                changed <- TRUE
              }
            }
          }
        }
      }
    }
  }
  # return the possibly enlarged edge set and whether it was modified
  return(list(changed=changed, edges=edges))
}
#'##############################################################
#' check if the graph is a MAMPCG
#' arguments:
#' @param edges edges defining the model
#' @return list of edges required for a valid MAMPCG model
#'##############################################################
checkMAMPCG <- function(edges){
  # repeatedly apply the three MAMPCG conditions until a whole pass
  # leaves the edge set untouched
  repeat {
    res1 <- checkCondition1(edges)
    res2 <- checkCondition2(res1$edges)
    res3 <- checkCondition3(res2$edges)
    edges <- res3$edges
    # stable: no condition modified the edges during this pass
    if (!res1$changed && !res2$changed && !res3$changed){
      break
    }
  }
  # return the repaired set of edges
  return(edges)
}
#'##############################################################
#' generates a random graph with a certain probability for directed
#' undirected and bidirected graphs
#' arguments:
#' @param numberNodes number of nodes to consider
#' @param probDirected probability for directed edges
#' @param probUndirected probability for undirected edges
#' @param probBidirected probability for bidirected edges
#' @return list of resultant edges
#'##############################################################
generateRandomGraph <- function(numberNodes, probDirected, probUndirected,
probBidirected){
# generates a random DAG skeleton with bnlearn; nodes are named with
# capital letters, so this assumes numberNodes <= 26
rnet <- bnlearn::random.graph(LETTERS[1:numberNodes],method="ic-dag",
max.in.degree=2)
# now gets the arcs
rnetArcs <- bnlearn::arcs(rnet)
# probability vector: probs for directed, undirected, bidirected
probs <- c(probDirected, probUndirected, probBidirected)
# cumulative probabilities used as thresholds for the draw below.
# NOTE(review): assumes the three probabilities sum to (at least) 1;
# if runif(1) exceeded max(aprobs), min(which(...)) would error
aprobs <- cumsum(probs)
# generates a data frame with the required structure for edges
edges <- data.frame(from=character(), to=character(),
left=character(), right=character(),
stringsAsFactors=FALSE)
# considers every arc and assigns it a type at random
for(i in 1:nrow(rnetArcs)){
# selects the edge
arc <- rnetArcs[i,]
# generates a random number
rnumber <- runif(1)
# gets the type according to rnumber: 1 = directed, 2 = undirected,
# 3 = bidirected (first cumulative threshold exceeding the draw)
type <- min(which(aprobs > rnumber))
if (type == 1){
# it is directed and nothing to do. Just insert the
# edge with an arrow head on the destination side
edges[i,] <- c(arc["from"], arc["to"], "none","arrow")
}
else{
if (type == 2){
# it is undirected: no arrow head on either side
edges[i,] <- c(arc["from"], arc["to"], "none","none")
}
else{
# bidirected: arrow heads on both sides
edges[i,] <- c(arc["from"], arc["to"], "arrow","arrow")
}
}
}
# return edges
return(edges)
}
#'##############################################################
#' method for generating a MAMPCG model
#' arguments:
#' @param numberNodes number of nodes
#' @param probs probs to use for the generation of directed, undirected
#' and bidirected
#' @return list of edges defining the model
#'##############################################################
generateRandomMAMPCG <- function(numberNodes, probs){
  # generate the basic initial structure with the given probabilities
  # for directed, undirected and bidirected edges
  edges <- generateRandomGraph(numberNodes, probs[1], probs[2], probs[3])
  # repair the structure until the MAMPCG conditions hold and return it
  # explicitly; the original relied on the invisible value of a final
  # assignment, which works but obscures the function's result
  return(checkMAMPCG(edges))
}
#'############### SECTION 3: functions for databases generation ################
#'##############################################################
#' method for transforming a set of edges in order to construct a
#' bayesian network
#' arguments:
#' @param edges edges defining the model
#' @return list with two entries: edges of the resultant BN and
#' inmoralities produced by the conversion
#'##############################################################
transformToBayesianNetwork <- function(edges){
# Rewrites the mixed graph as a pure DAG:
# - every variable i receives an exogenous parent errori
# - an undirected edge i - j becomes the collider
# errori -> errorij <- errorj (the collider node is recorded in
# `inmoralities`; sampling later conditions on these nodes)
# - a bidirected edge i <-> j becomes a latent common cause
# errorij -> i and errorij -> j
# gets all the nodes
nodes <- unique(c(edges$from,edges$to))
# paste error prefix to every node
rnodes <- sapply(nodes,function(node){
rnode <- paste0("error",node)
})
# include an directed edge for errori to i
for(i in 1:length(nodes)){
# creates a new edge
edges <- createEdge(rnodes[i],nodes[i],edges,"none","arrow")
}
# selects undirected edges
undirected <- edges[(edges$left == "none" & edges$right == "none"),]
# initializes inmoralities to empty list
inmoralities <- list()
# remove the edge and add new edges between errorFrom and errorTo
if (nrow(undirected) != 0){
for(i in 1:nrow(undirected)){
from <- undirected[i,]$from
to <- undirected[i,]$to
# removes the undirected edge itself
edges <- deleteEdge(from, to, edges)
# add edges from error nodes to a new error node named
# "error<from><to>", forming the collider structure
errorFrom <- paste0("error",from)
errorTo <- paste0("error",to)
error <- paste0("error",from)
error <- paste0(error,to)
edges <- createEdge(errorFrom, error, edges,"none","arrow")
edges <- createEdge(errorTo, error, edges, "none", "arrow")
# stores error into inmoralities list (deduplicated)
inmoralities <- unique(c(error, inmoralities))
}
}
# selected bidirected edges
bidirected <- edges[(edges$left == "arrow" & edges$right == "arrow"),]
# for every bdirected node introduces links from common error
# to error nodes (a latent confounder replacing the bidirected edge)
if (nrow(bidirected) != 0){
for(i in 1:nrow(bidirected)){
from <- bidirected[i,]$from
to <- bidirected[i,]$to
# removes the bidirected edge itself
edges <- deleteEdge(from, to, edges)
# add edges from error to from and to
error <- paste0("error",from)
error <- paste0(error,to)
edges <- createEdge(error, from, edges, "none", "arrow")
edges <- createEdge(error, to, edges, "none", "arrow")
}
}
# return edges and inmoralities
return(list(edges=edges, inmoralities=inmoralities))
}
#'##############################################################
#' creates a Bnlearn net for helping the generation of distributions
#' arguments:
#' @param edges edges defining the model
#' @param check flag to show if the existance the cycles will be considered
#' @return resultant bnet
#'##############################################################
createBnlearnNet <- function(edges, check){
  # build an empty bnlearn graph over every node mentioned in edges
  nodes <- unique(c(edges$from, edges$to))
  bnet <- bnlearn::empty.graph(nodes)
  # insert the arcs one at a time, honouring the cycle-check flag
  for (k in seq_len(nrow(edges))){
    currentEdge <- edges[k, ]
    bnet <- bnlearn::set.arc(bnet, from=currentEdge$from, to=currentEdge$to,
    check.cycles=check, debug=FALSE)
  }
  # return the assembled network
  return(bnet)
}
#'##############################################################
#' method for generating distributions for root nodes
#' arguments:
#' @param net net to be considered for databases generation
#' @return list of distributions for root nodes
#'##############################################################
generateDistributionForRootNodes <- function(net){
  # gets all the nodes of the network
  nodes <- bnlearn::nodes(net)
  # BUG FIX: use lapply instead of sapply. sapply may simplify the
  # result to a matrix/vector whenever every node happens to have the
  # same number of parents, which breaks the per-node [[i]] extraction
  # below; lapply always returns a list of character vectors
  parentsOfNodes <- lapply(nodes, function(node){
    bnlearn::parents(net, node)
  })
  # initializes the list of distributions
  distributions <- list()
  # considers every node
  for (i in 1:length(nodes)){
    node <- nodes[i]
    nodeParents <- parentsOfNodes[[i]]
    # root nodes are exactly those with no parents
    if (identical(nodeParents, character(0))){
      # standard deviation drawn uniformly from (1, 2)
      deviation <- runif(1) + 1
      # zero-mean Gaussian with the sampled deviation
      distribution <- list(coef = c("(Intercept)" = 0), sd = deviation)
      distributions[[node]] <- distribution
    }
  }
  # return distributions (named by node)
  distributions
}
#'##############################################################
#' method for generating distributions for non-root nodes
#' arguments:
#' @param net net to be considered for databases generation
#' @return list of distributions for non root nodes
#'##############################################################
generateDistributionForNonRootNodes <- function(net){
  # gets all the nodes of the network
  nodes <- bnlearn::nodes(net)
  # BUG FIX: use lapply instead of sapply. sapply can simplify to a
  # matrix when all nodes have the same number of parents, breaking the
  # per-node [[i]] extraction below; lapply always returns a list
  parentsOfNodes <- lapply(nodes, function(node){
    bnlearn::parents(net, node)
  })
  # initializes the list of distributions
  distributions <- list()
  # considers every node
  for (i in 1:length(nodes)){
    node <- nodes[i]
    nodeParents <- parentsOfNodes[[i]]
    # regression coefficients: intercept fixed to 0
    coefs <- c("(Intercept)" = 0)
    # only nodes WITH parents get a distribution here (roots are
    # handled by generateDistributionForRootNodes)
    if (!identical(nodeParents, character(0))){
      # considers every parent
      for(j in 1:length(nodeParents)){
        parent <- nodeParents[j]
        # error nodes contribute with weight 1; real parents receive a
        # random coefficient in (1, 2)
        if(length(grep("error", parent)) > 0){
          factor <- 1
        }
        else{
          factor <- runif(1) + 1
        }
        # adds the factor to coefs, named after the parent
        coefs[parent] <- factor
      }
      # node is deterministic given its parents (sd = 0)
      distribution <- list(coef = coefs, sd = 0)
      distributions[[node]] <- distribution
    }
  }
  # return distributions (named by node)
  distributions
}
#'##############################################################
#' method for setting the parameters to the net
#' arguments:
#' @param net net to consider
#' @param params parameters to set
#' @return resultant bnet.fit (with parameters)
#'##############################################################
setParameters <- function(net, params){
  # attach the provided conditional distributions to the structure;
  # invisible() preserves the original's invisible return (the original
  # ended in an assignment, whose value is returned invisibly)
  invisible(bnlearn::custom.fit(net, params))
}
#'##############################################################
#' method for remmoving from the complete data set the evidential
#' variables
#' arguments:
#' @param sample sample to filter removing evidence variables
#' @param evidenceVariables variables to remove from sample
#' @return dataframe with samples after removing columns for
#' evidential variables
#'##############################################################
removeEvidentialVariables <- function(sample, evidenceVariables){
  # BUG FIX: the original used sample[, -which(names(sample) %in% ...)];
  # when NO column matched, which() returned integer(0) and the negative
  # subset silently dropped EVERY column. Logical negation keeps all
  # columns in the no-match case and drops exactly the matching ones
  # otherwise.
  sample[ , !(names(sample) %in% evidenceVariables)]
}
#'##############################################################
#' method for generating the complete dataset with the required sample
#' size
#' arguments:
#' @param model model to sample from
#' @param sampleSize size of the sample to generate
#' @param threshold value to consider for evidence expressions
#' @param cl cluster to use (if possible to use several cores)
#' @return dataframe with samples
#'##############################################################
generateSample <- function(model, sampleSize, threshold, cl){
# forms a expression where nodes in inmoralities are set to a value
# >= -threshold and <= threshold (conditioning on the collider nodes
# introduced by transformToBayesianNetwork)
# forms the expressions for evidence: strB for >= expression
# strL for <= expression and strC for the concatenation of both of them
# NOTE(review): `=` is used for these three assignments while the rest
# of the file uses `<-`; behaviour is the same
strB=paste("(",model$inmoralities, " >= ", -threshold, ")", sep="", collapse = " & ")
strL=paste("(",model$inmoralities, " <= ", threshold, ")", sep="", collapse = " & ")
strC=paste(strB, strL, sep=" & ")
cat("Evidence expression: ", strC, "\n")
# export strC to cluster nodes so the workers can evaluate the evidence
environment(strC) <- .GlobalEnv
parallel::clusterExport(cl, "strC", envir=environment())
# loop for getting the samples: likelihood-sampling rejects most draws,
# so batches are generated until sampleSize rows survive the evidence
data <- NULL
nSamples <- 0
while(nSamples < sampleSize){
# generate data. Perhaps the parameters must be changed for a faster generation
# depending on the concrete model
dataf <- bnlearn::cpdist(model$bnetSampling, nodes=bnlearn::nodes(model$bnet),
evidence=eval(parse(text=strC)), method="ls",
debug=FALSE, cluster=cl, batch=50000,
n=sampleSize*30000)
# updates the number of samples
nSamples <- nSamples+nrow(dataf)
cat("..",nSamples,"..")
# join all the samples into data
if(is.null(data)){
data <- dataf
}
else{
data <- rbind(data,dataf)
}
}
# now remove all the variables containing error in their names
# NOTE(review): if no column name contained "error", -grep(...) would
# drop every column; model construction always adds error nodes, so
# this is presumably safe here — confirm against callers
data <- data[,-grep("error", colnames(data))]
# remove extra samples beyond the requested size
data <- data[(1:sampleSize),]
# return data
return(data)
}
#'##############################################################
#' function for storing the data set to a file
#' arguments:
#' @param sample: sample to store
#' @param id id of the database
#' @param numberSamples number of samples
#' @param path path where databases must be stored
#' @param filename filename to use
#' @return
#'##############################################################
storeDatabase <- function(sample, id, numberSamples, path, filename){
  # make sure <path><filename>/ exists
  modelDir <- paste0(path, filename)
  if (!file.exists(modelDir)){
    dir.create(file.path(path, filename))
  }
  # make sure <path><filename>/<numberSamples>/ exists
  if (!file.exists(paste(modelDir, numberSamples, sep="/"))){
    dir.create(file.path(modelDir, numberSamples))
  }
  sizeDir <- paste(modelDir, numberSamples, sep="/")
  # target file: <filename>-<numberSamples>-<id>.db inside sizeDir
  dbName <- paste0(paste(filename, numberSamples, id, sep="-"), ".db")
  target <- paste(sizeDir, dbName, sep="/")
  # write the sample as a comma-separated file with a header row
  write.table(sample, file=target, col.names=TRUE, row.names=FALSE, sep=",")
}
#'##############################################################
#' method for storing the complete model: edges, bnet with
#' distributions and inmoralities
#' arguments:
#' @param model list containg all the information
#' @param folder where to store the model
#'##############################################################
storeModel <- function(model, folder){
  # serialized file lives at <folder><name>.mampcg
  targetPath <- paste0(paste0(folder, model$name), ".mampcg")
  # persist the whole model object (name, edges, bnet, inmoralities)
  saveRDS(model, targetPath)
}
#'##############################################################
#' method for retrieving the complete model: edges, bnet with
#' distributions and inmoralities
#' arguments:
#' @param modelname name of the model (without extension)
#' @param folder where to look for the model
#' @return model read from <folder><modelname>.mampcg
#'##############################################################
retrieveModel <- function(modelname, folder){
  # compose the full path: <folder><modelname>.mampcg
  pathName <- paste0(paste0(folder, modelname), ".mampcg")
  # read and explicitly return the model; the original relied on the
  # invisible value of an assignment as the last expression, and its
  # roxygen documented a parameter `name` that does not exist
  return(readRDS(pathName))
}
#'##############################################################
#' prepare a MAMPCG model for sampling data from it
#' arguments:
#' @param edges
#' @param basename base name for the models
#' @param folder to analyze in order to assign an unique identifier
#' @return list with four entries: name, edges, bnet and inmoralities
#'##############################################################
prepareMAMPCGForSampling <- function(edges, basename, folder){
# gets a unique name for this model (basename plus next free numeric id)
name <- getNewFileName(basename, folder)
# creates a BN from edges, without checking edges; this is the network
# returned to callers as the "structural" view of the model
baseBnet <- createBnlearnNet(edges, check=FALSE)
# transform into BN: undirected/bidirected edges are replaced by error
# nodes; result$inmoralities lists the collider nodes to condition on
result <- transformToBayesianNetwork(edges)
# create a bnlearn net for preparing parameters generaration, this time
# with cycle checking enabled (the transformed graph must be a DAG)
bnet <- createBnlearnNet(result$edges, TRUE)
# compute distributions for root nodes (zero-mean Gaussians)
distributionsRootNodes <- generateDistributionForRootNodes(bnet)
# compute distributions for non root nodes (linear in their parents)
distributionsNonRootNodes <- generateDistributionForNonRootNodes(bnet)
# compose the complete set of distributions
distributions <- c(distributionsRootNodes, distributionsNonRootNodes)
# now sets all the distributions to the net
bnet <- setParameters(bnet, distributions)
# finally return the name, edges, net and the set of inmoralities
return(list(name=name, edges=edges, bnet=baseBnet, bnetSampling=bnet, inmoralities=result$inmoralities))
}
#'##############################################################
# method for generating data sets given a model
# arguments:
#' @param model model to use for generation
#' @param variants number of variants to generate
#' @param sampleSizes vector of sample sizes
#' @param threshold for evidence expressions
#' @param pathDb path where databases will be generated
#' @param cluster cluster to use several cores
#'##############################################################
generateDatabases <- function(model, variants, sampleSizes, threshold, pathDb, cluster){
  # one batch of databases per requested sample size.
  # BUG FIX: use seq_along/seq_len instead of 1:length(...) and
  # 1:variants — when variants == 0 (or sampleSizes is empty) the
  # original 1:0 sequence is c(1, 0) and the loop body runs twice
  # instead of not at all
  for (sizeIndex in seq_along(sampleSizes)){
    size <- sampleSizes[sizeIndex]
    # generate and store every variant for this sample size
    for (variant in seq_len(variants)){
      data <- generateSample(model, size, threshold, cluster)
      storeDatabase(data, variant, size, pathDb, model$name)
    }
  }
}
|
f1dde2c86f15fee2f7443276185fe9bfad465c07
|
02536fad62b930b2e4da91f55d800d805bcb9bce
|
/staphopia/man/get_samples_by_pmid.Rd
|
866ec7e8fdb84113e668ce9aab496d4e6dc901e1
|
[] |
no_license
|
staphopia/staphopia-r
|
ef22481b947e0717cbcdda1ae7e7755fd0af1b88
|
df19fa91421e18a990d861b5d138698acf0a731c
|
refs/heads/master
| 2023-08-07T15:43:10.300845
| 2023-08-01T21:06:34
| 2023-08-01T21:06:34
| 59,863,793
| 4
| 3
| null | 2018-04-09T17:17:13
| 2016-05-27T21:18:55
|
HTML
|
UTF-8
|
R
| false
| true
| 398
|
rd
|
get_samples_by_pmid.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/tag.R
\name{get_samples_by_pmid}
\alias{get_samples_by_pmid}
\title{get_samples_by_pmid}
\usage{
get_samples_by_pmid(pmid)
}
\arguments{
\item{pmid}{An integer PubMed ID}
}
\value{
Parsed JSON response.
}
\description{
Retrieve all samples associated with a given PubMed ID.
}
\examples{
get_samples_by_pmid(15155238)
}
|
457c0de7b6ff657770d7196b16e3e8a6446b8de0
|
ffdea92d4315e4363dd4ae673a1a6adf82a761b5
|
/data/genthat_extracted_code/drake/tests/test-dsl.R
|
85a58332a06a5bfbc502430bca867789be7a43d1
|
[] |
no_license
|
surayaaramli/typeRrh
|
d257ac8905c49123f4ccd4e377ee3dfc84d1636c
|
66e6996f31961bc8b9aafe1a6a6098327b66bf71
|
refs/heads/master
| 2023-05-05T04:05:31.617869
| 2019-04-25T22:10:06
| 2019-04-25T22:10:06
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 54,281
|
r
|
test-dsl.R
|
drake_context("dsl")
# A plan with no transform= targets must pass through transform_plan()
# unchanged.
test_with_dir("nothing to transform", {
exp <- drake_plan(a = 1)
out <- transform_plan(exp)
equivalent_plans(out, exp)
})
# Transforms with no grouping variables cannot expand anything: each
# must warn and yield an empty plan, both combined and individually.
test_with_dir("empty transforms", {
expect_warning(
out <- drake_plan(
a = target(x, transform = cross()),
b = target(y, transform = combine()),
c = target(z, transform = map())
),
regexp = "grouping or splitting variable"
)
equivalent_plans(out, drake_plan())
expect_warning(
out <- drake_plan(a = target(x, transform = cross())),
regexp = "grouping or splitting variable"
)
expect_warning(
out <- drake_plan(b = target(y, transform = combine())),
regexp = "grouping or splitting variable"
)
expect_warning(
out <- drake_plan(c = target(z, transform = map())),
regexp = "grouping or splitting variable"
)
})
# A grouping variable unquoted (!!) to NULL is equivalent to supplying
# none at all: warning plus empty plan.
test_with_dir("more empty transforms", {
x_vals <- NULL
expect_warning(
out <- drake_plan(a = target(x, transform = map(x = !!x_vals))),
regexp = "grouping or splitting variable"
)
equivalent_plans(out, drake_plan())
})
# cross/map/combine chained through one grouping level: target names
# accumulate the grouping suffixes (a_1, b_a_1) and combine() gathers
# them into a list().
test_with_dir("1 grouping level", {
out <- drake_plan(
a = target(x, transform = cross(x = 1)),
b = target(a, transform = map(a)),
c = target(b, transform = combine(b))
)
exp <- drake_plan(
a_1 = 1,
b_a_1 = a_1,
c = list(b_a_1)
)
equivalent_plans(out, exp)
})
# A NULL grouping value still produces a target (x_NULL) whose command
# is empty.
test_with_dir("empty grouping levels", {
out <- drake_plan(x = target(y, transform = map(y = c(z, NULL))))
exp <- weak_tibble(
target = c("x_z", "x_NULL"),
command = c("z", "")
)
equivalent_plans(out, exp)
})
# A non-call transform value (here the literal 132) is rejected with an
# informative error.
test_with_dir("bad transform", {
expect_error(
drake_plan(x = target(1, transform = 132)),
regexp = "invalid transform"
)
})
# cross() over one two-valued variable expands a single target into two,
# keeping the command untouched.
test_with_dir("simple expansion", {
out <- drake_plan(a = target(1 + 1, transform = cross(x = c(1, 2))))
exp <- weak_tibble(
target = c("a_1", "a_2"),
command = rep("1 + 1", 2)
)
equivalent_plans(out, exp)
})
# Repeated grouping values (x = c(1, 1)) must yield distinct target
# names (a_1, a_1_2); trace = TRUE records the grouping columns.
test_with_dir("replicates", {
out <- drake_plan(
trace = TRUE,
a = target(x, transform = map(x = c(1, 1))),
b = target(f(a), transform = map(a))
)
exp <- drake_plan(
a_1 = target(
command = 1,
x = "1",
a = "a_1"
),
a_1_2 = target(
command = 1,
x = "1",
a = "a_1_2"
),
b_a_1 = target(
command = f(a_1),
x = "1",
a = "a_1",
b = "b_a_1"
),
b_a_1_2 = target(
command = f(a_1_2),
x = "1",
a = "a_1_2",
b = "b_a_1_2"
)
)
equivalent_plans(out, exp)
})
# .tag_in adds a trace column (here, "single") whose value is the *original*
# target name ("x") for every expanded target, i.e. it tags targets with the
# group they came from. Fixed: trace = T -> trace = TRUE (T is a reassignable
# binding, not a reserved word; tidyverse/lintr style requires TRUE).
test_with_dir("single tag_in", {
  out <- drake_plan(
    x = target(
      y,
      transform = cross(
        x = c(1, 2),
        .tag_in = single
      )
    ),
    trace = TRUE
  )
  exp <- drake_plan(
    x_1 = target(
      command = y,
      x = "x_1",
      single = "x"
    ),
    x_2 = target(
      command = y,
      x = "x_2",
      single = "x"
    )
  )
  equivalent_plans(out, exp)
})
# .tag_in accepts a vector of tag names; each one becomes its own trace column
# holding the pre-expansion target name ("x"). Fixed: trace = T -> trace = TRUE
# (never use the reassignable T/F shorthands).
test_with_dir("multiple tag_in", {
  out <- drake_plan(
    x = target(
      y,
      transform = cross(
        x = c(1, 2),
        .tag_in = c(one, second)
      )
    ),
    trace = TRUE
  )
  exp <- drake_plan(
    x_1 = target(
      command = y,
      x = "x_1",
      one = "x",
      second = "x"
    ),
    x_2 = target(
      command = y,
      x = "x_2",
      one = "x",
      second = "x"
    )
  )
  equivalent_plans(out, exp)
})
# .tag_out differs from .tag_in: the tag column holds each *expanded* target
# name (x_1, x_2), so later combine() calls can gather the expanded targets.
# Fixed: trace = T -> trace = TRUE (avoid the reassignable T shorthand).
test_with_dir("single tag_out", {
  out <- drake_plan(
    x = target(
      y,
      transform = cross(
        x = c(1, 2),
        .tag_out = single
      )
    ),
    trace = TRUE
  )
  exp <- drake_plan(
    x_1 = target(
      command = y,
      x = "x_1",
      single = "x_1"
    ),
    x_2 = target(
      command = y,
      x = "x_2",
      single = "x_2"
    )
  )
  equivalent_plans(out, exp)
})
# Multiple .tag_out tags: every listed tag becomes a trace column holding the
# expanded target name. Fixed: trace = T -> trace = TRUE (style; T can be
# rebound, TRUE cannot).
test_with_dir("multiple tag_out", {
  out <- drake_plan(
    x = target(
      y,
      transform = cross(
        x = c(1, 2),
        .tag_out = c(one, second)
      )
    ),
    trace = TRUE
  )
  exp <- drake_plan(
    x_1 = target(
      command = y,
      x = "x_1",
      one = "x_1",
      second = "x_1"
    ),
    x_2 = target(
      command = y,
      x = "x_2",
      one = "x_2",
      second = "x_2"
    )
  )
  equivalent_plans(out, exp)
})
# map() over one variable behaves like cross() in the single-variable case:
# one target per level, command unchanged.
test_with_dir("simple map", {
  out <- drake_plan(a = target(1 + 1, transform = map(x = c(1, 2))))
  exp <- weak_tibble(
    target = c("a_1", "a_2"),
    command = rep("1 + 1", 2)
  )
  equivalent_plans(out, exp)
})
# With two variables, map() zips them pairwise (1-3, 2-4) instead of taking
# the Cartesian product cross() would produce.
test_with_dir("simple map with 2 factors", {
  out <- drake_plan(
    a = target(1 + 1, transform = map(x = c(1, 2), y = c(3, 4)))
  )
  exp <- weak_tibble(
    target = c("a_1_3", "a_2_4"),
    command = rep("1 + 1", 2)
  )
  equivalent_plans(out, exp)
})
# The grouping symbol ("source") is substituted into the command itself, not
# just the target name, for both cross() and map().
test_with_dir("all new crossings", {
  out <- drake_plan(
    analysis = target(
      analyze_data(source),
      transform = cross(source = c(source1, source2))
    )
  )
  exp <- drake_plan(
    analysis_source1 = analyze_data(source1),
    analysis_source2 = analyze_data(source2)
  )
  equivalent_plans(out, exp)
})
test_with_dir("1 new map", {
  out <- drake_plan(
    analysis = target(
      analyze_data(source),
      transform = map(source = c(source1, source2))
    )
  )
  exp <- drake_plan(
    analysis_source1 = analyze_data(source1),
    analysis_source2 = analyze_data(source2)
  )
  equivalent_plans(out, exp)
})
# Two map() variables substitute pairwise into a two-argument command.
test_with_dir("2 new maps", {
  out <- drake_plan(
    analysis = target(
      analyze_data(source, set),
      transform = map(source = c(source1, source2), set = c(set1, set2))
    )
  )
  exp <- drake_plan(
    analysis_source1_set1 = analyze_data(source1, set1),
    analysis_source2_set2 = analyze_data(source2, set2)
  )
  equivalent_plans(out, exp)
})
# Transform variables with no definitions anywhere (no levels, no upstream
# targets) trigger a warning and the affected targets are dropped; untouched
# static targets survive.
test_with_dir("groups and command symbols are undefined", {
  expect_warning(
    out <- drake_plan(
      small = simulate(48),
      large = simulate(64),
      lots = target(nobody(home), transform = cross(a, b)),
      mots = target(everyone(out), transform = map(c, d)),
      winners = target(min(nobodyhome), transform = combine(data))
    ),
    regexp = "grouping or splitting variable"
  )
  exp <- drake_plan(
    small = simulate(48),
    large = simulate(64)
  )
  equivalent_plans(out, exp)
})
# combine(data) only aggregates the symbol named in combine(); other symbols
# in the command ("nope") are left as-is even though matching targets exist.
test_with_dir("command symbols are for combine() but the plan has them", {
  out <- drake_plan(
    data = target(x, transform = map(x = c(1, 2))),
    nope = target(x, transform = map(x = c(1, 2))),
    winners = target(min(data, nope), transform = combine(data))
  )
  exp <- drake_plan(
    data_1 = 1,
    data_2 = 2,
    nope_1 = 1,
    nope_2 = 2,
    winners = min(data_1, data_2, nope)
  )
  equivalent_plans(out, exp)
})
# combine() can aggregate two independently expanded groups in one command.
# Note target names derived from string levels ("example1.com") have special
# characters replaced by dots.
test_with_dir("combine different groups together", {
  out <- drake_plan(
    data_group1 = target(
      sim_data(mean = x, sd = y),
      transform = map(x = c(1, 2), y = c(3, 4))
    ),
    data_group2 = target(
      pull_data(url),
      transform = map(url = c("example1.com", "example2.com"))
    ),
    larger = target(
      bind_rows(data_group1, data_group2),
      transform = combine(
        data_group1,
        data_group2
      )
    )
  )
  exp <- drake_plan(
    data_group1_1_3 = sim_data(mean = 1, sd = 3),
    data_group1_2_4 = sim_data(mean = 2, sd = 4),
    data_group2_.example1.com. = pull_data("example1.com"),
    data_group2_.example2.com. = pull_data("example2.com"),
    larger = bind_rows(
      data_group1_1_3, data_group1_2_4,
      data_group2_.example1.com., data_group2_.example2.com. # nolint
    )
  )
  equivalent_plans(out, exp)
})
# combine(..., .by = c(x, y)) splits the aggregation by both grouping levels:
# one "larger" target per (x, y) combination, each pooling the two source
# groups that share those levels.
test_with_dir("multiple groups and multiple splits", {
  out <- drake_plan(
    data_group1 = target(
      sim(mean = x, sd = y),
      transform = cross(x = c(1, 2), y = c(3, 4))
    ),
    data_group2 = target(
      pull(mean = x, sd = y),
      transform = cross(x = c(1, 2), y = c(3, 4))
    ),
    larger = target(
      bind_rows(data_group1, data_group2),
      transform = combine(
        data_group1,
        data_group2,
        .by = c(x, y)
      )
    )
  )
  exp <- drake_plan(
    data_group1_1_3 = sim(mean = 1, sd = 3),
    data_group1_2_3 = sim(mean = 2, sd = 3),
    data_group1_1_4 = sim(mean = 1, sd = 4),
    data_group1_2_4 = sim(mean = 2, sd = 4),
    data_group2_1_3 = pull(mean = 1, sd = 3),
    data_group2_2_3 = pull(mean = 2, sd = 3),
    data_group2_1_4 = pull(mean = 1, sd = 4),
    data_group2_2_4 = pull(mean = 2, sd = 4),
    larger_1_3 = bind_rows(data_group1_1_3, data_group2_1_3),
    larger_2_3 = bind_rows(data_group1_2_3, data_group2_2_3),
    larger_1_4 = bind_rows(data_group1_1_4, data_group2_1_4),
    larger_2_4 = bind_rows(data_group1_2_4, data_group2_2_4)
  )
  equivalent_plans(out, exp)
})
# Deferred transformation: build the plan with transform = FALSE, replace the
# command column with a language object (not text), then run transform_plan()
# manually. Expansion must handle list-column commands.
test_with_dir("dsl with different types", {
  plan <- drake_plan(
    a = target(1 + 1, transform = cross(x = c(1, 2))),
    transform = FALSE
  )
  plan$command <- list(quote(1 + 1))
  plan <- transform_plan(plan, envir = environment())
  plan$command <- unlist(lapply(plan$command, safe_deparse))
  expect_equal(sort(plan$target), sort(c("a_1", "a_2")))
  expect_equal(plan$command, rep("1 + 1", 2))
})
# End-to-end DSL workflow modeled on the mtcars example: two datasets, crossed
# regressions and summaries, combine() grouped by (data, sum_fun), a combine()
# that pools two groups inside one expression, and a final全 combine() over the
# winners. Exercises nesting of combine() results inside larger expressions
# (see the "others" targets).
test_with_dir("dsl with a version of the mtcars plan", {
  out <- drake_plan(
    small = simulate(48),
    large = simulate(64),
    reg = target(
      reg_fun(data),
      transform = cross(reg_fun = c(reg1, reg2), data = c(small, large))
    ),
    summ = target(
      sum_fun(data, reg),
      transform = cross(sum_fun = c(coef, residuals), reg)
    ),
    winners = target(
      min(summ),
      transform = combine(summ, .by = c(data, sum_fun))
    ),
    others = target(
      analyze(list(c(summ, data))) + 1,
      transform = combine(
        summ,
        data,
        .by = c(data, sum_fun)
      )
    ),
    final_winner = target(
      min(winners),
      transform = combine(winners)
    )
  )
  exp <- drake_plan(
    small = simulate(48),
    large = simulate(64),
    reg_reg1_small = reg1(small),
    reg_reg2_small = reg2(small),
    reg_reg1_large = reg1(large),
    reg_reg2_large = reg2(large),
    summ_coef_reg_reg1_large = coef(large, reg_reg1_large),
    summ_residuals_reg_reg1_large = residuals(large, reg_reg1_large),
    summ_coef_reg_reg1_small = coef(small, reg_reg1_small),
    summ_residuals_reg_reg1_small = residuals(small, reg_reg1_small),
    summ_coef_reg_reg2_large = coef(large, reg_reg2_large),
    summ_residuals_reg_reg2_large = residuals(large, reg_reg2_large),
    summ_coef_reg_reg2_small = coef(small, reg_reg2_small),
    summ_residuals_reg_reg2_small = residuals(small, reg_reg2_small),
    winners_large_coef = min(
      summ_coef_reg_reg1_large,
      summ_coef_reg_reg2_large
    ),
    winners_small_coef = min(
      summ_coef_reg_reg1_small,
      summ_coef_reg_reg2_small
    ),
    winners_large_residuals = min(
      summ_residuals_reg_reg1_large,
      summ_residuals_reg_reg2_large
    ),
    winners_small_residuals = min(
      summ_residuals_reg_reg1_small,
      summ_residuals_reg_reg2_small
    ),
    others_large_coef = analyze(list(c(
      summ_coef_reg_reg1_large,
      summ_coef_reg_reg2_large,
      large
    ))) + 1,
    others_small_coef = analyze(list(c(
      summ_coef_reg_reg1_small,
      summ_coef_reg_reg2_small,
      small
    ))) + 1,
    others_large_residuals = analyze(list(c(
      summ_residuals_reg_reg1_large,
      summ_residuals_reg_reg2_large,
      large
    ))) + 1,
    others_small_residuals = analyze(list(c(
      summ_residuals_reg_reg1_small,
      summ_residuals_reg_reg2_small,
      small
    ))) + 1,
    final_winner = min(
      winners_large_coef,
      winners_small_coef,
      winners_large_residuals,
      winners_small_residuals
    )
  )
  equivalent_plans(out, exp)
})
# map() (pairwise, unlike cross()) plus custom columns: custom1/custom2 set via
# target() must be carried into every expanded target of that group.
test_with_dir("more map", {
  out <- drake_plan(
    small = simulate(48),
    large = simulate(64),
    reg = target(
      reg_fun(data),
      transform = map(reg_fun = c(reg1, reg2), data = c(small, large))
    ),
    summ = target(
      sum_fun(data, reg),
      transform = map(sum_fun = c(coef, residuals), reg),
      custom1 = 123L
    ),
    winners = target(
      min(summ),
      transform = combine(summ, .by = c(sum_fun, data)),
      custom2 = 456L
    )
  )
  exp <- drake_plan(
    small = simulate(48),
    large = simulate(64),
    reg_reg1_small = reg1(small),
    reg_reg2_large = reg2(large),
    summ_coef_reg_reg1_small = target(
      command = coef(small, reg_reg1_small),
      custom1 = 123L
    ),
    summ_residuals_reg_reg2_large = target(
      command = residuals(large, reg_reg2_large),
      custom1 = 123L
    ),
    winners_residuals_large = target(
      command = min(
        summ_residuals_reg_reg2_large),
      custom2 = 456L
    ),
    winners_coef_small = target(
      command = min(
        summ_coef_reg_reg1_small
      ),
      custom2 = 456L
    )
  )
  equivalent_plans(out, exp)
})
# A map() seeding a downstream cross() chain: cross(reg) and cross(sum_fun, reg)
# must pick up the targets generated by the upstream map(), and combine(.by)
# groups the final aggregation by both inherited levels.
test_with_dir("map on mtcars-like workflow", {
  out <- drake_plan(
    data = target(
      simulate(nrows),
      transform = map(nrows = c(48, 64))
    ),
    reg = target(
      reg_fun(data),
      transform = cross(reg_fun = c(reg1, reg2), data)
    ),
    summ = target(
      sum_fun(data, reg),
      transform = cross(sum_fun = c(coef, resid), reg)
    ),
    winners = target(
      min(summ),
      transform = combine(summ, .by = c(data, sum_fun))
    )
  )
  exp <- drake_plan(
    data_48 = simulate(48),
    data_64 = simulate(64),
    reg_reg1_data_48 = reg1(data_48),
    reg_reg2_data_48 = reg2(data_48),
    reg_reg1_data_64 = reg1(data_64),
    reg_reg2_data_64 = reg2(data_64),
    summ_coef_reg_reg1_data_48 = coef(data_48, reg_reg1_data_48),
    summ_resid_reg_reg1_data_48 = resid(data_48, reg_reg1_data_48),
    summ_coef_reg_reg1_data_64 = coef(data_64, reg_reg1_data_64),
    summ_resid_reg_reg1_data_64 = resid(data_64, reg_reg1_data_64),
    summ_coef_reg_reg2_data_48 = coef(data_48, reg_reg2_data_48),
    summ_resid_reg_reg2_data_48 = resid(data_48, reg_reg2_data_48),
    summ_coef_reg_reg2_data_64 = coef(data_64, reg_reg2_data_64),
    summ_resid_reg_reg2_data_64 = resid(data_64, reg_reg2_data_64),
    winners_data_48_coef = min(
      summ_coef_reg_reg1_data_48,
      summ_coef_reg_reg2_data_48
    ),
    winners_data_64_coef = min(
      summ_coef_reg_reg1_data_64,
      summ_coef_reg_reg2_data_64
    ),
    winners_data_48_resid = min(
      summ_resid_reg_reg1_data_48,
      summ_resid_reg_reg2_data_48
    ),
    winners_data_64_resid = min(
      summ_resid_reg_reg1_data_64,
      summ_resid_reg_reg2_data_64
    )
  )
  equivalent_plans(out, exp)
})
# map() zips its variables pairwise, so groupings of different lengths
# (2 reg_funs vs 3 datasets) are an error, not a recycling situation.
test_with_dir("map with unequal columns", {
  expect_error(
    drake_plan(
      small = simulate(48),
      large = simulate(64),
      reg = target(
        reg_fun(data),
        transform = map(reg_fun = c(reg1, reg2), data = c(small, large, huge))
      )
    ),
    regexp = "uneven groupings detected in map"
  )
})
# A length-1 map() variable (reg_fun = reg1) is recycled against the longer
# one, and symbols with no matching target ("huge") still expand; trace = TRUE
# records the level of each grouping variable per target.
test_with_dir("map with an indicator column", {
  out <- drake_plan(
    small = simulate(48),
    large = simulate(64),
    reg = target(
      reg_fun(data),
      transform = map(reg_fun = reg1, data = c(small, large, huge))
    ),
    trace = TRUE
  )
  exp <- drake_plan(
    small = simulate(48),
    large = simulate(64),
    reg_reg1_small = target(
      command = reg1(small),
      reg_fun = "reg1",
      data = "small",
      reg = "reg_reg1_small"
    ),
    reg_reg1_large = target(
      command = reg1(large),
      reg_fun = "reg1",
      data = "large",
      reg = "reg_reg1_large"
    ),
    reg_reg1_huge = target(
      command = reg1(huge),
      reg_fun = "reg1",
      data = "huge",
      reg = "reg_reg1_huge"
    )
  )
  equivalent_plans(out, exp)
})
# Custom columns: (1) values propagate only to the group's own rows (NA
# elsewhere); (2) reserved names (target/command/transform) and names that
# collide with grouping variables are rejected as custom columns. The plan
# expression is kept as a quoted call so each illegal target() can be spliced
# in before evaluation.
test_with_dir("dsl and custom columns", {
  e <- quote(
    drake_plan(
      small = simulate(48),
      large = simulate(64),
      reg = target(
        reg_fun(data),
        transform = cross(reg_fun = c(reg1, reg2), data = c(small, large))
      ),
      summ = target(
        sum_fun(data, reg),
        transform = cross(sum_fun = c(coef, residuals), reg),
        custom1 = 123L
      ),
      winners = target(
        min(summ),
        transform = combine(summ, .by = c(data, sum_fun)),
        custom2 = 456L
      )
    )
  )
  expect_silent(plan <- eval(e))
  # Row layout: 6 un-tagged targets, 8 summ targets, 4 winners targets.
  expect_equal(
    plan$custom1,
    c(rep(NA_integer_, 6), rep(123L, 8), rep(NA_integer_, 4))
  )
  expect_equal(
    plan$custom2,
    c(rep(NA_integer_, 14), rep(456L, 4))
  )
  illegals <- list(
    quote(target(simulate(48), transform = map(command))),
    quote(target(simulate(48), transform = map(transform))),
    quote(target(simulate(48), transform = map(target))),
    quote(target(simulate(48), transform = map(target = 123))),
    quote(target(simulate(48), transform = map(command = 123))),
    quote(target(simulate(48), transform = map(transform = 123))),
    quote(target(simulate(48), data = 123)),
    quote(target(simulate(48), reg = 123)),
    quote(target(simulate(48), reg_fun = 123)),
    quote(target(simulate(48), sum_fun = 123)),
    quote(target(simulate(48), summ = 123))
  )
  msg <- "cannot also be custom column names in the plan"
  # The first two illegals fail with a different message, so no regexp there.
  for (illegal in illegals[1:2]) {
    e[[2]] <- illegal
    expect_error(eval(e))
  }
  for (illegal in illegals[-1:-2]) {
    e[[2]] <- illegal
    expect_error(eval(e), regexp = msg)
  }
})
# The trace argument: trace = FALSE yields only target/command columns;
# trace = TRUE adds one column per grouping variable and per transformed
# target group. Neither case injects a literal "trace" target.
test_with_dir("dsl trace", {
  plan <- drake_plan(
    small = simulate(48),
    large = simulate(64),
    reg = target(
      reg_fun(data),
      transform = cross(reg_fun = c(reg1, reg2), data = c(small, large))
    ),
    summ = target(
      sum_fun(data, reg),
      transform = cross(sum_fun = c(coef, residuals), reg)
    ),
    winners = target(
      min(summ),
      transform = combine(data, sum_fun)
    ),
    trace = FALSE
  )
  expect_false("trace" %in% plan$target)
  expect_equal(sort(colnames(plan)), sort(c("target", "command")))
  plan <- drake_plan(
    small = simulate(48),
    large = simulate(64),
    reg = target(
      reg_fun(data),
      transform = cross(reg_fun = c(reg1, reg2), data = c(small, large))
    ),
    summ = target(
      sum_fun(data, reg),
      transform = cross(sum_fun = c(coef, residuals), reg)
    ),
    winners = target(
      min(summ),
      transform = combine(data, sum_fun)
    ),
    trace = TRUE
  )
  expect_false("trace" %in% plan$target)
  expect_equal(
    sort(colnames(plan)),
    sort(c(
      "target", "command", "reg", "reg_fun", "data", "summ",
      "sum_fun", "winners"
    ))
  )
})
# Integration test: a DSL-generated plan actually runs under make(), builds
# all 14 targets on the first pass, and is fully up to date on the second.
# Uses an in-memory storr cache to avoid touching disk.
test_with_dir("running a dsl-generated mtcars-like plan", {
  skip_on_cran()
  skip_if_not_installed("knitr")
  load_mtcars_example()
  rm(my_plan)  # only the helper functions from the example are needed
  plan <- drake_plan(
    small = simulate(48),
    large = simulate(64),
    reg = target(
      reg_fun(data),
      transform = cross(data = c(small, large), reg_fun = c(reg1, reg2))
    ),
    summ = target(
      summary(reg)$sumtype,
      transform = cross(reg, sumtype = c(residuals, coefficients))
    )
  )
  expect_equal(nrow(plan), 14L)
  cache <- storr::storr_environment()
  make(plan, session_info = FALSE, cache = cache)
  config <- drake_config(plan, cache = cache)
  expect_equal(sort(justbuilt(config)), sort(plan$target))
  make(plan, session_info = FALSE, cache = cache)
  expect_equal(justbuilt(config), character(0))
})
# Two cross() groups share a .tag_out tag ("reg"); combine(reg) must gather
# the expanded targets of both groups into one aggregation. Also checks that
# a custom column (a = 1) and the trace columns coexist on the combine target.
test_with_dir("dsl .tag_out groupings", {
  out <- drake_plan(
    small = simulate(48),
    large = simulate(64),
    reg1 = target(
      rgfun(data),
      transform = cross(data = c(small, large), .tag_out = c(reg, othergroup))
    ),
    reg2 = target(
      rgfun(data),
      transform = cross(data = c(small, large), .tag_out = reg)
    ),
    winners = target(min(reg), transform = combine(reg), a = 1),
    trace = TRUE
  )
  exp <- drake_plan(
    small = simulate(48),
    large = simulate(64),
    reg1_small = target(
      command = rgfun(small),
      data = "small",
      reg1 = "reg1_small",
      reg = "reg1_small",
      othergroup = "reg1_small"
    ),
    reg1_large = target(
      command = rgfun(large),
      data = "large",
      reg1 = "reg1_large",
      reg = "reg1_large",
      othergroup = "reg1_large"
    ),
    reg2_small = target(
      command = rgfun(small),
      data = "small",
      reg = "reg2_small",
      reg2 = "reg2_small"
    ),
    reg2_large = target(
      command = rgfun(large),
      data = "large",
      reg = "reg2_large",
      reg2 = "reg2_large"
    ),
    winners = target(
      command = min(
        reg1_small,
        reg1_large,
        reg2_small,
        reg2_large
      ),
      a = 1,
      winners = "winners"
    )
  )
  equivalent_plans(out, exp)
})
# Tags interact with combine(): two map() groups share .tag_in = grp (group
# identity) and .tag_out = targs (expanded names); combine(targs, .by = grp)
# then produces one aggregate per source group, and combine() itself can carry
# its own .tag_in/.tag_out.
test_with_dir("combine() and tags", {
  i <- as.numeric(1:3)
  out <- drake_plan(
    x = target(1, transform = map(f = !!i, .tag_in = grp, .tag_out = targs)),
    y = target(1, transform = map(g = !!i, .tag_in = grp, .tag_out = targs)),
    z = target(
      min(targs),
      transform = combine(
        targs,
        .by = grp,
        .tag_in = im,
        .tag_out = here
      )
    ),
    trace = TRUE
  )
  exp <- drake_plan(
    x_1 = target(
      command = 1,
      f = "1",
      x = "x_1",
      grp = "x",
      targs = "x_1"
    ),
    x_2 = target(
      command = 1,
      f = "2",
      x = "x_2",
      grp = "x",
      targs = "x_2"
    ),
    x_3 = target(
      command = 1,
      f = "3",
      x = "x_3",
      grp = "x",
      targs = "x_3"
    ),
    y_1 = target(
      command = 1,
      grp = "y",
      targs = "y_1",
      g = "1",
      y = "y_1"
    ),
    y_2 = target(
      command = 1,
      grp = "y",
      targs = "y_2",
      g = "2",
      y = "y_2"
    ),
    y_3 = target(
      command = 1,
      grp = "y",
      targs = "y_3",
      g = "3",
      y = "y_3"
    ),
    z_x = target(
      command = min(x_1, x_2, x_3),
      grp = "x",
      z = "z_x",
      im = "z",
      here = "z_x"
    ),
    z_y = target(
      command = min(y_1, y_2, y_3),
      grp = "y",
      z = "z_y",
      im = "z",
      here = "z_y"
    )
  )
  equivalent_plans(out, exp)
})
# transform = FALSE suppresses all expansion: the plan keeps the original
# five targets with their transform calls unapplied.
test_with_dir("can disable transformations in dsl", {
  out <- drake_plan(
    small = simulate(48),
    large = simulate(64),
    reg1 = target(
      reg_fun(data),
      transform = cross(data = c(small, large), .tag_out = reg)
    ),
    reg2 = target(
      reg_fun(data),
      transform = cross(data = c(small, large), .tag_out = reg)
    ),
    winners = target(
      min(reg),
      transform = combine(data),
      a = 1
    ),
    transform = FALSE
  )
  expect_equal(
    sort(out$target),
    sort(c("small", "large", "reg1", "reg2", "winners"))
  )
})
# Grouping levels of mixed types (character literal, symbol, number) in one
# cross(): names are sanitized (quotes become dots), commands keep the literal
# type, and a later transform_plan() call on bound plans resolves combine()
# across all of them. The trace variant records the deparsed level, including
# the escaped quotes for the string literal.
test_with_dir("dsl with differently typed group levels", {
  plan1 <- drake_plan(
    analysis = target(
      analyze_data(source),
      transform = cross(source = c("source1", source2, 3))
    ),
    transform = FALSE
  )
  plan2 <- drake_plan(
    reducks = target(
      combine_analyses(analysis),
      transform = combine(analysis)
    ),
    transform = FALSE
  )
  plan <- bind_plans(plan1, plan2)
  out <- transform_plan(plan, envir = environment())
  exp <- drake_plan(
    analysis_.source1. = analyze_data("source1"), # nolint
    analysis_source2 = analyze_data(source2),
    analysis_3 = analyze_data(3),
    reducks = combine_analyses(
      analysis_.source1., # nolint
      analysis_source2,
      analysis_3
    )
  )
  equivalent_plans(out, exp)
  out <- transform_plan(plan, envir = environment(), trace = TRUE)
  exp <- drake_plan(
    analysis_.source1. = target( # nolint
      command = analyze_data("source1"),
      source = "\"source1\"",
      analysis = "analysis_.source1."
    ),
    analysis_source2 = target(
      command = analyze_data(source2),
      source = "source2",
      analysis = "analysis_source2"
    ),
    analysis_3 = target(
      command = analyze_data(3),
      source = "3",
      analysis = "analysis_3"
    ),
    reducks = target(
      command = combine_analyses(
        analysis_.source1., # nolint
        analysis_source2,
        analysis_3
      ),
      reducks = "reducks"
    )
  )
  expect_true(ncol(exp) > 2)
  equivalent_plans(out, exp)
})
# Tidy evaluation in the DSL: !!rlang::syms(letters) splices a list of 26
# symbols into map(), and the substitution must reach the command, the
# trigger condition, and the custom column alike — one target per letter.
test_with_dir("tidy eval in the DSL", {
  sms <- rlang::syms(letters)
  out <- drake_plan(
    x = target(
      f(char),
      trigger = trigger(condition = g(char)),
      custom = h(char),
      transform = map(char = !!sms)
    )
  )
  # Expected: the symbol for each letter substituted into every field.
  exp <- drake_plan(
    x_a = target(
      command = f(a),
      trigger = trigger(
        condition = g(a)
      ),
      custom = h(a)
    ),
    x_b = target(
      command = f(b),
      trigger = trigger(
        condition = g(b)
      ),
      custom = h(b)
    ),
    x_c = target(
      command = f(c),
      trigger = trigger(
        condition = g(c)
      ),
      custom = h(c)
    ),
    x_d = target(
      command = f(d),
      trigger = trigger(
        condition = g(d)
      ),
      custom = h(d)
    ),
    x_e = target(
      command = f(e),
      trigger = trigger(
        condition = g(e)
      ),
      custom = h(e)
    ),
    x_f = target(
      command = f(f),
      trigger = trigger(
        condition = g(f)
      ),
      custom = h(f)
    ),
    x_g = target(
      command = f(g),
      trigger = trigger(
        condition = g(g)
      ),
      custom = h(g)
    ),
    x_h = target(
      command = f(h),
      trigger = trigger(
        condition = g(h)
      ),
      custom = h(h)
    ),
    x_i = target(
      command = f(i),
      trigger = trigger(
        condition = g(i)
      ),
      custom = h(i)
    ),
    x_j = target(
      command = f(j),
      trigger = trigger(
        condition = g(j)
      ),
      custom = h(j)
    ),
    x_k = target(
      command = f(k),
      trigger = trigger(
        condition = g(k)
      ),
      custom = h(k)
    ),
    x_l = target(
      command = f(l),
      trigger = trigger(
        condition = g(l)
      ),
      custom = h(l)
    ),
    x_m = target(
      command = f(m),
      trigger = trigger(
        condition = g(m)
      ),
      custom = h(m)
    ),
    x_n = target(
      command = f(n),
      trigger = trigger(
        condition = g(n)
      ),
      custom = h(n)
    ),
    x_o = target(
      command = f(o),
      trigger = trigger(
        condition = g(o)
      ),
      custom = h(o)
    ),
    x_p = target(
      command = f(p),
      trigger = trigger(
        condition = g(p)
      ),
      custom = h(p)
    ),
    x_q = target(
      command = f(q),
      trigger = trigger(
        condition = g(q)
      ),
      custom = h(q)
    ),
    x_r = target(
      command = f(r),
      trigger = trigger(
        condition = g(r)
      ),
      custom = h(r)
    ),
    x_s = target(
      command = f(s),
      trigger = trigger(
        condition = g(s)
      ),
      custom = h(s)
    ),
    x_t = target(
      command = f(t),
      trigger = trigger(
        condition = g(t)
      ),
      custom = h(t)
    ),
    x_u = target(
      command = f(u),
      trigger = trigger(
        condition = g(u)
      ),
      custom = h(u)
    ),
    x_v = target(
      command = f(v),
      trigger = trigger(
        condition = g(v)
      ),
      custom = h(v)
    ),
    x_w = target(
      command = f(w),
      trigger = trigger(
        condition = g(w)
      ),
      custom = h(w)
    ),
    x_x = target(
      command = f(x),
      trigger = trigger(
        condition = g(x)
      ),
      custom = h(x)
    ),
    x_y = target(
      command = f(y),
      trigger = trigger(
        condition = g(y)
      ),
      custom = h(y)
    ),
    x_z = target(
      command = f(z),
      trigger = trigger(
        condition = g(z)
      ),
      custom = h(z)
    )
  )
  equivalent_plans(out, exp)
})
# Regression check: the DSL must be able to reproduce the canonical mtcars
# example plan (my_plan from load_mtcars_example()) exactly, including the
# knitr report target with its file_out()/knitr_in() declarations.
test_with_dir("dsl: exact same plan as mtcars", {
  skip_if_not_installed("knitr")
  out <- drake_plan(
    report = knit(knitr_in("report.Rmd"), file_out("report.md"), quiet = TRUE),
    small = simulate(48),
    large = simulate(64),
    regression1 = target(
      reg1(data),
      transform = map(data = c(small, large), .tag_out = reg)
    ),
    regression2 = target(
      reg2(data),
      transform = map(data = c(small, large), .tag_out = reg)
    ),
    summ = target(
      suppressWarnings(summary(reg$residuals)),
      transform = map(reg)
    ),
    coef = target(
      suppressWarnings(summary(reg))$coefficients,
      transform = map(reg)
    )
  )
  load_mtcars_example()
  equivalent_plans(out, my_plan)
})
# combine(data_sim, .by = local): the "local" tag is absent (NA) on the
# download-only targets, and those NA levels must not spawn spurious
# aggregation groups — only the data_sim targets get summarized.
test_with_dir("dsl: no NA levels in combine()", {
  out <- drake_plan(
    data_sim = target(
      sim_data(mean = x, sd = y),
      transform = cross(x = c(1, 2), y = c(3, 4), .tag_out = c(data, local))
    ),
    data_download = target(
      download_data(url = x),
      transform = map(
        x = c("http://url_1", "http://url_2"),
        .tag_out = c(real, data)
      )
    ),
    data_pkg = target(
      load_data_from_package(pkg = x),
      transform = map(
        x = c("gapminder", "Ecdat"),
        .tag_out = c(local, real, data)
      )
    ),
    summaries = target(
      compare_ds(data_sim),
      transform = combine(data_sim, .by = local)
    )
  )
  exp <- drake_plan(
    data_sim_1_3 = sim_data(mean = 1, sd = 3),
    data_sim_2_3 = sim_data(mean = 2, sd = 3),
    data_sim_1_4 = sim_data(mean = 1, sd = 4),
    data_sim_2_4 = sim_data(mean = 2, sd = 4),
    data_download_.http...url_1. = download_data(url = "http://url_1"),
    data_download_.http...url_2. = download_data(url = "http://url_2"),
    data_pkg_.gapminder. = load_data_from_package(pkg = "gapminder"),
    data_pkg_.Ecdat. = load_data_from_package(pkg = "Ecdat"),
    summaries_data_sim_1_3 = compare_ds(data_sim_1_3),
    summaries_data_sim_1_4 = compare_ds(data_sim_1_4),
    summaries_data_sim_2_3 = compare_ds(data_sim_2_3),
    summaries_data_sim_2_4 = compare_ds(data_sim_2_4)
  )
  equivalent_plans(out, exp)
})
# Deep provenance check on the trace columns: through a long chain of map(),
# cross(), and combine() steps with duplicated levels, each target's trace
# must reflect the full lineage back to its original grouping levels — and
# cross() targets whose two parents come from *different* replicates (e.g.
# d_b_a_1_3_c_b_a_1_3_2) carry only the trace entries the parents agree on.
test_with_dir("trace has correct provenance", {
  out <- drake_plan(
    trace = TRUE,
    a = target(x, transform = map(x = c(1, 1), y = c(3, 3))),
    b = target(a, transform = map(a)),
    c = target(b, transform = map(b)),
    d = target(b, transform = cross(b, c)),
    e = target(c, transform = map(c)),
    f = target(c, transform = map(c)),
    g = target(b, transform = map(b)),
    h = target(a, transform = map(a)),
    i = target(e, transform = combine(e)),
    j = target(f, transform = combine(f))
  )
  exp <- drake_plan(
    a_1_3 = target(
      command = 1,
      x = "1",
      y = "3",
      a = "a_1_3"
    ),
    a_1_3_2 = target(
      command = 1,
      x = "1",
      y = "3",
      a = "a_1_3_2"
    ),
    b_a_1_3 = target(
      command = a_1_3,
      x = "1",
      y = "3",
      a = "a_1_3",
      b = "b_a_1_3"
    ),
    b_a_1_3_2 = target(
      command = a_1_3_2,
      x = "1",
      y = "3",
      a = "a_1_3_2",
      b = "b_a_1_3_2"
    ),
    c_b_a_1_3 = target(
      command = b_a_1_3,
      x = "1",
      y = "3",
      a = "a_1_3",
      b = "b_a_1_3",
      c = "c_b_a_1_3"
    ),
    c_b_a_1_3_2 = target(
      command = b_a_1_3_2,
      x = "1",
      y = "3",
      a = "a_1_3_2",
      b = "b_a_1_3_2",
      c = "c_b_a_1_3_2"
    ),
    d_b_a_1_3_c_b_a_1_3 = target(
      command = b_a_1_3,
      x = "1",
      y = "3",
      a = "a_1_3",
      b = "b_a_1_3",
      c = "c_b_a_1_3",
      d = "d_b_a_1_3_c_b_a_1_3"
    ),
    d_b_a_1_3_c_b_a_1_3_2 = target(
      command = b_a_1_3,
      b = "b_a_1_3",
      c = "c_b_a_1_3_2",
      d = "d_b_a_1_3_c_b_a_1_3_2"
    ),
    d_b_a_1_3_2_c_b_a_1_3 = target(
      command = b_a_1_3_2,
      b = "b_a_1_3_2",
      c = "c_b_a_1_3",
      d = "d_b_a_1_3_2_c_b_a_1_3"
    ),
    d_b_a_1_3_2_c_b_a_1_3_2 = target(
      command = b_a_1_3_2,
      x = "1",
      y = "3",
      a = "a_1_3_2",
      b = "b_a_1_3_2",
      c = "c_b_a_1_3_2",
      d = "d_b_a_1_3_2_c_b_a_1_3_2"
    ),
    e_c_b_a_1_3 = target(
      command = c_b_a_1_3,
      x = "1",
      y = "3",
      a = "a_1_3",
      b = "b_a_1_3",
      c = "c_b_a_1_3",
      e = "e_c_b_a_1_3"
    ),
    e_c_b_a_1_3_2 = target(
      command = c_b_a_1_3_2,
      x = "1",
      y = "3",
      a = "a_1_3_2",
      b = "b_a_1_3_2",
      c = "c_b_a_1_3_2",
      e = "e_c_b_a_1_3_2"
    ),
    f_c_b_a_1_3 = target(
      command = c_b_a_1_3,
      x = "1",
      y = "3",
      a = "a_1_3",
      b = "b_a_1_3",
      c = "c_b_a_1_3",
      f = "f_c_b_a_1_3"
    ),
    f_c_b_a_1_3_2 = target(
      command = c_b_a_1_3_2,
      x = "1",
      y = "3",
      a = "a_1_3_2",
      b = "b_a_1_3_2",
      c = "c_b_a_1_3_2",
      f = "f_c_b_a_1_3_2"
    ),
    g_b_a_1_3 = target(
      command = b_a_1_3,
      x = "1",
      y = "3",
      a = "a_1_3",
      b = "b_a_1_3",
      g = "g_b_a_1_3"
    ),
    g_b_a_1_3_2 = target(
      command = b_a_1_3_2,
      x = "1",
      y = "3",
      a = "a_1_3_2",
      b = "b_a_1_3_2",
      g = "g_b_a_1_3_2"
    ),
    h_a_1_3 = target(
      command = a_1_3,
      x = "1",
      y = "3",
      a = "a_1_3",
      h = "h_a_1_3"
    ),
    h_a_1_3_2 = target(
      command = a_1_3_2,
      x = "1",
      y = "3",
      a = "a_1_3_2",
      h = "h_a_1_3_2"
    ),
    i = target(
      command = list(e_c_b_a_1_3, e_c_b_a_1_3_2),
      i = "i"
    ),
    j = target(
      command = list(f_c_b_a_1_3, f_c_b_a_1_3_2),
      j = "j"
    )
  )
  equivalent_plans(out, exp)
})
# Target declaration order in drake_plan() must not affect the transformed
# result: the same mtcars-style plan written in two different orders (with
# downstream targets declared before their dependencies) expands identically.
test_with_dir("row order does not matter", {
  plan1 <- drake_plan(
    coef = target(
      suppressWarnings(summary(reg))$coefficients,
      transform = map(reg)
    ),
    summ = target(
      suppressWarnings(summary(reg$residuals)),
      transform = map(reg)
    ),
    report = knit(knitr_in("report.Rmd"), file_out("report.md"), quiet = TRUE),
    regression1 = target(
      reg1(data),
      transform = map(data = c(small, large), .tag_out = reg)
    ),
    regression2 = target(
      reg2(data),
      transform = map(data = c(small, large), .tag_out = reg)
    ),
    small = simulate(48),
    large = simulate(64),
    trace = TRUE
  )
  plan2 <- drake_plan(
    small = simulate(48),
    large = simulate(64),
    report = knit(knitr_in("report.Rmd"), file_out("report.md"), quiet = TRUE),
    regression2 = target(
      reg2(data),
      transform = map(data = c(small, large), .tag_out = reg)
    ),
    regression1 = target(
      reg1(data),
      transform = map(data = c(small, large), .tag_out = reg)
    ),
    summ = target(
      suppressWarnings(summary(reg$residuals)),
      transform = map(reg)
    ),
    coef = target(
      suppressWarnings(summary(reg))$coefficients,
      transform = map(reg)
    ),
    trace = TRUE
  )
  expect_equal(nrow(plan1), 15L)
  equivalent_plans(plan1, plan2)
})
# Same order-invariance property on a more complex plan that includes cross()
# and combine(.by) steps; a combine() declared before its inputs must still
# resolve once expansion completes.
test_with_dir("same test (row order) different plan", {
  plan1 <- drake_plan(
    small = simulate(48),
    large = simulate(64),
    reg = target(
      reg_fun(data),
      transform = cross(reg_fun = c(reg1, reg2), data = c(small, large))
    ),
    summ = target(
      sum_fun(data, reg),
      transform = cross(sum_fun = c(coef, residuals), reg)
    ),
    winners = target(
      min(summ),
      transform = combine(summ, .by = c(data, sum_fun))
    ),
    others = target(
      analyze(list(c(summ), c(data))),
      transform = combine(
        summ,
        data,
        .by = c(data, sum_fun)
      )
    ),
    final_winner = target(
      min(winners),
      transform = combine(winners)
    )
  )
  plan2 <- drake_plan(
    final_winner = target(
      min(winners),
      transform = combine(winners)
    ),
    reg = target(
      reg_fun(data),
      transform = cross(reg_fun = c(reg1, reg2), data = c(small, large))
    ),
    small = simulate(48),
    summ = target(
      sum_fun(data, reg),
      transform = cross(sum_fun = c(coef, residuals), reg)
    ),
    others = target(
      analyze(list(c(summ), c(data))),
      transform = combine(
        summ,
        data,
        .by = c(data, sum_fun)
      )
    ),
    winners = target(
      min(summ),
      transform = combine(summ, .by = c(data, sum_fun))
    ),
    large = simulate(64)
  )
  expect_equal(nrow(plan1), 23L)
  equivalent_plans(plan1, plan2)
})
# Regression test for GitHub issue #696: a quosure built outside drake_plan()
# (with file_in()/file_out() declarations and spliced arguments) and injected
# via !! must expand correctly inside map().
test_with_dir("gh #696", {
  # Builds a quoted block that splits `from` into n chunk files named
  # stem + aa, ab, ... Uses quo/quo_squash so !!from etc. are substituted
  # before the expression is spliced into the plan.
  my_split <- function(from, stem, n) {
    suffixes <- with(
      expand.grid(y = letters, x = letters),
      paste0(x, y)
    )[1:n]
    out.files <- paste0(stem, suffixes)
    out <- rlang::quo({
      file_in(!!from)
      file_out(!!out.files)
      system2(
        "split",
        c(paste0("-n r/", !!n),
          !!from,
          !!stem)
      )
    })
    out <- quo_squash(out)  # NOTE(review): presumably rlang::quo_squash — relies on rlang being attached
  }
  manysplits <- paste0("lf", 1:2, ".txt")
  out <- drake_plan(
    splits = target(!!my_split(f, f, 3), transform = map(f = !!manysplits))
  )
  exp <- drake_plan(
    splits_.lf1.txt. = {
      file_in("lf1.txt")
      file_out(c("lf1.txtaa", "lf1.txtab", "lf1.txtac"))
      system2("split", c(paste0("-n r/", 3), "lf1.txt", "lf1.txt"))
    },
    splits_.lf2.txt. = {
      file_in("lf2.txt")
      file_out(c("lf2.txtaa", "lf2.txtab", "lf2.txtac"))
      system2("split", c(paste0("-n r/", 3), "lf2.txt", "lf2.txt"))
    }
  )
  equivalent_plans(out, exp)
})
# Transformations must also expand inside trigger() expressions: every map(),
# cross(), and combine() substitution applied to the command is applied to the
# change trigger as well. Targets without an explicit trigger get trigger = NA.
test_with_dir("transformations in triggers", {
  out <- drake_plan(
    small = simulate(48),
    large = simulate(64),
    reg = target(
      reg_fun(data),
      trigger = trigger(change = reg_fun(data)),
      transform = cross(reg_fun = c(reg1, reg2), data = c(small, large))
    ),
    summ = target(
      sum_fun(data, reg),
      trigger = trigger(change = sum_fun(data, reg)),
      transform = cross(sum_fun = c(coef, residuals), reg)
    ),
    winners = target(
      min(summ),
      trigger = trigger(change = min(summ)),
      transform = combine(summ, .by = c(data, sum_fun))
    ),
    others = target(
      analyze(list(c(summ), c(data))),
      trigger = trigger(change = analyze(list(c(summ), c(data)))),
      transform = combine(
        summ,
        data,
        .by = c(data, sum_fun)
      )
    ),
    final_winner = target(
      min(winners),
      trigger = trigger(change = min(winners)),
      transform = combine(winners)
    )
  )
  exp <- drake_plan(
    small = target(
      command = simulate(48),
      trigger = NA
    ),
    large = target(
      command = simulate(64),
      trigger = NA
    ),
    reg_reg1_small = target(
      command = reg1(small),
      trigger = trigger(
        change = reg1(small)
      )
    ),
    reg_reg2_small = target(
      command = reg2(small),
      trigger = trigger(
        change = reg2(small)
      )
    ),
    reg_reg1_large = target(
      command = reg1(large),
      trigger = trigger(
        change = reg1(large)
      )
    ),
    reg_reg2_large = target(
      command = reg2(large),
      trigger = trigger(
        change = reg2(large)
      )
    ),
    summ_coef_reg_reg1_large = target(
      command = coef(large, reg_reg1_large),
      trigger = trigger(
        change = coef(large, reg_reg1_large)
      )
    ),
    summ_residuals_reg_reg1_large = target(
      command = residuals(large, reg_reg1_large),
      trigger = trigger(
        change = residuals(large, reg_reg1_large)
      )
    ),
    summ_coef_reg_reg1_small = target(
      command = coef(small, reg_reg1_small),
      trigger = trigger(
        change = coef(small, reg_reg1_small)
      )
    ),
    summ_residuals_reg_reg1_small = target(
      command = residuals(small, reg_reg1_small),
      trigger = trigger(
        change = residuals(small, reg_reg1_small)
      )
    ),
    summ_coef_reg_reg2_large = target(
      command = coef(large, reg_reg2_large),
      trigger = trigger(
        change = coef(large, reg_reg2_large)
      )
    ),
    summ_residuals_reg_reg2_large = target(
      command = residuals(large, reg_reg2_large),
      trigger = trigger(
        change = residuals(large, reg_reg2_large)
      )
    ),
    summ_coef_reg_reg2_small = target(
      command = coef(small, reg_reg2_small),
      trigger = trigger(
        change = coef(small, reg_reg2_small)
      )
    ),
    summ_residuals_reg_reg2_small = target(
      command = residuals(small, reg_reg2_small),
      trigger = trigger(
        change = residuals(small, reg_reg2_small)
      )
    ),
    winners_large_coef = target(
      command = min(
        summ_coef_reg_reg1_large,
        summ_coef_reg_reg2_large
      ),
      trigger = trigger(
        change = min(
          summ_coef_reg_reg1_large,
          summ_coef_reg_reg2_large
        )
      )
    ),
    winners_small_coef = target(
      command = min(
        summ_coef_reg_reg1_small,
        summ_coef_reg_reg2_small
      ),
      trigger = trigger(
        change = min(
          summ_coef_reg_reg1_small,
          summ_coef_reg_reg2_small
        )
      )
    ),
    winners_large_residuals = target(
      command = min(
        summ_residuals_reg_reg1_large,
        summ_residuals_reg_reg2_large
      ),
      trigger = trigger(
        change = min(
          summ_residuals_reg_reg1_large,
          summ_residuals_reg_reg2_large
        )
      )
    ),
    winners_small_residuals = target(
      command = min(
        summ_residuals_reg_reg1_small,
        summ_residuals_reg_reg2_small
      ),
      trigger = trigger(
        change = min(
          summ_residuals_reg_reg1_small,
          summ_residuals_reg_reg2_small
        )
      )
    ),
    others_large_coef = target(
      command = analyze(list(
        c(summ_coef_reg_reg1_large, summ_coef_reg_reg2_large),
        c(large)
      )),
      trigger = trigger(
        change = analyze(list(
          c(summ_coef_reg_reg1_large, summ_coef_reg_reg2_large),
          c(large)
        ))
      )
    ),
    others_small_coef = target(
      command = analyze(list(
        c(summ_coef_reg_reg1_small, summ_coef_reg_reg2_small),
        c(small)
      )),
      trigger = trigger(
        change = analyze(list(
          c(summ_coef_reg_reg1_small, summ_coef_reg_reg2_small),
          c(small)
        ))
      )
    ),
    others_large_residuals = target(
      command = analyze(list(
        c(summ_residuals_reg_reg1_large, summ_residuals_reg_reg2_large),
        c(large)
      )),
      trigger = trigger(
        change = analyze(list(
          c(summ_residuals_reg_reg1_large, summ_residuals_reg_reg2_large),
          c(large)
        ))
      )
    ),
    others_small_residuals = target(
      command = analyze(list(
        c(summ_residuals_reg_reg1_small, summ_residuals_reg_reg2_small),
        c(small)
      )),
      trigger = trigger(
        change = analyze(list(
          c(summ_residuals_reg_reg1_small, summ_residuals_reg_reg2_small),
          c(small)
        ))
      )
    ),
    final_winner = target(
      command = min(
        winners_large_coef, winners_small_coef, winners_large_residuals,
        winners_small_residuals
      ),
      trigger = trigger(
        change = min(
          winners_large_coef, winners_small_coef, winners_large_residuals,
          winners_small_residuals
        )
      )
    )
  )
  equivalent_plans(out, exp)
})
# With .id = FALSE, generated targets get plain numeric suffixes
# (a_2, a_3, ...) instead of suffixes built from the grouping values.
test_with_dir(".id = FALSE", {
  x_ <- letters[1:2]
  y_ <- letters[3:4]
  z_ <- letters[11:14]
  out <- drake_plan(
    a = target(c(x, y), transform = cross(x = !!x_, y = !!y_, .id = FALSE)),
    b = target(c(a, z), transform = map(a, z = !!z_, .id = FALSE)),
    d = target(b, transform = combine(b, .by = x, .id = FALSE))
  )
  exp <- drake_plan(
    a = c("a", "c"),
    a_2 = c("b", "c"),
    a_3 = c("a", "d"),
    a_4 = c("b", "d"),
    b = c(a, "k"),
    b_2 = c(a_2, "l"),
    b_3 = c(a_3, "m"),
    b_4 = c(a_4, "n"),
    d = list(b, b_3),
    d_2 = list(b_2, b_4)
  )
  equivalent_plans(out, exp)
})
# .id can select specific grouping variables (symbols), and map() should
# pick up grouping variables introduced upstream by cross(). The expected
# plan shows that an unknown .id symbol (`bad` in combine()) falls back to
# numeric suffixes — presumably ignored; confirm against drake docs.
test_with_dir("(1) .id = syms. (2) map() finds the correct cross() syms", {
  x_ <- letters[1:2]
  y_ <- letters[3:4]
  z_ <- letters[11:12]
  out <- drake_plan(
    A = target(
      c(x, y, z),
      transform = cross(x = !!x_, y = !!y_, z = !!z_, .id = z)
    ),
    B = target(c(A, y, z), transform = map(A, y, z, .id = c(y, z))),
    C = target(B, transform = combine(B, .by = c(x, y), .id = bad))
  )
  # nolint start
  exp <- drake_plan(
    A_.k. = c("a", "c", "k"),
    A_.k._2 = c("b", "c", "k"),
    A_.k._3 = c("a", "d", "k"),
    A_.k._4 = c("b", "d", "k"),
    A_.l. = c("a", "c", "l"),
    A_.l._2 = c("b", "c", "l"),
    A_.l._3 = c("a", "d", "l"),
    A_.l._4 = c("b", "d", "l"),
    B_.c._.k. = c(A_.k., "c", "k"),
    B_.c._.k._2 = c(A_.k._2, "c", "k"),
    B_.d._.k. = c(A_.k._3, "d", "k"),
    B_.d._.k._2 = c(A_.k._4, "d", "k"),
    B_.c._.l. = c(A_.l., "c", "l"),
    B_.c._.l._2 = c(A_.l._2, "c", "l"),
    B_.d._.l. = c(A_.l._3, "d", "l"),
    B_.d._.l._2 = c(A_.l._4, "d", "l"),
    C = list(B_.c._.k., B_.c._.l.),
    C_2 = list(B_.c._.k._2, B_.c._.l._2),
    C_3 = list(B_.d._.k., B_.d._.l.),
    C_4 = list(B_.d._.k._2, B_.d._.l._2)
  )
  # nolint end
  equivalent_plans(out, exp)
})
# Grouping values from an upstream cross() remain available to downstream
# map()/combine() transforms, and appear in the generated target names.
test_with_dir("upstream .id columns are available", {
  factor_a_ <- as.character(c(4, 5, 6, 7, 8))
  factor_b_ <- "2"
  out <- drake_plan(
    raw_data = get_data(),
    data = clean_data(raw_data),
    analysis = target(
      data %>%
        filter(factor_a == factor_a_ & factor_b == factor_b_),
      transform = cross(factor_a_ = !!factor_a_, factor_b_ = !!factor_b_)
    ),
    summary = target(
      my_summarize(analysis),
      transform = map(analysis, .id = c(factor_a_, factor_b_))
    ),
    results = target(bind_rows(summary), transform = combine(summary))
  )
  # nolint start
  exp <- drake_plan(
    raw_data = get_data(),
    data = clean_data(raw_data),
    analysis_.4._.2. = data %>% filter(factor_a == "4" & factor_b == "2"),
    analysis_.5._.2. = data %>% filter(factor_a == "5" & factor_b == "2"),
    analysis_.6._.2. = data %>% filter(factor_a == "6" & factor_b == "2"),
    analysis_.7._.2. = data %>% filter(factor_a == "7" & factor_b == "2"),
    analysis_.8._.2. = data %>% filter(factor_a == "8" & factor_b == "2"),
    summary_.4._.2. = my_summarize(analysis_.4._.2.),
    summary_.5._.2. = my_summarize(analysis_.5._.2.),
    summary_.6._.2. = my_summarize(analysis_.6._.2.),
    summary_.7._.2. = my_summarize(analysis_.7._.2.),
    summary_.8._.2. = my_summarize(analysis_.8._.2.),
    results = bind_rows(
      summary_.4._.2., summary_.5._.2., summary_.6._.2.,
      summary_.7._.2., summary_.8._.2.
    )
  )
  # nolint end
  equivalent_plans(out, exp)
})
# Duplicate grouping values (rep("a", 2)) must still produce distinct
# targets, not silently deduplicated ones.
test_with_dir("repeated maps do not duplicate targets", {
  x_ <- rep("a", 2)
  y_ <- rep("b", 2)
  out <- drake_plan(
    A = target(x, transform = map(x = !!x_, .id = FALSE)),
    B = target(c(A, x), transform = map(A, x, .id = FALSE)),
    C = target(y, transform = map(y = !!y_, .id = FALSE)),
    D = target(c(A, B, C, x, y), transform = map(A, B, C, x, y, .id = FALSE))
  )
  exp <- drake_plan(
    A = "a",
    A_2 = "a",
    B = c(A, "a"),
    B_2 = c(A_2, "a"),
    C = "b",
    C_2 = "b",
    D = c(A, B, C, "a", "b"),
    D_2 = c(A_2, B_2, C_2, "a", "b")
  )
  equivalent_plans(out, exp)
})
# Trace variables with fewer distinct values than targets (type has two
# values over four inputs) should group correctly and not duplicate targets.
test_with_dir("unequal trace vars are not duplicated in map()", {
  inputs <- lapply(LETTERS[1:4], as.symbol)
  types <- rep(c(1, 2), each = 2)
  out <- drake_plan(
    wide1 = target(
      ez_parallel(a),
      transform = map(a = !!inputs, type = !!types) ),
    prelim = target(
      preliminary(wide1),
      transform = combine(wide1, .by = type) ),
    main = target(
      expensive_calc(prelim),
      transform = map(prelim)
    ),
    format = target(
      postformat(prelim, main),
      transform = map(prelim, main)
    )
  )
  exp <- drake_plan(
    wide1_A_1 = ez_parallel(A),
    wide1_B_1 = ez_parallel(B),
    wide1_C_2 = ez_parallel(C),
    wide1_D_2 = ez_parallel(D),
    prelim_1 = preliminary(wide1_A_1, wide1_B_1),
    prelim_2 = preliminary(wide1_C_2, wide1_D_2),
    main_prelim_1 = expensive_calc(prelim_1),
    main_prelim_2 = expensive_calc(prelim_2),
    format_prelim_1_main_prelim_1 = postformat(prelim_1, main_prelim_1),
    format_prelim_2_main_prelim_2 = postformat(prelim_2, main_prelim_2)
  )
  equivalent_plans(out, exp)
})
# End-to-end check: actually run a plan with combine() and verify the
# aggregated values, not just the generated plan.
test_with_dir("commands from combine() produce the correct values", {
  skip_on_cran()
  x_ <- letters[1:2]
  plan <- drake_plan(
    A = target(x, transform = map(x = !!x_)),
    B = target(A, transform = combine(A)),
    C = target(list(A), transform = combine(A)),
    trace = TRUE
  )
  cache <- storr::storr_environment()
  make(plan, cache = cache, session_info = FALSE)
  exp <- list("a", "b")
  expect_equal(unname(readd(B, cache = cache)), exp)
  expect_equal(unname(readd(C, cache = cache)), exp)
})
# .data accepts a data frame grid whose rows are consumed positionally
# alongside the other map() variables (symbols via rlang::syms).
test_with_dir("grids", {
  grid <- data.frame(
    z = c(5, 6),
    w = c("7", "8"),
    v = c("a", "b"),
    stringsAsFactors = FALSE
  )
  grid$v <- rlang::syms(grid$v)
  out <- drake_plan(
    a = target(
      1 + f(x, y, z, w, v),
      transform = map(x = c(1, 2), y = c(3, 4), .data = !!grid)
    )
  )
  exp <- drake_plan(
    a_1_3_5_.7._a = 1 + f(1, 3, 5, "7", a),
    a_2_4_6_.8._b = 1 + f(2, 4, 6, "8", b)
  )
  equivalent_plans(out, exp)
})
# A zero-row grid yields an empty plan plus a warning about the empty
# grouping/splitting variable.
test_with_dir("empty grids", {
  grid <- data.frame(
    z = c(5, 6),
    w = c("7", "8"),
    v = c("a", "b"),
    stringsAsFactors = FALSE
  )
  grid$v <- rlang::syms(grid$v)
  expect_warning(
    out <- drake_plan(
      a = target(
        1 + f(x, y, z, w, v),
        transform = map(
          x = c(),
          y = c(),
          .data = !!grid[logical(0), , drop = FALSE] # nolint
        )
      )
    ),
    regexp = "grouping or splitting variable"
  )
  equivalent_plans(out, drake_plan())
})
# Regression test: a filtered (non-full-factorial) expand.grid() must be
# honored row-by-row — G2/R5 and G2/R6 are removed and must not reappear.
test_with_dir("grid for GitHub issue 697", {
  grid <- expand.grid(
    group = c("G1", "G2"),
    rep = c("R1", "R2", "R3", "R4", "R5", "R6"),
    stringsAsFactors = FALSE
  )
  grid <- grid[!(grid$group == "G2" & grid$rep %in% c("R5", "R6")), ]
  out <- drake_plan(
    s_load = target(load_csv(group, rep), transform = map(.data = !!grid))
  )
  exp <- drake_plan(
    s_load_.G1._.R1. = load_csv("G1", "R1"),
    s_load_.G2._.R1. = load_csv("G2", "R1"),
    s_load_.G1._.R2. = load_csv("G1", "R2"),
    s_load_.G2._.R2. = load_csv("G2", "R2"),
    s_load_.G1._.R3. = load_csv("G1", "R3"),
    s_load_.G2._.R3. = load_csv("G2", "R3"),
    s_load_.G1._.R4. = load_csv("G1", "R4"),
    s_load_.G2._.R4. = load_csv("G2", "R4"),
    s_load_.G1._.R5. = load_csv("G1", "R5"),
    s_load_.G1._.R6. = load_csv("G1", "R6")
  )
  equivalent_plans(out, exp)
})
# Regression test: a grid of symbols can reference targets generated by
# earlier map()/combine() transforms (serial_1/2, wide_A..E).
test_with_dir("grid for GitHub issue 710", {
  inputs <- lapply(LETTERS[1:5], as.symbol)
  types <- rep(c(1, 2), length.out = 5)
  df <- data.frame(
    serial_ = paste0("serial_", types),
    wide_ = paste0("wide_", inputs),
    stringsAsFactors = FALSE
  )
  for (col in colnames(df)) {
    df[[col]] <- rlang::syms(df[[col]])
  }
  out <- drake_plan(
    wide = target(
      ez_parallel(a),
      transform = map(a = !!inputs, type = !!types)
    ),
    serial = target(
      expensive_calc(wide),
      transform = combine(wide, .by = type)
    ),
    dist = target(
      distribute_results(serial_, wide_),
      transform = map(.data = !!df)
    )
  )
  exp <- drake_plan(
    wide_A_1 = ez_parallel(A),
    wide_B_2 = ez_parallel(B),
    wide_C_1 = ez_parallel(C),
    wide_D_2 = ez_parallel(D),
    wide_E_1 = ez_parallel(E),
    serial_1 = expensive_calc(wide_A_1, wide_C_1, wide_E_1),
    serial_2 = expensive_calc(wide_B_2, wide_D_2),
    dist_serial_1_wide_A = distribute_results(serial_1, wide_A),
    dist_serial_2_wide_B = distribute_results(serial_2, wide_B),
    dist_serial_1_wide_C = distribute_results(serial_1, wide_C),
    dist_serial_2_wide_D = distribute_results(serial_2, wide_D),
    dist_serial_1_wide_E = distribute_results(serial_1, wide_E)
  )
  equivalent_plans(out, exp)
})
# combine() must splice the grouped targets into a command where the
# aggregated symbol appears bare (as a call argument).
test_with_dir("combine() with symbols instead of calls", {
  out <- drake_plan(
    data = target(
      get_data(param),
      transform = map(param = c(1, 2))
    ),
    results = target(
      .data %>%
        select(data),
      transform = combine(data)
    )
  )
  exp <- drake_plan(
    data_1 = get_data(1),
    data_2 = get_data(2),
    results = .data %>% select(data_1, data_2)
  )
  equivalent_plans(out, exp)
})
# The splice must also work deep inside nested calls with other positional
# and named arguments around it (min(0, data, na.rm = FALSE)).
test_with_dir("combine() with complicated calls", {
  out <- drake_plan(
    data = target(
      get_data(param),
      transform = map(param = c(1, 2))
    ),
    results = target(
      .data %>%
        c(min(0, data, na.rm = FALSE), 2),
      transform = combine(data)
    )
  )
  exp <- drake_plan(
    data_1 = get_data(1),
    data_2 = get_data(2),
    results = .data %>% c(min(0, data_1, data_2, na.rm = FALSE), 2)
  )
  equivalent_plans(out, exp)
})
# Splitting by a column that holds no usable grouping values (all NA) warns
# and drops the downstream target; only the mapped targets survive.
test_with_dir("invalid splitting var", {
  expect_warning(
    out <- drake_plan(
      data = target(x, transform = map(x = c(1, 2)), nothing = NA),
      results = target(
        data,
        transform = combine(data, .by = nothing)
      )
    ),
    regexp = "grouping or splitting variable"
  )
  out <- out[, c("target", "command")]
  exp <- drake_plan(
    data_1 = 1,
    data_2 = 2
  )
  equivalent_plans(out, exp)
})
# When combine(.by = ...) joins a map() (2 combos) with a cross()
# (4 combos), only the (x, y) pairs present in BOTH upstream transforms
# yield combined targets.
test_with_dir("uneven combinations", {
  out <- drake_plan(
    data1 = target(
      sim_data1(mean = x, sd = y, skew = z),
      transform = map(x = c(1, 2), y = c(3, 4))
    ),
    data2 = target(
      sim_data2(mean = x, sd = y, skew = z),
      transform = cross(x = c(1, 2), y = c(3, 4))
    ),
    combined = target(
      bind_rows(data1, data2, .id = "id") %>%
        arrange(sd) %>%
        head(n = 400),
      transform = combine(data1, data2, .by = c(x, y))
    )
  )
  exp <- drake_plan(
    data1_1_3 = sim_data1(mean = 1, sd = 3, skew = z),
    data1_2_4 = sim_data1(mean = 2, sd = 4, skew = z),
    data2_1_3 = sim_data2(mean = 1, sd = 3, skew = z),
    data2_2_3 = sim_data2(mean = 2, sd = 3, skew = z),
    data2_1_4 = sim_data2(mean = 1, sd = 4, skew = z),
    data2_2_4 = sim_data2(mean = 2, sd = 4, skew = z),
    combined_1_3 = bind_rows(data1_1_3, data2_1_3, .id = "id") %>%
      arrange(sd) %>%
      head(n = 400),
    combined_2_4 = bind_rows(data1_2_4, data2_2_4, .id = "id") %>%
      arrange(sd) %>%
      head(n = 400)
  )
  equivalent_plans(out, exp)
})
# Date values injected with !! must survive the DSL round-trip with their
# Date class intact (not be coerced to numeric).
test_with_dir("dates in the DSL", {
  skip_on_cran()
  dates <- seq(as.Date("2019-01-01"), as.Date("2019-01-03"), by = 1)
  plan <- drake_plan(
    y = target(d, transform = map(d = !!dates, .id = FALSE))
  )
  cache <- storr::storr_environment()
  make(plan, cache = cache, session_info = FALSE)
  expect_true(inherits(cache$get("y"), "Date"))
})
|
0f58141e1c23219275b1b1e28b25c170b1b98b78
|
8b7fe1cfee5ef609a78b28ebb2e994bc100b3811
|
/man/ensembl2hgnc.Rd
|
d6bb9d8c4a46d1eca3bbb7b9d86beed8757e259e
|
[
"MIT"
] |
permissive
|
letaylor/bioutils
|
98b17f8dc21422be4f4ffda26fcff86ce86a823c
|
cec31913d86a5beec2449b85e45dbf70718354b7
|
refs/heads/master
| 2020-04-05T14:14:45.284994
| 2019-04-04T21:24:35
| 2019-04-04T21:24:35
| 156,920,447
| 0
| 1
| null | null | null | null |
UTF-8
|
R
| false
| true
| 828
|
rd
|
ensembl2hgnc.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/ensembl2hgnc.R
\name{ensembl2hgnc}
\alias{ensembl2hgnc}
\title{Ensembl IDs to HGNC symbols}
\usage{
ensembl2hgnc(ensembl_gene_ids, host = "grch37.ensembl.org",
drop_dot_ensembl_id = TRUE)
}
\arguments{
\item{ensembl_gene_ids}{Character vector.
List of Ensembl gene ids to get hgnc symbols for.}
\item{host}{Character.
Ensembl biomaRt host.}
\item{drop_dot_ensembl_id}{Logical.
Drop "." from ENSG00000072310.12}
}
\value{
Character.
The corresponding hgnc symbols to the Ensembl ids.
}
\description{
\code{ensembl2hgnc} converts Ensembl gene ids to hgnc symbols. If no
hgnc symbol then uses external_gene_name. If no external_gene_name then
uses Ensembl gene id.
}
\examples{
ensembl2hgnc(c("ENSG00000109501", "ENSG00000058453", "ENSG00000030066"))
}
|
4e50d3608c0cb044d346d265163ce336e92a4169
|
eda5858803afac2edd220a3edaf3fb8aa64c7b48
|
/R/first-last.R
|
0d6a2005691afd94f44bc83865bd24ade7d5a57d
|
[
"MIT"
] |
permissive
|
echukwuka/tidytable
|
2a64e38b7971f71aee1f58ae535f9431cd600cf7
|
29136342ef2ea6f99a0a5b8e1a97fd163e405388
|
refs/heads/main
| 2023-06-03T08:38:56.814415
| 2021-06-21T15:49:52
| 2021-06-21T15:49:52
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 350
|
r
|
first-last.R
|
#' Extract the first or last value from a vector
#'
#' @description
#' Extract the first or last value from a vector.
#'
#' @param x A vector
#'
#' @return A size-1 slice of `x`, extracted with `vctrs::vec_slice()`
#'   (so list and data-frame inputs are sliced, not simplified).
#'
#' @export
#'
#' @examples
#' vec <- letters
#'
#' first.(vec)
#' last.(vec)
first. <- function(x) {
  vec_slice(x, 1L)
}
#' @rdname first.
#' @export
last. <- function(x) {
  # vec_size() is the vctrs generalization of length()/nrow().
  vec_slice(x, vec_size(x))
}
|
5e1b850e3ee9818124313380fe41c7cdb32572fa
|
4f9015f385c8a02ff258414ba931952afb1e6fac
|
/R-libraries/spm/R/spm.zeroFun.R
|
a62f253f523e696871044a878d184ce094dcd465
|
[] |
no_license
|
NIWAFisheriesModelling/SPM
|
0de0defd30ccc92b47612fa93946ef876196c238
|
0412f06b19973d728afb09394419df582f1ecbe4
|
refs/heads/master
| 2021-06-06T00:15:07.548068
| 2021-05-27T06:07:46
| 2021-05-27T06:07:46
| 21,840,937
| 6
| 3
| null | null | null | null |
UTF-8
|
R
| false
| false
| 160
|
r
|
spm.zeroFun.R
|
#' Smooth zero-floor utility
#'
#' Leaves values at or above \code{delta} unchanged and maps smaller values
#' onto the smooth positive penalty \code{delta / (2 - x / delta)}, keeping
#' the result strictly positive.
#'
#' @param x Numeric vector.
#' @param delta Positive threshold below which the penalty applies.
#'
#' @author Alistair Dunn
#'
spm.zeroFun <- function(x, delta = 1e-11) {
  # Vectorized over x; NA elements propagate through ifelse().
  ifelse(x >= delta, x, delta / (2 - x / delta))
}
|
69559e793fcdc67841490612c2102168a0b463d8
|
b79edeeacb7c68a5d9de393b7c1979cc3efa9d18
|
/DataHandling/fbi_data_handling.R
|
edcb381461334dcbef5a8ec3a1e04c750489eb90
|
[] |
no_license
|
ds-elections/anti-muslim-sentiment
|
df42c7612b98a72b196ee6eab09dc2a3612fdbda
|
15b04c34c35ec74ee73b0adf3e28010980accbbc
|
refs/heads/master
| 2021-01-18T16:53:08.003947
| 2017-06-01T03:24:26
| 2017-06-01T03:24:26
| 86,778,861
| 0
| 2
| null | null | null | null |
UTF-8
|
R
| false
| false
| 7,004
|
r
|
fbi_data_handling.R
|
library(tidyverse)
library(lubridate)
library(dplyr)
# Read the raw fixed-width FBI hate-crime master file, one record per line.
# NOTE(review): hardcoded machine-specific absolute path — consider a
# relative path or a command-line argument.
# NOTE(review): file(..., open = "r") opens a connection that is never
# explicitly closed after readLines() — confirm whether it should be.
full_fbi <- readLines(file("C:\\Users\\tdounias\\Downloads\\HC 2013 (1)\\HC 2013.txt", open = "r"), skipNul = TRUE)
# Count the incident ("I") records that follow the precinct header ("B")
# record at index `x` in the raw file. Records are assumed ordered as a "B"
# header followed by its "I" incident lines.
#
# Improvements over the original recursive version:
#   - `data` parameter (defaulting to the global `full_fbi`) removes the
#     hard dependency on a global and makes the function testable; existing
#     calls count_incidents(i) keep working unchanged.
#   - iterative, so long incident runs cannot overflow the call stack.
#   - returns 0 at end-of-file or on an unexpected record type, instead of
#     erroring on `if (NA)` / returning NULL as the original did.
count_incidents <- function(x, data = full_fbi) {
  n <- 0L
  i <- x + 1L
  while (i <= length(data) && substr(data[i], 1, 1) == "I") {
    n <- n + 1L
    i <- i + 1L
  }
  n
}
# Extract the fixed-width field spanning character columns [start, end]
# from every record in `data`, returned as a one-column data frame named
# `varname`.
#
# substr() is vectorized over `data`, which replaces the original
# row-at-a-time loop (growing a data frame in a loop is O(n^2)) and also
# handles zero-length input gracefully (0-row result instead of an error).
read_var <- function(data, varname, start, end) {
  df <- data.frame(substr(data, start, end), stringsAsFactors = FALSE)
  colnames(df) <- varname
  df
}
#Create the reporters dataset
#Sets up some useful counters and variables
fbi_reporting <- vector()
fbi_incidents <- vector()
j <- 1
z <- 1
incident_no <- data.frame()
#creates vector with only reporting precincts
# NOTE(review): the right-hand string of the != comparison below appears to
# have lost whitespace in transit — an 8-character substring is compared to
# what is shown as a single space; confirm it should be eight spaces.
for(i in seq_along(full_fbi)){
  if(substr(full_fbi[i], 1, 1) == "B" && substr(full_fbi[i], 218, 225) != " "){
    fbi_reporting[j] <- full_fbi[i]
    incident_no[j, 1] <- i
    j <- j + 1
  }
  if(substr(full_fbi[i], 1, 1) == "I"){
    fbi_incidents[z] <- full_fbi[i]
    z <- z + 1
  }
}
#Counts the incidents
for(i in seq_along(fbi_reporting)){
  incident_no[i, 2] <- count_incidents(incident_no[i, 1])
}
colnames(incident_no) <- c("Number_Original_list", "Number_of_Incidents_Year")
#Next section is for the creation of the fbi precincts dataframe
reporting_df <- data.frame()
for(i in seq_along(fbi_reporting)){
  reporting_df[i, 1] <- i
}
#State code variable
reporting_df[, 1] <- read_var(fbi_reporting, "State_Code", 3, 4)
colnames(reporting_df) [1] <- "State_Code"
#State Abreviation Variable
reporting_df[, 2] <- read_var(fbi_reporting, "State_Abr", 5, 6)
#Country Region variable
reporting_df[, 3] <- read_var(fbi_reporting, "Country_Region", 65, 65)
# NOTE(review): ifelse() is used here purely for its side-effecting
# assignments — unusual but functional; a vectorized recode would be clearer.
for(i in seq_along(fbi_reporting)){
  ifelse(reporting_df[i, 3] == "1", reporting_df[i, 3] <- "NorthEast",
         ifelse(reporting_df[i, 3] == "2", reporting_df[i, 3] <- "NorthCentral",
                ifelse(reporting_df[i, 3] == "3", reporting_df[i, 3] <- "South", reporting_df[i, 3] <- "West")))
}
#Agency_Type Variable
reporting_df[, 4] <- read_var(fbi_reporting, "Agency_Type", 66, 66)
for(i in seq_along(fbi_reporting)){
  ifelse(reporting_df[i, 4] == "0", reporting_df[i, 4] <- "CoveredbyOther",
         ifelse(reporting_df[i, 4] == "1", reporting_df[i, 4] <- "City",
                ifelse(reporting_df[i, 4] == "2", reporting_df[i, 4] <- "County",
                       ifelse(reporting_df[i, 4] == "3", reporting_df[i, 4] <- "Uni/Col", reporting_df[i, 4] <- "StatePolice"))))
}
#Core_City Variable
reporting_df[, 5] <- read_var(fbi_reporting, "Metro_Area", 67, 67)
for(i in seq_along(fbi_reporting)){
  ifelse(reporting_df[i, 5] == "N", reporting_df[i, 5] <- 0, reporting_df[i, 5] <- 1)
}
#Date ORI was added var
reporting_df[, 6] <- read_var(fbi_reporting, "Date_Added", 14, 21)
reporting_df[, 6] <- ymd(reporting_df[, 6])
#Date ORI went NIBRS
reporting_df[, 7] <- read_var(fbi_reporting, "Date_NIBRS", 22, 29)
reporting_df[, 7] <- ymd(reporting_df[, 7])
#City Name
reporting_df[, 8] <- read_var(fbi_reporting, "City_Name", 30, 59)
#Pop_Group Codes variable
reporting_df[, 9] <- read_var(fbi_reporting, "Pop_Group_Code", 62, 63)
#Judicial District Code
reporting_df[, 10] <- read_var(fbi_reporting, "Judicial_Dist_in_State", 81, 84)
#Is Nibrs Active?
# NOTE(review): this loop OVERWRITES column 10 (Judicial_Dist_in_State set
# just above) with the NIBRS-active flag — the judicial-district data is
# lost. Likely intended to write a new column; confirm and fix column index.
for(i in seq_along(fbi_reporting)){
  ifelse(substr(fbi_reporting[i], 85, 85) == "A", reporting_df[i, 10] <- 1, reporting_df[i, 10] <- 0)
}
colnames(reporting_df)[10] <- "IsActiveNIBRS"
#Current population covered
reporting_df[, 11] <- read_var(fbi_reporting, "Current_Pop", 94, 100)
#FIPS County Code
reporting_df[, 12] <- read_var(fbi_reporting, "FIPS_Code", 256, 270)
#UCR County Code
reporting_df[, 13] <- read_var(fbi_reporting, "UCR_Code", 103, 105)
#MSA Code
reporting_df[, 14] <- read_var(fbi_reporting, "MSA_Code", 106, 108)
#Last Population
reporting_df[, 15] <- read_var(fbi_reporting, "Last_Population", 109, 117)
#Master File Year
reporting_df[, 16] <- read_var(fbi_reporting, "Master_File_Year", 214, 217)
#Agency ID
reporting_df[, 17] <- read_var(fbi_reporting, "Agency_ID", 5, 13)
#Quarters of Activity
# NOTE(review): `c` shadows base::c inside this loop — works because the
# c(218:221) call above is evaluated per-iteration before reassignment in
# the first pass only matters after; still worth renaming (e.g. `offset`).
for(i in seq_along(fbi_reporting)){
  c <- 0
  for(y in c(218:221)){
    reporting_df[i, 18 + c] <- substr(fbi_reporting[i], y, y)
    c <- c + 1
  }
}
colnames(reporting_df)[18] <- "1"
colnames(reporting_df)[19] <- "2"
colnames(reporting_df)[20] <- "3"
colnames(reporting_df)[21] <- "4"
#Bind this before gathering
reporting_df <- cbind(reporting_df, incident_no)
# NOTE(review): gather() is superseded; pivot_longer() is the modern form.
reporting_df <- reporting_df %>%
  gather(key = Quarter, value = Incidents, 18, 19, 20, 21)
#Next section is on the fbi incidents reported.
incidents_df <- data.frame()
for(i in seq_along(fbi_incidents)){
  incidents_df[i, 1] <- i
}
#State Code
incidents_df[, 1] <- read_var(fbi_incidents, "State_Code", 3, 4)
colnames(incidents_df) [1] <- "State_Code"
#Agency ID
incidents_df[, 2] <- read_var(fbi_incidents, "Agency_ID", 5, 13)
#Incident Date
incidents_df[, 3] <- read_var(fbi_incidents, "Incident_Date", 26, 33)
incidents_df[, 3] <- ymd(incidents_df[, 3])
#Quarter
incidents_df[, 4] <- read_var(fbi_incidents, "Quarter", 35, 35)
#Number of Victims
incidents_df[, 5] <- read_var(fbi_incidents, "Victims", 36, 38)
incidents_df[, 5] <- as.integer(incidents_df[, 5])
#Offenders number
incidents_df[, 6] <- read_var(fbi_incidents, "Offenders", 39, 40)
incidents_df[, 6] <- as.integer(incidents_df[, 6])
#Offender's Race
incidents_df[, 7] <- read_var(fbi_incidents, "Offenders_Race", 41, 41)
#UCR Offense Code
incidents_df[, 8] <- read_var(fbi_incidents, "UCR_Code", 42, 44)
#Location
incidents_df[, 9] <- read_var(fbi_incidents, "Location_Code", 48, 49)
#Bias Motivation
incidents_df[, 10] <- read_var(fbi_incidents, "Bias_Motivation", 50, 51)
#Is it anti-Muslim? (FBI bias-motivation code 24)
for(i in seq_along(fbi_incidents)){
  incidents_df[i, 11] <- ifelse(incidents_df[i, 10] == "24", "Y", "N")
}
colnames(incidents_df)[11] <- "Anti_Muslim"
#Type of victim
# NOTE(review): the loop below is redundant — the read_var() call two lines
# down overwrites column 12 with the same field.
for(i in seq_along(fbi_incidents)){
  incidents_df[i, 12] <- substr(fbi_incidents[i], 52, 59)
}
colnames(incidents_df)[12] <- "Victim_Type"
incidents_df[, 12] <- read_var(fbi_incidents, "Victim_Type", 52, 59)
#Join
# NOTE(review): incidents_df_2011/2012/2013 and reporting_df_2011/2012/2013
# are never defined in this script — presumably created by running the steps
# above once per year and renaming; confirm before sourcing end-to-end.
incidents_full <- rbind(incidents_df_2011, incidents_df_2012, incidents_df_2013)
reporting_full <- rbind(reporting_df_2011, reporting_df_2012, reporting_df_2013)
write.csv(incidents_full, "hatecrime_incidents_2011to13.csv")
write.csv(reporting_full, "hatecrime_reporters_2011to13.csv")
|
576f5d73358ee3bbafed3db1cc948cdd7527148b
|
22793d1a7c9ea29d118695db911a5133607b414d
|
/public/ecaviar.R
|
1a10de1a57b1f2e71e0448fd913939e9a5f83310
|
[] |
no_license
|
Sandyyy123/mrlocusPaper
|
c3ca8c88b7491fe266486c6e6a93bc199a6a402d
|
6fadfd8df778122c8c9137429ea2c11f7b5052cd
|
refs/heads/master
| 2023-04-15T03:05:40.835273
| 2021-04-29T14:44:56
| 2021-04-29T14:44:56
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,989
|
r
|
ecaviar.R
|
# eCAVIAR colocalization driver script.
# Command-line arguments:
#   1: path to the eCAVIAR binary
#   2: directory holding the per-clump TSV/LD files
#   3: file listing the TSV filenames (one per line)
#   4: file listing the LD filenames (one per line, parallel to the TSVs)
#   5: output .rda filename (also used to derive per-clump temp filenames)
cmd_args=commandArgs(TRUE)
ecav.bin <- cmd_args[1]
dir <- cmd_args[2]
tsv.filename <- cmd_args[3]
ld.filename <- cmd_args[4]
out.filename <- cmd_args[5]
tsv_files <- scan(tsv.filename, what="char")
ld_files <- scan(ld.filename, what="char")
stopifnot(length(tsv_files) == length(ld_files))
# mapping from TSV files to the indexLD matrix
info <- read.table(list.files(dir, pattern="indexinfo", full=TRUE), header=TRUE)
# matrix of LD for the index SNPs
ld <- as.matrix(read.table(list.files(dir, pattern="indexLD", full=TRUE)))
ld <- ld[info$idx, info$idx] # reorder according to indexinfo
# matrix of within-clump LD
ld_mat <- lapply(ld_files, function(x) {
  read.table(file.path(dir, x))
})
# Top-level expression: auto-prints the per-clump SNP counts (log output).
sapply(ld_mat, nrow)
# Per-clump summary stats; z = eQTL z-score, z2 = GWAS z-score.
sum_stat <- lapply(tsv_files, function(x) {
  out <- read.table(file.path(dir, x), header=TRUE)
  out$z <- out$beta_eQTL/out$se_eQTL
  out$abs.z <- abs(out$z)
  out$z2 <- out$beta_GWAS/out$se_GWAS # for testing ecaviar
  out
})
# write out files for testing ecaviar
nclust <- length(ld_mat)
ecav.coloc <- list()
for (j in seq_len(nclust)) {
  # Derive per-clump temp filenames from the output filename stem.
  tmp <- sub("ecav", paste0("ecav_out_j",j), out.filename)
  ld.tmp <- sub("ecav_out","ecav_ld",tmp)
  z.tmp <- sub("ecav_out","ecav_z",tmp)
  z2.tmp <- sub("ecav_out","ecav_z2",tmp)
  write.table(ld_mat[[j]], file=ld.tmp,
              row.names=FALSE, col.names=FALSE, quote=FALSE)
  write.table(sum_stat[[j]][,c("SNP","z")], file=z.tmp,
              row.names=FALSE, col.names=FALSE, quote=FALSE)
  write.table(sum_stat[[j]][,c("SNP","z2")], file=z2.tmp,
              row.names=FALSE, col.names=FALSE, quote=FALSE)
  # Invoke eCAVIAR with the same LD matrix for both traits (-c 1 causal SNP).
  system(paste(ecav.bin, "-o", tmp,
               "-l", ld.tmp, "-l", ld.tmp,
               "-z", z.tmp, "-z", z2.tmp, "-c 1"))
  ecav.coloc[[j]] <- read.table(paste0(tmp,"_col"), header=TRUE)
  file.remove(ld.tmp,z.tmp,z2.tmp) # remove ecaviar input
  tmp.out <- list.files(dirname(tmp),basename(tmp), full.names=TRUE)
  file.remove(tmp.out) # remove ecaviar output
}
save(ecav.coloc, file=out.filename)
|
bb95774c86c49ce16e0686c44a5bfa2aae3a6efc
|
9262e777f0812773af7c841cd582a63f92d398a4
|
/inst/userguide/figures/Covar--Covar_sec2_1_load-plankton-data.R
|
eff3c79097e0839efde58c19da346796a40441a8
|
[
"CC0-1.0",
"LicenseRef-scancode-public-domain"
] |
permissive
|
nwfsc-timeseries/MARSS
|
f0124f9ba414a28ecac1f50c4596caaab796fdd2
|
a9d662e880cb6d003ddfbd32d2e1231d132c3b7e
|
refs/heads/master
| 2023-06-07T11:50:43.479197
| 2023-06-02T19:20:17
| 2023-06-02T19:20:17
| 438,764,790
| 1
| 2
|
NOASSERTION
| 2023-06-02T19:17:41
| 2021-12-15T20:32:14
|
R
|
UTF-8
|
R
| false
| false
| 448
|
r
|
Covar--Covar_sec2_1_load-plankton-data.R
|
###################################################
### code chunk number 3: Covar_sec2_1_load-plankton-data
###################################################
# Subset the 1965-1974 Lake Washington plankton data, keep the Greens and
# Bluegreens series (transposed so each ROW is a taxon), and z-score each
# row (demean and scale by its standard deviation, NAs removed).
fulldat <- lakeWAplanktonTrans
years <- fulldat[, "Year"] >= 1965 & fulldat[, "Year"] < 1975
dat <- t(fulldat[years, c("Greens", "Bluegreens")])
the.mean <- apply(dat, 1, mean, na.rm = TRUE)
the.sigma <- sqrt(apply(dat, 1, var, na.rm = TRUE))
# Length-2 vectors recycle down the 2 rows of `dat`, so each row is
# standardized by its own mean and sd.
dat <- (dat - the.mean) * (1 / the.sigma)
|
2f56bb779c307a99073a376e76063b02d930da0b
|
f733ced3f53a8747b15fbbad3ddd2989599db815
|
/sol.R
|
a7c15b44256b0203e1cb4cd79bc20be62157b8f1
|
[] |
no_license
|
yieun0408/TOBIGS
|
b4752ac03b606224d2d26ab2336b9e8bc804b22d
|
0aab07ac6220b024948d71ec2083bf39fe50fb4e
|
refs/heads/master
| 2020-04-23T11:51:51.804447
| 2019-03-05T01:21:54
| 2019-03-05T01:21:54
| 171,150,380
| 1
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 7,561
|
r
|
sol.R
|
###################
## Housing auction data ##
###################
# (Korean comments in the original were mis-encoded/split across lines;
# translated to English below. Code is untouched.)
rm(list=ls())  # NOTE(review): clearing the global env in a script is discouraged
setwd("C:/Users/laep9/Desktop")  # NOTE(review): hardcoded machine-specific path
# Load the data
df = read.csv("Auction_master_train.csv", stringsAsFactors = T, fileEncoding="utf-8")
str(df)
#regist = read.csv("Auction_regist.csv", stringsAsFactors = T, fileEncoding="utf-8")
#rent = read.csv("Auction_rent.csv", stringsAsFactors = T, fileEncoding="utf-8")
#result = read.csv("Auction_result.csv", stringsAsFactors = T, fileEncoding="utf-8")
# Check missing values
colSums(is.na(df)) # 1778 missing in road_bunji2 (a road-name address field)
# Drop variables that are not needed
df2 = subset(df, select = -c(road_bunji1, road_bunji2, Auction_key, addr_bunji2,
                             addr_li, addr_san, addr_bunji1, addr_etc, road_name,
                             point.y, point.x))
str(df2)
# Convert the date columns to Date
df2$Appraisal_date = as.Date(df2$Appraisal_date)
df2$First_auction_date = as.Date(df2$First_auction_date)
df2$Final_auction_date = as.Date(df2$Final_auction_date)
df2$Close_date = as.Date(df2$Close_date)
df2$Preserve_regist_date = as.Date(df2$Preserve_regist_date)
str(df2)
colSums(is.na(df2))
unique(df2$Close_result) # only blank and "distribution" values
# Visualization / exploratory checks
# 1. Is there a relationship between appraisal company and hammer price?
library(ggplot2)
plot(df2$Appraisal_company, df2$Hammer_price)
# No obvious relationship.
# 2. Do Seoul and Busan differ in hammer price?
plot(df2$addr_do, df2$Hammer_price)
# Seoul is higher than Busan.
# 3. Apartment_usage vs. hammer price
plot(df2$Apartment_usage, df2$Hammer_price)
# 4. Correlations among the continuous variables and with the response
#cor()
land.building = subset(df2, select = c(Total_land_gross_area, Total_land_real_area,
                                       Total_land_auction_area,
                                       Total_building_area,
                                       Total_building_auction_area,
                                       Total_appraisal_price,
                                       Minimum_sales_price))
cor(land.building)
plot(land.building)
# Total land auction area, total building area, building auction area,
# total appraisal price, and minimum sales price are highly correlated
# = what matters at auction relates to the building itself.
# 5. Relationship between the final bid price, minimum price, and bid price
############## Derived variables
# Derived variable 1: auction duration (final date - first date)
df2$during = df2$Final_auction_date - df2$First_auction_date
# Derived variable 2: per_height (current floor / total floors)
df2$per_height = df2$Current_floor/df2$Total_floor
# Derived variable 3: k-means clustering on coordinates plus price;
# assign the cluster id as a new variable.
colSums(is.na(df2))
add.do = subset(df, select = c(point.x, point.y, Hammer_price))
add.do.scaled = scale(add.do)
# Find a reasonable k (elbow method)
wss = 0 # must be initialized before the loop
for(i in 1:20) wss[i] = sum(kmeans(add.do.scaled, centers = i)$withinss)
plot(1:20, wss, type = "b",
     xlab = "Number of Clusters", ylab = "Within group sum of squares")
# Choose k = 6
add.kmeans <- kmeans(add.do.scaled, centers = 6, nstart = 1)
add.kmeans$cluster
plot(add.do.scaled, col = add.kmeans$cluster)
points(add.kmeans$centers, col = 1:6, pch = 8, cex = 2)
df2$cluster <- add.kmeans$cluster
# Remove unneeded variables once more
colnames(df2)
df3 = subset(df2, select = -c(Specific, Appraisal_company, Creditor,
                              First_auction_date, Final_auction_date,
                              Appraisal_date, addr_si, addr_do, addr_dong,
                              Apartment_usage,Close_date, Preserve_regist_date,
                              Total_land_gross_area, Total_land_real_area,
                              Total_land_auction_area,Final_result))
str(df3)
colnames(df3)
# Create the class label (2 = hammer price >= median, 1 = below)
median(df3$Hammer_price)
df3$label = ifelse(df3$Hammer_price >= median(df3$Hammer_price),
                   2,1)
df3$label = as.factor(df3$label)
df4 = subset(df3, select = -c(Hammer_price))
df4$cluster = as.factor(df4$cluster)
################# Split the data into train/test 7:3
#install.packages("caret")
library(caret)
idx <- createDataPartition(y = df4$label, p = 0.7, list =FALSE)
# split 7:3
train<- df4[idx,]
test <- df4[-idx,]
### Variable importance selection
library(randomForest)
?randomForest
rf = randomForest(label~., data = train)
importance(rf)
varImpPlot(rf)
# Keep only the important variables
df_rf = subset(df4, select = c(Claim_price, Total_building_area,
                               Total_building_auction_area,
                               Total_appraisal_price,
                               Minimum_sales_price,
                               cluster, label))
table(df_rf$cluster)
############## knn
# Distance-based method: rescale the features first
## min-max scaling
normalize <- function(x){
  return( (x-min(x))/(max(x)-min(x)) )
}
df_rf_n <- as.data.frame(lapply(df_rf[-c(7:8)], normalize))
summary(df_rf_n)
df_rf_n$cluster <- df_rf$cluster
df_rf_n$label = df_rf$label
# NOTE(review): train_rf_n is not defined at this point — the next two
# lines likely meant df_rf_n; confirm before running the script top-down.
colnames(train_rf_n)
str(train_rf_n)
set.seed(1)
idx <- createDataPartition(y = df_rf_n$label, p = 0.7, list =FALSE)
# split 7:3
train_n<- df_rf_n[idx,]
test_n<- df_rf_n[-idx,]
# Find the best k
# grid search with cross-validation
cv <- trainControl(method = "cv", number = 5, verbose = T)
knn.grid = expand.grid(
  .k = c(1,3,5,7,9,11,13,15)
)
train.knn <- train(label~.,train_n, method = "knn",trControl = cv,
                   tuneGrid = knn.grid)
train.knn$results
train.knn$bestTune # best k = 9
predict.knn <- predict(train.knn, test_n)
confusionMatrix(predict.knn, test_n$label) #Accuracy : 0.9655
######## svm
library(e1071)
?svm
str(train_n)
svm = tune.svm(label~., data = train_n, cost=10:100,gamma=seq(0,3,0.1))
svm$best.parameters #gamma:0.4, cost:34
svm_tune = svm(label~., data =train_n, cost = 34,
               gamma = 0.4, kernel ="radial")
summary(svm_tune)
svm_tune$degree
svm_tune$index
# Accuracy
svm_predict = predict(svm_tune, test_n[,-8])
confusionMatrix(svm_predict, test_n$label) #Accuracy : 0.9724
##### Naive Bayes
# NOTE(review): stray `nb` below references an object defined further down;
# harmless interactively, but errors when the script is sourced top-down.
nb
set.seed(1)
idx <- createDataPartition(y = df_rf$label, p = 0.7, list =FALSE)
# split 7:3
train<- df_rf[idx,]
test<- df_rf[-idx,]
nb = naiveBayes(label~., data = train, laplace = 1)
nb
nb_predict = predict(nb, test[,-8])
confusionMatrix(nb_predict, test$label) #Accuracy : 0.9309
############ Logistic regression
# Create dummy variables for the cluster factor
dm = dummyVars('~cluster', df_rf)
dm = data.frame(predict(dm,df_rf))
df_rf_dummy = cbind(df_rf, dm)
colnames(df_rf_dummy)
# Drop the original cluster variable (now encoded as dummies)
df_rf_dummy2 = subset(df_rf_dummy, select = -cluster)
set.seed(1)
idx <- createDataPartition(y = df_rf_dummy2$label, p = 0.7, list =FALSE)
# split 7:3
train_dm<- df_rf_dummy2[idx,]
test_dm<- df_rf_dummy2[-idx,]
str(train_dm)
glm = glm(label~., data = train_dm, family=binomial)
summary(glm)
glm_predict = predict(glm, test_dm[,-6], type = "response")
pred_threshold2<-as.factor(ifelse(glm_predict>=0.5,2,1))
table(pred_threshold2, test_dm$label)
# NOTE(review): compares against test$label (the Naive Bayes split), not
# test_dm$label — presumably a typo; confirm.
mean(pred_threshold2==test$label) #0.970639
# ROC curve
# NOTE(review): prediction() comes from the ROCR package, which is never
# loaded in this script — add library(ROCR) before running.
pr = prediction(glm_predict, test_dm$label)
|
3fde8b01adbf3fbedaf166d45ec67bc5969ba09b
|
b26682cd791feda1be6b259f4f4d9b037c0c92b0
|
/man/pcamix.Rd
|
e2d714c2fa828bd8ac137d65502bc97976b9f77c
|
[] |
no_license
|
robingenuer/CoVVSURF
|
768ba05a636f011b8fdbe6b6835166fee60d62e5
|
c7b909fa81375e478a87e1d8137977f1f45ea0ea
|
refs/heads/master
| 2021-01-20T18:44:54.391659
| 2019-01-07T14:15:31
| 2019-01-07T14:15:31
| 63,255,808
| 0
| 4
| null | null | null | null |
UTF-8
|
R
| false
| true
| 264
|
rd
|
pcamix.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/CoVVSURF.R
\name{pcamix}
\alias{pcamix}
\title{Performs splitmix and PCAmix}
\usage{
pcamix(X, ndim = 5)
}
\arguments{
\item{X}{A dataset}
\item{ndim}{Number of dimensions in PCAmix}
}
|
0fb0f8e89822a09c4957e9a59ac7d5aa7fbed5f3
|
86888aaccefb80eb1b6928f511d2ff467d516242
|
/man/plot_flight_vertical_time.Rd
|
970d3e59fedd7e05d6310f6c1ba5050ff2c9a62c
|
[] |
no_license
|
caverject10/trrrj
|
8948603c055c4c5cb302ea8154f73e18490ad5f9
|
87391882c20ce2e651e3c7344df3aa82e3ea55a2
|
refs/heads/master
| 2020-08-14T00:21:04.977328
| 2019-09-10T13:31:50
| 2019-09-10T13:31:50
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 817
|
rd
|
plot_flight_vertical_time.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/plot.R
\name{plot_flight_vertical_time}
\alias{plot_flight_vertical_time}
\title{Plot the vertical profile of the recorded positions of a flight from
lapsed time perspective.}
\usage{
plot_flight_vertical_time(poss)
}
\arguments{
\item{poss}{a dataframe of position reports with (at least)
`timestamp` (a date-time) and `altitude` (in feet) columns}
}
\value{
a \code{ggplot2} plot object.
}
\description{
Plot the vertical profile of the recorded positions of a flight from
lapsed time perspective.
}
\examples{
\dontrun{
plot_flight_vertical_time(poss)
}
}
\seealso{
Other plot functions: \code{\link{plot_cpr_horizontal}},
\code{\link{plot_flight_horizontal}},
\code{\link{plot_flight_vertical_distance}}
}
\concept{plot functions}
|
547248ac9ec74133148e26b54c4db5cea1373d91
|
910a9f85f4712cfb05be5b6a8e0c9c36096aeea6
|
/snp_heritability.R
|
27293d28c2c7b152ec5a2d0b2005fbf72ea28680
|
[] |
no_license
|
lizhihao1990/Cadzow2017_Ukbiobank_Gout
|
8345f7edbd2f0142f4eb08ebb2d3233b1eb85d4c
|
15eefd10ba5b58cbeb7d4f785971e4c96d6616f3
|
refs/heads/master
| 2020-05-09T15:29:25.186983
| 2017-08-15T22:24:42
| 2017-08-15T22:24:42
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 635
|
r
|
snp_heritability.R
|
# snp_heritability.R
# For each gout case definition, collate the per-chromosome logistic GWAS
# association results and write out the SNP IDs that reach nominal
# significance (additive model, P < 1e-5) for downstream SNP-heritability
# estimation.
library(data.table)

# Gout case definitions analysed against the full control set.
cond <- c('all','winnard','hosp','self','self_ult','ult')

for (gc in cond) {
  # Preallocate one slot per autosome instead of growing the list in the loop.
  tmp <- vector("list", 22L)
  for (i in seq_len(22L)) {
    tmp[[i]] <- fread(paste0('/media/xsan/scratch/merrimanlab/murray/working_dir/UkBio/GWAS_all_controls/controls/', gc, '/adjusted/controls', gc, '_age_sex_chr', i, '.assoc.logistic.tsv'))
  }
  # Stack all chromosomes into one genome-wide table.
  gwas <- rbindlist(tmp)
  # Keep additive-model tests only; P < 1e-5 is the nominal-significance
  # threshold used to build the heritability SNP lists.
  write.table(gwas[TEST == 'ADD' & P < 1e-5]$SNP,
              file = paste0('/media/xsan/scratch/merrimanlab/murray/working_dir/UkBio/GWAS_all_controls/controls/Heritability/', gc, '/', gc, '_age_sex_nominally_sig_snps.txt'),
              quote = FALSE, row.names = FALSE, col.names = FALSE, sep = '\t')
}
|
092b118d4b79711286c98670d8aa13f39cf132d2
|
a034d355e35d8fa4fc562b8f71a771bca760a877
|
/R/plot.results.table.R
|
87b741c10ad1b39037bb4a87192c55cef9aedd42
|
[] |
no_license
|
tshmak/Tmisc
|
eeda1df8d449c3df7dd0d91f6ee698b10f6f3839
|
81f224e89a8d2ee9455f5ccfd1eae34e0ef7d8c6
|
refs/heads/master
| 2021-07-15T13:27:45.526848
| 2020-05-14T05:34:18
| 2020-05-14T05:34:18
| 144,684,908
| 3
| 1
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,284
|
r
|
plot.results.table.R
|
plot.results.table <- function(data, x=NULL, y=NULL, fill=NULL, X=NULL, Y=NULL,
                               wrap=NULL, scales=NULL) {
  #' Automatic plotting of results.table...
  #'
  #' Draws a dodged bar chart of group means with +/- 1 SD error bars.
  #' `x`, `y`, `fill` are bare column names (captured via substitute()).
  #' `wrap` facets with facet_wrap; `X`/`Y` facet with facet_grid and take
  #' precedence over `wrap` when both are supplied. Returns a ggplot object.
  x <- deparse(substitute(x))
  y <- deparse(substitute(y))
  fill <- deparse(substitute(fill))
  p <- ggplot(data=data, map=aes_string(x=x, y=y, fill=fill))
  dodge <- position_dodge(0.9)
  # NOTE(review): fun.y is deprecated in ggplot2 >= 3.3 (renamed `fun`);
  # kept as-is for compatibility with older ggplot2 versions.
  bar <- stat_summary(fun.y=mean, geom="bar", pos=dodge)
  # mean_sdl with mult=1 gives mean +/- 1 standard deviation.
  errorbar <- stat_summary(fun.data=function(x) mean_sdl(x,mult=1),
                           geom="errorbar",
                           position=dodge, aes_string(group=fill))
  todraw <- p + bar + errorbar
  X <- substitute(X)
  Y <- substitute(Y)
  wrap <- substitute(wrap)
  # Bug fix: `formula` was previously unbound when no faceting argument was
  # supplied, so the final is.null(formula) check raised
  # "object 'formula' not found".
  formula <- NULL
  if (!is.null(wrap)) formula <- facet_wrap(as.formula(paste("~", wrap)), scales=scales)
  if (!is.null(X) && !is.null(Y)) formula <- facet_grid(as.formula(paste(Y, "~", X)), scales=scales)
  else if (!is.null(X)) formula <- facet_grid(as.formula(paste(".", "~", X)), scales=scales)
  else if (!is.null(Y)) formula <- facet_grid(as.formula(paste(Y, "~", ".")), scales=scales)
  if (!is.null(formula)) todraw <- todraw + formula
  return(todraw)
}
|
755fa357656d3b802fd4ca725c21ac12f6feb781
|
7a95abd73d1ab9826e7f2bd7762f31c98bd0274f
|
/mcga/inst/testfiles/ByteCodeMutation/libFuzzer_ByteCodeMutation/ByteCodeMutation_valgrind_files/1612802884-test.R
|
a09f3ee534a36cc78258c171a141990cc023dd57
|
[] |
no_license
|
akhikolla/updatedatatype-list3
|
536d4e126d14ffb84bb655b8551ed5bc9b16d2c5
|
d1505cabc5bea8badb599bf1ed44efad5306636c
|
refs/heads/master
| 2023-03-25T09:44:15.112369
| 2021-03-20T15:57:10
| 2021-03-20T15:57:10
| 349,770,001
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 630
|
r
|
1612802884-test.R
|
# Auto-generated libFuzzer/valgrind regression input for the internal
# mcga:::ByteCodeMutation routine. `bytes1` is the captured raw integer
# byte-code vector; `pmutation` is the mutation probability — here 0, so
# presumably no bytes should be altered (confirm against the mcga docs).
testlist <- list(bytes1 = c(-690563370L, -690563370L, -690563370L, -690563370L, -690563370L, -690563370L, -690563370L, -690563370L, -690563370L, -690563370L, -690563370L, -690563370L, -690563370L, -690563370L, -690563370L, -690563370L, -690563370L, -690563370L, -690563584L, 147456L, 67108643L, 561577984L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L), pmutation = 0)
# Replay the captured call; the test passes if this neither crashes nor
# trips valgrind.
result <- do.call(mcga:::ByteCodeMutation,testlist)
# Print the result structure for manual inspection of the replay.
str(result)
|
d0a4f1e17b080c676e34df3a615f736412787fc7
|
4475e9a0ce50c14e83cd6bfadca929d9a7412911
|
/MAPI/thetas_density_kernels.R
|
2bee427a41189bdf17a06847c8bbca34b8a20114
|
[] |
no_license
|
jarvist/MAPI-MD-analysis
|
9fa1745a0b639939add6f8e0972cb60be945891e
|
f742253656a627130a159e501ebf2279d1746225
|
refs/heads/master
| 2020-05-21T00:27:47.384554
| 2015-08-12T09:58:13
| 2015-08-12T09:58:13
| 19,031,370
| 2
| 1
| null | null | null | null |
UTF-8
|
R
| false
| false
| 261
|
r
|
thetas_density_kernels.R
|
# thetas_density_kernels.R
# Read the two theta columns (V1, V2) from thetas.dat, plot their histograms
# and kernel-density estimates in a 2x2 panel, and save the figure to PDF.
MA_thetas <- read.table("thetas.dat")
t_density <- density(MA_thetas$V1)
p_density <- density(MA_thetas$V2)

par(mfrow = c(2, 2))
hist(MA_thetas$V1, breaks = 25)
hist(MA_thetas$V2, breaks = 25)
plot(t_density)
plot(p_density)

# Copy the current (4-panel) screen device to a PDF file. Fix: the filename
# was previously passed positionally into dev.print()'s `...` and only
# reached pdf() because its first formal happens to be `file`; name it
# explicitly so the call is robust.
dev.print(device = pdf, file = "thetas_density_kernels.pdf")
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.