blob_id
stringlengths 40
40
| directory_id
stringlengths 40
40
| path
stringlengths 2
327
| content_id
stringlengths 40
40
| detected_licenses
listlengths 0
91
| license_type
stringclasses 2
values | repo_name
stringlengths 5
134
| snapshot_id
stringlengths 40
40
| revision_id
stringlengths 40
40
| branch_name
stringclasses 46
values | visit_date
timestamp[us]date 2016-08-02 22:44:29
2023-09-06 08:39:28
| revision_date
timestamp[us]date 1977-08-08 00:00:00
2023-09-05 12:13:49
| committer_date
timestamp[us]date 1977-08-08 00:00:00
2023-09-05 12:13:49
| github_id
int64 19.4k
671M
⌀ | star_events_count
int64 0
40k
| fork_events_count
int64 0
32.4k
| gha_license_id
stringclasses 14
values | gha_event_created_at
timestamp[us]date 2012-06-21 16:39:19
2023-09-14 21:52:42
⌀ | gha_created_at
timestamp[us]date 2008-05-25 01:21:32
2023-06-28 13:19:12
⌀ | gha_language
stringclasses 60
values | src_encoding
stringclasses 24
values | language
stringclasses 1
value | is_vendor
bool 2
classes | is_generated
bool 2
classes | length_bytes
int64 7
9.18M
| extension
stringclasses 20
values | filename
stringlengths 1
141
| content
stringlengths 7
9.18M
|
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
8281574395ec1ec580cb285e8724228fba508d77
|
8e1ddb4cce71f94035644128b213f4d1eb295b2d
|
/point_different.R
|
c1cf09cc5fae0bc4aca4d74639961979d3dcd521
|
[] |
no_license
|
shimw6828/Rscript
|
c4056a988730921ffb169ac21b152924bc9b171e
|
cb0ae9ead82db5778b3d1f3606a4b6d1943ceba8
|
refs/heads/master
| 2021-09-06T01:15:00.683810
| 2018-02-01T08:01:41
| 2018-02-01T08:01:41
| 119,802,766
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 10,362
|
r
|
point_different.R
|
# Discover the per-comparison CSV result files. Basenames and full paths are
# kept in parallel vectors so each table can be assigned under its file name.
# FIX: the pattern is now anchored ("\\.csv$"); the original ".csv" treated
# the dot as a regex wildcard and matched any name containing "Xcsv".
filename <- list.files(path = "/home/shimw/yushixiong/dot/",pattern = "\\.csv$",full.names = FALSE)
filenamet <- list.files(path = "/home/shimw/yushixiong/dot/",pattern = "\\.csv$",full.names = TRUE)
# Read every discovered file (the original hard-coded 1:4, which breaks as
# soon as the directory gains or loses a file) and bind each data frame into
# the global environment under its basename, e.g. `norgarcav_many_true.csv`.
for (i in seq_along(filename)){
  assign(as.character(filename[[i]]),read.csv(filenamet[i],header = TRUE))
}
## Section: cavefish (cav) vs gar comparison.
## Convert the tissue columns from factor to character so the labels can be
## compared and overwritten below.
norgarcav_many_true.csv$tissue.cav.x <- as.character(norgarcav_many_true.csv$tissue.cav.x)
norgarcav_many_true.csv$tissue.cav.y <- as.character(norgarcav_many_true.csv$tissue.cav.y)
norgarcav_many_true.csv$tissue.gar <- as.character(norgarcav_many_true.csv$tissue.gar)
## Genes with tau < 0.8 are treated as broadly expressed: tissue becomes "all".
norgarcav_many_true.csv$tissue.cav.x[which(norgarcav_many_true.csv$tau.cav.x<0.8)] <- "all"
norgarcav_many_true.csv$tissue.cav.y[which(norgarcav_many_true.csv$tau.cav.y<0.8)] <- "all"
norgarcav_many_true.csv$tissue.gar[which(norgarcav_many_true.csv$tau.gar<0.8)] <- "all"
## Build one row per duplicate copy (x / y) of each gene, carrying tau values
## and tissue labels; avi starts at "1" and is upgraded below.
## FIX: `le` is derived from the table itself instead of the hard-coded 451
## the original used, so this section no longer breaks when the input row
## count changes, and it now matches how the cod/med/zeb sections size theirs.
le <- length(norgarcav_many_true.csv$Gene.ID)
norgarcav_many_tau_x <- data.frame(Gene.ID=norgarcav_many_true.csv$Gene.ID,Cave.fish.gene.stable.ID=norgarcav_many_true.csv$Cave.fish.gene.stable.ID.x.,tau.cav=norgarcav_many_true.csv$tau.cav.x,tau.gar=norgarcav_many_true.csv$tau.gar,tissue.cav.x=norgarcav_many_true.csv$tissue.cav.x,tissue.cav.y=norgarcav_many_true.csv$tissue.cav.y,tissue.gar=norgarcav_many_true.csv$tissue.gar,type=rep("x",le),avi=rep("1",le))
norgarcav_many_tau_y <- data.frame(Gene.ID=norgarcav_many_true.csv$Gene.ID,Cave.fish.gene.stable.ID=norgarcav_many_true.csv$Cave.fish.gene.stable.ID.y.,tau.cav=norgarcav_many_true.csv$tau.cav.y,tau.gar=norgarcav_many_true.csv$tau.gar,tissue.cav.x=norgarcav_many_true.csv$tissue.cav.x,tissue.cav.y=norgarcav_many_true.csv$tissue.cav.y,tissue.gar=norgarcav_many_true.csv$tissue.gar,type=rep("y",le),avi=rep("1",le))
## Classify each gene pair: "3" = both cav copies keep the gar tissue,
## "2" = exactly one copy keeps it, "1" (the default) = neither does.
norgarcav_many_tau <- rbind(norgarcav_many_tau_x,norgarcav_many_tau_y)
norgarcav_many_tau$avi <- as.character(norgarcav_many_tau$avi)
norgarcav_many_tau$avi[which(norgarcav_many_tau$tissue.cav.x==norgarcav_many_tau$tissue.gar&norgarcav_many_tau$tissue.cav.y==norgarcav_many_tau$tissue.gar)] <- "3"
norgarcav_many_tau$avi[which(norgarcav_many_tau$tissue.cav.x==norgarcav_many_tau$tissue.gar&norgarcav_many_tau$tissue.cav.y!=norgarcav_many_tau$tissue.gar)] <- "2"
norgarcav_many_tau$avi[which(norgarcav_many_tau$tissue.cav.y==norgarcav_many_tau$tissue.gar&norgarcav_many_tau$tissue.cav.x!=norgarcav_many_tau$tissue.gar)] <- "2"
## Scatter of tau(cav) vs tau(gar), coloured by the agreement class above.
p_cav <- ggplot(norgarcav_many_tau,aes(x=tau.cav,y=tau.gar,colour=avi)) + geom_point() + scale_colour_manual(values = c("black","blue","red"))
## Counts per class; each gene contributes two rows (x and y), hence /2.
cav_number_3 <- length(norgarcav_many_tau[norgarcav_many_tau$avi==3,]$avi)/2
cav_number_2 <- length(norgarcav_many_tau[norgarcav_many_tau$avi==2,]$avi)/2
cav_number_1 <- length(norgarcav_many_tau[norgarcav_many_tau$avi==1,]$avi)/2
## Section: cod vs gar comparison (same pipeline as the cav section above).
norgarcod_many_true.csv$tissue.cod.x <- as.character(norgarcod_many_true.csv$tissue.cod.x)
norgarcod_many_true.csv$tissue.cod.y <- as.character(norgarcod_many_true.csv$tissue.cod.y)
norgarcod_many_true.csv$tissue.gar <- as.character(norgarcod_many_true.csv$tissue.gar)
## Genes with tau < 0.8 are treated as broadly expressed: tissue becomes "all".
norgarcod_many_true.csv$tissue.cod.x[which(norgarcod_many_true.csv$tau.cod.x<0.8)] <- "all"
norgarcod_many_true.csv$tissue.cod.y[which(norgarcod_many_true.csv$tau.cod.y<0.8)] <- "all"
norgarcod_many_true.csv$tissue.gar[which(norgarcod_many_true.csv$tau.gar<0.8)] <- "all"
## FIX: the original line was `length(norgarcod_many_tarue.csvGene.ID)` — the
## table name was misspelled and the `$` was missing, so the script errored
## here (the in-file note flagged this spot as the known bug). Additionally,
## the leading `n` of the next assignment had been split onto the comment
## line, leaving `orgarcod_many_tau_x`; both are restored below.
le <- length(norgarcod_many_true.csv$Gene.ID)
norgarcod_many_tau_x <- data.frame(Gene.ID=norgarcod_many_true.csv$Gene.ID,Cod.gene.stable.ID=norgarcod_many_true.csv$Cod.gene.stable.ID.x.,tau.cod=norgarcod_many_true.csv$tau.cod.x,tau.gar=norgarcod_many_true.csv$tau.gar,tissue.cod.x=norgarcod_many_true.csv$tissue.cod.x,tissue.cod.y=norgarcod_many_true.csv$tissue.cod.y,tissue.gar=norgarcod_many_true.csv$tissue.gar,type=rep("x",le),avi=rep("1",le))
norgarcod_many_tau_y <- data.frame(Gene.ID=norgarcod_many_true.csv$Gene.ID,Cod.gene.stable.ID=norgarcod_many_true.csv$Cod.gene.stable.ID.y.,tau.cod=norgarcod_many_true.csv$tau.cod.y,tau.gar=norgarcod_many_true.csv$tau.gar,tissue.cod.x=norgarcod_many_true.csv$tissue.cod.x,tissue.cod.y=norgarcod_many_true.csv$tissue.cod.y,tissue.gar=norgarcod_many_true.csv$tissue.gar,type=rep("y",le),avi=rep("1",le))
## Classify: "3" = both cod copies keep the gar tissue, "2" = exactly one
## does, "1" (default) = neither.
norgarcod_many_tau <- rbind(norgarcod_many_tau_x,norgarcod_many_tau_y)
norgarcod_many_tau$avi <- as.character(norgarcod_many_tau$avi)
norgarcod_many_tau$avi[which(norgarcod_many_tau$tissue.cod.x==norgarcod_many_tau$tissue.gar&norgarcod_many_tau$tissue.cod.y==norgarcod_many_tau$tissue.gar)] <- "3"
norgarcod_many_tau$avi[which(norgarcod_many_tau$tissue.cod.x==norgarcod_many_tau$tissue.gar&norgarcod_many_tau$tissue.cod.y!=norgarcod_many_tau$tissue.gar)] <- "2"
norgarcod_many_tau$avi[which(norgarcod_many_tau$tissue.cod.y==norgarcod_many_tau$tissue.gar&norgarcod_many_tau$tissue.cod.x!=norgarcod_many_tau$tissue.gar)] <- "2"
## Scatter of tau(cod) vs tau(gar), coloured by the agreement class above.
p_cod <- ggplot(norgarcod_many_tau,aes(x=tau.cod,y=tau.gar,colour=avi)) + geom_point() + scale_colour_manual(values = c("black","blue","red"))
## Counts per class; each gene contributes two rows (x and y), hence /2.
cod_number_3 <- length(norgarcod_many_tau[norgarcod_many_tau$avi==3,]$avi)/2
cod_number_2 <- length(norgarcod_many_tau[norgarcod_many_tau$avi==2,]$avi)/2
cod_number_1 <- length(norgarcod_many_tau[norgarcod_many_tau$avi==1,]$avi)/2
## Section: medaka (med) vs gar comparison (same pipeline as the cav section).
## Convert tissue columns from factor to character so they can be compared.
norgarmed_many_true.csv$tissue.med.x <- as.character(norgarmed_many_true.csv$tissue.med.x)
norgarmed_many_true.csv$tissue.med.y <- as.character(norgarmed_many_true.csv$tissue.med.y)
norgarmed_many_true.csv$tissue.gar <- as.character(norgarmed_many_true.csv$tissue.gar)
## Genes with tau < 0.8 are treated as broadly expressed: tissue becomes "all".
norgarmed_many_true.csv$tissue.med.x[which(norgarmed_many_true.csv$tau.med.x<0.8)] <- "all"
norgarmed_many_true.csv$tissue.med.y[which(norgarmed_many_true.csv$tau.med.y<0.8)] <- "all"
norgarmed_many_true.csv$tissue.gar[which(norgarmed_many_true.csv$tau.gar<0.8)] <- "all"
## One row per duplicate copy (x / y) of each gene; avi starts at "1".
le <- length(norgarmed_many_true.csv$Gene.ID)
norgarmed_many_tau_x <- data.frame(Gene.ID=norgarmed_many_true.csv$Gene.ID,Medaka.gene.stable.ID=norgarmed_many_true.csv$Medaka.gene.stable.ID.x.,tau.med=norgarmed_many_true.csv$tau.med.x,tau.gar=norgarmed_many_true.csv$tau.gar,tissue.med.x=norgarmed_many_true.csv$tissue.med.x,tissue.med.y=norgarmed_many_true.csv$tissue.med.y,tissue.gar=norgarmed_many_true.csv$tissue.gar,type=rep("x",le),avi=rep("1",le))
norgarmed_many_tau_y <- data.frame(Gene.ID=norgarmed_many_true.csv$Gene.ID,Medaka.gene.stable.ID=norgarmed_many_true.csv$Medaka.gene.stable.ID.y.,tau.med=norgarmed_many_true.csv$tau.med.y,tau.gar=norgarmed_many_true.csv$tau.gar,tissue.med.x=norgarmed_many_true.csv$tissue.med.x,tissue.med.y=norgarmed_many_true.csv$tissue.med.y,tissue.gar=norgarmed_many_true.csv$tissue.gar,type=rep("y",le),avi=rep("1",le))
## avi: "3" = both med copies keep the gar tissue, "2" = exactly one does,
## "1" (default) = neither.
norgarmed_many_tau <- rbind(norgarmed_many_tau_x,norgarmed_many_tau_y)
norgarmed_many_tau$avi <- as.character(norgarmed_many_tau$avi)
norgarmed_many_tau$avi[which(norgarmed_many_tau$tissue.med.x==norgarmed_many_tau$tissue.gar&norgarmed_many_tau$tissue.med.y==norgarmed_many_tau$tissue.gar)] <- "3"
norgarmed_many_tau$avi[which(norgarmed_many_tau$tissue.med.x==norgarmed_many_tau$tissue.gar&norgarmed_many_tau$tissue.med.y!=norgarmed_many_tau$tissue.gar)] <- "2"
norgarmed_many_tau$avi[which(norgarmed_many_tau$tissue.med.y==norgarmed_many_tau$tissue.gar&norgarmed_many_tau$tissue.med.x!=norgarmed_many_tau$tissue.gar)] <- "2"
## Scatter of tau(med) vs tau(gar), coloured by agreement class.
p_med <- ggplot(norgarmed_many_tau,aes(x=tau.med,y=tau.gar,colour=avi)) + geom_point() + scale_colour_manual(values = c("black","blue","red"))
## Counts per class; each gene contributes two rows (x and y), hence /2.
med_number_3 <- length(norgarmed_many_tau[norgarmed_many_tau$avi==3,]$avi)/2
med_number_2 <- length(norgarmed_many_tau[norgarmed_many_tau$avi==2,]$avi)/2
med_number_1 <- length(norgarmed_many_tau[norgarmed_many_tau$avi==1,]$avi)/2
## Section: zebrafish (zeb) vs gar comparison (same pipeline as above).
## Convert tissue columns from factor to character so they can be compared.
norgarzeb_many_true.csv$tissue.zeb.x <- as.character(norgarzeb_many_true.csv$tissue.zeb.x)
norgarzeb_many_true.csv$tissue.zeb.y <- as.character(norgarzeb_many_true.csv$tissue.zeb.y)
norgarzeb_many_true.csv$tissue.gar <- as.character(norgarzeb_many_true.csv$tissue.gar)
## Genes with tau < 0.8 are treated as broadly expressed: tissue becomes "all".
norgarzeb_many_true.csv$tissue.zeb.x[which(norgarzeb_many_true.csv$tau.zeb.x<0.8)] <- "all"
norgarzeb_many_true.csv$tissue.zeb.y[which(norgarzeb_many_true.csv$tau.zeb.y<0.8)] <- "all"
norgarzeb_many_true.csv$tissue.gar[which(norgarzeb_many_true.csv$tau.gar<0.8)] <- "all"
## One row per duplicate copy (x / y) of each gene; avi starts at "1".
le <- length(norgarzeb_many_true.csv$Gene.ID)
norgarzeb_many_tau_x <- data.frame(Gene.ID=norgarzeb_many_true.csv$Gene.ID,Zebrafish.gene.stable.ID=norgarzeb_many_true.csv$Zebrafish.gene.stable.ID.x.,tau.zeb=norgarzeb_many_true.csv$tau.zeb.x,tau.gar=norgarzeb_many_true.csv$tau.gar,tissue.zeb.x=norgarzeb_many_true.csv$tissue.zeb.x,tissue.zeb.y=norgarzeb_many_true.csv$tissue.zeb.y,tissue.gar=norgarzeb_many_true.csv$tissue.gar,type=rep("x",le),avi=rep("1",le))
norgarzeb_many_tau_y <- data.frame(Gene.ID=norgarzeb_many_true.csv$Gene.ID,Zebrafish.gene.stable.ID=norgarzeb_many_true.csv$Zebrafish.gene.stable.ID.y.,tau.zeb=norgarzeb_many_true.csv$tau.zeb.y,tau.gar=norgarzeb_many_true.csv$tau.gar,tissue.zeb.x=norgarzeb_many_true.csv$tissue.zeb.x,tissue.zeb.y=norgarzeb_many_true.csv$tissue.zeb.y,tissue.gar=norgarzeb_many_true.csv$tissue.gar,type=rep("y",le),avi=rep("1",le))
## avi: "3" = both zeb copies keep the gar tissue, "2" = exactly one does,
## "1" (default) = neither.
norgarzeb_many_tau <- rbind(norgarzeb_many_tau_x,norgarzeb_many_tau_y)
norgarzeb_many_tau$avi <- as.character(norgarzeb_many_tau$avi)
norgarzeb_many_tau$avi[which(norgarzeb_many_tau$tissue.zeb.x==norgarzeb_many_tau$tissue.gar&norgarzeb_many_tau$tissue.zeb.y==norgarzeb_many_tau$tissue.gar)] <- "3"
norgarzeb_many_tau$avi[which(norgarzeb_many_tau$tissue.zeb.x==norgarzeb_many_tau$tissue.gar&norgarzeb_many_tau$tissue.zeb.y!=norgarzeb_many_tau$tissue.gar)] <- "2"
norgarzeb_many_tau$avi[which(norgarzeb_many_tau$tissue.zeb.y==norgarzeb_many_tau$tissue.gar&norgarzeb_many_tau$tissue.zeb.x!=norgarzeb_many_tau$tissue.gar)] <- "2"
## Scatter of tau(zeb) vs tau(gar), coloured by agreement class.
p_zeb <- ggplot(norgarzeb_many_tau,aes(x=tau.zeb,y=tau.gar,colour=avi)) + geom_point() + scale_colour_manual(values = c("black","blue","red"))
## Counts per class; each gene contributes two rows (x and y), hence /2.
zeb_number_3 <- length(norgarzeb_many_tau[norgarzeb_many_tau$avi==3,]$avi)/2
zeb_number_2 <- length(norgarzeb_many_tau[norgarzeb_many_tau$avi==2,]$avi)/2
zeb_number_1 <- length(norgarzeb_many_tau[norgarzeb_many_tau$avi==1,]$avi)/2
|
084be70208ff531d2530aec722377752027dd20a
|
d7258807978432d320c176e415ec249962620165
|
/scripts/gaur_paml_go_kegg_pathway_analysis.R
|
219cd2aa37f0228138f25c21a04004cdd0948dd0
|
[
"MIT"
] |
permissive
|
Genorater/GaurAssemblyProject
|
359784ac2f2586029d0ce9e4d3587097110e2851
|
cfb27d5b59d67c1e95af97888718d6de87acfd05
|
refs/heads/main
| 2023-05-09T05:23:51.162021
| 2021-06-07T00:05:27
| 2021-06-07T00:05:27
| 362,017,366
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 3,745
|
r
|
gaur_paml_go_kegg_pathway_analysis.R
|
#------------------------------------------------------
# Program name: gaur_paml_go_kegg_pathway_analysis.R
# Objective: GO and KEGG pathway analysis for positive selection results
#
# Author: Kelly Ren
# Email add: kellydren@gmail.com
#------------------------------------------------------
# Packages
library(magrittr)
library(readr)
library(dplyr)
library(stringr)
library(tidyverse)
library(readxl)
library(gtools)
library(tibble)
library(limma)
'%!in%' <- function(x,y)!('%in%'(x,y))
# Analysis
# Read data: orthogroup table from OrthoFinder (one row per gene per species).
OGtbl <- read.table("input/From_OrthoFinder/OGtbl.tsv",header = T)%>%
set_colnames(c("orthogroup_ID", "geneID", "spc"))
# Normalise orthogroup IDs so they match the PAML result IDs below.
OGtbl$orthogroup_ID <- gsub("OG1v", "OG",OGtbl$orthogroup_ID)
# Check species
table(OGtbl$spc)
# PAML branch-test results; keep only genes flagged in InnateDB.
results_Positive_select <- read.csv("output/PAML/branch_results_Positive_select.csv")
results_Positive_select%<>%subset(InnateDB %in% "TRUE")
colnames(results_Positive_select) <- gsub("spc", "species",colnames(results_Positive_select))
# Gaur-to-cattle gene correspondence from BLAST (used to map gene IDs).
ARS_UOA_Gaur_1pep_vs_ARS_UCD1_2pep_correspond <- readRDS("input/From_blast/ARS_UOA_Gaur_1pep_vs_ARS_UCD1_2pep_correspond.rds")
head(ARS_UOA_Gaur_1pep_vs_ARS_UCD1_2pep_correspond)
# How many orthogroup ID
OGtbl$orthogroup_ID%>%unique()%>%length()
OGtbl$geneID <- as.character(OGtbl$geneID)
# Gaur (Bgau) genes in positively-selected orthogroups, joined to their
# cattle counterparts via the BLAST correspondence table.
results_Positive_select_OGtbl_Bgau <- subset(OGtbl, orthogroup_ID %in% results_Positive_select$ID)%>%
subset(spc %in% "Bgau")%>%
left_join(ARS_UOA_Gaur_1pep_vs_ARS_UCD1_2pep_correspond,by = c("geneID"= "Bosg_gene_id_nover"))%>%
unique()
head(results_Positive_select_OGtbl_Bgau)
## Gaur (Cattle database): same selection for the Hbta (cattle) gene set.
results_Positive_select_OGtbl_Hbta <- subset(OGtbl, orthogroup_ID %in% results_Positive_select$ID)%>%
subset(spc %in% "Hbta")%>%
unique()
head(results_Positive_select_OGtbl_Hbta)
# Load the cached AnnotationHub snapshot (the commented lines show how the
# cache was originally produced).
#ah <- AnnotationHub()
#saveRDS(ah,"All_annotation.rds")
ah <- read_rds("/Users/kellydren/Documents/Kelly_annotation/All_annotation.rds")
# AH83145 is the Bos taurus EnsDb record (lookup shown commented out).
#subset(ah, rdataclass == "EnsDb" & species == "Bos taurus")
ensDb <- ah[["AH83145"]]
ensDb
genesGR <- GenomicFeatures::genes(ensDb)
genesGR
# FIX: a stray "{r }" R Markdown chunk fence sat here — a leftover from
# converting an .Rmd to a plain .R script. It is a syntax error in R and has
# been removed.
cols2Keep <- c("gene_id","gene_name", "gene_biotype", "description", "entrezid")
mcols(genesGR) <- mcols(genesGR)[, cols2Keep]
# get gene annotation as a plain data frame; entrezid flattened to character.
Genes <- genesGR%>%
as.data.frame()
Genes$entrezid <- Genes$entrezid%>%as.character()
# Background gene universe: every cattle gene with an Entrez ID.
cattle_ALL_entrezID <- genesGR %>%
subset(!is.na(entrezid)) %>%
mcols() %>%
.[["entrezid"]] %>%
unlist() %>%
unique()
# trans to entrezid
# Strip version suffixes from the cattle gene IDs before joining.
# NOTE(review): the pattern "\\.." removes a literal dot plus ONE following
# character — confirm version suffixes are always single-character.
results_Positive_select_OGtbl_Bgau$Bost_gene_id %<>% gsub("\\..","",. )
results_Positive_select_OGtbl_Bgau <- results_Positive_select_OGtbl_Bgau%>%left_join(Genes[,c("gene_id","entrezid")], by = c("Bost_gene_id" = "gene_id"))
# entrezid was stringified from list columns (e.g. "c(123, 456)"); unpack it
# by stripping the c(...) wrapper and splitting on ", ".
results_Positive_select_OGtbl_Bgau_entrezid <- results_Positive_select_OGtbl_Bgau$entrezid%>%
gsub("c","",.)%>%
gsub("\\(","",.)%>%
gsub("\\)","",.)%>%
str_split( ", ")%>%
unlist()
### GO enrichment (limma::goana) of the positively-selected gaur genes
### against the full cattle Entrez universe; keep FDR < 0.05 terms.
goRes <- goana(results_Positive_select_OGtbl_Bgau_entrezid, cattle_ALL_entrezID, species = "Bt")
Gaur_goRes <- goRes%>%
rownames_to_column("GO_ID")%>%
mutate(fdr = p.adjust(P.DE, "fdr"))%>%
subset(fdr < 0.05)%>%
arrange(fdr)
Gaur_goRes%>%
dim()
head(Gaur_goRes)
### KEGG enrichment (limma::kegga) with the same gene list and universe.
keggRes <- kegga(results_Positive_select_OGtbl_Bgau_entrezid, cattle_ALL_entrezID, species = "Bt")
Gaur_keggRes <- keggRes%>%
rownames_to_column("KEGG_ID")%>%
mutate(fdr = p.adjust(P.DE, method = "fdr"))%>%
subset(fdr < 0.05)%>%
arrange(fdr)
Gaur_keggRes%>%
dim()
head(Gaur_keggRes)
# ECM-receptor interaction pathways were the most upregulated gene-enriched signaling pathways. They play an important role in the process of tumor shedding, adhesion, degradation, movement and hyperplasia. The role of ECM in other cancers has been proved.
|
6c1accd9db5421d418dd05a68054ca5aed991911
|
411ae251f59fabb35eaa2d4bbf85a596322dd521
|
/project-submission/CODE/shinyApp/modeling.R
|
02c78884c43b9c917335f14aaf409668d6e9001c
|
[
"Apache-2.0"
] |
permissive
|
hepaces89/multi-agent-data-analysis
|
7ab8eae13298998863fae1763a153cc7f78acf59
|
7ac425baab72cbc162db78a5d9db775def752f32
|
refs/heads/master
| 2020-03-25T06:47:59.121910
| 2018-04-30T23:04:33
| 2018-04-30T23:04:33
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 8,852
|
r
|
modeling.R
|
# This is the file that has the function run_modeling that is called in app.R
# for the modeling tab
library(data.table)
library(sqldf)
library(dplyr)
library(plyr)
library(readxl)
library(datasets)
library(corrplot)
library(data.table)
library(ggplot2)
library(caTools)
library(pls)
library(neuralnet)
# Entry point called from app.R (modeling tab).
# data2: raw experiment data frame; model_selection: integer 1-6 selecting the
# regression family (see run_modelingWithParams). Returns the fitted model.
run_modeling <- function(data2, model_selection) {
  # Response column in the experiment data.
  result_col = "NonTeamCapture"
  # Variables varied: {values they had}
  # max_speed_t_1: {25}
  # max_speed_t_2_predator: {30 40 50}
  # turn_rate_max_t_1: {0.25 0.5 0.75 1.0 1.25 1.5}
  # turn_rate_max_predator: {0.5}
  # allow_prey_switching_t_2_predator: {True False}
  ######## column names of numeric variables
  # FIX: "allow_prey_switching_t_2_predator" was listed BOTH here and in
  # categoryCol, so the column was selected twice and entered the model
  # matrix as a perfectly collinear duplicate. It is kept only in categoryCol.
  numericCol <- c("turn_rate_max_t_1",
                  "max_speed_t_2_predator")
  # numericCol <- colnames(data[sapply(data, is.numeric)])
  ######## column names of category variables
  categoryCol <- c("team_id",
                   "allow_prey_switching_t_2_predator")
  model = run_modelingWithParams(data2, result_col, numericCol, categoryCol , model_selection)
  return (model)
}
# The function
# Core modeling routine.
# data2: input data frame; result_col: name of the response column;
# numericCol / categoryCol: predictor column names; model_selection: 1-6.
# Returns the fitted model object (class depends on the selection).
run_modelingWithParams <- function(data2, result_col, numericCol, categoryCol , model_selection) {
  ## Rename the response column to 'result' so every formula can use `result ~ .`.
  colnames(data2)[colnames(data2) == result_col] <- 'result'
  data <- data2
  dataTypes <- sapply(data, class)
  resultCol <- data['result']
  ## Keep only the response plus the requested predictor columns.
  mydata <- cbind(data$result, data[ append(numericCol, categoryCol)])
  colnames(mydata)[colnames(mydata) == 'data$result'] <- 'result'
  ## Drop rows with missing values.
  mydata <- na.omit(mydata)
  # mydata <- mydata[mydata$team_id > 1,]
  ## Optional Box-Cox transform of the response (disabled by default).
  ## NOTE(review): boxcox() would need MASS attached — confirm before enabling.
  boxcox = FALSE
  if(boxcox){
    bc <- boxcox(result ~ ., data = mydata, lambda = seq(-1, 1, length = 10))
    lambda <- bc$x[which.max(bc$y)]
    mydata$result_old <- mydata$result
    mydata$result <- NULL
    mydata$result <- (mydata$result_old^lambda-1)/lambda
  }
  ## 2/3 train, 1/3 test split (caTools::sample.split; not seeded, so the
  ## split differs on every call).
  dataset = mydata
  split = sample.split(mydata$result, SplitRatio = 2/3)
  training_set = subset(dataset, split == TRUE)
  test_set = subset(dataset, split == FALSE)
  # Fitting the model to the training set
  if (model_selection == 1) { # Multivariate linear regression model
    model = lm(formula = result ~., data = training_set)
    y_pred<- predict(model,test_set)
  } else if(model_selection == 2) { # Linear regression with backward selection
    lmtest <- lm(result ~ . , data=training_set)
    model <- step(lmtest)
    y_pred<- predict(model,test_set)
  } else if (model_selection == 3) { # Principal component regression
    require(pls)
    model = pcr(result~., data = training_set, ncomp=dim(training_set)[2]-1, validation="CV")
    pcrCV<- RMSEP(model,estimate = "CV")
    #plot(pcrCV, main(""))
    param_num<-which.min(pcrCV$val)
    # FIX: the original passed `ncop = param_num` (typo), so predict() silently
    # ignored the CV-selected component count. `ncomp` matches branch 4 below.
    y_pred = predict(model,test_set,ncomp = param_num)
  } else if (model_selection == 4) { # Partial least squares
    model = plsr(result~., data = training_set, ncomp = dim(training_set)[2]-1, validation ="CV")
    plsCV<- RMSEP(model, estimate = "CV")
    #plot(plsCV, main = "")
    param_num <- which.min(plsCV$val)
    y_pred = predict(model,test_set,ncomp = param_num)
  } else if (model_selection == 5) { # Random Forest Regression
    library(randomForest)
    # NOTE(review): `subset = training_set` hands a data frame where an index
    # vector is expected — confirm intent.
    model = randomForest(x = training_set[,-1],y = training_set$result, subset = training_set, ntree = 20)
    y_pred <- predict(model,test_set)
  } else if (model_selection == 6) { # Neural networks
    require(nnet)
    model = nnet(result~., data = training_set,size = 10)
    y_pred<- predict(model,test_set)
  }
  # The function returns the model so that we can access whatever we need in
  # the app.R file for the visualization.
  return(model)
  #summary(model)
  #(mean((y_pred-test_set$result)^2))
  #par(mfrow=c(2,2))#drawing in 2 by 2 format
  #plot(model,which=c(1:4), col = "cornflowerblue")
  #plot(model$fitted.values, model$residuals)
}
###### fitted values
###### fitted values
# Refits the selected model on a fresh random split and returns the test-set
# predictions. Same arguments as run_modelingWithParams.
# NOTE(review): sample.split is not seeded, so the split here will not match
# the one used by run_modelingWithParams or result() — confirm whether a
# shared seed is intended before comparing y_pred to result().
ypred <- function(data2, result_col, numericCol, categoryCol , model_selection) {
  ## Rename the response column to 'result' for the formulas below.
  colnames(data2)[colnames(data2) == result_col] <- 'result'
  data <- data2
  dataTypes <- sapply(data, class)
  resultCol <- data['result']
  # Variables varied: {values they had}
  # max_speed_t_1: {25}
  # max_speed_t_2_predator: {30 40 50}
  # turn_rate_max_t_1: {0.25 0.5 0.75 1.0 1.25 1.5}
  # turn_rate_max_predator: {0.5}
  # allow_prey_switching_t_2_predator: {True False}
  # ########provide column names of numeric variables
  # numericCol <- c("turn_rate_max_t_1",
  #                 "max_speed_t_2_predator",
  #                 "allow_prey_switching_t_2_predator")
  # # numericCol <- colnames(data[sapply(data, is.numeric)])
  #
  # ########provide column names of category variables
  # categoryCol <- c("team_id",
  #                  "allow_prey_switching_t_2_predator")
  ## Keep only the response plus the requested predictor columns.
  mydata <- cbind(data$result, data[ append(numericCol, categoryCol)])
  colnames(mydata)[colnames(mydata) == 'data$result'] <- 'result'
  ## Drop rows with missing values.
  mydata <- na.omit(mydata)
  ## Optional Box-Cox transform of the response (disabled by default).
  boxcox = FALSE
  if(boxcox){
    bc <- boxcox(result ~ ., data = mydata, lambda = seq(-1, 1, length = 10))
    lambda <- bc$x[which.max(bc$y)]
    mydata$result_old <- mydata$result
    mydata$result <- NULL
    mydata$result <- (mydata$result_old^lambda-1)/lambda
  }
  ## 2/3 train, 1/3 test split.
  dataset = mydata
  split = sample.split(mydata$result, SplitRatio = 2/3)
  training_set = subset(dataset, split == TRUE)
  test_set = subset(dataset, split == FALSE)
  # Fitting the model to the training set
  if (model_selection == 1) { # Multivariate linear regression model
    model = lm(formula = result ~., data = training_set)
    y_pred<- predict(model,test_set)
  } else if(model_selection == 2) { # Linear regression with backward selection
    lmtest <- lm(result ~ . , data=training_set)
    model <- step(lmtest)
    y_pred<- predict(model,test_set)
  } else if (model_selection == 3) { # Principal component regression
    require(pls)
    model = pcr(result~., data = training_set, ncomp=dim(training_set)[2]-1, validation="CV")
    pcrCV<- RMSEP(model,estimate = "CV")
    #plot(pcrCV, main(""))
    param_num<-which.min(pcrCV$val)
    # FIX: was `ncop = param_num` (typo); predict() silently ignored the
    # CV-selected component count. `ncomp` matches branch 4 below.
    y_pred = predict(model,test_set,ncomp = param_num)
  } else if (model_selection == 4) { # Partial least squares
    model = plsr(result~., data = training_set, ncomp = dim(training_set)[2]-1, validation ="CV")
    plsCV<- RMSEP(model, estimate = "CV")
    #plot(plsCV, main = "")
    param_num <- min(which.min(plsCV$val), dim(training_set)[2]-1)
    y_pred = predict(model,test_set,ncomp = param_num)
  } else if (model_selection == 5) { # Random Forest Regression
    library(randomForest)
    model = randomForest(x = training_set[,-1],y = training_set$result, subset = training_set, ntree = 20)
    y_pred <- predict(model,test_set)
  } else if (model_selection == 6) { # Neural networks
    require(nnet)
    # NOTE(review): size = 2 here vs size = 10 in run_modelingWithParams —
    # confirm which hidden-layer size is intended.
    model = nnet(result~., data = training_set,size = 2)
    y_pred<- predict(model,test_set)
  }
  ## Return the test-set predictions only.
  return(y_pred)
}
###### actual values
###### actual values
# Rebuilds the same cleaning/split pipeline and returns the test-set response
# values. Same arguments as run_modelingWithParams (model_selection unused).
# NOTE(review): the sample.split call is not seeded, so this test set will
# NOT contain the same rows as the one ypred() predicted on — confirm whether
# comparing result() to ypred() is meaningful without a shared seed.
result <- function(data2, result_col, numericCol, categoryCol, model_selection) {
##specify response variable (y) and name it result
colnames(data2)[colnames(data2) == result_col] <- 'result'
data <- data2
dataTypes <- sapply(data, class)
resultCol <- data['result']
# Variables varied: {values they had}
# max_speed_t_1: {25}
# max_speed_t_2_predator: {30 40 50}
# turn_rate_max_t_1: {0.25 0.5 0.75 1.0 1.25 1.5}
# turn_rate_max_predator: {0.5}
# allow_prey_switching_t_2_predator: {True False}
########provide column names of numeric variables
# numericCol <- c("turn_rate_max_t_1",
# "max_speed_t_2_predator",
# "allow_prey_switching_t_2_predator")
# # numericCol <- colnames(data[sapply(data, is.numeric)])
#
# ########provide column names of category variables
# categoryCol <- c("team_id",
# "allow_prey_switching_t_2_predator")
##read useful variables only (specified above)
mydata <- cbind(data$result, data[ append(numericCol, categoryCol)])
colnames(mydata)[colnames(mydata) == 'data$result'] <- 'result'
##Clean missing data
mydata <- na.omit(mydata)
## Optional Box-Cox transform (disabled; must mirror the other functions).
boxcox = FALSE
if(boxcox){
bc <- boxcox(result ~ ., data = mydata, lambda = seq(-1, 1, length = 10))
lambda <- bc$x[which.max(bc$y)]
mydata$result_old <- mydata$result
mydata$result <- NULL
mydata$result <- (mydata$result_old^lambda-1)/lambda
}
## 2/3 train, 1/3 test split; only the held-out responses are returned.
dataset = mydata
split = sample.split(mydata$result, SplitRatio = 2/3)
training_set = subset(dataset, split == TRUE)
test_set = subset(dataset, split == FALSE)
return(test_set$result)
}
|
6d41360581038fc600c2adf347463d6a04d0f958
|
422308ef4a5d2f1cfb775fa7dff7ca56a5d41f02
|
/UI.R
|
7d41af849749435c7c0d46ee72f219536f4f2612
|
[] |
no_license
|
BDFace/DDP_Project_Shiny
|
66c61349028bc08113990f323814d9feabbecd4f
|
7defa1c9c99e977ddfdd6748f1bca234b0e073b8
|
refs/heads/master
| 2016-09-05T16:00:22.861523
| 2015-04-20T21:09:25
| 2015-04-20T21:09:25
| 34,286,218
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,226
|
r
|
UI.R
|
# Shiny UI definition: estimates a child's adult height from parent heights.
# Layout: sidebar with the two height inputs plus son/daughter checkboxes,
# main panel with an "App" tab (predictions) and a "Documentation" tab.
shinyUI(pageWithSidebar(
headerPanel("Child's height estimate based on parent height"),
sidebarPanel(
# Parent heights in cm; defaults are example values.
numericInput('heightdad', 'Height of father (cm)', 185.6, min = 100, max = 400, step = 5),
numericInput('heightmum', 'Height of mother (cm)', 170.9, min = 100, max = 400, step = 5),
# Optional sex-specific predictions, toggled by these checkboxes.
checkboxInput(inputId = "son",
label = strong("Son"),
value = FALSE),
checkboxInput(inputId = "daughter",
label = strong("Daughter"),
value = FALSE),
# submitButton defers re-rendering until the user clicks Submit.
submitButton('Submit')
),
mainPanel(
tabsetPanel(
tabPanel("App",
h3('Average child height based on height of the parents'),
h4('On average a child born to parents of the given heights would grow to a height (cm) of: '),
# "prediction", "son", "daughter" outputs are rendered by server.R.
verbatimTextOutput("prediction"),
# display only if son box is ticked
conditionalPanel(condition = "input.son == true",
h4('A son would on average grow to a height (cm) of: '),
verbatimTextOutput("son")),
# display only if daughter box is ticked
conditionalPanel(condition = "input.daughter == true",
h4('A daughter would on average grow to a height (cm) of: '),
verbatimTextOutput("daughter"))
),
tabPanel("Documentation",
h4('The app is intended to show a prediction for a childs height based on the heights of the parents.'),
p('The user needs to add in the height of the father and height of the mother and check the box for
if they want to know the predicted height of a son, a daughter or both. The app will run a simple
calculation (note. this is not based of any real data of childrens heights but just a simple sum
to shows the functions are working) and show the average expected height of a child. If the check
boxes are ticked a calculation for a son and/or a daughter will also appear'),
p('The ui.R and server.R files can be found at the following github site: https://github.com/BDFace/DDP_Project_Shiny')
)
)
)
)
)
|
637cdbe203c302c30b0f3540e28182f688340a10
|
114c3031ba27cd9f039f299da0ea6c581f0d005b
|
/Figures.R
|
832462d537f29179f20b1ec31558735ce4dc8466
|
[] |
no_license
|
joyceongjl/hotspots_coherence
|
ad7b3fc97e332c4c5ba3a5c95fc29b03d6d46f72
|
ba1f08220348d267d4057bbfbe6fd24d32ac9fc3
|
refs/heads/master
| 2022-12-01T09:07:35.744406
| 2020-08-12T22:40:17
| 2020-08-12T22:40:17
| 286,776,009
| 1
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 12,225
|
r
|
Figures.R
|
#R code for figures in global MS
#### Code for Fig 1 pedagogical
#Plot of pedagogical figure,a is inphase on long ts but antiphase on short,
#b is antiphase on long ts but inphase on short
#c is coh with phase diff 0 (in phase), d is coherence with lag (non zero phase diff)
#code for a and b
tt=100 #number of timesteps
ncycles1=1.5 #number of cycles in signal 1
ncycles2=10 #number of cycles in signal 2
res=10 #resolution, number of sub-intervals per timestep--increasing this makes a smoother plot
x<-seq(from=res/tt,to=tt,by=res/tt)
# s1: slow sine (offset +2.5 so it plots above s2); s2: fast sine.
s1<-sin(seq(from=0,to=2*pi*ncycles1,length.out=tt*res))+2.5
s2<-sin(seq(from=0,to=2*pi*ncycles2,length.out=tt*res))
# s3/s4: mirror images (antiphase versions) of s1 and s2 respectively.
s3<-(s1-2.5)*-1+2.5
s4<-s2*-1
#df for sine waves with long and short timescales
tsfd<-as.data.frame(x)
head(tsfd)
tsfd$a1<-s1+s2#this is for both a1 and b1
tsfd$a2<-s1*0.9+s4
tsfd$b2<-s3+s2
#code for c and d, coherence with phase diff = 0 and non-zero
# NOTE: no set.seed() here, so y1-y4 differ on every run.
tt=100
# y1: AR(2) base series; y2: y1 + noise (coherent, in phase);
# y3: y1 lagged by 2 steps + noise (coherent with a phase lag);
# y4: shuffled y1 (no coherence control).
y1<-arima.sim(model=list(ar=c(0.7,-0.5)), n=tt)
y2<-y1 + rnorm(tt, 0, 0.3)
y3<-c(rnorm(2), y1[1:(tt-2)]) + rnorm(tt,0,0.3)
y4<-y1[sample(tt, tt, replace=F)]
#df for random ts with coherence, phase diff=0 or phase lags
tscd<-as.data.frame(1:tt)
tscd$c1<-as.numeric(y1)#same as d1
tscd$c2<-as.numeric(y2)
tscd$d2<-as.numeric(y3)
tscd$noc<-as.numeric(y4)
head(tscd)
#correlation tests (results recorded inline are from one realised run)
cor.test(tscd$c1, tscd$c2)#pearsons corr = 0.976, p<0.01, or p = 2.2e-16
cor.test(tscd$c1, tscd$d2)#pearsons corr = 0.044, p=0.662
cor.test(tscd$c1, tscd$noc)#pearsons corr = 0.046, p=0.648
#coherence tests
# NOTE(review): cleandat/coh/plotmag/bandtest/get_bandp appear to come from
# the wsyn package — confirm it is attached before this point.
y1cln<-cleandat(y1, times=1:100, clev=1)$cdat
y2cln<-cleandat(y2, times=1:100, clev=1)$cdat
y3cln<-cleandat(y3, times=1:100, clev=1)$cdat
y4cln<-cleandat(y4, times=1:100, clev=1)$cdat
# Coherence of y1 with the noisy copy (strong, in phase).
cohts<-coh(y1cln, y2cln, times=1:100, norm="powall", sigmethod="fast")
Mod(mean(cohts$coher, na.rm=T))#0.881
plotmag(cohts)#coherence sig from 2-20 timescales
cohts<-bandtest(cohts, c(2,30))
get_bandp(cohts)#p<0.01, or p = 0.00100
# Coherence of y1 with the lagged copy (significant, with phase lag).
cohts2<-coh(y1cln, y3cln, times=1:100, norm="powall", sigmethod="fast")
Mod(mean(cohts2$coher, na.rm=T))#0.452
plotmag(cohts2)#coherence sig from 2-10 timescales
cohts2<-bandtest(cohts2, c(2,30))
get_bandp(cohts2)#p<0.01, or p = 0.00100
# Coherence of y1 with the shuffled copy (null control, insignificant).
cohts3<-coh(y1cln, y4cln, times=1:100, norm="powall", sigmethod="fast")
Mod(mean(cohts3$coher, na.rm=T))#0.194
plotmag(cohts3)#coherence insig for all timescales
cohts3<-bandtest(cohts3, c(2,30))
get_bandp(cohts3)#p=0.437
# Panels c/d/null: y1 (black) overlaid with each comparison series (red).
p1a<-ggplot(tscd, aes(x=`1:tt`, y=c1)) + geom_line() + theme_classic(base_size=14) +
geom_line(aes(x=`1:tt`, y=c2), color="red") + labs(x="Time", y="Signal") +
scale_y_continuous(breaks=seq(-4,4,2),limits=c(-4.2,4)) +
scale_x_continuous(breaks=seq(0,100, 25),limits=c(0,100))
p2a<-ggplot(tscd, aes(x=`1:tt`, y=c1)) + geom_line() + theme_classic(base_size=14) +
geom_line(aes(x=`1:tt`, y=d2), color="red") + labs(x="Time", y="Signal") +
scale_y_continuous(breaks=seq(-4,4,2),limits=c(-4.2,4)) +
scale_x_continuous(breaks=seq(0,100, 25),limits=c(0,100))
p3a<-ggplot(tscd, aes(x=`1:tt`, y=c1)) + geom_line() + theme_classic(base_size=14) +
geom_line(aes(x=`1:tt`, y=noc), color="red") + labs(x="Time", y="Signal") +
scale_y_continuous(breaks=seq(-4,4,2),limits=c(-4.2,4)) +
scale_x_continuous(breaks=seq(0,100, 25),limits=c(0,100))
# Stack the three panels vertically (gridExtra::arrangeGrob).
g<-arrangeGrob(p1a, p2a, p3a, nrow=3)
####code for Fig 2
library(dplyr)
library(ggplot2)
library(cowplot)
# NOTE(review): the second theme_set overrides the first — confirm theme_bw
# is the intended default.
theme_set(theme_classic(base_size=14))
theme_set(theme_bw(base_size=14))
###global scale figures
# Null distribution of significant-coherence counts from 100 randomisations.
nullredqvdf<-read.csv("~/nullredqvdf_pvals_red07rand100_20190417.csv")
str(nullredqvdf)
length(which(nullredqvdf$globpt>0.568))#r=0, pval=0+1/100+1 = 0.0099
#histogram with numbers; red line marks the observed count (3382)
globnullqvp2<-ggplot(nullredqvdf, aes(nfdr20))
globp2<-globnullqvp2 + geom_histogram(binwidth=20) + theme_classic(base_size=14) +
labs(title="", x="", y="Count") + theme(axis.text=element_text(size=14)) +
scale_y_continuous(expand=c(0,0), limits=c(0,30)) +
scale_x_continuous(limits=c(0, 3400)) + geom_vline(xintercept = 3382, col="red")#
# Null distribution of mean coherence from 100 random samples of 3382 pairs.
red07cohvalsampl<-read.csv("~/red07cohvalsampl_rand100.csv")
str(red07cohvalsampl)
#note that mean coh of observed 595686 rships globally = 0.5426
length(which(red07cohvalsampl$meancoh3382s>0.5426))#r=97, pval = 98/101 = 0.9703
globnullcoh3382sp1<-ggplot(red07cohvalsampl, aes(meancoh3382s))
globcoh<-globnullcoh3382sp1 + geom_histogram(binwidth=0.0004) + theme_classic(base_size=14) +
labs(title="", x="", y="") + theme(axis.text=element_text(size=14)) +
scale_y_continuous(expand=c(0,0), limits =c(0,20)) +
geom_vline(xintercept = 0.5426, col="red")
## circular histogram of all 3382 coh rships globally
# "In phase" means mean phase within (-0.786, 0.786) radians (~ +/- pi/4).
coh3382ph<-read.csv("~/coh3382ph_20190813.csv")
length(which(coh3382ph$mn_phs>-0.786 & coh3382ph$mn_phs<0.786))#1058 out of 3382 are in-phase, ie. 31.29%
globdomphp2<-ggplot(coh3382ph, aes(mn_phs))
globcirp2<-globdomphp2 + geom_histogram(binwidth=0.08) +
scale_x_continuous(breaks=c(0, pi/2, pi, -pi/2), labels=expression(0,pi/2,pi/-pi,-pi/2), expand=c(0,0)) +
coord_polar() + labs(x="", y="", title="") + theme_bw(base_size=14) + theme(panel.grid.minor=element_blank(), axis.text=element_text(size=14))
###ocean scale figures
# Randomization null for within-ocean coherence (1000 randomizations).
randout<-read.csv("~/oceans_randout_20181031.csv")
str(randout)
length(which(randout$winocpt>0.6037141))#r=1, estimate p value = 1+1/1000+1 = 0.002 for within ocean coh
length(which(randout$wincoh>0.6610896))#r=30, est pval = 0.031 for within ocean
oc4tsb<-read.csv("D:/Rutgers_postdoc/data/FAO landings data/withinocean_4tsband_20190419.csv")
str(oc4tsb)
# In-phase classification: |mean phase| < 0.786 rad (~pi/4).
length(which(oc4tsb$mn_phs<0.786 & oc4tsb$mn_phs>(-0.786)))#n=1069 out of 3127, 0.34186
#new hist for winoc by numbers
randout1a<-ggplot(data=randout, aes(randout$winoc))
# Red line = observed number of within-ocean coherent relationships (1427).
ocp2<-randout1a + geom_histogram(binwidth=10) + theme_classic(base_size=14) +
labs(title="", x="", y="Count") + theme(axis.text=element_text(size=14)) +
scale_y_continuous(expand=c(0,0), limits=c(0,200)) +
scale_x_continuous(limits=c(1250, 1450)) + geom_vline(xintercept = 1427, col="red")
randout2<-ggplot(data=randout, aes(randout$wincoh))
occoh<-randout2 + geom_histogram(binwidth=0.0005) + theme_classic(base_size=14) +
labs(title="", x="", y="") + theme(axis.text = element_text(size=14)) +
scale_y_continuous(expand=c(0,0), limits =c(0,150)) +
geom_vline(xintercept = 0.6610896, col="red")
#new cir hist for within oceans
ocalltsb<-read.csv("~/coh1427_winoc_avgph_20190813.csv")
length(which(ocalltsb$mn_phs>-0.786 & ocalltsb$mn_phs<0.786))#501 out of 1427 are in-phase, ie. 35.11%
domphoc3<-ggplot(ocalltsb, aes(mn_phs))
occir2<-domphoc3 + geom_histogram(binwidth=0.15) +
scale_x_continuous(breaks=c(0, pi/2, pi, -pi/2), labels=expression(0,pi/2,pi/-pi,-pi/2), expand=c(0,0)) +
coord_polar() + labs(x="", y="", title="") + theme_bw(base_size=14) + theme(panel.grid.minor=element_blank(), axis.text=element_text(size=14))
#################fao scale
regrandout<-read.csv("~/faoregions_randout_20181030.csv")
# Drop the first (row-index) column written by write.csv.
regrandout<-regrandout[,-1]
str(regrandout)
length(which(regrandout$meancohintra>0.6814009))#r=0, pval = 0.001
#new hist for fao using numbers
regionrandout1a<-ggplot(data=regrandout, aes(regrandout$nintra))
# Red line = observed number of within-FAO-region coherent relationships (381).
faop2<-regionrandout1a + geom_histogram(binwidth=5) + theme_classic(base_size=14) +
labs(title="", x="Number", y="Count") +
scale_y_continuous(expand=c(0,0), limits=c(0,150)) + theme(axis.text = element_text(size=14)) +
geom_vline(xintercept = 381, col="red") + scale_x_continuous(limits=c(200, 400))
# NOTE(review): ggsave here saves faop2 (the last plot) to a Windows path.
ggsave(filename="D:/Rutgers_postdoc/Global MS/Figures/Aug2019/hist_rand1000_fao_num_20190812.eps", device="eps", scale=1, width=5, height=4, units="in", dpi=300)
regionrandout2<-ggplot(data=regrandout, aes(regrandout$meancohintra))#use ptinter for percentage inter
faocoh<-regionrandout2 + geom_histogram(binwidth=0.002) + theme_classic(base_size=14) +
labs(title="", x="Coherence", y="") + theme(axis.text = element_text(size=14)) +
scale_y_continuous(expand=c(0,0), limits=c(0,150)) +#ptinter 200, mean coh 150
geom_vline(xintercept = 0.681401, col="red") + theme(text = element_text(size=14))
#new cir hist for within fao
faodf<-read.csv("~/coh381_winfao_avgph_20190813.csv")
length(which(faodf$mn_phs>-0.786 & faodf$mn_phs<0.786))#185 out of 381 are in-phase, ie. 48.56%
domphfao<-ggplot(faodf, aes(mn_phs))
faocir2<-domphfao + geom_histogram(binwidth=0.15) +
scale_x_continuous(breaks=c(0, pi/2, pi, -pi/2), labels=expression(0,pi/2,pi/-pi,-pi/2), expand=c(0,0)) +
coord_polar() + labs(x="Mean phase", y="", title="") + theme_bw(base_size=14) + theme(panel.grid.minor=element_blank(), axis.text=element_text(size=14))
# Assemble the 3x3 panel (rows: global, ocean, FAO) with cowplot.
comb3x3<-plot_grid(globp2, globcoh, globcirp2, ocp2, occoh, occir2, faop2, faocoh, faocir2,
labels=c("A", "B", "C", "D", "E", "F", "G", "H", "I"), align="h", nrow=3, ncol=3, label_size=18)
save_plot("~/combinedglobocfao.eps",
comb3x3, ncol=3, nrow=3, base_height=4, base_aspect_ratio=1.1)
#### Code for Fig 3, spatial map
library(maps)
library(mapdata)
library(rgdal)
library(plyr)
library(dplyr)
library(reshape2)
library(RColorBrewer)
library(fields) # colorbar.plot()
###do new map of fao regions with pvals of % coh rships (color) and pvals of coh values (size)
regionmat<-read.csv("~/regionmat_20181101.csv")
psigcvcoh2<-read.csv(file="~/cf_fao_rcorr_null_20180929.csv")
# Read FAO data
fao_areas <- readOGR(dsn="~/FAO mapping shape files", layer="FAO_AREAS_NOCOASTLINE", verbose=F)
fao_areas <- subset(fao_areas, F_LEVEL=="MAJOR")
# Keep region id plus the two p-value columns; join onto the plotting table.
regionpv<-regionmat[,c(1,4:5)]
str(regionpv)
psigcvcoh3<-psigcvcoh2[,c(2:4,6:7,40:41)]
psigcvcoh3<-psigcvcoh3 %>% left_join(regionpv, by="region")
#color
# Bin p-values into (0,0.01], (0.01,0.05], (0.05,1] and map to a color ramp.
psigcvcoh3$pvalpt_bin <- cut(psigcvcoh3$pvalpt, breaks=c(0,0.01,0.05,1), include.lowest=TRUE)
colors_pvalpt <- colorRampPalette(c("red", "orange", "cyan"))(nlevels(psigcvcoh3$pvalpt_bin))
psigcvcoh3$pvalpt_bin_color <- colors_pvalpt[psigcvcoh3$pvalpt_bin]
# Same binning for the coherence p-value, mapped to point size instead.
psigcvcoh3$pvcoh_bin <- cut(psigcvcoh3$pvalcoh, breaks=c(0,0.01,0.05,1), include.lowest=TRUE)
size_pvcoh <- c(4, 3.5, 1.5)
psigcvcoh3$pvcohcex <- size_pvcoh[psigcvcoh3$pvcoh_bin]
# Base-graphics EPS output: map panel plus a narrow legend panel.
postscript("~/map_pvptcoh.eps", width=720, height=480)
layout(matrix(c(1,2), ncol=2, byrow=T), widths=c(0.9,0.1))
par(mar=c(0.1,0.1,0.1,0.1), xpd=NA)
xlim <- c(-180, 180)
ylim <- c(-90, 90)
# Plot FAO stat areas
plot(fao_areas, border="grey70", lty=3, xlim=xlim, ylim=ylim)
# Plot world countries
map("world", col="grey65", fill=T, border="white", lwd=0.3,
xlim=xlim, ylim=ylim, add=T)
points(x=psigcvcoh3$x, y=psigcvcoh3$y,
pch=19, col=psigcvcoh3$pvalpt_bin_color, cex=psigcvcoh3$pvcohcex)
text(x=psigcvcoh3$x, y=psigcvcoh3$y, labels=psigcvcoh3$nsp, cex=0.9, pos=1, offset=0.65, font=2, col="black")
text(x=psigcvcoh3$x, y=psigcvcoh3$y, labels=psigcvcoh3$area_short, pos=3, offset=0.65, cex=0.9, font=2, col="black")
#size legend
legend(x=190, y=60,legend=c("p>0.05", "p<0.05","p<0.01"), bty="n", pch=c(1,1,1),
title="Average\ncoherence", pt.cex=c(1.5, 3.5, 4), cex=0.9, y.intersp = 1.7)
#color legend
plot(1:10, 1:10, bty="n", type="n", xaxt="n", yaxt="n", xlab="", ylab="")
colorbar.plot(x=0, y=3, adj.x=0, adj.y=0, horiz=F, col=colors_pvalpt,
strip=seq(-1.5,1.5, length.out=nlevels(psigcvcoh3$pvalpt_bin)),
strip.width=0.3, strip.length=1, xpd=NA)
text(x=2.5, y=c(3.2, 3.7, 4.2), pos=4, labels=c("p<0.01", "p<0.05", "p>0.05"), cex=0.9)
text(x=-2, y=4.8, pos=4, labels="Percentage of\ncoherent\nrelationships", font=2, cex=0.9)
dev.off()
# Portfolio effects and percentage of in-phase
# Scatter of portfolio effect vs % in-phase relationships, labelled by FAO
# region abbreviation (data frame alltsphpe is built before this excerpt).
peallts<-ggplot(alltsphpe, aes(x=ptph0, y=pe))
peallts + geom_text(aes(label=area_short, color=ptphlag), size=5) + theme_classic(base_size=14) +
labs(x="Percentage of in-phase relationships", y="Portfolio effect") +
scale_y_continuous(breaks=seq(15,85, 10),limits=c(20,85)) + scale_x_continuous(breaks=seq(0,100,10),limits=c(0, 100))
# Density plot of ANE and IOE
alldf2a<-read.csv("D:/Rutgers_postdoc/data/FAO landings data/alldf2a_20190411.csv")
# Compare the mean-phase distributions of the two contrasting regions.
win2<-subset(alldf2a, fao=="ANE"|fao=="IOE")
win2p3<-ggplot(win2, aes(mn_phs, color=fao))
win2p3 + geom_density() + theme_classic() +scale_y_continuous(expand=c(0,0), limits=c(0,0.7)) +
labs(x="Mean phase", y="Density") +
scale_x_continuous(breaks=c(-pi, -pi/2, 0, pi/2, pi), labels=expression(-pi,-pi/2,0,pi/2,pi))
|
aee978e1c621e322531ffa23ef93e4c82fb10659
|
8a38f23cbf50f0acf948318a1b56d566bf39aba9
|
/new_export.R
|
89c1ca80286a0c6e1bf718513bd314af48e5c3d8
|
[] |
no_license
|
Chirurgus/discrete_copula
|
91b3210357df599dafbc55b783816ceab7e3b7b8
|
21c138a54fa97beacf2c43bad374365f6b791d35
|
refs/heads/master
| 2020-03-27T17:58:19.805059
| 2018-08-31T14:51:41
| 2018-08-31T14:51:41
| 146,889,107
| 2
| 1
| null | null | null | null |
UTF-8
|
R
| false
| false
| 4,807
|
r
|
new_export.R
|
# Created by Oleksandr Sorochynskyi
# On 29 08 18
# Extract the strictly-lower-triangular entries of a matrix as a vector
# (column-major order, diagonal excluded).
tri <- function(m) {
  lower_mask <- lower.tri(m)
  m[lower_mask]
}
# Cell-pair indices used throughout (cells 23 & 12 are near, 23 & 3 far, ...).
pair_near <- c(23,12)
pair_far <- c(23,3)
pair_median <- c(23,2)
# params
load("summary/params.RData")
# covar
load("summary/covar.RData")
# distances
load("summary/dist.RData")
# NOTE(review): bin_checkerboard, bin_barmovie and cond_model2.2_rand are
# defined elsewhere in the project; their outputs are assumed to be
# cell x time x repeat arrays — confirm against their definitions.
# 1) infered param: distance param_check param_fullfieldd param_bar
infered_param <- data.frame(distance= tri(distance[1:25,1:25]),
param_check= tri(params$model2.2$par$check[1:25,1:25]),
param_fullfield= tri(params$model2.2$par$fullfield[1:25,1:25]),
param_bar= tri(params$model2.2$par$bar[1:25,1:25]));
write.table(infered_param, "export/time_model/params.txt", row.names=FALSE)
# 2) time-behavior of noise-cov 12&23: noise-cov-data(time), noise-cov-model(time)
data <- bin_checkerboard(pair_near);
rand_check <- cond_model2.2_rand(1, 2, data, params$model2.2$par$check[pair_near[1], pair_near[2]], 1000);
# Noise covariance between the two cells at each time bin, data vs model draws.
data_noise_cov <- sapply(1:dim(data)[2], function(t) {
cov(data[1,t,],data[2,t,]);
});
rand_check_noise_cov <- sapply(1:dim(rand_check)[2], function(t) {
cov(rand_check[1,t,],rand_check[2,t,]);
});
time_behavior <- data.frame(noise_check_data_check_param_time_data= data_noise_cov,
noise_check_data_check_param_time_model= rand_check_noise_cov);
write.table(time_behavior, "export/time_model/cov_noise_check_data_check_param_time_12_23.txt")
# 3) for (check, fullfield,bars): noise-cov-data noise-cov-model
check <- data.frame(noise_check_data_check_param_data= tri(covar$check_data$check_param$model2.2$real$noise[1:25,1:25]),
noise_check_data_check_param_model= tri(covar$check_data$check_param$model2.2$simu$noise[1:25,1:25]));
fullfield <- data.frame(noise_fullfield_data_fullfield_param_data= tri(covar$fullfield_data$fullfield_param$model2.2$real$noise[1:25,1:25]),
noise_fullfield_data_fullfield_param_model= tri(covar$fullfield_data$fullfield_param$model2.2$simu$noise[1:25,1:25]));
bar <- data.frame(noise_bar_data_bar_param_data= tri(covar$bar_data$bar_param$model2.2$real$noise[1:25,1:25]),
noise_bar_data_bar_param_model= tri(covar$bar_data$bar_param$model2.2$simu$noise[1:25,1:25]));
write.table(check, "export/time_model/cov_noise_check_data_check_param.txt", row.names= FALSE)
write.table(fullfield, "export/time_model/cov_noise_fullfield_data_fullfield_param.txt", row.names= FALSE)
write.table(bar, "export/time_model/cov_noise_bar_data_bar_param.txt", row.names= FALSE)
# 4) with bar_data: noise-cov-data(time), noise-cov-model(time,twobar-param), noise-cov-model(time,check-param)
data <- bin_barmovie(pair_near);
rand_bar <- cond_model2.2_rand(1, 2, data, params$model2.2$par$bar[pair_near[1], pair_near[2]], 1000);
rand_check <- cond_model2.2_rand(1, 2, data, params$model2.2$par$check[pair_near[1], pair_near[2]], 1000);
rand_fullfield <- cond_model2.2_rand(1, 2, data, params$model2.2$par$fullfield[pair_near[1], pair_near[2]], 1000);
data_noise_cov <- sapply(1:dim(data)[2], function(t) {
cov(data[1,t,],data[2,t,]);
});
rand_bar_noise_cov <- sapply(1:dim(rand_bar)[2], function(t) {
cov(rand_bar[1,t,],rand_bar[2,t,]);
});
rand_check_noise_cov <- sapply(1:dim(rand_check)[2], function(t) {
cov(rand_check[1,t,],rand_check[2,t,]);
});
rand_fullfield_noise_cov <- sapply(1:dim(rand_fullfield)[2], function(t) {
cov(rand_fullfield[1,t,],rand_fullfield[2,t,]);
});
time_behavior_bar <- data.frame(noise_bar_data_bar_param_time_data = data_noise_cov,
noise_bar_data_bar_param_time_model= rand_bar_noise_cov,
noise_bar_data_fullfield_param_time_model= rand_fullfield_noise_cov,
noise_bar_data_check_param_time_model= rand_check_noise_cov);
write.table(time_behavior_bar, "export/time_model/cov_noise_bar_data_all_param_time_12_23.txt",row.names= FALSE)
# 5) for (fullfield,bars): noise-cov-data(check_param) noise-cov-model(check_param)
fullfield <- data.frame(noise_fullfield_data_fullfield_param_data= tri(covar$fullfield_data$check_param$model2.2$real$noise[1:25,1:25]),
noise_fullfield_data_fullfield_param_model= tri(covar$fullfield_data$check_param$model2.2$simu$noise[1:25,1:25]));
bar <- data.frame(noise_bar_data_bar_param_data= tri(covar$bar_data$check_param$model2.2$real$noise[1:25,1:25]),
noise_bar_data_bar_param_model= tri(covar$bar_data$check_param$model2.2$simu$noise[1:25,1:25]));
write.table(fullfield, "export/time_model/cov_noise_fullfield_data_check_param.txt", row.names= FALSE)
write.table(bar, "export/time_model/cov_noise_bar_data_check_param.txt", row.names= FALSE)
|
6417baeb86b9444f283e3e5c9e09ae61f0cb6c6c
|
0d2190a6efddb7167dee3569820724bfeed0e89c
|
/R Package Creation/STAR/man/GomezTenureStatus.Rd
|
b61a49d3c970857bbb7678434d40925e1d39d431
|
[] |
no_license
|
djnpisano/RScriptLibrary
|
6e186f33458396aba9f4151bfee0a4517d233ae6
|
09ae2ac1824dfeeca8cdea62130f3c6d30cb492a
|
refs/heads/master
| 2020-12-27T10:02:05.719000
| 2015-05-19T08:34:19
| 2015-05-19T08:34:19
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 459
|
rd
|
GomezTenureStatus.Rd
|
\name{GomezTenureStatus}
\alias{GomezTenureStatus}
\docType{data}
\title{GomezTenureStatus}
\description{GomezTenureStatus}
\usage{data(GomezTenureStatus)}
\format{
A data frame with 187 observations on the following 2 variables.
\describe{
\item{\code{TenureStatus}}{a factor with levels \code{fixed-rent} \code{owner} \code{share-rent}}
\item{\code{FarmerClassif}}{a factor with levels \code{adopter} \code{nonadopter}}
}
}
\keyword{datasets}
|
1054286128f076872f92eebb8874947a2aeb6af1
|
35da33a02f5e37cc4c860059dc969597e97ef800
|
/map/Rfile/baidumap.R
|
6bd6be6729abde577408e3fdd73f1d21985f4b8d
|
[] |
no_license
|
yuanqingye/R_Projects
|
f36c176b6295818defb1429859b9faa3dac8862d
|
dc460dc56cb6a36b4bd7aeffeb1fb70e6791b1ec
|
refs/heads/master
| 2020-03-11T06:46:14.832080
| 2018-05-18T06:33:05
| 2018-05-18T06:33:05
| 129,839,300
| 2
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,570
|
r
|
baidumap.R
|
# Exploratory script querying the Baidu Maps API (geocoding, POI search,
# routing) and plotting results with ggmap.
library(ggmap)
library(baidumap)
options(baidumap.key = '4SY3dY8GrhfU5ChOeqFMvhcaf9DWo7dc')
# Gaode (AMap) API key
gaode_key = "de4e66af67591588df24da020bb3d3eb"
#AIzaSyAQwxqx2skmYVbgffhTfyKixuxAgU9PTbs google map api
# 9792023 4SY3dY8GrhfU5ChOeqFMvhcaf9DWo7dc server-side key / settings / delete
# POI search: universities in Beijing.
bj_college = getPlace('大学','北京')
pku_map = getBaiduMap('北京大学', width=600, height=600, zoom=18, scale = 2, messaging=FALSE)
ggmap(pku_map)
# Geocode a street address (Luding Rd 555, Putuo, Shanghai).
home_coordinate = getCoordinate('上海市普陀区泸定路555号',output='xml',formatted = TRUE)
sh_mcdonald = getPlace('麦当劳', '上海')
hz_travel = getPlace('旅游景点','杭州')
business_center = getPlace('商业中心','中国')
bj_business_center = getPlace('商业中心','北京')
sh_business_center = getPlace('商业中心','上海')
bjMap = getBaiduMap('北京',color = 'bw')
# Driving route between Beijing's two airports, drawn over the base map.
df = getRoute('首都国际机场', '北京南苑机场')
ggmap(bjMap) + geom_path(data = df, aes(lon, lat), alpha = 0.5, col = 'red')
bjMap2 = getBaiduMap('北京市区',color = 'color')
df2 = getRoute('北京市海淀区北洼路46号','北京市第三十五中学')
ggmap(bjMap2) + geom_path(data = df2,aes(lon,lat),alpha = 0.5,col = 'blue')
ls_travel = getPlace('旅游景点','拉萨')
library(maps)
map('world', fill = TRUE, col = rainbow(200),ylim = c(-90, 90), mar = c(0, 2, 0, 0))
getCoordinate('红星·美凯龙',output = 'xml')
# Reverse geocode a lon/lat pair.
getLocation(location = c(121.39264,31.24583))
liyangmap = getBaiduMap("溧阳市")
ggmap(liyangmap)
sh_redstar = getPlace('美凯龙 商场', '上海')
changshamap = getBaiduMap("长沙政区")
ggmap(changshamap)
# Batch-geocode addresses from result_dt (built elsewhere) and parse the JSON.
coordinate = getCoordinate(result_dt$address)
library(rjson)
rrcoordinate = sapply(coordinate,fromJSON)
# NOTE(review): `basicJSONHandler`/`co` below are undefined here and
# `rcoordinate` (single r) is referenced while `rrcoordinate` was assigned —
# likely leftover scratch code / a typo; confirm before reuse.
h = basicJSONHandler()
fromJSON(co, h)
cosample = rcoordinate[1]
lng = cosample[[1]]$result$location$lng
cosample[[1]]$result$location$lat
library(data.table)
# Accumulate parsed lon/lat pairs, substituting NaN when geocoding failed.
rrcoordinate = data.table(lng = vector(mode = 'numeric',length = 0),lat = vector(mode = 'numeric',length = 0))
for(i in 1:length(rcoordinate)){
cosample = rcoordinate[i]
lng = ifelse(is.null(cosample[[1]]$result$location$lng),NaN,cosample[[1]]$result$location$lng)
lat = ifelse(is.null(cosample[[1]]$result$location$lat),NaN,cosample[[1]]$result$location$lat)
tempdt = data.table(lng = lng,lat = lat)
rrcoordinate = rbind(rrcoordinate,tempdt)
}
result = cbind(result_dt,rrcoordinate)
bj_subway = getPlace('地铁',"北京")
sh_subway = getPlace('地铁',"上海")
changsha_shops = read.table(text =
"NAME
我
你
他",header = FALSE,fileEncoding = "GBK" )
|
f9846b8656477c90654aa7b2dc71640e41bb176d
|
ffdea92d4315e4363dd4ae673a1a6adf82a761b5
|
/data/genthat_extracted_code/SixSigma/examples/ss.cc.Rd.R
|
378d1c5471b2f9a687442d5a7d8d12cc8db74b91
|
[] |
no_license
|
surayaaramli/typeRrh
|
d257ac8905c49123f4ccd4e377ee3dfc84d1636c
|
66e6996f31961bc8b9aafe1a6a6098327b66bf71
|
refs/heads/master
| 2023-05-05T04:05:31.617869
| 2019-04-25T22:10:06
| 2019-04-25T22:10:06
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 242
|
r
|
ss.cc.Rd.R
|
# Extracted example from the SixSigma package: moving-range control charts
# for the pb.humidity CTQ, with and without an injected out-of-control point.
library(SixSigma)
### Name: ss.cc
### Title: Control Charts
### Aliases: ss.cc
### ** Examples
ss.cc("mr", ss.data.pb1, CTQ = "pb.humidity")
testout <- ss.data.pb1
# Overwrite observation 31 with an extreme value to trigger an alarm.
testout[31,] <- list(31,17)
ss.cc("mr", testout, CTQ = "pb.humidity")
|
7a83633b9f7a7105b2fdab41403b4e4ac7ca4461
|
8f836dfe40598caeb2fa1dce3e47f6960c1c7dd5
|
/SCBToolBox.R
|
0a234e92855c8ede945ebfbee9960c9a57c45d44
|
[] |
no_license
|
barberena/ExData_Plotting1
|
9e8cca3264902ab08fee7a1b225c03196daa6215
|
081f437a0c117b844d7a20404e548c5c4f80d32b
|
refs/heads/master
| 2021-01-18T02:19:22.526571
| 2015-10-08T04:41:04
| 2015-10-08T04:41:04
| 43,849,888
| 0
| 0
| null | 2015-10-07T23:08:23
| 2015-10-07T23:08:23
| null |
UTF-8
|
R
| false
| false
| 5,146
|
r
|
SCBToolBox.R
|
## -----------------------------------------------------------------------------
## Performs a few useful things
## To use:
## source("SCBToolBox.R")
##
## Written By Steven Barberena - Austin, TX 2015
##
## This is a library of R code I've been writing as I started
## taking the Coursera Data Science classes. If you haven't started
## creating your own library file, I highly recommend that you do. It will
## save you time for future projects and you can add new functions you find
## are useful for your general R development.
## -----------------------------------------------------------------------------
## -----------------------------------------------------------------------------
## Creates a subset file of data from a large file that match the given
## regular expression
## -----------------------------------------------------------------------------
## -----------------------------------------------------------------------------
## Copy every line of inputFile matching regExValue (Perl regex) to outputFile.
## If header is TRUE the first line of the input is always kept, whether or
## not it matches. Reads in 10,000-line chunks so arbitrarily large files can
## be processed with bounded memory.
##   inputFile  - path of the (possibly huge) text file to scan
##   outputFile - path to write matching lines to (overwritten)
##   regExValue - PCRE pattern tested against each line with grepl(perl=TRUE)
##   header     - keep the first input line unconditionally (default TRUE)
## -----------------------------------------------------------------------------
readBigDataSubsetRegEx <- function(inputFile, outputFile, regExValue, header = TRUE)
{
    inputCon <- file(inputFile, 'r')
    outputCon <- file(outputFile, 'w')
    # Fix: close connections even if an error interrupts the loop
    # (the original leaked both connections on error).
    on.exit({ close(outputCon); close(inputCon) }, add = TRUE)
    methodIndex <- 1
    # reading data in 10,000 line chunks, this makes things faster
    while (length(input <- readLines(inputCon, n = 10000)) > 0)
    {
        # progress report (inlined so this function is self-contained)
        cat(paste("Processing Lines", methodIndex, "to", methodIndex + 10000, sep = " "), "\n", sep = "")
        for (ix in seq_along(input))  # seq_along: safe even for short chunks
        {
            # scalar condition: use && (short-circuit) rather than elementwise &
            if (methodIndex == 1 && ix == 1 && header)
            {
                # keep header data
                writeLines(input[ix], con = outputCon)
            }
            else if (grepl(regExValue, input[ix], perl = TRUE))
            {
                writeLines(input[ix], con = outputCon)
            }
        }
        flush(outputCon)
        methodIndex <- methodIndex + 10000
    }
}
## -----------------------------------------------------------------------------
## Creates a subset file of data from a large file for the given
## index start and stop
## -----------------------------------------------------------------------------
## -----------------------------------------------------------------------------
## Copy lines startIndex..stopIndex (1-based, inclusive) of inputFile to
## outputFile. If header is TRUE the first input line is always written first.
## Reads in 10,000-line chunks so arbitrarily large files can be processed.
##
## Bug fixes vs the original:
##   1. header branch wrote input[ix] with ix undefined; now writes input[1].
##   2. a chunk was only scanned when the WHOLE requested range fell inside it,
##      so ranges spanning a chunk boundary were silently skipped; now any
##      overlapping chunk is scanned.
##   3. locationIx was off by one (methodIndex + ix made line 1 index 2).
## -----------------------------------------------------------------------------
readBigDataSubset <- function(inputFile, outputFile, startIndex, stopIndex, header = TRUE)
{
    inputCon <- file(inputFile, 'r')
    outputCon <- file(outputFile, 'w')
    # close connections even if an error interrupts the loop
    on.exit({ close(outputCon); close(inputCon) }, add = TRUE)
    chunkSize <- 10000
    methodIndex <- 1  # 1-based file index of the first line of the current chunk
    while (length(input <- readLines(inputCon, n = chunkSize)) > 0)
    {
        cat(paste("Processing Lines", methodIndex, "to", methodIndex + chunkSize - 1, sep = " "), "\n", sep = "")
        if (methodIndex == 1 && header)
        {
            # keep header data (bug fix 1: original used undefined ix here)
            writeLines(input[1], con = outputCon)
        }
        chunkEnd <- methodIndex + length(input) - 1
        # bug fix 2: scan any chunk that OVERLAPS the requested range
        if (startIndex <= chunkEnd && stopIndex >= methodIndex)
        {
            for (ix in seq_along(input))
            {
                # bug fix 3: correct 1-based file line number of input[ix]
                locationIx <- methodIndex + ix - 1
                if (locationIx >= startIndex && locationIx <= stopIndex)
                {
                    writeLines(input[ix], con = outputCon)
                }
            }
        }
        flush(outputCon)
        methodIndex <- methodIndex + chunkSize
    }
}
## -----------------------------------------------------------------------------
## Called to write information to a log file as well as to the console
## Logs will include a time stamp as to when the message was written
## -----------------------------------------------------------------------------
# Write a timestamped message both to the console and to a log file.
# Side effect: on first call, creates the GLOBAL variable `logFile`
# (via <<-) named after today's date; all later calls append to that file.
# Relies on getDateStamp() defined later in this file.
logWrite <- function(messageIn)
{
if(!exists("logFile"))
{
dateStamp <- getDateStamp()
logFile <<- paste("LogFile_", dateStamp, ".log", sep = "") ## logFile is now a global
}
# Timestamp prefix, e.g. "2015-10-07 23:08:23 CDT | message"
now <- format(Sys.time(), "%Y-%m-%d %H:%M:%S %Z | ")
newMessage <- paste(now, messageIn, sep = "")
consoleMessage <- paste(newMessage, "\n", sep = "")
cat(consoleMessage)
# append = TRUE so the whole session accumulates in one file
write(newMessage, file = logFile, append = TRUE)
}
## -----------------------------------------------------------------------------
## Called to write information to the console
## -----------------------------------------------------------------------------
# Print messageIn to the console with a trailing newline appended to each
# element (vector inputs get one newline per element, as paste vectorizes).
consoleWriteLine <- function(messageIn)
{
    terminated <- paste(messageIn, "\n", sep = "")
    cat(terminated)
}
## --------------------------------------------------------------------------
## This function downloads the data from the website and unzips the file
## --------------------------------------------------------------------------
# Download a zip archive from fileUrl into targetFolder (created if absent)
# and extract it there, logging each step via logWrite().
# The downloaded file is named Downloded_<timestamp>.zip so repeated runs
# never overwrite each other. Relies on logWrite() and getDateTimeStamp()
# defined elsewhere in this file. Pure I/O: no return value of interest.
downloadAndUnzip <- function(fileUrl, targetFolder)
{
dateTimeStamp <- getDateTimeStamp()
if (!file.exists(targetFolder))
dir.create(targetFolder)
# The source and target names for downloading the file
destFile <- paste("./", targetFolder, "/Downloded_", dateTimeStamp, ".zip", sep = "")
logWrite("Download File")
# Mac Download Command - comment out the windows version and use this if running on Mac
#download.file(fileUrl, destfile = destFile, method = "curl")
# Windows Download Command
download.file(fileUrl, destfile = destFile)
# log the date we downloaded the file
dateDownload <- date()
logWrite(paste("Downloaded File on ", dateDownload, sep = ""))
# unzip the downloaded file
logWrite(paste("Unzip File: ", destFile, sep = ""))
# unzip() returns the extracted file paths; these are appended to the log
unzipInfo <- unzip(destFile, overwrite = TRUE, exdir = paste(".", targetFolder, sep = "/"), setTimes = TRUE)
logWrite(unzipInfo)
}
# Current date-time as "YYYY_MM_DD_HH_MM", suitable for unique file names.
getDateTimeStamp <- function()
{
    stamp_pattern <- "%Y_%m_%d_%H_%M"
    format(Sys.time(), stamp_pattern)
}
# Current date as "YYYY_MM_DD", suitable for daily log file names.
getDateStamp <- function()
{
    stamp_pattern <- "%Y_%m_%d"
    format(Sys.time(), stamp_pattern)
}
# Current clock time as "HH_MM".
getTimeStamp <- function()
{
    stamp_pattern <- "%H_%M"
    format(Sys.time(), stamp_pattern)
}
|
5ee0db66cf7432e5854e49934ae5058d4621adb5
|
b87bb8cc21adeb55d256fea7fcf1573f5db7c383
|
/MarginplotRscript.R
|
499c43d185fc06d5b96cf2feaab1cdb045f9673c
|
[] |
no_license
|
PhDMattyB/MarginPlot
|
8011c98877ea4574f4da994d5d7138be81b6ade0
|
be65c793ee19a60a95573c80b1eeb6191e83636f
|
refs/heads/master
| 2020-03-23T11:08:43.924002
| 2018-09-25T14:42:00
| 2018-09-25T14:42:00
| 141,486,103
| 1
| 1
| null | null | null | null |
UTF-8
|
R
| false
| false
| 486
|
r
|
MarginplotRscript.R
|
library(tidyverse)
library(wesanderson)
library(ggExtra)
iris.graph = ggplot(data = iris, aes(x = Sepal.Length, y = Petal.Length))+
geom_point(aes(col = Species))+
labs(x = 'Sepal Length', y = 'Petal Length')+
theme_bw()+
scale_color_manual(values = wes_palette('Darjeeling1', n = 6, type ='continuous'))+
theme(legend.position = 'none')
ggMarginal(iris.graph, margins = 'both', type = 'histogram', groupColour = T,
groupFill = T, position = 'dodge', bins = 15)
|
6b137fa5c59a865941763e7abd90e7f0aff1ec83
|
5416264e9a51f3f5e45940ae63e18594ada711e5
|
/statCognition/man/state_linearity.Rd
|
8f87abef03a6c6e193b9032903c761b461efa64d
|
[
"MIT"
] |
permissive
|
linnykos/statCognition
|
452d31fe96a62875a24395483de443d6f0b5ab95
|
f3f2867b0e50f6a2a0194c14d9d8404833d267c4
|
refs/heads/master
| 2021-06-16T18:38:26.955235
| 2017-05-14T17:58:02
| 2017-05-14T17:58:02
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 565
|
rd
|
state_linearity.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/state_correlation.R
\name{state_linearity}
\alias{state_linearity}
\title{State feature: Pairwise linearity}
\usage{
state_linearity(dat, test_prop = 0.1, quant = 0.75, num_pairs = 50, ...)
}
\arguments{
\item{dat}{data object}
\item{test_prop}{proportion of values left as test sample}
\item{quant}{quantile of the difference in residuals}
\item{num_pairs}{maximum number of pairs to look at}
\item{...}{not used}
}
\value{
value
}
\description{
State feature: Pairwise linearity
}
|
31c75d5666aa705f193c63f938dc9f94a6f72cb5
|
6eddde9b74487719db12c51caefa7a788bcdf04a
|
/man/plsropt.Rd
|
c119e1cc4b3f1e2f471c201b9dab41d0293a9e5c
|
[] |
no_license
|
uwadaira/plsropt
|
79be7e7e91398b78ce4c662caed2cef81fcdd2c5
|
b633eaa63257333bd7ee5f64d824e8101f1855c7
|
refs/heads/master
| 2020-04-12T01:46:42.868932
| 2017-08-15T07:49:58
| 2017-08-15T07:49:58
| 45,820,246
| 2
| 1
| null | 2016-03-30T05:52:14
| 2015-11-09T06:45:12
|
R
|
UTF-8
|
R
| false
| true
| 377
|
rd
|
plsropt.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/plsropt.R
\docType{package}
\name{plsropt}
\alias{plsropt}
\alias{plsropt-package}
\title{plsropt: A package for optimizing spectral preprocessing method for PLS regression}
\description{
plsropt package can be used to optimize spectral preprocessing method for partial least squares regression.
}
|
af0141e6a4647afca43a68d427532f05ad5ee53c
|
0f811c3e6c6bac8fcab04175516d085c26834ee1
|
/man/RANBP9mutated.Rd
|
f25b6a7640c62dff15ff2bca148c3717b1b86b96
|
[
"MIT"
] |
permissive
|
lukatrkla/lanpAnalysis
|
7674b1b5a137b9456b956d2b30eee41c08aa6bb7
|
b4a747df063103d7c11ef4e2bd288cd2d4eb6a26
|
refs/heads/master
| 2023-02-04T17:00:58.641015
| 2020-12-10T01:32:52
| 2020-12-10T01:32:52
| 307,875,938
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 422
|
rd
|
RANBP9mutated.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/data.R
\docType{data}
\name{RANBP9mutated}
\alias{RANBP9mutated}
\title{RANBP9 Mutated Peptides}
\format{
A character string.
}
\source{
\url{https://www.uniprot.org/uniprot/Q96S59}
}
\usage{
RANBP9mutated
}
\description{
A string of nucleotides for RANBP9 that has been modified to include
neoantigen relevant mutations.
}
\keyword{datasets}
|
d11c74e616ac52fdefd9e61b5e7ce66187d1abbd
|
d8db097eb655dfa1498a6056a839ab3a967d6d7e
|
/LDML.R
|
2632cdf1844cbe3ccf12500c27aaff7a7babd2b6
|
[] |
no_license
|
zhangc927/LocalizedDebiasedMachineLearning
|
e4f35cabca3e3c7ea216b7c0063abcaae9697549
|
21f32ddfbb5c12688617860fd744ede5a13682b8
|
refs/heads/master
| 2023-07-26T12:06:15.092315
| 2021-09-06T16:52:24
| 2021-09-06T16:52:24
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 26,675
|
r
|
LDML.R
|
# Packages used throughout this script (parallel loops, data wrangling,
# ML learners for nuisance estimation, kernel density estimation).
packages.needed = c('foreach','tidyverse','Hmisc','gbm','glmnetUtils','nnet','hdm','ks','randomForest','quantregForest');
# Attach every package; character.only = TRUE lets library() take a string.
lapply(packages.needed, library, character.only = TRUE);
# returns list of length n of integers in {1,..,K} indicating fold membership
# Randomly assign n observations to K cross-validation folds of (nearly)
# equal size. Returns a numeric vector of fold labels in {1, ..., K}.
# `right` controls which side of each quantile boundary is closed in cut().
make.cvgroup = function(n, K, right = TRUE) {
  u = runif(n)
  fold_breaks = quantile(u, probs = seq(0, 1, 1/K))
  fold_labels = cut(u, fold_breaks, include.lowest = TRUE, right = right)
  as.numeric(fold_labels)
}
# conditions on fraction of 1s to 0s being similar across folds
make.cvgroup.balanced = function(data, K, form_t) {
cvgroup = numeric(nrow(data))
cvgroup[data[[form_t]]==1] = make.cvgroup(sum(data[[form_t]]==1), K, right = TRUE)
cvgroup[data[[form_t]]==0] = make.cvgroup(sum(data[[form_t]]==0), K, right = FALSE)
return(cvgroup)
}
# conditions on distribution of (t,w) being similar across folds
# Fold assignment stratified on the joint distribution of two binary columns
# (treatment form_t and instrument form_w) so each of the four (t, w) cells
# is spread evenly over the K folds.
make.cvgroup.balanced2 = function(data, K, form_t, form_w) {
  cvgroup = numeric(nrow(data))
  for (t in 0:1) {
    for (w in 0:1) {
      in_cell = data[[form_t]] == t & data[[form_w]] == w
      cvgroup[in_cell] = make.cvgroup(sum(in_cell), K, right = (t == 1))
    }
  }
  return(cvgroup)
}
# Cross-fitted propensity scores P(T=1|X): for each fold k, method_prop is
# trained on the other folds (restricted by trainmask) and predicts on fold k.
# Extreme scores are handled per trim.type:
#   'drop' - flag units outside (trim[1], trim[2]) via keep=FALSE and park
#            their score at 0.5; 'clip' - winsorize to the trim bounds;
#   'none' - leave as-is. If normalize, scores are rescaled (Hajek-style) so
# the inverse-propensity weights of kept units average to one in each arm.
# Returns list(prop = scores, keep = logical mask of retained units).
cross.fit.propensities = function(data, cvgroup, form_x, form_t, method_prop, option_prop, trim=c(0.01,0.99), trim.type='none', normalize=T, trainmask=T) {
K = max(cvgroup)
prop = numeric(nrow(data))
# method_prop(data, train_mask, predict_mask, form_x, form_t, options)
for (k in 1:K) {
prop[cvgroup==k] = method_prop(data, (cvgroup!=k) & trainmask, cvgroup==k, form_x, form_t, option_prop)
}
if(trim.type == 'drop') {
keep = (prop>trim[1] & prop<trim[2])
# dropped units get a harmless placeholder score
prop[!keep] = 0.5
} else {
keep = rep(T, nrow(data))
}
if(trim.type == 'clip') {
prop[prop<trim[1]] = trim[1]
prop[prop>trim[2]] = trim[2]
}
if(normalize) {
# rescale so mean(T/prop) = 1 among treated and mean((1-T)/(1-prop)) = 1
# among controls (applied only to kept units)
prop[keep] = data[[form_t]][keep]*prop[keep]*mean(data[[form_t]][keep]/prop[keep]) + (1.-data[[form_t]][keep])*(1.-(1.-prop[keep])*mean((1.-data[[form_t]][keep])/(1.-prop[keep])))
}
return(list(prop=prop, keep=keep))
}
## Return i for i such that summing v[1:i] is closest to c
## (If v is increasing, as it is for ipw and ldml, really should do this by golden section search)
# Index i at which the running sum of v is closest to the target c, i.e.
# argmin_i |sum(v[1:i]) - c|. (For increasing cumulative sums, as in the
# ipw/ldml weights here, a bisection/golden-section search would be faster.)
solve.cumsum = function(v,c) {
  partial_sums = cumsum(v)
  which.min(abs(partial_sums - c))
}
## Estimate the density of data in X at point x with weights w
density_ = function(X, w, x) {
if(all(w>=0)) {
density(X, n = 1, from = x, to = x, weights = w/sum(w), bw = 'SJ')$y
} else {
kde(X, eval.points = x, w = w/sum(w)*length(w), binned = F)$estimate
}
}
check.data = function(data, form_x, form_t, form_y) {
stopifnot(
all(sort(unique(data[[form_t]]))==c(F,T))
)
}
check.data2 = function(data, form_x, form_w, form_t, form_y) {
stopifnot(
all(sort(unique(data[[form_w]]))==c(F,T))
)
check.data(data, form_x, form_t, form_y)
}
## This uses the estimating equation 1/n sum_i T_i I[Y_i<=theta] / e(X_i) = gamma
## Where e(x)=P(T=1|X=x)
## We cross-fit e using K folds to estimate e(X_i) by ehat_i
## We then solve the equation as the gamma quantile of the data {Y_i:T_i=1} reweighted by W_i=(n_1/n)/ehat_i
## Symmetrically for control outcome
## If avg.eqn is T we solve the average equation where each element is using out-of-fold nuisances
## If avg.eqn is F we solve the equation in each fold and then average the estimates
# IPW estimator of the gamma-quantiles of the treated and control potential
# outcomes, for each gamma in `gammas`. Propensities are cross-fit over K
# folds. With avg.eqn=T the single averaged estimating equation is solved
# (the weighted gamma-quantile of the sorted outcomes); with avg.eqn=F the
# equation is solved per fold and the K estimates are averaged.
# Standard errors use the influence function, with the outcome density at
# the estimated quantile computed by the weighted density_() helper.
# Returns one data.frame row per gamma: q1, q0, qte = q1 - q0, and their SEs.
est.quantile.ipw = function(gammas, data, form_x, form_t, form_y, method_prop, option_prop, K=5, trim=c(0.01,0.99), trim.type='none', normalize=T, avg.eqn=T) {
# sort by outcome so weighted quantiles can be read off cumulative sums
data = data%>%arrange(!! sym(form_y))
cvgroup = make.cvgroup.balanced(data, K, form_t)
prop = cross.fit.propensities(data, cvgroup, form_x, form_t, method_prop, option_prop, trim=trim, trim.type=trim.type, normalize=normalize)
# inverse-propensity weights for treated (W1) and control (W0) arms;
# trimmed-out units (keep=FALSE) get weight zero
W1 = prop$keep*data[[form_t]]/prop$prop
W0 = prop$keep*(1.-data[[form_t]])/(1.-prop$prop)
return(foreach(gamma=gamma
s, .combine=rbind)%do% {
q1 = if(avg.eqn) data[[form_y]][solve.cumsum(W1/sum(prop$keep),gamma)] else foreach(k=1:K, .combine=sum)%do%{data[[form_y]][solve.cumsum(W1*(cvgroup==k)/sum(prop$keep&(cvgroup==k)),gamma)]}/K;
q0 = if(avg.eqn) data[[form_y]][solve.cumsum(W0/sum(prop$keep),gamma)] else foreach(k=1:K, .combine=sum)%do%{data[[form_y]][solve.cumsum(W0*(cvgroup==k)/sum(prop$keep&(cvgroup==k)),gamma)]}/K;
# influence-function values; the denominator is the weighted outcome
# density at the estimated quantile within each arm
psi1 = (W1[prop$keep] * (data[[form_y]][prop$keep] <= q1) - gamma) / density_(data[[form_y]][data[[form_t]]==1 & prop$keep], 1./prop$prop[data[[form_t]]==1 & prop$keep], q1);
psi0 = (W0[prop$keep] * (data[[form_y]][prop$keep] <= q0) - gamma) / density_(data[[form_y]][data[[form_t]]==0 & prop$keep], 1./(1.-prop$prop[data[[form_t]]==0 & prop$keep]), q0);
se1 = sd(psi1) / sqrt(sum(prop$keep));
se0 = sd(psi0) / sqrt(sum(prop$keep));
seqte = sd(psi1-psi0) / sqrt(sum(prop$keep));
data.frame(
gamma=gamma,
q1=q1,
q0=q0,
qte=q1-q0,
se1=se1,
se0=se0,
seqte=seqte
)})
}
## This uses the estimating equation 1/n sum_i T_i I[Y_i<=theta] / e(X_i) = gamma - 1/n sum_i f(theta,X_i)*(1-T_i/e(X_i))
## Where f(theta,x)=P(Y<=theta|X=x,T=1)
## We use LDML cross-fitting with K folds
## Namely, for each fold, we take half the remaining data and use it for fitting an initial guess for theta
## This initial guess is done using cross-fit IPW as above on this data subset alone
## On the other half of the remaining data we fit e(.) and f(intialguess,.) and use these on X_i in the fold to get ehat_i, fhat_i
## (if semiadaptive is set to TRUE then we use all out-of-fold data for both IPW and fitting nuisances)
## We finally compute c1 = 1/n sum_i fhat_i*(1-T_i/ehat_i)
## And then solve the equation as the gamma-c1 quantile of the data {Y_i:T_i=1} reweighted by (n_1/n)/ehat_i
## Symmetrically for control outcome
## If q.oracle is given then we use that given fixed value for localization; it shuold be a data.frame with columns gamma, q1.true, q0.true
## If avg.eqn is T we solve the average equation where each element is using out-of-fold nuisnaces
## If avg.eqn is F we solve the equation in each fold and then average the estimates
est.quantile.ldml = function(gammas, data, form_x, form_t, form_y, method_ipw, option_ipw, method_prop, option_prop, method_cdf, option_cdf, K=5, K_ipw=NULL, semiadaptive=FALSE, trim=c(0.01,0.99), trim.type='none', normalize=T, q.oracle=NULL, avg.eqn=T) {
data = data%>%arrange(!! sym(form_y))
cvgroup = make.cvgroup.balanced(data, K, form_t)
prop = cross.fit.propensities(data, cvgroup, form_x, form_t, method_prop, option_prop, trim=trim, trim.type=trim.type, normalize=normalize)
W1 = prop$keep*data[[form_t]]/prop$prop
W0 = prop$keep*(1.-data[[form_t]])/(1.-prop$prop)
if(is.null(K_ipw)) {K_ipw = ceil((K-1)/2)}
if(is.null(q.oracle)) {
ipwquant = foreach(k = 1:K, .combine=rbind)%do% {
est.quantile.ipw(gammas, data[if(semiadaptive) cvgroup!=k else cvgroup!=k & (cvgroup-(cvgroup>k)) %% 2==0,], form_x, form_t, form_y, method_ipw, option_ipw, K=K_ipw, trim=trim, trim.type = trim.type, normalize = normalize) %>% mutate(k=k)
}}
return(foreach(gamma=gammas, .combine=rbind)%do% {
cdf1 = numeric(nrow(data));
cdf0 = numeric(nrow(data));
for (k in 1:K) {
## take out k from the list of folds, renumber so k+1->k, k+2->k+1, ..., and use only the even folds after renumbering for ipw
## and use the odd folds after renumbering for fitting nuisances, using the result from ipq for eta_1
## unless semiadaptive is set to TRUE
q1.ipw = if(is.null(q.oracle)) ipwquant%>%filter(gamma==!!gamma,k==!!k)%>%select(q1) else q.oracle%>%filter(gamma==!!gamma)%>%select(q1.true)
q0.ipw = if(is.null(q.oracle)) ipwquant%>%filter(gamma==!!gamma,k==!!k)%>%select(q0) else q.oracle%>%filter(gamma==!!gamma)%>%select(q0.true)
form_cdf1 = paste('I(',form_y,'<=',as.numeric(q1.ipw),')')
cdf1[cvgroup==k] = method_cdf(data, (if(semiadaptive) cvgroup!=k else cvgroup!=k & (cvgroup-(cvgroup>k)) %% 2==1) & data[[form_t]]==1, cvgroup==k, form_x, form_cdf1, option_cdf)
form_cdf0 = paste('I(',form_y,'<=',as.numeric(q0.ipw),')')
cdf0[cvgroup==k] = method_cdf(data, (if(semiadaptive) cvgroup!=k else cvgroup!=k & (cvgroup-(cvgroup>k)) %% 2==1) & data[[form_t]]==0, cvgroup==k, form_x, form_cdf0, option_cdf)
};
q1 = if(avg.eqn) {
data[[form_y]][solve.cumsum(W1/sum(prop$keep),gamma - mean(cdf1[prop$keep] * (1.- data[[form_t]][prop$keep]/prop$prop[prop$keep])))]
} else {
foreach(k=1:K, .combine=sum)%do%{data[[form_y]][solve.cumsum(W1*(cvgroup==k)/sum(prop$keep&(cvgroup==k)),gamma - mean(cdf1[prop$keep&(cvgroup==k)] * (1.- data[[form_t]][prop$keep&(cvgroup==k)]/prop$prop[prop$keep&(cvgroup==k)])))]}/K};
q0 = if(avg.eqn) {
data[[form_y]][solve.cumsum(W0/sum(prop$keep),gamma - mean(cdf0[prop$keep] * (1.- (1.-data[[form_t]][prop$keep])/(1.-prop$prop[prop$keep]))))]
} else {
foreach(k=1:K, .combine=sum)%do%{data[[form_y]][solve.cumsum(W0*(cvgroup==k)/sum(prop$keep&(cvgroup==k)),gamma - mean(cdf0[prop$keep&(cvgroup==k)] * (1.- (1.-data[[form_t]][prop$keep&(cvgroup==k)])/(1.-prop$prop[prop$keep&(cvgroup==k)]))))]}/K};
psi1 = (W1[prop$keep] * (data[[form_y]][prop$keep] <= q1) - gamma - cdf1[prop$keep] * (1.- data[[form_t]][prop$keep]/prop$prop[prop$keep])) / density_(data[[form_y]][data[[form_t]]==1 & prop$keep], 1./prop$prop[data[[form_t]]==1 & prop$keep], q1);
psi0 = (W0[prop$keep] * (data[[form_y]][prop$keep] <= q0) - gamma - cdf0[prop$keep] * (1.- (1.-data[[form_t]][prop$keep])/(1.-prop$prop[prop$keep]))) / density_(data[[form_y]][data[[form_t]]==0 & prop$keep], 1./(1.-prop$prop[data[[form_t]]==0 & prop$keep]), q0);
se1 = sd(psi1) / sqrt(sum(prop$keep));
se0 = sd(psi0) / sqrt(sum(prop$keep));
seqte = sd(psi1-psi0) / sqrt(sum(prop$keep));
data.frame(
gamma=gamma,
q1 = q1,
q0 = q0,
qte=q1-q0,
se1=se1,
se0=se0,
seqte=seqte
)
})
}
## Non-localized DML estimator of potential-outcome quantiles.
## This uses the estimating equation 1/n sum_i T_i I[Y_i<=theta] / e(X_i) = gamma - 1/n sum_i f(theta,X_i)*(1-T_i/e(X_i))
## Where f(theta,x)=P(Y<=theta|X=x,T=1)
## We use non-localized DML cross-fitting with K folds
## Namely, for each fold, we take the remaining data and use it for fitting e(.) and the whole f(.,.)
## We use this on X_i in the fold to get ehat_i, fhat_i(theta)
## We restrict the range of theta to a discretized grid of marginal Y quantiles
## Then we fit f(theta,.) for each theta in the range
## The range of quantiles is given by the list qrange
## Or if qrange is a number then we use all quantiles by qrange increments
## We then solve the equation by brute force search over theta in qrange to minimize abs of eqn
## If cdf_regress is T then method_cdf is a binary regression method that we apply to each I[Y_i<=theta]
## If cdf_regress is F then method_cdf takes list of quantiles to simultaneously predict
## If avg.eqn is T we solve the average equation where each element is using out-of-fold nuisances
## If avg.eqn is F we solve the equation in each fold and then average the estimates
est.quantile.dml = function(gammas, data, form_x, form_t, form_y, method_prop, option_prop, method_cdf, option_cdf, cdf_regress=T, qrange=0.01, K=5, trim=c(0.01,0.99), trim.type='none', normalize=T, avg.eqn=T) {
  # Expand a scalar qrange into an evenly spaced grid of quantile levels
  if(length(qrange)==1) {
    qrange = seq(qrange,1.-qrange,qrange)
    #qrange = qrange[(qrange>=min(gammas)-.1) & (qrange<=max(gammas)+.1)]
  }
  cvgroup = make.cvgroup.balanced(data, K, form_t)
  prop = cross.fit.propensities(data, cvgroup, form_x, form_t, method_prop, option_prop, trim=trim, trim.type=trim.type, normalize=normalize)
  # Candidate theta values: marginal Y quantiles at the grid levels
  yqs = quantile(data[[form_y]], qrange)
  # cdf1[i,j] ~ P(Y<=yqs[i] | X_j, T=1); cdf0 likewise for the control arm
  cdf1 = matrix(0L, length(qrange), nrow(data));
  cdf0 = matrix(0L, length(qrange), nrow(data));
  if (cdf_regress) {
    # One binary regression per grid point and fold
    for (i in 1:length(qrange)) {
      for (k in 1:K) {
        form_cdf1 = paste('I(',form_y,'<=',as.numeric(yqs[i]),')')
        cdf1[i,cvgroup==k] = method_cdf(data, cvgroup!=k & data[[form_t]]==1, cvgroup==k, form_x, form_cdf1, option_cdf)
        form_cdf0 = paste('I(',form_y,'<=',as.numeric(yqs[i]),')')
        cdf0[i,cvgroup==k] = method_cdf(data, cvgroup!=k & data[[form_t]]==0, cvgroup==k, form_x, form_cdf0, option_cdf)
      }
    }
  } else {
    # method_cdf predicts all grid points at once
    for (k in 1:K) {
      cdf1[,cvgroup==k] = t(method_cdf(data, cvgroup!=k & data[[form_t]]==1, cvgroup==k, form_x, form_y, yqs, option_cdf))
      cdf0[,cvgroup==k] = t(method_cdf(data, cvgroup!=k & data[[form_t]]==0, cvgroup==k, form_x, form_y, yqs, option_cdf))
    }
  }
  # yleq[i,j] = I[Y_j <= yqs[i]]
  yleq = outer(yqs,data[[form_y]],'>=')
  # Left-hand side of the debiased equation at every candidate theta
  a1 = if(avg.eqn) (yleq %*% (prop$keep*data[[form_t]]/prop$prop) + cdf1 %*% (prop$keep*(1-data[[form_t]]/prop$prop))) else foreach(k=1:K)%do%{(yleq %*% ((prop$keep&cvgroup==k)*data[[form_t]]/prop$prop) + cdf1 %*% ((prop$keep&cvgroup==k)*(1-data[[form_t]]/prop$prop)))}
  # BUG FIX: the control-arm equation must use the control-arm CDF cdf0 (it was
  # computed above but never used; the original mistakenly reused cdf1 here)
  a0 = if(avg.eqn) (yleq %*% (prop$keep*(1-data[[form_t]])/(1-prop$prop)) + cdf0 %*% (prop$keep*(1-(1-data[[form_t]])/(1-prop$prop)))) else foreach(k=1:K)%do%{(yleq %*% ((prop$keep&cvgroup==k)*(1-data[[form_t]])/(1-prop$prop)) + cdf0 %*% ((prop$keep&cvgroup==k)*(1-(1-data[[form_t]])/(1-prop$prop))))}
  return(foreach(gamma=gammas, .combine=rbind)%do% {
    # Brute-force search over the grid for the theta minimizing |equation - gamma|
    q1 = if(avg.eqn) yqs[which.min(abs(a1/sum(prop$keep) - gamma))] else foreach(k=1:K, .combine=sum)%do%{yqs[which.min(abs(a1[[k]]/sum(prop$keep&cvgroup==k) - gamma))]}/K;
    q0 = if(avg.eqn) yqs[which.min(abs(a0/sum(prop$keep) - gamma))] else foreach(k=1:K, .combine=sum)%do%{yqs[which.min(abs(a0[[k]]/sum(prop$keep&cvgroup==k) - gamma))]}/K;
    i1 = which.min(abs(yqs-q1))
    i0 = which.min(abs(yqs-q0))
    # Influence functions scaled by a weighted density estimate at the quantile
    psi1 = (yleq[i1,] * (prop$keep*data[[form_t]]/prop$prop) + cdf1[i1,] * (prop$keep*(1-data[[form_t]]/prop$prop)) - gamma) / density_(data[[form_y]][data[[form_t]]==1 & prop$keep], 1./prop$prop[data[[form_t]]==1 & prop$keep], q1);
    # BUG FIX: psi0 likewise must use cdf0, not cdf1
    psi0 = (yleq[i0,] * (prop$keep*(1-data[[form_t]])/(1-prop$prop)) + cdf0[i0,] * (prop$keep*(1-(1-data[[form_t]])/(1-prop$prop))) - gamma) / density_(data[[form_y]][data[[form_t]]==0 & prop$keep], 1./(1.-prop$prop[data[[form_t]]==0 & prop$keep]), q0);
    se1 = sd(psi1[prop$keep]) / sqrt(sum(prop$keep));
    se0 = sd(psi0[prop$keep]) / sqrt(sum(prop$keep));
    seqte = sd(psi1[prop$keep]-psi0[prop$keep]) / sqrt(sum(prop$keep));
    data.frame(
      gamma=gamma,
      q1=q1,
      q0=q0,
      qte=q1-q0,
      se1=se1,
      se0=se0,
      seqte=seqte)
  })
}
## IPW-style estimator of local (complier) quantile treatment effects with an
## instrument W. Uses cross-fitted instrument propensities e_W(x)=P(W=1|X=x) and
## compliance rates P(T=1|W=w,X=x); nu1-nu0 estimates the complier share.
## If one.way.noncompliance is T, P(T=1|W=0,X) is taken to be identically 0.
est.ivquantile.ipw = function(gammas, data, form_x, form_w, form_t, form_y, method_prop, option_prop, K=5, trim=c(0.01,0.99), trim.type='none', normalize=T, avg.eqn=T, one.way.noncompliance=F) {
  # Sort by outcome so cumulative weight sums trace the weighted empirical CDF
  data = data%>%arrange(!! sym(form_y))
  # Folds balanced on both treatment and instrument (helper defined elsewhere)
  cvgroup = make.cvgroup.balanced2(data, K, form_t, form_w)
  # Instrument propensity P(W=1|X), with trimming flags in propw$keep
  propw = cross.fit.propensities(data, cvgroup, form_x, form_w, method_prop, option_prop, trim=trim, trim.type=trim.type, normalize=normalize)
  # Treatment rates conditional on instrument arm: P(T=1|W=1,X) and P(T=1|W=0,X)
  proptw1 = cross.fit.propensities(data, cvgroup, form_x, form_t, method_prop, option_prop, trainmask = data[[form_w]]==1, trim=NULL, trim.type='none', normalize=F)
  proptw0 = if (one.way.noncompliance) NULL else cross.fit.propensities(data, cvgroup, form_x, form_t, method_prop, option_prop, trainmask = data[[form_w]]==0, trim=NULL, trim.type='none', normalize=F)
  keep = propw$keep
  propw = propw$prop
  proptw1 = proptw1$prop
  # Under one-way noncompliance no one with W=0 takes treatment
  proptw0 = if (one.way.noncompliance) rep(0.,nrow(data)) else proptw0$prop
  # Doubly-robust style estimates of E[T|W=1] and E[T|W=0]; their difference
  # is the (complier) first stage
  nu1 = sum(keep*(proptw1 + data[[form_w]]*(data[[form_t]]-proptw1)/propw))/sum(keep)
  nu0 = if (one.way.noncompliance) 0. else sum(keep*(proptw0 + (1-data[[form_w]])*(data[[form_t]]-proptw0)/(1-propw)))/sum(keep)
  # Abadie-style instrument weights, split by realized treatment and scaled by
  # the first stage
  W = keep*(data[[form_w]]-propw)/(propw*(1-propw))
  W1 = data[[form_t]]*W / (nu1 - nu0)
  W0 = (1-data[[form_t]])*W / (nu0 - nu1)
  return(foreach(gamma=gammas, .combine=rbind)%do% {
    # Quantiles solve the weighted CDF equation (solve.cumsum defined elsewhere)
    q1 = if(avg.eqn) data[[form_y]][solve.cumsum(W1/sum(keep),gamma)] else foreach(k=1:K, .combine=sum)%do%{data[[form_y]][solve.cumsum(W1*(cvgroup==k)/sum(keep&(cvgroup==k)),gamma)]}/K;
    q0 = if(avg.eqn) data[[form_y]][solve.cumsum(W0/sum(keep),gamma)] else foreach(k=1:K, .combine=sum)%do%{data[[form_y]][solve.cumsum(W0*(cvgroup==k)/sum(keep&(cvgroup==k)),gamma)]}/K;
    # Influence functions scaled by weighted density estimates at the quantiles
    psi1 = (W1[keep] * (data[[form_y]][keep] <= q1) - gamma) / (density_(data[[form_y]][data[[form_t]]==1 & keep], W[data[[form_t]]==1 & keep] / (nu1 - nu0), q1));
    psi0 = (W0[keep] * (data[[form_y]][keep] <= q0) - gamma) / (density_(data[[form_y]][data[[form_t]]==0 & keep], W[data[[form_t]]==0 & keep] / (nu0 - nu1), q0));
    se1 = sd(psi1) / sqrt(sum(keep));
    se0 = sd(psi0) / sqrt(sum(keep));
    seqte = sd(psi1-psi0) / sqrt(sum(keep));
    data.frame(
      gamma=gamma,
      q1=q1,
      q0=q0,
      qte=q1-q0,
      se1=se1,
      se0=se0,
      seqte=seqte
    )})
}
## Localized DML estimator of complier quantile treatment effects with an
## instrument W. Mirrors est.quantile.ldml: initial IPW quantile guesses are
## cross-fitted on held-out data, conditional CDFs localized at those guesses
## augment the IPW estimating equation, and influence functions yield SEs.
## Fixes relative to the original: ceiling() instead of nonexistent ceil();
## form_cdf0 localizes at q0.ipw (q0.ipw was computed but never used); missing
## '*' in the per-fold augmentation sums; misplaced bracket in psi0's density.
est.ivquantile.ldml = function(gammas, data, form_x, form_w, form_t, form_y, method_ipw, option_ipw, method_prop, option_prop, method_cdf, option_cdf, K=5, K_ipw=NULL, semiadaptive=FALSE, trim=c(0.01,0.99), trim.type='none', normalize=T, one.way.noncompliance=F, q.oracle=NULL, avg.eqn=T) {
  # Sort by outcome so cumulative weight sums trace the weighted empirical CDF
  data = data%>%arrange(!! sym(form_y))
  cvgroup = make.cvgroup.balanced2(data, K, form_t, form_w)
  # Instrument propensity and compliance rates (see est.ivquantile.ipw)
  propw = cross.fit.propensities(data, cvgroup, form_x, form_w, method_prop, option_prop, trim=trim, trim.type=trim.type, normalize=normalize)
  proptw1 = cross.fit.propensities(data, cvgroup, form_x, form_t, method_prop, option_prop, trainmask = data[[form_w]]==1, trim=NULL, trim.type='none', normalize=F)
  proptw0 = if (one.way.noncompliance) NULL else cross.fit.propensities(data, cvgroup, form_x, form_t, method_prop, option_prop, trainmask = data[[form_w]]==0, trim=NULL, trim.type='none', normalize=F)
  keep = propw$keep
  propw = propw$prop
  proptw1 = proptw1$prop
  proptw0 = if (one.way.noncompliance) rep(0.,nrow(data)) else proptw0$prop
  # First-stage (complier share) estimates
  nu1 = sum(keep*(proptw1 + data[[form_w]]*(data[[form_t]]-proptw1)/propw))/sum(keep)
  nu0 = if (one.way.noncompliance) 0. else sum(keep*(proptw0 + (1-data[[form_w]])*(data[[form_t]]-proptw0)/(1-propw)))/sum(keep)
  # Instrument weights split by realized treatment
  W = keep*(data[[form_w]]-propw)/(propw*(1-propw))
  W1 = data[[form_t]]*W / (nu1 - nu0)
  W0 = (1-data[[form_t]])*W / (nu0 - nu1)
  # BUG FIX: base R has ceiling(), not ceil()
  if(is.null(K_ipw)) {K_ipw = ceiling((K-1)/2)}
  # Initial IPW quantile guesses for localization (skipped with an oracle)
  if(is.null(q.oracle)) {
  ipwquant = foreach(k = 1:K, .combine=rbind)%do% {
    est.ivquantile.ipw(gammas, data[if(semiadaptive) cvgroup!=k else cvgroup!=k & (cvgroup-(cvgroup>k)) %% 2==0,], form_x, form_w, form_t, form_y, method_ipw, option_ipw, K=K_ipw, trim=trim, trim.type = trim.type, normalize = normalize, one.way.noncompliance = one.way.noncompliance) %>% mutate(k=k)
  }}
  return(foreach(gamma=gammas, .combine=rbind)%do% {
    cdf11 = numeric(nrow(data)); # P(Y<=th,T=1|W=1,X)
    cdf10 = numeric(nrow(data)); # P(Y<=th,T=0|W=1,X)
    cdf01 = numeric(nrow(data)); # P(Y<=th,T=1|W=0,X)
    cdf00 = numeric(nrow(data)); # P(Y<=th,T=0|W=0,X)
    for (k in 1:K) {
      ## take out k from the list of folds, renumber so k+1->k, k+2->k+1, ..., and use only the even folds after renumbering for ipw
      ## and use the odd folds after renumbering for fitting nuisances, using the result from ipw for eta_1
      ## unless semiadaptive is set to TRUE
      q1.ipw = if(is.null(q.oracle)) ipwquant%>%filter(gamma==!!gamma,k==!!k)%>%select(q1) else q.oracle%>%filter(gamma==!!gamma)%>%select(q1.true)
      q0.ipw = if(is.null(q.oracle)) ipwquant%>%filter(gamma==!!gamma,k==!!k)%>%select(q0) else q.oracle%>%filter(gamma==!!gamma)%>%select(q0.true)
      # Treated-arm CDFs localized at q1.ipw
      form_cdf1 = paste('I(',form_y,'<=',as.numeric(q1.ipw),'&',form_t,'==1)')
      cdf11[cvgroup==k] = method_cdf(data, (if(semiadaptive) cvgroup!=k else cvgroup!=k & (cvgroup-(cvgroup>k)) %% 2==1) & data[[form_w]]==1, cvgroup==k, form_x, form_cdf1, option_cdf)
      cdf01[cvgroup==k] = if (one.way.noncompliance) 0. else method_cdf(data, (if(semiadaptive) cvgroup!=k else cvgroup!=k & (cvgroup-(cvgroup>k)) %% 2==1) & data[[form_w]]==0, cvgroup==k, form_x, form_cdf1, option_cdf)
      # BUG FIX: control-arm CDFs must localize at q0.ipw (q0.ipw was
      # previously computed but q1.ipw was used here)
      form_cdf0 = paste('I(',form_y,'<=',as.numeric(q0.ipw),'&',form_t,'==0)')
      cdf10[cvgroup==k] = method_cdf(data, (if(semiadaptive) cvgroup!=k else cvgroup!=k & (cvgroup-(cvgroup>k)) %% 2==1) & data[[form_w]]==1, cvgroup==k, form_x, form_cdf0, option_cdf)
      cdf00[cvgroup==k] = method_cdf(data, (if(semiadaptive) cvgroup!=k else cvgroup!=k & (cvgroup-(cvgroup>k)) %% 2==1) & data[[form_w]]==0, cvgroup==k, form_x, form_cdf0, option_cdf)
    };
    # Solve the debiased weighted-CDF equation; the target level gamma is
    # shifted by the CDF-augmentation term
    q1 = if(avg.eqn) {
      data[[form_y]][solve.cumsum(W1/sum(keep), gamma - sum(keep * ( cdf11 - cdf01 - data[[form_w]] * cdf11 / propw + (1-data[[form_w]]) * cdf01 / (1-propw) ))/sum(keep)/(nu1 - nu0) )]
    } else {
      # BUG FIX: '*' was missing between the fold mask and the CDF term,
      # making the mask a (failing) function call
      foreach(k=1:K, .combine=sum)%do%{data[[form_y]][solve.cumsum(W1*(cvgroup==k)/sum(keep&(cvgroup==k)),gamma - sum((keep&cvgroup==k) * ( cdf11 - cdf01 - data[[form_w]] * cdf11 / propw + (1-data[[form_w]]) * cdf01 / (1-propw) ))/sum(keep&cvgroup==k)/(nu1 - nu0) )]}/K};
    q0 = if(avg.eqn) {
      data[[form_y]][solve.cumsum(W0/sum(keep), gamma - sum(keep * ( cdf10 - cdf00 - data[[form_w]] * cdf10 / propw + (1-data[[form_w]]) * cdf00 / (1-propw) ))/sum(keep)/(nu0 - nu1) )]
    } else {
      # BUG FIX: same missing '*' as above
      foreach(k=1:K, .combine=sum)%do%{data[[form_y]][solve.cumsum(W0*(cvgroup==k)/sum(keep&(cvgroup==k)),gamma - (1/(nu0 - nu1)) * sum((keep&cvgroup==k) * ( cdf10 - cdf00 - data[[form_w]] * cdf10 / propw + (1-data[[form_w]]) * cdf00 / (1-propw) ))/sum(keep&cvgroup==k) )]}/K};
    # Influence functions scaled by weighted density estimates at the quantiles
    psi1 = (W1[keep] * (data[[form_y]][keep] <= q1) - gamma + ( cdf11[keep] - cdf01[keep] - data[[form_w]][keep] * cdf11[keep] / propw[keep] + (1-data[[form_w]][keep]) * cdf01[keep] / (1-propw[keep]) ) / (nu1 - nu0)) / (density_(data[[form_y]][data[[form_t]]==1 & keep], W[data[[form_t]]==1 & keep] / (nu1 - nu0), q1));
    # BUG FIX: the '/(nu0 - nu1)' scaling was inside the subscript of W
    psi0 = (W0[keep] * (data[[form_y]][keep] <= q0) - gamma + ( cdf10[keep] - cdf00[keep] - data[[form_w]][keep] * cdf10[keep] / propw[keep] + (1-data[[form_w]][keep]) * cdf00[keep] / (1-propw[keep]) ) / (nu0 - nu1)) / (density_(data[[form_y]][data[[form_t]]==0 & keep], W[data[[form_t]]==0 & keep] / (nu0 - nu1), q0));
    se1 = sd(psi1) / sqrt(sum(keep));
    se0 = sd(psi0) / sqrt(sum(keep));
    seqte = sd(psi1-psi0) / sqrt(sum(keep));
    data.frame(
      gamma=gamma,
      q1 = q1,
      q0 = q0,
      qte=q1-q0,
      se1=se1,
      se0=se0,
      seqte=seqte
    )
  })
}
const_option = list()
## Constant-predictor fallback: predicts the training-fold mean of the response
## for every test row. Used as the error handler for the other learners.
## data: data.frame; trainmask/testmask: logical row masks; form_x: unused;
## form_resp: either a column name of data or an expression such as I(y<=q);
## option: unused (kept for interface uniformity with the other learners).
const = function(data, trainmask, testmask, form_x, form_resp, option) {
  # BUG FIX: restrict to the training rows in the column-name branch too;
  # the original averaged the whole column there (the expression branch
  # already subset to data[trainmask,]), leaking test-fold information.
  resp = if (form_resp %in% colnames(data)) {
    data[[form_resp]][trainmask]
  } else {
    # Expand the response expression via model.matrix on the training rows and
    # take column 2 (the TRUE-indicator column of the two-level expansion).
    model.matrix(as.formula(paste('~', form_resp, '-1')), data = data[trainmask,])[,2]
  }
  rep(mean(resp), sum(testmask))
}
boost_option = list(distribution = 'bernoulli', bag.fraction = .5, train.fraction = 1.0, interaction.depth=2, n.trees=1000, shrinkage=.01, n.cores=1, cv.folds=5, verbose = FALSE)
## Gradient-boosted classifier (gbm): fits on the training rows and returns
## predicted probabilities for the test rows, using the CV-selected (or OOB)
## optimal number of trees.
boost = function(data, trainmask, testmask, form_x, form_resp, option) {
  fit_formula = as.formula(paste(form_resp, "~", form_x))
  fit_args = append(list(formula = fit_formula, data = data[trainmask,]), option)
  fit = do.call(gbm, fit_args)
  # Pick the tree count by cross-validation when cv.folds was requested,
  # otherwise fall back to the out-of-bag criterion.
  use_cv = 'cv.folds' %in% names(option) && option[['cv.folds']] > 0
  best = gbm.perf(fit, plot.it = FALSE, method = if (use_cv) "cv" else "OOB")
  predict(fit, n.trees = best, newdata = data[testmask,], type = "response")
}
forest_option = list(nodesize=1, ntree=1000, na.action=na.omit, replace=TRUE)
## Random-forest classifier: fits on the training rows and returns the
## predicted probability of the second factor level for the test rows.
## Falls back to the constant predictor if fitting fails.
forest = function(data, trainmask, testmask, form_x, form_resp, option) {
  tryCatch({
    rf_formula = as.formula(paste("as.factor(", form_resp, ") ~", form_x))
    rf_args = append(list(formula = rf_formula, data = data[trainmask,]), option)
    rf = do.call(randomForest, rf_args)
    probs = predict(rf, newdata = data[testmask,], type = "prob")
    probs[, 2]
  }, error = function(err) {
    const(data, trainmask, testmask, form_x, form_resp, const_option)
  })
}
neuralnet_option = list(linout=FALSE, size=2, maxit=1000, decay=0.02, MaxNWts=10000, trace=FALSE)
## Single-hidden-layer neural network (nnet): fits on the training rows and
## returns the raw network output for the test rows.
neuralnet = function(data, trainmask, testmask, form_x, form_resp, option) {
  nn_formula = as.formula(paste(form_resp, "~", form_x))
  nn_args = append(list(formula = nn_formula, data = data[trainmask,]), option)
  nn_fit = do.call(nnet, nn_args)
  predict(nn_fit, newdata = data[testmask,], type = "raw")
}
lassor_option = list(penalty = list(homoscedastic = FALSE, X.dependent.lambda =FALSE, lambda.start = NULL, c = 1.1), intercept = TRUE)
lassor_post_option = append(lassor_option, list(post=TRUE))
lassor_nopost_option = append(lassor_option, list(post=FALSE))
## Rigorous lasso logistic regression (hdm::rlassologit) on all pairwise
## interactions of the covariates; returns predicted probabilities for the
## test rows, falling back to the constant predictor if fitting fails.
lassor = function(data, trainmask, testmask, form_x, form_resp, option) {
  interaction_formula = as.formula(paste(form_resp, "~ (", form_x, ")^2"))
  tryCatch({
    fit_args = append(list(formula = interaction_formula, data = data[trainmask,]), option)
    fit = do.call(rlassologit, fit_args)
    predict(fit, newdata = data[testmask,], type = "response")
  }, error = function(err) {
    const(data, trainmask, testmask, form_x, form_resp, const_option)
  })
}
logistic_option = list(family = "binomial")
## Plain logistic regression (stats::glm): fits on the training rows and
## returns predicted probabilities for the test rows, falling back to the
## constant predictor if fitting fails.
logistic = function(data, trainmask, testmask, form_x, form_resp, option) {
  glm_formula = as.formula(paste(form_resp, "~", form_x))
  tryCatch({
    glm_args = append(list(formula = glm_formula, data = data[trainmask,]), option)
    glm_fit = do.call(glm, glm_args)
    predict(glm_fit, newdata = data[testmask,], type = "response")
  }, error = function(err) {
    const(data, trainmask, testmask, form_x, form_resp, const_option)
  })
}
reglm_option = list(family="binomial")
reglm_lasso_option = append(reglm_option, list(alpha=1))
reglm_ridge_option = append(reglm_option, list(alpha=0))
reglm_elast_option = append(reglm_option, list(alpha=0.5))
## Regularized logistic regression via glmnet::cv.glmnet on all pairwise
## interactions of the covariates; returns predicted probabilities at the
## CV-optimal lambda, falling back to the constant predictor on failure.
reglm = function(data, trainmask, testmask, form_x, form_resp, option) {
  form = as.formula(paste(form_resp, "~ (", form_x,")^2"));
  tryCatch({
    # BUG FIX: cv.glmnet has no formula/data interface, so the original
    # do.call(cv.glmnet, list(formula=..., data=...)) always errored into the
    # const fallback. Build the design matrix and response explicitly instead.
    mf = model.frame(form, data = data[trainmask,])
    x = model.matrix(attr(mf, "terms"), mf)[, -1, drop = FALSE]  # drop intercept column
    y = model.response(mf)
    fit = do.call(cv.glmnet, append(list(x = x, y = y), option))
    # Same column construction for the test rows (response term removed)
    newx = model.matrix(delete.response(terms(form)), data = data[testmask,])[, -1, drop = FALSE]
    return(as.numeric(predict(fit, newx = newx, s = "lambda.min", type = "response")))
  }, error = function(err) const(data, trainmask, testmask, form_x, form_resp, const_option))
}
## Registry of binary-classification learners. Each entry bundles a fitting
## function with its default option list; all learners share the signature
## (data, trainmask, testmask, form_x, form_resp, option) and return predicted
## probabilities (or scores) for the test rows.
methods.classification = list(
  boost = list(method=boost, option=boost_option),
  forest = list(method=forest, option=forest_option),
  neuralnet = list(method=neuralnet, option=neuralnet_option),
  lassor = list(method=lassor, option=lassor_nopost_option),
  lassorpost = list(method=lassor, option=lassor_post_option),
  lasso = list(method=reglm, option=reglm_lasso_option),
  ridge = list(method=reglm, option=reglm_ridge_option),
  elast = list(method=reglm, option=reglm_elast_option),
  logistic = list(method=logistic, option=logistic_option),
  const = list(method=const, option=const_option)
)
forestcdf_option = list()
## Conditional-CDF estimator via quantile regression forests: returns, for each
## test row, the estimated P(Y <= th | X) at every threshold in ths.
forestcdf = function(data, trainmask, testmask, form_x, form_resp, ths, option) {
  form = as.formula(paste(form_resp, "~", form_x));
  # lm() is used only to materialize the design matrix (x) and response (y)
  # from the formula; its coefficients are never used
  lmfit = lm(form, x = TRUE, y = TRUE, data = data[trainmask,]);
  # Drop the intercept column before fitting the forest
  fit = do.call(quantregForest, append(list(x=lmfit$x[ ,-1], y=lmfit$y), option));
  # 'what' maps each test point's leaf sample z to its empirical CDF at ths
  return(predict(fit, newdata=data[testmask,], what=function(z){colMeans(outer(z,ths,'<='))}));
}
|
f1efba41cc74394df428c169f28ce537a54d283e
|
d80f66c21e6098c0592c37c0137828bb1009a650
|
/cachematrix.R
|
3ffbd29997ec2b72e2d587fa7ff7cbc1ef8ab20f
|
[] |
no_license
|
TKarashima/CourseraR_ProgAssign2
|
3f5d7857fe124e52dfac1d61018b83033d16506e
|
532e4846fe51210de2320dd0613dbfff0369442f
|
refs/heads/master
| 2020-04-01T16:48:35.611280
| 2018-03-06T02:47:36
| 2018-03-06T02:47:36
| 32,692,114
| 0
| 0
| null | 2016-09-12T15:36:24
| 2015-03-22T19:45:12
|
R
|
UTF-8
|
R
| false
| false
| 2,154
|
r
|
cachematrix.R
|
# The two following functions are designed to avoid multiple calculation of
# the inverse of a matrix. It is useful in the sense that one can save compu-
# tational resources by avoiding more than one heavy calculation that is
# called many times throughout a work in an R program.
#
# The function makeCacheMatrix() is responsible for saving an evaluated inver-
# se made by cacheSolve(), and for returning a saved inverse after queried by
# cacheSolve().
# Both functions rely on R's lexical scoping: the cached inverse lives in the
# environment created by each call to makeCacheMatrix().
makeCacheMatrix <- function(x = matrix()) {
  # Cached inverse; NULL means "not yet computed". It lives in this call's
  # environment, shared by the closures below via <<-.
  cached_inverse <- NULL
  list(
    # Replace the stored matrix and invalidate any cached inverse.
    set = function(y) {
      x <<- y
      cached_inverse <<- NULL
    },
    # Return the stored matrix.
    get = function() x,
    # Store a computed inverse.
    setinv = function(inv) cached_inverse <<- inv,
    # Return the cached inverse, or NULL if none has been stored.
    getinv = function() cached_inverse
  )
}
# cacheSolve() returns the inverse of the matrix wrapped by a makeCacheMatrix()
# object. On the first call it solves the inverse and stores it back into the
# wrapper; on subsequent calls it announces and returns the cached value
# without recomputing.
cacheSolve <- function(x, ...) {
  inverse <- x$getinv()
  if (is.null(inverse)) {
    # Cache miss: compute, store, and return the inverse.
    inverse <- solve(x$get(), ...)
    x$setinv(inverse)
  } else {
    # Cache hit: announce that no recomputation happens.
    message("getting cached data")
  }
  inverse
}
# To test the functions, you can use debug(cacheSolve) to understand the
# step-by-step of the function. You will see that once
# the inverse has been previously evaluated, when you call cacheSolve() again
# for the same "matrix" (created with makeCacheMatrix()) the function will
# simply display the value of the inverse, stored as ii in the namespace
# of the function cacheSolve. Try this to run the functions:
# NOTE(review): debug() switches on the interactive debugger for cacheSolve;
# comment it out if sourcing this file non-interactively.
debug(cacheSolve)
x1 <- matrix(2:5, 2, 2)
x1
# Wrap the matrix in a caching object
x2 <- makeCacheMatrix(x1)
# First call: computes and caches the inverse
x3 <- cacheSolve(x2)
x3
# In the following line the function returns the stored value, without solving
# it again.
x4 <- cacheSolve(x2)
x4
|
6b7d2a62509f916a3e45c3e9b7da200d05687e6e
|
e54f4d3145e1d1371b37c516bba42034fcc57c4a
|
/lfmm.r
|
69f71a42dd38663af15e79fef1566eb47b36f253
|
[] |
no_license
|
BrendaDH/LEA-R-script
|
a2d74879258442679dd903b57fa4727f91695c42
|
b4e632e087322a9dc7e190a202b1b5946b23f3fd
|
refs/heads/main
| 2023-05-03T14:51:59.219664
| 2021-05-25T15:59:34
| 2021-05-25T15:59:34
| 370,750,535
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,060
|
r
|
lfmm.r
|
library(LEA)
### sNMF: estimate ancestry coefficients and pick K by cross-entropy
project <- NULL
project <- snmf("file.geno", K=1:15, entropy = TRUE, repetitions = 10, project = "new", iterations = 100000)
# plot cross-entropy criterion of all runs of the project
plot(project, cex = 1.2, col = "lightblue", pch = 19)
# get the cross-entropy of each run for K = 8 (e.g. 169 samples, all species)
ce <- cross.entropy(project, K = 8)
# select the run with the lowest cross-entropy
best <- which.min(ce)
best
### LFMM: latent factor mixed model association scan
obj.lfmm <- lfmm("file.lfmm", "file.env", K = 8, rep = 5, iterations = 200000, burnin = 50000, project="new")
# Correct p-values: combine z-scores across repetitions by their median,
# then convert to chi-squared p-values with one degree of freedom.
zs1 = z.scores(obj.lfmm, K = 8, d=1)
zs.median1 = apply(zs1, MARGIN = 1, median)
adj.p.values1 = pchisq(zs.median1^2/1, df = 1, lower = FALSE)
# Significant loci at FDR 10%.
# BUG FIX: this line originally also appeared BEFORE adj.p.values1 was
# computed, which raised "object not found"; the premature copy is removed.
qv1 = which(qvalue(adj.p.values1, fdr = .1)$signif)
par(mfrow=c(3,3))
# histogram
hist(adj.p.values1, col = "orangered")
# Manhattan plot
# BUG FIX: the original plotted adj_pvals1, which is never defined; the
# adjusted p-values are stored in adj.p.values1.
plot(-log10(adj.p.values1), pch = 19, col = "royalblue1", cex = .7, ylab="-log10(pvalues)")
# and so on replacing argument d= from 2 to 13
|
01c90b1b339e5ca46a7b8f35b87258ed5a796067
|
fbe57536cc2d84e69a5bf799c88fcb784e853558
|
/R/median.test.twosample.independent.mann.whitney.fx.R
|
66e90ce147c95a1e485dba542008751a0dc0276d
|
[
"MIT"
] |
permissive
|
burrm/lolcat
|
78edf19886fffc02e922b061ce346fdf0ee2c80f
|
abd3915791d7e63f3827ccb10b1b0895aafd1e38
|
refs/heads/master
| 2023-04-02T11:27:58.636616
| 2023-03-24T02:33:34
| 2023-03-24T02:33:34
| 49,685,593
| 5
| 2
| null | 2016-10-21T05:14:49
| 2016-01-15T00:56:55
|
R
|
UTF-8
|
R
| false
| false
| 975
|
r
|
median.test.twosample.independent.mann.whitney.fx.R
|
# Formula interface for the two-sample independent Mann-Whitney median test.
# Splits the response by the grouping cells derived from fx; with exactly two
# groups runs a single test, with more than two runs every pairwise comparison
# and returns a named list of results.
median.test.twosample.independent.mann.whitney.fx <- function(
  fx
  ,data
  ,...
) {
  # Group cell codes, then the response column named on the LHS of fx
  cell_codes <- compute.group.cell.codes(fx, data = data)
  response_name <- all.vars(fx)[attributes(terms(fx))$response]
  groups <- split(data[[response_name]], cell_codes)
  n_groups <- length(groups)
  if (n_groups == 2) {
    # Exactly two groups: a single pairwise test
    median.test.twosample.independent.mann.whitney(
      g1 = groups[[1]]
      ,g2 = groups[[2]]
      ,...
    )
  } else if (n_groups > 2) {
    # More than two groups: run every pairwise comparison
    pair_index <- combn(seq_len(n_groups), 2)
    results <- list()
    for (j in seq_len(ncol(pair_index))) {
      a <- pair_index[1, j]
      b <- pair_index[2, j]
      label <- paste(a, "vs.", b)
      results[[label]] <- median.test.twosample.independent.mann.whitney(
        g1 = groups[[a]]
        ,g2 = groups[[b]]
        ,...
      )
      results[[label]]$data.name <- label
    }
    results
  } else {
    stop("Need to provide at least 2 groups.")
  }
}
|
2b0d5be61b8765033ec7bea8072373bd0868f10d
|
e190cbf2302ef6b24ca725efda0e2dcea04e36e0
|
/man/proposal_nclades_plus_3_pars.Rd
|
fe74da0cb7982cb4fd2ed1e8f39bb108a38f5397
|
[] |
no_license
|
cran/BBMV
|
043b458988249f4225606951f1ca6ae48bae4e42
|
a6ca4c69ec0e8c7b6b8b501448102c0060fb570a
|
refs/heads/master
| 2021-01-20T07:29:28.190312
| 2018-04-30T06:34:16
| 2018-04-30T06:34:16
| 90,007,020
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 797
|
rd
|
proposal_nclades_plus_3_pars.Rd
|
\name{proposal_nclades_plus_3_pars}
\alias{proposal_nclades_plus_3_pars}
%- Also NEED an '\alias' for EACH other topic documented here.
\title{
Parameter update for the multiclade MCMC function
}
\description{
Internal function that proposes parameter updates used in MCMC estimation of the BBMV model.
}
\usage{
proposal_nclades_plus_3_pars(type = "Uniform", sensitivity, pars, n_clades)
}
%- maybe also 'usage' for other objects documented here.
\arguments{
\item{type}{
The type of proposal function, only 'Uniform' is available (the default).
}
\item{sensitivity}{
A numeric vector specifying the width of the uniform proposal for each parameter.
}
\item{pars}{
The current parameters in the MCMC chain.
}
\item{n_clades}{
The number of clades under study.
}
}
\author{
F. C. Boucher
}
|
003d9be78eab43b07aff9a67e86177c8c771ee5a
|
4516a5b398614cb11eca60f71d3db7e57abf3e33
|
/man/digIt.Rd
|
ed88bb5209d5215a166c4abba864b70821a9d896
|
[] |
no_license
|
SigmaMonstR/digIt
|
147af149c82b390506b2875d7a96493591a26b9d
|
2047d021dac5a334a6715cd262a69ce39d26dafe
|
refs/heads/master
| 2021-09-02T10:15:57.770908
| 2018-01-01T20:25:48
| 2018-01-01T20:25:48
| 111,838,917
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 3,453
|
rd
|
digIt.Rd
|
\name{digIt}
\alias{digIt}
%- Also NEED an '\alias' for EACH other topic documented here.
\title{
digIt() for accessing and importing data
}
\description{
Most example datasets available for statistics and data science are overly simplified. This wrapper provides access to a curated set of more complex data.
}
\usage{
digIt(dataset, download = FALSE, readme = FALSE)
}
%- maybe also 'usage' for other objects documented here.
\arguments{
\item{dataset}{
Character string naming the dataset to retrieve; must match an entry in the package's dataset index (see \code{digList()}).
}
\item{download}{
Logical. If \code{TRUE}, the zipped dataset is downloaded to the working directory instead of being loaded into memory. Defaults to \code{FALSE}.
}
\item{readme}{
Logical. If \code{TRUE}, the dataset's README file is downloaded and opened for viewing. Defaults to \code{FALSE}.
}
}
\details{
%% ~~ If necessary, more details than the description above ~~
}
\value{
%% ~Describe the value returned
%% If it is a LIST, use
%% \item{comp1 }{Description of 'comp1'}
%% \item{comp2 }{Description of 'comp2'}
%% ...
}
\references{
%% ~put references to the literature/web site here ~
}
\author{
%% ~~who you are~~
}
\note{
%% ~~further notes~~
}
%% ~Make other sections like Warning with \section{Warning }{....} ~
\seealso{
%% ~~objects to See Also as \code{\link{help}}, ~~~
}
\examples{
##---- Should be DIRECTLY executable !! ----
##-- ==> Define data, use random,
##-- or do help(data=index) for the standard data sets.
## The function is currently defined as
function (dataset, download = FALSE, readme = FALSE)
{
base.path <- "https://s3.amazonaws.com/whoa-data/"
index.pos <- grep(dataset, digit.cache$dataset)[1] * 1
if (length(index.pos) == 0) {
warning("Dataset not found. Look up datasets using digList()")
}
else {
download.zip <- paste0(base.path, digit.cache$zip.package[index.pos])
download.data <- paste0(base.path, digit.cache$file.name[index.pos])
download.readme <- paste0(base.path, digit.cache$readme[index.pos])
load.function <- digit.cache$func[index.pos]
}
if (download == TRUE && length(index.pos) > 0) {
download.file(download.zip, getwd())
message(paste0(dataset, " has been downloaded to ", getwd()))
}
else if (download == FALSE && length(index.pos) > 0) {
if (load.function == "import") {
df <- import(download.data)
message(paste0(dataset, " has been loaded into memory."))
message(paste0("Dimensions: n = ", nrow(df), ", k = ",
ncol(df)))
if (readme == TRUE) {
temp.file <- tempfile()
download.file(download.readme, temp.file, quiet = TRUE)
file.show(temp.file)
}
return(df)
}
else if (load.function == "shp" && length(index.pos) >
0) {
if (readme == TRUE) {
temp.file <- tempfile()
download.file(download.readme, temp.file, quiet = TRUE)
file.show(temp.file)
}
temp.file <- tempfile()
download.file(download.zip, temp.file, quiet = TRUE)
temp.dir <- tempdir()
unzip(temp.file, exdir = temp.dir)
shape <- readOGR(dsn = temp.dir, layer = "cb_2016_us_cd115_20m")
message(paste0(dataset, " has been loaded into memory."))
return(shape)
}
}
}
}
% Add one or more standard keywords, see file 'KEYWORDS' in the
% R documentation directory.
\keyword{ ~kwd1 }% use one of RShowDoc("KEYWORDS")
\keyword{ ~kwd2 }% __ONLY ONE__ keyword per line
|
f2df9ac7e9067e1ebeaf44546a64dcf63a5c2321
|
618fb1c25fded55c52728970aef6bc23c3e4ee27
|
/lulcc/man/summary-methods.Rd
|
924072459094d64a54729b707a20d30cdc0fb71f
|
[] |
no_license
|
npp97/r_lulcc2
|
b8a5417a54d55c89276ab6c726c5d0ee9ede15a8
|
e95cf8f853422e0227088635c7dd1f1761d5d0e5
|
refs/heads/master
| 2020-04-20T05:26:52.399114
| 2018-06-20T18:31:46
| 2018-06-20T18:31:46
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 642
|
rd
|
summary-methods.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/summary.R
\docType{methods}
\name{summary}
\alias{summary}
\alias{summary,LulcRasterStack-method}
\alias{summary,ExpVarRasterStack-method}
\alias{summary,Model-method}
\title{Summary}
\usage{
summary(object, ...)
\S4method{summary}{LulcRasterStack}(object, ...)
\S4method{summary}{ExpVarRasterStack}(object, ...)
\S4method{summary}{Model}(object, ...)
}
\arguments{
\item{object}{an object belonging to one of the classes in \code{lulcc}}
\item{...}{additional arguments (none)}
}
\value{
A matrix, data.frame or list
}
\description{
Summarise lulcc objects
}
|
8bacc56673cdadf0a2b06c550acae31c681fa194
|
4cad9e2066a3163f66a927742b530d7cb7691eca
|
/DigitsRecognition/digitRecognition.R
|
0a9ba51e2c0573ad485248817c539622c5e7367e
|
[] |
no_license
|
pickacarrot/Projects
|
e89a05328f6d4370f28b8b3b8196020baa119ef6
|
e655ffb2aeb88301f11e4e1b86cc16ac5319730b
|
refs/heads/master
| 2021-01-01T03:57:27.561374
| 2016-05-23T06:51:23
| 2016-05-23T06:51:23
| 59,448,831
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 11,089
|
r
|
digitRecognition.R
|
# Read the MNIST-style training data: column 1 is the digit label, the
# remaining 784 columns are pixel intensities.
digits = read.csv("/Users/PullingCarrot/Desktop/201509-12/STA141statisticalComputing/homework/hw3/digitsTrain.csv")
dim(digits)
head(digits)
summary(digits)
# Randomize the row order (seeded for reproducibility) so the contiguous
# cross-validation folds built later are effectively random.
set.seed(0112)
randIndex = sample(1:nrow(digits), nrow(digits))
digits = digits[randIndex,]
# 1. euclidean distance matrix
# exclude the first label column when computing distance
eucD = dist(digits[,-1], method = "euclidean", diag = TRUE, upper = TRUE, p = 2)
eucD = as.matrix(eucD)
dim(eucD)
eucD[1:5, 1:5]
# 2. manhattan distance matrix
manD = dist(digits[,-1], method = "manhattan", diag = TRUE, upper = TRUE)
manD = as.matrix(manD)
dim(manD)
manD[1:5,1:5]
# 3. euclidean distance matrix after standardization for each pixel
# Find the columns that are all zeros (sd would be 0, giving NaN after
# standardization) and exclude them.
temp = unlist(lapply(digits[,-1], function(column) round(max(column),5) > 0))
notAllZeroPixel = digits[,-1][,temp]
dim(notAllZeroPixel)
# Standardize each remaining pixel column to mean 0, sd 1
stand = as.data.frame(lapply(notAllZeroPixel, function(x) (x-mean(x))/sd(x)))
dim(stand)
eucStandD = dist(stand, method = "euclidean", diag = TRUE, upper = TRUE)
eucStandD = as.matrix(eucStandD)
dim(eucStandD)
eucStandD[1:5, 1:5]
# 4. manhattan distance matrix after standardization for each pixel
manStandD = dist(stand, method = "manhattan", diag = TRUE, upper = TRUE)
manStandD = as.matrix(manStandD)
dim(manStandD)
manStandD[1:5, 1:5]
# 5. euclidean distance matrix after scaling by maximum value
# NOTE(review): this data frame shadows base::scale for the rest of the script
scale = as.data.frame(lapply(notAllZeroPixel, function(x) x/max(x)))
eucScaleD = dist(scale, method = "euclidean", diag = TRUE, upper = TRUE)
eucScaleD = as.matrix(eucScaleD)
dim(eucScaleD)
eucScaleD[1:5, 1:5]
# 6. manhattan distance matrix after scaling by maximum value
manScaleD = dist(scale, method = "manhattan", diag = TRUE, upper = TRUE)
manScaleD = as.matrix(manScaleD)
dim(manScaleD)
manScaleD[1:5, 1:5]
# Return the most frequent value of x (the statistical mode); ties are broken
# by first appearance in x. Works for numeric and character vectors alike.
# resource: http://stackoverflow.com/questions/2547402/standard-library-function-in-r-for-finding-the-mode
getMode <- function(x) {
  candidates <- unique(x)
  counts <- tabulate(match(x, candidates))
  candidates[which.max(counts)]
}
# Split the cross-validation groups.
# For this assignment, we use 5-fold cross validation; contiguous index blocks
# are valid folds because the rows were shuffled above.
kfolds = 5
size = nrow(digits)
groupSize = size/kfolds
# folds is a list of vectors, each vector has the length of 1000, which is also the index
# for each group of test data
folds = list(1:groupSize, (groupSize+1):(2*groupSize), (2*groupSize+1):(3*groupSize),
             (3*groupSize+1):(4*groupSize), (4*groupSize+1):size)
# Subtract the labels for later comparison against predictions
trueLabels = digits$label
# Cross-validated misclassification rate of a k-nearest-neighbour classifier.
#
# k: number of neighbours that vote on each prediction.
# D: full pairwise distance matrix over all rows of `digits`.
# Uses the globals `kfolds`, `folds`, `trueLabels`, `digits` and `getMode()`
# defined above. Returns the overall misclassification rate (scalar).
predictionError =
    function(k, D) {
        listOfPredicts = lapply(1:kfolds, function(i) {
            # rows = this fold's test cases, columns = all training cases
            cv = D[folds[[i]], -folds[[i]]]
            # labels of the training cases (everything outside the fold)
            cv_label = trueLabels[-folds[[i]]]
            apply(cv, 1, function(row) {
                # row holds one test case's distances to every training case
                row = as.matrix(row)
                # labels of the k closest training cases
                kNearLabel = cv_label[order(row)][1:k]
                # majority vote (ties -> first-seen label, see getMode)
                getMode(kNearLabel)
            })
        })
        # predictions for every row, in fold order (= original row order here)
        allPredicts = unlist(listOfPredicts)
        # count predictions that disagree with the true label
        totalError = sum(digits$label!=allPredicts)
        # misclassification rate over the whole data set
        misClassRate = totalError/nrow(digits)
        misClassRate
    }
# create a table recording the misclassification rate for each combination of
# k (nearest neighbours) and distance metric
# D is a named list of the six distance matrices built above
D = list(eucD, manD, eucStandD, manStandD, eucScaleD, manScaleD)
names(D) = c("eucD", "manD", "eucStandD", "manStandD", "eucScaleD", "manScaleD")
upperK = 30
# errors: one vector per distance matrix, each holding the CV error for
# knn models with k = 1:upperK
errors = lapply(D, function(d){
    temp = lapply(1:upperK, function(k){
        predictionError(k, d)
    })
    unlist(temp)
})
# combine the error vectors into a data frame (rows = k, columns = metric)
errorTable = as.data.frame(do.call("cbind", errors))
colnames(errorTable) = names(D)
errorTable
# locate the smallest error rate and the corresponding k/metric combination
which(errorTable == min(errorTable), arr.ind = TRUE)
# Plot the overall cross-validation misclassification rate versus k,
# one line per distance metric
errorTable$k = 1:upperK
library(ggplot2)
ggplot(errorTable, aes(x = k)) +
    geom_line(aes(y = eucD, colour = "eucD")) +
    geom_line(aes(y = manD, colour = "manD")) +
    geom_line(aes(y = eucStandD, colour = "eucStandD")) +
    geom_line(aes(y = manStandD, colour = "manStandD")) +
    geom_line(aes(y = eucScaleD, colour = "eucScaleD")) +
    geom_line(aes(y = manScaleD, colour = "manScaleD")) +
    scale_x_continuous(breaks = seq(1, 30, by = 1)) +
    xlab("Number of nearest neighbors") +
    ylab("Average misclassification rate") +
    ggtitle("Average misclassification rate\n under different models")
# Calculate the confusion matrix for the training set using the chosen value of k and metric
# use model k=4 and eucD matrix
# Leave-one-out predictions for every row using k-nearest neighbours.
#
# k: number of neighbours voting; D: full pairwise distance matrix.
# Uses the globals `trueLabels` and `getMode()`.
# Returns a vector with one predicted label per row of D.
predictions =
    function(k, D) {
        listOfPredicts = lapply(1:nrow(D), function(i) {
            # hold row i out; all other rows act as the training set
            row = D[i, -i]
            # labels of the training rows
            row_label = trueLabels[-i]
            row = as.matrix(row)
            # labels of the k closest training rows
            kNearLabel = row_label[order(row)][1:k]
            # majority vote
            getMode(kNearLabel)
        })
        # one prediction per row of the data
        unlist(listOfPredicts)
    }
# predictions with the chosen model: k = 4, plain euclidean distances
predicts = predictions(4, eucD)
trueAndPredict = as.data.frame(cbind(trueLabels, predicts))
# confusion matrix: rows = true label, columns = predicted label
confusionMatrix = table(trueAndPredict)
confusionMatrix
# Which digits were generally classified best? worst?
# per-digit accuracy: correct predictions / all cases of that digit (row-wise)
accuracy = lapply(1:10, function(i) {
    confusionMatrix[i,i]/sum(confusionMatrix[i,])
})
accuracy = unlist(accuracy)
uniqueLabels = 0:9
accuracyTable = as.data.frame(cbind(uniqueLabels, accuracy))
accuracyTable[order(accuracyTable$accuracy),]
# Which digits were generally confused with others?
confusionMatrix
# per-digit confusion: share of predictions of digit i that were wrong (column-wise)
confusion = lapply(1:10, function(i) {
    1-(confusionMatrix[i,i]/sum(confusionMatrix[,i]))
})
confusion = unlist(confusion)
uniqueLabels = 0:9
confusionTable = as.data.frame(cbind(uniqueLabels, confusion))
confusionTable[order(confusionTable$confusion),]
# Show some of the digits that were mis-classified that were difficult for a human to classify.
# Suggest why these were misclassified.
# function of drawing an image
# Reshape a 784-element pixel vector into a 28x28 integer matrix (row-major).
getImage =
    function(vals)
    {
        matrix(as.integer(vals), 28, 28, byrow = TRUE)
    }
# Render one digit image. `vals` may be a pixel vector or a matrix;
# `colors` maps low-to-high intensity onto a white-to-black greyscale ramp.
draw = function(vals, colors = rgb((255:0)/255, (255:0)/255, (255:0)/255), ...)
{
    if(!is.matrix(vals))
        vals = getImage(vals)
    m = t(vals) # transpose the image so image() orients it correctly
    m = m[,nrow(m):1] # flip vertically: image() draws the first row at the bottom
    image(m, col = colors, ..., xaxt = "n", yaxt = "n")
}
# rows whose prediction disagrees with the true label
wrongPredict = digits[trueAndPredict$trueLabels != trueAndPredict$predicts,]
# how many digits were predicted wrongly
numOfWrong = nrow(wrongPredict)
# look at some mis-classified digits in a 6x6 grid
par(mfrow = c(6,6), mar = c(0,0,1,1) + 0.1)
# digit 2 mis-classified as 1 (first 12 cases)
twoAsOne = digits[trueAndPredict$trueLabels == 2 & trueAndPredict$predicts == 1,][1:12,]
lapply(1:nrow(twoAsOne), function(i) {
    draw(twoAsOne[i,-1])
    return (twoAsOne$label[i])
})
# digit 4 mis-classified as 9
fourAsNine = digits[trueAndPredict$trueLabels == 4 & trueAndPredict$predicts == 9,][1:12,]
lapply(1:nrow(fourAsNine), function(i) {
    draw(fourAsNine[i,-1])
    return (fourAsNine$label[i])
})
# digit 7 mis-classified as 1
sevenAsOne = digits[trueAndPredict$trueLabels == 7 & trueAndPredict$predicts == 1,][1:12,]
lapply(1:nrow(sevenAsOne), function(i) {
    draw(sevenAsOne[i,-1])
    return (sevenAsOne$label[i])
})
# look at one misclassification in detail
par(mfrow = c(1,1), mar = c(0,0,1,1) + 0.1)
sevenAsOne[2,]
draw(sevenAsOne[2,-1])
rownames(sevenAsOne[2,])
# based on our model, the 4 nearest neighbors of this misclassifications are:
# Return the k nearest neighbours (as rows of `digits`) of one observation.
#
# k: number of neighbours; D: full pairwise distance matrix;
# rowIndex: row name/index of the observation of interest.
neighbors =
    function(k, D, rowIndex) {
        row = D[rowIndex,]
        row = as.matrix(row)
        # take positions 2..k+1 of the ordering: position 1 is the
        # observation itself (distance 0), so it is excluded
        kNearNeighbor = order(row)[2:(k+1)]
        # return the matching rows of the original data
        digits[kNearNeighbor,]
    }
# the 4 nearest neighbours of the misclassified case inspected above
nb = neighbors(4, eucD, '2065')
nb$label
# draw the four neighbours side by side
par(mfrow = c(1,4), mar = c(0,0,1,1) + 0.1)
lapply(1:nrow(nb), function(i) {
    draw(nb[i,-1])
    return(nb$label[i])
})
# draw some true "7"s for comparison
par(mfrow = c(4,6), mar = c(0,0,1,1) + 0.1)
someSevens = digits[digits$label == 7,][1:24,]
lapply(1:nrow(someSevens), function(i) {
    draw(someSevens[i,-1])
    return(someSevens$label[i])
})
###############################################################################################
# Distance to Average & Cross-Validation
avgDigits =
    function(df) {
        # Average image per digit: for each label 0-9, the column-wise mean of
        # every row carrying that label. Returns a data frame with one row per
        # digit; the first column is the (averaged) label itself and the
        # remaining columns are mean pixel intensities.
        perDigit = lapply(0:9, function(d) {
            colMeans(df[which(df$label == d), ])
        })
        as.data.frame(do.call(rbind, perDigit))
    }
# test-fold index vectors (same contiguous split as defined earlier)
folds = list(1:groupSize, (groupSize+1):(2*groupSize), (2*groupSize+1):(3*groupSize),
             (3*groupSize+1):(4*groupSize), (4*groupSize+1):size)
# Cross-validated error of a "distance to class average" classifier: each test
# row is labelled with the digit whose average image is closest.
#
# df: data frame with `label` plus pixel columns; method: metric name passed
# to dist() ("euclidean", "manhattan", ...).
# Uses the globals `kfolds`, `folds` and `avgDigits()`.
# NOTE(review): the label column is part of both `test[rowNum,]` and the class
# averages, so it contributes to the distances -- confirm this is intended.
predictionError.avgDist =
    function(df, method) {
        listOfPredicts = lapply(1:kfolds, function(i) {
            test = df[folds[[i]],]
            train = df[-folds[[i]],]
            # the 10 average digit images computed from the training folds
            avg = avgDigits(train)
            predictionForTest =
                lapply(1:nrow(test), function(rowNum) {
                    # distances between this test row (row 1) and each class average
                    avgDist = dist(rbind(test[rowNum,], avg), method, upper = TRUE, diag = TRUE)
                    avgDist = as.matrix(avgDist)
                    numbers = 0:9
                    # digit whose average image is nearest
                    numbers[order(avgDist[1,-1])][1]
                })
            unlist(predictionForTest)
        })
        # predictions for every row, in fold order
        allPredicts = unlist(listOfPredicts)
        # count disagreements with the true labels
        totalError = sum(df$label!=allPredicts)
        # misclassification rate over the whole data set
        misClassRate = totalError/nrow(df)
        misClassRate
    }
predictionError.avgDist(digits, "euclidean")
predictionError.avgDist(digits, "manhattan")
|
b7c4d84cf4c739231fe86151704efa7582d223b7
|
c2e589d75eae2b603abc6b126b1206780e87cf70
|
/pre-analysis/agg_harv_biomass.R
|
65f7853833bd6cc9e3e332abc70ee1833810fa13
|
[] |
no_license
|
MARIASUAM/harvest_x_climate_LANDIS
|
13b3a8cfcdc68d3d59a72cda2a31b192a741fe1f
|
a7810e42c2ded4863432bb7db3befb825e6701e8
|
refs/heads/master
| 2023-04-18T02:09:23.889062
| 2022-09-14T14:15:40
| 2022-09-14T14:15:40
| 412,398,572
| 0
| 1
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,389
|
r
|
agg_harv_biomass.R
|
# Analysis of harvested biomass on aggregated tables
mgmt.scenarios <- c("20210921_nomanag_current_MIROC5",
                    "20210921_nomanag_rcp45_MIROC5",
                    "20210921_nomanag_rcp85_MIROC5")

### SETUP
library(dplyr)
library(ggplot2)
library(reshape2)
library(lubridate)

di <- "/Users/maria.suarez.munoz/Google Drive/proj_LANDIS/experiments/"

# colour per harvest scenario, line type per climate scenario
cols <- c("conserv" = "#33A02C", # dark green
          "proactive" = "#6A3D9A", # dark purple
          "proactiveplus" = "#FF7F00", # orange
          "nomanag" = "#E31A1C") # red
lines <- c("current" = "solid", "rcp45" = "dotdash", "rcp85" = "dashed")

# Read one scenario's aggregated harvested-biomass table (`suffix` selects the
# pines / dense_pines file) and tag it with the harvest and climate scenario
# parsed from the scenario name. NOTE(review): as in the original code,
# strsplit(...)[[1]] uses only the first row's Scenario value for the whole
# table -- fine while each file holds a single scenario.
read_harv_table <- function(scenario, suffix) {
  read.table(paste0(di, scenario, "/results/aggregated_harvested_biomass_", suffix, ".txt"),
             header = TRUE) %>%
    mutate(Harv_scenario = strsplit(as.character(Scenario), split = "_")[[1]][2],
           Clim_scenario = strsplit(as.character(Scenario), split = "_")[[1]][3])
}

# Harvested biomass, all scenarios stacked
harv_bio_pines <- do.call(rbind, lapply(mgmt.scenarios, read_harv_table, suffix = "pines"))
harv_bio_dense_pines <- do.call(rbind, lapply(mgmt.scenarios, read_harv_table, suffix = "dense_pines"))

# Write one biomass-vs-time plot (colour = harvest scenario, line type =
# climate scenario) to a JPEG file.
plot_harv_bio <- function(dat, outfile) {
  jpeg(file = outfile, width = 6, height = 4, units = "in", res = 300)
  p <- dat %>%
    ggplot(aes(x = Time, y = Avg_harv_biomass, group = Scenario)) +
    geom_line(aes(linetype = Clim_scenario, color = Harv_scenario)) +
    geom_point(aes(color = Harv_scenario)) +
    theme_classic() +
    theme(legend.position = "bottom") +
    scale_color_manual(values = cols) +
    scale_linetype_manual(values = lines)
  print(p) # ggplot objects are not auto-drawn inside a function
  dev.off()
}

plot_harv_bio(harv_bio_pines, paste0(di, "outputs/210921_harv_bio_pines_mask.jpeg"))
plot_harv_bio(harv_bio_dense_pines, paste0(di, "outputs/210921_harv_bio_dense_pines_mask.jpeg"))
|
20c8a24050ccf92d15d735a6349ca7e07f5621a9
|
94423d6a20d98955fab28fdb97cbca950389c241
|
/01_StreamR_Setup/01_01_OAuth_File_Generator.R
|
0b3f4c07209f02becdd913a4d06df4e1f27d0f63
|
[] |
no_license
|
alabarga/kschool_data_science_master_project
|
2f3b48ff61461d86305944c78993db5ba74c6de7
|
234d92ce487d6104a4f1b0c1959c788af7e1ba28
|
refs/heads/master
| 2021-04-15T09:39:52.970982
| 2016-04-26T09:52:03
| 2016-04-26T09:52:03
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,996
|
r
|
01_01_OAuth_File_Generator.R
|
#This script creates an OAuth credentials file.
#It will be used as a "key" to login in Twitter by StreamR.
# Original idea and more info:
# http://pablobarbera.com/blog/archives/1.html
#Load required library.
library(ROAuth)
#Get Twitter API credentials from XML file.
# NOTE(review): xmlToDataFrame() comes from the XML package, which is never
# loaded here -- add library(XML) or this line fails in a fresh session.
dfCredentials <- xmlToDataFrame(doc ="twitter_credentials.xml" )
### PART 1 ###
# Declare Twitter API Credentials & Create Handshake
requestURL <- "https://api.twitter.com/oauth/request_token"
accessURL <- "https://api.twitter.com/oauth/access_token"
authURL <- "https://api.twitter.com/oauth/authorize"
# Secret keys read from the XML file: first element = consumer key,
# second element = consumer secret.
consumerKey <- as.character(dfCredentials[[1]][1])
consumerSecret <- as.character(dfCredentials[[1]][2])
#New instance of the OAuth factory object.
my_oauth <- OAuthFactory$new(consumerKey = consumerKey,
                             consumerSecret = consumerSecret,
                             requestURL = requestURL,
                             accessURL = accessURL,
                             authURL = authURL)
# Drop the variables holding sensitive information from the workspace.
# (rm() only removes the bindings; it does not scrub the values from memory.)
rm (consumerSecret)
rm (consumerKey)
rm (requestURL)
rm (accessURL)
rm (dfCredentials)
#Execute the next step and wait for the web browser to open. Then grant access to your previously created application here https://apps.twitter.com/app/new
my_oauth$handshake(cainfo = system.file("CurlSSL", "cacert.pem", package = "RCurl"))
#Now wait and introduce on R console the PIN provided on Twitter so that the OAuth file is generated.
### STOP HERE!!! ###
##### PART 2 #####
# Save the my_oauth data to an .Rdata file. This file will be used in future steps to login on Twitter when running automated tasks...
save(my_oauth, file = "my_oauth_twitter.Rdata")
#Now the key that will be used by StreamR to connect to Twitter is ready. This script has the only purpose of generating it and won't be
#used anymore except if another PIN is required for a new application.
|
cc38c4b2040f1a143f3a2d341bf862da7f25089b
|
fddd539b6b3a84d9fff480d5ff8f84801154f1a1
|
/R/annr.model.prod.R
|
a8337d0987f6e27276e28be3b7c7dd61e1c9481c
|
[] |
no_license
|
btorobrob/cesr
|
7ca9ac1efe853316e99801aeec37100792df41ea
|
6881ec6b9bd2ee7e4293c7c92297449cfccc7e91
|
refs/heads/master
| 2023-09-03T14:56:12.168122
| 2023-08-24T10:28:35
| 2023-08-24T10:28:35
| 230,813,022
| 1
| 1
| null | 2020-05-31T14:54:00
| 2019-12-29T22:58:12
|
R
|
UTF-8
|
R
| false
| false
| 1,942
|
r
|
annr.model.prod.R
|
# Annual productivity model: juvenile vs adult captures per site/year fitted
# as a binomial GLMM (lme4) with random intercepts for year, site and
# site:year. The year random effects are turned into a productivity index.
#
# x: list with $ad.data and $jv.data, each holding site/year/totcaps/corrcaps.
# year: reference year; -1 (default) resolves to the latest year in the data.
# offset: if TRUE, derive a capture-effort offset via calc.offset()
#         (defined elsewhere in this package); otherwise use a zero offset.
# cl: confidence level for the interval around the index.
# Returns list(model = fitted glmer, parms = per-year index table,
#              test = variance components).
# NOTE(review): ranef()/VarCorr() are called unqualified, so lme4 must be
# attached; the 'postVar' attribute is the pre-1.0 lme4 name (now 'condVar').
annr.model.prod <-
function(x, year=-1, offset=TRUE, cl=0.95){

  ad <- x$ad.data[ , c('site','year','totcaps','corrcaps') ]
  names(ad) <- c('site','year','adcaps','adexcaps')
  jv <- x$jv.data[ , c('site','year','totcaps','corrcaps') ]
  names(jv) <- c('site','year','jvcaps','jvexcaps')
  x <- merge(ad, jv, by=c('site','year')) # exclude sites with no ad/jv coverage, so no all=

  x$totcaps <- x$adcaps + x$jvcaps
  x <- x[x$totcaps>0, ] # no birds caught so doesn't contribute to model fit

  # NOTE(review): `year` is resolved here but never used afterwards -- the
  # index is always rebased on the last row below. Confirm intent.
  if( year==-1 )
    year <- max(x$year)

  if( length(table(x$site)) < 6 ){
    wmessage <- "Fewer than 6 sites found, the model may struggle to converge"
    warning(wmessage, call.=FALSE, immediate.=TRUE)
  }

  if (offset) {
    x <- calc.offset(x) # presumably adds an `offset` column to x -- confirm
  } else {
    x$offset <- 0
  }

  # binomial response: juvenile vs adult captures. `offset=offset` is looked
  # up in `data=x` first, so it picks up the x$offset column (when present)
  # rather than the logical flag of the same name.
  x.lm <- lme4::glmer(as.matrix(cbind(x$jvcaps,x$adcaps)) ~ (1|year)+(1|site)+(1|site:year), family="binomial", offset=offset, data=x)

  # per-year random effects (third ranef component) and their conditional SDs
  years <- as.numeric(row.names(ranef(x.lm)[[3]]))
  parm <- ranef(x.lm)[[3]][ , 1]
  se <- sqrt(c(attr(ranef(x.lm)[[3]], 'postVar')))
  res <- cbind(years, data.frame(cbind(parm, se))) # necessary to stop factor conversion!
  row.names(res) <- NULL

  # rebase so the final year is the reference
  # NOTE(review): the negative branch leaves the last year at 2*parm[n]
  # rather than 0 -- check whether `-` was intended in both branches.
  if( res$parm[nrow(res)] > 0 )
    res$parm <- res$parm - res$parm[nrow(res)]
  else
    res$parm <- res$parm + res$parm[nrow(res)]
  res$index <- exp(res$parm) # NOTE: log back-transform rather than logistic!! gives no jv per ad
                             # rather simply ppn jvs

  # symmetric confidence interval on the log scale
  cl.int <- qnorm(1-((1-cl)/2))
  res$lcl <- exp(res$parm - cl.int * res$se)
  res$ucl <- exp(res$parm + cl.int * res$se)

  # variance components: s = site, y = year, sy = site:year interaction
  vc <- VarCorr(x.lm)
  var.comp <- list(var.s=vc[[2]][[1]], se.s=as.numeric(attr(vc[[2]], "stddev")),
                   var.y=vc[[3]][[1]], se.y=as.numeric(attr(vc[[3]], "stddev")),
                   var.sy=vc[[1]][[1]], se.sy=as.numeric(attr(vc[[1]], "stddev")))

  return(list(model=x.lm, parms=res, test=var.comp))
}
|
cf95cb1517b429f1606567445b42927ecad7d82f
|
ff2618d2d0272aca50980396124697808b30baac
|
/man/temp.plot.ibutton.Rd
|
4cf1b05dcd41e20837bc9e457cb11c3bfbb8c566
|
[] |
no_license
|
panx125/figsci
|
217d1208a2c82958e75388533cbfebe9d3ac51dc
|
b7671ec1b5bf8ce64c904e59631fcc5d581f1f70
|
refs/heads/master
| 2020-03-11T01:03:28.328645
| 2018-04-12T10:41:12
| 2018-04-12T10:41:12
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 557
|
rd
|
temp.plot.ibutton.Rd
|
\name{temp.plot.ibutton}
\alias{temp.plot.ibutton}
\title{Plot humidity and temperature curve}
\usage{
temp.plot.ibutton(temp=temperature,humidity=NULL)
}
\description{
This function can be used to plot a temperature or humidity curve. The region between the maximum and minimum values is shaded in gray. If humidity data is unavailable, it can plot and save the temperature curve separately to local disk.
}
\arguments{
\item{temp}{the name of the temperature file.}
\item{humidity}{the name of the humidity file. Default is drawing the temperature curve across time.}
}
|
2f55d1eccfe2e26dac3329398f1c7735553eedc7
|
9b34b2250d39c1b05a9d44392d7fed4711d26d30
|
/R/cor.test.goodies.R
|
38f21a141fdaa913e081c70123fefaac1673bdd8
|
[] |
no_license
|
lbraglia/lbstat
|
11bbd806dfb74e46ce332cac23c33da726541205
|
f8dc128b507bc1b1cb2741af49c171971abe658c
|
refs/heads/master
| 2023-05-11T00:24:32.746694
| 2023-04-28T12:18:40
| 2023-04-28T12:18:40
| 51,751,382
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,396
|
r
|
cor.test.goodies.R
|
#' cor.test method for data.frame
#'
#' Calculates all coefficients and confidence intervals for the
#' combinations of variable available in the data.frame
#'
#' @param x a data.frame
#' @param alternative cor.test alternative parameter
#' @param method cor.test method parameter
#' @param exact cor.test exact parameter
#' @param conf.level cor.test conf.level parameter
#' @param continuity cor.test continuity parameter
#' @param ... further arguments passed to cor.test
#' @examples cor.test(airquality)
#' @export
cor.test.data.frame <-
    function(x,
             alternative = c('two.sided', 'less', 'greater'),
             method = c("pearson", "kendall", "spearman"),
             exact = NULL, conf.level = 0.95,
             continuity = FALSE,
             ...)
{
    ## every unordered pair of column indices (2 x n_pairs matrix)
    pairs <- combn(seq_len(ncol(x)), 2)
    ## run cor.test for one pair and return a one-row summary
    one_pair <- function(i, j) {
        nm1 <- names(x)[i]
        nm2 <- names(x)[j]
        ht <- stats::cor.test(x = x[, nm1], y = x[, nm2],
                              alternative = alternative,
                              method = method,
                              exact = exact,
                              conf.level = conf.level,
                              continuity = continuity,
                              ...)
        ## point estimate plus lower/upper confidence bound
        data.frame(v1 = nm1,
                   v2 = nm2,
                   est = unname(ht$estimate),
                   low = ht$conf.int[1],
                   up = ht$conf.int[2])
    }
    out <- lapply(seq_len(ncol(pairs)),
                  function(k) one_pair(pairs[1, k], pairs[2, k]))
    out <- do.call(rbind, out)
    rownames(out) <- NULL
    out
}
#' cor.test p-values matrix
#' @param x matrix/data.frame of variable to be analyzed
#' @param ... parameters passed to \code{cor.test}
#'
#' @export
cor.test_p <- function(x, ...)
{
    # x: matrix/data.frame whose column pairs are tested for correlation
    # ...: options forwarded to cor.test
    # [[3]] is position 3 of the htest object (the p.value for the
    # default Pearson test)
    pairwise_p <- function(i, j) cor.test(x[, i], x[, j], ...)[[3]]
    vars <- colnames(x)
    z <- outer(vars, vars, Vectorize(pairwise_p))
    dimnames(z) <- list(vars, vars)
    z
}
|
3ac89900ccd14a4d9bcaffa04d838e2bc950ad67
|
f0ba703ae2b2ec88ef816af1f39fbde26e3c13dd
|
/scripts/rbp/rbp_functions.R
|
ec7f4ffb4721cd91f8393c7ce7d32112a94a1c84
|
[
"MIT"
] |
permissive
|
vaguiarpulido/covid19-research
|
576582deaae06d1407f8e813da6e12f8baa77342
|
84ec19cbdd60f4eaee5b5f3e593286dff67fcc95
|
refs/heads/master
| 2023-04-08T04:04:29.817613
| 2021-05-17T23:13:05
| 2021-05-17T23:13:05
| 264,233,353
| 4
| 1
|
NOASSERTION
| 2021-05-17T23:13:06
| 2020-05-15T15:42:56
|
R
|
UTF-8
|
R
| false
| false
| 12,069
|
r
|
rbp_functions.R
|
library(data.table)
library(seqinr)
library(Biostrings)
library(TFBSTools)
library(foreach)
library(doParallel)
library(rtracklayer)
###################################################################################
# Data I/O functions
###################################################################################
# Function to read GFFs into a data.table (seqnames/start/end/type/gene).
# skipfirst=TRUE drops the first record -- presumably a whole-sequence
# "region" entry; confirm for your annotation files.
readGFF=function(gff_file, skipfirst=T){
  gff = import(gff_file)
  if(skipfirst){
    gff = gff[2:length(gff)]
  }
  gff = as.data.table(gff)
  gff = gff[, .(seqnames, start, end, type, gene)]
  return(gff)
}
# Function to read a FASTA file into a one-element named list holding a
# Biostrings DNAString; with toRNA=TRUE, T is replaced by U and an RNAString
# is returned instead. Only the first record of the file is used.
readFasta=function(fasta_file, toRNA=F){
  fa = read.fasta(fasta_file, as.string = T, forceDNAtolower = F)
  seq = fa[[1]][[1]]
  if(toRNA){
    seqString = RNAString(gsub("T", "U", seq))
  } else{
    seqString = DNAString(seq)
  }
  seqString = list(seqString)
  names(seqString) = names(fa)
  return(seqString)
}
# Function to read PWMs from a FASTA-like file: each record is a ">" header
# followed by tab-separated numeric rows (4 columns per row, see ncol=4).
# Returns a named list of TFBSTools PWMatrix objects keyed by PWM ID.
readPWMsFromFasta = function (pwm_file) {
  # Read all lines from PWM file
  lines = readLines(pwm_file)
  # Find header lines, start and end of each PWM
  ind = which(substr(lines, 1L, 1L) == ">")
  nseq = length(ind)
  start = ind + 1
  end = ind - 1
  end = c(end[-1], length(lines))
  # Get PWM IDs: first tab-separated word of each header, without the ">"
  ids = lapply(seq_len(nseq), function(i) {
    firstword <- strsplit(lines[ind[i]], "\t")[[1]][1]
    substr(firstword, 2, nchar(firstword))
  })
  # Split PWMs into per-record lists of tab-separated fields
  pwms = lapply(seq_len(nseq), function(i) strsplit(lines[start[i]:end[i]], "\t"))
  # Format as numeric matrix (one row per position, 4 columns)
  pwms = lapply(pwms, function(x) matrix(as.numeric(unlist(x)), ncol=4, byrow=T))
  # Convert to PWMatrix class; the matrix is flattened and refilled as
  # 4 rows named A/C/G/T (byrow=TRUE)
  pwms = lapply(seq_len(nseq), function(i){
    PWMatrix(profileMatrix=matrix(c(pwms[[i]]), byrow=TRUE, nrow=4, dimnames=list(c("A", "C", "G", "T"))), ID=ids[[i]])
  })
  # Name with PWM ID
  names(pwms) = ids
  return(pwms)
}
###################################################################################
# Function to calculate the Shannon entropy of a PWM: -sum(p * log2(p)).
# `pwm` may be a plain probability matrix or anything with an as.matrix
# method (e.g. a TFBSTools PWMatrix).
GetPWMEntropy = function(pwm){
  m = as.matrix(pwm)
  term = m * log2(m)
  # By convention 0 * log(0) = 0; without this, any zero entry in the
  # matrix would make log2() return -Inf and the whole sum NaN.
  term[m == 0] = 0
  -sum(term)
}
# Fill the gaps between annotated features with "intergenic" records so the
# returned table covers the sequence contiguously (sorted by start position).
addGapsGFF=function(gff){
  gaps = as.data.table(gaps(IRanges(gff$start, gff$end)))
  # all records are assumed to share one seqname (taken from the first row)
  gaps[, seqnames:=gff$seqnames[1]]
  gaps[, type:="intergenic"][, gene:=NA]
  gapped_gff = rbind(gff, gaps[, .(seqnames, start, end, type, gene)])
  gapped_gff = gapped_gff[order(start),]
  return(gapped_gff)
}
# Function to split the genome sequence into per-feature regions.
# Returns a list keyed by feature type; each entry is a DNAStringSet of the
# subsequences of that type, plus a "genome" entry with the full sequence.
split_genome_gff=function(ref, gff){
  gff_seqs=list()
  for(i in 1:nrow(gff)){
    gff_seqs[[i]] = ref[[1]][gff$start[i]:gff$end[i]]
  }
  gff_seqs = DNAStringSet(gff_seqs)
  # group the extracted subsequences by their annotated feature type
  gff_seqs_by_type = list()
  for(typ in gff$type){
    gff_seqs_by_type[[typ]] = gff_seqs[gff$type==typ]
  }
  gff_seqs_by_type[["genome"]] = DNAStringSet(ref)
  #gff_seqs_by_type[["neg_genome"]] = reverseComplement(DNAStringSet(ref))
  return(gff_seqs_by_type)
}
###################################################################################
# Binding site discovery functions
###################################################################################
# Multithreaded function to scan sequence(s) with multiple PWMs.
# seqString: sequence(s) accepted by TFBSTools::searchSeq; pwmList: named
# list of PWMatrix objects; rbp_to_pwm: table mapping Matrix_id -> Gene_name.
# Returns a data.table of binding sites (score >= 90%), deduplicated so each
# protein keeps only its best-scoring hit per position/strand.
# Requires a registered doParallel backend for %dopar%.
ScanSeqWithPWMs = function(seqString, pwmList, rbp_to_pwm, seqName, strand="*", print_num=TRUE){
  sites = foreach(i = 1:length(pwmList), .combine=rbind) %dopar% {
    # Read PWM ID
    id = as.character(names(pwmList)[i])
    # Scan the sequence; PWMs with no hits yield NULL, which rbind ignores
    curr_sites = searchSeq(pwmList[[i]], seqString, min.score="90%", strand=strand, seqname = seqName)
    if(length(curr_sites) > 0){
      # Convert to data table
      curr_sites = as.data.table(writeGFF3(curr_sites))
      if(length(curr_sites) > 0){
        # pull the matched sequence out of the GFF3 attributes column
        curr_sites[, seq:= curr_sites[, tstrsplit(attributes, split=";|=", perl=T)][, V6]]
        curr_sites[, attributes:=NULL]
        curr_sites[, Matrix_id:= id]
      }
    }
  }
  # Match binding sites with protein name
  sites = merge(sites, rbp_to_pwm, by="Matrix_id")
  # Find duplicate binding sites for the same protein and choose the one with the highest score
  sites = sites[sites[, .I[score == max(score)], by=.(start, end, strand, Gene_name, seqname)]$V1]
  sites = sites[!duplicated(sites[, .(start, end, strand, Gene_name, seqname)]),]
  # sort by position
  sites = sites[order(seqname, start),]
  # Drop columns not used downstream
  sites[, source:=NULL]
  sites[, feature:=NULL]
  # Add site length
  sites[, len:=nchar(seq)]
  if(print_num){
    print(paste0("Found ", nrow(sites), " sites."))
  }
  return(sites)
}
# Annotate sites with the genomic features (type/gene) they overlap;
# a site overlapping several features is returned once per feature.
annotateSites=function(sites, gff){
  sites_ranges = GRanges(sites)
  gff_ranges = GRanges(gff)
  site_overlaps = findOverlaps(sites_ranges, gff_ranges, ignore.strand=F, type="any")
  annotated_sites = sites_ranges[queryHits(site_overlaps)]
  annotated_sites$type = gff_ranges$type[subjectHits(site_overlaps)]
  annotated_sites$gene = gff_ranges$gene[subjectHits(site_overlaps)]
  # Convert to data table
  annotated_sites = as.data.table(annotated_sites)
  return(annotated_sites)
}
###################################################################################
# Scrambling functions
###################################################################################
# Function to scramble a single sequence: generates N random permutations of
# the sequence's bases, preserving its A/C/G/T composition.
scrambleSingleSeq = function(sequence, N){
  # Calculate nucleotide frequency in the real sequence (A/C/G/T counts)
  freqs = alphabetFrequency(sequence)[1:4]
  # Expand the counts into one character per base to be shuffled
  bases=c()
  for(base_type in names(freqs)){
    bases = c(bases, rep(base_type, freqs[base_type]))
  }
  # Generate N scrambled sequences
  sim_seqs = list()
  for(i in 1:N){
    sim_seqs[[i]] = DNAString(paste(sample(bases), collapse=""))
  }
  sim_seqs = DNAStringSet(sim_seqs)
  return(sim_seqs)
}
# Function to scramble a set of sequences jointly: all bases from all input
# sequences are pooled, shuffled, and re-split into pieces of the original
# widths. Returns the N scrambled copies concatenated into one DNAStringSet,
# with names giving the replicate number.
scrambleMultiSeq = function(seqs, N){
  # Calculate nucleotide frequency over the concatenated input sequences
  widths = width(seqs)
  combined_seqs = DNAString(paste0(seqs, collapse=""))
  freqs = alphabetFrequency(combined_seqs)[1:4]
  # Expand the counts into one character per base to be shuffled
  bases=c()
  for(base_type in names(freqs)){
    bases = c(bases, rep(base_type, freqs[base_type]))
  }
  # Cumulative start positions at which to split the combined sequence
  positions = c(1)
  for(w in widths){
    positions = c(positions, positions[length(positions)] + w)
  }
  # Generate N scrambled copies of the pooled bases
  sim_seqs = list()
  for(i in 1:N){
    sim_seqs[[i]] = paste(sample(bases), collapse="")
  }
  # Split each scrambled copy back into pieces of the original widths
  for(i in 1:N){
    stringSet = list()
    for(spos in 2:length(positions)){
      stringSet[[spos-1]] = DNAString(substr(sim_seqs[[i]], positions[spos-1], positions[spos]-1))
    }
    sim_seqs[[i]] = DNAStringSet(stringSet)
    names(sim_seqs[[i]]) = rep(i, length(sim_seqs[[i]]))
  }
  # Combine all replicates into one set
  sim_seqs = do.call(c, sim_seqs)
  return(sim_seqs)
}
###################################################################################
# Enrichment functions
###################################################################################
# Enrichment test for RBPs: compare the real binding-site count per protein
# and strand against the distribution of counts across simulated sequences.
# Returns a data.table with z-scores, one-sided (pval1) and two-sided (pval2)
# p-values, and their FDR-adjusted versions (padj1/padj2, per strand).
enrich_rbps_real=function(real_sites, sim_sites){
  print("Counting real binding sites per protein per strand")
  site_count = real_sites[, .N, by=.(Gene_name, strand)]
  print("Counting binding sites per protein per strand, on the simulated sequence")
  sim_site_count = sim_sites[, .N, by=.(seqname, Gene_name, strand)]
  # dcast/melt round-trip fills in explicit zero counts for proteins absent
  # from some simulated replicates
  sim_site_count = dcast(sim_site_count, seqname+strand~Gene_name, value.var = "N", fill=0)
  sim_site_count = melt(sim_site_count, id.vars=1:2, variable.name = "Gene_name", value.name = "N")
  sim_site_count = sim_site_count[, .(mean_count=mean(N), sd_count=sd(N)), by=.(Gene_name, strand)]
  print("Comparing binding site counts")
  site_count = merge(site_count, sim_site_count, by = c("Gene_name", "strand"), all=T)
  site_count[is.na(N), N:=0]
  # z-score of the real count against the simulated distribution
  print("Calculating z-scores")
  site_count[, z:=(N-mean_count)/sd_count]
  site_count[, pval1:=pnorm(-z)]
  site_count[, pval2:=2*pnorm(-abs(z))]
  # Multiple hypothesis correction, per strand
  print("FDR correction")
  site_count[, padj1:=p.adjust(pval1, "fdr"), by=strand]
  site_count[, padj2:=p.adjust(pval2, "fdr"), by=strand]
  return(site_count)
}
######################################################################
# Apply functions to each annotated region of a genome
# Build N scrambled copies of each requested region; "intergenic" regions are
# scrambled jointly (pooled bases, scrambleMultiSeq), every other region as a
# single sequence (scrambleSingleSeq on the first element).
SimulateSeqsByRegion=function(seqsets, N, regions=c("genome", "three_prime_UTR", "five_prime_UTR", "intergenic")){
  result=list()
  for(region in regions){
    print(paste0("Simulating ", N, " copies of ", region))
    if(region=="intergenic"){
      result[[region]] = scrambleMultiSeq(seqsets[[region]], N)
    }
    else{
      result[[region]] = scrambleSingleSeq(seqsets[[region]][[1]], N)
    }
  }
  return(result)
}
# For each region: scan the simulated sequences for binding sites, run the
# enrichment test against the real sites, and save both to <prefix>_*.RData.
# The full genome is additionally processed on the negative strand.
# NOTE(review): ScanSeqWithPWMs is called without its `seqName` argument in
# every call below -- confirm searchSeq tolerates a missing seqname here.
EnrichSeqSetsByRegion=function(seqsets, sim_seqsets, pwmList, rbp_to_pwm, prefix, real_sites, regions=c("three_prime_UTR", "five_prime_UTR", "intergenic", "genome")){
  for(region in regions){
    sim = sim_seqsets[[region]]
    if(region!="genome"){
      print(paste0("Finding binding sites in simulated ", region))
      sim_sites = ScanSeqWithPWMs(sim, pwmList, rbp_to_pwm, strand="+")
      print("saving")
      save(sim_sites, file=paste0(prefix, "_sim_sites_", region, ".RData"))
      print("Enrichment")
      # compare against real sites of the same region, positive strand only
      enr=enrich_rbps_real(real_sites[type==region][strand=="+"], sim_sites)
      print("saving")
      save(enr, file=paste0(prefix, "_sim_enrich_", region, ".RData"))
      rm(sim_sites)
      rm(enr)
    }
    else{
      # Positive strand
      print("Finding binding sites")
      sim_sites = ScanSeqWithPWMs(sim, pwmList, rbp_to_pwm, strand="+")
      print("saving")
      save(sim_sites, file=paste0(prefix, "_sim_sites_genome.RData"))
      print("Enrichment")
      enr=enrich_rbps_real(real_sites[strand=="+"], sim_sites)
      save(enr, file=paste0(prefix, "_sim_enrich_genome.RData"))
      rm(sim_sites)
      rm(enr)
      # Negative strand
      print("Finding binding sites - negative")
      sim_sites = ScanSeqWithPWMs(sim, pwmList, rbp_to_pwm, strand="-")
      print("saving")
      save(sim_sites, file=paste0(prefix, "_sim_sites_neg_genome.RData"))
      print("Enrichment - negative")
      enr = enrich_rbps_real(real_sites[strand=="-"], sim_sites)
      print("saving - negative")
      save(enr, file=paste0(prefix, "_sim_enrich_neg_genome.RData"))
      rm(sim_sites)
      rm(enr)
    }
  }
}
# Reload the per-region enrichment tables saved by EnrichSeqSetsByRegion
# into a list keyed by region.
ConstructEnrichmentList=function(prefix, regions=c("genome", "neg_genome", "three_prime_UTR", "five_prime_UTR", "intergenic")){
  result=list()
  for(region in regions){
    load(paste0(prefix, "_sim_enrich_", region, ".RData"))  # defines `enr`
    result[[region]] = enr
  }
  return(result)
}
# Keep, per region, only the sites of proteins whose one-sided FDR-adjusted
# p-value (padj1) is below `minq`. Strand is fixed to "+" except for the
# genome entries ("genome" = positive strand, "neg_genome" = negative).
FilterToEnriched=function(sites, enrichment, minq, regions=c("genome", "three_prime_UTR", "five_prime_UTR", "intergenic", "neg_genome")){
  results=list()
  for(region in regions){
    if(region=="genome"){
      results[[region]] = sites[strand=="+"][Gene_name %in% enrichment[[region]][padj1<minq]$Gene_name]
    }
    else if(region=="neg_genome"){
      results[[region]] = sites[strand=="-"][Gene_name %in% enrichment[[region]][padj1<minq]$Gene_name]
    }
    else {
      results[[region]] = sites[strand=="+"][type==region][Gene_name %in% enrichment[[region]][padj1<minq]$Gene_name]
    }
  }
  return(results)
}
# For each candidate site, look at the alignment matrix `mat` (one row per
# aligned sequence; row 1 serves as the reference) and count how many
# sequences match the reference exactly over the site (n_match) and how many
# contain ambiguity codes (n_N). ref_base_count maps reference coordinates
# (`base`) to alignment columns (`col`).
SiteConservation=function(candidate_sites, mat, ref_base_count){
  for(i in 1:nrow(candidate_sites)){
    if(i %% 10==0){print(i)} # progress indicator
    start_col=ref_base_count[base==candidate_sites[i, start], col]
    end_col=ref_base_count[base==candidate_sites[i, end], col]
    # a reference position may map to several alignment columns; use the first
    if(length(start_col)>1){start_col = start_col[1]}
    if(length(end_col)>1){end_col = end_col[1]}
    site_mat=mat[, start_col:end_col]
    # collapse each aligned sequence over the site into a single string
    site_mat=as.vector(apply(site_mat, 1, function(x){paste0(x, collapse="")}))
    ref_seq=site_mat[[1]]
    chars = sapply(site_mat, function(x){unique(strsplit(x, "")[[1]])})
    # sequences containing ambiguity/degenerate codes over the site
    candidate_sites$n_N[i]=length(grep("N|Y|K|R|W|V", chars))
    # sequences identical to the reference over the site
    candidate_sites$n_match[i]=sum(site_mat==ref_seq)
  }
  # candidate denominator: all sequences minus the ambiguous ones
  candidate_sites[,N_cand:=nrow(mat)-n_N]
  return(candidate_sites)
}
|
69a740c1f6cafa8bd3494752dad57dfbfc2b0837
|
ed160c28e3f908fbe48b2da540471ac2e8647bee
|
/man/summarize_values.Rd
|
c4fa63e7feffcb8a99f52942de28e68acdc5d681
|
[
"MIT"
] |
permissive
|
karawoo/dccmonitor
|
5f10f332a93172f98cec310d5b6a7d16d37a6278
|
736a45c97de0fdd770caf1a4c798ee88610c9ff9
|
refs/heads/master
| 2020-12-08T21:30:54.456576
| 2020-01-06T21:01:35
| 2020-01-06T21:01:35
| 233,100,081
| 0
| 0
|
NOASSERTION
| 2020-01-10T17:44:24
| 2020-01-10T17:44:24
| null |
UTF-8
|
R
| false
| true
| 506
|
rd
|
summarize_values.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/data-visualization.R
\name{summarize_values}
\alias{summarize_values}
\title{Summarize values present}
\usage{
summarize_values(values)
}
\arguments{
\item{values}{The values to summarize in a list.}
}
\value{
A string of the form "value1 (2), value2 (4)",
where each value is followed by its number of
occurrences in parentheses.
}
\description{
Get a list of the values present and
the number of times each value appeared.
}
|
c52a81b324e8f0042f6680b43a155d21f76f7c11
|
3646e4d6536c2392ffb9bc93daeed198610503a5
|
/Perc_SNP_POA_PZA.R
|
bfc692f071d6c9d6fadf3a5ee16a8a9ec3e01d95
|
[] |
no_license
|
martacaballer/Analisis_Mutaciones_TFM
|
fa814205efe7e0a2ec444c4b925a21736282d92b
|
2ebd38df1d7fe55540579cfd5b321bc612fe888b
|
refs/heads/main
| 2023-06-08T02:44:17.700779
| 2021-07-01T09:51:35
| 2021-07-01T09:51:35
| 378,483,155
| 0
| 0
| null | null | null | null |
WINDOWS-1250
|
R
| false
| false
| 2,691
|
r
|
Perc_SNP_POA_PZA.R
|
# Install-if-missing, then attach. NOTE(review): readxl/rJava/xlsxjars/xlsx
# are attached here but not obviously used in the visible code -- confirm
# they are needed.
if(!require(readxl)) install.packages("readxl")
library(readxl)
if(!require(rJava)) install.packages("rJava")
library(rJava)
if(!require(xlsxjars)) install.packages("xlsxjars")
library(xlsxjars)
library("xlsx")
# Open the deletions file: one row per gene, one column per sample;
# values are presumably per-sample deletion fractions (compared against the
# `perc` threshold below) -- confirm.
setwd("C:/Users/marta/OneDrive/Desktop/Máster/Trabajo Fin de Máster/Parte_informatica_TFM/Scripts_definitivos/Perc_Del")
Deleciones <- read.csv("C:/Users/marta/OneDrive/Desktop/Máster/Trabajo Fin de Máster/Parte_informatica_TFM/Datos/allDels.csv")
View (Deleciones)
#funcion para sacar los % de cada gen
# Percentage of samples (columns whose names match a lineage pattern) in which
# each gene shows a deletion, i.e. a deletion fraction above `perc`.
#
# Args:
#   genes: character vector of gene identifiers looked up in `dels$gene`.
#   lin:   pattern matched (via grep) against column names to pick one
#          lineage's sample columns, e.g. "A1" or "L4".
#   perc:  deletion-fraction threshold above which a sample counts as deleted.
#   dels:  deletions table; defaults to the global `Deleciones` so existing
#          callers keep working, but can now be passed explicitly.
# Returns: named numeric vector (one entry per gene) of deletion percentages.
getdelpercentajes <- function(genes, lin, perc, dels = Deleciones) {
  # drop = FALSE keeps a matrix-like result even when only one sample column
  # matches the lineage pattern (the old apply() version broke in that case).
  subDels <- dels[match(genes, dels$gene), grep(lin, colnames(dels)), drop = FALSE]
  # Logical matrix: TRUE where the deletion fraction exceeds the threshold.
  subDelsBin <- subDels > perc
  # rowMeans of a logical matrix is the fraction of TRUEs per gene.
  percDel <- rowMeans(subDelsBin) * 100
  names(percDel) <- genes
  percDel
}
# Load the file containing the POA and PZA resistance genes that present
# deletions, and define the fraction above which a deletion is called.
genes_POA_PZA <- read.csv("C:/Users/marta/OneDrive/Desktop/Máster/Trabajo Fin de Máster/Parte_informatica_TFM/Scripts_definitivos/Deleciones_Resistencia_POA_PZA_definitivo.csv")
genes <- c(genes_POA_PZA$gene)
perc <- 0.01
# Deletion percentages for each lineage; sample columns are selected by the
# A1..A4 / L1..L9 prefixes in the column names of Deleciones.
A1 <- getdelpercentajes(genes = genes, lin = "A1", perc = perc)
A2 <- getdelpercentajes(genes = genes, lin = "A2", perc = perc)
A3 <- getdelpercentajes(genes = genes, lin = "A3", perc = perc)
A4 <- getdelpercentajes(genes = genes, lin = "A4", perc = perc)
L1 <- getdelpercentajes(genes = genes, lin = "L1", perc = perc)
L2 <- getdelpercentajes(genes = genes, lin = "L2", perc = perc)
L3 <- getdelpercentajes(genes = genes, lin = "L3", perc = perc)
L4 <- getdelpercentajes(genes = genes, lin = "L4", perc = perc)
L5 <- getdelpercentajes(genes = genes, lin = "L5", perc = perc)
L6 <- getdelpercentajes(genes = genes, lin = "L6", perc = perc)
L7 <- getdelpercentajes(genes = genes, lin = "L7", perc = perc)
L8 <- getdelpercentajes(genes = genes, lin = "L8", perc = perc)
L9 <- getdelpercentajes(genes = genes, lin = "L9", perc = perc)
# Join all deletion percentages into one table: one column per lineage,
# one row per gene.
Perc_Del_POA_PZA = data.frame (
A1 = c(A1),
A2 = c(A2),
A3 = c(A3),
A4 = c(A4),
L1 = c(L1),
L2 = c(L2),
L3 = c(L3),
L4 = c(L4),
L5 = c(L5),
L6 = c(L6),
L7 = c(L7),
L8 = c(L8),
L9 = c(L9))
View (Perc_Del_POA_PZA)
# Write the combined table next to the other Perc_Del outputs.
setwd("C:/Users/marta/OneDrive/Desktop/Máster/Trabajo Fin de Máster/Parte_informatica_TFM/Scripts_definitivos/Perc_Del")
write.xlsx(Perc_Del_POA_PZA,file = "Perc_Del_POA_PZA.xlsx")
|
1acb2bccb762c4f25c8d662659ad52b59e85592a
|
6588da4ddb03d0437ddea28bd52a12dadbbcb6d5
|
/Scripts/uUF.R
|
c15a1a04f850ca87c11a0d58fcd41cc69d8908d1
|
[] |
no_license
|
LisandroHA/Proyecto_Final_Bioinf2017-II-1
|
ae94ecb2291fcff0beccada3ce395b0193005523
|
85b397b77783553689567e84c07b395e4bf71ea6
|
refs/heads/master
| 2020-12-30T13:22:21.818459
| 2017-05-04T22:40:08
| 2017-05-04T22:40:08
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 16,730
|
r
|
uUF.R
|
### Analyses by Maggie R. Wagner
### maggie.r.wagner@gmail.com
##### Parallel analysis: Linear mixed models of beta diversity using the unweighted UniFrac metric
####### Clear workspace ########
rm(list=ls())
####### Load source file #######
source('ecotypes_source.R')
####### Session Info #######
sessionInfo()
"R version 3.2.3 (2015-12-10)
Platform: x86_64-apple-darwin13.4.0 (64-bit)
Running under: OS X 10.10.5 (Yosemite)
locale:
[1] en_US.UTF-8/en_US.UTF-8/en_US.UTF-8/C/en_US.UTF-8/en_US.UTF-8
attached base packages:
[1] grid parallel stats4 stats graphics grDevices utils datasets methods base
other attached packages:
[1] tidyr_0.4.1 VennDiagram_1.6.17 futile.logger_1.4.1 BiocParallel_1.4.3
[5] reshape2_1.4.1 lmerTest_2.0-30 lme4_1.1-11 Matrix_1.2-4
[9] scales_0.4.0 ggplot2_2.1.0 vegan_2.3-4 lattice_0.20-33
[13] permute_0.9-0 doParallel_1.0.10 iterators_1.0.8 foreach_1.4.3
[17] dplyr_0.4.3 plyr_1.8.3 genefilter_1.52.1 Biostrings_2.38.4
[21] XVector_0.10.0 ape_3.4 mapdata_2.2-6 maps_3.1.0
[25] DESeq2_1.10.1 RcppArmadillo_0.6.600.4.0 Rcpp_0.12.3 SummarizedExperiment_1.0.2
[29] Biobase_2.30.0 GenomicRanges_1.22.4 GenomeInfoDb_1.6.3 IRanges_2.4.8
[33] S4Vectors_0.8.11 BiocGenerics_0.16.1 phyloseq_1.14.0
loaded via a namespace (and not attached):
[1] splines_3.2.3 Formula_1.2-1 assertthat_0.1 latticeExtra_0.6-28 RSQLite_1.0.0
[6] digest_0.6.9 chron_2.3-47 RColorBrewer_1.1-2 minqa_1.2.4 colorspace_1.2-6
[11] XML_3.98-1.4 zlibbioc_1.16.0 xtable_1.8-2 annotate_1.48.0 mgcv_1.8-12
[16] lazyeval_0.1.10 nnet_7.3-12 survival_2.38-3 RJSONIO_1.3-0 magrittr_1.5
[21] nlme_3.1-126 MASS_7.3-45 foreign_0.8-66 tools_3.2.3 data.table_1.9.6
[26] stringr_1.0.0 munsell_0.4.3 locfit_1.5-9.1 cluster_2.0.3 AnnotationDbi_1.32.3
[31] lambda.r_1.1.7 compiler_3.2.3 ade4_1.7-4 nloptr_1.0.4 biom_0.3.12
[36] igraph_1.0.1 labeling_0.3 gtable_0.2.0 codetools_0.2-14 multtest_2.26.0
[41] DBI_0.3.1 R6_2.1.2 gridExtra_2.2.1 Hmisc_3.17-2 futile.options_1.0.0
[46] stringi_1.0-1 geneplotter_1.48.0 rpart_4.1-10 acepack_1.3-3.3 "
####### Load data: variance-stabilizing transformed Phyloseq objects for roots and leaves at 3 sites ######
load("intermediate_data/phylo_leaf3_withEndog_vst.RData")
load("intermediate_data/phylo_root3_withEndog_vst.RData")
####### Register parallel backend #######
registerDoParallel(cores=4)
####### UNweighted UniFrac and PCoA: separately for leaf and root datasets #######
# replace negative values with 0s just for distance calculations
uUF.root3.withEndog.vst<-UniFrac(
transform_sample_counts(root3.withEndog.vst,function(x) x<-ifelse(x<0,0,x)),
weighted=FALSE,parallel=TRUE)
uUF.leaf3.withEndog.vst<-UniFrac(
transform_sample_counts(leaf3.withEndog.vst,function(x) x<-ifelse(x<0,0,x)),
weighted=FALSE,parallel=TRUE)
save(uUF.leaf3.withEndog.vst,file="uUF/uUF_leaf3_wEndog_vst.RData")
save(uUF.root3.withEndog.vst,file="uUF/uUF_root3_wEndog_vst.RData")
cap.uUF.root3.withEndog.vst<-capscale(uUF.root3.withEndog.vst~1,data=as(sample_data(root3.withEndog.vst),'data.frame'))
cap.uUF.leaf3.withEndog.vst<-capscale(uUF.leaf3.withEndog.vst~1,data=as(sample_data(leaf3.withEndog.vst),'data.frame'))
## Get inertia for top 3 PCoA axes: leaf ##
cap.uUF.leaf3.withEndog.vst$CA$eig[1:3]/sum(cap.uUF.leaf3.withEndog.vst$CA$eig)
" MDS1 MDS2 MDS3
0.18686611 0.12875009 0.03706156 "
cap.uUF.root3.withEndog.vst$CA$eig[1:3]/sum(cap.uUF.root3.withEndog.vst$CA$eig)
" MDS1 MDS2 MDS3
0.30832924 0.09191178 0.05417460 "
####### Fig. S5 Scree plots: unweighted UniFrac #######
pdf(file="plots/Fig_S5c_Supp_screeplot_uUF_leaf.pdf")
barplot(cap.uUF.leaf3.withEndog.vst$CA$eig[1:20]/sum(cap.uUF.leaf3.withEndog.vst$CA$eig),
main="Leaves: unweighted UniFrac",xlab="Principal coordinate",ylab="Proportion Variance",
cex.lab=2,cex.main=2.5,cex.axis=1.5,font.axis=2,font.main=2,font.lab=2,xaxt='n')
dev.off()
pdf(file="plots/Fig_S5d_screeplot_uUF_root.pdf")
barplot(cap.uUF.root3.withEndog.vst$CA$eig[1:20]/sum(cap.uUF.root3.withEndog.vst$CA$eig),
main="Roots: unweighted UniFrac",xlab="Principal coordinate",ylab="Proportion Variance",
cex.lab=2,cex.main=2.5,cex.axis=1.5,font.axis=2,font.main=2,font.lab=2,xaxt='n')
dev.off()
####### How much variation is explained by the top 3 PCo axes? #######
sink("ordination_top3_cumulative_PVEs.txt")
print("cumulative percent variance explained by top 3 unweighted UniFrac PCo:")
print("unweighted UniFrac, roots:")
sum(cap.uUF.root3.withEndog.vst$CA$eig[1:3])/sum(cap.uUF.root3.withEndog.vst$CA$eig) # 0.4544
print("unweighted UniFrac, leaves:")
sum(cap.uUF.leaf3.withEndog.vst$CA$eig[1:3])/sum(cap.uUF.leaf3.withEndog.vst$CA$eig) # 0.3527
print("Individual percent variance explained by top 3 unweighted UniFrac PCo:")
print("unweighted UniFrac, roots:")
(cap.uUF.root3.withEndog.vst$CA$eig[1:3])/sum(cap.uUF.root3.withEndog.vst$CA$eig)
print("unweighted UniFrac, leaves:")
(cap.uUF.leaf3.withEndog.vst$CA$eig[1:3])/sum(cap.uUF.leaf3.withEndog.vst$CA$eig)
sink()
####### Fig. unweighted UniFrac Ordination ~ Site #######
pdf("plots/Fig_S4a_Ordination_uUF1_2_Site_leaf.pdf",width=9,height=9)
plot_ordination(leaf3.withEndog.vst,cap.uUF.leaf3.withEndog.vst,type="samples",axes=1:2,color="Site")+
scale_colour_manual(values=sitePalette)+
geom_point(size=4,alpha=1)+
xlab("unweighted UniFrac\nPCo1 [18.7%]")+ylab("unweighted UniFrac\nPCo2 [12.9%]")+
ggtitle("Leaves")+theme_classic()+
theme(plot.title = element_text(size=44, face="bold",color="forest green"))+
theme(axis.title.x=element_text(size=36,face="bold"),axis.text.x=element_text(size=30,face="bold"))+
theme(axis.title.y=element_text(size=36,face="bold"),axis.text.y=element_text(size=30,face="bold"))+
theme(legend.title= element_text(size=40),legend.text=element_text(size=36,face="bold"))+
theme(legend.key.height=unit(2.5,"lines"),legend.key.width=unit(2,"lines"))+
theme(legend.background = element_rect(fill="gray90", size=.5))
dev.off()
pdf("plots/Fig_S4b_Ordination_uUF1_2_Site_root.pdf",width=9,height=9)
plot_ordination(root3.withEndog.vst,cap.uUF.root3.withEndog.vst,type="samples",axes=1:2,color="Site")+
scale_colour_manual(values=sitePalette,guide=FALSE)+
geom_point(size=4,alpha=1)+
xlab("unweighted UniFrac\nPCo1 [30.8%]")+ylab("unweighted UniFrac\nPCo2 [9.2%]")+
ggtitle("Roots")+theme_classic()+
theme(plot.title = element_text(size=44, face="bold",color="grey"))+
theme(axis.title.x=element_text(size=36,face="bold"),axis.text.x=element_text(size=30,face="bold"))+
theme(axis.title.y=element_text(size=36,face="bold"),axis.text.y=element_text(size=30,face="bold"))
dev.off()
####### Save major PCoA axes and Alpha diversity metrics for use in LMMs: #######
leaf3.smd.uUF.withEndog<-as(sample_data(leaf3.withEndog.vst),'data.frame') %>%
mutate(SampleID=row.names(.)) %>%
merge(.,as.data.frame(cap.uUF.leaf3.withEndog.vst$CA$u[,1:3]),by.x='SampleID',by.y="row.names") %>%
plyr::rename(replace=c('MDS1'='uUF1','MDS2'='uUF2','MDS3'='uUF3'))
root3.smd.uUF.withEndog<-as(sample_data(root3.withEndog.vst),'data.frame') %>%
mutate(SampleID=row.names(.)) %>%
merge(.,as.data.frame(cap.uUF.root3.withEndog.vst$CA$u[,1:3]),by.x='SampleID',by.y="row.names") %>%
plyr::rename(replace=c('MDS1'='uUF1','MDS2'='uUF2','MDS3'='uUF3'))
save(leaf3.smd.uUF.withEndog,file="uUF/smd_leaf3_withEndog.RData")
save(root3.smd.uUF.withEndog,file="uUF/smd_root3_withEndog.RData")
####### Remove endogenous plants for model fitting #######
leaf3.smd.uUF<-filter(leaf3.smd.uUF.withEndog,Age!='endog')
root3.smd.uUF<-filter(root3.smd.uUF.withEndog,Age!='endog')
####### Check residuals and R^2 for unweighted UniFrac models and save LS means for plotting #######
# Fit one LMM of a single unweighted-UniFrac PCo axis on the experimental
# design, show residual diagnostics, print the model R^2, and return the
# LS-means table. Replaces six copy-pasted fit blocks (leaf/root x PCo1-3)
# that differed only in the response column and the data set.
fit_uUF_axis <- function(smd, resp) {
  # Same fixed/random structure the original per-axis models used.
  fml <- as.formula(paste0(
    resp,
    "~Genotype*Site + Age*Site + Harvested*Site + (1|Site:Block) + (1|Genotype:Line)+(1|newPlate)+logObs"
  ))
  fit <- lmer(fml, data = smd, REML = TRUE)
  # Residual diagnostics, as the original blocks produced for every model.
  plot(residuals(fit) ~ fitted(fit))
  qqnorm(residuals(fit)); qqline(residuals(fit))
  lsm <- as.data.frame(lmerTest::lsmeans(fit)[1])
  colnames(lsm) <- c("Genotype", "Site", "Age", "Harvested", "Estimate", "SE",
                     "DF", "t", "lowerCI", "upperCI", "P_uncorrected")
  # R^2 check (original runs ranged roughly 0.80-0.98 across organs/axes).
  print(r2.LMM(fit))
  lsm
}
# Build LSM.uUF{1,2,3}.{leaf3,root3} in the global environment — these exact
# names are what the downstream LS-means merge expects.
for (organ in c("leaf3", "root3")) {
  smd <- get(paste0(organ, ".smd.uUF"))
  for (resp in c("uUF1", "uUF2", "uUF3")) {
    assign(paste0("LSM.", resp, ".", organ), fit_uUF_axis(smd, resp))
  }
}
####### Table S5: unweighted UniFrac results #######
# use "LMMstats" function to control for MiSeq run and sequencing depth (logObs)
stats.uUF.leaf3<-LMMstats(leaf3.smd.uUF,resp="uUF1",reml=TRUE) %>%
rbind(.,LMMstats(leaf3.smd.uUF,resp="uUF2",reml=TRUE)) %>%
rbind(.,LMMstats(leaf3.smd.uUF,resp="uUF3",reml=TRUE)) %>%
Pcorrect(method='holm')
stats.uUF.root3<-LMMstats(root3.smd.uUF,resp="uUF1",reml=TRUE) %>%
rbind(.,LMMstats(root3.smd.uUF,resp="uUF2",reml=TRUE)) %>%
rbind(.,LMMstats(root3.smd.uUF,resp="uUF3",reml=TRUE)) %>%
Pcorrect(method='holm')
TableUUF<-data.frame(row.names=levels(stats.uUF.leaf3$Term))
TableUUF$Term<-rownames(TableUUF)
for (h in 1:2){
organ<-c('leaf','root')[h]
stats<-get(c("stats.uUF.leaf3","stats.uUF.root3")[h])
for (i in 1:length(levels(stats$Response))){
resp<-levels(stats$Response)[i]
for (j in 1:length(levels(stats$Term))){
term<-levels(stats$Term)[j]
subTableUUF<-subset(stats,Response==resp&Term==term)
teststat<-ifelse(term%in%c('Block','Line','Plate'),
paste0("ChiSq",subTableUUF$df,"=",format(subTableUUF$F.or.ChiSq,digits=3)),
paste0("F",subTableUUF$df,",",ceiling(subTableUUF$ddf),"=",format(subTableUUF$F.or.ChiSq,digits=3)))
p<-ifelse(subTableUUF$P_corrected==0,"P<3e-16",paste0("P=",format(subTableUUF$P_corrected,digits=2)))
TableUUF[term,paste0(organ,"__",resp)]<-paste0(teststat,";",p)
}
}
}
write.table(TableUUF,file="tables/Table_S5_UUFstats.txt",sep='\t',row.names=FALSE,col.names=TRUE)
####### Combine all LS means into a single dataframe #####
LSM.uUF.all<-rbind(mutate(LSM.uUF1.leaf3,Response="uUF1",Organ="leaf",Term=rownames(LSM.uUF1.leaf3)),
mutate(LSM.uUF2.leaf3,Response="uUF2",Organ="leaf",Term=rownames(LSM.uUF2.leaf3)),
mutate(LSM.uUF3.leaf3,Response="uUF3",Organ="leaf",Term=rownames(LSM.uUF3.leaf3)),
mutate(LSM.uUF1.root3,Response="uUF1",Organ="root",Term=rownames(LSM.uUF1.root3)),
mutate(LSM.uUF2.root3,Response="uUF2",Organ="root",Term=rownames(LSM.uUF2.root3)),
mutate(LSM.uUF3.root3,Response="uUF3",Organ="root",Term=rownames(LSM.uUF3.root3)))
rm(LSM.uUF1.leaf3,LSM.uUF2.leaf3,LSM.uUF3.leaf3)
rm(LSM.uUF1.root3,LSM.uUF2.root3,LSM.uUF3.root3)
save(LSM.uUF.all,file="uUF/uUF_LSmeans.RData")
####### Figure S13 : Leaf&Root: unweighted UniFrac LSM ordination ~ Genotype #######
pdf(file="plots/Fig_S13a_uUF_LSMordination_byGenotype_leaf.pdf",width=9,height=9)
filter(LSM.uUF.all, grepl("Genotype ",Term), Response%in%c('uUF1','uUF3'), Organ=='leaf') %>%
reshape(v.names=c('Estimate','SE'),timevar='Response',direction='wide',idvar=c('Genotype')) %>%
plyr::rename(replace=c('Estimate.uUF1'='uUF1','Estimate.uUF3'='uUF3')) %>%
ggplot(.,aes(x=uUF1,y=uUF3,color=Genotype))+
geom_point(size=5)+
geom_errorbar(aes(ymin=uUF3-SE.uUF3,ymax=uUF3+SE.uUF3),width=0.00,size=2)+
geom_errorbarh(aes(xmin=uUF1-SE.uUF1,xmax=uUF1+SE.uUF1),height=0.00,size=2)+
scale_color_manual(values=popPalette)+
ylab("unweighted UniFrac\nPCo3 [3.7%]")+xlab("unweighted UniFrac\nPCo1 [18.7%]")+
ggtitle("Leaves\n")+theme_classic()+
theme(plot.title = element_text(size=40, face="bold",color="forest green"))+
theme(axis.title.x=element_text(size=36,face="bold"),axis.text.x=element_text(size=25,face="bold"))+
theme(axis.title.y=element_text(size=36,face="bold"),axis.text.y=element_text(size=25,face="bold"))+
theme(legend.title= element_text(size=34),legend.text=element_text(size=30,face="bold"))+
theme(legend.key.height=unit(1.5,"lines"),legend.key.width=unit(1.5,"lines"))+
theme(legend.background = element_rect(fill="gray90", size=.5))
dev.off()
pdf(file="plots/Fig_S13b_uUF_LSMordination_byGenotype_root.pdf",width=9,height=9)
filter(LSM.uUF.all, grepl("Genotype ",Term), Response%in%c('uUF1','uUF2'), Organ=='root') %>%
reshape(v.names=c('Estimate','SE'),timevar='Response',direction='wide',idvar=c('Genotype')) %>%
plyr::rename(replace=c('Estimate.uUF1'='uUF1','Estimate.uUF2'='uUF2')) %>%
ggplot(.,aes(x=uUF1,y=uUF2,color=Genotype))+
geom_point(size=5)+
geom_errorbar(aes(ymin=uUF2-SE.uUF2,ymax=uUF2+SE.uUF2),width=0.00,size=2)+
geom_errorbarh(aes(xmin=uUF1-SE.uUF1,xmax=uUF1+SE.uUF1),height=0.00,size=2)+
scale_color_manual(values=popPalette)+
scale_x_continuous(breaks=c(-0.0125,-0.0075,-0.0025))+
ylab("unweighted UniFrac\nPCo2 [9.2%]")+xlab("unweighted UniFrac\nPCo1 [30.8%]")+
ggtitle("Roots\n")+theme_classic()+
theme(plot.title = element_text(size=40, face="bold",color="dark grey"))+
theme(axis.title.x=element_text(size=36,face="bold"),axis.text.x=element_text(size=25,face="bold"))+
theme(axis.title.y=element_text(size=36,face="bold"),axis.text.y=element_text(size=25,face="bold"))+
theme(legend.title= element_text(size=34),legend.text=element_text(size=30,face="bold"))+
theme(legend.key.height=unit(1.5,"lines"),legend.key.width=unit(1.5,"lines"))+
theme(legend.background = element_rect(fill="gray90", size=.5))
dev.off()
####### Save image #######
save.image(paste0("uUF/image_",date(),".RData"))
|
344149195fe7fb948ef704416690f9c5b2b3968a
|
ef8d9dae6020f8211d30f816b7648c32e501e1e1
|
/run_analysis.R
|
b0e6c8226d56194752785c5564777547235b2f7e
|
[] |
no_license
|
freeblue5577/cleanDataProject
|
d2d6ac29f21e020e5c87b39ee1c44f712d5d6b5a
|
03c9f3cfe8b0e111f4ad664dfa22643fdd4e4603
|
refs/heads/master
| 2021-01-23T22:43:11.222350
| 2015-03-23T00:10:18
| 2015-03-23T00:10:18
| 32,669,726
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 3,429
|
r
|
run_analysis.R
|
## for data cleaning course project
## wenwen xu
# Merge the UCI HAR train/test splits, attach descriptive activity and
# feature names, write the step-5 tidy summary via write_tidy_table(), and
# return the merged data set (subject column + activity name + 561 features).
run_analysis <- function() {
  # All inputs live under one dataset root; build paths once instead of
  # repeating the full prefix for every file (the original repeated it 9x).
  base_dir <- file.path(getwd(), "wenwen_project", "cleanDataProject")
  data_dir <- file.path(base_dir, "UCI_HAR_Dataset")
  # Small reader helper: whitespace-separated table under the dataset root.
  read_set <- function(...) read.table(file.path(data_dir, ...), sep = "")

  subject_merge <- rbind(read_set("train", "subject_train.txt"),
                         read_set("test", "subject_test.txt"))
  feature_list <- read_set("features.txt")
  x_merge <- rbind(read_set("train", "X_train.txt"),
                   read_set("test", "X_test.txt"))
  y_merge <- rbind(read_set("train", "y_train.txt"),
                   read_set("test", "y_test.txt"))
  activity_list <- read_set("activity_labels.txt")

  # Replace numeric activity codes with their descriptive labels.
  y_convertActName <- add_activityNames(y_merge, activity_list)

  # Attach descriptive feature names to the measurement columns
  # (vectorized; the original assigned them one column at a time).
  colnames(x_merge) <- as.character(feature_list[, 2])

  final_merge <- cbind(subject_merge, cbind(y_convertActName, x_merge))

  # Per-feature mean / standard deviation summary, kept for inspection as in
  # the original (it is computed but not written to disk).
  std_table <- rbind(mean = colMeans(x_merge), std = apply(x_merge, 2, sd))

  # Step 5: write the tidy data set (mean of each variable per
  # subject/activity pair); note the trailing slash the writer expects.
  write_tidy_table(final_merge, paste0(base_dir, "/"))
  final_merge
}
# Map each numeric activity code in column 1 of actNum_table to its
# descriptive label (column 2 of actName_table).
# Returns an n x 1 character matrix, one label per input row.
add_activityNames <- function(actNum_table, actName_table) {
  codes <- as.numeric(actNum_table[, 1])
  labels <- as.character(actName_table[codes, 2])
  matrix(labels, ncol = 1)
}
# Step 5: write the tidy data set holding the average of each feature for
# every subject (V1) / activity-name pair.
#
# merge_all: data frame whose first two columns are the subject id ("V1") and
#            the activity name ("y_convertActName"), followed by the feature
#            columns.
# dir:       output directory path, including a trailing slash.
write_tidy_table <- function (merge_all, dir) {
  # BUG FIX: the measure variables were taken from an undefined object `qq`;
  # they are the feature columns of `merge_all` itself. Also generalized from
  # the hard-coded 3:563 to however many feature columns are present.
  myMelt <- reshape2::melt(merge_all, id = c("V1", "y_convertActName"),
                           measure.vars = colnames(merge_all)[3:ncol(merge_all)])
  tidy_table <- reshape2::dcast(myMelt,
                                formula = V1 + y_convertActName ~ variable,
                                fun.aggregate = mean)
  file_path <- paste(dir, "tidy_data.txt", sep = "")
  # row.names spelled out (the original relied on partial matching of
  # `row.name`).
  write.table(tidy_table, file_path, row.names = FALSE)
}
|
649b1c93211006bd9efc46372ee5bd3fecfdb364
|
cc187746861950ea8bbc9c39c65318ec21275283
|
/man/FswXt2Df.Rd
|
2988d890e505cd90d846c5f6fe0e349564075390
|
[] |
no_license
|
c3h3/QuantitativeBacktestingTools
|
a042509df4750a3778d53600aeb85f0297736f4f
|
2c2d84163297b4884647ba03a5a64a542beeaaf2
|
refs/heads/master
| 2020-06-26T20:04:02.373324
| 2016-09-07T22:01:25
| 2016-09-07T22:01:25
| 67,580,438
| 3
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 672
|
rd
|
FswXt2Df.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/forwardSamplingTools.R
\name{FswXt2Df}
\alias{FswXt2Df}
\title{FswXt2Df}
\usage{
FswXt2Df(fswXt, filterDatetimes = NULL, longFormat = F)
}
\arguments{
\item{fswXt}{a forward sampling xts}
\item{filterDatetimes}{filtered fswXt by datetimes}
\item{longFormat}{logical flag defining whether the output is in long format or wide format}
}
\description{
a function which can help you transform your forward sampling xts to data.frame
}
\examples{
Xt = getSymbols('2330.TW', auto.assign = F)
Xt \%>\% Pt2Rt \%>\% ForwardSlidingWindow(20) \%>\% FswXt2Df(index(Xt)[c(1,3,5,7,9)]) \%>\% head
}
|
1c7b6839e52115d283050703a9f6fa347c09afe9
|
da9b15a6d555b3c9540705e69f0c4d7baa39a1b3
|
/scripts/from_monica/getsplines.R
|
94af37d5ee9ba957027f377a48d33c546fc4b0ed
|
[] |
no_license
|
RohanAlexander/hansard
|
0be2c6b43b053a048896d3d8d98fc633dde323fa
|
300fac35e8714871dcf0a6225db3e4a1f33754d2
|
refs/heads/master
| 2022-03-11T00:08:03.183499
| 2019-11-25T10:53:18
| 2019-11-25T10:53:18
| 138,767,582
| 2
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,298
|
r
|
getsplines.R
|
# Construct a B-spline basis over the observations x.i, using equally spaced
# knots (spacing I) laid out so that one knot falls exactly at x0.
#
# Args:
#   x.i:    numeric vector of observation locations.
#   x0:     anchor knot location; defaults to max(x.i) - 0.5*I.
#   I:      knot spacing.
#   degree: B-spline degree (3 = cubic).
# Returns a list with B.ik (basis matrix: one row per observation, one column
# per retained spline) and knots.k (the knots matching those columns).
GetSplines <- function(
  x.i,
  x0 = NULL,
  I = 2.5,
  degree = 3
) {
  if (is.null(x0)) {
    x0 <- max(x.i)-0.5*I
  }
  # get knots, given that one knot needs to be in year0
  # Start from a wide symmetric grid around x0, then extend outward until the
  # grid covers the full range of x.i.
  knots <- seq(x0-1000*I, x0+1000*I, I)
  while (min(x.i) < knots[1]) knots <- c(seq(knots[1]-1000*I, knots[1]-I,I), knots)
  while (max(x.i) > knots[length(knots)]) knots <- c(knots, seq(knots[length(knots)]+I,
                                                                knots[length(knots)]+1000*I, I))
  # Basis over interior knots; the outermost pair act as boundary knots.
  Btemp.ik <- bs(x.i, knots = knots[-c(1, length(knots))], degree = degree,
                 Boundary.knots = knots[c(1, length(knots))])
  # Columns summing to zero carry no support for these observations.
  indicesofcolswithoutzeroes <- which(apply(Btemp.ik, 2, sum) > 0)
  # only remove columns with zeroes at start and end
  startnonzerocol <- indicesofcolswithoutzeroes[1]
  endnonzerocol <- indicesofcolswithoutzeroes[length(indicesofcolswithoutzeroes)]
  B.ik <- Btemp.ik[,startnonzerocol:endnonzerocol]
  colnames(B.ik) <- paste0("spline", seq(1, dim(B.ik)[2]))
  # Keep the knot subset aligned with the retained basis columns.
  knots.k <- knots[startnonzerocol:endnonzerocol]
  names(knots.k) <- paste0("spline", seq(1, dim(B.ik)[2]))
  ##value<< List of B-splines containing:
  return(list(B.ik = B.ik, ##<< Matrix, each row is one observation, each column is one B-spline.
              knots.k = knots.k ##<< Vector of knots.
  ))
}
|
2292ee917ef2c3661ed90900c8ba50a57c033fcf
|
d3c500e5204d2a7d8965f25c8c61e46d24d4fe28
|
/man/reorderbyseg.Rd
|
6f66df83b23a2b48658dc357c64ab656b70a585f
|
[
"MIT"
] |
permissive
|
WeiquanLuo/rivertopo
|
c7d4f7fd9617ebdeff79d2115aad47d5fab1e788
|
e0e533c8e3d58b8c7b8d10dc3145ad37c76a684c
|
refs/heads/master
| 2020-09-09T18:20:21.296681
| 2020-01-27T05:52:11
| 2020-01-27T05:52:11
| 221,524,684
| 1
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 670
|
rd
|
reorderbyseg.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/reorderbyseg.R
\name{reorderbyseg}
\alias{reorderbyseg}
\title{Reorder sites, grouped by seg, in closest order from the end vertex of each seg}
\usage{
reorderbyseg(site_sf, arc_sf.set)
}
\arguments{
\item{site_sf}{a site sf object with column: id, lon, lat, geometry, X, Y, and the snapped point information from the riverdist::xy2segvert() columns: seg, vert, snapdist}
\item{arc_sf.set}{a arc sf object from makearc() containing columns:
from, seg0, vert0, x0, y0, to, seg1, vert1, x1, y1, geometry}
}
\description{
Reorder sites, grouped by seg, in closest order from the end vertex of each seg.
}
|
750838fdf21941548d8e2207b8c4efd7d2a5f8c7
|
f192d5d354a9d4a4c8b31f731ad87f94fe50cdc6
|
/src/AtlasRDF/man/getExperimentIdsForGeneURI.Rd
|
aa49700f1960f70b0369a69c58ec7c8c862f872f
|
[
"Apache-2.0"
] |
permissive
|
jamesmalone/AtlasRDF-R
|
0264e82fa5647ac9879e0a5c2dbf7e3d438cf13e
|
9ea2dc99496cd376522ffd2d59cb9f044dd511eb
|
refs/heads/master
| 2021-01-10T21:37:36.918520
| 2014-02-20T15:01:23
| 2014-02-20T15:01:23
| 12,704,372
| 3
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 786
|
rd
|
getExperimentIdsForGeneURI.Rd
|
\name{getExperimentIdsForGeneURI}
\alias{getExperimentIdsForGeneURI}
\title{
Get the ArrayExpress experiment IDs which contain a gene specified by URI.
}
\description{
Get all of the ArrayExpress experiment IDs which contain a gene, specified by a URI.
}
\usage{
getExperimentIdsForGeneURI(geneuri, endpoint = "http://www.ebi.ac.uk/rdf/services/atlas/sparql")
}
\arguments{
\item{geneuri}{
Gene URI
}
\item{endpoint}{
The location of the SPARQL endpoint - default value is live Atlas endpoint
}
}
\value{
List of ArrayExpress experiment IDs for the specified gene.
}
\author{
James Malone, Simon Jupp
}
\examples{
###get the experiment IDs for a gene ENSG00000142556 (ENSEMBL ID)
getExperimentIdsForGeneURI("http://identifiers.org/ensembl/ENSG00000142556")
}
|
8463f66ef8ae62f5d48ec057723646f45e919e56
|
b38f0078b2fcbf32dfc9b3d6d35c26363ac4c509
|
/run_analysis.R
|
7db67ed304c1946694e09e86f9441e68a4091bf1
|
[] |
no_license
|
shaliniraol/Coursera
|
4995716ecb04ac3686bf71f0d8f1971f0da11d56
|
727e4ccc80e4683307140513a3c77f37efab4feb
|
refs/heads/master
| 2020-05-01T14:25:51.593662
| 2015-04-26T23:23:31
| 2015-04-26T23:23:31
| 34,635,452
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 176
|
r
|
run_analysis.R
|
# Render the analysis R Markdown: knit run_analysis.Rmd to Markdown, then
# convert the result to HTML.
# library() (unlike require()) fails loudly if a dependency is missing,
# instead of silently returning FALSE and erroring later.
library(knitr)
library(markdown)
setwd("~/Coursera/CleaningData/Project")
knit("run_analysis.Rmd", encoding="ISO8859-1")
markdownToHTML("run_analysis.md", "run_analysis.html")
|
74967042e0367f13750432f63dc381fe52680d48
|
04988c81516b15442e2baab7a6d8dd9a3b633e18
|
/Criar_Rasters.R
|
c77f4b788b80be006c9ce5df72ea164afe2b9d01
|
[] |
no_license
|
Kerenvascs/r_stats
|
5f75ecb248f9008447315e210fcf33ca45533c85
|
90e2deb16f7dadc5cde694c8af14f4b302636514
|
refs/heads/master
| 2020-05-22T00:25:40.643879
| 2019-05-11T19:04:01
| 2019-05-11T19:04:01
| 186,171,838
| 0
| 0
| null | null | null | null |
ISO-8859-1
|
R
| false
| false
| 4,053
|
r
|
Criar_Rasters.R
|
library(raster)
library(sp)
library(spatialEco)
library(SDMTools)
library(plotrix)
setwd("C:/Users/SAMSUNG/Desktop/Raster")
# Open the landscape raster
r2=raster(x ="Raster2_PRODES_Exercicio_UTM.tif" )
r2
plot(r2)
# Turn this raster (landscape) into a binary raster
r2=calc(r2, fun=function(x) { x[x != 8] <- 0; return(x) } ) ## everything that is not class 8 becomes zero
r2=calc(r2, fun=function(x) { x[x == 8] <- 1; return(x) } ) ## everything that is class 8 becomes one
plot(r2)
# Select central points for the creation of two landscapes
x=sampleRegular(r2, 3, na.rm = TRUE, sp = TRUE) ## pick the desired number (3) of points on a regular grid; na.rm drops NAs if present
x@coords ## inspect the sampled coordinates (the object is a SpatialPointsDataFrame)
points(x)
# Radii of the spatial extents to be analysed (15 values: 100, 500, 1000..13000)
rads=c(100,500,seq(1000,13000,1000))
rads
# Container holding the per-radius results
results=list()
# Run the analysis: landscape metrics inside each buffer radius.
# NOTE(review): metrics = c(2,3,4,6,9,10) are class-metric indices of
# land.metrics — confirm which metrics these select against the package docs.
for(i in 1:length(rads)){
  results[[i]]=land.metrics(x=x, y=r2, bw=rads[i], bkgd = NA, metrics = c(2,3,4,6,9,10))
}
names(results)=rads
results
# Reshape into a friendlier table for analysis: res = first point's metrics
# per radius, res2 = second point's (loop 2:15 matches length(rads) == 15).
res=results[[1]][[2]][1,]
res2=results[[1]][[2]][2,]
for(i in 2:15){
  res=rbind(res,results[[i]][[2]][1,])
  res2=rbind(res2,results[[i]][[2]][2,])
}
res=data.frame(res,raio=rads)
res
res2
# One plot per metric: black = first point, red (col=2) = second point.
for(i in 2:7){
  plot(rads,res[,i],xlab="Raio",ylab=colnames(res)[i])
  lines(rads,res[,i])
  points(rads,res2[,i],col=2)
  lines(rads,res2[,i],col=2)
}
# Show the smallest buffer (100 m) around the second sampled point.
plot(r2)
points(x,pch=16)
draw.circle(x@coords[2,1],x@coords[2,2],100)
###############################################################################################################################
r3=raster(x="Raster2_PRODES_Exercicio_UTM_120.tif") #2
r4=raster(x="Raster2_PRODES_Exercicio_UTM_240.tif") #4
r5=raster(x="Raster2_PRODES_Exercicio_UTM_480.tif") #8
r6=raster(x="Raster2_PRODES_Exercicio_UTM_960.tif") #16
r3=calc(r3, fun=function(x) { x[x != 8] <- 0; return(x) } )
r3=calc(r3, fun=function(x) { x[x == 8] <- 1; return(x) } )
r4=calc(r4, fun=function(x) { x[x != 8] <- 0; return(x) } )
r4=calc(r4, fun=function(x) { x[x == 8] <- 1; return(x) } )
r5=calc(r5, fun=function(x) { x[x != 8] <- 0; return(x) } )
r5=calc(r5, fun=function(x) { x[x == 8] <- 1; return(x) } )
r6=calc(r6, fun=function(x) { x[x != 8] <- 0; return(x) } )
r6=calc(r6, fun=function(x) { x[x == 8] <- 1; return(x) } )
results2=list()
results3=list()
results4=list()
results5=list()
j=1
for(i in c(3,12)){ ##POSIÇÃO DO RAIO 1000 E 10000)
results2[[j]]=land.metrics(x=x, y=r3, bw=rads[i], bkgd = NA, metrics = c(2,3,4,6,9,10))
j=j+1
}
j=1
for(i in c(3,12)){
results3[[j]]=land.metrics(x=x, y=r4, bw=rads[i], bkgd = NA, metrics = c(2,3,4,6,9,10))
j=j+1
}
j=1
for(i in c(3,12)){
results4[[j]]=land.metrics(x=x, y=r5, bw=rads[i], bkgd = NA, metrics = c(2,3,4,6,9,10))
j=j+1
}
j=1
for(i in c(3,12)){
results5[[j]]=land.metrics(x=x, y=r6, bw=rads[i], bkgd = NA, metrics = c(2,3,4,6,9,10))
j=j+1
}
res3=results[[3]][[2]][1,]
res3=rbind(res3,results[[12]][[2]][1,])
res3=rbind(res3,results2[[1]][[2]][1,])
res3=rbind(res3,results2[[2]][[2]][1,])
res3=rbind(res3,results3[[1]][[2]][1,])
res3=rbind(res3,results3[[2]][[2]][1,])
res3=rbind(res3,results4[[1]][[2]][1,])
res3=rbind(res3,results4[[2]][[2]][1,])
res3=rbind(res3,results5[[1]][[2]][1,])
res3=rbind(res3,results5[[2]][[2]][1,])
res3=data.frame(res3,raio=rep(c(1000,10000),5))
res3
res3.2=results[[3]][[2]][2,]
res3.2=rbind(res3.2,results[[12]][[2]][2,])
res3.2=rbind(res3.2,results2[[1]][[2]][2,])
res3.2=rbind(res3.2,results2[[2]][[2]][2,])
res3.2=rbind(res3.2,results3[[1]][[2]][2,])
res3.2=rbind(res3.2,results3[[2]][[2]][2,])
res3.2=rbind(res3.2,results4[[1]][[2]][2,])
res3.2=rbind(res3.2,results4[[2]][[2]][2,])
res3.2=rbind(res3.2,results5[[1]][[2]][2,])
res3.2=rbind(res3.2,results5[[2]][[2]][2,])
res3.2=data.frame(res3.2,raio=rep(c(1000,10000),5))
res3.2
resol=c(60,120,240,480,960)
plot(resol, res3[which(res3$raio==1000),7])
points(resol, res3[which(res3$raio==10000),7],col=2)
plot(resol, res3[which(res3$raio==10000),7],col=2)
|
c4dfac95dba63b1989befc4c4b0eb07f0c1c7066
|
007ae03cfe5abf41a0ad864eade451141c267cca
|
/auto-docs/executables/ggplot2/igray.r
|
ec419ed3396bb782c11336aa437f52018e039511
|
[] |
no_license
|
VukDukic/documentation
|
ca96eb1994eeb532fe60c542960b017354bcede1
|
8e5aefdc38788956cfe31d8fe8b4b77cdf790e57
|
refs/heads/master
| 2021-01-18T09:02:27.034396
| 2015-01-20T23:46:58
| 2015-01-20T23:46:58
| 30,007,728
| 2
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 333
|
r
|
igray.r
|
# Demo: upload a ggplot2 scatterplot styled with ggthemes' "igray" theme
# to the plotly web service.
library(plotly)
library("ggthemes")
# SECURITY NOTE(review): plotly credentials are hard-coded in source; this
# key should be rotated and read from the environment instead of committed.
py <- plotly(username='TestBot', key='r1neazxo9w')
# Random 1000-row sample of the diamonds data; no set.seed, so not reproducible.
dsamp <- diamonds[sample(nrow(diamonds), 1000), ]
gray <- (qplot(carat, price, data = dsamp, colour = cut) +
theme_igray())
# Upload the figure (overwriting any existing "igray" file) and keep its URL.
out <- py$ggplotly(gray, kwargs=list(filename="igray", fileopt="overwrite"))
plotly_url <- out$response$url
|
571796b873978258d0f1a10575f614c30f8fa1f9
|
4894c5851994527bf47aa0106318d906bfb3a57b
|
/man/weightMatrix.Rd
|
8c9f2e734718dee2c1880436c46db4de2f651d2e
|
[] |
no_license
|
rajamuthiah/ureda
|
c74b776889175092b7dccac00295c92b783d9c1a
|
46d0402e69ab9f4b6e2a9960663d0b4b532eb526
|
refs/heads/master
| 2021-01-20T00:40:02.377294
| 2014-07-15T04:06:36
| 2014-07-15T04:06:36
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 365
|
rd
|
weightMatrix.Rd
|
% Generated by roxygen2 (4.0.1.99): do not edit by hand
\name{weightMatrix}
\alias{weightMatrix}
\title{Weight Coefficients for Sample L-moments.}
\usage{
weightMatrix(nlmom, n)
}
\arguments{
\item{nlmom}{The number of sample L-moments.}
\item{n}{The sample size.}
}
\value{
A matrix of weight factors.
}
\description{
Weight Coefficients for Sample L-moments.
}
|
fc2d73a926852a4b97f740e5e5266d085e882ff0
|
f8270ad35ceaf8654274a74d9fcc28ffb60e3e82
|
/exp/results.R
|
1836a5f706a2e6f790ef249b9478a72d802f53f8
|
[
"Apache-2.0"
] |
permissive
|
gkobeaga/cpsrksec
|
bc5c7b5004f8013fe8b081e99f06acf4a269ae1a
|
d702b9688722fde35040db62b29d3c50ba07c126
|
refs/heads/master
| 2022-09-12T12:27:22.842104
| 2020-06-04T11:50:49
| 2020-06-04T11:50:49
| 259,394,517
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 12,230
|
r
|
results.R
|
## tables package continue showing Hmisc messages although silencing it
suppressPackageStartupMessages(
library(tables, quietly = T, warn.conflicts = F)
)
library(plyr, quietly = T, warn.conflicts = F)
library(dplyr, quietly = T, warn.conflicts = F)
library(tidyr, quietly = T, warn.conflicts = F)
bt <- booktabs()
dir.create("tables", showWarnings = FALSE)
## Round `x` to `decimals` decimal places and format the result as a
## LaTeX-ready number for the tables built below. Any additional
## arguments are forwarded to format().
fmt_tm <- function(x, decimals = 2, ...) {
  scale <- 10^decimals
  rounded <- round(x * scale) / scale
  tables::latexNumeric(format(rounded, ...))
}
## Patch a LaTeX table file in place (via GNU sed) so the table stretches
## to the full column width: inserts \extracolsep fills into the column
## spec and switches the tabular environment to tabular*.
## Only implemented on GNU/Linux; elsewhere a warning is raised.
change_tabular_env <- function(file) {
  if (version$os == "linux-gnu") {
    edits <- c(
      paste(paste0("sed -i 's/\\\\begin{tabular}{/\\\\begin{tabular}",
                   "{\\\\columnwidth}{@{\\\\extracolsep{\\\\fill}}/g'"),
            file),
      paste("sed -i '1 s/}$/@{}}/'", file),
      paste("sed -i 's/tabular/tabular\\*/g'", file)
    )
    for (edit in edits) {
      system(edit)
    }
  } else {
    warning(paste("change_tabular_env not implemented for", version$os))
  }
}
## Import the raw data of the experiments
srk_tmp <- read.csv("exp-results.csv", header = T, row.names = NULL)
## Get the instance names and the score generation group for the OP.
srk_tmp$instance <- vapply(strsplit(as.character(srk_tmp$name), "-"),
`[`, 1, FUN.VALUE = character(1))
srk_tmp$gen <- capitalize(vapply(strsplit(as.character(srk_tmp$name), "-"),
`[`, 2, FUN.VALUE = character(1)))
## Convert the time from nanoseconds to milliseconds
srk_tmp <- srk_tmp %>%
mutate(pre_time = pre_time / 1000) %>%
mutate(sep_time = sep_time / 1000) %>%
mutate(cut_time = cut_time / 1000)
## Group data accordingly
srk_tmp <- srk_tmp %>%
mutate(srk = paste(strat, s2, sep = "_")) %>%
mutate(separation = paste(gomoryhu, s3, extra, sep = "_")) %>%
mutate(maxinout = factor(paste(max_in, max_out, sep = "x"))) %>%
mutate(size = factor(ifelse(nv <= 1500, "Medium", "Large"),
levels = c("Medium", "Large")))
srk_tmp$srk <- revalue(srk_tmp$srk,
c("0_0" = "NO",
"1_0" = "C1",
"2_0" = "C1C2",
"3_0" = "C1C2C3",
"4_0" = "S1",
"4_1" = "S1S2"
))
## Reorder the shrinking strategies and set NO as the reference strategy
srk_tmp <- srk_tmp %>%
mutate(srk = factor(srk, levels = c("NO", "C1", "C1C2", "C1C2C3",
"S1", "S1S2")))
srk_tmp$srk <- factor(srk_tmp$srk) %>% relevel(srk, ref = "NO")
## Reorder the separation strategies and set Hong as the reference strategy
srk_tmp$separation <- revalue(srk_tmp$separation,
c("0_0_0" = "EH",
"0_1_0" = "DH",
"0_1_1" = "DHI",
"1_0_0" = "EPG"
))
srk_tmp$separation <- factor(srk_tmp$separation) %>% relevel(srk, ref = "EH")
## Reorder the cut generation strategies and set 1x1 as the reference strategy
srk_tmp <- srk_tmp %>%
mutate(maxinout = factor(maxinout, levels = c("1x1", "10x10")))
srk_tmp$maxinout <- factor(srk_tmp$maxinout) %>% relevel(maxinout, ref = "1x1")
## Clone the results of DH-NO to DHI-NO
dhe_no <- srk_tmp[which(srk_tmp$srk == "NO" & srk_tmp$separation == "DH"), ]
dhe_no$separation <- "DHI"
srk_tmp <- rbind(srk_tmp, dhe_no)
## Calc the size of the shrunk graph in relation to
## the support graph (percentage)
srk_tmp$nv_rel <- (srk_tmp$snv) / srk_tmp$gnv * 100
srk_tmp$na_rel <- (srk_tmp$sna) / srk_tmp$gna * 100
## Group the results by instance, vertex score generation, shrinking strategy,
## separation strategy, and cut generation strategy
srk <- ddply(srk_tmp,
.(instance, gen, srk, separation, maxinout), summarize
, nv = mean(nv)
, gnv = mean(gnv)
, gna = mean(gna)
, snv = mean(snv)
, sna = mean(sna)
, sep_count_extra = mean(sep_count_extra)
, pre_count_qsets = mean(pre_count_qsets)
, sep_count_qsets = mean(sep_count_qsets)
, nsec = mean(valid)
, maxv = mean(maxval)
, pre_time = mean(pre_time)
, sep_time = mean(sep_time)
, cut_time = mean(cut_time)
, pre_count_c1 = mean(pre_count_c1)
, pre_count_c2 = mean(pre_count_c2)
, pre_count_c3 = mean(pre_count_c3)
, pre_count_s1 = mean(pre_count_s1)
, pre_count_s2 = mean(pre_count_s2)
, pre_count_queue = mean(pre_count_queue)
, size = unique(size)
, nv_rel = mean(nv_rel)
, na_rel = mean(na_rel)
)
## Calc the number of violated cuts per unit of time (millisecond)
## found by each combination of strategies
srk <- srk %>% mutate(cxms = nsec / cut_time)
###################################################
## Generate the Latex tables ##
###################################################
############
## 1. Graph size and separation time in relation to the reference strategy
cat("Building the table of the algorithm speedups..........")
# Reference time for every row: the mean separation time of the baseline
# configuration (no shrinking, "EH" separation) for the same
# instance/score-generation combination.
srk$ref_time <- 0
# NOTE(review): quadruple loop over all factor combinations; the value
# assigned depends only on instance and gen, so this could be computed once
# per (instance, gen) and joined, but is kept as-is here.
for (instance in unique(srk$instance)) {
for (gen in unique(srk$gen)) {
for (shrink in levels(srk$srk)) {
for (separation in levels(srk$separation)) {
srk[which(srk$instance == instance & srk$gen == gen &
srk$srk == shrink & srk$separation == separation), ]$ref_time <-
mean(srk[which(srk$instance == instance & srk$gen == gen &
srk$srk == "NO" & srk$separation == "EH"), ]$sep_time)
}
}
}
}
# Speedup of each strategy relative to the NO/EH baseline.
srk$time_rel <- srk$ref_time / srk$sep_time
srk_comp_time <- tabular((Size = size) * (Shrinking = srk) ~
(Heading("Preprocess") * 1 * Heading("Graph Size") *
Format(fmt_tm(decimals = 2)) * mean *
(Heading("$\\%|\\bar{V}|$") * (nv_rel) +
Heading("$\\%|\\bar{E}|$") * (na_rel)
) +
Heading("Separation") * 1 * Heading("Speedup") *
Format(fmt_tm(decimals = 0)) * separation *
Heading() * mean * (Heading() * time_rel)
),
data = srk
)
#srk_comp_time
rowLabels(srk_comp_time)[7, 1] <- "\\midrule Large"
outf <- paste0("tables/comparison-sep-srk-time.tex")
outt <- latex(srk_comp_time, file = outf)
change_tabular_env(outf)
cat("OK\n")
############
## 2. Time and obtained Q sets by shrinking and separation strategy
cat("Building the table of the times and obtained Q sets...")
srk_comp_cut <- tabular((Size = size) * (Shrinking = srk) ~
Format(fmt_tm(decimals = 1)) *
(Heading("Preprocess") * 1 *
(Heading("All") *
(Heading("\\#Q") * mean * Heading() * pre_count_qsets +
Heading("Time") * mean * Heading() * pre_time
)
) +
Heading("Separation") * Heading() * separation *
Format(fmt_tm(decimals = 1)) *
(Heading("\\#Q") * mean * Heading() * sep_count_qsets +
Heading("Time") * mean * Heading() * sep_time
)
),
data = srk
)
#srk_comp_cut
rowLabels(srk_comp_cut)[7, 1] <- "\\midrule Large"
outf <- paste0("tables/comparison-sep-srk-qsets.tex")
outt <- latex(srk_comp_cut, file = outf)
change_tabular_env(outf)
cat("OK\n")
############
## 3. Tables in the supplementary material
cat("Building the tables in the supplementary material\n")
srk$maxinout <- revalue(srk$maxinout,
c("1x1" = "1x1 (10 runs)",
"10x10" = "10x10 (10 runs)"
))
for (inst in unique(srk$instance)) {
cat(" -", inst, paste0(paste0(rep(".", 8 - nchar(inst)), collapse = ""), "..."))
instdata <- srk %>% filter(instance == as.character(inst))
# Size
srk_comp_size <- tabular((Shrinking = srk) ~
(Heading() * mean * (`$|V|$` = nv) +
Heading() * Factor(gen) *
(Heading("Support graph") *
(Heading() * mean * (`$|\\bar{V}|$` = gnv) +
Heading() * mean * (`$|\\bar{E}|$` = gna)
) +
Heading("Shrunk graph", override = T) *
(Heading() * mean * (`$|\\bar{V}|$` = snv) +
Heading() * mean * (`$|\\bar{E}|$` = sna)
) +
Heading("Preprocess") *
(Heading() * Format(fmt_tm()) * mean * (`\\#Q` = pre_count_qsets) +
Heading() * Format(fmt_tm(decimals = 2)) *
mean * (Time = pre_time)
)
)
),
data = instdata
)
outf <- paste0("tables/comparison-size-", inst, ".tex")
outt <- latex(srk_comp_size, file = outf)
change_tabular_env(outf)
# Cuts
srk_comp_cuts <- tabular(((Sep. = separation) * (Shrinking = srk)) ~
(Heading() * Factor(gen) *
(Heading("Separation") * 1 * Heading("(20 runs)") *
(Heading() * mean *
Format(fmt_tm(decimals = 1)) * (`\\#Q` = sep_count_qsets) +
Heading() * mean *
Format(fmt_tm(decimals = 1)) * (Time = sep_time)
) +
Heading("SEC Generation") * maxinout *
(Heading() * mean * Format(fmt_tm(decimals = 1)) * (`\\#SEC` = nsec) +
Heading() * mean * Format(fmt_tm(decimals = 1)) * (Time = cut_time)
)
)
),
data = instdata
)
rowLabels(srk_comp_cuts)[7, 1] <- "\\midrule DH"
rowLabels(srk_comp_cuts)[13, 1] <- "\\midrule DHI"
rowLabels(srk_comp_cuts)[19, 1] <- "\\midrule EPG"
outf <- paste0("tables/comparison-cuts-", inst, ".tex")
outt <- latex(srk_comp_cuts, file = outf)
change_tabular_env(outf)
# Counts
srk_comp_counts <- tabular((Shrinking = Factor(srk)) ~
(Heading() * Factor(gen) *
#Format(fmt_tm(decimals = 1)) *
(Heading("Preprocess") *
(Heading() * mean * Format(fmt_tm()) * (`C1` = pre_count_c1) +
Heading() * mean * Format(fmt_tm()) * (`C2` = pre_count_c2) +
Heading() * mean * Format(fmt_tm()) * (`C3` = pre_count_c3) +
Heading() * mean * Format(fmt_tm()) * (`S1` = pre_count_s1) +
Heading() * mean * Format(fmt_tm()) * (`S2` = pre_count_s2) +
Heading() * mean * Format(fmt_tm(decimals = 2)) *(`H` = pre_count_queue)
) +
Heading("DHI") * mean * Format(fmt_tm(decimals = 1)) *(`Extra` = sep_count_extra)
)
),
data = instdata)
outf <- paste0("tables/comparison-counts-", inst, ".tex")
outt <- latex(srk_comp_counts, file = outf)
change_tabular_env(outf)
cat("OK\n")
}
|
70fe83648fd09408f9d285ec69c66cf752b568af
|
ffdea92d4315e4363dd4ae673a1a6adf82a761b5
|
/data/genthat_extracted_code/Temporal/examples/rGenGamma.Rd.R
|
4858cb44338e1f6051995fc7556e1863122755c8
|
[] |
no_license
|
surayaaramli/typeRrh
|
d257ac8905c49123f4ccd4e377ee3dfc84d1636c
|
66e6996f31961bc8b9aafe1a6a6098327b66bf71
|
refs/heads/master
| 2023-05-05T04:05:31.617869
| 2019-04-25T22:10:06
| 2019-04-25T22:10:06
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 286
|
r
|
rGenGamma.Rd.R
|
library(Temporal)
### Name: rGenGamma
### Title: Simulation from the Generalized Gamma Distribution
### Aliases: rGenGamma
### ** Examples
# Generalized gamma event times with shapes (2,2) and rate 2
# Expected censoring proportion of 20%
# NOTE(review): no set.seed() call, so the simulated draw is not reproducible.
D = rGenGamma(n=1e3,a=2,b=2,l=2,p=0.2);
|
3c2f3e70d94c8cb4095b1479cfc3e12fec89f4d7
|
35a1e73c2ab2cae03f51f10baa4bd6b488d95a84
|
/man/smbinning.sumiv.plot.Rd
|
23a59867749c780072257ce50ac7c6d72aba9fbb
|
[] |
no_license
|
mauropelucchi/smbinning
|
30b13e331dfecafdb7eaf46ac475fecb5859760b
|
83f402391dd31ddd4a1144dd815150ae625e98ec
|
refs/heads/master
| 2021-01-17T11:20:48.103845
| 2016-06-20T10:20:35
| 2016-06-20T10:20:35
| 66,144,448
| 0
| 0
| null | 2016-08-20T11:58:59
| 2016-08-20T11:58:59
| null |
UTF-8
|
R
| false
| true
| 1,333
|
rd
|
smbinning.sumiv.plot.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/smbinning.R
\name{smbinning.sumiv.plot}
\alias{smbinning.sumiv.plot}
\title{Plot Information Value Summary}
\usage{
smbinning.sumiv.plot(sumivt, cex = 0.9)
}
\arguments{
\item{sumivt}{A data frame saved after \code{smbinning.sumiv}.}
\item{cex}{Optional parameter for the user to control the font size of the characteristics
displayed on the chart. The default value is 0.9}
}
\value{
The command \code{smbinning.sumiv.plot} returns a plot that shows the IV
for each numeric and factor characteristic in the dataset.
}
\description{
It gives the user the ability to plot the Information Value by characteristic.
The chart only shows characteristics with a valid IV.
}
\examples{
# Package loading and data exploration
library(smbinning) # Load package and its data
data(chileancredit) # Load smbinning sample dataset (Chilean Credit)
# Training and testing samples (Just some basic formality for Modeling)
chileancredit.train=subset(chileancredit,FlagSample==1)
chileancredit.test=subset(chileancredit,FlagSample==0)
# Plotting smbinning.sumiv
sumivt=smbinning.sumiv(chileancredit.train,y="FlagGB")
sumivt # Display table with IV by characteristic
smbinning.sumiv.plot(sumivt,cex=0.8) # Plot IV summary table
}
|
635f7fc80405b4e8b997260e3237b4a3015b50da
|
84526595f5bb52ad787c5bcd4eac5ee6af937ffb
|
/R/mymcl.R
|
3ac8b3c1ed65774e9405992370f7fa4dda324618
|
[] |
no_license
|
GregStacey/ppicluster
|
d2b9cb378603c627436d1d9fd454a3d6b110bd5f
|
912c31dcc74e2bc9f8ed5c5d248b1f9cab54d019
|
refs/heads/master
| 2021-07-03T16:31:37.156136
| 2020-09-01T00:34:12
| 2020-09-01T00:34:12
| 160,438,936
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,976
|
r
|
mymcl.R
|
# Markov Cluster (MCL) algorithm on a square (weighted) adjacency matrix.
#
# Args:
#   m: square numeric adjacency/similarity matrix.
#   infl: inflation exponent applied after each expansion (squaring) step.
#   iter: maximum number of expansion/inflation iterations.
#   remove.self.loops: if TRUE, zero the diagonal before clustering.
#   prune: if TRUE, zero entries below `pruning.prob` between iterations.
#   thresh: root-mean-square change below which iteration stops.
#   pruning.prob: cutoff used when `prune` is TRUE.
#   use.sparse: TRUE/FALSE to force/forbid a sparse representation; NULL
#     (default) auto-enables it when at least half the entries are zero.
#     The sparse path calls Matrix(), so the Matrix package must be attached.
#   verbose: if TRUE, print progress messages.
#
# Returns a numeric vector of cluster labels, one per row/column of `m`;
# each label is the index of that cluster's attractor node, and isolated
# nodes (all-zero columns) are labelled with their own index.
mymcl <- function(m, infl, iter = 1000, remove.self.loops = FALSE, prune = FALSE,
                  thresh = 1e-06, pruning.prob = 1e-06, use.sparse = NULL,
                  verbose = FALSE) {
  if (nrow(m) != ncol(m)) {
    stop("input matrix must be a square matrix")
  }
  if (remove.self.loops) {
    diag(m) <- 0
  }
  n <- nrow(m)^2  # total entry count; used for sparsity test and RMS change

  # Column-normalize to a stochastic matrix; all-zero columns (NaN after
  # the division) are reset to zero.
  m <- sweep(m, 2, colSums(m), `/`)
  m[is.na(m)] <- 0
  if (prune) {
    m[m < pruning.prob] <- 0
  }

  # Auto-select a sparse representation unless the caller forced a choice.
  force.sparse <- FALSE
  if (length(use.sparse) == 0) {
    force.sparse <- (sum(m == 0) / n) >= 0.5
    use.sparse <- force.sparse
  }
  if (use.sparse || force.sparse) {
    m <- Matrix(m)  # requires the Matrix package to be attached by the caller
    if (verbose) {
      print("sparse matrices will be used")
    }
  }

  # First expansion + inflation + renormalization step.
  m0 <- m
  m <- m %*% m
  m <- m^infl
  m <- sweep(m, 2, colSums(m), `/`)
  m[is.na(m)] <- 0
  i <- 1
  if (sum(m0 - m) != 0) {  # skip iterating if already at a fixed point
    for (i in 2:iter) {
      m <- m %*% m
      m <- m^infl
      m <- sweep(m, 2, colSums(m), `/`)
      m[is.na(m)] <- 0
      # Converged when fully polarized (no entries strictly between 0 and 1)
      # or when the RMS change since the previous iterate is below `thresh`.
      if ((sum(m > 0 & m < 1) == 0) || (sqrt(sum((m - m0)^2) / n) < thresh)) {
        break
      }
      if (prune) {
        m[m < pruning.prob] <- 0
      }
      m0 <- m
    }
  }
  if (verbose) {
    print(paste("mcl converged after", i, "iterations"))
  }

  # BUG FIX: the original tested class(matrix) != "matrix" — that is the
  # class of the base *function* `matrix` ("function"), never "matrix" —
  # so the coercion ran unconditionally. Coerce back to a dense base
  # matrix only when actually needed (e.g. after the sparse Matrix path).
  if (!is.matrix(m)) {
    m <- as.matrix(m)
  }

  # Read clusters off the converged matrix: each attractor row's positive
  # entries mark that cluster's members.
  n_nodes <- nrow(m)
  clusters <- numeric(n_nodes)
  csums <- colSums(m)
  lonely <- which(csums == 0)     # isolated nodes form singleton clusters
  clustered <- which(csums > 0)
  clusters[lonely] <- lonely
  attractors <- sort(which(rowSums(m) > 0))
  j <- 1
  unlabelled <- length(clustered)
  while (unlabelled > 0) {
    i <- attractors[j]
    if (clusters[i] == 0) {
      attracts <- which(m[i, ] > 0)
      clusters[attracts] <- i
    }
    unlabelled <- sum(clusters[clustered] == 0)
    j <- j + 1
    # Guard: stop once every attractor has been visited, even if some nodes
    # remain unlabelled (preserves the original's bug-fix behavior).
    if (j > length(attractors)) unlabelled <- 0
  }
  clusters
}
|
0331792d162f9827e4c1851fa0ecf405ad1c53ac
|
19361af6ab987d9a87334a3f6c83e07b434d2698
|
/R/old/wpd_clust.R
|
244ff79154c47b14fdc70f29ab19a8680836154e
|
[] |
no_license
|
Sayani07/gracsr
|
1f3ef4395874316994e3b265758f847407f8444e
|
0f365cd358f808cd077403d7d7a534cc584e59ce
|
refs/heads/master
| 2023-09-03T22:46:45.477390
| 2021-10-27T00:45:27
| 2021-10-27T00:45:27
| 395,581,498
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,827
|
r
|
wpd_clust.R
|
#' Cluster time-series keys by weighted pairwise distinctiveness (wpd)
#'
#' Computes `hakear::select_harmonies()` wpd values for each key of `.data`
#' over a fixed set of granularity combinations, then hierarchically
#' clusters the keys on those values (complete linkage).
#'
#' @param .data A tsibble; only its first key variable is used.
#' @param harmony_tbl A harmony table (e.g. from `gravitas::harmony()`).
#'   Must be supplied; an error is raised otherwise.
#' @param filter_comb NOTE(review): never referenced in the body — dead
#'   parameter; confirm whether filtering by combination was intended.
#' @param nperm Number of permutations forwarded to `hakear::select_harmonies()`.
#' @param nsamp Number of samples forwarded to `hakear::select_harmonies()`.
#' @param kopt Number of clusters to cut the dendrogram into; if `NULL`,
#'   an "optimal" k is estimated (but see the NOTE in the body — that
#'   branch references undefined objects).
#'
#' @return A tibble with one row per key: its cluster `group` and `customer_id`.
#' @export
#'
#' @examples
#' library(gravitas)
#' library(tidyverse)
#' library(parallel)
#' library(tsibble)
#' library(rlang)
#' sm <- smart_meter10 %>%
#' filter(customer_id %in% c("10006704", "10017936", "10006414", "10018250"))
#' gran1 <- "hour_day"
#' gran2 <- NULL
#' harmonies <- sm %>%
#' harmony(
#' ugran = "month",
#' filter_in = "wknd_wday",
#' filter_out = c("hhour", "fortnight")
#' )
#'
#' v <- suppressWarnings(wpd_clust(sm, harmony_tbl = harmonies))
#' v
wpd_clust <- function(.data,
harmony_tbl = NULL,
filter_comb = NULL,
nperm = 2,
nsamp = 2,
kopt = 2) {
# NOTE(review): `d` and `nmaxclust` are not defined anywhere in this
# function, so calling with kopt = NULL errors at runtime — this branch
# presumably needs a distance matrix and a max-cluster argument; confirm.
if (is.null(kopt)) {
koptimal <- fpc::nselectboot(d,
B = 50,
method = "complete",
clustermethod = fpc::disthclustCBI,
classification = "averagedist",
krange = 2:nmaxclust
)
kopt <- koptimal$kopt
}
if (is.null(harmony_tbl)) {
stop("harmony table must be provided")
}
# Keep only this fixed whitelist of facet/x granularity combinations.
harmonies <- harmony_tbl %>%
mutate(comb = paste(facet_variable,
x_variable,
sep = "-"
)) %>%
filter(comb %in% c(
"hour_day-wknd_wday",
"day_month-hour_day",
"wknd_wday-hour_day",
"hour_day-day_week",
"day_week-hour_day"
)) %>%
select(-comb)
# if(is.null(key)){
# Only the first key variable of the tsibble is used to split the data.
key <- tsibble::key(.data)
key <- key[1] %>% as.character()
# }
uni_cust <- unique(.data %>% pull(!!sym(key)))
# Lookup table: serial id (from mclapply .id) -> original key value.
customer_ref <- tibble(
customer_serial_id = as.character(seq(length(uni_cust))),
customer_id = uni_cust
)
elec_split <- .data %>% group_split(!!sym(key))
# Compute harmonies per key in parallel (all cores but one).
elec_select_harmony <- parallel::mclapply(seq_len(length(elec_split)), function(x) {
data_id <- elec_split %>%
magrittr::extract2(x) %>%
as_tsibble(index = reading_datetime)
# NOTE(review): `response` is not an argument of wpd_clust; this embrace
# only works if a global `response` exists — verify intended interface.
k <- hakear::select_harmonies(data_id,
harmony_tbl = harmonies,
response = {{ response }},
nperm = nperm,
nsamp = nsamp
)
}, mc.cores = parallel::detectCores() - 1, mc.preschedule = FALSE, mc.set.seed = FALSE) %>%
dplyr::bind_rows(.id = "customer_serial_id") %>%
# dplyr::mutate(!!key := m) %>%
# dplyr::select(-m) %>%
dplyr::left_join(customer_ref) %>%
dplyr::select(-customer_serial_id)
# NOTE(review): hard-coded relative output path as a side effect of the
# function; consider making this a parameter or removing it.
write_rds(elec_select_harmony, "data/elec_select_harmony.rds")
# One row per customer, one column per granularity combination (wpd values).
mydist <- elec_select_harmony %>%
mutate(comb = paste(facet_variable, x_variable, sep = "-")) %>%
select(comb, customer_id, wpd) %>%
pivot_wider(names_from = comb, values_from = wpd)
# Complete-linkage hierarchical clustering on the wpd feature matrix.
hc <- stats::hclust(dist(mydist[-1]), method = "complete")
groups <- tibble(group = cutree(hc, k = kopt), customer_id = mydist$customer_id)
groups
}
|
76127d428b875b291eabdde2d21f55fb9f620c71
|
1f24f8687c4a0f7631208f38b532f96504cdc830
|
/UpdatedWordEndAnalysis.R
|
ebb5b6075cff941eea6dd1e274efb51e06b29a98
|
[] |
no_license
|
smkeane/word-end-analysis
|
e3bbcc02a79c2c4b69281c47d2e7673b570699c5
|
770e96fb524ccf280d2c6af0780ea0ee094c670d
|
refs/heads/master
| 2020-12-12T21:11:41.703966
| 2020-02-24T04:44:47
| 2020-02-24T04:44:47
| 234,229,103
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 3,063
|
r
|
UpdatedWordEndAnalysis.R
|
# Word-ending gender analysis: tabulate how often nouns with a given
# terminal letter sequence are male vs female.
# NOTE(review): install.packages() runs on every execution of this script;
# installation should be done once, interactively.
install.packages("stringr")
library(stringr)
# Read the noun data (expects columns `noun` and `gender`).
nouns = read.csv("nouns.csv")
# Column of the final letter of each word.
nouns$finalSegment = str_match(nouns$noun,".$")
# Table of terminal letters vs gender counts.
xtabs(~finalSegment+gender, nouns)
# Subset of words that don't end in 'a' or 'o'.
notAorO = subset(nouns, finalSegment!="a" & finalSegment!="o")
# Recompute the last letter on the subset.
notAorO$finalSegment = str_match(notAorO$noun, ".$")
# Table `a`: terminal-letter vs gender counts.
a = xtabs(~finalSegment+gender, notAorO)
# Same for the terminal 2 letters (table `b`).
notAorO$final2Segments = str_match(notAorO$noun, "..$")
b = xtabs(~final2Segments+gender, notAorO)
# Terminal 3 letters (table `c`).
# NOTE(review): `c` (and `d` below) shadow base-R names in the global
# environment; function-call lookup still finds base::c, but renaming
# these would be safer.
notAorO$final3Segments = str_match(notAorO$noun, "...$")
c = xtabs(~final3Segments+gender, notAorO)
# Terminal 4 letters (table `d`).
notAorO$final4Segments = str_match(notAorO$noun, "....$")
d = xtabs(~final4Segments+gender, notAorO)
# Per-row "majority gender" prediction accuracy for a two-column count
# table (tables a, b, c, d above: one row per word ending, one column per
# gender).
#
# x: matrix/table with exactly two count columns.
# Returns an unnamed numeric vector, one accuracy per row: the larger of
# the two counts divided by the row total (i.e. the accuracy of always
# guessing the more frequent gender for that ending).
#
# Vectorized rewrite of the original row-by-row loop: the original grew
# the result with c() inside a loop and mixed linear indexing (x[i] for
# column 1) with matrix indexing (x[i,2]); pmax over the two columns
# computes the same values in one step and also handles 0- and 1-row
# tables, which the original's `2:nrow(x)` loop did not.
getAccuracy <- function(x) {
  majority <- pmax(x[, 1], x[, 2])
  totals <- x[, 1] + x[, 2]
  unname(majority / totals)
}
# Accuracy vectors for each terminal-segment length (1 to 4 letters).
# Enter the set's name below to view the accuracies and the table name
# (a, b, c, or d) to see the rows the values correspond to.
e = getAccuracy(a)
f = getAccuracy(b)
g = getAccuracy(c)
h = getAccuracy(d)
# Bootstrap: resample each accuracy vector with replacement 10,000 times
# and keep the mean of each resample.
# NOTE(review): `T` should be spelled TRUE; also the original comment said
# "1,000 times" but the code replicates 10,000 times.
sim1 = replicate(10000, mean(sample(e, replace=T)))
sim2 = replicate(10000, mean(sample(f, replace=T)))
sim3 = replicate(10000, mean(sample(g, replace=T)))
sim4 = replicate(10000, mean(sample(h, replace=T)))
# Overlap between adjacent bootstrap distributions:
# e.g. diff1VS2 scores how similar the accuracy based on the last letter is
# to the accuracy based on the last two letters.
# NOTE(review): %in% tests *exact* equality of bootstrap means, which is a
# fragile similarity measure for continuous values — an overlap of
# confidence intervals would be more robust; verify intent.
diff1VS2 = 100*mean(sim1 %in% sim2)
diff2VS3 = 100*mean(sim2 %in% sim3)
diff3VS4 = 100*mean(sim3 %in% sim4)
|
72abf3d6b65cfe5d505ac5e0f38a528c80664b8f
|
e33c27c79295195487163817be57bff73c4a6526
|
/man/get.decluttered.Rd
|
fb858f15e7fb18d4dccba1093ca84695332d38bb
|
[] |
no_license
|
cran/tempR
|
082e33c6ad7bfb2857f475f0eb1e7c3bc78a1a20
|
d223bd869d90a0155c4e4a04b4b94ba02587ff40
|
refs/heads/master
| 2022-02-28T10:46:59.262280
| 2022-02-18T21:20:02
| 2022-02-18T21:20:02
| 62,116,242
| 2
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 1,845
|
rd
|
get.decluttered.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/tcata.R
\encoding{UTF-8}
\name{get.decluttered}
\alias{get.decluttered}
\title{Get decluttering matrix indicating where to show/hide reference lines}
\usage{
get.decluttered(x = x, n.x = n.x, y = y, n.y = n.y, alpha = 0.05)
}
\arguments{
\item{x}{selections for sample of interest (can be a vector if several samples of interest)}
\item{n.x}{evaluations of \code{x} (can be a vector if several samples of interest)}
\item{y}{selections for comparison (can be a vector if several comparisons will be made)}
\item{n.y}{evaluations of \code{y} (can be a vector if several comparisons of interest)}
\item{alpha}{significance level}
}
\value{
declutter vector in which \code{1} indicates "show" and \code{NA} indicates "hide"
}
\description{
Declutter TCATA curves by hiding reference lines from plots showing TCATA curves.
}
\examples{
# functionality of get.decluttered() is conveniently provided in citation.counts()
# Data set: ojtcata
# Get declutter matrix for comparison of Product 2 vs. average of all products
data(ojtcata)
oj2.v.all <- citation.counts(ojtcata, product.name = "2", product.col = 2,
attribute.col = 4, results.col = 5:25, comparison = "average")
oj2.v.all$declutter
# same as
p2.declutter <- get.decluttered(x = c(oj2.v.all$P1), n.x = oj2.v.all$Pn,
y = c(oj2.v.all$C1), n.y = oj2.v.all$Cn)
(p2.declutter <- matrix(p2.declutter, nrow = nrow(oj2.v.all$P1)))
}
\references{
Castura, J.C., Antúnez, L., Giménez, A., Ares, G. (2016). Temporal check-all-that-apply (TCATA): A novel temporal sensory method for characterizing products. \emph{Food Quality and Preference}, 47, 79-90. \doi{10.1016/j.foodqual.2015.06.017}
}
\seealso{
\code{\link[stats]{fisher.test}}, \code{\link[tempR]{citation.counts}}
}
|
21dec7ca7a3b4c20eaae7699f3c5d723bb1dcd1a
|
23ac665552c844602528be7702c74c86d80bcff2
|
/Product1SentimentGraph.R
|
c7c3a639f4f9366c8b83e7726f2a27926607772a
|
[] |
no_license
|
sahaditya/sentiment-analysis-model
|
330e83045d6384dbc4e1cc5f6e20dfeb8a4347f7
|
f0e7dfa91caf56921a95018c72730fa9eebd5bef
|
refs/heads/master
| 2021-07-14T04:49:42.991956
| 2017-10-14T13:40:04
| 2017-10-14T13:40:04
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 913
|
r
|
Product1SentimentGraph.R
|
# Sentiment graph for product 1 ######
# Requires a data frame `sentimentP1` with NRC sentiment count columns
# (anger, anticipation, ..., positive) to already exist in the workspace.
# NOTE(review): right-assignment `->` is unusual; prefer `x <- sentimentP1`.
sentimentP1->x
# Total count per sentiment category across all documents.
Scorep1<-c(sum(x$anger),sum(x$anticipation),sum(x$disgust),sum(x$fear),sum(x$joy),sum(x$sadness),sum(x$surprise),sum(x$trust),sum(x$negative),sum(x$positive))
Sentisp1<-c("anger","anticipation","disgust","fear","joy","sadness","surprise","trust","negative","positive")
# NOTE(review): magic number 56 repeated throughout — presumably the total
# number of reviews/documents for product 1; TODO confirm and name it.
Totalp1<-c(56,56,56,56,56,56,56,56,56,56)
sentiScorep1<-data.frame(Sentisp1,Scorep1,Totalp1)
ggplot(sentiScorep1, aes(Sentisp1, y = Scorep1))+geom_histogram(aes(fill = Scorep1), stat = "identity")
#############################################
# Negative-sentiment subset with hard-coded observed counts (ouncep1).
negativep1<-c("anger","disgust","fear","sadness","negative")
ouncep1 <- c(9,3,8,7,17)
founcep1<-c(56,56,56,56,56)
negSentip1<-data.frame(negativep1,ouncep1,founcep1)
negSentip1
# NOTE(review): `l` and `t` shadow base names (t = transpose); function
# lookup is unaffected, but clearer names would help.
l<-sum(negSentip1$ouncep1)
t<-sum(negSentip1$founcep1)
# Percentage "positive" = (total - negative) / 280, where 280 = 5 * 56.
posiPercentp1<-((t-l)*100)/280
posiPercentp1
|
d4fd35ddff0b9bfefe4f7108c47302d69b6df298
|
324634eff15949983d404326afc1b406de87b185
|
/cpReg/tests/testthat/test_changepoint_fixedthreshold.R
|
19b000978487bd4c02386aa7d41224b4509ee1eb
|
[
"MIT"
] |
permissive
|
linnykos/cpReg
|
f266e809da3ea65b367b235ef5f35ac6ee9d81ff
|
b296de544c96f5b4a90b8c87e65e00a1c5a37190
|
refs/heads/master
| 2021-10-25T14:48:59.907105
| 2019-04-04T21:12:48
| 2019-04-04T21:12:48
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,781
|
r
|
test_changepoint_fixedthreshold.R
|
# Tests for the internal .find_breakpoint() helper of cpReg.
# NOTE(review): context() is deprecated in testthat 3e.
context("Test wild binary segmentation")
## .find_breakpoint is correct
test_that(".find_breakpoint works", {
set.seed(10)
n <- 100
true_partition <- c(0, 0.5, 1)
# Two-segment regression data with a changepoint at n/2.
dat <- create_data(list(c(1,1,1), c(2,-1,2)), true_partition*n)
lambda <- oracle_tune_lambda(dat$X, dat$y, true_partition)
# NOTE(review): `tau`, `tau_function` and `M` are computed/defined below
# but never passed to .find_breakpoint — likely leftovers from an older
# interface; confirm before deleting.
tau <- cpReg::oracle_tune_tau(dat$X, dat$y, lambda, true_partition,
factor = 1/2)
compute_cusum_func <- .compute_regression_cusum
tau_function <- function(data, interval, ...){
tau
}
M <- 100
delta <- 10
max_candidates <- NA
# NOTE(review): verbose = F — prefer FALSE over the reassignable F.
res <- .find_breakpoint(dat, c(0, 100), delta = delta, max_candidates = max_candidates,
data_length_func = function(x){nrow(x$X)},
compute_cusum_func = compute_cusum_func, verbose = F,
lambda = lambda)
expect_true(is.list(res))
expect_true(all(names(res) == c("val", "b")))
})
test_that(".find_breakpoint exits gracefully when delta is too large", {
set.seed(10)
n <- 1000
true_partition <- c(0, 0.5, 1)
dat <- create_data(list(c(1,1,1), c(2,-1,2)), true_partition*n)
lambda <- oracle_tune_lambda(dat$X, dat$y, true_partition)
tau <- cpReg::oracle_tune_tau(dat$X, dat$y, lambda, true_partition,
factor = 1/2)
compute_cusum_func <- .compute_regression_cusum
tau_function <- function(data, interval, ...){
tau
}
# delta (100) is larger than a sensible spacing for the interval below;
# the test only checks the call still returns a list rather than erroring.
delta <- 100
max_candidates <- 10
res <- .find_breakpoint(dat, c(723, 1000), delta = delta, max_candidates = max_candidates,
data_length_func = function(x){nrow(x$X)},
compute_cusum_func = compute_cusum_func, verbose = F,
lambda = lambda)
expect_true(is.list(res))
})
|
cb8298e7a26e5c558e1bf32c1ad0d09933898f60
|
1dedfa2451f5bdf76dc6ac9f6f2e972865381935
|
/sandbox/playing_around.R
|
3f5177516c3f50af425c9eb878724dd8b9cca697
|
[
"MIT"
] |
permissive
|
nhejazi/haldensify
|
95ef67f709e46554085371ffd4b5ade68baf06a4
|
e2cfa991e2ba528bdbf64fd2a24850e22577668a
|
refs/heads/master
| 2022-10-07T09:51:03.658309
| 2022-09-26T18:07:59
| 2022-09-26T18:07:59
| 165,715,134
| 15
| 6
|
NOASSERTION
| 2022-08-24T14:03:36
| 2019-01-14T18:43:32
|
R
|
UTF-8
|
R
| false
| false
| 1,390
|
r
|
playing_around.R
|
# author: David Benkeser
# Sandbox script: fit a HAL-based conditional density estimate of A | W
# and compare the estimated density against the true normal density at a
# few values of W.
# NOTE(review): data.table, ggplot2 and dplyr are loaded but not used in
# this script (plots use base graphics).
library(data.table)
library(ggplot2)
library(dplyr)
library(hal9001)
devtools::load_all()
set.seed(76924)
# simulate data: W ~ Uniform(-4, 4) and A|W ~ N(mu = W, sd = 0.5)
n_train <- 100
w <- runif(n_train, -4, 4)
a <- rnorm(n_train, w, 0.5)
# learn relationship A|W using HAL-based density estimation procedure
# tune over different choices of n_bins and grid_type
haldensify_fit <- haldensify(
A = a, W = w,
grid_type = c("equal_range","equal_mass"),
n_bins = c(5, 10),
lambda_seq = exp(seq(-1, -13, length = 250))
)
# predictions to recover conditional density of A|W
# NOTE(review): `new_a` is never used below, and this `w_val` is shadowed
# by add_line()'s argument of the same name.
new_a <- seq(-8, 8, by = 0.01)
w_val <- c(-3, -1, 1, 3)
# Overlay the true density (dashed) and the HAL estimate (solid) for a
# single conditioning value w_val. Relies on the global `haldensify_fit`.
#   a_val: grid of A values to evaluate; w_val: conditioning value of W;
#   new_plot: start a fresh plot? col_true/col_est: line colours.
add_line <- function(a_val = seq(-5, 5, by = 0.01),
w_val = 0, new_plot = TRUE,
col_true = 1, col_est = 1, ...) {
pred <- predict(haldensify_fit,
new_A = a_val, new_W = rep(w_val, length(a_val)))
if (new_plot) {
plot(0, 0, pch = "", xlim = range(a_val), ylim = c(0, max(pred) * 1.10),
xlab = "a", ylab = "Density")
}
# add true density
lines(x = a_val, y = dnorm(a_val, w_val, 0.5), lty = 2, col = col_true)
# add predicted density
lines(x = a_val, y = pred, lty = 1, col = col_est)
}
add_line(col_true = 1, col_est = 1)
add_line(w_val = -2, new_plot = FALSE, col_true = 2, col_est = 2)
add_line(w_val = 2, new_plot = FALSE, col_true = 4, col_est = 4)
|
c04bc15b4fba103d1964ef13991755a2b0f79a80
|
ffdea92d4315e4363dd4ae673a1a6adf82a761b5
|
/data/genthat_extracted_code/MXM/examples/cor.drop1.Rd.R
|
5e60b46035658b1774a8b3998eeddb1f7f3ae23d
|
[] |
no_license
|
surayaaramli/typeRrh
|
d257ac8905c49123f4ccd4e377ee3dfc84d1636c
|
66e6996f31961bc8b9aafe1a6a6098327b66bf71
|
refs/heads/master
| 2023-05-05T04:05:31.617869
| 2019-04-25T22:10:06
| 2019-04-25T22:10:06
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 369
|
r
|
cor.drop1.Rd.R
|
library(MXM)
### Name: Drop all possible single terms from a model using the partial correlation
### Title: Drop all possible single terms from a model using the partial
### correlation
### Keywords: Area under the curve receiver operating curve
### ** Examples
# NOTE(review): the Keywords line above (AUC/ROC) looks unrelated to
# cor.drop1; likely a copy-paste artifact from another help page.
# NOTE(review): no set.seed(), so the example output is not reproducible.
y <- rnorm(1000)
x <- matrix( rnorm(1000 * 20), ncol = 20)
cor.drop1(y, x)
|
9f78a996d8a5f4b9fb21d9ba0d4dc5845d9e9797
|
317068287ce279c644a63a6d719f04920f9f8413
|
/pokemon.r
|
0e4cdcbcb5e662762544ec5aa94e4be0b3a3c33d
|
[] |
no_license
|
williamfiset1/GitTest
|
a4ac6679dfa705e5809a8d673fad4a41a6114c8e
|
96396a173cd7f58d060fab97667c6f269953e8b8
|
refs/heads/master
| 2020-12-19T11:17:24.383981
| 2020-01-23T05:30:57
| 2020-01-23T05:30:57
| 235,718,173
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 64
|
r
|
pokemon.r
|
# Scratch R script: two scalars, their sum (auto-printed at top level),
# a large constant, and a vector of uniform random draws.
y <- 6
x <- 1
x+y
m <- 1e5       # 100,000
z <- runif(8)  # 8 draws from Uniform(0, 1); no seed set
|
1463c2c09c2b937eb5f74bdc4bed26a110181644
|
9835e7be4084940d7484fdec8a78a25c20cb43e2
|
/scripts/SQLite_gwas.R
|
2f665f3cac856d4ba68c4b7b4c152162ebf0b965
|
[
"BSD-3-Clause"
] |
permissive
|
mengyuankan/realgar
|
ef5c91fda9896d75bd6d762ba0270a0c7c1f1c37
|
4a5e16b93e584d15f0f12d12f111c3db3a114efe
|
refs/heads/master
| 2023-07-07T00:41:18.105432
| 2023-06-21T01:38:26
| 2023-06-21T01:38:26
| 107,676,927
| 0
| 0
| null | 2017-10-20T12:44:30
| 2017-10-20T12:44:30
| null |
UTF-8
|
R
| false
| false
| 11,255
|
r
|
SQLite_gwas.R
|
#Install package
require("RSQLite")
library(DBI)
library(dplyr)
library(dbplyr)
library(feather)
library(viridis)
# Read in GWAS SNP files
# Original files are save under the lab server: /projects/AsthmaApp/REALGAR/GWAS/hg38_annot/[study].hg38.realgar.final.bed.
# The files are transferred to the R server: /mnt/volume_nyc3_01/realgar_files/hg38_annot
# Read one tab-delimited, headered GWAS .bed annotation file.
#
# comment.char = "" stops read.table() from treating '#' characters in the
# file (e.g. in the header line) as comment starts. The first column is
# renamed to "chromosome" so every source table shares the same key name.
readbed_func <- function(bed_fn) {
  bed <- read.table(bed_fn, header = TRUE, sep = "\t", comment.char = "")
  names(bed)[1] <- "chromosome"
  bed
}
# ---- Load GWAS association tables and standardize their layout --------------
# Every source is reduced to the common columns (chromosome, end, SNP, symbol,
# meta_P): rows with missing/empty p-values are dropped, duplicates removed,
# and rows sorted by position. `pcol` names the p-value column to keep; it is
# renamed to meta_P during selection. This replaces eleven copy-pasted
# pipelines that differed only in the p-value column name.
prep_snp <- function(dat, pcol) {
  dat %>%
    dplyr::select(chromosome, end, SNP, symbol, meta_P = dplyr::all_of(pcol)) %>%
    dplyr::filter(!is.na(meta_P) & meta_P != "") %>%
    unique() %>%
    dplyr::arrange(chromosome, end)
}
#SNP data from GRASP (also retains the PubMed id column, so handled directly)
snp_fn <- "/mnt/volume_nyc3_01/realgar_files/hg38_annot/GRASP.hg38.realgar.final.bed"
snp <- readbed_func(snp_fn) %>%
  dplyr::rename(meta_P = pval) %>%
  dplyr::select(chromosome, end, SNP, symbol, meta_P, pmid) %>%
  dplyr::filter(!is.na(meta_P) & meta_P != "") %>%
  unique() %>%
  dplyr::arrange(chromosome, end)
#SNP data from EVE: overall meta-analysis plus per-ancestry p-values
snp_eve_fn <- "/mnt/volume_nyc3_01/realgar_files/hg38_annot/EVE.hg38.realgar.final.bed"
snp_eve <- readbed_func(snp_eve_fn)
snp_eve_all <- prep_snp(snp_eve, "meta.p")
snp_eve_ea <- prep_snp(snp_eve, "EA.p")
snp_eve_aa <- prep_snp(snp_eve, "AA.p")
snp_eve_la <- prep_snp(snp_eve, "LA.p")
#SNP data from GABRIEL
snp_gabriel_fn <- "/mnt/volume_nyc3_01/realgar_files/hg38_annot/GABRIEL.hg38.realgar.final.bed"
snp_gabriel <- prep_snp(readbed_func(snp_gabriel_fn), "meta.p")
#SNP data from Ferreira
snp_fer_fn <- "/mnt/volume_nyc3_01/realgar_files/hg38_annot/Ferreira.hg38.realgar.final.bed"
snp_fer <- prep_snp(readbed_func(snp_fer_fn), "pval")
#SNP data from TAGC: multi-ancestry and European-only analyses
snp_TAGC_fn <- "/mnt/volume_nyc3_01/realgar_files/hg38_annot/TAGC.hg38.realgar.final.bed"
snp_TAGC <- readbed_func(snp_TAGC_fn)
snp_TAGC_multi <- prep_snp(snp_TAGC, "pval_multi")
snp_TAGC_euro <- prep_snp(snp_TAGC, "pval_euro")
#SNP data from UKBB: asthma, COPD, and asthma/COPD-overlap phenotypes
snp_UKBB_asthma_fn <- "/mnt/volume_nyc3_01/realgar_files/hg38_annot/ukbb.asthma.hg38.realgar.final.bed"
snp_UKBB_asthma <- prep_snp(readbed_func(snp_UKBB_asthma_fn), "P")
snp_UKBB_copd_fn <- "/mnt/volume_nyc3_01/realgar_files/hg38_annot/ukbb.copd.hg38.realgar.final.bed"
snp_UKBB_copd <- prep_snp(readbed_func(snp_UKBB_copd_fn), "P")
snp_UKBB_aco_fn <- "/mnt/volume_nyc3_01/realgar_files/hg38_annot/ukbb.aco.hg38.realgar.final.bed"
snp_UKBB_aco <- prep_snp(readbed_func(snp_UKBB_aco_fn), "P")
# add color columns
# -log10(p) values are binned on a fixed 0..8 grid so colors are comparable
# across subsets; values above 8 land in the final (Inf) bin.
breaks <- c(seq(0,8,by=0.001), Inf) # this sets max universally at 8 (else highest one OF THE SUBSET would be the max)
# Append neg_log_p (-log10 of meta_P) and a hex color from the viridis
# "inferno" palette, indexed by which `breaks` bin the value falls in.
# NOTE(review): inferno(8002) yields one more color than the 8001 cut()
# intervals, so the last palette entry is never used — harmless, but confirm
# the intended sizing.
addcolor_func <- function(dat) {
  dat <- dplyr::mutate(dat, neg_log_p = -log10(meta_P))
  dat$color <- inferno(8002)[as.numeric(cut(dat$neg_log_p, breaks = breaks))]
  return(dat)
}
# Apply the coloring to every standardized association table.
#GRASP SNP
snp <- addcolor_func(snp)
#SNP_EVE
snp_eve_all <- addcolor_func(snp_eve_all)
snp_eve_aa <- addcolor_func(snp_eve_aa)
snp_eve_ea <- addcolor_func(snp_eve_ea)
snp_eve_la <- addcolor_func(snp_eve_la)
#SNP gabriel
snp_gabriel <- addcolor_func(snp_gabriel)
#SNP fer
snp_fer <- addcolor_func(snp_fer)
#SNP TAGC
snp_TAGC_multi <- addcolor_func(snp_TAGC_multi)
snp_TAGC_euro <- addcolor_func(snp_TAGC_euro)
#UKBB
snp_UKBB_asthma <- addcolor_func(snp_UKBB_asthma)
snp_UKBB_copd <- addcolor_func(snp_UKBB_copd)
snp_UKBB_aco <- addcolor_func(snp_UKBB_aco)
# apply p-value thresholds
# Keep only associations with meta_P strictly below the cutoff (default 0.05).
datsel_func <- function(dat, pval_thr=0.05) {
  dat %>% dplyr::filter(meta_P<pval_thr)
}
# "Nominal" tier: suggestive significance, p < 1e-5.
pval_thr1 = 10^(-5)
#GRASP SNP - nominal significance
snp_nominal <- datsel_func(snp, pval_thr = pval_thr1)
#SNP_EVE - nominal significance
snp_eve_all_nominal <- datsel_func(snp_eve_all, pval_thr = pval_thr1)
snp_eve_aa_nominal <- datsel_func(snp_eve_aa, pval_thr = pval_thr1)
snp_eve_ea_nominal <- datsel_func(snp_eve_ea, pval_thr = pval_thr1)
snp_eve_la_nominal <- datsel_func(snp_eve_la, pval_thr = pval_thr1)
#SNP gabriel - nominal significance
snp_gabriel_nominal <- datsel_func(snp_gabriel, pval_thr = pval_thr1)
#SNP fer - nominal significance
snp_fer_nominal <- datsel_func(snp_fer, pval_thr = pval_thr1)
#SNP TAGC - nominal significance
snp_TAGC_multi_nominal <- datsel_func(snp_TAGC_multi, pval_thr = pval_thr1)
snp_TAGC_euro_nominal <- datsel_func(snp_TAGC_euro, pval_thr = pval_thr1)
#UKBB - nominal significance
snp_UKBB_asthma_nominal <- datsel_func(snp_UKBB_asthma, pval_thr = pval_thr1)
snp_UKBB_copd_nominal <- datsel_func(snp_UKBB_copd, pval_thr = pval_thr1)
snp_UKBB_aco_nominal <- datsel_func(snp_UKBB_aco, pval_thr = pval_thr1)
# "Genome-wide" tier: conventional GWAS significance, p < 5e-8.
pval_thr2 = 5*10^(-8)
#GRASP SNP - gwas significance
snp_gwas <- datsel_func(snp, pval_thr = pval_thr2)
#SNP_EVE - gwas significance
snp_eve_all_gwas <- datsel_func(snp_eve_all, pval_thr = pval_thr2)
snp_eve_aa_gwas <- datsel_func(snp_eve_aa, pval_thr = pval_thr2)
snp_eve_ea_gwas <- datsel_func(snp_eve_ea, pval_thr = pval_thr2)
snp_eve_la_gwas <- datsel_func(snp_eve_la, pval_thr = pval_thr2)
#SNP gabriel - gwas significance
snp_gabriel_gwas <- datsel_func(snp_gabriel, pval_thr = pval_thr2)
#SNP fer - gwas significance
snp_fer_gwas <- datsel_func(snp_fer, pval_thr = pval_thr2)
#SNP TAGC - gwas significance
snp_TAGC_multi_gwas <- datsel_func(snp_TAGC_multi, pval_thr = pval_thr2)
snp_TAGC_euro_gwas <- datsel_func(snp_TAGC_euro, pval_thr = pval_thr2)
#UKBB - gwas significance
snp_UKBB_asthma_gwas <- datsel_func(snp_UKBB_asthma, pval_thr = pval_thr2)
snp_UKBB_copd_gwas <- datsel_func(snp_UKBB_copd, pval_thr = pval_thr2)
snp_UKBB_aco_gwas <- datsel_func(snp_UKBB_aco, pval_thr = pval_thr2)
# ---- Persist the three significance tiers to SQLite -------------------------
# One database file per tier, each holding the same twelve tables. Files are
# written under sqilte_results/ (sic) and later moved by hand with
# `sudo mv` to /mnt/volume_nyc3_01/realgar_data/.
gwas_table_names <- c("snp", "snp_eve_all", "snp_eve_aa", "snp_eve_ea",
                      "snp_eve_la", "snp_gabriel", "snp_fer",
                      "snp_TAGC_multi", "snp_TAGC_euro",
                      "snp_UKBB_asthma", "snp_UKBB_copd", "snp_UKBB_aco")
# Write a named list of data frames to `dbname`, overwriting existing tables,
# and print the resulting table listing as a sanity check. on.exit() ensures
# the connection is closed even if a write fails (the original copy-pasted
# version left the connection open on error).
write_gwas_db <- function(dbname, tables) {
  db <- dbConnect(SQLite(), dbname = dbname)
  on.exit(dbDisconnect(db), add = TRUE)
  for (nm in names(tables)) {
    dbWriteTable(conn = db, name = nm, tables[[nm]], row.names = FALSE,
                 overwrite = TRUE)
  }
  print(dbListTables(db))
  invisible(dbname)
}
# Gather one tier's data frames by variable suffix ("", "_nominal", "_gwas")
# while keeping the canonical (unsuffixed) table names inside the database.
collect_tier <- function(suffix) {
  setNames(mget(paste0(gwas_table_names, suffix), envir = globalenv()),
           gwas_table_names)
}
db_dir <- "/mnt/volume_nyc3_01/realgar_files/hg38_annot/sqilte_results"
# Full (unthresholded) results
write_gwas_db(file.path(db_dir, "realgar-gwas-hg38-normal.sqlite"),
              collect_tier(""))
# Nominal significance (p < 1e-5)
write_gwas_db(file.path(db_dir, "realgar-gwas-hg38-nominal.sqlite"),
              collect_tier("_nominal"))
# Genome-wide significance (p < 5e-8)
write_gwas_db(file.path(db_dir, "realgar-gwas-hg38-genomewide.sqlite"),
              collect_tier("_gwas"))
|
a1fe805d58676726fedd9e1f0f7d04192ab207f9
|
b987f7e769e717cc39f583378f52188d3a9a0c9f
|
/R/setup-package.R
|
2877f88959d48ceecb9b036e704fe18af716b51e
|
[] |
no_license
|
rfortherestofus/great-graphs
|
9b51a110681a8e584d6e05936058b6e140e1bdb7
|
989fc3ad8e79b57fe8dc859277245828fece9e9a
|
refs/heads/master
| 2020-04-25T19:18:04.864272
| 2019-03-26T16:52:18
| 2019-03-26T16:52:18
| 173,014,788
| 1
| 1
| null | null | null | null |
UTF-8
|
R
| false
| false
| 705
|
r
|
setup-package.R
|
library(tidyverse)
library(here)
library(janitor)
# This file brings in assets (data, setup.Rmd and CSS) from the assets repo (https://github.com/rfortherestofus/assets).
# Get Data ----------------------------------------------------------------
# Download the NHANES extract into the project root (here() resolves the path
# relative to the project, so this works regardless of the working directory).
download.file("https://github.com/rfortherestofus/assets/raw/master/data/nhanes.csv",
              destfile = here("nhanes.csv"))
# Normalize column names with janitor::clean_names() and overwrite the
# downloaded CSV in place with the cleaned version.
nhanes <- read_csv(here("nhanes.csv")) %>%
  clean_names() %>%
  write_csv(here("nhanes.csv"))
# Get CSS -----------------------------------------------------------------
# Course stylesheet used by the R Markdown documents.
download.file("https://raw.githubusercontent.com/rfortherestofus/course-assets/master/style/style.css",
              destfile = here("style.css"))
|
a12cf6bf5dad991f33c555d15182f38ac6b8a584
|
2169d874f96e8b5436874c26319e3d8fd43c1bf5
|
/R/RcppExports.R
|
22db8c74eeae8552e7eca11e0ddf987a647c1a5c
|
[] |
no_license
|
cralo31/tnsrcomp
|
3e821a28e78828a548bd06a21fe21be50dc26d64
|
f040efb44e8bda8c8746c96d335725b0524bb4f0
|
refs/heads/main
| 2023-02-13T00:00:46.946887
| 2021-01-14T03:08:03
| 2021-01-14T03:08:03
| 329,444,615
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,665
|
r
|
RcppExports.R
|
# Generated by using Rcpp::compileAttributes() -> do not edit by hand
# Generator token: 10BE3573-1514-4C36-9D1C-5A225CD40393
# NOTE(review): auto-generated R wrappers for the compiled routines exported
# by the tnsrcomp package. Each function forwards its arguments unchanged to
# the matching C++ symbol via .Call(). The names suggest tensor operations
# (Kronecker/Khatri-Rao products, tensor folding/unfolding, ALS updates,
# CP/Tucker-style decompositions) — confirm semantics in src/, and regenerate
# this file with Rcpp::compileAttributes() rather than editing it.
kro_prod <- function(A, B) {
  .Call(`_tnsrcomp_kro_prod`, A, B)
}
krao_prod <- function(A, B) {
  .Call(`_tnsrcomp_krao_prod`, A, B)
}
unfold_ten <- function(tens) {
  .Call(`_tnsrcomp_unfold_ten`, tens)
}
fold_ten <- function(A, dim, mode) {
  .Call(`_tnsrcomp_fold_ten`, A, dim, mode)
}
f_norm <- function(tens, approx) {
  .Call(`_tnsrcomp_f_norm`, tens, approx)
}
tens_mat <- function(tens, X, mode) {
  .Call(`_tnsrcomp_tens_mat`, tens, X, mode)
}
core_ten <- function(core_mat) {
  .Call(`_tnsrcomp_core_ten`, core_mat)
}
find_core <- function(tens, mats) {
  .Call(`_tnsrcomp_find_core`, tens, mats)
}
vec_tensor <- function(A, B, C) {
  .Call(`_tnsrcomp_vec_tensor`, A, B, C)
}
als_up <- function(X_list, tens_list, mode, nng) {
  .Call(`_tnsrcomp_als_up`, X_list, tens_list, mode, nng)
}
wy_bls <- function(X_modes, tens_list, mode, tau, beta) {
  .Call(`_tnsrcomp_wy_bls`, X_modes, tens_list, mode, tau, beta)
}
bontf <- function(X, X_n, tnsr_list, rank, iter, tol, nng) {
  .Call(`_tnsrcomp_bontf`, X, X_n, tnsr_list, rank, iter, tol, nng)
}
uo_decomp <- function(X, X_n, tnsr_list, rank, iter, tol, nng) {
  .Call(`_tnsrcomp_uo_decomp`, X, X_n, tnsr_list, rank, iter, tol, nng)
}
ntd <- function(X, X_n, tnsr_list, rank, iter, tol) {
  .Call(`_tnsrcomp_ntd`, X, X_n, tnsr_list, rank, iter, tol)
}
cp_als <- function(tens, modes, rank, iter, thres) {
  .Call(`_tnsrcomp_cp_als`, tens, modes, rank, iter, thres)
}
gramat <- function(layer) {
  .Call(`_tnsrcomp_gramat`, layer)
}
|
fa5e7ded9c307d065a3249e7b360b850520c08f3
|
a3a60954a025b39251bd32f42319812357eb83e4
|
/Tetraselmis_experiment/R/22_variable_predictions_comparison.R
|
835f372a24927316c5e873c5d83da2bbe51cc9ea
|
[
"MIT"
] |
permissive
|
JoeyBernhardt/thermal-variability
|
f63cc337e40fb79b955ab0130de6f44bfa54ca83
|
f773f404fe715e29597d8fd379f7dd08714ae60f
|
refs/heads/master
| 2021-01-02T08:37:37.272112
| 2019-02-25T02:55:13
| 2019-02-25T02:55:13
| 99,034,540
| 1
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 5,478
|
r
|
22_variable_predictions_comparison.R
|
## goal: estimate the growth rate over time...with the daily temperature data
### so get time averaged growth rate
library(tidyverse)
library(janitor)
library(lubridate)
library(cowplot)
curve_data <- read_csv("Tetraselmis_experiment/data-processed/curve_data_20140606.csv") %>%
clean_names()
growth_raw <- read_csv("Tetraselmis_experiment/data-processed/growth_data_20140606.csv")
all_thermal_data <- read_csv("Tetraselmis_experiment/data-processed/all_thermal_data.csv") %>%
clean_names
temperature <- read_csv("Tetraselmis_experiment/data-processed/temps_all_33.875N.csv")
all_thermal_data %>%
filter(source == "Dokai Bay, Japan" , curvequal == "good") %>% View
curve <- all_thermal_data %>%
# filter(grepl("Detonula", speciesname)) %>%
filter(isolate_code == 462)
tpc1<-function(x){
res<-curve$a[1]*exp(curve$b[1]*x)*(1-((x-curve$z[1])/(curve$w[1]/2))^2)
res
}
p <- ggplot(data = data.frame(x = 0), mapping = aes(x = x))
p + stat_function(fun = tpc1, color = "black", size = 2) +xlim(35, 35.5) +
# ylim(0, 1.5) +
theme_bw() + ylab("Growth rate") +
theme(panel.grid.major = element_blank(), panel.grid.minor = element_blank(),
panel.background = element_blank(),
axis.line.y = element_line(color="black"),
axis.line.x = element_line(color="black")) +
theme(text = element_text(size=14, family = "Helvetica")) +
xlab(expression("Temperature (" *degree * "C)")) + geom_hline(yintercept = 0)
# now take time averaged growth rate --------------------------------------
# Per-day growth: apply the focal TPC (closing over global `curve`) to a
# one-day data frame of sea-surface temperatures; returns temperature and the
# corresponding predicted growth rate.
tpc <- function(df){
  x <- df$sst
  res<-curve$a[1]*exp(curve$b[1]*x)*(1-((x-curve$z[1])/(curve$w[1]/2))^2)
  data.frame(temperature = x, growth_rate = res)
}
# Split the SST series by day and evaluate growth for each day.
temp_split <- temperature %>%
  split(.$Date)
growth_time <- temp_split %>%
  map_df(tpc, .id = "date")
growth_time2 <- growth_time %>%
  mutate(date = ymd(date))
# Predicted growth rate through time.
growth_time2 %>%
  ggplot(aes(x = date, y = growth_rate)) + geom_line() +
  theme_classic()
str(temperature)
temperature2 <- temperature %>%
  rename(date = Date)
# Long format (for faceting) and wide format (for side-by-side panels).
temps_growth <- left_join(temperature2, growth_time2, by = "date") %>%
  gather(key = type, value = value, temperature:growth_rate)
temps_growth_wide <- left_join(temperature2, growth_time2, by = "date")
mean(temperature$sst)
# Temperature series with reference lines.
# NOTE(review): 24.3521, 9.7, 35.7 and 18.727 are hard-coded — presumably the
# mean SST and curve limits/optimum for this isolate; confirm.
temp_plot <- temps_growth_wide %>%
  ggplot(aes(x = date, y = temperature)) + geom_line() +
  theme_classic() + geom_hline(yintercept = 24.3521) +
  geom_hline(yintercept = 9.7, color = "blue") + geom_hline(yintercept = 35.7, color = "red") +
  geom_hline(yintercept = 18.727, color = "grey", linetype = "dashed")
growth_plot <- temps_growth_wide %>%
  ggplot(aes(x = date, y = growth_rate)) + geom_line() +
  theme_classic()
plot_grid(temp_plot, growth_plot, nrow=2)
temps_growth %>%
  ggplot(aes(x = date, y = value, color = type)) + geom_line() +
  theme_classic() + facet_wrap( ~ type, scales = "free")
# Time-integrated prediction: mean of the day-by-day growth rates.
time_integrated_prediction <- mean(growth_time$growth_rate)
## now the approximation
# Numerically estimate the `order`-th derivative of f at x using a divided-
# difference table built over `order + 1` equally spaced points spanning
# [x - delta/2, x + delta/2].
#
# Args:
#   f: function of one numeric argument; extra arguments forwarded via ...
#   x: point at which to differentiate.
#   order: derivative order (default 1). Fix: the original default was
#     `order = i`, which referenced an undefined variable and errored
#     whenever `order` was omitted.
#   delta: width of the evaluation window.
#   sig: significant digits retained in the result.
derivative <- function(f, x, ..., order = 1, delta = 0.1, sig = 6) {
  vals <- matrix(NA, nrow = order + 1, ncol = order + 1)
  grid <- seq(x - delta/2, x + delta/2, length.out = order + 1)
  # Row 1: function offsets relative to f(x); subsequent rows are successive
  # divided differences, so row order+1 approximates the order-th derivative.
  vals[1, ] <- sapply(grid, f, ...) - f(x, ...)
  for (i in 2:(order + 1)) {
    for (j in 1:(order - i + 2)) {
      stepsize <- grid[i + j - 1] - grid[i + j - 2]
      vals[i, j] <- (vals[i - 1, j + 1] - vals[i - 1, j])/stepsize
    }
  }
  return(signif(vals[order + 1, 1], sig))
}
# NOTE(review): tpc1 is redefined here and IGNORES its argument — the first
# line overwrites x with curve$mean, so tpc1(anything) returns growth at the
# mean temperature. Kept as-is because downstream lines rely on it; confirm
# this was intentional before reuse.
tpc1 <- function(x){
  x <- curve$mean
  res<-curve$a[1]*exp(curve$b[1]*x)*(1-((x-curve$z[1])/(curve$w[1]/2))^2)
  res
}
x <- seq(0, 45, by = 0.1)
x <- 15  # overwrites the sequence above; only this scalar survives
## step 2
# First attempt: Taylor (scale-transition) approximation — growth at the mean
# plus 0.5 * variance * second derivative of the TPC.
variable_predictions <- function(x) {
  y <- tpc1(x) + derivative(f = tpc1, x = x, order = 2)*0.5*(curve$sd^2)
}
predicted_growth_variable <- sapply(x, variable_predictions)
# Second attempt: same approximation, taking mean/SD from the curve data.
# NOTE(review): `data <- curve` immediately discards the argument.
variable_predictions <- function(data) {
  data <- curve
  x <- data$mean
  SD <- data$sd
  y <- tpc1(x) + derivative(f = tpc1, x = x, order = 2)*0.5*(SD^2)
  y
}
variable_predictions(curve)
tpc1(curve$mean)
all <- curve
x <- curve$mean
# tpc that reads its parameters from global `all` and does use its argument.
tpc<-function(x){
  res<-all$a[1]*exp(all$b[1]*x)*(1-((x-all$z[1])/(all$w[1]/2))^2)
  res
}
## step 2
# Third redefinition of variable_predictions.
# NOTE(review): reads global `data` (not an argument here) — fragile; it only
# works because of whatever `data` happens to be in the workspace.
variable_predictions <- function(x) {
  x <- data$mean
  y <- tpc(x) + derivative(f = tpc, x = x, order = 2)*0.5*(all$sd^2)
  y
}
# Packaged version: for one curve, scan 0-45 C and keep the temperature with
# the highest variability-corrected growth rate.
# NOTE(review): uses all$SD here (capitalized) vs all$sd above — confirm which
# column exists in the data.
predict_function <- function(data) {
  all <- data
  x <- seq(0, 45, by = 0.1)
  tpc<-function(x){
    res<-all$a[1]*exp(all$b[1]*x)*(1-((x-all$z[1])/(all$w[1]/2))^2)
    res
  }
  ## step 2
  variable_predictions <- function(x) {
    y <- tpc(x) + derivative(f = tpc, x = x, order = 2)*0.5*(all$SD^2)
  }
  predicted_growth_variable <- sapply(x, variable_predictions)
  predicted_growth_variable2 <- data.frame(x, predicted_growth_variable) %>%
    rename(temperature = x,
           growth.rate = predicted_growth_variable) %>%
    top_n(n = 1, wt = growth.rate)
  # data.frame(all$isolate.code, predicted_growth_variable2)
}
# Compare the three estimates: Taylor approximation, naive (no variability),
# and the day-by-day time integration computed earlier.
approx_prediction <- variable_predictions(curve)
no_var_prediction <- tpc(curve$mean)
time_integrated_prediction <- mean(growth_time$growth_rate)
compare <- data.frame(approx = approx_prediction, no_var = no_var_prediction, time_int = time_integrated_prediction)
compare %>%
  rename(time_integration = time_int,
         approximation = approx,
         no_variability = no_var) %>%
  gather(key = "prediction type", value = "estimated growth rate") %>%
  ggplot(aes(x = reorder(`prediction type`, `estimated growth rate`), y = `estimated growth rate`)) + geom_point() +
  xlab("prediction type")
|
536c07cf6332683c73d1675abc2358d593c919c1
|
0f172b6f94115e34fab3994a4c95a047294e36fa
|
/R/drop.df.R
|
69c1a027c4f205806c7ddffc3d93a35e2ff620db
|
[] |
no_license
|
jcval94/DataMiningTools
|
866932e4df4f1e2e645a14bc966921395737d6b4
|
fb4e7995b2f5acee742492e52fd2ad982d7b4d59
|
refs/heads/master
| 2020-07-07T03:42:10.047077
| 2020-01-07T05:38:26
| 2020-01-07T05:38:26
| 203,234,373
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 523
|
r
|
drop.df.R
|
#' Drop repeated rows or columns and columns with just one value
#'
#' Removes constant columns and duplicated rows, transposes the result with
#' \code{dft()}, repeats the pruning (so duplicated or constant \emph{columns}
#' of the original are removed too), and transposes back.
#'
#' @param df data frame to be cleaned
#'
#' @return the cleaned data.frame
#' @export
#'
#' @importFrom purrr map_lgl
#'
#' @examples
#'
#' data(iris)
#' iris[["A"]] <- 0
#' head(iris)
#' head(drop.df(iris))
#'
drop.df <- function(df) {
  # Keep columns with more than one distinct value, then drop duplicate rows.
  df1<-df[, purrr::map_lgl(df, ~length(unique(.x)) > 1)]
  df1<-df1[!duplicated(df1),]
  # NOTE(review): dft() is a package-internal helper, presumably a data-frame
  # transpose — confirm in the package sources.
  df1<-dft(df1)
  # Same pruning on the transposed frame catches duplicate/constant columns.
  df1<-df1[, purrr::map_lgl(df1, ~length(unique(.x)) > 1)]
  df1<-df1[!duplicated(df1),]
  # Transpose back to the original orientation.
  return(dft(df1))
}
|
9f4e541f769284c5c795b2698c5c3fb7176da091
|
271f93c39f957a6357dc64514b9a047e3489e5a9
|
/figures/deem_graph.R
|
61b0daee01ccd9a410480009a82b917d7a691cb6
|
[] |
no_license
|
schlogl2017/influenza_HA_evolution
|
87954c5e0101541341e7cf0ab4012b112d6c752a
|
725e6aa17c0bb170eeca1132e7de3632067d392c
|
refs/heads/master
| 2022-03-09T06:44:10.142013
| 2015-05-05T21:28:27
| 2015-05-05T21:28:27
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,424
|
r
|
deem_graph.R
|
# NOTE(review): rm(list = ls()) and setwd() in a script are side effects on
# the caller's session — consider removing if this is ever sourced elsewhere.
rm(list = ls())
library(ggplot2)
library(grid)
library(cowplot)
library(igraph)
setwd('~/Google Drive/Data/influenza_HA_evolution/')
# HA sites belonging to the Deem epitope definition; all other sites of the
# 550-site protein are labeled 'None'.
deem<-c(122, 124, 126, 131, 133, 137, 140, 142, 144, 145, 128, 155, 156, 157, 158, 159, 189, 192, 193, 197, 45, 50, 273, 275, 278, 312, 121, 172, 173, 201, 207, 219, 226, 227, 229, 246, 57, 62, 75, 78, 83, 92, 260, 262, 3, 5, 25, 33, 49, 106, 202, 222, 225, 271)
# Edge list of the site-contact network (columns n1, n2).
b.edges <- read.table('epitope_data/b_edges.dat', head=T, sep=',')
b.clusters <- rep('None', 550)
b.clusters[deem] <- 'Deem'
# Vertex table restricted to sites that actually appear in the edge list,
# ordered by site number so vertex attributes line up with igraph's ordering.
b.vertices <- data.frame(site=1:550, ep=b.clusters)
b.vertices <- b.vertices[unique(c(b.edges$n1, b.edges$n2)), ]
b.vertices <- b.vertices[order(b.vertices$site), ]
b.network <- graph.data.frame(b.edges, vertices=b.vertices, directed=F)
# Pull a ggplot2-like color for the Deem class from latticeExtra.
require(latticeExtra)
mycols <- dput(ggplot2like(n = 5, h.start = 0, l = 65)$superpose.line$col)
cbbPalette <- c('Deem' = mycols[1], 'None' = "#000000")
# Drop loops/multi-edges, then color vertices by epitope membership.
b <- simplify(b.network)
V(b)$color[V(b)$ep == 'Deem'] <- cbbPalette[1]
V(b)$color[V(b)$ep == 'None'] <- cbbPalette[2]
E(b)$weight <- seq(ecount(b))
E(b)$curved <- 0.1
# Render the network to PDF with minimal margins.
pdf('analysis/deem_network.pdf', height=7, width=7, useDingbats = F)
par(mar=c(0,0,0,0))
plot(b,
     #layout=layout.circle,
     edge.width=0.2,
     vertex.size = 3,
     vertex.frame.color= "white",
     vertex.label.color = "white",
     vertex.label.cex = 0.2,
     vertex.label.family = "sans",
     edge.color="black")
dev.off()
|
fa60a7b3d4c8ce7f983077a4616cf2f3adfd5d05
|
c427137e6e786c287e9f5fc92909d5ac6d2593b8
|
/man/NCEP.bind.Rd
|
ff0e0a2dd9d23856f890fa6cf3051c12617e2d84
|
[] |
no_license
|
cran/RNCEP
|
28cb74be5aaab66eeca6ca855d7e4ae48728bce1
|
cb2027905b385e4678c648bfd5e8f60d7d0f3f0b
|
refs/heads/master
| 2021-01-10T21:01:00.585187
| 2020-05-27T04:30:22
| 2020-05-27T04:30:22
| 17,682,750
| 2
| 2
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,900
|
rd
|
NCEP.bind.Rd
|
\name{NCEP.bind}
\alias{NCEP.bind}
\title{ Bind Two 3-D Arrays of Weather Data Along the Prime Meridian }
\description{
This function is applied automatically by \code{\link{NCEP.gather}} whenever it is needed. It binds the results from either side of the Prime Meridian.
}
\usage{
NCEP.bind(data.west, data.east)
}
\arguments{
\item{data.west}{ a 3-D array of weather data, as returned by \code{ NCEP.gather }, from the West side of the Prime Meridian }
\item{data.east}{ a 3-D array of weather data, as returned by \code{ NCEP.gather }, from the East side of the Prime Meridian }
}
\details{
This function is applied automatically by \code{\link{NCEP.gather}} whenever it is needed.
The arrays specified in \code{data.west} and \code{data.east} must have the same latitude and datetime intervals and extents.
This function depends on the package \pkg{\link{abind}}.
The maximum longitudinal extent of the NCEP dataset is 357.5 not 360.
}
\value{
A 3-D array with the same latitude and datetime intervals and extent as \code{data.west} and \code{data.east}. Row names (i.e. longitudes) for data from the west of the Prime Meridian are converted from positive to negative values.
}
\references{ Kemp, M. U., van Loon, E. E., Shamoun-Baranes, J., and Bouten, W. 2011. RNCEP:global weather and climate data at your fingertips. -- Methods in Ecology and Evolution. DOI:10.1111/j.2041-210X.2011.00138.x. }
\author{ Michael U. Kemp \email{mukemp+RNCEP@gmail.com} }
\examples{
\dontrun{
library(RNCEP)
## Using NCEP.gather(), query weather data from both sides of
## the Prime Meridian ##
## NCEP.bind() is applied automatically ##
wx <- NCEP.gather(variable='air', level=925,
months.minmax=10, years.minmax=2003,
lat.southnorth=c(50, 52.5), lon.westeast=c(-2.5, 2.5),
reanalysis2=FALSE, return.units=TRUE)
}
}
|
324c619e1c772dccd93914ee076b92888d41ff1e
|
4201e9b754760dc35fc0aeef9df5a8b9d801c47f
|
/bin/R-3.5.1/src/library/utils/man/setRepositories.Rd
|
f6e8bfe71314a3a5b84359be1fe8ce3f162842c1
|
[
"MIT",
"LicenseRef-scancode-unknown-license-reference",
"GPL-2.0-only"
] |
permissive
|
lifebit-ai/exomedepth
|
cbe59cb7fcf2f9183d187f8d466c6620fb1a0c2e
|
5a775ae5e2a247aeadc5208a34e8717c7855d080
|
refs/heads/master
| 2020-03-27T12:55:56.400581
| 2018-10-11T10:00:07
| 2018-10-11T10:00:07
| 146,578,924
| 0
| 0
|
MIT
| 2018-08-29T09:43:52
| 2018-08-29T09:43:51
| null |
UTF-8
|
R
| false
| false
| 3,095
|
rd
|
setRepositories.Rd
|
% File src/library/utils/man/setRepositories.Rd
% Part of the R package, https://www.R-project.org
% Copyright 1995-2015 R Core Team
% Distributed under GPL 2 or later
\name{setRepositories}
\alias{setRepositories}
\title{Select Package Repositories}
\description{
Interact with the user to choose the package repositories to be used.
}
\usage{
setRepositories(graphics = getOption("menu.graphics"),
ind = NULL, addURLs = character())
}
\arguments{
\item{graphics}{Logical. If true, use a graphical list: on Windows or
macOS GUI use a list box, and on a Unix-alike if \pkg{tcltk} and an X
server are available, use Tk widget. Otherwise use a text
\code{\link{menu}}.}
\item{ind}{\code{NULL} or a vector of integer indices, which have the
same effect as if they were entered at the prompt for
\code{graphics = FALSE}.}
\item{addURLs}{A character vector of additional URLs: it is often
helpful to use a named vector.}
}
\details{
The default list of known repositories is stored in the file
\file{\var{\link{R_HOME}}/etc/repositories}.
That file can be edited for a site, or a user can have a personal copy
in the file pointed to by the environment variable
\env{R_REPOSITORIES}, or if this is unset or does not exist,
in \file{\var{HOME}/.R/repositories}, which will take precedence.
A Bioconductor mirror can be selected by setting
\code{\link{options}("BioC_mirror")}, e.g.\sspace{}via
\code{\link{chooseBioCmirror}} --- the default value is
\samp{"https://bioconductor.org"}.
The items that are preselected are those that are currently in
\code{options("repos")} plus those marked as default in the
list of known repositories.
The list of repositories offered depends on the setting of option
\code{"pkgType"} as some repositories only offer a subset of types
(e.g., only source packages or not macOS binary packages).
Further, for binary packages some repositories (notably R-Forge) only
offer packages for the current or recent versions of \R.
(Type \code{"both"} is equivalent to \code{"source"}.)
Repository \samp{CRAN} is treated specially: the value is taken from
the current setting of \code{getOption("repos")} if this has an
element \code{"CRAN"}: this ensures mirror selection is sticky.
This function requires the \R session to be interactive unless
\code{ind} is supplied.
}
\note{
This does \strong{not} set the list of repositories at startup: to do
so set \code{\link{options}(repos =)} in a start up file (see help topic
\link{Startup}).
}
\value{
This function is invoked mainly for its side effect of updating
\code{options("repos")}. It returns (invisibly) the previous
\code{repos} options setting (as a \code{\link{list}} with component
\code{repos}) or \code{\link{NULL}} if no changes were applied.
}
\seealso{
\code{\link{chooseCRANmirror}}, \code{\link{chooseBioCmirror}},
\code{\link{install.packages}}.
}
\examples{\dontrun{
setRepositories(addURLs =
c(CRANxtras = "http://www.stats.ox.ac.uk/pub/RWin"))
}}
\keyword{ utilities }
|
c2510df5b0d93f32e52cc7e0c0b2c3991ca89ccd
|
6f1273c7954481281ef6a92de02dc39c5754eec5
|
/Manifesto/Ashley/2015/decagon_cleaning_2015.R
|
f42d795c7cc38d89877305e198cee44215b70203
|
[] |
no_license
|
HallettLab/usda-climvar
|
e825894ed5ab10e2a349375ba2ebe4a249feebac
|
af566f66be74ce61a33f6249ca7fc422640a6bb9
|
refs/heads/master
| 2023-06-25T17:41:18.256845
| 2023-06-13T20:41:14
| 2023-06-13T20:41:14
| 137,396,286
| 3
| 1
| null | null | null | null |
UTF-8
|
R
| false
| false
| 7,275
|
r
|
decagon_cleaning_2015.R
|
require(gdata)
library(tidyr)
library(dplyr)
library(lubridate)
library(ggplot2)
## Set working directory
setwd("~/Dropbox/ClimVar/DATA/Decagon data")
##A FUNCTION TO IMPORT AND CLEAN DECAGON FILES##
# Read and tidy one Decagon soil-moisture logger export (.xls).
# The first two data rows are dropped (presumably port/unit header rows —
# confirm against a raw file). The plot number is recovered from the name of
# the first column, and sensor columns are renamed to the subplot codes
# B, C, F, G, XC. `time` is replaced by a fractional julian day.
# NOTE(review): tidyr::extract_numeric() is deprecated; parse_number() is the
# modern equivalent if this is ever revised.
cleanDecagon<-function(X){
  mydat<-read.xls(X, sheet=1, header=T, na.strings="#N/A!")
  mydf<-tbl_df(mydat[-c(1,2),])
  # The plot id is encoded in the spreadsheet's first column name.
  mydf$plot<-as.character(names(mydf[1]))
  names(mydf)=c("time", "B", "C", "F", "G", "XC", "plot")
  # Parse timestamps like "3/24/2017 1:35 PM" and derive calendar fields;
  # the cleaned tibble is the function's return value.
  mydf<-mydf%>%
    mutate(plot=extract_numeric(plot))%>%
    mutate(date=parse_date_time(time, "m%d%Y I%M p")) %>%
    mutate(year=year(date), month=month(date), day=day(date), hour=hour(date),julian=yday(date))%>%
    mutate(time=julian+(hour/24)) %>%
    mutate(date=as.character(date))
}
##LIST ALL DECAGON FILES##
allfilenames<-list.files()
# Fix: the original pattern ".xls" left the dot unescaped (matches any
# character); escape it so only names containing a literal ".xls" match.
excelfilenames<-as.matrix(subset(allfilenames, grepl("\\.xls", allfilenames)))
#double check if there is an excel version of the master; if so remove from list
#no master (in separate folder)
# Remove all 25 Mar 2017 downloads (PLOT1..PLOT16) from the list until the
# dates & data are confirmed with Caitlin. This replaces sixteen copy-pasted
# subset() calls that each excluded one file by its exact name.
excelfilenames <- subset(excelfilenames, !grepl("25Mar17", excelfilenames[, 1]))
##CLEAN ALL DECAGON FILES##
myupdates_list<-apply(excelfilenames, 1, cleanDecagon)
#create a single data frame of new files
myupdates_df <- tbl_df(do.call("rbind", myupdates_list))
##FORMAT FOR R##
#tidy myupdates_df: one row per timestamp x subplot, soil moisture in `sm`
dat<-myupdates_df
# Reshape the wide subplot columns (B through XC) into long format.
dat<-dat%>%
  gather(subplot, sm, B:XC)
# Attach the shelter treatment key (plot/subplot metadata), then coerce types.
key<-read.csv("Shelter_key.csv")
dat2<-tbl_df(merge(key, dat))
dat2$plot<-as.factor(as.character(dat2$plot))
dat2$sm<-as.numeric(dat2$sm)
#check for duplicate records (result printed for interactive inspection only)
duplicated(dat2)
duplicates<-dat2[duplicated(dat2),]
duplicates$year<-as.factor(as.character(duplicates$year))
levels(duplicates$year)
#there seems to be a problem with plot 10 from Mar 24 2017 data. For some reason, data coded as year 2000 and 2044. I will remove these & remove duplicates
dat2<- unique(dat2)
#remove year 2000 and 2044 from record
dat2<-dat2 %>% filter(year!="2000")%>%filter(year!="2044")
#check again for duplicates from same plot/time but with dif sm values
# (per the comment above, columns 1:12 exclude sm -- confirm column order)
duplicates3<-dat2[duplicated(dat2[,c(1:12)]),]
duplicates3$year<-as.factor(as.character(duplicates3$year))
#no dups, let's move on
# Export the 2014-2015 subset. NOTE(review): absolute Dropbox path is
# user-specific and will fail on other machines.
sm2015<-dat2 %>% filter(year=="2014"|year=="2015")
write.csv(sm2015, file ="~/Dropbox/ClimVar/DATA/Decagon data/ClimVar_sm_2015.csv")
# Soil moisture over time for every subplot, faceted shelterBlock x treatment.
pdf("smXsppXtrt.pdf")
biggraph<-ggplot(data=dat2, aes(x=time,
  y=sm, group=subplot, color=subplot)) + geom_line() + facet_grid(shelterBlock~treatment)
# The bare `biggraph` line only auto-prints interactively; the explicit
# print() below is what draws into the PDF when this script is source()d.
biggraph
print(biggraph)
dev.off()
# Same view restricted to the XC subplot, colored by treatment.
pdf("smXtrt_controlspp.pdf")
controldat<-subset(dat2, subplot=="XC")
controlgraph<-ggplot(data=controldat, aes(x=time,
  y=sm, color=treatment, group=treatment)) + geom_line() + facet_grid(~shelterBlock) +
  scale_y_continuous(breaks=c(seq(-.4,.8,.1)))
# Shelter blocks A and C only.
controlAC<-subset(controldat, shelterBlock=="A" | shelterBlock=="C")
controlAC$shelterBlock<-as.character(controlAC$shelterBlock)
controlgraph2<-ggplot(data=controlAC, aes(x=time,
  y=sm, color=treatment, group=treatment)) + geom_line() + facet_grid(~shelterBlock) +
  scale_y_continuous(breaks=c(seq(-.4,.8,.1)))
print(controlgraph)
print(controlgraph2)
dev.off()
# pull out focal plots, an
# Build the focal subset: drop subplot "C" and add fractional-year time axes
# (doy3 = fraction of year, doy4 = year + fraction) for plotting.
smdat2 <- dat2 %>%
  # filter(subplot == "B" | subplot == "G" | subplot == "F" ) %>%
  # filter(subplot == "XC" | subplot == "C") %>%
  # filter(subplot == "XC" | subplot == "B") %>%
  #filter( subplot == "G" | subplot == "F" ) %>%
  filter(subplot !="C") %>%
  tbl_df() %>%
  mutate(doy3= julian/365,
         doy4 = year + doy3)
# Mean soil moisture per treatment x shelter block (printed, not stored).
smdat2 %>%
  group_by(treatment, shelterBlock) %>%
  summarize(meansm = mean(sm, na.rm=T))
getOption("device")
# Exploratory time-series plots at several groupings.
ggplot(subset(smdat2), aes(x=doy4, y=sm, color = subplot, group = interaction(subplot, plot))) +
  geom_line(size = .5) + theme_bw() + facet_wrap(~treatment) #  facet_grid(shelterBlock~treatment)
ggplot(subset(smdat2), aes(x=doy4, y=sm, color = subplot)) + geom_line() + facet_wrap(~treatment)
ggplot(subset(smdat2, subplot != "B"), aes(x=doy4, y=sm, color = treatment)) + geom_line() + facet_wrap(~subplot)
# aggregate across treatments
smdat3 <- smdat2 %>%
  group_by(subplot, treatment, year, doy4, doy3) %>%
  summarize(sm = mean(sm, na.rm=T))
dat2%>%
  group_by(treatment, shelterBlock)%>%
  summarise(meansm=mean(sm, na.rm=T))
ggplot(subset(smdat3), aes(x=doy4, y=sm, color = treatment, group = (treatment))) +
  geom_line(size = .5) + theme_bw() + facet_wrap(~subplot) #  facet_grid(shelterBlock~treatment)
ggplot(subset(smdat2), aes(x=doy4, y=sm, color = treatment, group = (treatment))) +
  geom_line(size = .5) + theme_bw() + facet_wrap(~plot) #  facet_grid(shelterBlock~treatment)
ggplot(subset(smdat2), aes(x=doy4, y=sm, color = subplot, group = (subplot))) +
  geom_line(size = .5) + theme_bw() + facet_wrap(subplot~plot) #  facet_grid(shelterBlock~treatment)
#really weird data in plot 10, subplot B - did a sensor malfunction? remove plot 10, subplot B, year 2016 from the dataset
# NOTE(review): mutate_all applies the "< 0 becomes NA" replacement to EVERY
# column (dates, julian, year, ...), not just sm -- confirm this is intended.
smdat4<-smdat2 %>%
  mutate_all(.funs = function(x) replace(x, which(x < 0 ), NA))
smdat5 <- smdat4 %>%
  group_by(subplot, treatment, year, doy4, doy3) %>%
  summarize(sm = mean(sm, na.rm=T))
#create a plot showing sm data by treatment
ggplot(subset(smdat5), aes(x=doy4, y=sm, color = treatment, group = (treatment))) +
  geom_line(size = .5) + theme_bw() #  facet_wrap(~subplot) #  facet_grid(shelterBlock~treatment)
# Mean soil moisture by treatment after the negative-value cleanup.
smdat4 %>%
  group_by(treatment) %>%
  summarize(meansm = mean(sm, na.rm=T))
#create a new variable for growing season?
# BUG FIX: the original used `doy4 %in% 2014:2015.5`, but the `:` operator
# generates a discrete step-1 sequence (2014:2015.5 is c(2014, 2015), and
# 2015.8:2016.5 is just 2015.8), so fractional day-of-year values never
# matched and nearly every row was labelled "summer". Explicit range
# comparisons implement the intended season windows.
smdat4 <- smdat4 %>%
  mutate(season = ifelse(doy4 >= 2014   & doy4 <= 2015.5, "one",
                  ifelse(doy4 >= 2015.8 & doy4 <= 2016.5, "two",
                  ifelse(doy4 >= 2016.8 & doy4 <= 2017.5, "three", "summer"))))
# Coefficient of variation, expressed as a percentage.
CV <- function(x){(sd(x)/mean(x))*100}
# Within-group CV of soil moisture for each treatment/block/subplot/year cell.
moistCV<-aggregate(sm ~ treatment*shelterBlock*subplot*year, data= smdat4, FUN = CV)
colnames(moistCV)[colnames(moistCV)=="sm"] <- "sm_cv"
|
131f19cc8590893b8da5463843b42c35d092fe92
|
ca96aa69a485886e69efa306a21d06d8769cc6d1
|
/R/get_prism_monthlys.R
|
a41ff4461a6f150c3c8a51316d279d6046379024
|
[] |
no_license
|
yangxhcaf/prism
|
46b14662b57d1651e41e85b4d9843bfb21c0480f
|
55895dcf2360a777e204c8d6ccb0dd1a196d3f5a
|
refs/heads/master
| 2020-06-04T04:15:40.348812
| 2018-12-10T23:03:03
| 2018-12-10T23:03:03
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 3,629
|
r
|
get_prism_monthlys.R
|
#' Download monthly prism data
#'
#' Download monthly data from the prism project at 4km grid cell resolution for
#' precipitation, mean, min, and max temperature
#'
#' @inheritParams get_prism_dailys
#'
#' @param years a valid numeric year, or vector of years, to download data for.
#' If no month is specified, annual averages for those years will be downloaded.
#'
#' @param mon a valid numeric month, or vector of months.
#'
#' @param keepZip if TRUE, leave the downloaded zip files in your 'prism.path';
#' if FALSE, they will be deleted
#'
#' @details Data are available from 1891 until 2014; however, you have to
#' download all data for years prior to 1981.
#' Therefore, if you enter a vector of years that bounds 1981,
#' you will automatically download all data for all years in the vector.
#' If the "all" parameter is set to TRUE, it will override any mon
#' entered and download all data. Data will be downloaded for all mon
#' in all the years in the vectors supplied. You must make sure
#' that you have set up a valid download directory.
#' This must be set as options(prism.path = "YOURPATH")
#'
#' @examples \dontrun{
#' ### Get all the data for January from 1990 to 2000
#' get_prism_monthlys(type="tmean", years = 1990:2000, mon = 1, keepZip=FALSE)
#' }
#'
#' @export
# NOTE(review): the documentation above implies `mon` is optional, but the
# check below errors whenever `mon` is not numeric, so a month must always be
# supplied -- confirm intended behavior. Likewise the docs mention 1891 while
# the code enforces 1895.
get_prism_monthlys <- function(type, years = NULL, mon = NULL, keepZip = TRUE){
  ### parameter and error handling
  path_check()
  type <- match.arg(type, c("ppt", "tmean", "tmin", "tmax"))
  ### Check mon
  if(!is.numeric(mon)) {
    stop("You must enter a numeric month between 1 and 12")
  }
  if(any(mon < 1 | mon > 12)) {
    stop("You must enter a month between 1 and 12")
  }
  ### Check year type
  if(!is.numeric(years)){
    stop("You must enter a numeric year from 1895 onwards.")
  }
  ### Check year range (original comment mislabeled this as a month check)
  if(any(years < 1895)){
    stop("You must enter a year from 1895 onwards.")
  }
  # Pre-1981 data is only served as whole-year bundles; post-1981 data is
  # fetched per year-month, so build the two URI sets separately.
  pre_1981 <- years[years<1981]
  post_1981 <- years[years>=1981]
  uris_pre81 <- vector()
  uris_post81 <- vector()
  if(length(pre_1981)){
    uris_pre81 <- sapply(pre_1981,function(x){paste("http://services.nacse.org/prism/data/public/4km",type,x,sep="/")})
  }
  if(length(post_1981)){
    # One URI per (year, month) combination, e.g. "199001".
    uri_dates_post81 <- apply(expand.grid(post_1981,mon_to_string(mon)),1,function(x){paste(x[1],x[2],sep="")})
    uris_post81 <- sapply(uri_dates_post81,function(x){paste("http://services.nacse.org/prism/data/public/4km",type,x,sep="/")})
  }
  download_pb <- txtProgressBar(min = 0, max = length(uris_post81) + length(uris_pre81) , style = 3)
  counter <- 0
  ### Handle post 1980 data
  if(length(uris_post81) > 0){
    for(i in 1:length(uris_post81)){
      prism_webservice(uris_post81[i],keepZip)
      setTxtProgressBar(download_pb, i)
    }
  }
  # Continue the progress bar where the post-1981 downloads left off.
  counter <- length(uris_post81)+1
  ### Handle pre 1981 files
  if(length(uris_pre81) > 0){
    pre_files <-vector()
    for(j in 1:length(uris_pre81)){
      pre_files[j] <- prism_webservice(uris_pre81[j],keepZip,returnName = T)
      setTxtProgressBar(download_pb, counter)
      counter <- counter + 1
    }
    ### Process pre 1981 files
    # Strip the file extension: keep the part before the first "." of each name.
    pre_files <- unlist(strsplit(pre_files,"\\."))
    pre_files <- pre_files[seq(1,length(pre_files),by =2)]
    for(k in 1:length(pre_files)){
      # NOTE(review): `yr` is extracted but never used below.
      yr <- regmatches(pre_files[k],regexpr('[0-9]{4}',pre_files[k]))
      monstr <- mon_to_string(mon)
      # Derive the per-month file names from the "_all" yearly bundle name.
      to_split <- sapply(monstr,function(x){
        gsub(pattern = "_all",replacement = x,x = pre_files[k])
      } )
      process_zip(pre_files[k], to_split)
    }
  }
  close(download_pb)
}
|
9b2c2c611a5fe2b55150d2ef7fe08e50eafec656
|
9bbb1ffb23af6057d6af05a7a9e1a8818830c634
|
/R/response.R
|
890d3ff4bd3d42525dd630acb680be0c13d29393
|
[
"MIT"
] |
permissive
|
aaronwolen/gnr
|
ea9cde26615edfcaea0035a9832fe9f5bc1a336c
|
c129520514f7f15d9f0b4f518fcf5b2e1dd84a7b
|
refs/heads/master
| 2020-05-29T16:13:08.248190
| 2019-05-24T19:10:59
| 2019-05-24T19:28:46
| 189,242,311
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 877
|
r
|
response.R
|
#' Process GeneNetwork API responses
#'
#' Convert json content of API responses to a [tibble][tibble::tibble-package].
#'
#' @param res HttpResponse object
#' @noRd
process_response <- function(res) {
  stopifnot(inherits(res, "HttpResponse"))
  # The API currently returns HTTP 200 even for invalid calls, so raising
  # for status is necessary but not sufficient.
  res$raise_for_status()
  # Until proper status codes are returned, inspect the payload for an
  # "errors" field and surface the first entry as an R error.
  payload <- parse_json(res)
  errs <- payload$errors
  if (!is.null(errs)) {
    stop_glue(
      "GeneNetwork error - {title}\n  {detail}",
      .data = errs[[1]]
    )
  }
  parsed_tbl <- jsonlite::fromJSON(res$parse("UTF-8"), simplifyVector = TRUE)
  tibble::new_tibble(parsed_tbl, nrow = nrow(parsed_tbl))
}
# Parse an HttpResponse body as JSON without simplifying lists to vectors.
parse_json <- function(res) {
  stopifnot(inherits(res, "HttpResponse"))
  body_text <- res$parse("UTF-8")
  jsonlite::fromJSON(body_text, simplifyVector = FALSE)
}
|
16ba3d5b98d7dd69f51fc5ba2b1b8b14fdca32ba
|
ffdea92d4315e4363dd4ae673a1a6adf82a761b5
|
/data/genthat_extracted_code/recluster/examples/recluster.hist.Rd.R
|
e29d707e26d23279052ddeb67f2abe3a6f0d45c2
|
[] |
no_license
|
surayaaramli/typeRrh
|
d257ac8905c49123f4ccd4e377ee3dfc84d1636c
|
66e6996f31961bc8b9aafe1a6a6098327b66bf71
|
refs/heads/master
| 2023-05-05T04:05:31.617869
| 2019-04-25T22:10:06
| 2019-04-25T22:10:06
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 244
|
r
|
recluster.hist.Rd.R
|
library(recluster)
### Name: recluster.hist
### Title: Histogram with tied and zero values
### Aliases: recluster.hist
### Keywords: dissimilarity

### ** Examples

# Compute a dissimilarity matrix for the bundled example data, then plot the
# histogram of dissimilarities (highlighting ties and zeros).
data(datamod)
simpdiss<- recluster.dist(datamod)
recluster.hist(simpdiss)
|
35e0764ac673acdcf42cf568d081db3a31583bb5
|
ffdea92d4315e4363dd4ae673a1a6adf82a761b5
|
/data/genthat_extracted_code/LICORS/examples/estimate_state_probs.Rd.R
|
57667bda6e3c5023d8b680148d7ee934ac997b74
|
[] |
no_license
|
surayaaramli/typeRrh
|
d257ac8905c49123f4ccd4e377ee3dfc84d1636c
|
66e6996f31961bc8b9aafe1a6a6098327b66bf71
|
refs/heads/master
| 2023-05-05T04:05:31.617869
| 2019-04-25T22:10:06
| 2019-04-25T22:10:06
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 302
|
r
|
estimate_state_probs.Rd.R
|
library(LICORS)
### Name: estimate_state_probs
### Title: Estimate conditional/marginal state probabilities
### Aliases: estimate_state_probs
### Keywords: distribution multivariate nonparametric

### ** Examples

# Build a 1000 x 10 matrix of uniform random weights, normalize it with the
# package's normalize(), and estimate the state probabilities.
WW <- matrix(runif(10000), ncol = 10)
WW <- normalize(WW)
estimate_state_probs(WW)
|
9f94acece8b1270a10b78309a24e5536f2688c26
|
257561e9d37684a7c9848ac26ae5f612b6a3b510
|
/demo/click_animation.R
|
aea0d25b0f095cb460ac5daf9f41099b2e806e22
|
[] |
no_license
|
sbarman-mi9/rthreejs
|
1419dcb34c7af03a76966ea04e6ee62f58d5087e
|
36aabb8035daabfeba58f39e6c8e5e85a8f2f14a
|
refs/heads/master
| 2021-01-24T07:28:58.537804
| 2017-05-24T20:10:47
| 2017-05-24T20:10:47
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,680
|
r
|
click_animation.R
|
# This example decomposes the `LeMis` character co-occurence network
# into six clusters using the igraph `cluster_louvain` method. The
# example displays a reduced network consisting of the most (globally)
# central character from each cluster, as measured by Page Rank.
#
# When a user clicks on one of the vertices, its corresponding cluster
# is expanded to show all vertices in that cluster.
#
# See `demo("click_animation2", package="threejs") for a related example.
library(threejs)
data(LeMis)
N <- length(V(LeMis))
# Vertex page rank values (a measure of network centrality for each vertex)
pr <- page_rank(LeMis)$vector
# order the page rank values (note: seq(1:N) below is equivalent to 1:N)
i <- order(pr, decreasing=TRUE)
# Vertex cluster membership
cl <- unclass(membership(cluster_louvain(LeMis)))
# Find the index of the highest page rank vertex in each cluster
idx <- aggregate(seq(1:N)[i], by=list(cl[i]), FUN=head, 1)$x
# Create a default force-directed layout for the whole networl
l1 <- norm_coords(layout_with_fr(LeMis, dim=3))
# Collapse the layout to just the idx vertices: every vertex sits on top of
# its cluster's representative.
l0 <- Reduce(rbind,Map(function(i) l1[idx[i],], cl))
# Create grouped vertex colors, setting all but idx vertices transparent
col <- rainbow(length(idx), alpha=0)[cl]
col[idx] <- rainbow(length(idx), alpha=1)
# animation layouts, one for each of the idx vertices, and
# animation color schemes, one scheme for each idx vertex
# NOTE(review): the local `c` below shadows base::c inside this function.
click <- Map(function(i)
{
  x <- l0
  x[cl == i, ] <- l1[cl == i, ]
  c <- col
  c[cl == i] <- rainbow(length(idx), alpha=1)[i]
  list(layout=x, vertex.color=c)
}, seq(idx))
names(click) <- paste(idx)
# Wrapping in () forces the htmlwidget to print (render) when source()d.
(graphjs(LeMis, layout=l0, click=click, vertex.color=col, fps=20, font.main="96px Arial"))
|
b2a06ae08e45e6be17e117e67864540b731568de
|
1721a042328b7035f6a6c401d9d8ba3edc538dce
|
/12_StrategiesReconstruction/Discrete_mapping.R
|
eb30a4f202dbf9845645daebe0ec78a12924e967
|
[] |
no_license
|
ftheunissen/Woodpecker
|
dbfd1bb15212574b8083543ef465c51f768954f2
|
0521927adfbf73ecdd4dcf4e3484ae949a1858b7
|
refs/heads/master
| 2022-10-16T10:59:57.410738
| 2020-06-17T16:53:08
| 2020-06-17T16:53:08
| 273,028,370
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 3,550
|
r
|
Discrete_mapping.R
|
#Change working directory to folder with your files (change accordingly)
setwd("../12_StrategiesReconstruction/")
# NOTE(review): rm(list=ls()) wipes the whole workspace of whoever sources
# this script -- consider removing for shared use.
rm(list=ls())#empty the workspace

#Load relevant packages
library(ape)
library(geiger)
library(phytools)

# Read in tree and dataset
tree <- read.nexus("TreePicidae.txt") # the tree
data <- read.csv("Full_Species_data.csv", header=T, row.names = 1)
# reorder rows to match the tree's tip labels
data <- data[tree$tip.label,]
# #Check that names match
name.check(tree, data)

# Extract variable of interest to reconstruct: here drumming strategy is this variables (discrete variables with 6 states)
Strategies <- data$AcousticClust

# Test models with different transistion probability matrices
fitER<-ace(Strategies, tree,model='ER', type='discrete')
fitSYM<-ace(Strategies, tree,model='SYM', type='discrete') # Bsed on Log-likelihood, this model (SYM) is better than ER

############## plot tree
# with tip pie charts inside
#pdf("Reconstructed_Strategies_dotsInside.pdf")
par(lwd=0.5)
plot(tree, type='fan', cex=0.5, label.offset=1.5, x.lim = c(-30,30), y.lim = c(-40,40), edge.width = 0.5)
# One color per strategy state, named by state.
cols<-setNames(palette()[1:length(unique(Strategies))],sort(unique(Strategies)))
nodelabels(node=1:tree$Nnode+Ntip(tree),pie=fitSYM$lik.anc,piecol=cols,cex=0.3) #pie charts at nodes
tiplabels(pie=to.matrix(Strategies,sort(unique(Strategies))),piecol=cols,cex=0.25, offset= 0.75) # Pie charts at tips
#dev.off()

# or with tip pie charts outside
# plot tree
#pdf("Reconstructed_Strategies_dotsOutside.pdf")
par(lwd=0.5)
plot(tree, type='fan', cex=0.5, label.offset=0.5, x.lim = c(-30,30), y.lim = c(-40,40), edge.width = 0.5)
cols<-setNames(palette()[1:length(unique(Strategies))],sort(unique(Strategies)))
nodelabels(node=1:tree$Nnode+Ntip(tree),pie=fitSYM$lik.anc,piecol=cols,cex=0.3) #pie charts at nodes
tiplabels(pie=to.matrix(Strategies,sort(unique(Strategies))),piecol=cols,cex=0.25, offset= 19) # Pie charts at tips
#dev.off()

fitSYM$lik.anc # outputs scaled likelihoods of each ancestral states
nodelabels() # CAREFUL: nodelabels start at 93 --> node labelled '93' is the first line in 'fitSYM$lik.anc'
branching.times(tree) #checkup line.

# Another step is to get a quantification of the phylogenetic signal reconstructed with from the acoustic strategies
# Using geiger's fitDiscrete function for this (makes the same reconstruction as method above, but it gives Lambda in addition)
names(Strategies) <- rownames(data)
fitDiscrete(tree,Strategies,model = 'SYM', transform="lambda")

# Creating Sup Fig 4a with tip pie charts outside ## note: issues creating a pdf from console --> run what's below and export as pdf from plot window
# need to also run "Mapping_Accoustic_Phylo.R" for isolated variable (nb #38) before, to get the obj$tree object to plot
#pdf("Reconstructed_Strategies_dotsOutside.pdf")
par(lwd=0.5)
plot(tree, type='fan', cex=0.5, label.offset=0.5, x.lim = c(-30,30), y.lim = c(-40,40), edge.width = 0.5, edge.color = "transparent", tip.color = "transparent", rotate.tree = 360/92) #only way to integrate tip 'round dots' on tree. Careful, this alignement is not good --> need to rotate by one species, hence 360/92
cols<-setNames(palette()[1:length(unique(Strategies))],sort(unique(Strategies)))
#nodelabels(node=1:tree$Nnode+Ntip(tree),pie=fitSYM$lik.anc,piecol=cols,cex=0.3) #pie charts at nodes
tiplabels(pie=to.matrix(Strategies,sort(unique(Strategies))),piecol=cols,cex=0.25, offset= 16) # Pie charts at tips
# NOTE(review): obj and obj2 are created by "Mapping_Accoustic_Phylo.R"
# (see comment above); this plot fails if that script has not been run.
plot(obj$tree,type="fan",fsize=0.55,ftype="i",add=TRUE,
     xlim=obj2$x.lim,ylim=obj2$y.lim,lwd=3,colors=obj$cols)
#dev.off()
|
7fb4781933d32655ea0d0e856d70466638dbe2d0
|
ef121d82b4f5f0ab4f4872d17db50dea22252c81
|
/Complaint_Analysis_Project/Results/ac_analytics_insights/what_are_their_excuses_code (mehmet).R
|
f10361e06184bf181dff3112a9bcbfd2f77b15ec
|
[] |
no_license
|
indra-ch/ac-datadive-2018
|
d9725f3127549ba4fe371dd36b86776b467307a4
|
f2406899368cb17ec9fdc8c9579e8b63a5fb1e0d
|
refs/heads/master
| 2020-03-21T11:15:27.106968
| 2018-06-24T15:28:58
| 2018-06-24T15:28:58
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 182
|
r
|
what_are_their_excuses_code (mehmet).R
|
# Mosaic plot of IAM against the first stated reason for ineligibility,
# restricted to ineligible complaints (ELIGIBLE == 0) with a known reason.
# NOTE(review): assumes `acc` is a data.table already loaded in the session;
# it is not defined in this file.
mosaicplot(table(acc[ELIGIBLE==0 & If_No_Eligibility_Why_1!='Unknown',.(IAM,If_No_Eligibility_Why_1)]),las=2,ylab="Reason for ineligibility",main="What are their excuses?",border=NA)
|
1cb8ee9976b6dd9db61e2aac52a48850b079e98c
|
7ae5b8df405d79d65bcf7280bd428239cc6d9dda
|
/workshop_packages_to_install.R
|
ee5d7465350d85c81a66ee5c3636f1abf5821214
|
[] |
no_license
|
rladiestunis/6th-Meetup-Parameterization-and-generalization-of-R-Markdown-documents
|
addbc64810c00911c2d8025f3a70d36c98a6ff68
|
78f17798b14008b5376bd7f2dbe9f7c0519b767c
|
refs/heads/master
| 2022-12-05T18:51:16.278365
| 2020-08-29T12:44:15
| 2020-08-29T12:44:15
| 291,265,288
| 1
| 1
| null | null | null | null |
UTF-8
|
R
| false
| false
| 437
|
r
|
workshop_packages_to_install.R
|
###### CODE WORKING FOR R VERSION <= 3.6.3
# pacman::p_load installs any missing packages and then attaches them all.
install.packages("pacman")
pacman::p_load("yaml","data.table","stringr","readr","config","ggthemes",
               "ggplot2","prettydoc","rmarkdown","scales","tidyverse","dplyr",
               "kableExtra","RCurl","EQL","kdensity")
###### INSTALL PrescRiptions from source
# Installs the workshop's local source tarball (path is relative to the
# project root).
install.packages("05_PrescRiptions_project/packages/PrescRiptions_0.2.5.tar.gz", repos = NULL, type = "source")
|
10e5584eaab8f7392d8e0205c0031097d1495dfe
|
cd181d1f57308093a0142a6f3630db006bda2c1d
|
/mean_entry.R
|
bb8684f9ce9e185a348c88b3ad95c552e471d5ac
|
[] |
no_license
|
CoMoS-SA/agriLOVE
|
717df7d531faacdc361360f53613af93595716a0
|
91153495a29dd2cba938df64cf745daacf398b0f
|
refs/heads/main
| 2023-08-26T20:09:25.028184
| 2021-10-13T15:32:58
| 2021-10-13T15:32:58
| 416,800,601
| 1
| 1
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,259
|
r
|
mean_entry.R
|
# Handle a "rebound": the cell at row j of the global `cells_on_sale` table is
# re-occupied by copying the state of a randomly drawn donor cell. This
# function works entirely through global simulation state (`<<-`): it reads
# t, p, x, y, world, and writes RDint, sales, wealth, revenues, tot_cost, L,
# reborn, rebound_counter, plus the donor coordinates ii/kk.
mean_entry_f <- function(j){
  print(paste("Rebound at time", t, "cell", cells_on_sale[j,"row"],cells_on_sale[j,"col"]))
  # Rejection-sample a donor cell with positive wealth that is not of
  # world type 0.
  repeat{ # pick up a non-forest random cell with positive wealth
    ii <<- sample(1:x,1)
    kk <<- sample(1:y,1)
    if(wealth[ii,kk,t,p]>0 & world[ii,kk,t,p]!=0){
      break
    }
  }
  # Copy the donor cell's state into the rebounding cell.
  RDint[cells_on_sale[j,"row"],cells_on_sale[j,"col"],t,p] <<- RDint[ii,kk,t,p]
  sales[cells_on_sale[j,"row"],cells_on_sale[j,"col"],t,p] <<- sales[ii,kk,t,p]
  wealth[cells_on_sale[j,"row"],cells_on_sale[j,"col"],t,p] <<- wealth[ii,kk,t,p]
  revenues[cells_on_sale[j,"row"],cells_on_sale[j,"col"],t,p] <<- revenues[ii,kk,t,p]
  tot_cost[cells_on_sale[j,"row"],cells_on_sale[j,"col"],t,p] <<- tot_cost[ii,kk,t,p]
  # NOTE(review): unlike the lines above, this assigns the target cell's own
  # L value back to itself (the RHS does not index the donor ii,kk) -- this is
  # a no-op and looks like a copy/paste slip; confirm intended behavior.
  L[cells_on_sale[j,"row"],cells_on_sale[j,"col"],t,p] <<- L[cells_on_sale[j,"row"],cells_on_sale[j,"col"],t,p]
  # Mark the cell as reborn this step.
  reborn[cells_on_sale[j,"row"],cells_on_sale[j,"col"],t,p] <<- 1
  #update rebound counter
  rebound_counter[p] <<- rebound_counter[p] +1
}
|
d0fd4a1c1941d1e12b688aa2391257d1dc1c2452
|
6044948fedc5f0b304e578e10e4f764cb6d5400f
|
/cachematrix.R
|
0e28445d90c172a1eb5aa3702c7ae0cec45c56fd
|
[] |
no_license
|
CourseraRcourse/ProgrammingAssignment2
|
4a6ffdf8f6551ec863c9ca723287c9fbbc22b16d
|
8c37d3576b111671a9e17036b1b9d195fa6350cd
|
refs/heads/master
| 2021-01-22T07:32:26.495328
| 2015-02-22T23:48:05
| 2015-02-22T23:48:05
| 31,186,048
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 764
|
r
|
cachematrix.R
|
# ProgrammingAssignment2
# Create a matrix wrapper that can cache its inverse. Returns a list of
# accessor closures:
#   set(x)    : store a new matrix and invalidate any cached inverse
#   get()     : return the stored matrix
#   setinv(i) : cache the inverse
#   getinv()  : return the cached inverse (NULL until one has been cached)
makeCacheMatrix <- function(mtx = matrix()) {
  inverse <- NULL
  set <- function(x) {
    mtx <<- x
    inverse <<- NULL  # a new matrix invalidates the cache
  }
  get <- function() mtx
  setinv <- function(inv) inverse <<- inv
  getinv <- function() inverse
  list(set = set, get = get, setinv = setinv, getinv = getinv)
}
###this is the code to make a solve of the matrix
# Return the inverse of the matrix held by `mtx` (a makeCacheMatrix object),
# computing it with solve() on first use and serving the cached copy after.
# Extra arguments in ... are forwarded to solve().
cacheSolve <- function(mtx, ...) {
  inverse <- mtx$getinv()
  if(!is.null(inverse)) {
    message("Getting cached data...")
    return(inverse)
  }
  data <- mtx$get()
  # BUG FIX: the original assigned to a misspelled `invserse`, so the
  # computed inverse was discarded and NULL was cached and returned.
  inverse <- solve(data, ...)
  mtx$setinv(inverse)
  return(inverse)
}
|
8d20301f9ac19448131a1157392776a44f1eccf7
|
9eaf9acc455fdcf74d44623033f76dbf3ac5f4d6
|
/Scripts/BuildModels.R
|
b5df66663d06a328fe16cb1c2380a81469a57c64
|
[] |
no_license
|
eoinreilly93/Numerai-Model
|
53a7e7a414424dcb793d78ac12d13dfa3343474d
|
e678445b8a5a352fc132133576d91b8ce7b8ed5a
|
refs/heads/master
| 2021-01-19T08:40:24.611518
| 2017-04-12T20:36:06
| 2017-04-12T20:36:06
| 87,657,797
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 649
|
r
|
BuildModels.R
|
################################
# IMPORT LIBRARIES AND SCRIPTS
################################
source("Scripts/DataPreprocessing.R")
# FIX: load packages from the default library paths. The hard-coded
# lib.loc = "~/R/win-library/3.3" only resolves on one specific Windows
# machine and R version, and breaks the script everywhere else.
library(bnlearn)
library(boot)

######################
# BUILD MODELS
######################
# Naive Bayes classifier for `target`, parameters fit with Bayesian
# estimation (imaginary sample size 1).
naivemodel <- naive.bayes(trainingdata, "target")
naivefitted <- bn.fit(naivemodel, trainingdata, method = "bayes", iss = 1)

# Tree-augmented naive Bayes (TAN), fit the same way.
treemodel <- tree.bayes(trainingdata, "target")
treefitted <- bn.fit(treemodel, trainingdata, method = "bayes", iss = 1)

# Logistic regression baseline on all predictors.
lrmodel <- glm(target ~ ., trainingdata, family = binomial(link = "logit"))
|
7738f39100deb3ebfefc70457fe11ab5cf121ee8
|
a6081dd26a6bde5c642a360cdb5e691a92e8cd05
|
/strataG/R/privateAlleles.R
|
02901487b71cb54248240a46e17af01b30e63fa7
|
[] |
no_license
|
hjanime/strataG
|
615829d0db7fb9b783718cca0dbac9f9d5e5d10e
|
46a573fe7d99094fcd2d885602a6ad4b77d43b89
|
refs/heads/master
| 2021-01-21T07:20:09.762903
| 2015-09-15T21:40:16
| 2015-09-15T21:40:16
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 820
|
r
|
privateAlleles.R
|
#' @title Private Alleles
#' @description The number of private alleles in each strata and locus.
#'
#' @param g a \linkS4class{gtypes} object.
#'
#' @return matrix with the number of private alleles in each strata at each locus.
#'
#' @author Eric Archer \email{eric.archer@@noaa.gov}
#'
#' @examples
#' data(dolph.msats)
#' data(dolph.strata)
#' msats.merge <- merge(dolph.strata[, c("ids", "fine")], dolph.msats, all.y = TRUE)
#' msats <- df2gtypes(msats.merge, ploidy = 2)
#'
#' privateAlleles(msats)
#'
#' @export
#'
privateAlleles <- function(g) {
  # FIX: use TRUE rather than the reassignable shorthand T.
  freqs <- alleleFreqs(g, TRUE)
  do.call(rbind, lapply(freqs, function(f) {
    f <- f[, 1, ]
    # Treat any positive frequency as presence (1) / absence (0).
    f[f > 0] <- 1
    # An allele that is present in exactly one stratum is private; tally
    # such alleles for each stratum.
    pa <- rowSums(apply(f, 1, function(x) {
      if(sum(x > 0) == 1) x else rep(0, length(x))
    }))
    names(pa) <- colnames(f)
    pa
  }))
}
|
2dab66d959b502e09bf7ecbcec0452921a2fb354
|
d73855543142c069377e5e37607f427ef2c9b764
|
/man/fun_hairpin_trimming.Rd
|
25b90e33a68a07596e34eca6df6e7b9d7c44333b
|
[] |
no_license
|
cran/MicroSEC
|
66ed5292c0571493728153ec49a8514a440baeb3
|
dca6895d234d82b4e9e90f86527791a9bdeb86ab
|
refs/heads/master
| 2023-01-30T08:58:45.352825
| 2020-12-02T09:30:07
| 2020-12-02T09:30:07
| 318,757,856
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 665
|
rd
|
fun_hairpin_trimming.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/fun_hairpin_trimming.R
\name{fun_hairpin_trimming}
\alias{fun_hairpin_trimming}
\title{Adapter sequence trimming function}
\usage{
fun_hairpin_trimming(hairpin_seq, mut_read_strand, adapter_1, adapter_2)
}
\arguments{
\item{hairpin_seq}{The sequence to be trimmed.}
\item{mut_read_strand}{The strand of the sequence, "+" or "-".}
\item{adapter_1}{The Read 1 adapter sequence of the library.}
\item{adapter_2}{The Read 2 adapter sequence of the library.}
}
\value{
Adapter-trimmed hairpin_seq
}
\description{
This function attempts to find and cut the adapter sequences at the ends of the sequence.
}
|
36efcff2e15c60696d1abe593f553358394e540c
|
d6cd91cadceb6fe91ee56d3bf66066bae06e1d7b
|
/man/natlparkFlickr.Rd
|
fae4237faa76a585c5b6064d6270e36909edc2e1
|
[
"MIT"
] |
permissive
|
RussJGoebel/natlparkFlickr
|
8a5fe69188d8012fd86ac2be35d56ebeee0d4741
|
2289c27e8ef49fdac190b1af4fecd7f0418ff530
|
refs/heads/master
| 2023-02-17T12:25:48.029021
| 2020-09-20T17:27:25
| 2020-09-20T17:27:25
| 297,125,078
| 0
| 0
|
NOASSERTION
| 2021-01-22T17:32:19
| 2020-09-20T17:12:35
|
HTML
|
UTF-8
|
R
| false
| true
| 986
|
rd
|
natlparkFlickr.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/data.R
\docType{package}
\name{natlparkFlickr}
\alias{natlparkFlickr}
\title{natlparkFlickr: Datasets for national and park-level Flickr usage}
\description{
This package contains two datasets: flickr_userdays stores a time series of national level user-days for Flickr between 2005 and 2018, and park_visitation stores Flickr photo-user-days associated with twenty popular national parks in the United States. Here, user-days counts the number of unique Flickr users in a day, and photo-user-days counts the number of unique users who post a photo on Flickr from within the boundary of a given park.
}
\section{natlparkFlickr Datasets}{
\itemize{
\item{\code{flickr_userdays}}: A time series of national level user-days for Flickr between 2005 and 2018.
\item{\code{park_visitation}}: A data frame storing Flickr photo-user-days associated with twenty popular national parks in the United States.
}
}
|
e372278fc30e60d781157cd30eb6c5df90968f8e
|
907798f8081c9c2ded54e1541acb6fa5882dd60f
|
/Gcomp_random.R
|
f430f7daae04f319e253a96b0ae939aa403a7e6b
|
[] |
no_license
|
JasonLaurich/Turnera_multiple_mutualisms
|
3ab37626ceba11776eed09fbd1bb0f4cb91b9f70
|
df7d55ffb6792a064ef3247eaa045b079117f454
|
refs/heads/main
| 2023-04-11T20:27:43.866122
| 2022-08-04T15:36:08
| 2022-08-04T15:36:08
| 471,031,478
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 13,960
|
r
|
Gcomp_random.R
|
library(MCMCglmm)
library(foreach)
library(doParallel)
library(gdata)
library(matrixcalc)

# NOTE(review): hard-coded, machine-specific working directory -- the data
# files below are read from here.
setwd("C:/Users/Jason/Desktop/Data/TurnChap1/Dryad Files")

# Load randomized G matrices: one table per population (BT, Ca, MC, Mu, SA),
# each row holding the 10 unique elements of a symmetric 4x4 G matrix.
BT<-read.table('BT_ranG_final.txt', header= T)
Ca<-read.table('Ca_ranG_final.txt', header= T)
MC<-read.table('MC_ranG_final.txt', header= T)
Mu<-read.table('Mu_ranG_final.txt', header= T)
SA<-read.table('SA_ranG_final.txt', header= T)
#number of MCMC samples
MCMCsamp <- 1000
#number of traits
n <- 4
#number of matrices to compare
m <- 5
#trait names
traitnames <- c("ela","efn","fn","herk")
#matrix labels
Gnames <- c("BT","Ca","MC","Mu","SA")
# trait x trait x population x MCMC-sample array of G matrices
Garray <- array(,c(n,n,m,MCMCsamp))
# BUG FIX: dimnames on a 4-d array must be a length-4 list; the original
# 3-element list raised "length of 'dimnames' [3] must match that of
# 'dims' [4]" at runtime. The MCMC-sample dimension is left unnamed.
dimnames(Garray) <- list(traitnames,traitnames,Gnames,NULL)
# Load the randomized G matrices into Garray.
# Each input row holds the 10 unique elements of a symmetric 4x4 G matrix in
# row-major upper-triangle order:
#   (1,1)(1,2)(1,3)(1,4)(2,2)(2,3)(2,4)(3,3)(3,4)(4,4)
# This generic loop replaces ~480 hand-written element assignments and fills
# both triangles of each matrix, producing exactly the same Garray contents.
ut_idx <- cbind(row = c(1, 1, 1, 1, 2, 2, 2, 3, 3, 4),
                col = c(1, 2, 3, 4, 2, 3, 4, 3, 4, 4))
# Population order must match Gnames: BT, Ca, MC, Mu, SA.
pop_data <- list(BT, Ca, MC, Mu, SA)
for (p in seq_along(pop_data)) {
  pop_mat <- as.matrix(pop_data[[p]])
  for (i in 1:MCMCsamp) {
    gmat <- matrix(NA_real_, n, n)
    gmat[ut_idx] <- pop_mat[i, 1:10]              # upper triangle + diagonal
    gmat[ut_idx[, c(2, 1)]] <- pop_mat[i, 1:10]   # mirror to lower triangle
    Garray[, , p, i] <- gmat
  }
}
# Ok so 1 to 10 are the vector correlations for each population pair, 11 to 14 are the H values for the 4 eigenvectors (Krzanowski's).
# 4 more for the 4th order covariance tensor
# Result containers: one row per randomized-G replicate (1000 of them).
ranskew_stats_ranG<-array(,c(1000,10))
k_stats_ranG<-array(,c(1000,4))
four_stats_ranG<-array(,c(1000,4))
# Per-sample selection responses: replicate x trait x population.
Vec_rsp_ran<-array(rep(1,1000*n*m), dim=c(1000,n,m))
# Matrix to hold 1000 estimates of vector corrs. 10 is the number of population comparisons.
# NOTE(review): rep(1,1000,10) is interpreted as rep(x=1, times=1000,
# length.out=10); it yields ten 1s recycled into the array, which works but
# is easy to misread -- rep(1, 1000*10) would be clearer.
vect_cor_ran<-array(rep(1,1000,10), dim=c(1000,10))
# Matrix to store mean vector correlations (for all the MCMC samples of each G) for each vector. The means and HPD intervals of these is what we report.
mean_vect_cor_ran<-array(rep(1,1000*10), dim=c(1000,10))
# Now we do Krzanowski's analysis
#START
# Krzanowski subspace comparison of m genetic covariance (G) matrices
# (after the Aguirre et al. 2014 "Comparing G" tutorial).
#
# Gs  : array of order n x n x m x MCMCsamp
#       (traits x traits x populations x MCMC samples).
# vec : integer vector of length m; vec[k] = number of leading eigenvectors
#       of population k's G that define its subspace.
#
# Returns a list: posterior mean projection matrices (avAA, avH), their MCMC
# samples (MCMC.AA, MCMC.H), the posterior genetic variance along the
# eigenvectors of mean H (MCMC.H.val), and the angle distributions
# (MCMC.H.theta).
kr.subspace <- function(Gs, vec){
  if (dim(Gs)[[1]] != dim(Gs)[[2]]){
    stop("G array must be of order n x n x m x MCMCsamp")
  }
  if (is.na(dim(Gs)[4])) {
    stop("There are no MCMCsamples")
  }
  n <- dim(Gs)[[1]]
  m <- dim(Gs)[[3]]
  MCMCsamp <- dim(Gs)[[4]]
  if(length(vec) != m){stop("vec must have length = m")}
  # Internal: for one MCMC sample g (n x n x m), build the projection matrix
  # AA[,,k] onto the leading v[k]-dimensional subspace of each population's
  # G, plus their sum H.
  # (Fixed: dimensions were hard-coded as c(4, 4, 5) and the loop as 1:5;
  # the function now works for any n and m.)
  h <- function (g, v){
    AA <- array(, c(n, n, m))
    for (k in 1:m){
      g.vec <- eigen(g[,,k])$vectors[,1:(v[k])]
      AA[,,k] <- g.vec %*% t(g.vec)
    }
    H <- apply(AA, 1:2, sum)
    list(H = H, AA = AA)
  }
  # AA and H for every MCMC sample of the G array.
  MCMC.H <- array(, c(n, n, MCMCsamp))
  dimnames(MCMC.H) <- list(dimnames(Gs)[[1]], dimnames(Gs)[[1]], dimnames(Gs)[[4]])
  MCMC.AA <- array(, c(n, n, m, MCMCsamp))
  dimnames(MCMC.AA) <- list(dimnames(Gs)[[1]], dimnames(Gs)[[1]], dimnames(Gs)[[3]], dimnames(Gs)[[4]])
  for (z in 1:MCMCsamp){
    kr <- h(Gs[,,,z], v = vec)
    MCMC.H[,,z] <- kr$H
    MCMC.AA[,,,z] <- kr$AA
  }
  # Posterior mean H and AA.
  avH <- apply(MCMC.H, 1:2, mean)
  rownames(avH) <- dimnames(Gs)[[1]]
  colnames(avH) <- dimnames(Gs)[[1]]
  avAA <- apply(MCMC.AA, 1:3, mean)
  dimnames(avAA) <- list(dimnames(Gs)[[1]], dimnames(Gs)[[1]], dimnames(Gs)[[3]])
  # Eigenanalysis of posterior mean H.
  avH.vec <- eigen(avH)$vectors
  # Projection of matrix a onto vector b.
  proj<- function(a, b) t(b) %*% a %*% b
  # Angles between the eigenvectors of posterior mean H and the posterior
  # mean subspaces of each population.
  # (Fixed: the loop over i was accidentally written twice, nested inside
  # itself; one level removed.  NOTE(review): avH.theta is computed but not
  # returned, matching the published tutorial.)
  avH.theta <- matrix(, n, m)
  for (i in 1:n){
    avH.theta[i,] <- acos(sqrt(apply(avAA, 3, proj, b = avH.vec[,i]))) * (180/pi)
  }
  # Posterior distribution of the genetic variance for the eigenvectors of
  # posterior mean H.
  MCMC.H.val <- matrix(, MCMCsamp, n)
  colnames(MCMC.H.val) <- paste("h", 1:n, sep="")
  for (i in 1:n){
    MCMC.H.val[,i] <- apply(MCMC.H, 3, proj, b = avH.vec[,i])
  }
  # Posterior distribution of the angles between the eigenvectors of
  # posterior mean H and the MCMC samples of each population's subspace.
  MCMC.H.theta <- array(, c(n, m, MCMCsamp))
  rownames(MCMC.H.theta) <- paste("h", 1:n, sep="")
  colnames(MCMC.H.theta) <- dimnames(Gs)[[3]]
  for(i in 1:n){
    for(j in 1:MCMCsamp){
      MCMC.H.theta[i,,j] <- acos(sqrt(apply(MCMC.AA[,,,j], 3, proj, b = avH.vec[,i]))) * (180/pi)
    }
  }
  list(avAA = avAA, avH = avH, MCMC.AA = MCMC.AA, MCMC.H = MCMC.H, MCMC.H.val = MCMC.H.val, MCMC.H.theta = MCMC.H.theta)
}
#END
# --- Krzanowski subspace summary for this randomised G array --------------
MCMCG.kr.rand <- kr.subspace(Garray, vec = rep(2,m))
K.ran<-colMeans(MCMCG.kr.rand$MCMC.H.val)
K.ran<-as.vector(K.ran)
print("Krzan summed")
# NOTE(review): the result goes into k_stats, but the table written below is
# k_stats_ranG (allocated above, never filled) -- confirm which is intended.
k_stats[x,1:4]<-K.ran
write.table(k_stats_ranG, file=paste("k_stats_ranG",y,".txt"), sep="\t")
#Generate multivariate normal selection vectors.
vec<-1000
n <- dim(Garray)[[1]]
m <- dim(Garray)[[3]]
MCMCsamp <- dim(Garray)[[4]]
rand.vec <-matrix(,vec,n)
Vec_rsp<-array(rep(1,vec*n*m), dim=c(vec,n,m))
# Draw `vec` random selection vectors, each normalised to unit length
# (uniform on the n-sphere).
for (i in 1:vec){
b <- rnorm(n,0,1)
rand.vec[i,] <- b/(sqrt(sum(b^2)))
}
print("vectors generated")
#for each vector
# NOTE(review): the index j of this outer loop is never used in the body --
# the inner loop always reads rand.vec[k,], so the same 1000 computations
# are repeated 1000 times.  rand.vec[j,] (or removing the outer loop) was
# probably intended; confirm against the original analysis script.
for (j in 1:1000){
#Record vector response for each MCMC estimate
for(k in 1:1000){
for (p in 1:m){
Vec_rsp_ran[k,,p]<-Garray[,,p,k]%*%rand.vec[k,]
}
#Calculate the vector correlation, but don't store.
# Columns 1-10 are the 10 unordered pairs of the 5 populations
# (hard-coded for m = 5).
vect_cor_ran[k,1]<-t(Vec_rsp_ran[k,,1])%*%Vec_rsp_ran[k,,2]/sqrt(t(Vec_rsp_ran[k,,1])%*%Vec_rsp_ran[k,,1]*t(Vec_rsp_ran[k,,2])%*%Vec_rsp_ran[k,,2])
vect_cor_ran[k,2]<-t(Vec_rsp_ran[k,,1])%*%Vec_rsp_ran[k,,3]/sqrt(t(Vec_rsp_ran[k,,1])%*%Vec_rsp_ran[k,,1]*t(Vec_rsp_ran[k,,3])%*%Vec_rsp_ran[k,,3])
vect_cor_ran[k,3]<-t(Vec_rsp_ran[k,,1])%*%Vec_rsp_ran[k,,4]/sqrt(t(Vec_rsp_ran[k,,1])%*%Vec_rsp_ran[k,,1]*t(Vec_rsp_ran[k,,4])%*%Vec_rsp_ran[k,,4])
vect_cor_ran[k,4]<-t(Vec_rsp_ran[k,,1])%*%Vec_rsp_ran[k,,5]/sqrt(t(Vec_rsp_ran[k,,1])%*%Vec_rsp_ran[k,,1]*t(Vec_rsp_ran[k,,5])%*%Vec_rsp_ran[k,,5])
vect_cor_ran[k,5]<-t(Vec_rsp_ran[k,,2])%*%Vec_rsp_ran[k,,3]/sqrt(t(Vec_rsp_ran[k,,2])%*%Vec_rsp_ran[k,,2]*t(Vec_rsp_ran[k,,3])%*%Vec_rsp_ran[k,,3])
vect_cor_ran[k,6]<-t(Vec_rsp_ran[k,,2])%*%Vec_rsp_ran[k,,4]/sqrt(t(Vec_rsp_ran[k,,2])%*%Vec_rsp_ran[k,,2]*t(Vec_rsp_ran[k,,4])%*%Vec_rsp_ran[k,,4])
vect_cor_ran[k,7]<-t(Vec_rsp_ran[k,,2])%*%Vec_rsp_ran[k,,5]/sqrt(t(Vec_rsp_ran[k,,2])%*%Vec_rsp_ran[k,,2]*t(Vec_rsp_ran[k,,5])%*%Vec_rsp_ran[k,,5])
vect_cor_ran[k,8]<-t(Vec_rsp_ran[k,,3])%*%Vec_rsp_ran[k,,4]/sqrt(t(Vec_rsp_ran[k,,3])%*%Vec_rsp_ran[k,,3]*t(Vec_rsp_ran[k,,4])%*%Vec_rsp_ran[k,,4])
vect_cor_ran[k,9]<-t(Vec_rsp_ran[k,,3])%*%Vec_rsp_ran[k,,5]/sqrt(t(Vec_rsp_ran[k,,3])%*%Vec_rsp_ran[k,,3]*t(Vec_rsp_ran[k,,5])%*%Vec_rsp_ran[k,,5])
vect_cor_ran[k,10]<-t(Vec_rsp_ran[k,,4])%*%Vec_rsp_ran[k,,5]/sqrt(t(Vec_rsp_ran[k,,4])%*%Vec_rsp_ran[k,,4]*t(Vec_rsp_ran[k,,5])%*%Vec_rsp_ran[k,,5])
#Store the mean vector correlation for each vector in the table
# NOTE(review): stored in ranskew_stats, but ranskew_stats_ranG is what is
# written to disk below -- confirm which object is intended.
for (a in 1:10){
ranskew_stats[x,a]<-mean(vect_cor_ran[,a])
}
}
}
write.table(ranskew_stats_ranG, file=paste("ranskew_stats_ranG",y,".txt"), sep="\t")
print("end of random skewers")
#START
# Genetic covariance tensor analysis (after the Aguirre et al. 2014
# "Comparing G" tutorial): eigentensors of the 4th-order covariance tensor S
# summarising variation among the m G matrices.
#
# Gs: array of order n x n x m x MCMCsamp.
# NOTE(review): still relies on `traitnames` and `Gnames` existing in the
# calling environment for labelling, and on lowerTriangle() (gdata) and
# frobenius.prod() (matrixcalc) being loaded -- confirm before use.
covtensor <- function(Gs){
  if (dim(Gs)[[1]] != dim(Gs)[[2]]){
    stop("G array must be of order n x n x m x MCMCsamp")
  }
  if (is.na(dim(Gs)[4])) {
    stop("There are no MCMCsamples")
  }
  # Derive the dimensions from Gs itself instead of trusting globals
  # (consistent with kr.subspace above).
  n <- dim(Gs)[[1]]
  m <- dim(Gs)[[3]]
  MCMCsamp <- dim(Gs)[[4]]
  # Number of eigentensors.
  neigten <- n*(n+1)/2
  MCMC.S <- array(,c(neigten, neigten, MCMCsamp))
  # (Fixed: dimnames<- on a 3-D array needs a length-3 list; the missing
  # third component made this assignment error at runtime.)
  dimnames(MCMC.S) <- list(paste("e", 1:neigten, sep=""), paste("e", 1:neigten, sep=""), dimnames(Gs)[[4]])
  for (k in 1:MCMCsamp){
    MCMCG <- Gs[,,,k]
    # Variances and covariances of the kth G of each population.
    MCMCvarmat <- t(apply(MCMCG, 3, diag))
    MCMCcovmat <- t(apply(MCMCG, 3, lowerTriangle))
    # Fill the four quadrants of the kth S.
    MCMC.S[1:n,1:n, k] <- cov(MCMCvarmat, MCMCvarmat)
    MCMC.S[(n+1):neigten,(n+1):neigten, k] <- 2*cov(MCMCcovmat, MCMCcovmat)
    MCMC.S[1:n,(n+1):neigten, k] <- sqrt(2)*cov(MCMCvarmat, MCMCcovmat)
    MCMC.S[(n+1):neigten,1:n, k] <- sqrt(2)*cov(MCMCcovmat, MCMCvarmat)
  }
  # Posterior mean S and its eigenanalysis.
  av.S <- apply(MCMC.S, 1:2, mean)
  av.S.val <- eigen(av.S)$values
  av.S.vec <- eigen(av.S)$vectors
  # Construct the second-order eigentensors of posterior mean S.
  eTmat <- array(, c(n, n, neigten))
  dimnames(eTmat) <- list(traitnames, traitnames, paste("E", 1:neigten, sep=""))
  for (i in 1:neigten){
    emat <- matrix(0, n, n)
    lowerTriangle(emat) <- 1/sqrt(2)*av.S.vec[(n+1):neigten,i]
    emat <- emat + t(emat)
    diag(emat) <- av.S.vec[1:n,i]
    eTmat[,,i] <- emat
  }
  # Eigenvalues (row 1) and eigenvectors (rows 2..n+1) of each eigentensor,
  # columns ordered by absolute eigenvalue.
  eT.eigen <- array(, c(n+1, n, neigten))
  for (i in 1:neigten){
    eT.eigen[1,,i] <- t(eigen(eTmat[,,i])$values)
    eT.eigen[2:(n+1),,i] <- eigen(eTmat[,,i])$vectors
    eT.eigen[,,i] <- eT.eigen[,order(abs(eT.eigen[1,,i]), decreasing = T), i]
  }
  # Posterior distribution of the genetic variance for the eigenvectors of
  # posterior mean S.
  MCMC.S.val <- matrix(, MCMCsamp, neigten)
  colnames(MCMC.S.val) <- paste("E", 1:neigten, sep="")
  for (i in 1:MCMCsamp){
    for(j in 1:neigten){
      MCMC.S.val[i,j] <- t(av.S.vec[,j]) %*% MCMC.S[,,i] %*% av.S.vec[,j]
    }
  }
  # Coordinates of the posterior mean Gs for the eigentensors of mean S.
  av.G.coord <- array(, c(m, neigten, 1))
  # (Fixed: missing third dimnames component, as above.)
  dimnames(av.G.coord) <- list(Gnames, paste("E", 1:neigten, sep=""), NULL)
  for (i in 1:neigten){
    av.G.coord[,i,] <- apply((apply(Gs, 1:3, mean)) , 3, frobenius.prod, y = eTmat[,,i])
  }
  # Coordinates of each MCMC sample of each G for the eigentensors of mean S.
  MCMC.G.coord <- array(, c(m, neigten, MCMCsamp))
  dimnames(MCMC.G.coord) <- list(Gnames, paste("E", 1:neigten, sep=""), dimnames(Gs)[[4]])
  for (i in 1:neigten){
    MCMC.G.coord[,i,] <- apply(Gs, 3:4, frobenius.prod, y = eTmat[,,i])
  }
  tensor.summary <- data.frame(rep(av.S.val,each=n), t(data.frame(eT.eigen)))
  colnames(tensor.summary) <- c("S.eigval", "eT.val", traitnames)
  rownames(tensor.summary)<- paste(paste("e", rep(1:neigten, each=n), sep=""), rep(1:n,neigten), sep=".")
  list(tensor.summary = tensor.summary, av.S = av.S, eTmat = eTmat, av.G.coord = av.G.coord, MCMC.S = MCMC.S, MCMC.S.val = MCMC.S.val, MCMC.G.coord = MCMC.G.coord)
}
#END
# --- Covariance tensor summary for this randomised G array ----------------
# Number of nonzero eigentensors of S.
nnonzero <- min(n*(n+1)/2,m-1)
MCMC.covtensor.rand <- covtensor(Garray)
# NOTE(review): inside a loop this bare expression is neither printed nor
# stored -- it has no effect and can probably be removed.
colMeans(MCMC.covtensor.rand$MCMC.S.val[,1:nnonzero])
Cov.ran<-colMeans(MCMC.covtensor.rand$MCMC.S.val[,1:nnonzero])
Cov.ran<-as.vector(Cov.ran)
# NOTE(review): result stored in four_stats, but four_stats_ranG is what is
# written out below -- confirm which object is intended.
four_stats[x,1:4]<-Cov.ran
write.table(four_stats_ranG, file=paste("four_stats_ranG",y,".txt"), sep="\t")
} # end of looping through individual random G matrix generation based on MCMC estiamtes and summary stat generation
} # end of parallel
stopImplicitCluster()
|
b6d730987dacdfd4ec62f93001c4bf8c5e7e8250
|
c13ed34bf0a8dcb963561f9b443a29df8aee8ca5
|
/man/BAMBI.Rd
|
99429b260d80d7cc124ef45e349f6d599675d2a5
|
[] |
no_license
|
cran/BAMBI
|
61ad600a2f23fc1435b36a7319d9a8a45486b444
|
16a4513e1bdff0a48f9f3f5f380c6618d51df218
|
refs/heads/master
| 2023-03-17T06:22:34.994816
| 2023-03-08T22:10:05
| 2023-03-08T22:10:05
| 77,772,641
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 685
|
rd
|
BAMBI.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/BAMBI-package.R
\name{BAMBI}
\alias{BAMBI}
\title{\code{BAMBI}: An R package for Bivariate Angular Mixture Models}
\description{
\code{BAMBI} is an R package that provides functions for fitting
(using Bayesian methods) and simulating mixtures of univariate
and bivariate angular distributions. Please see the reference for a
detailed description of the functionalities of \code{BAMBI}.
}
\references{
Chakraborty, S., & Wong, S. W. (2021). BAMBI: An R package for
fitting bivariate angular mixture models. \emph{Journal of Statistical Software},
99 (11), 1-69. \doi{10.18637/jss.v099.i11}
}
|
50af7798d291004eb64e675142ceca8bf79cc8f3
|
5c018949cad4d1a158a85f73a01ec295c8a38132
|
/assignment1.R
|
7ba3a505d426ea8a714dd03d962b026b4c9c3cc4
|
[] |
no_license
|
yannickKuhar/IS
|
40fb82cc3f92ec9ba429c278cf0df5991d5fb370
|
d8e9d6c8e875e1af4ccd3539a1e568c8f301dbee
|
refs/heads/master
| 2020-04-09T14:46:27.277954
| 2019-01-14T22:11:29
| 2019-01-14T22:11:29
| 160,406,671
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 12,196
|
r
|
assignment1.R
|
####################### PRINT ############################
# Print a maze to the console, one row of cells per output line.
# maze is a flat character vector stored row-major; rows/cols give its shape.
printMaze <- function(maze, rows, cols) {
  for (r in 1:rows) {
    row_start <- (r - 1) * cols + 1
    print(maze[row_start:(r * cols)])
  }
}
##########################################################
###################### MOVE ##############################
# Move one row up in a row-major maze; stay put when that would leave the grid.
moveUp <- function(position, rows, cols) {
  candidate <- position - cols
  if (candidate >= 1) candidate else position
}
# Move one row down in a row-major maze; stay put when that would leave the grid.
moveDown <- function(position, rows, cols) {
  candidate <- position + cols
  if (candidate > rows * cols) position else candidate
}
# Move one cell left; stay put when the step would wrap onto the previous row.
moveLeft <- function(position, rows, cols) {
  candidate <- position - 1
  same_row <- (position - 1) %/% cols == (candidate - 1) %/% cols
  if (same_row) candidate else position
}
# Move one cell right; stay put when the step would wrap onto the next row.
moveRight <- function(position, rows, cols) {
  candidate <- position + 1
  same_row <- (position - 1) %/% cols == (candidate - 1) %/% cols
  if (same_row) candidate else position
}
##########################################################
######################## FITNESS #########################
# Convert a flat row-major maze index into (row, column) coordinates.
# Returns c(row, col), both 1-based.
parsePoint <- function(pos, rows, cols) {
  # Row index: each row holds `cols` cells, so divide by cols.
  # (Fixed: the original divided by `rows`, which is only correct for square
  # mazes; maze2 in this file is 18 x 17.)
  x <- ceiling(pos / cols)
  # Column index; a remainder of 0 means the last column.
  y <- pos %% cols
  if (y == 0) {
    y <- cols
  }
  c(x, y)
}
# Fitness function for a candidate solution: walk `solution` (a vector of
# 'U'/'D'/'L'/'R' moves) through `maze` from 's'.  Moves into walls ('#') are
# undone.  Returns 0 as soon as the exit 'e' is reached, -1 on a malformed
# move, and otherwise the Euclidean distance from the final position to the
# exit (lower is better).  `ngen` is unused but kept for interface
# compatibility.
simulateSolution <- function(maze, solution, rows, cols, ngen) {
  wall_penalty_unit <- 0.1
  wall_penalty <- 0   # accumulated on wall hits; not part of the score (kept from original)
  pos <- grep('s', maze)
  goal <- grep('e', maze)
  for (step in solution) {
    prev <- pos
    pos <- switch(step,
      U = moveUp(pos, rows, cols),
      D = moveDown(pos, rows, cols),
      L = moveLeft(pos, rows, cols),
      R = moveRight(pos, rows, cols),
      {
        print(solution)
        print('Error: Incorrect solution format')
        return(-1)
      }
    )
    # Walking into a wall is undone (with a penalty tally).
    if (maze[pos] == '#') {
      wall_penalty <- wall_penalty + wall_penalty_unit
      pos <- prev
    }
    # Reaching the exit is a perfect score.
    if (maze[pos] == 'e') {
      return(0)
    }
  }
  goal_rc <- parsePoint(goal, rows, cols)
  end_rc <- parsePoint(pos, rows, cols)
  sqrt((goal_rc[2] - end_rc[2])^2 + (goal_rc[1] - end_rc[1])^2)
}
##########################################################
######################## INIT POP ########################
# Build one random agent: a character vector of `ngen` moves drawn uniformly
# from U/D/L/R.  (as.integer(runif(., 1, 5)) is uniform over 1..4.)
createOneAgent <- function(ngen) {
  move_set <- c('U', 'D', 'L', 'R')
  draws <- as.integer(runif(ngen, 1, 5))
  move_set[draws]
}
# Build the initial population: a list of `size` random agents of length
# `ngen` each.  maze/rows/cols are unused but kept for interface compatibility.
createPopulateion <- function(maze, rows, cols, size, ngen) {
  pop <- vector(mode = "list", length = size)
  for (idx in 1:size) {
    pop[[idx]] <- createOneAgent(ngen)
  }
  pop
}
##########################################################
######################## MUTACIJA ########################
# Point mutation: replace one position of the first solution in `sol` with a
# random move.  NOTE(review): the slot is drawn from 1..length(sol) (the list
# length, usually 1), not the solution length -- kept as-is.
mutacija <- function(sol) {
  move_set <- c('U', 'D', 'L', 'R')
  slot <- runif(1, 1, length(sol))   # truncated toward zero when used as index
  sol[[1]][slot] <- move_set[as.integer(runif(1, 1, 5))]
  sol
}
# Per-position mutation: each considered position of the first solution is
# replaced with a random move with probability ~1/5.
# NOTE(review): iterates over 1..length(sol) (the list length, usually 1),
# not over the solution's genes -- kept as-is.
mutacija2 <- function(sol) {
  move_set <- c('U', 'D', 'L', 'R')
  for (pos in seq_along(sol)) {
    mutate_here <- as.integer(runif(1, 1, 6)) == 3
    if (mutate_here) {
      sol[[1]][pos] <- move_set[as.integer(runif(1, 1, 5))]
    }
  }
  sol
}
##########################################################
####################### SELECTION ########################
# Tournament selection: sample k individuals (with replacement) from indices
# 1..N and return the index with the lowest fitness f (lower = better).
# `pop` is unused but kept for interface compatibility.
TOS <- function(pop, N, f, k) {
  best <- 0
  for (round in 1:k) {
    # Uniform over 1..N.  (Fixed: runif(1, 1, N) truncates to 1..N-1, so the
    # last individual could never be sampled; N + 1 matches the convention
    # used elsewhere in this file, e.g. createOneAgent's runif(., 1, 5).)
    cand <- as.integer(runif(1, 1, N + 1))
    if (best == 0 || f[cand] < f[best]) {
      best <- cand
    }
  }
  best
}
# Of the four tournament slots, return the indices that did NOT win
# (these individuals are replaced by the winners' offspring).
# When b1 == b2, three losers are returned, matching the original behavior.
getLoserId <- function(b1, b2) {
  (1:4)[-c(b1, b2)]
}
# Steady-state tournament selection over groups of 4: in each group, two
# winners are chosen by tournament, crossed over, and their offspring replace
# the two losers; afterwards every individual is mutated.
# NOTE(review): assumes N is a multiple of 4.
# `ngen` is unused; it now defaults to NULL so the existing 3-argument call
# in geneticAlgorithm (which previously worked only thanks to lazy
# evaluation) is explicitly valid.
selectionTOS <- function(pop, N, f, ngen = NULL) {
  for (i in seq(from = 1, to = N, by = 4)) {
    subpop <- c(pop[i], pop[i + 1], pop[i + 2], pop[i + 3])
    subf <- c(f[i], f[i + 1], f[i + 2], f[i + 3])
    # Select the two tournament winners.
    best1 <- TOS(subpop, 4, subf, 2)
    best2 <- TOS(subpop, 4, subf, 2)
    # Retry a few times to get two distinct winners.
    # (Fixed: the retry loop reused the variable `i`, clobbering the outer
    # loop index that the pop[i]..pop[i+3] writes below depend on.)
    if (best1 == best2) {
      for (retry in 1:10) {
        best2 <- TOS(subpop, 4, subf, 2)
        if (best1 != best2) {
          break
        }
      }
    }
    # Offspring of the winners replace the losers.  (A commented-out variant
    # in the original occasionally injected fresh random agents here for
    # diversity; it was never active and has been dropped.)
    losers <- getLoserId(best1, best2)
    otroka <- crossover(subpop[[best1]], subpop[[best2]])
    subpop[losers[1]] <- otroka[1]
    subpop[losers[2]] <- otroka[2]
    # Write the updated group back into the population.
    pop[i] <- subpop[1]
    pop[i + 1] <- subpop[2]
    pop[i + 2] <- subpop[3]
    pop[i + 3] <- subpop[4]
  }
  # Apply mutation to every individual.
  for (j in 1:N) {
    pop[j] <- mutacija2(pop[j])
  }
  pop
}
# Stochastic Universal Sampling: keep N/2 individuals chosen by N/2 evenly
# spaced pointers over the cumulative fitness.  `ngen` is unused but kept
# for interface compatibility.
# NOTE(review): SUS selects proportionally to fitness (higher = more likely),
# while this GA's fitness is a distance (lower = better); this function is
# currently only used in a commented-out code path -- confirm orientation
# before enabling it.
SUS <- function(populacija, fitnessVec, N, ngen) {
  totalF <- sum(fitnessVec)
  offspring <- N / 2
  distance <- totalF / offspring
  strt <- runif(1, 0, distance)
  # Evenly spaced pointers starting at strt.
  # (Fixed: the original filled these with a loop over the values 0..off-1
  # used as indices, which silently dropped the first pointer -- index 0 is
  # a no-op in R -- and left the last slot holding its initial value.)
  pointers <- strt + (0:(as.integer(offspring) - 1)) * distance
  keep <- list()
  for (p in pointers) {
    # Smallest i whose cumulative fitness reaches the pointer.
    i <- 1
    while (sum(fitnessVec[1:i]) < p) {
      i <- i + 1
    }
    keep <- c(keep, populacija[i])
  }
  keep
}
# Rebuild a population of size N by repeatedly crossing over two randomly
# chosen parents from `selected`, then mutating each individual with small
# probability.  NOTE(review): assumes N is even.
repopulate <- function(selected, N) {
  pop <- list()
  n <- length(selected)
  # Perform the crossovers.
  for (slot in seq(from = 1, to = N, by = 2)) {
    # Uniform over 1..n.  (Fixed: runif(1, 1, n) truncates to 1..n-1, so the
    # last selected parent could never be chosen.)
    parent1 <- as.integer(runif(1, 1, n + 1))
    parent2 <- as.integer(runif(1, 1, n + 1))
    otroka <- crossover(selected[[parent1]],
                        selected[[parent2]])
    pop <- c(pop, otroka[1])
    pop <- c(pop, otroka[2])
  }
  # Mutate each individual with probability ~1/9.
  # (Fixed: the original wrote as.integer(runif(1, 1, 10) == 5), comparing
  # the raw uniform draw to 5 -- almost surely FALSE, then cast to 0 -- so
  # mutation never fired.  The cast belongs around runif.)
  for (idx in 1:N) {
    if (as.integer(runif(1, 1, 10)) == 5) {
      pop[idx] <- mutacija2(pop[idx])
    }
  }
  pop
}
##########################################################
###################### GEN ALG. ##########################
# Run the genetic algorithm: evolve a population of N agents (each a vector
# of ngen moves) on `maze` for at most maxItr generations.
# Returns 0 as soon as some agent reaches the exit (fitness 0), -1 otherwise.
# NOTE(review): `while (itr < maxItr)` evaluates at most maxItr - 1
# generations -- confirm whether maxItr was meant inclusively.
geneticAlgorithm <- function(maze, rows, cols, N, ngen, maxItr) {
  populacija <- createPopulateion(maze, rows, cols, N, ngen)
  fitnessVec <- numeric(N)
  itr <- 1
  while (itr < maxItr) {
    # Evaluate the population.
    for (i in 1:N) {
      fitnessVec[i] <- simulateSolution(maze, populacija[[i]], rows, cols, ngen)
    }
    # Selection + crossover + mutation.
    # (Fixed: ngen is now passed explicitly; the original 3-argument call
    # only worked because selectionTOS never evaluates its ngen parameter.)
    populacija <- selectionTOS(populacija, N, fitnessVec, ngen)
    # Alternative strategy kept for reference:
    # populacija <- SUS(populacija, fitnessVec, N, ngen)
    # populacija <- repopulate(populacija, N)
    cat("Generacija:", itr, "Najboljsi v generaciji:", min(fitnessVec), "\n")
    if (min(fitnessVec) == 0) {
      print('KONEC')
      return(0)
    }
    itr <- itr + 1
  }
  -1
}
# Estimate the GA's success rate: run it evalParam times independently and
# return the fraction of runs that found the exit (return value 0).
evalGALG <- function(maze, rows, cols, N, ngen, maxItr, evalParam) {
  successes <- 0
  for (run in 1:evalParam) {
    if (geneticAlgorithm(maze, rows, cols, N, ngen, maxItr) == 0) {
      successes <- successes + 1
    }
  }
  successes / evalParam
}
##########################################################
###################### TEST DATA #########################
# Test maze 1: 5x5, row-major flat vector.  's' = start, 'e' = exit,
# '#' = wall, ' ' = open cell.  solution1 is a known solving move sequence.
maze1 <- c(' ', ' ', ' ', ' ', 'e',
' ', '#', '#', '#', '#',
' ', ' ', 's', ' ', ' ',
'#', '#', '#', '#', ' ',
' ', ' ', ' ', ' ', ' ')
rows1 <- 5
cols1 <- 5
solution1 <- c('L', 'L','U', 'U', 'R', 'R', 'R', 'R', 'R')
# Test maze 2: 18 rows x 17 cols (note: NOT square -- exercises the
# rows/cols distinction in parsePoint).  solution2 is a known solving path.
maze2 <- c('#', '#', '#', '#', '#', '#', '#', '#', '#', '#', '#', '#', '#', '#', '#', '#', '#',
'#', '#', ' ', '#', ' ', '#', ' ', ' ', ' ', ' ', '#', ' ', ' ', '#', ' ', '#', '#',
'#', '#', 'e', '#', '#', '#', '#', '#', '#', '#', '#', '#', ' ', ' ', ' ', '#', '#',
'#', '#', ' ', '#', ' ', '#', ' ', '#', ' ', '#', '#', '#', ' ', ' ', ' ', '#', '#',
'#', '#', ' ', ' ', ' ', '#', ' ', '#', ' ', '#', '#', '#', '#', '#', ' ', ' ', ' ',
'#', '#', ' ', '#', ' ', '#', ' ', '#', ' ', ' ', ' ', ' ', '#', '#', ' ', '#', ' ',
'#', ' ', ' ', '#', ' ', ' ', ' ', '#', '#', '#', '#', ' ', '#', '#', ' ', '#', ' ',
'#', '#', ' ', '#', ' ', '#', ' ', '#', '#', '#', '#', ' ', '#', '#', ' ', '#', ' ',
'#', '#', ' ', '#', ' ', ' ', ' ', ' ', ' ', '#', '#', ' ', '#', '#', ' ', ' ', ' ',
'#', ' ', ' ', ' ', '#', ' ', '#', ' ', '#', ' ', ' ', ' ', ' ', ' ', ' ', '#', ' ',
'#', ' ', '#', ' ', '#', ' ', '#', ' ', '#', ' ', '#', '#', '#', '#', ' ', '#', ' ',
'#', ' ', '#', ' ', '#', ' ', '#', ' ', '#', ' ', '#', '#', '#', '#', ' ', '#', ' ',
'#', ' ', '#', ' ', '#', ' ', ' ', ' ', ' ', ' ', ' ', ' ', ' ', '#', ' ', '#', ' ',
'#', ' ', ' ', ' ', '#', ' ', '#', ' ', '#', '#', '#', '#', ' ', '#', ' ', '#', ' ',
'#', '#', ' ', '#', '#', '#', '#', ' ', '#', '#', ' ', ' ', ' ', ' ', ' ', '#', 's',
'#', '#', ' ', ' ', '#', ' ', ' ', ' ', '#', '#', ' ', '#', '#', '#', ' ', '#', ' ',
'#', '#', '#', '#', '#', '#', '#', '#', '#', '#', '#', '#', '#', '#', ' ', '#', '#',
'#', '#', '#', '#', '#', '#', '#', '#', '#', '#', '#', '#', '#', '#', '#', '#', '#')
solution2 <- c('U', 'U', 'U', 'U', 'U', 'U', 'L', 'L', 'D', 'L', 'L', 'L', 'L', 'L', 'D', 'D', 'D', 'L', 'L', 'L', 'L', 'U', 'U', 'U', 'U', 'L', 'U', 'U', 'U', 'U', 'L', 'L', 'U', 'U')
cols2 <- 17
rows2 <- 18
# Test maze 3: 11 rows x 16 cols, start on the left edge, exit on the right.
rows3<-11;
cols3<-16;
maze3 <- c('#','#','#','#','#','#','#','#','#','#','#','#','#','#','#','#',
's',' ',' ',' ',' ',' ','#',' ',' ',' ',' ',' ','#',' ',' ','#',
'#','#','#','#',' ',' ','#','#','#','#',' ',' ','#',' ',' ','#',
'#',' ',' ',' ',' ',' ','#',' ',' ',' ',' ',' ',' ',' ',' ','#',
'#',' ',' ','#','#','#','#',' ',' ','#','#','#','#',' ',' ','#',
'#',' ',' ',' ',' ',' ',' ',' ',' ','#',' ',' ',' ',' ',' ','#',
'#',' ',' ','#','#','#','#','#','#','#',' ',' ','#','#','#','#',
'#',' ',' ',' ',' ',' ','#',' ',' ',' ',' ',' ','#',' ',' ','#',
'#','#','#','#',' ',' ','#',' ',' ','#','#','#','#',' ',' ','#',
'#',' ',' ',' ',' ',' ','#',' ',' ',' ',' ',' ',' ',' ',' ','e',
'#','#','#','#','#','#','#','#','#','#','#','#','#','#','#','#')
# Test maze 4: tiny 5x5 maze with known solution sol4; used for the demo
# run of the GA below.
maze4 <- c('#', '#', '#', '#', '#',
'#', '#', ' ', ' ', 'e',
'#', '#', ' ', '#', '#',
'#', 's', ' ', '#', '#',
'#', '#', '#', '#', '#')
cols4 <- 5
rows4 <- 5
sol4 <- c('R', 'U', 'U', 'R', 'R')
##########################################################
# Demo: evolve 32 agents of 8 moves each for up to 2000 generations.
geneticAlgorithm(maze4, rows4, cols4, 32, 8, 2000)
# print(evalGALG(maze1, rows1, cols1, 32, 16, 5000, 10))
# tpop <- createPopulateion(maze1, rows1, cols1, 16, 8)
# f <- c(1:16)
#
# for (i in c(1:16)) {
# f[i] <- simulateSolution(maze1, tpop[i], rows1, cols1)
# }
#
# tsus <- SUS(tpop, f, 16, 8)
# trepo <- repopulate(tsus, 16)
#
#
# fsol1 <- c("R", "R", "U", "U", "U", "D", "U", "L")
# fsol2 <- c("R", "R", "U", "R", "U", "D", "U", "L")
# fsol3 <- c("R", "R", "U", "U", "U", "D", "U", "L")
#
# sol <- simulateSolution(maze1, fsol1, rows1, cols1)
|
7ed159812650c96625f5d6f91919a163868f98b4
|
0844c816ade1b06645fd9b3ae661c0691978117b
|
/man/district_geofacet.Rd
|
7f8e5b7d15a2606a52f23bb0fb801587cdd8b472
|
[
"MIT"
] |
permissive
|
petrbouchal/pragr
|
d92966395ac79982c72ba3a14fae6203176b256e
|
858fbb96f69eda9ac0922e8dfbc966948e545073
|
refs/heads/master
| 2022-12-21T20:44:41.420783
| 2022-12-17T21:18:35
| 2022-12-17T21:18:35
| 186,895,587
| 3
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 950
|
rd
|
district_geofacet.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/data-doc.R
\docType{data}
\name{district_geofacet}
\alias{district_geofacet}
\title{Dataset to be used in the {geofacet} package}
\format{
A data frame with 57 rows and 11 variables:
\describe{
\item{\code{code}}{character. RUIAN code. Normally should serve as ID to distribute your data points into grid cells.}
\item{\code{label}}{character. Three-character unique label.}
\item{\code{row}}{integer. row number.}
\item{\code{col}}{integer. column number.}
}
}
\usage{
district_geofacet
}
\description{
Use this as the \code{grid} argument to \code{geofacet::facet_geo()}. The layout corresponds to \code{district_tilegram}.
}
\seealso{
Other Mapping:
\code{\link{district_hexogram}},
\code{\link{district_names}},
\code{\link{district_tilegram}},
\code{\link{prg_basemap}()},
\code{\link{prg_endpoints}},
\code{\link{prg_tile}()}
}
\concept{Mapping}
\keyword{datasets}
|
cab10c38383b6024600e8a69de21beec1316f364
|
cfb642c4568a403e7cd39b66e16dcaed0d08bd49
|
/man/mapMulti.Rd
|
5631bc7c10ec12a9292ee3749cd3ac9080ad31da
|
[
"LicenseRef-scancode-warranty-disclaimer"
] |
no_license
|
JVAdams/EchoNet2Fish
|
5223bcdb98a43bb61cd629cb33f590cba9fd1fed
|
6e397345e55a13a0b3fca70df3701f79290d30b6
|
refs/heads/master
| 2023-06-22T17:56:41.457893
| 2021-02-08T16:08:09
| 2021-02-08T16:08:09
| 32,336,396
| 4
| 1
| null | 2023-06-09T17:36:08
| 2015-03-16T15:59:18
|
R
|
UTF-8
|
R
| false
| true
| 3,545
|
rd
|
mapMulti.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/mapMulti.R
\name{mapMulti}
\alias{mapMulti}
\title{Multipanel Map of Locations}
\usage{
mapMulti(
bygroup,
sug = sort(unique(bygroup)),
plottext = FALSE,
ID = 1,
emphasis = NULL,
short = TRUE,
lon,
lat,
samescale = TRUE,
rlon = NULL,
rlat = NULL,
cushion = 0.1,
IDcol = NULL,
mapcol = "gray",
boxcol = "gray",
misscol = "brown",
misstext = " - No sites",
mar = c(0, 0, 2.5, 0)
)
}
\arguments{
\item{bygroup}{Vector, identifying the group membership of the locations to be mapped.}
\item{sug}{Vector, identifying the unique groups to which locations may belong,
default sort(unique(\code{bygroup})).}
\item{plottext}{Logical scalar indicating if text (TRUE) or symbols (FALSE, default)
should be printed at each location.}
\item{ID}{Vector, plot symbol (if \code{plottext}=FALSE) or text
(if \code{plottext}=TRUE)
to print on the map at each location, either of length 1 or
the same length as \code{bygroup}, default 1.}
\item{emphasis}{Logical vector, indicating observations that should be emphasized on the
map, same length as \code{bygroup}, default NULL.}
\item{short}{Logical scalar, indicating aspect of map area. If TRUE, the default,
the mapped area is assumed to be wider (longitudinally) than tall.
Used to better arrange multiple maps on a single page.}
\item{lon}{A numeric vector of longitudes in decimal degrees.}
\item{lat}{A numeric vector of latitudes in decimal degrees.
Same length as \code{lon}.}
\item{samescale}{Logical scalar, indicating if the same (TRUE, default) or different
(FALSE) lon/lat scales should be used for all panels.}
\item{rlon}{A numeric vector of length 2, range of longitudes to map,
in decimal degrees. The default, NULL, means that the range of \code{lon}
will be used, either over all panels (if \code{samescale}=TRUE) or
for each panel separately (if \code{samescale}=FALSE).}
\item{rlat}{A numeric vector of length 2, range of latitudes to map,
in decimal degrees. The default, NULL, means that the range of \code{lat}
will be used, either over all panels (if \code{samescale}=TRUE) or
for each panel separately (if \code{samescale}=FALSE).}
\item{cushion}{A numeric scalar indicating the amount of cushion to add to the \code{rlon}
and \code{rlat} ranges in decimal degrees, default 0.1.}
\item{IDcol}{A vector, the color used to map locations, same length as \code{bygroup}.
If NULL, the default, a range of colors will be assigned automatically.}
\item{mapcol}{A scalar, the color used to draw the map lines (e.g., lake boundary),
default "gray".}
\item{boxcol}{A scalar, the color used to draw the box around the map, default "gray".}
\item{misscol}{A scalar, the color used to label maps with no locations in the
given bygroup, default "brown".}
\item{misstext}{A character scalar, the text used to label maps with no locations in the
given bygroup, default " - No sites".}
\item{mar}{A numeric vector of length 4, the number of lines of margin
c(bottom, left, top, right) around each plotted map plot, default
c(0, 0, 2.5, 0).}
}
\description{
Multipanel map of locations, one map for each group.
}
\examples{
\dontrun{
mygroup <- c(1, 1, 1, 2, 2, 2, 3, 3, 3)
myID <- LETTERS[1:9]
mylon <- rnorm(9, mean=-82)
mylat <- rnorm(9, mean=45)
mapMulti(bygroup=mygroup, plottext=TRUE, ID=myID,
emphasis=myID \%in\% c("G", "A"), cushion=0, lon=mylon, lat=mylat)
mapMulti(bygroup=mygroup, sug=1:4, short=FALSE, lon=mylon, lat=mylat,
samescale=FALSE)
}
}
|
3e42d0f77ba293eff21638bb61d975255ea42700
|
27674239c0da0b7afc6ad9dc2622e084c3f5c004
|
/inst/9_3_worker.R
|
e5757fa8d0d5b7191c3553e7f13c4ec8c0499b96
|
[] |
no_license
|
RobinHankin/knotR
|
112248605c8a89a21641be35f2363c19db1c3783
|
0a5a6015a51340faa1ee43066d76be8f39adb499
|
refs/heads/master
| 2023-05-15T03:19:57.311824
| 2023-05-14T09:03:35
| 2023-05-14T09:03:35
| 99,854,849
| 5
| 0
| null | 2017-10-15T04:48:05
| 2017-08-09T21:37:28
|
R
|
UTF-8
|
R
| false
| false
| 954
|
r
|
9_3_worker.R
|
# Optimise the rendering of knot 9_3 with the knotR package: read the seed
# SVG, impose the knot's symmetry, run the layout optimiser, then write the
# optimised coordinates back out.
library(knotR)
filename <- "9_3.svg"
# Read the hand-drawn control points from the SVG.
a <- reader(filename)
# Pairs of node indices mapped onto each other by the knot's mirror symmetry.
Mver <- matrix(c(
10,25,
24,11,
12,23,
9,1,
22,13,
21,14,
8,2,
20,15,
19,16,
7,3,
6,4,
18,17
),ncol=2,byrow=TRUE)
# Symmetry object: mirror pairs Mver, with node 5 fixed on the axis.
sym93 <-
symmetry_object(
x = a,
Mver = Mver,
xver = 5
)
# Over/under specification: each row is one crossing (over strand, under strand).
ou93 <-
matrix(c(
2,14,
16,3,
5,17,
19,6,
8,20,
22,9,
11,25,
24,12,
13,23
),byrow=TRUE,ncol=2)
#as <- symmetrize(a,sym93)
#knotplot2(as,text=T,lwd=0,circ=F)
#knotplot(as,ou93)
#stop()
# Numerical optimisation of the knot layout subject to symmetry and the
# crossing constraints.
jj <- knotoptim(filename,
symobj = sym93,
ou = ou93,
prob=0,
iterlim=9000,print.level=2
# control=list(trace=100,maxit=1000), # these arguments for optim()
# useNLM=FALSE
)
# Overwrite the SVG with the optimised knot and dump the R object to 9_3.S.
write_svg(jj,filename,safe=FALSE)
# NOTE(review): the pattern '.svg' is a regex ('.' matches any character);
# it happens to work for this filename, but fixed = TRUE would be safer.
dput(jj,file=sub('.svg','.S',filename))
|
6667617ed56e35b60604244978b271a0b7e44520
|
ef121d82b4f5f0ab4f4872d17db50dea22252c81
|
/Complaint_Analysis_Project/Results/ac_analytics_insights/Binary Tests 01.R
|
91edc10584732a0feda73081dda6c70de3a132b2
|
[] |
no_license
|
indra-ch/ac-datadive-2018
|
d9725f3127549ba4fe371dd36b86776b467307a4
|
f2406899368cb17ec9fdc8c9579e8b63a5fb1e0d
|
refs/heads/master
| 2020-03-21T11:15:27.106968
| 2018-06-24T15:28:58
| 2018-06-24T15:28:58
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 19,147
|
r
|
Binary Tests 01.R
|
#load("C:/Users/rawle/Desktop/Hackathon/accountability_console_data_cleaned.RData")
#write.csv(complaints,"C:/Users/rawle/Desktop/Hackathon/complaints.csv")
# Quick inventory of the `complaints` data frame (loaded from the .RData
# referenced in the commented lines above): for each of the first 54 columns
# with at most 21 distinct values, print every distinct value.  Column 27 is
# skipped.  The 54/21/27 constants are hard-coded for this data set.
cnames <- names(complaints)
# Padding used to align column names in the printout.
# NOTE(review): spc is used as substr(spc, 1, 33 - nc) below, so it was
# presumably a ~33-character run of spaces; it may have been collapsed to a
# single space in this copy -- confirm against the original file.
spc <- " "
for (i in 1:54) {
var <- cnames[[i]]
test <- unique(complaints[,var])
nt <- length(test)
nc <- nchar(var)
# Pad the name toward 33 characters (substr yields "" for longer names).
var <- paste(var,substr(spc,1,33-nc),sep="")
if ((nt <= 21) & (i != 27)) {
for (j in 1:nt) {
val <- test[[j]]
if (j == 1) {
cat(i,"\t",var,"\t",j,"\t",val,"\n")
} else {
cat("\t",spc,"\t",j,"\t",val,"\n")
}
}
}
}
# Holder for the binary indicator matrix built by the encoding loop below
# (one column B1..B140 per (Variable, Value) pair).
binary <- data.frame()

# Label table for the indicator columns: one row per (Variable, Value) pair,
# in the exact order of the B1..B140 columns.
# (Replaces ~160 lines of manual row-by-row assignment with a data-driven
# build; the resulting 140-row data frame is identical.)
binlab_spec <- list(
  list(var = "IAM",
       vals = c("ABD_SPF_CRP", "EIB_CM", "IFC_CAO", "IDB_MICI", "ERBD_PCM",
                "WB_Panel", "OPIC_OA", "FMO_ICM", "ERBD_IRM", "AfDB_IRM",
                "JBIC_EEG", "COES_CSR", "JICA_EEG", "UNDP_SRM")),
  list(var = "Filer", vals = "Exists"),
  list(var = "IFI Support",
       vals = c("NA", "Advisory services", "Equity Investment",
                "Financial intermediary", "Project/investment lending",
                "Risk Guarantee", "Other")),
  list(var = "Sector",
       vals = c("Agribusiness", "Chemicals", "Community capacity and development",
                "Conservation and environmental protection", "Education",
                "Energy", "Extractives", "Forestry", "Healthcare",
                "Infrastructure", "Land reform", "Manufacturing", "Procurement",
                "Regulatory Development", "Other")),
  list(var = "Issues",
       vals = c("Biodiversity", "Consultation and disclosure", "Corruption/fraud",
                "Cultural heritage", "Displacement", "Due diligence",
                "Gender-based violence", "Human rights", "Indigenous peoples",
                "Labor", "Livelihoods",
                "Other community health and safety issues", "Other environmental",
                "Other gender-related issues", "Other retaliation", "Pollution",
                "Procurement", "Property damage", "Violence against the community",
                "Water", "Other")),
  list(var = "Compliance", vals = c("Report", "Non-Compliance")),
  list(var = "Filing Date", vals = "Exists"),
  list(var = "Registration",
       vals = c("Started", "Ended", "NA", "closed_with_outcome",
                "not_undertaken", "closed_without_outcome")),
  list(var = "Eligability",
       vals = c("Started", "Ended", "NA", "closed_with_outcome",
                "not_undertaken", "closed_without_outcome")),
  list(var = "Why Not Eligable",
       vals = c("NA", "Addressed outside process", "Case closed in earlier stage",
                "Complaint withdrawn", "Filer Issue",
                "Forwarded to other body within bank",
                "Funding and/or consideration ended",
                "Good faith requirement not met", "Inadequate information",
                "Issues previously raised",
                "Mechanism deemed involvement unnecessary",
                "Not desired by complainant", "Outside of mandate",
                "Project Completion Report issued", "Unknown", "Other")),
  list(var = "Dispute Resolution",
       vals = c("Started", "Ended", "NA", "closed_with_outcome",
                "not_undertaken", "closed_without_outcome", "in_progress")),
  list(var = "Why No Dispute Resolution",
       vals = c("NA", "Case closed in earlier stage",
                "Mechanism deemed involvement unnecessary",
                "Actor(s) involved refused to participate", "Unknown",
                "Addressed outside process", "Not desired by complainant",
                "Not offered by mechanism",
                "Mechanism unable to contact complainant",
                "Complaint withdrawn", "Funding and/or consideration ended")),
  list(var = "Compliance Review",
       vals = c("Started", "Ended", "NA", "closed_with_outcome",
                "not_undertaken", "closed_without_outcome", "in_progress")),
  list(var = "Why No Compliance Review",
       vals = c("NA", "Case closed in earlier stage",
                "Mechanism deemed involvement unnecessary",
                "Addressed outside process", "Complaint withdrawn",
                "Mechanism unable to contact complainant", "Unknown",
                "Funding and/or consideration ended",
                "Complainant did not refile", "Not desired by complainant",
                "Board did not approve", "Not offered by mechanism")),
  list(var = "Monitoring",
       vals = c("Started", "Ended", "NA", "closed_with_outcome",
                "not_undertaken", "closed_without_outcome", "in_progress")),
  list(var = "Why No Monitoring",
       vals = c("NA", "Case closed in earlier stage",
                "Mechanism deemed involvement unnecessary", "Unknown",
                "Board did not approve", "Funding and/or consideration ended")),
  list(var = "Date Closed", vals = "Exists")
)

# Expand the spec into the flat 140-row label table (character columns, as
# in the original incremental construction).
binlab <- data.frame(
  Variable = unlist(lapply(binlab_spec, function(s) rep(s$var, length(s$vals)))),
  Value = unlist(lapply(binlab_spec, function(s) s$vals)),
  stringsAsFactors = FALSE
)
# Encode every complaint into indicator columns B1..B140 of `binary`,
# mirroring the label rows set up in `binlab` above. `complaints` columns
# are referenced by position (7, 11-26, 28-54); the mapping from column
# numbers to fields is only implied by the value lists — confirm against
# the source spreadsheet before changing any index.
for (i in 1:nrow(complaints)) {
  # B1-B14: one-hot encoding of the accountability mechanism (col 7)
  k <- 0
  vals <- c("ABD_SPF_CRP","EIB_CM","IFC_CAO","IDB_MICI","ERBD_PCM","WB_Panel","OPIC_OA",
            "FMO_ICM","ERBD_IRM","AfDB_IRM","JBIC_EEG","COES_CSR","JICA_EEG","UNDP_SRM")
  for (j in 1:14) {
    k <- k + 1
    var <- paste("B",k,sep="")
    binary[i,var] <- 0
    val <- vals[[j]]
    val0 <- complaints[[i,7]]
    if (!is.na(val0)) {
      if (val0 == val) {
        binary[i,var] <- 1
      }
    }
  }
  # B15: col 11 present; B16: col 12 missing
  binary[i,"B15"] <- 0
  if (!is.na(complaints[[i,11]])){
    binary[i,"B15"] <- 1
  }
  binary[i,"B16"] <- 0
  if (is.na(complaints[[i,12]])){
    binary[i,"B16"] <- 1
  }
  # B17-B22: project type, matched against cols 12:13 (multi-valued field)
  k <- 16
  vals <- c("Advisory services","Equity Investment","Financial intermediary","Project/investment lending","Risk Guarantee","Other")
  for (j in 1:6) {
    k <- k + 1
    var <- paste("B",k,sep="")
    binary[i,var] <- 0
    val <- vals[[j]]
    for (h in 12:13) {
      val0 <- complaints[[i,h]]
      if (!is.na(val0)) {
        if (val0 == val) {
          binary[i,var] <- 1
        }
      }
    }
  }
  # B23-B37: sector, matched against cols 14:16
  k <- 22
  vals <- c("Agribusiness","Chemicals","Community capacity and development","Conservation and environmental protection",
            "Education","Energy","Extractives","Forestry","Healthcare","Infrastructure","Land reform","Manufacturing",
            "Procurement","Regulatory Development","Other")
  for (j in 1:15) {
    k <- k + 1
    var <- paste("B",k,sep="")
    binary[i,var] <- 0
    val <- vals[[j]]
    for (h in 14:16) {
      val0 <- complaints[[i,h]]
      if (!is.na(val0)) {
        if (val0 == val) {
          binary[i,var] <- 1
        }
      }
    }
  }
  # B38-B58: issues raised, matched against cols 17:26
  k <- 37
  vals <- c("Biodiversity","Consultation and disclosure","Corruption/fraud","Cultural heritage","Displacement",
            "Due diligence","Gender-based violence","Human rights","Indigenous peoples","Labor","Livelihoods",
            "Other community health and safety issues","Other environmental","Other gender-related issues",
            "Other retaliation","Pollution","Procurement","Property damage","Violence against the community",
            "Water","Other")
  for (j in 1:21) {
    k <- k + 1
    var <- paste("B",k,sep="")
    binary[i,var] <- 0
    val <- vals[[j]]
    for (h in 17:26) {
      val0 <- complaints[[i,h]]
      if (!is.na(val0)) {
        if (val0 == val) {
          binary[i,var] <- 1
        }
      }
    }
  }
  # B59-B60: logical flags taken directly from cols 28-29
  binary[i,"B59"] <- 0
  if (complaints[[i,28]]) {
    binary[i,"B59"] <- 1
  }
  binary[i,"B60"] <- 0
  if (complaints[[i,29]]) {
    binary[i,"B60"] <- 1
  }
  # B61-B63: cols 30-32 present; B64: stage status (col 33) missing
  binary[i,"B61"] <- 0
  if (!is.na(complaints[[i,30]])) {
    binary[i,"B61"] <- 1
  }
  binary[i,"B62"] <- 0
  if (!is.na(complaints[[i,31]])) {
    binary[i,"B62"] <- 1
  }
  binary[i,"B63"] <- 0
  if (!is.na(complaints[[i,32]])) {
    binary[i,"B63"] <- 1
  }
  binary[i,"B64"] <- 0
  if (is.na(complaints[[i,33]])) {
    binary[i,"B64"] <- 1
  }
  # B65-B67: one-hot of the col-33 status value
  k <- 64
  vals <- c("closed_with_outcome","not_undertaken","closed_without_outcome")
  for (j in 1:3) {
    k <- k + 1
    var <- paste("B",k,sep="")
    binary[i,var] <- 0
    val <- vals[[j]]
    val0 <- complaints[[i,33]]
    if (!is.na(val0)) {
      if (val0 == val) {
        binary[i,var] <- 1
      }
    }
  }
  # B68-B69: cols 35-36 present; B70: col-37 status missing
  binary[i,"B68"] <- 0
  if (!is.na(complaints[[i,35]])) {
    binary[i,"B68"] <- 1
  }
  binary[i,"B69"] <- 0
  if (!is.na(complaints[[i,36]])) {
    binary[i,"B69"] <- 1
  }
  binary[i,"B70"] <- 0
  if (is.na(complaints[[i,37]])) {
    binary[i,"B70"] <- 1
  }
  # B71-B73: one-hot of the col-37 status value
  k <- 70
  vals <- c("closed_with_outcome","not_undertaken","closed_without_outcome")
  for (j in 1:3) {
    k <- k + 1
    var <- paste("B",k,sep="")
    binary[i,var] <- 0
    val <- vals[[j]]
    val0 <- complaints[[i,37]]
    if (!is.na(val0)) {
      if (val0 == val) {
        binary[i,var] <- 1
      }
    }
  }
  # B74: no closure reason given (col 38 missing)
  binary[i,"B74"] <- 0
  if (is.na(complaints[[i,38]])) {
    binary[i,"B74"] <- 1
  }
  # B75-B89: closure reasons, matched against cols 38:40
  k <- 74
  vals <- c("Addressed outside process","Case closed in earlier stage","Complaint withdrawn","Filer Issue",
            "Forwarded to other body within bank","Funding and/or consideration ended","Good faith requirement not met",
            "Inadequate information","Issues previously raised","Mechanism deemed involvement unnecessary",
            "Not desired by complainant","Outside of mandate","Project Completion Report issued","Unknown","Other")
  for (j in 1:15) {
    k <- k + 1
    var <- paste("B",k,sep="")
    binary[i,var] <- 0
    val <- vals[[j]]
    for (h in 38:40) {
      val0 <- complaints[[i,h]]
      if (!is.na(val0)) {
        if (val0 == val) {
          binary[i,var] <- 1
        }
      }
    }
  }
  # B90-B91: cols 41-42 present; B92: col-43 status missing
  binary[i,"B90"] <- 0
  if (!is.na(complaints[[i,41]])) {
    binary[i,"B90"] <- 1
  }
  binary[i,"B91"] <- 0
  if (!is.na(complaints[[i,42]])) {
    binary[i,"B91"] <- 1
  }
  binary[i,"B92"] <- 0
  if (is.na(complaints[[i,43]])) {
    binary[i,"B92"] <- 1
  }
  # B93-B96: one-hot of the col-43 status value
  k <- 92
  vals <- c("closed_with_outcome","not_undertaken","closed_without_outcome","in_progress")
  for (j in 1:4) {
    k <- k + 1
    var <- paste("B",k,sep="")
    binary[i,var] <- 0
    val <- vals[[j]]
    val0 <- complaints[[i,43]]
    if (!is.na(val0)) {
      if (val0 == val) {
        binary[i,var] <- 1
      }
    }
  }
  # B97: reason (col 44) missing; B98-B107: one-hot of that reason
  binary[i,"B97"] <- 0
  if (is.na(complaints[[i,44]])) {
    binary[i,"B97"] <- 1
  }
  k <- 97
  vals <- c("Case closed in earlier stage","Mechanism deemed involvement unnecessary",
            "Actor(s) involved refused to participate","Unknown","Addressed outside process",
            "Not desired by complainant","Not offered by mechanism","Mechanism unable to contact complainant",
            "Complaint withdrawn","Funding and/or consideration ended")
  for (j in 1:10) {
    k <- k + 1
    var <- paste("B",k,sep="")
    binary[i,var] <- 0
    val <- vals[[j]]
    val0 <- complaints[[i,44]]
    if (!is.na(val0)) {
      if (val0 == val) {
        binary[i,var] <- 1
      }
    }
  }
  # B108-B109: compliance review start/end dates present (cols 45-46);
  # B110: compliance review status (col 47) missing
  binary[i,"B108"] <- 0
  if (!is.na(complaints[[i,45]])) {
    binary[i,"B108"] <- 1
  }
  binary[i,"B109"] <- 0
  if (!is.na(complaints[[i,46]])) {
    binary[i,"B109"] <- 1
  }
  binary[i,"B110"] <- 0
  if (is.na(complaints[[i,47]])) {
    binary[i,"B110"] <- 1
  }
  # B111-B114: one-hot of the compliance review status (col 47)
  k <- 110
  vals <- c("closed_with_outcome","not_undertaken","closed_without_outcome","in_progress")
  for (j in 1:4) {
    k <- k + 1
    var <- paste("B",k,sep="")
    binary[i,var] <- 0
    val <- vals[[j]]
    val0 <- complaints[[i,47]]
    if (!is.na(val0)) {
      if (val0 == val) {
        binary[i,var] <- 1
      }
    }
  }
  # B115: reason for no compliance review (col 48) missing;
  # B116-B126: one-hot of that reason
  binary[i,"B115"] <- 0
  if (is.na(complaints[[i,48]])) {
    binary[i,"B115"] <- 1
  }
  k <- 115
  vals <- c("Case closed in earlier stage","Mechanism deemed involvement unnecessary","Addressed outside process",
            "Complaint withdrawn","Mechanism unable to contact complainant","Unknown",
            "Funding and/or consideration ended","Complainant did not refile","Not desired by complainant",
            "Board did not approve","Not offered by mechanism")
  for (j in 1:11) {
    k <- k + 1
    var <- paste("B",k,sep="")
    binary[i,var] <- 0
    val <- vals[[j]]
    val0 <- complaints[[i,48]]
    if (!is.na(val0)) {
      if (val0 == val) {
        binary[i,var] <- 1
      }
    }
  }
  # B127-B128: monitoring start/end dates present (cols 49-50);
  # B129: monitoring status (col 51) missing
  binary[i,"B127"] <- 0
  if (!is.na(complaints[[i,49]])) {
    binary[i,"B127"] <- 1
  }
  binary[i,"B128"] <- 0
  if (!is.na(complaints[[i,50]])) {
    binary[i,"B128"] <- 1
  }
  binary[i,"B129"] <- 0
  if (is.na(complaints[[i,51]])) {
    binary[i,"B129"] <- 1
  }
  # B130-B133: one-hot of the monitoring status (col 51)
  k <- 129
  vals <- c("closed_with_outcome","not_undertaken","closed_without_outcome","in_progress")
  for (j in 1:4) {
    k <- k + 1
    var <- paste("B",k,sep="")
    binary[i,var] <- 0
    val <- vals[[j]]
    val0 <- complaints[[i,51]]
    if (!is.na(val0)) {
      if (val0 == val) {
        binary[i,var] <- 1
      }
    }
  }
  # B134: reason for no monitoring (col 52) missing;
  # B135-B139: one-hot of that reason
  binary[i,"B134"] <- 0
  if (is.na(complaints[[i,52]])) {
    binary[i,"B134"] <- 1
  }
  k <- 134
  vals <- c("Case closed in earlier stage","Mechanism deemed involvement unnecessary","Unknown",
            "Board did not approve","Funding and/or consideration ended")
  for (j in 1:5) {
    k <- k + 1
    var <- paste("B",k,sep="")
    binary[i,var] <- 0
    val <- vals[[j]]
    val0 <- complaints[[i,52]]
    if (!is.na(val0)) {
      if (val0 == val) {
        binary[i,var] <- 1
      }
    }
  }
  # B140: a closing date (col 53) exists
  binary[i,"B140"] <- 0
  if (!is.na(complaints[[i,53]])) {
    binary[i,"B140"] <- 1
  }
  # goal: target label from col 54, shifted so NA -> 0 and value v -> v + 1
  val <- complaints[[i,54]]
  if (is.na(val)) {
    binary[i,"goal"] <- 0
  } else {
    binary[i,"goal"] <- val + 1
  }
}
# Consistency check on the one-of-K indicator groups built above: within
# each group (identified by its positional column range in `binary`),
# every complaint should have at least one indicator set. Any complaint
# whose entire group is zero is reported as "<label> \t <row>".
# The thirteen previously copy-pasted stanzas are expressed as a single
# data-driven table of column ranges; report order per row is unchanged.
# NOTE(review): Prob2 spans columns 16:22 even though B16 is the
# "type missing" flag — range preserved from the original; confirm intent.
indicator_groups <- list(
  Prob1  = 1:14,
  Prob2  = 16:22,
  Prob3  = 23:37,
  Prob4  = 38:58,
  Prob5  = 64:67,
  Prob6  = 70:73,
  Prob7  = 74:89,
  Prob8  = 92:96,
  Prob9  = 97:107,
  Prob10 = 110:114,
  Prob11 = 115:126,
  Prob12 = 129:133,
  Prob13 = 134:139
)
# seq_len() is safe when `complaints` is empty (1:nrow would iterate 1, 0)
for (i in seq_len(nrow(complaints))) {
  for (label in names(indicator_groups)) {
    t0 <- 0
    for (j in indicator_groups[[label]]) {
      t0 <- t0 + binary[[i,j]]
    }
    if (t0 == 0) {
      cat(label,"\t",i,"\n")
    }
  }
}
# --- Chi-square screening of each indicator against the goal --------------
# For every indicator B1..B140, cross-tabulate indicator (off/on) against
# the two positive goal levels and test for dependence. Stored per row:
#   Chi_Sq_Stat        - chi-square statistic (0 when the test is NaN)
#   Prob_of_Dependence - 1 minus the test p-value
#   Percent_Eligable   - share of flagged cases with goal == 2
#   Sample_Size        - number of flagged cases
#   Warning            - reliability note when the smaller margin is small
# Fixes: output strings "Reults ..." corrected to "Results ...", and the
# test result is read via its documented names rather than positional [[ ]].
chisqtab <- data.frame(binlab)
tb <- table(list(binary$goal))
t0 <- tb[1]   # goal == 0 count (kept for reference; not used below)
t1 <- tb[2]   # goal == 1 count
t2 <- tb[3]   # goal == 2 count
for (i in 1:140) {
  var <- paste("B",i,sep="")
  s1 <- sum(binary[binary$goal==1,var])
  s2 <- sum(binary[binary$goal==2,var])
  # 2x2 contingency table: rows = goal level, cols = indicator off/on
  tbt <- data.frame()
  tbt["goal1","B0"] <- t1 - s1
  tbt["goal2","B0"] <- t2 - s2
  tbt["goal1","B1"] <- s1
  tbt["goal2","B1"] <- s2
  r1 <- s1 + s2                # cases with the indicator set
  r0 <- t1 + t2 - s1 - s2      # cases without it
  r2 <- min(r0, r1)            # smaller margin drives test reliability
  p0 <- (t2 - s2) / r0
  p1 <- s2 / r1
  cs <- chisq.test(tbt)
  xs <- cs$statistic
  xp <- 1 - cs$p.value         # "probability of dependence"
  cat(var,"\t",xs,"\t",xp,"\t",p0,"\t",p1,"\n")
  # chisq.test yields NaN when a margin is empty; store 0 instead
  if (is.nan(xs)) {
    xs <- 0
  }
  if (is.nan(xp)) {
    xp <- 0
  }
  # p1 is 0/0 = NaN when no case has the indicator set
  if (is.nan(p1)) {
    p1 <- 0
  }
  # defensive only: r1 is a count and cannot actually be NaN
  if (is.nan(r1)) {
    r1 <- 0
  }
  chisqtab[i,"Chi_Sq_Stat"] <- xs
  chisqtab[i,"Prob_of_Dependence"] <- xp
  chisqtab[i,"Percent_Eligable"] <- p1
  chisqtab[i,"Sample_Size"] <- r1
  chisqtab[i,"Warning"] <- ""
  if (r2 < 17) {
    chisqtab[i,"Warning"] <- "Results Directional"
    if (r2 < 9) {
      chisqtab[i,"Warning"] <- "Results Questionable"
      if (r2 < 5) {
        chisqtab[i,"Warning"] <- "Results Not Valid"
      }
    }
  }
}
write.csv(chisqtab,"C:/Users/rawle/Desktop/Hackathon/complaints Chi Square Variable Predictabiiity.csv")
|
44c54a0fd6d16547447e785dc237334695fd59da
|
5c618b59cc2ac45e48c05bb24d2e56be4e27077c
|
/data/bathymetry/format_bathymetry_data.R
|
f4adb6f7e8b6770250496742dfb904b4eccf7ce6
|
[] |
no_license
|
cfree14/dungeness
|
fefcd5e256e0f8fe4721fbd1b627942e74704b5b
|
76054741b1209078e92ce2cc543620023900ab6d
|
refs/heads/master
| 2023-08-29T04:43:00.107409
| 2023-08-08T18:21:41
| 2023-08-08T18:21:41
| 189,051,316
| 0
| 1
| null | null | null | null |
UTF-8
|
R
| false
| false
| 4,036
|
r
|
format_bathymetry_data.R
|
# Clear workspace
# NOTE(review): rm(list = ls()) in a sourced script wipes the caller's
# workspace; kept because this is run as a standalone script.
rm(list = ls())
# Setup
################################################################################
# Packages
library(raster)
library(tidyverse)
library(ggplot2)
library(cowplot)
# Directories
inputdir <- "data/bathymetry/raw"
outputdir <- "data/bathymetry/processed"
# Read data
# Grid registered is authoritative; cell registered is derived from grid registered
# https://catalog.data.gov/dataset/etopo1-1-arc-minute-global-relief-model
data <- raster(file.path(inputdir, "ETOPO1_Ice_c_geotiff.tif"), crs=CRS("+init=epsg:4326"))
# Build and export data
################################################################################
# Clip data to CA study area: 134W to 116W and 31N to 48N
# (cropped with a half-degree buffer on every side).
# byrow=TRUE spelled out — T is a reassignable alias, TRUE is not.
ca <- extent(matrix(data=c(-134, -116, 31, 48), nrow=2, byrow=TRUE))
ca_plus <- extent(matrix(data=c(-134.5, -115.5, 30.5, 48.5), nrow=2, byrow=TRUE))
ca_bathy <- crop(data, ca_plus)
plot(ca_bathy)
# Calculate slope and aspect (degrees, 8-neighbor window)
ca_slope <- terrain(ca_bathy, opt="slope", unit="degrees", neighbors = 8)
ca_aspect <- terrain(ca_bathy, opt="aspect", unit="degrees", neighbors = 8)
# Export all three layers together
save(ca_bathy, ca_slope, ca_aspect, file=file.path(outputdir, "ca_bathymetry_data.Rdata"))
# Plot data
################################################################################

# Helper: flatten a RasterLayer into the long data frame (value, x, y)
# that geom_raster() consumes.
raster_to_df <- function(raster_layer) {
  raster_layer %>%
    as("SpatialPixelsDataFrame") %>%
    as.data.frame() %>%
    setNames(c("value", "x", "y"))
}

# Long-format data frames for the three panels
ca_bathy_df <- raster_to_df(ca_bathy)
ca_slope_df <- raster_to_df(ca_slope)
ca_aspect_df <- raster_to_df(ca_aspect)

# Basemap polygons: US states and Mexico as sf objects
usa <- rnaturalearth::ne_states(country = "United States of America", returnclass = "sf")
mexico <- rnaturalearth::ne_countries(country = "Mexico", returnclass = "sf")

# Shared ggplot theme for all three panels
my_theme <- theme(
  axis.text = element_text(size = 5),
  axis.text.y = element_text(angle = 90, hjust = 0.5),
  axis.title = element_text(size = 5),
  plot.title = element_text(size = 7),
  legend.text = element_text(size = 7),
  legend.title = element_text(size = 9),
  legend.position = "bottom",
  panel.grid.major = element_line(colour = "transparent"),
  panel.grid.minor = element_blank(),
  panel.background = element_blank(),
  axis.line = element_line(colour = "black")
)
# Helper: one map panel — a raster fill layer over the US/Mexico coastline,
# cropped to the California study area, with shared labels and theme.
plot_panel <- function(panel_df, legend_title, panel_title) {
  ggplot() +
    # Raster layer
    geom_raster(data = panel_df, mapping = aes(x = x, y = y, fill = value)) +
    scale_fill_continuous(name = legend_title) +
    # CA/Mexico basemap
    geom_sf(data = usa, fill = "grey85", col = "white", size = 0.2) +
    geom_sf(data = mexico, fill = "grey85", col = "white", size = 0.2) +
    # Crop extent
    coord_sf(xlim = c(-125, -116), ylim = c(32, 42)) +
    # Small things
    labs(x = "", y = "") +
    ggtitle(panel_title) +
    theme_bw() + my_theme
}

# The three panels: depth, slope, aspect
g1 <- plot_panel(ca_bathy_df, "Depth (m)", "A. Bathymetric depth")
g2 <- plot_panel(ca_slope_df, "Slope (degrees)", "B. Bathymetric slope")
g3 <- plot_panel(ca_aspect_df, "Aspect (degrees)", "C. Bathymetric aspect")

# Merge panels into a single row and print
g <- plot_grid(g1, g2, g3, ncol = 3)
g
|
f13c92c9ec43362fecccee59e3cc25a69238ef17
|
291ed5e2041252780f7dbe4eaee28fd3e8e6fcc6
|
/man/perceptrain.Rd
|
657c7a788d0bbea685a05ae6cb1826405160c23b
|
[] |
no_license
|
xiaoyaoyang/freestats
|
a99844b040faa0ebc4157f5ea4d7d935276a7bbd
|
31ea1ecf7bb146214f1e9f800bd715938a6e2386
|
refs/heads/master
| 2021-01-01T18:42:13.937357
| 2014-10-06T04:56:35
| 2014-10-06T04:56:35
| 18,196,782
| 1
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 965
|
rd
|
perceptrain.Rd
|
\name{perceptrain}
\alias{perceptrain}
\title{An original perceptron algorithm}
\usage{
perceptrain(S, y, alpha_k = 1, endcost = 0)
}
\arguments{
\item{S}{Each row represents a data points with last
column equal to 1; S=[X,1]}
\item{y}{Class label for data points in S}
\item{alpha_k}{The step size controlling the speed of convergence}
\item{endcost}{The termination condition of cost
function}
}
\value{
\item{z}{Normal vector of a hyperplane: z=c(-c,Vh) }
\item{Z_history}{Trajectory of the normal vector of a
hyperplane} \item{NumofIteration}{Number of iterations for
the algorithm}
}
\description{
Train data with perceptron algorithm
}
\details{
S is especially designed for perceptron.
For more information \code{\link{fakedata}}
}
\examples{
set.seed(1024)
z <- runif(n=3)
mydata <- fakedata(w=z,n=100)
r <- perceptrain(S=mydata$S,y=mydata$y,alpha_k=1,endcost=0)
r
}
\author{
Xiaoyao Yang. Also, thanks Ran Fu for improving function by
introducing matrix computation method.
}
|
61a51aa072499555848cff85e68871b9301f903d
|
5196906b911127b60b8ac28ea744d9ea0c9404bd
|
/gitHub.R
|
479c4ba294bc74fc4bdb31b8afa69c24536fa97d
|
[] |
no_license
|
ashwingrao/ProgrammingAssignment4
|
1879a40a78e5d08bdb240700c48a1743f87ca2d2
|
af79ca6eed206e67f7ae3cd154084d02c370b6e7
|
refs/heads/master
| 2016-09-05T21:40:22.023990
| 2015-04-15T02:05:55
| 2015-04-15T02:05:55
| 30,477,813
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,599
|
r
|
gitHub.R
|
# Demonstration script: authenticate against the GitHub API with OAuth2
# (httr) and query a user's repositories. Requires network access and an
# interactive browser for the OAuth dance.
# NOTE(review): installing packages inside a script is discouraged; run
# install.packages() once interactively instead.
install.packages("httpuv")
library(httpuv)
library(httr)
# SECURITY NOTE(review): a real-looking OAuth client secret is hardcoded
# and committed here. It should be revoked and supplied via an untracked
# .Renviron instead of being set in source.
Sys.setenv(GITHUB_CONSUMER_SECRET="597e524550b6cc9dff5fb3a50fd39901b9ac67c2")
# 1. Find OAuth settings for github:
# http://developer.github.com/v3/oauth/
oauth_endpoints("github")
## Found this on the web
##github <- oauth_endpoint(NULL, "authorize", "access_token",
## base_url = "https://github.com/login/oauth")
# 2. Register an application at https://github.com/settings/applications;
# Use any URL you would like for the homepage URL (http://github.com is fine)
# and http://localhost:1410 as the callback url
#
# Insert your client ID and secret below - if secret is omitted, it will
# look it up in the GITHUB_CONSUMER_SECRET environmental variable.
#myapp <- oauth_app("github", key = "644a9e346d5caf938eb0", secret = "597e524550b6cc9dff5fb3a50fd39901b9ac67c2" )
myapp <- oauth_app("github", key = "644a9e346d5caf938eb0")
##
# 3. Get OAuth credentials (opens a browser for user consent)
github_token <- oauth2.0_token(oauth_endpoints("github"), myapp)
# 4. Use API: list jtleek's repositories with the authenticated token
gtoken <- config(token = github_token)
req <- GET("https://api.github.com/users/jtleek/repos", gtoken)
stop_for_status(req)
content(req)
# OR: the same token attached via with_config()
req <- with_config(gtoken, GET("https://api.github.com/rate_limit"))
stop_for_status(req)
content(req)
# Parse the JSON body by hand
# NOTE(review): toJSON() is called unqualified — presumably jsonlite::toJSON;
# confirm jsonlite is attached when this line runs.
json1 <- content(req, as="text")
json2 <- jsonlite::fromJSON(toJSON(json1))
jsonlite::prettify(json2)
## A better way: fetch and flatten in one step
data3 <- jsonlite::fromJSON("https://api.github.com/users/jtleek/repos", flatten = TRUE)
cbind(data3$full_name, data3$created_at)
## OR more simply, pull the creation date of one repo
data3[which(data3$full_name == "jtleek/datasharing"),]$created_at
|
116cbcdfcf88799d83f842e627c8fed2e58af117
|
37226cfcc32ca706e6ccb269e83f3cbe559b8f27
|
/man/rerun.Rd
|
56e11fb3c86b193dd77d6a3197610dfa0e194573
|
[] |
no_license
|
jpritikin/metasem
|
27428007caba2b72a09c50163b255c54f3412346
|
61176d1700698f7a5abccde51220ae9bac349ae4
|
refs/heads/master
| 2020-05-29T11:34:56.630948
| 2019-06-03T11:48:58
| 2019-06-03T11:48:58
| 35,242,232
| 0
| 0
| null | 2015-05-07T20:35:46
| 2015-05-07T20:35:46
| null |
UTF-8
|
R
| false
| false
| 687
|
rd
|
rerun.Rd
|
\name{rerun}
\alias{rerun}
\title{Rerun models via mxTryHard()
}
\description{It reruns models via mxTryHard().
}
\usage{
rerun(object, ...)
}
%- maybe also 'usage' for other objects documented here.
\arguments{
\item{object}{An object of either class \code{tssem1FEM},
class \code{tssem1REM}, class \code{wls}, class \code{meta}, class
\code{reml}, or class \code{MxModel}.
}
\item{\dots}{Further arguments to be passed to \code{\link[OpenMx]{mxTryHard}}}
}
\author{Mike W.-L. Cheung <mikewlcheung@nus.edu.sg>
}
\examples{
\dontrun{
random1 <- tssem1(Digman97$data, Digman97$n, method="REM", RE.type="Diag")
random1_rerun <- rerun(random1)
summary(random1_rerun)
}
}
\keyword{tssem}
|
ce9c5864ecdc7cad662254273fcf1f6f693f1e1d
|
2b5728585d67ad9f0210a21189459a1515faa72f
|
/man/RsqDist.Rd
|
413af776b49192a761f517575bcc3968c0519999
|
[] |
no_license
|
Matherion/userfriendlyscience
|
9fb8dd5992dcc86b84ab81ca98d97b9b65cc5133
|
46acf718d692a42aeebdbe9a6e559a7a5cb50c77
|
refs/heads/master
| 2020-12-24T16:35:32.356423
| 2018-09-25T06:41:14
| 2018-09-25T06:41:14
| 49,939,242
| 15
| 9
| null | 2018-11-17T10:34:37
| 2016-01-19T08:50:54
|
R
|
UTF-8
|
R
| false
| false
| 2,629
|
rd
|
RsqDist.Rd
|
\name{RsqDist}
\alias{dRsq}
\alias{pRsq}
\alias{qRsq}
\alias{rRsq}
\title{
The distribution of R squared (as obtained in a regression analysis)
}
\description{
These functions use the beta distribution to provide the R Squared distribution.
}
\usage{
dRsq(x, nPredictors, sampleSize, populationRsq = 0)
pRsq(q, nPredictors, sampleSize, populationRsq = 0, lower.tail = TRUE)
qRsq(p, nPredictors, sampleSize, populationRsq = 0, lower.tail = TRUE)
rRsq(n, nPredictors, sampleSize, populationRsq = 0)
}
\arguments{
\item{x, q}{
Vector of quantiles, or, in other words, the value(s) of R Squared.
}
\item{p}{
Vector of probabilities (\emph{p}-values).
}
\item{nPredictors}{
The number of predictors.
}
\item{sampleSize}{
The sample size.
}
\item{n}{
The number of R Squared values to generate.
}
\item{populationRsq}{
The value of R Squared in the population; this determines the center of the R Squared distribution. This has not been implemented yet in this version of \code{userfriendlyscience}. If anybody knows how to do this and lets me know, I'll happily integrate this of course.
}
\item{lower.tail}{
logical; if TRUE (default), probabilities are the likelihood of finding an R Squared smaller than the specified value; otherwise, the likelihood of finding an R Squared larger than the specified value.
}
}
\details{
The functions use the beta distribution (see \code{\link{dbeta}}) to provide the R Squared distribution.
}
\value{
\code{dRsq} gives the density, \code{pRsq} gives the distribution function, \code{qRsq} gives the quantile function, and \code{rRsq} generates random deviates.
}
\note{
These functions are based on the Stack Exchange (Cross Validated) post at \url{http://stats.stackexchange.com/questions/130069/what-is-the-distribution-of-r2-in-linear-regression-under-the-null-hypothesis}. Thus, the credits go to Alecos Papadopoulos, who provided the answer that was used to write these functions.
}
\author{
Gjalt-Jorn Peters (based on a CrossValidated answer by Alecos Papadopoulos)
Maintainer: Gjalt-Jorn Peters <gjalt-jorn@userfriendlyscience.com>
}
\seealso{
\code{\link{dbeta}}, \code{\link{pbeta}}, \code{\link{qbeta}}, \code{\link{rbeta}}
}
\examples{
### Generate 10 random R Squared values
### with 2 predictors and 100 participants
rRsq(10, 2, 100);
### Probability of finding an R Squared of
### .15 with 4 predictors and 100 participants
pRsq(.15, 4, 100, lower.tail = FALSE);
### Probability of finding an R Squared of
### .15 with 15 predictors and 100 participants
pRsq(.15, 15, 100, lower.tail=FALSE);
}
\keyword{ univar }
|
c36e4032007cb2e19c7e00345554cf6e6a483d94
|
0aff2c2fcb333842d0c3f3bab6a47c9dba93b8f5
|
/inst/tests/test-StaticMeasure.R
|
a3ae8dcab73c56e781f66a927d4cdaee2f0c098d
|
[] |
no_license
|
PirateGrunt/MRMR
|
9c6172b705062d3b42cffa79fea749b65cc888b3
|
dc8fb8bf2497425e0cb969c0743e04d0cd099bfc
|
refs/heads/master
| 2021-01-15T10:05:47.970229
| 2016-07-12T02:40:50
| 2016-07-12T02:40:50
| 7,688,809
| 1
| 1
| null | null | null | null |
UTF-8
|
R
| false
| false
| 9,675
|
r
|
test-StaticMeasure.R
|
#' StaticMeasure
#' +---Level
#' +----Measure
#' +----OriginPeriod
#'
#' StochasticMeasure
#' +---Level
#' +----Measure
#' +----OriginPeriod
#' +----EvaluationDate
#'
context("StaticMeasure")
# Sample data shared across the tests below.
# `op` is ten annual accident-year origin periods starting 2001.
op = OriginPeriod(StartDate = as.Date("2001-01-01"), Period=as.period(1, "years"), NumPeriods=10)
op$Moniker = paste0("AY ", as.character(year(op$StartDate)))
op$Type = "Accident Year"
# `sm` is a fully-crossed GL measure (2 sublines x 2 states) with two
# measures per cell; the 10-value Data columns recycle over the cross.
sm = StaticMeasure(OriginPeriod = op
                   , Level=list(Line = "GL", Subline = c("PremOps", "Products"), State = c("CA", "TX"))
                   , Measure = c("EarnedPremium", "IncurredLoss")
                   , Data = data.frame(EarnedPremium=seq(from=10000, to=19000, by=1000)
                                       , IncurredLoss = 0.75*seq(from=10000, to=19000, by=1000)))
# Per-state premium/loss vectors and the level data frames used by the
# construction tests (NY derived from CA by fixed ratios).
EarnedPremium.CA = seq(from=10000, to=19000, by=1000)
IncurredLoss.CA = EarnedPremium.CA * 0.75
EarnedPremium.NY = EarnedPremium.CA * .65
IncurredLoss.NY = IncurredLoss.CA * .76
df.CA = data.frame(EarnedPremium = EarnedPremium.CA, IncurredLoss = IncurredLoss.CA)
df.NY = data.frame(EarnedPremium = EarnedPremium.NY, IncurredLoss = IncurredLoss.NY)
dfLevel.CA = data.frame(Line = "GL", ClassGroup = "PremOps", State = "CA", stringsAsFactors = FALSE)
dfLevel.NY = data.frame(Line = "GL", ClassGroup = "PremOps", State = "NY")
dfLevel.PR = data.frame(Line = "GL", ClassGroup = "PremOps", Territory = "PR")
# Build a fresh, fully-populated StaticMeasure fixture: ten accident years
# (2001-2010) of CA GL/PremOps earned premium with a 75% incurred ratio.
GenericStaticMeasure <- function() {
  origin <- OriginPeriod(
    StartDate = as.Date("2001-01-01"),
    Period = as.period(1, "years"),
    NumPeriods = 10
  )
  origin$Moniker <- paste0("AY ", as.character(year(origin$StartDate)))
  origin$Type <- "Accident Year"

  level_df <- data.frame(
    Line = "GL", ClassGroup = "PremOps", State = "CA",
    stringsAsFactors = FALSE
  )

  fixture <- StaticMeasure(
    OriginPeriod = origin,
    Measure = c("EarnedPremium", "IncurredLoss"),
    Level = level_df
  )
  fixture$EarnedPremium <- seq(from = 10000, to = 19000, by = 1000)
  fixture$IncurredLoss <- fixture$EarnedPremium * 0.75
  fixture
}
# Constructors must accept named args, positional args, multi-row level
# data frames, plain/named level vectors, and a merged Data frame.
test_that("Construction", {
  # This will produce garbage. Must think of a way to address this.
  # Why does this produce garbage? Because we're not actually calling one of our constructors.
  # x = new("StaticMeasure", OriginPeriod = op, Measure = names(df.CA), Level = dfLevel.CA)
  # expect_true(is.StaticMeasure(x))
  x = StaticMeasure(OriginPeriod = op, Measure = names(df.NY), Level = dfLevel.NY)
  expect_true(is.StaticMeasure(x))
  # Positional arguments should work the same as named ones
  x = StaticMeasure(op, names(df.NY), dfLevel.NY)
  expect_true(is.StaticMeasure(x))
  # Multiple level rows in one call
  x = StaticMeasure(op, Measure=names(df.CA), rbind(dfLevel.CA, dfLevel.NY))
  expect_true(is.StaticMeasure(x))
  # Level given as an unnamed vector, then renamed afterwards
  x = StaticMeasure(OriginPeriod=op, Level=c("GL", "PremOps", "TX"))
  LevelNames(x) = c("Line", "ClassGroup", "State")
  # Level given as a named vector
  x = StaticMeasure(op, Level=c(Line = "GL", ClassGroup = "PremOps", State = "TX"))
  x = StaticMeasure(op, Measure = c("EarnedPremium", "IncurredLoss", "LossRatio"), Level=c("GL", "PremOps", "TX"))
  x = StaticMeasure(op, Measure = c("EarnedPremium", "IncurredLoss", "LossRatio"), Level=c(Line = "GL", ClassGroup = "PremOps", State = "TX"))
  MeasureNames(x) = c("EP", "IL", "LR")
  x = StaticMeasure(op, Measure = c("EarnedPremium", "IncurredLoss", "LossRatio"), Level = dfLevel.CA)
  # Data supplied pre-merged with the level columns
  dfData = merge(dfLevel.CA, df.CA)
  x = StaticMeasure(op, Data = dfData
                    , Measure = c("EarnedPremium", "IncurredLoss", "LossRatio")
                    , Level=c("Line", "ClassGroup", "State"))
})
# Properties
# Accessor semantics: $, [[ ]] and [ ] must dispatch on measure names,
# level attribute names, level values, and origin-period monikers.
test_that("Accessors", {
  # Rebuild the fixture locally so this test does not depend on mutations
  # made by other tests.
  sm = StaticMeasure(OriginPeriod = op
                     , Level=list(Line = "GL", Subline = c("PremOps", "Products"), State = c("CA", "TX"))
                     , Measure = c("EarnedPremium", "IncurredLoss")
                     , Data = data.frame(EarnedPremium=seq(from=10000, to=19000, by=1000)
                                         , IncurredLoss = 0.75*seq(from=10000, to=19000, by=1000)))
  # Test $ accessors
  x = sm$EarnedPremium
  expect_true(length(x) == 40)
  x = sm$State
  expect_true(length(x) == nrow(sm@Data))
  x = sm$Line
  expect_true(length(x) == nrow(sm@Data))
  # $ with a level VALUE ("CA") should subset to a smaller StaticMeasure
  x = sm$CA
  expect_true(is.StaticMeasure(x))
  expect_true(length(setdiff(LevelNames(x), c("Line", "Subline", "State"))) == 0)
  expect_true(unique(x$State) == "CA")
  # $ with an origin-period moniker subsets by period
  x = sm$"AY 2004"
  expect_true(is.StaticMeasure(x))
  x = sm$EarnedPremium[sm$State == "CA"]
  # This is a TODO
  # State(sm)
  # Test [[. This is more or less the same. However, when we use integer indexing, we will likely return an error, except in the unlikely event that
  # the user has supplied a vector, which return a single Level attribute.
  x = sm[["EarnedPremium"]]
  x = sm[["State", , FALSE]]
  expect_true(length(x) == nrow(sm@Data))
  x = sm[["State", UniqueAttribute=TRUE]]
  expect_true(length(setdiff(x, c("CA", "TX"))) == 0)
  x = sm[["State", UniqueAttribute=FALSE]]
  x = sm[["CA"]]
  expect_true(is.StaticMeasure(x))
  expect_true(length(setdiff(LevelNames(x), c("Line", "Subline", "State"))) == 0)
  expect_true(unique(x$State) == "CA")
  # x = sm[[c(3,2)]]
  # expect_true(is.StaticMeasure(x))
  # expect_true(length(x) == 1)
  #
  x = sm[[1]]
  expect_true(length(x) != 1)
  # Test [: logical masks and OriginPeriod= can combine in either order
  x = sm[OriginPeriod = "AY 2004"]
  expect_true(is.StaticMeasure(x))
  x = sm[OriginPeriod="AY 2004", sm$State=="CA"]
  expect_true(is.StaticMeasure(x))
  x = sm[sm$State=="CA", OriginPeriod="AY 2004"]
  x = sm[sm$State == "CA", OriginPeriod = "AY 2004"]
  expect_true(is.StaticMeasure(x))
  x = sm[sm$State == "CA", OriginPeriod = sm$OriginPeriod$Moniker[4]]
  # This won't work. The 4th element of the OriginPeriod object is an OriginPeriod object
  expect_error(x <- sm[sm$State == "CA", OriginPeriod = sm$OriginPeriod[4]])
  x = sm[sm$State == "CA", OriginPeriod = c("AY 2004", "AY 2005")]
  x = sm[sm$State == "CA", "EarnedPremium", OriginPeriod = "AY 2004"]
  x = sm[sm$State == "CA", "EarnedPremium"]
  # This will basically produce BS
  x = sm[1]
})
# Assignment semantics: level values, level names, measure vectors, and
# derived measures must all be settable in place.
# NOTE(review): this test mutates the module-level `sm` fixture, so later
# tests see the mutated object — intentional ordering dependence?
test_that("Assignment", {
  # Highly experimental. This will be used when I figure out how and if to create dynamic functions.
  # State(sm)[1] = "TX"
  # Test $ assignment: swap CA <-> NY and back
  sm$State[sm$State == "CA"] = "NY"
  sm$State[sm$State == "NY"] = "CA"
  # This should produce an error
  sm$State = "WV"
  sm$State[1:20] = "NY"
  # Renaming a level attribute, then renaming it back
  LevelNames(sm)[LevelNames(sm) == "State"] = "Territory"
  # Not sure why this doesn't work
  #sm$Territory[1:20] = "BC"
  LevelNames(sm)[LevelNames(sm) == "Territory"] = "State"
  sm$State[1:20] = "NY"
  # Masked measure assignment (scale NY premium up 5%)
  sm$EarnedPremium[sm$State == "NY"] = sm$EarnedPremium[sm$State == "NY"] * 1.05
  #sm$OriginPeriod = #something
  # Creating a brand-new derived measure should register its name
  sm$LossRatio = sm$IncurredLoss / sm$EarnedPremium
  y = MeasureNames(sm)
  expect_true(length(y) == 3)
  expect_true("EarnedPremium" %in% y)
  expect_true("IncurredLoss" %in% y)
  expect_true("LossRatio" %in% y)
  # Element-wise and [<- assignment, with and without OriginPeriod scope
  sm$EarnedPremium[1] = 20000
  expect_true(sm$EarnedPremium[1] == 20000)
  sm[, "EarnedPremium"] = 4
  sm[, "EarnedPremium", OriginPeriod = "AY 2004"] = 400
  sm[sm$State=="CA" & sm$Subline=="PremOps", c("IncurredLoss", "EarnedPremium"), OriginPeriod = "AY 2004"] = c(4, 6)
  sm[sm$State=="CA" & sm$Subline=="PremOps"
     , c("IncurredLoss", "EarnedPremium")] = c(seq(1000, by=1000, length.out=10), seq(2000, by=500, length.out=10))
  # Wholesale rename of every level attribute
  LevelNames(sm) = c("Bereich", "Abteiling", "Bundesstaat")
})
# Comparison
# Conversion: a StaticMeasure should flatten to a plain data.frame.
test_that("Conversion", {
  df = as.data.frame(sm)
  expect_true(class(df) == "data.frame")
})
# Concatenation: rbind()/c() of per-state measures should yield a combined
# object whose length equals the number of inputs.
test_that("Concatenate", {
  sm.CA = StaticMeasure(op, Measure = c("EarnedPremium", "IncurredLoss", "LossRatio")
                        , Level=c(Line = "GL", ClassGroup = "PremOps", State = "CA"))
  sm.NY = StaticMeasure(op, Measure = c("EarnedPremium", "IncurredLoss", "LossRatio")
                        , Level=c(Line = "GL", ClassGroup = "PremOps", State = "NY"))
  sm.TX = StaticMeasure(op, Measure = c("EarnedPremium", "IncurredLoss", "LossRatio")
                        , Level=c(Line = "GL", ClassGroup = "PremOps", State = "TX"))
  z = rbind(sm.CA, sm.NY)
  expect_true(length(z) == 2)
  z = c(sm.CA, sm.NY)
  expect_true(length(z) == 2)
  z = c(sm.CA, sm.NY, sm.TX)
  expect_true(length(z) == 3)
})
# Persistence: writing a combined measure to an Excel workbook.
# NOTE(review): writes StaticMeasure.xlsx into the working directory and
# never cleans it up; consider a temp file.
test_that("Persistence", {
  sm.CA = StaticMeasure(op, Measure = c("EarnedPremium", "IncurredLoss", "LossRatio")
                        , Level=c(Line = "GL", ClassGroup = "PremOps", State = "CA"))
  sm.NY = StaticMeasure(op, Measure = c("EarnedPremium", "IncurredLoss", "LossRatio")
                        , Level=c(Line = "GL", ClassGroup = "PremOps", State = "NY"))
  sm.TX = StaticMeasure(op, Measure = c("EarnedPremium", "IncurredLoss", "LossRatio")
                        , Level=c(Line = "GL", ClassGroup = "PremOps", State = "TX"))
  z = c(sm.CA, sm.NY, sm.TX)
  write.excel(z, "StaticMeasure.xlsx", overwrite=TRUE)
})
# Retained scratch code from earlier constructor experiments:
# sm.CA = new("StaticMeasure", OriginPeriod = op, Measure = df.CA, Level = dfLevel.CA)
# sm.NY = StaticMeasure(OriginPeriod = op, Measure = df.NY, Level = dfLevel.NY)
# sm.TX = StaticMeasure(op, df.TX, dfLevel.TX)
# sm.Multi = StaticMeasure(op, rbind(df.CA, df.NY, df.TX), rbind(dfLevel.CA, dfLevel.NY, dfLevel.TX))
# rm(EarnedPremium.CA, EarnedPremium.NY, EarnedPremium.TX)
# rm(IncurredLoss.CA, IncurredLoss.NY, IncurredLoss.TX)
# rm(dfLevel.CA, dfLevel.NY, dfLevel.PR, dfLevel.TX)
# rm(df.CA, df.NY, df.PR, df.TX)
# op = OriginPeriod(StartDate = as.Date("2002-01-01")
#                   , Period=as.period(6, "months"), EndDate=as.Date("2014-01-01"), Type="Accident Period")
# op$Moniker = paste0("H", ifelse(month(op$StartDate) == 1, "1", "2"), " ", year(op$StartDate))
|
693c78b0a6937ccd77130723761fc0cd68ea8142
|
6c72ac0e1310234a4fbb9375669e4d9695485a6d
|
/Plot2.R
|
df760a18a95cd9a139d69233c5364b2f52e3db82
|
[] |
no_license
|
rweingarten/ExData_Plotting1
|
d9e8c7f146974ec023b352ba17ec249bfa172439
|
69c55f880f7ef37d7fca94786217bf2ff1fa9f82
|
refs/heads/master
| 2020-12-11T03:56:21.971104
| 2016-01-08T21:14:25
| 2016-01-08T21:14:25
| 49,232,559
| 0
| 0
| null | 2016-01-07T21:44:11
| 2016-01-07T21:44:11
| null |
UTF-8
|
R
| false
| false
| 1,697
|
r
|
Plot2.R
|
# Plot2.R — Exploratory Data Analysis course project 1.
# Reads a two-day slice (2007-02-01 through 2007-02-02) of the UCI
# "Individual household electric power consumption" dataset and plots
# Global Active Power (kilowatts) over time to plot2.png.

# Start from an empty workspace.
# NOTE(review): rm(list = ls()) is discouraged in shared code; kept because
# the submission format expects a standalone script.
rm(list=ls())
# Read only the rows for 1-2 Feb 2007. The file holds one observation per
# minute starting 16/12/2006 17:24, so the target window begins 66,636
# data rows in; skip those and read 2880 rows (2 days x 1440 minutes).
# NOTE(review): with skip = 66636 and header = TRUE, read.table appears to
# consume the 31/01/2007 23:59 row as the "header" line, leaving exactly
# the 2880 observations for 01-02 Feb — confirm against the raw file.
final_data<-read.table("household_power_consumption.txt",
                       header = TRUE, sep=";", skip = 66636, nrows = 2880)
# Re-read just the first line of the file to recover the real column
# names, since the subset read above skipped past them.
headings_data <- read.table("household_power_consumption.txt",
                            header = TRUE, sep=";", nrows = 1)
# Apply the real column names to the subset.
colnames(final_data)<- colnames(headings_data)
# Combine the separate Date and Time columns into one timestamp so R
# treats the x-axis as time rather than as two character vectors.
DateTime<- strptime(paste(final_data$Date,final_data$Time, sep = " "),
                    "%d/%m/%Y %H:%M:%S")
# Render the plot to a 480x480 PNG file.
png("plot2.png", width=480,height=480)
# Line plot of Global Active Power with the title/labels required by the
# project instructions (empty x label, kilowatts on y).
plot(DateTime, final_data$Global_active_power, type = "l", xlab = "",
     ylab = "Global Active Power (kilowatts)")
dev.off()
|
35371ac253206b598416ad3f8af7a335a950610b
|
8d57d6e90520201c55755a9926d127199f9b5dad
|
/src/brasil/normSistem2.R
|
b14e29a69211e67f2b780dab1bda71a705cd5a79
|
[
"MIT"
] |
permissive
|
samuelwuw/Scientific_Initiation
|
cc5b730b6de18a639ca6c30b3e71fad642f5bec8
|
c9352030d140017b52498bbc1e53cb7a4972cadd
|
refs/heads/master
| 2021-12-16T06:52:40.384460
| 2021-12-09T19:02:56
| 2021-12-09T19:02:56
| 252,793,247
| 0
| 1
| null | null | null | null |
ISO-8859-1
|
R
| false
| false
| 11,386
|
r
|
normSistem2.R
|
# --- SOM-based clustering of Brazilian cities for a warehouse-location study ---
getwd()
# NOTE(review): hard-coded absolute path; only works on the author's machine.
setwd("C:/Users/samue/Documents/www/IC/Scientific_Initiation/src/brasil")
library(kohonen)
require(kohonen)
library(RSNNS)
# Alias to make explicit which `som` implementation is used.
somFunc <- kohonen::som
df <- read.csv('database/brCitiesCsv.csv', header = TRUE, sep = ",")
# Row indices of prominent cities from Brazil's Northeast and Southeast regions.
usedCities <- c(4:9, 30:60, 61:79, 81:85, 104:122, 141:183, 208:210, 231:242,
243:249, 250:259, 260:266, 328:374, 375:376)
# Keep latitude, longitude and population (columns 2, 3 and 8).
df_cities <- df[usedCities, c(2,3,8)]
rownames(df_cities) <- NULL
#data_train_matrix <- as.matrix(scale(df_cities))
# Normalize with RSNNS so all three features share a comparable scale.
data_train_matrix <- as.matrix(normalizeData(df_cities, type = "norm"))
colnames(data_train_matrix) <- c("lat", "lng", "population")
som_grid <- somgrid(xdim = 3, ydim = 4, topo="hexagonal") # 3x4 hexagonal SOM grid
# Train the self-organizing map; its 12 nodes act as candidate warehouse sites.
som_model <- somFunc(data_train_matrix,
grid=som_grid,
rlen=300,
alpha=c(0.05,0.01),
keep.data = TRUE,
radius = 5)
# Codebook vectors: one centroid per SOM node (12 in total).
centroides <- as.data.frame(som_model$codes)
View(centroides)
# Denormalize the centroids back to original lat/lng/population units.
centroides_norm <- as.data.frame(denormalizeData(centroides, getNormParameters(data_train_matrix)))
# Diagnostic plots of the trained SOM.
plot(som_model, type="changes")
# Number of samples mapped onto each node (centroid).
plot(som_model, type="count", main = "node counts")
plot(som_model, type="dist.neighbours", main = "SOM neighbour distances")
plot(som_model, type="codes", main = "codes")
som_model$unit.classif
#########################################################################
##########################     PLOTS     ################################
#########################################################################
library(ggplot2)
require(ggplot2)
m <- 12 # number of warehouse locations (SOM nodes)
n <- 211 # number of customer locations (cities)
D <- 0
x_mean <- mean(centroides[,1]) # mean x of the centroids
y_mean <- mean(centroides[,2]) # mean y of the centroids
centroid_id <- 12
# City id and population (columns 1 and 8).
df_cities_population <- df[usedCities, c(1,8)] # population column
# Distances between customers and their assigned warehouses.
customerDistanceVector <- c()
# Distances between the centroid mean position and each centroid.
centroidDistanceVector <- c()
# Cost vectors derived from the distances above.
centroid_costPerSquareMeter <- c()
customerCostVector <- c()
centroidCostVector <- c()
# `unit.classif` indicates which warehouse (node) each customer maps to.
#localiz <- as.matrix(som_model$unit.classif)
# Customer locations: coordinates, assigned SOM node, and population.
customer_locations <- data.frame(
id = 1:n,
x = df_cities[,1],
y = df_cities[,2],
localiz = as.matrix(som_model$unit.classif),
population = df_cities_population$population
)
View(customer_locations)
# Euclidean distance between a customer point (Xc, Yc) and a warehouse point
# (Xw, Yw); used as a transport-cost proxy between demand points and warehouses.
distanc <- function(Xc, Yc, Xw, Yw) {
  dx <- Xw - Xc
  dy <- Yw - Yc
  sqrt(dx^2 + dy^2)
}
# Distance between each centroid and the mean position of all centroids.
for(val in 1:m){
D <- distanc(centroides$lat[[val]], centroides$lng[[val]],
x_mean, y_mean)
centroidDistanceVector[val] <- D
}
View(centroidDistanceVector)
# Quartiles of the centroid-to-mean distances, used to band land prices.
quartile1 <- quantile(centroidDistanceVector, 0.25)
quartile2 <- quantile(centroidDistanceVector, 0.5)
quartile3 <- quantile(centroidDistanceVector, 0.75)
# Land cost per square meter: locations closer to the mean position are
# priced higher, in four quartile bands (2000/1500/1000/500).
for(val in 1:m){
if(centroidDistanceVector[val] <= quartile1){
centroid_costPerSquareMeter[val] <- 2000 # cost per square meter
}
if(centroidDistanceVector[val] > quartile1 && centroidDistanceVector[val] <= quartile2){
centroid_costPerSquareMeter[val] <- 1500
}
if(centroidDistanceVector[val] > quartile2 && centroidDistanceVector[val] <= quartile3){
centroid_costPerSquareMeter[val] <- 1000
}
if(centroidDistanceVector[val] > quartile3 ){
centroid_costPerSquareMeter[val] <- 500
}
}
View(centroid_costPerSquareMeter)
# Total population served by each centroid (sum over its assigned customers).
# `vector(length = m)` yields logical FALSE; arithmetic coerces it to 0.
clustPop <- vector(length = m)
for(i in 1:m){
for(j in 1:n){
if(customer_locations$localiz[j] == i){
clustPop[i] <- clustPop[i] + customer_locations$population[j]
}
}
}
View(clustPop)
# Warehouse size is proportional to the served population; its fixed cost is
# size times the per-square-meter land cost of its location band.
warehouse_costs <- vector(length = m)
warehouse_size <- vector(length = m)
meter_per_habitant <- 1
for(i in 1:m){
warehouse_size[i] <- (clustPop[i] * meter_per_habitant) / 100
warehouse_costs[i] <- warehouse_size[i] * centroid_costPerSquareMeter[i]
}
warehouse_locations <- data.frame(
id = 1:centroid_id,
x = centroides_norm$V1,
y = centroides_norm$V2,
dist_to_mean = centroidDistanceVector, # dist of each warehouse to the all-warehouse mean
cost_per_square_meter = centroid_costPerSquareMeter, # cost based on dist_to_mean quartiles
total_population = clustPop,
warehouse_size = warehouse_size, # size based on population
warehouse_costs = warehouse_costs # cost based on warehouse_size and cost_per_square_meter
)
View(warehouse_locations)
# Distance (in normalized coordinates) between each customer and its assigned
# warehouse, plus a simple linear transport cost (factor 2.5 per unit).
for(val in customer_locations$id){
D <- distanc(customer_locations$x[[val]], customer_locations$y[[val]],
warehouse_locations$x[[customer_locations$localiz[[val]]]],
warehouse_locations$y[[customer_locations$localiz[[val]]]])
customerDistanceVector[val] <- D
customerCostVector[val] <- D * 2.5
}
View(customerDistanceVector)
# Great-circle (haversine) distance comes from pracma.
library(pracma)
require(pracma)
transportcost_func <- function(i, j) {
customer <- customer_locations[i, ]
warehouse <- warehouse_locations[j, ]
# calcula o custo de transporte:
return(haversine(c(customer$x, customer$y), c(warehouse$x, warehouse$y))
* (2.5/25) * (warehouse$warehouse_size * 12/0.3))
}
transportcost_func(1,7)
# Build the full n x m transport-cost matrix: one row per customer, one
# column per warehouse, each cell computed via transportcost_func().
transportCostMatrixFact <- function(){
  cost_matrix <- matrix(nrow = n, ncol = m)
  for (warehouse_idx in seq_len(m)) {
    for (customer_idx in seq_len(n)) {
      cost_matrix[customer_idx, warehouse_idx] <- transportcost_func(customer_idx, warehouse_idx)
    }
  }
  cost_matrix
}
transport_cost <- as.data.frame(transportCostMatrixFact())
View(transport_cost)
summary(transport_cost)
grid_size <- 0
# Main plot: customers (black dots) and candidate warehouses (red triangles).
p <- ggplot(customer_locations, aes(x, y)) +
geom_point() +
geom_point(data = warehouse_locations, color = "red", alpha = 0.5, shape = 17) +
scale_x_continuous(limits = c(-25, -2)) +
scale_y_continuous(limits = c(-53, -33)) +
theme(axis.title = element_blank(),
axis.ticks = element_blank(),
axis.text = element_blank(), panel.grid = element_blank())
p + ggtitle("Warehouse location problem",
"Black dots are customers. Light red triangles show potential warehouse locations.")
# Mixed-integer program for the facility-location problem (ompr DSL).
library(ompr)
library(magrittr)
# (ompr masks base functions: and, mod, or)
model_MIP <- MIPModel() %>%
# x[i, j] = 1 iff customer i gets assigned to warehouse j
add_variable(x[i, j], i = 1:n, j = 1:m, type = "binary") %>%
# y[j] = 1 iff warehouse j is built
add_variable(y[j], j = 1:m, type = "binary") %>%
# minimize total transport cost plus warehouse fixed costs
set_objective(sum_expr(transportcost_func(i, j) * x[i, j], i = 1:n, j = 1:m) + # TODO: replace with transport_cost[i, j]
sum_expr(warehouse_costs[j] * y[j], j = 1:m), "min") %>% # TODO: replace with warehouse_costs[j]
# every customer needs to be assigned to exactly one warehouse
add_constraint(sum_expr(x[i, j], j = 1:m) == 1, i = 1:n) %>%
# a customer may only be assigned to a warehouse that is built
add_constraint(x[i,j] <= y[j], i = 1:n, j = 1:m)
model_MIP
library(ompr.roi)
library(ROI.plugin.glpk)
# Solve with the open-source GLPK solver.
result <- solve_model(model_MIP, with_ROI(solver = "glpk", verbose = TRUE))
suppressPackageStartupMessages(library(dplyr))
# Keep only assignments the solver set to 1 (value > .9 guards against
# floating-point noise in the binary solution).
matching <- result %>%
get_solution(x[i,j]) %>%
filter(value > .9) %>%
select(i, j)
# Add the assignments to the previous plot.
plot_assignment <- matching %>%
inner_join(customer_locations, by = c("i" = "id")) %>%
inner_join(warehouse_locations, by = c("j" = "id"))
customer_count <- matching %>% group_by(j) %>% summarise(n = n()) %>% rename(id = j)
# NOTE(review): the "fixed cost" reported below varies between runs of this
# script, since warehouse costs depend on the freshly trained SOM.
# Warehouses actually selected by the solver.
plot_warehouses <- warehouse_locations %>%
mutate(costs = warehouse_costs) %>%
inner_join(customer_count, by = "id") %>%
filter(id %in% unique(matching$j))
p +
geom_segment(data = plot_assignment, aes(x = x.y, y = y.y, xend = x.x, yend = y.x)) +
geom_point(data = plot_warehouses, color = "red", size = 3, shape = 17) +
ggrepel::geom_label_repel(data = plot_warehouses,
aes(label = paste0("fixed costs:", costs, "; customers: ", n )),
size = 3, nudge_y = 20) +
ggtitle(paste0("Cost optimal warehouse locations and customer assignment"),
"Big red triangles show warehouses that will be built, light red are unused warehouse
locations. Dots represent customers served by the respective warehouses.")
# Total fixed cost of the warehouses that were built:
sum(warehouse_costs[unique(matching$j)])
#################################################################################################################
######################################## OBJECTIVES #############################################################
#################################################################################################################
# 1)
#   Sum the populations of the 77 Delaware cities.
#   Divide each city's population by that total (dplyr package).
#   Multiply each of the 77 ratios by Delaware's real population (var realPop);
#   the result approximates each city's true population.
# 2)
#   Set a warehouse area per inhabitant (1 m2 per inhabitant).
#   Multiply it by each city's population = warehouse size for that city,
#   then multiply by the per-m2 costs already in the data frame.
# 3)
#   Add 2 columns to warehouse_locations:
#   one for cluster size (warehouse area = population * parameter p),
#   the other for total cost (warehouse area * cost per m2).
# 4)
#   Improve the transport-cost vector by adding each city's cost to all (16)
#   warehouses, to see which warehouses are best; try to recreate the
#   "transport cost" function from the warehouse-locations example.
# 5)
#   Take the sum of column "i" of the transport-cost vector and the "i"-th
#   fixed-cost row of the warehouse vector.
# 6)
#   Use the MIP model from the warehouse.R script, replacing the
#   "transportcost()" call with value i in the "transport_cost" data frame.
#   (There is a problem with the MIP model result.)
# 7)
#   Compute the matrix with the original data (77 x 77) and another with the
#   normalized data; take the overall mean of both, divide the original-data
#   mean by the normalized-data mean, and use that ratio when building the
#   "transport_cost" matrix by scaling the distances (expect a large value).
# 8) Build the transport_cost matrix (line 237).
# Sanity check of pracma::haversine with two cities from the data.
# NOTE(review): the printed label ("Frankfurt-Chicago") is copied from a
# pracma example and does not describe these indices -- confirm/replace.
fra <- c(df$lat[328], df$lng[328])
ord <- c(df$lat[250], df$lng[250])
dis <- haversine(fra, ord)
fprintf('Flight distance Frankfurt-Chicago is %8.3f km.\n', dis)
|
bdeca105dfe45bce95ee607d62e79c95d1581a23
|
3b8bb5d7051d064373dd8e894d28ac64a2e9e7d8
|
/PISA_2012_FL_part5.R
|
79a7324d0b7d3ea10445d79fc905f21907b95e99
|
[] |
no_license
|
educ0608/PISA_2012
|
981d27d7f42076882e1525270f388585a0920975
|
b44998580be96bd0d3c68707a4f92c4f3d016911
|
refs/heads/master
| 2021-01-01T06:45:44.687835
| 2015-08-26T20:43:05
| 2015-08-26T20:43:05
| 38,121,318
| 1
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 23,907
|
r
|
PISA_2012_FL_part5.R
|
# PISA2012_FL_part5
# Unraveling a secret: Vietnam's outstanding performance on the PISA test 2012 following the Fryer & Levitt (2004) approach
# Prepared by Elisabeth Sedmik on Wednesday, June 24 2015
# Based on code by Suhas D. Parandekar
# Revised on 07/28/2015
# The following code tries to unravel the secret of Vietnam's outstanding performance on the PISA 2012 assessment.
# It presents an analytical comparison of possible explanatory factors (as assessed within the PISA 2012 test)
# of Vietnam's high test score in MATH, comparing 7 other developing countries with Vietnam. The statistical
# approach taken is a modified dummy variable approach following Fryer and Levitt (2004).
##################################################################################
# PLEASE NOTE THAT THIS IS THE FILE FOR THE MATH REGRESSIONS
# For the Reading and Science regressions please see PISA_2012_FL_part7 onwards
##################################################################################
##################################################################################
# Outline:
# 1. GENERATING DATA SET (MERGING, CLEANING) (in part 1)
# 2. DESCRIPTIVE STATISTICS WITH VIETNAM + 7 DEVELOPING COUNTRIES (in part 1)
# 3. PISA SCORES (in part 1)
# 4. REGRESSION ANALYSIS FOR MATH OF A MODIFIED FRYER & LEVITT (2004) APPROACH (Math: part 2 - part 6)
##################################################################################
# Loading R packages to process PISA data:
# Admin packages
library(foreign)# To import and export data to and from R (eg. txt files)
library(xlsx)# To generate MS-Excel output
library(xtable)# To generate Latex output (in which the research paper is written)
library(epicalc)# For producing descriptives of data
library(tables) # Computes and displays complex tables of summary statistics
library(stargazer)# For latex regression and summary statistics tables
# Modeling packages
library(intsvy)# For PISA (and TIMSS, PIRLS, etc) analysis with Plausible Values (PV) and Balanced Repeated Replication (BRR)
library(TDMR)# For tuned data mining in R - eg. detect column of constants in dataframe
library(gmodels)# For model fitting, contains various R programming tools (eg. PROC FREQ like tables)
library(dplyr)# For varioys data manipulation
library(psych)# For rescaling variables to given mean and sd
library(sm)# for locally smoothed regressions and density estimation
library(lme4)# To run mixed-effects models using Eigen and S4
# Please be aware that many packages (eg. tables, intsvy) require additional packages to run. When trying to load
# the package, R will tell you which ones are missing. Overall you may need to download around 40 packages.
# Load the merged DEVCON8 dataset prepared in part 1 of this analysis.
load("DEVCON8a.RDA")
# Initial sample size: number of non-missing observations on the VIETNAM dummy.
T0 <- DEVCON8a[, c("VIETNAM")]
N0<- NROW(na.omit(T0))
N0
############### 4. REGRESSION ANALYSIS FOR MATH OF A MODIFIED FRYER & LEVITT (2004) APPROACH ################
############# 4.2 Explanatory variables - Students, teachers, pedagogical practices and schools #############
# NON-ROTATED PART:
# 1. Students
# Background: FEMALE, ST05Q01, REPEAT (indexed ST07), ST08Q01, ST09Q01, ST115Q01, MISCED, HISEI,
# --------------WEALTH, CULTPOS, HEDRES, ST28Q01
# Home Support: SC25 (Parent Participation, SC), SC24Q01 (Parental Expectations, SC)
# Gender Balance: PCGIRLS (Proportion of girls enrolled at school, SC)
# 2. Teachers
# Quantity: STRATIO, PROPCERT, PROPQUAL, TCSHORT, SMRATIO
# Quality: TCFOCST, SC30Q01, SC30Q02, SC30Q03, SC30Q04, SC31Q01-Q07 (TCH incentive), SC39Q08, SC35Q02
# 3. Pedagogical practices
# General / student-perceived teaching practices: SC40Q01-SC40Q03 (Practices in Maths, SC)
# Assessment: SC18Q01-Q08
# Classroom Management: SC39Q07 (Seeking student feedback, SC)
# 4. Schools
# Type: SC01Q01 (Public or private school, SC), SC02Q02 (Revenues from student fees, SC), SC03Q01
# --------------CLSIZE (Class Size based on SC05, SC), SCHSIZE (based on SC07, SC)
# Resources: RATCMP15 (Availabilit of resources, SC), COMPWEB (PC for learning connected to the internet, SC),
# --------------SC16Q01-Q11
# --------------SCMATEDU (Quality of educ. resources, SC), SCMATBUI (Quality of Physical Infrastructure, SC),
# --------------SC20Q01 (Additional maths lessons offered, SC)
# Leadership: LEADCOM (Framing Schools goal and curriculum, SC), LEADINST (Instructional Leadership, SC),
# --------------LEADPD (Promoting Development, SC), LEADTCH (Teacher Participation in Leadership, SC),
# --------------SC19Q01 & SC19Q02 (if Student Achievement data is made available, SC), SCHAUTON (School autonomy, SC),
# --------------TCHPARTI (Teacher participation, SC), SC39Q03 (recording of student/teacher/test data, SC)
# Selectivity: SCHSEL (School Selectivity of students, SC)
# Climate: STUDCLIM (Student aspects of school climate, SC), TEACCLIM (teacher aspects of school climate, SC),
# --------------TCMORALE (Teacher Morale, SC)
# ROTATED PART 1
# 1. Students
#-----Effort: MATWKETH
#-----Attitude: INSTMOT, INTMAT, SUBNORM, MATHEFF, FAILMAT, MATINTFC, MATBEH, PERSEV, OPENPS
# ROTATED PART 2
# 1. Students
#-----Effort: ST55Q02 (Math lessons out of school), ST57Q01-Q06 (dropped for Math)
#-----Preparation: EXAPPLM, EXPUREM, FACMCONC
# 2. Teachers
#-----Quantity: LMINS (minutes of language classes), MMINS (minutes of math classes), SMINS (minutes of science classes)
# ROTATED PART 3
# 1. Students
#-----Background: ST91Q03
#-----Attitude: SCMAT, ANXMAT, BELONG, ATSCHL, ATTLNACT, ST91Q02
# 2. Teachers
#----Quality: MTSUP, STUDREL, ST91Q04
# 3. Pedagogical Practices
#-----General: TCHBEHTD, TCHBEHSO
#-----Assessment: TCHBEHFA
#-----Cognitive Activation: COGACT
#-----Classroom Management: CLSMAN, DISCLIMA
########################## 4.2.1 Explanatory Variables - rotated & non-rotated questions #######################
############################### 4.2.8 Non-rotated & PART 2 rotated questions #############################
# Let's prepare our data set by deleting the missing data for all gap decreasing variables from the non-rotated parts
# AND deleting missing data from all variables we will use from the first (part 1) rotated part
# We will add the rotated variables according to the subsector (students, teachers, etc) they belong to (see our schematic structure)
# and within, as always, in order that they have been asked
# 1. STUDENTS
#-----Effort: ST55Q02 (Math lessons out of school), ST57Q01-Q06
#-----Preparation: EXAPPLM, EXPUREM, FACMCONC
# 2. TEACHERS
#-----Quantity: LMINS (minutes of language classes), MMINS (minutes of math classes), SMINS (minutes of science classes)
# 4. SCHOOLS
#-----Resources: ST72 (not for math but for 'test language' class) WE ARE NOT TAKING THIS FOR MATH
# Complete-case subset: keep only observations with no missing values on the
# non-rotated gap-decreasing variables plus the rotated part-2 variables used
# below. NOTE(review): the inventory comments above list "FACMCONC" but the
# column pulled here is "FAMCONC" -- confirm the correct codebook name.
T1b <- DEVCON8a[, c("VIETNAM","PROPCERT","SMRATIO","TCSHORT","TCFOCST","SC30Q01","SC30Q02","SC31Q01",
"SC31Q02","SC31Q03","SC31Q04","SC31Q05","SC31Q06","SC31Q07","ST05Q01","REPEAT",
"ST08Q01","ST115Q01","ST28Q01","SC24Q01","PCGIRLS","SC18Q01","SC18Q02","SC18Q05",
"SC39Q07","SC40Q01","SC40Q02","SC03Q01","CLSIZE","COMPWEB","SCMATEDU","SCMATBUI",
"SC16Q02","SC16Q06","SC16Q10","SC16Q11","SC20Q01","SC19Q01","SC39Q03","SCHSEL",
"ST55Q02","EXAPPLM","EXPUREM","FAMCONC","LMINS","MMINS","SMINS")]
N1 <- NROW(na.omit(T1b))
N1 # 11944 complete cases remain
N0-N1 # 36539 observations dropped for missingness
DEVCON8j <- DEVCON8a[complete.cases(T1b),]
# Recode the raw PISA questionnaire items into the analysis variables.
# All recodes create new columns on DEVCON8j; the raw columns are kept.
# ST05Q01: preschool attendance -> binary PRESCHOOL (1 = attended).
#_________________________________________________________________________________________________________
DEVCON8j$PRESCHOOL[DEVCON8j$ST05Q01==1] <- 0
DEVCON8j$PRESCHOOL[DEVCON8j$ST05Q01==2] <- 1
DEVCON8j$PRESCHOOL[DEVCON8j$ST05Q01==3] <- 1
# ST28Q01: ordered category -> approximate midpoint count BOOK_N
# (books at home, per the variable name -- confirm against the codebook).
#______________________________________________________________________________________________________________
DEVCON8j$BOOK_N[DEVCON8j$ST28Q01==1] <- 5
DEVCON8j$BOOK_N[DEVCON8j$ST28Q01==2] <- 15
DEVCON8j$BOOK_N[DEVCON8j$ST28Q01==3] <- 60
DEVCON8j$BOOK_N[DEVCON8j$ST28Q01==4] <- 150
DEVCON8j$BOOK_N[DEVCON8j$ST28Q01==5] <- 350
DEVCON8j$BOOK_N[DEVCON8j$ST28Q01==6] <- 500
# SC24Q01 (parental expectations): category 1 -> pressure dummy = 1, else 0.
#________________________________________________________________________________________________________________
DEVCON8j$PARPRESSURE[DEVCON8j$SC24Q01==1] <- 1
DEVCON8j$PARPRESSURE[DEVCON8j$SC24Q01==2] <- 0
DEVCON8j$PARPRESSURE[DEVCON8j$SC24Q01==3] <- 0
# SC25Q10/Q11 (parent participation): missing treated as 0 (no participation).
#_________________________________________________________________________________________________________________
DEVCON8j$SC25Q10[is.na(DEVCON8j$SC25Q10)] <- 0
DEVCON8j$SC25Q11[is.na(DEVCON8j$SC25Q11)] <- 0
DEVCON8j$FUNDMOM <- DEVCON8j$SC25Q11
DEVCON8j$COUNCILMOM <- DEVCON8j$SC25Q10
# --- Teacher-related variables ---
# SC30Q01, SC30Q02: yes/no items recoded to 0/1 dummies.
#_________________________________________________________________________________________________________________
# Teacher monitoring (TCM) through student assessment (STUASS).
DEVCON8j$TCM_STUASS[DEVCON8j$SC30Q01==1] <- 1
DEVCON8j$TCM_STUASS[DEVCON8j$SC30Q01==2] <- 0
# Teacher monitoring (TCM) through peer review (PEER).
DEVCON8j$TCM_PEER[DEVCON8j$SC30Q02==1] <- 1
DEVCON8j$TCM_PEER[DEVCON8j$SC30Q02==2] <- 0
# SC31Q01 - SC31Q07: merge externally computed WMLE scores, then standardize
# to mean 0 / sd 1 as the teacher-incentive index.
# NOTE(review): hard-coded absolute Windows path; breaks on other machines.
#________________________________________________________________________________________________________________
SC31OUT.rda <- read.csv("C:/Users/WB484284/Desktop/PISAlatestversions/RFiles/PISA_2012/SC31DATOUT.csv")
DEVCON8j <- merge(DEVCON8j,SC31OUT.rda,by="NEWID")
DEVCON8j$TCH_INCENTV <- rescale(DEVCON8j$WMLE_SC31, mean = 0, sd = 1,df=FALSE)
# --- Pedagogical-practice variables ---
# SC18Q01/Q02/Q05: assessment-use items -> 0/1 dummies.
#________________________________________________________________________________________________________________
DEVCON8j$ASS_PROG[DEVCON8j$SC18Q01==1] <- 1
DEVCON8j$ASS_PROG[DEVCON8j$SC18Q01==2] <- 0
DEVCON8j$ASS_PROM[DEVCON8j$SC18Q02==1] <- 1
DEVCON8j$ASS_PROM[DEVCON8j$SC18Q02==2] <- 0
DEVCON8j$ASS_SCH[DEVCON8j$SC18Q05==1] <- 1
DEVCON8j$ASS_SCH[DEVCON8j$SC18Q05==2] <- 0
# SC39Q07 (seeking student feedback) -> 0/1 dummy.
#________________________________________________________________________________________________________________
DEVCON8j$STU_FEEDB[DEVCON8j$SC39Q07==1] <- 1
DEVCON8j$STU_FEEDB[DEVCON8j$SC39Q07==2] <- 0
# SC40Q01/Q02 (practices in maths): computer use and textbook use -> 0/1.
#________________________________________________________________________________________________________________
DEVCON8j$COMP_USE[DEVCON8j$SC40Q01==1] <- 1
DEVCON8j$COMP_USE[DEVCON8j$SC40Q01==2] <- 0
DEVCON8j$TXT_BOOK[DEVCON8j$SC40Q02==1] <- 1
DEVCON8j$TXT_BOOK[DEVCON8j$SC40Q02==2] <- 0
# --- School-related variables ---
# SC03Q01 (community size): TOWN = 1 for small towns or towns (cats 2-3).
#_________________________________________________________________________________________________________
DEVCON8j$DUM_SMLTOWN <- ifelse(DEVCON8j$SC03Q01==2,1,0)
DEVCON8j$DUM_TOWN <- ifelse(DEVCON8j$SC03Q01==3,1,0)
DEVCON8j$TOWN <- DEVCON8j$DUM_SMLTOWN+DEVCON8j$DUM_TOWN
DEVCON8j$TOWN[DEVCON8j$TOWN>1] <- 1
# SC16Q02/Q06/Q10/Q11: extracurricular-offer items -> 0/1 dummies.
#________________________________________________________________________________________________________
DEVCON8j$EXC2_PLAY[DEVCON8j$SC16Q02==1] <- 1
DEVCON8j$EXC2_PLAY[DEVCON8j$SC16Q02==2] <- 0
DEVCON8j$EXC6_MATHCOMP[DEVCON8j$SC16Q06==1] <- 1
DEVCON8j$EXC6_MATHCOMP[DEVCON8j$SC16Q06==2] <- 0
DEVCON8j$EXC10_SPORT[DEVCON8j$SC16Q10==1] <- 1
DEVCON8j$EXC10_SPORT[DEVCON8j$SC16Q10==2] <- 0
DEVCON8j$EXC11_UNICORN[DEVCON8j$SC16Q11==1] <- 1
DEVCON8j$EXC11_UNICORN[DEVCON8j$SC16Q11==2] <- 0
# SC20Q01 (additional maths lessons offered) -> 0/1 dummy.
#________________________________________________________________________________________________________
DEVCON8j$SCL_EXTR_CL[DEVCON8j$SC20Q01==1] <- 1
DEVCON8j$SCL_EXTR_CL[DEVCON8j$SC20Q01==2] <- 0
# SC19Q01 (achievement data made available publicly) -> 0/1 dummy.
#________________________________________________________________________________________________________
DEVCON8j$SCORE_PUBLIC[DEVCON8j$SC19Q01==1] <- 1
DEVCON8j$SCORE_PUBLIC[DEVCON8j$SC19Q01==2] <- 0
# SC39Q03 (recording of student/teacher/test data) -> 0/1 dummy.
#_________________________________________________________________________________________________________
DEVCON8j$QUAL_RECORD[DEVCON8j$SC39Q03==1] <- 1
DEVCON8j$QUAL_RECORD[DEVCON8j$SC39Q03==2] <- 0
# ST55Q02 (maths lessons out of school): category -> approximate hours
# (0/1/3/5/7), increasing with the reported amount of lessons.
#________________________________________________________________________________________________________
DEVCON8j$OUTMATH[DEVCON8j$ST55Q02==1] <- 0
DEVCON8j$OUTMATH[DEVCON8j$ST55Q02==2] <- 1
DEVCON8j$OUTMATH[DEVCON8j$ST55Q02==3] <- 3
DEVCON8j$OUTMATH[DEVCON8j$ST55Q02==4] <- 5
DEVCON8j$OUTMATH[DEVCON8j$ST55Q02==5] <- 7
# LMINS/MMINS/SMINS (minutes of language/maths/science classes) -> hours.
#________________________________________________________________________________________________________
DEVCON8j$SHRS <- (DEVCON8j$SMINS)/60
DEVCON8j$MHRS <- (DEVCON8j$MMINS)/60
DEVCON8j$LHRS <- (DEVCON8j$LMINS)/60
# Save an intermediate file so the prepared data can simply be re-loaded
# later without re-running the recodes (keeps the R session lighter).
save(DEVCON8j, file = "C:/Users/WB484284/Desktop/PISAlatestversions/RFiles/PISA_2012/DEVCON8j.rda")
# Baseline regression on the complete-case sample: Vietnam dummy only.
# pisa.reg.pv (intsvy) handles plausible values and BRR replicate weights.
R134 <- pisa.reg.pv(pvlabel="MATH",
x=c("VIETNAM"),
weight="W_FSTUWT",
data=DEVCON8j,export=FALSE)
R134
# Baseline result on this sample:
#Estimate Std. Error t value
#(Intercept) 398.53 3.20 124.37
#VIETNAM 119.49 6.92 17.27
#R-squared 24.92 2.56 9.73
# Add the gap-decreasing non-rotated controls identified earlier.
R135 <- pisa.reg.pv(pvlabel="MATH",
x=c("VIETNAM",
"PRESCHOOL", "REPEAT", "ST08Q01","ST115Q01","BOOK_N", "PARPRESSURE",
"PCGIRLS", "FUNDMOM", "COUNCILMOM","PROPCERT","SMRATIO","TCSHORT",
"TCFOCST","TCM_STUASS","TCM_PEER","TCH_INCENTV", "ASS_PROG","ASS_PROM",
"ASS_SCH","STU_FEEDB","COMP_USE","TXT_BOOK","TOWN","CLSIZE","COMPWEB",
"SCMATEDU","SCMATBUI","EXC2_PLAY","EXC6_MATHCOMP","EXC10_SPORT","EXC11_UNICORN",
"SCL_EXTR_CL","SCORE_PUBLIC","QUAL_RECORD","SCHSEL"),
weight="W_FSTUWT",
data=DEVCON8j,export=FALSE)
R135 # Vietnam dummy falls to 72.62
# Add OUTMATH (out-of-school maths lessons, rotated part 2).
R136 <- pisa.reg.pv(pvlabel="MATH",
x=c("VIETNAM",
"PRESCHOOL", "REPEAT", "ST08Q01","ST115Q01","BOOK_N", "PARPRESSURE",
"PCGIRLS", "FUNDMOM", "COUNCILMOM","PROPCERT","SMRATIO","TCSHORT",
"TCFOCST","TCM_STUASS","TCM_PEER","TCH_INCENTV", "ASS_PROG","ASS_PROM",
"ASS_SCH","STU_FEEDB","COMP_USE","TXT_BOOK","TOWN","CLSIZE","COMPWEB",
"SCMATEDU","SCMATBUI","EXC2_PLAY","EXC6_MATHCOMP","EXC10_SPORT","EXC11_UNICORN",
"SCL_EXTR_CL","SCORE_PUBLIC","QUAL_RECORD","SCHSEL","OUTMATH"),
weight="W_FSTUWT",
data=DEVCON8j,export=FALSE)
R136 # adding OUTMATH decreases the Vietnam dummy
# Vietnam 69.59
# Additionally add EXAPPLM (rotated part-2 "preparation" variable).
R140 <- pisa.reg.pv(pvlabel="MATH",
x=c("VIETNAM",
"PRESCHOOL", "REPEAT", "ST08Q01","ST115Q01","BOOK_N", "PARPRESSURE",
"PCGIRLS", "FUNDMOM", "COUNCILMOM","PROPCERT","SMRATIO","TCSHORT",
"TCFOCST","TCM_STUASS","TCM_PEER","TCH_INCENTV", "ASS_PROG","ASS_PROM",
"ASS_SCH","STU_FEEDB","COMP_USE","TXT_BOOK","TOWN","CLSIZE","COMPWEB",
"SCMATEDU","SCMATBUI","EXC2_PLAY","EXC6_MATHCOMP","EXC10_SPORT","EXC11_UNICORN",
"SCL_EXTR_CL","SCORE_PUBLIC","QUAL_RECORD","SCHSEL","OUTMATH","EXAPPLM"),
weight="W_FSTUWT",
data=DEVCON8j,export=FALSE)
R140 # OUTMATH decreases, EXAPPLM increases the Vietnam dummy
# Vietnam 70.95
R141 <- pisa.reg.pv(pvlabel="MATH",
x=c("VIETNAM",
"PRESCHOOL", "REPEAT", "ST08Q01","ST115Q01","BOOK_N", "PARPRESSURE",
"PCGIRLS", "FUNDMOM", "COUNCILMOM","PROPCERT","SMRATIO","TCSHORT",
"TCFOCST","TCM_STUASS","TCM_PEER","TCH_INCENTV", "ASS_PROG","ASS_PROM",
"ASS_SCH","STU_FEEDB","COMP_USE","TXT_BOOK","TOWN","CLSIZE","COMPWEB",
"SCMATEDU","SCMATBUI","EXC2_PLAY","EXC6_MATHCOMP","EXC10_SPORT","EXC11_UNICORN",
"SCL_EXTR_CL","SCORE_PUBLIC","QUAL_RECORD","SCHSEL","OUTMATH","EXAPPLM","EXPUREM"),
weight="W_FSTUWT",
data=DEVCON8j,export=FALSE)
R141 # OUTMATH decreases, EXAPPLM increases, EXPUREM decreases
# Vietnam 66.83
R142 <- pisa.reg.pv(pvlabel="MATH",
x=c("VIETNAM",
"PRESCHOOL", "REPEAT", "ST08Q01","ST115Q01","BOOK_N", "PARPRESSURE",
"PCGIRLS", "FUNDMOM", "COUNCILMOM","PROPCERT","SMRATIO","TCSHORT",
"TCFOCST","TCM_STUASS","TCM_PEER","TCH_INCENTV", "ASS_PROG","ASS_PROM",
"ASS_SCH","STU_FEEDB","COMP_USE","TXT_BOOK","TOWN","CLSIZE","COMPWEB",
"SCMATEDU","SCMATBUI","EXC2_PLAY","EXC6_MATHCOMP","EXC10_SPORT","EXC11_UNICORN",
"SCL_EXTR_CL","SCORE_PUBLIC","QUAL_RECORD","SCHSEL","OUTMATH","EXAPPLM","EXPUREM","FAMCONC"),
weight="W_FSTUWT",
data=DEVCON8j,export=FALSE)
R142 # OUTMATH_NONE decreases, OUTMATH_LESS2 decreases, OUTMATH_2TO4 decreases, OUTMATH_4TO6 decreases, EXAPPLM increases,
# EXPUREM decreases, FAMCONC decreases drastically (-32%)
# Vietnam 45.38
# Student Effort & Preparation testing all gap decreasing variables
R142a <- pisa.reg.pv(pvlabel="MATH",
x=c("VIETNAM",
"PRESCHOOL", "REPEAT", "ST08Q01","ST115Q01","BOOK_N", "PARPRESSURE",
"PCGIRLS", "FUNDMOM", "COUNCILMOM","PROPCERT","SMRATIO","TCSHORT",
"TCFOCST","TCM_STUASS","TCM_PEER","TCH_INCENTV", "ASS_PROG","ASS_PROM",
"ASS_SCH","STU_FEEDB","COMP_USE","TXT_BOOK","TOWN","CLSIZE","COMPWEB",
"SCMATEDU","SCMATBUI","EXC2_PLAY","EXC6_MATHCOMP","EXC10_SPORT","EXC11_UNICORN",
"SCL_EXTR_CL","SCORE_PUBLIC","QUAL_RECORD","SCHSEL","OUTMATH","EXPUREM","FAMCONC"),
weight="W_FSTUWT",
data=DEVCON8j,export=FALSE)
R142a # VIETNAM 46.20
# Student Effort & Preparation testing all gap increasing variables (EXAPPLM)
R142b <- pisa.reg.pv(pvlabel="MATH",
x=c("VIETNAM",
"PRESCHOOL", "REPEAT", "ST08Q01","ST115Q01","BOOK_N", "PARPRESSURE",
"PCGIRLS", "FUNDMOM", "COUNCILMOM","PROPCERT","SMRATIO","TCSHORT",
"TCFOCST","TCM_STUASS","TCM_PEER","TCH_INCENTV", "ASS_PROG","ASS_PROM",
"ASS_SCH","STU_FEEDB","COMP_USE","TXT_BOOK","TOWN","CLSIZE","COMPWEB",
"SCMATEDU","SCMATBUI","EXC2_PLAY","EXC6_MATHCOMP","EXC10_SPORT","EXC11_UNICORN",
"SCL_EXTR_CL","SCORE_PUBLIC","QUAL_RECORD","SCHSEL","EXAPPLM"),
weight="W_FSTUWT",
data=DEVCON8j,export=FALSE)
R142b # VIETNAM 74.16
R143 <- pisa.reg.pv(pvlabel="MATH",
x=c("VIETNAM",
"PRESCHOOL", "REPEAT", "ST08Q01","ST115Q01","BOOK_N", "PARPRESSURE",
"PCGIRLS", "FUNDMOM", "COUNCILMOM","PROPCERT","SMRATIO","TCSHORT",
"TCFOCST","TCM_STUASS","TCM_PEER","TCH_INCENTV", "ASS_PROG","ASS_PROM",
"ASS_SCH","STU_FEEDB","COMP_USE","TXT_BOOK","TOWN","CLSIZE","COMPWEB",
"SCMATEDU","SCMATBUI","EXC2_PLAY","EXC6_MATHCOMP","EXC10_SPORT","EXC11_UNICORN",
"SCL_EXTR_CL","SCORE_PUBLIC","QUAL_RECORD","SCHSEL","OUTMATH","EXAPPLM","EXPUREM","FAMCONC","LHRS"),
weight="W_FSTUWT",
data=DEVCON8j,export=FALSE)
R143 # OUTMATH decreases, EXAPPLM increases,
# EXPUREM decreases, FAMCONC decreases drastically, LHRS increases
# Vietnam 45.40
R144 <- pisa.reg.pv(pvlabel="MATH",
x=c("VIETNAM",
"PRESCHOOL", "REPEAT", "ST08Q01","ST115Q01","BOOK_N", "PARPRESSURE",
"PCGIRLS", "FUNDMOM", "COUNCILMOM","PROPCERT","SMRATIO","TCSHORT",
"TCFOCST","TCM_STUASS","TCM_PEER","TCH_INCENTV", "ASS_PROG","ASS_PROM",
"ASS_SCH","STU_FEEDB","COMP_USE","TXT_BOOK","TOWN","CLSIZE","COMPWEB",
"SCMATEDU","SCMATBUI","EXC2_PLAY","EXC6_MATHCOMP","EXC10_SPORT","EXC11_UNICORN",
"SCL_EXTR_CL","SCORE_PUBLIC","QUAL_RECORD","SCHSEL","OUTMATH",
"EXAPPLM","EXPUREM","FAMCONC","LHRS","MHRS"),
weight="W_FSTUWT",
data=DEVCON8j,export=FALSE)
R144 # OUTMATH decreases, EXAPPLM increases,
# EXPUREM decreases, FAMCONC decreases drastically, LHRS increases, MHRS increases
# Vietnam 50.74
R145 <- pisa.reg.pv(pvlabel="MATH",
x=c("VIETNAM",
"PRESCHOOL", "REPEAT", "ST08Q01","ST115Q01","BOOK_N", "PARPRESSURE",
"PCGIRLS", "FUNDMOM", "COUNCILMOM","PROPCERT","SMRATIO","TCSHORT",
"TCFOCST","TCM_STUASS","TCM_PEER","TCH_INCENTV", "ASS_PROG","ASS_PROM",
"ASS_SCH","STU_FEEDB","COMP_USE","TXT_BOOK","TOWN","CLSIZE","COMPWEB",
"SCMATEDU","SCMATBUI","EXC2_PLAY","EXC6_MATHCOMP","EXC10_SPORT","EXC11_UNICORN",
"SCL_EXTR_CL","SCORE_PUBLIC","QUAL_RECORD","SCHSEL","OUTMATH","EXAPPLM","EXPUREM","FAMCONC","LHRS","MHRS","SHRS"),
weight="W_FSTUWT",
data=DEVCON8j,export=FALSE)
R145 # OUTMATH_NONE decreases, OUTMATH_LESS2 decreases, OUTMATH_2TO4 decreases, OUTMATH_4TO6 decreases, EXAPPLM increases,
# EXPUREM decreases, FAMCONC decreases drastically, LHRS increases, MHRS increases, SHRS increases
# Vietnam 55.66
# Now we are testing all the 3 gap decreasing variables
R146 <- pisa.reg.pv(pvlabel="MATH",
x=c("VIETNAM",
"PRESCHOOL", "REPEAT", "ST08Q01","ST115Q01","BOOK_N", "PARPRESSURE",
"PCGIRLS", "FUNDMOM", "COUNCILMOM","PROPCERT","SMRATIO","TCSHORT",
"TCFOCST","TCM_STUASS","TCM_PEER","TCH_INCENTV", "ASS_PROG","ASS_PROM",
"ASS_SCH","STU_FEEDB","COMP_USE","TXT_BOOK","TOWN","CLSIZE","COMPWEB",
"SCMATEDU","SCMATBUI","EXC2_PLAY","EXC6_MATHCOMP","EXC10_SPORT","EXC11_UNICORN",
"SCL_EXTR_CL","SCORE_PUBLIC","QUAL_RECORD","SCHSEL","OUTMATH","EXPUREM","FAMCONC"),
weight="W_FSTUWT",
data=DEVCON8j,export=FALSE)
R146 # Vietnam 46.20
write.csv(R146,"MATH_rot2.csv")
# Interestingly, when we did the one-by-one regression and had EXAPPLM (which we identified as gap increasing) included,
# the dummy stood at 45.93 after including FAMCONC. But again we have to pay attention to the direction and not the absolute
# values of the coefficients
# Now we are testing all the 4 gap increasing variables
R147 <- pisa.reg.pv(pvlabel="MATH",
x=c("VIETNAM",
"PRESCHOOL", "REPEAT", "ST08Q01","ST115Q01","BOOK_N", "PARPRESSURE",
"PCGIRLS", "FUNDMOM", "COUNCILMOM","PROPCERT","SMRATIO","TCSHORT",
"TCFOCST","TCM_STUASS","TCM_PEER","TCH_INCENTV", "ASS_PROG","ASS_PROM",
"ASS_SCH","STU_FEEDB","COMP_USE","TXT_BOOK","TOWN","CLSIZE","COMPWEB",
"SCMATEDU","SCMATBUI","EXC2_PLAY","EXC6_MATHCOMP","EXC10_SPORT","EXC11_UNICORN",
"SCL_EXTR_CL","SCORE_PUBLIC","QUAL_RECORD","SCHSEL","EXAPPLM","LHRS","MHRS","SHRS"),
weight="W_FSTUWT",
data=DEVCON8j,export=FALSE)
R147 # Vietnam 82.17
|
4e513aef66e747bd419be0f49802aa9f8690abb0
|
3ae52a1ff7e4e6fda6f2abb8d25c197f0e445891
|
/figures/sources/haplotype/internal_functions.R
|
40986b972a54377351264dfcf31606560bb6f2a9
|
[] |
no_license
|
omaviv/TCR_genotype
|
cf4165468caa0c48a56927f9a18b8ece613f3fe9
|
0860eb15d8620c000170e20b7a7d0aea8173a96e
|
refs/heads/master
| 2023-04-12T04:48:15.991477
| 2021-11-07T20:33:31
| 2021-11-07T20:33:31
| 425,570,283
| 0
| 1
| null | 2021-12-07T18:04:31
| 2021-11-07T17:28:56
|
R
|
UTF-8
|
R
| false
| false
| 41,512
|
r
|
internal_functions.R
|
# Internal functions -----------------------------------------------------
#' @include rabhit.R
NULL
########################################################################################################
# Calculate models likelihood
#
# \code{createHaplotypeTable} calculate likelihoods
#
# @param X a vector of counts
# @param alpha_dirichlet alpha parameter for dirichlet distribution
# @param epsilon epsilon
# @param priors a vector of priors
#
# @return log10 of the likelihoods
#
# @export
# Compare two chromosome-assignment hypotheses for a pair of allele counts.
# X:               vector of read counts (only the two largest are used).
# alpha_dirichlet: Dirichlet prior pseudo-counts.
# epsilon:         smoothing added to the single-chromosome hypothesis so its
#                  density is defined at the simplex boundary.
# params:          expected allele proportions under the two-chromosome
#                  hypothesis.
# Returns c(log10 likelihood of "single chromosome", log10 likelihood of
# "both chromosomes", number of times the counts were divided by 10 to keep
# both densities representable (non-zero) in double precision).
get_probabilites_with_priors <- function(X, alpha_dirichlet = c(0.5, 0.5) * 2, epsilon = 0.01, params = c(0.5, 0.5)) {
    counts <- sort(X, decreasing = TRUE)
    h_single <- c(1, 0)                   # all reads from one chromosome
    h_both <- c(params[1], params[2])     # reads split across both chromosomes
    # Dirichlet density of each hypothesis given the (prior-augmented) counts.
    likelihoods <- function(cnt) {
        c(ddirichlet((h_single + epsilon)/sum(h_single + epsilon), alpha_dirichlet + cnt),
          ddirichlet(h_both/sum(h_both), alpha_dirichlet + cnt))
    }
    n_scalings <- 0
    lik <- likelihoods(counts)
    # Rescale the counts until neither density underflows to exactly zero.
    while (min(lik) == 0) {
        n_scalings <- n_scalings + 1
        counts <- counts/10
        lik <- likelihoods(counts)
    }
    c(log10(lik), n_scalings)
}
##############################################################################################################
# Create haplotype table
#
# \code{createHaplotypeTable} Haplotype of a specific gene
#
# @details
#
# @param df table of counts
# @param HapByPriors vector of frequencies of each of the anchor gene alleles
# @param toHapByCol logical, haplotype each chromosome separetly to imrove the aligner assignmnet
# @param toHapPriors vector of frequencies of the haplotyped gene alleles
#
# @return data frame with chromosomal associasions of alleles of a specific gene
#
#
#
# @export
# Build the haplotype row for one gene.
#
# df:          count matrix for the gene being haplotyped -- rows = that
#              gene's alleles, columns = the two anchor-gene alleles
#              (the two "chromosomes"); cells = read counts.
# HapByPriors: prior frequencies of the two anchor alleles.
# toHapByCol:  if TRUE, first clean each chromosome (column) separately,
#              zeroing alleles the likelihood test deems absent, to reduce
#              aligner mis-assignments.
# toHapPriors: prior frequencies of the haplotyped gene's alleles.
#
# Returns a one-row data.frame: gene, the allele(s) assigned to each
# chromosome ("Unk" if undetermined), the full allele list, the priors used,
# and per-allele raw counts plus K values (log-likelihood differences).
createHaplotypeTable <- function(df, HapByPriors = c(0.5, 0.5), toHapByCol = TRUE, toHapPriors = c(0.5, 0.5)) {
    hapBy <- colnames(df)  # the two anchor alleles (chromosome labels)
    tohap <- rownames(df)  # alleles of the gene being haplotyped
    tohap.gene <- strsplit(tohap[1], "*", fixed = T)[[1]][1]
    # Start with both chromosomes unassigned ("Unk"); column names mirror the
    # anchor alleles with "*" replaced by "." (legal data.frame names).
    GENES.df <- data.frame(GENE = tohap.gene, "Unk", "Unk", stringsAsFactors = F)
    names(GENES.df)[2:3] <- gsub("*", ".", hapBy, fixed = T)
    GENES.df.num <- reshape2::melt(df)  # NOTE(review): computed but never used below
    df.old <- df  # preserve raw counts; df itself may be zeroed in the cleanup
    if (toHapByCol) {
        if (nrow(df) > 1) {
            # Per-chromosome cleanup: decide how many alleles are credibly
            # present in each column and zero out the remainder.
            for (j in 1:ncol(df)) {
                # Pad with zeros so counts[1:2] always exists.
                counts <- c(sort(df[, j], decreasing = T), 0, 0, 0)
                if (sum(counts) != counts[1]) {  # skip if one allele holds all reads
                    names(counts)[1:nrow(df)] <- names(sort(df[, j], decreasing = T))
                    # Reorder the priors to match the two top alleles, if named.
                    toHapPriors_srtd <- if (!is.null(names(toHapPriors)))
                        toHapPriors[names(counts[1:2])] else toHapPriors
                    resCol <- get_probabilites_with_priors(counts[1:2], params = toHapPriors_srtd)
                    # Drop the trailing rescaling counter; pick the winning hypothesis.
                    resMaxInd <- which.max(resCol[-(length(resCol))])
                    if (resMaxInd < nrow(df)) {
                        # Zero the counts of alleles beyond the supported number.
                        df[, j][order(df[, j], decreasing = T)[(resMaxInd + 1):nrow(df)]] <- 0
                    }
                }
            }
        }
    }
    counts.list <- list()  # per-allele sorted counts (for reporting)
    res.list <- list()     # per-allele likelihood results (for K values)
    for (i in 1:nrow(df)) {
        allele <- rownames(df)[i]
        # Alleles fully zeroed by the cleanup are dropped from the report.
        if(sum(df[i,])==0){
            tohap <- tohap[-which(tohap == allele)]
            next
        }
        gene <- strsplit(allele, "*", fixed = T)[[1]][1]
        # Report the ORIGINAL (pre-cleanup) counts, ordered by the cleaned ones.
        counts <- c(sort(df.old[i, ], decreasing = T), 0, 0, 0)
        if (ncol(df) == 1) names(counts)[1:ncol(df)] <- colnames(df)
        else names(counts)[1:ncol(df)] <- names(sort(df[i, ], decreasing = T))
        # Reorder anchor priors to match the chromosome ordering, if named.
        HapByPriors_srtd <- if (!is.null(names(HapByPriors)))
            HapByPriors[names(counts[1:2])] else HapByPriors
        res <- get_probabilites_with_priors(counts[1:2], params = HapByPriors_srtd)
        # Assign allele for a chromosome If hetero in chomosome : check if anythong was assigned (equals 'unk'), if was (different than 'unk'), paste second
        # allele in the same chromosome
        if (res[1] > res[2]) {
            # Single-chromosome hypothesis wins: the allele belongs to the
            # chromosome with the larger count; append if one is already there.
            if (GENES.df[GENES.df$GENE == gene, gsub(x = names(which.max(counts)), "*", ".", fixed = T) == names(GENES.df)] == "Unk") {
                GENES.df[GENES.df$GENE == gene, gsub(x = names(which.max(counts)), "*", ".", fixed = T) == names(GENES.df)] <- strsplit(allele, "*", fixed = T)[[1]][2]
            } else {
                GENES.df[GENES.df$GENE == gene, gsub(x = names(which.max(counts)), "*", ".", fixed = T) == names(GENES.df)] <- paste(c(GENES.df[GENES.df$GENE ==
                    gene, gsub(x = names(which.max(counts)), "*", ".", fixed = T) == names(GENES.df)], strsplit(allele, "*", fixed = T)[[1]][2]), collapse = ",")
            }
        } else {
            # Both-chromosomes hypothesis wins: record the allele on BOTH sides.
            if (GENES.df[GENES.df$GENE == gene, 2] == "Unk") {
                GENES.df[GENES.df$GENE == gene, 2] <- strsplit(allele, "*", fixed = T)[[1]][2]
            } else {
                GENES.df[GENES.df$GENE == gene, 2] <- paste(c(GENES.df[GENES.df$GENE == gene, 2], strsplit(allele, "*", fixed = T)[[1]][2]), collapse = ",")
            }
            if (GENES.df[GENES.df$GENE == gene, 3] == "Unk") {
                GENES.df[GENES.df$GENE == gene, 3] <- strsplit(allele, "*", fixed = T)[[1]][2]
            } else {
                GENES.df[GENES.df$GENE == gene, 3] <- paste(c(GENES.df[GENES.df$GENE == gene, 3], strsplit(allele, "*", fixed = T)[[1]][2]), collapse = ",")
            }
        }
        counts.list[[i]] <- counts
        res.list[[i]] <- res
    }
    # Compact the lists (skipped alleles left NULL holes behind).
    counts.list[sapply(counts.list, is.null)] <- NULL
    res.list[sapply(res.list, is.null)] <- NULL
    len.counts.list <- length(counts.list)
    # Append the reporting columns: allele list, priors, and up to four
    # (COUNTSi, Ki) pairs where Ki = |log10 L1 - log10 L2| for allele i.
    GENES.df <- cbind(GENES.df, data.frame(ALLELES = paste(sapply(strsplit(tohap, "*", fixed = T), "[", 2), collapse = ","), PRIORS_ROW = paste(format(HapByPriors,
        digits = 2), collapse = ","), PRIORS_COL = paste(format(toHapPriors, digits = 2), collapse = ","), COUNTS1 = paste(counts.list[[1]][order(names(counts.list[[1]])[1:2])],
        collapse = ","), K1 = max(res.list[[1]][1:2]) - min(res.list[[1]][1:2]), COUNTS2 = ifelse(length(counts.list) >
        1, paste(counts.list[[2]][order(names(counts.list[[2]])[1:2])], collapse = ","), NA), K2 = ifelse(length(counts.list) > 1, max(res.list[[2]][1:2]) - min(res.list[[2]][1:2]), NA),
        COUNTS3 = ifelse(length(counts.list) > 2, paste(counts.list[[3]][order(names(counts.list[[3]])[1:2])], collapse = ","), NA), K3 = ifelse(length(counts.list) > 2, max(res.list[[3]][1:2]) - min(res.list[[3]][1:2]), NA), COUNTS4 = ifelse(length(counts.list) > 3, paste(counts.list[[4]][order(names(counts.list[[4]])[1:2])], collapse = ","),
        NA), K4 = ifelse(length(counts.list) > 3, max(res.list[[4]][1:2]) - min(res.list[[4]][1:2]),
        NA), stringsAsFactors = F))
    return(GENES.df)
}
########################################################################################################
# Haplotype table to plot tables
#
# \code{parseHapTab} Parse the haplotype table for each panel in the haplotype plot
#
# @param hap_table haplotype summary table
# @param chain the Ig chain: IGH,IGK,IGL. Default is IGH.
# @param hapBy_alleles Alleles columns haplotyped by
#
# @return list of data frames for plotting
#
# Parse a haplotype summary table into the three data frames the haplotype
# plot needs: per-allele read counts (count.df), per-gene K confidence values
# (kval.df), and the chromosome/allele assignments (geno.df).
#
# hap_table:     one-row-per-gene haplotype summary (GENE, the two anchor
#                columns, ALLELES, COUNTS1..4, K1..4).
# chain:         receptor chain; defaults to "IGH".
# count_df:      if FALSE, skip building count.df (an empty frame is returned).
# sample_name:   subject identifier stamped into every output row.
# hapBy_cols:    names of the two anchor columns in hap_table.
# hapBy_alleles: display names of the anchor alleles ("*" form).
parseHapTab <- function(hap_table, chain = c("IGH", "IGK", "IGL", "TRB"), count_df = TRUE, sample_name, hapBy_cols, hapBy_alleles) {
    if (missing(chain)) {
        chain = "IGH"
    }
    chain <- match.arg(chain)
    #hap_table <- data.frame(lapply(hap_table, as.character), stringsAsFactors = FALSE)
    #hap_table$ALLELES <- sapply(gsub("[.][0-9]","",hap_table$ALLELES), function(a) ifelse(nchar(a)==1,paste0("0",a),a))
    #sample_name <- unique(hap_table$SUBJECT)
    # id_GENE_col <- which(names(hap_table)=="GENE")
    # hapBy_col_id <- c(id_GENE_col+1,id_GENE_col+2)
    # hapBy_cols = names(hap_table)[hapBy_col_id]
    # hapBy_alleles = gsub("_", "*", hapBy_cols)
    # Empty skeleton returned when count_df = FALSE.
    count.df <- setNames(data.frame(matrix(ncol = 6, nrow = 0), stringsAsFactors=FALSE),
                         c("SUBJECT", "GENE", "hapBy", "COUNT", "ALLELES", "COUNT2"))
    if(count_df){
        # One row per (gene-allele, chromosome panel).  Unknown/non-reliable
        # entries get COUNT 0; deletions keep their observed count; otherwise
        # each allele's count is pulled from the matching COUNTSi column.
        count.df <- data.table::rbindlist(lapply(1:2, function(panel){
            panel.alleles <- hap_table[[hapBy_cols[panel]]]
            return(data.table::rbindlist(lapply(1:length(panel.alleles), function(i){
                if (panel.alleles[i] == "Unk" | panel.alleles[i] == "NR") {
                    return(data.frame(SUBJECT = sample_name, GENE = hap_table$GENE[i], hapBy = hapBy_alleles[panel],
                                      COUNT = 0, stringsAsFactors=FALSE))
                } else {
                    if (panel.alleles[i] == "Del") {
                        return(data.frame(SUBJECT = sample_name, GENE = hap_table$GENE[i], hapBy = hapBy_alleles[panel],
                                          COUNT = as.numeric(strsplit(hap_table$COUNTS1[i],",")[[1]][panel]),stringsAsFactors=FALSE))
                    } else {
                        alleles <- strsplit(panel.alleles[i], ",")[[1]]
                        return(data.table::rbindlist(lapply(1:length(alleles), function(j){
                            # Position of this allele in the gene's ALLELES list
                            # selects the matching COUNTSi column.
                            count_id <- which(strsplit(hap_table[i,'ALLELES'],',')[[1]]==alleles[j])
                            return(data.frame(SUBJECT = sample_name, GENE = paste0(hap_table$GENE[i], "*", alleles[j]),
                                              hapBy = hapBy_alleles[panel],
                                              COUNT = as.numeric(strsplit(hap_table[i,paste0("COUNTS", count_id)], ",")[[1]][panel]
                                              ),stringsAsFactors=FALSE))
                        })))
                    }
                }
            })))
        }))%>% as.data.frame()
        # Split "GENE*ALLELE" back into separate GENE and ALLELES columns.
        count.df$ALLELES <- sapply(strsplit(as.character(count.df$GENE), "*", fixed = T), "[", 2)
        count.df$ALLELES[is.na(count.df$ALLELES)] <- "01" # Mock allele
        count.df$ALLELES <- factor(count.df$ALLELES, levels = c(sort(unique(count.df$ALLELES)), "NA"))
        count.df$GENE <- sapply(strsplit(as.character(count.df$GENE), "*", fixed = T), "[", 1)
        ## TO visualy make coutns of 1 not look like 0 , one is added
        # Signed log scale: panel 1 plotted to the left (negative), panel 2 right.
        count.df$COUNT2 <- ifelse(count.df$hapBy == hapBy_alleles[1], -1 * log10(as.numeric(count.df$COUNT) + 1), log10(as.numeric(count.df$COUNT) + 1))
        count.df$COUNT2[count.df$COUNT2 == Inf | count.df$COUNT2 == -Inf] <- 0
    }
    # K values panels data frame
    panel1.alleles <- hap_table[[hapBy_cols[1]]]
    # minimum of Ks if there is more than one allele
    hap_table[is.na(hap_table)] <- Inf
    panel1 <- sapply(1:length(panel1.alleles), function(i) {
        if (panel1.alleles[i] == "Unk" | panel1.alleles[i] == "Del" | panel1.alleles[i] == "NR") {
            min(as.numeric(hap_table[i, paste0("K", 1:4)]), na.rm = T)
        } else {
            min(as.numeric(hap_table[i, paste0("K", match(unlist(strsplit(panel1.alleles[i], ",")), unlist(strsplit(as.character(hap_table$ALLELES[i]), ","))))]),
                na.rm = T)
        }
    })
    panel1[panel1 == Inf] <- "NA"  # Inf marks "no K available"
    panel2.alleles <- hap_table[[hapBy_cols[2]]]
    # minimum of Ks if there is more than one allele
    panel2 <- sapply(1:length(panel2.alleles), function(i) {
        if (panel2.alleles[i] == "Unk" | panel2.alleles[i] == "Del" | panel2.alleles[i] == "NR") {
            min(as.numeric(hap_table[i, paste0("K", 1:4)]), na.rm = T)
        } else {
            min(as.numeric(hap_table[i, paste0("K", match(unlist(strsplit(panel2.alleles[i], ",")), unlist(strsplit(as.character(hap_table$ALLELES[i]), ","))))]))
        }
    })
    panel2[panel2 == Inf] <- "NA"
    kval.df <- data.frame(SUBJECT = sample_name, GENE = c(hap_table$GENE, hap_table$GENE), K = c(panel1, panel2), hapBy = c(rep(hapBy_alleles[1], length(panel1)), rep(hapBy_alleles[2],
        length(panel2))),stringsAsFactors=FALSE)
    # Bin the K values into the discrete legend groups used by the plot.
    bins_k <- cut(as.numeric(kval.df$K[kval.df$K!="NA"]), c(0, 1, 2, 3, 4, 5, 10, 20, 50, Inf), include.lowest = T, right = F)
    K_GROUPED <- gsub(",", ", ", levels(bins_k))
    kval.df$K_GROUPED[kval.df$K!="NA"] <- K_GROUPED[bins_k]
    kval.df$K_GROUPED[kval.df$K=="NA"] <- "NA"
    kval.df$K_GROUPED <- factor(kval.df$K_GROUPED, levels = c("NA", K_GROUPED))
    # Alleles panel data frame
    # Stack the two anchor columns long-wise, then split multi-allele cells
    # ("01,02") into one row per allele.
    geno.df <- data.frame(mapply(c,hap_table[, c("SUBJECT", "GENE", hapBy_cols[1])],hap_table[, c("SUBJECT", "GENE", hapBy_cols[2])]),
        hapBy = c(rep(hapBy_alleles[1], nrow(hap_table)),rep(hapBy_alleles[2], nrow(hap_table))), stringsAsFactors = F)
    names(geno.df)[3] <- "ALLELES"
    geno.df <- splitstackshape::cSplit(geno.df, "ALLELES", sep = ",", direction = "long", fixed = T, type.convert = F)
    # Returned (invisibly, via the assignment value) as the function result.
    parsed_hap_table <- list(geno.df = geno.df, kval.df = kval.df, count.df = count.df)
}
########################################################################################################
# Haplotype table to plot tables
#
# \code{parseHapTab} Parse the haplotype table for each panel in the haplotype plot
#
# @param hap_table haplotype summary table
# @param chain the Ig chain: IGH,IGK,IGL. Default is IGH.
# @param hapBy_alleles Alleles columns haplotyped by
#
# @return list of data frames for plotting
#
# parseHapTab_v2 <- function(hap_table, chain = c("IGH", "IGK", "IGL", "TRB"), sample_name, hapBy_cols, hapBy_alleles) {
#
# if (missing(chain)) {
# chain = "IGH"
# }
# chain <- match.arg(chain)
#
# hap_table[paste0("K", 1:4)][is.na(hap_table[paste0("K", 1:4)])] <- Inf
#
# count.df <- do.call(rbind,
# lapply(1:2, function(panel){
# panel.alleles <- hap_table[[hapBy_cols[panel]]]
# return(
# do.call(rbind,
# lapply(1:length(panel.alleles), function(i){
# if (panel.alleles[i] %in% c("Unk","NR","Del")){
# return(c(sample_name, hap_table$GENE[i], hapBy_alleles[panel],
# min(as.numeric(strsplit(hap_table$COUNTS1[i],",")[[1]])),
# "01", # Mock allele
# min(as.numeric(hap_table[i, paste0("K", 1:4)]), na.rm = T)
# ))
# }else{
# alleles <- strsplit(panel.alleles[i], ",")[[1]]
# k <- min(as.numeric(hap_table[i, paste0("K", match(unlist(strsplit(panel.alleles[i], ",")),
# unlist(strsplit(hap_table$ALLELES[i], ","))))]),
# na.rm = T)
# return(do.call(rbind, lapply(1:length(alleles), function(j){
# count_id <- which(strsplit(hap_table[i,'ALLELES'],',')[[1]]==alleles[j])
# return(c(sample_name, hap_table$GENE[i],
# hapBy_alleles[panel],
# as.numeric(strsplit(hap_table[i,paste0("COUNTS", count_id)], ",")[[1]][panel]),
# alleles[j],k
# ))
# })))
# }
# }
# )))
# }))%>% as.data.frame(stringsAsFactors = FALSE) %>% `colnames<-`(c("SUBJECT", "GENE", "hapBy", "COUNT", "ALLELES","K"))
#
# # K values panels data frame
# kval.df <- count.df[,c("SUBJECT","GENE","hapBy","K")] %>% dplyr::distinct()
#
# # Remove K
# count.df <- count.df[,-6]
# # Sort count alleles
# count.df$ALLELES <- factor(count.df$ALLELES, levels = c(sort(unique(count.df$ALLELES)), "NA"))
#
# # Turn counts to numeric
# count.df$COUNT <- as.numeric(count.df$COUNT)
#
# ## TO visualy make coutns of 1 not look like 0 , one is added
# count.df$COUNT2 <- ifelse(count.df$hapBy == hapBy_alleles[1], -1 * log10(as.numeric(count.df$COUNT) + 1),
# log10(as.numeric(count.df$COUNT) + 1))
# count.df$COUNT2[count.df$COUNT2 == Inf | count.df$COUNT2 == -Inf] <- 0
#
# # Bin K values
# kval.df$K[kval.df$K == Inf] <- "NA"
#
# bins_k <- cut(as.numeric(kval.df$K[kval.df$K!="NA"]), c(0, 1, 2, 3, 4, 5, 10, 20, 50, Inf), include.lowest = T, right = F)
# K_GROUPED <- gsub(",", ", ", levels(bins_k))
# kval.df$K_GROUPED[kval.df$K!="NA"] <- K_GROUPED[bins_k]
# kval.df$K_GROUPED[kval.df$K=="NA"] <- "NA"
# kval.df$K_GROUPED <- factor(kval.df$K_GROUPED, levels = c("NA", K_GROUPED))
#
#
# # Alleles panel data frame
# geno.df <- data.frame(mapply(c,hap_table[, c("SUBJECT", "GENE", hapBy_cols[1])],hap_table[, c("SUBJECT", "GENE", hapBy_cols[2])]),
# hapBy = c(rep(hapBy_alleles[1], nrow(hap_table)),rep(hapBy_alleles[2], nrow(hap_table))), stringsAsFactors = F)
# names(geno.df)[3] <- "ALLELES"
# geno.df <- tidyr::separate_rows(geno.df, "ALLELES", sep = ",")
# parsed_hap_table <- list(geno.df = geno.df, kval.df = kval.df, count.df = count.df)
#
# }
########################################################################################################
# Sort data frame by genes
#
# \code{sortDFByGene} Sort the \code{data.frame} by the genes names or position. For sorting by gene names the \code{sortAlleles} function by TIgGER is used.
# For sorting by position the defualt package gene location list is used.
#
# @param DATA data frame to sort
# @param chain the Ig chain: IGH,IGK,IGL. Default is IGH.
# @param method the method for sorting the genes. If by 'name' the genes in the output are ordered lexicographically,
# if by 'position' only functional genes are used and are ordered by their chromosomal location. Default is 'position'.
# @param removeIGH if TRUE, 'IGH'\'IGK'\'IGL' prefix is removed from gene names.
#
# @return sorted \code{data.frame}
#
# Order a plotting data frame's GENE factor either lexicographically
# (method = "name", via tigger's sortAlleles) or by chromosomal position
# (method = "position", via the package's GENE.loc table).  Optionally strips
# the chain prefix (IGH/IGK/IGL/TRB) from gene and anchor names and, for
# positional sorting, drops ORF/pseudo genes.  Returns the modified frame.
sortDFByGene <- function(DATA, chain = c("IGH", "IGK", "IGL", "TRB"), method = c("name", "position"), removeIGH = FALSE, geno = FALSE, peseudo_remove = F) {
    # Resolve the choice arguments.  NOTE: the effective default for `method`
    # is "position", NOT the first element of the choice vector.
    chain <- if (missing(chain)) "IGH" else match.arg(chain)
    method <- if (missing(method)) "position" else match.arg(method)
    # Drop the chain prefix from a character vector of gene names.
    strip_chain <- function(x) gsub("IG[H|K|L]|TRB", "", x)
    if (method == "name") {
        DATA$GENE <- factor(DATA$GENE, levels = rev(sortAlleles(unique(DATA$GENE), method = method)))
        if (removeIGH) {
            # Re-level after stripping, since the labels changed.
            DATA$GENE <- strip_chain(DATA$GENE)
            DATA$GENE <- factor(DATA$GENE, levels = rev(sortAlleles(unique(DATA$GENE), method = method)))
            if (!geno) DATA$hapBy <- strip_chain(DATA$hapBy)
        }
    } else {
        gene_order <- GENE.loc[[chain]]
        names(gene_order) <- gene_order
        if (peseudo_remove) {
            # Keep only functional genes: drop ORF/non-localized and pseudogenes.
            DATA <- DATA[!grepl("OR|NL", DATA$GENE), ]
            DATA <- DATA[!(DATA$GENE %in% PSEUDO[[chain]]), ]
        }
        DATA$GENE <- factor(DATA$GENE, levels = rev(gene_order))
        if (removeIGH) {
            gene_order <- strip_chain(gene_order)
            names(gene_order) <- gene_order
            DATA$GENE <- strip_chain(DATA$GENE)
            DATA$GENE <- factor(DATA$GENE, levels = rev(gene_order))
            if (!geno) DATA$hapBy <- strip_chain(DATA$hapBy)
        }
    }
    return(DATA)
}
########################################################################################################
# Calculate Jaacardian distance for haplotypes
#
# \code{calcJacc} Takes as an input two haplotypes and calculates the Jaacardian distance.
#
# @param vec1A chromosome A haplotype for first individual.
# @param vec1B chromosome B haplotype for first individual.
# @param vec2A chromosome A haplotype for second individual.
# @param vec2B chromosome B haplotype for second individual.
# @param method the method to be used for calculating. pooled - All alleles and all genes taken together (assuming that all genes appear and ordered the same in both vectors)
# geneByGene - For each gene separetly and then calculates average distance.
# @param naRm if 'TRUE' ingnores genes from both samples in which there is no data, else the Jaccardian distance for those gene defined as 1 (i.e the maximal distance)
# If both are NAs will remove it either way.
# @param Kweight if 'TRUE' the Jaacardian distance is weighted by the K value of the haplotype.
# @param k1A chromosome A haplotype K value for first individual.
# @param k1B chromosome B haplotype K value for first individual.
# @param k2A chromosome A haplotype K value for second individual.
# @param k2B chromosome B haplotype K value for second individual.
#
# @return Jaacardian distance value
#
# Jaccard SIMILARITY between two single-gene allele calls (despite the name,
# this returns the Jaccard index, not a distance; callers take 1 - result).
#
# vecA, vecB: comma-separated allele strings for one gene (one per subject),
#             or NA / "Unk" / "NR" when the call is missing or unreliable.
# naRm:       if TRUE, missing/unreliable entries yield NA (gene skipped by
#             the caller); if FALSE they yield 0, unless BOTH sides are
#             missing/unreliable, which always yields NA.
#
# Novel alleles carry a "_NN" suffix; when such suffixed alleles are present
# and the sets differ, an allele and its parent (sharing the prefix before
# "_") are counted as matching.
#
# Fix: the original returned its value implicitly through a trailing
# assignment inside nested if/else, which is fragile; control flow is now
# explicit with early returns, and scalar conditions use &&/|| consistently.
distJACC <- function(vecA, vecB, naRm = TRUE) {
    if (!is.na(vecA) && !is.na(vecB) &&
        !vecA %in% c("Unk", "NR") && !vecB %in% c("Unk", "NR")) {
        v1 <- unlist(strsplit(vecA, ","))
        v2 <- unlist(strsplit(vecB, ","))
        has_novel <- any(grepl("_[0-9][0-9]", v1)) || any(grepl("_[0-9][0-9]", v2))
        if (has_novel && length(intersect(v1, v2)) != length(unique(c(v1, v2)))) {
            # Credit partial matches between a novel allele and its parent:
            # each cross-pair sharing an underscore-delimited component counts
            # as one intersection and removes one element from the union.
            inter <- 0
            tot <- length(c(v1, v2))
            for (i in v1) {
                v1_n <- unlist(strsplit(i, "_"))
                for (ii in v2) {
                    v2_n <- unlist(strsplit(ii, "_"))
                    if (length(intersect(v1_n, v2_n)) != 0) {
                        inter <- inter + 1
                        tot <- tot - 1
                    }
                }
            }
            return(inter/tot)
        }
        # Plain Jaccard index: |A ∩ B| / |A ∪ B|.
        return(length(intersect(v1, v2))/length(unique(c(v1, v2))))
    }
    # At least one side is missing/unreliable.
    if (naRm) {
        return(NA)
    }
    if ((is.na(vecA) && is.na(vecB)) || (vecA %in% c("Unk", "NR") && vecB %in% c("Unk", "NR"))) {
        return(NA)
    }
    return(0)
}
# Jaccard-based distance between two subjects' haplotypes.
#
# vec1A/vec1B: chromosome A / B allele calls (one comma-separated string per
#              gene) for subject 1; vec2A/vec2B likewise for subject 2.
# method:      "pooled"     -- single Jaccard distance over all gene/allele
#                              pairs of both chromosomes at once;
#              "geneByGene" -- per-gene similarity via distJACC, averaged
#                              over genes (optionally K-weighted).
# naRm:        passed to distJACC; if TRUE genes with missing data are skipped.
# Kweight:     weight per-gene similarities by the mean K confidence value.
# k1A..k2B:    per-gene K vectors, used only when Kweight = TRUE.
# special_jacc: accepted for interface compatibility; currently unused.
#
# Returns the Jaccard distance (1 - similarity).
calcJacc <- function(vec1A, vec1B, vec2A, vec2B, method = c("pooled", "geneByGene"), naRm = TRUE, Kweight = FALSE, k1A, k1B, k2A, k2B, special_jacc = FALSE) {
    # Fix: resolve `method` explicitly.  Previously a missing `method` left
    # the whole choice vector in place and `method == "geneByGene"` put a
    # length-2 logical inside `if`, which errors on R >= 4.2.
    method <- match.arg(method)
    vec1A <- as.character(vec1A)
    vec1B <- as.character(vec1B)
    vec2A <- as.character(vec2A)
    vec2B <- as.character(vec2B)
    if (method == "geneByGene") {
        # Per-gene similarity, averaged over the two chromosomes
        # (NaN when both chromosomes are missing for a gene).
        jacc <- sapply(seq_along(vec1A), function(i) {
            distA <- distJACC(vec1A[i], vec2A[i], naRm)
            distB <- distJACC(vec1B[i], vec2B[i], naRm)
            mean(c(distA, distB), na.rm = TRUE)
        })
        # (leftover debug `print(jacc)` removed)
        if (Kweight) {
            # Inf marks "no K available"; zero it so it carries no weight.
            k1A[k1A == Inf] <- 0
            k2A[k2A == Inf] <- 0
            k1B[k1B == Inf] <- 0
            k2B[k2B == Inf] <- 0
            KavgA <- sapply(seq_along(k1A), function(x) {
                mean(c(k1A[x], k2A[x]), na.rm = T)
            })
            if (is.list(KavgA)) {
                KavgA[sapply(KavgA, is.null)] <- NA
                KavgA <- unlist(KavgA)
            }
            KavgB <- sapply(seq_along(k1B), function(x) {
                mean(c(k1B[x], k2B[x]), na.rm = T)
            })
            if (is.list(KavgB)) {
                KavgB[sapply(KavgB, is.null)] <- NA
                KavgB <- unlist(KavgB)
            }
            Kavg <- rowMeans(cbind(KavgA, KavgB), na.rm = T)
            jacc <- weighted.mean(jacc, Kavg, na.rm = T)
            return(1 - jacc)
        }
        return(mean(1 - jacc, na.rm = T))
    }
    # method == "pooled": tag every allele with its gene index ("G<i>_<allele>")
    # so identical allele names of different genes stay distinct, then compute
    # one Jaccard index over the pooled sets of both chromosomes.
    v1 <- unlist(sapply(seq_along(vec1A), function(x) {
        paste(paste0("G", x), unlist(strsplit(vec1A[[x]], ",")), sep = "_")
    }))
    v1 <- c(v1, unlist(sapply(seq_along(vec1B), function(x) {
        paste(paste0("G", x), unlist(strsplit(vec1B[[x]], ",")), sep = "_")
    })))
    v2 <- unlist(sapply(seq_along(vec2A), function(x) {
        paste(paste0("G", x), unlist(strsplit(vec2A[[x]], ",")), sep = "_")
    }))
    v2 <- c(v2, unlist(sapply(seq_along(vec2B), function(x) {
        paste(paste0("G", x), unlist(strsplit(vec2B[[x]], ",")), sep = "_")
    })))
    # Fix: drop "NA" entries only when present.  The old unconditional
    # `v1[-grep(...)]` evaluates to `v1[-integer(0)]` when nothing matches,
    # which returns an EMPTY vector and silently wiped the whole haplotype.
    na1 <- grep("NA", v1, fixed = T)
    if (length(na1) > 0) v1 <- v1[-na1]
    na2 <- grep("NA", v2, fixed = T)
    if (length(na2) > 0) v2 <- v2[-na2]
    jacc <- length(intersect(v1, v2))/length(unique(c(v1, v2)))
    return(1 - jacc)
}
########################################################################################################
# Binom test for deletion infrence
#
# \code{binom_test_deletion} Infer deletion from binomial test
#
# @param GENE.usage.df a data frame of relative gene usage
# @param cutoff a data frame of relative gene usage
# @param p.val.cutoff a p value cutoff to detect deletion
# @param chain the IG chain: IGH,IGK,IGL. Default is IGH.
# @param GENE.loc.IG the genes by location
#
# @return data frame with the binomial test results
#
# Infer gene deletions from relative usage via binomial tests.
#
# GENE.usage.df: per-gene usage frame; reads FRAC (relative usage), NREADS
#                (total reads) and min_frac (expected minimum usage; Inf
#                meaning "no expectation available").
# cutoff:        usage fraction below which a gene is a deletion candidate.
# p.val.cutoff:  BH-adjusted p-value threshold for calling a deletion.
# chain:         receptor chain (not used in the computation here).
# GENE.loc.IG:   gene order used to re-level the GENE factor for plotting.
#
# Returns the input frame with added columns: pval, foradj (adjustment
# group), pval_adj (BH-adjusted within group) and col (the call:
# "Deletion" / "No Deletion" / "NA").
binomTestDeletion <- function(GENE.usage.df, cutoff = 0.001, p.val.cutoff = 0.01, chain = "IGH", GENE.loc.IG) {
    # Raw p-values: candidates get a one-sample binomial test of the observed
    # usage against the expected minimum; genes with no expectation (Inf) get
    # 0 (always "significant"), all others get 1 (never significant).
    GENE.usage.df$pval <- sapply(1:nrow(GENE.usage.df), function(i) {
        if ((GENE.usage.df$FRAC[i] < cutoff) & GENE.usage.df$min_frac[i] != Inf) {
            return(binom.test(x = round(GENE.usage.df$FRAC[i] * GENE.usage.df$NREADS[i]), n = GENE.usage.df$NREADS[i], p = GENE.usage.df$min_frac[i])$p.value)
        }
        if (GENE.usage.df$min_frac[i] == Inf) {
            return(0)
        } else {
            return(1)
        }
    })
    ### P.binom to detect deletion or cnv
    # Adjustment groups: "<gene>_0" = deletion candidate, "<gene>_1" = no
    # expectation, "<gene>_2" = everything else; BH correction is applied
    # within each group below.
    GENE.usage.df$foradj <- sapply(1:nrow(GENE.usage.df), function(i) {
        if (GENE.usage.df$FRAC[i] < cutoff & GENE.usage.df$min_frac[i] != Inf) {
            return(paste0(GENE.usage.df$GENE[i], "_", 0))
        }
        if (GENE.usage.df$min_frac[i] == Inf) {
            return(paste0(GENE.usage.df$GENE[i], "_", 1))
        } else {
            return(paste0(GENE.usage.df$GENE[i], "_", 2))
        }
    })
    GENE.usage.df <- GENE.usage.df %>% group_by(.data$foradj) %>% mutate(pval_adj = p.adjust(.data$pval, method = "BH"))
    # Final call per row.  NOTE(review): if pval_adj <= p.val.cutoff while
    # neither inner condition holds, this function body returns NULL; given
    # how pval is built above (such rows get pval = 1) that branch appears
    # unreachable, but it is not guarded explicitly -- confirm.
    GENE.usage.df$col <- sapply(1:nrow(GENE.usage.df), function(i) {
        if (GENE.usage.df$pval_adj[i] <= p.val.cutoff) {
            if (GENE.usage.df$FRAC[i] < cutoff & GENE.usage.df$min_frac[i] != Inf) {
                return("Deletion")
            }
            if (GENE.usage.df$min_frac[i] == Inf) {
                return("NA")
            }
        } else {
            return("No Deletion")
        }
    })
    # Re-level for plotting: genes in chromosomal order, calls in legend order.
    GENE.usage.df$GENE <- factor(GENE.usage.df$GENE, levels = GENE.loc.IG)
    GENE.usage.df$col <- factor(GENE.usage.df$col, levels = c("Deletion", "No Deletion", "NA"))
    return(GENE.usage.df)
}
########################################################################################################
# Creates the allele color palette for haplotype graphical output
#
# \code{alleleHapPalette} Takes a list of the haplotype alleles and returns the allele color palette.
#
# @param hap_alleles a list of the haplotype alleles.
#
# @return Haplotype allele color palette
#
# Build the allele color palette for the haplotype plot.
#
# hap_alleles: character vector of all allele labels appearing in the plot
#              (digit-containing allele names, novel alleles with "_" suffix,
#              plus the special labels "Unk", "Del", "NR", "NRA").
# NRA:         NOTE(review): parameter is never used in the body -- kept for
#              interface compatibility; confirm whether it was meant to gate
#              the "NRA" color.
#
# Returns list(transper = ..., AlleleCol = ...) where AlleleCol is an
# INVERTED mapping -- values are allele labels, names are their hex colors
# (novel alleles share their parent allele's hue) -- and transper gives an
# alpha level per allele so sibling novel alleles of one parent are visually
# distinguishable.
alleleHapPalette <- function(hap_alleles, NRA = TRUE) {
    # Regular allele labels contain a digit; specials (Unk/Del/NR/NRA) do not.
    Alleles <- grep("[012]", unique(hap_alleles), value = T, perl = T)
    # Parent allele names (prefix before "_") pick the base hue from the
    # package-level ALLELE_PALETTE lookup.
    AlleleCol.tmp <- sort(unique(sapply(strsplit(Alleles, "_"), "[", 1)))
    tmp.col <- ALLELE_PALETTE[AlleleCol.tmp]
    novels <- grep("_", Alleles, value = T)
    if (length(novels) > 0) {
        # Novel alleles inherit the parent's color under their own name.
        novels.col <- ALLELE_PALETTE[sapply(strsplit(novels, "_"), "[", 1)]
        names(novels.col) <- novels
        alleles.comb <- c(tmp.col, novels.col)[order(names(c(tmp.col, novels.col)))]
    } else {
        alleles.comb <- c(tmp.col)[order(names(c(tmp.col)))]
    }
    # Invert the mapping: AlleleCol holds allele labels, named by hex color.
    AlleleCol <- names(c(alleles.comb, Unk = "#dedede", Del = "#6d6d6d", NR = "#000000", NRA = "#fbf7f5"))
    names(AlleleCol) <- c(alleles.comb, Unk = "#dedede", Del = "#6d6d6d", NR = "#000000", NRA = "#fbf7f5")
    # Drop a special entry when it does not occur in this plot's alleles.
    rm_allele <- function(allele,alleles,AlleleCol){
        if(!allele %in% alleles){
            id <- which(allele == AlleleCol)
            return(AlleleCol[-id])
        }
        return(AlleleCol)
    }
    AlleleCol <- rm_allele("NR",hap_alleles,AlleleCol)
    AlleleCol <- rm_allele("Del",hap_alleles,AlleleCol)
    AlleleCol <- rm_allele("Unk",hap_alleles,AlleleCol)
    AlleleCol <- rm_allele("NRA",hap_alleles,AlleleCol)
    # Alpha per allele: non-novel alleles are opaque (1); siblings of one
    # parent get a descending alpha ladder so each novel stays visible.
    transper <- sapply(AlleleCol, function(x) {
        if (grepl("_", x)) {
            mom_allele <- strsplit(x, "_")[[1]][1]
            all_novel <- grep(paste0(mom_allele, "_"), AlleleCol, value = T)
            if (length(all_novel) == 1) {
                return(0.5)
            }
            if (length(all_novel) == 2) {
                m = which(all_novel == x)
                return(ifelse(m == 1, 0.6, 0.3))
            }
            if (length(all_novel) == 3) {
                m = which(all_novel == x)
                if (m == 1) {
                  return(0.6)
                }
                return(ifelse(m == 2, 0.4, 0.2))
            }
            if (length(all_novel) > 9) {
                m = which(all_novel == x)
                if (m == 1) {
                  return(1)
                }
                return(1 - m/20)
            }
            if (length(all_novel) > 3) {
                m = which(all_novel == x)
                if (m == 1) {
                  return(0.85)
                }
                return(0.85 - m/10)
            }
        } else (1)
    })
    names(transper) <- AlleleCol
    # remove 'mother' allele if added (when there is no germline allele but there is a novel)
    special <- c("Unk", "Del", "NR", "NRA")[c("Unk", "Del", "NR", "NRA") %in% AlleleCol]
    AlleleCol <- AlleleCol[AlleleCol %in% c(sort(grep("[012]", unique(hap_alleles), value = T, perl = T)), special)]
    transper <- transper[names(transper) %in% AlleleCol]
    return(list(transper = transper, AlleleCol = AlleleCol))
}
########################################################################################################
# Creates the non reliable allele text annotation for plots
#
# \code{nonReliableAllelesText} Takes the haplotype data frame
#
# @param hap_table a data frame of the haplotypes.
#
# @return Non reliable alleles text data frame for plots annotation.
#
# Annotate non-reliable alleles (NRA) for the haplotype plot: computes the
# label text, its vertical position within each gene/subject/chromosome box,
# and a font size shrunk for long labels or shared boxes.
nonReliableAllelesText <- function(non_reliable_alleles_text, size = 4) {
if (nrow(non_reliable_alleles_text) != 0) {
non_reliable_alleles_text$text <- non_reliable_alleles_text$ALLELES
# freq == 1: single allele in the box, centered; otherwise stacked quarters.
non_reliable_alleles_text$pos <- ifelse(non_reliable_alleles_text$freq == 1, 0.5, 0.25)
# Offset each additional allele within the same GENE/SUBJECT/hapBy box.
non_reliable_alleles_text <- non_reliable_alleles_text %>% ungroup() %>% group_by(.data$GENE, .data$SUBJECT, .data$hapBy) %>% mutate(pos = .data$pos + ifelse(dplyr::row_number()==2,dplyr::row_number()-1.5,dplyr::row_number()-1))
# Shrink text for crowded boxes (freq > 1) and for long labels
# (5 or more "_"-separated tokens).
non_reliable_alleles_text$size <- sapply(1:nrow(non_reliable_alleles_text), function(i) {
if (non_reliable_alleles_text$freq[i] == 1) {
if (length(strsplit(non_reliable_alleles_text$text[i], "_")[[1]]) < 5) {
return(size)
} else {
return(size - 1)
}
} else {
if (length(strsplit(non_reliable_alleles_text$text[i], "_")[[1]]) < 5) {
return(size - 1)
} else {
return(size - 2)
}
}
})
# Collapse multi-allele patterns like "01_02" to the generic "NRA" tag.
non_reliable_alleles_text$ALLELES[grep("[0-9][0-9]_[0-9][0-9]", non_reliable_alleles_text$ALLELES)] <- "NRA"
return(non_reliable_alleles_text)
} else {
# No NRA rows: return an empty frame with the expected columns.
return(setNames(data.frame(matrix(ncol = 8, nrow = 0)), c("GENE", "ALLELES", "hapBy", "n", "freq", "text", "pos", "size")))
}
}
# Version 2 of the NRA annotation: replaces the raw allele string with a
# short numbered tag "[*i]" drawn in the box, plus a "text_bottom" legend
# line mapping each tag back to the full allele string.
# `map = TRUE` switches to the genotype-map layout (grouped by GENE/SUBJECT
# only, positions derived from the per-box allele count n).
nonReliableAllelesText_V2 <- function(non_reliable_alleles_text, size = 3, map = F) {
if (nrow(non_reliable_alleles_text) != 0) {
# One "[*i]" tag per distinct NRA allele string.
num_text <- sapply(1:length(unique(non_reliable_alleles_text$ALLELES)),function(i) paste0('[*',i,']'))
names(num_text) <- unique(non_reliable_alleles_text$ALLELES)
non_reliable_alleles_text$text <- num_text[non_reliable_alleles_text$ALLELES]
non_reliable_alleles_text$text_bottom <- paste(num_text[non_reliable_alleles_text$ALLELES],non_reliable_alleles_text$ALLELES)
# Vertical slots within a box depend on how many alleles share it (freq).
non_reliable_alleles_text$pos <- ifelse(non_reliable_alleles_text$freq == 1, 0.5,
ifelse(non_reliable_alleles_text$freq == 2, seq(0.25,1,by = 0.5)[1:2],
ifelse(non_reliable_alleles_text$freq == 3, seq(0.165,1,by = 0.33)[1:3],
seq(0.125,1,by = 0.25)[1:4])))
non_reliable_alleles_text$size = size
if(!map){
# Haplotype layout: keep pos, add pos2 offset per row within the box.
non_reliable_alleles_text <- non_reliable_alleles_text %>% ungroup() %>% group_by(.data$GENE, .data$SUBJECT, .data$hapBy) %>%
mutate(pos2 = .data$pos + 1 + ifelse(dplyr::row_number()==2,dplyr::row_number()-1.5,dplyr::row_number()-1))}
else{
# Map layout: recompute pos from the per-group allele count n.
non_reliable_alleles_text <- non_reliable_alleles_text %>% ungroup() %>% group_by(.data$GENE, .data$SUBJECT) %>%
mutate(pos = ifelse(.data$n == 1, 0.5,
ifelse(.data$n == 2, seq(0.25,1,by = 0.5)[1:max(dplyr::row_number())],
ifelse(.data$n == 3, seq(0.165,1,by = 0.33)[1:max(dplyr::row_number())],
seq(0.125,1,by = 0.25)[1:max(dplyr::row_number())]))))
}
# Collapse multi-allele patterns like "01_02" to the generic "NRA" tag.
non_reliable_alleles_text$ALLELES[grep("[0-9][0-9]_[0-9][0-9]", non_reliable_alleles_text$ALLELES)] <- "NRA"
return(non_reliable_alleles_text)
} else {
# Empty input: return an empty frame with the layout-appropriate columns.
if(!map) return(setNames(data.frame(matrix(ncol = 8, nrow = 0)), c("GENE", "ALLELES", "hapBy", "n", "freq", "text", "pos", "size")))
else return(setNames(data.frame(matrix(ncol = 8, nrow = 0)), c("GENE", "ALLELES", "n", "freq", "text", "pos", "size")))
}
}
########################################################################################################
# Creates the novel allele text annotation for plots
#
# \code{novelAlleleAnnotation} Takes the haplotype data frame
#
# @param novel_allele data frame with the novel allele cordinates.
#
# @return novel alleles text data frame for plots annotation.
#
# Annotate novel alleles for plotting: short in-box tag (the part of the
# relabeled name before the first "-"), a bottom-legend line mapping the
# tag to the original allele, and stacked positions per gene box.
#
# @param novel_allele data frame of novel allele rows (needs ALLELES, freq,
#        n, GENE, SUBJECT, hapBy columns).
# @param new_label   named character vector: original allele -> new label.
# @param size        base font size for the annotation text.
novelAlleleAnnotation <- function(novel_allele, new_label, size = 3) {
if (nrow(novel_allele) != 0) {
# In-box tag: prefix of the new label up to the first "-".
novel_allele$text <- sapply(new_label[novel_allele$ALLELES],function(s) strsplit(s,'-')[[1]][1])
novel_allele$text_bottom <- paste(new_label[novel_allele$ALLELES],novel_allele$ALLELES)
# Provisional position by per-box frequency (recomputed per group below).
novel_allele$pos <- ifelse(novel_allele$freq == 1, 1,
ifelse(novel_allele$freq == 2, 0.5,
ifelse(novel_allele$freq == 3, 0.33, 0.25)))
novel_allele$size = size
# Final stacked positions within each GENE/SUBJECT/hapBy box, based on n.
novel_allele <- novel_allele %>% ungroup() %>% group_by(.data$GENE, .data$SUBJECT, .data$hapBy) %>%
mutate(pos = ifelse(.data$n == 1, 0.5,
ifelse(.data$n == 2, seq(0.25,1,by = 0.5)[1:max(dplyr::row_number())],
ifelse(.data$n == 3, seq(0.165,1,by = 0.33)[1:max(dplyr::row_number())],
seq(0.125,1,by = 0.25)[1:max(dplyr::row_number())]))))
return(novel_allele)
} else {
# No novel alleles: empty frame with the expected columns.
return(setNames(data.frame(matrix(ncol = 8, nrow = 0)), c("GENE", "ALLELES", "hapBy", "n", "freq", "text", "pos", "size")))
}
}
########################################################################################################
# Transform character column to numeric
#
# Normalize a column's entries for numeric comparison.
# Entries flagged as missing by `na.strings` become NA; "plain" numeric
# entries are passed through as.numeric(); anything containing separators
# ("_", ",", "-"), a capital letter, or ending in a two-digit [0-2][1-9]
# code is left untouched. Note: when `row` is a character vector the
# converted values are coerced back to character on assignment.
asNum <- function(row, na.strings = c(NA, "NA")) {
  # Remember which entries are missing; restore them as NA at the end.
  missing_idx <- row %in% na.strings
  row[missing_idx] <- 0
  out <- row
  # "plain" = no separators, no capitals, no trailing two-digit code
  plain <- !grepl('[_|,|-]|[A-Z]|[0-2][1-9]$', as.character(row))
  convert_idx <- grepl('[0-9]*[^,]', as.character(row)) & plain
  out[!convert_idx] <- row[!convert_idx]
  out[convert_idx] <- as.numeric(row[convert_idx])
  out[missing_idx] <- NA_real_
  return(out)
}
########################################################################################################
# Get the number of unique genes assigned, modified from getSegment function from alakazam
#
# Get the number of unique genes assigned, modified from the getSegment
# function of alakazam.
#
# @param segment_call character vector of segment calls; each element may
#        hold several sep-delimited assignments, e.g. "IGHV1-69*01,IGHV1-2*02".
# @param sep separator between assignments within one call (default ",").
#
# @return integer vector with the number of distinct gene names per call.
getGeneCount <- function (segment_call, sep = ",")
{
  # Regex for an IG/TR gene name (locus + segment letter + identifier).
  segment_regex <- "((IG[HLK]|TR[ABGD])[VDJ][A-Z0-9\\(\\)]+[-/\\w]*)"
  edge_regex <- paste0("[^", sep, "]*")
  # Within each sep-delimited token, keep only the gene name and drop
  # everything around it (allele suffixes etc.).
  r <- gsub(paste0(edge_regex, "(", segment_regex, ")", edge_regex),
            "\\1", segment_call, perl = TRUE)
  # vapply keeps the return type stable: sapply() would return list()
  # for zero-length input instead of integer(0).
  r <- vapply(strsplit(r, sep), function(x) length(unique(x)), integer(1))
  return(r)
}
########################################################################################################
# Collapse alleles, modified from getSegment and getAlleles functions from alakazam
#
# Collapse multiple allele calls of the same gene into one string, modified
# from the getSegment and getAlleles functions of alakazam.
# E.g. "IGHV1-69*01,IGHV1-69*02" -> "IGHV1-69*01_02".
#
# @param segment_call character vector of segment calls.
# @param sep regex splitting individual allele suffixes: "," or an "_"
#        not followed by a capital letter (so SNP tags stay attached).
# @param collapse string joining the unique allele suffixes.
# @param withGene prepend the gene name (via getGene, defined in alakazam)
#        followed by "*".
#
# @return character vector with one collapsed call per input element.
alleleCollapse <- function(segment_call, sep = ",|_(?![A-Z])", collapse = "_", withGene = TRUE){
  # Drop the gene name together with the "*" so only allele suffixes remain.
  r <- gsub("((IG[HLK]|TR[ABGD])[VDJ][A-Z0-9\\(\\)]+[-/\\w]*)[*]",
            "", segment_call, perl = TRUE)
  # vapply keeps the return type stable (character vector even for
  # zero-length input, where sapply() would return list()).
  r <- vapply(strsplit(r, sep, perl = TRUE),
              function(x) paste(unique(x), collapse = collapse),
              character(1))
  if (withGene) r <- paste0(getGene(segment_call, strip_d = FALSE), "*", r)
  return(r)
}
########################################################################################################
# Get diagonal line for legend
#
# Build a ggplot grob used only for its bottom legend: a white diagonal
# line over a gray key, labeled "lK", one entry per level of `color`.
#
# @param color character vector of legend labels (mapped via aes_string).
#
# @return a ggplotGrob from which the caller extracts the legend.
# NOTE(review): aes_string() is deprecated in recent ggplot2 — verify
# against the ggplot2 version this package targets.
getDigLegend <- function(color){
return(ggplotGrob(ggplot(data.frame(x=c(1,2),y=c(3,4)), aes_string("x","y")) + geom_abline(aes_string(colour="color", intercept = 1, slope = 1), show.legend = T) +
scale_color_manual(values = c("white"), name = "lK", drop = FALSE) + guides(color = guide_legend(override.aes = list(size = 0.5), order = 2)) +
theme(legend.justification = "center", legend.key = element_rect(fill = "gray"), legend.position = "bottom")))
}
########################################################################################################
# Split lines of short reads assignments
#
# The \code{splitlines} function sliplits the text by line width
#
# @param bottom_annot annotation text.
# @param line_width the line width allowed.
#
# @return
# Seperated text to lines
#
# Split short-read assignment annotations into display lines.
#
# Joins all annotations with ", " and breaks the result into lines of at
# most roughly `line_width` characters, breaking only immediately before a
# "[" (each annotation starts with a bracketed tag).
#
# @param bottom_annot character vector of annotation strings.
# @param line_width   maximum line width; raised (with a printed notice)
#                     to the longest single annotation if smaller.
#
# @return a character vector of lines (or a single string if it all fits).
splitlines <- function(bottom_annot, line_width = 60) {
  longest <- max(sapply(bottom_annot, nchar))
  # A width shorter than the longest annotation could never break cleanly.
  if (line_width <= longest) {
    line_width <- longest
    print(paste0("Set line width to ", line_width))
  }
  full_text <- paste(bottom_annot, collapse = ", ")
  total_len <- nchar(full_text)
  if (total_len < line_width) {
    return(full_text)
  }
  # Candidate break points: the position of every "[" character.
  chars <- substring(full_text, 1:total_len, 1:total_len)
  bracket_pos <- grep("[", chars, fixed = TRUE)
  lines_out <- NULL
  start <- 1
  while (start <= (total_len - line_width)) {
    # Last "[" at or before the next line_width boundary.
    k <- findInterval(line_width * (length(lines_out) + 1) + 1, bracket_pos)
    lines_out <- c(lines_out, substring(full_text, start, bracket_pos[k] - 1))
    start <- bracket_pos[k]
  }
  lines_out <- c(lines_out, substring(full_text, start, total_len))
  return(lines_out)
}
########################################################################################################
# Write text annotations for heatmap graph
#
# \code{Write_text} takes values for plotting text on heatmap
#
# @param NR number of rows.
# @param NC number of columns.
# @param I row index.
# @param J column index.
# @param ALLELE allele index in the individual gene box.
# @param N_ALLELES number of alleles for individual in gene box.
# @param TEXT annotation text.
#
# @return plotting text annotation.
# @export
# Write a text annotation onto the heatmap at the slot of one allele
# within a gene box.
#
# @param NR,NC      number of rows / columns in the heatmap grid.
# @param I,J        row / column index of the gene box.
# @param ALLELE     allele slot (1..N_ALLELES) inside the box.
# @param N_ALLELES  number of allele slots in the box.
# @param TEXT       the annotation string.
# @param ...        further arguments passed to graphics::text().
Write_text <- function(NR, NC, I, J, ALLELE, N_ALLELES, TEXT, ...) {
  step_x <- 1/(NC - 1)
  step_y <- 1/(NR - 1)
  # x: left of cell J plus this slot's offset — the box spans 12 x-steps
  # divided evenly among the N_ALLELES slots, label centered in its slot.
  x_pos <- step_x*J - step_x/2 + step_x*12/N_ALLELES*(ALLELE - 1/2)
  y_pos <- step_y*I
  text(x_pos, y_pos, TEXT, ...)
}
########################################################################################################
# Draw lk value lines on heatmap
#
# \code{draw_segment} takes values for plotting text on heatmap
#
# @param NR number of rows.
# @param NC number of columns.
# @param I row index.
# @param J column index.
#
# @return plotting lk lines.
# @export
# Draw the diagonal lK hatching lines over one heatmap cell.
#
# @param NR,NC  number of rows / columns in the heatmap grid.
# @param I,J    row / column index of the cell.
# @param ...    further arguments passed to graphics::points() (e.g. col).
draw_segment <- function(NR, NC, I, J, ...) {
  step_x <- 1/(NC - 1)
  step_y <- 1/(NR - 1)
  # Short lead-in stroke on the left edge (half-height).
  points(c(step_x*(J - 0.5), step_x*(J + 1.5)),
         c(step_y*I, step_y*(I + 0.5)), type = "l", ...)
  # Five full-height diagonals, each shifted two x-steps to the right.
  for (k in seq(0, 8, by = 2)) {
    points(c(step_x*(J - 0.5 + k), step_x*(J + 3.5 + k)),
           c(step_y*(I - 0.5), step_y*(I + 0.5)), type = "l", ...)
  }
  # Short lead-out stroke on the right edge (half-height).
  points(c(step_x*(J + 9.5), step_x*(J + 11.5)),
         c(step_y*(I - 0.5), step_y*I), type = "l", ...)
}
########################################################################################################
# finds next dividor for an int number
# Find the smallest integer d with x <= d <= y that divides y evenly;
# when x > y, returns y itself. Assumes x >= 1 and an integer-valued y
# (y %% y == 0 guarantees termination by d = y).
next_divisor <- function(x,y){
  if (x > y) return(y)
  while (TRUE) {
    if (y %% x == 0) return(x)
    x <- x + 1
  }
}
|
61e1f8c04e881dbd8f7fef9eb0b764d927caa396
|
5757fcdbcf06bec35c9c72fb07a5523f99f137f2
|
/data/ebird.r
|
781489319ea5311fe80387f6d8373accbb4be781
|
[] |
no_license
|
nrminor/ebird-target-map
|
080eff43ca5d309e43f18faa357b9e7d52d70938
|
a4d121d8afc4fd6f2633d263f79afcc9a17b7a94
|
refs/heads/master
| 2023-07-06T08:25:41.105670
| 2019-03-12T15:59:05
| 2019-03-12T15:59:05
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,224
|
r
|
ebird.r
|
library(tidyverse)
library(sf)
library(rnaturalearth)
library(janitor)
library(auk)
library(httr)
library(here)
# Source every helper in R/ — get_regions() and get_ebird_frequency()
# are defined there (not in the packages above).
walk(list.files("R", full.names = TRUE), source)
# list of valid ebird country and state codes
ebird_countries <- get_regions("country")
ebird_states <- map_df(c("US", "CA"), ~ get_regions("subnational1", parent = .))
ebird_counties <- get_regions("subnational2", parent = "US")
ebird_regions <- bind_rows(ebird_countries, ebird_states, ebird_counties)
# Cache the raw frequency download; delete this file to force a refresh.
f <- here("data", "ebird-frequency_raw.rds")
if (!file.exists(f)) {
# apply to each region (presumably a network fetch per region — slow)
region_freq <- ebird_regions %>%
mutate(freq = map(region, get_ebird_frequency))
saveRDS(region_freq, f)
}
region_freq <- readRDS(f)
# drop non-species taxa (hybrids, spuhs, etc. per the eBird taxonomy)
region_freq_sp <- filter(ebird_taxonomy, category == "species") %>%
select(species_code) %>%
inner_join(unnest(region_freq), ., by = "species_code") %>%
select(region_code, species_code, month, frequency)
# Persist the cleaned per-region/per-month species frequencies.
here("data", "ebird-frequency.rds") %>%
saveRDS(region_freq_sp, .)
# region list with species counts (n_species per region_code)
region_freq_sp %>%
count(region_code) %>%
rename(n_species = n) %>%
inner_join(ebird_regions, ., by = "region_code") %>%
saveRDS(here("data", "ebird-regions.rds"))
|
d1e529f1986f34c7302f9dacdeae368c3c2dd2c1
|
4eee83253767b218d0348898633f2e3680d25ffb
|
/code/get_rescinded_demos.R
|
217ecb4c25eabc3385f5ab1fb8d1e6b76794e5f2
|
[
"MIT"
] |
permissive
|
Faculty-Job-Market-Collab/COVID_Job_Market_19-20
|
9a2a4c4fc904d7284d869a6990db526625428ccd
|
3f74f607245b9ae348d95f6221c0e36b117230b8
|
refs/heads/master
| 2023-07-08T07:18:20.031093
| 2021-08-11T05:35:33
| 2021-08-11T05:35:33
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,737
|
r
|
get_rescinded_demos.R
|
# Explore demographics of faculty job offers rescinded due to COVID-19.
# Requires `offers_data` and `demographics` from "get_offers_data.R",
# and the get_plot_summary() helper (defined elsewhere in this project).
offers_made <- offers_data %>% pull(faculty_offers) %>% sum(., na.rm = TRUE)
offers_rescinded <- offers_data %>% pull(covid_offers_rescinded) %>% sum(., na.rm = TRUE)
percent_rescinded <- (offers_rescinded/offers_made)*100 #need to correct for differing interpretations, some did not include rescinded offers with the offers made
# Join demographics and recode rescinded counts to a "true"/"false" flag;
# only respondents with at least one offer are kept.
res_demo_data <- left_join(offers_data, demographics, by = "id") %>%
filter(faculty_offers > 0) %>%
mutate(covid_offers_rescinded =
if_else(is.na(covid_offers_rescinded)|covid_offers_rescinded == 0, "false", "true"))
# Gender: bar chart of percent rescinded by gender.
gender_res_plot <- get_plot_summary(res_demo_data, "gender", "covid_offers_rescinded") %>%
ggplot()+
geom_col(aes(x = gender, y=percent_res))
# Race/ethnicity: lump to the 4 most common levels plus "Other".
race_res_plot <- res_demo_data %>%
mutate(race_ethnicity = fct_lump(race_ethnicity, n=4)) %>%
get_plot_summary(., "race_ethnicity", "covid_offers_rescinded") %>%
ggplot()+
geom_col(aes(x = race_ethnicity, y = percent_res))+
coord_flip()+
theme(#axis.text.x = element_text(angle = 90),
legend.position = "none")
# Research field: horizontal bars of percent rescinded by category.
field_res_plot <- get_plot_summary(data = res_demo_data,
x = "research_category", y = "covid_offers_rescinded") %>%
ggplot()+
geom_col(aes(x = research_category, y = percent_res))+
coord_flip()+
theme(legend.position = "none")
# Position type: horizontal bars of percent rescinded by position.
position_res_plot <- get_plot_summary(res_demo_data, "position", "covid_offers_rescinded") %>%
ggplot()+
geom_col(aes(x = position, y = percent_res))+
coord_flip()+
theme(legend.position = "none")
|
466ab8043df3f3dcadbab13f6322e1cb5ccd3c20
|
2764167b5743be62adadc491ec7dfde210e0703d
|
/R/Ellipsoidal.Distance.R
|
7d411d0cef496114b8f7bdc74a70932c6ce68f75
|
[] |
no_license
|
cran/GEOmap
|
528a4cbe293211d324405037eb280b415e65f62e
|
0149894022496cee8237868b0bb693d00ef01e41
|
refs/heads/master
| 2023-08-18T14:47:52.021469
| 2023-08-13T12:40:21
| 2023-08-13T13:30:31
| 17,713,753
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 3,160
|
r
|
Ellipsoidal.Distance.R
|
Ellipsoidal.Distance <- function(olat, olon, tlat, tlon, a = 6378137.0, b = 6356752.314, tol = 10^(-12))
{
  ###### Vincenty's inverse formula (Vincenty, 1975): geodesic distance and
  ###### forward/reverse azimuths between two points on an oblate spheroid.
  ###### Accurate to about 0.5 mm on the Earth ellipsoid — more accurate
  ###### than great-circle methods, which assume a spherical Earth.
  ######
  ###### olat, olon : origin latitude/longitude in decimal degrees
  ###### tlat, tlon : target latitude/longitude in decimal degrees
  ###### a, b : semi-major / semi-minor axes in meters (default WGS-84)
  ###### tol : convergence tolerance for the lambda iteration
  ######
  ###### Returns list(dist, az, revaz, err): distance in km, azimuths in
  ###### degrees; err = 1 on success, err = 0 (with NA fields) on failure.
  ###### Reference: http://en.wikipedia.org/wiki/Vincenty%27s_formulae
  ##### defaults are WGS-84, in meters
  if (missing(a)) a <- 6378137.0
  if (missing(b)) b <- 6356752.314
  if (missing(tol)) tol <- 10^(-12)
  err <- 0
  # Failure return value: err = 0 flags "no valid result computed".
  GIVE <- list(dist = NA, az = NA, revaz = NA, err = err)
  # Reject missing or out-of-range coordinates.
  if (is.na(olat) | is.na(tlat) | is.na(tlon) | is.na(olon))
  {
    return(GIVE)
  }
  if (olat < -90 | olat > 90) { return(GIVE) }
  if (tlat < -90 | tlat > 90) { return(GIVE) }
  # Coincident points: zero distance, azimuths undefined.
  if (olat == tlat & olon == tlon)
  {
    GIVE <- list(dist = 0, az = NA, revaz = NA, err = err)
    return(GIVE)
  }
  f <- (a - b)/a   # flattening
  ############## latitudes -> reduced latitudes U1, U2
  phi1 <- olat*pi/180
  phi2 <- tlat*pi/180
  U1 <- atan((1 - f)*tan(phi1))
  U2 <- atan((1 - f)*tan(phi2))
  cU2 <- cos(U2)
  cU1 <- cos(U1)
  sU2 <- sin(U2)
  sU1 <- sin(U1)
  ########### longitudes
  lam1 <- olon*pi/180
  lam2 <- tlon*pi/180
  L <- lam2 - lam1
  lam <- L
  #### iterate lambda until the update falls below tol; the scheme can
  #### fail to converge for nearly antipodal points, so cap the iterations
  #### instead of spinning forever (fix: previously an infinite loop).
  K <- tol + 1
  iter <- 0
  maxiter <- 1000
  while (K > tol)
  {
    iter <- iter + 1
    if (iter > maxiter) {
      print("no convergence, aborting Ellipsoidal.Distance")
      return(GIVE) }
    slam <- sin(lam)
    clam <- cos(lam)
    sinsig <- sqrt((cU2*slam)^2 + (cU1*sU2 - sU1*cU2*clam)^2)
    if (sinsig == 0) {
      print("1 aborting Ellipsoidal.Distance")
      return(GIVE) }
    cossig <- sU1*sU2 + cU1*cU2*clam
    sig <- atan2(sinsig, cossig)
    sinalpha <- (cU1*cU2*slam)/sinsig
    cossqalpha <- (1 - sinalpha^2)
    # cossqalpha == 0 along the equator — degenerate for this formulation.
    if (cossqalpha == 0) {
      print("2 aborting Ellipsoidal.Distance")
      return(GIVE) }
    cos2sigm <- cossig - (2*sU1*sU2)/cossqalpha
    C <- (f/16)*cossqalpha*(4 + f*(4 - 3*cossqalpha))
    lam2 <- L + (1 - C)*f*sinalpha*(sig + C*sinsig*(cos2sigm + C*cossig*(-1 + 2*cos2sigm^2)))
    K <- abs(lam2 - lam)
    lam <- lam2
  }
  # Helmert expansion for the ellipsoidal arc length.
  usq <- cossqalpha * (a^2 - b^2)/b^2
  A <- 1 + (usq/16384)*(4096 + usq*(-768 + usq*(320 - 175*usq)))
  B <- usq*(256 + usq*(-128 + usq*(74 - 47*usq)))/1024
  delsig <- B*sinsig*(cos2sigm + 0.25*B*(cossig*(-1 + 2*cos2sigm^2) - (1/6)*B*cos2sigm*(-3 + 4*sinsig^2)*(-3 + 4*cos2sigm^2)))
  s <- b*A*(sig - delsig)
  alpha1 <- atan2(cU2*slam, cU1*sU2 - sU1*cU2*clam)
  alpha2 <- atan2(cU1*slam, (-sU1*cU2 + cU1*sU2*clam))
  err <- 1
  # dist in kilometers; az/revaz in degrees from north.
  GIVE <- list(dist = s/1000, az = alpha1*180/pi, revaz = alpha2*180/pi, err = err)
  return(GIVE)
}
|
922dfbc04c58837b8dfb19ed9fb6b32910fda390
|
5e5dee48911e881b41fc11d9199b1b8e9f242c5c
|
/RUN_ALL.R
|
bf971c433b55be296fdef5a798eb57be7d070f5f
|
[
"Apache-2.0"
] |
permissive
|
bcgov/popApp
|
1726eb916adfea8659f39ad56e28a7973f37cd49
|
47c3396c3444714e5cbe2c42ff310a6798d5fb29
|
refs/heads/main
| 2023-07-29T18:30:49.085816
| 2023-07-28T20:08:37
| 2023-07-28T20:08:37
| 170,174,694
| 6
| 1
|
Apache-2.0
| 2023-07-28T20:08:39
| 2019-02-11T17:53:16
|
R
|
UTF-8
|
R
| false
| false
| 3,208
|
r
|
RUN_ALL.R
|
# Copyright 2023 Province of British Columbia
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and limitations under the License.
# Driver script for the popApp data refresh and deployment.
options(java.parameters = "-Xmx8g" ) ## run BEFORE loading any libraries else can't open huge .xlsx file
### 1. load libraries ----
## installs any missing packages this script uses
## (require() returns FALSE instead of erroring when a package is absent)
if (!require('here')) install.packages('here')
if (!require('tidyverse')) install.packages('tidyverse')
if (!require('rsconnect')) install.packages('rsconnect')
library(here)
library(tidyverse)
library(rsconnect) ## to connect to shiny.io dashboard to deploy app
### 2. set values ----
## A. Do you need to update the underlying data?
update_data = TRUE ## set to TRUE if you need to update the underlying data, otherwise set to FALSE
## update folder path below ** keep slashes as is to work in R **
#base_folder <- "//SFP.IDIR.BCGOV/S152/S52004/PEOPLEPROJECTIONS/P19/ACCESSDATABASE/WEB/Estimate 2018-19/"
base_folder <- here("analysis", "inputs", paste0("/"))
## variables needed to read in Excel/csv data, if updating data
## (these are consumed by analysis/get_data.R, sourced below)
file_name <- "Combined" ## file name prefix ("1" or "5" are added to file_name in function later)
file_type <- "xlsx" ## "xlsx" or "csv"
mysheet <- "Combined1" ## name of sheet with data
col_RegionType <- "TYPE" ## case-sensitive name of Region Type column
col_Region <- "TYPEID" ## case-sensitive name of Region number column
col_Year <- "YR" ## case-sensitive name of Year column
col_Gender <- "GENDERID" ## case-sensitive name of Gender ID column
col_Total <- "TOTAL" ## case-sensitive name of Total column
### 3. data ----
if(update_data == TRUE) {
## don't change order of data_cols or final_cols (positional mapping)
data_cols <- c(col_RegionType, col_Region, col_Year, col_Gender, col_Total)
final_cols<- c("Region.Type", "Region", "Year", "Gender", "Total")
## requires: csv or xlsx files in base_folder
## assumes: 5 columns (Type, Region, Year, Gender, Total), age columns ("A90PL" & "LT1" in 5-yr)
## will make lookup.csv if it doesn't exist
source(here("analysis", "get_data.R"))
## output is new data1.rds in app\data and a copy as data1_YYYY-MM-DD.rds in app\data\Archive\
}
### 4. deploy app ----
## You need an admin access to the bcstats shiny.io account. Martin can grant you access.
## Once you have access to shiny.io dashboard, you need to deploy the app:
## Type deployApp() in console of app.R. If you get an error, you may need to set the app title:
## In app.R, click the publish button (blue icon), and choose "Publish Application".
## Type in a title (must be at least 4 characters). Publish.
#rsconnect::deployApp(appName = "popApp", appId = 958258) ## command to deploy app to shiny.io dashboard; account: bcstats
### DONE ----
|
28cddc2adab8627ca48fa827fc536af9ddb8d197
|
d28508911e5a2f5c3d8d849d7d2a97c687dbffd9
|
/Chapter06/B09948_06_Code/recommender.R
|
31978c713df90ff53db456b2be99c6345841b81b
|
[
"MIT"
] |
permissive
|
PacktPublishing/Hands-on-Deep-Learning-with-R
|
10032fb0aceed0b315cf7bb399f53e07885df8f7
|
6e3766377395d4e2a853f787d1f595e4d8d28fa5
|
refs/heads/master
| 2023-02-11T11:05:47.140350
| 2023-01-30T09:37:44
| 2023-01-30T09:37:44
| 124,351,189
| 21
| 15
|
MIT
| 2020-04-09T06:29:03
| 2018-03-08T07:03:57
|
R
|
UTF-8
|
R
| false
| false
| 1,214
|
r
|
recommender.R
|
# Create a custom Keras model implementing a dot-product recommender:
# user and item IDs are embedded and their embeddings dot-multiplied to
# predict a rating.
#
# @param embedding_dim size of the embedding vectors.
# @param n_users, n_items number of distinct user / item IDs
#        (embedding input_dim is +1 to leave room for index 0).
# @param name model name passed to keras_model_custom().
dot <- function(
embedding_dim,
n_users,
n_items,
name = "dot"
) {
keras_model_custom(name = name, function(self) {
self$user_embedding <- layer_embedding(
input_dim = n_users+1,
output_dim = embedding_dim,
name = "user_embedding")
self$item_embedding <- layer_embedding(
input_dim = n_items+1,
output_dim = embedding_dim,
name = "item_embedding")
# Batched dot product of the two embedding vectors along the feature axis.
self$dot <- layer_lambda(
f = function(x)
k_batch_dot(x[[1]],x[[2]],axes=2),
name = "dot"
)
# Forward pass: x is a 2-column tensor of (user_id, item_id) pairs.
function(x, mask=NULL, training=FALSE) {
users <- x[,1]
items <- x[,2]
user_embedding <- self$user_embedding(users)
item_embedding <- self$item_embedding(items)
dot <- self$dot(list(user_embedding, item_embedding))
}
})
}
# Set the embedding size shared by user and item embeddings.
embedding_dim <- 50
# Build the dot-product recommender; n_users/n_items/x_train/y_train/
# x_test/y_test are expected to be defined earlier in the pipeline.
model <- dot(
embedding_dim,
n_users,
n_items
)
# Compile with mean-squared-error loss (rating regression) and Adam.
model %>% compile(
loss = "mse",
optimizer = "adam"
)
# Train for 10 epochs, validating against the held-out test split.
history <- model %>% fit(
x_train,
y_train,
epochs = 10,
batch_size = 500,
validation_data = list(x_test,y_test),
verbose = 1
)
summary(model)
|
90139deb7717654dee1d4f0db45d55fa66f42b79
|
0d2190a6efddb7167dee3569820724bfeed0e89c
|
/.svn/pristine/7c/7c0ef4c5ca31ef4b4bb934737dc94f2e41b51af4.svn-base
|
70d047e3cb661dc8560b9797751755c961c72be9
|
[] |
no_license
|
djnpisano/RScriptLibrary
|
6e186f33458396aba9f4151bfee0a4517d233ae6
|
09ae2ac1824dfeeca8cdea62130f3c6d30cb492a
|
refs/heads/master
| 2020-12-27T10:02:05.719000
| 2015-05-19T08:34:19
| 2015-05-19T08:34:19
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 29,778
|
7c0ef4c5ca31ef4b4bb934737dc94f2e41b51af4.svn-base
|
# -------------------------------------------------------------------------------
# R-CropStat Beta Version: Functions for ANALYZE - ANALYSIS OF VARIANCE SUBMENU
# -------------------------------------------------------------------------------
# combAOVTest: Functions for performing ANOVA
# Created by: Alaine A. Gulles for International Rice Research Institute
# Modified by: Alaine A. Gulles 01.14.2013
# Note: Include remarks when data is unbalanced, when no balanced data can be generated
# -------------------------------------------------------------------------------
combAOVTest <- function(data, design, respvar, factor1, factor2 = NULL, factor3 = NULL,
factor4 = NULL, rep1 = NULL, rep2 = NULL, set, descriptive = FALSE,
normality = FALSE, homogeneity = FALSE, pwTest = NULL, pwVar = NULL,
contrastOption = NULL, sig = 0.05, outputPath = NULL) UseMethod("combAOVTest")
combAOVTest <- function(data, design, respvar, factor1, factor2 = NULL, factor3 = NULL,
factor4 = NULL, rep1 = NULL, rep2 = NULL, set, descriptive = FALSE,
normality = FALSE, homogeneity = FALSE, pwTest = NULL, pwVar = NULL,
contrastOption = NULL, sig = 0.05, outputPath = NULL) {
if (is.character(data)) {
nameData <- data
data <- eval(parse(text = data))
} else { nameData <- paste(deparse(substitute(data))) }
if (!is.data.frame(data)) { stop("The object 'data' should be a data frame.") }
availableDesign <- c("CRD", "RCBD", "LSD", "SplitCRD", "SplitRCBD", "SplitLSD", "Strip", "Split2CRD", "Split2RCBD", "Split2LSD", "Strip-Split", "Split3CRD", "Split3RCBD", "Split3LSD", "Strip-Split2")
if(is.na(match(design, availableDesign))) {
stop("Design must be one of the following:\n'CRD', 'RCBD', 'LSD',\n'SplitCRD', 'SplitRCBD', 'SplitLSD','Strip',\n'Split2CRD', 'Split2RCBD', 'Split2LSD', 'Strip-Split',\n'Split3CRD', 'Split3RCBD', 'Split3LSD', 'Strip-Split2'")
}
designChoice <- match(design, availableDesign) ## design code
designTitle <- c("Completely Randomized Design",
"Randomized Complete Block Design",
"Latin Square Design",
"Split Plot in Completely Randomized Design",
"Split Plot in Randomized Complete Block Design",
"Split Plot in Latin Square Design",
"Strip Plot Design",
"Split-Split Plot in Completely Randomized Design",
"Split-Split Plot in Randomized Complete Block Design",
"Split-Split Plot in Latin Square Design",
"Strip-Split Plot Design",
"Split-Split-Split Plot in Completely Randomized Design",
"Split-Split-Split Plot in Randomized Complete Block Design",
"Split-Split-Split Plot in Latin Square Design",
"Strip-Split-Split Plot Design")
# determine the right handside of the model
switch(designChoice,
{ modelRHS <- paste(set, paste(factor1, collapse = "*", sep = ""), paste(set , paste("(",paste(factor1, collapse = "*", sep = ""),")", sep = ""), sep = ":"), sep = " + "); modelRHS2 <- modelRHS },
{ modelRHS <- paste(set, paste(set, rep1, sep = ":"), paste(factor1, collapse = "*", sep = ""), paste(set, paste("(",paste(factor1, collapse = "*", sep = ""),")", sep = ""), sep = ":"), sep = " + "); modelRHS2 <- modelRHS},
{ modelRHS <- paste(set, paste(set, rep1, sep = ":"), paste(set, rep2, sep =":"), paste(factor1, collapse = "*", sep = ""), paste(set , paste("(",paste(factor1, collapse = "*", sep = ""),")", sep = ""), sep = ":"), sep = " + "); modelRHS2 <- modelRHS },
{ modelRHS <- paste(set, paste(c(factor1, factor2), collapse = "*", sep = ""), paste(set, paste("(",paste(c(factor1, factor2), collapse = "*", sep = ""),")", sep = ""), sep = ":"), paste("Error((", paste(c(set, rep1, factor1), collapse = ":", sep = ""),")/", rep1,")", sep = ""),sep = " + ");
modelRHS2 <- paste(set, paste(c(factor1, factor2), collapse = "*", sep = ""), paste(set, paste("(",paste(c(factor1, factor2), collapse = "*", sep = ""),")", sep = ""), sep = ":"), sep = " + ") },
{ modelRHS <- paste(set, rep1, paste(c(factor1, factor2), collapse = "*", sep = ""), paste(set, paste("(",paste(c(factor1, factor2), collapse = "*", sep = ""),")", sep = ""), sep = ":"), paste("Error((", paste(c(set, rep1, factor1), collapse = ":", sep = ""),")/(", paste(factor1, collapse = "*", sep = ""),"))", sep = ""), sep = " + ");
modelRHS2 <- paste(set, rep1, paste(c(factor1, factor2), collapse = "*", sep = ""), paste(set, paste("(",paste(c(factor1, factor2), collapse = "*", sep = ""),")", sep = ""), sep = ":"), sep = " + ") },
{ modelRHS <- paste(set, rep1, rep2, paste(c(factor1, factor2), collapse = "*", sep = ""), paste(set, paste("(",paste(c(factor1, factor2), collapse = "*", sep = ""),")", sep = ""), sep = ":"), paste("Error((", paste(c(set, rep1, rep2, factor1), collapse = ":", sep = ""),")/(", paste(factor1, collapse = "*", sep = ""),"))", sep = ""), sep = " + ");
modelRHS2 <- paste(set, rep1, rep2, paste(c(factor1, factor2), collapse = "*", sep = ""), paste(set, paste("(",paste(c(factor1, factor2), collapse = "*", sep = ""),")", sep = ""), sep = ":"), sep = " + ") },
{ modelRHS <- paste(set, rep1, paste(c(factor1, factor2), collapse = "*", sep = ""), paste(set, paste("(",paste(c(factor1, factor2), collapse = "*", sep = ""),")", sep = ""), sep = ":"), paste("Error((", paste(c(set, rep1, factor1), collapse = ":", sep = ""),")/(", paste(factor1, collapse = "*", sep = ""),") + (", paste(c(set, rep1, factor2), collapse = ":", sep = ""),"/", paste(factor2, collapse = "*", sep = ""),"))", sep = ""), sep = " + ");
modelRHS2 <- paste(set, rep1, paste(c(factor1, factor2), collapse = "*", sep = ""), paste(set, paste("(",paste(c(factor1, factor2), collapse = "*", sep = ""),")", sep = ""), sep = ":"), sep = " + ") },
{ modelRHS <- paste(set, paste(c(factor1, factor2, factor3), collapse = "*", sep = ""), paste(set, paste("(",paste(c(factor1, factor2, factor3), collapse = "*", sep = ""),")", sep = ""), sep = ":"), paste("Error((", paste(c(set, rep1, factor1), collapse = ":", sep = ""),")/(",rep1,") + (", paste(c(set, rep1, factor1, factor2), collapse = ":", sep = ""),"/", paste(factor2, collapse = ":", sep = ""),"))", sep = ""), sep = " + ");
modelRHS2 <- paste(set, paste(c(factor1, factor2, factor3), collapse = "*", sep = ""), paste(set, paste("(",paste(c(factor1, factor2, factor3), collapse = "*", sep = ""),")", sep = ""), sep = ":"), sep = " + ") },
{ modelRHS <- paste(set, rep1, paste(c(factor1, factor2, factor3), collapse = "*", sep = ""), paste(set, paste("(",paste(c(factor1, factor2, factor3), collapse = "*", sep = ""),")", sep = ""), sep = ":"), paste("Error((", paste(c(set, rep1, factor1), collapse = ":", sep = ""),")/(", paste(factor1, collapse = "*", sep = ""),") + (", paste(c(set, rep1, factor1, factor2), collapse = ":", sep = ""),"/", paste(factor2, collapse = ":", sep = ""),"))", sep = ""), sep = " + ");
modelRHS2 <- paste(set, rep1, paste(c(factor1, factor2, factor3), collapse = "*", sep = ""), paste(set, paste("(",paste(c(factor1, factor2, factor3), collapse = "*", sep = ""),")", sep = ""), sep = ":"), sep = " + ") },
{ modelRHS <- paste(set, rep1, rep2, paste(c(factor1, factor2, factor3), collapse = "*", sep = ""), paste(set, paste("(",paste(c(factor1, factor2, factor3), collapse = "*", sep = ""),")", sep = ""), sep = ":"), paste("Error((", paste(c(set, rep1, rep2, factor1), collapse = ":", sep = ""),")/(", paste(factor1, collapse = "*", sep = ""),") + (", paste(c(set, rep1, rep2, factor1, factor2), collapse = ":", sep = ""),"/", paste(factor2, collapse = ":", sep = ""),"))", sep = ""), sep = " + ");
modelRHS2 <- paste(set, rep1, rep2, paste(c(factor1, factor2, factor3), collapse = "*", sep = ""), paste(set, paste("(",paste(c(factor1, factor2, factor3), collapse = "*", sep = ""),")", sep = ""), sep = ":"), sep = " + ") },
{ modelRHS <- paste(set, rep1, paste(c(factor1, factor2, factor3), collapse = "*", sep = ""), paste(set, paste("(",paste(c(factor1, factor2, factor3), collapse = "*", sep = ""),")", sep = ""), sep = ":"), paste("Error((", paste(c(set, rep1, factor1), collapse = ":", sep = ""),")/(", paste(factor1, collapse = "*", sep = ""),") + (", paste(c(set, rep1, factor2), collapse = ":", sep = ""),"/", paste(factor2, collapse = "*", sep = ""),") + (", paste(c(set, rep1, factor1, factor2), collapse = ":", sep = ""),"/", paste(c(factor1, factor2), collapse = ":", sep = ""),"))", sep = ""), sep = " + ");
modelRHS2 <- paste(set, rep1, paste(c(factor1, factor2, factor3), collapse = "*", sep = ""), paste(set, paste("(",paste(c(factor1, factor2, factor3), collapse = "*", sep = ""),")", sep = ""), sep = ":"), sep = " + ") },
{ modelRHS <- paste(set, paste(c(factor1, factor2, factor3, factor4), collapse = "*", sep = ""), paste(set , paste("(",paste(c(factor1, factor2, factor3, factor4), collapse = "*", sep = ""),")", sep = ""), sep = ":"), paste("Error((", paste(c(set, rep1, factor1), collapse = ":", sep = ""),")/(",rep1,") + (", paste(c(set, rep1, factor1, factor2), collapse = ":", sep = ""),"/", paste(factor2, collapse = ":", sep = ""),") + (", paste(c(set, rep1, factor1, factor2, factor3), collapse = ":", sep = ""),"/", paste(factor3, collapse = ":", sep = ""),"))", sep = ""), sep = " + ");
modelRHS2 <- paste(set, paste(c(factor1, factor2, factor3, factor4), collapse = "*", sep = ""), paste(set , paste("(",paste(c(factor1, factor2, factor3, factor4), collapse = "*", sep = ""),")", sep = ""), sep = ":"), sep = " + ") },
{ modelRHS <- paste(set, rep1, paste(c(factor1, factor2, factor3, factor4), collapse = "*", sep = ""), paste(set, paste("(",paste(c(factor1, factor2, factor3, factor4), collapse = "*", sep = ""),")", sep = ""), sep = ":"), paste("Error((", paste(c(set, rep1, factor1), collapse = ":", sep = ""),")/(", paste(factor1, collapse = "*", sep = ""),") + (", paste(c(set, rep1, factor1, factor2), collapse = ":", sep = ""),"/", paste(factor2, collapse = ":", sep = ""),") + (", paste(c(set, rep1, factor1, factor2, factor3), collapse = ":", sep = ""),"/", paste(factor3, collapse = ":", sep = ""),"))", sep = ""), sep = " + ");
modelRHS2 <- paste(set, rep1, paste(c(factor1, factor2, factor3, factor4), collapse = "*", sep = ""), paste(set, paste("(",paste(c(factor1, factor2, factor3, factor4), collapse = "*", sep = ""),")", sep = ""), sep = ":"), sep = " + ") },
{ modelRHS <- paste(set, rep1, rep2, paste(c(factor1, factor2, factor3, factor4), collapse = "*", sep = ""), paste(set, paste("(",paste(c(factor1, factor2, factor3, factor4), collapse = "*", sep = ""),")", sep = ""), sep = ":"), paste("Error((", paste(c(set, rep1, rep2, factor1), collapse = ":", sep = ""),")/(", paste(factor1, collapse = "*", sep = ""),") + (", paste(c(set, rep1, rep2, factor1, factor2), collapse = ":", sep = ""),"/", paste(factor2, collapse = ":", sep = ""),") + (", paste(c(set, rep1, rep2, factor1, factor2, factor3), sep = ""),"/", paste(factor3, collapse = ":", sep = ""),"))", sep = ""), sep = " + ");
modelRHS2 <- paste(set, rep1, rep2, paste(c(factor1, factor2, factor3, factor4), collapse = "*", sep = ""), paste(set, paste("(",paste(c(factor1, factor2, factor3, factor4), collapse = "*", sep = ""),")", sep = ""), sep = ":"), sep = " + ") },
{ modelRHS <- paste(set, rep1, paste(c(factor1, factor2, factor3, factor4), collapse = "*", sep = ""), paste(set, paste("(",paste(c(factor1, factor2, factor3, factor4), collapse = "*", sep = ""),")", sep = ""), sep = ":"), paste("Error((", paste(c(set, rep1, factor1), collapse = ":", sep = ""),")/(", paste(factor1, collapse = "*", sep = ""),") + (", paste(c(set, rep1, factor2), collapse = ":", sep = ""),"/", paste(factor2, collapse = "*", sep = ""),") + (", paste(c(set, rep1, factor1, factor2), collapse = ":", sep = ""),"/",paste(c(factor1, factor2), collapse = ":", sep = ""),") + (", paste(c(set, rep1, factor1, factor2, factor3), collapse = ":", sep = ""),"/", paste(factor3, collapse = ":", sep = ""),"))", sep = ""), sep = " + ");
modelRHS2 <- paste(set, rep1, paste(c(factor1, factor2, factor3, factor4), collapse = "*", sep = ""), paste(set, paste("(",paste(c(factor1, factor2, factor3, factor4), collapse = "*", sep = ""),")", sep = ""), sep = ":"), sep = " + ") }
)
allFactor <- c(factor1, factor2, factor3, factor4)
prev.option <- options()$show.signif.stars
options(show.signif.stars = FALSE, width = 5000)
options(expressions = 500000)
tempAnova <- list()
residNfittedData <- NULL
pwOption <- list()
aovresult <- list()
rvWithSigEffect <- NULL
# define as factors
for (i in (1:length(allFactor))) { data[,allFactor[i]] <- factor(data[,allFactor[i]]) }
if (!is.null(rep1)) { data[,rep1] <- factor(data[,rep1]) }
if (!is.null(rep2)) { data[,rep2] <- factor(data[,rep2]) }
data[,set] <- factor(data[,set])
tempData <- data
tempNewData <- NULL
cat("Combined Analysis of Variance","\n",designTitle[designChoice],"\n\n", sep = "")
for (i in (1:length(respvar))) {
width1 <- 37 + nchar(respvar[i])
cat(paste(rep("=", width1), collapse = ""), "\n")
cat("ANALYSIS FOR RESPONSE VARIABLE:", respvar[i],"\n")
cat(paste(rep("=", width1), collapse = ""), "\n\n")
origData <- tempData
tempNewData <- NULL
formula <- paste(respvar[i], " ~ ", paste(c(set, rep1, rep2, allFactor), collapse = ":", sep = ""), sep = "")
# determine if balanced data
if (designChoice != 1) {
rawData <- tempData[,sort(match(c(respvar[i], allFactor, rep1, rep2, set), names(tempData)))]
if (is.list(replications(formula, rawData))) {
if (max(replications(formula, rawData)[[1]]) != 1) {
cat("ERROR: Cannot perform ANOVA for balanced data. No unique data can be generated for variables '", respvar[i],"'. \n\n", sep = "")
next
}
for (j in (1:nlevels(tempData[,set]))) {
tempNewData <- rbind(tempNewData, GenerateBalanceData(tempData[tempData[, set] == levels(tempData[,set])[j],], respvar[i], allFactor, c(rep1, rep2) , design))
}
if ((1 - (length(na.omit(tempNewData[,respvar[i]]))/nrow(tempNewData))) > 0.10) {
cat("ERROR: Cannot perform ANOVA for balanced data for response variable '", respvar[i], "'. Too many missing values.", sep = "")
next
}
tempEstData <- NULL
for (j in (1:nlevels(tempData[,set]))) {
tempEstData <- rbind(tempEstData, estMissData(design, data = tempData[tempData[, set] == levels(tempData[,set])[j],], respvar[i], factor1, factor2, factor3, factor4, rep1, rep2))
}
tempData <- tempEstData
estimatedData <- TRUE
} else { estimatedData <- FALSE }
} else { estimatedData <- FALSE }
# -- PRINTING CLASS LEVEL INFORMATION -- #
ClassInformation(tempData[, c(set, rep1, rep2, factor1, factor2, factor3, factor4, respvar[i])], respvar = respvar[i])
cat("\n\n")
# --- PRINTING DESCRIPTIVE STATISTICS --- #
if (descriptive) {
DescriptiveStatistics(data = tempData, var = respvar[i], grp = NULL, statistics = c("n", "mean", "sd", "min", "max"))
if (estimatedData) { cat("REMARK: Raw data and estimates of the missing values are used.") }
cat("\n")
}
# --- BUILD THE MODEL --- #
modelLHS <- paste(respvar[i], "~")
mymodel <- paste(modelLHS, modelRHS)
mymodel2 <- paste(modelLHS, modelRHS2)
if (estimatedData) { tempresult <- summary(suppressWarnings(aov(formula(mymodel), data = origData))) }
result <- suppressWarnings(aov(formula(mymodel), tempData))
aovresult[[i]] <- result
if (estimatedData) {
if (attr(summary(result), "class")[[1]] == "summary.aovlist") {
tempAnova[[i]] <- summary(result)
numRow <- nrow(tempAnova[[i]][[length(tempAnova[[i]])]][[1]])
dfError <- tempresult[[length(tempresult)]][[1]][nrow(tempresult[[length(tempresult)]][[1]]),"Df"]
tempAnova[[i]][[length(tempAnova[[i]])]][[1]][numRow,"Df"] <- dfError
tempAnova[[i]][[length(tempAnova[[i]])]][[1]][numRow,"Mean Sq"] <- tempAnova[[i]][[length(tempAnova[[i]])]][[1]][numRow,"Sum Sq"]/dfError
tempAnova[[i]][[length(tempAnova[[i]])]][[1]][1:(numRow - 1),"F value"] <- tempAnova[[i]][[length(tempAnova[[i]])]][[1]][1:(numRow - 1),"Mean Sq"]/tempAnova[[i]][[length(tempAnova[[i]])]][[1]]["Residuals","Mean Sq"]
tempAnova[[i]][[length(tempAnova[[i]])]][[1]][1:(numRow - 1), "Pr(>F)"] <- pf(tempAnova[[i]][[length(tempAnova[[i]])]][[1]][1:(numRow - 1),"F value"], tempAnova[[i]][[length(tempAnova[[i]])]][[1]][1:(numRow - 1),"Df"], dfError, lower.tail = FALSE)
} else {
tempAnova[[i]] <- summary(result)
tempAnova[[i]][[1]]["Df"] <- tempresult[[1]]["Df"]
tempAnova[[i]][[1]]["Mean Sq"] <- tempAnova[[i]][[1]]["Sum Sq"]/tempAnova[[i]][[1]]["Df"]
numEffects <- nrow(tempAnova[[i]][[1]])-1
dfError <- tempAnova[[i]][[1]][nrow(tempAnova[[i]][[1]]),"Df"]
tempAnova[[i]][[1]][1:numEffects, "F value"] <- tempAnova[[i]][[1]][1:numEffects,"Mean Sq"]/tempAnova[[i]][[1]]["Residuals","Mean Sq"]
tempAnova[[i]][[1]][1:numEffects, "Pr(>F)"] <- pf(tempAnova[[i]][[1]][1:numEffects,"F value"], tempAnova[[i]][[1]][1:numEffects,"Df"], dfError, lower.tail = FALSE)
}
} else { tempAnova[[i]] <- summary(result) }
#tempAnova[[i]] <- summary(result)
tempAOVTable <- ConstructAOVTable(tempAnova[[i]])
# rename the anova table
rownames(tempAOVTable) <- gsub("Error", "Pooled Error",trim.strings(rownames(tempAOVTable)))
if (!is.null(rep1)) {
index <- match(paste(c(set, rep1), collapse = ":", sep = ""), trim.strings(rownames(tempAOVTable)))
if (!is.na(index)) {
rownames(tempAOVTable)[index] <- paste(rep1, "within", set)
tempAOVTable <- rbind(tempAOVTable[c(1,index),], tempAOVTable[-I(match(c(1, index), 1:nrow(tempAOVTable))),])
}
rm(index)
}
if (!is.null(rep2)) {
index <- match(paste(c(set, rep2), collapse = ":", sep = ""), trim.strings(rownames(tempAOVTable)))
if (!is.na(index)) {
rownames(tempAOVTable)[index] <- paste(rep2, "within", set)
tempAOVTable <- rbind(tempAOVTable[c(1,index),], tempAOVTable[-I(match(c(1, index), 1:nrow(tempAOVTable))),])
}
rm(index)
}
# -- CREATE THE RESIDUAL DATA AND PREDICTED VALUES -- #
residNfittedData <- NULL
residNfittedData <- data.frame(PredictedValues(result))
if (inherits(result, what = "aovlist")) { residNfittedData <- data.frame(residNfittedData,proj(result)[[length(result)]][,"Residuals"])
} else { residNfittedData <- data.frame(residNfittedData, residuals(result)) }
colnames(residNfittedData) <- c(paste(respvar[i],"pred", sep = "_"), paste(respvar[i],"resid", sep = "_"))
# -- CREATE THE DIAGNOSTIC PLOT -- #
if (!is.null(outputPath)) {
png(filename = paste(outputPath, design,"DiagPlot_", respvar[i], ".png", sep = ""))
params <- par(mfrow = c(1,2), bg = "white")
plot(residNfittedData[,(ncol(residNfittedData)-1)], residNfittedData[,ncol(residNfittedData)], main = paste("Residual vs Fitted:\n", respvar[i], sep = ""), xlab = "Fitted Values", ylab = "Residuals")
qqnorm(residNfittedData[,ncol(residNfittedData)])
qqline(residNfittedData[,ncol(residNfittedData)])
par(params)
dev.off()
}
# -- PERFORM NORMALITY TEST AND/HOMOGENEITY OF VARIANCES -- #
if (homogeneity || normality) {
assumpData <- data.frame(CombineFactorLevels(data = tempData, concatVar = allFactor, targetName = "factor")["factor"], residNfittedData[ncol(residNfittedData)])
# --- PRINTING RESULTS OF TEST FOR HOMOGENEITY OF VARIANCES --- #
if (homogeneity) {
capture.output(bartlett.result <- HeteroskedasticityTest(data = assumpData, var = paste(names(assumpData)[2]), grp = "factor", method = c("bartlett")))
cat("Bartlett's Test for Homogeneity of Variances\n")
printDataFrame(bartlett.result[,3:ncol(bartlett.result)])
cat("\n")
rm(bartlett.result)
}
# --- PRINTING RESULT OF SHAPIRO WILK TEST --- #
if (normality) {
if (nrow(assumpData) >= 3 && nrow(assumpData) <= 5000) {
NormalityTest(data = assumpData, var = paste(names(assumpData)[2]), grp = NULL, method = c("swilk"))
cat("\n")
}
}
rm(assumpData)
}
# --- PRINTING OF ANOVA TABLE --- #
if (is.null(contrastOption)) {
cat("ANOVA TABLE\nResponse Variable: ", respvar[i], "\n", sep = "")
printAOVTable(tempAOVTable)
if (estimatedData) { cat("REMARK: Raw data and estimates of the missing values are used.\n") }
cat("\n")
} else {
ContrastCompute(data = tempData, aovTable = tempAnova[[i]], mymodel, mymodel2,contrast.option = contrastOption)
if (estimatedData) { cat("REMARK: Raw data and estimates of the missing values are used.\n") }
cat("\n")
}
# --- PRINTING OF SUMMARY STATISTICS --- #
summaryStat <- NULL
if (designChoice <= 3) {
#if (designChoice == 1 && is.list(replications(formula, tempData))) {
summaryTable <- suppressWarnings(model.tables(result, "means", se = FALSE))
#} else { summaryTable <- suppressWarnings(model.tables(result, "means", se = TRUE)) }
grandMean <- summaryTable$tables[[1]]
summaryStat <- rbind(summaryStat, data.frame(((sqrt(tempAnova[[i]][[1]][nrow(tempAnova[[i]][[1]]),3])/grandMean) * 100)))
rownames(summaryStat)[nrow(summaryStat)] <- paste("Coef Var", sep = "")
summaryStat <- t(rbind(summaryStat, grandMean))
} else {
grandMean <- mean(tempData[, respvar[i]], na.rm = TRUE)
for (j in (1:length(tempAnova[[i]]))) {
summaryStat <- rbind(summaryStat, data.frame(((sqrt(tempAnova[[i]][[j]][[1]][nrow(tempAnova[[i]][[j]][[1]]),3])/grandMean) * 100)));
rownames(summaryStat)[nrow(summaryStat)] <- paste("Coef Var(",letters[j],")", sep = "")
}
summaryStat <- t(rbind(summaryStat, grandMean))
}
colnames(summaryStat)[ncol(summaryStat)] <- paste(respvar[i], "Mean")
cat("Summary Statistics\n")
printDataFrame(as.data.frame(summaryStat))
if (estimatedData) { cat("REMARK: Raw data and estimates of the missing values are used.\n") }
cat("\n")
# if (!estimatedData) {
# if (designChoice == 1 || designChoice == 2 || designChoice == 3) {
# if (!is.null(summaryTable$se)) {
# stdErrTable <- data.frame(Effects = names(unlist(summaryTable$se)),StdErr = unlist(summaryTable$se))
# rownames(stdErrTable) <- 1:nrow(stdErrTable)
# cat("Standard Errors\n")
# printDataFrame(stdErrTable)
# cat("\n")
# }
# }
# }
# --- DETERMINE THE EFFECTS WHICH ARE SIGNIFICANT --- #
sigEffect <- SignificantEffect(tempAOVTable, alpha = sig)
if (!is.null(sigEffect)) {
sigEffect <- trim.strings(sigEffect)
rvWithSigEffect <- c(rvWithSigEffect, respvar[i])
}
# --- PRINT THE TABLE OF MEANS --- #
if (is.null(sigEffect)) {
cat("Table of Means\n")
if (length(allFactor) == 1) {
tableMeans <- as.data.frame.table(summaryTable$tables[[length(summaryTable$tables)]])
colnames(tableMeans)[ncol(tableMeans)] <- paste(respvar[i]," Means", sep = "")
printDataFrame(tableMeans)
if (estimatedData) { cat("REMARK: Raw data and estimates of the missing values are used.\n") }
} else {
if (designChoice <= 3) {
print(ftable(summaryTable$tables[[length(summaryTable$tables)]]))
if (estimatedData) { cat("REMARK: Raw data and estimates of the missing values are used.\n") }
} else {
factorInOrder <- unlist(lapply(tempData[allFactor], nlevels))[order(unlist(lapply(tempData[allFactor], nlevels)))]
tableMeans <-eval(parse(text = paste("ftable(tapply(tempData[,'",respvar[i],"'], list(tempData[,'", paste(names(factorInOrder), collapse = "'],tempData[,'", sep = ""),"']), mean))", sep = "")))
names(attr(tableMeans, "row.vars")) <- names(factorInOrder[1:(length(allFactor) - 1)])
names(attr(tableMeans, "col.vars")) <- names(factorInOrder[length(allFactor)])
print(tableMeans)
if (estimatedData) { cat("REMARK: Raw data and estimates of the missing values are used.\n") }
}
}
cat("\n\n")
} else {
if (length(allFactor) > 1) {
highestInteraction <- paste(allFactor, collapse = ":", sep = "")
if (is.na(match(highestInteraction, sigEffect))) {
cat("Table of Means\n")
if (designChoice <= 3) { print(ftable(summaryTable$tables[[length(summaryTable$tables)]]))
} else {
factorInOrder <- unlist(lapply(tempData[allFactor], nlevels))[order(unlist(lapply(tempData[allFactor], nlevels)))]
tableMeans <-eval(parse(text = paste("ftable(tapply(tempData[,'",respvar[i],"'], list(tempData[,'", paste(names(factorInOrder), collapse = "'],tempData[,'", sep = ""),"']), mean))", sep = "")))
names(attr(tableMeans, "row.vars")) <- names(factorInOrder[1:(length(allFactor) - 1)])
names(attr(tableMeans, "col.vars")) <- names(factorInOrder[length(allFactor)])
print(tableMeans)
}
cat("\n\n")
}
}
} ## END IF ELSE STMT
# --- PRINT PAIRWISE MEANCOMPARISON RESULT --- #
if (!is.null(sigEffect)) {
if (!is.na(match(respvar[i], pwVar))) {
for (j in (1:length(sigEffect))) {
pairwiseComparison(tempAnova[[i]], design, trim.strings(sigEffect[j]), data = tempData, respvar[i], pwTest, siglevel = sig)
}
} else {
for (j in (1:length(sigEffect))) {
pairwiseComparison(tempAnova[[i]], design, trim.strings(sigEffect[j]), data = tempData, respvar[i], pwTest = NULL, siglevel = sig)
}
}
}
pwOption[[i]] <- list(rv = respvar[i], test = pwTest, sigEffect = sigEffect)
cat("\n")
tempNewData <- cbind(tempData, residNfittedData)
# -- save the dataset
if (estimatedData) {
tempNewData <- merge(tempData[,-I(match(respvar[i],names(tempData)))], tempNewData)
commonCol <- match(names(tempData),names(tempNewData))
tempData <- cbind(tempNewData[,commonCol], tempNewData[,-I(commonCol)])
} else { tempData <- merge(tempData, tempNewData) }
} ### end stmt --- for (i in (1:length(respvar)))
options(show.signif.stars = prev.option)
return(invisible(list(data = tempData, aovObject = aovresult, rvWithSigEffect = rvWithSigEffect, aovTable = tempAnova, pwOption = pwOption, model = modelRHS, model2 = modelRHS2, alpha = sig)))
}
|
|
24acf16e97a35fab417fe4fe3b461c7d8d5b66f2
|
a9dcd5374dc59034f667a666cabaf0e4f5e1f359
|
/functions/read_basic_nongdx_info.R
|
d48b21d4279a33956cb7bf1558d882433bb4a834
|
[] |
no_license
|
MasonBowen/ReEDS-Data-Visualizer
|
83053e9cbe1d65061cde8daea75d81b787656eea
|
091025f6053c06d0a9f7af4dd8b486e7f47e8262
|
refs/heads/master
| 2020-04-10T01:54:12.185680
| 2018-12-21T19:50:18
| 2018-12-21T19:50:18
| 160,728,831
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,384
|
r
|
read_basic_nongdx_info.R
|
# Reads all "basic" (non-GDX) model data that a user might feasibly want to
# change by hand. Keeping these data in CSVs lets non-R users edit them
# relatively easily. All paths are resolved against `basicdata.directory`,
# which must be defined before this script is sourced.
library(data.table)

# Year data: CSV with the years in the model, a single unnamed column.
years <- fread(file.path(basicdata.directory, "year_set.csv"), sep = ",", header = FALSE, col.names = "Year")

# Timeslice data: CSV with timeslice, season and time-of-day information, 3 columns.
timeslice <- fread(file.path(basicdata.directory, "hour_szn_timeslice.csv"), sep = ",", header = TRUE)

# Technology data: CSV with technology types (full, simplified,
# thermal/renewable, VRE); only the first 4 columns are used here.
technology <- fread(file.path(basicdata.directory, "gen_tech_set.csv"), sep = ",", header = TRUE)[, 1:4]

# Country / region / state mapping.
state_data <- fread(file.path(basicdata.directory, "country_region_state.csv"), header = TRUE)

# Category order for plotting. Empty strings mark absent entries; convert them
# to NA column-by-column. (Using a per-column replace instead of
# apply(..., MARGIN = c(1, 2), ...) avoids coercing the table to a matrix and
# is NA-safe: the original scalar `if (x == "")` would error on NA cells.)
category.order <- fread(file.path(basicdata.directory, "category_order.csv"), sep = ",", header = TRUE)
category.order <- as.data.table(lapply(category.order, function(col) {
  replace(col, !is.na(col) & col == "", NA)
}))

# Category color for plotting (full gen_tech_set.csv, including the color column).
category.color <- fread(file.path(basicdata.directory, "gen_tech_set.csv"), header = TRUE)
# ADDITIONAL FILES AVAILABLE CONCERNING THE RESOURCE CURVE DATA, NOT SURE IF WE NEED THEM OR NOT ...
|
9548418d9862a7407eef98e29ce11fe787c6317a
|
5f160e0117368a4864f0784ba163067ae705d5dc
|
/myR/man/tct.Rd
|
ced82be542a54860fd84e3d5e2303373052fd9ca
|
[] |
no_license
|
brunnothadeu/myR_package
|
e1a006a78c30f52970531decf824ff3d53ac3b3c
|
f56fb21c11a541bf6402bccd52760e9464e7c835
|
refs/heads/master
| 2020-04-04T06:38:54.029864
| 2018-11-20T19:19:43
| 2018-11-20T19:19:43
| 155,751,907
| 0
| 0
| null | 2018-11-01T18:02:06
| 2018-11-01T17:32:46
| null |
UTF-8
|
R
| false
| true
| 2,219
|
rd
|
tct.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/tct.R
\name{tct}
\alias{tct}
\title{Calcula medidas provindas da Teoria Classica dos Testes (TCT)}
\usage{
tct(dados, dadosCorr, mapa, Alts = c("A", "B", "C", "D", "E", "*", "."),
arred = 10, sdpop = FALSE, summary = FALSE)
}
\arguments{
\item{dados}{Banco de dados a ser utilizado. Este deve ser composto com os seguintes campos: (ID, CAD, PESO, IT1, ..., ITm, Desempenho).}
\item{dadosCorr}{Banco de dados corrigido a ser utilizado. Este deve ser composto com os seguintes campos: (ID, CAD, PESO, IT1, ..., ITm, Desempenho).}
\item{mapa}{DF com informacoes adicionais dos itens. Este deve estar ordenado na mesma ordem que 'dados' e 'dadosCorr', e obrigatoriamente deve conter uma coluna chama 'Gabarito'.}
\item{Alts}{Vetor com as possiveis marcacoes presentes nos dados.}
\item{arred}{Numero de casas decimais a serem consideradas no arredondamento.}
\item{sdpop}{Considerar o desvio populacional para o calculo das estatisticas?}
\item{summary}{Calcula um conjunto de informacoes sobre os dados utilizados nas estatisticas.}
}
\value{
DF com a seguinte composicao: (mapa, Dif, Disc, Abai, Acim, Bis, PercAlts, BiseAlts). Caso summary = TRUE, retorna uma lista com os dois objetos.
}
\description{
Utilizando uma estrutura de BI ou BIB, calcula as estatisticas classicas dos itens.
}
\details{
Etapa Anterior: 'escore'.
Etapa Posterior: 'pos.prop'.
}
\examples{
bib = list(CAD1 = c(1,2), CAD2 = c(2,3), CAD3 = c(3,1))
nblocos = 3
b.len = 2
resp = data.frame(matrix(sample(c("A", "B", "C"), 40, replace = T), ncol = 4), stringsAsFactors = F); names(resp) = paste0("IT", 1:4)
dados = cbind(data.frame(ID = 1:10, CAD = paste0("CAD", sample(3, 10, replace = T)), PESO = 1), resp)
dados = vetor.extendido(dados, bib, nblocos, b.len)
dadosCorr = corrigir(dados, c("A", "B", "A", "C", "A", "B"))
dadosCorr = escore(dadosCorr)
dados$Escore = dadosCorr$Escore
mapa = data.frame(Codigo = 1001:1006, Posicao = 1:6, Gabarito = c("A", "B", "A", "C", "A", "B"))
TCT = tct(dados, dadosCorr, mapa, Alts = c("A", "B", "C"))
}
\author{
Brunno Bittencourt
}
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.