blob_id
stringlengths 40
40
| directory_id
stringlengths 40
40
| path
stringlengths 2
327
| content_id
stringlengths 40
40
| detected_licenses
listlengths 0
91
| license_type
stringclasses 2
values | repo_name
stringlengths 5
134
| snapshot_id
stringlengths 40
40
| revision_id
stringlengths 40
40
| branch_name
stringclasses 46
values | visit_date
timestamp[us]date 2016-08-02 22:44:29
2023-09-06 08:39:28
| revision_date
timestamp[us]date 1977-08-08 00:00:00
2023-09-05 12:13:49
| committer_date
timestamp[us]date 1977-08-08 00:00:00
2023-09-05 12:13:49
| github_id
int64 19.4k
671M
⌀ | star_events_count
int64 0
40k
| fork_events_count
int64 0
32.4k
| gha_license_id
stringclasses 14
values | gha_event_created_at
timestamp[us]date 2012-06-21 16:39:19
2023-09-14 21:52:42
⌀ | gha_created_at
timestamp[us]date 2008-05-25 01:21:32
2023-06-28 13:19:12
⌀ | gha_language
stringclasses 60
values | src_encoding
stringclasses 24
values | language
stringclasses 1
value | is_vendor
bool 2
classes | is_generated
bool 2
classes | length_bytes
int64 7
9.18M
| extension
stringclasses 20
values | filename
stringlengths 1
141
| content
stringlengths 7
9.18M
|
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
003fc7f1d0899c3b2970d7f90275834ccaed743e
|
09f53cade40e8236d508b6014d3935623f75edf9
|
/GO_hypergetric distribution/FDR0005_time/bar_graph_GO.R
|
5f3158867653bdcf0a0890a0c2481d7872f25274
|
[] |
no_license
|
YKeito/iterations16
|
1e0c75bc838f21d94b0ee78dfcc495eef676e53a
|
c81624bf54154a2eed658b2837135b80c9c469c7
|
refs/heads/master
| 2022-12-18T05:05:10.574761
| 2020-09-28T05:26:43
| 2020-09-28T05:26:43
| 299,197,561
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 15,724
|
r
|
bar_graph_GO.R
|
#####bar graph####
# Draws, for each (treatment, timepoint, subcluster) of interest, a horizontal
# bar chart of the top-10 enriched GO terms (from the FDR < 0.005 summary
# tables assumed to exist in the workspace) and saves each chart as a PNG.
library(ggplot2)

# Common output directory for every figure produced below.
go_fig_dir <- "~/Nakano_RNAseq/network_analysis/GO_results_Fig"

# Subset `go_summary` to subcluster `clu_num`, keep at most its first ten rows,
# draw a flipped bar chart of enrichment scores (GO terms ordered by score),
# print it, and save it as `png_name` inside `go_fig_dir`.
# Returns the subsetted data frame invisibly so callers can keep it, matching
# the per-subcluster variables the original script created.
plot_subcluster_go <- function(go_summary, clu_num, plot_title, png_name) {
  df_sub <- go_summary[go_summary$CluNum == clu_num, ]
  # head() instead of `[1:10, ]`: avoids NA-padded rows (and NA bars in the
  # plot) when a subcluster has fewer than ten enriched terms.
  df_sub <- head(df_sub, 10)
  g <- ggplot(
    df_sub,
    aes(x = reorder(GO_term, enrichment_score), y = enrichment_score)
  ) +
    geom_bar(stat = "identity") +
    coord_flip() +
    ylab("-log2(q-value)") +
    xlab("GO term") +
    labs(title = plot_title) +
    # theme_bw() must come before theme(): the original applied theme_bw()
    # last, which replaced the complete theme and silently discarded the
    # axis-text size/bold settings below.
    theme_bw(base_size = 20) +
    theme(axis.text = element_text(size = 20),
          axis.title = element_text(size = 20, face = "bold"))
  print(g)
  ggsave(file = file.path(go_fig_dir, png_name), plot = g)
  invisible(df_sub)
}

####CY15####
subcluster1_CY15_1h   <- plot_subcluster_go(EnrichedGO_Summ_CY15_1h_FDR005, 1,
  "subcluster1 CY15 1h GO", "subcluster1_CY15_1h_GO.png")
subcluster86_CY15_1h  <- plot_subcluster_go(EnrichedGO_Summ_CY15_1h_FDR005, 86,
  "subcluster86 CY15 1h GO", "subcluster86_CY15_1h_GO.png")
subcluster1_CY15_3h   <- plot_subcluster_go(EnrichedGO_Summ_CY15_3h_FDR005, 1,
  "subcluster1 CY15 3h GO", "subcluster1_CY15_3h_GO.png")
subcluster62_CY15_3h  <- plot_subcluster_go(EnrichedGO_Summ_CY15_3h_FDR005, 62,
  "subcluster62 CY15 3h GO", "subcluster62_CY15_3h_GO.png")
subcluster1_CY15_12h  <- plot_subcluster_go(EnrichedGO_Summ_CY15_12h_FDR005, 1,
  "subcluster1 CY15 12h GO", "subcluster1_CY15_12h_GO.png")
subcluster12_CY15_12h <- plot_subcluster_go(EnrichedGO_Summ_CY15_12h_FDR005, 12,
  "subcluster12 CY15 12h GO", "subcluster12_CY15_12h_GO.png")
subcluster1_CY15_24h  <- plot_subcluster_go(EnrichedGO_Summ_CY15_24h_FDR005, 1,
  "subcluster1 CY15 24h GO", "subcluster1_CY15_24h_GO.png")

####CY16####
subcluster2_CY16_1h   <- plot_subcluster_go(EnrichedGO_Summ_CY16_1h_FDR005, 2,
  "subcluster2 CY16 1h GO", "subcluster2_CY16_1h_GO.png")
subcluster1_CY16_3h   <- plot_subcluster_go(EnrichedGO_Summ_CY16_3h_FDR005, 1,
  "subcluster1 CY16 3h GO", "subcluster1_CY16_3h_GO.png")
subcluster1_CY16_12h  <- plot_subcluster_go(EnrichedGO_Summ_CY16_12h_FDR005, 1,
  "subcluster1 CY16 12h GO", "subcluster1_CY16_12h_GO.png")
subcluster1_CY16_24h  <- plot_subcluster_go(EnrichedGO_Summ_CY16_24h_FDR005, 1,
  "subcluster1 CY16 24h GO", "subcluster1_CY16_24h_GO.png")
subcluster14_CY16_24h <- plot_subcluster_go(EnrichedGO_Summ_CY16_24h_FDR005, 14,
  "subcluster14 CY16 24h GO", "subcluster14_CY16_24h_GO.png")

####CY20####
subcluster1_CY20_24h  <- plot_subcluster_go(EnrichedGO_Summ_CY20_24h_FDR005, 1,
  "subcluster1 CY20 24h GO", "subcluster1_CY20_24h_GO.png")

####CY20_48h####
subcluster3_CY20_48h  <- plot_subcluster_go(EnrichedGO_Summ_CY20_48h_FDR005, 3,
  "subcluster3 CY20 48h GO", "subcluster3_CY20_48h_GO.png")
# BUG FIX: the original filtered CluNum == 3 here, so the "subcluster7" figure
# silently duplicated subcluster3's data; use the matching cluster number 7.
subcluster7_CY20_48h  <- plot_subcluster_go(EnrichedGO_Summ_CY20_48h_FDR005, 7,
  "subcluster7 CY20 48h GO", "subcluster7_CY20_48h_GO.png")
subcluster17_CY20_48h <- plot_subcluster_go(EnrichedGO_Summ_CY20_48h_FDR005, 17,
  "subcluster17 CY20 48h GO", "subcluster17_CY20_48h_GO.png")
subcluster35_CY20_48h <- plot_subcluster_go(EnrichedGO_Summ_CY20_48h_FDR005, 35,
  "subcluster35 CY20 48h GO", "subcluster35_CY20_48h_GO.png")
# BUG FIX: the original filtered CluNum == 35 here, duplicating subcluster35;
# use the matching cluster number 42.
subcluster42_CY20_48h <- plot_subcluster_go(EnrichedGO_Summ_CY20_48h_FDR005, 42,
  "subcluster42 CY20 48h GO", "subcluster42_CY20_48h_GO.png")
subcluster51_CY20_48h <- plot_subcluster_go(EnrichedGO_Summ_CY20_48h_FDR005, 51,
  "subcluster51 CY20 48h GO", "subcluster51_CY20_48h_GO.png")
subcluster76_CY20_48h <- plot_subcluster_go(EnrichedGO_Summ_CY20_48h_FDR005, 76,
  "subcluster76 CY20 48h GO", "subcluster76_CY20_48h_GO.png")
subcluster139_CY20_48h <- plot_subcluster_go(EnrichedGO_Summ_CY20_48h_FDR005, 139,
  "subcluster139 CY20 48h GO", "subcluster139_CY20_48h_GO.png")
|
bd6b37fcd2bb45fcea8c07fef79b80d176d7c444
|
71af941bf29e9a831b95abb2e95a1440481bf0eb
|
/petstore.R
|
29d9477ea099de953dfb2cb5cbeb31044f41e82d
|
[] |
no_license
|
SaffraP/senior_project
|
f05fd9d3573ed154c7000cf5560294cac52e359d
|
04cdcf52f755291472d95a631d84046764fe933f
|
refs/heads/master
| 2022-11-18T13:32:11.707400
| 2020-07-17T04:40:11
| 2020-07-17T04:40:11
| 263,985,591
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 529
|
r
|
petstore.R
|
library(tidyverse)
# Load the raw pet-store data; relative path assumes the script is run from
# the project root.
petstore <- read_csv("Data/petstore_data.csv")
# Inspect the storage mode and class of every column before coercing.
sapply(petstore, mode)
sapply(petstore, class)
# Coerce columns 1-3 to character, then columns 2-3 back to numeric.
# Net effect: column 1 becomes character, columns 2-3 numeric; values that do
# not parse as numbers become NA (counted below).
# NOTE(review): sapply() here returns a matrix that is assigned back into the
# tibble column-block — verify the intended column types survive the round trip.
petstore[, c(1:3)] <- sapply(petstore[, c(1:3)], as.character)
petstore[, c(2:3)] <- sapply(petstore[, c(2:3)], as.numeric)
# Tabulate how many NA values each row contains.
table(apply(petstore, MARGIN = 1, function(x) sum(is.na(x))))
# 0 = # of rows that are not missing any values
# 1 = # of rows that are missing a value in a single column
# 2 = # of rows that are missing values in two columns.
# etc
# Add them all up to get the total number of rows.
|
7f1db281c1f5ba2453259aeb929e33dfe6d0f8e7
|
9765e867e097a4460608299af8f66883b0447e2c
|
/AllocateR/alloc_discrete_paper_graphs/fs_rev.R
|
072d65828d36a47e6e9e2d7f313b4933d8ef95ef
|
[
"MIT"
] |
permissive
|
DynamicProgramming-StructuralEstimation/PrjOptiSNW
|
eba9f1956297401df138487e98699761536ce662
|
c65a989b36b4ceba6c99d7a54dad98724d304a9e
|
refs/heads/master
| 2023-03-29T05:36:16.169607
| 2021-03-30T19:45:39
| 2021-03-30T19:45:39
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 8,785
|
r
|
fs_rev.R
|
# Graphs where allocation lines from multiple planners are jointly compared.
# "Paper\Appendix_graphs_and_tables\Robustness_Tax_model_Actual_vs_feasible_c_allocation_round1_18-64_year-olds_by_income.png"
# Close any graphics devices left open by a previous run (RStudio's device and
# the default one); wrapped in try() because there may be nothing to close.
try(dev.off(dev.list()["RStudioGD"]),silent=TRUE)
try(dev.off(),silent=TRUE)
# Libraries
library(tidyverse)
library(REconTools)
library(scales)
# Parameters and Options --------
# Possible rho values to select from
ar_seq_equi <- seq(-2, 2, length.out=30)
# initial points: planner preference values rho = 1 - 10^s on an equi-spaced
# exponent grid, bracketed by rho = 1 and rho = -100 at the extremes
ar_rho_init <- 1 - (10^(c(seq(-2, 2, length.out=30))))
# ar_rho_init <- ar_rho_init[seq(1, 30, by=2)]
ar_rho_init <- c(1, round(ar_rho_init, 4), -100)
# extra points (computed but not folded into ar_rho below)
ar_rho_extra <- 1 - (10^(c(seq(ar_seq_equi[23], ar_seq_equi[27], length.out=15))))
# ar_rho_extra <- round(ar_rho_extra, 4)
# combine together
ar_rho <- c(ar_rho_init)
ar_rho <- sort(ar_rho)
# log10((-1)*(ar_rho - 1))
# 1/(1-ar_rho)
# Results from which planners should
ls_it_rho_combo_type <- c(1)
# Types of allocation files to consider: solution-file suffixes per policy
# (trump check, biden check, biden check + lockdown), each with marital-status
# and discount-factor (bt60/bt95) variants plus the unrestricted base file.
ls_st_file_suffix_trumpchk <-
  c('snwx_trumpchk_moredense_a65zh266zs5_b1_xi0_manna_168_bt95',
    'snwx_trumpchk_moredense_a65zh266zs5_b1_xi0_manna_168_bt60',
    'snwx_trumpchk_moredense_a65zh266zs5_b1_xi0_manna_168_married',
    'snwx_trumpchk_moredense_a65zh266zs5_b1_xi0_manna_168_unmarried',
    'snwx_trumpchk_moredense_a65zh266zs5_b1_xi0_manna_168')
ls_st_file_suffix_trumpchk <- rev(ls_st_file_suffix_trumpchk)
ls_st_file_suffix_bidenchk <-
  c('snwx_bidenchk_moredense_a65zh266zs5_b1_xi0_manna_168_bt95',
    'snwx_bidenchk_moredense_a65zh266zs5_b1_xi0_manna_168_bt60',
    'snwx_bidenchk_moredense_a65zh266zs5_b1_xi0_manna_168_married',
    'snwx_bidenchk_moredense_a65zh266zs5_b1_xi0_manna_168_unmarried',
    'snwx_bidenchk_moredense_a65zh266zs5_b1_xi0_manna_168')
ls_st_file_suffix_bidenchk <- rev(ls_st_file_suffix_bidenchk)
ls_st_file_suffix_bchklock <-
  c('snwx_bchklock_moredense_a65zh266zs5_b1_xi0_manna_168_bt95',
    'snwx_bchklock_moredense_a65zh266zs5_b1_xi0_manna_168_bt60',
    'snwx_bchklock_moredense_a65zh266zs5_b1_xi0_manna_168_married',
    'snwx_bchklock_moredense_a65zh266zs5_b1_xi0_manna_168_unmarried',
    'snwx_bchklock_moredense_a65zh266zs5_b1_xi0_manna_168')
ls_st_file_suffix_bchklock <- rev(ls_st_file_suffix_bchklock)
# list to run
ls_st_file_suffix <- c(ls_st_file_suffix_trumpchk,
                       ls_st_file_suffix_bidenchk,
                       ls_st_file_suffix_bchklock)
# NOTE(review): the two reassignments below overwrite the combined list, so
# only the single 'bidenchk moredense' solution is actually processed — confirm
# this narrowing is intentional and not leftover debugging.
ls_st_file_suffix <- c('snwx_bidenchk_tiny_b1_xi0_manna_168')
ls_st_file_suffix <- c('snwx_bidenchk_moredense_a65zh266zs5_b1_xi0_manna_168')
# Per capita or per household results
ls_bl_per_capita <- c(TRUE)
# Allocation bounds types (check limits per adult/child, in hundreds of USD)
ls_st_bound_files <- c('14ca14ck', '17ca17ck', '20ca20ck')
# Loop over file types1
# Outer loops: per-capita flag x solution file. For each, aggregate the
# rho-specific REV csv files into one table per bounds type, save the
# aggregate csv, then draw a combined REV-vs-inequality-aversion figure.
for (bl_per_capita in ls_bl_per_capita) {
  for (st_which_solu in ls_st_file_suffix) {
    # Generate files by aggregating over rho types
    # Results counter
    it_ls_results_ctr <- 0
    ls_of_ls_rev_results <- vector(mode = "list", length = length(ls_st_bound_files))
    for (st_bound_files in ls_st_bound_files) {
      it_ls_results_ctr <- it_ls_results_ctr + 1
      # Generate files by aggregating over rho types
      # Results counter
      it_results_ctr <- 0
      ls_rev_results <- vector(mode = "list", length = length(ar_rho))
      # Loop over results
      for (fl_rho in ar_rho) {
        # Counter updating
        it_results_ctr <- it_results_ctr + 1
        # Load support file: project helper (defined elsewhere) that returns
        # the folder/flag settings for this solution + rho combination.
        ls_output <- fs_opti_support_202103(st_which_solu, bl_per_capita=bl_per_capita, fl_rho=fl_rho)
        # Load folder names etc
        srt_results_root <- ls_output$srt_results_root
        bl_save_img <- ls_output$bl_save_img
        srt_imgcsv_rev_root <- ls_output$srt_imgcsv_rev_root
        srt_folder <- ls_output$srt_folder
        # NOTE(review): bl_save_img is unconditionally overridden here, and
        # several of the variables loaded above are never used in this loop.
        bl_save_img <- TRUE
        srt_paper_appendix_textgraph <- ls_output$srt_paper_appendix_textgraph
        # folder that is not rho specific for storing aggregate folder
        srt_img_aggregator_save_root <- ls_output$srt_img_aggregator_save_root
        # Get Last folder which is per-capita/hhousehold and rho specific
        st_hhpercapita_rho <- tail(strsplit(srt_results_root, "/")[[1]],n=1)
        # Get folder name path
        srt_res_source_folder <- file.path(srt_folder, 'csv')
        srt_csv_allocate_subfolder <- paste0('b1_a64_', st_bound_files, '_18t64')
        srt_csv_file <- paste0('rev_feasible_', st_bound_files, '.csv')
        spn_csv_full_path <- file.path(srt_results_root, srt_res_source_folder,
                                       srt_csv_allocate_subfolder, srt_csv_file)
        # Clean up each file
        df_alloc_cur <- as_tibble(read.csv(spn_csv_full_path))
        ls_rev_results[[it_results_ctr]] <- df_alloc_cur
      }
      # bind files together: stack all rho-specific tables for this bounds type
      mt_rev_across_rhos <- do.call(rbind, ls_rev_results)
      ls_of_ls_rev_results[[it_ls_results_ctr]] <- mt_rev_across_rhos %>%
        mutate(bounds = srt_csv_allocate_subfolder)
      # CSV Save
      snm_save_csv <- paste0('rev_feasible_', st_bound_files, '_rhomultiple.csv')
      write.csv(mt_rev_across_rhos,
                paste0(srt_imgcsv_rev_root, snm_save_csv),
                row.names = TRUE)
      # Graph
      # NOTE(review): the filtered result below is discarded (no assignment,
      # not auto-printed inside a loop) — likely leftover exploration.
      mt_rev_across_rhos %>% filter(objective == "c2020")
    }
    # cAll results from different bounds to the same file
    mt_rev_across_rhos_bounds <- do.call(rbind, ls_of_ls_rev_results)
    # collect only relevant columns: REV in percent against 1 - rho
    mt_rev_across_rhos_bounds_selected <- mt_rev_across_rhos_bounds %>%
      filter(objective == "c2020") %>%
      mutate(REV = REV*100) %>%
      mutate(one_minus_rho = 1 - rho_val) %>%
      select(one_minus_rho, REV, bounds) %>%
      rename(Outcome = bounds, rev = REV)
    # graph, copied from https://fanwangecon.github.io/PrjOptiAlloc/articles/ffv_opt_sobin_rkone_allrw_training.html
    # x-labels (lambda values shown on the log10-scaled 1 - rho axis)
    x.labels <- c('\u03bb \U2248 1.00', '\u03bb = 0.90', '\u03bb = 0', '\u03bb = -10', '\u03bb = -100')
    x.breaks <- c(0.01, 0.10, 1, 10, 100)
    # title line 2
    # title_line1 <- sprintf("Percentage of Training Spots Misallocated, NSW Lalonde (AER, 1986)")
    # title_line2 <- sprintf("REV (Resource Equivalent Variation) Along Planner Spectrum")
    st_title <- sprintf(paste0('How much Fewer Resources are Needed (Shares) to Achieve the Same Welfare'))
    title_line1 <- sprintf("Compare alternative allocations to optimal allocations given observables and estimates")
    title_line2 <- sprintf("Solid Red Line: train 297 random NSW treatment individuals vs. optimally allocating 297 spots")
    title_line3 <- sprintf("Dashed Blue Line: train 297 lowest baseline wage individuals vs. optimally allocating 297 spots")
    # Relabel Variable
    # NOTE(review): only the 14ca14ck and 20ca20ck bounds are relabelled and
    # kept in the figure; 17ca17ck is intentionally filtered out below.
    Outcome_levels <- c("$1,400" = "b1_a64_14ca14ck_18t64",
                        "$2,000" = "b1_a64_20ca20ck_18t64")
    mt_rev_across_rhos_bounds_selected_fig <- mt_rev_across_rhos_bounds_selected %>%
      filter(Outcome == 'b1_a64_14ca14ck_18t64'
             | Outcome == 'b1_a64_20ca20ck_18t64') %>%
      mutate(Outcome = as_factor(Outcome)) %>%
      mutate(Outcome = fct_recode(Outcome, !!!Outcome_levels))
    # Graph Results--Draw
    line.plot <- mt_rev_across_rhos_bounds_selected_fig %>%
      ggplot(aes(x=one_minus_rho, y=rev,
                 group=Outcome,
                 colour=Outcome,
                 linetype=Outcome,
                 shape=Outcome)) +
      # geom_point() +
      # geom_line() +
      geom_smooth(span = 0.35, se=FALSE, size=2) +
      # geom_vline(xintercept=c(1), linetype="dotted") +
      labs(x = 'Planner inequality aversion, \u03bb',
           y = paste0('Resource equivalent variation (percent)')) +
      scale_x_continuous(trans='log10', labels = x.labels, breaks = x.breaks) +
      theme_bw(base_size=8) +
      ylim(0, 100)
    # legend area
    line.plot <- line.plot +
      theme(
        text = element_text(size = 16),
        legend.position = c(0.18, 0.85),
        legend.background = element_rect(fill = "white", colour = "black",
                                         linetype='solid'))
    line.plot <- line.plot +
      labs(colour = "Check limit per adult and child",
           shape = "Check limit per adult and child",
           linetype = "Check limit per adult and child")
    # # Labeling
    # line.plot$labels$linetype <- "REV\nOptimal\nvs.\nAlternatives"
    # line.plot$labels$colour <- "REV\nOptimal\nvs.\nAlternatives"
    # line.plot$labels$shape <- "REV\nOptimal\nvs.\nAlternatives"
    # Print
    print(line.plot)
    # add png
    snm_save_png <- paste0('rev_feasible_rhomultiple.png')
    # Save (path reuses srt_imgcsv_rev_root from the last rho iteration)
    ggsave(line.plot,
           file=file.path(srt_imgcsv_rev_root, snm_save_png),
           width = 270,
           height = 216, units='mm',
           dpi = 300)
  }
}
|
e31b415554dc5f7569ab6937f31c4659d600bf50
|
acd69edac208c44604fa81b27c8687c288efdee2
|
/R/model_selection.R
|
87cc972d20be6356335b4c110e23347f868b7938
|
[] |
no_license
|
kcf-jackson/glmGraph
|
483a2c92001d4f2fc41a8560b3f0c11cc40186a3
|
81c2a2ea59194d5cc828f4855d1660398a6a0a8a
|
refs/heads/master
| 2021-01-19T08:47:17.748331
| 2017-05-03T15:00:21
| 2017-05-03T15:00:21
| 87,677,603
| 1
| 1
| null | 2017-05-03T15:00:22
| 2017-04-09T02:32:52
|
R
|
UTF-8
|
R
| false
| false
| 6,707
|
r
|
model_selection.R
|
#' Model selection for GLM graph
#'
#' Searches over undirected graph structures with a Gibbs sampler: every sweep
#' tentatively toggles each edge, scores the change via the per-node GLM
#' likelihood (penalised by edge count), and stochastically accepts or reverts
#' the toggle. Tracks both the best-scoring graph seen and the per-edge
#' inclusion frequency across iterations.
#'
#' @param data0 dataframe; the data.
#' @param p Between 0 and 1; prior brief of how connected the graph is.
#' @param lambda Tuning parameter for gibbs sampler.
#' @param num_iter number of iterations for gibbs sampler.
#' @param graph_init characters string; one of "random", "correlation" and "mutual".
#'"random" generates a graph randomly. "correlation" computes the pairwise correlation
#'between variables and keeps the ones above the third quartile. "mutual" is similar to
#'"correlation" except it uses pairwise mutual information instead.
#' @return A list with `best_model` (best graph, its score and the column
#'   families) and `freq_graph` (edge inclusion frequencies in [0, 1]).
#' @export
learn_graph <- function(data0, p = 0.2, lambda, num_iter = 100,
                        graph_init = "random") {
  # NOTE(review): `graph_init` is accepted but never used below — the initial
  # graph always comes from create_random_graph(); confirm whether
  # initialise_graph(data0, graph_init) was intended here.
  if (!all(sapply(head(data0), is.numeric))) {
    stop("data has to be all numerics at the moment.")
  }
  # Default tuning parameter shrinks with sample size.
  if (missing(lambda)) {
    lambda <- 1 / sqrt(nrow(data0))
  }
  num_var <- ncol(data0)
  rgraph <- create_random_graph(num_var, p = p)
  nr <- nrow(rgraph)
  nc <- ncol(rgraph)
  # One distribution family per column (e.g. "binomial", "poisson", ...).
  family <- apply(data0, 2, analyze_variable)
  if ("unknown" %in% family) {
    stop(check_family(family))
  }
  current_model <- fit_graph(rgraph, family, data0)
  # Score = model log-likelihood minus an edge-count penalty.
  current_likelihood <- get_model_likelihood(current_model) - sum(rgraph)
  current_factorisation <- essential_spec(rgraph, family)
  print(current_likelihood)
  #---------------Variables for model selection--------------------
  best_measure_graph <- list(rgraph = rgraph, score = current_likelihood)
  frequency_graph <- list(
    rgraph = matrix(0, nrow = nr, ncol = nc), score = NA
  )
  #----------------------------------------------------------------
  # Gibbs model selection
  pb <- txtProgressBar(1, num_iter, style = 3)
  for (iter in 1:num_iter) {
    # Sweep each edge (i, j) of the upper triangle once per iteration.
    for (i in 1:(nr - 1)) {
      for (j in (i+1):nc) {
        # Tentatively toggle edge (i, j), keeping the matrix symmetric.
        rgraph[i,j] %<>% flip_bit()
        rgraph[j,i] <- rgraph[i,j]
        new_likelihood <- current_likelihood +
          add_new_likelihood(current_factorisation[i,], j, rgraph[i,j], data0,
                             IC_factor = 2)
        #-------------------Update best graph----------------------
        has_improved <- (new_likelihood > best_measure_graph$score)
        if (has_improved) {
          cat("Improved! Loglikelihood:", new_likelihood, "\n")
          plot_graph(rgraph)
          best_measure_graph$rgraph <- rgraph
          best_measure_graph$score <- new_likelihood
        }
        #----------------------------------------------------------
        # Gibbs step: jump == 1 keeps the previous state (revert the toggle);
        # jump == 2 accepts the toggled graph and refreshes the cached score
        # and factorisation.
        jump <- gibbs_update(lambda, c(current_likelihood, new_likelihood))
        if (jump == 1) {
          rgraph[i,j] %<>% flip_bit()
          rgraph[j,i] <- rgraph[i,j]
        } else {
          current_likelihood <- new_likelihood
          current_factorisation <- essential_spec(rgraph, family)
        }
      }
    }
    #-------------------Update frequency graph---------------------
    frequency_graph$rgraph <- frequency_graph$rgraph + rgraph
    #--------------------------------------------------------------
    setTxtProgressBar(pb, iter)
  }
  best_measure_graph$family <- family
  # Convert per-edge counts into inclusion frequencies in [0, 1].
  frequency_graph$rgraph <- frequency_graph$rgraph / num_iter
  list(best_model = best_measure_graph, freq_graph = frequency_graph)
}
#' Build an initial adjacency matrix for the graph search
#'
#' @keywords internal
#' @param data0 dataframe; the data (one node per column).
#' @param method one of "random", "correlation", "mutual" or "copula"
#'   (case-insensitive).
#' @param threshold quantile in [0, 1]; for the non-random methods, pairwise
#'   scores above this quantile become edges.
#' @return A square adjacency matrix with a zero diagonal (0/1 for "random",
#'   logical-with-zero-diagonal for the thresholded methods).
initialise_graph <- function(data0, method = "random", threshold = 0.75) {
  method <- tolower(method)
  if (!(method %in% c("random", "correlation", "mutual", "copula"))) {
    # Fixed: the message previously lacked the closing quote after 'copula'.
    stop("The method must be one of 'random', 'correlation', 'mutual' and 'copula'.")
  }
  num_nodes <- ncol(data0)
  if (method == "random") {
    # Sparse random start: edge probability capped at 0.5.
    g <- create_random_graph(num_nodes, p = min(0.5, 2 / num_nodes^2))
    return(g)
  } else {
    # Pairwise association scores (correlation / mutual information / copula).
    g <- compute_distance_matrix(data0, method)
  }
  # Keep only the strongest pairs; drop self-edges.
  g <- (g > quantile(g, threshold))
  diag(g) <- 0
  g
}
#' Pairwise association matrix between the columns of a data frame
#'
#' @keywords internal
#' @param data0 dataframe; the data.
#' @param method "correlation", "mutual" or "copula".
#' @return A square numeric matrix of pairwise scores. The diagonal is set so
#'   that self-pairs never survive the quantile thresholding done by the
#'   caller (-1 for correlations, 0 for mutual information).
compute_distance_matrix <- function(data0, method) {
  if (method == "correlation") {
    g <- cor(data0)
    diag(g) <- -1  # correlations lie in [-1, 1]; -1 excludes self-pairs
  } else if (method == "mutual") {
    g <- infotheo::mutinformation(infotheo::discretize(data0))
    diag(g) <- 0   # mutual information is non-negative; 0 excludes self-pairs
  } else if (method == "copula") {
    g <- copula_cor(data0)
  } else {
    # Previously an unrecognised method failed with the cryptic
    # "object 'g' not found"; fail with an explicit message instead.
    stop("Unknown method: ", method)
  }
  g
}
#' Draw the next sampler state from the softmax over candidate scores
#' @keywords internal
gibbs_update <- function(lambda, score_vec) {
  # Anchor scores at the first entry: this protects exp() against
  # overflow/underflow relative to the current state's score.
  rel_scores <- score_vec - score_vec[1]
  weights <- exp(lambda * rel_scores)
  # Return the index of the chosen state (1 = current, 2 = candidate).
  sample(seq_along(rel_scores), size = 1, prob = weights / sum(weights))
}
#' Build the error message naming columns whose family could not be detected
#' @keywords internal
#' @param family character vector of per-column family names, as produced by
#'   analyze_variable(); entries equal to "unknown" are reported.
#' @return The message string; the caller (learn_graph) passes it to stop().
check_family <- function(family) {
  unknown_column <- which(family == "unknown")
  # Return the message explicitly. The original relied on the (invisible)
  # value of the final assignment being the return value, which is fragile
  # and hides the intent.
  paste(
    "I have problems figuring out what type of variables the columns",
    paste(unknown_column, collapse = ","), "have.", sep = " "
  )
}
#' Combine the graph factorisation with the per-column family labels
#' @keywords internal
essential_spec <- function(rgraph, family) {
  # stringsAsFactors spelled out in full: `F` (as in the original) is a
  # reassignable binding, not a reserved word, so FALSE is the safe form.
  data.frame(factorise(rgraph), family, stringsAsFactors = FALSE)
}
#' Fit the graphical GLM implied by an adjacency matrix
#' @keywords internal
fit_graph <- function(rgraph, family, data0) {
  # Turn the adjacency matrix into per-node conditional specifications,
  # then maximise the likelihood of the resulting model on the data.
  spec <- build_conditional(factorise(rgraph), family)
  MLE_graph(spec, data0)
}
#' Toggle a 0/1 edge indicator (vectorised)
#' @keywords internal
flip_bit <- function(x) {
  # |x - 1| maps 0 -> 1 and 1 -> 0 for the binary adjacency entries
  # this helper is used with.
  abs(x - 1)
}
# #' @keywords internal
# compute_GLM_full_class_likelihood <- function(data0) {
# "gamma", gamma_deriv2, gamma_deriv3,
# "poisson", poisson_deriv2, poisson_deriv3,
# "binomial", binomial_deriv2, binomial_deriv3
# return(list(family_name, likelihood))
# }
#' Detect variable type and decide what family of distribution to use
#' @keywords internal
#' @param x0 data vector (one column of the data).
#' @return One of "binomial", "multinomial", "poisson", "gamma", "gaussian"
#'   or "unknown".
analyze_variable <- function(x0) {
  # Exactly two distinct values: treat as binary regardless of storage type.
  if (length(unique(x0)) == 2) {
    return("binomial")
  }
  if (is.factor(x0)) {
    return("multinomial")
  }
  if (is.numeric(x0)) {
    if (all(is.wholenumber(x0))) {
      return("poisson")  # integer-valued: count data
    }
    # Continuous: strictly positive -> gamma, otherwise gaussian.
    if (all(x0 > 0)) {
      return("gamma")
    }
    return("gaussian")
  }
  # Anything else (e.g. character data with more than two levels).
  "unknown"
}
#' Marginal log-likelihood change from toggling a single edge
#'
#' @keywords internal
#' @param current one-row spec with list-columns `fixed` (response column),
#'   `given` (current predictor columns) and `family`.
#' @param j index of the candidate edge (predictor column) to toggle.
#' @param state 1 = add edge j, otherwise remove edge j.
#' @param data0 data matrix whose columns are indexed by `fixed`/`given`.
#' @param IC_factor per-edge complexity penalty (default 2, i.e. AIC-like).
#' @return new log-likelihood minus current, plus the edge-count penalty.
add_new_likelihood <- function(current, j, state, data0, IC_factor = 2) {
  fixed <- current$fixed[[1]]
  given <- current$given[[1]]
  family <- current$family[[1]]
  # Fit a GLM of the fixed column on an intercept plus the given predictors
  # and return its log-likelihood. NOTE(review): fit_glm() is defined
  # elsewhere in this package.
  loglik_for <- function(predictors) {
    fit_glm(
      y = data0[, fixed], x = cbind(intercept = 1, data0[, predictors]),
      family = family, engine = speedglm::speedglm.wfit
    )$logLik
  }
  current_marginal_likelihood <- loglik_for(given)
  # state == 1 includes edge j in the graph, otherwise it is excluded.
  new_given <- if (state == 1) sort(c(given, j)) else setdiff(given, j)
  new_marginal_likelihood <- loglik_for(new_given)
  # Adding an edge costs IC_factor; removing one earns it back.
  edge_num_adjustment <- ifelse(state == 1, -1, 1) * IC_factor
  new_marginal_likelihood - current_marginal_likelihood + edge_num_adjustment
}
|
b19345319c530d960e8f8d8fc949c99bfdadaec8
|
6ba2ac661a2c08cfd4f638d5060da01041602cd5
|
/Rscript.R
|
bc502d8ebcbda16d3e9a1ea9784c1fd9116d8ca1
|
[] |
no_license
|
ameenkhan07/Temperature-Outliers-Analysis
|
b3cf8f4c967ac2bf4a26155bf4e20add6e9cfd5a
|
a90ef0c9f1ae305f997b47dcb576b0f37e035120
|
refs/heads/master
| 2020-04-11T13:55:24.679524
| 2017-11-05T12:16:05
| 2017-11-05T12:16:05
| 40,604,376
| 4
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 3,336
|
r
|
Rscript.R
|
# Per-station trend analysis of temperature-outlier frequencies, 1964-2013.
# For each station: compute the yearly fraction of observations in each outlier
# category, fit a linear trend over years, then rank stations by slope and
# attach a colour bucket for mapping output.
# NOTE(review): the script indexes esya.csv by position — it assumes column 1
# is a date string, columns 4/5 are longitude/latitude, and column 9 is the
# outlier label ("Strong Hot", "Weak Hot", "Strong Cold", "Weak Cold");
# confirm against the file header.
data<-read.csv("esya.csv",header=TRUE,na.strings="NA",stringsAsFactors=FALSE)
station<-unique(data$station_name)
# Accumulators: one trend slope (and one coordinate pair) appended per station.
station_str_hot<-integer()
station_hot<-integer()
station_str_cold<-integer()
station_cold<-integer()
long<-integer()
lati<-integer()
for(st_name in station)
{
  date_data<-data[data$station_name == st_name,]
  # Per-year fractions for this station, one entry appended per year with data.
  str_hot<-integer()
  hot<-integer()
  str_cold<-integer()
  cold<-integer()
  year<-integer()
  for(i in 1964:2013)
  {
    # Rows for this year are found by matching the year string in column 1.
    yr<-toString(i)
    yr_data_ind<-grep(yr,date_data[,1],value=FALSE)
    if(length(yr_data_ind) == 0)
      next
    year<-c(year,i)
    # Fraction of this year's observations falling in each outlier category.
    str_hot_cnt <- (sum(date_data[yr_data_ind,9] == "Strong Hot"))/length(yr_data_ind)
    hot_cnt <- (sum(date_data[yr_data_ind,9] == "Weak Hot"))/length(yr_data_ind)
    str_cold_cnt <- (sum(date_data[yr_data_ind,9] == "Strong Cold"))/length(yr_data_ind)
    cold_cnt <- (sum(date_data[yr_data_ind,9] == "Weak Cold"))/length(yr_data_ind)
    str_hot<-c(str_hot,str_hot_cnt)
    hot<-c(hot,hot_cnt)
    str_cold<-c(str_cold,str_cold_cnt)
    cold<-c(cold,cold_cnt)
  }
  # Linear trend of each category's yearly fraction; keep only the slope.
  lm_str_hot=coef(lm(str_hot~year))["year"]
  lm_hot=coef(lm(hot~year))["year"]
  lm_str_cold=coef(lm(str_cold~year))["year"]
  lm_cold=coef(lm(cold~year))["year"]
  station_str_hot<-c(station_str_hot,lm_str_hot)
  station_hot<-c(station_hot,lm_hot)
  station_str_cold<-c(station_str_cold,lm_str_cold)
  station_cold<-c(station_cold,lm_cold)
  # NOTE(review): yr_data_ind here is left over from the LAST loop iteration
  # (2013); if that year had no rows for this station these indexings use an
  # empty vector — verify the intent was "any year's first row".
  long<-c(long,date_data[yr_data_ind[1],4])
  lati<-c(lati,date_data[yr_data_ind[1],5])
  print(paste(st_name,date_data[yr_data_ind[1],4],date_data[yr_data_ind[1],5]))
}
station_details<-data.frame(station,long,lati)
# Colour buckets by rank: top 136, next 136, next 407, next 679, rest 1358.
# NOTE(review): this assumes exactly 2716 stations — confirm.
str_hot_color<-c(rep("Darkest Red",136),rep("Dark Red",136),rep("Red",407),rep("Light Red",679),rep("No Color",1358))
hot_color<-c(rep("Darkest Yellow",136),rep("Dark Yellow",136),rep("Yellow",407),rep("Light Yellow",679),rep("No Color",1358))
str_cold_color<-c(rep("Darkest Blue",136),rep("Dark Blue",136),rep("Blue",407),rep("Light Blue",679),rep("No Color",1358))
cold_color<-c(rep("Darkest Green",136),rep("Dark Green",136),rep("Green",407),rep("Light Green",679),rep("No Color",1358))
# For each category: sort by descending slope, attach colours by rank, then
# restore station order and write the result.
GraphStrongHot<-cbind(station_details,station_str_hot)
GraphStrongHot<-GraphStrongHot[order(-GraphStrongHot$station_str_hot),]
GraphStrongHot<-cbind(GraphStrongHot,str_hot_color)
GraphStrongHot<-GraphStrongHot[order(GraphStrongHot$station),]
GraphHot<-cbind(station_details,station_hot)
GraphHot<-GraphHot[order(-GraphHot$station_hot),]
GraphHot<-cbind(GraphHot,hot_color)
GraphHot<-GraphHot[order(GraphHot$station),]
GraphStrongCold<-cbind(station_details,station_str_cold)
GraphStrongCold<-GraphStrongCold[order(-GraphStrongCold$station_str_cold),]
GraphStrongCold<-cbind(GraphStrongCold,str_cold_color)
GraphStrongCold<-GraphStrongCold[order(GraphStrongCold$station),]
GraphCold<-cbind(station_details,station_cold)
GraphCold<-GraphCold[order(-GraphCold$station_cold),]
GraphCold<-cbind(GraphCold,cold_color)
GraphCold<-GraphCold[order(GraphCold$station),]
write.csv(GraphStrongHot,file="StrongHot.csv")
write.csv(GraphHot,file="Hot.csv")
write.csv(GraphStrongCold,file="StrongCold.csv")
write.csv(GraphCold,file="Cold.csv")
|
3747261624549c0cb907be8bee2a9453e1f7bb55
|
4296809eeea7449b42b1797fd0aaf00741307f6d
|
/tools/Scripts_Miscellaneous/0_SWSFTesting_HandRunCode.R
|
ae49ed793b27708e3d52bda1e758b797e1067b4b
|
[] |
no_license
|
BrendenBe1/rSFSW2
|
1153a1cae12338adc28ea73a8d11fcc7cc651173
|
d154c1e19b608d4789aa5b682ec9a7f598ba92db
|
refs/heads/master
| 2020-05-24T16:31:15.451960
| 2017-02-23T09:20:58
| 2017-02-23T09:20:58
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 3,158
|
r
|
0_SWSFTesting_HandRunCode.R
|
# Hand-run setup for a single SWSF simulation: reproduces, outside the normal
# driver loop, the per-run state (i_sim = 1) so one simulation can be stepped
# through interactively. NOTE(review): this script depends on many objects
# created by the surrounding SWSF framework (num_cores, dir.sw.in, swFilesIn,
# seq.tr, runs, name.OutputDB, SWRunInformation, sw_input_* tables, ...).
workersN <- num_cores
swDataFromFiles <- sw_inputDataFromFiles(dir=dir.sw.in,files.in=swFilesIn) #This acts for the basis for all runs.
filebasename <- basename(swFiles_WeatherPrefix(swDataFromFiles))
#i = i_sim: consecutive number of seq.todo, i.e., counting the simulation runs
#i_xxx = the i_tr-row of xxx for the i-th simulation run; if trowExperimentals > 0 then these will eventually be repeated, and below replaced with experimental values
#i_exp = the row of sw_input_experimentals for the i-th simulation run
#P_id is a unique id number for each scenario in each run
i_sim <- 1
i_tr <- seq.tr[(i_sim-1) %% runs + 1]
# Output database connection (and optionally a weather database connection).
drv <- dbDriver("SQLite")
con <- dbConnect(drv, dbname=name.OutputDB)
if(getCurrentWeatherDataFromDatabase) conWeather <- dbConnect(drv, dbname=dbWeatherDataFile)
#weather folder name and structure
if(GriddedDailyWeatherFromMaurer2002_NorthAmerica & !any(create_treatments == "LookupWeatherFolder")){ #obtain external weather information that needs to be executed for each run
  # Maurer cells are on a 0.125-degree grid; snap the run's coordinates to it.
  dirname.sw.runs.weather <- paste("data", format(28.8125+round((SWRunInformation[i_tr,]$Y_WGS84-28.8125)/0.125,0)*0.125, nsmall=4), format(28.8125+round((SWRunInformation[i_tr,]$X_WGS84-28.8125)/0.125,0)*0.125, nsmall=4), sep="_")
  sw_weatherList <- ExtractGriddedDailyWeatherFromMaurer2002_NorthAmerica(cellname=dirname.sw.runs.weather,startYear=ifelse(any(create_treatments=="YearStart"), sw_input_treatments[i_tr,]$YearStart, simstartyr), endYear=ifelse(any(create_treatments=="YearEnd"), sw_input_treatments[i_tr,]$YearEnd, endyr))
  if(is.null(sw_weatherList)) stop("ExtractGriddedDailyWeatherFromMaurer2002_NorthAmerica failed")
} else {
  sw_weatherList <- NULL
  # Alternative weather sources, kept for reference but disabled here:
  # temp <- dbGetQuery(con, paste("SELECT WeatherFolder FROM header WHERE P_id=",((i_sim-1)*scenario_No+1)))
  # if(WeatherDataFromDatabase) {
  #   sw_weatherList <- onGetWeatherData_database(con=conWeather,weatherDirName=temp,startYear=ifelse(any(create_treatments=="YearStart"), sw_input_treatments[i_tr,]$YearStart, simstartyr), endYear=ifelse(any(create_treatments=="YearEnd"), sw_input_treatments[i_tr,]$YearEnd, endyr))
  # } else {
  #   sw_weatherList <- onGetWeatherData_folders(LookupWeatherFolder=file.path(dir.sw.in.tr, "LookupWeatherFolder"),weatherDirName=temp,filebasename=filebasename,startYear=ifelse(any(create_treatments=="YearStart"), sw_input_treatments[i_tr,]$YearStart, simstartyr), endYear=ifelse(any(create_treatments=="YearEnd"), sw_input_treatments[i_tr,]$YearEnd, endyr))
  # }
}
# Slice every per-run input table down to the row for this simulation (i_tr).
nodeNumber <- 1
i <- i_sim
i_labels <- labels[i_tr]
i_SWRunInformation <- SWRunInformation[i_tr, ]
i_sw_input_soillayers <- sw_input_soillayers[i_tr, ]
i_sw_input_treatments <- sw_input_treatments[i_tr, ]
i_sw_input_cloud <- sw_input_cloud[i_tr, ]
i_sw_input_prod <- sw_input_prod[i_tr, ]
i_sw_input_site <- sw_input_site[i_tr, ]
i_sw_input_soils <- sw_input_soils[i_tr, ]
i_sw_input_weather <- sw_input_weather[i_tr, ]
i_sw_input_climscen <- sw_input_climscen[i_tr, ]
i_sw_input_climscen_values <- sw_input_climscen_values[i_tr, ]
i_sw_weatherList <- sw_weatherList
# Bookkeeping flags normally maintained by the driver loop.
runs.completed <- length(seq.todo)
complete.aggregations <- TRUE
concats.completed <- length(seq.concats)
|
2416b1a122569647beeae8e7d19035a01aed1ab9
|
6585c4953346f131123537a00252f7021c647752
|
/pollutantmean.R
|
a3caed9786b6d7f78a008d4905e3db03ebcf6c38
|
[] |
no_license
|
rjbeilstein/Personal
|
de32bacf900b88f8c68c3933db0fb962794701db
|
1e93dd80ec954f8197b880c06a040adad174305e
|
refs/heads/master
| 2021-01-19T09:44:05.047438
| 2015-03-10T13:51:28
| 2015-03-10T13:51:28
| 27,402,824
| 0
| 0
| null | 2014-12-12T15:11:25
| 2014-12-01T22:14:56
|
Perl
|
UTF-8
|
R
| false
| false
| 1,759
|
r
|
pollutantmean.R
|
pollutantmean <- function(directory, pollutant, id = 1:332) {
  ## Copyright (C) Bob Beilstein, 2015
  # This program is free software: you can redistribute it and/or modify
  # it under the terms of the GNU General Public License as published by
  # the Free Software Foundation, either version 3 of the License, or
  # (at your option) any later version.
  #
  # This program is distributed in the hope that it will be useful,
  # but WITHOUT ANY WARRANTY; without even the implied warranty of
  # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
  # GNU General Public License for more details.
  #
  # You should have received a copy of the GNU General Public License
  # along with this program.  If not, see <http://www.gnu.org/licenses/>.

  ## 'directory' is a character vector of length 1 indicating
  ## the location of the CSV files
  ## 'pollutant' is a character vector of length 1 indicating
  ## the name of the pollutant for which we will calculate the
  ## mean; either "sulfate" or "nitrate".
  ## 'id' is an integer vector indicating the monitor ID numbers
  ## to be used
  ## Return the mean of the pollutant across all monitors listed
  ## in the 'id' vector (ignoring NA values)

  ## Validate 'pollutant' once, up front, instead of re-checking it for
  ## every file inside the loop (same error message as before).
  if (!(pollutant %in% c("sulfate", "nitrate"))) {
    stop('Pollutant must be either "nitrate" or "sulfate"')
  }
  ## Monitor files are named with zero-padded three-digit IDs, e.g. "001.csv".
  rf <- paste(directory, "/", sprintf("%03d.csv", id), sep = "")
  ## Read each file once and keep the non-missing values of the requested
  ## column. Collecting per-file vectors in a list and flattening at the end
  ## avoids the quadratic cost of growing a vector with c() inside a loop.
  ## read.csv will fail if we've specified a nonexistent file.
  vals <- lapply(rf, function(f) {
    p <- read.csv(f)[[pollutant]]
    p[!is.na(p)]
  })
  mean(unlist(vals))
}
|
d5255f09ebcbcde08287da71bfac65a172956cb4
|
e657528bf984624f4762d0c006c516df4c570a4e
|
/man/create_experiment.Rd
|
b384201220325041a84b1b70e91a873159105e9c
|
[
"MIT"
] |
permissive
|
sigopt/SigOptR
|
95b0323a6c66ef7ba885a5fd32ef1fcd18afb7cc
|
d580444a7af5eebced232069eea9b3406a69ff29
|
refs/heads/main
| 2023-08-31T15:38:13.614967
| 2023-08-22T20:45:11
| 2023-08-22T20:45:11
| 52,380,994
| 15
| 7
|
MIT
| 2023-08-22T20:45:13
| 2016-02-23T18:14:31
|
R
|
UTF-8
|
R
| false
| true
| 638
|
rd
|
create_experiment.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/sigopt_api_wrapper.R
\name{create_experiment}
\alias{create_experiment}
\title{Create an experiment}
\usage{
create_experiment(body)
}
\arguments{
\item{body}{POST body of create request}
}
\value{
experiment created by SigOpt
}
\description{
Create an experiment
}
\examples{
env <- Sys.getenv("NOT_CRAN")
if (!identical(env, "true")) {
0
} else {
create_experiment(list(
name="R test experiment",
parameters=list(
list(name="x1", type="double", bounds=list(min=0, max=100)),
list(name="x2", type="double", bounds=list(min=0, max=100))
)
))}
}
|
0223789ec39c92cb629dd60a03b43ce05d77c60a
|
f61153b739229133bcd5a9fc80f35bd9f10268cd
|
/Module 2/random.R
|
2451a6dbe6d8bb7d447e3811c0565b9626648227
|
[] |
no_license
|
anitakurm/Cognitive-Modelling
|
54863dc98a3f35ce90afaee7347766f8a414e22a
|
e0772e88fdcdd01cb9c2d1ee165d5a1c8d9b7f32
|
refs/heads/master
| 2021-01-05T15:42:11.270725
| 2020-03-23T00:52:59
| 2020-03-23T00:52:59
| 241,064,724
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 561
|
r
|
random.R
|
# Random-choice agent for a two-option (bandit-style) task: on every trial it
# picks option 1 with probability `theta` (option 2 otherwise) and collects
# the corresponding reward from the payoff matrix.
#   payoff  - ntrials x 2 matrix of rewards, one column per option
#   ntrials - number of trials to simulate
#   theta   - bias towards option 1, in [0, 1]
# Returns a list with x (choices per trial) and r (rewards per trial).
random <- function(payoff, ntrials, theta) {
  #bias towards option 1 (theta) and option 2
  b <- c(theta, 1-theta)
  #empty array to be filled
  x <- array(0, c(ntrials))
  #rewards array to be filled
  r <- array(0, c(ntrials))
  # seq_len() is safe when ntrials == 0; the previous 1:ntrials would have
  # iterated over c(1, 0) and indexed out of bounds.
  for (t in seq_len(ntrials)) {
    #agent that chooses randomly between options 1 and 2 with bias ( to 1) = theta
    #binomial sample (but really from a categorical distribution)
    # NOTE(review): rcat() is not base R — presumably extraDistr::rcat; confirm.
    x[t] <- rcat(1, b)
    #what reward does the agent get?
    r[t] <- payoff[t, x[t]]
  }
  result <- list(x=x, r=r)
  return(result)
}
|
c434ada474b4a99d7047fe0c77f0b0fb9fc1fc79
|
52bf004a1380cc1a35554d26055cf374165d8394
|
/cachematrix.R
|
8b52a8b6a08b9d850ddfcbef18e2cd53b7d418cd
|
[] |
no_license
|
jennwv/ProgrammingAssignment2
|
7df5d7e15d997940f77ede1055a1a45b6c76543e
|
cd396b8d5f6c4a7b04060c570eb07f6d88850061
|
refs/heads/master
| 2021-01-18T02:06:54.267331
| 2015-01-21T21:18:03
| 2015-01-21T21:18:03
| 29,611,674
| 0
| 0
| null | 2015-01-21T20:38:34
| 2015-01-21T20:38:34
| null |
UTF-8
|
R
| false
| false
| 904
|
r
|
cachematrix.R
|
## Create a new matrix object that allows caching of the inverse.
## Assumes that input is an invertible square matrix.
## Makes a new matrix object from a regular matrix.
# Create a matrix wrapper that can cache its inverse.
# Assumes the input is an invertible square matrix.
#   x - the matrix to wrap (default: empty 1x1 NA matrix)
# Returns a list of accessors: set/get for the matrix and
# setinverse/getinverse for the cached inverse. set() invalidates the cache.
makeCacheMatrix <- function(x = matrix()) {
  inv <- NULL
  set <- function(y) {
    x <<- y
    inv <<- NULL  # new matrix => any cached inverse is stale
  }
  get <- function() x
  setinverse <- function(inverse) inv <<- inverse
  getinverse <- function() inv
  # Return the accessor list directly. Previously it was assigned to an
  # unused local 'y', which made the function return its value invisibly.
  list(set = set, get = get,
       setinverse = setinverse,
       getinverse = getinverse)
}
## Returns the inverse of the matrix stored in a matrix object,
## from cache if possible. Assumes that matrix is an
## invertible square matrix.
## Returns the inverse of the matrix stored in a cache-matrix object,
## reusing the cached value when one exists. Assumes the stored matrix is
## an invertible square matrix.
cacheSolve <- function(x, ...) {
  ## Return a matrix that is the inverse of 'x'
  cached <- x$getinverse()
  if (!is.null(cached)) {
    message("getting cached data")
    return(cached)
  }
  source_matrix <- x$get()
  computed <- solve(source_matrix)
  x$setinverse(computed)  # store for future calls
  computed
}
|
96c5efe76bbe7e495ad380d7eab2ed2a76415b5c
|
d7d8f1751a02d82b85fd62f7e0d7b64a6443299c
|
/plot_FST.r
|
d9d83819af971c59c28112f0475487a833fc762c
|
[
"MIT"
] |
permissive
|
zhuochenbioinfo/PopGen
|
5ac608daacec492dbed840f7897aea80b6df5666
|
181cb6a31fb61afa0ea27b7327d2db76ca6bcd0a
|
refs/heads/master
| 2023-06-10T00:07:45.003109
| 2023-05-31T10:35:23
| 2023-05-31T10:35:23
| 101,312,312
| 7
| 4
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,264
|
r
|
plot_FST.r
|
# Plot windowed weighted FST along 12 chromosomes, highlighting the top 5%
# and top 10% most differentiated windows.
# Usage: Rscript plot_FST.r <input.tsv> <output.pdf>
# The input is tab-delimited with at least CHROM, BIN_START, WEIGHTED_FST.
library(ggplot2)
args <- commandArgs(TRUE)
file <- args[1]
outfile <- args[2]
data <- read.delim(file)
# Force chromosome ordering Chr1..Chr12 on the facet strip.
goodChrOrder <- paste("Chr", c(1:12), sep="")
data$CHROM <- factor(data$CHROM, levels=goodChrOrder)
# Significance thresholds: top 5% and top 10% quantiles of weighted FST.
top5.value <- as.numeric(quantile(data$WEIGHTED_FST,prob=1-5/100))
top10.value <- as.numeric(quantile(data$WEIGHTED_FST,prob=1-10/100))
# Label each window by which threshold it exceeds.
# NOTE(review): the `data[...]$sig <-` replacements error if no rows pass a
# threshold — with quantile-based cutoffs some rows always qualify, but
# verify for small inputs.
data$sig <- "nosig"
data[data$WEIGHTED_FST >= top10.value,]$sig <- "top10pct"
data[data$WEIGHTED_FST >= top5.value,]$sig <- "top5pct"
# Convert the label column to a factor (fixes legend/colour mapping order).
data$sig <- as.factor(data$sig)
pdf(outfile,width=20,height=4)
ggplot(data[data$WEIGHTED_FST > 0,], aes(x=BIN_START/1000000, y=WEIGHTED_FST, color=sig)) +
  geom_point(stat="identity", position="identity",size=0.5) +
  # Colours: grey = not significant, orange = top 10%, red = top 5%.
  scale_color_manual(values = c("grey", "orange", "red")) +
  facet_wrap(~ CHROM, ncol=12, scales = "free_x", strip.position = "bottom") +
  ggtitle("REF:MSU") +
  xlab("Position(Mb)") +
  ylab("value") +
  theme_bw() +
  theme(axis.line = element_line(colour = "black"),
        panel.grid.major = element_blank(),
        panel.grid.minor = element_blank(),
        panel.border = element_blank(),
        panel.background = element_blank())
dev.off()
|
3b62cd4c79872ea6fa2a71aab7387e1e45132066
|
9b202913ece1e2916e80c913693bc17c0adba768
|
/R/fig8b.R
|
cdd627d7603c1c88e0922112ee6197e5812e6558
|
[] |
no_license
|
PMassicotte/cdoc
|
5217750db3e1fdf699a1a8d5a26a8f1b83d52253
|
fef25b848cb1ac1f2c7be171b290ad5d5fef7af7
|
refs/heads/master
| 2021-09-13T21:36:56.273815
| 2018-05-04T12:59:30
| 2018-05-04T12:59:30
| 80,427,518
| 1
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,782
|
r
|
fig8b.R
|
# Figure 8b: per-ecosystem CDOM spectral-slope curves, coloured by fit R^2.
# NOTE(review): relies on tidyverse + feather being attached by the project's
# setup scripts, and on cdom_spectral_curve() defined elsewhere in this repo.
# NOTE(review): rm(list = ls()) wipes the interactive workspace — intentional
# here as part of the project's figure-script convention, but hostile if
# sourced into a session.
rm(list = ls())
cdom_complete <- read_feather("dataset/clean/cdom_dataset.feather") %>%
  filter(wavelength <= 500) %>%
  filter(study_id != "nelson") %>% # Nelson is missing wl < 275
  filter(study_id != "greenland_lakes") %>% # These had lamp problem at 360 nm
  filter(study_id != "horsen") %>%
  # Normalise each spectrum to its own maximum before averaging.
  group_by(unique_id) %>%
  mutate(absorption = absorption / max(absorption)) %>%
  ungroup() %>%
  # Mean normalised absorption per wavelength within each ecosystem.
  group_by(wavelength, ecosystem) %>%
  summarise(absorption = mean(absorption)) %>%
  ungroup() %>%
  # Fit one spectral-slope curve per ecosystem.
  group_by(ecosystem) %>%
  nest() %>%
  mutate(model = purrr::map(data, ~cdom_spectral_curve(.$wavelength, .$absorption))) %>%
  unnest(model)

# Plot --------------------------------------------------------------------

# Blue-to-red "jet" palette used for the R^2 colour scale.
jet.colors <-
  colorRampPalette(
    c(
      "#00007F",
      "blue",
      "#007FFF",
      "cyan",
      "#7FFF7F",
      "yellow",
      "#FF7F00",
      "red",
      "#7F0000"
    )
  )

p <- cdom_complete %>%
  # Order facets from most terrestrial to most marine systems.
  mutate(ecosystem = factor(
    ecosystem,
    levels = c(
      "wetland",
      "pond",
      "lake",
      "river",
      "coastal",
      "estuary",
      "ocean"
    ),
    labels = c(
      "Wetland",
      "Pond",
      "Lake",
      "River",
      "Coastal",
      "Estuary",
      "Ocean"
    )
  )) %>%
  ggplot(aes(x = wl, y = s)) +
  geom_line(aes(color = r2)) +
  facet_wrap(~ecosystem) +
  scale_color_gradientn(
    colours = jet.colors(255),
    guide = guide_colorbar(
      direction = "vertical",
      tick = FALSE,
      barwidth = 1, barheight = 5)
  ) +
  xlab("Wavelength (nm)") +
  ylab(bquote("Spectral slope"~(nm^{-1}))) +
  labs(color = bquote(italic(R^2))) +
  theme(legend.justification = c(0.75, 0), legend.position = c(0.42, -0.005))

ggsave("graphs/spectra_curves.pdf", p, height = 5, width = 7)
|
1a327fbd8860584b68ce6131258cf1b4f57d385a
|
36badbb3ac98c190db813af2b03194c6bc90241f
|
/tests/testthat/test_notinteractive.R
|
a824133df27ad7f174773e9d8e5de5e61093e5ad
|
[
"MIT"
] |
permissive
|
Chris1221/ggsource
|
d731aad7746dd90fc1e6d82a1e03b34007556364
|
6c2dc589de39f51cbf514488ad9428f963d688a5
|
refs/heads/main
| 2023-08-01T21:44:31.193665
| 2021-09-12T15:07:21
| 2021-09-12T15:07:21
| 397,310,117
| 2
| 0
| null | 2021-09-12T14:18:32
| 2021-08-17T15:43:28
|
R
|
UTF-8
|
R
| false
| false
| 361
|
r
|
test_notinteractive.R
|
library(ggplot2)
library(ggsource)

# Fixture plot used for the ggsave/ggsource round trip.
fixture_plot <- ggplot(mpg, aes(displ, hwy, colour = class)) +
  geom_point()

# Save target and the script name we expect ggsource() to recover.
pdf_path <- tempfile(fileext = ".pdf")
expected_script <- "test"

test_that("ggsource runs okay", {
  expect_error(ggsave(pdf_path, fixture_plot, scriptname = expected_script), NA)
  expect_true(file.exists(pdf_path))
  expect_equal(ggsource(pdf_path, interactive = F), expected_script)
})
|
5baf1e7534a0442348e8ba4ef7f566cba4dc93e6
|
5db643f963b6d8285a8632881708b4b2aade03d8
|
/13_gseaSummaryTable.R
|
bd45da55560c706a7a31dcb6f89b981cec7d3725
|
[
"MIT"
] |
permissive
|
uhkniazi/BRC_SupernumeraryTooth_Gui_PID_19
|
e325c2aab69da25277398c5380a2a242f6d8b218
|
5d1fb0f56b0bc107c5c297d8fc0f2a57563a2e31
|
refs/heads/master
| 2020-04-26T17:58:27.415030
| 2019-04-23T13:08:45
| 2019-04-23T13:08:45
| 173,730,457
| 1
| 1
| null | null | null | null |
UTF-8
|
R
| false
| false
| 4,410
|
r
|
13_gseaSummaryTable.R
|
# File: 13_gseaSummaryTable.R
# Auth: umar.niazi@kcl.ac.uk
# DESC: merge the gsea results for all contrasts in one table
# Date: 14/03/2019

# Collect per-contrast GSEA result CSVs (mSigDB C2) from the results folder.
lFiles = list.files('results/', pattern='*pathways_mSigDb_c2_*', full.names = T, ignore.case = F)
# load the files
ldfData = lapply(lFiles, function(x) read.csv(x, row.names=1))
names(ldfData) = lFiles
sapply(ldfData, nrow)
# put everything in one order by row names
# (pathway names of the first file define the canonical row order)
rn = rownames(ldfData[[1]])
head(rn)
ldfData = lapply(ldfData, function(df){
  df = df[rn,]
})
# get the upregulated/downregulated in order
ldfData.up = ldfData[grepl(lFiles, pattern = 'upregulated')]
ldfData.down = ldfData[grepl(lFiles, pattern = 'downregulated')]
## set the names for each contrast
## extract the "AVsB" contrast label from each file path
sn = gsub('results//(.+Vs.+)_up.+', '\\1', names(ldfData.up))
names(ldfData.up) = sn
sn = gsub('results//(.+Vs.+)_down.+', '\\1', names(ldfData.down))
names(ldfData.down) = sn
## create a table/matrix of p-values
## one row per pathway, one column per contrast
mMerged.up = sapply(ldfData.up, function(x) x$p.val)
rownames(mMerged.up) = rownames(ldfData.up[[1]])
## create similar table for downregulated
mMerged.down = sapply(ldfData.down, function(x) x$p.val)
rownames(mMerged.down) = rownames(ldfData.down[[1]])
# sanity check
identical(rownames(mMerged.up), rownames(mMerged.down))
colnames(mMerged.up) = paste(colnames(mMerged.up), 'up', sep='-')
colnames(mMerged.down) = paste(colnames(mMerged.down), 'down', sep='-')
mMerged.c2 = cbind(mMerged.up, mMerged.down)
# reorder the columns
# interleave up/down columns per contrast
# NOTE(review): this hard-codes 9 contrasts (18 columns) — confirm.
colnames(mMerged.c2)
o = c(1, 10, 2, 11, 3, 12, 4, 13, 5, 14, 6, 15, 7, 16, 8, 17, 9, 18)
colnames(mMerged.c2)[o]
mMerged.c2 = mMerged.c2[,o]
# remove na sections
dim(mMerged.c2)
mMerged.c2 = na.omit(mMerged.c2)
dim(mMerged.c2)
head(mMerged.c2)
### create a binary matrix based on cutoffs
# Binarise a matrix of p-values: TRUE where the value, rounded to 3 decimal
# places, is at or below `cutoff` (default 0.01). Returns a logical matrix
# with the same dimensions as `mat`.
# (Previously the result was assigned to an unused local `mat2`, so the
# function returned it invisibly; it is now returned directly.)
getBinaryMatrix <- function(mat, cutoff = 0.01) {
  apply(mat, 2, function(x) round(x, 3) <= cutoff)
}
# Flag pathways significant at p <= 0.01 (rounded) in each contrast.
mMerged.c2.bin = getBinaryMatrix(mMerged.c2)

## group this matrix into combinations
## cluster pathways with similar significance patterns across contrasts
mMerged.c2.bin.grp = mMerged.c2.bin
set.seed(123)
dm = dist(mMerged.c2.bin.grp, method='binary')
hc = hclust(dm)
plot(hc, labels=F)
# cut the tree at the bottom to create groups
cp = cutree(hc, h = 0.2)
# sanity checks
table(cp)
length(cp)
length(unique(cp))
mMerged.c2.bin.grp = cbind(mMerged.c2.bin.grp, cp)

### print and observe this table and select the groups you are interested in
temp = mMerged.c2.bin.grp
temp = (temp[!duplicated(cp),])
temp2 = cbind(temp, table(cp))
rownames(temp2) = NULL
print(temp2)
## map these names to the cp
groups = cp
# number of contrasts in which each pathway is significant
sig.pvals = rowSums(mMerged.c2.bin)
dfMerged.c2 = data.frame(round(mMerged.c2, 3), sig.pvals, groups, DB='mSigDB')
str(dfMerged.c2)
head(dfMerged.c2)
tail(dfMerged.c2)

########
write.csv(dfMerged.c2, file='results/gsea_msigdb_c2_merged.xls')

## merge together into one dataframe
# drop the group with most zeros
# NOTE(review): group "2" being the all-nonsignificant cluster was determined
# by inspecting the printed table above — re-check if the data changes.
table(dfMerged.c2$groups)
t = rowSums(mMerged.c2.bin)
table(t, dfMerged.c2$groups)
dfMerged.c2.sub = dfMerged.c2[dfMerged.c2$groups != 2,]

# Parallel c5/c7 sections kept for reference but disabled in this run:
# table(dfMerged.c5$groups)
# t = rowSums(mMerged.c5.bin)
# table(t, dfMerged.c5$groups)
# dfMerged.c5.sub = dfMerged.c5[dfMerged.c5$groups != 4,]
#
# table(dfMerged.c7$groups)
# t = rowSums(mMerged.c7.bin)
# table(t, dfMerged.c7$groups)
# dfMerged.c7.sub = dfMerged.c7[dfMerged.c7$groups != 6,]

dfMerged = rbind(dfMerged.c2.sub)#, dfMerged.c5.sub, dfMerged.c7.sub)
dfMerged = droplevels.data.frame(dfMerged)
dim(dfMerged)
str(dfMerged)
write.csv(dfMerged, file='results/gsea_msigdb_significant_c2_merged.xls')

### heatmaps
### just for a quick visual check, do not use for results
df = dfMerged
head(df)
dim(df)
# first 18 columns are the per-contrast p-values
mMat = as.matrix(df[,c(1:18)])
head(mMat)
# -log transform (small offset avoids log(0))
mMat = -1*log(mMat+1e-16)
g1 = df[,'groups']
g1 = factor(as.character(g1))
levels(g1)
g2 = df[,'DB']
g2 = factor(as.character(g2))
levels(g2)
#ann = data.frame(DB=g2, Group=g1 )
ann = data.frame(Group=g1 )
range(mMat)
quantile(as.vector(mMat), 0:20/20)
# cap the colour scale so a few extreme p-values do not dominate
#mMat[mMat < 15] = 15
mMat[mMat > 10] = 10
library(NMF)
library(RColorBrewer)
aheatmap(mMat, annRow = NA, scale = 'none', Rowv = order(g2:g1), Colv=NA, cexRow=5, cexCol = 0.6, #labCol=c('C2vC1', 'K1vC1', 'K2vC2', 'K2vK1'),
         col=c('white', brewer.pal(9, 'YlOrRd')))

pdf('results/gsea_msigdb_significant_merged.pdf')
aheatmap(mMat, annRow = NA, scale = 'none', Rowv = order(g2:g1), Colv=NA, cexRow=5, cexCol = 0.6, #labCol=c('C2vC1', 'K1vC1', 'K2vC2', 'K2vK1'),
         col=c('white', brewer.pal(9, 'YlOrRd')))
dev.off(dev.cur())
|
69a81f0623cb6104c6754027674363fd2b8ca152
|
2742a72eef62bde91aeaaa2fffd21b6dc10387cc
|
/190713/190713_diamonds.R
|
0f0eed02943bfe619b7357c93b19ef7dd593072f
|
[
"Apache-2.0"
] |
permissive
|
gittykite/r-basic-viz
|
f3d9922dbab04bd90e3b0ccb04353616594dbf0d
|
06941089d5ed29ab2a2eaf2ccab0a8c6f971f9f3
|
refs/heads/master
| 2022-03-02T08:46:54.439237
| 2019-07-27T07:34:04
| 2019-07-27T07:34:04
| 197,858,792
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,602
|
r
|
190713_diamonds.R
|
# Tutorial script: exploring the mpg/diamonds datasets and dplyr verbs
# (filter / select / arrange / mutate) on a small exam CSV.

# mpg: cars
mpg
data(mpg)
str(mpg)

# diamonds
str(diamonds)       # show headings
head(diamonds)      # print heading data
head(diamonds, 10)  # print heading data
tail(diamonds)      # print tailing data
tail(diamonds, 3)   # print tailing data
dim(diamonds)       # row, col num
summary(diamonds)

# dplyr function (SQL func)
require(dplyr)

exam <- read.csv("190713/basic/csv_exam.csv")
exam

# filter: WHERE func
exam %>% filter(class == 1)  # pipe %>% (ctrl + shift + m)
exam %>% filter(class != 1)
exam %>% filter(math >= 50)
exam %>% filter(class == 1 & math >= 50)
exam %>% filter(english >= 90 | math >= 50 | class == 5)

class7 <- exam %>% filter(class %in% c(1, 3, 5))  # include %in% IN func
mean(class7$math)
mode(class7)

# select: SELECT func
# ERROR: unable to find an inherited method for function 'select' for signature '"data.frame"'
exam
exam$math
exam %>% dplyr::select(math)  # explicitly call library
exam %>% dplyr::select(-math)
exam %>% dplyr::select(math, class, english)

# Fixed: the select step was detached from the pipe (a stray
# `select(english)` line), which errored; it now continues the chain and
# uses dplyr::select explicitly, consistent with the calls above.
exam %>%
  filter(class == 1) %>%
  dplyr::select(english)

# arrange: ORDER BY
exam %>% arrange(math)
exam %>% arrange(desc(math))
exam %>% arrange(class, math)

# mutate: make new copied dataset
exam %>%
  mutate(total = math + english + science) %>%
  head  # get heading data

# add columns
exam %>%
  mutate(total = math + english + science,
         mean = (math + english + science)/3) %>%
  head

exam %>%
  mutate(test = ifelse(science >= 60, "pass", "fail")) %>%
  head

exam %>%
  mutate(total = math + english + science) %>%
  arrange(total) %>%
  head
|
c6ac7635da7e0d1a74ec8ec459a323cc6934d716
|
67a4ffc2937795fb803de60cd23a5d286ecd062c
|
/BIN/checkReported.R
|
2f44c391eb378f7cd2fa21a27c5d156cd8dcc5cb
|
[] |
no_license
|
TesiNicco/CentenAssoc
|
7dd8c6638b5109d351cb31a7e0ed24bd174220d0
|
38740b8b5af29b7e270b73b840791c5bf61396a0
|
refs/heads/master
| 2021-07-13T16:55:56.643449
| 2020-11-12T14:44:34
| 2020-11-12T14:44:34
| 222,994,936
| 1
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,592
|
r
|
checkReported.R
|
# Check whether previously reported longevity variants replicate: compare the
# direction of effect between this study's association results and the
# reference GWAS (Timmers et al.), then apply FDR correction at several
# thresholds and write the annotated table.
library(data.table)

# read input files
d <- fread("RESULTS/SINGLE_VARIANT/chrAll_reported_variants_assoc.txt", h=T, sep="\t")
ref <- fread("INPUTS_OTHER/GWAS_data_Timmers_cleaned.txt.gz", h=T)
###

# merge files together
d.my <- merge(d, ref, by.x="ID", by.y="SNP")
# initialize direction column
d.my$DIRECTION <- NA
# loop to assign directions: if the effect alleles match, same-sign betas
# mean a concordant direction; if they differ, opposite-sign betas do.
for (i in 1:nrow(d.my)){
  if (d.my$A1.x[i] == d.my$A1.y[i]){
    if (d.my$BETA.x[i]*d.my$BETA.y[i] >0){d.my$DIRECTION[i] <- "correct"} else { d.my$DIRECTION[i] <- "not_correct" }
  } else {
    if (d.my$BETA.x[i]*d.my$BETA.y[i] <0){d.my$DIRECTION[i] <- "correct"} else { d.my$DIRECTION[i] <- "not_correct" }
  }
}

#get stats about directions
# Binomial test: is the fraction of concordant directions better than chance?
table(d.my$DIRECTION)
print(table(d.my$DIRECTION)/nrow(d.my))
print(binom.test(x=table(d.my$DIRECTION)[1], n=nrow(d.my), p=0.5))

# FDR-adjust the association p-values and report hits at 5/10/20% FDR.
x <- d.my
x$P_ADJ_fdr <- p.adjust(x$P.x, method="fdr")
print("### Correcting p-values for multiple tests")
print("### FDR correction")
print(paste("### ", nrow(x[which(x$P_ADJ_fdr <= 0.05),]), "/", nrow(x), " variants reached significant at FDR<5%", sep=""))
print(x$ID[which(x$P_ADJ_fdr <= 0.05)])
print(paste("### ", nrow(x[which(x$P_ADJ_fdr <= 0.10),]), "/", nrow(x), " variants reached significant at FDR<10%", sep=""))
print(x$ID[which(x$P_ADJ_fdr <= 0.10)])
print(paste("### ", nrow(x[which(x$P_ADJ_fdr <= 0.20),]), "/", nrow(x), " variants reached significant at FDR<20%", sep=""))
print(x$ID[which(x$P_ADJ_fdr <= 0.20)])

write.table(d.my, "RESULTS/SINGLE_VARIANT/chrAll_reportedVar_assoc_withDirection.txt", quote=F, row.names=F, sep="\t")
|
040e3f19e647bdf618d22a77f7adcfbd43539735
|
345091e8b1e418edb0e901d17856b2c1c3a86707
|
/man/s_cross_p.Rd
|
40c7db02e22f6c459328eafc7be5d068b19e5f09
|
[] |
no_license
|
zhaoy/zhaoy
|
56e18ce7cd8090320128036e22d54b999ec06745
|
e96086b8e7957e7430374962f4c877e8e3f3bc53
|
refs/heads/master
| 2022-09-04T03:06:49.812011
| 2022-08-11T21:45:06
| 2022-08-11T21:45:06
| 93,218,094
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 548
|
rd
|
s_cross_p.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/s_cross_p.R
\name{s_cross_p}
\alias{s_cross_p}
\title{2- or 3-way frequency tables of percents}
\usage{
s_cross_p(x, ..., direction)
}
\arguments{
\item{x}{A data-frame.}
\item{...}{2 or 3 variables.}
\item{direction}{Direction in which to calculate percents: "row", "col", or "all".}
}
\value{
A data-frame.
}
\description{
Cross-tabulate percents of unique values, including missing-data, in 2 or 3 variables.
}
\seealso{
\code{\link{s_cross_n} \link{s_cross_np}}
}
|
32bd15dfa6ec4a46dd45306e2e4cae2984b6951d
|
0a906cf8b1b7da2aea87de958e3662870df49727
|
/biwavelet/inst/testfiles/rcpp_row_quantile/libFuzzer_rcpp_row_quantile/rcpp_row_quantile_valgrind_files/1610556081-test.R
|
f7bcbc579512191eabf3559999600b5cd446e2a0
|
[] |
no_license
|
akhikolla/updated-only-Issues
|
a85c887f0e1aae8a8dc358717d55b21678d04660
|
7d74489dfc7ddfec3955ae7891f15e920cad2e0c
|
refs/heads/master
| 2023-04-13T08:22:15.699449
| 2021-04-21T16:25:35
| 2021-04-21T16:25:35
| 360,232,775
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 259
|
r
|
1610556081-test.R
|
# Fuzzer-generated regression input for biwavelet:::rcpp_row_quantile:
# a 1x7 matrix containing extreme and non-finite values (Inf, -Inf, NaN,
# denormals) plus a quantile `q` near the smallest representable double.
testlist <- list(data = structure(c(Inf, 1.72723536788899e-77, 7.06327441559897e-304, -Inf, NaN, 5.0841157981812e-74, 2.46690988593888e-308), .Dim = c(1L, 7L)), q = 1.39067124445999e-309)
# Replay the captured argument list against the internal C++ routine.
result <- do.call(biwavelet:::rcpp_row_quantile,testlist)
str(result)
|
51362d62ba27a73ec912b51a75b0e04c0d5b2702
|
5b1630bbb35bb556386d8bdd16ea6acc508b341d
|
/man/list2toml.Rd
|
df275a41f87dfd8d93f5a1851ebd634cb16c7e4b
|
[
"MIT"
] |
permissive
|
bioDS/beter
|
ca7282d17ffcd188d50be02fc339dffe737261a6
|
45534d062ae67f36ea0a73514de11870f9d8ed9a
|
refs/heads/master
| 2022-06-17T13:01:44.990966
| 2022-06-02T22:54:23
| 2022-06-02T22:54:23
| 224,336,560
| 3
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 892
|
rd
|
list2toml.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/list2toml.r
\name{list2toml}
\alias{list2toml}
\title{list to toml}
\usage{
list2toml(x)
}
\arguments{
\item{x}{a named list}
}
\value{
a string vector representing individual lines of TOML representation of \code{x}.
}
\description{
Convert a list to a string vector that represents TOML. Each item of the vector represents
a line of the TOML file.
}
\details{
\code{list2toml} process structured lists recursively by first processing any non-list members
and then processing remaining items of the list type using another call of \code{lit2toml}.
In these calls, the name of the item is used as a \code{parent_name}.
}
\examples{
config = list(
"xml" = list("chunk" = "<xml>{{tag}}</xml>"),
"templates" = list(),
"defaults" = list("tag" = "Just another tag in the TOML!")
)
list2toml(config)
}
|
030f488bb7e301bd249c3df625245f6e58b8bde0
|
29585dff702209dd446c0ab52ceea046c58e384e
|
/MMMS/R/extract.list.R
|
6656ac6085cdb96e2c9186f589937f25a80ef957
|
[] |
no_license
|
ingted/R-Examples
|
825440ce468ce608c4d73e2af4c0a0213b81c0fe
|
d0917dbaf698cb8bc0789db0c3ab07453016eab9
|
refs/heads/master
| 2020-04-14T12:29:22.336088
| 2016-07-21T14:01:14
| 2016-07-21T14:01:14
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 189
|
r
|
extract.list.R
|
# Extract a named field from every element of a list of environments/lists,
# optionally descending one level deeper via a second name.
#   L  - list of list-like objects
#   s1 - name of the field to pull from each element
#   s2 - optional name of a sub-field inside each s1 value
extract.list <- function(L, s1, s2 = NULL) {
  extract_one <- function(x) {
    first_level <- with(x, get(s1))
    if (is.null(s2)) {
      first_level
    } else {
      with(first_level, get(s2))
    }
  }
  sapply(L, extract_one)
}
|
d7e8bf5ab2a6b022853c5617b7c458d19a542ff3
|
db0e77329139306d1a97d816929b406064d4c16a
|
/web/IntroToShiny/exercises/19-tags/app.R
|
7e96af911779f25a759851d971451dbc4d66af01
|
[] |
no_license
|
lihm1/self-study
|
2a106824024e3bfdf8e2df753a9981b703bd467c
|
112ac12fd7d82181d756b4defb683e77d87f911a
|
refs/heads/master
| 2023-04-30T13:18:40.141160
| 2021-05-18T16:00:45
| 2021-05-18T16:00:45
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 572
|
r
|
app.R
|
# Minimal Shiny app: a slider chooses the sample size n; the main panel shows
# a histogram and summary of rnorm(n) draws.
library(shiny)

ui <- fluidPage(
  sidebarLayout(
    sidebarPanel(
      # n in [1, 100], default 50
      sliderInput("num", "Choose a number", 1, 100, 50),
      tags$img(height = 120, src = "shiny.png"),
      tags$br(),
      tags$em("Powered by "),
      tags$a(href = "shiny.rstudio.com", "shiny")
    ),
    mainPanel(
      plotOutput("hist"),
      verbatimTextOutput("sum")
    )
  )
)

server <- function(input, output) {
  # NOTE(review): the plot and the summary each call rnorm() independently,
  # so they describe two different samples of the same size — confirm this
  # is intended.
  output$hist <- renderPlot({
    hist(rnorm(input$num))
  })
  output$sum <- renderPrint({
    summary(rnorm(input$num))
  })
}

shinyApp(ui = ui, server = server)
|
4b98456877010091eb3508956656304ced35f652
|
55cf7bb541cff8116fe70a00834be893df322f39
|
/man/aoristic.Rd
|
a9519c738383e2b7866a3d2b121b58c26bbf0694
|
[] |
no_license
|
georgekick/aoristic
|
d2b78a0c79305027f6c1c2bbe81c101d2499d0b5
|
3ec0f6efd7f5033317ec23494c91f04b09b2bf7c
|
refs/heads/master
| 2021-01-01T16:44:44.832602
| 2015-01-10T10:33:36
| 2015-01-10T10:33:36
| 12,824,202
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 421
|
rd
|
aoristic.Rd
|
% Generated by roxygen2 (4.1.0): do not edit by hand
% Please edit documentation in R/aoristic-package.R
\docType{data}
\name{aoristic}
\alias{aoristic}
\title{Sample data of crime (df) and council district (spdf)}
\source{
City of Arlington
}
\usage{
data(aoristic)
}
\description{
Sample data of burglary incident data frame and spatial polygon data frame of council districts.
}
\author{
George Kikuchi, 2013-09-13
}
|
54487704b652060985694ef0840e77f927bcfcbf
|
aaac559889d1968ee128d67460bcf4a4272e39fb
|
/figure/Plot 3.R
|
3129b6b54cc448b97f720d9074f2665034917850
|
[] |
no_license
|
Omar-Ma/ExData_Plotting1
|
7a6f9cd928afe2f42ac50f6d0e9edc5e680b99a7
|
4bfad1eb25ea314250548c63f399a7424c03ef17
|
refs/heads/master
| 2021-01-09T07:02:54.416243
| 2014-10-12T23:04:21
| 2014-10-12T23:04:21
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 924
|
r
|
Plot 3.R
|
# Plot 3: overlay the three energy sub-metering series for 2007-02-01/02
# from the UCI household power consumption data (semicolon-separated).
da <- read.table("household_power_consumption.txt", sep = ";", header = TRUE)
da$Date1 <- as.Date(da$Date, "%d/%m/%Y")
da1 <- subset(da, Date1 == "2007-02-01" | Date1 == "2007-02-02")
da1$DateAndTime <- paste(da1$Date, da1$Time)
da1$DateAndTime <- strptime(da1$DateAndTime, "%d/%m/%Y %H:%M:%OS")
# Set up an empty frame, then add each series. (The original also drew two
# full plots first, which were immediately replaced by this frame — removed.)
with(da1, plot(DateAndTime, Sub_metering_1, xlab = "", ylab = "Energy sub metering", type = "n"))
with(da1, points(DateAndTime, Sub_metering_1, col = "black", type = "l"))
with(da1, points(DateAndTime, Sub_metering_2, col = "red", type = "l"))
with(da1, points(DateAndTime, Sub_metering_3, col = "blue", type = "l"))
# Fix: the third legend entry was mislabelled "Sub_metering_1".
legend("topright", lty = 1, col = c("black", "red", "blue"),
       legend = c("Sub_metering_1", "Sub_metering_2", "Sub_metering_3"))
|
95c1c16120da661b2d1164d2b89b7ea4fc42bbf3
|
221863c465c6629762198fa28b9d2585f04233be
|
/download-osm-data.R
|
92d0a091ae6b0221d13d52d5ee74c8e189ee6205
|
[] |
no_license
|
Robinlovelace/20_Study
|
3ce5b88a3090938f04a06be5a4958517eba96a96
|
aa74aa076be20f221e0af3d1dfa057b9fdc2fb86
|
refs/heads/master
| 2020-12-26T08:27:42.755888
| 2016-03-30T18:27:18
| 2016-03-30T18:27:18
| 54,464,511
| 0
| 0
| null | 2016-03-22T09:58:11
| 2016-03-22T09:58:11
| null |
UTF-8
|
R
| false
| false
| 378
|
r
|
download-osm-data.R
|
# Download OSM highway data for Leeds at two maxspeed limits and plot them.
# install.packages("osmplotr")
library(osmplotr)
library(tmap)
?osmplotr
b <- bb("Leeds")    # bounding box; flattened to the numeric vector osmplotr expects
b <- as.vector(b)
class(b)
# Roads tagged with 20 mph and 60 mph speed limits.
h <- extract_osm_objects(key = "highway", bbox = b, extra_pairs = c("maxspeed", "20"))
h60 <- extract_osm_objects(key = "highway", bbox = b, extra_pairs = c("maxspeed", "60"))
summary(h)
h <- h$obj
plot(h60$obj)
plot(h, col = "red", add = TRUE)  # TRUE, not T (T is reassignable)
mapview::mapview(h)
|
15459ad69ca23782455a7499d2c97c3637e04c93
|
42a37b1c9f47f345a0026923c549bc2e027c09f6
|
/Thesis/thesisUtility.R
|
342a924b4b93573396b1dc0958e0a0e0404c029b
|
[] |
no_license
|
kianBlanchette/Thesis
|
c73fafb94a80e020949488ec0b059a0a1406adc8
|
79ea53c296da187ddf756d00782c35a4091b1da5
|
refs/heads/master
| 2023-03-07T16:08:41.079636
| 2023-02-20T00:15:32
| 2023-02-20T00:15:32
| 289,295,575
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 15,687
|
r
|
thesisUtility.R
|
library(multitaper)
library(VGAM)
dyn.load("f95Lib.so")
# dpssap: associated polynomials for a Slepian (dpss) basis.
#
# Builds polynomials R of degree 0..maxdeg (seeded with Gegenbauer polynomials
# on a centered time index, then Gram-Schmidt orthonormalized) together with
# their expansion coefficients U in the basis of the taper columns of V.
# Orthonormalization is carried out on the coefficient columns of U, with the
# identical scalings applied to R, and exploits even/odd parity: only
# same-parity columns need orthogonalizing against each other.
#
# Args:
#   V:      N x K matrix of dpss tapers (columns alternate even/odd parity).
#   maxdeg: highest polynomial degree (>= 0).
#   alpha:  Gegenbauer parameter for the seed polynomials.
# Returns an object of class "ass.poly": list(U = K x P coefficients,
# R = N x P polynomials, Hn = column sums of squares of R), with P = maxdeg+1.
dpssap <- function(V, maxdeg, alpha=0.75) {
  # Sanity checks
  stopifnot(is.matrix(V), is.numeric(maxdeg), maxdeg>=0)
  N <- length(V[, 1])
  K <- length(V[1, ])
  P <- maxdeg + 1
  timeArr <- 1:N
  R <- matrix(data=0, nrow=N, ncol=P)
  U <- matrix(data=0, nrow=K, ncol=P)
  # Setup centered time index
  midTime <- (1+N) / 2
  scl <- 2/(N-1)
  timeArrC <- (timeArr - midTime) * scl
  # Start with Gegenbauer polynomials; convergence is faster
  R[, 1] <- 1.0
  if(maxdeg > 0) {
    R[, 2] <- 2 * alpha * timeArrC
    if(maxdeg > 1) {
      for(j in 2:maxdeg) {
        # Standard three-term Gegenbauer recurrence.
        A1 <- 2 * ( (j-1) + alpha ) / j
        A2 <- ( (j-2) + 2 * alpha ) / j
        R[, (j+1)] <- A1 * timeArrC * R[, j] - A2 * R[, (j-1)]
      } # end of loop on higher orders
    } # end of maxdeg > 1
  } # end of maxdeg > 0
  # Inner Products of R and V
  for(L in 1:P) {
    # Parity shortcut: a degree-(L-1) polynomial only projects onto tapers of
    # the same parity, so step through k by 2.
    Kmin <- ( (L-1) %% 2 ) + 1
    for(k in seq(Kmin, K, 2)) { # loop on non-zero Slepians
      U[k, L] <- t(V[, k]) %*% R[, L]
    }
  }
  # Degree 0, 1 (manual) -- L = degree+1
  for(L in 1:min(2,P)) {
    scl <- 1 / sqrt( sum(U[, L]^2) )
    U[, L] <- U[, L] * scl # orthonormalize
    R[, L] <- R[, L] * scl
  }
  # loop on higher degrees, applying Gram-Schmidt only on similar
  # parity functions (as even/odd are already orthogonal in U)
  if( P > 2 ) {
    for(L in 3:P) {
      if(L %% 2 == 0) {
        Kmin <- 2
      } else {
        Kmin <- 1
      }
      for(j in seq(Kmin, L-1, 2)) {
        scl <- sum( U[, L] * U[, j] )
        U[, L] <- U[, L] - scl * U[, j] # Gram-Schmidt
        R[, L] <- R[, L] - scl * R[, j]
      }
      scl <- 1 / sqrt(sum(U[, L]^2))
      U[, L] <- U[, L] * scl # orthonormalize
      R[, L] <- R[, L] * scl
    }
  }
  Hn <- colSums(R^2)
  ap <- list(U=U,R=R,Hn=Hn)
  class(ap) <- "ass.poly"
  return(ap)
}
# Design a low-pass FIR filter from even-ordered Slepian tapers, suitable for
# decimation by `ndec` (or with explicit cutoff `wflt`); exactly one of
# `ndec`/`wflt` must be supplied.
#
# The even-order dpss columns are frequency-transformed (zero-phase layout via
# wrap-around), a least-squares fit makes the passband response ~1 up to the
# cutoff bin, and the resulting combination is normalized to unit DC gain.
# Returns the length-`nflt` filter coefficients.
# NOTE(review): requires odd `nflt` (neh = (nflt-1)/2 must be an integer) and
# `M` large enough that M >= nflt — not validated here; confirm with callers.
filterDesign <- function(nflt, ndec = NULL, M = 2^14, wflt = NULL){
  # Fix: scalar conditions in `if` should use the short-circuit operators
  # `&&`/`||`, not the vectorized `&`/`|`.
  if ((is.null(ndec) && is.null(wflt)) || (!is.null(ndec) && !is.null(wflt))){
    stop("You must set one and only one of ndec or wflt.")
  }
  fudge <- 1.1 # deals with passband of the filter
  if (is.null(wflt)){
    wflt <- 0.5 * fudge / ndec
  } else {
    ndec <- ceiling(0.5/wflt)
    wflt <- wflt * fudge
  }
  nw <- floor(nflt*wflt)
  k <- 2*nw - 3
  nfreq <- 1 + M/2
  # generate slepians, keep even ordered ones
  slep.tmp <- multitaper::dpss(n = nflt, k = k, nw = nw, returnEigenvalues = FALSE)$v[, seq(1, k, by = 2)]
  neh <- (nflt-1)/2
  nc <- neh + 1
  # Zero-phase layout: center sample first, tails wrapped around the ends.
  slep <- matrix(0, nrow = M, ncol = ncol(slep.tmp))
  slep[1, ] <- slep.tmp[nc, ]
  slep[2:(neh+1), ] <- slep[M:(M-neh+1), ] <- slep.tmp[(nc + 1):nflt, ]
  taper <- mvfft(slep)[1:nfreq, ]
  taper.real <- Re(taper)
  freq <- seq(0, 0.5, by = 1/M)
  fCut1 <- tail(which(freq <= wflt), 1)
  fCut <- trunc(min(fCut1, 0.85 * M / (2*ndec))) ### This is the important piece!
  # Least squares: combine the taper spectra so the passband response is ~1.
  d.qr <- qr(taper.real[1:(fCut), ])
  coefs <- qr.coef(d.qr, rep(1, fCut))
  fitted <- qr.fitted(d.qr, rep(1, fCut))
  filter1 <- slep.tmp %*% coefs
  filter2 <- filter1 / sum(filter1)   # normalize to unit DC gain
  # H <- fft(c(filter2, rep(0, M - nflt)))[1:(M/2+1)]
  # plot(abs(H)[1:1000]^2, type='l', log='y')
  filter2
}
# Locate runs of missing values in `data`.
#
# Args:
#   data: vector to scan.
#   missingSignature: value(s) treated as missing. The default NA works
#     because `%in%` (unlike `==`) matches NA.
# Returns a matrix with one row per gap: (first index, last index, length).
# Contract preserved from the original: a single row of zeros when there is
# no gap, and always a matrix even for a single gap.
findGaps <- function(data, missingSignature=NA){
  nas <- data %in% missingSignature
  N <- length(data)
  if (N == 0){
    stop("Data of length 0.")
  }
  gaps <- c(0, 0, 0)
  numGaps <- 0
  i <- 1
  while (i <= N){
    gapLength <- 0
    # Fix: bounds-check BEFORE indexing. The original evaluated
    # nas[i+gapLength] first, reading one element past the end whenever a
    # gap ran to the last element (it only avoided an error because
    # `NA && FALSE` happens to evaluate to FALSE).
    while((i+gapLength <= N) && nas[i+gapLength]){
      gapLength <- gapLength + 1
    }
    if (gapLength > 0){
      gaps <- rbind(gaps, c(i, i+gapLength-1, gapLength), deparse.level=0)
      i <- i + gapLength
      numGaps <- numGaps + 1
    } else { i <- i + 1 }
  }
  if (numGaps == 0){
    t(as.matrix(gaps))
  } else if (numGaps == 1) {
    t(as.matrix(gaps[-1,]))
  } else { as.matrix(gaps[-1,])}
}
# Internal predicate: does obj carry the "dpssp" class?
# inherits() is the robust test — `class(obj) == "x"` returns a vector (and
# can miss) when obj has multiple classes.
.is.deriv <- function(obj){
  inherits(obj, "dpssp")
}
# Internal predicate: does obj carry the "dpss" class?
# inherits() is the robust test — `class(obj) == "x"` returns a vector (and
# can miss) when obj has multiple classes.
.is.dpss <- function(obj){
  inherits(obj, "dpss")
}
# Internal predicate: does obj carry the "ass.poly" class (see dpssap)?
# inherits() is the robust test — `class(obj) == "x"` returns a vector (and
# can miss) when obj has multiple classes.
.is.ap <- function(obj){
  inherits(obj, "ass.poly")
}
# dpssp11R: time derivatives of dpss eigenfunctions.
#
# Applies a skew-symmetric Toeplitz operator (kernel y, built from the
# derivative of the Dirichlet-type kernel at half-bandwidth W = NW/ndata) to
# each taper column, implemented as circular convolution via zero-padded FFTs,
# then rescales by the corresponding eigenvalue.
#
# Args:
#   DW: a "dpss" object with $v (ndata x K tapers) and $eigen (eigenvalues).
#   NW: time-bandwidth product.
# Returns an ndata x K matrix of class "dpssp".
dpssp11R <- function(DW,NW){
  stopifnot(.is.dpss(DW))
  efn <- DW$v
  ev <- DW$eigen
  ndata <- nrow(efn)
  K <- ncol(efn)
  W <- NW / ndata
  tn <- 1:(ndata-1)
  # Kernel terms a, b; the leading 0 is the (singular) lag-0 entry.
  b <- c(0, sin( 2*pi*W*tn) / (pi * tn^2) )
  a <- 2 * W * c(0, cos( 2*pi*W*tn ) / tn )
  y <- a - b
  #this is multiplication by a skew-symmetric toeplitz matrix.
  # the matrix is skew.toeplitz(y, upper = TRUE), meaning that
  # we actually need to negate different stuff.justification on 9 June 2020 notes.
  #if the skew-symmetric matrix generated by y is C, then the first row of C is
  # C[1,] = c(y[1], -y[2:n]). then the relevant line from skew.toepmult is
  #x <- as.matrix(c(C[1,1], -C[1,2:n], 0, C[1,n:2])), which is equivalent to
  #x <- as.matrix(c(y[1], y[2:n], 0, -y[n:2])), or x <- as.matrix(c(y,0,-y[n:2]))
  efnp <- matrix(data = NA, nrow = ndata, ncol = K)
  # Circulant embedding of the Toeplitz operator (length 2*ndata).
  x <- as.matrix(c(y,0,-y[ndata:2]))
  for(k in 1:K){
    # Zero-pad the taper, multiply spectra, invert, keep the first ndata lags.
    p <- c(efn[,k],rep(0,ndata))
    h <- as.vector(fft(p)*fft(x))
    efnp[,k] <- Re(fft(h, inverse = TRUE)[1:ndata] / length(h)) / ev[k]
  }
  class(efnp) <- "dpssp"
  return(efnp)
}
# findLocalFMaxM: significant interior local maxima, row by row.
#
# For each row m of the matrix `obj` (an F-statistic per frequency for degree
# m), finds interior indices that exceed the qf(cutoff, 1, k-m) threshold AND
# are strict local maxima. Endpoints are never reported.
#
# Args:
#   obj:    M x nfreq matrix of F statistics; row m uses k-m denominator dof.
#   k:      number of tapers; must exceed nrow(obj).
#   cutoff: quantile level for the F threshold (e.g. 0.99).
# Returns a list MAXES with (up to) one integer vector per row.
# NOTE(review): rows whose threshold is never crossed hit `next` before
# MAXES[[m]] is assigned, leaving NULL holes (or a shorter list when trailing
# rows are skipped) — callers must tolerate this.
findLocalFMaxM <- function(obj, k, cutoff){
  M <- nrow(obj)
  stopifnot(k > M)
  MAXES <- list()
  for(m in 1:M){
    Fval <- obj[m,]
    fMaxInd <- which(Fval > qf(cutoff, 1, k-m))
    maxes <- c()
    if (length(fMaxInd) == 0){
      next
    }
    for (i in 1:length(fMaxInd)){
      # Endpoints have no two-sided neighbourhood; skip them.
      if (fMaxInd[i] == 1 || fMaxInd[i] == length(Fval)){
        next
      }
      if (Fval[fMaxInd[i]] > Fval[fMaxInd[i]-1] &&
          Fval[fMaxInd[i]] > Fval[fMaxInd[i]+1]){
        maxes <- c(maxes, fMaxInd[i])
      }
    }
    MAXES[[m]] <- maxes
  }
  return(MAXES)
}
# FLoop3: thin wrapper over the compiled Fortran routine "Floop3"
# (dyn.load'ed from f95Lib.so at the top of this file).
# NOTE(review): the numerical semantics of FPcoef/Fcoef live on the Fortran
# side and cannot be confirmed from this file.
#
# Args:
#   mxdeg:  maximum polynomial degree (output has mxdeg+1 rows).
#   nord:   number of tapers/orders.
#   FPcoef: polynomial-projected coefficients, flattened column-major.
#   Fcoef:  taper-domain coefficients, flattened column-major.
#   nfreqs: number of frequencies (output columns).
# Returns the (mxdeg+1) x nfreqs matrix Fp; the value of the final
# assignment is the function's return value.
FLoop3 <- function(mxdeg, nord, FPcoef, Fcoef, nfreqs){
  output <- .Fortran("Floop3",
                     mxdeg = as.integer(mxdeg),
                     nord = as.integer(nord),
                     FPcoef = as.double(FPcoef),
                     Fcoef = as.double(Fcoef),
                     Fp = double( (mxdeg+1)*nfreqs ),
                     nfreqs = as.integer(nfreqs)
  )
  Fp <- matrix(data = output$Fp, nrow = mxdeg+1, ncol = nfreqs)
}
# Per-row scale factors from a taper matrix V and its derivative Vdot.
# Returns the same list as before:
#   scl1 — row sums of squares of V,
#   scl2 — row sums of squares of Vdot,
#   scl3 — squared row-wise correlation term <V,Vdot>^2 / (scl1*scl2),
#   scl  — combined scaling 2*pi*sqrt(scl1) / sqrt(scl2*(1-scl3)).
inverseScales <- function(V, Vdot){
  ssqV <- rowSums(V * V)
  ssqVdot <- rowSums(Vdot * Vdot)
  crossTerm <- rowSums(V * Vdot)
  corr2 <- crossTerm^2 / (ssqV * ssqVdot)
  combined <- 2 * pi * sqrt(ssqV) / sqrt(ssqVdot * (1 - corr2))
  list(scl = combined, scl1 = ssqV, scl2 = ssqVdot, scl3 = corr2)
}
# Project the first `smooth`+1 associated-polynomial components out of the
# tapers V (and derivatives Vdot), substituting the polynomial forms R/D from
# the "ass.poly" object AP in their place. Returns list(A, Adot).
#
# AP must carry U (K x P coefficients), R and D (N x P polynomial values and
# derivatives); V/Vdot are nfreq x K in the taper dimension.
assPolySmoother <- function(AP, V, Vdot, smooth = 0){
  keep <- seq_len(smooth + 1)
  coefs  <- AP$U[, keep, drop = FALSE]
  polys  <- AP$R[, keep, drop = FALSE]
  dpolys <- AP$D[, keep, drop = FALSE]
  # Projector onto the orthogonal complement of the kept coefficient columns.
  complement <- diag(nrow(AP$U)) - tcrossprod(coefs)
  A    <- tcrossprod(V, complement) + tcrossprod(polys, coefs)
  Adot <- tcrossprod(Vdot, complement) + tcrossprod(dpolys, coefs)
  list(A = A, Adot = Adot)
}
# dpssapD: associated polynomials for a dpss basis, optionally with their
# time derivatives.
#
# With deriv = FALSE this reproduces dpssap() (D is returned as NULL).
# With deriv = TRUE the matrix D tracks the derivative of each polynomial
# column through the same seeding, Gram-Schmidt, and scaling steps as R.
# NOTE(review): the derivative recurrence D[, j+1] <- tscl * j * R[, j]
# does not carry the A1/A2 terms of the Gegenbauer recurrence — confirm this
# identity against the intended polynomial family.
#
# Args:
#   V:      N x K matrix of dpss tapers.
#   maxdeg: highest polynomial degree (>= 0).
#   alpha:  Gegenbauer parameter for the seed polynomials.
#   deriv:  also build the derivative matrix D?
# Returns an "ass.poly" list(U, R, D, Hn); see dpssap() for U/R/Hn.
dpssapD <- function(V, maxdeg, alpha=0.75, deriv = FALSE) {
  # Sanity checks
  stopifnot(is.matrix(V), is.numeric(maxdeg), maxdeg>=0)
  N <- length(V[, 1])
  K <- length(V[1, ])
  P <- maxdeg + 1
  timeArr <- 1:N
  if(!deriv){
    # --- Branch 1: polynomials only (identical to dpssap) -------------------
    R <- matrix(data=0, nrow=N, ncol=P)
    U <- matrix(data=0, nrow=K, ncol=P)
    D <- NULL
    # Setup centered time index
    midTime <- (1+N) / 2
    scl <- 2/(N-1)
    timeArrC <- (timeArr - midTime) * scl
    # Start with Gegenbauer polynomials; convergence is faster
    #alpha <- 0.75
    R[, 1] <- 1.0
    if(maxdeg > 0) {
      R[, 2] <- 2 * alpha * timeArrC
      if(maxdeg > 1) {
        for(j in 2:maxdeg) {
          A1 <- 2 * ( (j-1) + alpha ) / j
          A2 <- ( (j-2) + 2 * alpha ) / j
          R[, (j+1)] <- A1 * timeArrC * R[, j] - A2 * R[, (j-1)]
        } # end of loop on higher orders
      } # end of maxdeg > 1
    } # end of maxdeg > 0
    # Inner Products of R and V
    for(L in 1:P) {
      Kmin <- ( (L-1) %% 2 ) + 1
      for(k in seq(Kmin, K, 2)) { # loop on non-zero Slepians
        U[k, L] <- t(V[, k]) %*% R[, L]
      }
    }
    # Degree 0, 1 (manual) -- L = degree+1
    for(L in 1:min(2,P)) {
      scl <- 1 / sqrt( sum(U[, L]^2) )
      U[, L] <- U[, L] * scl # orthonormalize
      R[, L] <- R[, L] * scl
    }
    # loop on higher degrees, applying Gram-Schmidt only on similar
    # parity functions (as even/odd are already orthogonal in U)
    if( P > 2 ) {
      for(L in 3:P) {
        if(L %% 2 == 0) {
          Kmin <- 2
        } else {
          Kmin <- 1
        }
        for(j in seq(Kmin, L-1, 2)) {
          scl <- sum( U[, L] * U[, j] )
          U[, L] <- U[, L] - scl * U[, j] # Gram-Schmidt
          R[, L] <- R[, L] - scl * R[, j]
        }
        scl <- 1 / sqrt(sum(U[, L]^2))
        U[, L] <- U[, L] * scl # orthonormalize
        R[, L] <- R[, L] * scl
      }
    }
  } else {
    # --- Branch 2: polynomials plus derivatives; D mirrors every operation
    # applied to R so derivative columns stay consistent with R ---------------
    R <- D <- matrix(data=0, nrow=N, ncol=P)
    U <- matrix(data=0, nrow=K, ncol=P)
    # Setup centered time index
    midTime <- (1+N) / 2
    tscl <- 2/(N-1)
    timeArrC <- (timeArr - midTime) * tscl
    # Start with Gegenbauer polynomials; convergence is faster
    #alpha <- 0.75
    R[, 1] <- 1.0
    if(maxdeg > 0) {
      R[, 2] <- 2 * alpha * timeArrC
      D[, 2] <- tscl
      if(maxdeg > 1) {
        for(j in 2:maxdeg) {
          A1 <- 2 * ( (j-1) + alpha ) / j
          A2 <- ( (j-2) + 2 * alpha ) / j
          R[, (j+1)] <- A1 * timeArrC * R[, j] - A2 * R[, (j-1)]
          D[, (j+1)] <- tscl * j * R[, j]
        } # end of loop on higher orders
      } # end of maxdeg > 1
    } # end of maxdeg > 0
    # Inner Products of R and V
    for(L in 1:P) {
      Kmin <- ( (L-1) %% 2 ) + 1
      for(k in seq(Kmin, K, 2)) { # loop on non-zero Slepians
        U[k, L] <- t(V[, k]) %*% R[, L]
      }
    }
    # Degree 0, 1 (manual) -- L = degree+1
    for(L in 1:min(2,P)) {
      scl <- 1 / sqrt( sum(U[, L]^2) )
      U[, L] <- U[, L] * scl # orthonormalize
      R[, L] <- R[, L] * scl
      D[, L] <- D[, L] * scl
    }
    # loop on higher degrees, applying Gram-Schmidt only on similar
    # parity functions (as even/odd are already orthogonal in U)
    if( P > 2 ) {
      for(L in 3:P) {
        if(L %% 2 == 0) {
          Kmin <- 2
        } else {
          Kmin <- 1
        }
        for(j in seq(Kmin, L-1, 2)) {
          scl <- sum( U[, L] * U[, j] )
          U[, L] <- U[, L] - scl * U[, j] # Gram-Schmidt
          R[, L] <- R[, L] - scl * R[, j]
          D[, L] <- D[, L] - scl * D[, j]
        }
        scl <- 1 / sqrt(sum(U[, L]^2))
        U[, L] <- U[, L] * scl # orthonormalize
        R[, L] <- R[, L] * scl
        D[, L] <- D[, L] * scl
      }
    }
  }
  Hn <- colSums(R^2)
  ap <- list(U=U,R=R,D=D,Hn=Hn)
  class(ap) <- "ass.poly"
  return(ap)
}
# F statistics for polynomial-modulated components.
#
# Args:
#   Phi:  K x nfreq matrix of taper-domain coefficients.
#   PhiP: P x nfreq matrix of polynomial projections; requires K > P.
# Returns list(F1, F3), each P x nfreq:
#   F1[p,] compares the cumulative projected power through degree p against
#          the remaining (residual) power, scaled by (K/p - 1);
#   F3[p,] tests the single degree-p component against the same residual,
#          scaled by (K - p).
modulatedFs5 <- function(Phi, PhiP){
  nDeg <- nrow(PhiP)
  nOrd <- nrow(Phi)
  stopifnot(nOrd > nDeg)
  nFreq <- ncol(Phi)
  F1 <- matrix(NA_real_, nDeg, nFreq)
  F3 <- matrix(NA_real_, nDeg, nFreq)
  total <- colSums(Phi^2)
  projected <- 0
  for (deg in seq_len(nDeg)) {
    band <- PhiP[deg, ]^2
    projected <- projected + band
    residual <- total - projected
    F1[deg, ] <- (nOrd / deg - 1) * projected / residual
    F3[deg, ] <- (nOrd - deg) * band / residual
  }
  list(F1 = F1, F3 = F3)
}
# ModulatedF17: modulated-component F statistics at every frequency.
#
# "Speed" variant: the derivative tapers (derivIN), associated polynomials
# (apIN, from dpssap/dpssapD) and dpss object (dpssIN) must all be
# precomputed and passed in; nothing is computed here beyond the projections.
#
# Pipeline: instantaneous-frequency estimates phi (IFcompute) are projected
# onto the taper basis (Phi) and then onto the polynomial coefficients
# (PhiP); F3 comes from the Fortran loop, mF1/mF3 from modulatedFs5 with the
# degree-0 row dropped (PhiP[2:P, ]).
# Returns list(F3, mF1, mF3).
ModulatedF17 <- function(yk, derivIN = NULL, apIN = NULL, dpssIN = NULL){
  #speed version of this function. everything must be passed in.
  K <- ncol(yk)
  V <- dpssIN$v
  H <- apIN$U
  P <- ncol(H)
  nfreqs <- nrow(yk)
  stopifnot(K > P)
  #############################################################################
  phi <- IFcompute(yk,V,derivIN)
  Phi <- crossprod(V,phi)
  rm(phi)   # phi is large (N x nfreq); free it as soon as it is projected
  PhiP <- crossprod(H,Phi)
  F3 <- FLoop3(P-1,K,PhiP,Phi,nfreqs)
  # Drop the degree-0 row for the "modulated" statistics.
  mFs <- modulatedFs5(Phi,PhiP[2:P,,drop = FALSE])
  mF1 <- mFs$F1
  mF3 <- mFs$F3
  ModF <- list(F3=F3,mF1=mF1,mF3=mF3)
  return(ModF)
}
# Harmonic F-test at every frequency (Thomson-style multitaper line test).
#
# Args:
#   yk: nfreq x K matrix of complex eigencoefficients.
#   dw: N x K matrix of dpss tapers.
# Returns list(HF, cmv): HF (class "Ftest") is the F statistic per frequency;
# cmv is the complex mean value (line amplitude) estimate.
HarmonicF <- function(yk, dw){
  nOrd <- ncol(yk)
  taperSums <- colSums(dw)          # Uk(0): DC value of each taper
  energy <- sum(taperSums^2)
  cmv <- (yk %*% taperSums) / energy
  explained <- energy * Mod(cmv)^2
  fitted <- cmv %*% t(as.matrix(taperSums))
  residual <- rowSums(Mod(yk - fitted)^2)
  HF <- (nOrd - 1) * explained / residual
  class(HF) <- "Ftest"
  list(HF = HF, cmv = cmv)
}
# FLoop4: thin wrapper over the compiled Fortran routine "FLoop4"
# (dyn.load'ed from f95Lib.so). Companion to FLoop3, but the output has
# `mxdeg` rows (no degree-0 row).
# NOTE(review): the numerical semantics of FPcoef/Fcoef live on the Fortran
# side and cannot be confirmed from this file.
#
# Args mirror FLoop3; returns the mxdeg x nfreqs matrix Fp (the value of the
# final assignment is the function's return value).
FLoop4 <- function(mxdeg, nord, FPcoef, Fcoef, nfreqs){
  output <- .Fortran("FLoop4",
                     mxdeg = as.integer(mxdeg),
                     nord = as.integer(nord),
                     FPcoef = as.double(FPcoef),
                     Fcoef = as.double(Fcoef),
                     Fp = double( mxdeg*nfreqs ),
                     nfreqs = as.integer(nfreqs)
  )
  Fp <- matrix(data = output$Fp, nrow = mxdeg, ncol = nfreqs)
}
# Instantaneous-frequency estimate in the time domain.
#
# Args:
#   yk:   nfreq x K matrix of complex eigencoefficients.
#   V:    N x K taper matrix.
#   Vdot: N x K matrix of taper time derivatives.
# Returns an N x nfreq matrix: with z = x + i*y reconstructed per time/freq,
# this is (x*y' - x'*y) / (2*pi*|z|^2), i.e. d/dt arg(z) / (2*pi).
IFcompute <- function(yk, V, Vdot){
  re    <- tcrossprod(V, Re(yk))
  im    <- tcrossprod(V, Im(yk))
  reDot <- tcrossprod(Vdot, Re(yk))
  imDot <- tcrossprod(Vdot, Im(yk))
  (re * imDot - reDot * im) / (2 * pi * (re^2 + im^2))
}
# jkFreq: delete-one-taper jackknife of the peak frequency of the F3
# statistic, per polynomial degree.
#
# For each left-out taper k, the F3 matrix is recomputed from the remaining
# K-1 rows of Phi/H and only the frequency of its maximum per degree is
# recorded in fkp. jkVar uses the (1 - 1/K) jackknife variance scaling.
#
# Args:
#   Phi:  K x nfreq matrix of taper-domain coefficients.
#   H:    K x P matrix of polynomial coefficients (K > P required).
#   freq: frequency grid (length nfreq) used to translate argmax indices.
# Returns list(jkMean, jkVar), each of length P.
jkFreq <- function(Phi, H, freq){
  K <- nrow(H)
  P <- ncol(H)
  stopifnot(K > P)
  F3 <- matrix(0, nrow = P, ncol = length(freq))
  fkp <- matrix(0, nrow = K, ncol = P)
  for(k in 1:K){
    # Leave-one-out quantities with taper k removed.
    ssq1 <- colSums( Phi[-k,]^2 )
    PhiP <- crossprod(H[-k,], Phi[-k,])
    ssq2 <- 0
    for(p in 1:P){
      # ssq2 accumulates projected power through degree p (as in modulatedFs5).
      ssq2 <- ssq2 + PhiP[p,]^2
      F3[p,] <- (K-1-p) * PhiP[p,]^2 / (ssq1 - ssq2)
      fkp[k,p] <- freq[which.max(F3[p,])]
    }
  }
  jkMean <- apply(fkp, MARGIN = 2, FUN = function(x) mean(x))
  jkVar <- numeric(P)
  for(p in 1:P){
    jkVar[p] <- (1-1/K) * sum( (fkp[,p] - jkMean[p])^2 )
  }
  jk <- list(jkMean = jkMean, jkVar = jkVar)
  return(jk)
}
# ModulatedF19: modified F3 statistic plus jackknife mean/variance of the
# peak frequency (via jkFreq).
#
# "Speed" variant: derivatives (derivIN), associated polynomials (apIN) and
# the dpss object (dpssIN) must be precomputed and passed in. H drops the
# degree-0 column of the polynomial coefficients, so P counts only degrees
# >= 1. `deltat` is accepted for interface compatibility but unused here.
# Returns list(mF3, jk).
ModulatedF19 <- function(yk, derivIN = NULL, apIN = NULL, dpssIN = NULL,
                         deltat = 1, freq){
  K <- ncol(yk)
  V <- dpssIN$v
  H <- apIN$U[,-1, drop = FALSE]
  P <- ncol(H)
  stopifnot(K > P)
  nfreqs <- nrow(yk)
  #############################################################################
  phi <- IFcompute(yk, V, derivIN)
  Phi <- crossprod(V, phi)
  rm(phi)   # phi is large (N x nfreq); free it once projected
  PhiP <- crossprod(H, Phi)
  # Fix: the original called FLoop4(mxdeg, ...) where `mxdeg` was an
  # undefined free variable (resolved — if at all — from the calling
  # environment). The number of degree rows in PhiP is P = ncol(H).
  mF3 <- FLoop4(P, K, PhiP, Phi, nfreqs)
  jk <- jkFreq(Phi, H, freq)
  ModF <- list(mF3 = mF3, jk = jk)
  return(ModF)
}
# ModulatedF20: all four modulated-component F statistics at once.
#
# "Speed" variant: derivatives (derivIN), associated polynomials (apIN) and
# the dpss object (dpssIN) must be precomputed and passed in.
# F1/F3 use the full polynomial projection PhiP; mF1/mF3 drop the degree-0
# row (PhiP[2:P, ]). Returns list(F1, F3, mF1, mF3).
ModulatedF20 <- function(yk, derivIN = NULL, apIN = NULL, dpssIN = NULL){
  #speed version of this function. everything must be passed in.
  #computes all 4 test statistics
  K <- ncol(yk)
  V <- dpssIN$v
  H <- apIN$U
  P <- ncol(H)
  #############################################################################
  phi <- IFcompute(yk, V, derivIN)
  Phi <- crossprod(V,phi)
  rm(phi)   # phi is large (N x nfreq); free it once projected
  PhiP <- crossprod(H,Phi)
  Fs <- modulatedFs5(Phi, PhiP)
  # Degree-0 row dropped for the "modulated" variants.
  mFs <- modulatedFs5(Phi, PhiP[2:P,,drop = FALSE])
  ModF <- list(F1 = Fs$F1, F3 = Fs$F3, mF1 = mFs$F1, mF3 = mFs$F3)
  return(ModF)
}
# Find interior local maxima of a harmonic F-test exceeding the
# qf(cutoff, 2, 2k-2) threshold (the harmonic F-test's 2 and 2K-2 dof).
#
# Args:
#   obj:    numeric vector of F values carrying class "Ftest" (see HarmonicF).
#   cutoff: quantile level, e.g. 0.99.
#   k:      number of tapers used to form the statistic.
# Returns integer indices of significant strict local maxima, or NULL when
# there are none (matching the original contract).
findLocalFMax2 <- function(obj, cutoff, k){
  # inherits() is the robust class test; `any(class(obj) == ...)` was the
  # long-hand equivalent.
  if (!inherits(obj, "Ftest")){
    stop("obj needs to be of class 'Ftest'.")
  }
  Fval <- obj
  candidates <- which(Fval > qf(cutoff, 2, 2*k - 2))
  maxes <- c()
  # Iterating directly over the (possibly empty) candidate set also removes
  # the need for the original's special-case early return.
  for (i in candidates){
    # Endpoints have no two-sided neighbourhood; skip them.
    if (i == 1 || i == length(Fval)){
      next
    }
    if (Fval[i] > Fval[i-1] && Fval[i] > Fval[i+1]){
      maxes <- c(maxes, i)
    }
  }
  maxes
}
|
192473d56ca2b2bdd29d004689c6eeaa450a2780
|
0ff904b62106e21b9c47c902549d6e176e3224e9
|
/air_pollution/getMeasures.R
|
0fd20a2d8471f036df21516fbe8b45f6c4f9ad8a
|
[
"BSD-3-Clause"
] |
permissive
|
HimesGroup/k12bioinformatics
|
a2829a1ca4cd765a676890f0249d1fe4cecfa723
|
527eec1316c36824774e27e76a1d58e9e1818b3a
|
refs/heads/master
| 2021-11-29T01:03:18.455101
| 2021-11-09T19:51:53
| 2021-11-09T19:51:53
| 181,563,187
| 3
| 1
|
BSD-3-Clause
| 2021-11-09T19:51:53
| 2019-04-15T20:46:41
|
R
|
UTF-8
|
R
| false
| false
| 3,302
|
r
|
getMeasures.R
|
library(pargasite)
library(dplyr)
library(data.table)
##Get data from downloaded daily EPA data https://aqs.epa.gov/aqsweb/airdata/download_files.html
daily_df <- read.csv("daily_88101_2017.csv", header = TRUE)
states <- c("Pennsylvania","New York","Florida","Montana","California","New Mexico","Oregon","South Dakota")
# Fix: k12 must be read BEFORE its City column is used to build `cities`
# (the original referenced k12$City one line before k12 was loaded).
k12 <- read.table("databases/k12_sites.txt", header = TRUE)
cities <- c("Albuquerque", as.vector(k12$City))
# NOTE(review): levels() requires Date.Local to be a factor — true only under
# pre-R-4.0 stringsAsFactors defaults; confirm, or use unique() instead.
sdates <- grep("2017-09", levels(daily_df$Date.Local), value = TRUE)
daily_k12_df <- daily_df %>%
dplyr::filter(State.Name %in% states,Date.Local %in% sdates,City.Name %in% cities) %>%
dplyr::select(City.Name,Latitude,Longitude,Arithmetic.Mean,Date.Local,County.Name,State.Name,Method.Code)
ddf1 <- daily_k12_df %>% dplyr::filter(City.Name %in% c("Portland","Miami","Los Angeles"),Method.Code =="145")
ddf2 <- daily_k12_df %>% dplyr::filter(City.Name %in% cities,!Method.Code %in% c("145","170","182"))
final_ddf <- rbind(ddf1, ddf2)
final_ddf <- final_ddf %>% dplyr::distinct(City.Name,Date.Local,County.Name,.keep_all=TRUE) %>% dplyr::select(-Method.Code) %>% dplyr::filter(County.Name!="Bronx")
write.csv(final_ddf,"databases/EPA_measures_daily_average_Sept2017.csv",row.names = F,quote=F)
############################################################
## GET PM.25 AND CO EPA measures from pargasite package ##
############################################################
###Get PM 2.5 measures for tab "EPA Measures in USA"
long <- k12$Longitude
lat <- k12$Latitude
pm_list <- list()
for (i in seq(1,nrow(k12))){
pm <- getMonthPollutionEstimate(long[i], lat[i], pollutant = "PM2.5", monthyear="09-2017")
pm_list[[i]] <- pm
}
k12$PM <- unlist(pm_list)
k12$Location <- paste0(k12$City,",",k12$State)
write.csv(k12,"k12_sites.csv",row.names = FALSE)
###Get PM2.5 and CO measures for all cities from 2007-2017 for tab "Seasonality of measures"
k12 <- read.csv("databases/k12_sites.csv")
dates <- seq(1,12)
months <- c("Jan","Feb","Mar","Apr","May","June","Jul","Aug","Sept","Oct","Nov","Dec")
years <- seq(2007,2017)
fdates <- unlist(lapply(years, function(x) paste0(dates,"-",x)))
iter = 1
ph_list <- list()
ch_list <- list()
for (k in k12$State){
pk12 <- k12 %>% dplyr::filter(State == k) %>% dplyr::select(Longitude,Latitude)
pm_list <- list()
co_list <- list()
y_list <- list()
count = 1
for (i in seq(1,length(fdates))){
pm <- getMonthPollutionEstimate(pk12$Longitude, pk12$Latitude, pollutant="PM2.5", monthyear = fdates[i])
co <- getMonthPollutionEstimate(pk12$Longitude, pk12$Latitude, pollutant="CO", monthyear = fdates[i])
pm_list[[i]] <- pm
co_list[[i]] <- co
if (i %% 12 == 0){
y_list[[i]] <- years[count]
count = count + 1
} else {
y_list[[i]] <- years[count]
}
}
ph_df <- data.frame("Dates" = fdates, "PM2.5" = unlist(pm_list),"Year" = unlist(y_list),"Month"= months,"State"=k)
ph_list[[iter]] <- ph_df
co_df <- data.frame("Dates" = fdates, "CO" = unlist(co_list),"Year" = unlist(y_list),"Month"= months,"State"=k)
ch_list[[iter]] <- co_df
iter = iter + 1
}
ph_df <- rbindlist(ph_list, fill = TRUE)
co_df <- rbindlist(ch_list, fill = TRUE)
write.csv(ph_df,"databases/all_k12_sites_PM.csv", row.names = FALSE)
write.csv(co_df,"databases/all_k12_sites_CO.csv", row.names = FALSE)
|
abd0d0ce811874f84b79ba46b268ac7b3aabc58e
|
35e82ea9c3c886b99f444f59b81b5c1e4af1b405
|
/man/analyze.lavaan.Rd
|
6ea1a5a0016509c35efe1a984bd35355f07dffa7
|
[
"MIT"
] |
permissive
|
anhnguyendepocen/psycho.R
|
5d68d8e00fc520d4fec999836a8480b5ba0e73c7
|
611e984742890e698c4e94b6965d917432e98348
|
refs/heads/master
| 2020-09-05T21:50:11.877771
| 2019-10-03T15:36:36
| 2019-10-03T15:36:36
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 892
|
rd
|
analyze.lavaan.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/analyze.lavaan.R
\name{analyze.lavaan}
\alias{analyze.lavaan}
\title{Analyze lavaan (SEM or CFA) objects.}
\usage{
\method{analyze}{lavaan}(x, CI = 95, standardize = FALSE, ...)
}
\arguments{
\item{x}{lavaan object.}
\item{CI}{Confidence interval level.}
\item{standardize}{Compute standardized coefs.}
\item{...}{Arguments passed to or from other methods.}
}
\value{
output
}
\description{
Analyze lavaan (SEM or CFA) objects.
}
\examples{
library(psycho)
library(lavaan)
model <- " visual =~ x1 + x2 + x3\\ntextual =~ x4 + x5 + x6\\nspeed =~ x7 + x8 + x9 "
x <- lavaan::cfa(model, data = HolzingerSwineford1939)
rez <- analyze(x)
print(rez)
}
\seealso{
https://www.researchgate.net/post/Whats_the_standard_of_fit_indices_in_SEM
}
\author{
\href{https://dominiquemakowski.github.io/}{Dominique Makowski}
}
|
805030db60490c8957a2dec9490573a93528bb82
|
2bec5a52ce1fb3266e72f8fbeb5226b025584a16
|
/gtfs2gps/R/zzz.R
|
adc96e65f102123d92f89489a03f177a1839fd14
|
[] |
no_license
|
akhikolla/InformationHouse
|
4e45b11df18dee47519e917fcf0a869a77661fce
|
c0daab1e3f2827fd08aa5c31127fadae3f001948
|
refs/heads/master
| 2023-02-12T19:00:20.752555
| 2020-12-31T20:59:23
| 2020-12-31T20:59:23
| 325,589,503
| 9
| 2
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,516
|
r
|
zzz.R
|
utils::globalVariables(c(".", "%>%", ":="))

# Package load hook: tune data.table behaviour once per session.
# (Fixed assignment to `<-`; `=` at top level works but is non-idiomatic.)
.onLoad <- function(lib, pkg) {
  # Use GForce Optimisations in data.table operations
  # details > https://jangorecki.gitlab.io/data.cube/library/data.table/html/datatable-optimize.html
  options(datatable.optimize = Inf) # nocov
  # set number of threads used in data.table to 100%
  data.table::setDTthreads(percent = 100) # nocov
}
#' @importFrom magrittr %>%
#' @importFrom data.table := %between% fifelse %chin%
#' @importFrom stats na.omit
#' @importFrom utils head tail object.size
#' @importFrom Rcpp compileAttributes
#' @importFrom lwgeom st_geod_length
#' @importFrom rgdal readOGR
#' @useDynLib gtfs2gps, .registration = TRUE
NULL
## quiets concerns of R CMD check re: the .'s that appear in pipelines
if(getRversion() >= "2.15.1") utils::globalVariables(
  c('dist', 'shape_id', 'route_id', 'trip_id', 'stop_id',
    'service_id', 'stop_sequence', 'agency_id', 'i.stop_lat', 'i.stop_lon', 'i.stop_id',
    'departure_time', 'arrival_time', 'start_time', 'end_time', 'i.stop_sequence',
    'shape_pt_lon', 'shape_pt_lat', 'id', 'cumdist', 'i.departure_time',
    '.N', 'update_newstoptimes', 'shape_pt_sequence', 'geometry',
    'monday', 'tuesday', 'wednesday', 'thursday', 'friday', 'saturday', 'sunday',
    'service_duration', 'headway_secs', 'number_of_departures',
    'cumtime', 'speed', 'i', 'route_type', 'trip_number',
    '.I', 'interval_id', 'i.interval', '.SD', 'grp', '.GRP'))
|
896e1f18a637fdb85bb1b7ab4a45b85a055b298a
|
4bc2de391e8d28db03d49b9a69e5b9734356aa5a
|
/constructed data 's restructureing_for_mian_dataset_HOV lane_2.1.R
|
8212ea472ba40631ce07e908d4f8299d750f0845
|
[] |
no_license
|
sadvaseb/Restructured-NGSIM-Dataset-for-surrounding-vehicles
|
d0cb2c1f6ed897aeaeacf73c656926e7e2b1ac45
|
9cb08914c4741448e799c090b8445ae42191ba02
|
refs/heads/master
| 2022-12-10T18:56:55.209579
| 2020-09-06T06:37:03
| 2020-09-06T06:37:03
| 293,001,377
| 3
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 13,702
|
r
|
constructed data 's restructureing_for_mian_dataset_HOV lane_2.1.R
|
library(dplyr)
library(tidyr)
library(data.table)
library(corrplot)
library(reshape)
library(plyr)
library(reshape2)
library(readr)
library(tictoc)
tic()
setwd("C:/Users/X1/Documents")
# read csv file
data_v <- read.csv("C:/Users/X1/Dropbox/X2/2018/2018.02.02- RNN and clustering/reconstracted-I-80.csv")
# sort file based on frame number, location, and vehicle ID
data_v <- data_v[order(data_v$Frame, data_v$Local_Y, data_v$vehicle_ID, data_v$Follower_ID, data_v$Leader_ID),]
# filter vehicle to passnger cars which travel in lanes 2 to 5
select_v <- subset(data_v[,c("Frame","Local_Y","vehicle_ID","Lane_ID", "v_Class", "Follower_ID", "Leader_ID"),], v_Class == 2) #only LDV
select_v <- subset(select_v[,c("Frame","Local_Y","vehicle_ID","Lane_ID","Follower_ID", "Leader_ID"),], Lane_ID ==1) #for other lanes will be adjusted
select_v <- subset(select_v[,c("Frame","Local_Y","vehicle_ID","Lane_ID","Follower_ID", "Leader_ID"),], Follower_ID > 0 & Leader_ID > 0) #remove data if follower or leader data is not available
# Find vehicles that never change lane: a vehicle's min and max Lane_ID over
# all of its frames agree exactly when it stayed in one lane throughout.
ln1 <- ddply(select_v, c('vehicle_ID'), summarize, lane1 = min(Lane_ID))
ln2 <- ddply(select_v, c('vehicle_ID'), summarize, lane1 = max(Lane_ID))
# Vectorized replacement for the original element-wise loop: keep the ID when
# min and max lane match, mark lane-changers NA, then drop them.
select_vehcile <- data.frame(
  vehicle_ID = ifelse(ln1$lane1 == ln2$lane1, ln1$vehicle_ID, NA)
)
select_vehcile <- select_vehcile[complete.cases(select_vehcile), ]
select_v1 <- subset(select_v[,c("Frame","Local_Y","vehicle_ID","Lane_ID"),], select_v$vehicle_ID %in% select_vehcile)
# filter vehicles to vehicles in lane 1 to 2 (potential surrounding vehicles)
data_v_lane <- subset(data_v[,c("Frame","Local_Y","vehicle_ID","Lane_ID", "v_Class"),], Lane_ID %in% 1:2)
# Find HDVs and mark them
data_v_lane <- mutate(data_v_lane, vehicle_ID = ifelse(v_Class == 2, vehicle_ID, -2))
data_v_lanes <- subset(data_v_lane[,c("Frame","Local_Y","vehicle_ID","Lane_ID"),])
# create a data frame to hold main informations of vehicles
data_vehicles <- data.frame(vehicle_ID = as.numeric(),
Frame = as.numeric(),
Lane_ID = as.numeric(),
Local_Y_ID = as.numeric(),
vehicle_ID_right = as.numeric(),
Local_Y_right = as.numeric()
)
# For every observation of a selected vehicle, find the nearest vehicle ahead
# of it in the adjacent right lane (Lane_ID + 1). Rows are dropped when no
# such vehicle exists or when it is flagged as an HDV (sentinel ID -2).
# NOTE(review): this grows data_vehicles with rbind() inside the loop, which
# is O(n^2); kept as-is to preserve the original row-by-row behaviour.
for(i in 1:nrow(select_v1)){
  vehicle <- select_v1$vehicle_ID[i]
  time <- select_v1$Frame[i]
  lane <- select_v1$Lane_ID[i]
  y <- select_v1$Local_Y[i]
  # Vehicles in the same frame, one lane to the right / left of the subject.
  data_subset <- subset(data_v_lanes, ((Frame == time) & (Lane_ID == (lane+1))))
  data_subset2 <- subset(data_v_lanes, ((Frame == time) & (Lane_ID == (lane-1))))
  tt = 0  # flag: a right-lane vehicle ahead of the subject was found
  if(nrow(data_subset)>=1){ # shows if there is any matching vehicle on the right lane
    for(j in 1:nrow(data_subset)){ # searching for the first vehicle ahead on the right lane
      if(data_subset$Local_Y[j] > y){
        vehicle_right <- data_subset$vehicle_ID[j]
        vehicle_right_y <- data_subset$Local_Y[j]
        tt = 1
        break
      }
    }
  }
  # If there is no vehicle on the right lane, skip this observation.
  if (tt == 0){
    next
  }
  # If the vehicle on the right lane is an HDV (sentinel -2), skip it too.
  if (vehicle_right == -2){ next }
  tt = 0
  # Merge the subject and right-neighbour attributes into one record.
  data1 <- as.data.frame(cbind(vehicle, time, lane, y, vehicle_right, vehicle_right_y))
  names(data1) <- c("vehicle_ID","Frame","Lane_ID","Local_Y_ID","vehicle_ID_right","Local_Y_right")
  # BUG FIX: the original used `Frame <- as.numeric(...)` inside transform(),
  # which is an assignment (evaluated in the calling environment), not a named
  # argument -- the Frame column was never converted and an unnamed extra
  # column was silently appended (then dropped by [,1:6]). `Frame =` is the
  # intended named-argument form.
  data1 <- as.data.frame(transform(data1,
                                   vehicle_ID = as.numeric(as.character(vehicle_ID)),
                                   Frame = as.numeric(as.character(Frame)),
                                   Lane_ID = as.numeric(as.character(Lane_ID)),
                                   Local_Y_ID = as.numeric(as.character(Local_Y_ID)),
                                   vehicle_ID_right = as.numeric(as.character(vehicle_ID_right)),
                                   Local_Y_right = as.numeric(as.character(Local_Y_right)))[,1:6], stringsAsFactors = FALSE)
  data_vehicles <- as.data.frame(rbind(data_vehicles, data1[1,]))
  if((i %% 100) == 1){ # print progress every 100 iterations
    print(i)}
}
# Add extra attributes to the restructured dataset.
# Each left_join below pulls the kinematics of one surrounding vehicle for the
# same Frame; duplicated column names receive .x/.y suffixes in join order,
# which the names() assignment further down relies on -- do not reorder.
# subject vehicle data
vehicles_around_vars <- left_join(data_vehicles, data_v[,c("Frame","vehicle_ID","length", "Speed", "Acceleration", "Leader_ID", "Follower_ID")]
                                  , by=c("Frame"="Frame","vehicle_ID"="vehicle_ID"))
# Right vehicle data
vehicles_around_vars <- left_join(vehicles_around_vars,data_v[,c("Frame","vehicle_ID","length", "Speed","Acceleration")],
                                  by=c("Frame"="Frame","vehicle_ID_right"="vehicle_ID"))
# leading vehicle data
vehicles_around_vars <- left_join(vehicles_around_vars,data_v[,c("Frame","vehicle_ID","Local_Y","length", "v_Class", "Speed","Acceleration", "Leader_ID")],
                                  by=c("Frame"="Frame","Leader_ID"="vehicle_ID"))
# 2nd ahead vehicle data (the leader's leader, via the suffixed Leader_ID.y)
vehicles_around_vars <- left_join(vehicles_around_vars,data_v[,c("Frame","vehicle_ID","Local_Y","length", "v_Class", "Speed","Acceleration")],
                                  by=c("Frame"="Frame","Leader_ID.y"="vehicle_ID"))
# Following vehicle (back vehicle)
vehicles_around_vars <- left_join(vehicles_around_vars,data_v[,c("Frame","vehicle_ID","Local_Y","length", "v_Class", "Speed","Acceleration")],
                                  by=c("Frame"="Frame","Follower_ID"="vehicle_ID"))
# Rename the suffixed join output to explicit role-based names; this vector
# must match the column order produced by the joins above exactly.
names(vehicles_around_vars) <- c("subject_ID","Frame","subject_Lane","subject_Local_Y",
                                 "right_ID","right_Local_Y",
                                 "subject_length","subject_speed", "subject_Acc", "leading_ID", "follower_ID",
                                 "right_length", "right_speed", "right_Acc",
                                 "leading_Y", "leading_length", "leading_Class","leading_speed", "leading_Acc",
                                 "head2_ID", "head2_Y","head2_length", "head2_Class","head2_speed", "head2_Acc",
                                 "back_Y","back_length", "back_Class","back_speed", "back_Acc"
                                 )
# Keep only rows where the leader, 2nd leader and follower are all passenger
# cars (v_Class == 2) and a 2nd leader actually exists (head2_ID > 0).
all_vehicles_around <- subset(vehicles_around_vars[,c("subject_ID","Frame","subject_Lane","subject_Local_Y",
                                                      "right_ID","right_Local_Y",
                                                      "subject_length","subject_speed", "subject_Acc", "leading_ID", "follower_ID",
                                                      "right_length", "right_speed", "right_Acc",
                                                      "leading_Y", "leading_length", "leading_Class","leading_speed", "leading_Acc",
                                                      "head2_ID", "head2_Y","head2_length", "head2_Class","head2_speed", "head2_Acc",
                                                      "back_Y","back_length", "back_Class","back_speed", "back_Acc"),],
                              leading_Class==2 & head2_Class==2 & back_Class==2 & head2_ID > 0 )
# Generate new attributes: relative speeds (subject minus neighbour) and
# gap headways (bumper-to-bumper, hence the half-length corrections).
all_vehicles_around <- all_vehicles_around[order(all_vehicles_around$subject_ID, all_vehicles_around$Frame),]
all_vehicles_around <- mutate(all_vehicles_around, right_speed_diff = subject_speed - right_speed,
                              leading_speed_diff = subject_speed - leading_speed, head2_speed_diff = subject_speed - head2_speed,
                              back_speed_diff = subject_speed - back_speed,
                              right_headway = right_Local_Y - subject_Local_Y - (subject_length + right_length)/2,
                              leading_headway = leading_Y - subject_Local_Y - (subject_length + leading_length)/2, head2_headway = head2_Y - subject_Local_Y - (subject_length + head2_length)/2,
                              back_headway = subject_Local_Y - back_Y - (subject_length + back_length)/2)
# Lagged accelerations of the subject (previous 1..4 frames) as model features,
# and the next-frame acceleration as the prediction target.
# NOTE(review): `lag`/`lead` here resolve to dplyr::lag / dplyr::lead (dplyr is
# attached); stats::lag would behave differently -- confirm load order.
# Lag boundaries across different subject vehicles are cleaned up by the
# batching loop further below.
all_vehicles_around <- transform(all_vehicles_around, subject_Acc_1 = lag(subject_Acc))
all_vehicles_around <- transform(all_vehicles_around, subject_Acc_2 = lag(subject_Acc_1))
all_vehicles_around <- transform(all_vehicles_around, subject_Acc_3 = lag(subject_Acc_2))
all_vehicles_around <- transform(all_vehicles_around, subject_Acc_4 = lag(subject_Acc_3))
all_vehicles_around <- transform(all_vehicles_around, traget_value = lead(subject_Acc))
# Final column selection (note: "traget_value" is a typo kept for downstream
# compatibility).
all_vehicles_around <- subset(all_vehicles_around[,c("subject_ID","Frame","subject_Lane","subject_Local_Y",
                                                     "right_ID","right_Local_Y",
                                                     "subject_speed", "subject_Acc", "leading_ID", "follower_ID",
                                                     "right_speed", "right_Acc",
                                                     "leading_Y", "leading_speed", "leading_Acc",
                                                     "head2_ID", "head2_Y", "head2_speed", "head2_Acc",
                                                     "back_Y","back_speed", "back_Acc",
                                                     "right_speed_diff", "leading_speed_diff", "head2_speed_diff","back_speed_diff",
                                                     "right_headway", "leading_headway", "head2_headway", "back_headway",
                                                     "subject_Acc_1", "subject_Acc_2", "subject_Acc_3", "subject_Acc_4", "traget_value"),])
#************************* clean up between chanaging vehicle or disrupted periods
# Keep only contiguous windows of `batch_size` frames that belong to a single
# subject vehicle, have 4 valid preceding acceleration lags (hence i starts
# at 5) and a valid target frame immediately after the window.
all_vehicles_batch <- data.frame(
  subject_ID =as.numeric() ,Frame=as.numeric(),subject_Lane=as.numeric(),subject_Local_Y=as.numeric(),
  right_ID =as.numeric() ,right_Local_Y =as.numeric(),
  subject_speed =as.numeric(), subject_Acc=as.numeric(), leading_ID=as.numeric(), follower_ID=as.numeric(),
  right_speed=as.numeric(),right_Acc=as.numeric(),
  leading_Y=as.numeric(), leading_speed=as.numeric(), leading_Acc=as.numeric(),
  head2_ID=as.numeric(), head2_Y=as.numeric(), head2_speed=as.numeric(), head2_Acc=as.numeric(),
  back_Y=as.numeric(),back_speed=as.numeric(), back_Acc=as.numeric(),
  right_speed_diff=as.numeric(), leading_speed_diff=as.numeric(),
  head2_speed_diff=as.numeric(),back_speed_diff=as.numeric(),
  right_headway=as.numeric(), leading_headway=as.numeric(),
  head2_headway=as.numeric(), back_headway=as.numeric(),
  subject_Acc_1=as.numeric(), subject_Acc_2=as.numeric(), subject_Acc_3=as.numeric(),
  subject_Acc_4=as.numeric(), traget_value=as.numeric())

batch_size <- 120
i <- 5
while (i < nrow(all_vehicles_around) - batch_size){
  if (all_vehicles_around$subject_ID[i] == all_vehicles_around$subject_ID[(i+ batch_size -1)] & #vehicle has not changed within a batch
      all_vehicles_around$Frame[i]== all_vehicles_around$Frame[i-4] + 4 & #remove previous 4 acceration movement between batches
      all_vehicles_around$Frame[i+ batch_size-1] == all_vehicles_around$Frame[i+ batch_size] -1 & # taget value adjustment
      all_vehicles_around$Frame[i]== (all_vehicles_around$Frame[i+batch_size-1] - batch_size + 1) # no interruption inside a batch
  ){
    # Copy the accepted window row by row.
    # NOTE(review): rbind() in a loop is O(n^2); kept to preserve behaviour.
    for (j in 0:(batch_size-1)){
      all_vehicles_batch <- as.data.frame(rbind(all_vehicles_batch, all_vehicles_around[i+j,]))
    }
    # Jump past the accepted window (the trailing i+1 advances one more).
    i <- i+ batch_size-1
  }
  i <- i +1
}
# Vehicles in lane #1 (HOV lane): keep only batches whose subject drove in
# lane 1, drop the Lane column, export, and inspect feature correlations.
vehicle_lane_HOV <- subset(all_vehicles_batch[,c("subject_ID","Frame","subject_Local_Y",
                                                 "right_ID","right_Local_Y",
                                                 "subject_speed", "subject_Acc", "leading_ID", "follower_ID",
                                                 "right_speed", "right_Acc",
                                                 "leading_Y", "leading_speed", "leading_Acc",
                                                 "head2_ID", "head2_Y", "head2_speed", "head2_Acc",
                                                 "back_Y","back_speed", "back_Acc",
                                                 "right_speed_diff", "leading_speed_diff", "head2_speed_diff","back_speed_diff",
                                                 "right_headway", "leading_headway", "head2_headway", "back_headway",
                                                 "subject_Acc_1", "subject_Acc_2", "subject_Acc_3", "subject_Acc_4", "traget_value"),],
                           all_vehicles_batch$subject_Lane == 1)
# save the file (semicolon-separated)
write.table(vehicle_lane_HOV,"vehicle_lane_HOV.csv", sep=";")

# Initial correlations and plotting: pairwise Pearson correlation over
# complete observations only, exported and rendered as an upper-triangle plot.
correlation_data <- vehicle_lane_HOV[,c(
  "subject_Local_Y",
  "right_Local_Y",
  "subject_speed", "subject_Acc",
  "right_speed", "right_Acc",
  "leading_Y", "leading_speed", "leading_Acc",
  "head2_Y", "head2_speed", "head2_Acc",
  "back_Y","back_speed", "back_Acc",
  "right_speed_diff", "leading_speed_diff", "head2_speed_diff","back_speed_diff",
  "right_headway", "leading_headway", "head2_headway", "back_headway",
  "subject_Acc_1", "subject_Acc_2", "subject_Acc_3", "subject_Acc_4", "traget_value")]
correlation_results <- cor(as.matrix(correlation_data),use = "complete.obs")
write.table(correlation_results,"correlation_results_HOV.csv")
# Diverging red-white-teal palette for the correlation plot.
col<- colorRampPalette(c("#f8766d", "white", "#00bfc4"))(20)
corrplot(correlation_results, type="upper", col=col, tl.col="black",addCoef.col = "#737373", number.font=1, number.digits=1)
toc()
|
8e98a6329993ab34fdf2b10d9a6e46cf9a34ac26
|
32864d98d0a48323dce8a57bede4f84fc89b13dd
|
/blog/2015/may252015/unkown.R
|
8bf75bcffdc9615dc763bb7760f75b29a0476055
|
[] |
no_license
|
darrkj/darrkj.github.io
|
5666cd0d5ed75657fc59696aee0bd1dd8a7eefad
|
afdd9f7fbab2c0941b4a3ddfbbbc29fbafda4f08
|
refs/heads/master
| 2020-05-22T01:47:14.054218
| 2018-09-09T18:36:45
| 2018-09-09T18:36:45
| 24,395,464
| 2
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,274
|
r
|
unkown.R
|
# msm (Jackson 2011) handles Multi-State Models for panel data;
install.packages('msm')
library(msm)
library(dplyr)
library(lubridate)
library(markovchain)

# Capital Bikeshare trip history (one row per rental).
bike <- read.csv('2014-Q2-Trips-History-Data.csv')

# Keep and rename the relevant columns: start/end timestamps, start/end
# terminals, bike identifier and subscriber type.
bike %>% select(sdate = Start.date, edate = End.date,
                sterm = Start.terminal, eterm = End.terminal,
                bike = Bike., type = Subscriber.Type) -> bik

# Parse the "month/day/year hour:minute" timestamp strings.
bik$sdate <- mdy_hm(bik$sdate)
bik$edate <- mdy_hm(bik$edate)
#' Build the time-ordered trip sequence of one bike, inserting synthetic
#' "align" rows wherever the bike was relocated (i.e. a trip starts at a
#' terminal different from where the previous trip ended).
#'
#' Reads the trip data frame `bik` from the enclosing environment.
#' @param id bike identifier (value of bik$bike)
#' @return data frame of trips plus align rows, ordered by start date
clean <- function(id) {
  x <- bik[bik$bike == id, ]
  x <- x[order(x$sdate), ]
  # Seed row, dropped below -- only used so rbind() has matching columns.
  t <- x[1, ]
  # BUG FIX: with a single trip, 1:(nrow(x) - 1) iterated over c(1, 0) and
  # indexed with 0, crashing on `if (logical(0))`. Guard + seq_len() avoids it.
  if (nrow(x) > 1) {
    for (i in seq_len(nrow(x) - 1)) {
      if (x$eterm[i] != x$sterm[i + 1]) {
        # Place the synthetic relocation halfway between the two trips.
        dt <- x$edate[i] + difftime(x$sdate[i + 1], x$edate[i]) / 2
        t <- rbind(t, data.frame(sdate = dt, sterm = x$eterm[i],
                                 edate = dt, eterm = x$sterm[i + 1],
                                 bike = x$bike[1], type = 'align'))
      }
    }
  }
  t <- t[-1, ]  # drop the seed row
  xx <- rbind(x, t)
  xx[order(xx$sdate), ]
}
# Cleaned trip sequence for the first bike in the dataset.
xx <- clean(bik$bike[1])
y <- xx[, c('sterm', 'type')]

# Transition-count matrix between consecutive start terminals.
# BUG FIX: the original called createSequenceMatrix(x$sterm, ...), but `x`
# only exists inside clean(); the cleaned data is `xx`.
sequenceMatr <- createSequenceMatrix(xx$sterm, sanitize = FALSE)

# Split the terminal sequence at the synthetic 'align' relocation points so
# each element of z is one uninterrupted chain of rides.
ind <- c(1, which(y$type == 'align'))
z <- list()
# seq_len() is safe when there are no align rows (length(ind) == 1).
for (i in seq_len(length(ind) - 1)) {
  z[[i]] <- y$sterm[ind[i]:ind[i + 1]]
}

#cbind(x$sterm[-1], x$eterm[-nrow(x)])
#x$sterm[-1] == x$eterm[-nrow(x)]
#
|
88a8a8ab040e07375cc6be4c019cee69d25c396f
|
2b96dda01e284f5df6c5f749272249ff3b1c26cd
|
/ch03/ex3-1.r
|
7fd21a0702d2af540ebe6f2ee755c0808831232f
|
[] |
no_license
|
freebz/The-Art-of-R-Programming
|
f9a82b42fedebc5ef0f10bfd132bdcad27600d83
|
6e51856512b945455810b1ad686bae58f0779ff7
|
refs/heads/master
| 2021-06-05T03:27:00.519420
| 2016-08-12T05:01:24
| 2016-08-12T05:01:24
| 65,524,551
| 1
| 1
| null | null | null | null |
UTF-8
|
R
| false
| false
| 228
|
r
|
ex3-1.r
|
# 3.1 Creating matrices

# Column-major fill (the default).
y <- matrix(c(1, 2, 3, 4), nrow = 2, ncol = 2)
y

# ncol can be inferred from the data length.
y <- matrix(c(1, 2, 3, 4), nrow = 2)
y

# Second column of the matrix.
y[, 2]

# Allocate first, then assign element by element.
y <- matrix(NA_real_, nrow = 2, ncol = 2)
y[1, 1] <- 1
y[2, 1] <- 2
y[1, 2] <- 3
y[2, 2] <- 4
y

# Row-major fill via byrow = TRUE.
m <- matrix(c(1, 2, 3, 4, 5, 6), nrow = 2, byrow = TRUE)
m
|
b545f108fe8a14f584919f7233e289b4af805657
|
f668e02a08ad08b4f5a5ca1ab7a2057e1a56e25f
|
/man/en_generation_inst_gen_cap_agg.Rd
|
3c93a324e8d8a9cd42cf9967b5cbd192beef5f19
|
[] |
no_license
|
ktaranov/entsoeapi
|
b2e9d5b5d4966c74eacc9deae13eb54bf765ba57
|
0cf92df963949d8fa4df07b506de5fc3fbde2569
|
refs/heads/master
| 2022-12-23T15:11:28.282633
| 2020-09-24T21:11:44
| 2020-09-24T21:11:44
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 529
|
rd
|
en_generation_inst_gen_cap_agg.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/en_generation.R
\name{en_generation_inst_gen_cap_agg}
\alias{en_generation_inst_gen_cap_agg}
\title{Get aggregated installed generation capacity from Entsoe}
\usage{
en_generation_inst_gen_cap_agg(eic, year, security_token = NULL)
}
\arguments{
\item{eic}{Energy Identification Code}

\item{year}{Year for which the installed generation capacity is requested}

\item{security_token}{Security token}
}
\description{
Get aggregated installed generation capacity from Entsoe
}
\examples{
library(tidyverse)
library(entsoeapi)
}
|
137feb1bc39012571102dad268275be536e93879
|
a3e83ec811b3ab800e11bc3ad732d3e50583e0f0
|
/scripts/06b.1_KCCA.extractclusters.R
|
0e9c5e8b6cd899ea730118f31fcaa824a5def2cd
|
[] |
no_license
|
aditiq/CSC-lupien
|
d007120a474c88f384f8356b2f52fc4a6b6a1f63
|
a5c1a128cef421d302ae1941b828ddb79122de24
|
refs/heads/master
| 2021-05-02T00:39:37.467197
| 2019-01-03T22:45:51
| 2019-01-03T22:45:51
| 120,949,008
| 0
| 0
| null | 2018-02-09T19:56:53
| 2018-02-09T19:52:39
| null |
UTF-8
|
R
| false
| false
| 12,899
|
r
|
06b.1_KCCA.extractclusters.R
|
#--------------------------------------------------------------------------------------------------------------------------------
#--------------------------------------------------------------------------------------------------------------------------------
# R-3.4.1
# mordor
# Objective : Extract clusters from KCCA
#--------------------------------------------------------------------------------------------------------------------------------
#--------------------------------------------------------------------------------------------------------------------------------

#----------------------------------------------------------------------------------------------------------------------------------------------------------------------
# load dependencies
#----------------------------------------------------------------------------------------------------------------------------------------------------------------------
library(RColorBrewer)
library(gplots)
library(pheatmap)
library(data.table)

# Colour palettes shared by all heatmaps below.
bluered299 <- colorRampPalette(c("blue","royalblue","aliceblue","brown1","red"))(n=299)  # diverging blue-red, 299 steps
color_scheme <- colorRampPalette(c("white", "#660000"), space = "rgb")(2)               # binary white/dark-red
scalered <- colorRampPalette(c("white", "red"), space = "rgb")(256)                      # white-to-red ramp
hmcols = colorpanel(100, "steelblue", "white", "tomato")                                 # gplots diverging panel
#----------------------------------------------------------------------------------------------------------------------------------------------------------------------
# Extract clusters for Enhancers
#----------------------------------------------------------------------------------------------------------------------------------------------------------------------

#-----------------------------------
# Load binary matrix
#-----------------------------------
# Rows = enhancer regions (keyed chr_start_end), columns 4+ = samples (0/1 calls).
enhmat <- read.table("data/ConsensusSet/PCSC1/PCSC1.Consensus.Catalogue.Enhancer.Binarymat.txt",check.names=F, stringsAsFactors = F, header=T, sep="\t")
rownames(enhmat) <- paste(enhmat[,1], enhmat[,2], enhmat[,3], sep="_")

#-----------------------------------
# Load KCCA dataset
#-----------------------------------
# Provides kcca.cl (flexclust k=100 result) and rownames.obj (region IDs).
load("results/PCSC1/Cluster/KCCA.Flexclust/kccadata/KCCA.Enhancer.100.Rdata")
load("results/PCSC1/Cluster/KCCA.Flexclust/kccadata/KCCA.Enhancer.rownames.100.Rdata")

flexclust.clus <- as.data.frame(kcca.cl$cluster) ## cluster assignment
rownames(flexclust.clus) <- rownames.obj

# Cluster centroids: k rows x sample columns.
cc <- as.matrix(kcca.cl$centers)
colnames(cc) <- colnames(enhmat)[4:ncol(enhmat)]
rownames(cc) <- seq(1, max(kcca.cl$cluster),1)

#-----------------------------------
# Assign groups
#-----------------------------------
# Manual mapping of the 100 centroids to biological groups -- presumably
# chosen by visual inspection of the centroid heatmap (not derivable here).
common <- c(92)
shared <- c(10,74,95,20,6,36,79,38,77,72,8,40,60,94,91,30,28,39,53,57,61,67,49,21,3,69,78,7,71,87,90)
lsc.sp <- c(12,83,68,27,32,31,19,85,18,14,55,46,48,4,22,59,93,100,88,56,70)
gbm.sp <- c(9,29,41,42,45,50,52,73,75,82,63,15,89,81,34,2,11,16,47,24,23,54,17,51,13,86,66,62,64,76,80,96,84,98,99)
pfa.sp <- c(25,97,44,43,58,65,37,33,35, 26, 1,5)
#-----------------------------------
# Plot image
#-----------------------------------
# Centroid heatmap with rows ordered by group; the coloured side bar encodes
# the group of each centroid. Vertical lines at columns 18/41 separate the
# sample batches (meaning assumed from usage -- confirm with sample sheet).
pdf(paste0("results/PCSC1/Cluster/KCCA.Flexclust/ExtractedClusters/K100.Enhancer.Flexclust.pdf"))
heatmap.2(as.matrix(cc[c(common, shared, lsc.sp, gbm.sp, pfa.sp),]),
          col=hmcols, scale="none",
          Rowv=NULL, Colv=NULL,
          trace="none",
          RowSideColors=c(rep("#F1D302",length(common)),rep("pink",length(shared)),rep("#C1292E",length(lsc.sp)),
                          rep("#8CAE68",length(gbm.sp)),rep("#235789",length(pfa.sp))),
          cexRow=0.5,
          add.expr = abline(v=c(18, 41))
)
dev.off()

# Raw image() renders of the same ordered centroid matrix (continuous and
# binarised colour scales).
pdf(paste0("results/PCSC1/Cluster/KCCA.Flexclust/ExtractedClusters/K100.Enhancer.Flexclust.Image.pdf"))
image(t(apply(as.matrix(cc[c(common, shared, lsc.sp, gbm.sp, pfa.sp),]),2,rev)) ,col=scalered)
dev.off()

pdf(paste0("results/PCSC1/Cluster/KCCA.Flexclust/ExtractedClusters/K100.Enhancer.Flexclust.Image.v2.pdf"))
image(t(apply(as.matrix(cc[c(common, shared, lsc.sp, gbm.sp, pfa.sp),]),2,rev)) ,col=c("white","red"))
dev.off()

#-----------------------------------
# Write the groups out
#-----------------------------------
# Per-region table: region ID, centroid number, and assigned group label.
flexclust.clus$id <- rownames(flexclust.clus)
flexclust.clus$FlexClust.group <- ifelse(flexclust.clus[,1] %in% common, "Common",
                                         ifelse(flexclust.clus[,1] %in% shared, "Shared",
                                                ifelse(flexclust.clus[,1] %in% lsc.sp, "LSC",
                                                       ifelse(flexclust.clus[,1] %in% gbm.sp, "GBM",
                                                              ifelse(flexclust.clus[,1] %in% pfa.sp, "PFA",NA)))))
colnames(flexclust.clus) <- c("Flexclust.ClusterNo","id","FlexClust.group")
write.table(flexclust.clus[,c("id","Flexclust.ClusterNo","FlexClust.group")],
            file="results/PCSC1/Cluster/KCCA.Flexclust/ExtractedClusters/FlexClust.Enhancer.Groups.txt", row.names=F, col.names=T, sep="\t", quote=F)
# One raw image per group: all member regions (rows) x samples (columns).
for ( f in names(table(flexclust.clus$FlexClust.group))) {
  print(f)
  dat <- as.matrix(enhmat[subset(flexclust.clus$id, flexclust.clus$FlexClust.group==f),4:ncol(enhmat)])
  x <- (1:nrow(dat))
  y <- (1:ncol(dat))
  pdf(paste0("results/PCSC1/Cluster/KCCA.Flexclust/ExtractedClusters/",f,".Enhancer.Flexclust.Image.pdf"))
  image(y, x, t(dat), col=c("white","red"), axes=FALSE,xlab="",ylab="",srt=45)
  axis(3, at = 1:ncol(dat), labels=colnames(dat),srt=45,tick=FALSE)
  axis(2, at = 1:nrow(dat), labels=rownames(dat),srt=45,tick=FALSE)
  abline(v=c(18,41))
  dev.off()
}

# Clustered centroid heatmap per group (Spearman-correlation distance,
# Ward linkage). [2:5] skips the first group, "Common" -- presumably because
# it has a single centroid and heatmap.2 cannot cluster one row (confirm).
for ( f in names(table(flexclust.clus$FlexClust.group))[2:5]) {
  print(f)
  pdf(paste0("results/PCSC1/Cluster/KCCA.Flexclust/ExtractedClusters/",f,".Enhancer.Flexclust.heatmap.pdf"))
  heatmap.2(as.matrix(cc[unique(subset(flexclust.clus$Flexclust.ClusterNo, flexclust.clus$FlexClust.group==f)),]),
            col=hmcols, scale="none",
            trace="none",cexRow=0.5,Colv=NULL,
            add.expr = abline(v=c(18, 41)),useRaster=TRUE,
            hclustfun=function(x) hclust(x,method="ward.D2"),
            distfun=function(x) as.dist((1 - cor( t(x), method="spearman" ))))
  dev.off()
}
#----------------------------------------------------------------------------------------------------------------------------------------------------------------------
# Extract clusters for Promoters
#----------------------------------------------------------------------------------------------------------------------------------------------------------------------

#-----------------------------------
# Load binary matrix
#-----------------------------------
# Same structure as the enhancer matrix: rows = promoter regions, cols 4+ = samples.
promat <- read.table("data/ConsensusSet/PCSC1/PCSC1.Consensus.Catalogue.Promoter.Binarymat.txt",check.names=F, stringsAsFactors = F, header=T, sep="\t")
rownames(promat) <- paste(promat[,1], promat[,2], promat[,3], sep="_")

#-----------------------------------
# Load KCCA dataset
#-----------------------------------
# Overwrites kcca.cl / rownames.obj from the enhancer section above.
load("results/PCSC1/Cluster/KCCA.Flexclust/kccadata/KCCA.Promoter.100.Rdata")
load("results/PCSC1/Cluster/KCCA.Flexclust/kccadata/KCCA.Promoter.rownames.100.Rdata")

flexclust.clus <- as.data.frame(kcca.cl$cluster) ## cluster assignment
rownames(flexclust.clus) <- rownames.obj

cc <- as.matrix(kcca.cl$centers)
colnames(cc) <- colnames(promat)[4:ncol(promat)]
rownames(cc) <- seq(1, max(kcca.cl$cluster),1)

## Plot image unsuper -- unsupervised (row-clustered) centroid heatmap, used
## to decide the manual group assignment below.
pdf(paste0("results/PCSC1/Cluster/KCCA.Flexclust/ExtractedClusters/K100.Promoter.Flexclust.unsuper.pdf"))
pheatmap(as.matrix(cc),
         col=hmcols, scale="none",fontsize_row=3.5,
         cluster_rows=TRUE, cluster_cols=FALSE,
         clustering_method="average",
         clustering_distance_rows = "correlation",
         trace="none")
dev.off()

#-----------------------------------
# Assign groups
#-----------------------------------
# Manual centroid-to-group mapping for the promoter run (visual inspection).
common<-c(87,86,81,79,58,17)
shared <-c(94,89,88,84,83,82,80,75,73,72,71,65,64,54,53,52,51,44, 41,30,29,27,26,25,20,19,14,5,3)
lsc.sp <-c(100,97,93,90,85,78,74,69,66,61, 57, 50, 46, 40, 38,28,23,21,16,11,9,8,6,4,2)
gbm.sp <-c(99,98,95,92,76,70,68,67,63,62,60,59, 56,55,49,48,47,45,43,42 , 36,35,34,33,32,24,22,18,15, 10,7,1)
pfa.sp <-c(96,91,77,39,37,31, 13,12)
## 22,35
#-----------------------------------
# Plot image
#-----------------------------------
# Group-ordered centroid heatmap with per-group row side colours; vertical
# lines at columns 18/41 mark the sample-batch boundaries (assumed).
pdf(paste0("results/PCSC1/Cluster/KCCA.Flexclust/ExtractedClusters/K100.Promoter.Flexclust.pdf"))
heatmap.2(as.matrix(cc[c(common, shared, lsc.sp, gbm.sp, pfa.sp),]),
          col=hmcols, scale="none",
          Rowv=NULL, Colv=NULL,
          trace="none",
          RowSideColors=c(rep("#F1D302",length(common)),rep("pink",length(shared)),rep("#C1292E",length(lsc.sp)),
                          rep("#8CAE68",length(gbm.sp)),rep("#235789",length(pfa.sp))),
          cexRow=0.5,
          add.expr = abline(v=c(18, 41))
)
dev.off()

# Raw image() renders of the ordered centroid matrix.
pdf(paste0("results/PCSC1/Cluster/KCCA.Flexclust/ExtractedClusters/K100.Promoter.Flexclust.Image.pdf"))
image(t(apply(as.matrix(cc[c(common, shared, lsc.sp, gbm.sp, pfa.sp),]),2,rev)) ,col=scalered)
dev.off()

pdf(paste0("results/PCSC1/Cluster/KCCA.Flexclust/ExtractedClusters/K100.Promoter.Flexclust.Image.v2.pdf"))
image(t(apply(as.matrix(cc[c(common, shared, lsc.sp, gbm.sp, pfa.sp),]),2,rev)) ,col=c("white","red"))
dev.off()

#-----------------------------------
# Write the groups out
#-----------------------------------
# Per-region table mirroring the enhancer export.
flexclust.clus$id <- rownames(flexclust.clus)
flexclust.clus$FlexClust.group <- ifelse(flexclust.clus[,1] %in% common, "Common",
                                         ifelse(flexclust.clus[,1] %in% shared, "Shared",
                                                ifelse(flexclust.clus[,1] %in% lsc.sp, "LSC",
                                                       ifelse(flexclust.clus[,1] %in% gbm.sp, "GBM",
                                                              ifelse(flexclust.clus[,1] %in% pfa.sp, "PFA",NA)))))
colnames(flexclust.clus) <- c("Flexclust.ClusterNo","id","FlexClust.group")
write.table(flexclust.clus[,c("id","Flexclust.ClusterNo","FlexClust.group")],
            file="results/PCSC1/Cluster/KCCA.Flexclust/ExtractedClusters/FlexClust.Promoter.Groups.txt", row.names=F, col.names=T, sep="\t", quote=F)

# One raw image per group: member regions x samples.
for ( f in names(table(flexclust.clus$FlexClust.group))) {
  print(f)
  dat <- as.matrix(promat[subset(flexclust.clus$id, flexclust.clus$FlexClust.group==f),4:ncol(promat)])
  x <- (1:nrow(dat))
  y <- (1:ncol(dat))
  pdf(paste0("results/PCSC1/Cluster/KCCA.Flexclust/ExtractedClusters/",f,".Promoter.Flexclust.Image.pdf"))
  image(y, x, t(dat), col=c("white","red"), axes=FALSE,xlab="",ylab="",srt=45)
  axis(3, at = 1:ncol(dat), labels=colnames(dat),srt=45,tick=FALSE)
  axis(2, at = 1:nrow(dat), labels=rownames(dat),srt=45,tick=FALSE)
  abline(v=c(18,41))
  dev.off()
}

# Clustered centroid heatmap per group (Spearman distance, average linkage).
# Unlike the enhancer section, [1:5] includes "Common" -- it has multiple
# centroids here, so clustering is possible.
for ( f in names(table(flexclust.clus$FlexClust.group))[1:5]) {
  print(f)
  pdf(paste0("results/PCSC1/Cluster/KCCA.Flexclust/ExtractedClusters/",f,".Promoter.Flexclust.heatmap.pdf"))
  heatmap.2(as.matrix(cc[unique(subset(flexclust.clus$Flexclust.ClusterNo, flexclust.clus$FlexClust.group==f)),]),
            col=hmcols, scale="none",
            trace="none",cexRow=0.5,Colv=NULL,
            add.expr = abline(v=c(18, 41)),useRaster=TRUE,
            hclustfun=function(x) hclust(x,method="average"),
            distfun=function(x) as.dist((1 - cor( t(x), method="spearman" ))))
  dev.off()
}
#----------------------------------------------------------------------------------------------------------------------------------------------------------------------
# Extract clusters for CSC.ESC
#----------------------------------------------------------------------------------------------------------------------------------------------------------------------

#-----------------------------------
# Load binary matrix
#-----------------------------------
# Combined CSC/ESC catalogue: rows = regions, cols 4+ = samples (0/1 calls).
cscescmat <- read.table("data/ConsensusSet/PCSC1/CSC.ESC.Consensus.Catalogue.Binarymat.txt",check.names=F, stringsAsFactors = F, header=T, sep="\t")
rownames(cscescmat) <- paste(cscescmat[,1], cscescmat[,2], cscescmat[,3], sep="_")

#-----------------------------------
# Load KCCA dataset
#-----------------------------------
# Overwrites kcca.cl / rownames.obj from the previous sections.
load("results/PCSC1/Cluster/KCCA.Flexclust/kccadata/KCCA.CSC.ESC.100.Rdata")
load("results/PCSC1/Cluster/KCCA.Flexclust/kccadata/KCCA.CSC.ESC.rownames.100.Rdata")

flexclust.clus <- as.data.frame(kcca.cl$cluster) ## cluster assignment
rownames(flexclust.clus) <- rownames.obj

cc <- as.matrix(kcca.cl$centers)
colnames(cc) <- colnames(cscescmat)[4:ncol(cscescmat)]
rownames(cc) <- seq(1, max(kcca.cl$cluster),1)

## Plot image unsuper -- unsupervised centroid heatmap (rows and columns
## clustered); no manual group assignment exists for this catalogue yet.
pdf(paste0("results/PCSC1/Cluster/KCCA.Flexclust/ExtractedClusters/K100.CSC.ESC.Flexclust.unsuper.pdf"))
pheatmap(as.matrix(cc),
         col=hmcols, scale="none",fontsize_row=3.5,
         cluster_rows=TRUE, cluster_cols=TRUE,
         clustering_method="average",
         clustering_distance_rows = "correlation",
         trace="none")
dev.off()
|
be11ac8bc7cca2779a9221b2bd709f465bb3c212
|
5d92eb9380216f13890280561204d494dca6fc41
|
/part2/20160423/Santander/xgboost.R
|
9469ab71169dec22a9cd9e89c2771cf6f6d0395e
|
[] |
no_license
|
mkhoin/analyticstool
|
4d184e3fca01f6c0cb30e78cd78d66707a3cc5d3
|
6683ea6edb27940450be5f82aebef5423bfb603f
|
refs/heads/master
| 2020-05-14T16:49:12.264450
| 2019-04-09T02:57:04
| 2019-04-09T02:57:04
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,780
|
r
|
xgboost.R
|
# https://www.kaggle.com/zfturbo/santander-customer-satisfaction/xgb-lalala
# BUG FIX: install.packages("xgboost", "Matrix") passed "Matrix" as the second
# positional argument `lib` (an install *path*), so the Matrix package was
# never installed. Packages must be supplied as one character vector.
install.packages(c("xgboost", "Matrix"))
library(xgboost)
library(Matrix)

# Fixed seed for reproducible subsampling inside xgboost.
set.seed(4321)

train <- read.csv("./train.csv")
test <- read.csv("./test.csv")
# train <- read.csv("../input/train.csv")
# test <- read.csv("../input/test.csv")
# Transform var38 to log10 scale.
train[["var38"]] <- log10(train[["var38"]])

# Drop the ID column, keeping the test IDs for the submission file.
test.id <- test[["ID"]]
train[["ID"]] <- NULL
test[["ID"]] <- NULL

# Pull the target out of the training frame.
train.y <- train[["TARGET"]]
train[["TARGET"]] <- NULL
# Number of zero-valued entries in a vector (used as a per-row feature).
count0 <- function(x) sum(x == 0)
# New feature n0: number of zero-valued cells per row.
train$n0 <- apply(train, 1, FUN=count0) # for a matrix 1 indicates rows, 2 indicates columns
test$n0 <- apply(test, 1, FUN=count0)
##### Removing constant features
# A column with a single unique value carries no information; drop it from
# both train and test (the decision is made on the training data only).
# length(unique(train[[1]]))
cat("\n## Removing the constants features.\n")
for (f in names(train)) {
  if (length(unique(train[[f]])) == 1) {
    cat(f, "\n")
    train[[f]] <- NULL
    test[[f]] <- NULL
  }
}
##### Removing identical features
# For every unordered pair of columns, mark the second column for removal when
# its values are identical to the first. O(p^2) over column pairs -- slow but
# deliberate for this feature count.
# combn(letters[1:4], 2)
features_pair <- combn(names(train), 2, simplify = F)
toRemove <- c()
for(pair in features_pair) {
  f1 <- pair[1]
  f2 <- pair[2]

  if (!(f1 %in% toRemove) & !(f2 %in% toRemove)) {
    # BUG FIX: all(a == b) returns NA when either column contains NA, and a
    # bare if (NA) raises an error; isTRUE() treats NA as "not identical".
    if (isTRUE(all(train[[f1]] == train[[f2]]))) {
      cat(f2, "\n")
      toRemove <- c(toRemove, f2)
    }
  }
}
# (x <- c(sort(sample(1:20, 9)), NA))
# (y <- c(sort(sample(3:23, 7)), NA))
# union(x, y)
# intersect(x, y)
# setdiff(x, y)
# setdiff(y, x)
# setequal(x, y)

# Keep every column except the duplicated ones found above.
feature.names <- setdiff(names(train), toRemove)
train <- train[, feature.names]
test <- test[, feature.names]

# Re-attach the target so the model-matrix formula below can reference it.
train$TARGET <- train.y

# Construct a sparse model ("design") matrix from a formula and data frame.
train <- sparse.model.matrix(TARGET ~ ., data = train)

# Construct the xgb.DMatrix from the sparse matrix and label vector.
dtrain <- xgb.DMatrix(data=train, label=train.y)
# Only the training set is watched (no held-out evaluation set).
watchlist <- list(train=dtrain)
watchlist <- list(train=dtrain)
param <- list( objective = "binary:logistic",
booster = "gbtree",
eval_metric = "auc",
eta = 0.02,
max_depth = 7,
subsample = 0.68,
colsample_bytree = 0.7
)
clf <- xgb.train( params = param,
data = dtrain,
nrounds = 571,
verbose = 2,
watchlist = watchlist,
maximize = FALSE
)
test$TARGET <- -1
test <- sparse.model.matrix(TARGET ~ ., data = test)
preds <- predict(clf, test)
submission <- data.frame(ID=test.id, TARGET=preds)
cat("saving the submission file\n")
write.csv(submission, "submission.csv", row.names = F)
|
5717b579429c95e40580b6ef579c1189a3fef746
|
25393b604343ae34d5c27775cb2c10fd805bda62
|
/R/zzz.R
|
500a588cc7cd70a936bbc7ae8816c5363108da71
|
[] |
no_license
|
cran/staRt
|
e8530c433ac920e2dcf2b7682da3f001379cb393
|
06ba0e414b9b0f6d87276ffee3b5449233ceb87c
|
refs/heads/master
| 2021-01-21T05:02:05.004723
| 2007-10-12T00:00:00
| 2007-10-12T00:00:00
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 3,166
|
r
|
zzz.R
|
# Package load hook: attaches 'tools' so that list_files_with_type() (used
# unqualified in .onAttach below) is on the search path.
# NOTE(review): requireNamespace("tools") + tools:: qualification would be the
# cleaner modern pattern, but .onAttach currently relies on 'tools' being
# attached -- change both together.
.onLoad<-function(libname,pkgname){
  require("tools",quietly=TRUE)
}
# Package attach hook: prints a start-up banner with the DESCRIPTION metadata,
# namespace / compiled-code status, available vignettes, and a short
# "getting started" cheat sheet.
.onAttach <- function(libname, pkgname) {
  #library.dynam(pkgname,pkgname,lib.loc=libname)

  # Silence warnings while printing; restore the user's previous setting on
  # exit (the original hard-set warn=0, clobbering any user preference, and
  # left warn=-1 in place if an error occurred mid-banner).
  old_opts <- options(warn = -1)
  on.exit(options(old_opts), add = TRUE)

  cat("\n")
  cat("---------------------------------------------------------------------------------\n")
  # .find.package() is defunct in current R; find.package() is the documented
  # replacement with the same package/lib.loc arguments.
  pkg.info <- drop(read.dcf(file = file.path(find.package(package = pkgname, lib.loc = libname), "DESCRIPTION")))
  # Print every DESCRIPTION field except the last two.
  ndescr <- length(pkg.info) - 2
  for (i in seq_len(ndescr))
    cat(paste(names(pkg.info[i]), ":", sep = ""), pkg.info[i], "\n")
  cat("=================================================================================\n")
  if (packageHasNamespace(package = pkgname, package.lib = libname))
    cat("NAMESPACE:", "yes", "\n")
  else
    cat("NAMESPACE:", "no", "\n")
  if (any(names(getLoadedDLLs()) == pkgname))
    cat("C/Fortran:", "yes", "\n")
  else
    cat("C/Fortran:", "no", "\n")
  cat("=================================================================================\n")
  filename <- file.path(find.package(package = pkgname, lib.loc = libname), "doc")
  # Qualified call: works even when 'tools' is not attached to the search path.
  result <- tools::list_files_with_type(filename, "vignette")
  if (length(result) == 0) {
    cat(paste(pkg.info["Package"], " not contains Vignettes.", sep = ""))
    cat("\n")
  }
  if (length(result) > 0) {
    cat(paste(pkg.info["Package"], " contains Vignettes.", sep = ""))
    cat("\nType", sQuote(paste("vignette(package=\"", pkg.info["Package"], "\")", sep = "")),
        "to list the Vignettes.")
    cat("\nType", sQuote(paste("vignette(topic=\"name\",package=\"",
                               pkg.info["Package"], "\")", sep = "")), "to view the Vignette.")
    cat("\nYou can choose name in:\n")
    # List the PDF vignettes and print their base names (without ".pdf").
    filename2 <- file.path(find.package(package = as.character(pkgname), lib.loc = libname), "doc")
    allv <- list.files(filename2, "pdf")
    number <- length(allv)
    for (i in seq_len(number)) {
      myfile <- paste(filename2, list.files(filename2, "pdf")[i], sep = "/")
      miastringa <- unlist(strsplit(myfile, split = "/"))[length(unlist(strsplit(myfile, split = "/")))]
      miasottostringa <- substr(miastringa, 1, nchar(miastringa) - 4)  # strip ".pdf"
      cat(miasottostringa, "\n")
    }
  }
  cat("=================================================================================\n")
  cat("Type", sQuote(paste("help(package=\"", pkg.info["Package"], "\")", sep = "")),
      "or", sQuote(paste("package?", pkg.info["Package"], sep = "")), "to get started.\n")
  cat("Type", sQuote(paste("data(package=\"", pkg.info["Package"], "\")", sep = "")),
      "to view a list of data frames.\n")
  cat("Type", sQuote(paste("ls(\"package:", pkg.info["Package"], "\")", sep = "")),
      "to view a list of functions.\n")
  cat("Type", sQuote(paste("citation(package=\"", pkg.info["Package"], "\")", sep = "")),
      "to view the bibiography.\n")
  cat("Type", sQuote(paste("detach(package:", pkg.info["Package"], ")", sep = "")),
      "to remove it from the search() path.\n")
  cat("---------------------------------------------------------------------------------\n")
  cat("\n")
  return(invisible(0))
}
# Detach hook: prints a farewell message naming the package, which is the
# last component of the library path.
.Last.lib <- function(libname) {
  #library.dynam.unload(pkgname,libpath=libname)
  parts <- strsplit(libname, split = "/")[[1]]
  pkgname <- parts[length(parts)]
  writeLines(paste("Thank you to use", sQuote(pkgname), "package. See you."))
}
|
637669f6fac6feedf3e6e523626c3ac1f4e7dc1b
|
fc85f0333c547417034309ecb031e80c15e32717
|
/19Decembre.R
|
23d762214ad42ebe12c7ca3bbdc85eb4325b75c1
|
[] |
no_license
|
Evargalo/AdventOfCode2019
|
140d30c403586719b5b0268f0a1f1a0162dfc3dc
|
098ecc4db020f4ead8139c396763321799f252c5
|
refs/heads/master
| 2020-09-26T14:40:29.187725
| 2020-01-20T14:37:25
| 2020-01-20T14:37:25
| 226,275,418
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,346
|
r
|
19Decembre.R
|
#########################
#  Advent Of Code 2019  #
#     19 Decembre       #
######################
# Day 19: an intCode program reports, for a coordinate pair (x, y), whether
# that point lies inside a tractor beam (output 1) or outside it (output 0).
require(dplyr)
require(stringr)
require(tidyr)
require(purrr)
require(purrrlyr)
require(ggformula)
# Provides intCode(), the puzzle's virtual machine (project-local file).
source("intCode.R")
library(readr)
# Puzzle input: one line of comma-separated intCode instructions.
X19DecembreInput <- read_delim("19DecInput.txt", delim= ",",
                               escape_double = FALSE, col_names = FALSE)
# Flatten the single-row data frame into a plain numeric vector of opcodes.
(X19DecembreInput %>% gather)$value->inputVect
###########
# Part 1
###########
# Count beam-covered points in the 50x50 grid with corners (0,0)..(49,49).
l<-50
intCode(inputVect,c(0,0))
grid<-data.frame(x=rep(0:l-1,times=l),y=rep(0:l-1,each=l),t=0)
# calcBeam: run the intCode program for one coordinate; returns 1 inside the
# beam, 0 outside (per the program's output).
calcBeam<-function(x,y){
  intCode(inputVect,c(x,y))$output
}
calcBeam(7,3)
calcBeam(0,0)
# Evaluate the beam at every grid point (rowwise: intCode is not vectorized).
grid %>% rowwise %>% mutate(t=calcBeam(x,y)) -> grid
sum(grid$t)
# 215
# Visual check of the beam shape.
grid %>% gf_tile(y~x,fill=~as.factor(t))
grid %>% gf_tile(y~x,color =~as.factor(t))
###########
# Part 2
###########
# Find the closest 100x100 square that fits entirely inside the beam.
# Strategy: coarse-to-fine search. For a candidate top-left (xMin, yMin),
# the square fits when both opposite corners at offset +99 are in the beam.
xMin<-500
yMin<-500
# Coarse pass: advance x in steps of 100.
trouve<-FALSE
while(!trouve){
  xMin<-xMin+100
  # Slide y down until (xMin, yMin) re-enters the beam.
  while(!calcBeam(xMin,yMin)){
    yMin<-yMin+15
  }
  print(c("xMin=",xMin," yMin=",yMin))
  if(calcBeam(xMin,yMin+99)){
    yMin2<-yMin
    while(!calcBeam(xMin+99,yMin2)){
      yMin2<-yMin2+10
    }
    print(c("xMin=",xMin," yMin2=",yMin2))
    trouve<-calcBeam(xMin,yMin2+99)
  }
}
# One solution:
calcBeam(11300,12755)
calcBeam(11300,12854)
calcBeam(11399,12755)
# "xMin=" "8100"  " yMin2=" "9175"
# [1] "xMin=" "5100"  " yMin2=" "5820"
calcBeam(5100,5820)
calcBeam(5100,5919)
calcBeam(5199,5820)
# [1] "xMin="  "900"    " yMin2=" "1120"
calcBeam(900,1120)
calcBeam(900,1219)
calcBeam(999,1120)
xMin<-500
yMin<-500
# Medium pass: same search with x steps of 10 to tighten the bound.
trouve<-FALSE
while(!trouve){
  xMin<-xMin+10
  while(!calcBeam(xMin,yMin)){
    yMin<-yMin+15
  }
  print(c("xMin=",xMin," yMin=",yMin))
  if(calcBeam(xMin,yMin+99)){
    yMin2<-yMin
    while(!calcBeam(xMin+99,yMin2)){
      yMin2<-yMin2+10
    }
    print(c("xMin=",xMin," yMin2=",yMin2))
    trouve<-calcBeam(xMin,yMin2+99)
  }
}
# [1] "xMin="  "780"    " yMin2=" "985"
xMin<-750
yMin<-850
calcBeam(750,850)
# Fine pass: step 1 by 1 from just below the medium-pass solution.
trouve<-FALSE
while(!trouve){
  xMin<-xMin+1
  while(!calcBeam(xMin,yMin)){
    yMin<-yMin+1
  }
  print(c("xMin=",xMin," yMin=",yMin))
  if(calcBeam(xMin,yMin+99)){
    yMin2<-yMin
    while(!calcBeam(xMin+99,yMin2)){
      yMin2<-yMin2+1
    }
    print(c("xMin=",xMin," yMin2=",yMin2))
    trouve<-calcBeam(xMin,yMin2+99)
  }
}
# Final answer coordinates: x=772 ; y=975
|
0368a2f5a41b2068fec50559898011ae006ac71f
|
431f33c4b361ce49f70505f3bc923162eb92a062
|
/man/IntLimResults-class.Rd
|
ba95dd4430b59fe80c776c232f4139a7c3199b61
|
[] |
no_license
|
Mathelab/IntLIM
|
17559cdef93768831f6fe45d88783bda02d6017a
|
17c3229139d02de2ded783221323c988222beed3
|
refs/heads/master
| 2022-07-31T03:37:56.881904
| 2022-07-06T11:04:23
| 2022-07-06T11:04:23
| 99,143,369
| 5
| 7
| null | 2018-09-05T21:27:30
| 2017-08-02T17:23:01
|
R
|
UTF-8
|
R
| false
| true
| 846
|
rd
|
IntLimResults-class.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/AllClasses.R
\name{IntLimResults-class}
\alias{IntLimResults-class}
\title{IntLimResults class}
\description{
IntLimResults class
}
\section{Slots}{
\describe{
\item{\code{interaction.pvalues}}{matrix of interaction p-values}
\item{\code{interaction.adj.pvalues}}{matrix of adjusted interaction p-values}
\item{\code{corr}}{matrix of correlations in group 1 and 2}
\item{\code{warnings}}{a message of whether genes and metabolites have 0 standard deviation}
\item{\code{stype}}{column name that represents sample type (by default, it will be used
in the interaction term). Only 2 categories are currently supported.}
\item{\code{outcome}}{outcome is either 'metabolite' or 'gene'}
\item{\code{covar}}{describing additional variables and the class they form}
}}
|
e5a846b7224402a941e5b288a07593cafadb0cae
|
ffdea92d4315e4363dd4ae673a1a6adf82a761b5
|
/data/genthat_extracted_code/hdnom/examples/hdcox.mnet.Rd.R
|
944c3dd5e5362a9455766b00fd1f5bdb10805d6e
|
[] |
no_license
|
surayaaramli/typeRrh
|
d257ac8905c49123f4ccd4e377ee3dfc84d1636c
|
66e6996f31961bc8b9aafe1a6a6098327b66bf71
|
refs/heads/master
| 2023-05-05T04:05:31.617869
| 2019-04-25T22:10:06
| 2019-04-25T22:10:06
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 839
|
r
|
hdcox.mnet.Rd.R
|
# Extracted example code for hdnom::hdcox.mnet (Mnet-penalized Cox models).
library(hdnom)
### Name: hdcox.mnet
### Title: Mnet Model Selection for High-Dimensional Cox Models
### Aliases: hdcox.mnet
### ** Examples
library("survival")
library("rms")
# Load imputed SMART data; only use the first 120 samples
data("smart")
# Predictor matrix: everything except the first two columns (time/event).
x = as.matrix(smart[, -c(1, 2)])[1:120, ]
time = smart$TEVENT[1:120]
event = smart$EVENT[1:120]
# Survival outcome object for the Cox model.
y = Surv(time, event)
# Fit Cox model with Mnet penalty
# (3-fold CV over a small gamma/alpha grid; fixed seed for reproducibility).
fit = hdcox.mnet(
  x, y, nfolds = 3,
  gammas = 3, alphas = c(0.3, 0.8),
  max.iter = 15000, seed = 1010)
# Prepare data for hdnom.nomogram
# (rms needs a datadist object describing the predictor distributions).
x.df = as.data.frame(x)
dd = datadist(x.df)
options(datadist = "dd")
# Generate hdnom.nomogram objects and plot nomogram
# (predicting 2-year overall survival, i.e. pred.at = 730 days).
nom = hdnom.nomogram(
  fit$mnet_model, model.type = "mnet",
  x, time, event, x.df, pred.at = 365 * 2,
  funlabel = "2-Year Overall Survival Probability")
plot(nom)
|
ba9140919e9edf2d288091e556d99e1059e4abd2
|
24b2f1530846fa114bc5cc594b54e30c203302c0
|
/man/ShotSeasonGraphPlayer.Rd
|
02b677c8524cf010f80dd532d204ca74045bef8a
|
[] |
no_license
|
cran/SpatialBall
|
46000fd163a86d64dedaa19c3e145248111971bb
|
d8abd291458ab7ec32f5ca8157751bc1576a26f4
|
refs/heads/master
| 2021-08-30T15:19:42.441068
| 2017-12-18T11:28:54
| 2017-12-18T11:28:54
| 114,638,956
| 2
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 1,032
|
rd
|
ShotSeasonGraphPlayer.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/plots.R
\name{ShotSeasonGraphPlayer}
\alias{ShotSeasonGraphPlayer}
\title{Generates a shot chart for a given player}
\usage{
ShotSeasonGraphPlayer(Seasondata, player, quant = 0.4, type = "PPS")
}
\arguments{
\item{Seasondata}{a data frame with the details of the season}
\item{player}{the name of the player that you want to make a graph of}
\item{quant}{the quantile of shots to be graphed, defaults to 0.4}
\item{type}{either "PPS" for points per shot or "PCT" for percentage}
}
\value{
a shot chart graph
}
\description{
Creates a shot chart for a player on a given season
}
\examples{
data("season2017")
#Examples with several players
ShotSeasonGraphPlayer(season2017, player = "Stephen Curry")
ShotSeasonGraphPlayer(season2017, player = "DeMar DeRozan")
#Examples with percentage instead of points per shot
ShotSeasonGraphPlayer(season2017, player = "Stephen Curry", type = "PCT")
}
\author{
Derek Corcoran <derek.corcoran.barrios@gmail.com>
}
|
d758878fb6208dcb8f607737f2880ff49311a7d6
|
24066ef8e3f3c9631091279481eaffaa346b11df
|
/R/make_TMT_se.R
|
bff8b1d3884dca1b9cd8446a06f13a52dce77102
|
[] |
no_license
|
demar01/protrusionproteome
|
5430c92ae08274f25bb55c2f55795b52fd08fc6d
|
df71e443d1bad93737564866bd50772b924204b6
|
refs/heads/master
| 2023-04-23T23:27:07.713419
| 2021-04-26T16:40:12
| 2021-04-26T16:40:12
| 329,885,253
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 4,871
|
r
|
make_TMT_se.R
|
#' Data.frame to SummarizedExperiment object
#' conversion using file information and user input
#'
#' \code{make_TMT_se} creates a SummarizedExperiment object
#' based on protein table and user's input about experimental design.
#'
#' @param proteins_unique Data.frame,
#' Protein table with unique names annotated in the 'name' column
#' (output from \code{\link{make_unique}()}).
#' @param columns_positions Integer,
#' position of columns that contain experiment quantitative data
#' @param intensities Character,
#' names of columns that contain experiment quantitative data
#' @param time_unit Integer,
#' unit of time in that defines the experiment
#' @param time_span Integer,
#' number of times that the time_unit is repeated at the experimental timepoint
#' @param numerator Character,
#' condition of interest (prot)
#' @param denominator character,
#' condition to make relative (body)
#' @param sep Character,
#' The separator used to parse the column header
#' @return A SummarizedExperiment object
#' with log2-transformed values, normalised to control and median subtracted
#' (by column).
#' @examples
#' if(interactive()){
#' # Load example
#' data <- prot.raw
#' data <- data[data$Reverse != "+" & data$Potential.contaminant != "+" &
#' data$Reverse != "+" ,]
#' data_unique <- make_unique(data, "Gene.names", "Protein.IDs", delim = ";")
#'
#' columns_positions<-str_which(colnames(data_unique),
#' "Reporter.intensity.corrected.(\\d)+.(\\d)")
#' intensities <- colnames(data_unique)[str_which(colnames(data_unique),
#' "Reporter.intensity.corrected.(\\d)+.(\\d)")]
#' # Make SummarizedExperiment
#' se <- make_TMT_se(data_unique,columns_positions,intensities,
#' time_unit=30,
#' time_span=c(1,2,4,8,16),
#' numerator= "prot",
#' denominator= "body",
#' sep = "_")
#'}
#' @import SummarizedExperiment
#' @import stringr
#' @import dplyr
#' @importFrom tidyr unite
#' @importFrom stats median
#' @importFrom stringr str_remove
#' @importFrom lubridate minute
#' @importFrom lubridate minutes
#' @export
make_TMT_se <- function (proteins_unique,
                         columns_positions,
                         intensities,
                         time_unit=30,
                         time_span=c(1,2,4,8,16),
                         numerator= "prot",
                         denominator= "body",
                         sep = "_"){
  # Build a SummarizedExperiment from a TMT protein table:
  # log2-transform intensities, compute numerator-minus-denominator ratios per
  # timepoint, median-centre each column, and attach row/column metadata.
  # Validate argument types/lengths up front so failures are informative.
  assertthat::assert_that(is.data.frame(proteins_unique),
                          is.integer(columns_positions),
                          is.character(intensities),
                          is.numeric(time_unit),
                          length(time_unit) == 1,
                          is.numeric(time_span),
                          is.character(numerator),
                          length(numerator) == 1,
                          is.character(denominator),
                          length(denominator) == 1,
                          is.character(sep),
                          length(sep) == 1)
  # 'name' is created by make_unique() and is required for unique rownames.
  if (any(!c("name") %in% colnames(proteins_unique))) {
    stop("'name' and/or columns are not present in '",
         deparse(substitute(proteins_unique)), "
'.\nRun make_unique() to obtain the required columns",
         call. = FALSE) }
  if (any(!apply(proteins_unique[, columns_positions], 2, is.numeric))) {
    stop("specified 'columns' should be numeric",
         "\nRun make_TMT_se() with the appropriate columns as argument",
         call. = FALSE) }
  # If input is a tibble, convert to data.frame
  if (tibble::is_tibble(proteins_unique))
    proteins_unique <- as.data.frame(proteins_unique)
  ########
  # Select the assay data
  rownames(proteins_unique) <- proteins_unique$name
  raw <- proteins_unique[, columns_positions]
  # Zero intensities are missing measurements, not true zeros.
  raw[raw == 0] <- NA
  raw <- log2(raw)
  # Rename columns to "<minutes>_<condition>", alternating body/prot per
  # timepoint (each timepoint contributes one body and one prot channel).
  colnames(raw) <- str_c(minute(rep(minutes(x = time_unit) *time_span,each=2)),c("body","prot"),sep=sep)
  # Ratio on the log2 scale: numerator channels minus denominator channels.
  raw2<-raw[,str_detect(colnames(raw),str_c(numerator, collapse="|"))]-
    raw[,str_detect(colnames(raw),str_c(denominator, collapse="|"))]
  # Median-centre each column (funs() is deprecated; use a lambda instead).
  raw2 <- raw2 %>%
    mutate_all(~ . - median(., na.rm = TRUE))
  rownames(raw2) <-rownames(raw)
  #####
  # colnames(raw2) <- colnames(raw2) %>% make.names()
  # Row metadata: everything except the assay columns.
  row_data <- proteins_unique[, -columns_positions]
  rownames(row_data) <- row_data$name
  # Column metadata: condition = timepoint (minutes), single replicate.
  col_data <- data.frame(label = colnames(raw2), stringsAsFactors = FALSE) %>%
    mutate(condition = str_extract(label,"(\\d)+"),
           replicate = 1) %>%
    tidyr::unite(ID, condition, replicate, remove = FALSE)
  rownames(col_data) <- col_data$ID
  # Rename assay columns to the constructed IDs so assay and colData agree.
  colnames(raw2)[match(col_data$label, colnames(raw2))] <- col_data$ID
  raw2 <- raw2[, !is.na(colnames(raw2))]
  se <- SummarizedExperiment::SummarizedExperiment(assays = as.matrix(raw2), colData = col_data, rowData = row_data)
  return(se)
}
|
d37418c4925fe5aa720481e5cf7b38233dbfc2fd
|
cdb0244a09bc5ca71faf116644ea60971970b83e
|
/lectures/07-Make-and-Makefiles/make_power_gender_dataset.R
|
aa7a666dc36c7b59620165b11da00ad1d9e80bfc
|
[] |
no_license
|
akantuncch/datasci611
|
c628be109f01e8784cb408210e7826c02ce9538a
|
1ac1d0854e147ff79935c7dc7889934447213fc2
|
refs/heads/main
| 2023-09-04T12:31:08.183903
| 2021-10-27T18:43:01
| 2021-10-27T18:43:01
| 401,401,805
| 0
| 0
| null | 2021-08-30T15:54:35
| 2021-08-30T15:54:35
| null |
UTF-8
|
R
| false
| false
| 766
|
r
|
make_power_gender_dataset.R
|
# Join superhero power and gender tables, then rank each power by how common
# it is within each gender. Writes two derived CSVs.
library(tidyverse);
source("utils.R");
powers <- read_csv("derived_data/deduplicated_powers.csv");
genders <- read_csv("derived_data/gender_data.csv");
# One row per (character, universe, power) with that character's gender.
power_gender <- genders %>%
  inner_join(powers, by=c("character","universe")) %>%
  select(-url,-property_name) %>%
  rename(gender=value) %>%
  write_csv("derived_data/power_gender_data.csv");
# Total number of power rows per gender (denominator for the proportions).
gender_counts <- power_gender %>% group_by(gender) %>% tally(name="total");
# For each (power, gender): p = share of that gender's rows having the power;
# rank powers within each gender from most to least common.
probs <- power_gender %>%
  inner_join(gender_counts, by="gender") %>%
  group_by(power, gender, total) %>%
  summarize(p=length(character)/total[[1]]) %>%
  arrange(gender,desc(p)) %>%
  group_by(gender) %>%
  mutate(rank=seq(length(p))) %>%
  ungroup() %>%
  write_csv("derived_data/power_gender_ranks.csv");
|
564d28d2898e7fb2de31f1f360827e9555a546dd
|
d2ac85674d6812fe3f606094bae82ea089659609
|
/NCANDA/Scripts/growthrate_gamm.R
|
667776e61771d6765cdda29032f888379f7064eb
|
[] |
no_license
|
LabNeuroCogDevel/R03Behavioral
|
2a98e71917b1f35a4affe08298e32f9100df3b93
|
f743b316ac00aa3381eb72ae08c47b3c87891ebf
|
refs/heads/master
| 2020-09-23T07:19:38.313210
| 2019-12-05T22:19:06
| 2019-12-05T22:19:06
| 225,437,014
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 10,681
|
r
|
growthrate_gamm.R
|
ttor <- function(ts, nobs) {
  # Convert a t-statistic to (the magnitude of) a correlation coefficient:
  # r = sqrt(t^2 / (t^2 + df)), with `nobs` used as the df term.
  # Fix: the original referenced `t` (base R's transpose function) instead of
  # the `ts` parameter, and never returned its result visibly.
  sqrt(ts^2 / (ts^2 + nobs))
}
gamm_growthrate <- function(m, agevar, idvar = NULL, n.iterations = 10000,
                            qnt = c(0.025, 0.975)) {
  # Growth-rate confidence intervals for a gamm fit: simulate first
  # derivatives of the age smooth, then summarise them into quantile bands.
  sims <- sim_diff1_from_gamm(m, agevar, idvar, n.iterations = n.iterations)
  out <- ci_from_simdiff1(sims$pred, sims$ages, qnt = qnt)
  out$fit <- sims$fit
  out
}
sim_diff1_from_gamm <- function(m, agevar, idvar=NULL,
                                n.iterations=10000, interval_inc=.1) {
  # Simulate first derivatives of the fitted age smooth from a gamm model.
  # Draws coefficient vectors from the model's posterior (mvrnorm), predicts
  # along a fine age grid, and differences consecutive predictions.
  # Returns list(pred = list of per-draw diff vectors, ages, fit).
  # Work with the mgcv gam component of the gamm fit.
  m<-m$gam
  v <- m$model[, agevar]
  # Prediction grid over the observed age range, step = interval_inc.
  cond_list <- list(seq(min(v), max(v), by=interval_inc))
  pp <- data.frame(a=cond_list[[1]], b=Inf)
  # names should match what went into the model
  names(pp) <- c(agevar, idvar)
  # what if idvar is factor (Inf wont work)
  if (is.null(idvar)) {
    # do nothing. no idvar
  } else if (is.factor(m$model[, idvar])){
    # select idvar with the middle most random effect
    # random effects are coefficents like s(idvar).xxxxx
    # where xxxx is the index of the specific idvar factor name
    idvarpatt <- sprintf("s\\(%s\\)", idvar)
    idvarpatt. <- sprintf("s\\(%s\\).", idvar)
    randeff <- m$coefficients[ grep(idvarpatt, names(m$coefficients)) ]
    medval <- sort(randeff)[floor(length(randeff)/2)]
    med_re_name <- names(which(randeff == medval))
    median_idx <- gsub(idvarpatt., "", med_re_name)
    median_subj <- levels(m$model[, idvar])[as.numeric(median_idx)]
    warning("gam w/factor idvar, ",
            "setting the middle most random effect subject: ",
            median_subj)
    pp[, 2] <- median_subj
    # alternatively, select the first
    # pp[, 2] <- m$model[1, idvar]
  } else {
    warning("predition with continous (non-factor) idvar will give 'Inf' fit")
    # maybe pick middle value instead?
    # pp[, 2] <- mean(m$model[, idvar], na.rm=T)
  }
  # for all covars, pick out the mean
  # (factor covars: first level; see warning below)
  for (cv in find_covars_gam(m$formula, agevar)) {
    x <- m$model[, cv]
    if (is.character(x) || is.factor(x) ){
      warning("gam w/factor covar, setting all sim to the first!")
      y <- x[1]
      # TODO: maybe pracma::Mode ?
    } else {
      y <- mean(x, na.rm=T)
    }
    pp[, cv] <- y
  }
  # Linear-predictor matrix: fit at pp is Xp %*% coef(m).
  Xp <- predict(m, pp, type="lpmatrix")
  mu_beta <- coef(m)
  sigma_Vb <- vcov(m)
  # variance-covariance matrix of the main parameters fitted model
  # used as: a positive-definite symmetric matrix specifying
  # the covariance matrix of the variables.
  # set.seed(10)
  mrand <- MASS::mvrnorm(n.iterations, mu_beta, sigma_Vb)
  # ilink <- family(m)$linkinv
  # ilink<-m$family$linkinv()
  # only want inetercept and agevar
  keep_cols <- grep(paste0("Intercept|", agevar), dimnames(Xp)[[2]], value=T)
  Xp_agevar <- Xp[, keep_cols]
  mrand_agevar <- mrand[, keep_cols]
  # generate a whole bunch of plausable values, get the diff
  # (first diff is NA so each vector aligns with the age grid)
  diffs <- lapply(seq_len(n.iterations), function(i) {
    fit <- m$family$linkinv((Xp_agevar %*% mrand_agevar[i, ]))
    dff <- c(NA, diff(fit))
    return(dff)
  })
  return(list(pred=diffs, ages=pp[, 1], fit=predict(m, pp)))
}
ci_from_simdiff1 <- function(pred, ages, qnt=c(.025, .975)) {
  # Summarise simulated first-derivative draws (`pred`: list of numeric
  # vectors, one per draw) into a per-age mean and quantile CI.
  # Returns a data.frame with columns mean_dff, ages, ci_low, ci_high.
  names(pred) <- seq_along(pred)
  # Rows = draws, columns = ages.
  mm <- t(dplyr::bind_rows(pred))
  mean_dff <- apply(mm, 2, mean)
  ci <- apply(mm, 2, quantile, qnt, na.rm = TRUE)
  colnames(ci) <- ages
  out <- data.frame(mean_dff = mean_dff, ages = ages)
  ci_out <- t(ci)
  dimnames(ci_out)[[2]] <- c("ci_low", "ci_high")
  # (Removed the unreachable exploratory code that followed the original
  # return — it was explicitly labelled "NEVER REACHED".)
  cbind(out, ci_out)
}
gamm_growthrate_plot <-
  function(d, model, ci, agevar, covar, idvar=NULL,
           yvar=as.character(model$formula[2]),
           plotsavename=NA, xplotname="Age", yplotname=yvar,
           draw_maturation=TRUE, draw_points=TRUE, show_all_fill=FALSE,
           ci_plot=TRUE, adjustresids=NULL){
  # Plot a gamm fit (top: fitted trajectory + points; bottom: derivative
  # heatmap from gamm_growthrate()) and combine both into one figure.
  # Returns list(tile=, ageplot=, both=).
  require(ggplot2)
  require(itsadug)
  # Keep both the lme part (random effects) and the gam part of the fit.
  modellme<-model$mer
  model<-model$gam
  # TODO:
  # remove or replace first row mean_dff
  # NA draws weird first color on spectrum
  # make sure we have what we say we want
  if (! "gam" %in% class(model) ) stop("model given must be a gam model!")
  if (! "data.frame" %in% class(d) ) stop("d given must be a data.frame!")
  if (! "data.frame" %in% class(ci) ) stop("ci is not growthrate_gam() output")
  if (! yvar %in% names(model$model) ) stop(yvar, "not in model dataframe!")
  ci$mean_dff_clip <- ci$mean_dff
  # when ci bounds include 0 (different sign), no longer signficant
  ci <- clip_on_sig(ci)
  maturation_pnt <- gam_maturation_point(ci)
  # warn about no matruation point
  if (is.na(maturation_pnt) && draw_maturation) {
    warning("No maturation point!")
    draw_maturation <- FALSE
  }
  # show even unsignficant change in raster if show_all_fill
  fill_column <- ifelse(show_all_fill, "mean_dff", "mean_dff_clip")
  ## setup derivitive raster plot
  deriv_range <- range(ci$mean_dff, na.rm=TRUE)
  tile <-
    ggplot(ci[-1, ]) + # don't plot first row (is NA)
    aes_string(x="ages", y=1, fill=fill_column) +
    geom_raster(interpolate=TRUE) +
    scale_fill_gradient2(
      low = "blue", mid = "white", high = "red",
      midpoint = 0,
      space = "Lab",
      breaks=sort(c(0, deriv_range)), # assumes range covers 0
      limits=deriv_range
    ) +
    xlab(sprintf("\n%s", xplotname))
  # draw dotted line where maturation point is
  if (draw_maturation)
    tile <- tile +
      geom_segment(
        linetype=2, colour="black",
        aes(x=maturation_pnt, xend=maturation_pnt, y=.5, yend=1.5))
  # lunaize the figure
  tile_luna <- lunaize_geomrasterxkeep(tile) +
    theme(text = element_text(size=36))
  # predictions
  modeldata<-data.frame(ydata=model$y, agevar=model$model[, agevar])
  condlist <- list(a=ci$ages)
  names(condlist) <- agevar
  # Compute a representative value `y` for each covariate (mean, or first
  # level for factors). Fix: the original also did `pp[, cv] <- y`, but no
  # `pp` exists in this function — that line has been removed.
  for (cv in find_covars_gam(model$formula, agevar)) {
    x <- model$model[, cv]
    if (is.character(x) || is.factor(x) ){
      warning("gam w/factor covar, setting all sim to the first!")
      y <- x[1]
      # TODO: maybe pracma::Mode ?
    } else {
      y <- mean(x, na.rm=TRUE)
    }
  }
  # Predict the fitted trajectory over the CI age grid, holding covar at y.
  preddata<-data.frame(var=ci$ages, covar=y)
  names(preddata)<-c(agevar, covar)
  yhats <- predict(model, preddata, se.fit=TRUE)
  agepred<-cbind(preddata, yhats$fit, yhats$se.fit)
  names(agepred)<-c(agevar, covar, "fit", "se")
  agepred$CI<-1.96*agepred$se
  ageplot<-
    ggplot(agepred) +
    aes_string(x=agevar, y="fit") +
    # solid bold line for fitted model
    geom_line(colour="black", size=2) +
    # label plot
    ylab(yplotname) +
    xlab(xplotname)
  if (ci_plot) {
    ageplot <- ageplot +
      geom_ribbon(aes(ymin=fit - CI, ymax=fit + CI), alpha=.3)
  }
  # Assemble per-observation data adjusted by random intercept + residual.
  modeldata$resid<-adjustresids
  modeldata$id<-model$model[, idvar]
  covarname<-find_covars_gam(model$formula, agevar)
  modeldata$covar<-model$model[, covarname]
  names(modeldata)[names(modeldata)=="covar"]<-covarname
  modeldata$pred<-model$model[, agevar]
  # Fix: misplaced paren in the original (`names(modeldata=="pred")`) made
  # this rename a no-op, so predict() below could not find the age column.
  names(modeldata)[names(modeldata)=="pred"]<-agevar
  modelranef<-as.data.frame(ranef(modellme)$id)
  modelranef$id<-gsub("1/", "", row.names(modelranef))
  names(modelranef)[names(modelranef)=="(Intercept)"]<-"randint"
  modeldatawithranef<-merge(modeldata, modelranef, by=c("id"))
  modeldatawithranef$yhats<- predict(model, modeldatawithranef)
  modeldatawithranef$adjustoutcome<-modeldatawithranef$yhats+modeldatawithranef$randint+modeldatawithranef$resid
  # individual points for actual data
  # (x=agevar maps the column literally named "agevar" built above)
  if (draw_points) ageplot <- ageplot +
    geom_point(data=modeldatawithranef, aes(y=adjustoutcome, x=agevar), alpha=.2)
  # add connecting lines if we have an idvar
  if (!is.null(idvar) && draw_points)
    ageplot <- ageplot +
      geom_line(data=modeldatawithranef, aes(y=adjustoutcome, group=id), alpha=.2)
  # lunaize main plot
  ageplot_luna<-LNCDR::lunaize(ageplot)+
    theme(text = element_text(size=36),
          axis.title.x=element_blank(),
          axis.text.x=element_blank())
  # save to file if we have plotsavename
  g <- gam_growthrate_plot_combine(ageplot_luna, tile_luna, plotsavename)
  list_of_plots <- list(tile=tile_luna, ageplot=ageplot_luna, both=g)
  # give back everything we created
  return(list_of_plots)
}
#' combine age plot and tile slope heatmap into one figure (w/grob and grid)
#'
#' @description combine the two figures into a single grob (call this directly only if you need to adjust the titles first)
#' @export
#' @param ageplot_luna ggplot plot of subject coef by age (top part of figure)
#' @param tile_luna tile heatmap of slope (bottom part of figure)
#' @param PDFout PDF name to save output into, NA no saved, NULL not plotted
#' @examples
#' data <- data.frame(age=1:100,fd_mean=1:100,subj=as.factor(letters[1:25]), conn_ahpc_vmpfc=randu[1:100,1])
#' mod<-mgcv::gam(conn_ahpc_vmpfc~s(age)+s(fd_mean)+s(subj, bs="re"), data=data)
#' ci<-LNCDR::gam_growthrate(mod, 'age', n = 10000, qnt = c(0.025, 0.975), idvar='subj')
#' plist <- gam_growthrate_plot(data, mod, ci, 'age', idvar='subj')
#' plist$tile <- plist$tile + xlab('AGE')
#' g <- gam_growthrate_plot_combine(plist$ageplot, plist$tile, 'gammod.pdf')
gam_growthrate_plot_combine <- function(ageplot_luna, tile_luna, PDFout=NA) {
  require(grid)
  require(gridExtra)
  # Stack the age plot on top of the derivative tile, aligned by column.
  g <- rbind(ggplotGrob(ageplot_luna), ggplotGrob(tile_luna), size="first")
  panels <- g$layout$t[grep("panel", g$layout$name)]
  # Age plot takes nearly all vertical space; the tile is a thin strip.
  g$heights[panels] <- unit(c(1, .1), "null")
  # PDFout semantics: NULL = build only (no draw); NA = draw to screen;
  # a filename = save to that pdf.
  if (is.null(PDFout)) {
    return(g)
  }
  if (is.na(PDFout)) {
    grid.draw(g)
  } else {
    # check we are saving pdf
    name_parts <- rev(strsplit(PDFout, "\\.")[[1]])
    if (name_parts[1] != "pdf") stop(PDFout, " must end in .pdf!")
    # draw into pdf
    pdf(PDFout, height = 9, width = 12)
    grid.draw(g)
    dev.off()
  }
  return(g)
}
lunaize_geomrasterxkeep <- function(x) {
  # Strip gridlines, the entire y axis, and the legend from a raster plot,
  # keeping only the x axis visible.
  blank_y <- theme(
    panel.grid.minor = element_blank(),
    panel.grid.major = element_blank(),
    axis.title.y = element_blank(),
    axis.ticks.y = element_blank(),
    axis.text.y = element_blank(),
    legend.position = "none")
  x + theme_bw() + blank_y
}
|
a7774e6d898200a39b9040f8f1ce462f0e6ba047
|
ddc949cee0c036248e373a2c75b8631ec37c57a8
|
/Paired_Analysis.R
|
5d6051016b7040ef147b9d726a726d7496104c39
|
[] |
no_license
|
soulj/Dunnetal2015
|
963fc7dff5afefdbd3e8171525dbda294e8a2267
|
0deebe41e4bfccc8cc7599db7c4030a8c0591b9b
|
refs/heads/master
| 2021-01-06T20:37:21.946061
| 2015-09-07T09:55:07
| 2015-09-07T09:55:07
| 42,040,110
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 27,239
|
r
|
Paired_Analysis.R
|
#script for analysis of paired OA damaged vs undamaged data
#Define the working directory
setwd("~/Testing123/PairedOAAnalysis/")
#Do you want to visualise the networks in cytoscape?
visualise = FALSE
#load the required libraries
library("DESeq2")
library("gdata")
library("data.table")
library("lattice")
library("RColorBrewer")
library("gridExtra")
library("RUVSeq")
library("EDASeq")
library("gplots")
library("lumi")
library("lumiHumanAll.db")
library("annotate")
library("limma")
library("Matrix")
library("igraph")
library("data.table")
library("RCytoscape") # also librarys cytoscape v2.8 to be open with the Cytoscape RPC plugin active
library("annotate")
library("org.Hs.eg.db")
library("VennDiagram")
library("reshape2")
library("ggplot2")
library("GO.db")
require("goseq")
library("reactome.db")
library("KEGG.db")
#load the pre-processed data to save time
load("./Ensembl2Genes.RData")
load("./PreprocessedCountsMatrix.RData")
#################################
#Differential Expression Analysis
#################################
#caculate fold change and pvalues with DESeq2
colData=data.frame(Patient=as.factor(c(1,1,2,2,3,3,4,4,5,5,6,6,7,7,8,8)),Tissue=as.factor(c("PLC","DMC","PLC","DMC","PLC","DMC","PLC","DMC","PLC","DMC","PLC","DMC","PLC","DMC","PLC","DMC")))
dds<-DESeqDataSetFromMatrix(countData= countsMatrix.complete, colData= colData,design=~Patient + Tissue)
#caclulate the fold changes
dds<-DESeq(dds)
#control for the non-uniform variance in the read counts.
vsd <- varianceStabilizingTransformation(dds)
library("vsn")
colours=c(brewer.pal(12, "Paired"),"lightgrey", "black","#996633", "#663300")
batch=c(18,17,17,16,17,16,17,17,11,11,15,15,15,15,15,15)
ntop=500
intgroup =c("Patient","Tissue")
rv = apply(assay(vsd), 1, var)
select = order(rv, decreasing = TRUE)[seq_len(min(ntop, length(rv)))]
pca = prcomp(t(assay(vsd)[select, ]))
fac = factor(apply(as.data.frame(colData(vsd)[, intgroup, drop = FALSE]),
1, paste, collapse = " : "))
xyplot(PC2 ~ PC1, groups = fac, data = as.data.frame(pca$x), cex = 2, aspect = "iso", col = colours, pch=batch,main = draw.key(key = list(rect = list(col = colours), text = list(levels(fac)), rep = FALSE)))
# Strong Batch effect - will correct with RUVSeq
countsMatrix2<- sapply(countsMatrix.complete,as.numeric)
rownames(countsMatrix2)=rownames(countsMatrix.complete)
filter<- apply(countsMatrix.complete, 1 , function(x)length(x[x>10]) >=8)
filtered<-countsMatrix.complete[filter,]
filtered=as.matrix(filtered)
dds<-DESeqDataSetFromMatrix(countData=filtered, colData= colData,design=~Patient + Tissue)
#caclulate the fold changes
dds<-DESeq(dds)
res<-as.data.frame(results(dds,contrast=c("Tissue","DMC","PLC")))
geneRank=res[order(abs(res$pvalue)),]
#take all but the top 5000 genes ranked by differential expression
empirical<- rownames(geneRank)[ which ( !(rownames(geneRank)%in%rownames (geneRank)[1:5000]))]
#Set the number of surrogate variables to be 2 - based on the resulting PCA
set=RUVg(filtered,empirical,k=2)
#Look at the distribution of the batch effect corrected samples
dds<-DESeqDataSetFromMatrix(countData=set$normalizedCounts, colData= colData,design=~Patient + Tissue)
#caclulate the fold changes
dds<-DESeq(dds)
vsd <- DESeq2::varianceStabilizingTransformation(dds)
par(mfrow=c(1,3))
notAllZero <- (rowSums(counts(dds))>0)
ntop=500
intgroup =c("Patient","Tissue")
rv = apply(assay(vsd), 1, var)
select = order(rv, decreasing = TRUE)[seq_len(min(ntop, length(rv)))]
pca = prcomp(t(assay(vsd)[select, ]))
fac = factor(apply(as.data.frame(colData(vsd)[, intgroup, drop = FALSE]),
1, paste, collapse = " : "))
xyplot(PC2 ~ PC1, groups = fac, data = as.data.frame(pca$x), cex = 2, aspect = "iso", col = colours, pch=batch,main = draw.key(key = list(rect = list(col = colours), text = list(levels(fac)), rep = FALSE)))
distsRL<- dist(t ( assay(vsd)))
hmcol <- colorRampPalette(brewer.pal(9, "GnBu"))(100)
mat <- as.matrix(distsRL)
rownames(mat) <- colnames(mat) <- with(colData(dds),
paste(Patient, Tissue, sep=" : "))
heatmap.2(mat, trace="none", col = rev(hmcol), margin=c(13, 13))
# Use the calulated covariantes from RUVSeq to identify the differentially expressed genes
colData=data.frame(Patient=as.factor(c(1,1,2,2,3,3,4,4,5,5,6,6,7,7,8,8)),W_1=set$W[,1],W_2=set$W[,2],Tissue=c("PLC","DMC","PLC","DMC","PLC","DMC","PLC","DMC","PLC","DMC","PLC","DMC","PLC","DMC","PLC","DMC"))
dds<-DESeqDataSetFromMatrix(countData=filtered, colData= colData,design=~Patient + W_1+ W_2 + Tissue)
#caclulate the fold changes
dds<-DESeq(dds)
res<-as.data.frame(results(dds,contrast=c("Tissue","DMC","PLC")))
geneRank=res[order(-abs(res$log2FoldChange)),]
geneRank=merge(geneRank,Genes,by.x="row.names",by.y="ID")
geneRank=geneRank[order(-abs(geneRank$log2FoldChange)),]
normalisedCounts=counts(dds, normalized =TRUE)
geneRank=merge(geneRank,normalisedCounts,by.y="row.names",by.x="Row.names")
geneRank=geneRank[order(-abs(geneRank$log2FoldChange)),]
geneRank$FoldChange<-2^geneRank$log2FoldChange
geneRank$FoldChange<-ifelse(geneRank$FoldChange < 1, -1/geneRank$FoldChange,geneRank$FoldChange)
write.table(geneRank,file="./Results/DESeq2Results.tab",row.names=F,col.names=T,sep="\t",quote=F)
#Just the differentially expressed genes
DEGs=geneRank[abs(geneRank$log2FoldChange)>=0.58,]
DEGs=DEGs[DEGs$padj<=0.1,]
DEGs<-na.omit(DEGs)
DEGs<-DEGs[,c(1,8,25,6,7)]
DEGs<-DEGs[ !duplicated(DEGs$Gene.name),]
colnames(DEGs)[1]<-"Ensembl Gene ID"
write.table(DEGs,file="./Results/DESeq2ResultsDEGs.tab",row.names=F,col.names=T,sep="\t",quote=F)
#split the ID
transcriptIDs<- do.call('rbind', strsplit(as.character(geneRank$Row.names),'.',fixed=TRUE))
########################################
#Pathway enrichment analysis with GOSeq
#########################################
genes=ifelse(geneRank$Gene.name %in% DEGs$Gene.name,1,0)
names(genes)=transcriptIDs[,1]
pwf=nullp(genes,"hg19","ensGene")
# find KEGG pathways
KEGG=goseq(pwf,'hg19','ensGene',test.cats="KEGG")
KEGG$padj=p.adjust(KEGG$over_represented_pvalue,method="BH")
KEGG.sig=KEGG[KEGG$padj<=0.1,]
en2eg=as.list(org.Hs.egENSEMBL2EG)
eg2kegg=as.list(org.Hs.egPATH)
grepKEGG=function(id,mapkeys){unique(unlist(mapkeys[id],use.names=FALSE))}
kegg=lapply(en2eg,grepKEGG,eg2kegg)
resKEGG=within(geneRank, Row.names<-data.frame(do.call('rbind', strsplit(as.character(Row.names), '.', fixed=TRUE))))
resKEGG$Row.names=resKEGG$Row.names$X1
KEGGResults=list()
sigGenes=genes[genes==1]
for ( i in 1:length(KEGG.sig$category)) {
#search kegg for the kegg term of interest and filter by differentially expressed genes
test_term=KEGG.sig$category[i]
index=sapply(kegg,function(x) test_term %in% x)
termIDs=names(index[index=="TRUE"])
sig=termIDs[termIDs %in% names(sigGenes)]
sig=resKEGG[resKEGG$Row.names %in% sig ,]
KEGGResults[[test_term]]=sig$Gene.name
}
# Build the KEGG enrichment results table (pathway name, member genes,
# BH-adjusted p-value) and write it to disk.
xx <- as.list(KEGGPATHID2NAME)
names(KEGGResults)=apply(KEGG.sig,1,function(x) xx[[unlist(x[1])]])
# Collapse each pathway's gene vector into one space-separated string.
KEGGResults=lapply(KEGGResults,function(x) paste(x, sep="", collapse=" ") )
KEGGResults=data.frame(Term= names(KEGGResults),Genes = unlist(KEGGResults),Adj.pvalue=KEGG.sig$padj)
colnames(KEGGResults)[3]=c("Adjusted p-value")
write.table(KEGGResults,file="./Results/KEGGPathways.tab",col.names=T,row.names=F,sep="\t",quote=F)
#map from ensembl to REACTOME (via Entrez gene IDs)
en2eg=as.list(org.Hs.egENSEMBL2EG)
eg2reactome=as.list(reactomeEXTID2PATHID)
grepREACTOME=function(id,mapkeys){unique(unlist(mapkeys[id],use.names=FALSE))}
reactome=lapply(en2eg,grepREACTOME,eg2reactome)
# goseq enrichment against the custom Ensembl->Reactome mapping,
# BH-corrected; keep pathways significant at a 10% FDR.
REACTOME=goseq(pwf,gene2cat=reactome)
REACTOME$padj=p.adjust(REACTOME$over_represented_pvalue,method="BH")
xx <- as.list(reactomePATHID2NAME)
REACTOME$Term=apply(REACTOME,1,function(x) xx[[unlist(x[1])]])
REACTOME.sig=REACTOME[REACTOME$padj<=0.1,]
#work out which proteins are in each category
#reactome holds ENSG to reactome ID
#fix the IDs: strip the version suffix from the Ensembl gene IDs
resReactome=within(geneRank, Row.names<-data.frame(do.call('rbind', strsplit(as.character(Row.names), '.', fixed=TRUE))))
resReactome$Row.names=resReactome$Row.names$X1
reactomeResults=list()
sigGenes=genes[genes==1]
for ( i in 1:length(REACTOME.sig$category)) {
#search reactome for the reactome term of interest and filter by differentially expressed genes
test_term=REACTOME.sig$category[i]
index=sapply(reactome,function(x) test_term %in% x)
termIDs=names(index[index=="TRUE"])
sig=termIDs[termIDs %in% names(sigGenes)]
sig=resReactome[resReactome$Row.names %in% sig ,]
reactomeResults[[test_term]]=sig$Gene.name
}
# Same table layout as the KEGG results above.
names(reactomeResults)=REACTOME.sig$Term
reactomeResults=lapply(reactomeResults,function(x) paste(x, sep="", collapse=" ") )
reactomeResults=data.frame(Term= names(reactomeResults),Genes = unlist(reactomeResults),Adj.pvalue=REACTOME.sig$padj)
#reactomeResults$Genes = sapply(lapply(reactomeResults$Genes, strwrap, width=40), paste, collapse="\n")
colnames(reactomeResults)[3]=c("Adjusted p-value")
write.table(reactomeResults,file="./Results/ReactomePathways.tab",col.names=T,row.names=F,sep="\t",quote=F)
##run phenome Express: find phenotype-linked differentially-expressed
##sub-networks in a PPI network, then (optionally) visualise them in Cytoscape
#source the methods
source("./PhenomeExpress/src/HeterogeneousNetwork.R")
source("./PhenomeExpress/src/RHWN.R")
source("./PhenomeExpress/src/runGIGA.R")
source("./PhenomeExpress/src/runPhenoExpress.R")
#map from ensembl gene to entrez gene to uniprot
#remove the version number on the transcript ID
res=within(res, Row.names<-data.frame(do.call('rbind', strsplit(as.character(rownames(res)), '.', fixed=TRUE))))
colnames(res)[7]="ensembl_gene_id"
res$ensembl_gene_id=res$ensembl_gene_id$X1
egIDs <- stack(mget(na.omit(as.character(res$ensembl_gene_id)), org.Hs.egENSEMBL2EG, ifnotfound = NA))
res=merge(res,egIDs,by.x="ensembl_gene_id",by.y="ind")
#collapse the duplicated entrez IDs based on the highest foldchange
res <- data.table(res)
res=res[, .SD[which.max(abs(log2FoldChange)),], by=values]
res=as.data.frame(res)
#use uniprot and DAVID mapping to get the SwissProt IDs
#Use the David and Uniprot ID maps to match the EntrezID to Swissprot for the PPI network
Young_EnteztoSwiss_via_Uniprot <- read.delim("./GenenamesEntreztoUniprot_via_Uniprot_for8.txt")
Young_EnteztoSwiss_via_Uniprot=Young_EnteztoSwiss_via_Uniprot[,c(1,3)]
colnames(Young_EnteztoSwiss_via_Uniprot)=c("From","To")
Young_EnteztoSwiss_via_David <- read.delim("./GenenamesEntreztoUniprot_via_David_for8.txt", dec=",")
Young_EnteztoSwiss_via_David=Young_EnteztoSwiss_via_David[,1:2]
# union of the two mapping sources, de-duplicated
Young_EnteztoSwiss=rbind(Young_EnteztoSwiss_via_David,Young_EnteztoSwiss_via_Uniprot)
Young_EnteztoSwiss=Young_EnteztoSwiss[!duplicated(Young_EnteztoSwiss),]
dt=merge(res,Young_EnteztoSwiss,by.x="values",by.y="From")
colnames(dt)[9]="name"
#calculate the Pi value for scoring the nodes:
#Pi = |log2FC| * -log10(adjusted p-value)
dt$absFC=abs(dt$log2FoldChange)
dt$logAdjPval=-log10(dt$padj)
dt$Pi=(dt$absFC*dt$logAdjPval)
dt=na.omit(dt)
#load the HumanConsensusDB PPI network
load("./PhenomeExpress/Networks/ConsensusDB_graph.RData")
#filter the network based on expressed genes, keep the largest connected component
presentList=na.omit(match(dt$name,V(ConsensusDB_graph)$name))
OA.network=induced.subgraph(ConsensusDB_graph,presentList)
OA.network=decompose.graph(OA.network)[[1]]
#filter expression table based on genes in the network
presentList=na.omit(match(V(OA.network)$name,dt$name))
dt=dt[presentList,]
#useful to assign the node with the entrez ID as well - for downstream analysis in cytoscape i.e mapping to genenames or functional annotation
V(OA.network)$EntrezID=as.character(dt$values)
#merge all vertices with the same entrezID but with different protein names
IDs=V(OA.network)$EntrezID
names(IDs)=1:length(IDs)
IDs2=match(IDs,IDs)
IDs=IDs[IDs2]
igraph.options(vertex.attr.comb=list(name="first"))
OA.network2=igraph::contract.vertices(OA.network,mapping=as.integer(names(IDs)))
OA.network2=decompose.graph(OA.network2)[[1]]
# rebuild a clean graph from the contracted edge list, preserving edge confidences
OA.network3=get.edgelist(OA.network2)
OA.network3=graph.data.frame(OA.network3,directed=F)
E(OA.network3)$Confidence=E(OA.network2)$Confidence
OA.network=OA.network3
presentList=na.omit(match(V(OA.network)$name,dt$name))
dt=dt[presentList,]
#select the phenotypes from the UberPheno ontology - the Phenomiser tool and manual searching of the ontolgy by relevent keywords is helpful for this
Phenotypes=c("HP:0005086","HP:0001387","MP:0003724","MP:0003436")
set.seed(123)
#run Phenome Express - set inital subnetwork number to 7 to give reasonably sized consensus sub-networks
OAResults=runPhenomeExpress(OA.network,dt,Phenotypes,"Human",max_number=7,sampleSize=10000)
#retrieve the significant sub-networks
subnetworks=OAResults[[1]]
#get the top GO:BP term for each sub-network via goseq (hypergeometric test)
TopGO=list()
for (subnetwork in seq_along(subnetworks)) {
genes=ifelse(dt$name %in% V(subnetworks[[subnetwork]])$name ,1,0)
names(genes)=dt$ensembl_gene_id
genes=genes[unique(names(genes))]
pwf=nullp(genes,"hg19","ensGene",plot.fit=F)
GO.BP=goseq(pwf,"hg19","ensGene",test.cats=c("GO:BP"),method="Hypergeometric")
GO.BP=GO.BP[1, "term"]
TopGO[[subnetwork]]=GO.BP
}
#retrieve the table of p-values and write a per-network summary
sigTable=OAResults[[2]]
sigTable$GO=unlist(TopGO)
names(subnetworks)<-unlist(TopGO)
save(subnetworks,file="./Results/PhenomeExpress_subnetworks.RData")
NetworkSize=sapply(subnetworks, vcount)
sigTable$NetworkSize=NetworkSize
sigTable$Number=1:nrow(sigTable)
colnames(sigTable)=c("Network Number","Empirical p-value","Top GO Biological Process","Network size")
sigTable=sigTable[,c(1,4,2,3)]
write.table(sigTable,file="./Results/PhenomeExpressSummary.tab",row.names=F,col.names=T,sep="\t",quote=F)
# proteins directly annotated to the seed phenotypes (used for node borders below)
z=getHeterogeneousNetwork(OA.network,"Human")[["genePheno"]] # note contains all proteins - including ones not present in network
phenoAnnotated=z[rownames(z) %in% Phenotypes,]
phenoAnnotated=phenoAnnotated[,colSums(phenoAnnotated)>0]
phenoAnnotated=colnames(phenoAnnotated)
#send all the sub-networks from PhenomeExpress to cytoscape
#colours the nodes according to the fold change
#black border if directly annotated to seed phenotype
V(OA.network)$EntrezID=as.character(dt$values)
Genes$ID<- do.call('rbind', strsplit(as.character(Genes$ID),'.',fixed=TRUE))[,1]
dt2=merge(dt,Genes,by.x="ensembl_gene_id",by.y="ID")
Swiss2GeneSymbol<-dt2[,c(9,13)]
# save(Swiss2GeneSymbol,file="./Results/Swiss2GeneSymbol.RData")
if (visualise == TRUE) {
for(i in 1:length(subnetworks)) {
presentList=na.omit(match(V(subnetworks[[i]])$name,V(OA.network)$name))
tempGraph=induced.subgraph(OA.network,presentList)
FC=dt2[na.omit(match(V(tempGraph)$name,dt2$name)),]
V(tempGraph)$logFC=FC$log2FoldChange
V(tempGraph)$GeneSymbol=as.character(FC$Gene.symbol)
V(tempGraph)$GeneName=as.character(FC$Gene.Name)
seedAnnotatedGenes=ifelse(V(tempGraph)$name %in% phenoAnnotated,1,0)
V(tempGraph)$Seed=seedAnnotatedGenes
#do the network creation stuff
#convert the igraph object to a graphNEL object and initialise the attributes
tempGraph.NEL=igraph.to.graphNEL(tempGraph)
tempGraph.NEL=initEdgeAttribute(tempGraph.NEL,"Confidence","numeric",0)
tempGraph.NEL=initEdgeAttribute(tempGraph.NEL,"weight","numeric",0)
tempGraph.NEL=initNodeAttribute(tempGraph.NEL,"logFC","numeric",0)
tempGraph.NEL=initNodeAttribute(tempGraph.NEL,"Seed","numeric",0)
tempGraph.NEL=initNodeAttribute(tempGraph.NEL,"EntrezID","char",0)
tempGraph.NEL=initNodeAttribute(tempGraph.NEL,"GeneSymbol","char",0)
tempGraph.NEL=initNodeAttribute(tempGraph.NEL,"GeneName","char",0)
nodeDataDefaults(tempGraph.NEL, "label") <- "name"
nodeData(tempGraph.NEL,V(tempGraph)$name,"label") = V(tempGraph)$name
tempGraph.NEL=initNodeAttribute(tempGraph.NEL,"label","char","name")
#Open the cytoscape window and send the graph
cw1 <- new.CytoscapeWindow (paste("PhenoExpressadded",as.character(i),sep=""), graph=tempGraph.NEL)
#display the graph
displayGraph (cw1)
#select the layout
layoutNetwork (cw1, layout.name='force-directed')
#colour according to the logFC (green = down, white = 0, red = up)
control.points <- c(-5,0,5)
node.colors <- c ("#00AA00", "#00FF00", "#FFFFFF", "#FF0000", "#AA0000")
setNodeColorRule (cw1, node.attribute.name='logFC', control.points, node.colors, mode='interpolate')
setDefaultBackgroundColor (cw1, '#FFFFFF')
#set the nodeborder to correspond to the seed phenotype annotated genes
data.values <- c ("1", "0")
line.widths = c ("15","1")
setNodeBorderWidthRule (cw1, 'Seed', data.values, line.widths)
}
}
##################################
#Comparison with existing studies
##################################
#compare our results with Ramos et al's results (GSE57218, Illumina/lumi)
#load the raw data as a GEO gset
load("./GSE57218.RData")
lumi.N <- lumiN(gset,method="rsn")
detection <- read.delim("./GSE57218_Non-normalized_data.txt")
rownames(detection)=detection$ID_REF
detection_index=grep("detection",colnames(detection))
detection=detection[detection_index]
# probe counts as "detected" at p<=0.05; keep probes detected in >32 samples
detection=ifelse(detection<=0.05,1,0)
detection=detection[rowSums(detection)>32,]
IDs=rownames(detection)
chipVersion=getChipInfo(lumi.N)[[1]]
IDs=probeID2nuID(IDs,lib.mapping="lumiHumanIDMapping",species="Human",chipVersion=chipVersion)
expressed=IDs[,7]
#convert the probe ID to NUIDs
lumi.N = addNuID2lumi(lumi.N,lib.mapping="lumiHumanIDMapping")
#get the probe ID and intensities for each sample
dataMatrix <- exprs(lumi.N)
dim(dataMatrix)
dataMatrix=dataMatrix[rownames(dataMatrix) %in% expressed,]
#filter out unannotated probes
dataMatrix=dataMatrix[!is.na(getSYMBOL(rownames(dataMatrix), 'lumiHumanAll.db')),]
dataMatrix.complete=dataMatrix
#remove the healthy samples (column indices of the non-paired samples)
dataMatrix=dataMatrix[,-c(1,2)]
dataMatrix=dataMatrix[,-c(68:71)]
dataMatrix=dataMatrix[,-c(29)]
#define the experimental conditions (factors)
#paired design: per patient, OA vs preserved cartilage
sampleType=as.factor(c(rep(c("OA","Perserved"),33)))
sampleType=relevel(sampleType,"Perserved")
patient=as.factor(rep(1:33, each=2))
design <- model.matrix(~patient + sampleType)
#fit the linear model
fit <- lmFit(dataMatrix, design)
#calculate pvalues
fit <- eBayes(fit)
#calculate BH correction p values and store results table
results=topTable(fit,coef="sampleTypeOA",number=Inf)
#Annotate the probes with Entrez gene IDs
genes=as.data.frame(getEG(rownames(results), 'lumiHumanAll.db' ))
colnames(genes)=c("EntrezID")
results=merge(results,genes,by="row.names")
#collapse the duplicated entrez IDs based on the highest foldchange
results <- data.table(results)
results=results[, .SD[which.max(abs(logFC)),], by=EntrezID]
results=as.data.frame(results)
results=results[order(-abs(results$logFC)),]
#Annotate with the genename
genes=as.data.frame(getSYMBOL(as.character(results$Row.names), 'lumiHumanAll.db' ))
colnames(genes)=c("GeneName")
results=merge(results,genes,by.y="row.names",by.x="Row.names")
results=results[order(-abs(results$logFC)),]
resultsRamos<-results
save(resultsRamos,file="./Results/resultsRamos.RData")
#analyse the snelling et al data (Agilent two-colour arrays, limma)
targets <- read.delim("./microarray_results/targets.txt", header=T)
setwd("./microarray_results")
images <- read.maimages(targets,source="agilent")
setwd("..")
# background correct, within/between-array normalise, average replicate probes
images.processed <- backgroundCorrect(images, method="normexp", offset=50)
images.processed=normalizeWithinArrays(images.processed,method="loess")
images.processed <- normalizeBetweenArrays(images.processed, method="quantile")
images.processed <- avereps(images.processed, ID=images.processed$genes$GeneName)
xx <- as.list(org.Hs.egSYMBOL)
# drop control probes and probes without a recognised gene symbol
images.processed=images.processed[images.processed$genes$ControlType!=1,]
images.processed=images.processed[images.processed$genes$GeneName %in% xx,]
design=modelMatrix(targets,ref="undamaged")
fit <- lmFit(images.processed, design)
fit <- eBayes(fit)
results.Snelling=topTable(fit,coef="damaged",number=Inf)
#get the sig genes
#shared thresholds: |log2FC| >= 0.58 (~1.5-fold) and FDR <= 0.1
sigGenesRamos=results[abs(results$logFC)>=0.58,]
sigGenesRamos=na.omit(sigGenesRamos[sigGenesRamos$adj.P.Val<=0.1,])
Oursig=geneRank[abs(geneRank$log2FoldChange)>=0.58,]
Oursig=na.omit(Oursig[Oursig$padj<=0.1,])
Snellingsig=results.Snelling[abs(results.Snelling$logFC)>=0.58,]
Snellingsig=na.omit(Snellingsig[Snellingsig$adj.P.Val<=0.1,])
#three-way Venn diagram of the significant gene lists
vennList=list(ours=Oursig$Gene.name,Ramos=sigGenesRamos$GeneName,Snelling=Snellingsig$GeneName)
venn.diag=venn.diagram(vennList,fill = c("red", "green","lightskyblue"),alpha = c(0.5, 0.5,0.5), cex = 2,cat.fontface = 4,lty =2, fontfamily =3, filename="./Results/Venn.jpg")
#get intersection genes and extract the data from each table to allow plotting
chosenGenes=intersect(sigGenesRamos$GeneName,intersect(Snellingsig$GeneName,Oursig$Gene.name))
chosenGeneTable=merge(Oursig,Snellingsig,by.x="Gene.name",by.y="GeneName")
chosenGeneTable=merge(chosenGeneTable,sigGenesRamos,by.x="Gene.name",by.y="GeneName")
chosenGeneTable <- data.table(chosenGeneTable)
chosenGeneTable=as.data.frame(chosenGeneTable)
chosenGeneTable=data.frame(GeneName=chosenGeneTable$Gene.name,Ours=chosenGeneTable$log2FoldChange,Ramos=chosenGeneTable$logFC.y,Snelling=chosenGeneTable$logFC.x)
chosenGeneTable.long=melt(chosenGeneTable,value.name="logFC")
#per-gene fold-change comparison across all three studies
graph.genes<- ggplot(chosenGeneTable.long,aes(GeneName,logFC,fill=as.factor(variable)))+ geom_bar(position="dodge",stat="identity") + facet_wrap(~GeneName,nrow=3,scales = "free_x") + theme(axis.ticks = element_blank(), axis.title=element_text(size=18,face="bold"), axis.text.x = element_blank(),strip.text.x = element_text(size = 14,face="bold"),legend.position="bottom",legend.text=element_text(size=16,face="bold"),legend.title = element_text(colour="red", size = 18, face = "bold")) + scale_y_continuous(name="log2 Fold Change" ,breaks=seq(-4, 4, 0.5)) + scale_x_discrete(name="Gene Name")
graph.genes<- graph.genes + scale_fill_manual(values= c("red", "green","lightskyblue"),name="",
breaks=c("Ours", "Ramos", "Snelling"),
labels=c("Present Study", "Ramos et al", "Snelling et al")) + theme(panel.background = element_rect(fill = "white",colour = "grey"), plot.background = element_rect(fill = "white",colour = "grey"))
ggsave("./Results/comparison.jpg",width=12,height=9.3)
#genes shared with Ramos only (three-way intersection excluded)
chosen2=intersect(Oursig$Gene.name,sigGenesRamos$GeneName)
chosen2=setdiff(chosen2,chosenGenes)
chosenGeneTable=merge(Oursig,sigGenesRamos,by.x="Gene.name",by.y="GeneName")
chosenGeneTable=data.frame(GeneName=chosenGeneTable$Gene.name,Ours=chosenGeneTable$log2FoldChange,Ramos=chosenGeneTable$logFC)
chosenGeneTable=chosenGeneTable[ chosenGeneTable$GeneName %in% chosen2,]
chosenGeneTable.long=melt(chosenGeneTable,value.name="logFC")
graph.genes<- ggplot(chosenGeneTable.long,aes(GeneName,logFC,fill=as.factor(variable)))+ geom_bar(position="dodge",stat="identity") + facet_wrap(~GeneName,nrow=6,scales = "free_x") + theme(axis.ticks = element_blank(), axis.title=element_text(size=18,face="bold"), axis.text.x = element_blank(),strip.text.x = element_text(size = 14,face="bold"),legend.position="bottom",legend.text=element_text(size=16,face="bold"),legend.title = element_text(colour="red", size = 18, face = "bold")) + scale_y_continuous(name="log2 Fold Change" ,breaks=seq(-4, 4, 0.5)) + scale_x_discrete(name="Gene Name")
graph.genes<- graph.genes + scale_fill_manual(values=c("red", "green"),name="",
breaks=c("Ours", "Ramos"),
labels=c("Present Study", "Ramos et al")) + theme(panel.background = element_rect(fill = "white",colour = "grey"), plot.background = element_rect(fill = "white",colour = "grey"))
ggsave("./Results/comparisonRamos.jpg",width=12,height=9.3)
#genes shared with Snelling only
chosen3=intersect(Oursig$Gene.name,Snellingsig$GeneName)
chosen3=setdiff(chosen3,chosenGenes)
chosenGeneTable=merge(Oursig,Snellingsig,by.x="Gene.name",by.y="GeneName")
chosenGeneTable=data.frame(GeneName=chosenGeneTable$Gene.name,Ours=chosenGeneTable$log2FoldChange,Snelling=chosenGeneTable$logFC)
chosenGeneTable=chosenGeneTable[ chosenGeneTable$GeneName %in% chosen3,]
chosenGeneTable.long=melt(chosenGeneTable,value.name="logFC")
graph.genes<- ggplot(chosenGeneTable.long,aes(GeneName,logFC,fill=as.factor(variable)))+ geom_bar(position="dodge",stat="identity") + facet_wrap(~GeneName,nrow=6,scales = "free_x") + theme(axis.ticks = element_blank(), axis.title=element_text(size=18,face="bold"), axis.text.x = element_blank(),strip.text.x = element_text(size = 14,face="bold"),legend.position="bottom",legend.text=element_text(size=16,face="bold"),legend.title = element_text(colour="red", size = 18, face = "bold")) + scale_y_continuous(name="log2 Fold Change" ,breaks=seq(-4, 4, 0.5)) + scale_x_discrete(name="Gene Name")
graph.genes<- graph.genes + scale_fill_manual(values=c("red", "lightskyblue"),name="",
breaks=c("Ours", "Snelling"),
labels=c("Present Study", "Snelling et al")) + theme(panel.background = element_rect(fill = "white",colour = "grey"), plot.background = element_rect(fill = "white",colour = "grey"))
ggsave("./Results/comparisonSnelling.jpg",width=14,height=9.3)
#intersection pathway analysis: Reactome enrichment of genes shared by at
#least two of the three studies
OursandRamos<-intersect(Oursig$Gene.name,sigGenesRamos$GeneName)
OursandSnelling<-intersect(Oursig$Gene.name,Snellingsig$GeneName)
RamosandSnelling<-intersect(sigGenesRamos$GeneName,Snellingsig$GeneName)
intersectionGenes<-c(OursandRamos,OursandSnelling,RamosandSnelling)
genes=ifelse(geneRank$Gene.name %in% intersectionGenes,1,0)
names(genes)=transcriptIDs[,1]
pwf=nullp(genes,"hg19","ensGene")
#map from ensembl to REACTOME
en2eg=as.list(org.Hs.egENSEMBL2EG)
eg2reactome=as.list(reactomeEXTID2PATHID)
grepREACTOME=function(id,mapkeys){unique(unlist(mapkeys[id],use.names=FALSE))}
reactome=lapply(en2eg,grepREACTOME,eg2reactome)
REACTOME=goseq(pwf,gene2cat=reactome)
REACTOME$padj=p.adjust(REACTOME$over_represented_pvalue,method="BH")
xx <- as.list(reactomePATHID2NAME)
REACTOME$Term=apply(REACTOME,1,function(x) xx[[unlist(x[1])]])
REACTOME.sig=REACTOME[REACTOME$padj<=0.1,]
#work out which proteins are in each category
#reactome holds ENSG to reactome ID
#fix the IDs (strip Ensembl version suffix)
resReactome=within(geneRank, Row.names<-data.frame(do.call('rbind', strsplit(as.character(Row.names), '.', fixed=TRUE))))
resReactome$Row.names=resReactome$Row.names$X1
reactomeResults=list()
sigGenes=genes[genes==1]
for ( i in 1:length(REACTOME.sig$category)) {
#search reactome for the reactome term of interest and filter by differentially expressed genes
test_term=REACTOME.sig$category[i]
index=sapply(reactome,function(x) test_term %in% x)
termIDs=names(index[index=="TRUE"])
sig=termIDs[termIDs %in% names(sigGenes)]
sig=resReactome[resReactome$Row.names %in% sig ,]
reactomeResults[[test_term]]=sig$Gene.name
}
names(reactomeResults)=REACTOME.sig$Term
reactomeResults=lapply(reactomeResults,function(x) paste(x, sep="", collapse=" ") )
reactomeResults=data.frame(Term= names(reactomeResults),Genes = unlist(reactomeResults),Adj.pvalue=REACTOME.sig$padj)
#reactomeResults$Genes = sapply(lapply(reactomeResults$Genes, strwrap, width=40), paste, collapse="\n")
colnames(reactomeResults)[3]=c("Adjusted p-value")
write.table(reactomeResults,file="./Results/IntersectionReactomePathways.tab",col.names=T,row.names=F,sep="\t",quote=F)
|
91a19a0dab27c98649cd07fb772d78533cefd0ba
|
97fcf2ba0b4dc336ebeef80388f73d811e5a0047
|
/models/mod/mod.R
|
8415aa60fc419c083a0114ded60d29cbefd30883
|
[] |
no_license
|
karenenes/selectiveharvesting
|
6d2b6f81fb6bd52c3dd7e72ab248e97b05ea0f1b
|
43946422b1428264ad9b6ca648561ce06bc6e16f
|
refs/heads/master
| 2022-11-01T11:52:21.174126
| 2022-10-22T23:36:23
| 2022-10-22T23:36:23
| 239,534,499
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 692
|
r
|
mod.R
|
library(R6)
##
# MOD: minimal model class implementing the common model interface
# (initialize / isValid / getName / fit / predict).
# fit() and predict() are deliberately unimplemented stubs; they now fail
# with an explicit message instead of a bare stop().
##
MOD <- R6Class("MOD",
  public = list(
    ##
    # Initializes the model instance. The stub keeps no state.
    ##
    initialize = function()
    {},
    ##
    # Reports whether the model is usable. Always TRUE for this stub.
    ##
    isValid = function()
    {
      return (TRUE)
    },
    ##
    # Returns the model's display name.
    ##
    getName = function()
    {
      # sprintf() on a constant string was redundant; return the literal.
      return ("MOD")
    },
    ##
    # Fits the model to data. Not implemented in this stub.
    ##
    fit = function()
    {
      stop("MOD$fit() is not implemented", call. = FALSE)
    },
    ##
    # Predicts responses for input x. Not implemented in this stub.
    ##
    predict = function(x)
    {
      stop("MOD$predict() is not implemented", call. = FALSE)
    }
  ),
  private = list()
)
dc0adaa420d093b26c97aa6169ba5c9162eefdac
|
fb4ee97814efccd540d909a1a19cec8c170646fd
|
/R/19_0_category_reduction_of_NOT_downloaded.R
|
807cb4ea6f9dca916de88c25bf10e3fc1b8c055f
|
[] |
no_license
|
potockan/Text-clustering-basing-on-string-metrics
|
6ba1ac23f5d29a2cf59e8ea57f7ea43985dc614e
|
babdf79db3a0ab875cc8641160fe41533e3ec6e3
|
refs/heads/master
| 2021-01-15T17:46:10.962750
| 2016-05-16T10:52:52
| 2016-05-16T10:52:52
| 25,786,834
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 8,527
|
r
|
19_0_category_reduction_of_NOT_downloaded.R
|
library(rvest)
library(dplyr)
library(stringi)
library(Hmisc)
library(RSQLite)
library(compiler)
# Quote a character vector for use in a SQL statement: double any embedded
# single quotes, then wrap each element in single quotes. Byte-compiled
# because it is called on large vectors.
prepare_string <- cmpfun(function(str) {
  escaped <- stri_replace_all_fixed(str, "'", "''")
  stri_paste("'", escaped, "'")
})
# expand_tree: scrape the parent categories of a Polish-Wikipedia category.
#
# Args:
#   link  - category name in plain text (spaces allowed); it is capitalised
#           and space->underscore converted to form the wiki category URL.
#   level - string prefix (e.g. 'level1') prepended to the returned column
#           names, so results from different tree depths can be combined.
#
# Returns a data_frame with columns "<level>.link" (absolute article URL)
# and "<level>.name" (category display name), or an empty data_frame when
# the page has no category links ("#mw-normal-catlinks li a" matches none).
# NOTE: performs a live HTTP request per call.
expand_tree <- function(link, level) {
#print(link)
#stri_sub(link, 1, 1) <- stri_trans_toupper(stri_sub(link, 1, 1))
tree_nodes <-
link %>%
capitalize() %>%
stri_replace_all_fixed(., " ", "_") %>%
stri_paste("https://pl.wikipedia.org/wiki/Kategoria:", .) %>%
read_html %>%
html_nodes("#mw-normal-catlinks li a")
if (length(tree_nodes) > 0)
data_frame(link =
tree_nodes %>%
html_attr("href") %>%
paste0("http://pl.wikipedia.org", .),
name = html_text(tree_nodes) ) %>%
setNames(names(.) %>% paste(level, . , sep = ".") ) else data_frame()
}
#######################
# Category-tree reduction for categories NOT downloaded in the previous
# pass. Polish identifiers used throughout:
#   brak_sciagnietych = "missing downloads", ciag = "sequence" (chunk
#   bounds), pliki = "files", plik = "file", kategorie = "categories",
#   wspolne = "shared", nowa_kat = "new category", nowsze = "newer".
load("/dragon/Text-clustering-basing-on-string-metrics/Data/RObjects/categories/level6.rda")
print(7)
# chunk boundaries: 3000 rows per chunk
ciag <- c(seq(1, nrow(brak_sciagnietych), by = 3000), nrow(brak_sciagnietych))
# NOTE(review): the bare vector below has no effect - presumably the list of
# chunk indices that failed and were re-run by hand; confirm before removing.
c(13, 25, 27, 31, 35, 39, 42:49)
level61 <- level6[ciag[13]:(ciag[14]-1),]
for(i in c(25, 27, 31, 35, 39, 42:49)){
level61 <- rbind(level61, level6[ciag[i]:(ciag[i+1]-1),])
}
ciag2 <- c(seq(1, nrow(level6), by = 1000), nrow(level6))
########################################################
##################### Downloading ######################
########################################################
# Fetch the parent categories of every missing category from Wikipedia,
# one row at a time; each result is saved to its own level7<i>.rda file so
# a crash or HTTP error loses at most one category.
for(i in 1:nrow(brak_sciagnietych)){
tryCatch({
#level0 <- brak_sciagnietych[ciag[i]:(ciag[i+1]-1),]
level1 <- brak_sciagnietych[i,] %>% do(expand_tree(.$name, 'level1')) %>% ungroup
save(list = 'level1', file = paste0('/dragon/Text-clustering-basing-on-string-metrics/Data/RObjects/categories/level7/level7', i, '.rda'))
}, error = function(e) print(paste("error", i)))
if(i %% 1000 == 0)
print(i)
}
names(brak_sciagnietych)
# Re-assemble the per-category level7 files into one table (level10),
# tagging every parent-category row with the child category it came from.
load("/dragon/Text-clustering-basing-on-string-metrics/Data/RObjects/categories/level7/level71.rda")
level10 <- bind_cols(level1, select(brak_sciagnietych[1,], name))
pliki <- list.files("/dragon/Text-clustering-basing-on-string-metrics/Data/RObjects/categories/level7", pattern = ".rda", full.names = TRUE)
for(i in 2:(length(pliki)-1)){
plik <- pliki[i]
load(plik)
# recover the original row index from the file name
index <- stri_match_last_regex(plik, "/level7(\\d+)\\.rda")[,2]
to_bind <- do.call(what = "rbind", args =
replicate(nrow(level1), select(brak_sciagnietych[as.numeric(index),], name), simplify = FALSE))
level10 <- level1 %>%
bind_cols(to_bind) %>%
bind_rows(level10)
if(i %% 500 == 0)
print(i)
}
save(list = 'level10', file = "/dragon/Text-clustering-basing-on-string-metrics/Data/RObjects/categories/level7/level10.rda")
load("/dragon/Text-clustering-basing-on-string-metrics/Data/RObjects/categories/level10.rda")
# Map each category to its (lower-cased) parent as the reduced category.
level11 <- left_join(level10[,-1], brak_sciagnietych) %>%
select(name, cnt, id_category, level1.name) %>%
rename(nowa_kat = level1.name) %>%
mutate(nowa_kat = stri_trans_tolower(nowa_kat))
level11 <- level11 %>%
select(nowa_kat) %>%
rename(nowa_kat2 = nowa_kat) %>%
bind_cols(level11, .)
load("/dragon/Text-clustering-basing-on-string-metrics/Data/RObjects/categories/stat_all.rda")
# Merge with the already-reduced categories, keeping one reduction per
# (name, cnt, id_category) group.
stat_all <- bind_rows(stat_all, level11) %>%
group_by(name, cnt, id_category) %>%
summarise(nowa_kat = first(nowa_kat), nowa_kat2 = first(nowa_kat2)) %>%
bind_cols(rename(select(., nowa_kat2), nowa_kat3 = nowa_kat2))
# Where a reduced category is itself a known category, follow the chain one
# more step so nowa_kat3 points at the final reduced category.
wspolne <- intersect(stat_all$name, stat_all$nowa_kat2)
for(i in 1:length(wspolne)){
index1 <- which(stat_all$nowa_kat2 == wspolne[i])
index2 <- which(stat_all$name == wspolne[i])
stat_all$nowa_kat3[index1] <- stat_all$nowa_kat3[index2]
}
save(list = 'stat_all', file = "/dragon/Text-clustering-basing-on-string-metrics/Data/RObjects/categories/stat_all_nowsze.rda")
load("/dragon/Text-clustering-basing-on-string-metrics/Data/RObjects/categories/stat_all_nowsze.rda")
stat_all <- stat_all[, -c(6,7)]
# fall back to the category's own name when no reduction was found
stat_all$nowa_kat3 <- ifelse(stat_all$nowa_kat3 == "", stat_all$name, stat_all$nowa_kat3)
# Categories still unmatched: keep only those with more than 15 articles and
# let them map to themselves.
brak_sciagnietych2 <- brak_sciagnietych %>% anti_join(stat_all, by = c("name" = "name"))
brak_sciagnietych2 <- brak_sciagnietych2[brak_sciagnietych2$cnt > 15, ]
brak_sciagnietych2 <- brak_sciagnietych2 %>%
bind_cols(data.frame(nowa_kat = brak_sciagnietych2$name, nowa_kat2 = brak_sciagnietych2$name, nowa_kat3 = brak_sciagnietych2$name))
stat_all <- bind_rows(stat_all, brak_sciagnietych2)
save(list = 'stat_all', file = "/dragon/Text-clustering-basing-on-string-metrics/Data/RObjects/categories/stat_all_nowsze2.rda")
load("/dragon/Text-clustering-basing-on-string-metrics/Data/RObjects/categories/stat_all_nowsze2.rda")
# kategorie <- stat_all$nowa_kat3 %>% unique()
# numery_kat <- dbGetQuery(con, sprintf("select id, name
# from wiki_category_name
# where name in (%s)",
# stri_flatten(prepare_string(kategorie), collapse = ", ")))
# Drop reduced categories whose total article count is 15 or fewer.
stat_all %>% group_by(nowa_kat3) %>% summarise(cnt2 = sum(cnt)) -> stat_all2
stat_all <- stat_all[stat_all$nowa_kat3 %in% stat_all2$nowa_kat3[stat_all2$cnt2 > 15],]
save(list = 'stat_all', file = "/dragon/Text-clustering-basing-on-string-metrics/Data/RObjects/categories/stat_all_nowsze3.rda")
load("/dragon/Text-clustering-basing-on-string-metrics/Data/RObjects/categories/stat_all_nowsze3.rda")
########################################################
#################### Insert into db ####################
########################################################
con <- dbConnect(SQLite(), dbname = "/dragon/Text-clustering-basing-on-string-metrics/Data/DataBase/wiki.sqlite")
source("./R/db_exec.R")
dbExecQuery(con, "CREATE TABLE IF NOT EXISTS wiki_category_after_reduction (
id_new INTEGER NOT NULL,
name_old VARCHAR(256) NOT NULL,
id_old INTEGER NOT NULL,
name_new VARCHAR(256) NOT NULL,
FOREIGN KEY (id_old) REFERENCES wiki_category_name(id)
);")
# Assign a dense integer id to every reduced category.
stat_all$nowa_kat3 %>% unique() -> kategorie
kategorie <- data.frame(nowa_kat3 = kategorie, id_new = 1:length(kategorie))
stat_all <- left_join(stat_all, kategorie)
# Build the SQL VALUES tuples (prepare_string SQL-escapes the names) and
# insert them in batches of 500 rows.
to_insert <- sprintf("(%d, %s, %d, %s)",
stat_all$id_new,
prepare_string(stat_all$name),
stat_all$id_category,
prepare_string(stat_all$nowa_kat3))
to_insert <- split(to_insert,
rep(1:ceiling(length(to_insert)/500),
length.out=length(to_insert)))
lapply(to_insert, function(to_insert) {
dbExecQuery(con, sprintf("INSERT into wiki_category_after_reduction(id_new, name_old, id_old, name_new)
values %s", stri_flatten(to_insert, collapse=",")))
})
dbExecQuery(con, "CREATE TABLE IF NOT EXISTS tmp_category_text_after_reduction (
id_title INTEGER NOT NULL,
id_new_cat INTEGER NOT NULL,
id_old_cat INTEGER NOT NULL,
FOREIGN KEY (id_title) REFERENCES wiki_page(id),
FOREIGN KEY (id_old_cat) REFERENCES wiki_category_name(id)
);")
dbExecQuery(con, "INSERT into tmp_category_text_after_reduction(id_title, id_new_cat, id_old_cat)
select a.id_title, b.id_new, b.id_old
from wiki_unique_category a
join
wiki_category_after_reduction b
on a.id_category = b.id_old
")
dbExecQuery(con, "CREATE TABLE IF NOT EXISTS wiki_category_text_after_reduction (
id INTEGER NOT NULL PRIMARY KEY,
id_title INTEGER NOT NULL,
id_new_cat INTEGER NOT NULL,
id_old_cat INTEGER NOT NULL,
FOREIGN KEY (id_title) REFERENCES wiki_page(id),
FOREIGN KEY (id_old_cat) REFERENCES wiki_category_name(id)
);")
# Keep only article-category links whose reduced category has more than 15
# member links.
dbExecQuery(con, "INSERT into wiki_category_text_after_reduction(id_title, id_new_cat, id_old_cat)
select d.id_title, d.id_new_cat, d.id_old_cat
from (
select c.id_new_cat, count(1) as cnt
from
tmp_category_text_after_reduction c
group by c.id_new_cat) e
join
tmp_category_text_after_reduction d
on e.id_new_cat = d.id_new_cat
where e.cnt > 15")
########### BUG HERE (author's note, translated from Polish): THERE SHOULD ALSO BE A GROUP BY ID_TITLE OR SOMETHING OF THAT KIND !!!! ###########
dbExecQuery(con, "drop table tmp_category_text_after_reduction")
dbDisconnect(con)
########################################
26d23a07bb296aab504ddd594fc2b421e22f147b
|
49928aacb833073fe225ae2cff02f32423e4da34
|
/man/eskin.Rd
|
83ba64945ba4fd0df0e25d2137be102d76bbe842
|
[] |
no_license
|
cran/nomclust
|
a30d00e74894e06fe4a063dadf696143a53aac48
|
63d7ad4664c5fe23ebb8bf1b961ae421b0329229
|
refs/heads/master
| 2023-08-31T05:09:52.306488
| 2023-08-18T10:12:38
| 2023-08-18T11:30:56
| 36,813,240
| 2
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 2,041
|
rd
|
eskin.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/eskin.R
\name{eskin}
\alias{eskin}
\title{Eskin (ES) Measure}
\usage{
eskin(data, var.weights = NULL)
}
\arguments{
\item{data}{A data.frame or a matrix with cases in rows and variables in columns.}
\item{var.weights}{A numeric vector setting weights to the used variables. One can choose the real numbers from zero to one.}
}
\value{
The function returns an object of the class "dist".
\cr
}
\description{
The function calculates a dissimilarity matrix based on the ES similarity measure.
}
\details{
The Eskin similarity measure was proposed by Eskin et al. (2002) and examined by Boriah et al., (2008). It is constructed to assign
higher weights to mismatches on variables with more categories.
}
\examples{
# sample data
data(data20)
# dissimilarity matrix calculation
prox.eskin <- eskin(data20)
# dissimilarity matrix calculation with variable weights
weights.eskin <- eskin(data20, var.weights = c(0.7, 1, 0.9, 0.5, 0))
}
\references{
Boriah S., Chandola V., Kumar V. (2008). Similarity measures for categorical data: A comparative evaluation.
In: Proceedings of the 8th SIAM International Conference on Data Mining, SIAM, p. 243-254.
\cr
\cr
Eskin E., Arnold A., Prerau M., Portnoy L. and Stolfo S. (2002). A geometric framework for unsupervised anomaly detection.
In D. Barbara and S. Jajodia (Eds): Applications of Data Mining in Computer Security, p. 78-100. Norwell: Kluwer Academic Publishers.
}
\seealso{
\code{\link[nomclust]{anderberg}},
\code{\link[nomclust]{burnaby}},
\code{\link[nomclust]{gambaryan}},
\code{\link[nomclust]{goodall1}},
\code{\link[nomclust]{goodall2}},
\code{\link[nomclust]{goodall3}},
\code{\link[nomclust]{goodall4}},
\code{\link[nomclust]{iof}},
\code{\link[nomclust]{lin}},
\code{\link[nomclust]{lin1}},
\code{\link[nomclust]{of}},
\code{\link[nomclust]{sm}},
\code{\link[nomclust]{smirnov}},
\code{\link[nomclust]{ve}},
\code{\link[nomclust]{vm}}.
}
\author{
Zdenek Sulc. \cr Contact: \email{zdenek.sulc@vse.cz}
}
|
29f1d40a2b92dd769417f6c039d892afde83e650
|
d93922bca2e78e91d24e9fb93bdec867bdfe657c
|
/StatisticalDataMining R codes/Assignment1/Problem 1.R
|
78e915c0f490189b6c2b9b399dfb109649696ed6
|
[] |
no_license
|
freyagenesis/Fall2018-codes
|
9476d1f67815a75b7cd97121fca2d926278a5bee
|
e49b02f835cf39b7c172e4759351f6088005a5fe
|
refs/heads/master
| 2020-04-13T21:39:26.875265
| 2018-12-29T00:37:20
| 2018-12-29T00:37:20
| 163,460,866
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 5,163
|
r
|
Problem 1.R
|
##################################################################
# Problem 1
##################################################################
# Exploratory analysis of the UCI student-performance data: merge the
# Maths and Portuguese datasets and visualise first-period grades (G1).
# install some packages
#install.packages("DAAG")
#install.packages("lattice")
#install.packages("MASS")
#install.packages("plyr")
library("DAAG")
library("lattice")
library("MASS")
library("plyr")
## merging the two datasets
d1=read.table("/Users/freyadmello/Desktop/codes/student/student-mat.csv",sep=";",header=TRUE)
d2=read.table("/Users/freyadmello/Desktop/codes/student/student-por.csv",sep=";",header=TRUE)
# join on the demographic attributes that identify a student across files
d3=merge(d1,d2,by=c("school","sex","age","address","famsize","Pstatus","Medu","Fedu","Mjob","Fjob","reason","nursery","internet"))
print(nrow(d3)) # 382 students
## Since the predictive model has to be built on first Period Grades
## The second and third period grades need not be considered
## Other columns such as guardian, traveltime are common across both subjects - Math and Portuguese
## And hence, the ".y"corresponding variables are ignored
# ".x" columns come from the Maths file, ".y" from Portuguese; G1.x/G1.y
# are the first-period grades in each subject
d3 = d3[,c("school", "sex", "age", "address", "famsize", "Pstatus", "Medu",
"Fedu", "Mjob", "Fjob", "reason", "nursery", "internet", "guardian.x",
"traveltime.x", "studytime.x", "failures.x", "schoolsup.x", "famsup.x",
"paid.x", "activities.x", "higher.x", "romantic.x", "famrel.x",
"freetime.x", "goout.x", "Dalc.x", "Walc.x", "health.x", "absences.x",
"G1.x","G1.y")]
print(nrow(d3))
attach(d3)
# Histogram of student count in both schools
par(mfrow = c(1,1))
counts <- table(d3$school)
barplot(counts, xlab="Schools GP and MS", main = "Student count in both schools")
# Strip Plot on the student count of both schools
stripplot(school ~ age, ylab = "Schools", data = d3, main = "Strip Plot on the student count of both schools")
# Grades distribution - subject wise: histogram with kernel density overlay
par(mfrow = c(1,2))
dens <- density(d3$G1.x)
xlim1 <- range(dens$x)
ylim1 <- range(dens$y)
hist(d3$G1.x, breaks = 0 + (0:20)*1, probability = T, xlab = "Grades", xlim = xlim1, ylim = ylim1, main = "Math")
lines(dens)
dens <- density(d3$G1.y)
xlim4 <- range(dens$x)
ylim4 <- range(dens$y)
hist(d3$G1.y, breaks = 0 + (0:20)*1, probability = T, xlab = "Grades", xlim = xlim4, ylim = ylim4, main = "Portuguese")
lines(dens)
# How the age affect the grades - ScatterPlot
# No effect on the grade
par(mfrow = c(1,2))
df_age <- aggregate(G1.x ~ age, d3, mean)
plot(df_age$age, df_age$G1.x, main="Grades vs Age(Math)", xlab="Age", ylab="1st Grades in Math")
df_age <- aggregate(G1.y ~ age, d3, mean)
plot(df_age$age, df_age$G1.y, main="Grades vs Age(Por)", xlab="Age", ylab="1st Grades in Portuguese")
# Removing outlier i.e. the student of ag 22
d3<-d3[!(d3$age>21),]
# How the absences affect the grades - ScatterPlot
aggdf = as.data.frame(df)
par(mfrow = c(1,2))
df <- aggregate(G1.x ~ absences.x, d3, mean)
plot( df$absences.x,df$G1.x, main="Grades vs Absences(Math)", xlab="Absences", ylab="1st Grades in Math")
df <- aggregate(G1.y ~ absences.x, d3, mean)
plot(df$absences.x, df$G1.y, main="Grades vs Absences(Por)", xlab="Absences", ylab="1st Grades in Portuguese")
# Travel vs Study vs Free time
par(mfrow = c(1,3))
travel_dens_m <- density(d3$traveltime.x)
study_dens_m <- density(d3$studytime.x)
free_dens_m <- density(d3$freetime.x)
plot(travel_dens_m, main = "Travel time density")
polygon(travel_dens_m, col="red", border="black")
plot(study_dens_m, main = "Study time density")
polygon(study_dens_m, col="blue", border="black")
plot(free_dens_m, main = "Free time density")
polygon(free_dens_m, col="yellow", border="black")
# Quantity of family relationships vs grades
# Ones with good to excellent levels (4 and 5) have better average grading than the rest.
famrel_avg_m = aggregate(G1.x ~ famrel.x,d3, mean)
famrel_avg_p = aggregate(G1.y ~ famrel.x,d3, mean)
par(mfrow = c(1,2))
barplot(famrel_avg_m$G1.x, names.arg = famrel_avg_m$famrel.x, xlab = "Family Relation", ylab = "Grade average", main="Math")
barplot(famrel_avg_p$G1.y, names.arg = famrel_avg_p$famrel.x, xlab = "Family Relation", ylab = "Grade average", main = "Portuguese")
# Quantity of Study time vs grades
# Students across Math and Portuguese subjects, after investing simillar study time perform
# better in Portuguese
stud_avg_m = aggregate(G1.x ~ studytime.x,d3, mean)
stud_avg_p = aggregate(G1.y ~ studytime.x,d3, mean)
par(mfrow = c(1,2))
barplot(stud_avg_m$G1.x, names.arg = stud_avg_m$studytime.x, xlab = "Study Time", ylab = "Grade average", main="Math")
barplot(stud_avg_p$G1.y, names.arg = stud_avg_p$studytime.x, xlab = "Study Time", ylab = "Grade average", main = "Portuguese")
# Quantity of Going out time vs grades
# Students across Math and Portuguese subjects, after investing simillar travel time perform
# better in Portuguese
goout_avg_m = aggregate(G1.x ~ goout.x,d3, mean)
goout_avg_p = aggregate(G1.y ~ goout.x,d3, mean)
par(mfrow = c(1,2))
barplot(goout_avg_m$G1.x, names.arg = goout_avg_m$goout.x, xlab = "Going out Time", ylab = "Grade average", main="Math")
barplot(goout_avg_p$G1.y, names.arg = goout_avg_p$goout.x, xlab = "Going Time", ylab = "Grade average", main = "Portuguese")
|
e08a2b327ae8432de326a8a32d99606752a92cce
|
0efa8ba6159e0a5aecea978bb45d26b458f53bc5
|
/r/code-snippets/probability.R
|
23f542fea1b55a2dec9e96992360f34ed43feb37
|
[
"MIT"
] |
permissive
|
patrickbucher/docs
|
53368a42736a7b551a651b4b4579c078085ab243
|
a0c7516173f9dee1e94bd41376ef6b2a70ed8a3c
|
refs/heads/master
| 2023-09-01T03:45:05.728392
| 2023-08-29T16:03:23
| 2023-08-29T16:03:23
| 71,139,618
| 14
| 1
| null | 2016-11-17T07:59:43
| 2016-10-17T13:06:57
|
TeX
|
UTF-8
|
R
| false
| false
| 1,647
|
r
|
probability.R
|
game.outcomes <- c(-4, 0, 1, 8) # loose 4, gain 0, 1 or 8
game.probs <- c(0.32, 0.48, 0.15, 0.05)
game.cumprobs <- cumsum(game.probs)
# probability distribution
barplot(game.probs, ylim = c(0, 0.5), names.arg = game.outcomes, space = 0,
xlab = "x", ylab = "Pr(X = x)", main = "Probabilities")
# cumulative probability distribution
barplot(game.cumprobs, names.arg = game.outcomes, space = 0,
xlab = "x", ylab = "P(x <= x)", main = "Cumulative Probabilities")
game.expected <- sum(game.outcomes * game.probs)
game.variance <- sum((game.outcomes - game.expected)^2 * game.probs)
game.sd <- sqrt(game.variance)
cat("expected win of", game.expected, "with a standard deviation of", game.sd)
# playing the lottery (it's not worth it...)
wins <- c(6, 50, 10000, 10e6)
probs <- c(
6/45 * 5/44 * 4/43,
6/45 * 5/44 * 4/43 * 3/42,
6/45 * 5/44 * 4/43 * 3/42 * 2/41,
6/45 * 5/44 * 4/43 * 3/42 * 2/41 * 1/40
)
expectation <- sum(wins * probs) # 1.29, but a lottery ticket costs 2.00
print(expectation - 2)
w <- seq(35, 95, by = 5)
lower.w <- w >= 40 & w <= 65
upper.w <- w > 65 & w <= 90
fw <- rep(0, length(w))
fw[lower.w] <- (w[lower.w] - 40) / 625
fw[upper.w] <- (90 - w[upper.w]) / 625
#plot(w, fw, type = "l", ylab = "f(w)")
abline(h = 0, col = "gray", lty = 2)
fw.specific <- (55.2 - 40) / 625
fw.specific.area <- 0.5 * 15.2 * fw.specific
fw.specific.vertices <- rbind(c(40, 0), c(55.2, 0), c(55.2, fw.specific))
plot(w, fw, type = "l", ylab = "f(w)")
abline(h = 0, col = "gray", lty = 2)
polygon(fw.specific.vertices, col = "gray", border = NA)
abline(v = 55.2, lty = 3)
text(50, 0.005, labels = fw.specific.area)
|
d48ea4c1e515dd502c87140fa06f4703b01a4d4e
|
5459d7af208e247c081930c04978dde5b788a90c
|
/WordnetVerb.r
|
bf787e2be343f811c7cbe829592ab407692b5fa7
|
[] |
no_license
|
Smullle/FinalYearThesis
|
9c49ae53ba4105595864c1eec1a78600f5536366
|
a409624d512d3c63b3c946704abd3ff316032206
|
refs/heads/master
| 2022-05-22T09:30:19.497299
| 2020-04-13T20:23:29
| 2020-04-13T20:23:29
| 255,603,525
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 34,929
|
r
|
WordnetVerb.r
|
# System usage
# s2v
s2v_fist_first <- c(0.43291122,0.6673107,1.0,1.0,0.44664878,0.34282508,0.16701001,1.0,1.0,0.2744395,1.0,0.4215669,1.0,1.0,1.0,0.39310393,1.0,0.26562777,0.3074523,0.21201238,1.0,1.0,0.16701001,1.0,1.0,0.7070686,1.0,1.0,0.51232535,1.0,1.0,1.0,0.4071389,0.3902227,1.0,1.0,1.0,1.0,1.0,1.0,1.0,0.16613178,0.17424479,1.0,1.0,1.0,0.2744395,1.0,0.44204247,0.5835742,0.38675117,0.19608118,1.0,1.0,1.0,1.0,1.0,0.42994255,0.4948531,0.2606651,1.0,0.5352187,1.0,1.0,1.0,1.0,1.0,0.30767587,1.0,1.0,1.0,0.6000189,0.6294347,1.0,1.0,1.0,1.0,1.0,1.0,0.52410877,1.0,0.5825748,0.5742086,1.0,1.0,0.44664878,1.0,1.0,1.0,0.6673107,1.0,1.0,0.44765264,1.0,1.0,1.0,1.0,1.0,0.7147826,0.7007741,1.0,1.0,1.0,1.0,0.62823945,1.0,1.0,1.0,1.0,0.566395,1.0,0.4539894,0.42617464,1.0,0.3755897,1.0,1.0,1.0,1.0,1.0,1.0,1.0,1.0,1.0,1.0,1.0,1.0,1.0,1.0,1.0,0.4215669,1.0,0.19878842,1.0,0.38363132,1.0,1.0,1.0,1.0,1.0,1.0,0.38675117,1.0,1.0,1.0,1.0,1.0,1.0,0.42994255,1.0,1.0,0.15776676,1.0,1.0,1.0,1.0,1.0,1.0,1.0,0.35347822,1.0,1.0,1.0,0.34282508,0.6294347,0.39310393,0.33818808,0.39154255,1.0,0.30928135,1.0,1.0,0.39310393,0.68581426,1.0,0.39310393,1.0,0.42692944,1.0,1.0,1.0,1.0,1.0,1.0,1.0,1.0,1.0,1.0,1.0,1.0,0.7147826,1.0,0.5009553,1.0,1.0,1.0,1.0,0.52410877,1.0,1.0,1.0,1.0,1.0,0.2606651,1.0,0.3755897,1.0,1.0,0.65021455,0.35347822,1.0,0.40690422,1.0,1.0,1.0,0.26738596,1.0,1.0,1.0,0.16613178,1.0,1.0,1.0,0.35376722,1.0,0.76188284,1.0,1.0,0.34282508,1.0,0.6660206,0.51232535,1.0,1.0,1.0,1.0,1.0,1.0,1.0,1.0,1.0,1.0,0.27760592,0.38300672,0.27662027,1.0,0.6206515,1.0,1.0,1.0,1.0,1.0,1.0,0.7147826,1.0,0.13450952,1.0,1.0,1.0,1.0,1.0,1.0,1.0,1.0,1.0,0.2744395,0.27662027,0.4346592,1.0,0.44664878,1.0,1.0,1.0,1.0,1.0,1.0,1.0,1.0,1.0,1.0,1.0,0.34052378,1.0,1.0,1.0,0.24452251,1.0,1.0,1.0,1.0,1.0,0.76188284,1.0,0.51232535,0.7007741,1.0,0.33072048,1.0,0.6000189,0.3401244,1.0,1.0,1.0,1.0,1.0,1.0,1.0,0.16613178,1.0,1.0,0.48014614,1.0,1.0,1.0,0.19878842,1.0,0.2909171,0.4007147,1.0,1.0,0.5467478,1.0,1.0,1.0,0.57491684,1.0,1.0,0.7007741,1.
0,1.0,1.0,1.0,0.7147826,1.0,1.0,1.0,1.0,1.0,0.5824128,1.0,1.0,0.26738596,1.0,1.0,1.0,1.0,1.0,1.0,0.35347822,1.0,1.0,1.0,1.0,1.0,1.0,1.0,1.0,1.0,1.0,0.57710373,1.0,1.0,1.0,1.0,0.5352187,0.52410877,0.380718,0.21201238,1.0,0.19878842,0.6673107,0.4609579,1.0,0.6029006,1.0,1.0,0.76188284,1.0,1.0,1.0,0.33818808,0.26513892,0.5636095,0.19878842,0.16613178,1.0,0.33818808,1.0,1.0,1.0,1.0,1.0,1.0,1.0,1.0,1.0,1.0,1.0,1.0,0.19608118,1.0,1.0,1.0,1.0,0.380718,1.0,1.0,0.48353636,0.6206515,1.0,1.0,1.0,1.0,0.4003312,1.0,1.0,1.0,1.0,0.44765264,1.0,1.0,1.0,1.0,0.43034634,0.35347822,1.0,0.42994255,1.0,1.0,1.0,1.0,1.0,0.4509628,1.0,1.0,1.0,1.0,1.0,0.35347822,1.0,1.0,1.0,0.48353636,0.4007147,1.0,1.0,1.0,0.19878842,1.0,1.0,0.2606651,0.4948531,0.6206515,1.0,1.0,0.380718,1.0,0.37556693,1.0,1.0,1.0,0.33818808,1.0,0.35347822,0.45751613,1.0,1.0,1.0,1.0,1.0,1.0,0.44161415,0.5091485,1.0,1.0,0.32897002,0.16613178,1.0,1.0,1.0,1.0,1.0,1.0,1.0,1.0,1.0,1.0,1.0,1.0,1.0,1.0,0.7070686,1.0,1.0,1.0,1.0,0.7147826,1.0,1.0,1.0,1.0,1.0,1.0,1.0,1.0,1.0,0.6734226,0.2744395,1.0,1.0,1.0,1.0,1.0,0.34282508,1.0,0.45751613,0.24124683,1.0,1.0,1.0,1.0,1.0,1.0,1.0,1.0,1.0,0.6206515,0.17363201,1.0,0.42617464,1.0,1.0,0.6300172,0.19608118,1.0,0.33893037,1.0,1.0,1.0,0.566395,1.0,1.0,0.57710373,1.0,0.3134764,0.3074523,0.27760592,1.0,1.0,1.0,1.0,1.0,1.0,1.0,1.0,1.0,1.0,1.0,0.6124897,0.26513892,1.0,0.40690422,1.0,0.43034634,1.0,1.0,1.0,1.0,1.0,1.0,1.0,1.0,1.0,0.4609579,0.6660206,1.0,1.0,1.0,1.0,1.0,0.6300172,1.0,0.380718,1.0,1.0,1.0,1.0,0.35376722,1.0,1.0,0.27031216,1.0,0.6300172,1.0,1.0,1.0,1.0,1.0,1.0,1.0,1.0,1.0,0.42994255,1.0,1.0,0.5467478,1.0,1.0,1.0,1.0,1.0,1.0,1.0,1.0,1.0,1.0,0.43034634,1.0,0.17424479,0.34282508,0.30767587,1.0,1.0,0.5835742,1.0,0.32897002,0.30767587,1.0,1.0,1.0,0.33072048,1.0,1.0,0.3074523,0.4929211,0.19878842,1.0,0.44765264,1.0,1.0,1.0,1.0,1.0,1.0,0.2606651,0.35376722,1.0,1.0,0.4609579,0.57710373,1.0,1.0,1.0,1.0,1.0,1.0,1.0,1.0,1.0,0.65021455,1.0,1.0,1.0,1.0,1.0,0.42994255,1.0,1.0,0.4609579,0.27760592,
0.6124897,1.0,0.3074523,1.0,1.0,1.0,1.0,1.0,1.0,1.0,1.0,1.0,0.2606651,1.0,1.0,1.0,1.0,0.76188284,0.16701001,1.0,0.27031216,1.0,1.0,1.0,0.5352187,1.0,1.0,1.0,1.0,0.6300172,1.0,0.34308803,1.0,1.0,0.64947075,1.0,1.0,1.0,1.0,1.0,0.33818808,1.0,1.0,1.0,0.44765264,0.57491684,1.0,1.0,1.0,1.0,1.0,0.27031216,1.0,1.0,1.0,1.0,1.0,1.0,1.0,0.7147826,0.2606651,0.21201238,1.0,1.0,1.0,0.35196084,1.0,1.0,1.0,0.64947075,1.0,1.0,1.0,0.35092667,1.0,1.0,1.0,1.0,1.0,1.0,0.6124897,1.0,1.0,1.0,1.0,1.0,0.52410877,1.0,0.39710718,1.0,1.0,1.0,1.0,0.7007741,0.5944236,1.0,1.0,1.0,1.0,1.0,0.35196084,1.0,1.0,0.6660206,1.0,1.0,0.27662027,1.0,1.0,0.380718,1.0,0.6206515,1.0,0.4509628,1.0,1.0,1.0,1.0,1.0,0.44765264,1.0,1.0,1.0,1.0,1.0,1.0,0.27662027,1.0,1.0,1.0,1.0,1.0,0.33818808,1.0)
s2v_fist_last <- c(0.45037517,0.21790777,0.26304266,0.37988713,1.0,0.32250378,1.0,0.26140043,0.36808288,0.4303678,1.0,1.0,0.5882211,0.3091014,0.25928703,1.0,0.50324607,0.49519148,0.5024241,0.12690912,0.623596,1.0,1.0,1.0,1.0,0.20264557,0.623596,1.0,0.30569738,1.0,1.0,1.0,0.20682785,0.5421075,0.4238335,1.0,1.0,0.4199403,1.0,0.25994033,1.0,1.0,0.41867,0.6917147,1.0,0.5203174,0.4303678,1.0,1.0,0.28197166,1.0,0.35433903,0.20216255,1.0,0.22035094,0.20216255,0.30294654,1.0,0.28796464,0.48033,0.2934635,0.8335483,1.0,0.25124916,1.0,0.20588203,1.0,0.36378008,1.0,0.33104572,0.3373526,0.24859054,0.45707762,0.20683424,1.0,0.5273684,0.16750325,0.49316898,0.2774369,1.0,0.46805388,0.44225818,0.31764588,0.5841707,1.0,1.0,1.0,0.776275,1.0,0.21790777,0.48852658,0.5203174,0.35017204,1.0,0.26779795,1.0,1.0,0.42665324,0.26824778,0.2534581,1.0,1.0,0.5273684,1.0,1.0,0.4195244,0.21649031,1.0,1.0,0.55084413,0.38060236,1.0,1.0,1.0,0.50567,0.85521495,1.0,0.36592337,1.0,0.3187317,0.3379542,1.0,0.2934635,1.0,0.33594704,0.42665324,1.0,1.0,1.0,1.0,1.0,1.0,1.0,0.5501343,1.0,1.0,1.0,1.0,0.21024697,1.0,0.25361824,1.0,1.0,0.44095683,0.23147975,1.0,0.6185227,0.3516509,1.0,1.0,1.0,1.0,1.0,0.38043365,0.58852,0.38983104,1.0,1.0,1.0,0.3650402,1.0,0.26140043,0.43295184,0.32250378,0.45707762,1.0,1.0,0.41659942,1.0,0.3348673,1.0,1.0,1.0,1.0,0.20585859,1.0,0.3499345,0.59508127,0.49260107,0.30463517,0.63569236,0.38043365,1.0,0.4238335,0.3516509,0.35426268,0.34106082,1.0,0.21024697,1.0,0.26824778,0.4195244,0.4757074,1.0,0.20509526,0.30294654,1.0,1.0,1.0,0.2574683,0.35426268,1.0,0.22670513,0.48033,0.3091014,0.50567,0.20216255,0.60027564,0.29021683,0.3650402,1.0,0.14596896,0.36592337,0.54027355,0.32039127,1.0,1.0,0.3373526,1.0,1.0,1.0,1.0,0.27828467,0.39796314,1.0,0.38385087,0.3373526,1.0,0.32250378,0.48852658,0.29794803,0.30569738,0.776275,0.21024697,0.280092,1.0,0.38043365,0.23591581,0.26140043,1.0,1.0,1.0,0.30147466,0.37951934,0.32006758,1.0,1.0,1.0,1.0,0.1716088,0.19742435,0.36808288,1.0,0.26824778,0.2545101,
1.0,0.5841707,1.0,0.18821381,1.0,0.16039088,1.0,1.0,1.0,1.0,0.4303678,0.32006758,0.46391273,0.25994033,1.0,1.0,0.38516942,0.4284409,0.37267947,1.0,0.118747555,1.0,0.31887275,1.0,1.0,1.0,1.0,0.45334226,1.0,1.0,0.20436318,1.0,1.0,1.0,1.0,1.0,0.38385087,0.4238335,0.30569738,0.2534581,0.5045958,0.5365064,1.0,0.24859054,1.0,1.0,0.57761014,0.5241808,1.0,0.27828467,0.623596,0.6036641,1.0,1.0,0.15195946,0.3895311,1.0,0.623596,1.0,1.0,0.4324072,1.0,1.0,0.44095683,1.0,0.53233755,0.4324072,1.0,1.0,0.5859699,0.3657326,1.0,0.2534581,0.26140043,1.0,1.0,1.0,0.26824778,0.45054653,1.0,1.0,0.38983104,0.271574,1.0,0.6917147,1.0,1.0,0.5394058,1.0,1.0,1.0,0.60027564,0.21649031,0.3650402,1.0,0.38983104,0.6616458,0.16039088,1.0,0.23591581,1.0,0.5841707,0.34106082,1.0,1.0,1.0,1.0,1.0,1.0,0.8335483,1.0,1.0,0.12690912,1.0,1.0,0.21790777,0.81343776,1.0,1.0,0.25928703,1.0,0.38385087,1.0,0.60027564,0.21649031,1.0,1.0,0.40154797,1.0,1.0,1.0,1.0,1.0,0.3657326,1.0,0.20738605,1.0,1.0,0.44862172,0.3187317,1.0,1.0,0.3808026,1.0,0.35433903,0.60027564,0.6294347,0.15195946,1.0,1.0,1.0,1.0,1.0,1.0,1.0,0.17422085,0.35426268,1.0,1.0,0.5188569,1.0,0.25361824,1.0,0.35017204,1.0,1.0,0.45326453,1.0,1.0,0.3650402,0.15195946,1.0,0.38043365,0.5273684,1.0,1.0,1.0,0.30656576,0.4284409,1.0,1.0,0.3747172,1.0,0.3650402,0.5359372,0.3499345,1.0,1.0,1.0,1.0,1.0,0.15195946,1.0,0.26140043,1.0,0.48033,0.28796464,1.0,1.0,1.0,1.0,1.0,0.33075523,1.0,0.5068076,0.16750325,1.0,1.0,0.3650402,0.6435584,0.26140043,0.3707282,1.0,1.0,1.0,1.0,1.0,0.3766335,0.25124916,0.6616458,1.0,1.0,1.0,0.3747172,1.0,0.19742435,0.3373526,0.3516509,1.0,1.0,1.0,1.0,0.35426268,0.25928703,0.3808026,0.38060236,0.20264557,0.20738605,0.67433524,1.0,0.5359372,0.26824778,1.0,0.44095683,0.25790894,0.6890197,0.25361824,0.39250582,0.36808288,1.0,1.0,1.0,0.4303678,0.3707282,0.57345456,0.26140043,1.0,0.19742435,0.32250378,1.0,0.6435584,1.0,0.2774369,0.3747172,1.0,0.57345456,0.4324072,0.3694386,1.0,1.0,1.0,1.0,0.28566155,1.0,1.0,1.0,0.31238142,1.0,0.35433903,0.3188
7275,0.40726745,1.0,0.27828467,1.0,0.55084413,1.0,1.0,1.0,1.0,0.3509649,0.5024241,0.30147466,0.45054653,0.2774369,1.0,1.0,1.0,1.0,1.0,1.0,0.60027564,0.38983104,0.42665324,1.0,1.0,0.57345456,0.14596896,1.0,1.0,0.5394058,1.0,1.0,0.38983104,1.0,0.23074082,0.23563832,1.0,1.0,0.81343776,0.29794803,0.42665324,1.0,0.280092,0.42665324,1.0,1.0,1.0,1.0,1.0,0.32897002,0.4284409,1.0,0.39796314,1.0,1.0,1.0,0.43295184,1.0,0.32039127,0.3373526,0.3690435,1.0,0.3196927,0.63569236,0.5359372,1.0,0.21024697,1.0,0.43295184,0.2835586,0.53233755,0.63569236,1.0,1.0,1.0,1.0,1.0,1.0,0.4238335,0.4238335,1.0,1.0,1.0,0.41867,0.32250378,0.36378008,1.0,0.3373526,0.28197166,1.0,1.0,0.36378008,0.623596,1.0,0.60027564,0.5365064,0.35971618,1.0,0.5024241,1.0,1.0,1.0,0.35017204,1.0,1.0,0.39250582,0.25361824,1.0,0.21649031,0.48033,0.39796314,0.39392543,0.16039088,0.81343776,1.0,0.24267516,1.0,1.0,0.63569236,1.0,0.30294654,0.25124916,0.31238142,0.2757926,0.29021683,1.0,0.31238142,1.0,1.0,0.1716088,1.0,0.23074082,0.50324607,0.81343776,0.30147466,1.0,0.63569236,0.5024241,1.0,0.21594192,1.0,0.271574,0.23147975,0.30463517,0.20585859,1.0,0.2574683,0.48033,0.597964,0.4003312,1.0,0.32897002,0.38385087,1.0,0.3379542,1.0,1.0,1.0,0.31238142,0.8335483,0.280092,1.0,1.0,0.67433524,1.0,0.21471679,1.0,1.0,0.26779795,0.4516161,1.0,0.5394058,1.0,0.5841707,1.0,1.0,1.0,1.0,0.54027355,0.35017204,0.5859699,1.0,0.49260107,0.6185227,1.0,0.3921083,1.0,0.37267947,0.36592337,1.0,1.0,0.118747555,1.0,0.39250582,0.26824778,0.48033,0.12690912,1.0,0.1716088,0.22035094,1.0,0.5882211,0.21024697,0.597964,0.4516161,0.38516942,0.2574683,1.0,0.29936668,0.3747172,0.42665324,1.0,1.0,1.0,1.0,1.0,1.0,0.4238335,0.49316898,0.4324072,0.15195946,1.0,1.0,0.3066122,0.63569236,0.1716088,0.5882211,0.776275,0.2534581,0.42718083,1.0,0.38983104,0.3187317,0.21594192,0.4284409,1.0,1.0,1.0,0.29794803,1.0,0.42088416,0.32006758,1.0,1.0,1.0,0.26779795,1.0,0.3187317,0.30656576,0.776275,1.0,1.0,0.21594192,0.32897002,0.35017204,0.42665324,0.29937696,1.0,0.5297487,
0.6917147,0.2545101,0.32006758,1.0,1.0,1.0,1.0,0.15195946,1.0,0.42665324)
s2v_last_last <- c(0.45037517,0.21790777,0.26304266,0.37988713,1.0,0.32250378,1.0,0.26140043,0.36808288,0.4303678,1.0,1.0,0.5882211,0.3091014,0.25928703,1.0,0.50324607,0.49519148,0.5024241,0.12690912,0.623596,1.0,1.0,1.0,1.0,0.20264557,0.623596,1.0,0.30569738,1.0,1.0,1.0,0.20682785,0.5421075,0.4238335,1.0,1.0,0.4199403,1.0,0.25994033,1.0,1.0,0.41867,0.6917147,1.0,0.5203174,0.4303678,1.0,1.0,0.28197166,1.0,0.35433903,0.20216255,1.0,0.22035094,0.20216255,0.30294654,1.0,0.28796464,0.48033,0.2934635,0.8335483,1.0,0.25124916,1.0,0.20588203,1.0,0.36378008,1.0,0.33104572,0.3373526,0.24859054,0.45707762,0.20683424,1.0,0.5273684,0.16750325,0.49316898,0.2774369,1.0,0.46805388,0.44225818,0.31764588,0.5841707,1.0,1.0,1.0,0.776275,1.0,0.21790777,0.48852658,0.5203174,0.35017204,1.0,0.26779795,1.0,1.0,0.42665324,0.26824778,0.2534581,1.0,1.0,0.5273684,1.0,1.0,0.4195244,0.21649031,1.0,1.0,0.55084413,0.38060236,1.0,1.0,1.0,0.50567,0.85521495,1.0,0.36592337,1.0,0.3187317,0.3379542,1.0,0.2934635,1.0,0.33594704,0.42665324,1.0,1.0,1.0,1.0,1.0,1.0,1.0,0.5501343,1.0,1.0,1.0,1.0,0.21024697,1.0,0.25361824,1.0,1.0,0.44095683,0.23147975,1.0,0.6185227,0.3516509,1.0,1.0,1.0,1.0,1.0,0.38043365,0.58852,0.38983104,1.0,1.0,1.0,0.3650402,1.0,0.26140043,0.43295184,0.32250378,0.45707762,1.0,1.0,0.41659942,1.0,0.3348673,1.0,1.0,1.0,1.0,0.20585859,1.0,0.3499345,0.59508127,0.49260107,0.30463517,0.63569236,0.38043365,1.0,0.4238335,0.3516509,0.35426268,0.34106082,1.0,0.21024697,1.0,0.26824778,0.4195244,0.4757074,1.0,0.20509526,0.30294654,1.0,1.0,1.0,0.2574683,0.35426268,1.0,0.22670513,0.48033,0.3091014,0.50567,0.20216255,0.60027564,0.29021683,0.3650402,1.0,0.14596896,0.36592337,0.54027355,0.32039127,1.0,1.0,0.3373526,1.0,1.0,1.0,1.0,0.27828467,0.39796314,1.0,0.38385087,0.3373526,1.0,0.32250378,0.48852658,0.29794803,0.30569738,0.776275,0.21024697,0.280092,1.0,0.38043365,0.23591581,0.26140043,1.0,1.0,1.0,0.30147466,0.37951934,0.32006758,1.0,1.0,1.0,1.0,0.1716088,0.19742435,0.36808288,1.0,0.26824778,0.2545101,
1.0,0.5841707,1.0,0.18821381,1.0,0.16039088,1.0,1.0,1.0,1.0,0.4303678,0.32006758,0.46391273,0.25994033,1.0,1.0,0.38516942,0.4284409,0.37267947,1.0,0.118747555,1.0,0.31887275,1.0,1.0,1.0,1.0,0.45334226,1.0,1.0,0.20436318,1.0,1.0,1.0,1.0,1.0,0.38385087,0.4238335,0.30569738,0.2534581,0.5045958,0.5365064,1.0,0.24859054,1.0,1.0,0.57761014,0.5241808,1.0,0.27828467,0.623596,0.6036641,1.0,1.0,0.15195946,0.3895311,1.0,0.623596,1.0,1.0,0.4324072,1.0,1.0,0.44095683,1.0,0.53233755,0.4324072,1.0,1.0,0.5859699,0.3657326,1.0,0.2534581,0.26140043,1.0,1.0,1.0,0.26824778,0.45054653,1.0,1.0,0.38983104,0.271574,1.0,0.6917147,1.0,1.0,0.5394058,1.0,1.0,1.0,0.60027564,0.21649031,0.3650402,1.0,0.38983104,0.6616458,0.16039088,1.0,0.23591581,1.0,0.5841707,0.34106082,1.0,1.0,1.0,1.0,1.0,1.0,0.8335483,1.0,1.0,0.12690912,1.0,1.0,0.21790777,0.81343776,1.0,1.0,0.25928703,1.0,0.38385087,1.0,0.60027564,0.21649031,1.0,1.0,0.40154797,1.0,1.0,1.0,1.0,1.0,0.3657326,1.0,0.20738605,1.0,1.0,0.44862172,0.3187317,1.0,1.0,0.3808026,1.0,0.35433903,0.60027564,0.6294347,0.15195946,1.0,1.0,1.0,1.0,1.0,1.0,1.0,0.17422085,0.35426268,1.0,1.0,0.5188569,1.0,0.25361824,1.0,0.35017204,1.0,1.0,0.45326453,1.0,1.0,0.3650402,0.15195946,1.0,0.38043365,0.5273684,1.0,1.0,1.0,0.30656576,0.4284409,1.0,1.0,0.3747172,1.0,0.3650402,0.5359372,0.3499345,1.0,1.0,1.0,1.0,1.0,0.15195946,1.0,0.26140043,1.0,0.48033,0.28796464,1.0,1.0,1.0,1.0,1.0,0.33075523,1.0,0.5068076,0.16750325,1.0,1.0,0.3650402,0.6435584,0.26140043,0.3707282,1.0,1.0,1.0,1.0,1.0,0.3766335,0.25124916,0.6616458,1.0,1.0,1.0,0.3747172,1.0,0.19742435,0.3373526,0.3516509,1.0,1.0,1.0,1.0,0.35426268,0.25928703,0.3808026,0.38060236,0.20264557,0.20738605,0.67433524,1.0,0.5359372,0.26824778,1.0,0.44095683,0.25790894,0.6890197,0.25361824,0.39250582,0.36808288,1.0,1.0,1.0,0.4303678,0.3707282,0.57345456,0.26140043,1.0,0.19742435,0.32250378,1.0,0.6435584,1.0,0.2774369,0.3747172,1.0,0.57345456,0.4324072,0.3694386,1.0,1.0,1.0,1.0,0.28566155,1.0,1.0,1.0,0.31238142,1.0,0.35433903,0.3188
7275,0.40726745,1.0,0.27828467,1.0,0.55084413,1.0,1.0,1.0,1.0,0.3509649,0.5024241,0.30147466,0.45054653,0.2774369,1.0,1.0,1.0,1.0,1.0,1.0,0.60027564,0.38983104,0.42665324,1.0,1.0,0.57345456,0.14596896,1.0,1.0,0.5394058,1.0,1.0,0.38983104,1.0,0.23074082,0.23563832,1.0,1.0,0.81343776,0.29794803,0.42665324,1.0,0.280092,0.42665324,1.0,1.0,1.0,1.0,1.0,0.32897002,0.4284409,1.0,0.39796314,1.0,1.0,1.0,0.43295184,1.0,0.32039127,0.3373526,0.3690435,1.0,0.3196927,0.63569236,0.5359372,1.0,0.21024697,1.0,0.43295184,0.2835586,0.53233755,0.63569236,1.0,1.0,1.0,1.0,1.0,1.0,0.4238335,0.4238335,1.0,1.0,1.0,0.41867,0.32250378,0.36378008,1.0,0.3373526,0.28197166,1.0,1.0,0.36378008,0.623596,1.0,0.60027564,0.5365064,0.35971618,1.0,0.5024241,1.0,1.0,1.0,0.35017204,1.0,1.0,0.39250582,0.25361824,1.0,0.21649031,0.48033,0.39796314,0.39392543,0.16039088,0.81343776,1.0,0.24267516,1.0,1.0,0.63569236,1.0,0.30294654,0.25124916,0.31238142,0.2757926,0.29021683,1.0,0.31238142,1.0,1.0,0.1716088,1.0,0.23074082,0.50324607,0.81343776,0.30147466,1.0,0.63569236,0.5024241,1.0,0.21594192,1.0,0.271574,0.23147975,0.30463517,0.20585859,1.0,0.2574683,0.48033,0.597964,0.4003312,1.0,0.32897002,0.38385087,1.0,0.3379542,1.0,1.0,1.0,0.31238142,0.8335483,0.280092,1.0,1.0,0.67433524,1.0,0.21471679,1.0,1.0,0.26779795,0.4516161,1.0,0.5394058,1.0,0.5841707,1.0,1.0,1.0,1.0,0.54027355,0.35017204,0.5859699,1.0,0.49260107,0.6185227,1.0,0.3921083,1.0,0.37267947,0.36592337,1.0,1.0,0.118747555,1.0,0.39250582,0.26824778,0.48033,0.12690912,1.0,0.1716088,0.22035094,1.0,0.5882211,0.21024697,0.597964,0.4516161,0.38516942,0.2574683,1.0,0.29936668,0.3747172,0.42665324,1.0,1.0,1.0,1.0,1.0,1.0,0.4238335,0.49316898,0.4324072,0.15195946,1.0,1.0,0.3066122,0.63569236,0.1716088,0.5882211,0.776275,0.2534581,0.42718083,1.0,0.38983104,0.3187317,0.21594192,0.4284409,1.0,1.0,1.0,0.29794803,1.0,0.42088416,0.32006758,1.0,1.0,1.0,0.26779795,1.0,0.3187317,0.30656576,0.776275,1.0,1.0,0.21594192,0.32897002,0.35017204,0.42665324,0.29937696,1.0,0.5297487,
0.6917147,0.2545101,0.32006758,1.0,1.0,1.0,1.0,0.15195946,1.0,0.42665324)
# w2v
w2v_fist_first <- c(0.25340745,0.99999994,1.0,0.69450164,1.0,1.0,0.60697013,0.48318893,1.0,1.0,0.32364058,1.0,0.99999994,0.99999994,0.9999999,0.99999994,1.0,0.27959016,0.9999999,1.0,1.0,0.32364058,0.06923626,0.6744991,1.0,0.69450164,1.0,1.0,1.0,1.0,1.0,0.39699715,0.99999994,0.44055676,1.0,0.99999994,0.99999994,0.3412163,1.0,1.0,1.0,0.5837247,0.4756788,0.21705829,1.0,0.99999994,0.99999994,1.0,1.0,1.0,0.79646647,0.48723003,1.0,1.0,0.7980804,0.99999994,1.0,1.0,-0.12749827,0.30003864,1.0,0.99999994,1.0,1.0,1.0,0.69450164,1.0,0.99999994,1.0,0.99999994,1.0,0.6977526,0.27959016,0.20442195,0.38415307,1.0,1.0,1.0,0.21705829,1.0,1.0,0.99999994,1.0,1.0,0.6109226,0.99999994,1.0,0.99999994,0.99999994,0.7008525,1.0,0.99999994,1.0,0.99999994,0.21570428,0.99999994,1.0,1.0,0.76057994,0.727841,1.0,1.0,1.0,1.0,0.351846,0.26510695,0.99999994,0.30003864,1.0,0.06881772,1.0,0.63944536,0.48675558,0.012015389,1.0,1.0,0.097540505,1.0,1.0,0.12222154,0.57147324,1.0,1.0,1.0,-0.021574289,0.79646647,0.99999994,1.0,1.0,0.69569623,1.0,1.0,1.0,0.039417733,1.0,1.0,0.99999994,1.0,1.0,0.48675558,0.2582003,0.99999994,0.99999994,0.6805931,1.0,0.4506089,1.0,1.0,1.0,1.0,0.70023763,0.99999994,0.6701168,0.44598138,0.99999994,1.0,1.0,0.7733818,1.0,0.351846,1.0,0.7980804,1.0,0.4007791,1.0,0.79646647,1.0,1.0,1.0,1.0,0.99999994,0.48318893,0.39699715,0.99999994,0.70023763,0.26510695,0.4506089,1.0,0.99999994,0.20289439,0.99999994,0.80811465,1.0,0.6701168,1.0,1.0,1.0,0.99999994,1.0,0.8408154,1.0,0.039417733,0.43010306,1.0,1.0,0.2703469,0.5837247,0.99999994,1.0,1.0,1.0,1.0,1.0,0.550718,1.0,1.0,0.99999994,1.0,0.69569623,0.5837247,0.6977526,0.26510695,0.99999994,1.0,0.24241246,0.7980804,1.0,0.27959016,1.0,0.29845855,0.5770045,1.0,0.27460334,0.8278799,1.0,0.99999994,0.6744991,0.99999994,1.0,1.0,1.0,0.24478133,0.99999994,0.6107836,0.70023763,1.0,1.0,0.27959016,1.0,0.13281249,1.0,1.0,1.0,1.0,1.0,0.2685541,1.0,0.39699715,0.99999994,1.0,1.0,0.7523,0.16023284,1.0,0.44055676,0.06923626,0.57227695,1.0,0.8345818,1.0,0.57620436
,1.0,1.0,1.0,0.38415307,1.0,1.0,1.0,1.0,0.15712477,0.99999994,0.40223452,0.21097478,1.0,0.6109226,0.6977526,0.35510886,1.0,1.0,1.0,1.0,1.0,0.21097478,0.35510886,1.0,1.0,1.0,1.0,1.0,0.094626226,0.99999994,-0.12749827,0.25399002,1.0,0.9317011,1.0,0.10955807,1.0,1.0,0.6805931,0.103451245,1.0,1.0,1.0,1.0,1.0,1.0,0.99999994,1.0,1.0,0.6107836,0.29845855,1.0,0.99999994,1.0,1.0,1.0,0.13281249,1.0,0.008827321,0.13900572,1.0,1.0,0.76057994,1.0,1.0,0.5837247,0.5357162,1.0,1.0,0.99999994,0.5280927,1.0,0.42875063,0.74858737,0.6109226,1.0,0.012015389,1.0,1.0,1.0,1.0,1.0,0.48723003,1.0,1.0,1.0,1.0,0.79646647,1.0,0.13281249,1.0,1.0,1.0,0.6107836,0.63944536,1.0,1.0,1.0,0.60697013,1.0,1.0,0.60697013,0.4756788,0.57620436,0.70003337,0.24478133,1.0,0.99999994,0.06923626,0.99999994,0.351846,1.0,0.6109226,1.0,0.38415307,1.0,1.0,1.0,0.79646647,1.0,1.0,0.99999994,0.48723003,1.0,0.8354425,1.0,1.0,1.0,1.0,1.0,0.44627908,0.99999994,1.0,1.0,1.0,0.63944536,1.0,1.0,0.99999994,0.24478133,1.0,1.0,0.99999994,0.99999994,0.3152594,0.60697013,1.0,1.0,1.0,1.0,0.69450164,1.0,1.0,1.0,0.13281249,0.99999994,1.0,0.06923626,1.0,1.0,1.0,1.0,0.13281249,0.351846,1.0,1.0,1.0,0.20289439,1.0,1.0,0.3412163,1.0,0.99999994,0.22275014,0.15712477,1.0,1.0,1.0,1.0,1.0,1.0,0.35712466,0.80811465,1.0,0.16434829,1.0,0.99999994,1.0,0.99999994,1.0,1.0,0.27460334,1.0,1.0,1.0,0.21570428,1.0,0.44598138,0.99999994,0.99999994,1.0,0.16023284,0.06881772,1.0,1.0,1.0,0.99999994,1.0,1.0,0.32364058,1.0,0.44055676,1.0,0.24478133,0.99999994,1.0,0.56046575,1.0,0.35510886,1.0,1.0,1.0,1.0,1.0,1.0,1.0,1.0,0.42875063,0.8345818,1.0,1.0,0.70003337,1.0,1.0,1.0,1.0,0.20289439,1.0,1.0,1.0,1.0,0.16259249,1.0,0.99999994,0.99999994,1.0,0.6054121,0.48675558,0.99999994,1.0,0.8278799,1.0,0.23036812,0.012015389,0.42875063,1.0,1.0,1.0,1.0,1.0,1.0,1.0,1.0,0.21570428,0.5280927,0.99999994,1.0,0.99999994,0.6701168,0.295391,1.0,0.21705829,0.4756788,1.0,0.99999994,0.99999994,1.0,1.0,1.0,0.6054121,1.0,1.0,0.44627908,1.0,0.27875987,1.0,1.0,-0.021574289,1.0,1.0,1.0,1
.0,0.8354425,1.0,0.47798377,0.06881772,0.99999994,1.0,1.0,1.0,1.0,1.0,1.0,1.0,1.0,0.99999994,0.99999994,1.0,0.24478133,1.0,0.727841,1.0,1.0,1.0,0.6109226,0.7980804,0.10955807,1.0,1.0,1.0,1.0,0.727841,1.0,1.0,1.0,0.48675558,1.0,0.5770045,1.0,1.0,1.0,0.99999994,1.0,1.0,1.0,1.0,0.727841,1.0,1.0,1.0,0.48675558,1.0,0.69450164,1.0,0.69569623,0.6977526,1.0,1.0,0.99999994,0.012015389,0.6805931,0.6701168,1.0,1.0,0.99999994,1.0,0.99999994,0.99999994,1.0,0.99999994,0.8345818,1.0,0.23036812,1.0,1.0,1.0,1.0,1.0,1.0,0.727841,1.0,0.24241246,0.79646647,1.0,1.0,1.0,-0.047015317,1.0,0.8408154,0.7980804,0.039417733,1.0,0.58502424,0.99999994,0.6744991,0.99999994,0.99999994,1.0,1.0,0.16259249,1.0,0.99999994,0.38415307,1.0,0.10955807,0.24241246,0.6977526,0.44055676,1.0,1.0,1.0,0.47298503,0.6977526,0.99999994,1.0,0.550718,0.99999994,0.6054121,-0.12749827,0.5303794,0.9999999,0.99999994,1.0,1.0,0.44598138,0.99999994,0.25399002,0.097540505,1.0,0.6030951,0.2703469,0.2703469,1.0,0.99999994,1.0,1.0,0.74858737,0.2685541,0.99999994,1.0,0.3672635,0.5357162,0.6701168,1.0,1.0,1.0,0.8278799,1.0,1.0,1.0,0.30979303,1.0,1.0,0.99999994,0.9317011,0.99999994,1.0,0.13438751,-0.021574289,1.0,1.0,1.0,0.727841,0.57620436,1.0,1.0,1.0,1.0,0.25399002,0.70023763,0.99999994,1.0,1.0,1.0,1.0,1.0,0.99999994,0.35510886,1.0,0.99999994,0.99999994,0.4756788,0.3672635,0.008827321,1.0,0.12222154,1.0,1.0,1.0,1.0,1.0,0.99999994,0.6744991,0.39699715,1.0,1.0,0.23036812,1.0,1.0,0.29845855,1.0,1.0,1.0,1.0,1.0,0.3295845,0.30979303,1.0,1.0,0.8278799,0.30979303,1.0,0.42875063,0.26510695,1.0,0.16434829,0.21705829,1.0,1.0)
w2v_fist_last <- c(0.36056337,0.5091434,1.0,0.29230556,0.41286796,1.0,1.0,0.35117063,1.0,1.0,1.0,1.0,0.48289025,0.7009281,0.9999999,0.03766249,1.0,0.19531734,0.9999999,0.25536528,0.30778572,1.0,-0.078639485,0.19433504,0.32707268,0.29230556,1.0,1.0,1.0,0.73736733,1.0,0.99999994,0.99999994,0.99999994,0.30778572,0.99999994,0.99999994,1.0,1.0,0.5195033,0.3225619,0.71925914,1.0,1.0,0.39699715,0.40299514,0.3010716,1.0,0.09148129,0.09148129,0.35785872,0.31986994,1.0,1.0,0.6557346,0.99999994,1.0,0.7169294,1.0,0.26124007,1.0,0.26853514,0.68189204,1.0,1.0,0.29230556,1.0,0.99999994,0.31190598,0.99999994,1.0,1.0,0.19531734,0.18865041,0.14952749,-0.0012015628,0.21720992,1.0,1.0,0.12670073,0.74147034,0.63130665,1.0,1.0,1.0,0.99999994,-0.008165963,0.63130665,0.7009281,0.3146427,-0.003566877,0.5263758,1.0,0.99999994,1.0,0.99999994,1.0,1.0,1.0,0.3219774,1.0,1.0,0.4636757,1.0,0.3057815,1.0,0.41697532,0.26124007,0.4636757,1.0,1.0,0.2979879,0.3005069,0.43831536,1.0,0.45838788,1.0,1.0,0.5191553,0.019559786,0.99999994,0.5356808,1.0,1.0,1.0,0.35785872,0.63130665,0.30617166,1.0,0.43231362,0.494272,0.20551263,1.0,0.37740585,0.5248271,0.4941933,0.41697532,0.8005013,1.0,0.3005069,0.37757155,0.99999994,0.3737321,1.0,0.8005013,1.0,0.70023763,1.0,1.0,0.32006875,0.99999994,0.26853514,1.0,0.99999994,0.6160442,0.10869746,0.31190598,0.99999994,0.25536528,0.3057815,1.0,0.6557346,1.0,0.2099944,0.40522474,0.35785872,0.6841196,0.3781568,0.37883556,0.0029367534,0.99999994,0.35117063,0.99999994,0.2595745,0.99999994,1.0,1.0,1.0,0.13671437,0.009926386,0.99999994,0.13318044,1.0,1.0,0.12670073,0.34597602,1.0,0.6160442,0.16951627,0.4217595,1.0,0.37740585,0.58020055,0.6456344,1.0,1.0,0.71925914,0.4699464,0.16951627,0.24718724,0.8005013,0.43606666,1.0,1.0,1.0,0.5191553,0.5091434,1.0,0.43231362,0.71925914,1.0,1.0,0.99999994,1.0,0.35097864,0.6557346,1.0,0.19531734,0.41106766,0.6469727,0.4218619,0.13693234,1.0,-0.027140852,0.40813842,0.99999994,0.19433504,0.99999994,0.13693234,1.0,1.0,0.25691313,0.99999994,1.0,0.99
999994,0.34597602,1.0,0.19531734,1.0,0.5786355,0.40590805,1.0,0.5502382,1.0,1.0,1.0,1.0,0.99999994,0.99999994,1.0,1.0,0.6064839,1.0,1.0,0.99999994,-0.078639485,0.6886524,0.41106766,1.0,1.0,0.9395597,1.0,0.40590805,0.39699715,0.14952749,1.0,1.0,0.6499989,1.0,0.99999994,0.99999994,1.0,0.292591,1.0,1.0,1.0,0.22619407,0.21969363,1.0,0.32707268,1.0,0.40813842,0.292591,0.22619407,1.0,0.6536679,1.0,0.58641773,1.0,0.07738136,0.99999994,1.0,0.16367693,0.11722051,1.0,1.0,0.8901057,1.0,1.0,1.0,1.0,0.5195033,0.11722051,1.0,1.0,0.0029367534,1.0,0.13671437,1.0,0.022754498,1.0,0.6469727,1.0,0.0773078,0.24718724,0.68189204,1.0,0.5786355,1.0,0.6088914,0.16445775,1.0,0.32707268,1.0,0.16951627,1.0,0.71925914,0.49630952,1.0,1.0,0.99999994,1.0,0.68189204,1.0,-0.1066545,1.0,0.40033248,0.43831536,1.0,0.12670073,1.0,1.0,1.0,0.31986994,1.0,1.0,0.32006875,0.11722051,0.35785872,1.0,0.5786355,1.0,0.29361695,1.0,1.0,0.2979879,1.0,0.29361695,0.4158308,1.0,0.25536528,1.0,1.0,1.0,0.9395597,0.604948,0.25691313,1.0,0.26853514,-0.078639485,0.99999994,0.3057815,0.30850354,1.0,0.25064415,0.14952749,0.70023763,0.41106766,1.0,0.35785872,0.05631186,0.5093313,0.42856526,0.31986994,1.0,1.0,0.34597602,0.8991714,1.0,0.28961033,1.0,0.58148575,0.99999994,0.07459725,0.43606666,0.40033248,0.2979879,1.0,0.16314548,0.99999994,0.25691313,1.0,1.0,0.99999994,0.99999994,1.0,1.0,0.09148129,1.0,1.0,0.25536528,0.29230556,0.6841196,1.0,1.0,0.5786355,0.99999994,1.0,-0.078639485,0.07459725,1.0,1.0,0.32707268,0.5786355,0.3057815,0.22358413,1.0,1.0,0.009926386,0.40033248,1.0,1.0,1.0,0.44807348,0.23897943,0.99999994,1.0,1.0,1.0,-0.008165963,1.0,1.0,0.1912721,0.13318044,1.0,0.47302213,1.0,0.99999994,1.0,0.99999994,1.0,1.0,1.0,0.05631186,0.07459725,-0.003566877,1.0,1.0,0.99999994,0.26021796,0.5091434,1.0,1.0,1.0,1.0,0.6499989,1.0,0.55932415,0.07459725,1.0,1.0,0.40522474,0.99999994,1.0,0.25691313,0.99999994,1.0,0.45937645,1.0,0.22619407,1.0,0.6911551,1.0,1.0,1.0,0.50728494,1.0,1.0,1.0,1.0,0.24472873,0.4941933,0.604948,0.494272,1.0
,0.5248271,0.4842383,0.009926386,1.0,1.0,1.0,0.07459725,0.99999994,0.25536528,0.4699464,0.054624442,1.0,0.3874962,0.3005069,0.48330563,0.39699715,-0.027140852,1.0,0.48427746,0.43831536,1.0,0.21720992,0.5191553,1.0,-0.008165963,1.0,1.0,0.09148129,1.0,1.0,1.0,0.99999994,0.07256241,0.3737321,1.0,0.19876298,0.3781568,1.0,1.0,1.0,0.7009281,0.99999994,0.11722051,0.024397077,1.0,0.3874962,1.0,1.0,0.58148575,1.0,0.104182184,0.68189204,0.24472873,1.0,0.009593446,0.07256241,0.20551263,0.07459725,1.0,0.53150326,-0.021211842,1.0,0.0773078,0.07256241,1.0,0.09148129,1.0,1.0,1.0,1.0,1.0,0.4993316,0.99999994,0.098746635,0.25691313,1.0,0.3219774,1.0,1.0,0.32006875,1.0,0.6557346,0.8901057,-0.003566877,0.0857112,0.43606666,0.5356808,0.3219774,0.367818,1.0,1.0,0.3005069,0.5335908,0.4218619,1.0,0.30617166,0.32006875,0.99999994,1.0,0.367818,1.0,1.0,0.3219774,1.0,1.0,1.0,0.3005069,0.4636757,0.29230556,0.8005013,0.43231362,1.0,-0.008165963,0.21969363,0.48330563,0.43831536,1.0,1.0,-0.07633811,0.12670073,0.99999994,1.0,0.4993316,0.26021796,1.0,0.55932415,1.0,1.0,0.48427746,0.3225619,0.7169294,1.0,1.0,1.0,1.0,0.3219774,1.0,0.35097864,0.35785872,1.0,1.0,0.25064415,0.7586176,0.098746635,0.4217595,0.6557346,0.37740585,0.21720992,0.99999994,0.7009281,0.19433504,0.99999994,0.3737321,1.0,1.0,0.99999994,1.0,0.99999994,0.14952749,0.68189204,0.8901057,0.35097864,1.0,0.99999994,0.11722051,1.0,1.0,1.0,1.0,0.4993316,0.0029367534,1.0,0.3737321,0.3874962,1.0,0.12343065,0.9999999,0.99999994,1.0,0.8005013,0.99999994,0.3010716,0.16367693,1.0,1.0,0.20161536,1.0,1.0,1.0,0.99999994,1.0,0.53150326,-0.1066545,1.0,0.26853514,1.0,0.99999994,0.49630952,1.0,0.11722051,1.0,0.53150326,-0.027140852,1.0,1.0,0.494272,0.39455858,1.0,0.09148129,0.99999994,1.0,0.6160442,1.0,0.34218472,1.0,1.0,1.0,0.07778965,0.3219774,0.9395597,1.0,1.0,0.7180517,0.41106766,0.16367693,0.99999994,0.55932415,0.31190598,1.0,1.0,1.0,0.5335908,0.99999994,0.22619407,0.21969363,0.7960824,0.5263758,1.0,0.99999994,0.6088914,0.5431224,0.019559786,1.0,1.0
,0.07256241,1.0,0.18217726,0.48330563,0.19433504,0.99999994,1.0,0.50728494,0.48427746,1.0,1.0,0.6469727,1.0,0.45838788,1.0,0.41286796,1.0,0.4070585,0.39455858,1.0,1.0,-0.027140852,0.39455858,1.0,1.0,1.0,0.5195033,0.47302213,1.0,1.0,-0.008165963)
w2v_last_last <- c(0.36056337,0.19259661,1.0,1.0,1.0,1.0,0.4813526,0.35117063,0.04272193,1.0,1.0,1.0,0.41403502,0.99999994,0.9999999,0.03766249,1.0,0.19531734,0.9999999,1.0,1.0,1.0,-0.078639485,1.0,1.0,1.0,1.0,1.0,1.0,0.7488651,1.0,0.99999994,0.57424796,0.99999994,1.0,0.99999994,0.99999994,1.0,1.0,0.0843728,1.0,1.0,1.0,0.150232,1.0,0.99999994,0.99999994,1.0,1.0,1.0,1.0,1.0,0.3079796,1.0,1.0,0.35351613,1.0,0.6250057,0.18549149,0.99999994,1.0,0.99999994,1.0,1.0,1.0,1.0,1.0,0.99999994,1.0,-0.05037111,0.39436397,1.0,0.19531734,1.0,0.14952749,0.48262545,1.0,1.0,0.150232,0.12670073,1.0,0.99999994,0.3806352,0.03196327,1.0,0.99999994,-0.09626324,0.99999994,0.99999994,0.7008525,1.0,0.028906511,1.0,0.57424796,1.0,0.99999994,1.0,1.0,0.4829056,0.5713356,1.0,1.0,0.5264707,1.0,0.3057815,1.0,0.99999994,0.99999994,0.5264707,0.05753716,1.0,-0.0069058742,0.3005069,1.0,0.49670666,0.45838788,0.049443208,0.5580772,0.5191553,0.42596528,0.99999994,1.0,1.0,1.0,1.0,1.0,0.99999994,0.7170906,1.0,0.24163032,1.0,0.18000342,1.0,0.37740585,1.0,0.4941933,0.99999994,1.0,1.0,0.3005069,0.45895585,-0.05037111,0.3737321,1.0,1.0,1.0,1.0,0.49670666,1.0,1.0,0.99999994,0.99999994,0.54449236,0.99999994,0.99999994,1.0,1.0,0.4364546,1.0,0.3057815,0.29227597,1.0,1.0,0.99999994,0.39884517,1.0,1.0,1.0,1.0,0.22776818,0.99999994,0.35117063,0.99999994,0.2595745,0.99999994,1.0,1.0,-0.014651864,0.3720478,-0.025001995,0.99999994,1.0,1.0,0.54449236,0.12670073,0.015563594,1.0,0.99999994,1.0,0.99999994,1.0,0.37740585,0.66705006,0.31457636,1.0,1.0,1.0,0.99999994,1.0,0.24718724,1.0,1.0,1.0,1.0,1.0,0.5191553,0.19259661,1.0,0.24163032,1.0,1.0,1.0,0.99999994,1.0,1.0,1.0,1.0,0.19531734,0.45791462,1.0,1.0,0.30976036,1.0,0.049527638,1.0,0.99999994,1.0,0.99999994,0.30976036,0.03196327,1.0,0.25691313,0.57424796,0.76421493,0.99999994,0.015563594,1.0,0.19531734,1.0,0.5218487,0.31937748,1.0,1.0,1.0,1.0,0.43988776,1.0,0.99999994,0.99999994,0.29227597,0.5010465,1.0,1.0,0.5546783,0.99999994,-0.078639485,0.99999994,0.45791462,1.0,1.0,0.9
395597,0.102522925,0.31937748,1.0,0.14952749,1.0,0.49670666,0.15767816,1.0,0.99999994,0.37573695,0.13961081,0.292591,1.0,1.0,1.0,0.99999994,0.37092754,1.0,1.0,1.0,1.0,0.292591,0.99999994,1.0,1.0,1.0,1.0,1.0,0.07738136,0.99999994,0.18549149,0.16367693,1.0,0.87484443,1.0,0.8901057,1.0,-0.014651864,1.0,1.0,0.0843728,1.0,1.0,1.0,0.22776818,0.5546783,0.3720478,1.0,1.0,0.76421493,1.0,0.32095486,0.99999994,0.24718724,1.0,1.0,0.5218487,1.0,1.0,1.0,1.0,1.0,0.4829056,1.0,1.0,1.0,1.0,0.0136583615,1.0,0.57424796,0.55897427,1.0,1.0,0.41169858,1.0,1.0,1.0,1.0,0.12670073,-0.014651864,1.0,1.0,1.0,1.0,1.0,1.0,1.0,1.0,0.23245648,0.5218487,1.0,-0.033519633,-0.07603918,0.76421493,-0.0069058742,0.49670666,-0.033519633,1.0,0.4813526,1.0,1.0,0.4813526,1.0,0.9395597,0.99999994,0.25691313,1.0,0.99999994,-0.078639485,0.37573695,0.3057815,0.7290551,1.0,1.0,0.14952749,1.0,0.45791462,1.0,1.0,0.5652849,1.0,0.99999994,1.0,0.1090399,1.0,0.015563594,1.0,1.0,1.0,1.0,0.58148575,0.57424796,1.0,1.0,1.0,-0.0069058742,1.0,1.0,0.99999994,0.25691313,1.0,0.28884116,0.57424796,0.99999994,1.0,0.4813526,1.0,1.0,1.0,1.0,1.0,1.0,1.0,0.49670666,0.5218487,0.99999994,1.0,-0.078639485,1.0,0.5010465,0.0136583615,1.0,0.5218487,0.3057815,1.0,0.5580772,0.17875747,-0.025001995,1.0,1.0,1.0,1.0,0.99999994,-0.055716645,0.99999994,1.0,0.58639055,1.0,-0.09626324,0.39436397,1.0,-0.1777314,1.0,1.0,1.0,1.0,0.99999994,1.0,0.08460645,1.0,1.0,1.0,0.5652849,1.0,1.0,1.0,1.0,0.99999994,0.5953513,0.19259661,1.0,1.0,0.05753716,1.0,0.15767816,1.0,0.99999994,1.0,1.0,1.0,0.39884517,0.99999994,1.0,0.25691313,0.99999994,0.39436397,0.5065666,0.29227597,0.99999994,1.0,1.0,1.0,1.0,1.0,1.0,1.0,1.0,1.0,1.0,1.0,0.4941933,0.99999994,1.0,0.04272193,1.0,1.0,-0.025001995,1.0,1.0,1.0,1.0,0.99999994,1.0,0.99999994,0.28839657,1.0,1.0,0.3005069,0.48330563,1.0,0.049527638,1.0,0.48427746,1.0,1.0,1.0,0.5191553,1.0,-0.09626324,1.0,1.0,1.0,0.58639055,1.0,0.55897427,0.99999994,0.39362487,0.3737321,0.54449236,0.295391,1.0,0.150232,1.0,1.0,0.99999994,0.99999994,1
.0,1.0,1.0,1.0,1.0,1.0,0.58148575,1.0,0.18617831,1.0,1.0,1.0,1.0,0.39362487,0.18000342,1.0,1.0,0.34408608,1.0,0.05753716,0.99999994,0.39362487,1.0,1.0,0.3806352,1.0,1.0,1.0,1.0,0.1930377,0.99999994,1.0,0.25691313,0.0136583615,0.5713356,1.0,1.0,1.0,1.0,1.0,0.8901057,1.0,1.0,1.0,1.0,0.5713356,1.0,1.0,1.0,0.3005069,0.19462772,1.0,1.0,0.7170906,1.0,0.99999994,0.24261636,1.0,1.0,1.0,0.5713356,1.0,-0.07603918,1.0,0.3005069,0.5264707,1.0,1.0,0.24163032,1.0,-0.09626324,0.37092754,0.48330563,1.0,1.0,0.54449236,0.37695107,0.12670073,0.99999994,1.0,0.1930377,0.5953513,1.0,0.99999994,1.0,1.0,0.48427746,1.0,0.6250057,1.0,0.29227597,0.04272193,1.0,0.5713356,0.17262895,1.0,1.0,1.0,0.52361315,1.0,1.0,1.0,0.99999994,1.0,0.37740585,1.0,0.99999994,0.99999994,1.0,0.37573695,0.3737321,1.0,1.0,0.99999994,1.0,0.99999994,0.14952749,1.0,0.8901057,1.0,1.0,0.99999994,1.0,0.1090399,1.0,1.0,1.0,0.1930377,0.22776818,1.0,0.3737321,1.0,0.18549149,0.12343065,0.9999999,0.99999994,1.0,1.0,0.99999994,0.99999994,0.16367693,0.049443208,1.0,1.0,1.0,1.0,0.19392927,0.99999994,1.0,0.34408608,0.41169858,0.43988776,0.99999994,1.0,0.5575696,1.0,0.54449236,1.0,1.0,0.34408608,0.049527638,1.0,0.03196327,1.0,0.40039736,1.0,1.0,0.99999994,0.87484443,0.99999994,1.0,0.12717277,1.0,1.0,1.0,1.0,0.5713356,0.9395597,1.0,1.0,1.0,0.45791462,0.16367693,0.99999994,0.99999994,1.0,0.102522925,1.0,1.0,0.19462772,0.99999994,0.99999994,0.37092754,0.67901355,0.028906511,1.0,0.5575696,1.0,0.6379795,0.42596528,1.0,1.0,0.39362487,1.0,0.31412387,0.48330563,1.0,0.99999994,1.0,1.0,0.48427746,1.0,0.23245648,1.0,0.70241374,0.45838788,1.0,1.0,0.39436397,1.0,0.40039736,1.0,1.0,0.049527638,0.40039736,1.0,1.0,1.0,0.0843728,1.0,0.150232,1.0,-0.09626324)
# Compare the six similarity-score distributions side by side.
# Bug fix: plot title read "Comparsion" -> "Comparison".
# NOTE(review): "fist" in the variable/label names looks like a typo for
# "first", but the vectors are defined under these names elsewhere in the
# file, so the labels are kept consistent with the variables.
boxplot(s2v_fist_first, w2v_fist_first, s2v_fist_last,
        w2v_fist_last, s2v_last_last, w2v_last_last,
        main = "Synset Comparison Verbs",
        names = c("s2v_fist_first", "w2v_fist_first", "s2v_fist_last",
                  "w2v_fist_last", "s2v_last_last", "w2v_last_last"),
        las = 2, notch = TRUE, ylim = c(0, 1.2), cex.axis = 0.6)
# Overlay the mean of each group as a filled diamond (pch = 18).
points(1:6,
       c(mean(s2v_fist_first), mean(w2v_fist_first), mean(s2v_fist_last),
         mean(w2v_fist_last), mean(s2v_last_last), mean(w2v_last_last)),
       pch = 18, cex = 1.5)
|
bcb32b3fc8f4316a5a55ca673529b4b75bcd8793
|
a176626eb55b6525d5a41e2079537f2ef51d4dc7
|
/Uni/Projects/code/P020.Temprature_NE_MIA/archive/RSOE_review_analysis_V2.r
|
385f428c5992d1e0c1ba90f1e109d60970d61ef3
|
[] |
no_license
|
zeltak/org
|
82d696b30c7013e95262ad55f839998d0280b72b
|
d279a80198a1dbf7758c9dd56339e8a5b5555ff2
|
refs/heads/master
| 2021-01-21T04:27:34.752197
| 2016-04-16T04:27:57
| 2016-04-16T04:27:57
| 18,008,592
| 0
| 1
| null | null | null | null |
UTF-8
|
R
| false
| false
| 9,502
|
r
|
RSOE_review_analysis_V2.r
|
library(lme4)
library(reshape)
library(foreign)
library(ggplot2)
library(plyr)
library(data.table)
library(reshape2)
library(Hmisc)
library(mgcv)
library(gdata)
# Cross-validation summary table: one row per state / city / area category.
# R2, Bias (regression slope) and RMSPE are filled in by the CV code below.
mod1table <- data.frame(
  type = c("WV", "VA", "PA", "MD", "NY", "RI", "NJ", "DE", "CT",
           "MA", "VT", "NH", "ME", "Boston", "NYC", "rural", "urban"),
  R2 = numeric(17),
  Bias = numeric(17),
  RMSPE = numeric(17),
  stringsAsFactors = FALSE
)
#####################
### Full model
#####################
# Bug fix: lme() lives in package nlme, not lme4 (lme4 provides lmer); load
# it explicitly so the script runs in a fresh session.
library(nlme)

# Station/day table with observed temperature and covariates.
mod1 <- as.data.table(read.dbf("/media/NAS/Uni/Projects/P020_Temprature_NE_MIA/3.Work/3.Analysis/AN_001_mods_CV/Mod1_2005.dbf"))
summary(mod1)

# Day dataset: drop the DTckin and humidity columns.
# Bug fix: on a data.table, `mod1[!DELLIST]` subsets ROWS (recycling the
# column-length logical), not columns; select the kept columns explicitly.
keep_cols <- setdiff(names(mod1), c("DTckin", "humidity"))
mod1d <- mod1[, keep_cols, with = FALSE]
mod1d <- na.omit(mod1d)
mod1d$predicted <- NA
unique(mod1d$STATE_ABBR)

# #region
# reg<-fread("/media/NAS/Uni/Projects/P020_Temprature_NE_MIA/3.Work/2.Gather_data/FN007_Key_tables/station_state.csv")
# #add new region
# setkey(mod1d,station)
# setkey(reg,station)
# mod1d <- merge(mod1d, reg, all.x = T)

# Overall mixed model: fixed effects for the covariates plus a random
# intercept and NTckin slope per calendar day.
outd <- lme(tempc ~ NTckin + elev + purban + NDVI + ws,
            random = ~1 + NTckin | date, data = mod1d)
######
# Per-state cross-validation (rows 1-13 of mod1table)
######
# Fit observed ~ predicted for one state, using the overall model `outd`.
# Replaces 13 copy-pasted blocks that each ran the predict/lm step twice.
state_cv <- function(dat, model, state) {
  sub <- dat[STATE_ABBR %in% state, ]
  sub$predicted <- predict(object = model, newdata = sub)
  lm(tempc ~ predicted, data = sub)
}

# Rows 1-13 of mod1table are the state abbreviations, in order.
for (i in seq_len(13)) {
  fit <- state_cv(mod1d, outd, mod1table$type[i])
  fit_summary <- summary(fit)
  mod1table$R2[i] <- fit_summary$r.squared
  # Bias: slope of the observed-vs-predicted regression.
  mod1table$Bias[i] <- fit_summary$coef[2, 1]
  # RMSPE: root mean squared residual of the same regression.
  mod1table$RMSPE[i] <- sqrt(mean(residuals(fit)^2))
}
### 3 cities
# Boston: use the overall fit to predict for stations inside a Boston
# bounding box (lon -71.5..-70.5, lat 42..43), rows 14-15 of mod1table.
# (Removed a redundant refit of `outd` here -- the identical lme call on the
# unchanged mod1d is already fitted above and is deterministic.)
bos2005 <- mod1d[glong < -70.5 & glong > -71.5 & glat > 42.00 & glat < 43, ]
bos2005$predicted <- predict(object = outd, newdata = bos2005)
bos2005_out <- lm(tempc ~ predicted, data = bos2005)
mod1table$R2[14] <- summary(bos2005_out)$r.squared
mod1table$Bias[14] <- summary(bos2005_out)$coef[2, 1]
# rmspe
mod1table$RMSPE[14] <- sqrt(mean(residuals(bos2005_out)^2))

# New York City: same procedure for a NYC bounding box (lon -75..-73,
# lat 40..42). (Original comment said "boston" here -- copy/paste slip.)
ny2005 <- mod1d[glong < -73 & glong > -75 & glat > 40 & glat < 42, ]
ny2005$predicted <- predict(object = outd, newdata = ny2005)
ny2005_out <- lm(tempc ~ predicted, data = ny2005)
mod1table$R2[15] <- summary(ny2005_out)$r.squared
mod1table$Bias[15] <- summary(ny2005_out)$coef[2, 1]
# rmspe
mod1table$RMSPE[15] <- sqrt(mean(residuals(ny2005_out)^2))
# Urban vs. rural validation of model-3 predictions.
# NOTE(review): `m3pred2005` is never created in this script -- presumably it
# comes from a previously saved workspace; confirm before running.
#urban
m3pred2005_urb <- m3pred2005[purban.x > 22.2222, ]
m3pred2005_rural <- m3pred2005[purban.x <=22.2222, ]
summary(lm(tempc~pred_m3,data=m3pred2005_urb))
# The bare numbers below appear to be pasted-in R^2 results kept for reference.
0.9693
summary(lm(tempc~pred_m3,data=m3pred2005_rural))
0.8851
#########paper
#########paper
#########paper
# Seasonal validation: derive month and season codes from the date string.
#Seasons
library(car)
m3pred2005 <-m3pred2005 [, date:=as.Date(strptime(Date, "%m/%d/%y"))]
m3pred2005$month <- as.numeric(format(m3pred2005$date, "%m"))
# Four-season coding: 1-winter, 2-spring, 3-summer, 4-autumn
m3pred2005$season<-recode(m3pred2005$month,"1=1;2=1;3=2;4=2;5=2;6=3;7=3;8=3;9=4;10=4;11=4;12=1")
# Two-season coding: 1-winter (Oct-Mar), 2-summer (Apr-Sep)
m3pred2005$seasonSW<-as.character(recode(m3pred2005$month,"1=1;2=1;3=1;4=2;5=2;6=2;7=2;8=2;9=2;10=1;11=1;12=1"))
# Observed-vs-predicted fit for the two-season split ...
m3pred2005_winter <- m3pred2005[seasonSW %in% c("1"), ]
m3pred2005_summer <- m3pred2005[seasonSW %in% c("2"), ]
summary(lm(tempc~pred_m3,data=m3pred2005_winter))
summary(lm(tempc~pred_m3,data=m3pred2005_summer))
# ... and for the four-season split.
m3pred2005_s1 <- m3pred2005[season %in% c("1"), ]
m3pred2005_s2 <- m3pred2005[season %in% c("2"), ]
m3pred2005_s3 <- m3pred2005[season %in% c("3"), ]
m3pred2005_s4 <- m3pred2005[season %in% c("4"), ]
summary(lm(tempc~pred_m3,data=m3pred2005_s1))
summary(lm(tempc~pred_m3,data=m3pred2005_s2))
summary(lm(tempc~pred_m3,data=m3pred2005_s3))
summary(lm(tempc~pred_m3,data=m3pred2005_s4))
|
203443a8bf746aadc63631d82a842cd2d8f5aec4
|
5ba7be90516c93d2224001914f726ab05d23e713
|
/rpackage/bohemia/man/voronoi.Rd
|
478c0cb30d8a9b99bcc7f58b2d2f48bfa706ae03
|
[
"MIT"
] |
permissive
|
bikeforsale/bohemia
|
cbb3ddcc92d084ec6d98986f299ea08dc2324228
|
251a4ff737b5c6e7452eeb845ccfae11e48587c9
|
refs/heads/master
| 2022-09-05T07:35:00.972380
| 2020-05-02T20:30:11
| 2020-05-02T20:30:11
| 265,606,212
| 0
| 0
|
NOASSERTION
| 2020-05-20T15:21:57
| 2020-05-20T15:21:56
| null |
UTF-8
|
R
| false
| true
| 575
|
rd
|
voronoi.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/voronoi.R
\name{voronoi}
\alias{voronoi}
\title{Create voronoi tiles}
\usage{
voronoi(shp, poly = NULL)
}
\arguments{
\item{shp}{A spatial points dataframe with the following columns in the data slot: lng, lat, id (the id being the repeated area signifier)}
\item{poly}{An optional spatialPolygonsDataFrame by which the triangles will be trimmed}
}
\value{
A spatialPolygonsDataFrame
}
\description{
Create voronoi tiles using Delaunay triangulation for an entire surface based on ID'ed points
}
|
c037f6559f62aa628d83860ad0eca9db6eb6883d
|
7cc6c1874fe2ea4193bc812e70adc01859aa6c83
|
/travis/travis_prep.R
|
e9d8aa29c5dbb68160bd88c4ae08c6b068ec06fb
|
[
"BSD-2-Clause"
] |
permissive
|
daniel0128/receptormarker
|
c0fe2d3429182d05a56c6bfe3317732c0953e2cd
|
6b343ccd60f05ffcd494e8c636eccb06a9c04152
|
refs/heads/master
| 2020-12-31T02:02:30.028813
| 2015-12-12T00:54:20
| 2015-12-12T00:54:20
| 45,644,690
| 0
| 0
| null | 2015-11-05T22:55:38
| 2015-11-05T22:55:38
| null |
UTF-8
|
R
| false
| false
| 108
|
r
|
travis_prep.R
|
# CI setup: install the development version of lintr from GitHub.
devtools::install_github('jimhester/lintr')
# Install the Bioconductor 'muscle' package via the legacy biocLite installer.
# NOTE(review): biocLite was retired after Bioconductor 3.7; modern setups use
# BiocManager::install("muscle").
source("http://bioconductor.org/biocLite.R")
biocLite("muscle")
|
f5422b30a8ec1726f8234a0d9a7036226ac34b9b
|
da8dae69e597072bc616936d1d72a96f65e4efa0
|
/code/oldversions/v4_20190329/shiny/options.R
|
f1ea19a42ef10c820f56a65a07e1f964aab69a5e
|
[] |
no_license
|
UCL/provis
|
71e82c383cd9414840e57c2a2867826d6b4ee3e6
|
86a287c7bc705d4aeffb9bbcf96747e97e6d688b
|
refs/heads/master
| 2020-08-01T04:08:20.198284
| 2019-11-08T12:09:43
| 2019-11-08T12:09:43
| 210,310,151
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,521
|
r
|
options.R
|
# Options for UI inputs
# Region choices shared by the selectInput/radioButtons widgets:
# values 1..11 keyed by region name.
# Bug fix: label typo "Sout East" -> "South East".
region_options <- as.list(1:11)
names(region_options) <- c("Ca-MK-Ox", "Cornwall and Devon",
                           "East Midlands", "East of England",
                           "London", "North East", "North West",
                           "South East", "South West", "West Midlands",
                           "Yorkshire and the Humber")
# Dropdown bound to the named region_options list.
selectInput("region",label="Choose region.",
choices = region_options)
# Select regions using "radioButtons" -- alternative widget for the same
# input id, with longer display labels than the dropdown above.
radioButtons("region",
label = "Choose region.",
choiceNames = list("CaMKOx","Cornwall and Devon","East Midlands",
"East of England","London","North East England",
"North West England","South East England",
"South West England",
"West Midlands","Yorkshire and the Humber"),
choiceValues = as.list(c(1:11)),width="30%")
#--------------------------------------------------------------------
# Scratchpad: three alternative ways to display a PDF in a Shiny app.
# Display PDF in shiny
# OPTION 1: static iframe pointing at a fixed, locally served URL.
tags$iframe(style="height:600px; width:100%",
src="http://localhost/ressources/pdf/R-Intro.pdf")
# Display PDF in shiny
# OPTION 2: let the user type a URL; the server renders the iframe HTML.
# UI
textInput("pdfurl", "PDF URL")
htmlOutput('pdfviewer')
# server
output$pdfviewer <- renderText({
return(paste('<iframe style="height:600px; width:100%" src="',
input$pdfurl, '"></iframe>', sep = ""))
})
# Option 3: serve a file from the app's "www" directory on a button click.
# 1) put pdf in "www" directory
# 2) add renderUI statement to server
# 3) add uiOutput to ui
shinyServer(function(input, output) {
observeEvent(input$generate, {
output$pdfview <- renderUI({
tags$iframe(style="height:600px; width:100%", src="foo.pdf")
})
})
})
shinyUI(fluidPage(
titlePanel("Display a PDF"),
sidebarLayout(
sidebarPanel(
actionButton("generate", "Generate PDF")
),
mainPanel(
uiOutput("pdfview")
)
)
))
# PLOT MAP OF UK REGIONS
# output$ukmap<-renderPlot({
# regions<-B5LoadRegions(dirs)
# nregions<-nrow(regions@data)
# plot(regions, col=rainbow(nregions),main="UK regions with data availability")
# legend("left",legend = levels(regions@data$rgn15nm),
# fill=rainbow(nregions),
# cex=0.8)
# })
# Create UI that depends on user input.
# NOTE(review): getNearestCities() is not defined in this file -- presumably
# provided elsewhere in the app; confirm before reuse.
output$cityControls <- renderUI({
cities <- getNearestCities(input$lat, input$long)
checkboxGroupInput("cities", "Choose Cities", cities)
})
|
b06b22ba3ebbf070e720feb48abd9a96a4686118
|
f7983f4b74cb9fcd6154007f3b11b34b9ad11ede
|
/bayarea/scripts/household_assignment.R
|
014134f5bb41714413c84bb2f1bbbba6668d5ab1
|
[] |
no_license
|
psrc/urbansim
|
7d2fe615d7c22db839c283304067fce746fdb512
|
c392d15b35aa1d47bbc185ed76314f8e6dd9f92f
|
refs/heads/master
| 2023-06-09T16:55:50.415783
| 2023-05-11T19:24:34
| 2023-05-11T19:24:34
| 19,719,383
| 4
| 1
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,806
|
r
|
household_assignment.R
|
## This R script assigns households to buildings according to matched
## zone_id, tenure, and building_type_id.
## author(s): Yujiang Mou
setwd("/Users/yujiangmou/Desktop/updating hh_table")
# Workspace images; from the code below they appear to provide `hh.pums.new`
# (households) and `sub.bds` (buildings) -- confirm against the .rda files.
load("hhtable_newbuilding.rda")
load("building_newbuilding.rda")
# NOTE(review): re-saving sub.bds immediately after loading it is a no-op
# unless the object was edited interactively beforehand.
save(sub.bds,file="building_newbuilding.rda")
library("foreach")
ls()
zone_ids <- as.vector(unique(hh.pums.new$TAZ_id))

# Assign every household in one TAZ zone to a randomly chosen building with
# matching tenure and building type that still has free units. Returns a
# 2-column matrix of (household_id, building_id).
# Fixes vs. original:
#  * defined BEFORE the foreach call that uses it (the original defined it
#    after the call, so sourcing the script top-to-bottom failed);
#  * sample() length-1 footgun: sample(idsample, 1) with a single numeric id
#    x samples from 1:x instead of returning x;
#  * seq_len() so a zone with zero households iterates zero times.
process_zone <- function(hh, bd, zone_id) {
  hh.zone <- subset(hh, hh$TAZ_id == zone_id)
  bd.zone <- subset(bd, bd$TAZ_id == zone_id)
  for (i in seq_len(nrow(hh.zone))) {
    tenure <- hh.zone$tenure[i]
    building_type <- hh.zone$building_type[i]
    # candidate buildings: same tenure/type, units remaining
    index <- bd.zone$tenure == tenure & bd.zone$building_type == building_type & bd.zone$units != 0
    idsample <- bd.zone$building_id[index]
    if (is.na(idsample[1])) next
    # pick one candidate id; safe even when idsample has length 1
    id <- idsample[sample.int(length(idsample), 1)]
    hh.zone$building_id[i] <- id
    # the chosen building now has one fewer free unit
    bd.zone$units[bd.zone$building_id == id] <- bd.zone$units[bd.zone$building_id == id] - 1
    print(i)
  }
  cbind(hh.zone$household_id, hh.zone$building_id)
}

# Process each zone independently and stack the results row-wise.
temp <- foreach(zone_id = zone_ids, .combine = "rbind") %do%
  process_zone(hh.pums.new, sub.bds, zone_id)
# Convert the stacked matrix to a data.frame and restore column names.
temp=data.frame(temp)
names(temp)[names(temp)=="X1"]="household_id"
names(temp)[names(temp)=="X2"]="building_id"
names(temp)
# look at the data
# NOTE(review): `temp2` is never created in this script -- presumably it is
# loaded from new_pums.rda below; confirm.
load("new_pums.rda")
nrow(temp2)
# share of households that received a building (building_id != -1)
index=temp2$building_id==-1
sum(index)
(nrow(temp2)-sum(index))/nrow(temp2)#2371127
table(temp2$building_type)
names(temp2)
# uploaded to paris
# NOTE(review): database credentials are hard-coded here (password masked);
# move them to environment variables or a config file.
library('RPostgreSQL')
conn<-dbConnect(PostgreSQL(),user='urbanvision',password='***',dbname='bayarea',host='paris.urbansim.org')
dbListTables(conn)
table_name <- "hh_table_updated_bdID"
# NOTE(review): the next line looks like an unfinished leftover -- `read` is
# not a base R function, so `temp=read` would error; probably safe to delete.
temp=read
hh <- read.csv('hh_pums_new.csv', header=T)
dbWriteTable(conn, table_name, hh, row.names = F)
ls()
hh.bats<-dbReadTable(conn,'hh_table_updated_bdID')
|
8fdf7b3f9b983984dda28402706509f3f4af1aca
|
dc1ead4b62bba2a151b894d33dc16d76d12856e2
|
/TheFeetMaze/demos/main TheFeetMaze.R
|
b776200b50044404d760f7401dba568d550c9d1f
|
[] |
no_license
|
pabloirigo/IS_RProjects
|
6343e968523155164e4ef8d41dfbbe68f7588c35
|
e6576d713641038c09495c6dc8be3d458e10e3ab
|
refs/heads/master
| 2020-04-28T15:38:32.411954
| 2019-05-20T11:58:47
| 2019-05-20T11:58:47
| 175,378,835
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,441
|
r
|
main TheFeetMaze.R
|
# =======================================================================
# Names:Luis Urrechaga, Pablo Irigoyen
# Group Number: B
# Assignment:TheFeetMaze
# Date:
# =======================================================================
# 1. Be sure to include, with this template, any necessary files
# for execution, including datasets (problem.R, methodXXX.R, ...)
# (submission of the entire template folder is recommended)
# 2. If you use a function of a certain package, do not forget to include the
# corresponding call to the "library ()" function
# 3. Do not forget to comment on the code, especially those non-trivial commands
# (remember that part of the rating depends on the cleaning of the code)
# 4. It is strongly recommended to test any implemented function in order to
# check for its proper operation
# =======================================================================
# (This is a general code, you must adapt it)
# =======================================================================
# Configuring the Environment
# Reset the session: clear workspace, console (form-feed), open devices.
rm(list=ls())
cat("\014")
graphics.off()
# Work relative to the directory containing this script (RStudio-only API).
setwd(dirname(rstudioapi::getActiveDocumentContext()$path))
getwd()
dir()
# LIBRARIES (add any needed library here)
library(rstudioapi)
library(ggplot2)
library(gridExtra)
# ADDITIONAL FUNCTIONS (add any used method/problem here)
source("../problems/problem TheFeetMaze.R")
source("../methods/Breadth First Search.R")
source("../methods/Depth First Search.R")
#source("../methods/YYYYYYYYYYY.R")
# And here, there are additional (needed) functions
source("../methods/Expand Node.R")
source("../methods/Analyze Results.R")
source("../methods/Plot Results.R")
# =======================================================================
# Check the proper operation of implemented function here!
# =======================================================================
# Solving of the problem (you have to adapt it)
# Build the maze problem from the CSV map files (txt variant kept below).
#problem = initialize.problem("../data/map.txt", "../data/map_Up.txt", "../data/map_Down.txt", "../data/map_Right.txt", "../data/map_Left.txt")
problem = initialize.problem("../data/map.csv", "../data/map_Up.csv", "../data/map_Down.csv", "../data/map_Right.csv", "../data/map_Left.csv")
print(problem)
# Solve with BFS and DFS (graph search, each with a count.limit bound).
res1 = Breadth.First.Search(problem, count.limit = 1000, graph.search = T)
res2 = Depth.First.Search(problem,count.limit = 10000, graph.search = T)
all = list(res1, res2)
# Compare both searches on the same problem instance.
analyze.results(list(res1,res2),problem)
|
a4ddd50344e9b730b569546137c987e4d5585172
|
a864a155a4ac597349fdc6a0d0b7618e695f1f2e
|
/man/spatialCoords-set-SpatialExperiment-method.Rd
|
3d23adcc0fd2a9b13f5ce5e0bdd0dc9e70f936e4
|
[] |
no_license
|
celsomilne/SpatialExperiment
|
eb80936e938596ced34514c5a7af4b30e18e7658
|
ae1da7d1eaeeed6bee5492b0cb08a7e05ac544f2
|
refs/heads/master
| 2022-12-02T06:22:56.199378
| 2020-07-31T00:52:29
| 2020-07-31T00:52:29
| 280,007,348
| 0
| 0
| null | 2020-07-31T00:52:30
| 2020-07-15T23:49:43
| null |
UTF-8
|
R
| false
| true
| 813
|
rd
|
spatialCoords-set-SpatialExperiment-method.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/SpatialExperimentMethods.R
\name{spatialCoords<-,SpatialExperiment-method}
\alias{spatialCoords<-,SpatialExperiment-method}
\alias{spatialCoords<-}
\title{spatialCoords-setter}
\usage{
\S4method{spatialCoords}{SpatialExperiment}(x) <- value
}
\arguments{
\item{x}{a SpatialExperiment class object}
\item{value}{a DataFrame with the new spatial coordinates to set.}
}
\value{
none
}
\description{
A setter method that sets or replaces the spatial coordinates in a
SpatialExperiment class object.
}
\examples{
example(SpatialExperiment)
fakeFishCoords <- cbind(fishCoordinates[,c(1:3)], fishCoordinates[,3])
colnames(fakeFishCoords) <- c("MyCell_ID", "Irrelevant", "x", "y")
spatialCoords(se) <- fakeFishCoords
spatialCoords(se)
}
|
99443471a1b146eb35771003e9ce086726c9f88e
|
eb7ab30af415f747378906f0596f06ee3729a10c
|
/man/skriging.Rd
|
c896227a3e39bfe006530326a80b05d1ea1959e3
|
[] |
no_license
|
cran/SK
|
f6a33286e3e5b74ac7fa55cc3a9ae492774744b4
|
8d04a47caa3b2a613305b47323f7d33b6eac6f8e
|
refs/heads/master
| 2020-03-17T14:33:28.229937
| 2018-05-16T13:17:01
| 2018-05-16T13:17:01
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 1,741
|
rd
|
skriging.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/skriging.R
\name{skriging}
\alias{skriging}
\alias{print.skriging}
\alias{plot.skriging}
\title{Function for Segment-based Kriging models}
\usage{
skriging(formula, polyline = polyline, method = "srk",
lwd = "width", obspred = "obs1pred0", boxcox = TRUE)
\method{print}{skriging}(x, ...)
\method{plot}{skriging}(x, studyarea = NULL, ...)
}
\arguments{
\item{formula}{A skriging formula.}
\item{polyline}{A shapefile of spatial polyline.}
\item{method}{A character string selecting the segment-based Kriging model. The default is "srk",
segment-based regression Kriging. Another method is "sok", segment-based ordinary
Kriging.}
\item{lwd}{A fixed number or a variable name of polyline of the line width.}
\item{obspred}{A variable name of polyline to define the observation and prediction lines.
Observation is 1 and prediction is 0.}
\item{boxcox}{A logical parameter to set whether the dependent variable should be transformed
with boxcox function. The default is TRUE.}
\item{x}{A list of \code{skriging} result.}
\item{studyarea}{A shapefile of spatial polygon of study area.}
\item{...}{new print and plot}
}
\description{
Segment-based Kriging models, including Segment-based Ordinary Kriging (SOK) and
Segment-based Regression Kriging (SRK), for spatial prediction of
line segment spatial data (polyline). The methods are described in
Yongze Song (2018) <doi:10.1109/TITS.2018.2805817>.
}
\examples{
## SRK: segment-based regression Kriging
## dataset 'vtest' is a sample of dataset 'vehicles'
srk1 <- skriging(heavy ~ wpai + width, polyline = vtest, method = "srk",
lwd = "width", obspred = "obs1pred0", boxcox = TRUE)
srk1
plot(srk1)
}
|
18ed9be153ffe6e5ca57fb87d2cbeb3b21888cd0
|
b9d8c0753c70ab28bc16249cf2c8ef3a9d2f0c7e
|
/data cleaning/week_3_quiz.R
|
cee4f94ff707001efe535db477476131a51802e1
|
[] |
no_license
|
sesh1729/datasciencecoursera
|
8e26201a8ac07c9b0438cd5e974f5fbbfc191af5
|
3c8d65b7fba35c37bfea426ee504dd1225b84866
|
refs/heads/master
| 2021-01-17T12:45:54.903557
| 2016-11-07T07:16:49
| 2016-11-07T07:16:49
| 59,560,742
| 0
| 1
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,235
|
r
|
week_3_quiz.R
|
#download.file(url='https://d396qusza40orc.cloudfront.net/getdata%2Fdata%2Fss06hid.csv',destfile='idaho_housing.csv',method='curl')
# Q1: count households that own more than 10 acres (ACR == 3) and sold more
# than $10,000 of agricultural products (AGS == 6).
idaho_housing_data<-read.csv(file='idaho_housing.csv')
households<-idaho_housing_data$ACR==3 & idaho_housing_data$AGS==6
# NA values in ACR/AGS propagate into 'households'; which() drops them.
which(households)
#download.file(url='https://d396qusza40orc.cloudfront.net/getdata%2Fjeff.jpg',destfile = 'instructor.jpeg',method='curl')
# Q2: 30th and 80th quantiles of the raw image data.
# NOTE(review): readJPEG() comes from the 'jpeg' package -- assumes it was
# loaded elsewhere in the session; confirm.
img<-readJPEG(source='instructor.jpeg',native=TRUE)
quantile(img,c(0.3,0.8))
#download.file(url='https://d396qusza40orc.cloudfront.net/getdata%2Fdata%2FGDP.csv',destfile = 'gdpdata.csv')
#download.file(destfile = 'edudata.csv',url='https://d396qusza40orc.cloudfront.net/getdata%2Fdata%2FEDSTATS_Country.csv')
# Q3-Q5: merge the GDP ranking table with the country education data.
# skip = 3 drops the header lines above the actual table in the GDP csv.
gdpdata<-read.csv(file='gdpdata.csv',skip = 3,blank.lines.skip = TRUE,strip.white=TRUE)
# Keep only rows that have both a ranking and a country code.
# NOTE(review): filter/arrange/group_by/summarise below are dplyr verbs --
# assumes library(dplyr) was loaded elsewhere; confirm.
gdpdata<-filter(gdpdata,Ranking!=''&X!='')
gdpdata$Ranking<-as.numeric(as.character(gdpdata$Ranking))
# Strip thousands separators before converting the dollar amounts to numeric.
gdpdata$US.dollars.<-as.numeric(gsub(",","",gdpdata$US.dollars.))
edudata<-read.csv(file='edudata.csv')
# Join on country code; column 'X' holds the code in the GDP table.
merged_data<-merge(edudata,gdpdata,by.x = 'CountryCode',by.y = 'X')
merged_data<-arrange(merged_data,desc(Ranking))
# Average GDP ranking per income group.
summarise(group_by(merged_data,Income.Group),mean(Ranking))
# Cross-tabulate income group against five equal-width ranking bins.
table(merged_data$Income.Group,cut(merged_data$Ranking,5))
|
d9c5e24255999bd547334aaf24f1cee6220cd6b7
|
12e09fb76eac4d76ef25c4214d6a40128282818d
|
/R/stj_baixar_docs_pdf.R
|
7b1ee5a04ffd06a2062e56d77f376e0f6d012fb2
|
[
"MIT"
] |
permissive
|
jjesusfilho/stj
|
76d5676c447a9fb58e42f4bdec9a0761c0a43c0a
|
2a4ccd87ba81197b4f929839830ca71135e15aea
|
refs/heads/master
| 2023-06-22T03:30:11.940248
| 2023-06-20T16:55:25
| 2023-06-20T16:55:25
| 202,847,868
| 12
| 6
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,165
|
r
|
stj_baixar_docs_pdf.R
|
#' Download court documents as PDF
#'
#' Tries three different STJ document endpoints for each (sequential,
#' registration, date) triple and writes the best available PDF to disk.
#'
#' @param sequencial Document sequential number.
#' @param registro Case registration number.
#' @param data Date in dd/mm/yyyy format.
#' @param diretorio Directory in which to store the PDFs.
#'
#' @return pdf
#' @export
#'
#' @examples
#' \dontrun{
#' stj_baixar_docs_pdf("112282908", "20200165980","04/08/2020")
#'
#' }
stj_baixar_docs_pdf <- function(sequencial, registro, data, diretorio = "."){
  # Normalize the registration number to digits only.
  registro <- registro |>
    stringr::str_remove_all("\\D")
  # Convert dd/mm/yyyy to the yyyymmdd digit string the endpoints expect.
  data <- data |>
    lubridate::dmy() %>%
    stringr::str_remove_all("\\D")
  # Disable SSL peer verification for the court's servers.
  httr::set_config(httr::config(ssl_verifypeer = 0L))
  # Iterate over the triples; possibly(..., NULL) swallows per-item failures
  # so one bad document does not abort the whole batch.
  purrr::pwalk(list(x = sequencial, y = registro, z = data), purrr::possibly(function(x,y,z) {
    # Three alternative endpoints that may serve the same document.
    u1 <- paste0("https://www.stj.jus.br/websecstj/cgi/revista/REJ.cgi/MON?seq=",x,"&tipo=0&nreg=",y,"&SeqCgrmaSessao=&CodOrgaoJgdr=&dt=", z, "&formato=PDF&salvar=false")
    u2 <- paste0("https://processo.stj.jus.br/processo/dj/documento/?&sequencial=", x,"&num_registro=",y,"&data=",z, "&formato=PDF&componente=MON")
    u3 <- paste0("https://ww2.stj.jus.br/websecstj/cgi/revista/REJ.cgi/ITA?seq=",x,"&tipo=0&nreg=",y, "&SeqCgrmaSessao=&CodOrgaoJgdr=&dt=",z, "&formato=PDF&salvar=false")
    r1 <- httr::GET(u1)
    r2 <- httr::GET(u2)
    r3 <- curl::curl_fetch_memory(u3)
    rs <- list(r1, r2, r3)
    tipo1 <- r1$headers$`content-type`
    tipo2 <- r2$headers$`content-type`
    tipo3 <- r3$type
    pdf <- "application/pdf"
    # Which of the three responses actually returned a PDF?
    pdfs <- is.element(c(tipo1, tipo2, tipo3), pdf)
    tamanho1 <- r1$headers$`content-length` |> as.integer()
    tamanho2 <- r2$headers$`content-length` |> as.integer()
    # curl returns raw headers; extract the Content-Length value(s) manually.
    tamanho3 <- r3$headers |>
      rawToChar() |>
      stringr::str_squish() |>
      stringr::str_extract_all("(?<=Content-Length: )\\d+") |>
      unlist()|>
      as.integer() |>
      max()
    # If every endpoint returned a PDF, keep the largest response; otherwise
    # keep whichever one(s) did return a PDF.
    if (all(pdfs)){
      r <- rs[which.max(c(tamanho1, tamanho2, tamanho3))][[1]]
    } else {
      r <- rs[which(pdfs)][[1]]
    }
    # Write the PDF bytes under a descriptive, reproducible file name.
    arquivo <- file.path(diretorio, paste0("stj_doc_sequencial_",x, "_registro_", y, "_data_", z,".pdf"))
    writeBin(r$content, arquivo)
  },NULL))
}
|
1414e48d5df5de3e341aef4d17a5f646a645d53c
|
a855bb0b9c803b79c756b581b35492f4571c1c67
|
/Cleaning_data.R
|
090ac539532f783d5066bc0379ae6c08683f8cad
|
[] |
no_license
|
aellman13/NYT_Recipe_scrapy
|
64d0da65eda98a9bd47a47d2d76731830de83b01
|
66a2622243309191cf850d53579120ec2b6e0a25
|
refs/heads/master
| 2020-05-19T02:52:02.974264
| 2019-06-19T21:24:24
| 2019-06-19T21:24:24
| 184,787,947
| 2
| 1
| null | null | null | null |
UTF-8
|
R
| false
| false
| 319
|
r
|
Cleaning_data.R
|
# Clean the scraped NYT recipes data: add bookkeeping columns and convert
# selected character columns to factors.
library(tidyverse)

recipes <- read_csv('NYT_recipes2.csv')

# Make new columns
recipes['index'] <- seq_len(nrow(recipes))
recipes['time_in_minutes'] <- 0

# Convert one column of a data frame to a factor.
#
# column_name: name of the column to convert, as a string.
# df:          the data frame to modify.
# Returns df with that column converted via forcats::as_factor().
#
# Fix: the original used df$column_name, which looks up a column literally
# named "column_name" (and assigned the result to a new 'column_' column),
# and never returned the modified data frame. [[ ]] with a string name and
# an explicit return fix both problems.
col_to_factor = function(column_name, df){
  df[[column_name]] <- forcats::as_factor(df[[column_name]])
  df
}

# Pass the column name as a string; the original unquoted 'author' would
# have raised "object 'author' not found".
recipes <- col_to_factor(column_name = "author", df = recipes)
|
769d27575fd006d493b18cf373fe6495009b7b37
|
e86bc617d84f3c31be186460b53833eac1076658
|
/man/repsample.Rd
|
9d0db8da023fb98c081e05d53ae4c8b2c7a91c12
|
[] |
no_license
|
qkdrk777777/lotto
|
7ba535c3b47d0c202d94d39cfc8c390158ebee10
|
f9f61e1aa1a7755e55d23486017dfc2add2049be
|
refs/heads/master
| 2021-05-08T19:39:52.598341
| 2018-03-17T08:43:55
| 2018-03-17T08:43:55
| 119,474,782
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 629
|
rd
|
repsample.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/6 sampling2.R
\name{repsample}
\alias{repsample}
\title{sample2}
\usage{
repsample(n, count = 1, keep = NULL, drop = NULL, hold = NULL,
up = NULL, header = 10, write = F)
}
\arguments{
\item{n}{The number of times the sample is drawn.}
\item{keep}{The number(s) whose sampling probability should be kept.}
\item{drop}{Number(s) that must not appear in the sample.}
\item{hold}{Number(s) that always appear in the sample.}
}
\description{
sample2
}
\examples{
repsample(1000,up=1)
repsample(1000,header=10)
(k<-sort(num_per()[as.numeric(data[1,1:6])],dec=T))
}
|
c0a88a67ceecbb76847bab75f7db03c0950ba480
|
4385010fdebcfd752eb8f53de5293c52df6ef838
|
/2020/day03.R
|
db01414e1062856749f6f28c1412c5e59dcfb7df
|
[] |
no_license
|
nickopotamus/advent-of-code
|
53456b871a43f0b47d54d153b1ad439fc298ecb6
|
62009283e095ebdd6065ae709a1ec84a4ed59520
|
refs/heads/main
| 2023-01-31T18:59:51.747768
| 2020-12-10T17:32:17
| 2020-12-10T17:32:17
| 320,250,303
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 839
|
r
|
day03.R
|
# Advent of Code 2020, day 3: count trees ('#') hit while sledding down a
# horizontally repeating grid at various slopes.

# Read input - 323 routes
input <- readLines('input03.txt')

## Part 1
# Slope: 3 right, 1 down. 'positions' is the column visited on each row;
# the grid repeats horizontally, so columns wrap with %%.
positions <- (3 * (seq_along(input) - 1)) %% nchar(input) + 1
# TRUE where a tree is encountered; sum() counts the TRUEs.
sum(substr(input, positions, positions) == '#')

## Part 2
# Generalise part 1 to an arbitrary (right, down) slope.
#
# right: columns moved per step.
# down:  rows moved per step (default 1).
# Returns the tree count as a double so the product of the five counts
# below cannot overflow R's integers.
count_trees <- function(right, down = 1) {
  vertical <- seq(0, length(input) - 1, by = down) + 1
  horizontal <- (right * (seq_along(input) - 1)) %% nchar(input) + 1
  # For down > 1 fewer rows are visited; truncate the column sequence to match.
  horizontal <- head(horizontal, length(vertical))
  as.double( # To avoid overflow in integers
    sum(substr(input[vertical], horizontal, horizontal) == '#')
  )
}

# Product of the tree counts over all the required slopes.
# (Assignment changed from '=' to the conventional '<-'.)
total <-
  count_trees(1) *
  count_trees(3) *
  count_trees(5) *
  count_trees(7) *
  count_trees(1, 2)
|
55ab4ea90fd068493cb5c61e5f40166a014686db
|
7e9d5d4490544d217ee3b291beb950c3e80c1428
|
/man/summaryRedcapDqaResults.Rd
|
8dbe414dba93d5364d1fe5e2d2363a80c34337ec
|
[] |
no_license
|
lagvier/RedcapDqa
|
5651ac1e2d4826ae86c7545842316db1652bb600
|
f664129f62be61845f3fcf9a5a6caf6672942590
|
refs/heads/master
| 2021-01-23T19:59:32.473750
| 2016-11-22T14:02:29
| 2016-11-22T14:02:29
| 102,841,070
| 0
| 1
| null | 2017-09-08T08:58:17
| 2017-09-08T08:58:17
| null |
UTF-8
|
R
| false
| true
| 334
|
rd
|
summaryRedcapDqaResults.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/Concordance.R
\name{summary.RedcapDqaResults}
\alias{summary.RedcapDqaResults}
\title{summary DQA Results}
\usage{
\method{summary}{RedcapDqaResults}(x, ...)
}
\description{
Summarize DQA Results
}
\details{
Generic summary method
}
\concept{
RedcapDqa
}
|
62df8cb177d74713dcd2c84a08d95cecd4a67466
|
3b26ab6bc88a47dfef383d4937558e4bd44da506
|
/man/lookAtPair.Rd
|
144c50942944365dde3de7b472731c1b58fc2522
|
[
"MIT"
] |
permissive
|
SMBaylis/fishSim
|
affafad3915dad24057895d1b0708bc53dd206bd
|
2f98c4545780d4d42f63dd169fb9902c61d0c614
|
refs/heads/master
| 2021-08-02T18:07:06.651542
| 2021-07-23T06:17:11
| 2021-07-23T06:17:11
| 144,930,871
| 3
| 2
|
MIT
| 2021-02-15T01:28:04
| 2018-08-16T03:17:48
|
R
|
UTF-8
|
R
| false
| true
| 1,421
|
rd
|
lookAtPair.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/fishSim_dev.R
\name{lookAtPair}
\alias{lookAtPair}
\title{show a readable relationship summary for a pair of relatives}
\usage{
lookAtPair(pair)
}
\arguments{
\item{pair}{a data.frame object, with structure identical to a row from findRelatives()}
}
\description{
lookAtPair takes a single row from the output of findRelatives(), and returns a 7-by-7
matrix (as data.frame) showing the number of shared ancestors for each relationship class.
The lookAtPair output is symmetric about the diagonal, and is particularly useful for
displaying departures from expected relationship structures. For instance, the output
> lookAtPair(pair)
X1 X2 X3 X4 X5 X6 X7
1 . . 1 . . . .
2 . . . 2 . . .
3 1 . . . 4 . .
4 . 2 . . . 8 .
5 . . 4 . . 1 16
6 . . . 8 1 . 3
7 . . . . 16 3 .
shows a pair where one individual is the other's grandparent (shown by the 1 in [1,X3], and
doubling series proceeding diagonally down from that point),
further related via shared great-great-grandparent / great-great-great-grandparent
(the 1 in [5,X6], and doubling series proceeding diagonally from that point), and a shared
great-great-great-grandparent / great-great-great-great-grandparent (the 3 in [6,X7], where
we would otherwise expect a 2 from the previous shared ancestor).
}
\seealso{
[fishSim::findRelatives()]
}
|
d5f34fc48e4876097d195726cc12594c4cfef2b2
|
ed53601b1b157a91398fc862cc0c1015bc115d44
|
/R/Interplot_plot.R
|
bcdd2f5a4c262e5667073038c324a3bbeff7251b
|
[] |
no_license
|
cran/interplot
|
ba1ff1b62f0e7bc83a6f504d5a109072e2d3f490
|
44eb5f233740c6b52cb328291e611803f489fd7f
|
refs/heads/master
| 2021-07-20T00:35:20.610136
| 2021-02-18T05:20:07
| 2021-02-18T05:20:07
| 38,122,196
| 2
| 4
| null | null | null | null |
UTF-8
|
R
| false
| false
| 13,156
|
r
|
Interplot_plot.R
|
#' Plot Conditional Coefficients in Models with Interaction Terms
#'
#' Graph based on the data frame of statistics about the conditional effect of an interaction.
#'
#' @param m A model object including an interaction term, or, alternately, a data frame recording conditional coefficients. This data frame should includes four columns:
#' \itemize{
#' \item fake: The sequence of \code{var1} (the item whose effect will be conditioned on in the interaction);
#' \item coef1: The point estimates of the coefficient of \code{var1} at each break point.
#' \item ub: The upper bound of the simulated 95\% CI.
#' \item lb: The lower bound of the simulated 95\% CI.
#' }
#' @param var1 The name (as a string) of the variable of interest in the interaction term; its conditional coefficient estimates will be plotted.
#' @param var2 The name (as a string) of the other variable in the interaction term.
#' @param plot A logical value indicating whether the output is a plot or a dataframe including the conditional coefficient estimates of var1, their upper and lower bounds, and the corresponding values of var2.
#' @param steps Desired length of the sequence. A non-negative number, which for seq and seq.int will be rounded up if fractional. The default is 100 or the unique categories in the \code{var2} (when it is less than 100. Also see \code{\link{unique}}).
#' @param ci is a numeric value inherited from the data wrangling functions in this package. Adding it here is just for the method consistency.
#' @param adjCI Succeeded from the data management functions in `interplot` package.
#' @param hist A logical value indicating if there is a histogram of `var2` added at the bottom of the conditional effect plot.
#' @param var2_dt A numerical value indicating the frequency distribution of `var2`. It is only used when `hist == TRUE`. When the object is a model, the default is the distribution of `var2` of the model.
#' @param predPro A logical value with default of `FALSE`. When the `m` is an output of a general linear model (class `glm` or `glmerMod`) and the argument is set to `TRUE`, the function will plot predicted probabilities at the values given by `var2_vals`.
#' @param var2_vals A numerical value indicating the values the predicted probabilities are estimated, when `predPro` is `TRUE`.
#' @param point A logical value determining the format of plot. By default, the function produces a line plot when var2 takes on ten or more distinct values and a point (dot-and-whisker) plot otherwise; option TRUE forces a point plot.
#' @param sims Number of independent simulation draws used to calculate upper and lower bounds of coefficient estimates: lower values run faster; higher values produce smoother curves.
#' @param xmin A numerical value indicating the minimum value shown of x shown in the graph. Rarely used.
#' @param xmax A numerical value indicating the maximum value shown of x shown in the graph. Rarely used.
#' @param ercolor A character value indicating the outline color of the whisker or ribbon.
#' @param esize A numerical value indicating the size of the whisker or ribbon.
#' @param ralpha A numerical value indicating the transparency of the ribbon.
#' @param rfill A character value indicating the filling color of the ribbon.
#' @param ... Other ggplot aesthetics arguments for points in the dot-whisker plot or lines in the line-ribbon plots. Not currently used.
#' @param stats_cp A character value indicating what statistics to present as the plot note. Three options are available: "none", "ci", and "ks". The default is "none". See the Details for more information.
#' @param txt_caption A character string to add a note for the plot, a value will sending to \code{ggplot2::labs(caption = txt_caption))}.
#' @param ci_diff A numerical vector with a pair of values indicating the confidence intervals of the difference between \code{var1} and \code{var2}.
#' @param ks_diff A \code{ks.test} object of the effect of \code{var1} conditioned on \code{var2}.
#'
#' @details \code{interplot.plot} is a S3 method from the \code{interplot}. It generates plots of conditional coefficients.
#'
#' Because the output function is based on \code{\link[ggplot2]{ggplot}}, any additional arguments and layers supported by \code{ggplot2} can be added with the \code{+}.
#'
#' \code{interplot} visualizes the conditional effect based on simulated marginal effects. The simulation provides a probabilistic distribution of moderation effect of the conditioning variable (\code{var2}) at every preset values (including the minimum and maximum values) of the conditioned variable (\code{var1}), denoted as Emin and Emax. This output allows the function to further examine the conditional effect statistically in two ways. One is to examine if the distribution of \eqn{Emax - Emin} covers zero. The other is to directly compare Emin and Emax through statistical tools for distributional comparisons. Users can choose either method by setting the argument \code{stats_cp} to "ci" or "ks".
#' \itemize{
#' \item "ci" provides the confidence interval of the difference of \eqn{Emax - Emin}. An interval including 0 suggests no statistical difference before and after the conditional effect is applied, and vise versa.
#' \item "ks" presents the result of a two-sample Kolmogorov-Smirnov test of the simulated distributions of Emin and Emax. The output includes a D statistics and a p-value of the null hypothesis that the two distributions come from the same distribution at the 0.05 level.
#' }
#'
#' See an illustration in the package vignette.
#'
#' @return The function returns a \code{ggplot} object.
#'
#' @import ggplot2
#' @importFrom graphics hist
#' @importFrom dplyr mutate
#'
#'
#' @export
## S3 method for class 'data.frame'
# Build the conditional-coefficient plot from a data frame of simulated
# estimates (columns: fake, coef1, lb, ub, and optionally value).
interplot.plot <- function(m, var1 = NULL, var2 = NULL, plot = TRUE, steps = NULL, ci = .95, adjCI = FALSE, hist = FALSE, var2_dt = NULL, predPro = FALSE, var2_vals = NULL, point = FALSE, sims = 5000, xmin = NA, xmax = NA, ercolor = NA, esize = 0.5, ralpha = 0.5, rfill = "grey70", stats_cp = "none", txt_caption = NULL, ci_diff = NULL, ks_diff = NULL, ...) {

  # One estimate per row of m unless the caller asked for a fixed number of
  # break points.
  if(is.null(steps)) steps <- nrow(m)
  # Break points of the conditioning variable, used as x-axis tick marks.
  levels <- sort(unique(m$fake))

  ymin <- ymax <- vector() # to deal with the "no visible binding for global variable" issue
  xdiff <- vector() # to deal with the "no visible binding for global variable" issue
  test_cp <- vector() # to deal with the "no visible binding for global variable" issue

  # Predicted probabilities require the 'value' column produced upstream.
  if(predPro == TRUE){
    if(is.null(m$value)) stop("The input data.frame does not include required information.")
  }

  if (hist == FALSE) {
    # --- plain conditional-effect plot, no histogram underlay ---
    if (steps < 5 | point == T) {
      # Few break points (or point = TRUE): dot-and-whisker plot.
      if (is.na(ercolor)) ercolor <- "black" # ensure whisker can be drawn
      if(predPro == TRUE){
        coef.plot <- ggplot(m, aes_string(x = "fake", y = "coef1", colour = "value")) + geom_point(...) + geom_errorbar(aes_string(ymin = "lb", ymax = "ub", colour = "value"), width = 0, size = esize) + scale_x_continuous(breaks = levels) + ylab(NULL) + xlab(NULL)
      }else{
        coef.plot <- ggplot(m, aes_string(x = "fake", y = "coef1")) + geom_point(...) + geom_errorbar(aes_string(ymin = "lb", ymax = "ub"), width = 0, color = ercolor, size = esize) + scale_x_continuous(breaks = levels) + ylab(NULL) + xlab(NULL)
      }
    } else {
      # Many break points: line plot with a confidence ribbon.
      if(predPro == TRUE){
        coef.plot <- ggplot(m, aes_string(x = "fake", y = "coef1", colour = "value")) + geom_line(...) + geom_ribbon(aes_string(ymin = "lb", ymax = "ub", fill = "value"), alpha = ralpha) + ylab(NULL) + xlab(NULL)
      }else{
        coef.plot <- ggplot(m, aes_string(x = "fake", y = "coef1")) + geom_line(...) + geom_ribbon(aes_string(ymin = "lb", ymax = "ub"), alpha = ralpha, color = ercolor, fill = rfill) + ylab(NULL) + xlab(NULL)
      }
    }
    # Optional caption reporting a distribution-comparison statistic.
    if(stats_cp == "ci"){
      test_cp <- paste0("CI(Max - Min): [", round(ci_diff[1], digits = 3), ", ", round(ci_diff[2], digits = 3), "]")
    }else if(stats_cp == "ks"){
      test_cp <- paste0("D(KS): ", ks_diff$statistic, "(p-value = ", format(round(ks_diff$p.value, digits = 3), nsmall = 3), ")")
    }
    coef.plot <- coef.plot +
      labs(caption = paste0(test_cp, txt_caption))
    return(coef.plot)
  } else {
    # --- conditional-effect plot with a histogram of var2 at the bottom ---
    if (point == T) {
      # Dot-and-whisker variant with histogram underlay.
      if (is.na(ercolor)) ercolor <- "black" # ensure whisker can be drawn
      yrange <- c(m$ub, m$lb, var2_dt)
      maxdiff <- (max(yrange) - min(yrange))
      # Cap the number of histogram bins at 100.
      break_var2 <- steps + 1
      if (break_var2 >= 100)
        break_var2 <- 100
      hist.out <- hist(var2_dt, breaks = seq(min(var2_dt), max(var2_dt), l = break_var2), plot = FALSE)
      n.hist <- length(hist.out$mids)
      # Bar width: narrower bars when there are only a few break points.
      if (steps <10) {
        dist <- (hist.out$mids[2] - hist.out$mids[1])/3
      } else {
        dist <- hist.out$mids[2] - hist.out$mids[1]
      }
      hist.max <- max(hist.out$counts)
      # Rectangle coordinates for the histogram, scaled to occupy the bottom
      # fifth of the y range.
      if (steps <10) {
        histX <- data.frame(ymin = rep(min(yrange) - maxdiff/5, n.hist),
                            ymax = hist.out$counts/hist.max * maxdiff/5 + min(yrange) - maxdiff/5,
                            xmin = sort(unique(var2_dt)) - dist/2,
                            xmax = sort(unique(var2_dt)) + dist/2)
      } else {
        histX <- data.frame(ymin = rep(min(yrange) - maxdiff/5, n.hist),
                            ymax = hist.out$counts/hist.max * maxdiff/5 + min(yrange) - maxdiff/5,
                            xmin = hist.out$mids - dist/2,
                            xmax = hist.out$mids + dist/2)
      }
      # when up to 10, the sort(unique(var2_dt)) - dist/2 leads to a problematic histogram
      if (steps <10) {
        histX_sub <- histX
      } else {
        histX_sub <- mutate(histX, xdiff = xmax - xmin, xmax = xmax - xdiff/2)
      }
      coef.plot <- ggplot()
      coef.plot <- coef.plot + geom_rect(data = histX, aes(xmin = xmin, xmax = xmax, ymin = ymin, ymax = ymax), colour = "gray50", alpha = 0, size = 0.5) # histogram
      if(predPro == TRUE){
        coef.plot <- coef.plot + geom_errorbar(data = m, aes_string(x = "fake", ymin = "lb", ymax = "ub", colour = "value"), width = 0, size = esize) + scale_x_continuous(breaks = levels) + ylab(NULL) + xlab(NULL) + geom_point(data = m, aes_string(x = "fake", y = "coef1", colour = "value"))
      }else{
        coef.plot <- coef.plot + geom_errorbar(data = m, aes_string(x = "fake", ymin = "lb", ymax = "ub"), width = 0, color = ercolor, size = esize) + scale_x_continuous(breaks = levels) + ylab(NULL) + xlab(NULL) + geom_point(data = m, aes_string(x = "fake", y = "coef1"))
      }
    } else {
      # Line-and-ribbon variant with histogram underlay.
      yrange <- c(m$ub, m$lb)
      maxdiff <- (max(yrange) - min(yrange))
      break_var2 <- length(unique(var2_dt))
      if (break_var2 >= 100)
        break_var2 <- 100
      hist.out <- hist(var2_dt, breaks = break_var2, plot = FALSE)
      n.hist <- length(hist.out$mids)
      dist <- hist.out$mids[2] - hist.out$mids[1]
      hist.max <- max(hist.out$counts)
      histX <- data.frame(ymin = rep(min(yrange) - maxdiff/5, n.hist), ymax = hist.out$counts/hist.max * maxdiff/5 + min(yrange) - maxdiff/5, xmin = hist.out$mids - dist/2, xmax = hist.out$mids + dist/2)
      coef.plot <- ggplot()
      coef.plot <- coef.plot + geom_rect(data = histX, aes(xmin = xmin, xmax = xmax, ymin = ymin, ymax = ymax), colour = "gray50", alpha = 0, size = 0.5)
      if(predPro == TRUE){
        coef.plot <- coef.plot + geom_line(data = m, aes_string(x = "fake", y = "coef1", colour = "value")) + geom_ribbon(data = m, aes_string(x = "fake", ymin = "lb", ymax = "ub", fill = "value"), alpha = ralpha) + ylab(NULL) + xlab(NULL)
      }else{
        coef.plot <- coef.plot + geom_line(data = m, aes_string(x = "fake", y = "coef1")) + geom_ribbon(data = m, aes_string(x = "fake", ymin = "lb", ymax = "ub"), alpha = ralpha, color = ercolor, fill = rfill) + ylab(NULL) + xlab(NULL)
      }
    }
    # Same optional caption logic as the non-histogram branch.
    if(stats_cp == "ci"){
      test_cp <- paste0("CI(Max - Min): [", round(ci_diff[1], digits = 3), ", ", round(ci_diff[2], digits = 3), "]")
    }else if(stats_cp == "ks"){
      test_cp <- paste0("D(KS): ", ks_diff$statistic, "(p-value = ", format(round(ks_diff$p.value, digits = 3), nsmall = 3), ")")
    }
    coef.plot <- coef.plot +
      labs(caption = paste0(test_cp, txt_caption))
    return(coef.plot)
  }
}
|
fd11412635fda170bca782ad04f620ce924e94d8
|
397182bd2f6ed4e39dcc3f3d62e89807af03ac5a
|
/R/frontend-arcs.R
|
e53e986b08bc2a396953c7d8a3c419971c621a8c
|
[] |
no_license
|
cran/bnlearn
|
e04a869228c237067fc09d64b8c65a360dada76a
|
dda6458daf2e0b25d3e87313f35f23d8a1c440c1
|
refs/heads/master
| 2023-05-14T14:21:07.744313
| 2023-04-29T09:30:02
| 2023-04-29T09:30:02
| 17,694,839
| 58
| 42
| null | 2020-01-03T23:49:19
| 2014-03-13T04:09:21
|
C
|
UTF-8
|
R
| false
| false
| 4,861
|
r
|
frontend-arcs.R
|
# return the arcs in the graph.
arcs = function(x) {

  # check x's class.
  check.bn.or.fit(x)

  # fitted networks store the arcs in the local distributions, so they must
  # be extracted with fit2arcs().
  if (is(x, "bn"))
    x$arcs
  else
    fit2arcs(x)

}#ARCS

# rebuild the network structure using a new set of arcs.
#
# x:             a "bn" object.
# check.cycles:  whether to verify that the new arc set is acyclic.
# check.illegal: whether to reject arcs that violate the parametric
#                assumptions recorded in x$learning$illegal.
# value:         a two-column (from, to) set of arcs.
"arcs<-" = function(x, check.cycles = TRUE, check.illegal = TRUE, debug = FALSE,
    value) {

  # check x's class.
  check.bn(x)
  # a set of arcs is needed.
  if (missing(value))
    stop("no arc specified.")
  # check logical arguments.
  check.logical(check.cycles)
  check.logical(check.illegal)
  check.logical(debug)
  # sanitize the set of arcs.
  value = check.arcs(value, nodes = names(x$nodes))

  # check whether the graph contains directed cycles.
  if (check.cycles)
    if (!is.acyclic(nodes = names(x$nodes), arcs = value, debug = debug,
          directed = TRUE))
      stop("the specified network contains cycles.")

  # check whether any arc is illegal.
  if (check.illegal) {

    illegal = which.listed(value, x$learning$illegal)

    if (any(illegal)) {

      # format the offending arcs as " (from, to)" for the error message.
      illegal = apply(value[illegal, , drop = FALSE], 1,
        function(x) { paste(" (", x[1], ", ", x[2], ")", sep = "") })

      stop("the following arcs are not valid due to the parametric assumptions of the network:",
        illegal, ".")

    }#THEN

  }#THEN

  # update the arcs of the network.
  x$arcs = value
  # update the network structure.
  x$nodes = cache.structure(names(x$nodes), arcs = x$arcs, debug = debug)

  return(x)

}#ARCS<-
# return the directed arcs in the graph.
directed.arcs = function(x) {

  # check x's class.
  check.bn.or.fit(x)

  if (is(x, "bn")) {

    # keep the two-column matrix shape even for a single directed arc.
    mask = which.directed(x$arcs, names(x$nodes))

    return(x$arcs[mask, , drop = FALSE])

  }#THEN

  # fitted networks are always completely directed.
  fit2arcs(x)

}#DIRECTED.ARCS
# return the undirected arcs in the graph.
undirected.arcs = function(x) {

  # check x's class.
  check.bn.or.fit(x)

  if (!is(x, "bn")) {

    # fitted networks have no undirected arcs: return an empty arc set.
    return(matrix(character(0), nrow = 0, ncol = 2,
             dimnames = list(NULL, c("from", "to"))))

  }#THEN

  mask = which.undirected(x$arcs, names(x$nodes))

  x$arcs[mask, , drop = FALSE]

}#UNDIRECTED.ARCS
# return the arcs pointing to a particular node.
incoming.arcs = function(x, node) {

  # check x's class.
  check.bn.or.fit(x)
  # a valid node is needed.
  check.nodes(nodes = node, graph = x, max.nodes = 1)

  # only directed arcs can point to a node.
  arcs = directed.arcs(x)

  arcs[arcs[, "to"] == node, , drop = FALSE]

}#INCOMING.ARCS

# return the arcs originating from a particular node.
outgoing.arcs = function(x, node) {

  # check x's class.
  check.bn.or.fit(x)
  # a valid node is needed.
  check.nodes(nodes = node, graph = x, max.nodes = 1)

  # only directed arcs can originate from a node.
  arcs = directed.arcs(x)

  arcs[arcs[, "from"] == node, , drop = FALSE]

}#OUTGOING.ARCS

# return the arcs incident on a particular node.
incident.arcs = function(x, node) {

  # check x's class.
  check.bn.or.fit(x)
  # a valid node is needed.
  check.nodes(nodes = node, graph = x, max.nodes = 1)

  # both directed and undirected arcs are incident on a node.
  arcs = arcs(x)

  arcs[(arcs[, "from"] == node) | (arcs[, "to"] == node), , drop = FALSE]

}#INCIDENT.ARCS

# return compelled arcs.
compelled.arcs = function(x) {

  # check x's class.
  check.bn.or.fit(x)

  # compelled arcs are those that are directed in the CPDAG of the network.
  if (is(x, "bn.fit"))
    x = cpdag(bn.net(x))
  else
    x = cpdag(x)

  return(directed.arcs(x))

}#COMPELLED.ARCS
# return reversible arcs, that is, the arcs that are undirected in the CPDAG
# of the network.
reversible.arcs = function(x) {

  # check x's class.
  check.bn.or.fit(x)

  if (is(x, "bn.fit"))
    cp = cpdag(bn.net(x))
  else
    cp = cpdag(x)

  # use drop = FALSE so that a single reversible arc is still returned as a
  # two-column matrix, consistently with the other arc accessors in this
  # file (directed.arcs, undirected.arcs, incoming.arcs, ...).
  return(x$arcs[which.listed(x$arcs, undirected.arcs(cp)), , drop = FALSE])

}#REVERSIBLE.ARCS
# return the number of arcs in the graph.
narcs = function(x) {

  # check x's class.
  check.bn.or.fit(x)

  narcs.backend(x)

}#NARCS

# set an arc direction manually.
set.arc = function(x, from, to, check.cycles = TRUE, check.illegal = TRUE,
    debug = FALSE) {

  arc.operations(x = x, from = from, to = to, op = "set",
    check.cycles = check.cycles, check.illegal = check.illegal, debug = debug)

}#SET.ARC

# remove an arc from the graph.
# (no cycle or legality check is needed: removing an arc cannot introduce
# either problem.)
drop.arc = function(x, from, to, debug = FALSE) {

  arc.operations(x = x, from = from, to = to, op = "drop",
    check.cycles = FALSE, check.illegal = FALSE, debug = debug)

}#DROP.ARC

# reverse an arc in the graph.
reverse.arc = function(x, from, to, check.cycles = TRUE, check.illegal = TRUE,
    debug = FALSE) {

  arc.operations(x = x, from = from, to = to, op = "reverse",
    check.cycles = check.cycles, check.illegal = check.illegal, debug = debug)

}#REVERSE.ARC

# set an undirected arc.
set.edge = function(x, from, to, check.cycles = TRUE, check.illegal = TRUE,
    debug = FALSE) {

  arc.operations(x = x, from = from, to = to, op = "seted",
    check.cycles = check.cycles, check.illegal = check.illegal, debug = debug)

}#SET.EDGE

# remove an undirected arc from the graph.
drop.edge = function(x, from, to, debug = FALSE) {

  arc.operations(x = x, from = from, to = to, op = "droped",
    check.cycles = FALSE, check.illegal = FALSE, debug = debug)

}#DROP.EDGE
|
a01719b86fa86617ca37ed4773beea159225d51e
|
25be27365977838a5f716ab736e01acc93b2823e
|
/run_simulation.R
|
aee56259e07912263e66f947f353c8470c920b07
|
[] |
no_license
|
MWesselkamp/DomAdapt
|
ab45e2e97f46244b876dadcd14a203f03867d31a
|
fef2c1f57c31b9aa89c77854892eac4090545560
|
refs/heads/master
| 2023-03-27T02:32:02.975281
| 2021-03-29T14:49:56
| 2021-03-29T14:49:56
| 269,294,513
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,934
|
r
|
run_simulation.R
|
#=================#
# run simulations #
#=================#

# In this file, the climate data to use and the number of simulations are specified.
# As well as the range of functions we are going to use below.

source("simulation_functions.R")

# Fitted climate models consumed by climate_simulator() from
# simulation_functions.R.
load("Rdata/fmT.Rdata")
load("Rdata/fmPAR.Rdata")
load("Rdata/fmVPD.Rdata")
load("Rdata/fmPrecip.Rdata")
load("Rdata/fmfAPAR.Rdata")

params = get_parameters()

# Simulate model runs and write the feature/target tables to disk.
#
# nsamples:     number of simulated samples.
# days:         number of days per sample.
# params_distr: name of the parameter sampling distribution (e.g. "normal").
# fix:          which input to hold fixed across samples: "climate",
#               "params", or "None" for neither.
# pars:         parameter table; defaults to the global 'params'.
# data_dir:     root of the output directory.
run_sim = function(nsamples, days, params_distr, fix, pars = params, data_dir = "data/simulations/"){

  pars_values = pars$Default
  # The five parameters that are varied via the sampled design.
  pars_names = c("beta", "X0", "gamma", "alpha", "chi")
  sims_in = NULL
  sims_out = NULL
  pars_lhs = sample_parameters(pars, nsamples, params_distr)
  climate_simulations = climate_simulator(days, 1)

  for (sample in 1:nsamples){
    # Re-simulate climate per sample unless climate is held fixed.
    if (fix!="climate"){ climate_simulations = climate_simulator(days, sample) }
    # Overwrite the varied parameters unless parameters are held fixed.
    if (fix !="params"){pars_values[which(as.character(pars$Name) %in% pars_names)] = pars_lhs[,sample]}
    # Targets: one GPP value per day.
    targets = matrix(unlist(get_preles_output(climate_simulations, pars_values, c("GPP"))), nrow = days, ncol=1)
    # Features: climate drivers plus the parameter values repeated for every
    # day of the sample.
    features = cbind(climate_simulations, apply(as.matrix(pars_values[which(as.character(pars$Name) %in% pars_names)]), 1, function(x) rep(x, times=days)))
    names(features)[10:14] = pars_names
    # NOTE(review): growing with rbind in a loop is O(n^2); acceptable for
    # the sample sizes used here.
    sims_in = rbind(sims_in, features)
    sims_out = rbind(sims_out, targets)
  }

  # Output subdirectory depends on whether an input was held fixed.
  if (fix == "None"){
    write.table(sims_in, file=paste0(data_dir, params_distr, "_params/sims_in.csv"), sep=";", row.names = FALSE)
    write.table(sims_out, file=paste0(data_dir, params_distr, "_params/sims_out.csv"), sep=";", row.names = FALSE, col.names = c("GPP"))
  }else{
    write.table(sims_in, file=paste0(data_dir, fix, "Fix/sims_in.csv"), sep=";", row.names = FALSE)
    write.table(sims_out, file=paste0(data_dir, fix, "Fix/sims_out.csv"), sep=";", row.names = FALSE, col.names = c("GPP"))
  }
}

run_sim(1000, 365, "normal", "params")
|
759813e7d8b74188f64e5ddfced483c71ee7e7ac
|
ef3eb2afb71993519415ebd9919dc3854b22ae37
|
/results/figure_4/format.R
|
02b22f5e73ad00647aaa954497505db72889531f
|
[] |
no_license
|
matthewkling/winds-of-change
|
7a0fd897dcdebf2f3e566f1388393bfcad47a993
|
beb690268b810f2c7f666fb22a06b89c6ac111fe
|
refs/heads/master
| 2020-07-23T02:13:57.345121
| 2020-05-27T18:22:35
| 2020-05-27T18:22:35
| 267,161,663
| 1
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 747
|
r
|
format.R
|
# reformat the raw dataset, which is too large for github, contains many extraneous variables, and has obtuse variable names

library(tidyverse)

# Keep only the coordinate and windshed-size columns, round to 5 significant
# digits to shrink the csv, and rename the fwd/rev suffixes to the clearer
# outbound/inbound before writing the cleaned file.
d <- read_csv("results/figure_4/p1_wnd10m_250km_inv.csv") %>%
  select(x, y,
         wind_fwd_windshed_size, wind_rev_windshed_size,
         clim_fwd_windshed_size, clim_rev_windshed_size,
         overlap_fwd_windshed_size, overlap_rev_windshed_size) %>%
  mutate_at(vars(-x, -y), signif, digits = 5) %>%
  rename_at(vars(contains("windshed")), function(x) str_replace(x, "fwd_windshed_size", "outbound")) %>%
  rename_at(vars(contains("windshed")), function(x) str_replace(x, "rev_windshed_size", "inbound")) %>%
  write_csv("results/figure_4/windscape_data.csv")
|
b7a397e1af25f18d4ec7d08e9b3c7b2a2d6364b9
|
ffdea92d4315e4363dd4ae673a1a6adf82a761b5
|
/data/genthat_extracted_code/spam/examples/lu.tri.Rd.R
|
838c3d0cdd6d0dfc6369160679d7eda12c262b99
|
[] |
no_license
|
surayaaramli/typeRrh
|
d257ac8905c49123f4ccd4e377ee3dfc84d1636c
|
66e6996f31961bc8b9aafe1a6a6098327b66bf71
|
refs/heads/master
| 2023-05-05T04:05:31.617869
| 2019-04-25T22:10:06
| 2019-04-25T22:10:06
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 342
|
r
|
lu.tri.Rd.R
|
# Example code extracted from the 'lower.tri'/'upper.tri' help page of the
# spam package.
library(spam)
### Name: lower.tri
### Title: Lower and Upper Triangular Part of a Sparse Matrix
### Aliases: lower.tri upper.tri lower.tri.spam upper.tri.spam
### Keywords: array algebra

### ** Examples

# 3x3 sparse matrix filled column-wise from the given values.
smat <- spam( c( 1,2,0,3,0,0,0,4,5),3)
upper.tri( smat)
upper.tri( smat, diag=TRUE)
# NOTE(review): spam.trivalues presumably makes upper.tri return the matrix
# values instead of a logical mask -- confirm against the spam documentation.
options(spam.trivalues=TRUE)
upper.tri( smat)
|
4260d5a1ff01ecbbb549513d73c69b189cb3440e
|
8fdfd167bc1cee813ee5d49c6954f3ceed666529
|
/NADIA_package/NADIA/R/PipeOpSample_B.R
|
f43fa2cc2992b512d613faa3ea1772d1456f8fa2
|
[] |
no_license
|
jjanborowka/EMMA
|
53db6273b09d215721dcef9893c0917b7f4a37d9
|
e93e1f4056e80e9ac06b54c9c12a057b2f63efc4
|
refs/heads/master
| 2023-02-04T10:37:02.366643
| 2020-12-22T10:04:42
| 2020-12-22T10:04:42
| 282,159,979
| 0
| 0
| null | 2020-11-26T13:35:48
| 2020-07-24T08:02:16
|
HTML
|
UTF-8
|
R
| false
| false
| 2,415
|
r
|
PipeOpSample_B.R
|
#' @title PipeOpSample_B
#'
#' @name PipeOpSample_B
#'
#' @description
#' Impute features by sampling from non-missing data in approach B (independently during the training and prediction phase).
#'
#' @section Input and Output Channels:
#' Input and output channels are inherited from \code{\link{PipeOpImpute}}.
#'
#' @section Parameters:
#' The parameters include inherited from [`PipeOpImpute`], as well as: \cr
#' \itemize{
#' \item \code{id} :: \code{character(1)}\cr
#' Identifier of resulting object, default `"impute_sample_B"`.
#' }
#' @importFrom data.table .N
#' @examples
#' {
#' graph <- PipeOpSample_B$new() %>>% mlr3learners::LearnerClassifGlmnet$new()
#' graph_learner <- GraphLearner$new(graph)
#'
#' # Task with NA
#' set.seed(1)
#' resample(tsk("pima"), graph_learner, rsmp("cv", folds = 3))
#' }
#' @export
PipeOpSample_B = R6::R6Class("Sample_B_imputation",
  inherit = PipeOpImpute,
  public = list(
    # Construct the PipeOp; id defaults to "impute_sample_B".
    initialize = function(id = "impute_sample_B", param_vals = list()) {
      super$initialize(id, param_vals = param_vals, packages = c("stats", "data.table"), feature_types = c("factor", "integer", "logical", "numeric", "ordered"))
    }
  ),
  private = list(
    # Approach B: nothing is learned during training -- the sampling model
    # is rebuilt independently from the data seen at imputation time.
    .train_imputer = function(feature, type, context) {
      NULL
    },
    .impute = function(feature, type, model, context) {
      # Build a sampling model from the non-missing values of 'feature':
      # either the raw values themselves, or (for longer vectors with few
      # distinct values) the distinct values with a "probabilities" attribute.
      train_model <- function(feature, type, context) {
        fvals = feature[!is.na(feature)]
        if (length(fvals) < 10) { # don't bother with table if vector is short
          return(fvals)
        }
        tab <- data.table::data.table(fvals)[, .N, by = "fvals"]
        if (nrow(tab) > length(fvals) / 2) {
          # memory usage of count table is larger than memory usage of just the values
          return(fvals)
        }
        model <- tab$fvals
        attr(model, "probabilities") <- tab$N / sum(tab$N)
        model
      }
      model <- train_model(feature, type, context)
      if (type %in% c("factor", "ordered")) {
        # in some edge cases there may be levels during training that are missing during predict.
        levels(feature) = c(levels(feature), as.character(model))
      }
      if (length(model) == 1) {
        # only one candidate value: no sampling needed.
        feature[is.na(feature)] = model
      } else {
        # draw one replacement per missing entry; 'prob' is NULL when the
        # model is the raw values, giving uniform sampling.
        outlen = sum(is.na(feature))
        feature[is.na(feature)] = sample(model, outlen, replace = TRUE, prob = attr(model, "probabilities"))
      }
      feature
    }
  )
)
|
9936c811706f6d125b82666152bd77d03f946f89
|
e0369cc936a3ee597ebee96c1eee92fb27f2ed65
|
/WorkOnVariousDatasets/Automating Reviews in Medicine/trials.R
|
8dc62b909398e773e660a854a13296d77490546d
|
[] |
no_license
|
vihangp/MachineLearning
|
0bc6cdbd0cbc75cdebdd2e2a8595925f745f0320
|
8c14a7daed9fed2c336e82a167425be89cf63d20
|
refs/heads/master
| 2021-06-11T04:43:39.024470
| 2017-01-29T17:36:34
| 2017-01-29T17:36:34
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,318
|
r
|
trials.R
|
# "Automating Reviews in Medicine": bag-of-words features from clinical-trial
# titles/abstracts, a CART classifier, and accuracy/AUC evaluation.
library(tm)  # Corpus / tm_map / DocumentTermMatrix -- was never loaded before
#1.1 -- length (in characters) of the longest abstract
trials = read.csv("clinical_trial.csv", stringsAsFactors = FALSE)
max(nchar(trials$abstract))
#1.2 -- number of observations with an empty abstract
nrow(trials[nchar(trials$abstract) == 0,])
#1.3 -- title of the observation with the shortest title
trials[which.min(nchar(trials$title)),'title']
#2.1 -- standard tm preprocessing pipeline for titles.
# NOTE(review): modern tm expects content_transformer(tolower); kept as-is
# to match the original course code.
corpusTitle = Corpus(VectorSource(trials$title))
corpusTitle = tm_map(corpusTitle, tolower)
corpusTitle = tm_map(corpusTitle, PlainTextDocument)
corpusTitle = tm_map(corpusTitle, removePunctuation)
corpusTitle = tm_map(corpusTitle, removeWords, stopwords("english"))
corpusTitle = tm_map(corpusTitle, stemDocument)
dtmTitle = DocumentTermMatrix(corpusTitle)
dtmTitle = removeSparseTerms(dtmTitle, 0.95)  # keep terms in >= 5% of docs
dtmTitleFrame = as.data.frame(as.matrix(dtmTitle))
# Same pipeline for abstracts.
corpusAbstract = Corpus(VectorSource(trials$abstract))
corpusAbstract = tm_map(corpusAbstract, tolower)
corpusAbstract = tm_map(corpusAbstract, PlainTextDocument)
corpusAbstract = tm_map(corpusAbstract, removePunctuation)
corpusAbstract = tm_map(corpusAbstract, removeWords, stopwords("english"))
corpusAbstract = tm_map(corpusAbstract, stemDocument)
dtmTAbstract = DocumentTermMatrix(corpusAbstract)
dtmTAbstract = removeSparseTerms(dtmTAbstract, 0.95)
dtmAbstractFrame = as.data.frame(as.matrix(dtmTAbstract))
#2.3 -- most frequent stem across abstracts (last entry of the sorted sums)
sort(colSums(dtmAbstractFrame))
#3.1 -- prefix column names so title and abstract terms cannot collide
colnames(dtmTitleFrame) = paste0("T", colnames(dtmTitleFrame))
colnames(dtmAbstractFrame) = paste0("A", colnames(dtmAbstractFrame))
#3.2 -- combined feature matrix plus the outcome variable
dtm = cbind(dtmTitleFrame, dtmAbstractFrame)
dtm$trial = trials$trial
#3.3 -- 70/30 train/test split, stratified on the outcome
library(caTools)
set.seed(144)
split = sample.split(dtm$trial, SplitRatio = 0.7)
train = subset(dtm, split==TRUE)
test = subset(dtm, split==FALSE)
table(dtm$trial)
# Baseline: always predict the majority class (1043 non-trials in this data).
baseline_accuracy = 1043/nrow(dtm)
#3.4 -- CART classification tree
library(rpart)
library(rpart.plot)
trialCART = rpart(trial ~ ., data=train, method="class")
prp(trialCART)
#3.5 -- maximum predicted probability of the "trial" class on the train set
# (predict.rpart defaults to type = "prob" for classification trees).
predictions = predict(trialCART)
max(predictions[,2])
#3.7 -- train-set confusion matrix at the default 0.5 threshold
predictions = predict(trialCART, type = "class")
table(train$trial,predictions)
accuracy = (631+441)/nrow(train)
sensitivity = 441/(441+131)
specificity = 631/(631+99)
#4.1 -- test-set confusion matrix and accuracy
predictions = predict(trialCART, newdata = test, type = "class")
table(test$trial,predictions)
accuracy = (261+162)/nrow(test)
#4.2 -- test-set AUC.
library(ROCR)
# BUG FIX: predict.rpart has no type = "response" (valid types are "vector",
# "prob", "class", "matrix"), so the original line errored. type = "prob"
# returns the class-probability matrix indexed by [,2] below.
predictions = predict(trialCART, newdata = test, type = "prob")
auc = performance(prediction(predictions[,2], labels = test$trial), measure = 'auc')
auc <- as.numeric(auc@y.values)
|
f374afbe2d7d210db408fc628988745cc61bb4a3
|
73a165c11149f8ad0a3db8e4d7a36311485cee33
|
/Scripty.R
|
ad3d130d3201807860c27f311385a21957e88d52
|
[
"MIT"
] |
permissive
|
christosto/FirstRepo
|
ab8c9e5aa2c15d03212c9a99e638d70d83d28e28
|
840b769ab0c2639fdd9de48b375e6eca0286b10f
|
refs/heads/master
| 2020-07-09T03:55:13.532868
| 2019-08-29T20:37:07
| 2019-08-29T20:37:07
| 203,869,024
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 42
|
r
|
Scripty.R
|
# Sample script: a couple of arithmetic expressions and a sequence.
A <- 1 + 1   # A is 2
B <- 1:10    # integers 1 through 10
3 * A        # evaluates (and prints) 6
|
68a6be939b5c3ca7300694b7a7b84e36e9e7ab1b
|
9796ce6157e5f00095da4c84c12f2f551b6b8a69
|
/tests/testthat.R
|
4f402c4c14500df4181f7b606d654282f953397f
|
[] |
no_license
|
Suhani8884/Lab3A
|
0c0a35df81b6ac38cfd471fd7e13ce7f0900c454
|
7c5c17dcf2786bb793707ff2ef1bbccdb4ef3a98
|
refs/heads/master
| 2022-12-14T03:07:43.070389
| 2020-09-13T20:53:27
| 2020-09-13T20:53:27
| 294,890,841
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 54
|
r
|
testthat.R
|
# Standard testthat entry point: discovers and runs every test under
# tests/testthat/ for the Lab3A package (run by R CMD check).
library(testthat)
library(Lab3A)
test_check("Lab3A")
|
23c5715299c5ad95a490f274e8ad5fbf224b0d9d
|
ab918939f5de4246046ef24bc313e9e602d24792
|
/trunk/oncotcap/sortOntologyFiles.R
|
46e4d3bc7239bea14730520083c64c97912bee5f
|
[] |
no_license
|
professorbeautiful/oncotcap
|
3c3006ff9d9da5283ba8e77378dcaf74b7fb9e3f
|
5025b8b921f26cd74076eb26241c7041a7bc8d34
|
refs/heads/master
| 2021-01-21T18:10:37.083536
| 2014-02-19T15:59:26
| 2014-02-19T15:59:26
| 15,146,293
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 6,627
|
r
|
sortOntologyFiles.R
|
# USAGE: Rscript --vanilla sortOntologyFiles.R [FileChoice] [FolderChoice]
# Typically this will be run after saving the ontology, twice:
# Rscript --vanilla sortOntologyFiles.R pins .
# Rscript --vanilla sortOntologyFiles.R pont .
# in the directory "." where the files were saved.
# The unsorted files are backed up, then overwritten with the sorted versions.
#options(error=recover)
#folder = "~/Dropbox/OncoTCap/oncotcap/TcapData-demos"
#folder = "~/Dropbox/OncoTCap/oncotcap/TcapData-Roger"
# Defaults used when run interactively or with no command-line arguments.
folder = "TcapData"
filename = "oncotcap.pins"
if(!interactive()) {
  args = commandArgs(trailingOnly=T) ### AFTER "--args"
  cat("args are ", args, "\n")
  if(length(args) == 0) {
    cat("No arguments; folder and filename set to defaults",
        filename, ", ", folder, ".\n")
  }
  else {
    # Map the shorthand "pins"/"pont" to the real filenames; any other value
    # falls through the switch default and is used as a literal filename.
    FileChoice = args[1]
    filename = switch(FileChoice,
                      pins="oncotcap.pins",
                      pont="oncotcap.pont",
                      FileChoice)
    # NOTE(review): this guard is unreachable as written -- the switch default
    # returns FileChoice itself, so filename can only equal "no file named"
    # if the user literally typed that. Confirm intent before changing.
    if(filename=="no file named") {
      stop("Error: file name ", FileChoice, " is unrecognized. Exiting\n")
    }
    if(length(args) >= 2) {
      # Optional second argument: the folder containing the ontology files.
      FolderChoice = args[2]
      if(file.exists(FolderChoice) )
        if(file.info(FolderChoice)$isdir ) {
          folder = FolderChoice
        } else stop("folder ", FolderChoice, " is not actually a folder")
      else
        stop("folder ", FolderChoice, " does not exist")
    }
  }
}
fileIn = paste0(folder, "/", filename)
#fileOut = paste0(fileIn, "_sorted")
fileOut = fileIn ## We need to overwrite the input file.
cat("AFTER NONINTERACTIVE PROCESSING, filename is ", filename,
    " folder is ", folder, " fileIn is ", fileIn,
    " fileOut is ", fileOut,"\n")
######
# first make a backup (shell cp; assumes a Unix-like system).
system(paste0("cp ", fileIn, " ", fileIn, "_unsorted" ))
protege=readLines(f<-file(fileIn))
close(f)
cat( "length of protege file is ", length(protege), " lines.\n")
head(protege)
# ntabs = attr(regexpr("\t*", gsub(";\\+","",protege)), "match.length")
# table(ntabs)
# ntabs.rle = rle(ntabs)
# ntabs.cumLengths = c(1, 1+cumsum(ntabs.rle$lengths[-length(ntabs.rle$lengths)]))
# ntabs.cumLengths.end = c(ntabs.cumLengths[-1]-1, length(protege))
# rbind(start=(ntabs.cumLengths), end=(ntabs.cumLengths.end),
#      depth=(ntabs.rle$values)) [ , 1:6 ]
#' Split lines into groups headed by lines at a given tab depth
#'
#' A group starts at each line whose leading-tab count equals `tablevel`
#' and runs up to the line before the next such header (the last group
#' extends to the end of `lines`).
#'
#' @param lines Character vector of ontology-file lines.
#' @param tablevel Tab depth (number of leading tabs) that marks a header.
#' @param pr Print debugging detail about the detected boundaries?
#' @param doSort Sort the body lines inside each group?
#' @return Named list, one element per group (header first, then its body),
#'   ordered alphabetically by header line.
separateWithinTabGroup = function(lines, tablevel, pr=FALSE, doSort=FALSE){
  # Leading-tab count per line; ";+" markers are stripped before counting.
  depth <- attr(regexpr("\t*", gsub(";\\+", "", lines)), "match.length")
  groupStart <- which(depth == tablevel)
  # Each group ends just before the next header; last group runs to EOF.
  groupEnd <- c(groupStart[-1] - 1, length(depth))
  if (length(groupStart) != length(groupEnd))
    browser("OOPS")
  nShow <- min(20, length(groupStart))
  if (pr) {
    print(rbind(start = groupStart, end = groupEnd)[, 1:nShow])
    print(rbind(start = rev(groupStart), end = rev(groupEnd))[, 1:nShow])
  }
  groups <- lapply(1:length(groupStart), function(g) {
    from <- groupStart[g]
    to <- groupEnd[g]
    if (from == to) {
      body <- NULL                    # header-only group: no body lines
    } else {
      body <- lines[(from + 1):to]
      if (doSort) body <- sort(body)
    }
    c(lines[from], body)
  })
  names(groups) <- lines[groupStart]
  groups[order(names(groups))]
}
if(1==length(grep("pont", filename))){ # "pont" file.
  ### We have to sort ONLY in groups of single-slot and multislot,
  ## not at the top level (which is NOT written in stochastic order,
  ## but in a meaningful order),
  ## and not at the third level, which must be fixed also.
  # The first 6 lines are the file header and are passed through untouched.
  protegeHeader = protege[(1:6)]
  protegeFixed = protege[-(1:6)]
  defclass = grep("^\\(defclass", protegeFixed)
  isa = grep("^\t\\(is-a", protegeFixed)
  role = grep("^\t\\(role", protegeFixed)
  table(isa - defclass) #all = 1. The first line is always "isa".
  table(role - defclass) #all = 2. The second line is always "role".
  ## Therefore the first and second lines must not be included in the sort.
  ranges = rbind(defclass+3, c(defclass[-1]-2, length(protegeFixed)))
  ## The line number ranges over which the sorting will take place.
  ## The first row is the starting line, the second row is the ending line.
  for(r in 1:ncol(ranges)) {
    if( ranges[1,r] < ranges[2,r] ) {
      fixThis = protegeFixed[ ranges[1,r]:ranges[2,r]]
      fixThis[length(fixThis)] = gsub(")$", "", fixThis[length(fixThis)])
      # handling the completing parenthesis. Remove before sorting.
      separatedSorted = separateWithinTabGroup(fixThis, 1)
      # Separate (into a list) and sort at tab level 1.
      unListed = unlist(separatedSorted, recursive=TRUE, use.names=FALSE)
      # Now we re-assemble as a character vector.
      unListed[length(unListed)] = paste0(unListed[length(unListed)], ")")
      # handling the completing parenthesis. Replacing after sorting.
      protegeFixed[ ranges[1,r]:ranges[2,r]] = unListed
      # Replace the chunk to be fixed.
    }
  }
  ## No sorting is done at level 3.
}
if(1==length(grep("pins", filename))){ #### "pins" file:
  # The first 5 lines are the file header and are passed through untouched.
  protegeHeader = protege[(1:5)]
  protegeFixed = protege[-(1:5)]
  # Each instance starts with a "(" in column 1.
  instanceStarts = grep("^\\(", protegeFixed)
  # grep '^(' TcapData/*pins | cut -c1-2 | sort | uniq -c
  # 27244 instances from this unix pipeline
  ranges = rbind(instanceStarts,
                 c(instanceStarts[-1]-2, length(protegeFixed)))
  # The ranges of chunks to be processed: each instance.
  # The second row is the end of the range.
  for(r in 1:ncol(ranges)) {
    if( ranges[1,r] < ranges[2,r] ) {
      fixThis = protegeFixed[ (ranges[1,r]+1):ranges[2,r]]
      fixThis[length(fixThis)] = gsub(")$", "", fixThis[length(fixThis)])
      ## Handle the closing parenthesis. Remove it for now.
      # NOTE(review): doSort=F uses the reassignable F; prefer FALSE.
      separatedSorted = separateWithinTabGroup(fixThis, 1, doSort=F)
      ## Within this chunk, separate the tab groups. A new group starts with a line with one tab.
      separatedSorted = separatedSorted[order(names(separatedSorted))]
      ## Sort the one-tab groups.
      unListed = unlist(separatedSorted, recursive=TRUE, use.names=FALSE)
      ## Turn it back into a vector of character strings with unlist().
      unListed[length(unListed)] = paste0(unListed[length(unListed)], ")")
      ## Handle the closing parenthesis. Replace it .
      protegeFixed[ (ranges[1,r]+1):ranges[2,r]] = unListed
      ## Replace the fixed vectors.
    }
  }
}
# NOTE(review): if filename matches neither "pont" nor "pins",
# protegeHeader/protegeFixed are undefined here and writeLines fails.
writeLines(c(protegeHeader, protegeFixed),
           f<-file(fileOut))
close(f)
|
191c524d56a44b42157e244fd1d585d4526532d8
|
ffdea92d4315e4363dd4ae673a1a6adf82a761b5
|
/data/genthat_extracted_code/HDtest/examples/wntest.Rd.R
|
756e8c8674624b279708cc53b409676e82499937
|
[] |
no_license
|
surayaaramli/typeRrh
|
d257ac8905c49123f4ccd4e377ee3dfc84d1636c
|
66e6996f31961bc8b9aafe1a6a6098327b66bf71
|
refs/heads/master
| 2023-05-05T04:05:31.617869
| 2019-04-25T22:10:06
| 2019-04-25T22:10:06
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 533
|
r
|
wntest.Rd.R
|
library(HDtest)
### Name: wntest
### Title: Testing for multivariate or high dimensional white noise
### Aliases: wntest
### ** Examples
library(expm)
# Dimension p, sample size n.
p = 15
n = 300
# Covariance with entries 0.995^|i-j| (strong AR(1)-style correlation).
S1 = diag(1, p, p)
for(ii in c(1:p)){
for(jj in c(1:p)){
S1[ii, jj] = 0.995^(abs(ii-jj))
}
}
# Matrix square root, used to impose the covariance on heavy-tailed
# t(8) innovations: X is p x n.
S11 = sqrtm(S1)
X = S11 %*% matrix(rt(n*p, df = 8), ncol = n)
# Test settings: max lag k_max, even lags kk, M bootstrap replicates.
# k0, delta, alpha are defined for the commented-out variant below.
k_max = 10
kk = seq(2, k_max, 2)
M = 500
k0 = 10
delta = 1.5
alpha = 0.05
wntest(X, M, k_max, kk, type = 1, opt = 0)
## Not run:
##D wntest(X, M, k_max, kk, type = 1, opt = 4, cv_opt = 1)
## End(Not run)
|
0485aa8896f6360ed6e0cc2475c5f3f854c7b75e
|
c2ca52ff213c784ebb7e3e6f45ed9d148fb089cd
|
/BA/GetData/GetEmails-01.R
|
880d6bb0ecd36384d03de65c95ad6093499b2872
|
[
"MIT"
] |
permissive
|
jeklen/notes
|
93d3091ea2a14cf244004fe1a3a539018ae671d2
|
700498ce6577f83707c8d497ddef4b673b190e2a
|
refs/heads/master
| 2021-06-12T09:36:41.024345
| 2017-01-22T02:13:22
| 2017-01-22T02:13:22
| 71,848,578
| 1
| 0
| null | null | null | null |
GB18030
|
R
| false
| false
| 1,144
|
r
|
GetEmails-01.R
|
# Extract all email addresses from Email.txt, one input line at a time,
# and collect them into email_list.
library(stringr) # load stringr for str_match_all
setwd("d:/BA/GetData") # set the working directory (NOTE(review): avoid setwd in shared scripts)
con <- file("Email.txt", "r") # open the text file Email.txt for reading
email_list <- NULL # email_list accumulates the extracted emails; starts empty
pattern <- '[[:alnum:]_.-]+@[[:alnum:]_.-]+\\.[A-Za-z]+' # regular expression for an email address
# a simplier RE: pattern <- '[[:alnum:].-_]+@[[:alnum:].-_]+'
# However, the above simplier RE may match "ABC@AAA.Com._"
line=readLines(con,n=1) # read one line of text
while( length(line) != 0 ) { # while not yet at end of file
match_emails <- unlist(str_match_all(line, pattern))
# Find every email on this line; matches (if any) go into match_emails.
#str_match_all returns all matched emails.
# unlist a list to a vector variable.
if (length(match_emails)!=0)
{ email_list <- append(email_list, match_emails)
print(match_emails) }
line=readLines(con,n=1)
}
close(con) # close the opened Email.txt file
paste(email_list,collapse=";") # join the collected emails with ";" as separator
length(email_list) # show how many emails were extracted
|
98107fec19c80c1602eea6c45cd04296b4a06544
|
2595ffba6f364508c7189553c5e01db01fea1292
|
/man/append_object.Rd
|
957a5b584bdfad470cff70d54586de65075bc9e2
|
[] |
no_license
|
cran/tidytidbits
|
c16685e56ef0f807226b19f85c324b5d3b6dcccb
|
629ef60315367a8923d68e7c0944c50549805d76
|
refs/heads/master
| 2022-04-30T02:44:50.836003
| 2022-03-16T21:30:06
| 2022-03-16T21:30:06
| 146,012,291
| 0
| 1
| null | null | null | null |
UTF-8
|
R
| false
| true
| 1,027
|
rd
|
append_object.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/purrr-like.R
\name{append_object}
\alias{append_object}
\title{Appending in a pipe, never unlisting}
\usage{
append_object(x, .l, name = NULL)
}
\arguments{
\item{x}{Object to append. If the object is a list, then it is appended as-is, and not unlisted.}
\item{.l}{The list to append to.
Special case handling applies if .l does not exist: then an empty list is used.
This alleviates the need for an initial mylist <- list()}
\item{name}{Will be used as name of the object in the list}
}
\value{
The list .l with x appended
}
\description{
Append to a given list, while considering as a single object and not unlisting as base::append does.
Argument order is reversed compared to base::append to allow a different pattern of use in a pipe.
}
\examples{
library(magrittr)
results <- list(first=c(3,4), second=list(5,6))
list(7,8) \%>\%
append_object(results, "third result") ->
results
# results now has length 3; the appended list(7, 8) is kept as one element named "third result"
}
|
357aba8c1c790ff29c0a1ebf1cc14869c53ed799
|
7e1ca878a6b0f0d17bbc81e76224b13cb70acb42
|
/Repository/Supplementary Files/CreateExampleTrajectoriesGradual.R
|
a146323c7c539ff25fd4732771c6ff00be3b106e
|
[] |
no_license
|
EvoNetHIV/Mittler-et-al-TasP-by-Age
|
6edac9ae7b94dd96099816ac6689f8a10a4b710f
|
408216556a3640968c8073a885bdfc9373c8fd26
|
refs/heads/master
| 2022-04-12T18:37:06.015449
| 2019-12-03T22:59:29
| 2019-12-03T22:59:29
| 107,707,867
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 24,934
|
r
|
CreateExampleTrajectoriesGradual.R
|
#note, graphing routine starts line 163, need to change pathways below
# Toggle: re-load the six simulation .RData files (slow) or reuse the
# run*_ps objects already present in the workspace.
re_read_data <- FALSE
par(mfrow=c(3,3))
legend_text_size <- 0.65
if (re_read_data == TRUE) {
# Strategy 1: treatment assigned at random; three coverage proportions.
run1_title <- "'Random'"
load("H:/TasP2/Exp20_Gradual_95_care_N10000/txt_opto_Exp20_paramsGradual_95_N10000_TasP60perc__random_prop0.2.RData")
run1_1=evomodel
remove(evomodel)
load("H:/TasP2/Exp20_Gradual_95_care_N10000/txt_opto_Exp20_paramsGradual_95_N10000_TasP60perc__random_prop0.3.RData")
run1_2=evomodel
remove(evomodel)
load("H:/TasP2/Exp20_Gradual_95_care_N10000/txt_opto_Exp20_paramsGradual_95_N10000_TasP60perc__random_prop0.4.RData")
run1_3=evomodel
remove(evomodel)
# Strategy 2: prioritize people under age 30, then random; same proportions.
run2_title <- "'Under Age 30'"
load("H:/TasP2/Exp20_Gradual_95_care_N10000/txt_opto_Exp20_paramsGradual_95_N10000_TasP60perc__under25_under30_random_prop0.2.RData")
run2_1=evomodel
remove(evomodel)
load("H:/TasP2/Exp20_Gradual_95_care_N10000/txt_opto_Exp20_paramsGradual_95_N10000_TasP60perc__under25_under30_random_prop0.3.RData")
run2_2=evomodel
remove(evomodel)
load("H:/TasP2/Exp20_Gradual_95_care_N10000/txt_opto_Exp20_paramsGradual_95_N10000_TasP60perc__under25_under30_random_prop0.4.RData")
run2_3=evomodel
remove(evomodel)
# Population-summary lists used by all the plotting code below.
run1_1_ps <- run1_1$popsumm
run1_2_ps <- run1_2$popsumm
run1_3_ps <- run1_3$popsumm
run2_1_ps <- run2_1$popsumm
run2_2_ps <- run2_2$popsumm
run2_3_ps <- run2_3$popsumm
}
# NOTE(review): run1_1 (and run*_title) must already exist in the workspace
# when re_read_data is FALSE, or the next line errors.
nsteps <- run1_1$param[[1]]$n_steps
nyears <- (nsteps/365)
# Fixed axis limits shared by the panels below.
max_infected <- 1400
max_incidence <- 2.0
max_death_rate <- 0.18
##############################
extract_evonet_data <-function(model_obj,overlay){
#plots incidence rate
#input netsim object
#output: plot of incidence rate by year
# Also prints per-run summary statistics (mean +/- SD across simulations)
# and returns them invisibly-ish as a list.
# overlay: FALSE starts a new panel (black lines); TRUE adds to the
# current panel in red.
if (overlay==TRUE) graph_col = "red"
else graph_col = "black"
model <- model_obj
#model <- model_obj
total_sims <- model$param[[1]]$nsims
result_list <- vector('list',length=total_sims)
# Per simulation: convert the step-level counts into a yearly incidence
# series (new infections per 100 person-years among susceptibles).
for(nsim in 1:total_sims)
{
popsumm_freq <- model$param[[nsim]]$popsumm_frequency
nsteps <- model$param[[nsim]]$n_steps
# NOTE(review): steps_per_year is computed but never used below.
if(nsteps>=365)
steps_per_year <- floor(365/model$param[[nsim]]$popsumm_frequency)
else
steps_per_year <- floor(nsteps/model$param[[nsim]]$popsumm_frequency)
sus <- model$popsumm[[nsim]]$susceptibles
inf <- model$popsumm[[nsim]]$new_infections
nsteps <- model$param[[nsim]]$n_steps
nyears <- (nsteps/365)
# tli: indices that partition the recorded series into whole years.
if(nyears<1)
tli <- floor(seq(1,length(sus),length=2))
if(nyears>=1)
tli <- floor(seq(1,length(sus),length=floor(nyears)+1))
number_sus <- rep(NA_real_,length(tli)-1)
total_new_inf <- rep(NA_real_,length(tli)-1)
#browser()
for(ii in 1:(length(tli)-1))
{
total_new_inf[ii] <- sum(inf[(tli[ii]+1):(tli[ii+1])])
if(popsumm_freq>1){
number_sus[ii] <- mean(sus[(tli[ii]+1):(tli[ii+1])])
}
#note: this gives exact same results as above
if(popsumm_freq==1){
number_sus[ii] <- sum(sus[(tli[ii]+1):(tli[ii+1])])/365
}
}
# Skip simulations with no susceptibles recorded at all.
if(sum(number_sus)==0){next}
# scalar accounts for round-off error
scalar<-365/(diff(tli)*popsumm_freq)
inc <- scalar*100*total_new_inf/number_sus
inc[number_sus==0]<-0
result_list[[nsim]] <- inc
}
result_mat <- do.call(rbind,result_list)
ymax=max(unlist(result_mat),na.rm=T)
# Draw one dashed line per simulation; the first simulation also sets up
# the axes when this is not an overlay call.
for(jj in 1:total_sims) {
if(jj==1 && overlay==FALSE){
if(nsteps>=365){
plot((1:nyears)-20,result_list[[jj]],type='l',xlim=c(-10,25),ylim=c(0,1.6),xlab="Years before/after TasP campaingn",ylab="Incidence [rate per 100 person years]",
#xlim=c(10,nyears),
lty=2,col=graph_col,axes=F)
labseq=seq(0,nyears,by=10)-20
#labseq=labseq[-1]
axis(1,at=labseq,labels=labseq)
}
if(nsteps<365){
plot(nyears,result_list[[jj]],type='l',xlim=c(-10,25),ylim=c(0,ymax),xlab="Years before/after TasP campaingn",ylab="Incidence [rate per 100 person years]",
lty=2,col=graph_col,axes=F)
axis(1,at=1:nyears,labels=1:nyears)
}
axis(2);box()
} else {
lines((1:nyears)-20,result_list[[jj]],type='l',col=graph_col,ylim=c(0,max(inc)),lty=2)
}
}
# Thick line: mean incidence across simulations.
lines((1:nyears)-20,colMeans(result_mat),type='l',lwd=2,col=graph_col)
# Now calculate mean and standard deviations from the last 10 years
# NOTE(review): the incidence mean actually uses years 40:45, not the last
# 10 years -- confirm the intended window.
mean_incid <- 0 ; mean_prev <- 0; mean_pills_taken <- 0; mean_not_prioritized_yr10 <- 0; mean_died_AIDS <- 0
for(jj in 1:total_sims)
{
last_recorded_step <- length(model$popsumm[[jj]]$prevalence)
after_rampup <- 20
mean_incid <- mean_incid + mean(result_list[[jj]][40:45])
mean_prev <- mean_prev + model$popsumm[[jj]]$prevalence[last_recorded_step]
mean_pills_taken <- mean_pills_taken + sum(model$popsumm[[jj]]$total_pills_taken[last_recorded_step])
mean_not_prioritized_yr10 <- mean_not_prioritized_yr10 + model$param[[jj]]$num_randomly_chosen_start_campaign
mean_died_AIDS <- mean_died_AIDS + sum(model$popsumm[[jj]]$aids_deaths[after_rampup:last_recorded_step])
}
mean_incid <- mean_incid/total_sims
mean_prev <- mean_prev/total_sims
mean_pills_taken <- mean_pills_taken/total_sims
mean_not_prioritized_yr10 <- mean_not_prioritized_yr10/total_sims
mean_died_AIDS <- mean_died_AIDS/total_sims
# Second pass: sums of squared deviations for the SDs.
# NOTE(review): last_recorded_step here is left over from the LAST iteration
# of the loop above; if simulations differ in length this uses the wrong
# index for all but the last one -- confirm all sims share one length.
ss_incid <- 0 ; ss_prev <- 0; ss_pills_taken <- 0; ss_not_prioritized_yr10 <- 0; ss_died_AIDS <- 0
for(jj in 1:total_sims)
{
ss_incid <- ss_incid + (mean(result_list[[jj]][40:45]) - mean_incid)^2
ss_prev <- ss_prev + (model$popsumm[[jj]]$prevalence[last_recorded_step] - mean_prev)^2
ss_pills_taken <- ss_pills_taken + (model$popsumm[[jj]]$total_pills_taken[last_recorded_step] - mean_pills_taken)^2
ss_not_prioritized_yr10 <- ss_not_prioritized_yr10 + (model$param[[jj]]$num_randomly_chosen_start_campaign - mean_not_prioritized_yr10)^2
ss_died_AIDS <- ss_died_AIDS + (sum(model$popsumm[[jj]]$aids_deaths[after_rampup:last_recorded_step]) - mean_died_AIDS)^2
}
sd_incid <- sqrt(ss_incid/(total_sims-1))
sd_prev <- sqrt(ss_prev/(total_sims-1))
sd_pills_taken <- sqrt(ss_pills_taken/(total_sims-1))
sd_not_prioritized_yr10 <- sqrt(ss_not_prioritized_yr10/(total_sims-1))
sd_died_AIDS <- sqrt(ss_died_AIDS/(total_sims-1))
cat("Incidence last 10 years:",mean_incid,"(",sd_incid,")\n")
cat("Prevalence final time point:",mean_prev,"(",sd_prev,")\n")
cat("Total pills taken:",mean_pills_taken,"(",sd_pills_taken,")\n")
cat("Not prioritized year 10:",mean_not_prioritized_yr10,"(",sd_not_prioritized_yr10,")\n")
cat("Died of AIDS after ramp-up: ",mean_died_AIDS,"(",sd_died_AIDS,")\n")
# Back-calculate year-10 treatment coverage from the end-of-run coverage
# and the yearly treatment-increase rate (30 years of daily compounding).
yearly_inc <- 1 + model$param[[1]]$yearly_incr_tx
evonet_results <- list("proportion_treated_yr10" = model$param[[1]]$proportion_treated/( (yearly_inc^(1/365))^(365*30) ),
"proportion_treated_end" = model$param[[1]]$proportion_treated,
"under_care" = model$param[[1]]$prob_care,
"incid" = mean_incid, "incid_low" = mean_incid - sd_incid,
"incid_high" = mean_incid + sd_incid,
"prev" = mean_prev, "prev_low" = mean_prev - sd_prev,
"prev_high" = mean_prev + sd_prev,
"pills_taken" = mean_pills_taken, "pills_taken_low" = mean_pills_taken - sd_pills_taken,
"pills_taken_high" = mean_pills_taken + sd_pills_taken,
"not_prioritized_yr10" = mean_not_prioritized_yr10,
"not_prioritized_yr10_low" = mean_not_prioritized_yr10 - sd_not_prioritized_yr10,
"not_prioritized_yr10_high" = mean_not_prioritized_yr10 + sd_not_prioritized_yr10,
"died_AIDS" = mean_died_AIDS, "died_AIDS_low" = mean_died_AIDS - sd_died_AIDS,
"died_AIDS_high" = mean_died_AIDS + sd_died_AIDS
)
return(evonet_results)
}
#####################################
#par(mfrow=c(3,3))
# Number of simulation replicates per scenario; the mean_* accumulators
# below build the across-replicate mean incrementally (sum of x/nreps).
nreps = 16
###
###
#Plot number infected and number treated for strategy # 1_1
# Time axis is shifted by -20 so year 0 is the start of the TasP campaign.
labseq=seq(0,nyears,by=10)-20
plot(-20+run1_1_ps[[1]]$timestep/365,run1_1_ps[[1]]$total_infections_alive,xlim=c(-10,25),ylim=c(0,1700),
#xlim=c(10,45),
type="l",lwd=0.5,lty=2,xlab="Years before/after TasP campaign",ylab="Number Infected and Treated",col="black",axes=F)
axis(1,at=labseq,labels=labseq)
lines(-20+run1_1_ps[[1]]$timestep/365,run1_1_ps[[1]]$no_treated ,type="l",lwd=0.5,lty=2,col="blue")
mean_no_treated_run1_1_ps <- run1_1_ps[[1]]$no_treated/nreps
mean_infected_run1_1_ps <- run1_1_ps[[1]]$total_infections_alive/nreps
# Thin dashed lines: individual replicates 2..nreps.
for (jj in 2:nreps) {
lines(-20+run1_1_ps[[jj]]$timestep/365,run1_1_ps[[jj]]$total_infections_alive,type="l",lwd=0.5,lty=2,col="black")
lines(-20+run1_1_ps[[jj]]$timestep/365,run1_1_ps[[jj]]$no_treated ,type="l",lwd=0.5,lty=2,col="blue")
#lines(run1_1_ps[[jj]]$timestep/365,run1_1_ps[[jj]]$no_prioritized_treated ,type="l",lwd=0.5,lty=2,col="orange")
mean_no_treated_run1_1_ps <- mean_no_treated_run1_1_ps + run1_1_ps[[jj]]$no_treated/nreps
mean_infected_run1_1_ps <- mean_infected_run1_1_ps + run1_1_ps[[jj]]$total_infections_alive/nreps
}
# Thick lines: across-replicate means (jj is the loop's final value, nreps).
lines(-20+run1_1_ps[[jj]]$timestep/365,mean_infected_run1_1_ps,type="l",lwd=2,col="black")
lines(-20+run1_1_ps[[jj]]$timestep/365,mean_no_treated_run1_1_ps,type="l",lwd=2,col="blue")
axis(2); box()
#title("Slow, linear")
#Plot number infected and number treated for strategy # 2_1
# Overlay the "Under Age 30" scenario on the same panel (red/green/purple).
lines(-20+run2_1_ps[[1]]$timestep/365,run2_1_ps[[1]]$total_infections_alive,type="l",lwd=0.5,col="red")
lines(-20+run2_1_ps[[1]]$timestep/365,run2_1_ps[[1]]$no_treated ,type="l",lwd=0.5,lty=2,col="green")
mean_no_treated_run2_1_ps <- run2_1_ps[[1]]$no_treated/nreps
mean_infected_run2_1_ps <- run2_1_ps[[1]]$total_infections_alive/nreps
mean_prioritizied_run2_1_ps <- run2_1_ps[[1]]$prioritized_tx/nreps
for (jj in 2:nreps) {
lines(-20+run2_1_ps[[jj]]$timestep/365,run2_1_ps[[jj]]$total_infections_alive,type="l",lwd=0.5,lty=2,col="red")
lines(-20+run2_1_ps[[jj]]$timestep/365,run2_1_ps[[jj]]$no_treated ,type="l",lwd=0.5,lty=2,col="green")
lines(-20+run2_1_ps[[jj]]$timestep/365,run2_1_ps[[jj]]$prioritized_tx ,type="l",lwd=0.5,lty=2,col="purple")
#lines(run2_1_ps[[jj]]$timestep/365,run2_1_ps[[jj]]$no_prioritized_treated ,type="l",lwd=0.5,lty=2,col="orange")
mean_no_treated_run2_1_ps <- mean_no_treated_run2_1_ps + run2_1_ps[[jj]]$no_treated/nreps
mean_infected_run2_1_ps <- mean_infected_run2_1_ps + run2_1_ps[[jj]]$total_infections_alive/nreps
mean_prioritizied_run2_1_ps <- mean_prioritizied_run2_1_ps + run2_1_ps[[jj]]$prioritized_tx/nreps
}
lines(-20+run2_1_ps[[jj]]$timestep/365,mean_infected_run2_1_ps,type="l",lwd=2,col="red")
lines(-20+run2_1_ps[[jj]]$timestep/365,mean_no_treated_run2_1_ps,type="l",lwd=2,col="green")
lines(-20+run2_1_ps[[jj]]$timestep/365,mean_prioritizied_run2_1_ps,type="l",lwd=2,col="purple")
#title(run2_1_title)
# Legend drawn only on this first panel ("if (1==1)" / "if (1==2)" toggles).
if (1==1)legend(-10,1770,c(paste("Infected ",run1_title,sep=""),
paste("Infected ",run2_title,sep=""),
paste("Treated ",run1_title,sep=""),
paste("Treated ",run2_title,sep=""),
paste("Targeted ",run2_title,sep=""))
,lwd=c(2,2,2,2,2),col=c("black","red","blue","green","purple"),bty='n',cex=legend_text_size,
y.intersp = 1)
#Plot number infected and number treated for strategy # 1_2
plot(-20+run1_2_ps[[1]]$timestep/365,run1_2_ps[[1]]$total_infections_alive,xlim=c(-10,25),ylim=c(0,1700),
#xlim=c(10,45),
type="l",lwd=0.5,lty=2,xlab="Years before/after TasP campaign",ylab="Number Infected and Treated",col="black",axes="F")
axis(1,at=labseq,labels=labseq)
lines(-20+run1_2_ps[[1]]$timestep/365,run1_2_ps[[1]]$no_treated ,type="l",lwd=0.5,lty=2,col="blue")
mean_no_treated_run1_2_ps <- run1_2_ps[[1]]$no_treated/nreps
mean_infected_run1_2_ps <- run1_2_ps[[1]]$total_infections_alive/nreps
for (jj in 2:nreps) {
lines(-20+run1_2_ps[[jj]]$timestep/365,run1_2_ps[[jj]]$total_infections_alive,type="l",lwd=0.5,lty=2,col="black")
lines(-20+run1_2_ps[[jj]]$timestep/365,run1_2_ps[[jj]]$no_treated ,type="l",lwd=0.5,lty=2,col="blue")
#lines(run1_2_ps[[jj]]$timestep/365,run1_2_ps[[jj]]$no_prioritized_treated ,type="l",lwd=0.5,lty=2,col="orange")
mean_no_treated_run1_2_ps <- mean_no_treated_run1_2_ps + run1_2_ps[[jj]]$no_treated/nreps
mean_infected_run1_2_ps <- mean_infected_run1_2_ps + run1_2_ps[[jj]]$total_infections_alive/nreps
}
lines(-20+run1_2_ps[[jj]]$timestep/365,mean_infected_run1_2_ps,type="l",lwd=2,col="black")
lines(-20+run1_2_ps[[jj]]$timestep/365,mean_no_treated_run1_2_ps,type="l",lwd=2,col="blue")
#title("Slow, linear")
axis(2); box()
#Plot number infected and number treated for strategy # 2_2
lines(-20+run2_2_ps[[1]]$timestep/365,run2_2_ps[[1]]$total_infections_alive,type="l",lwd=0.5,col="red")
lines(-20+run2_2_ps[[1]]$timestep/365,run2_2_ps[[1]]$no_treated ,type="l",lwd=0.5,lty=2,col="green")
mean_no_treated_run2_2_ps <- run2_2_ps[[1]]$no_treated/nreps
mean_infected_run2_2_ps <- run2_2_ps[[1]]$total_infections_alive/nreps
mean_prioritizied_run2_2_ps <- run2_2_ps[[1]]$prioritized_tx/nreps
for (jj in 2:nreps) {
lines(-20+run2_2_ps[[jj]]$timestep/365,run2_2_ps[[jj]]$total_infections_alive,type="l",lwd=0.5,lty=2,col="red")
lines(-20+run2_2_ps[[jj]]$timestep/365,run2_2_ps[[jj]]$no_treated ,type="l",lwd=0.5,lty=2,col="green")
lines(-20+run2_2_ps[[jj]]$timestep/365,run2_2_ps[[jj]]$prioritized_tx ,type="l",lwd=0.5,lty=2,col="purple")
#lines(run2_2_ps[[jj]]$timestep/365,run2_2_ps[[jj]]$no_prioritized_treated ,type="l",lwd=0.5,lty=2,col="orange")
mean_no_treated_run2_2_ps <- mean_no_treated_run2_2_ps + run2_2_ps[[jj]]$no_treated/nreps
mean_infected_run2_2_ps <- mean_infected_run2_2_ps + run2_2_ps[[jj]]$total_infections_alive/nreps
mean_prioritizied_run2_2_ps <- mean_prioritizied_run2_2_ps + run2_2_ps[[jj]]$prioritized_tx/nreps
}
lines(-20+run2_2_ps[[jj]]$timestep/365,mean_infected_run2_2_ps,type="l",lwd=2,col="red")
lines(-20+run2_2_ps[[jj]]$timestep/365,mean_no_treated_run2_2_ps,type="l",lwd=2,col="green")
lines(-20+run2_2_ps[[jj]]$timestep/365,mean_prioritizied_run2_2_ps,type="l",lwd=2,col="purple")
#title(run2_2_title)
if (1==2)legend(-10,1.03*max_infected,c(paste("Infected ",run1_title,sep=""),
paste("Infected ",run2_title,sep=""),
paste("Treated ",run1_title,sep=""),
paste("Treated ",run2_title,sep=""),
paste("Targeted ",run2_title,sep=""))
,lwd=c(2,2,2,2,2),col=c("black","red","blue","green","purple"),bty='n',cex=legend_text_size,
y.intersp = 1)
#Plot number infected and number treated for strategy # 1_3
# Third coverage level (prop 0.4); same panel layout as the first two.
plot(-20+run1_3_ps[[1]]$timestep/365,run1_3_ps[[1]]$total_infections_alive,xlim=c(-10,25),ylim=c(0,1700),
#xlim=c(10,45),axes="F",
type="l",lwd=0.5,lty=2,xlab="Years before/after TasP campaign",ylab="Number Infected and Treated",col="black")
axis(1,at=labseq,labels=labseq)
lines(-20+run1_3_ps[[1]]$timestep/365,run1_3_ps[[1]]$no_treated ,type="l",lwd=0.5,lty=2,col="blue")
# Running across-replicate means, accumulated as sum(x/nreps).
mean_no_treated_run1_3_ps <- run1_3_ps[[1]]$no_treated/nreps
mean_infected_run1_3_ps <- run1_3_ps[[1]]$total_infections_alive/nreps
for (jj in 2:nreps) {
lines(-20+run1_3_ps[[jj]]$timestep/365,run1_3_ps[[jj]]$total_infections_alive,type="l",lwd=0.5,lty=2,col="black")
lines(-20+run1_3_ps[[jj]]$timestep/365,run1_3_ps[[jj]]$no_treated ,type="l",lwd=0.5,lty=2,col="blue")
#lines(run1_3_ps[[jj]]$timestep/365,run1_3_ps[[jj]]$no_prioritized_treated ,type="l",lwd=0.5,lty=2,col="orange")
mean_no_treated_run1_3_ps <- mean_no_treated_run1_3_ps + run1_3_ps[[jj]]$no_treated/nreps
mean_infected_run1_3_ps <- mean_infected_run1_3_ps + run1_3_ps[[jj]]$total_infections_alive/nreps
}
# Thick lines: across-replicate means.
lines(-20+run1_3_ps[[jj]]$timestep/365,mean_infected_run1_3_ps,type="l",lwd=2,col="black")
lines(-20+run1_3_ps[[jj]]$timestep/365,mean_no_treated_run1_3_ps,type="l",lwd=2,col="blue")
axis(2); box()#title("Slow, linear")
#Plot number infected and number treated for strategy # 2_3
# Overlay the "Under Age 30" scenario in red/green/purple.
lines(-20+run2_3_ps[[1]]$timestep/365,run2_3_ps[[1]]$total_infections_alive,type="l",lwd=0.5,col="red")
lines(-20+run2_3_ps[[1]]$timestep/365,run2_3_ps[[1]]$no_treated ,type="l",lwd=0.5,lty=2,col="green")
# BUG FIX: the accumulator was spelled "mean_prioritizied_run2_3_ps" here
# but referenced as "mean_prioritized_run2_3_ps" when drawing the mean
# line, which raised an undefined-variable error. Spelled consistently now.
mean_no_treated_run2_3_ps <- run2_3_ps[[1]]$no_treated/nreps
mean_infected_run2_3_ps <- run2_3_ps[[1]]$total_infections_alive/nreps
mean_prioritized_run2_3_ps <- run2_3_ps[[1]]$prioritized_tx/nreps
for (jj in 2:nreps) {
lines(-20+run2_3_ps[[jj]]$timestep/365,run2_3_ps[[jj]]$total_infections_alive,type="l",lwd=0.5,lty=2,col="red")
lines(-20+run2_3_ps[[jj]]$timestep/365,run2_3_ps[[jj]]$no_treated ,type="l",lwd=0.5,lty=2,col="green")
lines(-20+run2_3_ps[[jj]]$timestep/365,run2_3_ps[[jj]]$prioritized_tx ,type="l",lwd=0.5,lty=2,col="purple")
#lines(run2_3_ps[[jj]]$timestep/365,run2_3_ps[[jj]]$no_prioritized_treated ,type="l",lwd=0.5,lty=2,col="orange")
mean_no_treated_run2_3_ps <- mean_no_treated_run2_3_ps + run2_3_ps[[jj]]$no_treated/nreps
mean_infected_run2_3_ps <- mean_infected_run2_3_ps + run2_3_ps[[jj]]$total_infections_alive/nreps
mean_prioritized_run2_3_ps <- mean_prioritized_run2_3_ps + run2_3_ps[[jj]]$prioritized_tx/nreps
}
lines(-20+run2_3_ps[[jj]]$timestep/365,mean_infected_run2_3_ps,type="l",lwd=2,col="red")
lines(-20+run2_3_ps[[jj]]$timestep/365,mean_no_treated_run2_3_ps,type="l",lwd=2,col="green")
lines(-20+run2_3_ps[[jj]]$timestep/365,mean_prioritized_run2_3_ps,type="l",lwd=2,col="purple")
#title(run2_3_title)
# Legend deliberately disabled on this panel ("if (1==2)").
if (1==2)legend(-10,1.03*max_infected,c(paste("Infected ",run1_title,sep=""),
paste("Infected ",run2_title,sep=""),
paste("Treated ",run1_title,sep=""),
paste("Treated ",run2_title,sep=""),
paste("Targeted ",run2_title,sep=""))
,lwd=c(2,2,2,2,2),col=c("black","red","blue","green","purple"),bty='n',cex=legend_text_size,
y.intersp = 1)
# Incidence panels: one plot per scenario pair, drawn by the project helper
# extract_evonet_data() (defined elsewhere in the repository -- behavior
# assumed from usage: overlay=FALSE starts a new plot, overlay=TRUE adds to
# the current one; TODO confirm against its definition).
# Plot incidence for under strategy 1_1
extract_evonet_data(run1_1,overlay=FALSE)
#title(run1_title)
# Plot incidence for strategy 2_1
extract_evonet_data(run2_1,overlay=TRUE)
# Legend only on the first incidence panel; the other panels reuse it.
legend(-10,1.68,c(run1_title, run2_title), cex=legend_text_size,
lwd=c(2,2),col=c("black","red"),bty='n',y.intersp = 1)
# Plot incidence for under strategy 1_2
extract_evonet_data(run1_2,overlay=FALSE)
#title(run1_title)
# Plot incidence for strategy 2_2
extract_evonet_data(run2_2,overlay=TRUE)
#legend(10,1.5,c(run1_title, run2_title), cex=legend_text_size,
#       lwd=c(2,2),col=c("black","red"),bty='n',y.intersp = 1)
# Plot incidence for under strategy 1_3
extract_evonet_data(run1_3,overlay=FALSE)
#title(run1_title)
# Plot incidence for strategy 2_3
extract_evonet_data(run2_3,overlay=TRUE)
#legend(10,1.5,c(run1_title, run2_title), cex=legend_text_size,
#       lwd=c(2,2),col=c("black","red"),bty='n',y.intersp = 1)
#Plot AIDS Death Rates for Strategy 1_1
# AIDS death rate = daily AIDS deaths / currently infected, per replicate
# (thin dashed lines) with across-replicate means overlaid (thick lines).
# Strategy 1 is black, strategy 2 is red, matching the other panels.
# BUG FIX: the first replicate was drawn with col="red", which made it look
# like a strategy-2 curve; strategy 1 is black everywhere else in this script.
plot(-20+run1_1_ps[[1]]$timestep/365,run1_1_ps[[1]]$aids_deaths/run1_1_ps[[1]]$total_infections_alive,
xlim=c(-10,25),
ylim=c(0,0.16),type="l",lwd=0.5,lty=2,xlab="Years before/after TasP campaign",ylab="AIDS Death Rate",col="black",axes="F")
axis(1,at=labseq,labels=labseq)
# BUG FIX: the denominator used run1_1_ps[[jj]] with a stale jj left over
# from an earlier loop (jj == nreps at this point); replicate 1 must be
# divided by its own infection counts.
mean_aids_deaths_run1_1_ps <- (run1_1_ps[[1]]$aids_deaths/run1_1_ps[[1]]$total_infections_alive)/nreps
for (jj in 2:nreps) {
lines(-20+run1_1_ps[[jj]]$timestep/365,run1_1_ps[[jj]]$aids_deaths/run1_1_ps[[jj]]$total_infections_alive,
type="l",lwd=0.5,lty=2,col="black")
mean_aids_deaths_run1_1_ps <- mean_aids_deaths_run1_1_ps + (run1_1_ps[[jj]]$aids_deaths/run1_1_ps[[jj]]$total_infections_alive)/nreps
}
lines(-20+run1_1_ps[[jj]]$timestep/365,mean_aids_deaths_run1_1_ps,type="l",lwd=2,col="black")
axis(2); box()
#Plot AIDS Death Rates for Strategy 2_1
lines(-20+run2_1_ps[[1]]$timestep/365,run2_1_ps[[1]]$aids_deaths/run2_1_ps[[1]]$total_infections_alive,
lwd=0.5,lty=2,col="red")
mean_aids_deaths_run2_1_ps <- (run2_1_ps[[1]]$aids_deaths/run2_1_ps[[1]]$total_infections_alive)/nreps
for (jj in 2:nreps) {
lines(-20+run2_1_ps[[jj]]$timestep/365,run2_1_ps[[jj]]$aids_deaths/run2_1_ps[[jj]]$total_infections_alive,type="l",lwd=0.5,lty=2,col="red")
mean_aids_deaths_run2_1_ps <- mean_aids_deaths_run2_1_ps + (run2_1_ps[[jj]]$aids_deaths/run2_1_ps[[jj]]$total_infections_alive)/nreps
}
lines(-20+run2_1_ps[[jj]]$timestep/365,mean_aids_deaths_run2_1_ps,type="l",lwd=2,col="red")
#lines(-20+run1_ps[[jj]]$timestep/365,mean_aids_deaths_run1_ps,type="l",lwd=0.5,col="black")
#title(run2_1_title)
legend(-10,0.168,c(run1_title, run2_title),cex=legend_text_size,
lwd=c(2,2),col=c("black","red"),bty='n',y.intersp = 1)
#Plot AIDS Death Rates for Strategy 1_2
# Same layout as the 1_1/2_1 panel: per-replicate AIDS death rates (thin
# dashed) plus across-replicate means (thick); strategy 1 black, strategy 2 red.
# BUG FIX: first replicate was drawn with the strategy-2 colour ("red");
# changed to black for consistency with the rest of the script.
plot(-20+run1_2_ps[[1]]$timestep/365,run1_2_ps[[1]]$aids_deaths/run1_2_ps[[1]]$total_infections_alive,
xlim=c(-10,25),axes="F",
ylim=c(0,0.16),type="l",lwd=0.5,lty=2,xlab="Years before/after TasP campaign",ylab="AIDS Death Rate",col="black")
axis(1,at=labseq,labels=labseq)
# BUG FIX: denominator previously indexed run1_2_ps[[jj]] with a stale jj
# from an earlier loop; replicate 1 divides by its own infection counts.
mean_aids_deaths_run1_2_ps <- (run1_2_ps[[1]]$aids_deaths/run1_2_ps[[1]]$total_infections_alive)/nreps
for (jj in 2:nreps) {
lines(-20+run1_2_ps[[jj]]$timestep/365,run1_2_ps[[jj]]$aids_deaths/run1_2_ps[[jj]]$total_infections_alive,
type="l",lwd=0.5,lty=2,col="black")
mean_aids_deaths_run1_2_ps <- mean_aids_deaths_run1_2_ps + (run1_2_ps[[jj]]$aids_deaths/run1_2_ps[[jj]]$total_infections_alive)/nreps
}
lines(-20+run1_2_ps[[jj]]$timestep/365,mean_aids_deaths_run1_2_ps,type="l",lwd=2,col="black")
axis(2); box()
#Plot AIDS Death Rates for Strategy 2_2
lines(-20+run2_2_ps[[1]]$timestep/365,run2_2_ps[[1]]$aids_deaths/run2_2_ps[[1]]$total_infections_alive,
lwd=0.5,lty=2,col="red")
mean_aids_deaths_run2_2_ps <- (run2_2_ps[[1]]$aids_deaths/run2_2_ps[[1]]$total_infections_alive)/nreps
for (jj in 2:nreps) {
lines(-20+run2_2_ps[[jj]]$timestep/365,run2_2_ps[[jj]]$aids_deaths/run2_2_ps[[jj]]$total_infections_alive,type="l",lwd=0.5,lty=2,col="red")
mean_aids_deaths_run2_2_ps <- mean_aids_deaths_run2_2_ps + (run2_2_ps[[jj]]$aids_deaths/run2_2_ps[[jj]]$total_infections_alive)/nreps
}
lines(-20+run2_2_ps[[jj]]$timestep/365,mean_aids_deaths_run2_2_ps,type="l",lwd=2,col="red")
#lines(-20+run1_ps[[jj]]$timestep/365,mean_aids_deaths_run1_ps,type="l",lwd=0.5,col="black")
#title(run2_2_title)
#legend(10,0.15,c(run1_title, run2_title),cex=legend_text_size,
#       lwd=c(2,2),col=c("black","red"),bty='n',y.intersp = 1)
#Plot AIDS Death Rates for Strategy 1_3
# Same layout as the previous two panels: per-replicate AIDS death rates
# (thin dashed) plus across-replicate means (thick); strategy 1 black,
# strategy 2 red.
# BUG FIX: first replicate was drawn with the strategy-2 colour ("red");
# changed to black for consistency with the rest of the script.
plot(-20+run1_3_ps[[1]]$timestep/365,run1_3_ps[[1]]$aids_deaths/run1_3_ps[[1]]$total_infections_alive,
xlim=c(-10,25),axes="F",
ylim=c(0,0.16),type="l",lwd=0.5,lty=2,xlab="Years before/after TasP campaign",ylab="AIDS Death Rate",col="black")
axis(1,at=labseq,labels=labseq)
# BUG FIX: denominator previously indexed run1_3_ps[[jj]] with a stale jj
# from an earlier loop; replicate 1 divides by its own infection counts.
mean_aids_deaths_run1_3_ps <- (run1_3_ps[[1]]$aids_deaths/run1_3_ps[[1]]$total_infections_alive)/nreps
for (jj in 2:nreps) {
lines(-20+run1_3_ps[[jj]]$timestep/365,run1_3_ps[[jj]]$aids_deaths/run1_3_ps[[jj]]$total_infections_alive,
type="l",lwd=0.5,lty=2,col="black")
mean_aids_deaths_run1_3_ps <- mean_aids_deaths_run1_3_ps + (run1_3_ps[[jj]]$aids_deaths/run1_3_ps[[jj]]$total_infections_alive)/nreps
}
lines(-20+run1_3_ps[[jj]]$timestep/365,mean_aids_deaths_run1_3_ps,type="l",lwd=2,col="black")
axis(2); box()
#Plot AIDS Death Rates for Strategy 2_3
lines(-20+run2_3_ps[[1]]$timestep/365,run2_3_ps[[1]]$aids_deaths/run2_3_ps[[1]]$total_infections_alive,
lwd=0.5,lty=2,col="red")
mean_aids_deaths_run2_3_ps <- (run2_3_ps[[1]]$aids_deaths/run2_3_ps[[1]]$total_infections_alive)/nreps
for (jj in 2:nreps) {
lines(-20+run2_3_ps[[jj]]$timestep/365,run2_3_ps[[jj]]$aids_deaths/run2_3_ps[[jj]]$total_infections_alive,type="l",lwd=0.5,lty=2,col="red")
mean_aids_deaths_run2_3_ps <- mean_aids_deaths_run2_3_ps + (run2_3_ps[[jj]]$aids_deaths/run2_3_ps[[jj]]$total_infections_alive)/nreps
}
lines(-20+run2_3_ps[[jj]]$timestep/365,mean_aids_deaths_run2_3_ps,type="l",lwd=2,col="red")
#lines(-20+run1_ps[[jj]]$timestep/365,mean_aids_deaths_run1_ps,type="l",lwd=0.5,col="black")
#title(run2_3_title)
#legend(10,0.15,c(run1_title, run2_title), cex=legend_text_size,
#       lwd=c(2,2),col=c("black","red"),bty='n',y.intersp = 1)
|
9a11657bc5c461e67c8e6e72cab16f8612b173ba
|
5b4f2711c63a5e49e792ab41be5e965820ff7c97
|
/Indian Data/describeIndia.R
|
861f1fa457b842ecc37c8a899a8117298fe119ec
|
[] |
no_license
|
calbarber21/Hwang-Lab-Economic-Research
|
f09c7e7bbbf8de8a7e157a59213c9ab176eab5c2
|
a2928a454db5bbc24439fa15f9f7b70dafc61ed0
|
refs/heads/master
| 2022-12-26T23:32:13.630993
| 2020-09-27T19:39:23
| 2020-09-27T19:39:23
| 295,228,415
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 9,632
|
r
|
describeIndia.R
|
describeIndia <- function(){
  # Regress state-level mean gender-attitude indices (GenInd_std) against
  # state sex ratios for India, using World Values Survey (WVS) responses.
  # Produces labelled scatter plots with fitted OLS lines for: all states,
  # states without the Kerala/Delhi outliers, rural and urban respondents,
  # the 2001 child-sex-ratio variants, and one plot per individual attitude
  # item.
  #
  # Side effects: reads "WVS.csv" from the working directory and writes
  # several .pdf files there.
  library(patchwork)
  library(dplyr)
  library(ggplot2)
  file <- "WVS.csv"
  WVS<- read.csv(file)
  #the sex ratio variable can be changed to reflect urban, rural or overall population. Use the filter option to only take responses from those who live in rural areas. Rural area is defined by townSize < 2. Population columns are sexratio, sexRatioRural,sexRatioUrban
  summarizedData <- WVS %>% select(Sum,State,sexratio,GenInd_std,StateLabel) %>% group_by(State,StateLabel) %>% summarise(mean_genind = mean(GenInd_std, na.rm = TRUE), sexratio = mean(sexratio, na.rm = TRUE))
  #create a regression. Y=Gender index X=Sex ratio
  regressionTotal <- lm(mean_genind ~ sexratio, data = summarizedData)
  #access intercept from regression object
  int <- regressionTotal$coefficients[1]
  int <- format(round(int, 2), nsmall = 2)
  #access slope from regression object
  slope <- regressionTotal$coefficients[2]
  slope <- format(round(slope, 2), nsmall = 2)
  #access standard error from regression object
  # (coefficients matrix is 2x4; linear index 4 is the slope's Std. Error)
  se <- summary(regressionTotal)
  se <- se[[4]][4]
  se <- format(round(se, 2), nsmall = 2)
  #a string to attach to our graph
  equation1 <- paste("Beta = ", slope,"SE = ",se)
  p1 <- ggplot(summarizedData, aes(x=sexratio, y=mean_genind)) + geom_text(label=summarizedData$StateLabel) + annotate(geom="text", x=1.05, y=1.5, label=equation1,
              color="black") + geom_smooth(method='lm',se=FALSE) + theme_bw() + xlab("Sex Ratio (Boy per Girl)") + ylab("Gender Index")
  p1
  #remove any outliers
  summarizedDataExcluding <- WVS %>% select(Sum,State,sexratio,GenInd_std,StateLabel) %>% filter(State != "Kerala", State != "Delhi") %>% group_by(State,StateLabel) %>% summarise(mean_genind = mean(GenInd_std, na.rm = TRUE), sexratio = mean(sexratio,na.rm = TRUE))
  #create a regression. Y=Gender index X=Sex ratio
  regressionTotalExcluding <- lm(mean_genind ~ sexratio, data = summarizedDataExcluding)
  #access intercept from regression object
  int2 <- regressionTotalExcluding$coefficients[1]
  int2<- format(round(int2, 2), nsmall = 2)
  #access slope from regression object
  slope2 <- regressionTotalExcluding$coefficients[2]
  slope2 <- format(round(slope2, 2), nsmall = 2)
  #access standard error from regression object
  se2 <- summary(regressionTotalExcluding)
  se2 <- se2[[4]][4]
  se2 <- format(round(se2, 2), nsmall = 2)
  #a string to attach to our graph
  equation2 <- paste("Beta w/o Outliers = ", slope2,"SE = ",se2)
  p2 <- ggplot(summarizedDataExcluding, aes(x=sexratio, y=mean_genind)) + geom_text(label=summarizedDataExcluding$StateLabel) + annotate(geom="text", x=1.05, y=1.5, label=equation2,
              color="black") + geom_smooth(method='lm',se=FALSE) + theme_bw() + xlab("Sex Ratio (Boy per Girl)") + ylab("Gender Index")
  p2
  #regress individual gender index variables against sexratio
  variableOfInterest <- c("MenJobs_std","womenEarnMore_std","HousewifeFulfilling_std","menBeterLeader_std","menBetterExec_std","UniversityBoy_std","preSchoolMother_std")
  plot_list = list()
  for(i in 1:7){
    # Rural respondents (townSize < 2) who are parents; drop states with
    # fewer than 30 such respondents.
    summarizedData <- WVS %>% select(variableOfInterest[i],State,sexRatioRural,StateLabel,townSize,ChildrenNum) %>% filter(townSize < 2,ChildrenNum != 0) %>% group_by(State,StateLabel) %>% summarise(count = n(),mean_var = mean(.data[[variableOfInterest[i]]], na.rm = TRUE), sexratio = mean(sexRatioRural,na.rm = TRUE))
    summarizedData <- summarizedData %>% filter(count >= 30)
    regressionTotal <- lm(mean_var ~ sexratio, data = summarizedData)
    int <- regressionTotal$coefficients[1]
    int <- format(round(int, 2), nsmall = 2)
    slope <- regressionTotal$coefficients[2]
    slope <- format(round(slope, 2), nsmall = 2)
    se <- summary(regressionTotal)
    se <- se[[4]][4]
    se <- format(round(se, 2), nsmall = 2)
    equation1 <- paste("Beta = ", slope,"SE = ",se)
    p <- ggplot(summarizedData, aes(x=sexratio, y=mean_var)) + geom_text(label=summarizedData$StateLabel) + annotate(geom="text", x=1.05, y=1.5, label=equation1,
              color="black") + geom_smooth(method='lm',se=FALSE) + theme_bw() + xlab("Sex Ratio (Boy per Girl)") + ylab(variableOfInterest[i])
    plot_list[[i]] = p
    #ggsave("IndiaPlot_Individual",p)
  }
  for (i in 1:7) {
    # NOTE(review): paste() with default sep inserts spaces, so files are
    # named like "... 1 .pdf"; kept as-is to preserve existing output names.
    fileName <- paste("India_var_Rural_ExcludingSmall_AndNonParents",i,".pdf")
    ggsave(fileName,plot_list[[i]])
  }
  #Regression and graph for Rural places using sex ratio by year for rural places
  summarizedDataRural <- WVS %>% filter(townSize < 2, ChildrenNum != 0 ) %>% group_by(State,StateLabel) %>% summarise(count = n(),sexRatioRural = mean(sexRatioRural, na.rm = TRUE),mean_genindRural = mean(GenInd_std, na.rm = TRUE))
  summarizedDataRural <- summarizedDataRural %>% filter(count >= 30)
  regressionTotal <- lm(mean_genindRural ~ sexRatioRural, data = summarizedDataRural)
  int <- regressionTotal$coefficients[1]
  int <- format(round(int, 2), nsmall = 2)
  slope <- regressionTotal$coefficients[2]
  slope <- format(round(slope, 2), nsmall = 2)
  se <- summary(regressionTotal)
  se <- se[[4]][4]
  se <- format(round(se, 2), nsmall = 2)
  equation1 <- paste("Beta = ", slope,"SE = ",se)
  p4 <- ggplot(summarizedDataRural, aes(x=sexRatioRural, y=mean_genindRural)) + geom_text(label=summarizedDataRural$StateLabel) + annotate(geom="text", x=1.05, y=1.5, label=equation1,
              color="black") + geom_smooth(method='lm',se=FALSE) + theme_bw() + xlab("Sex Ratio (Boy per Girl)") + ylab("Gender Index") + ggtitle("Rural excluding small samples and non-parents")
  p4
  #Regression and graph for Urban places using sex ratio by year for urban places
  summarizedDataUrban <- WVS %>% filter(townSize >= 2,ChildrenNum != 0) %>% group_by(State,StateLabel) %>% summarise(count = n(),sexRatioUrban = mean(sexRatioUrban, na.rm = TRUE),mean_genindUrban = mean(GenInd_std, na.rm = TRUE))
  summarizedDataUrban <- summarizedDataUrban %>% filter(count >= 30)
  regressionTotal <- lm(mean_genindUrban ~ sexRatioUrban, data = summarizedDataUrban)
  int <- regressionTotal$coefficients[1]
  int <- format(round(int, 2), nsmall = 2)
  slope <- regressionTotal$coefficients[2]
  slope <- format(round(slope, 2), nsmall = 2)
  se <- summary(regressionTotal)
  se <- se[[4]][4]
  se <- format(round(se, 2), nsmall = 2)
  equation1 <- paste("Beta = ", slope,"SE = ",se)
  p5 <- ggplot(summarizedDataUrban, aes(x=sexRatioUrban, y=mean_genindUrban)) + geom_text(label=summarizedDataUrban$StateLabel) + annotate(geom="text", x=1.05, y=1.5, label=equation1,
              color="black") + geom_smooth(method='lm',se=FALSE) + theme_bw() + xlab("Sex Ratio (Boy per Girl)") + ylab("Gender Index") + ggtitle("Urban excluding small samples and non-parents")
  p5
  #Regression and graph for urban places using one child sex ratio for 2001
  summarizedDataUrban_ChildRatio <- WVS %>% filter(townSize >= 2) %>% group_by(State,StateLabel) %>% summarise(count = n(),sexRatio = mean(childSexRatio2001, na.rm = TRUE),mean_genind = mean(GenInd_std, na.rm = TRUE))
  # BUG FIX: this regression and the p6 plot previously referenced
  # summarizedDataUrban, which has no sexRatio/mean_genind columns (those
  # exist only in the _ChildRatio summary built just above) and would error.
  regressionTotal <- lm(mean_genind ~ sexRatio, data = summarizedDataUrban_ChildRatio)
  int <- regressionTotal$coefficients[1]
  int <- format(round(int, 2), nsmall = 2)
  slope <- regressionTotal$coefficients[2]
  slope <- format(round(slope, 2), nsmall = 2)
  se <- summary(regressionTotal)
  se <- se[[4]][4]
  se <- format(round(se, 2), nsmall = 2)
  equation1 <- paste("Beta = ", slope,"SE = ",se)
  p6 <- ggplot(summarizedDataUrban_ChildRatio, aes(x=sexRatio, y=mean_genind)) + geom_text(label=summarizedDataUrban_ChildRatio$StateLabel) + annotate(geom="text", x=1.05, y=1.5, label=equation1,
              color="black") + geom_smooth(method='lm',se=FALSE) + theme_bw() + xlab("Sex Ratio (Boy per Girl)") + ylab("Gender Index") + ggtitle("Urban Excluding Small Samples")
  p6
  #Regression and graph for rural places using one child sex ratio for 2001
  summarizedDataRural_ChildRatio <- WVS %>% filter(townSize < 2) %>% group_by(State,StateLabel) %>% summarise(count = n(),sexRatio = mean(childSexRatio2001, na.rm = TRUE),mean_genind = mean(GenInd_std, na.rm = TRUE))
  # BUG FIX: same copy-paste issue as above -- use the _ChildRatio summary,
  # not summarizedDataRural (which lacks the sexRatio/mean_genind columns).
  regressionTotal <- lm(mean_genind ~ sexRatio, data = summarizedDataRural_ChildRatio)
  int <- regressionTotal$coefficients[1]
  int <- format(round(int, 2), nsmall = 2)
  slope <- regressionTotal$coefficients[2]
  slope <- format(round(slope, 2), nsmall = 2)
  se <- summary(regressionTotal)
  se <- se[[4]][4]
  se <- format(round(se, 2), nsmall = 2)
  equation1 <- paste("Beta = ", slope,"SE = ",se)
  p7 <- ggplot(summarizedDataRural_ChildRatio, aes(x=sexRatio, y=mean_genind)) + geom_text(label=summarizedDataRural_ChildRatio$StateLabel) + annotate(geom="text", x=1.05, y=1.5, label=equation1,
              color="black") + geom_smooth(method='lm',se=FALSE) + theme_bw() + xlab("Sex Ratio (Boy per Girl)") + ylab("Gender Index") + ggtitle("Rural Excluding Small Samples")
  p7
  ggsave("WVS_plot_India.pdf",p1)
  ggsave("WVS_plot_India_wo_outliers.pdf",p2)
  #write.table(summarizedData, "India_table.txt")
}
|
78517c767781691620332f7fe7bd5291fd42a94e
|
75f792fc99722753a76b6e428ba30a591f2d5368
|
/data-raw/small_community.comp.R
|
822748a2b0e1006b9ce5175e69d2ac92c71b7b98
|
[
"MIT"
] |
permissive
|
jtmccr1/HIVEr
|
9739c7883ec6e8f1071a537517a62b7288f553cd
|
61601afa9f173fe2ca033b0a0028b109f7b90de6
|
refs/heads/master
| 2021-09-20T03:25:25.257483
| 2018-08-02T15:15:31
| 2018-08-02T15:15:31
| 109,002,994
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 822
|
r
|
small_community.comp.R
|
# Build the `small_community.comp` example dataset shipped with the package.
# NOTE(review): assumes `small_isnv`, `get_freqs()` and `polish_freq()` are
# already in scope from the package's data-raw workflow -- confirm upstream.
small_isnv$HOUSE_ID <- 000000
# Pairwise variant frequencies for the two duplicate specimens; keep variants
# with frequency > 0.02 in the first sample.
get_freqs(c("HS1595","HS1563"),small_isnv)->small_dups
small_dups.comp<-polish_freq(small_dups,freq1,0.02)
# Duplicate the comparison under a second donor SPECID and zero out one
# freq2 entry, then quadruple the rows via repeated rbind.
extra<-small_dups.comp
extra$SPECID1<-"MH0000"
small_dups.comp$freq2[1]<-0
small_dups.comp<-rbind(small_dups.comp,extra)
small_dups.comp<-rbind(small_dups.comp,small_dups.comp)
small_dups.comp<-rbind(small_dups.comp,small_dups.comp)
# Re-number pairs (two rows per pair) and randomize freq1; "found" flags
# variants detected in the second sample above the 2% threshold.
# NOTE(review): runif() without a prior set.seed() makes this dataset
# non-reproducible across runs -- confirm that is acceptable.
small_dups.comp$pair_id<-rep(1:(nrow(small_dups.comp)/2),each=2)
small_dups.comp$freq1<-runif(n = nrow(small_dups.comp),0,1)
small_dups.comp$found<-small_dups.comp$freq2>0.02
# Stack a second copy under a different recipient SPECID2, renumber pairs.
small_community.comp<-small_dups.comp
small_dups.comp$SPECID2<-"MH0001"
small_community.comp<-rbind(small_community.comp,small_dups.comp)
small_community.comp$pair_id<-rep(1:(nrow(small_community.comp)/2),each=2)
# NOTE(review): devtools::use_data() is deprecated in favour of
# usethis::use_data(); kept as-is to avoid changing dependencies.
devtools::use_data(small_community.comp,overwrite = T)
|
bab4571752dd01d9d455c284238106bd65a37889
|
d20612d64c5cf45664ddb6a50dfbf05028c72158
|
/R/data.R
|
b7fc911616a2a77b5c41d56fe3b0f053f81cf532
|
[] |
no_license
|
kuriwaki/ddi
|
4f0a634c79c7f1d9b3528a7b2a3158c8216ddb4b
|
bbbc6772ef521b958033485f5fc30cb096e191b1
|
refs/heads/master
| 2021-07-16T09:56:51.972865
| 2020-05-09T01:04:05
| 2020-05-09T01:04:05
| 145,268,603
| 3
| 1
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,963
|
r
|
data.R
|
# Roxygen documentation for the packaged `g2016` dataset; the quoted name on
# the last line binds this block to the data object stored in data/.
#' 2016 General Election Results and Survey Estimates
#'
#' Donald Trump's voteshare in each U.S. state,
#' with survey estimates from the Cooperative Congressional Election Study
#' (pre-election wave). See Meng (2018) referenced below for more details.
#' We focus on unweighted estimates to capture the response patterns, before
#' correcting for any imbalances through weights.
#'
#'
#' @format A data frame with 51 rows (all U.S. states and D.C.)
#' \describe{
#'   \item{state}{state (full name)}
#'   \item{st}{state (abbreviation).}
#'   \item{pct_djt_voters}{Donald J. Trump's voteshare, the estimand.}
#'   \item{cces_pct_djt_vv}{CCES unweighted proportion of Trump support, one estimate.}
#'   \item{cces_pct_djtrund_vv}{CCES unweighted proportion counting Republican undecideds as Trump voters.}
#'   \item{votes_djt}{Total number of votes by Trump.}
#'   \item{tot_votes}{Turnout in Presidential as total number of votes cast.}
#'   \item{cces_totdjt_vv}{Validated voters intending to vote for Trump. Used as the numerator for the above CCES estimates.}
#'   \item{cces_n_vv}{Validated voters in survey sample. Used as the denominator for the above CCES estimates.}
#'   \item{vap}{Voting Age Population in the state.}
#'   \item{vep}{Voting Eligible Population in the state (estimate from the US Election Project).}
#' }
#'
#' @examples
#' library(dplyr)
#' data(g2016)
#'
#' transmute(g2016,
#'           st,
#'           ddc = ddc(mu = pct_djt_voters,
#'                     muhat = cces_pct_djt_vv,
#'                     N = tot_votes,
#'                     n = cces_n_vv))
#'
#' @source Cooperative Congressional Election Study (CCES) \url{https://cces.gov.harvard.edu/}
#' and the United States Election Project \url{http://www.electproject.org/2016g}.
#' Created under \url{https://github.com/kuriwaki/poll_error}.
#'
#'
#' @references For an explanation in the
#' context of d.d.i., see Meng (2018) <doi:10.1214/18-AOAS1161SF>
"g2016"
|
6050c43ab2d7e30a22afb5cba22903db06900669
|
3fc72b0734b728dcb57baf287b5b8e06f6e5b0b7
|
/tools/archive/_valueParser.R
|
17722ae553defa29384350502be5340a27555fb2
|
[] |
no_license
|
WengLab-InformaticsResearch/DQueST
|
a8e24a4f5bd050fa0f78c5c33f69a16b7b2c57e1
|
f1fb9302ba06141c837bf27a686e614b55889cda
|
refs/heads/master
| 2020-04-02T23:27:05.844799
| 2018-10-26T16:55:56
| 2018-10-26T16:55:56
| 154,867,944
| 2
| 1
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,191
|
r
|
_valueParser.R
|
# Normalize a free-text numeric constraint (e.g. "10 to 20", "more than 18",
# "less than 5") into a canonical range string "min:max", where one bound may
# be Inf/-Inf.  Returns NA when the input is NA or no pattern matches.
# NOTE(review): relies on stringr::str_match being attached by the caller;
# there is no library()/:: call here -- confirm stringr is loaded upstream.
valueNormalize <- function(has_value){
if(is.na(has_value)){
return(NA)
}
has_value = tolower(has_value)
# Keyword vectors.  Each element becomes a separate regex alternative:
# paste0() below recycles over the vector, so str_match() returns one match
# row per keyword pattern rather than one combined alternation.
bigger_words = c("\\>","\\+","more","big","great","more","excess","high","better","exceed","old","long","over","at least")
smaller_words = c("\\<","\\-","less","small","low","worse","young","short")
and_words = c("and","to","\\-")
# scan two number first.
# Range form: "<number> <and/to/-> <number>" takes priority over one-sided
# comparisons.
and_match = str_match(has_value, paste0("([\\d,\\.]+).*?",and_words,".*?([\\d,\\.]+)"))
if(sum(!is.na(and_match[,2])) > 0){
# at least one match.
and_match = and_match[!is.na(and_match[,2]),c(2,3)]
if(!is.null(dim(and_match))){
# more than one match. select the first match.
and_match = and_match[1,]
}
# require the first one is smaller than second one.
# NOTE(review): the comment above states a requirement, but no check is
# actually performed -- "20 to 10" would yield "20:10"; confirm intent.
min = and_match[1]
max = and_match[2]
return(paste(min,":",max,sep = ""))
}
# scan big number.
# Lower-bound form: keyword before or after the number (">= 5", "5 or more").
big_match_suffix = str_match(has_value, paste0("([\\d,\\.]+).*?",bigger_words))
big_match_prefix = str_match(has_value, paste0(bigger_words,".*?([\\d,\\.]+)"))
big_match = rbind(big_match_suffix,big_match_prefix)
if(sum(!is.na(big_match[,2])) > 0){
# at least one match.
big_match = big_match[!is.na(big_match[,2]),c(2)]
if(length(big_match) > 1){
# more than one match. select the first match.
big_match = big_match[1]
}
# Open-ended upper bound.
min = big_match[1]
max = Inf
return(paste(min,":",max,sep = ""))
}
# scan small number.
# Upper-bound form: keyword before or after the number ("< 5", "5 or less").
small_match_suffix = str_match(has_value, paste0("([\\d,\\.]+).*?",smaller_words))
small_match_prefix = str_match(has_value, paste0(smaller_words,".*?([\\d,\\.]+)"))
small_match = rbind(small_match_suffix,small_match_prefix)
if(sum(!is.na(small_match[,2])) > 0){
# at least one match.
small_match = small_match[!is.na(small_match[,2]),c(2)]
if(length(small_match) > 1){
# more than one match. select the first match.
small_match = small_match[1]
}
# Open-ended lower bound.
min = -Inf
max = small_match[1]
return(paste(min,":",max,sep = ""))
}
# if not return before. then return NA
return(NA)
}
|
64500e06ad88a3eefa5317b6a1c2db83192eebd6
|
b5a1bd1654ad240813aec25a16a236b198558095
|
/Archive/paper/size2/bigsize1d.R
|
0468152976d9a771a2d8f551d84b678a844bb64b
|
[] |
no_license
|
trevor-harris/assimilation-cfr
|
af4d6125dff636f0edd338f0bd4fbbd57a176aca
|
60a311dccafc4d524136e6400a41118a4ec2ff58
|
refs/heads/master
| 2021-03-22T00:23:45.788770
| 2020-06-18T19:06:09
| 2020-06-18T19:06:09
| 118,714,285
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,381
|
r
|
bigsize1d.R
|
library(reshape2)
library(tictoc)
library(future)
library(future.apply)
source("/home/trevorh2/assimilation-cfr/code/depth_tests.R")
source("/home/trevorh2/assimilation-cfr/code/depths.R")
source("/home/trevorh2/assimilation-cfr/code/simulation.R")
#### SIZE
# Empirical size (type-I error rate at alpha = 0.05) of the "kolm" and
# "quality" depth tests under the null hypothesis: both samples are drawn
# from the same 1-D Gaussian process (gp1d), across sample sizes (nfuns)
# and GP correlation ranges (ranges).  One output row per (n, r, iteration)
# triple.  Writes two RDS files with the results.
# (Unused accumulators klevel/qlevel/kpval/qpval from the original were
# removed; they were assigned but never read.)
set.seed(042696)
iter <- 50    # repetitions per (n, r) configuration
sims <- 1000  # simulated test statistics per repetition
ranges <- c(10, 20, 30, 40)
nfuns <- c(50, 100, 200, 300, 400, 500)
ksize <- data.frame(size = numeric(0),
                    nfun = numeric(0),
                    range = numeric(0))
qsize <- data.frame(size = numeric(0),
                    nfun = numeric(0),
                    range = numeric(0))
plan(multiprocess)
k <- 1  # next output row index
tic("Total")
for (n in nfuns) {
  for (r in ranges) {
    tic(paste0("n = ", n, " r = ", r))
    for (j in seq_len(iter)) {
      # val: 2 x sims matrix of p-values (row 1 = kolm, row 2 = quality).
      val <- future_sapply(seq_len(sims), function(x) {
        f <- gp1d(n, sd = 1, l = r)
        g <- gp1d(n + 1, sd = 1, l = r)
        c(kolm(f, g)[2], quality(f, g)[2])
      })
      # Empirical rejection rate at the nominal 5% level.
      ksize[k, ] <- c(mean(val[1, ] <= 0.05), n, r)
      qsize[k, ] <- c(mean(val[2, ] <= 0.05), n, r)
      k <- k + 1
    }
    toc()
  }
}
toc()
saveRDS(ksize, file = "/home/trevorh2/assimilation-cfr/sim/size2/out/big_ksize.RDS")
saveRDS(qsize, file = "/home/trevorh2/assimilation-cfr/sim/size2/out/big_qsize.RDS")
|
3654d5dca352a31830160c0d269b8c1bda35f61f
|
eb8e225361c5a640d23207735e88d87aa8477533
|
/man/MeanDurUnder.Rd
|
844f5e3b70164f6cc61455f2571b4fa527f769d0
|
[] |
no_license
|
cran/NHMSAR
|
6ec23ee69d3398005f85e64293b32fe5f71b5375
|
6fbbfdd21700d5b6b95322844abdc93129cddc22
|
refs/heads/master
| 2022-03-03T06:41:29.892008
| 2022-02-09T06:40:11
| 2022-02-09T06:40:11
| 27,052,794
| 2
| 1
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,227
|
rd
|
MeanDurUnder.Rd
|
\name{MeanDurUnder}
\alias{MeanDurUnder}
\title{
Mean Duration of sojourn under a treshold
}
\description{
Plot the mean duration of sojourn under thresholds for an observed time series and a simulated one with respect to the empirical cumulative distribution function (cdf). Confidence intervals are plotted too.}
\usage{
MeanDurUnder(data, data.sim, u, alpha = 0.05,col="red",plot=TRUE)
}
%- maybe also 'usage' for other objects documented here.
\arguments{
\item{data}{
observed (or reference) time series, array of dimension T*N.samples*1 }
\item{data.sim}{
simulated time series, array of dimension T*N.sim*1.
N.sim has to be K*N.samples with K large enough (for instance, K=100) }
\item{u}{
vector of thresholds}
\item{alpha}{
1-confidence level for confidence intervals. Default = 0.05}
\item{col}{color of the lines for simulated data, default is red}
\item{plot}{statistic are plotted if TRUE (default)}
}
\value{
Returns a plot and a list including
..$F : empirical cdf of data for levels u
..$mdu.data : mean duration under levels u for data
..$F.sim : empirical cdf of simulations for levels u
..$mdu.sim : mean duration under levels u for simulations
..$CI : confidence intervals of mean duration under levels u for simulations
}
%\references{
%% ~put references to the literature/web site here ~}
\author{
Valerie Monbet, valerie.monbet@univ-rennes1.fr}
\seealso{valid_all.MSAR, MeanDurOver}
\examples{
data(meteo.data)
data = array(meteo.data$temperature,c(31,41,1))
k = 40
T = dim(data)[1]
N.samples = dim(data)[2]
d = dim(data)[3]
M = 2
order = 2
theta.init = init.theta.MSAR(data,M=M,order=order,label="HH")
mod.hh= NULL
mod.hh$theta = theta.init
mod.hh$theta$A = matrix(c(0.40,0.88,-.09,-.13),2,2)
mod.hh$theta$A0 = matrix(c(6.75,1.08),2,1)
mod.hh$theta$sigma = matrix(c(1.76,3.40),2,1)
mod.hh$theta$prior = matrix(c(0.37,0.63),2,1)
mod.hh$theta$transmat = matrix(c(0.82,0.09,0.18,0.91),2,2)
B.sim = 20*N.samples
Y0 = array(data[1:2,sample(1:dim(data)[2],B.sim,replace=TRUE),],c(2,B.sim,1))
Y.sim = simule.nh.MSAR(mod.hh$theta,Y0=Y0,T,N.samples=B.sim)
u = seq(min(data),max(data),length.out=30)
MeanDurUnder(data,Y.sim$Y,u)
}
\keyword{ Mean Duration of Sojourn }
\keyword{ Threshold excess }
|
c2fb7a7db9855b522d2c1874f73ad96aa49d72e6
|
343a4b66f68e7652d06dea9f74b01831b93e1c96
|
/R/dados_estruturais/tabelas_locias_proximo.R
|
d895d7e48cd8b4390dc6a2ad437d5485e67d5972
|
[] |
no_license
|
Marcelo-Honorio/modelos_tratamento_de_dados
|
fd3f6324290734ddfed4b356addc8de9ea288845
|
d0941880b09765a5fcdc1cf63f494ffac600db7b
|
refs/heads/master
| 2022-05-28T16:17:23.525509
| 2020-05-02T20:11:18
| 2020-05-02T20:11:18
| 257,465,309
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,201
|
r
|
tabelas_locias_proximo.R
|
# Build one PNG table per bank branch (agencia) listing nearby places,
# sorted by segment.  Reads an .xlsx input and writes one .png per branch
# into the working directory.
# NOTE(review): rm(list = ls()) and the hard-coded setwd() make this a
# machine-specific standalone script; the relative paths below depend on it.
rm(list = ls(all = TRUE))
library(tidyverse)
library(stringr)
library(readr)
library(readxl)
library(lubridate)
library(formattable)
library(knitr)
library(kableExtra)
library(webshot)  # required by kableExtra::save_kable() for PNG output
setwd('C:/Users/marce/Documents/Emovel/Dados/avaliacao_ banco/locais_proximos')
locais <- read_excel('locais_prox_itau.xlsx')
locais <- locais %>%
  arrange(id_agencia)
agencia <- unique(locais$id_agencia)
for (j in seq_along(agencia)) {
  # Nearby places for this branch, reduced to segment + name.
  tabela <- locais %>%
    filter(id_agencia == agencia[j]) %>%
    select(Port, nome_local)
  colnames(tabela) <- c('Segmento', 'Nome')
  nome_tabela <- paste("tabela_", agencia[j], sep = "")
  tabela <- tabela %>%
    arrange(Segmento)
  tabela %>%
    kable(align = 'c', format.args = list(decimal.mark = ",", digits = NULL, preserve.width = 'individual'), row_label_position = 'c') %>%
    # BUG FIX: bootstrap option was misspelled 'houver'; the valid kableExtra
    # option name is 'hover', so the hover styling was silently dropped.
    kable_styling(bootstrap_options = c('striped', 'condensed', 'hover'), full_width = FALSE, row_label_position = "c") %>%
    row_spec(0, color = 'white', background = '#243654', font_size = 12, monospace = FALSE) %>%
    # seq_len() instead of 1:length() so an empty table does not produce the
    # bogus row indices c(1, 0).
    row_spec(seq_len(nrow(tabela)), color = 'black') %>%
    save_kable(file = paste(nome_tabela, '.png', sep = ''))
}
|
a63e86c3a306ecedd16612083b3eef0944b462f8
|
60669f36c55c532bc9fad514a5eda9a0ce20c571
|
/tests/testthat.R
|
de65f8bc8c5b7159d9e31db84a2b100cb2afbb32
|
[] |
no_license
|
bmewing/mgsubcpp
|
99c856409842d16e21bc6917cc609e682090c18c
|
331f5467db619b9b9e49cd6f5b80278ea885f72f
|
refs/heads/master
| 2021-05-13T12:03:30.676221
| 2018-01-16T16:09:10
| 2018-01-16T16:09:10
| 117,150,106
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 60
|
r
|
testthat.R
|
# Standard testthat bootstrap: load the package under test and run every
# test file under tests/testthat/ for the mgsubcpp package.
library(testthat)
library(mgsubcpp)
test_check("mgsubcpp")
|
c3a045b24fd3abfa2bb290a7f7fd6a0bf3b608ff
|
794863d2e9e26424a04079a91c3a23063bdb4f8e
|
/R/VFEVD.R
|
d1aaaeef55453da6fc40df984d747e53a161b9e3
|
[] |
no_license
|
GabauerDavid/ConnectednessApproach
|
ef768e64e0bc458ad180bac6b667b3fe5662f01d
|
0ca4799a2f5aa68fdd2c4a3e8a2e0e687d0a9b17
|
refs/heads/main
| 2023-08-09T07:23:45.002713
| 2023-07-27T22:57:04
| 2023-07-27T22:57:04
| 474,462,772
| 47
| 20
| null | 2023-03-12T04:22:26
| 2022-03-26T20:47:15
|
R
|
UTF-8
|
R
| false
| false
| 3,076
|
r
|
VFEVD.R
|
#' @title Generalized volatility forecast error variance decomposition and volatility impulse response functions
#' @description This function provides the volatility impulse responses and the forecast error variance decomposition of DCC-GARCH models.
#' @param fit Fitted DCC-GARCH model
#' @param nfore H-step ahead forecast horizon
#' @param standardize Boolean value whether GIRF should be standardized
#' @return Get volatility impulse response functions and forecast error variance decomposition
#' @references
#' Gabauer, D. (2020). Volatility impulse response analysis for DCC‐GARCH models: The role of volatility transmission mechanisms. Journal of Forecasting, 39(5), 788-796.
#' @author David Gabauer
#' @importFrom rmgarch rcor
#' @importFrom rmgarch rcov
#' @export
VFEVD = function(fit, nfore=100, standardize=FALSE) {
  # Fail fast on invalid inputs.
  if (!is(fit, "DCCfit")) {
    stop("fit needs to be of class DCCfit")
  }
  if (nfore<=0) {
    stop("nfore needs to be a positive integer")
  }
  # Conditional covariance (H) and correlation (R) paths from the fitted model.
  NAMES = fit@model$modeldata$asset.names
  H = rmgarch::rcov(fit)
  R = rmgarch::rcor(fit)
  R.bar = apply(R,1:2,mean)
  Q.bar = fit@mfit$Qbar
  # NOTE(review): `t` (number of time points) shadows base::t below; calls
  # like t(num) still resolve to the transpose function because R skips
  # non-function bindings when looking up a function name.
  t = dim(H)[3]
  k = dim(H)[1]
  # Univariate GARCH(1,1) parameters extracted from the coefficient matrix
  # (alpha/beta per series), plus the DCC parameters ALPHA/BETA.
  # NOTE(review): only alpha[,,1] is filled; alpha[,,j] stays all-zero for
  # j > 1, so the e^2 shock term below only contributes at the first forecast
  # step -- confirm this is intended.
  alpha = array(0,c(k,k,nfore))
  alpha[,,1] = diag(fit@mfit$matcoef[c(seq(3,(4*k),4)),1])
  beta = diag(fit@mfit$matcoef[c(seq(4,(4*k),4)),1])
  ALPHA = fit@mfit$matcoef[(4*k+1),1]
  BETA = fit@mfit$matcoef[(4*k+2),1]
  H.hat = array(0,c(k,k,nfore+1))
  VIRF = H.hat.shock = H.hat.no_shock = array(0,c(k,k,t,nfore+1))
  e = diag(k)  # unit shock in each series
  # Shocked H forecast path, started from every origin date i.
  for (i in 1:t) {
    H.hat[,,1] = H[,,i]
    Q.hat = H.hat
    Q.hat[,,1] = fit@mfit$Q[[i]]
    for (j in 1:nfore) {
      H.hat[,,j+1] = (alpha[,,j])%*%e^2 + beta%*%H.hat[,,j]
      D = diag(diag(H.hat[,,j+1])^0.5)
      u = D%*%e
      if (j==1) {
        Q.hat[,,2] = (1-ALPHA-BETA)*Q.bar + ALPHA*crossprod(u) + BETA*H.hat[,,1]
      } else {
        Q.hat[,,j+1] = (1-ALPHA-BETA)*Q.bar + (ALPHA+BETA)*Q.hat[,,j]
      }
      R.hat = diag(1/(diag(Q.hat[,,j+1])^0.5))%*%Q.hat[,,j+1]%*%(diag(1/diag(Q.hat[,,j+1])^0.5))
      H.hat[,,j+1] = D%*%R.hat%*%D
    }
    H.hat.shock[,,i,] = H.hat
  }
  if (standardize) {
    # Baseline (no-shock) path used to standardize the IRFs.  When
    # standardize = FALSE, H.hat.no_shock stays zero and VIRF reduces to
    # H.hat.shock.
    e = 0*diag(k)
    for (i in 1:t) {
      H.hat[,,1] = H[,,i]
      Q.hat = H.hat
      Q.hat[,,1] = fit@mfit$Q[[i]]
      for (j in 1:nfore) {
        H.hat[,,j+1] = beta%*%H.hat[,,j]
        D = diag(diag(H.hat[,,j+1])^0.5)
        if (j==1) {
          Q.hat[,,2] = (1-ALPHA-BETA)*Q.bar + BETA*H.hat[,,1]
        } else {
          Q.hat[,,j+1] = (1-ALPHA-BETA)*Q.bar+(ALPHA+BETA)*Q.hat[,,j]
        }
        R.hat = diag(1/(diag(Q.hat[,,j+1])^0.5))%*%Q.hat[,,j+1]%*%(diag(1/diag(Q.hat[,,j+1])^0.5))
        H.hat[,,j+1] = D%*%R.hat%*%D
      }
      H.hat.no_shock[,,i,] = H.hat
    }
  }
  # Volatility impulse responses: shocked minus baseline forecasts.
  for (i in 1:t) {
    VIRF[,,i,] = H.hat.shock[,,i,] - H.hat.no_shock[,,i,]
  }
  date = dimnames(H)[[3]]
  # Row-normalized FEVD: share of each series' forecast error variance
  # attributable to shocks in each other series, per time point.
  VFEVD = array(NA, c(k,k,t), dimnames=list(NAMES,NAMES,date))
  for (i in 1:t) {
    num = apply(VIRF[,,i,]^2,1:2,sum)
    den = c(apply(num,1,sum))
    fevd = t(num)/den
    VFEVD[,,i] = (fevd/apply(fevd, 1, sum))
  }
  # BUG FIX: the original ended with `return = list(...)`, which assigns to a
  # local variable literally named "return" and returns the list *invisibly*
  # (assignment value as last expression); use an actual return() call so the
  # result is returned visibly.
  return(list(IRF=VIRF, FEVD=VFEVD))
}
|
54bdae68b09974308e282b56a5df7ae5513430ff
|
936a5fd21e0eb2b92d8128b9ab1c04866d21e73a
|
/inst/examples/ames_glmnet.R
|
3fd7c89ffc0075e8150c873502e1eba6064b0b22
|
[
"MIT"
] |
permissive
|
tidymodels/tune
|
7a8dcc35196ac1be7f11ef26b12107e204f7c390
|
034b75a1b032bd29ceb70df8d4e3ce10e9fcc17b
|
refs/heads/main
| 2023-09-04T07:23:00.149702
| 2023-08-24T12:37:39
| 2023-08-24T12:37:39
| 201,538,545
| 253
| 48
|
NOASSERTION
| 2023-09-08T21:18:10
| 2019-08-09T20:49:40
|
R
|
UTF-8
|
R
| false
| false
| 1,673
|
r
|
ames_glmnet.R
|
# Tune a glmnet linear regression on the Ames housing data:
# grid search first, then Bayesian optimization seeded with the grid results.
library(tidymodels)
library(tune)
library(AmesHousing)
# ------------------------------------------------------------------------------
ames <- make_ames()
# Make sure that you get the same random numbers
set.seed(4595)
# Stratified train/test split on the outcome.
data_split <- initial_split(ames, strata = "Sale_Price")
ames_train <- training(data_split)
set.seed(2453)
# 10-fold cross-validation, also stratified on the outcome.
cv_splits <- vfold_cv(ames_train, v = 10, strata = "Sale_Price")
# ------------------------------------------------------------------------------
# glmnet model with both penalty (lambda) and mixture (alpha) marked for tuning.
lm_mod <-
linear_reg(penalty = tune(), mixture = tune()) %>%
set_engine("glmnet")
# Workflow: log-transformed outcome regressed on all predictors.
ames_wflow <-
workflow() %>%
add_formula(log(Sale_Price) ~ .) %>%
add_model(lm_mod)
# Regular grid: 10 penalty values x 3 mixture values.
grid_df <- grid_regular(ames_wflow, levels = c(10, 3))
ames_glmnet <- tune_grid(ames_wflow, resamples = cv_splits, grid = grid_df, control = control_grid(verbose = TRUE))
# summarize(ames_glmnet) %>%
# dplyr::filter(.metric == "rmse") %>%
# select(-n, -std_err, -.estimator, -.metric) %>%
# mutate(penalty = log10(penalty)) %>%
# gather(parameter, value, -mean) %>%
# ggplot(aes(x = value, y = mean)) +
# geom_point() +
# facet_wrap(~parameter, scales = "free_x")
#
# summarize(ames_glmnet) %>%
# dplyr::filter(.metric == "rmse") %>%
# arrange(mean) %>%
# slice(1)
set.seed(9890)
# Bayesian optimization, warm-started from the grid-search results.
search_res <-
tune_bayes(
ames_wflow,
resamples = cv_splits,
initial = ames_glmnet,
iter = 50,
control = control_bayes(verbose = TRUE)
)
# Continue the search from the previous run; `uncertain = 5` forces an
# uncertainty sample after 5 iterations without improvement.
more_search_res <-
tune_bayes(
ames_wflow,
resamples = cv_splits,
# param_info = ames_set,
initial = search_res,
metrics = metric_set(rmse, rsq),
iter = 50,
control = control_bayes(verbose = TRUE, uncertain = 5)
)
|
2e1a9328da936d1c5b2a8751d7615a66bdb505e4
|
976f676cdd85b2dffd7405712c3ed70e48bfde0e
|
/02-add-log-odds-ranking.R
|
4edf16e7deb735f690b87b75bb2f40b617f9c07b
|
[] |
no_license
|
shuckle16/job-description-analytics
|
493c24723d9b1927ae7c3b0fa84f0bf3424d83d2
|
f424cbf6a84358ecc3ce975e4f1e535fbccfa0df
|
refs/heads/master
| 2020-11-28T11:39:34.835242
| 2020-01-07T15:59:31
| 2020-01-07T15:59:31
| 229,801,248
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 789
|
r
|
02-add-log-odds-ranking.R
|
library(tidytext)
library(tidylo)
# Rank candidate keywords for a job title by combining tidylo weighted log
# odds with the tf-idf scores from find_keywords().
#
# @param job_description Pattern matched (via grepl) against job titles.
# @param n Number of top log-odds words kept per title, also forwarded to
#   find_keywords(). Previously this argument was a no-op: `slice(1:n)` and
#   the inner call both hard-coded 10, and inside slice() the bare `n` was
#   shadowed by the `n` count column created by count() (the
#   "broken because of slice and n" bug noted below the function).
# @return Tibble of words with log_odds, tf_idf and their geometric mean,
#   sorted best-first.
find_keywords2 <- function(job_description = "Accountants", n = 10) {
  # Copy the argument under a name that no column shares, so dplyr's data
  # masking cannot resolve it to the `n` column from count().
  n_top <- n
  job_descs %>%
    unnest_tokens(word, description) %>%
    count(title, word) %>%
    filter(
      !(word %in% tidytext::stop_words$word)
    ) %>%
    # Here `n` deliberately refers to the count column produced by count().
    bind_log_odds(title, word, n) %>%
    arrange(-log_odds) %>%
    group_by(title) %>%
    slice(1:n_top) %>%
    filter(
      grepl(pattern = job_description, x = title)
    ) %>%
    inner_join(
      find_keywords(job_description, n = n_top) %>%
        tibble::enframe(name = "word", value = "tf_idf")
    ) %>%
    mutate(
      geom_mean = sqrt(log_odds * tf_idf)
    ) %>%
    arrange(desc(geom_mean))
}
find_keywords2("Accountants")
# broken because of slice and n
find_keywords2("Aerospace Engineers", 3)
|
fb17c20a275247c258a758f47adcbbe715190c28
|
bbe0ba427d05eaeded9a920251d5c109b48d731d
|
/man/get_color.Rd
|
c29e3a2767f6b60b9de7dcb78986c47b17fa1d5c
|
[] |
no_license
|
cran/rcolors
|
77c9a96490bffb5a3a80a8d6d2ef5cbcc03dbd0d
|
e09ad9df5e846c2971b39b52a88c7e9962f6ae8c
|
refs/heads/master
| 2023-04-22T11:09:19.959872
| 2021-04-23T09:40:02
| 2021-04-23T09:40:02
| 360,933,493
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 1,134
|
rd
|
get_color.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/get_color.R
\name{get_color}
\alias{get_color}
\title{Get and interpolate colors}
\usage{
get_color(col, n = NULL, show = FALSE)
}
\arguments{
\item{col}{color name in the \code{rcolors} or a vector of colors}
\item{n}{Integer, default is length of \code{col}}
\item{show}{Boolean, whether to visualize those colors?}
}
\value{
A character vector with elements of 7 or 9 characters, \code{"#"}
followed by the red, blue, green and optionally alpha values in
hexadecimal (after rescaling to \code{0 ... 255}). The optional alpha
values range from \code{0} (fully transparent) to \code{255} (opaque).
\R does \strong{not} use \sQuote{premultiplied alpha}.
}
\description{
Get and interpolate colors
}
\details{
\verb{supported col names}: 270 ncl colors (\code{names(rcolors)}) and 35
RColorBrewer colors (\code{names(colors_group$brewer)}).
}
\examples{
print(names(rcolors[1:6]))
print(names(colors_group$brewer))
get_color("amwg_blueyellowred", n = 20)
}
\seealso{
\code{\link[=show_cols]{show_cols()}}
}
|
9be91a684d4085c2a73b29ca1b1004e56b65c9f2
|
29585dff702209dd446c0ab52ceea046c58e384e
|
/UpSetR/R/Set.metadata.R
|
e6bfabac642504ab8ffbe115e8bc4b8f58f07dd6
|
[] |
no_license
|
ingted/R-Examples
|
825440ce468ce608c4d73e2af4c0a0213b81c0fe
|
d0917dbaf698cb8bc0789db0c3ab07453016eab9
|
refs/heads/master
| 2020-04-14T12:29:22.336088
| 2016-07-21T14:01:14
| 2016-07-21T14:01:14
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,885
|
r
|
Set.metadata.R
|
# Assemble the per-attribute set-metadata plots for an UpSet plot.
#
# @param set.metadata List with `data` (first column = set names) and
#   `plots`, a list of plot specs, each with `type` ("hist", "heat",
#   "bool", "text" or "matrix_rows"), `column`, optional `colors` and,
#   for "text", optional `alignment`.
# @param set_names Character vector of set names, in display order.
# @return List of gtables, one per plot spec; "matrix_rows" specs are
#   skipped (leaving a NULL slot, as before).
Make_set_metadata_plot <- function(set.metadata, set_names){
  metadata <- as.data.frame(set.metadata$data)
  num_of_att <- length(set.metadata$plots)
  # First column holds the set names; rename it so it can be matched on.
  metadata_columns <- colnames(metadata)
  metadata_columns[1] <- "sets"
  names(metadata) <- metadata_columns
  # Keep only the requested sets, in display order, and replace the name
  # column with display positions 1..n.
  metadata <- metadata[which(metadata$sets %in% set_names), ]
  metadata <- metadata[match(set_names, metadata$sets), ]
  metadata$sets <- seq(1, nrow(metadata))
  rownames(metadata) <- set_names
  metadata_plot <- list()
  # seq_len() instead of 1:num_of_att so zero plot specs iterates zero
  # times (1:0 would iterate over c(1, 0) and fail).
  for (i in seq_len(num_of_att)) {
    spec <- set.metadata$plots[[i]]
    if (spec$type == "matrix_rows") {
      next
    }
    # $ returns NULL when the element is absent, matching the old
    # explicit is.null() branches for colors and alignment.
    colors <- spec$colors
    p <- switch(spec$type,
      hist = metadataHist(metadata, spec$column, colors),
      heat = ,
      bool = metadataHeat(metadata, spec$column, spec$type, colors),
      text = metadataText(metadata, spec$column, colors, spec$alignment),
      NULL  # unknown types leave the slot empty, as before
    )
    if (!is.null(p)) {
      # Every plot type gets the same ggplot -> gtable conversion.
      metadata_plot[[i]] <- ggplot_gtable(ggplot_build(p))
    }
  }
  return(metadata_plot)
}
|
fc10e2f107553dff9b15d304bc1f6a9ef087770b
|
a5db873e3ee26b7047fe601863a7831d147cf1a1
|
/man/summariseValidTagsLGR.Rd
|
55905b53d24ff0359b76428fae2891a4c9f40b73
|
[
"CC0-1.0",
"CC-BY-4.0"
] |
permissive
|
KevinSee/PITcleanr
|
ad10fa9e2a74cf84afb051750d5e611bbd8d7d87
|
1bd2c6106c65f310a2cb09f363009ab8507ffb7d
|
refs/heads/main
| 2023-08-22T10:44:07.865356
| 2023-08-16T00:16:33
| 2023-08-16T00:16:33
| 276,694,561
| 4
| 8
|
CC-BY-4.0
| 2023-09-11T20:04:01
| 2020-07-02T16:23:49
|
R
|
UTF-8
|
R
| false
| true
| 584
|
rd
|
summariseValidTagsLGR.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/summariseValidTagsLGR.R
\name{summariseValidTagsLGR}
\alias{summariseValidTagsLGR}
\title{Summarise Valid Tags}
\usage{
summariseValidTagsLGR(valid_df)
}
\arguments{
\item{valid_df}{dataframe of valid tags extracted from the trap database with the \code{filterLGRtrapDB} function}
}
\description{
Filter the valid tag dataframe returned by \code{filterLGRtrapDB} to only include the tag code and the tagging date
}
\examples{
summariseValidTagsLGR(filterLGRtrapDB(spawnYear = 2015))
}
\author{
Kevin See
}
|
84a94c5085e97246860311fa95b8e776ab5f70d3
|
dfa6f9ae20dd7b548b25d917c016fc3d5780ae5c
|
/R/parse_beast_posterior.R
|
e3248af0f7a7af488ec06641a149f9babaa7b058
|
[
"MIT"
] |
permissive
|
venkataduvvuri/RBeast
|
13de4b2a8a7fed45ed2f35d6046625a3e0603b9c
|
1e9ba811a1ac328dc4ee9209bb0c68c19fa3e43e
|
refs/heads/master
| 2022-04-27T11:04:19.878296
| 2019-04-10T21:50:12
| 2019-04-10T21:50:12
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 767
|
r
|
parse_beast_posterior.R
|
#' Build a posterior from BEAST2 output files
#'
#' Thin wrapper that delegates all parsing to
#' \code{tracerer::parse_beast_posterior}.
#' @param trees_filename path of the BEAST2 \code{.trees} output file
#' @param log_filename path of the BEAST2 \code{.log} output file
#' @return a posterior
#' @export
#' @examples
#' trees_filename <- system.file(
#'   "extdata", "beast2_example_output.trees", package = "RBeast"
#' )
#' log_filename <- system.file(
#'   "extdata", "beast2_example_output.log", package = "RBeast"
#' )
#' posterior <- parse_beast_posterior(
#'   trees_filename = trees_filename,
#'   log_filename = log_filename
#' )
#' testit::assert(is_posterior(posterior))
#' @author Richel J.C. Bilderbeek
parse_beast_posterior <- function(trees_filename, log_filename) {
  tracerer::parse_beast_posterior(
    trees_filename = trees_filename,
    log_filename = log_filename
  )
}
|
09fa36a4560cdfdf31e7dcccc50c9970b3f2ffed
|
7bae5569fd5509263b0cdd20fc1c6c14436410f9
|
/packages/RNASeq/OFBIT/SCONE/SCONE_EX4.R
|
63ced337370e90fb09af021865bebff465790797
|
[] |
no_license
|
cfbuenabadn/YosefCode2
|
fe578ac0e9d0ff5ce724209dde1379acae6ab0ad
|
35bd4e749301b728ad502d6327b88c01de71cbd3
|
refs/heads/master
| 2021-07-05T03:40:51.753643
| 2017-06-23T10:50:08
| 2017-06-23T10:50:08
| 105,191,082
| 3
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,813
|
r
|
SCONE_EX4.R
|
# SCONE normalization run for the SOX2 experiment-3 data set.
# Loads precomputed objects, standardizes the quality metrics, and calls SCONE.
load("/data/yosef/users/mbcole/WTP63/First/WTp63_counts_QCscores_someMetaD_GeneLists.rda")
# Keep only the housekeeping gene list from the loaded .rda; drop the rest.
rm(list = ls()[ls() != "HKlistE1"])
source("~/YosefCode/packages/RNASeq/OFBIT/SCONE/SCONE.R")
## ----- Standardize Quality Matrix -----
# Standardize quality metric
# q = quality metric matrix (columns = named features, rows = samples)
# ... = lists for specific transformations (see below)
# Returns a centered/scaled numeric matrix with NA columns and
# (near-)constant columns removed. The to.* arguments name which columns
# receive each variance-stabilizing transformation.
PPQual = function(q, to.log = c("NREADS", "NALIGNED"),
to.abs.log = c("MEDIAN_5PRIME_TO_3PRIME_BIAS","MEDIAN_5PRIME_BIAS","MEDIAN_3PRIME_BIAS"),
to.logit.one = c("PCT_RIBOSOMAL_BASES","PCT_CODING_BASES","PCT_UTR_BASES",
"PCT_INTRONIC_BASES","PCT_INTERGENIC_BASES","PCT_MRNA_BASES"),
to.logit.hund = c("RALIGN")){
## ===== SD_EPSILON: Constant for purpose of correlation computation =====
SD_EPSILON = 1e10 * .Machine$double.eps #~2.2e-6
if(any(is.null(colnames(q)))){
stop("No quality parameter names.")
}
quality.features = q
# Convert non-numeric data fields
for (i in 1:dim(quality.features)[2]){
if(!is.numeric(quality.features[,i])){
quality.features[,i] = as.numeric(as.character(quality.features[,i]))
}
}
## ----- Special Transformations
# The +.01 / +1 offsets guard against log(0) and division by zero.
# 0 to Infinity -> log
quality.features[,to.log] = log(quality.features[,to.log]+.01)
# 0 to Infinity, Best at 1
quality.features[,to.abs.log] = exp(abs(log(quality.features[,to.abs.log]+.01)))-.01
# 0 to 1
quality.features[,to.logit.one] = log((quality.features[,to.logit.one]+.01)/(1-quality.features[,to.logit.one]+.01))
# 0 to 100
quality.features[,to.logit.hund] = log((quality.features[,to.logit.hund]+1)/(100-quality.features[,to.logit.hund] + 1))
## ----- Remove NA (missing data), Constants, and scale
# Double transpose so na.omit drops columns (features) containing NA.
quality.features = t(na.omit(t(quality.features)))
quality.features = quality.features[,apply(quality.features,2,sd) > SD_EPSILON]
quality.features = scale(quality.features,center = T,scale = T)
return(quality.features)
}
# Experiment-specific inputs: metadata, filtered counts, DE genes, and the
# Tirosh cell-cycle gene list.
load("/data/yosef/users/mbcole/SOX2/expt3_info.rda")
load("/data/yosef/users/mbcole/SOX2/E3_gf405.rda")
de = read.table("/data/yosef/users/mbcole/SOX2/OE_regenGeneList.txt")
cc = read.table("/data/yosef/users/mbcole/WTP63/First/FP/cell_cycle_Tirosh.txt")
# Run SCONE with a nested fixed-effect design; gene sets flag housekeeping,
# differentially expressed, and cell-cycle genes.
scone_out = SCONE(e = gf405,factor_free_only = F,design = c("nested"), nested_model = c("fixed"),
condition = expt,
batch = c1_run_id,
qual = PPQual(QCdataE3),
dim_UV = 5,
is_HK = rownames(gf405) %in% as.matrix(HKlistE1),
is_DE = rownames(gf405) %in% as.matrix(de),
is_CC = toupper(rownames(gf405)) %in% as.matrix(cc$V2),
out_dir = "/data/yosef/users/mbcole/SOX2/SCONE_out_405")
|
bbe7bb81b419f519be94486f6f4adac8f629c162
|
e06965698053952f7f97c60349a590e42d08b633
|
/man/assets.Rd
|
7ac535e8495fa784aadd9ab6536c6ef184957f1f
|
[
"Apache-2.0"
] |
permissive
|
kcf-jackson/sketch
|
a9940c89ed8183627914861a11893856b1c47429
|
b597f01e540f35aab1f5ee2d3744f6f64c70c94d
|
refs/heads/master
| 2022-11-01T03:28:32.088340
| 2022-10-23T14:22:05
| 2022-10-23T14:22:05
| 222,058,097
| 106
| 5
|
NOASSERTION
| 2022-10-23T14:22:07
| 2019-11-16T06:36:59
|
HTML
|
UTF-8
|
R
| false
| true
| 688
|
rd
|
assets.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/assets-loader.R
\name{assets}
\alias{assets}
\title{Process assets in headers}
\usage{
assets(file, ..., trace = FALSE)
}
\arguments{
\item{file}{Character string; the file path.}
\item{...}{(Optional) List of processors to pass to \link{convert_src}.}
\item{trace}{TRUE or FALSE; if TRUE, assets are extracted, but not processed.}
}
\description{
Take a 'sketch' R file as input, extract and process the
resources links as provided by the user with the '#!'/'#|' header.
}
\examples{
file <- system.file("test_files/test_RMD.R", package = "sketch")
assets(file, trace = TRUE)
assets(file, trace = FALSE)
}
|
10f4166122e667979fc38106f97ea6dc6d4aebb8
|
02b178b7ebb101940d6ede02b10c52dec501dcd6
|
/snakemake/chiplike_analysis/workflow/scripts/t_vs_c_filter.R
|
843c9909710c915e0f0e4c0d3caea7dab2e3ec38
|
[
"MIT"
] |
permissive
|
radio1988/bpipes
|
21ea7c124f1bd962afe32644c445da3bb7a7d177
|
0aceb97070210c2361adb45ee0040b6aa5be771b
|
refs/heads/master
| 2023-08-24T12:40:19.129216
| 2023-08-24T00:49:42
| 2023-08-24T00:49:42
| 140,731,030
| 3
| 2
| null | null | null | null |
UTF-8
|
R
| false
| false
| 8,352
|
r
|
t_vs_c_filter.R
|
# For PullDown vs Input Only
# output Any LFC_RAW > 0
# Use colSum(COUNT) as SizeFactor
# Pvalue not very useful, LFC_RAW good for this step
# todo: Each Treatment must > mean(IgG)?
library(DESeq2)
library(ashr)
library(WriteXLS)
readExcel <- function(fname){
  # Load sheet 1 of an .xlsx file, treating the literal string 'NA' as
  # missing, and coerce the tibble to a plain data frame.
  sheet1 <- readxl::read_xlsx(fname, na = 'NA', sheet = 1)
  data.frame(sheet1)
}
writeExcel <- function(df, name){
  # Dump a data frame to a single-sheet .xlsx file; missing values are
  # rendered as 'NA' and row names are dropped.
  WriteXLS(df,
    ExcelFileName = name,
    SheetNames = 'sheet1',
    row.names = FALSE,
    na = 'NA')
}
# Filter a count table: drop low-count genes (rows) and low-depth samples
# (columns). Columns 1-5 are treated as annotation and always kept; columns
# 6..ncol(df) are sample counts. Filtering is skipped entirely (with a
# warning printed) unless more than 2 genes and at least 4 samples survive.
df_filter <- function(df, min_rowsum=10, min_colsum=2e4){
# TRUE for genes with at least min_rowsum reads summed across samples.
expression_filter <- rowSums(df[, 6:ncol(df)]) >= min_rowsum
# TRUE for samples with at least min_colsum reads summed across genes.
sample_filter <- colSums(df[, 6:ncol(df)]) >= min_colsum
if (sum(expression_filter) > 2 & sum(sample_filter) >= 4){
# Keep the 5 annotation columns unconditionally, then passing samples.
df <- df[expression_filter, c(rep(TRUE,5), sample_filter )]
print(paste(
"Removed genes with less than ", min_rowsum,
"reads/fragments across all samples", "\n"))
print(
paste("Removed samples with less than ", min_colsum,
"reads/fragments across all genes", "\n"))
print("Data dim after filtering:")
print(dim(df[, 6:ncol(df)]))
}else{
print("Library size too small: \
too few genes/samples would be left after filtering, so skipped filtering.")
print("Please interpret results with caution")
}
return(df)
}
calculateCPM <- function(counts) {
  # Counts-per-million: rescale every column (sample) of the count matrix
  # so that each column sums to one million.
  # Same computation as t(t(x) * 1e6 / colSums(x)), written with sweep();
  # see https://support.bioconductor.org/p/91218/
  library_sizes <- colSums(counts)
  sweep(counts * 1e6, 2, library_sizes, FUN = "/")
}
clean_name <- function(name){
  # Normalize a group/contrast label (vectorized): strip all spaces, turn
  # dashes into dots, and drop one trailing semicolon.
  cleaned <- gsub(" ", "", name, fixed = TRUE)
  cleaned <- gsub("-", ".", cleaned, fixed = TRUE)
  sub(";$", "", cleaned)
}
read_csv <- function(fname, sep = ','){
  # Load a config table ('#' lines are comments), drop columns that are
  # entirely NA, and normalize every cell with clean_name().
  # NOTE: this name clashes with readr::read_csv if that package is loaded.
  tbl <- read.csv(fname, comment.char = '#', sep = sep)
  keep <- colSums(is.na(tbl)) < nrow(tbl)
  tbl <- tbl[, keep]
  data.frame(lapply(tbl, clean_name))
}
# Build a DESeq2 numeric contrast vector from row i of the contrast table.
# Columns 2 and 3 of contrast.df hold the positive and negative group names.
# NOTE(review): this reads the global `dds` object (via resultsNames(dds))
# rather than taking it as an argument — it must be called after dds exists.
# Returns list(contrast = numeric vector aligned to resultsNames(dds),
#              name = "<pos>_vs_<neg>").
get_contrast <- function(contrast.df, i){
# get names
name1 <- clean_name(contrast.df[i,2])
name2 <- clean_name(contrast.df[i,3])
name <- paste(name1, name2, sep = "_vs_") # HA.Ab1_vs_IgG
# name <- paste(contrast.df[i,1], name, sep = '.') # c1.HA.Ab1_vs_IgG
#if (nchar(name) > 100) {name = contrast.df[i,1]} # c1
# Strip the "group" factor prefix so names match the contrast table.
resultnames = gsub("group", "", resultsNames(dds)) # "group.HA.Ab1" "groupHA.Ab1" "groupIgG"
poss = match(name1, resultnames) # 2
negs = match(name2, resultnames) # 3
# +1/-1 weights, averaged when a side matches several coefficients.
contrast = rep(0, length(resultsNames(dds))) # 0, 0, 0
contrast[poss] = 1/length(poss)
contrast[negs] = -1/length(negs) # 0, 1, -1; or 0, 1/2, 1/2, -1/2, -1/2 if ; used
print(data.frame(resNames=resultnames,
contrast=contrast))
return(list(contrast=contrast, name=name))
}
# params ----
# The script takes 8 positional CLI arguments; when run interactively with
# no arguments, the hard-coded defaults below are used instead.
args = commandArgs(trailingOnly=TRUE)
if (length(args)==0) {
# countFile <- "results/narrow_peaks_contrast_level/K4me3-KO/K4me3-KO_vs_NoAb-KO_count.txt"
# peakFile <- "results/narrow_peaks_contrast_level/K4me3-KO/K4me3-KO_vs_NoAb-KO_clean.narrowPeak"
# outPeakFile <- "results/narrow_peaks_contrast_level/K4me3-KO/K4me3-KO_vs_NoAb-KO_clean.real.narrowPeak"
# metaFile <- 'config/meta.csv'
# contrastFile <- 'config/contrast.csv'
# contrast_name <- 'K4me3-KO'
# outputExcelFile <- "results/narrow_peaks_contrast_level/K4me3-KO/K4me3-KO_vs_NoAb-KO_clean.t_vs_c.xlsx"
# SizeFactorFile <- "results/clean_reads_qc/stats/reads_mapped.txt"
countFile <- "results/narrow_peaks_contrast_level/merged/ChIP_vs_Input_count.txt"
peakFile <- "results/narrow_peaks_contrast_level/merged/ChIP_vs_Input_clean.narrowPeak"
outPeakFile <- "results/narrow_peaks_contrast_level/merged/ChIP_vs_Input_clean.real.narrowPeak"
metaFile <- 'config/meta.csv'
contrastFile <- 'config/contrast.csv'
contrast_name <- 'merged'
outputExcelFile <- "results/narrow_peaks_contrast_level/merged/ChIP_vs_Input_clean.t_vs_c.xlsx"
SizeFactorFile <- "results/clean_reads_qc/stats/reads_mapped.txt"
}else{
countFile <- args[1]
peakFile <- args[2]
outPeakFile <- args[3]
metaFile <- args[4]
contrastFile <- args[5]
contrast_name <- args[6]
outputExcelFile <- args[7]
SizeFactorFile <- args[8]
}
# DESeq2 results() options: no independent filtering, Cook's cutoff on.
indfilter <- FALSE
cookscutoff <- TRUE
# Prep
# All side outputs (plots, CSVs, logs) go next to the count file.
odir <- dirname(countFile)
df <- read.table(countFile, sep="\t", header=TRUE,
comment.char = '#', row.names = 1)
# Strip featureCounts path/suffix decoration from the sample column names.
colnames(df) <- gsub("\\.bam$", "", colnames(df))
colnames(df) <- gsub("results.clean_reads.", "", colnames(df))
print(head(df))
print(dim(df))
# Columns 1-5 are peak annotation; 6+ are per-sample counts (DESeq2 needs
# integers).
df[, 6:ncol(df)] <- sapply(df[, 6:ncol(df)], as.integer)
peaks <- read.table(peakFile) # V4 is peak_id
print(head(peaks))
meta <- read_csv(metaFile)
print(meta)
contrast.df <- read_csv(contrastFile)
print(contrast.df)
contrast_name <- clean_name(contrast_name)
# Filter
# df <- df_filter(df, min_rowsum=10, min_colsum=2e4)
# Library-size diagnostic plot (log10 counts per sample).
pdf(file.path(odir, 'lib_size.pdf'))
boxplot(
log10(df[, 6:ncol(df)]+1),
las=2,
main = "library size after filtering"
)
dev.off()
# COUNT.xlsx, CPM.xlsx
# Export raw counts and counts-per-million with suffixed column names.
COUNT <- data.frame(df[, 6:ncol(df)])
colnames(COUNT) = paste0(colnames(COUNT), '_COUNT')
print(head(COUNT))
write.csv(COUNT, file.path(odir, "COUNT.csv"))
CPM <- calculateCPM(df[, 6:ncol(df)])
colnames(CPM) = paste0(colnames(CPM), '_CPM')
CPM <- data.frame(round(CPM,1))
print(head(CPM))
write.csv(CPM,file.path(odir, "CPM.csv"))
# DESeq2
## design
# Reorder the metadata rows to match the count-table column order.
meta <- meta[match(colnames(df[, 6:ncol(df)]), meta$sample), ]
print(meta)
# Include batch in the design only when more than one batch is present.
if (length(levels(meta$batch)) > 1){
coldata <- data.frame(row.names=colnames(df[, 6:ncol(df)]),
sample=factor(meta$sample),
group=factor(meta$group),
batch=factor(meta$batch)
)
print(coldata)
dds <- DESeqDataSetFromMatrix(
countData = df[, 6:ncol(df)],
colData = coldata,
design = ~ 0 + group + batch)
}else{
coldata <- data.frame(row.names=colnames(df[, 6:ncol(df)]),
sample=factor(meta$sample),
group=factor(meta$group)
)
print(coldata)
dds <- DESeqDataSetFromMatrix(
countData = df[, 6:ncol(df)],
colData = coldata,
design = ~ 0 + group) # converted to alph-order
}
writeLines(capture.output(coldata),
file.path(odir,"design.txt"))
# !!! unique for ChIPSeq (PullDown vs IgG)
# If use default, will assume overall binding profile same in PullDown and IgG,
# make pos (+LFC) to neg (-LFC)
# Size factor
# Override DESeq2's median-of-ratios size factors with ratios of total
# mapped reads (from the QC stats file), scaled so the smallest library is 1.
sf <- read_csv(SizeFactorFile, sep='\t')
sf$count <- as.numeric(as.character(sf$count))
print(sf )
colnames(dds)
sf <- sf[match(colnames(dds),sf$file ), ]
sizeFactors(dds) <- sf$count / min(sf$count)
writeLines(capture.output(sizeFactors(dds)),
file.path(odir,"size_factor.txt"))
dds <-DESeq(dds)
NormCount <- counts(dds, normalized=TRUE)
colnames(NormCount) = paste0(colnames(NormCount), '_Norm')
## contrast
# NOTE(review): only the first contrast row whose type matches
# contrast_name is used — confirm multiple matches are not expected.
i = which(contrast.df$type == contrast_name)
i = i[1]
c = get_contrast(contrast.df, i)
print(c)
if (sum(c$contrast != 0) < 2){
stop("less than 2 groups in contrast, maybe due to filtering")
}
# Shrunken LFCs (ashr) for ranking; raw LFCs and p-values from results().
res.lfc <- lfcShrink(
dds,
contrast = c$contrast,
type = 'ashr')
res.p <- results(
dds,
contrast = c$contrast,
independentFilter=indfilter,
cooksCutoff=cookscutoff)
RES <- data.frame(
peak_id = row.names(res.lfc),
log2FC_shrinked = res.lfc$log2FoldChange,
log2FC_raw = res.p$log2FoldChange,
pvalue = res.p$pvalue,
FDR = res.p$padj
)
# Join statistics with CPM, raw counts, and normalized counts by peak_id.
OUT <- merge(RES, CPM,by.x=1, by.y = 0, all.x=T)
OUT <- merge(OUT, COUNT, by.x=1, by.y = 0, all.x=T)
OUT <- merge(OUT, NormCount, by.x=1, by.y = 0, all.x=T)
writeExcel(OUT, outputExcelFile) # "c1/HA_vs_IgG_clean.real.broadPeak"
# "results/narrow_peaks_contrast_level/c1/c1.HA.Ab1_vs_IgG.DESeq2.xlsx"
# subset(OUT, peak_id == 'HA_vs_IgG_peak_2393')
# "Real" peaks = any positive raw LFC (treatment over control); no FDR
# cutoff is applied at this step (see the header comment of this script).
OUT.up <- subset(OUT, log2FC_raw > 0) # for PullDown vs IgG
# OUT.down <- subset(OUT, FDR < MAX_FDR & log2FC_raw < -MIN_LFC)
# OUT.sig <- rbind(OUT.up, OUT.down)
# writeExcel(
# OUT.up,
# file.path(odir, paste0(c$name,".DESeq2.", ".up.xlsx")))
# Split the input peak set into kept (up) and discarded (down) BED files.
peaks.up <- peaks[peaks[,4] %in% OUT.up$peak_id,]
write.table(peaks.up, outPeakFile, sep='\t', row.names=F, col.names=F, quote=F)
peaks.down <- peaks[!peaks[,4] %in% OUT.up$peak_id,]
write.table(peaks.down, paste0(outPeakFile, '.down'), sep='\t', row.names=F, col.names=F, quote=F)
# SessionInfo
writeLines(capture.output(sessionInfo()),
file.path(odir,"sessionInfo.txt"))
|
69aa1c18b6112b910811bb5a4a54afd1fad990b5
|
3abfaa6cf0b52c5ff44e9c0aa7d842f73e2d876e
|
/scripts/buoys-validator/resources/r/rmse.R
|
ae4403de1c03be6ac8ddd7fbf70aa9622334dbb6
|
[] |
no_license
|
rchailan/mirmidon-toolbox
|
0c272c4f7b7fd3db1d2a2a975b8a4e86876bb677
|
d116b1e0c8c7faaa4dd99144ea4965a1fa64fd03
|
refs/heads/master
| 2021-05-29T01:03:44.384387
| 2015-04-23T17:10:45
| 2015-04-23T17:10:45
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 827
|
r
|
rmse.R
|
# Buoy validation: compare modeled vs measured significant wave height (hs).
# Expects a data frame `df.mix` with columns date, hs.modeled, hs.measured.
#remove nan row(s)
df.mix2<-df.mix[complete.cases(df.mix),]
#subselect time frame
#df.mix2<-df.mix2[df.mix2$date > as.POSIXct("2011-09-01 00:00:00") & df.mix2$date < as.POSIXct("2011-12-31 00:00:00"),]
#Plot error
# Signed error: positive means the model overestimates the measurement.
hs.error<-(df.mix2$hs.modeled-df.mix2$hs.measured)
df<-data.frame(date=df.mix2$date,error=hs.error)
# Scatter of the signed error over time (stored, not printed here).
pline<-ggplot(df, aes(date)) +
geom_point(aes(y = error, colour = "error")) +
theme(axis.title.x = element_blank()) +
ylab("Error (m)") +
labs(colour = paste("Station","MeteoFrance",sep=": "))
#Compute errors metrics
# MAE, MSE, RMSE, and RMSE normalized by the observed range of hs.
error.mae<-(1/length(df.mix2$date))*sum(abs((df.mix2$hs.modeled-df.mix2$hs.measured)))
error.mse <- (1/length(df.mix2$date)) * (sum((df.mix2$hs.modeled-df.mix2$hs.measured)^2))
error.rmse <- sqrt(error.mse)
error.nrmse<- error.rmse/(max(df.mix2$hs.measured)-min(df.mix2$hs.measured))
|
df9d493d184186f868eeda4ee8626c6147f719c7
|
b4e274d4513130bb3333d9f665233145a9ab8cb6
|
/man/relAbForTimes.Rd
|
debf77f4e87cbe435249a248f52dc208b5a065f8
|
[] |
no_license
|
HegemanLab/ProteinTurnover
|
14f4bb415342070a660425ba57f3e6d796ce4f16
|
cc2f184ff508c78d4336bcfb2a838f19f12b564b
|
refs/heads/master
| 2021-01-10T17:46:41.292492
| 2017-07-18T21:48:43
| 2017-07-18T21:48:43
| 50,138,014
| 2
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 2,111
|
rd
|
relAbForTimes.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/proportions2.R, R/run.R
\name{relAbForTimes}
\alias{relAbForTimes}
\alias{regressionPlot}
\alias{plot.RelAbTimes}
\alias{makeRelAb}
\title{Relative Abundance from Counts, for multiple time points}
\usage{
relAbForTimes(Count, Channel, RT, TimePoint, data, nboot = 0, ...)
regressionPlot(x, ...)
\method{plot}{RelAbTimes}(x, ..., type = c("relAb", "regression"))
makeRelAb(seq, regression.model, nboot = 0)
}
\arguments{
\item{Count}{Observed counts}
\item{Channel}{Channels corresponding to the observed counts}
\item{RT}{Retention times corresponding to the observed counts}
\item{TimePoint}{Time points corresponding to the observed counts}
\item{data}{An optional data frame to take the preceding variables from}
\item{nboot}{Experimental, for bootstrapping}
\item{\dots}{Additional parameters to be passed}
\item{x}{A regRelAbTimes or logRelAbTimes object to be plotted}
\item{type}{desired type of plot}
\item{seq}{the sequence to get the relative abundance for}
\item{regression.model}{passed as method to relAbFromCounts}
}
\value{
A list is returned with the following elements. It is of class
"regRelAbTimes" if a regression method is used and "logRelAbtimes" if a log
model is used.
\item{data.long}{The long data frames from each fit, bound together
into one data frame.}
\item{norm_channel}{The chosen baseline channel for each timepoint}
\item{coefs}{For the regression model, a list with the fitted coefficients at each
time points.}
\item{relab}{The fitted relative abundance at each channel and timepoint}
\item{proportion}{The fitted relative abundance, converted to a
proportion}
\item{method}{The method used}
\item{threshold}{The threshold used}
}
\description{
Gets relative abundance of each channel from counts taken at several
retention times, and over multiple time points
}
\details{
The data from each time point is fit separately, using
relAbFromCounts. Plots are created using the lattice library.
}
\examples{
data(isocounts)
}
\seealso{
\code{\link{relAbFromCounts}}
}
|
4eab660befddd713aec78010bb58a349d0c31b61
|
51f9af2d345c8a6a4bd219dfb715dda93f009f26
|
/man/ref.Rd
|
a3619ae4f40feb6c57688cd9dd9c4f70ed676911
|
[] |
no_license
|
eeholmes/SardineForecast
|
1e7449046aa4c750bfcc654b60a99e6288507752
|
ead7a65dd5e222b72748e569e8e0b52ab03b5bf1
|
refs/heads/master
| 2021-07-20T04:26:55.048757
| 2021-07-16T02:59:47
| 2021-07-16T02:59:47
| 108,604,050
| 0
| 2
| null | null | null | null |
UTF-8
|
R
| false
| true
| 1,185
|
rd
|
ref.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/ref.r
\name{ref}
\alias{ref}
\title{Create counter for figure and tables}
\usage{
ref(useName, pre = "", app = "")
}
\arguments{
\item{useName}{name of the label. Must be of the form type:name, like fig:spawners}
\item{pre}{tag to put in front of number. ref("fig:foo", pre="A") gives A1}
\item{app}{tag to put at end of number}
}
\value{
The number of the figure or table with pre or app added if needed.
}
\description{
Used in the R markdown files for figure and table counters.
}
\details{
Can be used anywhere in text or captions. First time it is called, the counter .refctr is given a number associated with the name and the number returned. Next time it is called, it checks if the name exists. If so, it returns its number. Note, after the first time ref is called, pre and app are ignored.
Use like so `r ref("fig:1a")` wherever you need the number.
Note you need to define the environment of the rmarkdown file that is calling ref(). Add this at the top of the Rmd file
if(!exists(".rmdenvir")) .rmdenvir = environment()
.rmdenvir and .refctr will be created in the Rmd environment
}
|
5d43751c7b997a1dff0ad682995bc7fcf09e3c4b
|
7e5c87886c6fcbf4fd7cad55aadb4de90ed377fb
|
/janata_hr.R
|
9a776adc168f56ff313df9158dec84c7d8e53919
|
[] |
no_license
|
neek-ds/r-programming
|
61748b8edb8b7db12b204756ac92db31801c5387
|
61c836240ecfaedce85af7c483a1232a09d78a55
|
refs/heads/master
| 2021-07-09T03:27:59.893768
| 2020-11-01T18:33:13
| 2020-11-01T18:33:13
| 207,118,926
| 0
| 0
| null | 2019-09-09T05:28:20
| 2019-09-08T13:45:13
| null |
UTF-8
|
R
| false
| false
| 9,878
|
r
|
janata_hr.R
|
# to predict the probability of an enrollee to look for a new job.
#***********Load the Dataset**********************************
hrdata = read.csv(file.choose(),header = T)
#checking the structure of the data
head(hrdata)
str(hrdata)
#copy the data to another variable
hr_train = hrdata
str(hr_train)
head(hr_train)
#checking the summary of the data
summary(hr_train)
#checking for missing values in the data
sum(is.na(hr_train))
table(hr_train$gender)
#in this variable Other is a level we can use to replace the blank value with Other rather than
#a level with highest mode value
hr_train$gender[hr_train$gender=='']='Other'
#In a situation where we dont have a level named as Other we use to replace blank values with
#a level with highest frequency
table(hr_train$enrolled_university)
hr_train$enrolled_university[hr_train$enrolled_university=='']='no_enrollment'
table(hr_train$education_level)
dim(hr_train)
hr_train$education_level[hr_train$education_level=='']='Graduate'
#we r replacing the major discipline with other because it is one of the levels
table(hr_train$major_discipline)
hr_train$major_discipline[hr_train$major_discipline=='']='Other'
str(hr_train)
table(hr_train$experience)
#since we dont have 'Other' as a well in this variable we replace the blank value with
# a level which has highest frequency
hr_train$experience[hr_train$experience=='']='>20'
table(hr_train$company_size)
summary(hr_train$company_size)
levels(hr_train$company_size)[levels(hr_train$company_size)=='10/49']='10-49'
hr_train$company_size[hr_train$company_size=='']='50-99'
table(hr_train$company_type)
hr_train$company_type[hr_train$company_type=='']='Other'
table(hr_train$last_new_job)
#replace blanks with level with highest mode which is 1
hr_train$last_new_job[hr_train$last_new_job=='']='1'
#*********************************************************************************************
#******************************************************************
#class(hr_train)
#write.csv(hr_train,'E:/DS/Analytics/Hackathon/janata hack-hR/hr_train.csv',row.names = F)
#View(hr_train)
#checking for missing values in test data
hr_test=read.csv(file.choose(),header = T)
head(hr_test)
str(hr_test)
table(hr_test$gender)
hr_test$gender[hr_test$gender=='']='Other'
table(hr_test$enrolled_university)
hr_test$enrolled_university[hr_test$enrolled_university=='']='no_enrollment'
table(hr_test$education_level)
hr_test$education_level[hr_test$education_level=='']='Graduate'
table(hr_test$major_discipline)
hr_test$major_discipline[hr_test$major_discipline=='']='Other'
table(hr_test$experience)
hr_test$experience[hr_test$experience=='']='>20'
table(hr_test$company_size)
levels(hr_test$company_size)[levels(hr_test$company_size)=='10/49']='10-49'
hr_test$company_size[hr_test$company_size=='']='50-99'
table(hr_test$company_type)
hr_test$company_type[hr_test$company_type=='']='Other'
table(hr_test$last_new_job)
hr_test$last_new_job[hr_test$last_new_job=='']='1'
write.csv(hr_test,'E:/DS/Analytics/Hackathon/janata hack-hR/hr_test.csv',row.names = F)
View(hr_test)
#*****************************************************************************************
#Exploratory Data Analysis
str(hr_train)
# Re-load the (previously cleaned/imputed) training csv interactively.
hr_train=read.csv(file.choose(),header = T)
#checking for distribution of categorical variables
# plot() on a factor column draws a level-frequency bar chart.
plot(hr_train$gender)
plot(hr_train$relevent_experience)
plot(hr_train$enrolled_university)
plot(hr_train$education_level)
plot(hr_train$major_discipline)
plot(hr_train$experience)
plot(hr_train$company_size)
plot(hr_train$company_type)
plot(hr_train$last_new_job)
hist(hr_train$training_hours)
boxplot(hr_train$training_hours)
#training hours variable has outliers and it is right skewed data, we can take the log of the variable
#to negate use of outliers
#hr_train$one=hr_train$training_hours
#boxplot(hr_train$one)
#hr_train$one=log(hr_train$one)
#boxplot(hr_train$one)
library(car)
library(carData)
library(caret)
library(lattice)
library(ggplot2)
# Log-transform the right-skewed training_hours to dampen outliers.
# NOTE(review): log() assumes every training_hours value is > 0 (a zero would
# become -Inf) -- TODO confirm against the data.
hr_train$training_hours=log(hr_train$training_hours)
boxplot(hr_train$training_hours)
hist(hr_train$training_hours)
#to check correlation between categorical variables we will use cramer's v rule
# NOTE(review): GKtauDataframe actually computes the (asymmetric)
# Goodman-Kruskal tau association measure, not Cramer's V.
str(hr_train)
library(GoodmanKruskal)
# Pairwise association matrix over the categorical predictors.
varset = c('city','gender','relevent_experience','enrolled_university','education_level',
           'major_discipline','experience','company_size','company_type','last_new_job')
hr2=subset(hr_train,select = varset)
gkmat2=GKtauDataframe(hr2)
plot(gkmat2)
#since the target variable is in the form of probability we will convert target variable to
#factors
#checking important variables for model building
# The commented-out fits below are the variable-selection audit trail:
#   - mod1: all predictors (AIC 13800)
#   - mod_2: drop city            (AIC 13780, but residual deviance worse)
#   - mod4: drop both city vars   (AIC 13951)
#hr_mod1 = glm(target~.,data=hr_train,family = 'binomial')
#print(hr_mod1)
#summary(hr_mod1)
#Degrees of Freedom: 18358 Total (i.e. Null); 18182 Residual
#Null Deviance: 14330
#Residual Deviance: 13450 AIC: 13800
#var2 = subset(hr_train,select = c(-city))
#mod_2=glm(target~.,data = var2,family = 'binomial')
#print(mod_2)
#summary(mod_2)
#Degrees of Freedom: 18358 Total (i.e. Null); 18303 Residual
#Null Deviance: 14330
#Residual Deviance: 13670 AIC: 13780
# with p value results for variables it can be seen thaty city and city development index is highly correlated
#so if we use one variable instead of 2 that will reduce multicollinearity
# Chosen specification: keep city, drop city_development_index (the two are
# strongly associated, so keeping both would cause multicollinearity).
var3=subset(hr_train,select = c(-city_development_index))
mod_3=glm(target~.,data=var3,family='binomial')
summary(mod_3)
#Null deviance: 14332 on 18358 degrees of freedom
#Residual deviance: 13450 on 18182 degrees of freedom
#AIC: 13804
#var44=subset(hr_train,select = c(-city,-city_development_index))
#mod4=glm(target~.,data=var44,family='binomial')
#summary(mod4)
#Null deviance: 14332 on 18358 degrees of freedom
#Residual deviance: 13841 on 18304 degrees of freedom
#AIC: 13951
colnames(hr_train)
#var5=subset(hr_train,select = c(-training_hours,-city_development_index))
#mod5=glm(target~.,data=var5,family='binomial')
#summary(mod5)
#since city and city development index is correlated we r using only city to avoid multicollinearity
#as we have seen keeping both variables in model building city_development_index results in NA
#values in summary output moreover the null and residual deviance values doesnt change much
# when we exclude city and keep city development index
#but when city is excluded the difference between null and residual deviance is not much
#but when city_development_index is excluded the residual deviance is reduced
# train_hr is the final modelling frame used from here on.
train_hr = var3
dim(train_hr)
dim(train_hr)
#converting the target variable into factor
train_hr$target = as.factor(train_hr$target)
hr_model=glm(target~.,data=train_hr,family='binomial')
summary(hr_model)
#1, means looking for a job change
#0, means not looking for a job change
#finding the probabilities
train_hr$prob=round(fitted(hr_model),2)
head(train_hr$prob)
#View(train_hr$prob)
#breaking the probabilities into 0's and 1's
#train_hr$predictedjob = ifelse(train_hr$prob > 0.3,1,0)
#class(train_hr$predictedjob)
#train_hr$predictedjob=as.factor(train_hr$predictedjob)
#head(train_hr$predictedjob)
head(train_hr$target)
train_hr$pred1=ifelse(train_hr$prob>0.5,1,0)
class(train_hr$pred1)
train_hr$pred1=as.factor(train_hr$pred1)
class(train_hr$target)
#finding the accuracy of the model using AUC-ROC curve
library(ROCR)
library(gplots)
pred=prediction(train_hr$prob,train_hr$target)
pred
perform=performance(pred,'tpr','fpr')
perform
# prediction will calculate probability on it own seeing the cutoff value
# takes input from prediction , performance function calculate tpr and fpr
plot(perform,colorize = T,print.cutoffs.at=seq(0.1,by=0.1))
#creating table for confusion matrix
#classic_table=table(train_hr$target,train_hr$predictedjob)
#classic_table
# Metrics previously recorded for the 0.3 cutoff:
#   accuracy    (15403+262)/(15403+262+531+2163) = 0.85326
#   sensitivity 262/(262+531)                    = 0.3303909
#   specificity 15403/(15403+2163)               = 0.8768644
#*****************************************************************
# Confusion matrix at the 0.5 cutoff. Rows = actual target (0/1),
# columns = predicted class (0/1), i.e. table(actual, predicted).
tab1=table(train_hr$target,train_hr$pred1)
tab1
# Derive the metrics from tab1 itself instead of hand-typed cell counts, so
# the numbers remain correct if the data or the cutoff changes.
# Accuracy: (TN + TP) / total.  (Was (15921+16)/(15921+16+13+2409) = 0.868.)
accuracy = sum(diag(tab1))/sum(tab1)
accuracy
# Sensitivity (true positive rate): actual 1s classified as 1,
# TP / (TP + FN) = tab1["1","1"] / row total for actual 1.
# BUG FIX: the original computed 16/(16+13) = TP/(TP+FP), which is
# precision, not sensitivity.
sensitivity = tab1["1","1"]/sum(tab1["1",])
sensitivity
# Specificity (true negative rate): actual 0s classified as 0,
# TN / (TN + FP) = tab1["0","0"] / row total for actual 0.
# BUG FIX: the original computed 15921/(15921+2409) = TN/(TN+FN), which is
# the negative predictive value, not specificity.
specificity = tab1["0","0"]/sum(tab1["0",])
specificity
# Area under the ROC curve: threshold-independent discrimination measure.
#Area under the curve - [1] 0.6840505
accu = performance(pred,"auc")
(accu@y.values)
#with cutoff point 0.3 the accuracy of classification from confusion matrix is 85% and sensitivity
# 33% and specificity 87%
#with cutoff point 0.5 the accuracy of classification is 86%; note that at this
#cutoff very few rows are predicted positive, so true sensitivity is low even
#though precision (16/29) is ~55%
#model accuracy - [1] 0.6840505 = 68.4% AUC
#********************************************************************************************
# Score the fitted logistic model on the held-out test set and write the
# submission file expected by the hackathon scorer.
hr_test = read.csv(file.choose(),header = T)
head(hr_test)
dim(hr_test)
#check for missing values (the test set was already imputed and re-exported above)
colSums(is.na(hr_test))
#As per the question the
#Note that the test data is further randomly divided into Public (40%) and Private (60%) data.
# Your initial responses will be checked and scored on the Public data.
# BUG FIX: predict.glm takes the scoring data via `newdata=`, not `data=`.
# With `data=hr_test` the argument was silently ignored and the model returned
# fitted values for the TRAINING rows instead of predictions for the test set.
hr_pred = round(predict(hr_model,newdata=hr_test,type = 'response'),2)
head(hr_pred)
length(hr_pred)  # hr_pred is a plain vector, so dim() would return NULL
# Classify with the same 0.5 cutoff chosen on the training data.
res = ifelse(hr_pred > 0.5,1,0)
# Pair each prediction with the real enrollee_id from the test file so the
# scorer can match rows; fall back to a 1:n sequence only if the id column
# is absent (TODO confirm hr_test carries enrollee_id).
ids = if (!is.null(hr_test$enrollee_id)) hr_test$enrollee_id else seq_along(hr_pred)
submission = data.frame(enrollee_id=ids,res=res)
write.csv(submission,file = 'E:/DS/Analytics/Hackathon/janata hack-hR/finalsubmission.csv',row.names = F)
# End of script. (Removed non-R scraping residue that would have been a syntax error.)