blob_id
stringlengths 40
40
| directory_id
stringlengths 40
40
| path
stringlengths 2
327
| content_id
stringlengths 40
40
| detected_licenses
listlengths 0
91
| license_type
stringclasses 2
values | repo_name
stringlengths 5
134
| snapshot_id
stringlengths 40
40
| revision_id
stringlengths 40
40
| branch_name
stringclasses 46
values | visit_date
timestamp[us]date 2016-08-02 22:44:29
2023-09-06 08:39:28
| revision_date
timestamp[us]date 1977-08-08 00:00:00
2023-09-05 12:13:49
| committer_date
timestamp[us]date 1977-08-08 00:00:00
2023-09-05 12:13:49
| github_id
int64 19.4k
671M
⌀ | star_events_count
int64 0
40k
| fork_events_count
int64 0
32.4k
| gha_license_id
stringclasses 14
values | gha_event_created_at
timestamp[us]date 2012-06-21 16:39:19
2023-09-14 21:52:42
⌀ | gha_created_at
timestamp[us]date 2008-05-25 01:21:32
2023-06-28 13:19:12
⌀ | gha_language
stringclasses 60
values | src_encoding
stringclasses 24
values | language
stringclasses 1
value | is_vendor
bool 2
classes | is_generated
bool 2
classes | length_bytes
int64 7
9.18M
| extension
stringclasses 20
values | filename
stringlengths 1
141
| content
stringlengths 7
9.18M
|
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
4e1e67ce1f3b1ce5d0618b2efc3cc429bdbb62b5
|
ffdea92d4315e4363dd4ae673a1a6adf82a761b5
|
/data/genthat_extracted_code/hydroTSM/examples/subdaily2daily.Rd.R
|
dc5bb32eccbf8a21d6137f3fc1f6bd620524f1dc
|
[] |
no_license
|
surayaaramli/typeRrh
|
d257ac8905c49123f4ccd4e377ee3dfc84d1636c
|
66e6996f31961bc8b9aafe1a6a6098327b66bf71
|
refs/heads/master
| 2023-05-05T04:05:31.617869
| 2019-04-25T22:10:06
| 2019-04-25T22:10:06
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 533
|
r
|
subdaily2daily.Rd.R
|
# Extracted example from the hydroTSM help page for subdaily2daily():
# aggregates an hourly streamflow series to daily values.
library(hydroTSM)
### Name: subdaily2daily
### Title: Sub-daily -> Daily
### Aliases: subdaily2daily subdaily2daily.default subdaily2daily.zoo
### subdaily2daily.data.frame subdaily2daily.matrix
### Keywords: manip
### ** Examples
## Loading the time series of hourly streamflows for the station Karamea at Gorge
# KarameaAtGorgeQts is a zoo time series shipped with hydroTSM.
data(KarameaAtGorgeQts)
x <- KarameaAtGorgeQts
# Plotting the hourly streamflow values
plot(x)
## sub-daily to Daily
# Aggregate hourly values into one value per day with FUN=sum;
# na.rm=TRUE drops missing hours before summing (NOTE(review): confirm
# hydroTSM's handling of days that are entirely NA).
d <- subdaily2daily(x, FUN=sum, na.rm=TRUE)
# Plotting the daily streamflow values
plot(d)
|
0e69b4d19da014436c1e03f021dcfe1a84227c57
|
16eee1f983eb371e5d736f5496fb02a3a5f26ded
|
/tests/testthat/test-as.ft_data.R
|
1077b740f397a1c99bc203484b720e2759572731
|
[
"MIT"
] |
permissive
|
shivam11/fulltext
|
ad3cf061a369d27751baa3cb168a072ac1dc6794
|
2813d2b9fc7dcaa07bd145b32af1d31ae48254a0
|
refs/heads/master
| 2020-04-01T04:15:01.464298
| 2018-10-12T18:06:33
| 2018-10-12T18:06:33
| 152,855,814
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 720
|
r
|
test-as.ft_data.R
|
# Tests for as.ft_data(): building an ft_data object from the local
# article cache (package appears to be "fulltext" — ftxt_cache is the
# package cache object, sm() presumably suppresses messages; verify
# against the package helpers).
context("as.ft_data")
# clean out cache first
# Start from a known-empty cache so the first test sees zero cached items.
# NOTE: the two test_that() blocks share this cache state, so order matters.
ftxt_cache$delete_all()
test_that("as.ft_data works", {
skip_on_cran()
# with empty cache
# An empty cache should still yield a well-formed ft_data object with an
# empty $cached$data list and no errors.
aa <- as.ft_data()
expect_is(aa, "ft_data")
expect_named(aa, "cached")
expect_equal(length(aa$cached$data), 0)
expect_equal(NROW(aa$cached$error), 0)
# with non-empty cache
## download a paper first
# Network call (gated by skip_on_cran above); populates the cache with one item.
bb <- sm(ft_get('10.7717/peerj.228'))
aa <- as.ft_data()
expect_is(aa, "ft_data")
expect_named(aa, "cached")
expect_equal(length(aa$cached$data), 1)
expect_named(aa$cached$data, "path")
expect_equal(NROW(aa$cached$error), 0)
})
test_that("as.ft_data fails well", {
skip_on_cran()
# A non-path argument should be rejected with R's standard file-connection error.
expect_error(as.ft_data(5), "invalid filename argument")
})
|
972b689567c0c86723d0bf88dbed1f6554f341a9
|
cfaf2ff8c52b316b3b674d9fac2362922bdc255f
|
/examples/example.R
|
0362ebfea4a7966221d3ad6e0e8a4f813406e811
|
[] |
no_license
|
darribas/WooW
|
835345721d28fab3e038c18aa56e4f426bf06a9a
|
b5e9b18da8113017ae9dac65b529f1ff37ffab1a
|
refs/heads/master
| 2020-12-24T14:01:59.765876
| 2014-01-28T08:57:17
| 2014-01-28T08:57:17
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 433
|
r
|
example.R
|
# Exploratory walkthrough of the WooW buzz dataset: quick looks at the data,
# a couple of base plots, a simple OLS model, and a LaTeX-ready summary table.

# Load up data
# NOTE(review): hard-coded absolute path — this only runs on the original
# author's machine; point it at examples/buzz_data.csv relative to the repo.
db <- read.csv("/Users/tomba/Thomas/Colleges/WooW/examples/buzz_data.csv")

# Print top
head(db)
# Print tail
tail(db)
# Print summary
summary(db)

# Kernel density of the diversity index
plot(density(db$div_i))

# Scatter: diversity index vs. share of industrial area
plot(db$div_i, db$industrie_pct_area)

# OLS Model
# Fix: pass a real formula object instead of a character string — lm() only
# works with a string through implicit coercion, and string formulas break
# tools that inspect terms().
ols <- lm(checkins_all ~ total_units + div_i, data = db)

# Plain text summary
summary(ols)

# LaTeX summary
library(xtable)
smry.table <- xtable(ols)
print(smry.table)
|
fa5032152ab515b4f4f05c74d774e8cd787b30f3
|
75cef92ac4373580fbdf542900fcd84ebc880b56
|
/2020_09_15/27页.R
|
025654a8c0813d6100291ef6c9d8bceecce400af
|
[] |
no_license
|
niushufeng/R_learing
|
55befbf768d289eae15cfaa5057c2056039dc094
|
4327de54287da025391ab1d4be1d48a72cd482e7
|
refs/heads/master
| 2023-01-29T07:56:09.252672
| 2020-12-15T07:44:46
| 2020-12-15T07:44:46
| 295,648,172
| 1
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 839
|
r
|
27页.R
|
# Basic descriptive statistics demonstrated on the built-in mtcars dataset
# (course notes, "page 27"). Comments translated to English.
#object
data(mtcars)
# NOTE(review): col1 is assigned but never used below.
col1 = mtcars$mpg
# Maximum over the whole data frame (all columns pooled), not per column.
max(mtcars)
?mtcars
head(mtcars)
mtcars
min(mtcars)
which.max(mtcars$mpg) # index of the maximum value
which.min(mtcars$mpg) # index of the minimum value
mean(mtcars$mpg) # sample mean
median(mtcars$mpg) # sample median
var(mtcars$mpg) # sample variance
sd(mtcars$mpg ) # standard deviation of the vector
range(mtcars$mpg) # returns a length-2 vector (min, max)
IQR(mtcars$mpg) # interquartile range of the sample
quantile(mtcars$mpg) # common sample quantiles
summary(mtcars$mpg) # common descriptive statistics of the sample
length(mtcars$mpg) # length of the vector
sum(mtcars$mpg) # sum of the vector
prod(mtcars$mpg) # product of the vector
rev(mtcars$mpg) # reverse the vector
sort(mtcars$mpg) # sort the vector in ascending order
rank(mtcars$mpg) # ranks of the vector's elements
cumsum(mtcars$mpg) # cumulative sums of the vector
|
1590727c02587f6180691f8782a3faf33362e091
|
a0ea9e38c84e3085176ff2b77d9f99189c1e9329
|
/Web_scrape.R
|
8e9ca019a6f55a963c620fff77ee5766e5d1ef76
|
[] |
no_license
|
vganamukhi/R-projects
|
3119ed84cddec17a4fd6f813ba3b2aca8e50aa27
|
b55eb71cdc97ca4b8e293adf73dee71dc6b5513b
|
refs/heads/master
| 2022-12-04T14:29:16.475135
| 2020-08-26T17:58:50
| 2020-08-26T17:58:50
| 290,566,178
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 9,261
|
r
|
Web_scrape.R
|
# Scrape an Amazon product page (title, price, star rating, customer reviews)
# with rvest, then run a polarity (sentiment) analysis on the review text
# with qdap and plot the results.
#
# Fixes vs. the original:
#  * seven nearly identical copy-pasted "Reviewer N" sections collapsed into
#    one helper driven by a vector of review IDs;
#  * str_replace_all() results for title/price/rating were computed but
#    discarded — now assigned;
#  * the second ggplot used labs(x=) twice (the y-axis label was never set)
#    and reused the "Polarity Plot" title for the rating plot;
#  * `c <- counts(...)` shadowed base::c — renamed.

#Loading the rvest package
library('rvest')
library('stringr')
library(qdap)
library(magrittr)

#Specifying the url for desired website to be scraped
url <- 'https://www.amazon.com/Apple-iPhone-64GB-Silver-Prepaid/dp/B078HVJB69/ref=sr_1_1_sspa?keywords=iphone+x&qid=1571701171&sr=8-1-spons&psc=1&spLa=ZW5jcnlwdGVkUXVhbGlmaWVyPUEyRFo3M0hSMkJYSVQwJmVuY3J5cHRlZElkPUExMDAzODA2MUtDNlk1Q0JPVVFUUyZlbmNyeXB0ZWRBZElkPUEwOTE0NzI4MU9BS1lGTU9XRTdZQSZ3aWRnZXROYW1lPXNwX2F0ZiZhY3Rpb249Y2xpY2tSZWRpcmVjdCZkb05vdExvZ0NsaWNrPXRydWU='

#Reading the HTML code from the website
webpage <- read_html(url)

# ---- Product-level fields ----

# Title of the product, with embedded newlines stripped.
title <- webpage %>% html_nodes(xpath = '//*[@id="productTitle"]') %>% html_text()
title <- str_replace_all(title, "[\r\n]", "")
title

# Price of the phone.
price <- webpage %>%
  html_nodes(xpath = '//*[@id="priceblock_ourprice"]') %>%
  html_text()
price <- str_replace_all(price, "[\r\n]", "")
price

# Overall star rating, cleaned and trimmed.
rating <- webpage %>%
  html_node(xpath = '//*[@id="acrPopover"]/span[1]/a/i[1]/span') %>%
  html_text()
rating <- str_trim(str_replace_all(rating, "[\r\n]", ""))
rating

# ---- Customer reviews ----

# Scrape one customer review block by its Amazon review id.
# Returns a one-row data frame: reviewer name, star rating, comment text,
# and review date. Emoji (thumbs-up) characters are stripped from comments.
scrape_review <- function(page, review_id) {
  base_xpath <- paste0('//*[@id="customer_review-', review_id, '"]')
  grab <- function(suffix) {
    page %>% html_node(xpath = paste0(base_xpath, suffix)) %>% html_text()
  }
  comment <- grab('/div[4]/span/div/div[1]/span')
  comment <- str_replace_all(comment, "[\U0001f44d]", "")
  data.frame(
    Reviewer_name    = grab('/div[1]/a/div[2]/span'),
    Rating           = grab('/div[2]/a[1]/i/span'),
    Reviewer_Comment = comment,
    Reviewed_Date    = grab('/span'),
    stringsAsFactors = FALSE
  )
}

# Review ids present on the scraped page (hard-coded, as in the original).
review_ids <- c("RBRPZLUVB40S9", "R2XS8CBW7AI3SZ", "RW2WEJVX9G6LF",
                "R1T9SHSJWAXDY7", "R10SV32UFYYRFD", "R1M8F05QA6F49N",
                "R1NIOYOVVJQ1HO")

new_customer_data <- do.call(
  rbind,
  lapply(review_ids, function(id) scrape_review(webpage, id))
)
View(new_customer_data)

# ---- Sentiment (polarity) analysis of the review text ----

#Analyse the reviews using polarity function from qdap library
new_customer_data %$% polarity(Reviewer_Comment)
(datacamp_conversation <- new_customer_data %$% polarity(Reviewer_Comment, Reviewer_name))
datacamp_conversation$all$polarity
word_counts <- counts(datacamp_conversation)
word_counts$pos.words # positive words
word_counts$neg.words # negative words

#plot graph for comments' polarity and length of the comments
plot(datacamp_conversation)

# Extract the leading number from ratings such as "4.0 out of 5 stars".
new_customer_data$Rate <- as.numeric(gsub("([0-9]+).*$", "\\1", new_customer_data$Rating))
View(new_customer_data)
barplot(new_customer_data$Rate)

library(ggplot2)
#Plot the graphs
# Polarity per reviewer.
p <- ggplot(new_customer_data,
            aes(x = datacamp_conversation$all$Reviewer_name,
                y = datacamp_conversation$all$polarity)) +
  geom_bar(stat = "identity")
p + labs(title = "Polarity Plot", x = "Reviewer", y = "Polarity")

# Star rating per reviewer.
p <- ggplot(new_customer_data, aes(x = Reviewer_name, y = Rate)) +
  geom_bar(stat = "identity")
p + labs(title = "Rating Plot", x = "Reviewer", y = "Rating")
|
a2dd14d5be0ae74bc11fb31301302523cb615940
|
f7f39756b62aa5703ea772729b70bf7a8f8ad431
|
/callisiaMapping.R
|
acf1d66b368aec67916c42493a5ee09e549c2598
|
[] |
no_license
|
k8hertweck/Callisia
|
ab9d51335b18d575ce027d7334a7a4aa4d9b94b7
|
3e432e32dc105c87d521a0355ebe608abf59f867
|
refs/heads/master
| 2021-04-28T22:37:18.921013
| 2017-09-26T23:21:33
| 2017-09-26T23:21:33
| 77,720,085
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 3,133
|
r
|
callisiaMapping.R
|
## Callisia mapping and data preparation
# Splits historical and contemporary Callisia occurrence records into diploid
# (2X) and tetraploid (4X) sets, plots genome-size histograms, and writes a
# series of occurrence maps for the southeastern US to figures/.
## load libraries
library(dismo)
library(fields)
library(maps)
# NOTE(review): rgdal and maptools are retired from CRAN; consider sf/terra.
library(rgdal)
library(raster)
library(maptools)
library(dplyr)
library(ggplot2)
library(stringr)
# create directory for figures
dir.create("figures")
## load and parse historical data
historical <- read.csv(file="data/callisia_historical_occurrence.csv")
# Historical records by cytotype: 2X = diploid, 4X = tetraploid.
histDip <- historical %>%
filter(Cytotype == "2X")
histTet <- historical %>%
filter(Cytotype == "4X")
# load and parse contemporary data
contemp <- read.csv(file="data/callisia_contemporary_occurrence.csv")
# basic histogram
hist(contemp$pg.2C)
# Drop records without a genome-size measurement.
# NOTE(review): this compares against the string "NA", which only drops
# missing values if pg.2C was read as character; for a numeric column use
# !is.na(pg.2C) instead — verify how read.csv parsed this column.
contempGS <- contemp %>%
filter(pg.2C != "NA")
# ggplot histogram
ggplot(contempGS, aes(pg.2C)) + geom_histogram(binwidth=5) +
labs(x = "genome size (pg/2C)",
y = "number of individuals") +
theme_bw() +
theme(axis.title = element_text(size=22)) +
theme(axis.text = element_text(size=20))
ggsave("figures/GShistogram.jpg", width=10, height=8)
# plot histogram and color by site
ggplot(contempGS, aes(pg.2C, fill=Site.number), color=Site.number) + geom_histogram(binwidth=5)
# separate into putative diploid and tetraploid
# 60 pg/2C is the cut point between the two observed genome-size clusters
# (see the ranges recorded in the inline comments below).
contempDip <- contempGS %>%
filter(pg.2C < 60)
range(contempDip$pg.2C) #36.53 47.87
write.csv(contempDip, "data/contemporaryDiploid.csv", row.names=FALSE)
contempTet <- contempGS %>%
filter(pg.2C > 60)
range(contempTet$pg.2C) #73.71 84.52
write.csv(contempTet, "data/contemporaryTetraploid.csv", row.names = FALSE)
# map all occurrence points
# Color scheme (ColorBrewer Paired): light blue/green = historical dip/tet,
# dark blue/green = contemporary dip/tet.
jpeg(file="figures/mappingAll.jpg")
southeast <- c("florida", "georgia", "north carolina", "south carolina")
map(database="state", regions = southeast, interior=T, lwd=2)
points(histDip$Longitude, histDip$Latitude, col='#a6cee3', pch=20, cex=2)
points(histTet$Longitude, histTet$Latitude, col='#b2df8a', pch=20, cex=2)
points(contempDip$Longitude, contempDip$Latitude, col='#1f78b4', pch=20, cex=2)
points(contempTet$Longitude, contempTet$Latitude, col='#33a02c', pch=20, cex=2)
dev.off()
# map only historic points
jpeg(file="figures/mappingHistoric.jpg")
southeast <- c("florida", "georgia", "north carolina", "south carolina")
map(database="state", regions = southeast, interior=T, lwd=2)
points(histTet$Longitude, histTet$Latitude, col='#b2df8a', pch=20, cex=2)
points(histDip$Longitude, histDip$Latitude, col='#a6cee3', pch=20, cex=2)
dev.off()
# map only NC and SC
# Exclude tetraploid records whose locality text mentions Florida or Georgia.
histTetCar <- histTet %>%
filter(!str_detect(Locality, "Fla")) %>%
filter(!str_detect(Locality, "GA"))
NCSC <- c("north carolina", "south carolina")
jpeg("figures/mappingCar.jpg")
map(database="state", regions = NCSC, interior=T, lwd=2)
points(histDip$Longitude, histDip$Latitude, col='#a6cee3', pch=20, cex=3)
points(histTetCar$Longitude, histTetCar$Latitude, col='#b2df8a', pch=20, cex=3)
points(contempDip$Longitude, contempDip$Latitude, col='#1f78b4', pch=20, cex=3)
points(contempTet$Longitude, contempTet$Latitude, col='#33a02c', pch=20, cex=3)
dev.off()
# map plain us outline
jpeg("figures/us.jpg")
map(database="state")
dev.off()
# map plain SE states outline
jpeg("figures/southeast.jpg")
map(database="state", regions = southeast)
dev.off()
|
1194c03c00d877bfda39e5214017311289a50a9a
|
edff288b86167601e9725895a4c2b2371ecffe8c
|
/plot1.R
|
80ceb0230994296061d7a4c5315202d8cdf48508
|
[] |
no_license
|
CamilleRSR/ExData_Plotting1
|
c35c32a8d84533d80907941083c34290373e2db6
|
90df3baa291b375b2e2d2017b9ff2afa7ee8b8d9
|
refs/heads/master
| 2021-01-19T07:12:10.265148
| 2014-09-06T08:24:37
| 2014-09-06T08:24:37
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 596
|
r
|
plot1.R
|
# Plot 1 (Exploratory Data Analysis course assignment): histogram of
# Global Active Power for 2007-02-01 .. 2007-02-02, saved to plot1.png.
library(chron)
power_file <- "household_power_consumption.txt"
# "?" marks missing values in this dataset. nrows = 100000 limits the read;
# assumes the first 100000 rows cover the two target dates — TODO confirm.
power <- read.table(power_file, header = TRUE, sep = ";",
stringsAsFactors = FALSE, na.strings = "?", nrows = 100000)
power$Date <- as.Date(power$Date, format = "%d/%m/%Y")
power$Time <- times(power$Time)
# Comparing a Date column with "YYYY-MM-DD" strings works because Ops.Date
# coerces the character operand via as.Date.
power_subset <- subset(power, power$Date >= "2007-02-01" & power$Date <= "2007-02-02")
row.names(power_subset) <- NULL
plot1 <- hist(power_subset$Global_active_power, col = "red", main = "Global Active Power", xlab = "Global Active Power (kilowatts)")
# Re-render the on-screen histogram into a 480x480 PNG, then close the device.
dev.copy(png,"plot1.png",width=480,height=480)
dev.off()
|
06125c9f850aabbd203d86d00313d1fd883e8b4e
|
8e5cc40ef528171e638ef2e15bf51885d1fb8853
|
/Plot2.R
|
beadf11d3b67d0b7914e139bd7a768a997cc8ed0
|
[] |
no_license
|
Alvago00/ExData_Plotting1
|
0b89ce26477e17c2b91970f59235fe207e47604f
|
c63273b6cf4fbeb755afb5402db86c1807a7a409
|
refs/heads/master
| 2021-09-02T08:13:58.031413
| 2017-12-31T20:47:30
| 2017-12-31T20:47:30
| 115,861,751
| 0
| 0
| null | 2017-12-31T12:27:57
| 2017-12-31T12:27:57
| null |
UTF-8
|
R
| false
| false
| 1,013
|
r
|
Plot2.R
|
# Plot 2 (Exploratory Data Analysis course assignment): line chart of
# Global Active Power over 1-2 Feb 2007, saved to plot2.png.
## Download the file to load and unzip it
if (!file.exists("data")){
dir.create("data")}
fileUrl <- "https://d396qusza40orc.cloudfront.net/exdata%2Fdata%2Fhousehold_power_consumption.zip"
download.file(fileUrl, destfile = "./data/household_power_consumption.zip")
unzip(zipfile = "./data/household_power_consumption.zip", exdir = "./data")
## Load and create Data
# "?" marks missing values; Date/Time are read as character columns here.
HPC_Data <- read.csv('./data/household_power_consumption.txt', header = TRUE, sep = ';', na.strings = "?")
head(HPC_Data)
# Keep only the two target days; dates are in d/m/Y form in the raw file.
HPC_Goal_Data <- HPC_Data[HPC_Data$Date %in% c("1/2/2007","2/2/2007"),]
head(HPC_Goal_Data)
# Convert in data/time info in format 'm/d/y h:m:s'
# Combine the Date and Time strings and parse them into POSIXlt timestamps.
x <- paste(HPC_Goal_Data$Date, HPC_Goal_Data$Time)
SetTime <- strptime(x, "%d/%m/%Y %H:%M:%S")
FinalData <- cbind(HPC_Goal_Data, SetTime)
## Plot 2
plot(FinalData$Global_active_power ~ FinalData$SetTime, type="l", col="black", xlab="", ylab="Global Active Power (kilowatts)")
# Re-render the on-screen plot into a 480x480 PNG, then close the device.
dev.copy(png,"plot2.png", width=480, height=480)
dev.off()
|
7a7184babc0a9fdeb4ec1a0f9eeda8f6fe21cf5f
|
3f22b5b7d5d22a0a63ece65475845c6c6c1eebd3
|
/MethylationScatterPlot.R
|
d06cbd7a7df63aea6313d96ca5b8efc0bc47bcf3
|
[] |
no_license
|
shanimalik86/EPIC_Methylation_Analysis
|
e043be2d6fe95ce3dceb5fe4a582712fe7a83ba8
|
e2959bf5b7a23709ace8c4e1024690e1f9907487
|
refs/heads/master
| 2022-11-26T21:10:34.230268
| 2020-08-07T15:56:07
| 2020-08-07T15:56:07
| 285,867,058
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 5,391
|
r
|
MethylationScatterPlot.R
|
# Correlate gene expression with CpG methylation (beta values) per
# (Gene, Probe) group, draw one expression-vs-beta scatter plot per group
# (PNG + PDF), binning groups into four "cases" by the signs of the
# correlation and the fold change (FC), then build a CpG -> gene lookup
# from the EPIC array annotation.
#
# Fixes vs. the original:
#  * exp_vs_meth_plot() read the loop index `i` from the global environment;
#    it now takes the group data frame as an argument;
#  * each loop iteration opened both png() and pdf() but called dev.off()
#    once, leaking one graphics device per plot — both are now closed;
#  * if() was applied to the length>1 vectors fit[[i]]$Cor / $FC — the first
#    element is now used explicitly;
#  * four copy-pasted branches collapsed into one parameterized path;
#  * duplicate library(corrplot) / library(ggpubr) calls removed.
library(pheatmap)
library(gridExtra)
library(ggplot2)
library(RColorBrewer)
library(gplots)
library(ComplexHeatmap)
library(ggpubr)
library(tidyverse)
library(Hmisc)
library(corrplot)
library(reshape)
library(dplyr)
library(IlluminaHumanMethylationEPICanno.ilm10b2.hg19)
library(IlluminaHumanMethylationEPICmanifest)
source("http://www.sthda.com/upload/rquery_cormat.r")

# Input: one row per (Gene, Probe, sample) with at least Geneexpression,
# Beta, Type and FC columns — TODO confirm schema against the file.
res <- read.table("/Users/fazal2/Desktop/MethylationData_Updated/Methylation_Allrelaxed/K33_B4_Correlation_Matrix_Figure5.txt", header = TRUE)

# Per-(Gene, Probe) Pearson correlation (and p-value) between expression
# and methylation, broadcast to every row of the group.
cor2 <- res %>%
  group_by(Gene, Probe) %>%
  mutate(Cor = cor.test(Geneexpression, Beta)$estimate) %>%
  mutate(p.value = cor.test(Geneexpression, Beta)$p.value) %>%
  arrange(Gene, Probe)

# One data frame per (Gene, Probe) group.
fit <- group_split(cor2, .keep = TRUE)
length(fit)

# Axis-label formatters: 2 and 1 decimal places respectively.
scaleFUN2 <- function(x) sprintf("%.2f", x)
scaleFUN1 <- function(x) sprintf("%.1f", x)
ppi <- 200

# Scatter plot of expression vs. beta for a single (Gene, Probe) group,
# with a linear fit and a Pearson correlation annotation.
exp_vs_meth_plot <- function(grp) {
  ggscatter(grp, x = "Geneexpression", y = "Beta", size = 3.5, color = "Type") +
    stat_smooth(method = "lm") +
    stat_cor(method = "pearson", label.x.npc = "middle", size = 3) +
    labs(title = grp$Gene[1]) +
    xlab("Geneexpression") +
    ylab("Beta") +
    theme(strip.text = element_text(color = "black"),
          panel.spacing = unit(1.5, "lines"),
          panel.grid.major = element_blank(),
          plot.title = element_text(color = "red", size = 12,
                                    face = "bold", hjust = 0.5),
          panel.border = element_rect(fill = NA, colour = "black",
                                      size = 0.8, linetype = "solid"),
          panel.background = element_blank(),
          legend.position = "right") +
    scale_x_continuous(labels = scaleFUN2) +
    scale_y_continuous(labels = scaleFUN1)
}

for (i in seq_along(fit)) {
  grp <- fit[[i]]
  p1 <- exp_vs_meth_plot(grp)
  # All rows of a group share the same Cor/FC, so take the first element.
  cor_pos <- as.numeric(grp$Cor[1]) > 0
  fc_pos  <- as.numeric(grp$FC[1]) > 0
  # Case numbering preserved from the original: 4 = +/+, 3 = +/-,
  # 2 = -/+, 1 = -/-.
  case_id <- if (cor_pos && fc_pos) 4 else if (cor_pos) 3 else if (fc_pos) 2 else 1
  # Per-group PNG.
  png(paste0("/Users/fazal2/Desktop/Verify/", grp$Gene[1], "vs", grp$Probe[1], "_", i, ".png"),
      width = 6 * ppi, height = 4 * ppi, res = ppi)
  print(p1)
  dev.off()
  # Case-specific PDF.
  # NOTE(review): as in the original, the PDF is reopened (and overwritten)
  # on every matching iteration, so it ends up holding only the last plot of
  # each case; open it once before the loop if all plots are wanted.
  pdf(paste0("/Users/fazal2/Desktop/Verify/B4_Figure5_", case_id, ".pdf"))
  print(p1)
  dev.off()
}

# ---- EPIC annotation: CpG -> gene lookup ----
EPICanno <- getAnnotation(IlluminaHumanMethylationEPICanno.ilm10b2.hg19)
head(EPICanno)
EPICanno <- as.data.frame(EPICanno)
# UCSC_RefGene_Name packs multiple genes as a ";"-separated string; expand to
# one row per (CpG, gene) pair and drop duplicates.
CpG_Gene_Interest <- EPICanno %>%
  dplyr::select(Name, UCSC_RefGene_Name) %>%
  tidyr::separate_rows(UCSC_RefGene_Name, sep = ";") %>%
  dplyr::distinct()
|
8f962ca2a1789baa80dfd2fc0da7e4ce48c7b0f5
|
df820a23627f00fdd493d2d992fd1ef6f727ed14
|
/HEDNO Oracle/bin/Debug/Functions/[Multiple_ROC_Comparisons].R
|
45f7c4832d0bda86c4f1889d6283e55294afbfa0
|
[] |
no_license
|
N1h1l1sT/HEDNO-Oracle
|
4c0472a4e95723d37da178d0afafe18dcb8f8731
|
32c64c83f02c1e374172aff226252a491d23e94d
|
refs/heads/master
| 2021-03-26T10:13:04.653183
| 2017-03-22T16:33:31
| 2017-03-22T16:33:31
| 71,827,365
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 393
|
r
|
[Multiple_ROC_Comparisons].R
|
# Template R script executed by the HEDNO-Oracle C# host application
# (file lives under bin/Debug/Functions in that project).
# NOTE(review): "{0}" and "{1}" are .NET string.Format placeholders filled in
# by the caller before execution — presumably {0} = quoted prediction column
# names and {1} = rounding digits; this file is not valid R until they are
# substituted. `strXDF` is likewise defined by the host — confirm.
# rxRoc/rxAuc appear to come from Microsoft RevoScaleR — verify.
#Plotting the ROC Curve of multiple Prediction Models in a juxtaposing manner
rocOut <- rxRoc(actualVarName = "Label",
predVarName = c({0}),
data = paste(strXDF, "Test_DS.xdf", sep = "")
)
#Show ROC Information
rocOut
round(rxAuc(rocOut), {1})
plot(rocOut,
title = "ROC Curve for Label",
lineStyle = c("solid", "twodash", "dashed")
)
# Free the ROC object once plotted.
remove(rocOut)
|
7fb4a4fbe947533b8b1d3d25827130fdee24721f
|
64940f9bc750932828db39f6798a26538c01fb2f
|
/man/twitUT1.Rd
|
28999c4cd44b8c6386bee0d6ebacdfaa3a0a611c
|
[] |
no_license
|
Tom-Laetsch/Rtwit
|
3ee83d7a33a6ec7b078f19eff4b89e0206f9b7e0
|
5c76aa54abd0429313a89f40af1497196251d009
|
refs/heads/master
| 2021-01-10T15:22:25.397431
| 2016-02-22T20:18:13
| 2016-02-22T20:18:13
| 52,211,167
| 1
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 699
|
rd
|
twitUT1.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/twitUT1.R
\name{twitUT1}
\alias{twitUT1}
\title{Request Twitter User Timeline Once}
\usage{
twitUT1(twitter_token, query)
}
\arguments{
\item{twitter_token}{Your twitter_token; e.g. from twitToken() function.}
\item{query}{A user_timeline query; e.g. from twitUTQuery() function.}
}
\description{
Make a single request to extract user timeline tweets with query specifications.
}
\examples{
twitter_token <- twitToken(consumer_key, consumer_secret, access_token, access_secret)
query <- twitUTQuery(screen_name = 'thomas_laetsch')
user.data <- twitUT1(twitter_token, query)
user.json.data <- httr::content(user.data)
}
|
5ce3daad1fd5fe1f6337956fcca872f3e819d75d
|
130fac5c7630d17332e253b4da6870f028d6f4d2
|
/man/RPbernoulli.Rd
|
0832b6551304b08a5cbf1398b17d41bd170d966d
|
[] |
no_license
|
cran/RandomFields
|
41efaabb19f883462ec3380f3d4c3102b0ed86b4
|
41d603eb8a5f4bfe82c56acee957c79e7500bfd4
|
refs/heads/master
| 2022-01-26T09:24:35.125597
| 2022-01-18T18:12:52
| 2022-01-18T18:12:52
| 17,693,063
| 5
| 4
| null | 2019-05-20T21:08:38
| 2014-03-13T03:21:25
|
C++
|
UTF-8
|
R
| false
| false
| 1,922
|
rd
|
RPbernoulli.Rd
|
\name{RPbernoulli}
\alias{RPbernoulli}
\title{Simulation of Binary Random Fields}
\description{
Indicator or binary field which
has the value 1, if an underlying field
exceeds a given threshold, 0
otherwise.
}
\usage{
RPbernoulli(phi, stationary_only, threshold)
}
\arguments{
\item{phi}{the \command{\link{RMmodel}}.
Either a model for a process or a covariance model must be
specified.
In the latter case, a Gaussian process \command{\link{RPgauss}} is
tacitely assumed.}
\item{stationary_only}{optional arguments; same meaning as for
\command{\link{RPgauss}}. It is ignored if the submodel
is a process definition.}
\item{threshold}{real valued.
\command{\link{RPbernoulli}} returns \eqn{1}
if value of the random field given by \code{phi} is equal
to or larger than the value of \code{threshold}, and \eqn{0}
otherwise. In the multivariate case, a vector might be given.
If the threshold is not finite, then the original field is returned.
\code{threshold} default value is 0.
}
}
\value{
The function returns an object of class \code{\link[=RMmodel-class]{RMmodel}}.
}
\details{
\command{\link{RPbernoulli}} can be applied to any field. If only
a covariance model is given, a Gaussian field is simulated as
underlying field.
}
\me
\seealso{
\command{\link{Auxiliary RMmodels}},
\link{RP},
\command{\link{RMbernoulli}}.
}
\keyword{spatial}
\examples{\dontshow{StartExample()}
RFoptions(seed=0) ## *ANY* simulation will have the random seed 0; set
## RFoptions(seed=NA) to make them all random again
x <- seq(0, 10, 0.1)
model <- RPbernoulli(RMexp(), threshold=0)
z <- RFsimulate(model, x, x, n=4)
plot(z)
model <- RPbernoulli(RPbrownresnick(RMexp(), xi=1), threshold=1)
z <- RFsimulate(model, x, x, n=4)
plot(z)
\dontshow{FinalizeExample()}}
|
047633987d66bddb41e0c5b477cd7d11d21c801c
|
c0e23acfb7ae64dfd62f1ce2f53612f9202b1cab
|
/scripts/plot1.R
|
774cb53aab981140a68e6cf5800622d5d4fec960
|
[] |
no_license
|
LMHollingshead/ExData_Plotting1
|
396511e023934908f327e93af38e73804a493fb7
|
d74a1452a2cd41cad31bc5dd1fe7466e81537b59
|
refs/heads/master
| 2021-01-18T18:16:50.708186
| 2014-08-05T15:07:41
| 2014-08-05T15:07:41
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 298
|
r
|
plot1.R
|
# Plot 1: histogram of Global Active Power from a pre-built working dataset,
# written directly to plot1.png.
## Set working directory
# NOTE(review): setwd() with a relative path is fragile — this only works when
# the script is launched from a specific sibling directory; prefer passing a
# full path to read.csv()/png() instead of changing the working directory.
setwd("../ExploratoryAnalysis/data")
## Load in prepared dataset (see PrepareData.R)
ds <- read.csv("workingdata.csv")
## Plot 1
# Open the PNG device before plotting so the histogram is written to the file.
png("plot1.png",bg="white")
hist(ds$Global_active_power,col="red",main="Global Active Power",xlab="Global Active Power (kilowatts)")
dev.off()
|
3031e522dd47867e630a6e54f7ff457eef630ae4
|
f153381432a864aa0f1cf789d27aa2e0aba00614
|
/man/k_batch_dot.Rd
|
14da8fe249860b1ac5d0ef2843772a12ebc245be
|
[] |
no_license
|
rdrr1990/keras
|
0f997cf8632f6db623afcdb376ea8c258923e094
|
72b510456f15f5570388d0e610aa4917f1f1674b
|
refs/heads/master
| 2021-05-06T06:42:08.086819
| 2017-12-30T00:11:11
| 2017-12-30T00:11:11
| 113,892,962
| 2
| 0
| null | 2017-12-11T18:19:25
| 2017-12-11T18:19:24
| null |
UTF-8
|
R
| false
| true
| 1,495
|
rd
|
k_batch_dot.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/backend.R
\name{k_batch_dot}
\alias{k_batch_dot}
\title{Batchwise dot product.}
\usage{
k_batch_dot(x, y, axes)
}
\arguments{
\item{x}{Keras tensor or variable with 2 more more axes.}
\item{y}{Keras tensor or variable with 2 or more axes}
\item{axes}{List of (or single) integer with target dimensions (axis indexes
are 1-based). The lengths of \code{axes[[1]]} and \code{axes[[2]]} should be the
same.}
}
\value{
A tensor with shape equal to the concatenation of \code{x}'s shape (less
the dimension that was summed over) and \code{y}'s shape (less the batch
dimension and the dimension that was summed over). If the final rank is 1,
we reshape it to \code{(batch_size, 1)}.
}
\description{
\code{batch_dot} is used to compute dot product of \code{x} and \code{y} when \code{x} and \code{y}
are data in batch, i.e. in a shape of \code{(batch_size)}. \code{batch_dot} results in
a tensor or variable with less dimensions than the input. If the number of
dimensions is reduced to 1, we use \code{expand_dims} to make sure that ndim is
at least 2.
}
\section{Keras Backend}{
This function is part of a set of Keras backend functions that enable
lower level access to the core operations of the backend tensor engine
(e.g. TensorFlow, CNTK, Theano, etc.).
You can see a list of all available backend functions here:
\url{https://keras.rstudio.com/articles/backend.html#backend-functions}.
}
|
8ca311f63cd0c87e58fb2c8a2bd6ad4e9bbb2f59
|
0f339f11a7553b77d7850da8d73d6c13abe58d7b
|
/R/StepCLE.R
|
47ab2dc464a74127ec959f3de34e61d0de49e142
|
[] |
no_license
|
cran/smfsb
|
b7986f5ce22df75938d6917692f939ad684bddc0
|
591dff0b70dcd3b21b12653fde36adc2957f64cc
|
refs/heads/master
| 2021-06-02T23:01:47.665664
| 2018-08-30T14:40:03
| 2018-08-30T14:40:03
| 17,699,724
| 0
| 1
| null | null | null | null |
UTF-8
|
R
| false
| false
| 559
|
r
|
StepCLE.R
|
# Build a one-step simulator for the Chemical Langevin Equation (CLE)
# approximation of a stochastic kinetic model.
#
# N:  a stochastic Petri-net list with $Pre and $Post stoichiometry matrices
#     (reactions in rows, species in columns) and a hazard function
#     $h(x, t, ...) returning one rate per reaction.
# dt: Euler-Maruyama time step used internally (default 0.01).
#
# Returns a function(x0, t0, deltat, ...) that advances the state x0 from
# time t0 by (at least) deltat and returns the new state vector.
StepCLE <- function(N, dt = 0.01) {
  stoich <- t(N$Post - N$Pre)   # species x reactions net-effect matrix
  n_react <- ncol(stoich)
  sqrt_dt <- sqrt(dt)           # std. dev. of the Brownian increments
  function(x0, t0, deltat, ...) {
    state <- x0
    now <- t0
    end_time <- t0 + deltat
    while (TRUE) {
      hazards <- N$h(state, now, ...)
      # One Euler-Maruyama step: drift h*dt plus diffusion sqrt(h)*dW.
      noise <- rnorm(n_react, 0, sqrt_dt)
      increment <- stoich %*% (hazards * dt + sqrt(hazards) * noise)
      state <- state + as.vector(increment)
      # Reflect negative populations back to positive values (reflection hack).
      negative <- state < 0
      state[negative] <- -state[negative]
      now <- now + dt
      if (now > end_time) {
        return(state)
      }
    }
  }
}
|
944f43cebedb45ace37a24042759e2646e4dad8d
|
2a7e77565c33e6b5d92ce6702b4a5fd96f80d7d0
|
/fuzzedpackages/TreeDist/man/KendallColijn.Rd
|
4c3fabc5e4886f3e6bc9513bd2cf66d8af94ed0f
|
[] |
no_license
|
akhikolla/testpackages
|
62ccaeed866e2194652b65e7360987b3b20df7e7
|
01259c3543febc89955ea5b79f3a08d3afe57e95
|
refs/heads/master
| 2023-02-18T03:50:28.288006
| 2021-01-18T13:23:32
| 2021-01-18T13:23:32
| 329,981,898
| 7
| 1
| null | null | null | null |
UTF-8
|
R
| false
| true
| 2,729
|
rd
|
KendallColijn.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/tree_distance_kendall-colijn.R
\name{KendallColijn}
\alias{KendallColijn}
\alias{KCVector}
\title{Kendall-Colijn distance}
\usage{
KendallColijn(tree1, tree2 = tree1)
KCVector(tree)
}
\arguments{
\item{tree1, tree2}{Trees of class \code{phylo}, with leaves labelled identically,
or lists of such trees to undergo pairwise comparison.}
\item{tree}{A tree of class \code{\link[ape:read.tree]{phylo}}.}
}
\value{
\code{KendallColijn()} returns an array of numerics providing the
distances between each pair of trees in \code{tree1} and \code{tree2},
or \code{splits1} and \code{splits2}.
}
\description{
Calculate the Kendall-Colijn tree distance, a measure related to the
path difference.
}
\details{
The Kendall-Colijn distance works by measuring, for each pair of leaves,
the distance from the most recent
common ancestor of those leaves and the root node. For a given tree, this
produces a vector of values recording the distance-from-the-root of each
most recent common ancestor of each pair of leaves.
Two trees are compared by taking the Euclidian distance between the
respective vectors. This is calculated by taking the square root of the sum
of the squares of the differences between the vectors.
This metric emphasizes the position of the root; the path difference
instead measures the distance of the last common ancestor of each pair
of leaves from the leaves themselves, i.e. the length of the path from one
leaf to another.
}
\section{Functions}{
\itemize{
\item \code{KCVector}: Creates a vector that characterises a rooted tree,
as described in Kendall & Colijn (2016).
}}
\examples{
KendallColijn(TreeTools::BalancedTree(8), TreeTools::PectinateTree(8))
set.seed(0)
KendallColijn(TreeTools::BalancedTree(8), lapply(rep(8, 3), ape::rtree))
KendallColijn(lapply(rep(8, 4), ape::rtree))
}
\references{
\insertRef{Kendall2016}{TreeDist}
}
\seealso{
\href{https://CRAN.R-project.org/package=treespace/vignettes/introduction.html}{\code{treespace::treeDist}}
is a more sophisticated, if more cumbersome, implementation that supports
lambda > 0, i.e. use of edge lengths in tree comparison.
Other tree distances:
\code{\link{JaccardRobinsonFoulds}()},
\code{\link{MASTSize}()},
\code{\link{MatchingSplitDistance}()},
\code{\link{NNIDist}()},
\code{\link{NyeSimilarity}()},
\code{\link{PathDist}()},
\code{\link{Robinson-Foulds}},
\code{\link{SPRDist}()},
\code{\link{TreeDistance}()}
}
\author{
\href{https://orcid.org/0000-0001-5660-1727}{Martin R. Smith}
(\href{mailto:martin.smith@durham.ac.uk}{martin.smith@durham.ac.uk})
}
\concept{tree distances}
|
b1a563066b985332a18208cbfcf1ebab02c6cb74
|
f7cb5ffe2d36c1a529a9b74ce6408dc0e0ae2413
|
/starting.R
|
5dde4dbe746562042f3bfc5e15d38fa8897a3bc3
|
[] |
no_license
|
alaindanet/fishcom
|
aa5b03f49c5e698d37003b6ffa75b88f3f18b1f4
|
76e30a7bf13cccabc1cd8fc230bb469276d7f846
|
refs/heads/master
| 2023-06-18T20:29:30.291089
| 2021-07-12T16:34:30
| 2021-07-12T16:34:30
| 160,175,823
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 334
|
r
|
starting.R
|
# One-off package-scaffolding script: sets devtools author defaults and then
# generates the standard package infrastructure. Intended to be run once,
# interactively, from the package root.
library("devtools")
# Default DESCRIPTION metadata that devtools uses when creating files.
# (The devtools.desc.author value is a single multi-line string literal.)
options(devtools.name = "Alain Danet",
  devtools.desc.author = "person('Alain', 'Danet',
  email='alain.danet@caramail.fr', role = c('aut', 'cre'))",
  devtools.desc.license = "MIT + file LICENSE"
  )
setup()             # create the basic package skeleton
use_testthat()      # testthat test infrastructure
use_vignette("intro")     # vignette stub named "intro"
use_travis()              # Travis CI config
use_package_doc()         # package-level documentation file
use_cran_comments()       # cran-comments.md for submissions
use_readme_rmd()          # README.Rmd template
|
603092081928dcb53d04f744ebef41fe2eec70df
|
6af844903b9d066581408450a1e743c1283468e5
|
/man/wavelet_radius.Rd
|
2115d568c75a2f851b0f1c172693070e756f074d
|
[] |
no_license
|
rbensua/wavScalogram
|
8ab5766639ec0154383b0ecc43858cedfe52019c
|
b811b3382de8190c0ed8249bcee04394a007ceda
|
refs/heads/master
| 2021-06-24T13:45:41.879344
| 2019-06-12T09:53:42
| 2019-06-12T09:53:42
| 136,003,978
| 1
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 1,513
|
rd
|
wavelet_radius.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/wavelet_radius.R
\name{wavelet_radius}
\alias{wavelet_radius}
\title{Wavelet radius}
\usage{
wavelet_radius(wname = c("MORLET", "DOG", "PAUL", "HAAR", "HAAR2"),
wparam = NULL,
perc = .0025,
scale = 100,
n = 1000,
makefigure = FALSE)
}
\arguments{
\item{wname}{A string, equal to "MORLET", "DOG", "PAUL", "HAAR" or "HAAR2". The
difference between "HAAR" and "HAAR2" is that "HAAR2" is more accurate but slower.}
\item{wparam}{The corresponding nondimensional parameter for the wavelet function
(Morlet, DoG or Paul).}
\item{perc}{Numeric. The wavelet radius is computed so that the area covered is at
least the 100*(1-\code{perc})\% of the total area of the mother wavelet.}
\item{scale}{Numeric. Scale of the wavelet used in the computations. It only affects
the accuracy.}
\item{n}{Numeric. The computations use a time series of length \eqn{2n+1}.}
\item{makefigure}{Logical. Plots a figure with the real part of the mother wavelet and
its modulus.}
}
\value{
A list with the following fields:
\itemize{
\item \code{left}: The radius on the left.
\item \code{right}: The radius on the right.
}
}
\description{
This function computes an approximation of the effective radius of a mother wavelet.
}
\examples{
waverad <- wavelet_radius(wname = "MORLET", makefigure = TRUE)
}
|
7012e85308cec8a14ef82fc96ab5b127858792a3
|
3b3f29bb712b8b0c73460e0dfe859bc1e3a63790
|
/R/data.R
|
bd74d1790185919634030acbca023b7781144222
|
[] |
no_license
|
r-transit/tidytransit
|
37765dedc4450dd1ff1b069f45491b69402e83da
|
878e64a4a4b7f10b42f0e9d2c156fd1314fddaad
|
refs/heads/master
| 2023-08-08T10:17:46.583304
| 2023-07-21T08:29:46
| 2023-07-21T08:29:46
| 142,806,717
| 123
| 24
| null | 2023-07-21T08:29:47
| 2018-07-30T00:43:24
|
R
|
UTF-8
|
R
| false
| false
| 655
|
r
|
data.R
|
#' Dataframe of route type IDs and the names of the types (e.g. "Bus")
#'
#' Extended GTFS Route Types: https://developers.google.com/transit/gtfs/reference/extended-route-types
#'
#' @docType data
#' @format A data frame with 136 rows and 2 variables:
#' \describe{
#'   \item{route_type}{the numeric ID of the route type}
#'   \item{route_type_name}{human-readable name of the GTFS route type}
#' }
#' @source \url{https://gist.github.com/derhuerst/b0243339e22c310bee2386388151e11e}
"route_type_names"

#' Example GTFS data
#'
#' A GTFS feed for the Duke University transit system, obtained from
#' \url{https://data.trilliumtransit.com/gtfs/duke-nc-us/duke-nc-us.zip}.
#' @docType data
#' @seealso read_gtfs
"gtfs_duke"
|
3d928ad5fb5221bf6c55665111c8c673665dcfe9
|
a3f8ecc9f5ffdb849de8a72d0985c0cf31b9dbd1
|
/Code/simulate.R
|
a3a0e351150af48783a926f473adba3694c994e1
|
[] |
no_license
|
araea382/MT
|
7abb655ba9587d44f38a103c6b33dffe10ac7fca
|
c38557dcfa7a25d7ea8b85ad8ecae15c1c94c116
|
refs/heads/master
| 2021-01-11T15:50:34.185487
| 2017-07-03T15:52:54
| 2017-07-03T15:52:54
| 79,938,255
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 16,423
|
r
|
simulate.R
|
library(MSwM2)
library(ggplot2)
# simulation data
# three states
# Generate n = 500 observations from three linear regimes on common
# covariates x1, x2. Each regime adds a lag-1 term built from its own
# previous fitted values (y0 holds the lagged series while constructing).
set.seed(1)
n <- 500
x1 <- runif(n,50,200)
x2 <- runif(n,0,50)
e1 <- rnorm(n,0,1)
e2 <- rnorm(n,2,0.5)
e3 <- rnorm(n,1,1)
y0 <- 0
# Regime "normal": positive x1 effect, negative x2 effect, AR coefficient 0.5.
y1 <- 10 + 0.6*x1 - 0.9*x2 + e1
y0 <- c(0,y1[-n])
y1 <- y1 + 0.5*y0
# Regime "bad": x1 only, AR coefficient 0.2.
y2 <- 2 + 0.8*x1 + e2
y0 <- c(0,y2[-n])
y2 <- y2 + 0.2*y0
# Regime "good": both covariates, negative AR coefficient -0.2.
y3 <- -12 + 0.7*x1 + 0.2*x2 + e3
y0 <- c(0,y3[-n])
y3 <- y3 - 0.2*y0
plot(y1, type="l", ylim=c(0,300)) # normal
points(y2, type="l", col="red") # bad
points(y3, type="l", col="orange") # good
#--------------------------------------------------------------------#
# Simulated Dataset 1
# Dataset 1: hand-picked contiguous index runs per regime, so the state
# switches in long blocks. `chg` records the true change-point indices.
state <- rep(0,n)
ind_normal <- c(1:50,71:110,161:220,351:370,421:450)
ind_bad <- c(51:70,241:290,371:420,451:491)
ind_good <- c(111:160, 221:240,291:350,491:500)
state[ind_normal] <- "Normal"
state[ind_bad] <- "Bad"
state[ind_good] <- "Good"
st <- rep(0,n)
st[ind_normal] <- 1
st[ind_bad] <- 2
st[ind_good] <- 3
chg <- which(diff(st) != 0) + 1
# 51 71 111 161 221 241 291 351 371 421 451 491
# Assemble the observed series by taking each regime's values on its indices.
y <- rep(0,n)
y[ind_normal] <- y1[ind_normal]
y[ind_bad] <- y2[ind_bad]
y[ind_good] <- y3[ind_good]
points(y, type="l", col="green")
plot(y, type="l")
abline(v=chg,col="red")
ggplot(data.frame(index=seq(1:n),y), aes(x=index, y=y)) + geom_line() +
  ggtitle("Simulated data")+ theme_bw()
# 80/20 chronological train/test split; drop true change points past the
# training window.
simu_data <- data.frame(x1,x2,y)
ind <- 500*0.8
train <- simu_data[1:ind,]
test <- simu_data[-c(1:ind),]
chg_train <- chg[-which(chg > 400)]
mod <- lm(y~., data=train)
summary(mod)
# Fit a 3-regime Markov switching model with 1 autoregressive lag; all
# coefficients (plus variance) are regime-switching.
set.seed(1)
mswm <- MSwM2::msmFit(mod, k=3, p=1, sw=rep(TRUE,length(mod$coefficients)+1+1),control=list(trace=TRUE,maxiter=500,parallel=FALSE))
summary(mswm)
plotProb(mswm)
# regime1 = bad (2)
# regime2 = normal (1)
# regime3 = good (3)
# Relabel the true test states to match the fitted regime numbering above,
# then score out-of-sample state predictions.
pred <- MSwM2::statePredict(mswm,test)
test_state <- state[-c(1:ind)]
test_state[which(test_state == "Bad")] <- 1
test_state[which(test_state == "Normal")] <- 2
test_state[which(test_state == "Good")] <- 3
tab <- table(actual=test_state, predict=pred)
sum(diag(tab))/sum(tab) # overall accuracy
1-sum(diag(tab))/sum(tab) # incorrect classification
library(caret)
# NOTE(review): test_state is a character vector here; caret's
# confusionMatrix expects factors with common levels -- confirm this call.
confusionMatrix(test_state, pred)
# Plot the series alongside a colour bar of the true states.
Y <- as.factor(state)
Y <- factor(Y,levels(Y)[c(2,3,1)])
cbPalette <- c("#D55E00", "#56B4E9", "#009E73", "#CC79A7")
temp2 <- data.frame(Y,X=seq(1:500))
ggplot() + geom_point(data=temp2, aes(x=X, y=Y, colour=Y)) +
  xlab("index") + ylab("") + theme_bw() +
  theme(legend.title = element_blank(),
        axis.text.y=element_blank(),
        axis.ticks.y=element_blank()) +
  ggtitle("State of the simulated data") +
  scale_colour_manual(values=cbPalette)
p1<-ggplot(data.frame(index=seq(1:n),y), aes(x=index, y=y)) + geom_line() +
  ggtitle("Simulated Dataset 1")+ theme_bw() +
  theme(text = element_text(size=15))
p2<-ggplot() + geom_bar(data=temp2, aes(x=X, y=Y, fill=Y, color=Y),stat="identity") +
  xlab("index") + ylab("") + theme_bw() +
  theme(legend.title = element_blank(),
        axis.ticks.y=element_blank(),
        axis.text.y=element_text(colour="white",size=7),
        legend.position="bottom",
        text = element_text(size=15)) +
  ggtitle("State of the the Dataset 1") +
  scale_colour_manual(values=cbPalette) +
  scale_fill_manual(values=cbPalette)
require(gridExtra)
grid.arrange(p1,p2,nrow=2)
# #--------------------------------#
# # MSwM2
# # smoothed prob plot
# sim <- as.data.frame(mswm@Fit@smoProb)
# sim <- cbind(index=seq(1,nrow(sim)),sim)
# colnames(sim) <- c("index","State 1","State 2","State 3")
#
# sim <- melt(sim, id="index")
# ggplot(data=sim, aes(x=index, y=value, colour=variable)) + geom_line() +
# ylab("Smoothed Probabilities") + ggtitle("Simulated Dataset 1") + scale_color_manual(values=c("#F8766D","#00BA38","#619CFF")) +
# theme_bw() + theme(legend.title = element_blank())
#
#
# # plot with state area
# gen_sim <- function(object,data){
# state <- sapply(1:nrow(data), function(x) which.max(object@Fit@smoProb[x,]))
# state <- factor(state)
# index=seq(1,nrow(data))
# xmin=index-0.5
# xmax=index+0.5
# y=data$y
# ans <- data.frame(index,xmin,xmax,state,y=y,ymin=min(y),ymax=max(y))
# return(ans)
# }
# Most-likely regime per training observation from the smoothed
# probabilities, and the indices where the fitted regime switches.
# NOTE(review): seq_len(nrow(train)) would be safer than 1:nrow(train).
pred_state <- sapply(1:nrow(train), function(x) which.max(mswm@Fit@smoProb[x,]))
chg_mswm <- which(diff(pred_state) != 0) + 1
# state_sim <- gen_sim(mswm, train)
# ggplot(data=state_sim, aes(x=index, y=y)) + geom_line() +
# geom_rect(data=state_sim, aes(xmin=xmin, xmax=xmax, ymin=ymin, ymax=ymax, fill=state), alpha=0.2, inherit.aes=FALSE) +
# scale_fill_manual(values=c("red","green","blue")) +
# ylab("y") + ggtitle("Simulated Dataset 1") + theme_bw()
#
#
# #--------------------------------#
# # ecp
# set.seed(1)
# Ediv_sim <- e.divisive(matrix(train$y), R=499, min.size=5)
# Ediv_sim$k.hat
# Ediv_sim$estimates
# out <- Ediv_sim$estimates[c(-1,-length(Ediv_sim$estimates))] # 112 162 221 241 285 353
# dat <- data.frame(index=seq(1,nrow(train)), y=train$y)
# ggplot(data=dat, aes(x=index, y=y)) + geom_line() + scale_color_manual(values=c("#F8766D","#00BA38","#619CFF")) +
# geom_vline(xintercept=out, colour="red", linetype="longdash") +
# geom_vline(xintercept=chg_train, colour="purple", linetype="longdash") +
# ggtitle("E-divisive simulated Dataset 1") + theme_bw()
#
#
# #--------------------------------#
# # smo prob
# # quite difficult to see...
# g <- ggplot(data=sim, aes(x=index, y=value, colour=variable)) + geom_line() +
# ylab("Smoothed Probabilities") + ggtitle("Simulated Dataset 1") + scale_color_manual(values=c("#F8766D","#00BA38","#619CFF")) +
# theme_bw() + theme(legend.title = element_blank())
#
# g + geom_vline(xintercept=out, color="red", size=0.6, linetype="longdash") +
# geom_vline(xintercept=chg_train, colour="blue", size=0.6, linetype="longdash")
#
#
#
# # state
# # easier to see but there are switches that are missing
# g <- ggplot(data=state_sim, aes(x=index, y=y)) + geom_line() +
# geom_rect(data=state_sim, aes(xmin=xmin, xmax=xmax, ymin=ymin, ymax=ymax, fill=state), alpha=0.2, inherit.aes=FALSE) +
# scale_fill_manual(values=c("red","green","blue")) +
# ylab("t") + ggtitle("Simulated Dataset 1") + theme_bw()
#
# g + geom_vline(xintercept=out, color="red", linetype="longdash") +
# geom_vline(xintercept=chg_train, colour="blue", linetype="longdash")
#
#
# # TRUE state
# # two methods overlap.... ugh.. almost
# temp <- data.frame(index=seq(1, nrow(train)), y=train$y, state=state[1:ind])
# index=seq(1,nrow(train))
# state_sim_train <- data.frame(index,xmin=index-0.5,xmax=index+0.5,state=state[1:ind],y=train$y,ymin=min(train$y),ymax=max(train$y))
#
# ggplot(data=state_sim_train, aes(x=index, y=y)) + geom_line() +
# geom_rect(data=state_sim_train, aes(xmin=xmin, xmax=xmax, ymin=ymin, ymax=ymax, fill=state), alpha=0.2, inherit.aes=FALSE) +
# scale_fill_manual(values=c("red","green","blue")) +
# ylab("y") + ggtitle("Simulated Dataset 1") + theme_bw() +
# geom_vline(xintercept=out, color="magenta", linetype="longdash") +
# geom_vline(xintercept=chg_mswm, colour="blue", linetype="longdash")
# three plots at once
# yippie
# Faceted comparison of true change points vs those found by the Markov
# switching model and by E-divisive.
# NOTE(review): `out` (the E-divisive estimates) is only assigned inside the
# commented-out ecp section above -- this block errors unless that section
# has been run; confirm intended workflow.
method <- c(rep("Actual",ind),rep("Markov switching model",ind),rep("E-divisive",ind))
changePoints <- data.frame(changeP=c(chg_train, chg_mswm, out), method=c(rep("Actual",length(chg_train)), rep("Markov switching model",length(chg_mswm)), rep("E-divisive",length(out))))
temp <- data.frame(index=rep(1:ind,3),y=rep(train$y,3), method)
temp$method <- factor(temp$method, levels=c("Actual","Markov switching model","E-divisive"))
ggplot(data=temp, aes(x=index,y=y)) + geom_line() +
  facet_grid(method ~ ., scales = 'free_y') + theme_bw() +
  ggtitle("Simulated Dataset 1") +
  theme(panel.spacing = unit(0.2, "lines")) +
  geom_vline(aes(xintercept=changeP), data=changePoints, linetype="longdash", colour=c(rep("limegreen",length(chg_train)),rep("cyan3",length(chg_mswm)),rep("orangered",length(out))))
# Dead-code container: wrapping these commented-out exploratory snippets in
# an unused function keeps them out of the script's execution path while
# preserving them for reference. The body contains no executable statements.
.no <- function(){
  #--------------------------------#
  # d <- data.frame(y,temp2)
  # colnames(d) <- c("value","state","index")
  # ggplot(d, aes(x=index, y=value)) + geom_line() +
  # facet_grid(value~ ., scales = "free_y") + theme(legend.position = "none")
  # Y_test <- state[401:500]
  # temp3 <- data.frame(Y_test,X=seq(1:100))
  # ggplot() + geom_point(data=temp3, aes(x=X, y=Y_test, colour=Y_test)) +
  # xlab("index") + ylab("") + theme_bw() +
  # theme(legend.title = element_blank(),
  # axis.text.y=element_blank(),
  # axis.ticks.y=element_blank()) +
  # ggtitle("State of the simulated data") +
  # scale_colour_manual(values=cbPalette)
  #
  # pred[which(pred == 1)] <- "Bad"
  # pred[which(pred == 2)] <- "Normal"
  # pred[which(pred == 3)] <- "Good"
  # pred <- as.factor(pred)
  # temp4 <- data.frame(pred,X=seq(1:100))
  # ggplot() + geom_point(data=temp4, aes(x=X, y=pred, colour=pred)) +
  # xlab("index") + ylab("") + theme_bw() +
  # theme(legend.title = element_blank(),
  # axis.text.y=element_blank(),
  # axis.ticks.y=element_blank()) +
  # ggtitle("State of the simulated data") +
  # scale_colour_manual(values=cbPalette)
  ##
  # test with one obs. at a time
  # pred1 <- MSwM2::predict(mswm,test[1,])
  # pred2 <- MSwM2::predict(mswm,test[2,]); pred2 # PROBLEM..!
  # pred12 <- MSwM2::predict(mswm,test[1:2,]); pred12 # It's okay
}
#--------------------------------------------------------------------#
# Simulated Dataset 2
# Dataset 2: the regime of each observation is drawn independently and
# uniformly from {1,2,3}, so switches are frequent and short-lived
# (a much harder change-point problem than Dataset 1).
set.seed(1)
state2 <- rep(0,n)
samp <- sample(3,500,replace=TRUE)
ind_normal2 <- which(samp == 1); length(ind_normal2)
ind_bad2 <- which(samp == 2); length(ind_bad2)
ind_good2 <- which(samp == 3); length(ind_good2)
state2[ind_normal2] <- "Normal"
state2[ind_bad2] <- "Bad"
state2[ind_good2] <- "Good"
st2 <- rep(0,n)
st2[ind_normal2] <- 1
st2[ind_bad2] <- 2
st2[ind_good2] <- 3
chg2 <- which(diff(st2) != 0) + 1
# Assemble the observed series from the per-regime series generated earlier.
yy <- rep(0,n)
yy[ind_normal2] <- y1[ind_normal2]
yy[ind_bad2] <- y2[ind_bad2]
yy[ind_good2] <- y3[ind_good2]
plot(yy, type="l")
ggplot(data.frame(index=seq(1:n),yy), aes(x=index, y=yy)) + geom_line() +
  ggtitle("Simulated data")+ theme_bw()
# Same 80/20 chronological split and 3-regime, lag-1 switching fit as
# for Dataset 1 (`ind` is reused from that section).
simu_data2 <- data.frame(x1,x2,yy)
train2 <- simu_data2[1:ind,]
test2 <- simu_data2[-c(1:ind),]
chg_train2 <- chg2[-which(chg2 > 400)]
mod2 <- lm(yy~., data=train2)
summary(mod2)
set.seed(1)
mswm2 <- MSwM2::msmFit(mod2, k=3, p=1, sw=rep(TRUE,length(mod2$coefficients)+1+1),control=list(trace=TRUE,maxiter=500,parallel=FALSE))
summary(mswm2)
plotProb(mswm2)
# regime1 = bad (2)
# regime2 = good (3)
# regime3 = normal (1)
# pred_train_state <- apply(mswm2@Fit@smoProb,1,which.max)
# st <- samp[1:ind]
# st[which(st == 1)] <- 4
# st[which(st == 3)] <- 1
# st[which(st == 4)] <- 2
# Note the regime-to-state mapping differs from Dataset 1 (see labels above).
pred2 <- MSwM2::statePredict(mswm2,test2)
test_state2 <- state2[-c(1:ind)]
test_state2[which(test_state2 == "Bad")] <- 1
test_state2[which(test_state2 == "Normal")] <- 3
test_state2[which(test_state2 == "Good")] <- 2
tab2 <- table(actual=test_state2, predict=pred2)
sum(diag(tab2))/sum(tab2) # overall accuracy
1-sum(diag(tab2))/sum(tab2) # incorrect classification
# Series plus true-state colour bar, as for Dataset 1 (reuses cbPalette).
YY <- as.factor(state2)
YY <- factor(YY,levels(YY)[c(2,3,1)])
temp2 <- data.frame(YY,X=seq(1:500))
# plot(x=seq(1,500),y=samp, type="h")
pp1 <- ggplot(data.frame(index=seq(1:n),yy), aes(x=index, y=yy)) + geom_line() +
  ggtitle("Simulated Dataset 2")+ ylab("y") + theme_bw()+
  theme(text = element_text(size=15))
pp2 <- ggplot() + geom_bar(data=temp2, aes(x=X, y=YY, fill=YY, color=YY),stat="identity") +
  xlab("index") + ylab("") + theme_bw() +
  theme(legend.title = element_blank(),
        axis.ticks.y=element_blank(),
        axis.text.y=element_text(colour="white",size=7),
        legend.position="bottom",
        text = element_text(size=15)) +
  ggtitle("State of the Dataset 2")+
  scale_colour_manual(values=cbPalette) +
  scale_fill_manual(values=cbPalette)
grid.arrange(pp1,pp2,nrow=2)
# #--------------------------------#
# # MSwM2
# # smoothed prob plot
# sim2 <- as.data.frame(mswm2@Fit@smoProb)
# sim2 <- cbind(index=seq(1,nrow(sim2)),sim2)
# colnames(sim2) <- c("index","State 1","State 2","State 3")
#
# sim2 <- melt(sim2, id="index")
# ggplot(data=sim2, aes(x=index, y=value, colour=variable)) + geom_line() +
# ylab("Smoothed Probabilities") + ggtitle("Simulated Dataset 2") + scale_color_manual(values=c("#F8766D","#00BA38","#619CFF")) +
# theme_bw() + theme(legend.title = element_blank())
#
#
# # plot with state area
# gen_sim2 <- function(object,data){
# state <- sapply(1:nrow(data), function(x) which.max(object@Fit@smoProb[x,]))
# state <- factor(state)
# index=seq(1,nrow(data))
# xmin=index-0.5
# xmax=index+0.5
# y=data$yy
# ans <- data.frame(index,xmin,xmax,state,y=y,ymin=min(y),ymax=max(y))
# return(ans)
# }
# Most-likely regime per training observation (Dataset 2) and the fitted
# regime-switch indices.
# NOTE(review): seq_len(nrow(train2)) would be safer than 1:nrow(train2).
pred_state2 <- sapply(1:nrow(train2), function(x) which.max(mswm2@Fit@smoProb[x,]))
chg_mswm2 <- which(diff(pred_state2) != 0) + 1
# state_sim2 <- gen_sim2(mswm2, train2)
# ggplot(data=state_sim2, aes(x=index, y=y)) + geom_line() +
# geom_rect(data=state_sim2, aes(xmin=xmin, xmax=xmax, ymin=ymin, ymax=ymax, fill=state), alpha=0.2, inherit.aes=FALSE) +
# scale_fill_manual(values=c("red","green","blue")) +
# ylab("y") + ggtitle("Simulated Dataset 2") + theme_bw()
#
#
# #--------------------------------#
# # ecp
# set.seed(1)
# Ediv_sim2 <- e.divisive(matrix(train2$yy), R=499, min.size=5)
# Ediv_sim2$k.hat
# Ediv_sim2$estimates
# out2 <- Ediv_sim2$estimates[c(-1,-length(Ediv_sim2$estimates))] # 270 300
#
# dat <- data.frame(index=seq(1,nrow(train2)), y=train2$yy)
# ggplot(data=dat, aes(x=index, y=y)) + geom_line() + scale_color_manual(values=c("#F8766D","#00BA38","#619CFF")) +
# geom_vline(xintercept=out2, colour="red", linetype="longdash") +
# ggtitle("E-divisive simulated Dataset 2") + theme_bw()
#
#
# #--------------------------------#
# # smo prob
# # quite difficult to see... OMG!!!!!!!!!!
# g <- ggplot(data=sim2, aes(x=index, y=value, colour=variable)) + geom_line() +
# ylab("Smoothed Probabilities") + ggtitle("Simulated Dataset 2") + scale_color_manual(values=c("#F8766D","#00BA38","#619CFF")) +
# theme_bw() + theme(legend.title = element_blank())
#
# g + geom_vline(xintercept=out2, color="red", size=0.6, linetype="longdash") +
# geom_vline(xintercept=chg_train2, colour="blue", size=0.6, linetype="longdash")
#
#
#
# # state
# # OMG!!!!!!!!! no different than the first one
# g <- ggplot(data=state_sim2, aes(x=index, y=y)) + geom_line() +
# geom_rect(data=state_sim2, aes(xmin=xmin, xmax=xmax, ymin=ymin, ymax=ymax, fill=state), alpha=0.2, inherit.aes=FALSE) +
# scale_fill_manual(values=c("red","green","blue")) +
# ylab("t") + ggtitle("Simulated Dataset 2") + theme_bw()
#
# g + geom_vline(xintercept=out2, color="red", linetype="longdash") +
# geom_vline(xintercept=chg_train2, colour="blue", linetype="longdash")
#
#
# # TRUE state
# # OMG!!!!!!!!! no different than the first and second one
# temp <- data.frame(index=seq(1, nrow(train2)), y=train2$yy, state=state2[1:ind])
# index=seq(1,nrow(train2))
# state_sim_train2 <- data.frame(index,xmin=index-0.5,xmax=index+0.5,state=state2[1:ind],y=train2$yy,ymin=min(train2$yy),ymax=max(train2$yy))
#
# ggplot(data=state_sim_train2, aes(x=index, y=y)) + geom_line() +
# geom_rect(data=state_sim_train2, aes(xmin=xmin, xmax=xmax, ymin=ymin, ymax=ymax, fill=state), alpha=0.2, inherit.aes=FALSE) +
# scale_fill_manual(values=c("red","green","blue")) +
# ylab("y") + ggtitle("Simulated Dataset 2") + theme_bw() +
# geom_vline(xintercept=out2, color="magenta", linetype="longdash") +
# geom_vline(xintercept=chg_mswm2, colour="blue", linetype="longdash")
# three plots at once
# uhh... still so scary
# Faceted comparison of true vs estimated change points for Dataset 2.
# NOTE(review): `out2` (the E-divisive estimates) is only assigned inside the
# commented-out ecp section above -- this block errors unless that section
# has been run; confirm intended workflow.
method <- c(rep("Actual",ind),rep("Markov switching model",ind),rep("E-divisive",ind))
changePoints <- data.frame(changeP=c(chg_train2, chg_mswm2, out2), method=c(rep("Actual",length(chg_train2)), rep("Markov switching model",length(chg_mswm2)), rep("E-divisive",length(out2))))
temp2 <- data.frame(index=rep(1:ind,3),y=rep(train2$yy,3), method)
temp2$method <- factor(temp2$method, levels=c("Actual","Markov switching model","E-divisive"))
ggplot(data=temp2, aes(x=index,y=y)) + geom_line() +
  facet_grid(method ~ ., scales = 'free_y') + theme_bw() +
  ggtitle("Simulated Dataset 2") +
  theme(panel.spacing = unit(0.2, "lines")) +
  geom_vline(aes(xintercept=changeP), data=changePoints, linetype="longdash", colour=c(rep("limegreen",length(chg_train2)),rep("cyan3",length(chg_mswm2)),rep("orangered",length(out2))))
|
0f14d5d30c444dbd56263195fc218ff424f2ab82
|
87824725ea1fa56118711b37ff22cea3a1b6425b
|
/R/PreProcess.R
|
84439e1efd8a7ff26d7ccd0913ef7c1989bd5f6f
|
[] |
no_license
|
chentian310/Image
|
4a8aa97d9e5edb481637b03a82ba7cb521420c49
|
59d42e70dba2c0f78417e118b4dbf208a9fa3295
|
refs/heads/master
| 2016-09-10T22:15:43.393146
| 2011-12-16T22:42:59
| 2011-12-16T22:42:59
| 2,862,685
| 2
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,779
|
r
|
PreProcess.R
|
# Preprocess a 3-D image time series (rows x cols x time).
#
# Pipeline visible in the code below:
#  1. Fit a per-pixel median regression (quantreg::rq, tau = .5) of intensity
#     on centered time, giving an intercept and slope per pixel.
#  2. Log-transform the coefficients and k-means cluster pixels into two
#     groups; the group whose raw intensities span the SMALLER range is
#     flagged in the returned mask (presumably background/"non-mito" pixels
#     -- TODO confirm naming against the package docs).
#  3. Remove the estimated per-pixel linear drift (slope only) from the data.
#  4. Spatially smooth each time slice with fields::image.smooth.
#
# Arguments:
#   mydata      - 3-D numeric array; third dimension is time (dimnames kept).
#   default.val - currently unused (the replace() call using it is commented
#                 out); retained for interface compatibility.
#   ...         - unused.
# Returns: list(preproc = smoothed drift-removed array, mask = logical
#          rows x cols matrix flagging the low-range pixel cluster).
PreProcess <-
function(mydata,default.val=0,...){
  Timenames<-dimnames(mydata)[[3]]
  nrows<-dim(mydata)[1];ncols<-dim(mydata)[2];TimeLen<-dim(mydata)[3];Time<-1:TimeLen
  mydata.norm <- array(0,c(nrows,ncols,TimeLen));preproc <- array(0,c(nrows,ncols,TimeLen))
## begin clustering
##
  X<-Time-mean(Time) # centered Time
  # apply over c(1,2) yields a 3-D array with the two rq coefficients in the
  # FIRST dimension: coef.rq[1,,] = intercepts, coef.rq[2,,] = slopes.
  coef.rq<-apply(mydata,c(1,2),function(Y) coefficients(rq(Y~X,tau=.5)))
## Take log transformation to make variance of two clusters more comparable
  b0.log <- log(as.vector(coef.rq[1,,] - min(coef.rq[1,,]) +1))
  b1.log <- log(-(as.vector(coef.rq[2,,])) - min(-coef.rq[2,,]) + 0.01)
## determines the two clusters. The two centers are the initial points
## trained from the training set.
  # NOTE(review): kmeans uses random starts; results depend on the RNG seed.
  clust2 <- kmeans(cbind(b0.log, b1.log), centers=2)
  group1 <- matrix(clust2$cluster==1, nrow=nrows)
  # Compare intensity ranges of the two clusters to decide which is flagged.
  r1 <- range(mydata[group1])[2]-range(mydata[group1])[1] # diff in "max" is faster
  r2 <- range(mydata[!group1])[2]-range(mydata[!group1])[1]
  if (r1<r2) {
    nonmito.ind.array <- group1 # T for nonmito
  } else {
    nonmito.ind.array <- !group1 # T for nonmito
  }
## normalization
  # Subtract only the fitted slope*time drift; intercepts are left in place
  # (the intercept-removing variant is kept commented below).
  est.drifts <- outer(coef.rq[2,,],X,"*")
# est.drifts <- outer(coef.rq[2,,],X,"*") + outer(coef.rq[1,,],rep(1,length(X)),"*")
  mydata.norm <- mydata-est.drifts
## smoothing
  for (tt in 1:TimeLen){
    preproc[,,tt] <- image.smooth(mydata.norm[,,tt])$z
  }
# preproc <- replace(preproc,nonmito.ind.array,default.val)
# if (skip.first){
# preproc<-preproc[,,-1]
# dimnames(preproc)[[3]]<-Timenames[-1]} else {
# preproc<-preproc
# dimnames(preproc)[[3]]<-Timenames}
  dimnames(preproc)[[3]]<-Timenames
  return(list(preproc=preproc,mask=nonmito.ind.array))
}
|
ab056d9c558d19b6ca6990a705304cb105aec1cc
|
6d907dd36dbbfb54509d643a24eb98b1f6b447d0
|
/man/SummaryReporter.Rd
|
4650c15a12b58d860e1193eaac6228cb362f50fa
|
[] |
no_license
|
PaulHiemstra/test_that
|
d44b9b96586743b6cbadd73e0d8eaec047782bfc
|
d0eb41d38df0c1e7cbdc027a9848564ae83f1d2e
|
refs/heads/master
| 2021-01-18T10:19:14.675349
| 2012-04-16T14:28:15
| 2012-04-16T14:28:15
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 785
|
rd
|
SummaryReporter.Rd
|
\docType{data}
\name{SummaryReporter}
\alias{SummaryReporter}
\alias{SummaryReporter-class}
\title{Test reporter: summary of errors.}
\format{Reference class 'refObjectGenerator' [package "methods"] with 2 fields
and 17 methods, of which 6 are possibly relevant:
accessors, fields, help, lock, methods, new}
\usage{
SummaryReporter
}
\description{
This is the most useful reporting reporter as it lets you
know both which tests have run successfully, as well as
fully reporting information about failures and errors.
It is the default reporting reporter used by
\code{\link{test_dir}} and \code{\link{test_file}}.
}
\details{
As an additional benefit, this reporter will praise you
from time-to-time if all your tests pass.
}
\keyword{datasets}
\keyword{debugging}
|
8bf0b5536b75c48559d8c7ca0edfbacc4597abb8
|
5a389396139299bbbcd0a7725d704bbad0b89f2a
|
/man/h3_to_placekey.Rd
|
99b05e77dfb8cb2cff76c1aefa8d01c1bde292e9
|
[] |
no_license
|
NickCH-K/placekey
|
0296e50e61c90f1bfe52d0a5f60c2df5556427be
|
3ede82969e3fbbf2decaebeff45734a1b13b436f
|
refs/heads/master
| 2023-01-08T11:34:09.452507
| 2020-11-02T07:36:53
| 2020-11-02T07:36:53
| 309,290,589
| 1
| 0
| null | 2020-11-02T07:35:32
| 2020-11-02T07:29:37
|
R
|
UTF-8
|
R
| false
| true
| 293
|
rd
|
h3_to_placekey.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/h3_to_placekey.R
\name{h3_to_placekey}
\alias{h3_to_placekey}
\title{Convert h3 string to placekey}
\usage{
h3_to_placekey(h3_string)
}
\description{
Given an h3 string, identify the \emph{where} portion of a placekey.
}
|
7ed6e4e700abe822c776f253ae0d86628cd12904
|
a203b14b580ad9c2c904f7095467b2231736e81b
|
/plot2.R
|
219024bff382a4a59a53e2b4db63b5b5d9ebb144
|
[] |
no_license
|
vitbao/ExData_Plotting1
|
3ca2ea1e84969f3c1d9ff71f858d035d04b0feee
|
272ab25f8f8433df06e1d1001e22cd77222a28e5
|
refs/heads/master
| 2020-12-11T09:26:12.962409
| 2014-05-11T11:21:01
| 2014-05-11T11:21:01
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,004
|
r
|
plot2.R
|
# Course Project 1 - Part 2: line plot of Global Active Power over two days,
# written to plot2.png.

# Read the full semicolon-separated power-consumption file (with header).
power <- read.table("household_power_consumption.txt", header = TRUE, sep = ";")

# Keep only the observations recorded on 1/2/2007 or 2/2/2007.
is_target_day <- power$Date == "1/2/2007" | power$Date == "2/2/2007"
two_days <- power[is_target_day, ]

# Build POSIXlt timestamps by combining the Date and Time columns.
day <- as.Date(two_days$Date, format = "%d/%m/%Y")
clock <- as.character(two_days$Time)
stamp <- strptime(paste(day, clock), "%Y-%m-%d %H:%M:%S")

# Global_active_power may have been read as a factor, so convert via
# character before coercing to numeric.
gap <- as.numeric(as.character(two_days$Global_active_power))

# Render the line plot straight to a 480x480 PNG device.
png(filename = "plot2.png", width = 480, height = 480, units = "px")
plot(stamp, gap, xlab = "",
     ylab = "Global Active Power (kilowatts)", type = "l")
dev.off()
|
11bc740de45890868c0344b6558a5fa82807f561
|
8eb16684e6d6ef4d0cf2c5890aea6aca25316c1d
|
/R/template.R
|
8aa461bae6a7dc6a200c7aa7aa7b9261e5363e48
|
[
"MIT"
] |
permissive
|
wfulp/valtools
|
0583f1934459c129a045f5a342df0ead5ca7b82b
|
595c343471b26a60a12b52fcdac3d220af813e2a
|
refs/heads/main
| 2023-05-02T04:06:39.137774
| 2021-05-20T19:38:49
| 2021-05-20T19:38:49
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,218
|
r
|
template.R
|
#' Templating function
#'
#' Access valtools templates and create a modified version based on the
#' input data.
#'
#' @param template template to use from valtools/inst/templates
#' @param output path to save the rendered template to (defaults to the
#'   template name)
#' @param data named list or environment with variables that will be used
#'   during rendering
#'
#' @return Invisibly returns `TRUE` on success.
#'
#' @importFrom whisker whisker.render
#' @importFrom rlang abort
#'
#' @noRd
#'
render_template <- function(template, output = template, data = list()){

  template_path <- file.path(
    system.file("templates", package = "valtools"),
    template)

  if(!file.exists(template_path)){
    abort(paste0("Template `",template,"` does not exist."),
          class = "vt.template_exist_fail")
  }

  template_text <- readLines(template_path)

  tryCatch({
    output_text <- whisker.render(
      template = template_text,
      data = data, debug = TRUE)

    file_con <- file(output)
    # add = TRUE so this handler stacks with any other exit handlers.
    on.exit(close(file_con), add = TRUE)

    writeLines(
      output_text,
      con = file_con
    )

  }, error = function(e) {
    # Use conditionMessage(e) to extract the error text. The previous code
    # called paste0(c(..., e, sep = "\n")): `sep` ended up as an *element*
    # of the vector and the condition object itself was pasted, producing a
    # malformed, multi-element message.
    abort(paste0("Error during creation of template `", template,
                 "`. Error: ", conditionMessage(e)),
          class = "vt.template_render_fail")
  })

  invisible(TRUE)
}
|
f0937617c52c1706c389d6664000d73d4cab2b91
|
dc990edc293a4c1d046a97df9ce31b150db95d9f
|
/man/Time2EventMain.Rd
|
29b8eb69f42811509628545e909819bda2b87418
|
[] |
no_license
|
cran/StatCharrms
|
6a442d509df0509ce0d31d16fe56fcd38b5d7a63
|
ae5bc0ca65227c3e8cec00addf07923d86af1cde
|
refs/heads/master
| 2021-06-01T23:50:03.728026
| 2020-11-14T13:20:09
| 2020-11-14T13:20:09
| 90,633,276
| 1
| 1
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,066
|
rd
|
Time2EventMain.Rd
|
\name{Time2EventMain}
\alias{Time2EventMain}
\title{
GUI for time to event analysis
}
\description{
Main function call that produces the GUI for a time to event analysis.
}
\usage{
Time2EventMain()
}
\details{
Used as the GUI front-end to perform the time to event analysis as outlined in the LAGDA guidelines.
}
\value{
\item{\bold{Load Data} Button}{Load a data set from an csv file.}
\item{\bold{Specify Data} Button}{Calls \code{\link{addSpec.te}}. This will produce the widgets needed to specify the data.}
\item{\bold{Run Analysis} Button}{Calls \code{\link{analyseTime2Effect}} and performs the time to event
		analysis, after which the \code{\link{buildResultsWindow.te}}
		function is called to display the results from the analysis.}
}
\references{
OECD, 2015, OECD Guidelines for the Testing of Chemicals, Section 2. Test No. 241: The Larval Amphibian Growth and Development Assay (LAGDA)
OECD Publishing, Paris. DOI:http://dx.doi.org/10.1787/9789264242340-en
}
\author{
Joe Swintek
}
\keyword{GUI Main}
|
4f6fce55b3ab35092c853c2c4f7430951ac66078
|
031b79338ba35d35968113b72eb8775ed4dd1008
|
/Scripts/old scripts/smeb_calc_new.R
|
e3741df849e3e6d61bf628e4c338983d5a557908
|
[] |
no_license
|
agualtieri/yemen_jmmi
|
fc706602e7528935f54a5a45f2afc6663c8a62d0
|
8d3741358589d596c2a72edb847822317b39ca4c
|
refs/heads/master
| 2020-04-28T10:50:15.312374
| 2019-05-20T13:55:44
| 2019-05-20T13:55:44
| 175,216,403
| 1
| 2
| null | 2019-05-20T14:07:39
| 2019-03-12T13:24:26
|
R
|
UTF-8
|
R
| false
| false
| 1,919
|
r
|
smeb_calc_new.R
|
### SMEB CALCULATION / FUNCTION ###

#' Compute the Survival Minimum Expenditure Basket (SMEB) for WASH items.
#'
#' `data` must contain the normalised item price columns
#' (price_soap_normalised, price_laundry_powder_normalised,
#' price_sanitary_napkins_normalised), cost_cubic_meter and governorate_ID.
#' `level` is accepted for backward compatibility but is currently unused
#' (NOTE(review): the original never used it either); it now has a default
#' so one-argument calls are explicitly supported.
#' Returns `data` augmented with smeb_* item columns and smeb_total.
smeb_calculation <- function(data, level = NULL){

  # (Removed unused `%!in%` operator and the unused `data.select`
  # intermediate from the original — neither affected the result.)

  ## Create weighted values for each item (quantity x unit price)
  data %>%
    mutate(
      smeb_soap = price_soap_normalised*10.5,
      smeb_laundry = price_laundry_powder_normalised*20,
      smeb_napkins = price_sanitary_napkins_normalised*2,
      smeb_cubic = cost_cubic_meter*3.15
    ) -> data

  ## Proximity analysis -> use prices coming from other markets in the same
  ## governorate if available (fill NAs with the governorate median)
  data %>%
    dplyr::group_by(governorate_ID) %>%
    dplyr::mutate(
      smeb_soap= ifelse(is.na(smeb_soap), median(smeb_soap, na.rm=TRUE), smeb_soap),
      smeb_laundry= ifelse(is.na(smeb_laundry), median(smeb_laundry, na.rm=TRUE), smeb_laundry),
      smeb_napkins= ifelse(is.na(smeb_napkins), median(smeb_napkins, na.rm=TRUE), smeb_napkins),
      smeb_cubic= ifelse(is.na(smeb_cubic), median(smeb_cubic, na.rm=TRUE), smeb_cubic)
    ) %>%
    dplyr::ungroup() -> data

  ## Calculate total smeb per row
  data %>%
    dplyr::mutate(smeb_total = smeb_soap +
                    smeb_laundry +
                    smeb_napkins +
                    smeb_cubic) -> data

  # BUG FIX: the original ended with print(median_smeb), but no object named
  # median_smeb was ever created, so the function always failed at the end.
  # Return the augmented data frame instead (as the `data2 <- ...` call below
  # clearly expects).
  data
}

data2 <- smeb_calculation(data)
|
00be8c816f9ddc1cc02b73ed62da482fd03ede52
|
76e67672932655cc635798d00620cdf98881edf1
|
/R可视化/Grouping over a variable.R
|
9dc6e9682cb6ddf45d78d677e24e4be11be7c360
|
[] |
no_license
|
rogerjms/R_data_mining
|
b2b4ad8213ced0f2a5ea97c8a4e27ce8ca726e34
|
a336980a13b1f1b72b5cba0dcda0d2aebb26d2b8
|
refs/heads/master
| 2016-09-09T19:03:21.190427
| 2016-01-11T15:32:33
| 2016-01-11T15:32:33
| 19,964,972
| 1
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 385
|
r
|
Grouping over a variable.R
|
# Read metal concentration measurements; the Source column identifies the
# sampling site — TODO confirm file layout against metals.csv.
metals<-read.csv("metals.csv")
# Boxplot of copper (Cu) concentration grouped by sampling site.
boxplot(Cu~Source,data=metals,
main="Summary of Copper (Cu) concentrations by Site")
#Varying box widths by number of observations (varwidth scales each box
# by the square root of its group's sample size)
boxplot(Cu ~ Source, data = metals,varwidth=TRUE,
main="Summary of Copper concentrations by Site")
# Horizontal orientation (translated from Chinese: "horizontal direction");
# las = 1 keeps axis labels horizontal. metals[,-1] drops the first column
# so one box is drawn per remaining (metal) column.
boxplot(metals[,-1],
horizontal=TRUE,las=1,
main="Summary of metal concentrations by Site")
|
cc615d395c80055a842acfa4f6c79d769f98f8bd
|
ff962da91c4647de059ed0baef6afc02b84e24d1
|
/Sabermetrics/Team Offensive Perfomance/Compute Team Offensive Statistics.r
|
17607494086cedd4f6ebbcb738a113f3ed8fd63f
|
[] |
no_license
|
kyle-roesler/utaData
|
a4d448b2dc2a6436d96ef4f7bcc889c7fa76acad
|
a9ef0b137d8bd10e03a4dad152805b26898e1d95
|
refs/heads/master
| 2020-04-16T02:32:09.439220
| 2015-09-01T16:47:56
| 2015-09-01T16:47:56
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,494
|
r
|
Compute Team Offensive Statistics.r
|
# Compute Team Offensive Statistics.r
#
# Builds a data frame of team-level offensive metrics (batting average,
# OBP, SLG, OPS) for seasons since 2000 and saves it to Sabermetrics.rda.

library( Lahman )                       # library() fails fast if the package is
                                        # missing; the original used require(),
                                        # which only warns and returns FALSE
data( Teams )                           # Make Teams available for analysis

head( Teams )                           # Show first 6 rows
tail( Teams )                           # and last 6 rows

Teams <- Teams[ Teams$yearID > 1999, ]  # Only use teams since 2000
head( Teams )                           # Confirm row deletion
tail( Teams )

Teams$batAvg <- with( Teams, ( H / AB ))  # Batting average
Teams$OBP <- with( Teams,                 # On-base percentage
                   ( H + BB +HBP ) /
                   ( AB + BB + HBP + SF ))
Teams$SLG <- with( Teams,                 # Slugging percentage
                   ( H+X2B+2*X3B +3*HR)/ AB )
Teams$OBSP <- with( Teams, OBP+SLG )      # On-base plus slugging (a.k.a. OPS)
Teams$OBST <- with( Teams, OBP*SLG )      # On-base times slugging

# Keep only identifiers, win/loss record and the computed metrics
Sabermetrics <- with( Teams, data.frame(
	yearID,
	teamID,
	W,
	L,
	batAvg,
	OBP,
	SLG,
	OBSP,
	OBST,
	R ))

save( Sabermetrics, file="Sabermetrics.rda" )

library( GGally )                        # was require(); see note above
ggpairs( Sabermetrics[5:10] )            # pairwise plots of the metric columns
|
4115d2ddf63a5c2387090337035bc778d398258b
|
a47ce30f5112b01d5ab3e790a1b51c910f3cf1c3
|
/A_github/sources/authors/3048/evmix/tcplot.r
|
00ed4bfbe9662715445bcc781502d5c5f460e73d
|
[] |
no_license
|
Irbis3/crantasticScrapper
|
6b6d7596344115343cfd934d3902b85fbfdd7295
|
7ec91721565ae7c9e2d0e098598ed86e29375567
|
refs/heads/master
| 2020-03-09T04:03:51.955742
| 2018-04-16T09:41:39
| 2018-04-16T09:41:39
| 128,578,890
| 5
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 26,122
|
r
|
tcplot.r
|
#' @export
#'
#' @title Parameter Threshold Stability Plots
#'
#' @description Plots the MLE of the GPD parameters against threshold
#'
#' @inheritParams mrlplot
#' @param ylim.xi y-axis limits for shape parameter or \code{NULL}
#' @param ylim.sigmau y-axis limits for scale parameter or \code{NULL}
#'
#' @details The MLE of the (modified) GPD scale and shape (xi) parameters are
#' plotted against a set of possible thresholds. If the GPD is a suitable
#' model for a threshold \eqn{u} then for all higher thresholds \eqn{v > u} it
#' will also be suitable, with the shape and modified scale being
#' constant. Known as the threshold stability plots (Coles, 2001). The modified
#' scale parameter is \eqn{\sigma_u - u\xi}.
#'
#' In practice there is sample uncertainty in the parameter estimates, which
#' must be taken into account when choosing a threshold.
#'
#' The usual asymptotic Wald confidence intervals are shown based on the
#' observed information matrix to measure this uncertainty. The sampling density
#' of the Wald normal approximation is shown by a greyscale image, where lighter
#' greys indicate low density.
#'
#' A pre-chosen threshold (or more than one) can be given in \code{try.thresh}.
#' The GPD is fitted to the excesses using maximum likelihood estimation. The
#' estimated parameters are shown as a horizontal line which is solid above this
#' threshold, for which they should be the same if the GPD is a good model (up to sample uncertainty).
#' The threshold should always be chosen to be as low as possible to reduce sample uncertainty.
#' Therefore, below the pre-chosen threshold, where the GPD should not be a good model, the line
#' is dashed and the parameter estimates should now deviate from the dashed line
#' (otherwise a lower threshold could be used).
#
#' If no threshold limits are provided \code{tlim = NULL} then the lowest threshold is set
#' to be just below the median data point and the maximum threshold is set to the 11th
#' largest datapoint. This is a slightly lower order statistic compared to that used in the MRL plot
#' \code{\link[evmix:mrlplot]{mrlplot}} function to account for the fact the maximum likelihood
#' estimation is likely to be unreliable with 10 or fewer datapoints.
#'
#' The range of permitted thresholds is just below the minimum datapoint and the
#' second largest value. If there are less unique values of data within the threshold
#' range than the number of threshold evalations requested, then instead of a sequence
#' of thresholds they will be set to each unique datapoint, i.e. MLE will only be applied
#' where there is data.
#'
#' The missing (\code{NA} and \code{NaN}) and non-finite values are ignored.
#'
#' The lower x-axis is the threshold and an upper axis either gives the number of
#' exceedances (\code{p.or.n = FALSE}) or proportion of excess (\code{p.or.n = TRUE}).
#' Note that unlike the \code{gpd} related functions the missing values are ignored, so
#' do not add to the lower tail fraction. But ignoring the missing values is consistent
#' with all the other mixture model functions.
#'
#' @return \code{\link[evmix:tcplot]{tshapeplot}} and
#' \code{\link[evmix:tcplot]{tscaleplot}} produces the threshold stability plot for the
#' shape and scale parameter respectively. They also returns a matrix containing columns of
#' the threshold, number of exceedances, MLE shape/scale
#' and their standard deviation and \eqn{100(1 - \alpha)\%} Wald confidence interval if requested. Where the
#' observed information matrix is not obtainable the standard deviation and confidence intervals
#' are \code{NA}. For the \code{\link[evmix:tcplot]{tscaleplot}} the modified scale quantities
#' are also provided. \code{\link[evmix:tcplot]{tcplot}} produces both plots on one graph and
#' outputs a merged dataframe of results.
#'
#' @note If the user specifies the threshold range, the thresholds above the sixth
#' largest are dropped. A warning message is given if any thresholds have at most 10
#' exceedances, in which case the maximum likelihood estimation is unreliable. If there
#' are less than 10 exceedances of the minimum threshold then the function will stop.
#'
#' By default, no legend is included when using \code{\link[evmix:tcplot]{tcplot}} to get
#' both threshold stability plots.
#'
#' Error checking of the inputs (e.g. invalid probabilities) is carried out and
#' will either stop or give warning message as appropriate.
#'
#' @references
#'
#' Scarrott, C.J. and MacDonald, A. (2012). A review of extreme value
#' threshold estimation and uncertainty quantification. REVSTAT - Statistical
#' Journal 10(1), 33-59. Available from \url{http://www.ine.pt/revstat/pdf/rs120102.pdf}
#'
#' Coles S.G. (2004). An Introduction to the Statistical Modelling of Extreme Values.
#' Springer-Verlag: London.
#'
#' @author Yang Hu and Carl Scarrott \email{carl.scarrott@@canterbury.ac.nz}
#'
#' @section Acknowledgments: Based on the threshold stability plot function \code{\link[evd:tcplot]{tcplot}} in the
#' \code{\link[evd:fpot]{evd}} package for which Stuart Coles' and Alec Stephenson's
#' contributions are gratefully acknowledged.
#' They are designed to have similar syntax and functionality to simplify the transition for users of these packages.
#'
#' @seealso \code{\link[evmix:mrlplot]{mrlplot}} and \code{\link[evd:tcplot]{tcplot}} from
#' \code{\link[evd:mrlplot]{evd}} library
#' @aliases tcplot tshapeplot tscaleplot
#' @family tcplot
#'
#' @examples
#' \dontrun{
#' x = rnorm(1000)
#' tcplot(x)
#' tshapeplot(x, tlim = c(0, 2))
#' tscaleplot(x, tlim = c(0, 2), try.thresh = c(0.5, 1, 1.5))
#' tcplot(x, tlim = c(0, 2), try.thresh = c(0.5, 1, 1.5))
#' }
tcplot <- function(data, tlim = NULL, nt = min(100, length(data)), p.or.n = FALSE,
  alpha = 0.05, ylim.xi = NULL, ylim.sigmau = NULL, legend.loc = "bottomright",
  try.thresh = quantile(data, 0.9, na.rm = TRUE), ...) {

  # make sure defaults which result from function evaluations are obtained
  invisible(nt)
  invisible(try.thresh)

  # Check properties of inputs
  check.quant(data, allowna = TRUE, allowinf = TRUE)
  # TYPO FIX: message previously read "valueshave" (missing space); now
  # matches the wording used by tshapeplot/tscaleplot
  if (any(!is.finite(data))) warning("non-finite data values have been removed")

  data = data[which(is.finite(data))]
  if (is.unsorted(data)) {
    data = sort(data)
  } else {
    if (data[1] > data[length(data)])
      data = rev(data)
  }
  check.quant(data)

  check.param(tlim, allowvec = TRUE, allownull = TRUE)
  if (!is.null(tlim)) {
    if (length(tlim) != 2)
      stop("threshold range tlim must be a numeric vector of length 2")

    if (tlim[2] <= tlim[1])
      stop("a range of thresholds must be specified by tlim")
  }

  check.logic(p.or.n)
  check.n(nt)
  if (nt == 1)
    stop("number of thresholds must be a non-negative integer >= 2")

  check.prob(alpha, allownull = TRUE)
  if (!is.null(alpha)) {
    if (alpha <= 0 | alpha >= 1)
      stop("significance level alpha must be between (0, 1)")
  }

  check.param(ylim.xi, allowvec = TRUE, allownull = TRUE)
  if (!is.null(ylim.xi)) {
    if (length(ylim.xi) != 2)
      stop("ylim must be a numeric vector of length 2")

    if (ylim.xi[2] <= ylim.xi[1])
      stop("a range of shape y axis limits must be specified by ylim.xi")
  }

  check.param(ylim.sigmau, allowvec = TRUE, allownull = TRUE)
  if (!is.null(ylim.sigmau)) {
    if (length(ylim.sigmau) != 2)
      stop("ylim must be a numeric vector of length 2")

    if (ylim.sigmau[2] <= ylim.sigmau[1])
      stop("a range of scale y axis limits must be specified by ylim.sigmau")
  }

  check.text(legend.loc, allownull = TRUE)
  if (!is.null(legend.loc)) {
    if (!(legend.loc %in% c("bottomright", "bottom", "bottomleft", "left",
      "topleft", "top", "topright", "right", "center")))
      stop("legend location not correct, see help(legend)")
  }

  # Default threshold range: just below the median up to the 12th largest point
  if (is.null(tlim)) {
    tlim = c(median(data) - 2*.Machine$double.eps, data[length(data) - 11])
  }

  # Stack the shape and modified-scale stability plots on one device
  par(mfrow = c(2, 1))

  shaperesults = tshapeplot(data, tlim, nt, p.or.n, alpha, ylim.xi, legend.loc, try.thresh, ...)
  scaleresults = tscaleplot(data, tlim, nt, p.or.n, alpha, ylim.sigmau, legend.loc, try.thresh, ...)

  invisible(merge(shaperesults, scaleresults))
}
#' @export
#' @aliases tcplot tshapeplot tscaleplot
#' @rdname tcplot
tshapeplot <- function(data, tlim = NULL, nt = min(100, length(data)), p.or.n = FALSE,
  alpha = 0.05, ylim = NULL, legend.loc = "bottomright",
  try.thresh = quantile(data, 0.9, na.rm = TRUE), main = "Shape Threshold Stability Plot",
  xlab = "Threshold u", ylab = "Shape Parameter", ...) {

  # make sure defaults which result from function evaluations are obtained
  invisible(nt)
  invisible(try.thresh)

  # Check properties of inputs
  check.quant(data, allowna = TRUE, allowinf = TRUE)
  if (any(!is.finite(data))) warning("non-finite data values have been removed")

  data = data[which(is.finite(data))]
  if (is.unsorted(data)) {
    data = sort(data)
  } else {
    if (data[1] > data[length(data)])
      data = rev(data)
  }
  check.quant(data)

  check.param(tlim, allowvec = TRUE, allownull = TRUE)
  if (!is.null(tlim)) {
    if (length(tlim) != 2)
      stop("threshold range tlim must be a numeric vector of length 2")

    if (tlim[2] <= tlim[1])
      stop("a range of thresholds must be specified by tlim")
  }

  check.logic(p.or.n)
  check.n(nt)
  if (nt < 2)
    stop("number of thresholds must be a non-negative integer >= 2")

  # BUG FIX: was check.prob(alpha, allownull = alpha), which passed the
  # numeric significance level where a logical flag is expected
  # (cf. tscaleplot, which correctly uses allownull = TRUE)
  check.prob(alpha, allownull = TRUE)
  if (!is.null(alpha)) {
    if (alpha <= 0 | alpha >= 1)
      stop("significance level alpha must be between (0, 1)")
  }

  check.param(ylim, allowvec = TRUE, allownull = TRUE)
  if (!is.null(ylim)) {
    if (length(ylim) != 2)
      stop("ylim must be a numeric vector of length 2")

    if (ylim[2] <= ylim[1])
      stop("a range of y axis limits must be specified by ylim")
  }

  check.text(legend.loc, allownull = TRUE)
  if (!is.null(legend.loc)) {
    if (!(legend.loc %in% c("bottomright", "bottom", "bottomleft", "left",
      "topleft", "top", "topright", "right", "center")))
      stop("legend location not correct, see help(legend)")
  }

  # Default threshold range: just below the median up to the 12th largest point
  if (is.null(tlim)) {
    tlim = c(median(data) - 2*.Machine$double.eps, data[length(data) - 11])
  }
  thresholds = seq(tlim[1], tlim[2], length.out = nt)

  n = length(data)
  data = data[data > min(thresholds)]

  # Trick to evaluate MRL at all datapoints if there are not too many
  udata = unique(data)
  if (length(udata) <= nt) {
    warning("less data than number of thresholds requested, so will use unique data as thresholds")
    thresholds = udata[-length(udata)]
  }

  # Check given thresholds
  nminu = sum(data > min(thresholds))
  if (nminu <= 10)
    stop("data must have more than 10 exceedances of lowest threshold")

  nmaxu = sum(data > max(thresholds))
  if (nmaxu <= 5) {
    warning("thresholds above 6th largest input data are dropped")
    thresholds = thresholds[thresholds < data[length(data) - 5]]
    nmaxu = sum(data > max(thresholds))
  }
  if (nmaxu <= 10) warning("maximum likelihood estimation is unreliable with less than 10 exceedances")

  nt = length(thresholds)
  if (nt < 2)
    stop("must be more than 1 threshold")

  if (!is.null(try.thresh)) {
    if (length(try.thresh) == 0 | mode(try.thresh) != "numeric")
      stop("threshold to fit GPD to must be numeric scalar or vector")
    if (any((try.thresh < tlim[1]) | (try.thresh >= tlim[2])))
      stop("potential thresholds must be within range specifed by tlim")
  }

  # Fit GPD above threshold u; return threshold, exceedance count, MLE,
  # standard errors and (optionally) Wald confidence interval limits
  mle.calc <- function(x, u, alpha) {
    gpdmle = fgpd(x, u)

    if (is.null(gpdmle$se)) gpdmle$se = rep(NA, 2)

    results = c(u, sum(x > u), gpdmle$mle, gpdmle$se)
    if (!is.null(alpha)) {
      results = c(results, gpdmle$sigmau + qnorm(c(alpha/2, 1 - alpha/2)) * gpdmle$se[1],
        gpdmle$xi + qnorm(c(alpha/2, 1 - alpha/2)) * gpdmle$se[2])
    }
    return(results)
  }

  mleresults = matrix(NA, nrow = nt, ncol = ifelse(is.null(alpha), 4, 10))
  mleresults[1,] = as.vector(mle.calc(data, thresholds[1], alpha))
  for (i in 2:nt) {
    mleresults[i,] = mle.calc(data, thresholds[i], alpha)
  }
  mleresults = as.data.frame(mleresults)
  if (!is.null(alpha)) {
    names(mleresults) = c("u", "nu", "sigmau", "xi", "se.sigmau", "se.xi",
      "cil.sigmau", "ciu.sigmau", "cil.xi", "ciu.xi")
  } else {
    names(mleresults) = c("u", "nu", "sigmau", "xi", "se.sigmau", "se.xi")
  }

  # if CI requested then fancy plot, otherwise give usual threshold stability plots
  par(mar = c(5, 4, 7, 2) + 0.1)
  if (!is.null(alpha)) {
    xicis = c(mleresults$cil.xi, mleresults$ciu.xi)
    xis = range(xicis[is.finite(xicis)])
    xirange = seq(xis[1] - (xis[2] - xis[1])/10, xis[2] + (xis[2] - xis[1])/10, length.out = 200)
    allmat = matrix(xirange, nrow = nt, ncol = 200, byrow = TRUE)
    ximat = matrix(mleresults$xi, nrow = nt, ncol = 200, byrow = FALSE)
    sdmat = matrix(mleresults$se.xi, nrow = nt, ncol = 200, byrow = FALSE)
    z = (allmat - ximat)/sdmat
    z[abs(z) > 3] = NA

    if (is.null(ylim)) {
      ylim = range(xis, na.rm = TRUE)
      ylim = ylim + c(-1, 1) * diff(ylim)/10
    }

    # greyscale image of the Wald sampling density, overlaid with MLE and CI
    image(thresholds, xirange, dnorm(z), col = gray(seq(1, 0.3, -0.01)),
      main = main, xlab = xlab, ylab = ylab, ylim = ylim, ...)

    matplot(matrix(thresholds, nrow = nt, ncol = 3, byrow = FALSE),
      mleresults[, c("xi", "cil.xi", "ciu.xi")],
      add = TRUE, type = "l", lty = c(1, 2, 2), col = "black", lwd = c(2, 1, 1), ...)
  } else {
    if (is.null(ylim)) {
      ylim = range(mleresults[, c("xi")], na.rm = TRUE)
      ylim = ylim + c(-1, 1) * diff(ylim)/10
    }
    # BUG FIX: removed a stray image(thresholds, xirange, dnorm(z), ...) call
    # that preceded this plot; xirange and z are only created in the non-NULL
    # alpha branch above, so the call always failed with
    # "object 'xirange' not found" whenever alpha = NULL
    plot(thresholds, mleresults[, c("xi")], main = main, xlab = xlab, ylab = ylab, ylim = ylim,
      type = "l", lty = 1, col = "black", lwd = 2, ...)
  }
  box()

  # Upper axis: number (or fraction) of exceedances at rounded thresholds
  naxis = rev(ceiling(2^pretty(log2(c(nmaxu, nminu)), 10)))
  naxis = naxis[(naxis > nmaxu) & (naxis < nminu)]
  nxaxis = c(min(thresholds), rev(data)[naxis+1], max(thresholds))
  naxis = c(nminu, naxis, nmaxu)

  # Drop upper-axis labels that would overlap their neighbour
  if ((nxaxis[length(nxaxis)] - nxaxis[length(nxaxis) - 1]) < diff(range(thresholds))/20) {
    nxaxis = nxaxis[-(length(nxaxis) - 1)]
    naxis = naxis[-(length(naxis) - 1)]
  }
  if ((nxaxis[2] - nxaxis[1]) < diff(range(thresholds))/20) {
    nxaxis = nxaxis[-2]
    naxis = naxis[-2]
  }

  if (p.or.n) {
    axis(side = 3, at = nxaxis, line = 0, labels = formatC(naxis/n, digits = 2, format = "g"))
    mtext("Tail Fraction phiu", side = 3, line = 2)
  } else {
    axis(side = 3, at = nxaxis, line = 0, labels = naxis)
    mtext("Number of Excesses", side = 3, line = 2)
  }

  if (!is.null(try.thresh)) {
    ntry = length(try.thresh)
    mleparams = matrix(NA, nrow = 2, ncol = ntry)
    linecols = rep(c("blue", "green", "red"), length.out = ntry)
    for (i in 1:ntry) {
      fitresults = fgpd(data, try.thresh[i], std.err = FALSE)
      mleparams[, i] = fitresults$mle

      # Suppose to be constant after suitable threshold, different line type before and after
      lines(c(try.thresh[i], max(thresholds)), rep(fitresults$xi, 2), lwd = 2, lty = 1, col = linecols[i])
      lines(c(min(thresholds), try.thresh[i]), rep(fitresults$xi, 2), lwd = 2, lty = 2, col = linecols[i])
      abline(v = try.thresh[i], lty = 3, col = linecols[i])
    }
    if (!is.null(legend.loc)) {
      if (!is.null(alpha)) {
        legend(legend.loc, c("MLE of Shape", paste(100*(1-alpha), "% CI"),
          paste("u =", formatC(try.thresh[1:min(c(3, ntry))], digits = 2, format = "g"),
            "sigmau =", formatC(mleparams[1, 1:min(c(3, ntry))], digits = 2, format = "g"),
            "xi =", formatC(mleparams[2, 1:min(c(3, ntry))], digits = 2, format = "g"))),
          lty = c(1, 2, rep(1, min(c(3, ntry)))), lwd = c(2, 1, rep(1, min(c(3, ntry)))),
          col = c("black", "black", linecols), bg = "white")
      } else {
        legend(legend.loc, c("MLE of Shape",
          paste("u =", formatC(try.thresh[1:min(c(3, ntry))], digits = 2, format = "g"),
            "sigmau =", formatC(mleparams[1, 1:min(c(3, ntry))], digits = 2, format = "g"),
            "xi =", formatC(mleparams[2, 1:min(c(3, ntry))], digits = 2, format = "g"))),
          lty = c(1, rep(1, min(c(3, ntry)))), lwd = c(2, rep(1, min(c(3, ntry)))),
          col = c("black", linecols), bg = "white")
      }
    }
  } else {
    if (!is.null(legend.loc)) {
      if (!is.null(alpha)) {
        legend(legend.loc, c("MLE of Shape", paste(100*(1-alpha), "% CI")),
          lty = c(1, 2), lwd = c(2, 1), bg = "white")
      } else {
        legend(legend.loc, "MLE of Shape", lty = 1, lwd = 2, bg = "white")
      }
    }
  }
  invisible(mleresults)
}
#' @export
#' @aliases tcplot tshapeplot tscaleplot
#' @rdname tcplot
tscaleplot <- function(data, tlim = NULL, nt = min(100, length(data)), p.or.n = FALSE,
alpha = 0.05, ylim = NULL, legend.loc = "bottomright",
try.thresh = quantile(data, 0.9, na.rm = TRUE), main = "Modified Scale Threshold Stability Plot",
xlab = "Threshold u", ylab = "Modified Scale Parameter", ...) {
# make sure defaults which result from function evaluations are obtained
invisible(nt)
invisible(try.thresh)
# Check properties of inputs
check.quant(data, allowna = TRUE, allowinf = TRUE)
if (any(!is.finite(data))) warning("non-finite data values have been removed")
data = data[which(is.finite(data))]
if (is.unsorted(data)) {
data = sort(data)
} else {
if (data[1] > data[length(data)])
data = rev(data)
}
check.quant(data)
check.param(tlim, allowvec = TRUE, allownull = TRUE)
if (!is.null(tlim)) {
if (length(tlim) != 2)
stop("threshold range tlim must be a numeric vector of length 2")
if (tlim[2] <= tlim[1])
stop("a range of thresholds must be specified by tlim")
}
check.logic(p.or.n)
check.n(nt)
if (nt < 2)
stop("number of thresholds must be a non-negative integer >= 2")
check.prob(alpha, allownull = TRUE)
if (!is.null(alpha)) {
if (alpha <= 0 | alpha >= 1)
stop("significance level alpha must be between (0, 1)")
}
check.param(ylim, allowvec = TRUE, allownull = TRUE)
if (!is.null(ylim)) {
if (length(ylim) != 2)
stop("ylim must be a numeric vector of length 2")
if (ylim[2] <= ylim[1])
stop("a range of y axis limits must be specified by ylim")
}
check.text(legend.loc, allownull = TRUE)
if (!is.null(legend.loc)) {
if (!(legend.loc %in% c("bottomright", "bottom", "bottomleft", "left",
"topleft", "top", "topright", "right", "center")))
stop("legend location not correct, see help(legend)")
}
if (is.null(tlim)) {
tlim = c(median(data) - 2*.Machine$double.eps, data[length(data) - 11])
}
thresholds = seq(tlim[1], tlim[2], length.out = nt)
n = length(data)
data = data[data > min(thresholds)]
# Trick to evaluate MRL at all datapoints if there are not too many
udata = unique(data)
if (length(udata) <= nt) {
warning("less data than number of thresholds requested, so will use unique data as thresholds")
thresholds = udata[-length(udata)]
}
# Check given thresholds
nminu = sum(data > min(thresholds))
if (nminu <= 10)
stop("data must have more than 10 exceedances of lowest threshold")
nmaxu = sum(data > max(thresholds))
if (nmaxu <= 5) {
warning("thresholds above 6th largest input data are dropped")
thresholds = thresholds[thresholds < data[length(data) - 5]]
nmaxu = sum(data > max(thresholds))
}
if (nmaxu <= 10) warning("maximum likelihood estimation is unreliable with less than 10 exceedances")
nt = length(thresholds)
if (nt < 2)
stop("must be more than 1 threshold")
if (!is.null(try.thresh)) {
if (length(try.thresh) == 0 | mode(try.thresh) != "numeric")
stop("threshold to fit GPD to must be numeric scalar or vector")
if (any((try.thresh < tlim[1]) | (try.thresh >= tlim[2])))
stop("potential thresholds must be within range specifed by tlim")
}
mle.calc <- function(x, u, alpha) {
gpdmle = fgpd(x, u)
if (is.null(gpdmle$se)) gpdmle$se = rep(NA, 2)
if (is.null(gpdmle$cov)) {
gpdmle$cov12 = NA
} else {
gpdmle$cov12 = gpdmle$cov[1, 2]
}
results = c(u, sum(x > u), gpdmle$mle, gpdmle$se)
if (!is.null(alpha)) {
results = c(results, gpdmle$sigmau + qnorm(c(alpha/2, 1 - alpha/2)) * gpdmle$se[1],
gpdmle$xi + qnorm(c(alpha/2, 1 - alpha/2)) * gpdmle$se[2], gpdmle$cov12)
}
return(results)
}
mleresults = matrix(NA, nrow = nt, ncol = ifelse(is.null(alpha), 9, 11))
mleresults[1,] = as.vector(mle.calc(data, thresholds[1], alpha))
for (i in 2:nt) {
mleresults[i,] = mle.calc(data, thresholds[i], alpha)
}
mleresults = as.data.frame(mleresults)
if (!is.null(alpha)) {
names(mleresults) = c("u", "nu", "sigmau", "xi", "se.sigmau", "se.xi",
"cil.sigmau", "ciu.sigmau", "cil.xi", "ciu.xi", "cov12")
} else {
names(mleresults) = c("u", "nu", "sigmau", "xi", "se.sigmau", "se.xi")
}
mleresults$mod.sigmau = mleresults$sigmau - mleresults$xi * mleresults$u
mleresults$mod.se.sigmau = sqrt(mleresults$se.sigmau^2 -
2 * mleresults$u * mleresults$cov12 + (mleresults$u * mleresults$se.xi)^2)
if (!is.null(alpha)) {
mleresults$mod.cil.sigmau = mleresults$mod.sigmau + qnorm(alpha/2) * mleresults$mod.se.sigmau
mleresults$mod.ciu.sigmau = mleresults$mod.sigmau + qnorm(1 - alpha/2) * mleresults$mod.se.sigmau
}
# if CI requested then fancy plot, otherwise give usual threshold stability plots
par(mar = c(5, 4, 7, 2) + 0.1)
if (!is.null(alpha)) {
sigmaucis = c(mleresults$mod.cil.sigmau, mleresults$mod.ciu.sigmau)
sigmaus = range(sigmaucis[is.finite(sigmaucis)])
sigmaurange = seq(sigmaus[1] - (sigmaus[2] - sigmaus[1])/10,
sigmaus[2] + (sigmaus[2] - sigmaus[1])/10, length.out = 200)
allmat = matrix(sigmaurange, nrow = nt, ncol = 200, byrow = TRUE)
sigmaumat = matrix(mleresults$mod.sigmau, nrow = nt, ncol = 200, byrow = FALSE)
sdmat = matrix(mleresults$mod.se.sigmau, nrow = nt, ncol = 200, byrow = FALSE)
z = (allmat - sigmaumat)/sdmat
z[abs(z) > 3] = NA
if (is.null(ylim)) {
ylim = range(sigmaus, na.rm = TRUE)
ylim = ylim + c(-1, 1) * diff(ylim)/10
}
image(thresholds, sigmaurange, dnorm(z), col = gray(seq(1, 0.3, -0.01)),
main = main, xlab = xlab, ylab = ylab, ylim = ylim, ...)
matplot(matrix(thresholds, nrow = nt, ncol = 3, byrow = FALSE),
mleresults[, c("mod.sigmau", "mod.cil.sigmau", "mod.ciu.sigmau")],
add = TRUE, type = "l", lty = c(1, 2, 2), col = "black", lwd = c(2, 1, 1), ...)
} else {
if (is.null(ylim)) {
ylim = range(mleresults[,c("mod.sigmau")], na.rm = TRUE)
ylim = ylim + c(-1, 1) * diff(ylim)/10
}
matplot(thresholds, mleresults[, c("mod.sigmau")], main = main, xlab = xlab, ylab = ylab, ylim = ylim,
type = "l", lty = 1, col = "black", lwd = 2, ...)
}
box()
naxis = rev(ceiling(2^pretty(log2(c(nmaxu, nminu)), 10)))
naxis = naxis[(naxis > nmaxu) & (naxis < nminu)]
nxaxis = c(min(thresholds), rev(data)[naxis+1], max(thresholds))
naxis = c(nminu, naxis, nmaxu)
if ((nxaxis[length(nxaxis)] - nxaxis[length(nxaxis) - 1]) < diff(range(thresholds))/20) {
nxaxis = nxaxis[-(length(nxaxis) - 1)]
naxis = naxis[-(length(naxis) - 1)]
}
if ((nxaxis[2] - nxaxis[1]) < diff(range(thresholds))/20) {
nxaxis = nxaxis[-2]
naxis = naxis[-2]
}
if (p.or.n) {
axis(side = 3, at = nxaxis, line = 0, labels = formatC(naxis/n, digits = 2, format = "g"))
mtext("Tail Fraction phiu", side = 3, line = 2)
} else {
axis(side = 3, at = nxaxis, line = 0, labels = naxis)
mtext("Number of Excesses", side = 3, line = 2)
}
if (!is.null(try.thresh)) {
ntry = length(try.thresh)
mleparams = matrix(NA, nrow = 2, ncol = ntry)
linecols = rep(c("blue", "green", "red"), length.out = ntry)
for (i in 1:ntry) {
fitresults = fgpd(data, try.thresh[i], std.err = FALSE)
mleparams[1, i] = fitresults$sigmau - fitresults$xi * fitresults$u
mleparams[2, i] = fitresults$xi
# Suppose to be constant after suitable threshold, different line type before and after
lines(c(try.thresh[i], max(thresholds)), rep(mleparams[1, i], 2), lwd = 2, lty = 1, col = linecols[i])
lines(c(min(thresholds), try.thresh[i]), rep(mleparams[1, i], 2), lwd = 2, lty = 2, col = linecols[i])
abline(v = try.thresh[i], lty = 3, col = linecols[i])
}
if (!is.null(legend.loc)) {
if (!is.null(alpha)) {
legend(legend.loc, c("MLE of Modified Scale", paste(100*(1 - alpha), "% CI"),
paste("u =", formatC(try.thresh[1:min(c(3, ntry))], digits = 2, format = "g"),
"sigmau =", formatC(mleparams[1, 1:min(c(3, ntry))], digits = 2, format = "g"),
"xi =", formatC(mleparams[2, 1:min(c(3, ntry))], digits = 2, format = "g"))),
lty = c(1, 2, rep(1, min(c(3, ntry)))),
lwd = c(2, 1, rep(1, min(c(3, ntry)))),
col = c("black", "black", linecols), bg = "white")
} else {
legend(legend.loc, c("MLE of Modified Scale",
paste("u =", formatC(try.thresh[1:min(c(3, ntry))], digits = 2, format = "g"),
"sigmau =", formatC(mleparams[1, 1:min(c(3, ntry))], digits = 2, format = "g"),
"xi =", formatC(mleparams[2, 1:min(c(3, ntry))], digits = 2, format = "g"))),
lty = c(1, rep(1, min(c(3, ntry)))), lwd = c(2, rep(1, min(c(3, ntry)))),
col = c("black", linecols), bg = "white")
}
}
} else {
if (!is.null(legend.loc)) {
if (!is.null(alpha)) {
legend(legend.loc, c("MLE of Modified Scale", paste(100*(1 - alpha), "% CI")),
lty = c(1, 2), lwd = c(2, 1), bg = "white")
} else {
legend(legend.loc, "MLE of Modified Scale", lty = 1, lwd = 2, bg = "white")
}
}
}
invisible(mleresults)
}
|
51e4359312b0b50e74ef703087036cc06e9e9c16
|
a7a6792765d871f227c786b3594826b552373ce4
|
/Laplacian_tests/figure_timings.R
|
8eeb2ff192b27c95305967037a244fd2e2ae2046
|
[] |
no_license
|
geoffliddell/resistor_mesh_test
|
bbe8ed86670e8535221e93a0ae73ca9c799fedd6
|
cc0031bc306bdb07883b584f3ceb1ff12fc49fab
|
refs/heads/main
| 2023-05-01T12:59:41.529986
| 2021-05-13T18:01:57
| 2021-05-13T18:01:57
| 367,128,944
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 4,008
|
r
|
figure_timings.R
|
# figure_timings: compare wall-clock solving time for ladder resistor networks
# of increasing length, solved two ways -- (1) ngspice via RSpice and
# (2) a graph-Laplacian linear system -- then plot timing vs. network size.
# NOTE(review): lseq(), the RSpice wrappers (circuitLoad/runSpice/spiceCommand),
# str_split_fixed, igraph and SparseM are assumed to be attached by
# prep/importFunctions.R -- confirm.
graphics.off()
source("prep/importFunctions.R")
len <- 15
# log-spaced series lengths (number of nodes along the ladder)
Nt <- round(lseq(from = 10, to = 25000, length.out =len))
I <- 10
a <- 1
b <- 50
tmp <- rep(NA, len)
# per-size results: solve times (t_*), effective resistances (r_*) and the
# total current discrepancy between the two solvers (diff).
# NOTE(review): `t` and `diff` shadow base::t and base::diff in this script.
t <- data.frame(t_R = tmp, t_n = tmp, r_R = tmp, r_n = tmp, diff = tmp)
i <- 0
for (n in Nt)
{
i <- i+1
Sys.time() -> begin1
##############################################################################
# solve with RSpice (ngspice)
##############################################################################
node1series <- 1:(n-1)
node2series <- 2:n
node1par <- 2:n
node2par <- n+1
# resistor netlist: n-1 series resistors (value a) along the ladder plus
# n-1 parallel resistors (value b) from each interior node to node n+1
resistors<- data.frame(element = paste(rep("R",2*(n-1)), 1:(2*(n-1)), sep = ""),
node1 = c(node1series,node1par),
node2 = c(node2series,rep(node2par,(n-1))),
value = c(rep(a,(n-1)), rep(b,(n-1))) )
# current source definition: drives current I from node n+1 into node 1
current_source <- data.frame( element = "I1",
node1 = n+1,
node2 = 1,
value = I)
# make the whole netlist as "element node1 node2 value" lines
network <- rbind(current_source, resistors)
network <- paste0(network$element," ",network$node1," ",network$node2, " ", network$value)
# format for RSpice
netlist<-as.vector(c("spp name",
".options savecurrents",
network,
".op",
".end")) # adding op to do a dc operating point analysis
circ <- circuitLoad(netlist)
runSpice()
# parse the "@device = value" lines printed by ngspice into branch currents
whole <- capture.output(spiceCommand("print all"))
currents <- str_split_fixed(str_split_fixed(whole, "@", 2)[,2], " = ", 2)
current_val <- as.numeric(currents[,2])
current_id <- currents[!is.na(current_val),1]
current_val <- current_val[!is.na(current_val)]
current <- current_val[2:n]
# node voltages at the two source terminals give the effective resistance
voltage1 <- capture.output(spiceCommand("print v(1)"))
v1<- str_split_fixed(voltage1, "= ", 2)
v1<- as.numeric(v1[1,2])
voltage2 <- capture.output(spiceCommand(paste0("print v(", n+1, ")")))
v2<- str_split_fixed(voltage2, "= ", 2)
v2<- as.numeric(v2[1,2])
t$r_R[i] <- (v1 - v2)/I
Sys.time() -> finish1
##############################################################################
# solve with the graph Laplacian (igraph / SparseM)
# (this banner originally said "solve with RSpice" -- mislabelled)
##############################################################################
Sys.time() -> begin2
# edge conductances: 1/a on the series edges, 1/b on the parallel edges
C <- c(rep(1/a, (n-1)), rep(1/b, (n-1)))
# edges...
Vert <- matrix(c(1:n, n/2, rep(0,n), -0.5), nrow = n+1)
Edges <- data.frame(from = c(1:(n-1), n:2), to = c(2:n, rep(n+1, n-1) ), weight = C) # , weights = rep(1,(2*N_x*N_y - (N_x+N_y))))
the_graph <- graph_from_data_frame(Edges, directed = FALSE)
# lo <- layout.norm(as.matrix(Vert))
# plot(the_graph, layout = lo, directed = FALSE, edge.arrow.size=0)
# solving: the Laplacian is singular, so a small value is added to one
# diagonal entry to pin the potential and make the system solvable
L <- laplacian_matrix(the_graph)
L[n+1,n+1] <- L[n+1,n+1] + 0.01
# Boundary conditions:
# - current @ node 1 = +10
# - voltage @ node 11:19 = 0 => perturb the laplacian at all these points??
q <- matrix(rep(0, nrow(Vert)), ncol = 1)
q[1,] <- +10
q[(n+1),] <- -10
p <- SparseM::solve(L,q)
# series currents: potential differences along the ladder (unit conductance)
p_diffs <- p[c(-n, -(n+1)),] - p[c(-1,-(n+1)),]
# resistance seen from node 1
t$r_n[i] <- (p[1,])/I
Sys.time() -> finish2
# total difference between the results
diff <- sum(abs(p_diffs - current))
# add to results
# NOTE(review): difftime() picks its units automatically (secs vs. mins),
# so timings across sizes may mix units -- consider units = "secs".
t$t_R[i] <- difftime(finish1, begin1)
t$t_n[i] <- difftime(finish2, begin2)
t$diff[i] <- diff
}
# plot timings of both solvers against network size, then the discrepancy
N <- matrix(rep(Nt, 2), ncol = 2)
time <- as.matrix(t[, c(1,2)])
matplot(N,time, xlab = "number of nodes", ylab = "time (s)", pch = 20, col = 'black')
lines(N[,1],time[,1], col = 'red')
lines(N[,2],time[,2], col = 'green')
plot(Nt, t$diff)
|
99349da063226dd6f76ba718236d981dc71e179e
|
3512010bdd0dfa635f3be7c05038dc760976c0b9
|
/src/specConfFn.R
|
b7e6467b4e6fffca1d064d887fd1ea6f1f6406c6
|
[] |
no_license
|
dfo-mar-odis/Publication-FlemishCap-2018
|
63946fe6b4961afe6f7858435b6d59de74dba8a9
|
378c92b1d71b501eea7465bfdb6e6f938b3cf8e3
|
refs/heads/master
| 2021-05-12T14:47:26.731811
| 2018-01-11T19:20:46
| 2018-01-11T19:20:46
| 116,961,452
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,502
|
r
|
specConfFn.R
|
##got below code from looking in
##getAnywhere(plot.spec)
spec.ci <- function(spec.obj, coverage = 0.95) {
  # Multiplicative confidence limits for a spectral estimate, adapted from
  # stats:::plot.spec. Returns c(lower, upper) scale factors: multiplying
  # the spectrum by them spans the `coverage` confidence band.
  # spec.obj: object with a `df` component (equivalent degrees of freedom).
  if (coverage < 0 || coverage >= 1) {
    stop("coverage probability out of range [0,1)")
  }
  dof <- spec.obj$df
  excluded <- 1 - coverage
  # chi-squared quantile probabilities for the two band edges
  probs <- c(1 - excluded * pchisq(dof, dof, lower.tail = FALSE),
             excluded * pchisq(dof, dof))
  1 / (qchisq(probs, dof) / dof)
}
plotSpecConf <- function(x, ci, ci.col, conf.x) {
  # Draw a cross-shaped confidence marker on an existing spectrum plot.
  # x: a "spec"-like object with freq, spec, bandwidth and df components;
  # ci: coverage probability; ci.col: colour of the marker;
  # conf.x: x-position of the marker (defaults to just left of max freq).
  if (missing(conf.x)) {
    conf.x <- max(x$freq) - x$bandwidth
  }
  limits <- spec.ci(x, coverage = ci)
  anchor.y <- max(x$spec) / limits[2L]
  # vertical bar spanning the scaled confidence limits
  lines(rep(conf.x, 2), anchor.y * limits, col = ci.col)
  # horizontal tick one bandwidth wide
  lines(conf.x + c(-0.5, 0.5) * x$bandwidth, rep(anchor.y, 2), col = ci.col)
}
# Chi-squared quantiles for spectrum confidence limits, Shumway & Stoffer
# style. spec.obj: object with a `df` component; ci: coverage probability.
# NOTE(review): `L` (the upper-tail quantile) is computed but never used;
# only the lower-tail quantile qchisq(alpha/2, df) is returned. The intent
# was presumably to return both limits, e.g. c(L, qchisq(alpha/2, df)) --
# confirm before relying on this function.
spec.ci.shumway <- function(spec.obj, ci){
# degrees of freedom of the spectral estimate
df <- spec.obj$df
# two-sided tail probability
alpha <- 1 - ci
L <- qchisq(1 - alpha/2, df)
qchisq(alpha/2 , df)
}
drawSpectrumLimits <- function(spec.obj, coverage = 0.95, ...) {
  # Overlay the lower and upper confidence envelopes of a spectral estimate
  # on an existing plot. Extra arguments are passed through to lines().
  scale.factors <- spec.ci(spec.obj, coverage)
  for (s in scale.factors) {
    lines(spec.obj$freq, spec.obj$spec * s, ...)
  }
}
drawSpectrumCross <- function(spec.obj, coverage = 0.95, frequency, ...) {
  # Draw a confidence "cross" at a chosen frequency on an existing spectrum
  # plot: a horizontal tick one bandwidth wide and a vertical bar spanning
  # the confidence limits. Extra arguments are passed through to lines().
  if (missing(frequency)) {
    # default: the midpoint of the frequency axis
    frequency <- spec.obj$freq[length(spec.obj$freq) / 2]
  }
  limits <- spec.ci(spec.obj, coverage)
  # spectrum value at the tabulated frequency closest to the requested one
  nearest <- which.min(abs(spec.obj$freq - frequency))
  spec.val <- spec.obj$spec[nearest]
  lines(frequency + spec.obj$bandwidth * c(-0.5, 0.5), rep(spec.val, 2), ...)
  lines(rep(frequency, 2), spec.val * limits, ...)
}
|
a7c209d188d2978970459bc732d13b391993e743
|
921c9346e778f69b1083f8e9c0e71a44774a9f32
|
/man/eta_sq.Rd
|
45a56f761ed39622ca92ab34e9e4cf16c3f310b6
|
[] |
no_license
|
cybernetics/devel
|
3d70cf0b6ea5f2601110e6adb68859a983de0a41
|
a87b381d2dfa524d01658eb012dace1d8559db44
|
refs/heads/master
| 2021-01-24T00:15:50.041047
| 2015-02-24T19:51:33
| 2015-02-24T19:51:33
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,387
|
rd
|
eta_sq.Rd
|
% Generated by roxygen2 (4.1.0): do not edit by hand
% Please edit documentation in R/sjStatistics.R
\name{eta_sq}
\alias{eta_sq}
\title{Retrieve eta squared of fitted anova}
\usage{
eta_sq(...)
}
\arguments{
\item{...}{A fitted one-way-anova model or a dependent and grouping variable (see examples).}
}
\value{
The eta squared value.
}
\description{
Returns the eta squared value for 1-way-anovas.
}
\note{
Interpret eta^2 as for r2 or R2; a rule of thumb (Cohen):
\itemize{
\item .02 ~ small
\item .13 ~ medium
\item .26 ~ large
}
}
\examples{
# load sample data
data(efc)
# fit linear model
fit <- aov(c12hour ~ as.factor(e42dep), data = efc)
# print eta squared
eta_sq(fit)
# grouping variable will be converted to factor automatically
eta_sq(efc$c12hour, efc$e42dep)
}
\references{
\itemize{
\item \href{http://stats.stackexchange.com/questions/78808/}{stack exchange 1}
\item \href{http://stats.stackexchange.com/questions/15958/}{stack exchange 2}
\item \href{http://en.wikiversity.org/wiki/Eta-squared}{Wikipedia: Eta-squared}
\item Levine TR, Hullett CR (2002): Eta Squared, Partial Eta Squared, and Misreporting of Effect Size in Communication Research (\href{https://www.msu.edu/~levinet/eta\%20squared\%20hcr.pdf}{pdf})
}
}
\seealso{
\code{\link{sjp.aov1}}
}
|
2663c64104669d5e7e71f8042952c8fa1787c544
|
4939158964c360b58d230268f8d084b865fc14cd
|
/mcrnn_visual.R
|
f5d6363be68750cf9b7ad8dcb25e6d2ef086f93e
|
[] |
no_license
|
Sandy-HE/mer_deeplearning_1000songs
|
e1d175b8cad06abd120f68d90db75cbd5806200c
|
27f6f9c94b6a5727a02ac0e6c2a2ae9226e1f9c1
|
refs/heads/master
| 2023-03-04T23:41:03.609457
| 2023-03-03T04:56:46
| 2023-03-03T04:56:46
| 247,827,265
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 10,785
|
r
|
mcrnn_visual.R
|
library(tidyverse)
library(data.table)
library("plotrix") # for drawing circle
#This code file is for data visualization related to the 1000-song dataset.
#The file "mcrnn_eva_10fold.csv" contains 10-fold test results of the MCRNN model in terms of rmse and r2.
#The file "dnn_eva_10fold.csv" contains 10-fold test results of the DNN model in terms of rmse and r2.
#The file "index.csv" gives the indexes (705 songs) of the 1000 songs which are 44100Hz rather than 48000Hz.
#Every groundtruth file includes 744 songs; we need to filter them to 705 based on these indexes.
#Another task is reordering songs by ASCII order of file names rather than integer order.
#For data generated from experimental results(usually use python to write to csv file), we don't
# (NOTE(review): the sentence above is truncated in the original source)
idx = fread("index.csv")
mcrnn_eva = fread("mcrnn_eva_10fold.csv")
dnn_eva = fread("dnn_eva_10fold.csv") # NOTE(review): loaded but not used below
# split the MCRNN evaluation into R2 columns and RMSE columns (fold kept)
mcrnn_r2= mcrnn_eva %>% select(a_r2:avg_r2,fold)
mcrnn_rmse= mcrnn_eva %>% select(a_rmse:fold)
#====R2 score barplot===========
# Long format: one row per (fold, type) with type in arousal/valence/average.
colnames(mcrnn_r2)[1:3] = c("arousal","valence","average")
mcrnn_r2 = mcrnn_r2 %>%
gather("type","value",arousal:average)
# fix the factor level order so bars and folds appear in a stable order
mcrnn_r2[,2]<-factor(mcrnn_r2[,2],levels=c("arousal","valence","average"),ordered=FALSE)
mcrnn_r2[,1]<-factor(mcrnn_r2[,1],levels=c("F1","F2","F3","F4","F5","F6","F7","F8","F9","F10"),ordered=FALSE)
# axis label with a proper superscript: R^2 Score
r2exp=expression(paste(R^2," Score"))
ggplot(mcrnn_r2, aes(x=fold,y=value, fill=type,group=type))+
geom_bar(stat = "identity",position = position_dodge(0.8))+
coord_cartesian(ylim = c(-0.15, 0.75))+
xlab("Fold")+
ylab(r2exp)+
scale_fill_discrete(name="MCRNN")+
theme_bw()+
theme(legend.position=c(0.9, 0.88))
#====RMSE barplot=============
# Same reshaping as the R2 plot, but for the RMSE columns.
colnames(mcrnn_rmse)[1:3] = c("arousal","valence","average")
mcrnn_rmse = mcrnn_rmse %>%
gather("type","value",arousal:average)
mcrnn_rmse[,2]<-factor(mcrnn_rmse[,2],levels=c("arousal","valence","average"),ordered=FALSE)
mcrnn_rmse[,1]<-factor(mcrnn_rmse[,1],levels=c("F1","F2","F3","F4","F5","F6","F7","F8","F9","F10"),ordered=FALSE)
ggplot(mcrnn_rmse, aes(x=fold,y=value, fill=type, group=type))+
geom_bar(stat = "identity",position = "dodge")+
coord_cartesian(ylim = c(0, 0.4))+
xlab("Fold")+
ylab("RMSE")+
scale_fill_discrete(name="DNN")+ # NOTE(review): legend titled "DNN" but data is mcrnn_rmse -- confirm
theme_bw()+
theme(legend.position=c(0.9, 0.88))
#====The distribution of static emotion for all songs or fold-level songs====
songsav = fread("./1000songs_annotations/static_annotations.csv")
songsav = songsav[songsav$song_id %in% idx$index,] #filter 744 to 705.
# rescale annotations from the [1, 9] rating scale to [-1, 1]
songsav$mean_arousal = (songsav$mean_arousal-1)/8*2-1
songsav$mean_valence = (songsav$mean_valence-1)/8*2-1
# reorder by the ASCII (character) order of the song id
songsav = songsav %>% arrange(as.character(song_id))
#check sample count in each quadrant
temp = songsav[songsav$mean_arousal>0 & songsav$mean_valence>0,]
#plot(songsav[,c("mean_valence","mean_arousal")],kind = 0)
# draw the circular valence/arousal plane with labelled axes
plot.new()
#plot.window(xlim=c(-2,2), ylim=c(-2,2))
plot(c(-1.1,1.1), c(-1.1,1.1), type='n', asp=1,main = "Emotion Dimensional Model")
draw.circle(0, 0, 1, nv = 1000, border = NULL, col = NA, lty = 1, lwd = 1)
arrows(c(-1.1,0),c(0,-1.1),c(1.1,0),c(0,1.1), length=0.1)
text(x=0.22,y=1.1, "Arousal", font=2)
text(x=1.2,y=0.1, "Valence", font=2)
# NOTE(review): text() has no `color` argument (the graphical parameter is
# `col`); the `color=` values below are likely ignored with a warning.
text(x=1.15,y=-0.05, "positive" , cex=.7, color="grey", font=3)
text(x=-1.15,y=-0.05, "negative" , cex=.7, color="grey", font=3)
text(x=-0.15,y=1.05, "active" , cex=.7, color="grey", font=3)
text(x=-0.15,y=-1.05, "inactive" , cex=.7, color="grey", font=3)
#visualize for one cross-validation fold or all songs
#points(songsav$mean_valence,songsav$mean_arousal,pch=16)
points(songsav$mean_valence[631:700],songsav$mean_arousal[631:700],pch=16)
points(songsav$mean_valence[1:70],songsav$mean_arousal[1:70],pch=20, col='red')
points(songsav$mean_valence[701:705],songsav$mean_arousal[701:705],pch=20, col=alpha('grey',0.8))
points(songsav$mean_valence[71:630],songsav$mean_arousal[71:630],pch=20, col=alpha('grey',0.8))
#====visualize standard deviation===========
a_std = fread("./1000songs_annotations/arousal_cont_std.csv")
a_std = a_std[a_std$song_id %in% idx$index,]
a_std = a_std %>% arrange(as.character(song_id))
# assign a fold label: 70 consecutive songs per fold
a_std$idx = 1:nrow(a_std)
a_std$fold = a_std$idx %/% 70 +1
# drop the first timestamp column, then per-song mean/sd across timestamps
a_std1= a_std %>% select(-sample_15000ms)
a_std1 = a_std1 %>% mutate(st_mean = rowMeans(select(a_std1,starts_with("sample_"))),st_std = apply(select(a_std1,starts_with("sample_")),1,sd))
# fold-level summaries of the per-song statistics
a_std2 = group_by(a_std1, fold)
a_std3 = summarise(a_std2, st_fold_mean = mean(st_mean),st_fold_st = sd(st_std))
#=====fold-level time-series visual=========
library(ggplot2)
# fold-3 predictions: truth, baseline and MCRNN, 60 time steps per song
avtrue = fread("./av_true_f3.csv")
avbase = fread("./av_base_f3.csv")
avpred = fread("./av_pred_f3.csv")
avtrue$idx =1:nrow(avtrue)
# song id (blocks of 60 rows) and time step in seconds (0.5 s resolution)
avtrue$id = (avtrue$idx-1) %/% 60 +1
avtrue$ts = 0.5*(avtrue$idx - 60*(avtrue$id - 1))
colnames(avbase) = c("abase","vbase")
avall = cbind(avtrue, avbase, avpred)
colnames(avall)[c(1,2,6:9)]<- c("A-Truth","V-Truth","A-Baseline","V-Baseline","A-MCRNN","V-MCRNN")
# long format: one row per (time step, series type)
avall_new = avall %>%
gather(key = "type", value="value", 'A-Truth':'V-Truth', 'A-Baseline':'V-MCRNN')
#visline = c("dashed")
#viscolor =
#avtrue %>%
#for 70 songs
ggplot(avall_new, aes(x=ts,y=value, color=type ))+
geom_line()+
theme_bw()+
xlab("Time Steps")+
ylab("Arousal(A) / Valence(V)")+
facet_wrap(~id)
#for one song
av4one=avall_new[avall_new$id==3,]
# per-series colours/linetypes: dashed = arousal series, solid = valence
color=c("green","red","blue","green","red","blue")
linetype=c("longdash","longdash","longdash","solid","solid","solid")
sca = 0.5
ggplot(av4one, aes(x=ts,y=value,group=type))+
geom_line(aes(col=type,linetype=type),size=1)+
theme_bw()+
xlab("Time Steps")+
ylab("Arousal(A) / Valence(V)")+
scale_linetype_manual(values = linetype) +
scale_color_manual(values = color)+
theme(legend.position = "none",
axis.title.y=element_text(size=15),
axis.title.x=element_text(size=15),
axis.text=element_text(size=15))+
coord_cartesian(ylim = c(-sca, sca))
#add legend (980*350)
ggplot(av4one, aes(x=ts,y=value,group=type))+
geom_line(aes(col=type,linetype=type),size=1)+
theme_bw()+
xlab("Time Steps")+
ylab("Arousal(A) / Valence(V)")+
scale_linetype_manual(values = linetype) +
scale_color_manual(values = color)+
theme(legend.title = element_blank(),
legend.key.width=unit(1, "cm"),
legend.text=element_text(size=20),
axis.title.y=element_text(size=15),
axis.title.x=element_text(size=15),
axis.text=element_text(size=15))+
#guides(shape = guide_legend(override.aes = list(size=50)))+
coord_cartesian(ylim = c(-sca, sca))
#====dynamic anno for one or more songs======
#avtrue = fread("./av_true.csv")
#avpred = fread("./av_pred.csv")
library(ggplot2)
# fold-5 predictions: truth, baseline and MCRNN
avtrue = fread("./av_true_f5.csv")
avbase = fread("./av_base_f5.csv")
avpred = fread("./av_pred_f5.csv")
avtrue$idx =1:nrow(avtrue)
avtrue$id = (avtrue$idx-1) %/% 60 +1
avtrue$ts = 0.5*(avtrue$idx - 60*(avtrue$id - 1))
colnames(avbase) = c("abase","vbase")
avall = cbind(avtrue, avbase, avpred)
sca = 0.5
sca1=0.54
# row range to display (one song = 60 rows; 1021:1080 is song 18)
pstart = 1021
pend = 1080
#only figure(800*550), figure with legend(920*550)
plot.new()
# widen the right margin for the legend; xpd=T lets it draw in the margin
par(xpd = T, mar = par()$mar + c(0,0,0,7))
#plot.window(xlim=c(-2,2), ylim=c(-2,2))asp=1,
plot(c(-sca,sca), c(-sca,sca), type='n',xlab = "Valence", ylab = "Arousal", cex.lab=1.5, cex.axis=1.5)
#draw.circle(0, 0, 1, nv = 1000, border = NULL, col = NA, lty = 1, lwd = 1)
#arrows(c(-sca1,0),c(0,-sca1),c(sca1,0),c(0,sca1), length=0.1)
lines(c(-sca1,sca1),c(0,0), type='l')
lines(c(0,0),c(-sca1,sca1), type='l')
# scatter the per-time-step (valence, arousal) points of each series
points(avall$vtrue[pstart:pend] ,avall$atrue[pstart:pend],pch=16, col='blue')
points(avall$vpred[pstart:pend] ,avall$apred[pstart:pend],pch=16, col=alpha('red',0.7))
points(avall$vbase[pstart:pend] ,avall$abase[pstart:pend],pch=16, col=alpha('green',0.7))
#arrows(avtrue$atrue[pstart:pend] ,avtrue$vtrue[pstart:pend], avpred$apred[pstart:pend] ,avpred$vpred[pstart:pend],length=0.1, col="red")
legend(0.55,0.2, legend=c("Baseline","MCRNN","Truth"), col=c("green","red","blue"),
pch = c(16,16,16),bty='n',cex=1.5)
# restore the default margins changed above
par(mar=c(5, 4, 4, 2) + 0.1)
#, lty=1:2, cex=0.8)
#====all songs distribution=====
atrue_all = fread("./1000songs_annotations/arousal_cont_average.csv")
atrue_all = atrue_all[atrue_all$song_id %in% idx$index,]
atrue_all = atrue_all %>% arrange(as.character(song_id))
vtrue_all = fread("./1000songs_annotations/valence_cont_average.csv")
vtrue_all = vtrue_all[vtrue_all$song_id %in% idx$index,]
vtrue_all = vtrue_all %>% arrange(as.character(song_id))
# long format: drop the first timestamp, one row per (song, timestamp)
atrue_all_new = atrue_all %>%
select(-sample_15000ms)%>%
gather(ts, avalue, sample_15500ms:sample_45000ms)
vtrue_all_new = vtrue_all %>%
select(-sample_15000ms)%>%
gather(ts, vvalue, sample_15500ms:sample_45000ms)
# pair each arousal value with its valence value (same song/timestamp order)
avtrue_all = cbind(atrue_all_new,vtrue_all_new$vvalue)
colnames(avtrue_all)[4] = "vvalue"
sca = 1.1
plot.new()
# NOTE(review): stray empty argument (double comma) in this plot() call --
# it parses as a missing positional argument and appears harmless.
plot(c(-sca,sca), c(-sca,sca),,type='n',asp=1,xlab = "", ylab = "")
#plot.window()
draw.circle(0, 0, 1, nv = 1000, border = NULL, col = NA, lty = 1, lwd = 1)
arrows(c(-sca,0),c(0,-sca),c(sca,0),c(0,sca), length=0.1)
points(avtrue_all$vvalue ,avtrue_all$avalue,pch=16, col=alpha('grey',0.5))
text(x=0.22,y=1.1, "Arousal", font=2)
text(x=1.2,y=0.1, "Valence", font=2)
text(x=1.15,y=-0.05, "positive" , cex=.7, font=3)
text(x=-1.15,y=-0.05, "negative" , cex=.7, font=3)
text(x=-0.15,y=1.05, "active" , cex=.7, font=3)
text(x=-0.15,y=-1.05, "inactive" , cex=.7, font=3)
#====The average of A-V of 60 annotation======
# NOTE(review): `av_stat_f7` is only created further below (and written to
# av_stat_f7.csv); this first line fails unless the section has been run
# before or the object was loaded elsewhere -- confirm intended order.
fold = av_stat_f7
avtrue = fread("./av_true_f7.csv")
avtrue$idx =1: nrow(avtrue)
avtrue$id = (avtrue$idx-1) %/% 60 +1
# per-song means of the 60 ground-truth annotations
avtrue_stat = avtrue %>%
select(-idx) %>%
group_by(id) %>%
summarise(mean_arousal=mean(atrue), mean_valence=mean(vtrue))
avpred = fread("./av_pred_f7.csv")
avpred$idx =1: nrow(avpred)
avpred$id = (avpred$idx-1) %/% 60 +1
# per-song means of the 60 predicted annotations
avpred_stat = avpred %>%
select(-idx) %>%
group_by(id) %>%
summarise(mean_arousal_pred=mean(apred), mean_valence_pred=mean(vpred))
fold = cbind(avpred_stat,avtrue_stat)
fwrite(fold, "./av_stat_f7.csv")
sca = 0.7
plot.new()
#plot.window(xlim=c(-2,2), ylim=c(-2,2))asp=1
plot(c(-sca,sca), c(-sca,sca), type='n',main = "Emotion Dimensional Model", xlab = "Valence", ylab = "Arousal")
#draw.circle(0, 0, 1, nv = 1000, border = NULL, col = NA, lty = 1, lwd = 1)
arrows(c(-1.1,0),c(0,-1.1),c(1.1,0),c(0,1.1), length=0.1)
# NOTE(review): arousal is passed as x and valence as y here, while the
# axes are labelled Valence (x) / Arousal (y) -- confirm this is intended.
points(fold$mean_arousal ,fold$mean_valence,pch=1,col='blue')
points(fold$mean_arousal_pred ,fold$mean_valence_pred,pch=19, col='red')
arrows(fold$mean_arousal_pred ,fold$mean_valence_pred,fold$mean_arousal ,fold$mean_valence, length=0.1, col="grey")
|
39170a063470c575dfad2d0835bd882c8a89a570
|
1aa92f850ce632811aaa74d769527a8037d8c484
|
/R/fit_fast.R
|
02f7d32a4bf7b00acb11159d98aa99e41f04b437
|
[] |
no_license
|
cran/mvord
|
253c6e7deaf07bf5ac111571b6db307219f1597c
|
6699126154748d7510647afc7bda27066aad3549
|
refs/heads/master
| 2021-06-02T15:11:40.519370
| 2021-03-17T12:20:12
| 2021-03-17T12:20:12
| 102,715,261
| 2
| 2
| null | null | null | null |
UTF-8
|
R
| false
| false
| 3,836
|
r
|
fit_fast.R
|
###### fast ######
build_error_struct_fast <- function(eobj, tpar) {
  ## Map unconstrained error-structure parameters onto the lower-triangular
  ## correlation entries of a cor_general error structure (fast path).
  ## eobj: error-structure object carrying "ndim", "covariate" and "npar"
  ##       attributes; tpar: transformed parameters, npar1 per factor level.
  ## Returns list(rVec = lower-triangle correlations, one column per level;
  ##              sdVec = unit standard deviations of length ndim).
  ndim <- attr(eobj, "ndim")
  nlev <- NCOL(attr(eobj, "covariate"))
  npar1 <- attr(eobj, "npar") / nlev
  lower_tri_corr <- function(l) {
    nu <- tpar[(l - 1) * npar1 + seq_len(npar1)]
    # inverse-logit scaled to (0, pi): spherical angles of the Cholesky rows
    theta <- pi * exp(nu) / (1 + exp(nu))
    cmat <- diag(ndim)
    cmat[lower.tri(cmat)] <- cos(theta)
    smat <- matrix(0, nrow = ndim, ncol = ndim)
    smat[, 1L] <- 1
    # fill the strictly-lower part beyond column 1 with the sines
    smat[lower.tri(smat, diag = TRUE)][-seq_len(ndim)] <- sin(theta)
    # row j of the Cholesky factor: cosines times running products of sines
    chol_rows <- vapply(seq_len(ndim),
                        function(j) cmat[j, ] * cumprod(smat[j, ]),
                        numeric(ndim))
    corr <- crossprod(chol_rows)
    corr[lower.tri(corr)]
  }
  corr_pars <- sapply(seq_len(nlev), lower_tri_corr)
  if (npar1 == 1) {
    # keep a matrix shape even when there is one correlation per level
    dim(corr_pars) <- c(1, nlev)
  }
  list(rVec = corr_pars, sdVec = rep(1, ndim))
}
# Split the full parameter vector into threshold, regression and
# error-structure parts, and return the standardized lower/upper integration
# bounds plus the pair correlations consumed by PLfun_fast().
# par: c(threshold pars, beta pars, error-structure pars);
# rho: model list with design matrix, constraints, offsets and index objects.
transf_par_fast <- function(par, rho) {
# error-structure parameters sit at the end of par
par_sigma <- par[rho[["npar.thetas"]] + rho[["npar.betas"]] +
seq_len(attr(rho[["error.structure"]], "npar"))]
sigmas <- build_error_struct_fast(rho[["error.structure"]], par_sigma)
# regression coefficients and their constrained expansion
par_beta <- par[rho[["npar.thetas"]] + seq_len(rho[["npar.betas"]])]
betatilde <- rho[["constraints_mat"]] %*% par_beta
betatilde_fast <- par_beta
# thresholds on the original scale, plus per-dimension corrections
par_theta <- rho[["transf_thresholds"]](par[seq_len(rho[["npar.thetas"]])], rho,
betatilde)
thetatilde <- lapply(seq_len(rho[["ndim"]]), function(j)
par_theta[[j]] + rho[["thold_correction"]][[j]](betatilde, k = j, rho = rho))
# <- sapply(1:rho[["mult.obs, function(j) rho[["x[[j]] %*% beta[[j]])
# linear predictor X beta for each outcome dimension
pred.fixed <- lapply(seq_len(rho[["ndim"]]), function(j) as.double(rho[["xfast"]] %*% betatilde_fast[rho[["indjbeta_mat"]][j,]]))
#pred.fixed <- lapply(rho[["indjbeta_fast, function(j) as.double(crossprod(rho[["xfast, betatilde_fast[j])))
# pred.upper <- lapply(seq_len(rho[["ndim), function(j) {
#   th_u <- c(thetatilde[[j]], rho[["inf.value)[rho[["y[, j]]
#   (th_u - pred.fixed[[j]] - rho[["offset[[j]])/sigmas[["sdVec
# })#/sigmas[["sdVec
# standardized bounds: (threshold - X beta - offset) / sd, one column per
# dimension; the top category gets +inf.value, the bottom -inf.value.
# NOTE(review): dividing an n x ndim matrix by sdVec recycles along rows,
# not columns; harmless here because sdVec is all 1s from
# build_error_struct_fast -- confirm if that ever changes.
pred.upper <- vapply(seq_len(rho[["ndim"]]), function(j) {
th_u <- c(thetatilde[[j]], rho[["inf.value"]])[rho[["y"]][, j]]
th_u - pred.fixed[[j]] - rho[["offset"]][[j]]
}, FUN.VALUE = double(rho$n))/sigmas[["sdVec"]]
pred.lower <- vapply(seq_len(rho[["ndim"]]), function(j) {
th_l <- c(-rho[["inf.value"]], thetatilde[[j]])[rho[["y"]][, j]]
th_l - pred.fixed[[j]] - rho[["offset"]][[j]]
}, FUN.VALUE = double(rho$n))/sigmas[["sdVec"]]
# stack the bound pairs for every bivariate combination of outcomes
predu <- do.call("rbind",lapply(rho[["combis_fast"]], function(h){
pred.upper[h[["ind_i"]], h[["combis"]], drop = F]
}))
predl <- do.call("rbind",lapply(rho[["combis_fast"]], function(h){
pred.lower[h[["ind_i"]], h[["combis"]], drop = F]
}))
# matching correlation parameter for each stacked pair
predr <- unlist(lapply(rho[["combis_fast"]], function(h){
sigmas$rVec[h[["r"]]]
}))
# bounds for subjects with a single observed outcome
predu_univ <- pred.upper[rho[["ind_univ"]]]
predl_univ <- pred.lower[rho[["ind_univ"]]]
list(U = predu, L = predl, U_univ = predu_univ, L_univ = predl_univ,
corr_par = predr)
}
PLfun_fast <- function(par, rho) {
  # Negative composite (pairwise) log-likelihood for the fast fitting path.
  # par: full parameter vector; rho: model list with data, link and weights.
  trpar <- transf_par_fast(par, rho)
  #r_mat <- trpar[["corr_par"]]#[, rho$dummy_pl_lag == 1, drop = F]
  # NOTE(review): logp is filled below but never read afterwards.
  logp <- double(rho[["n"]])
  ## univariate contributions: subjects with a single observed response
  link <- rho[["link"]]
  pr_uni <- link[["F_uni"]](trpar[["U_univ"]]) - link[["F_uni"]](trpar[["L_univ"]])
  # clamp away zero probabilities before taking logs
  pr_uni[pr_uni < .Machine$double.eps] <- .Machine$double.eps
  logp[rho[["ind_univ"]][, 1]] <- log(pr_uni)
  ## bivariate contributions: rectangle probabilities for every pair
  pr_biv <- link[["F_biv_rect"]](
    U = trpar[["U"]],
    L = trpar[["L"]],
    r = trpar[["corr_par"]])
  pr_biv[pr_biv < .Machine$double.eps] <- .Machine$double.eps
  -sum(rho[["weights_fast"]] * log(c(pr_biv, pr_uni)))
}
|
0546b5b09b021082239ce83f3c01f6f664e233d9
|
ffdea92d4315e4363dd4ae673a1a6adf82a761b5
|
/data/genthat_extracted_code/strataG/examples/gtypes.accessors.Rd.R
|
a637c2f22c905876240beb73af0c95ecb8c5705f
|
[] |
no_license
|
surayaaramli/typeRrh
|
d257ac8905c49123f4ccd4e377ee3dfc84d1636c
|
66e6996f31961bc8b9aafe1a6a6098327b66bf71
|
refs/heads/master
| 2023-05-05T04:05:31.617869
| 2019-04-25T22:10:06
| 2019-04-25T22:10:06
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,062
|
r
|
gtypes.accessors.Rd.R
|
# Auto-extracted example script (genthat) for the strataG 'gtypes' accessors.
# Requires strataG, ape and apex; bare accessor calls print interactively,
# and the unseeded sample() calls give different strata each run.
library(strataG)
### Name: gtypes.accessors
### Title: 'gtypes' Accessors
### Aliases: gtypes.accessors accessors nInd,gtypes-method nInd
### nLoc,gtypes-method nLoc nStrata nStrata,gtypes-method nStrata
### indNames,gtypes-method indNames locNames,gtypes-method locNames
### strataNames strataNames,gtypes-method strataNames
### ploidy,gtypes-method ploidy other,gtypes-method other
### strata,gtypes-method strata strata<- strata<-,gtypes-method strata
### schemes schemes,gtypes-method schemes schemes<-
### schemes<-,gtypes-method schemes alleleNames alleleNames,gtypes-method
### alleleNames sequences sequences,gtypes-method sequences description
### description,gtypes-method description description<-
### description<-,gtypes-method description [,gtypes,ANY,ANY,ANY-method
### index subset
### ** Examples
#--- create a diploid (microsatellite) gtypes object
data(msats.g)
msats.g <- stratify(msats.g, "fine")
nStrata(msats.g)
strataNames(msats.g)
nLoc(msats.g)
locNames(msats.g)
# reassign all samples to two randomly chosen strata
strata(msats.g) <- sample(c("A", "B"), nInd(msats.g), rep = TRUE)
msats.g
#--- a sequence example
library(ape)
data(woodmouse)
genes <- list(gene1=woodmouse[,1:500], gene2=woodmouse[,501:965])
x <- new("multidna", genes)
wood.g <- sequence2gtypes(x)
strata(wood.g) <- sample(c("A", "B"), nInd(wood.g), rep = TRUE)
wood.g
# get the multidna sequence object
multi.seqs <- sequences(wood.g)
class(multi.seqs) # "multidna"
# get a list of DNAbin objects
library(apex)
dnabin.list <- getSequences(multi.seqs)
class(dnabin.list) # "list"
# get a DNAbin object of the first locus
dnabin.1 <- getSequences(multi.seqs, locNames(wood.g)[1])
class(dnabin.1) # "DNAbin"
# NOTE: The default to the 'simplify' argument in 'getSequences' is TRUE,
# so if there is only one locus, 'getSequences' will return a DNAbin object
# rather than a single element list unless 'simplify = FALSE':
gene1 <- wood.g[, "gene1", ]
gene1.dnabin <- getSequences(sequences(gene1))
class(gene1.dnabin) # "DNAbin"
|
8d1b48aa8c6fb93fe446bc2c8d05f6d759e24a3e
|
d3fdedc49b7f6bdc9ec63033cc3e928dd8d2b7a8
|
/MethylC/rel_methylation_plots_v2.r
|
f5fd73e0a4e0a866ab29646e5f8308f81c888f92
|
[] |
no_license
|
dtrain16/NGS-scripts
|
1085ac1b17eea684d463c20d4f997542794309fe
|
ef1a95cf0ba421dd59e6c2bc62dcd78017d67309
|
refs/heads/master
| 2023-08-04T19:47:44.176749
| 2023-08-01T21:06:43
| 2023-08-01T21:06:43
| 27,793,742
| 10
| 5
| null | null | null | null |
UTF-8
|
R
| false
| false
| 948
|
r
|
rel_methylation_plots_v2.r
|
# produce mean 5mC levels for R plotting
# Command-line script: args are <sample name> <out name> <context>; reads a
# matching "*-report.1k.bed" file, computes each cytosine's position relative
# to its feature (scaled 0-1000, strand-aware, flanks offset by +/-1000),
# bins the methylation levels and writes the binned means per motif.
options(echo=T)
library(tidyverse)
library(fields)
args=commandArgs(trailingOnly=T)
print(args)
smplname <- as.character(paste0(args[1]))
outname <- as.character(paste0(args[2]))
context <- as.character(paste0(args[3]))
# NOTE(review): dir() may match several files; read_delim then receives a
# vector of paths -- confirm exactly one file is expected to match.
data <- dir(pattern=paste0(smplname,"-",outname,"-sub",context,"-report.1k.bed")) %>%
read_delim(delim = '\t', col_names=F) %>%
mutate(rel.dist=ifelse(X13==0,ifelse(X12=="-",((X9-X2)/(X9-X8))*1000,((X2-X8)/(X9-X8))*1000),ifelse(X13>0,X13+1000,X13))) %>%
mutate(fixy=ifelse(rel.dist<0 & X13==0,0,ifelse(rel.dist>1000 & X13==0, 1000, rel.dist)))
# Per-motif (column X5) binned means of methylation (X6) across 100 bins.
# NOTE(review): rbind(temp, out) prepends, so the final table lists motifs
# in reverse order of unique(data$X5); growing via rbind is fine here only
# because the number of motifs is small.
out <- NULL
for(i in unique(data$X5)){
a <- subset(data, X5 == i)
a <- stats.bin(a$fixy,a$X6,N=100)
temp <- as.data.frame(cbind(matrix(a$centers,ncol=1),a$stats["mean",]))
temp$motiff <- paste0(i)
out <- rbind(temp, out)
}
write.table(out,paste0(paste(smplname,context,outname,sep='_'),'.txt'),quote=F, col.names=T, row.names=F, sep='\t')
|
44ff13d26b6ec4e4c8f821ce366b2c89a592c299
|
1fdbe029ec4904f46325d07523dc90eb76e586f2
|
/distantLRTsummary.r
|
3257834c8b9f20bd6169b34ae213bbd5643a3786
|
[] |
no_license
|
jpverta/verta_et_al_2016_new_phytol
|
c365c909465e05a4034a326fe11500d5e409a597
|
6a43f098be7b06215bc9d46d6ec103dd98e11852
|
refs/heads/master
| 2021-01-10T09:22:54.774355
| 2016-01-05T13:21:21
| 2016-01-05T13:21:21
| 49,066,754
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,296
|
r
|
distantLRTsummary.r
|
# Summarize distant-effect tests into a single table.
# Loops through all test outcomes generated with distantLRT.r and annotates
# each target/effect pair with linkage groups and recombination fractions.
# (The original header used a Python-style ''' docstring, which R evaluates
# as stray string literals; converted to comments.)

# User-supplied paths: fill these in before running.
# (The original left the right-hand sides empty, which made R chain the
# assignments across lines and bind testDir to the read.table result.)
testDir <- ""  # directory with results from distant effect tests (output from distantLRT.r)
lgPath <- ""   # file that defines linkage groups (output from mapFiles.r)
rfPath <- ""   # file that describes pair-wise recombination fractions (output from mapFiles.r)
outPath <- ""  # path for the combined output table

# Fail early with a clear message if the paths were not filled in.
stopifnot(nzchar(testDir), nzchar(lgPath), nzchar(rfPath), nzchar(outPath))

lg <- read.table(lgPath, header = TRUE)
rf <- read.table(rfPath)
f <- list.files(testDir)

# Preallocate the combined table: 6258 effect rows per test file.
results <- matrix(ncol = 9, nrow = length(f) * 6258)
colnames(results) <- c('target','effect','logFC','logCPM','LR','Pvalue','LG_target','LG_effect','RF')
results <- as.data.frame(results)

for (i in f){
# One table per target gene, with per-effect LRT statistics.
trans <- read.table(paste(testDir, i, sep = ''), header = TRUE)
trans[,5:9] <- NA
# shift the four statistic columns right to make room for target/effect ids
trans[,3:6] <- trans[,1:4]
trans[,1] <- i                # target gene (file name)
trans[,2] <- rownames(trans)  # effect gene
colnames(trans) <- c('target','effect','logFC','logCPM','LR','Pvalue','LG_target','LG_effect','RF')
trans[,7] <- lg[i,2]           # linkage group of the target
trans[,8] <- lg[trans[,2],2]   # linkage group of each effect gene
# recombination fraction target ~ effect, where available
trans[trans[,2] %in% colnames(rf),9] <- as.numeric(rf[i,trans[trans[,2] %in% colnames(rf),2]])
rownames(trans) <- paste(i, rownames(trans), sep = ':')
# write into the first still-empty rows of the preallocated table
lastrow <- unlist(which(is.na(results[,1])))[1]
results[c(lastrow):c(lastrow+nrow(trans)-1),1:9] <- trans[1:nrow(trans),1:9]
}

write.table(results, outPath)
|
ba4fe61d8e1e8e9e2b8e9e52ad86b19f849f5475
|
52824070453254349e1e82785856cb3d07e3fad0
|
/man/gsw_frazil_properties_potential.Rd
|
63a0e4852200faee3c7146ad577f37efc3225b7a
|
[] |
no_license
|
dyarger/GSW-R
|
fd3f90fa5712a492f69440645b548ae3769421c4
|
93d4cf94389db39ed412d450c593c8a95266463b
|
refs/heads/master
| 2021-10-25T04:12:21.747187
| 2019-03-31T14:54:13
| 2019-03-31T14:54:13
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 3,184
|
rd
|
gsw_frazil_properties_potential.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/gsw.R
\name{gsw_frazil_properties_potential}
\alias{gsw_frazil_properties_potential}
\title{Properties of Frazil ice i.t.o. potential enthalpy}
\usage{
gsw_frazil_properties_potential(SA_bulk, h_pot_bulk, p)
}
\arguments{
\item{SA_bulk}{Absolute Salinity of a combination of seawater and ice [ g/kg ]}
\item{h_pot_bulk}{potential enthalpy of a mixture of seawater and ice [ J/kg ]}
\item{p}{sea pressure [dbar], i.e. absolute pressure [dbar] minus 10.1325 dbar}
}
\value{
a list containing \code{SA_final}, \code{h_final} and \code{w_Ih_final}.
}
\description{
Calculation of Absolute Salinity, Conservative Temperature, and ice mass fraction
based on bulk Absolute Salinity, bulk potential enthalpy, and pressure
}
\details{
The present R function works with a wrapper to a C function contained
within the GSW-C system (Version 3.05-4 dated 2017-08-07, available at
\url{https://github.com/TEOS-10/GSW-C},
as git commit '5b4d959e54031f9e972f3e863f63e67fa4f5bfec'), which
stems from the GSW-Fortran system (\url{https://github.com/TEOS-10/GSW-Fortran})
which in turn stems from the GSW-Matlab system
(\url{https://github.com/TEOS-10/GSW-Matlab}).
Consult \url{http://www.teos-10.org} to learn more about
these software systems, their authorships, and the science behind
it all.
}
\examples{
SA_bulk <- c( 34.7118, 34.8915, 35.0256, 34.8472, 34.7366, 34.7324)
h_pot_bulk <- c(-4.5544e4, -4.6033e4, -4.5830e4, -4.5589e4, -4.4948e4, -4.4027e4)
p <- c( 10, 50, 125, 250, 600, 1000)
r <- gsw_frazil_properties_potential(SA_bulk, h_pot_bulk, p)
expect_equal(r$SA_final, c(39.098258701462051, 39.343217598625756, 39.434254585716296,
39.159536295126657, 38.820511558004590, 38.542322667924459))
expect_equal(r$CT_final, c(-2.155553336670014, -2.200844802695826, -2.264077329325076,
-2.344567015865174, -2.598559540430464, -2.900814843304696))
expect_equal(r$w_Ih_final, c(0.112190640891586, 0.113150826758543, 0.111797588975174,
0.110122251260246, 0.105199838799201, 0.098850365110330))
}
\references{
\url{http://www.teos-10.org/pubs/gsw/html/gsw_frazil_properties_potential.html}
}
\seealso{
Other things related to enthalpy: \code{\link{gsw_CT_from_enthalpy}},
\code{\link{gsw_dynamic_enthalpy}},
\code{\link{gsw_enthalpy_CT_exact}},
\code{\link{gsw_enthalpy_diff}},
\code{\link{gsw_enthalpy_first_derivatives_CT_exact}},
\code{\link{gsw_enthalpy_first_derivatives}},
\code{\link{gsw_enthalpy_ice}},
\code{\link{gsw_enthalpy_t_exact}},
\code{\link{gsw_enthalpy}},
\code{\link{gsw_frazil_properties_potential_poly}},
\code{\link{gsw_pot_enthalpy_from_pt_ice_poly}},
\code{\link{gsw_pot_enthalpy_from_pt_ice}},
\code{\link{gsw_pot_enthalpy_ice_freezing_poly}},
\code{\link{gsw_pot_enthalpy_ice_freezing}},
\code{\link{gsw_pt_from_pot_enthalpy_ice_poly}},
\code{\link{gsw_pt_from_pot_enthalpy_ice}},
\code{\link{gsw_specvol_first_derivatives_wrt_enthalpy}},
\code{\link{gsw_specvol_first_derivatives}}
}
\concept{things related to enthalpy}
|
9f1cecda23f23675908d3167f6a9429362f74324
|
b8735a2bc8e6e2f759d2fc50c73d2506c894a713
|
/DEVILSTAZ/man/checkConfig.Rd
|
62d52e8740be41b28d647858131a046f671bdc3b
|
[] |
no_license
|
ICRAR/DEVILS-TAZ
|
3698ba8e1120d9d2c9f68a01a325f90f5fa7c259
|
e1bc67bfdf6ad0e196d612bb8023ae134ac6c2d0
|
refs/heads/master
| 2023-05-29T23:43:42.947950
| 2019-11-08T04:08:00
| 2019-11-08T04:08:00
| 111,745,574
| 1
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 750
|
rd
|
checkConfig.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/checkConfig.R
\name{checkConfig}
\alias{checkConfig}
\title{Check Configuration Files are consistent with DMCat}
\usage{
checkConfig(configFiles = configFiles, DMCatN = DMCatN,
logName = logName, verbose = verbose)
}
\arguments{
\item{configFiles}{Vector of paths to config files to be checked.}
\item{logName}{log filename to write progress to}
\item{verbose}{tell me whats going on: 0=nothing, 1=somethings, 2=everything}
\item{DMCatN}{Path to the current DMCat to compare against}
}
\description{
Checks that final output configuration files are consistent with
the current DMCat in terms of fields, IDs, priorities, star-galaxy flags, VISCLASS flags and
mask flags.
}
|
83df157701c7bdfa5e9f6f4a55138778b24a5f67
|
ffdea92d4315e4363dd4ae673a1a6adf82a761b5
|
/data/genthat_extracted_code/MAVE/examples/mave.data.Rd.R
|
d62d94c92bf6b28190141285f2bcc219f7d46e39
|
[] |
no_license
|
surayaaramli/typeRrh
|
d257ac8905c49123f4ccd4e377ee3dfc84d1636c
|
66e6996f31961bc8b9aafe1a6a6098327b66bf71
|
refs/heads/master
| 2023-05-05T04:05:31.617869
| 2019-04-25T22:10:06
| 2019-04-25T22:10:06
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 232
|
r
|
mave.data.Rd.R
|
# Extracted example for MAVE::mave.data ("The reduced data matrix").
library(MAVE)

# Simulate a 100 x 4 predictor matrix and a response driven by the first
# two columns plus Gaussian noise (both draws are unseeded, as in the
# original example).
x <- matrix(rnorm(400), nrow = 100, ncol = 4)
y <- x[, 1] + x[, 2] + as.matrix(rnorm(100))

# Fit the MAVE dimension-reduction model, then reduce the predictors to
# the first 3 estimated directions.
dr <- mave(y ~ x)
x.reduced <- mave.data(dr, x, 3)
|
fbcdb51b7b658db315bc69f015b5d9b3a1b1ec42
|
119e0655c3a2e1418bb67e42083e492546998f2e
|
/scripts_queue/space_time_analysis_part2_11282013.R
|
781995339c1aaa5b9b041b142c4801c3527eca2b
|
[] |
no_license
|
dahcase/space_time_lucc
|
435041b61700d481c86d02704d2151c8376e4e82
|
67fe9dca7a496714478fa1ce860e0cfbfc9fe603
|
refs/heads/master
| 2021-05-07T17:55:50.016325
| 2017-11-01T05:26:11
| 2017-11-01T05:26:11
| 108,768,679
| 0
| 1
| null | 2017-10-29T20:28:16
| 2017-10-29T20:28:16
| null |
UTF-8
|
R
| false
| false
| 6,375
|
r
|
space_time_analysis_part2_11282013.R
|
###Loading R library and packages
library(sp)
library(rgdal)
library(BMS) #contains hex2bin and bin2hex
library(bitops)
library(gtools)
library(parallel)
library(rasterVis)
library(raster)
library(forecast)
library(xts)
library(zoo)
library(lubridate)
### Parameters and arguments
# Exploratory MODIS NDVI space-time analysis for the Yucatan (EDGY) region:
# builds a raster time-series stack, extracts pixel values around the
# 2007-08-17 hurricane event, and fits ARIMA models per pixel.
# NOTE(review): all paths are absolute and machine-specific; the script is
# not portable as written.
in_dir<- "/Users/benoitparmentier/Dropbox/Data/Space_Time"
out_dir<- "/Users/benoitparmentier/Dropbox/Data/Space_Time"
setwd(out_dir)  # NOTE(review): setwd() in a script is a side effect on the session
function_analyses_paper <- "MODIS_and_raster_processing_functions_10182013.R"
script_path <- in_dir #path to script functions
source(file.path(script_path,function_analyses_paper)) #source all functions used in this script.
#This is the shape file of outline of the study area #It is an input/output of the covariate script
infile_reg_outline <- "/Users/benoitparmentier/Dropbox/Data/Space_Time/GYRS_MX_trisate_sin_windowed.shp" #input region outline defined by polygon: Oregon
#ref_rast_name<-"" #local raster name defining resolution, exent, local projection--. set on the fly??
ref_rast_name<-"/Users/benoitparmentier/Dropbox/Data/Space_Time/gyrs_sin_mask_1km_windowed.rst" #local raster name defining resolution, exent: oregon
ref_samp4_name <-"/Users/benoitparmentier/Dropbox/Data/Space_Time/reg_Sample4.rst"
ref_EDGY_name <-"/Users/benoitparmentier/Dropbox/Data/Space_Time/reg_EDGY_mask_sin_1km.rst"
ref_egg_rings_gyr_name <-"/Users/benoitparmentier/Dropbox/Data/Space_Time/reg_egg_rings_gyr.rst"
infile_modis_grid<-"/Users/benoitparmentier/Dropbox/Data/Space_Time/modis_sinusoidal_grid_world.shp" #modis grid tiling system, global
# MODIS sinusoidal projection string used for every raster below.
proj_modis_str <-"+proj=sinu +lon_0=0 +x_0=0 +y_0=0 +a=6371007.181 +b=6371007.181 +units=m +no_defs"
#CRS_interp <-"+proj=longlat +ellps=WGS84 +datum=WGS84 +towgs84=0,0,0" #Station coords WGS84
CRS_interp <- proj_modis_str
out_suffix <-"11272013" #output suffix appended to generated file names
## Other specific parameters
NA_flag_val<- -9999
# Collect the 16-day NDVI mosaics into a raster stack (one layer per date).
reg_var_list <- list.files(path=in_dir,pattern="reg_mosaiced_MOD13A2_A.*.__005_1_km_16_days_NDVI_09242013_09242013.rst$")
r_stack <- stack(reg_var_list)
levelplot(r_stack,layers=1:12) #show first 12 images (half a year more or less)
# Load reference/mask rasters and force them all onto the MODIS projection.
ref_rast_r <- raster(ref_rast_name)
ref_samp4_r <- raster(ref_samp4_name)
mask_EDGY_r <- raster(ref_EDGY_name) #create raster image
egg_rings_gyr_r <- raster(ref_egg_rings_gyr_name)
projection(mask_EDGY_r) <- proj_modis_str #assign projection coord defined earlier
projection(ref_samp4_r) <- proj_modis_str
projection(ref_rast_r) <- proj_modis_str
projection(egg_rings_gyr_r) <- proj_modis_str
# Build companion rasters holding the x/y coordinate and a pixel id for
# every cell, then stack them with the masks as a per-pixel attribute set.
r_x <- ref_rast_r #create raster image that will contain x coordinates
r_y <- ref_rast_r #create raster image that will contain y coordiates
values(r_x) <- coordinates(ref_rast_r)[,1] #set values in raster image to x coord
values(r_y) <- coordinates(ref_rast_r)[,2] #set values in raster image to y coord
pix_id_r <- ref_rast_r
values(pix_id_r) <- 1:ncell(ref_rast_r)
s_dat_var <-stack(pix_id_r,r_x,r_y,egg_rings_gyr_r,mask_EDGY_r,ref_samp4_r)
layerNames(s_dat_var) <- c("pix_id_r","r_x","r_y","egg_rings_gyr_r",
"mask_EDGY_r","ref_samp4_r")
projection(s_dat_var) <- proj_modis_str
plot(s_dat_var)
## Now extract data for only the EDGY region of Yucatan
EDGY_spdf <- as(mask_EDGY_r,"SpatialPointsDataFrame") #create a SpatialPointsDataFrame
data_EDGY<- extract(r_stack,EDGY_spdf) #extract pixels with NDVI in EDGY area in a matrix
s_dat_var_EDGY <- extract(s_dat_var,EDGY_spdf) #extract pixels with attributes in EDGY area in a matrix
EDGY_dat_spdf<- cbind(data_EDGY,s_dat_var_EDGY) #Add columns from matrix to EDGY
#EDGY_dat_spdf<- EDGY_spdf
#Save spdf as .RData object
save(EDGY_dat_spdf,file= file.path(out_dir,paste("EDGY_dat_spdf_",out_suffix,".RData",sep="")))
#Save spdf as shapefile, note that column names can be truncated
outfile1<-file.path(out_dir,paste("EDGY_dat_spdf","_",out_suffix,".shp",sep=""))
writeOGR(EDGY_dat_spdf,dsn= dirname(outfile1),layer= sub(".shp","",basename(outfile1)), driver="ESRI Shapefile",overwrite_layer=TRUE)
#Save spdf as delimited csv text file.
outfile1<-file.path(out_dir,paste("EDGY_dat_spdf","_",out_suffix,".txt",sep=""))
write.table(as.data.frame(EDGY_dat_spdf),file=outfile1,sep=",")
#Hurricane August 17, 2007
# Day-of-year of the hurricane landfall; used to locate the matching layer.
day_event<-strftime(as.Date("2007.08.17",format="%Y.%m.%d"),"%j")
#153,154
layerNames(r_stack)
grep(paste("2007",day_event,sep=""),layerNames(r_stack))
### NOW ANALYSES WITH TIME AND SPACE...
#filename<-sub(".shp","",infile_reg_outline) #Removing the extension from file.
#interp_area <- readOGR(dsn=dirname(filename),basename(filename))
#CRS_interp<-proj4string(interp_area) #Storing the coordinate information: geographic coordinates longlat WGS84
#test <- EDGY_spdf[170:180,]
#plot(test,add=T)
# now extract these pixels and fit and arim for before period of hurrincane
r_w<-raster("cropped_area.rst")
r_w_spdf<-as(r_w,"SpatialPointsDataFrame")
pix_val <- extract(r_stack,r_w_spdf,df=TRUE)
# Visual inspection of one pixel's series around the event (layer ~153/154).
plot(pix_val[1,],type="b")
abline(v=153,col="red")
plot(pix_val[1,139:161],type="b")
abline(v=(154-139),col="red")
levelplot(r_stack,layer=152:155)
plot(subset(r_stack,154))
plot(egg_rings_gyr_r,add=T)
# Fit an ARIMA on the pre-event series and forecast two steps past it.
arima_mod <- auto.arima(pix_val[1,1:153])
p_arima<-predict(arima_mod,n.ahead=2)
plot(pix_val[1,152:155],type="b")
lines(c(pix_val[1,152:153],p_arima$pred),type="b",col="red")
# NOTE(review): raster_ts_arima/raster_ts_arima_predict are defined further
# down in this file, so these calls fail if the script is run top-to-bottom;
# source the function definitions first.
raster_ts_arima(pix_val[1,1:153],na.rm=T,c(1,0,0))
# NOTE(review): raster_ts_arima has no n_ahead argument; this call errors.
raster_ts_arima(pix_val[1,1:153],na.rm=T,arima_order=c(0,0,2),n_ahead=2)
# NOTE(review): `pix` is used here but only created two lines below.
acf(pix[1:153],na.action=na.pass)
tt<-raster_ts_arima_predict(pix[,1],na.rm=T,arima_order=NULL,n_ahead=2)
pix <- as.data.frame(t(pix_val[,1:153]))
# Per-pixel ARIMA forecasts, row-bound into one matrix of predictions.
ttx<-lapply(pix,FUN=raster_ts_arima_predict,na.rm=T,arima_order=NULL,n_ahead=2)
tt_dat<-do.call(rbind,ttx)
# Fit an ARIMA model of the given (p, d, q) order to a single pixel's
# time series and return the first estimated coefficient as a plain
# numeric scalar.
# NOTE(review): `na.rm` is accepted but never used by the body
# (stats::arima has no such argument); it is kept for call compatibility.
raster_ts_arima <- function(pixel, na.rm = T, arima_order) {
  fitted_model <- arima(pixel, order = arima_order)
  as.numeric(coef(fitted_model)[1])
}
# Fit an ARIMA model to a pixel time series and forecast `n_ahead` steps.
#
# pixel:       numeric vector (one pixel's time series).
# na.rm:       accepted for call compatibility; not used by the body
#              (stats::arima has no such argument).
# arima_order: (p, d, q) order for stats::arima; when NULL the model is
#              selected automatically with forecast::auto.arima().
# n_ahead:     number of steps to forecast (default 2).
#
# Returns a 1 x n_ahead matrix of point forecasts.
raster_ts_arima_predict <- function(pixel, na.rm = T, arima_order = NULL, n_ahead = 2) {
  if (is.null(arima_order)) {
    arima_mod <- auto.arima(pixel)
  } else {
    arima_mod <- arima(pixel, order = arima_order)
  }
  # BUG FIX: the forecast horizon was hard-coded to 2 in both branches,
  # silently ignoring the n_ahead argument; pass it through to predict().
  p_arima <- predict(arima_mod, n.ahead = n_ahead)
  y <- t(as.data.frame(p_arima$pred))
  return(y)
}
|
9d84e8b04c967b1aa91af25b7ad2e91feeb5a423
|
d683d982e78d02586b33e0444209b0603754d73e
|
/COVID_med_ckd.R
|
3fa6fb96a7e53a897a393e1a930b760452d88fb7
|
[] |
no_license
|
TSChang-Lab/preexisiting-conditions-HL-COVID19
|
a8f14cc039c1cd5d1f05550e27ceb7d1bf36ca1a
|
6355623b796b3e8c5bfbea85a1fcf82948c7989e
|
refs/heads/main
| 2023-03-08T01:47:03.829533
| 2021-02-05T07:26:10
| 2021-02-05T07:26:10
| 335,115,995
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 17,040
|
r
|
COVID_med_ckd.R
|
# Setup for the COVID medication/CKD enrichment analysis: load packages,
# fix analysis dates, and read the patient-level data frame `df`.
# NOTE(review): rm(list = ls()) wipes the session environment; avoid in
# scripts that may be sourced from another analysis.
rm(list = ls())
library('openxlsx')
# The next three packages are loaded from a user-specific Windows network
# library path; this will not resolve on other machines.
library('tidyr', lib = '\\\\netshares.ccnet.ucla.edu/ctx_profiles/_EEG_XDR/_Redirect/YiDing/Documents/R/win-library/3.6')
library('dplyr', lib = '\\\\netshares.ccnet.ucla.edu/ctx_profiles/_EEG_XDR/_Redirect/YiDing/Documents/R/win-library/3.6')
library('logistf', lib = '\\\\netshares.ccnet.ucla.edu/ctx_profiles/_EEG_XDR/_Redirect/YiDing/Documents/R/win-library/3.6')
options(stringsAsFactors = FALSE)
# Dates identifying the patient snapshot and the derived data file;
# ANALYSIS_DATE stamps the output workbooks with today's date (MMDDYYYY).
PATIENT_DATE = '08312020'
DATA_CREATION_DATE = '09012020'
ANALYSIS_DATE = format(Sys.time(), "%m%d%Y")
data_dir = 'J:\\OHIA_Investigators\\YiDing\\covid19\\Data'
result_dir = 'J:\\OHIA_Investigators\\YiDing\\covid19\\results'
# 'before' selects the pre-event version of the patient data file.
time_string = 'before'
data_dir = file.path(data_dir, PATIENT_DATE)
result_dir = file.path(result_dir, PATIENT_DATE)
data_file = file.path(data_dir, sprintf('patient_data_%s_%s.csv', time_string, DATA_CREATION_DATE))
# One row per patient; row names are patient identifiers (column 1).
df = read.csv(data_file, row.names = 1)
# Firth (penalized) logistic regression enrichment test of each risk factor
# (medication or CKD stage) for a binary outcome within a cohort.
#
# risk_factors:      character vector of 0/1 column names in `df` to test.
# outcome_string:    name of the 0/1 outcome column (e.g. 'COVID').
# cohort_string:     name of the 0/1 cohort column the outcome is nested in
#                    (e.g. 'TESTED'); rows with either flag set are kept.
# df:                patient-level data frame; assumed to contain 0/1 columns
#                    TESTED/COVID/INPATIENT/SEVERE/NewToUCLA, the covariate
#                    columns, and SIRE_Ethnicity/SIRE_Race -- TODO confirm
#                    against the data dictionary.
# time_string:       accepted but not used inside this function.
# membership_string: 'NotNewToUCLA' restricts to established patients.
# adjust_string:     which covariate set to adjust for (see the if-chain).
# race_ethnicity:    'Hispanic', 'NonHispanic', 'NonHispanicWhite' or 'All'.
#
# Returns a data frame with one row per risk factor: odds ratio, 95% CI,
# Firth p-value, counts/frequencies in each cohort, and the age/age2/sex
# covariate estimates.
#
# NOTE(review): relies on the global `known_risk_factors` defined at script
# level when adjust_string is one of the '*_krf' options.
enrichment_test <- function(risk_factors, outcome_string,
cohort_string,
df,
time_string,
membership_string,
adjust_string = 'adj_SA', race_ethnicity = 'All'){
# subset data frame
condition = ((df[outcome_string ] == 1) | (df[cohort_string] == 1))
if (membership_string == 'NotNewToUCLA'){condition = (df['NewToUCLA'] == 0) & condition}
if (race_ethnicity == 'Hispanic'){
condition = condition & (df['SIRE_Ethnicity'] == 'Hispanic or Latino')
}else if (race_ethnicity == 'NonHispanic'){
condition = condition & (df['SIRE_Ethnicity'] == 'Not Hispanic or Latino')
}else if (race_ethnicity == 'NonHispanicWhite'){
condition = condition & ((df['SIRE_Ethnicity'] == 'Not Hispanic or Latino') & (df[,'SIRE_Race'] == 'White or Caucasian'))
}else if (race_ethnicity == 'All'){
condition = condition
}else{
stop('No this Ethnicity')
}
analysis_df = df[condition,]
# get covariates
if (adjust_string == 'crude'){
covariates = c()
}else if (adjust_string == 'adj_SA'){
covariates = c('age','age2', 'Sex')
}else if (adjust_string == 'adj_SAM'){
covariates = c('age','age2','Sex', 'NewToUCLA')
}else if(adjust_string == 'adj_SA_krf'){
covariates = c( 'age','age2','Sex', known_risk_factors)
}else if(adjust_string == 'adj_SAM_krf'){
covariates = c( 'age','age2','Sex', 'NewToUCLA', known_risk_factors)
}else{
stop('adjustment type not recognized')
}
# DDRSAMPLE cohorts are age-group matched, so add the matching covariate
# and tag the adjustment label accordingly.
if (cohort_string == 'DDRSAMPLE'){
covariates = c('AgeGroup', covariates)
adjust_string = paste0(adjust_string, '_', 'MC')
}
# initiate progress bar
total = length(risk_factors)
pb = txtProgressBar(min = 0, max = total, style = 3)
# hold results for each risk factor
results_df = data.frame('Membership' = character(), 'Outcome' = character(), 'Cohort' = character(), 'Adjust'= character(), 'ckd/medicine'= character(),
'Odds Ratio' = numeric(), '2.5%'= numeric(), '97.5%'= numeric(), 'ORCI' = character(),'Pvalue (Firth)'= numeric(),
'# ckd/medicine in Outcome+'=integer(), '# Outcome+'=integer(),
'# ckd/medicine in Outcome-'=integer(), '# Outcome-'=integer(),
'Freq ckd/medicine in Outcome+'= numeric(), 'Freq ckd/medicine in Outcome-'= numeric(),
'Freq ckd/medicine in SEVERE'= numeric(),'Freq ckd/medicine in INPATIENT'= numeric(),
'Freq ckd/medicine in COVID'= numeric(),'Freq ckd/medicine in TESTED'= numeric(),
'# ckd/medicine in SEVERE'=integer(), '# SEVERE' = integer(),
'# ckd/medicine in INPATIENT'=integer(), '# INPATIENT'=integer(),
'# ckd/medicine in COVID'=integer(),'# COVID'=integer(),
'# ckd/medicine in TESTED'=integer(), '# TESTED'=integer(),
'age coef' = numeric(), 'age 2.5%' = numeric(),'age 97.5%' = numeric(), 'age pvalue' = numeric(),
'age2 coef' = numeric(), 'age2 2.5%' = numeric(),'age2 97.5%' = numeric(), 'age2 pvalue' = numeric(),
'SexFemale' = numeric(), 'SexFemale 2.5%' = numeric(),'SexFemale 97.5%' = numeric(), 'SexFemale pvalue' = numeric())
for (i in seq(total)){
# update progress bar
setTxtProgressBar(pb, i)
# run logistic regression
rf = risk_factors[i]
model_covariates = c(covariates, rf)
f = as.formula( paste0(outcome_string, '~' ,paste(model_covariates, collapse = ' + '), sep = ' '))
# Firth-penalized logistic regression (handles separation / rare events).
model = logistf(formula = f, data = analysis_df)
# get odds ratio, ci and pvalue
p = model$prob[rf]
oddsratio = round(exp(coef(model)[rf]),2)
ci_lower = round(exp(confint(model)[rf,1]),2)
ci_upper = round(exp(confint(model)[rf,2]),2)
orci_string = sprintf('%s [%s, %s]', oddsratio, ci_lower, ci_upper)
# age
age_coef = signif(exp(coef(model)['age']),3)
age_lower = signif(exp(confint(model)['age',1]),3)
age_upper = signif(exp(confint(model)['age',2]),3)
age_p = model$prob['age']
# age2 -- coefficient scaled by 1000 before exponentiation (OR per
# 1000-unit change in age^2).
age2_coef = signif(exp(1000*coef(model)['age2']),3)
age2_lower = signif(exp(1000*confint(model)['age2',1]),3)
age2_upper = signif(exp(1000*confint(model)['age2',2]),3)
age2_p = model$prob['age2']
# SexFemale -- derived by negating the 'SexMale' term, which presumably
# is how the Sex factor is coded by the model matrix; CI bounds swap.
SexFemale_coef = signif(exp(- coef(model)['SexMale']),3)
SexFemale_lower = signif(exp(- confint(model)['SexMale',2]),3)
SexFemale_upper = signif(exp(- confint(model)['SexMale',1]),3)
SexFemale_p = model$prob['SexMale']
# frequency in positive and negative outcome
n_pos_rf = sum(analysis_df[rf] * analysis_df[outcome_string])
n_neg_rf = sum(analysis_df[rf] * (1-analysis_df[outcome_string]))
n_pos = sum(analysis_df[outcome_string])
n_neg = sum((1-analysis_df[outcome_string]))
freq_pos_rf = round(n_pos_rf/n_pos*100, 2)
freq_neg_rf = round(n_neg_rf/n_neg*100, 2)
# frequency in tested (computed on the full df, not the subset)
n_TESTED = sum(df['TESTED'])
n_TESTED_rf = sum(df['TESTED']*df[rf])
freq_TESTED_rf = round(n_TESTED_rf/n_TESTED*100,2)
# frequency in covid
n_COVID = sum(df['COVID'])
n_COVID_rf = sum(df['COVID']*df[rf])
freq_COVID_rf = round(n_COVID_rf/n_COVID*100,2)
# frequency in inpatient
n_INPATIENT = sum(df['INPATIENT'])
n_INPATIENT_rf = sum(df['INPATIENT']*df[rf])
freq_INPATIENT_rf = round(n_INPATIENT_rf/n_INPATIENT*100,2)
# frequency in severe
n_SEVERE = sum(df['SEVERE'])
n_SEVERE_rf = sum(df['SEVERE']*df[rf])
freq_SEVERE_rf = round(n_SEVERE_rf/n_SEVERE*100,2)
# Assemble one result row; order must match the column layout above.
result_row= list(membership_string, outcome_string, cohort_string, adjust_string, rf,
oddsratio, ci_lower, ci_upper,orci_string, p,
n_pos_rf, n_pos,
n_neg_rf, n_neg,
freq_pos_rf, freq_neg_rf,
freq_SEVERE_rf, freq_INPATIENT_rf, freq_COVID_rf, freq_TESTED_rf,
n_SEVERE_rf, n_SEVERE,
n_INPATIENT_rf, n_INPATIENT,
n_COVID_rf, n_COVID,
n_TESTED_rf, n_TESTED,
age_coef, age_lower, age_upper, age_p,
age2_coef, age2_lower, age2_upper, age2_p,
SexFemale_coef, SexFemale_lower, SexFemale_upper, SexFemale_p)
results_df[i,] = result_row
}
results_df = data.frame(results_df)
# Restore the human-readable column names (data.frame() mangles them).
colnames(results_df) = c('Membership', 'Outcome', 'Cohort', 'Adjust', 'ckd/medicine',
'Odds Ratio', '2.5%', '97.5%', 'ORCI','Pvalue (Firth)',
'# ckd/medicine in Outcome+', '# Outcome+',
'# ckd/medicine in Outcome-', '# Outcome-',
'Freq ckd/medicine in Outcome+', 'Freq ckd/medicine in Outcome-',
'Freq ckd/medicine in SEVERE','Freq ckd/medicine in INPATIENT','Freq ckd/medicine in COVID','Freq ckd/medicine in TESTED',
'# ckd/medicine in SEVERE', '# SEVERE',
'# ckd/medicine in INPATIENT', '# INPATIENT',
'# ckd/medicine in COVID','# COVID',
'# ckd/medicine in TESTED', '# TESTED',
'age OR' , 'age 2.5%' ,'age 97.5%' , 'age pvalue' ,
'age2 OR' , 'age2 2.5%' ,'age2 97.5%' , 'age2 pvalue' ,
'SexFemale OR' , 'SexFemale 2.5%' ,'SexFemale 97.5%' , 'SexFemale pvalue' )
return(results_df)
}
# --- Analysis 1: medications -------------------------------------------------
# For each outcome/cohort pair (COVID|TESTED, INPATIENT|COVID,
# SEVERE|INPATIENT) and each race/ethnicity stratum, run enrichment_test on
# the medication columns, adjusting for sex, age, age^2 and the known risk
# factors, and write one Excel sheet per comparison.
membership_string = 'NotNewToUCLA'
known_risk_factors= c('chf', 'diabetes', 'hyperlipidemia', 'hypertension', 'obesity', 'ckd', 'copd', 'chd')
compare_list = list( c('COVID', 'TESTED'), c('INPATIENT', 'COVID'), c('SEVERE', 'INPATIENT'))
risk_factors = c('ImmunoSuppressants', 'steroids', 'ACEInhibitors', 'ARBs', 'AntiCoagulants', 'NSAID', 'SSRI')
sheet_list = list()
race_ethnicity_group = c('Hispanic', 'NonHispanicWhite', 'All')
for(compare in compare_list){
outcome_string = compare[1]
cohort_string = compare[2]
count = 1
# Empty shell with the final column layout; strata results are appended
# below, each preceded by a label row carrying the stratum name.
results_df = data.frame('Membership' = character(), 'Outcome' = character(), 'Cohort' = character(), 'Adjust'= character(), 'ckd/medicine'= character(),
'Odds Ratio' = numeric(), '2.5%'= numeric(), '97.5%'= numeric(), 'ORCI' = character(),'Pvalue (Firth)'= numeric(),
'# ckd/medicine in Outcome+'=integer(), '# Outcome+'=integer(),
'# ckd/medicine in Outcome-'=integer(), '# Outcome-'=integer(),
'Freq ckd/medicine in Outcome+'= numeric(), 'Freq ckd/medicine in Outcome-'= numeric(),
'Freq ckd/medicine in SEVERE'= numeric(),'Freq ckd/medicine in INPATIENT'= numeric(),
'Freq ckd/medicine in COVID'= numeric(),'Freq ckd/medicine in TESTED'= numeric(),
'# ckd/medicine in SEVERE'=integer(), '# SEVERE' = integer(),
'# ckd/medicine in INPATIENT'=integer(), '# INPATIENT'=integer(),
'# ckd/medicine in COVID'=integer(),'# COVID'=integer(),
'# ckd/medicine in TESTED'=integer(), '# TESTED'=integer(),
'age OR' = numeric(), 'age 2.5%' = numeric(),'age 97.5%' = numeric(), 'age pvalue' = numeric(),
'age2 OR' = numeric(), 'age2 2.5%' = numeric(),'age2 97.5%' = numeric(), 'age2 pvalue' = numeric(),
'SexFemale OR' = numeric(), 'SexFemale 2.5%' = numeric(),'SexFemale 97.5%' = numeric(), 'SexFemale pvalue' = numeric())
for (race_ethnicity in race_ethnicity_group){
results_df[count,1] = race_ethnicity
sub_results_df = enrichment_test(risk_factors, outcome_string,
cohort_string,
df,
time_string,
membership_string,
adjust_string = 'adj_SA_krf', race_ethnicity)
results_df[(count+1):(count + nrow(sub_results_df)),] = sub_results_df
count = count + nrow(sub_results_df) + 1
}
colnames(results_df) = c('Membership', 'Outcome', 'Cohort', 'Adjust', 'ckd/medicine',
'Odds Ratio', '2.5%', '97.5%', 'ORCI','Pvalue (Firth)',
'# ckd/medicine in Outcome+', '# Outcome+',
'# ckd/medicine in Outcome-', '# Outcome-',
'Freq ckd/medicine in Outcome+', 'Freq ckd/medicine in Outcome-',
'Freq ckd/medicine in SEVERE','Freq ckd/medicine in INPATIENT','Freq ckd/medicine in COVID','Freq ckd/medicine in TESTED',
'# ckd/medicine in SEVERE', '# SEVERE',
'# ckd/medicine in INPATIENT', '# INPATIENT',
'# ckd/medicine in COVID','# COVID',
'# ckd/medicine in TESTED', '# TESTED',
'age OR' , 'age 2.5%' ,'age 97.5%' , 'age pvalue' ,
'age2 OR' , 'age2 2.5%' ,'age2 97.5%' , 'age2 pvalue' ,
'SexFemale OR' , 'SexFemale 2.5%' ,'SexFemale 97.5%' , 'SexFemale pvalue' )
sheet_list[[paste0(outcome_string, 'in', cohort_string)]] = results_df
}
output_file = file.path(result_dir, sprintf('medication_krf_%s.xlsx', ANALYSIS_DATE))
write.xlsx(sheet_list, file = output_file)
# --- Analysis 2: CKD stages --------------------------------------------------
# NOTE(review): this block duplicates the medication analysis above with a
# different risk-factor set and output name; consider factoring the loop
# into a function to avoid drift between the two copies.
membership_string = 'NotNewToUCLA'
known_risk_factors= c('chf', 'diabetes', 'hyperlipidemia', 'hypertension', 'obesity', 'ckd', 'copd', 'chd')
compare_list = list( c('COVID', 'TESTED'), c('INPATIENT', 'COVID'), c('SEVERE', 'INPATIENT'))
risk_factors = c('ckd_any', 'ckd_1', 'ckd_2', 'ckd_3', 'ckd_4', 'ckd_5')
sheet_list = list()
race_ethnicity_group = c('Hispanic', 'NonHispanicWhite', 'All')
for(compare in compare_list){
outcome_string = compare[1]
cohort_string = compare[2]
count = 1
results_df = data.frame('Membership' = character(), 'Outcome' = character(), 'Cohort' = character(), 'Adjust'= character(), 'ckd/medicine'= character(),
'Odds Ratio' = numeric(), '2.5%'= numeric(), '97.5%'= numeric(), 'ORCI' = character(),'Pvalue (Firth)'= numeric(),
'# ckd/medicine in Outcome+'=integer(), '# Outcome+'=integer(),
'# ckd/medicine in Outcome-'=integer(), '# Outcome-'=integer(),
'Freq ckd/medicine in Outcome+'= numeric(), 'Freq ckd/medicine in Outcome-'= numeric(),
'Freq ckd/medicine in SEVERE'= numeric(),'Freq ckd/medicine in INPATIENT'= numeric(),
'Freq ckd/medicine in COVID'= numeric(),'Freq ckd/medicine in TESTED'= numeric(),
'# ckd/medicine in SEVERE'=integer(), '# SEVERE' = integer(),
'# ckd/medicine in INPATIENT'=integer(), '# INPATIENT'=integer(),
'# ckd/medicine in COVID'=integer(),'# COVID'=integer(),
'# ckd/medicine in TESTED'=integer(), '# TESTED'=integer(),
'age OR' = numeric(), 'age 2.5%' = numeric(),'age 97.5%' = numeric(), 'age pvalue' = numeric(),
'age2 OR' = numeric(), 'age2 2.5%' = numeric(),'age2 97.5%' = numeric(), 'age2 pvalue' = numeric(),
'SexFemale OR' = numeric(), 'SexFemale 2.5%' = numeric(),'SexFemale 97.5%' = numeric(), 'SexFemale pvalue' = numeric())
for (race_ethnicity in race_ethnicity_group){
results_df[count,1] = race_ethnicity
sub_results_df = enrichment_test(risk_factors, outcome_string,
cohort_string,
df,
time_string,
membership_string,
adjust_string = 'adj_SA_krf', race_ethnicity)
results_df[(count+1):(count + nrow(sub_results_df)),] = sub_results_df
count = count + nrow(sub_results_df) + 1
}
colnames(results_df) = c('Membership', 'Outcome', 'Cohort', 'Adjust', 'ckd/medicine',
'Odds Ratio', '2.5%', '97.5%', 'ORCI','Pvalue (Firth)',
'# ckd/medicine in Outcome+', '# Outcome+',
'# ckd/medicine in Outcome-', '# Outcome-',
'Freq ckd/medicine in Outcome+', 'Freq ckd/medicine in Outcome-',
'Freq ckd/medicine in SEVERE','Freq ckd/medicine in INPATIENT','Freq ckd/medicine in COVID','Freq ckd/medicine in TESTED',
'# ckd/medicine in SEVERE', '# SEVERE',
'# ckd/medicine in INPATIENT', '# INPATIENT',
'# ckd/medicine in COVID','# COVID',
'# ckd/medicine in TESTED', '# TESTED',
'age OR' , 'age 2.5%' ,'age 97.5%' , 'age pvalue' ,
'age2 OR' , 'age2 2.5%' ,'age2 97.5%' , 'age2 pvalue' ,
'SexFemale OR' , 'SexFemale 2.5%' ,'SexFemale 97.5%' , 'SexFemale pvalue' )
sheet_list[[paste0(outcome_string, 'in', cohort_string)]] = results_df
}
output_file = file.path(result_dir, sprintf('ckd_krf_%s.xlsx', ANALYSIS_DATE))
write.xlsx(sheet_list, file = output_file)
|
f4de91b99e6c94da4ac497839d1f4ea34b898b62
|
aac2ab584738d2184ce07d6478fdd13c8dd65056
|
/exercise-1/exercise.R
|
104218b3f110f150e214304ff2fb74c28507cfab
|
[
"MIT"
] |
permissive
|
stphnhng/module9-dataframes
|
f7e2893c114e35ff4d1040d236dbfea92b394a54
|
7964cfd11b35d7019a6bca023d3f6f3e7a13105b
|
refs/heads/master
| 2021-01-11T18:57:32.344320
| 2017-01-19T23:19:51
| 2017-01-19T23:19:51
| 79,280,564
| 0
| 0
| null | 2017-01-17T22:49:33
| 2017-01-17T22:49:33
| null |
UTF-8
|
R
| false
| false
| 1,233
|
r
|
exercise.R
|
# Exercise 1: Creating data frames

# Points the Seahawks scored in each game of the season.
points <- c(20, 31, 31, 26, 5, 40, 10, 24, 31, 25, 26, 20)

# Points scored against the Seahawks in each game.
opponent.points <- c(25, 25, 24, 15, 5, 7, 38, 3, 34, 23, 6, 36)

# Combine the two vectors into a data frame.
# BUG FIX: the original referenced undefined variables `seahawks.points`
# and `seahawks.opponent.points`, which aborted the script here; use the
# vectors actually defined above.
seahawks.data <- data.frame(points, opponent.points, stringsAsFactors = FALSE)

# "diff": point differential from the Seahawks' perspective
# (positive means the Seahawks outscored their opponent).
# BUG FIX: the original computed opponent.points - points and then flagged
# diff > 0 as a win, which marked losses as wins.
score.diff <- points - opponent.points
seahawks.data$diff <- score.diff

# "won": TRUE when the Seahawks scored more than their opponent
# (a tie, e.g. game 5 at 5-5, counts as not won).
seahawks.data$won <- score.diff > 0

# Opponent names corresponding to the games played.
opponent.names <- c("Saints","Bills","Patriots","Eagles","Buccaneers","Panthers","Packers","Rams","Cardinals","49ers","Lions","Falcons")

# The exercise asks for rownames, so set them; keep the column as well for
# compatibility with the original script's output shape.
rownames(seahawks.data) <- opponent.names
seahawks.data$opponents <- opponent.names

# Inspect the result; View() only works in interactive sessions.
if (interactive()) View(seahawks.data) else print(seahawks.data)
|
635fcf06b894e0185bea1ebf746a58fcc8bf940a
|
4fa10361f4cb3a7e01618acd898db278ae9d3546
|
/non-pipeline Code/MAPK.pathway.in.luminal.LM.R
|
a00a776b36d184e562b1786aebc136d7509584a9
|
[] |
no_license
|
dudious/QCRI-SIDRA-ICR
|
2e8005e518a51ffba8a7c02adbf4ceb3206af3a2
|
c38d69b7eb523cb6f5e869d8a2220a13abeb4944
|
refs/heads/master
| 2021-04-18T23:35:44.960473
| 2019-05-22T09:04:38
| 2019-05-22T09:04:38
| 32,503,747
| 7
| 2
| null | null | null | null |
UTF-8
|
R
| false
| false
| 10,902
|
r
|
MAPK.pathway.in.luminal.LM.R
|
# Setup environment
#rm(list=ls())
## dependencies
## install java for xlsx export
## download TCGA assembler scripts http://www.compgenome.org/TCGA-Assembler/
required.packages <- c("xlsx","plyr","ggplot2","reshape","plotrix")
missing.packages <- required.packages[!(required.packages %in% installed.packages()[,"Package"])]
if(length(missing.packages)) install.packages(missing.packages)
library (plyr)
library (reshape)
library (ggplot2)
library(mygene)
library(plotrix)
setwd("~/Dropbox/BREAST_QATAR/")
# Load expression data
load ("./2 DATA/LM.BRCA/LM.Dataset.split.Rdata")
Expression.Data<-Expression.Data[,-1955]
#MAPK.genes <- read.csv ("./3 ANALISYS/DIFFERENTIAL EXPRESSION/15Oct/MAPKs/genes.from.MAPK.pathway.csv") FROM our pathway analysis
#colnames(MAPK.genes) <-"entrez"
#gene.names <- queryMany(MAPK.genes$entrez, scopes="entrezgene", fields=c("symbol", "go"), species="human")
#gene.table <- as.data.frame(gene.names)
#MAPK.genes$symbol <- gene.table$symbol[match(MAPK.genes$entrez,gene.table$query)]
#MAPK.genes <- unique(MAPK.genes)
#Genes selection
#MAPK.genes <- read.csv ("./3 ANALISYS/MAPK/MAPK.pathway.genes.csv",header = FALSE,stringsAsFactors = FALSE)
#MAPK.genes <- MAPK.genes[,2,drop=FALSE]
#colnames(MAPK.genes) <- "symbol"
#MAPK.genes <- DEGinICR4vs1.MAPK.PW
#MAPK.genes$symbol <- rownames(MAPK.genes)
#LM UPREGULATED
MAPK.genes <- DEGinMUTvsWT.lum.MAPK.PW.UP
MAPK.genes$symbol <- rownames(MAPK.genes)
Available.probes <- data.frame(Probe.ID=Gene.Meta.data$Affy_Probe_ID[Gene.Meta.data$Symbol %in% MAPK.genes$symbol],Symbol=Gene.Meta.data$Symbol[Gene.Meta.data$Symbol %in% MAPK.genes$symbol])
rownames(Available.probes) <- Available.probes$Probe.ID
MAPk.expresion.matrix <- Expression.Data[rownames(Available.probes),]
#load cluster data
Consensus.class <- read.csv(paste0("./3 ANALISYS/CLUSTERING/MA/LM.Dataset/LM.Dataset.MA.k7.DBGS3.reps5000/LM.Dataset.MA.k7.DBGS3.reps5000.k=4.consensusClass.ICR.csv"),header=TRUE) # select source data
#select data
heatmap.table <-as.data.frame(t(MAPk.expresion.matrix))
heatmap.table$subtype <- Sample.Meta.data$PAM50_SUBTYPE[match(rownames(heatmap.table),rownames(Sample.Meta.data))]
heatmap.table$cluster <- Consensus.class$Group[match(rownames(heatmap.table),Consensus.class$PatientID)]
heatmap.table <- heatmap.table[complete.cases(heatmap.table),]
#Filter
heatmap.table <- heatmap.table[heatmap.table$subtype == "LumA" | heatmap.table$subtype == "LumB",]
#heatmap.table <- heatmap.table[heatmap.table$subtype == "LumB",]
heatmap.table <- heatmap.table[heatmap.table$cluster == "ICR1" | heatmap.table$cluster == "ICR4",]
#split data and create color mapping
heatmap.matrix <- as.matrix(heatmap.table[,1:(ncol(heatmap.table)-2)])
mode(heatmap.matrix) <- "numeric"
heatmap.meta <- heatmap.table[,(ncol(heatmap.table)-1):ncol(heatmap.table)]
heatmap.matrix <- heatmap.matrix[row.names(heatmap.meta),]
means.UP <- as.data.frame(rowMeans(heatmap.matrix))
colnames(means.UP)<- "means.UP"
heatmap.meta <- cbind(heatmap.meta,means.UP$means.UP)
heatmap.meta <- heatmap.meta[order(heatmap.meta$means),]
heatmap.genes <- Available.probes[colnames(heatmap.matrix),]
#combined z-score
heatmap.meta <- heatmap.meta[rownames(means.BOTH),]
# --- Heatmap: LM up-regulated signature, luminal ICR1 vs ICR4 samples ---
# Align the expression matrix rows with the (ordered) sample metadata.
heatmap.matrix <- heatmap.matrix[row.names(heatmap.meta),]
# ICR consensus-cluster side-bar colours (ICR4 red ... ICR1 blue).
cluster.colors <- heatmap.meta$cluster
levels (cluster.colors) <- c(levels (cluster.colors),c("#FF0000","#FFA500","#00FF00","#0000FF")) # Apply color scheme to patients
cluster.colors[cluster.colors=="ICR4"] <- "#FF0000"
cluster.colors[cluster.colors=="ICR3"] <- "#FFA500"
cluster.colors[cluster.colors=="ICR2"] <- "#00FF00"
cluster.colors[cluster.colors=="ICR1"] <- "#0000FF"
cluster.colors <- as.character(cluster.colors)
# PAM50 subtype side-bar colours.
subtype.colors <-heatmap.meta$subtype
levels (subtype.colors) <- c(levels (subtype.colors),c("#eaff00","#00c0ff","#da70d6","#daa520","#d3d3d3","#000000"))
subtype.colors[subtype.colors=="LumA"] <- "#eaff00"
subtype.colors[subtype.colors=="LumB"] <- "#00c0ff"
subtype.colors[subtype.colors=="Basal"] <- "#da70d6"
subtype.colors[subtype.colors=="Her2"] <- "#daa520"
subtype.colors[subtype.colors=="Normal"] <- "#d3d3d3"
subtype.colors[subtype.colors=="ClaudinLow"] <- "#000000"
subtype.colors <- as.character(subtype.colors)
# Black-to-red side bar for the MAPK score. NOTE(review): the second call
# immediately overwrites the first, so the bar actually shows the combined
# rank from means.BOTH -- which is only built in the "combined z-score"
# section near the bottom of this script; that section must be run first.
MAPK.color.scale <- color.scale(heatmap.meta$means,cs1 = c(0,1),cs2=c(0,0),cs3=c(0,0),alpha=1,extremes=NA,na.color=NA)
MAPK.color.scale <- color.scale(means.BOTH$BOTH.rank,cs1 = c(0,1),cs2=c(0,0),cs3=c(0,0),alpha=1,extremes=NA,na.color=NA)
# Row labels: "Symbol - ProbeID".
heatmap.genes$label <- paste0(heatmap.genes$Symbol," - ",heatmap.genes$Probe.ID)
source ("~/Dropbox/R-projects/QCRI-SIDRA-ICR/R tools/heatmap.3.R")
# Three column side bars: ICR cluster, PAM50 subtype, MAPK rank.
meta.matrix <- as.matrix(rbind(cluster.colors,subtype.colors,MAPK.color.scale))
meta.matrix<- t(meta.matrix)
my.palette <- colorRampPalette(c("blue", "yellow", "red"))(n = 299)
my.colors <- c(seq(-10,-1,length=100),seq(-1,1,length=100),seq(1,10,length=100))
#dev.new(width=6, height=6)
png(paste0("./4 FIGURES/Heatmaps/DEG_heatmap.LM.LUM.SIGN.UP.rankmixorder.ICR1vs4.png"),res=600,height=10,width=14,unit="in")
heatmap.3((t(heatmap.matrix)),
          main = "SIGN.UP.MUTvsWT",
          ColSideColors = meta.matrix,
          col=my.palette,
          breaks=my.colors,
          trace = "none",
          scale ="row",
          labCol = FALSE,
          labRow = heatmap.genes$label,
          margins=c(2,10),
          Colv = FALSE
          )
par(lend = 1)
#legend("left",legend = c("ICR4","ICR3","ICR2","ICR1"),
#       col = c("red","orange","green","blue"),lty= 1,lwd = 5,cex = 1.3)
legend("topright",legend = c("Luminal A","Luminal B","Basal-like","HER2-enriched","Normal-like","","ICR4","ICR1"),
       col = c("#eaff00","#00c0ff","#da70d6","#daa520","#d3d3d3","white","red","blue"),lty= 1,lwd = 5,cex = 0.6)
dev.off()
# Correlation between ICR immune score and the MAPK score of these samples.
immunescore<-read.csv("./3 ANALISYS/IMMUNOSCORE/immunoscore.TCGA.BRCA.LMDATA.csv",row.names = 1)
immunescore<-immunescore[rownames(heatmap.meta),,drop=FALSE]
cor.test(immunescore$unscaled.score,heatmap.meta$means)
#LM DOWNREGULATED
# Same pipeline for the down-regulated signature; expression values are
# sign-flipped below so that "more down-regulated" plots in the same
# direction as the up-regulated heatmap.
MAPK.genes <- DEGinMUTvsWT.lum.MAPK.PW.DOWN
MAPK.genes$symbol <- rownames(MAPK.genes)
# Look up the array probes available for the signature genes.
Available.probes <- data.frame(Probe.ID=Gene.Meta.data$Affy_Probe_ID[Gene.Meta.data$Symbol %in% MAPK.genes$symbol],Symbol=Gene.Meta.data$Symbol[Gene.Meta.data$Symbol %in% MAPK.genes$symbol])
rownames(Available.probes) <- Available.probes$Probe.ID
MAPk.expresion.matrix <- Expression.Data[rownames(Available.probes),]
#reverse expression (sign-flip, see note above)
MAPk.expresion.matrix <- as.matrix(MAPk.expresion.matrix)
mode (MAPk.expresion.matrix) <- "numeric"
MAPk.expresion.matrix <- -(MAPk.expresion.matrix)
#load cluster data
Consensus.class <- read.csv(paste0("./3 ANALISYS/CLUSTERING/MA/LM.Dataset/LM.Dataset.MA.k7.DBGS3.reps5000/LM.Dataset.MA.k7.DBGS3.reps5000.k=4.consensusClass.ICR.csv"),header=TRUE) # select source data
#select data: samples x probes, annotated with subtype and ICR cluster
heatmap.table <-as.data.frame(t(MAPk.expresion.matrix))
heatmap.table$subtype <- Sample.Meta.data$PAM50_SUBTYPE[match(rownames(heatmap.table),rownames(Sample.Meta.data))]
heatmap.table$cluster <- Consensus.class$Group[match(rownames(heatmap.table),Consensus.class$PatientID)]
heatmap.table <- heatmap.table[complete.cases(heatmap.table),]
#Filter: luminal samples in the two extreme ICR clusters only
heatmap.table <- heatmap.table[heatmap.table$subtype == "LumA" | heatmap.table$subtype == "LumB",]
#heatmap.table <- heatmap.table[heatmap.table$subtype == "LumB",]
heatmap.table <- heatmap.table[heatmap.table$cluster == "ICR1" | heatmap.table$cluster == "ICR4",]
#split data and create color mapping
heatmap.matrix <- as.matrix(heatmap.table[,1:(ncol(heatmap.table)-2)])
mode(heatmap.matrix) <- "numeric"
heatmap.meta <- heatmap.table[,(ncol(heatmap.table)-1):ncol(heatmap.table)]
heatmap.matrix <- heatmap.matrix[row.names(heatmap.meta),]
# Per-sample mean of the (sign-flipped) DOWN signature; used for ordering
# and for the combined z-score section below.
means.DOWN <- as.data.frame(rowMeans(heatmap.matrix))
colnames(means.DOWN)<- "means.DOWN"
heatmap.meta <- cbind(heatmap.meta,means.DOWN$means.DOWN)
heatmap.meta <- heatmap.meta[order(heatmap.meta$means),]
heatmap.genes <- Available.probes[colnames(heatmap.matrix),]
#combined z-score: re-order samples by the combined UP/DOWN rank
# (means.BOTH is built in the section at the bottom of this script).
heatmap.meta <- heatmap.meta[rownames(means.BOTH),]
heatmap.matrix <- heatmap.matrix[row.names(heatmap.meta),]
# Side-bar colours, as in the UP section above.
cluster.colors <- heatmap.meta$cluster
levels (cluster.colors) <- c(levels (cluster.colors),c("#FF0000","#FFA500","#00FF00","#0000FF")) # Apply color scheme to patients
cluster.colors[cluster.colors=="ICR4"] <- "#FF0000"
cluster.colors[cluster.colors=="ICR3"] <- "#FFA500"
cluster.colors[cluster.colors=="ICR2"] <- "#00FF00"
cluster.colors[cluster.colors=="ICR1"] <- "#0000FF"
cluster.colors <- as.character(cluster.colors)
subtype.colors <-heatmap.meta$subtype
levels (subtype.colors) <- c(levels (subtype.colors),c("#eaff00","#00c0ff","#da70d6","#daa520","#d3d3d3","#000000"))
subtype.colors[subtype.colors=="LumA"] <- "#eaff00"
subtype.colors[subtype.colors=="LumB"] <- "#00c0ff"
subtype.colors[subtype.colors=="Basal"] <- "#da70d6"
subtype.colors[subtype.colors=="Her2"] <- "#daa520"
subtype.colors[subtype.colors=="Normal"] <- "#d3d3d3"
subtype.colors[subtype.colors=="ClaudinLow"] <- "#000000"
subtype.colors <- as.character(subtype.colors)
# As in the UP section: the second color.scale() call overwrites the first
# with the combined UP/DOWN rank from means.BOTH.
MAPK.color.scale <- color.scale(heatmap.meta$means,cs1 = c(0,1),cs2=c(0,0),cs3=c(0,0),alpha=1,extremes=NA,na.color=NA)
MAPK.color.scale <- color.scale(means.BOTH$BOTH.rank,cs1 = c(0,1),cs2=c(0,0),cs3=c(0,0),alpha=1,extremes=NA,na.color=NA)
heatmap.genes$label <- paste0(heatmap.genes$Symbol," - ",heatmap.genes$Probe.ID)
source ("~/Dropbox/R-projects/QCRI-SIDRA-ICR/R tools/heatmap.3.R")
meta.matrix <- as.matrix(rbind(cluster.colors,subtype.colors,MAPK.color.scale))
meta.matrix<- t(meta.matrix)
# NOTE(review): ramp runs red->blue here, reversed relative to the UP
# heatmap -- presumably to compensate for the sign-flipped expression;
# confirm this is intentional.
my.palette <- colorRampPalette(c("red", "yellow", "blue"))(n = 299)
my.colors <- c(seq(-10,-1,length=100),seq(-1,1,length=100),seq(1,10,length=100))
#dev.new(width=6, height=6)
png(paste0("./4 FIGURES/Heatmaps/DEG_heatmap.LM.LUM.SIGN.DOWN.rankmixorder.ICR1vs4.png"),res=600,height=10,width=14,unit="in")
heatmap.3((t(heatmap.matrix)),
          main = "SIGN.DOWN.rev.MUTvsWT",
          ColSideColors = meta.matrix,
          col=my.palette,
          breaks=my.colors,
          trace = "none",
          scale ="row",
          labCol = FALSE,
          labRow = heatmap.genes$label,
          margins=c(2,10),
          Colv = FALSE
          )
legend("topright",legend = c("Luminal A","Luminal B","Basal-like","HER2-enriched","Normal-like","","ICR4","ICR1"),
       col = c("#eaff00","#00c0ff","#da70d6","#daa520","#d3d3d3","white","red","blue"),lty= 1,lwd = 5,cex = 0.6)
dev.off()
#combined z-score
# Combine the per-sample means of the UP signature (means.UP, built in the
# UP section of the full script) and the sign-flipped DOWN signature into a
# single rank used to order samples in both heatmaps above.
means.BOTH <- means.UP
means.BOTH$DOWN <- means.DOWN$means.DOWN[match(rownames(means.BOTH),rownames(means.DOWN))]
colnames(means.BOTH) <- c("UP","DOWN")
#means.BOTH$DOWN <- -(means.BOTH$DOWN)
means.BOTH$UP.rank<-rank(means.BOTH$UP)
means.BOTH$DOWN.rank<-rank(means.BOTH$DOWN)
# Column 5: mean of the raw scores; column 6: mean of the two ranks.
means.BOTH$BOTH <- rowMeans(means.BOTH[,c(1,2)])
means.BOTH$BOTH.rank <- rowMeans(means.BOTH[,c(3,4)])
means.BOTH <- means.BOTH[order(means.BOTH$BOTH.rank),]
|
dc8cfc384315290247b5ed09ee01b39f830efe64
|
bf9de74d9eb31f5211eac3a5e0d623f1345d22b1
|
/R/008.mapping_seed.R
|
12aa189020b018ab263449861f3aa966d692ea09
|
[
"MIT"
] |
permissive
|
ellisztamas/fecundity_components
|
85f6d46ac8b99ce3fd41faa45787385cb1e33a1b
|
82db42dcc50052e6ef3f488beb5487f73f958e75
|
refs/heads/master
| 2021-06-07T04:51:08.766653
| 2021-06-01T05:39:25
| 2021-06-01T05:39:25
| 139,826,874
| 1
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 750
|
r
|
008.mapping_seed.R
|
# This script peforms stepwise QTL mapping with R/QTL for one of seven traits.
# Which trait to map is determined by changing the index for the variable ix.
# This is based on input R/QTL files created with the script /R/rqtl_files.R
# There are six phenotypes:
# Trait codes accepted by perform_mapping(); pick a phenotype by indexing
# into this vector:
#   1 mass - seed mass
#   2 frut - fruits per plant
#   3 seed - seeds per fruit
#   4 tofu - total fecundity (estimated seeds/plant = frut * seed)
#   5 ffit - fruits per planted seedling (fitness measure, Agren et al. 2013)
#   6 tfit - seeds per planted seedling (fitness incorporating seed number)
#   7 surv - survival
traits <- c("mass", "frut", "seed", "tofu", "ffit", "tfit", "surv")
# parameters to input
set.seed(249) # fixed random seed so permutation results are reproducible
source('R/perform_mapping.R')
# Map trait 3 ("seed", seeds/fruit) with 10,000 permutations on 16 cores.
perform_mapping(traits[3], nperms = 10000, nclusters = 16)
|
22b4104c2b15933ebf2bbf76e52e67cfb69bfaaf
|
78ddd2410b2895224654d0159e1443512784ff77
|
/code/analyze/plotting/plot_gene_region_only_int.R
|
848578c532c0958e5ec7637e21c53fa112d3f200
|
[] |
no_license
|
hyperboliccake/introgression
|
935b330e3177154275afec752966517a0d4c37e7
|
afe29d2c8db7366b43261e5a6d78829ba43f1660
|
refs/heads/master
| 2020-05-21T19:18:02.952950
| 2019-03-18T21:57:35
| 2019-03-18T21:57:35
| 63,725,778
| 1
| 1
| null | 2019-04-09T18:21:21
| 2016-07-19T20:27:07
|
Python
|
UTF-8
|
R
| false
| false
| 6,648
|
r
|
plot_gene_region_only_int.R
|
# like plot_genes, except we want to plot a whole set of genes together to show a complete region
# also plot gaps and variants colored by whether they match each reference
library(ggplot2)
library(viridis)
require(grDevices)
source('../my_color_palette.R')
options(stringsAsFactors = FALSE) # keep strings as characters, not factors
# Genomic window to plot; one commented-out alternative region is kept below.
# r2259 -> SUL1
#region_start = 787000
#region_end = 794000
#chrm = 'II'
## r4560 -> SIR1
region_start = 917571 - 100
region_end = 921647 + 100
chrm = 'IV'
# save plot
fn = paste('/tigress/AKEY/akey_vol2/aclark4/projects/introgression/results/analysis/u3_i.001_tv_l1000_f.01/plots/region_chr', chrm, '_', region_start, '-', region_end,'.pdf', sep='')
pdf(fn, width = 10, height = 5)
# get strains
strains = read.table('strains.txt', header=F)
strains = t(strains)
row.names(strains) = NULL
strains = data.frame(strain=strains[3:nrow(strains),1])
# NOTE(review): the file-derived strain list is immediately replaced by a
# hard-coded set of four strains below -- confirm this is intentional.
#strains = data.frame(strain=c('yjm450', 'yjm320', 'yjm1399', 'yjm1355'))#
strains = data.frame(strain=c('yjm1304', 'yjm1202', 'yjm1199', 'yjm681'))#
strains$index=1:nrow(strains)
print(strains)#
## formatting parameters
#context_length = 200
genome_height = 1
num_strains = nrow(strains)
strain_height = 1
padding = .2
genome_width = genome_height/2
hmargin = .2 # for top and bottom
## plot overall outline, line for each strain etc
## base plot
## type n doesn't produce any points or lines
## first strain is at padding
plot(c(region_start, region_end),
     c(-genome_height - padding - hmargin,
       num_strains * (padding + strain_height) + hmargin),
     type = "n", xlab = "",
     ylab = "", main = "", xaxt='n', yaxt='n', xaxs='i', yaxs='i')
## move x axis label and title closer to axis
title(xlab = paste("Position on chromosome", chrm), line = 1.8, cex.lab=1.5)
## plot overall line for genome at bottom
rect(region_start,
     -genome_height/2 - padding - genome_width/2,
     region_end,
     -genome_height/2 - padding + genome_width/2,
     col = "gray50", border = "gray50")
## move position labels closer
axis(1, mgp=c(3, .5, 0))
## plot strain labels
positions = seq(padding + strain_height/2,
                (num_strains*(padding + strain_height)),
                (padding+strain_height))
axis(2, at=positions, labels=toupper(strains$strain[order(strains$index)]),
     las=1, cex.axis=1, mgp=c(3, .1, 0), tick=FALSE)
## plot gridlines for strains (alpha is 0, so currently invisible)
for (p in positions) {
  abline(a=p, b=0, col=alpha(my_color_palette[['nonintrogressed']],0), lwd=2)
}
## plot genes
# get set of all genes on this chromosome, then keep those overlapping the window
fn = paste('/tigress/AKEY/akey_vol2/aclark4/projects/introgression/results/analysis/S288c_chr',chrm,'_genes.txt', sep='')
gene_coords = read.table(fn, header=F, stringsAsFactors=F)
names(gene_coords) = c('gene', 'start', 'end')
genes = gene_coords[which((gene_coords$start > region_start &
                           gene_coords$start < region_end) |
                          (gene_coords$end > region_start &
                           gene_coords$end < region_end)),]
for (i in 1:nrow(genes)) {
  gene_name = genes[i,]$gene
  gene_start = max(region_start, genes[i,]$start)  # clip to window
  gene_end = min(region_end, genes[i,]$end)
  gene_length = gene_end - gene_start + 1
  # gene box: white underlay first, then the coloured box on top
  rect(gene_start,
       -genome_height - padding,
       gene_end,
       -padding,
       col = 'white', border = 'white')
  rect(gene_start,
       -genome_height - padding,
       gene_end,
       -padding,
       col = alpha(my_color_palette[['nonintrogressed']],1),
       border = my_color_palette[['nonintrogressed']])
  # gene name
  text((gene_start + gene_end) / 2, -genome_height/2-padding,
       labels = gene_name, adj=c(.5,.5), cex=1.3, col="white")
}
## plot variants
# One vertical tick per strain per variant, coloured by its code in the
# variants table: 'p' / 'c' / 'n' / '-'. NOTE(review): codes presumably mean
# matches-paradoxus / matches-cerevisiae / matches-neither / gap, based on
# the palette keys used -- confirm against the script that writes this file.
fn = 'gene_region_variants.txt'
variants = read.table(fn, header=T, stringsAsFactors=F)
for (i in 1:nrow(variants)) {
  if (i %% 100 == 0) {
    print(i)  # progress indicator
  }
  for (j in strains$index) {
    strain = strains[which(strains$index == j),]$strain
    x = variants[[strain]][i]
    ps = variants$ps[i]
    if (x == 'p') {
      #points(ps, positions[j], col = alpha(my_color_palette[['introgressed']],.5), pch='|',cex=.3)
      segments(ps, positions[j] - strain_height / 3, y1=positions[j] + strain_height/3, col = alpha(my_color_palette[['introgressed']],.5), lend='butt', lwd=.5)
    }
    else if (x == 'c') {
      #points(ps, positions[j], col = alpha(my_color_palette[['nonintrogressed']],.5),pch='|',cex=.3)
      segments(ps, positions[j] - strain_height / 3, y1=positions[j] + strain_height/3, col = alpha(my_color_palette[['nonintrogressed']],.5), lend='butt', lwd=.5)
    }
    else if (x == 'n') {
      #points(ps, positions[j], col = alpha('gray50',.5),pch='|',cex=.3)
      segments(ps, positions[j] - strain_height / 3, y1=positions[j] + strain_height/3, col = alpha('black',.5), lend='butt', lwd=.5)
    }
    else if (x == '-') {
      # gap: drawn fully transparent (alpha 0), i.e. effectively skipped
      segments(ps, positions[j] - strain_height / 3, y1=positions[j] + strain_height/4, col = alpha('black',0), lend='butt', lwd=.5)
      #segments(ps-.5, positions[j], x1=ps+.5, col = 'white',lend='butt')
    }
  }
}
## plot boxes around introgressed regions
fn = '/tigress/AKEY/akey_vol2/aclark4/projects/introgression/results/analysis/u3_i.001_tv_l1000_f.01/introgressed_blocks_filtered_par_u3_i.001_tv_l1000_f.01_summary_plus.txt'
region_summary = read.table(fn, stringsAsFactors=F, header=T)
for (i in 1:nrow(region_summary)) {
  if (region_summary[i,]$chromosome == chrm) {
    r_start = region_summary[i,]$start
    r_end = region_summary[i,]$end
    if ((r_start > region_start & r_start < region_end) |
        (r_end > region_start & r_end < region_end)) {
      # clip the introgressed block to the plotted window
      r_start = max(r_start, region_start)
      r_end = min(r_end, region_end)
      strain_index = strains[which(strains$strain == region_summary[i,]$strain),]$index
      print(strain_index)
      print(region_summary[i,]$strain)
      print(strains)
      print(which(strains$strain == region_summary[i,]$strain))
      rect(r_start,
           positions[strain_index]-strain_height/2,
           r_end,
           positions[strain_index]+strain_height/2,
           col = alpha(my_color_palette[['introgressed']],0), border=my_color_palette[['introgressed']])
    }
  }
}
## overall outline on top
rect(region_start,
     -genome_height - padding - hmargin,
     region_end,
     num_strains * (padding + strain_height) + hmargin,
     col = FALSE, border = "black")
dev.off()
|
e33d501347693056a7e1e174f53b4294d41aebde
|
225c02d2f1ab2425618f627f3936069e13c023fc
|
/updateWHO.R
|
189f5e3edfc5c54c6c0ce7dd94b75a59146a6401
|
[] |
no_license
|
SACEMA/COVID10k
|
0cc116d806fcfc9b9b8deafa5bbe9334fc3ad969
|
571a62b786d533f776636fc224b5af169b8cb62f
|
refs/heads/master
| 2021-03-21T11:48:42.643118
| 2020-04-06T02:30:07
| 2020-04-06T02:30:07
| 247,289,851
| 8
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,266
|
r
|
updateWHO.R
|
# Update the cached WHO situation-report data for African countries:
# reinstall the data2019nCoV package from GitHub and, when newer situation
# reports exist than the cached RDS, rebuild and save a long-format table of
# values keyed by country, measure (cases/deaths) and date.
suppressPackageStartupMessages({
  require(remotes)
  require(data.table)
})
# Single argument: path of the RDS cache to maintain.
.args <- if (interactive()) c("latest_who.rds") else commandArgs(trailingOnly = TRUE)
remotes::install_github("eebrown/data2019nCoV", upgrade = "always")
require(data2019nCoV)
ref <- data.table(WHO_SR)
# only work to do if updates (cache missing, or newest cached date is stale)
if (!file.exists(.args[1]) || (readRDS(.args[1])[.N, date] < ref[.N, Date])) {
  # Split column names into case-count vs death-count columns.
  # NOTE(review): ctys.deaths is computed but never used below.
  ctys.cases <- grep("deaths", colnames(ref), invert = T, value = TRUE)
  ctys.deaths <- grep("deaths", colnames(ref), value = TRUE)
  #' WARNING: assumes that Africa region remains a block
  #' that starts after Region.EasternMediterranean
  afr.cases <- c(
    ctys.cases[
      (which(ctys.cases == "Region.EasternMediterranean")+1) :
      (which(ctys.cases == "Region.African")-1)
    ],
    c("Sudan","Somalia","Djibouti","Tunisia","Morocco","Egypt")
  )
  # Melt to long format: one row per situation report per variable.
  SR <- melt(
    ref[,.SD,.SDcols = intersect(colnames(ref), c(afr.cases)), by=.(SituationReport, Date)],
    id.vars = c('SituationReport','Date'), variable.factor = F
  )
  # "Country.measure" variable names -> separate country / measure fields;
  # names without a dot are plain case counts.
  SR[, country := gsub("\\..+$","",variable) ]
  SR[, measure := gsub("^.+\\.(.+)$","\\1", variable) ]
  SR[measure == variable, measure := "cases" ]
  saveRDS(SR[,.(value),keyby=.(country,measure,date=Date)], tail(.args, 1))
}
|
ce2e4f081f15873ff58abd33ca933593dbe4a07c
|
dc3642ea21337063e725441e3a6a719aa9906484
|
/DevInit/R/P20/mics_probe_walls_floors.R
|
6283c9bd549ca5324d596aae3f7793c30dedd4f0
|
[] |
no_license
|
akmiller01/alexm-util
|
9bbcf613384fe9eefd49e26b0c841819b6c0e1a5
|
440198b9811dcc62c3eb531db95abef8dbd2cbc7
|
refs/heads/master
| 2021-01-18T01:51:53.120742
| 2020-09-03T15:55:13
| 2020-09-03T15:55:13
| 23,363,946
| 0
| 7
| null | null | null | null |
ISO-8859-1
|
R
| false
| false
| 3,058
|
r
|
mics_probe_walls_floors.R
|
####Function and setup####
library(Hmisc)
library(plyr)
library(foreign)
library(data.table)
####Run function####
# set our working directory, change this if using on another machine
wd <- "D:/Documents/Data/MICSauto/"
setwd(wd)
# List out all the directories in our wd, this is where our data is contained
dirs <- list.dirs(wd,full.names=TRUE)

# Substrings identifying a probed wall/floor material answer across the
# languages/codings used in the surveys (e.g. "terr" matches "terre"/"terra",
# "cemen"/"cimen" match cement/ciment).
material.patterns <- c("mud","dirt","wood","terr","cemen","cimen","metal","adobe")
material.regex <- paste(material.patterns, collapse = "|")

# TRUE when any value in x mentions one of the probed materials.
# (Equivalent to the original per-pattern sum(grepl(...)) > 0 test, which
# duplicated the same eight grepl() calls for each of hc3/hc5/hi8.)
has.material <- function(x) {
  any(grepl(material.regex, x, ignore.case = TRUE))
}

filenames <- c()
# Floor material mentions a probed material (hc3)
hc3s <- c()
# Wall material mentions a probed material (hc5)
hc5s <- c()
# Floor material, alternate variable name (hi8)
hi8s <- c()
# Storage type of hh1; used downstream as a proxy test for questionnaire phase
hh1s <- c()
# Loop through every dir (element 1 of list.dirs() is wd itself, so start at 2)
for(i in 2:length(dirs)){
  dir <- dirs[i]
  message(basename(dir))
  hh <- read.csv(paste0(dir,"/hh.csv"),as.is=TRUE,na.strings="",check.names=FALSE)
  filenames <- c(filenames,basename(dir))
  hc3s <- c(hc3s, has.material(hh$hc3))
  hc5s <- c(hc5s, has.material(hh$hc5))
  hi8s <- c(hi8s, has.material(hh$hi8))
  hh1s <- c(hh1s,typeof(hh$hh1))
}
# Assemble per-survey flags and list surveys whose floor (hc3) or wall (hc5)
# variable never mentions a probed material.
df <- data.frame(filenames,hc3s,hc5s,hi8s,hh1s)
# Manual correction: Algeria MICS4 codes wall materials numerically (see the
# French code list at the bottom of this file), so mark it as TRUE.
df[which(df$filenames=="Algeria_MICS4_Datasets"),]$hc5s<-TRUE
# Candidate problem surveys: no probed material found, but hh data present.
zeroes <- subset(df,(hc3s==FALSE | hc5s==FALSE) & hh1s!="NULL")
# Surveys known to lack sufficient housing data; exclude them from the list.
insufficient <- c(
  "Argentina_MICS4_Datasets"
  ,"State of Palestine_MICS4_Datasets"
  ,"Yemen MICS 2006 SPSS Datasets"
  ,"Cuba MICS 2006 SPSS Datasets"
  ,"Cuba_MICS4_Datasets"
  ,"Cuba_MICS5_Datasets"
  ,"Jamaica MICS 2005 SPSS Datasets"
)
zeroes <- subset(zeroes,!(filenames %in% insufficient))
nrow(zeroes)
df <- df[order(df$hh1s),]
#Argentina_MICS4_Datasets is missing HC5, but has HC3
# Likewise with State of Palestine_MICS4_Datasets
# likewise with Yemen MICS 2006 SPSS Datasets
# Algeria_MICS4_Datasets, hh$hc5 numeric code list (French labels):
# Motte de Terres 13 (clods of earth)
# Toub ou Terre Séchées 14 (mud brick / dried earth)
# Roseaux avec boue 21 (reeds with mud)
# Pierre avec boue 22 (stone with mud)
# Pierres avec Chaux/Ciment 32 (stone with lime/cement)
# Briques 33 (bricks)
# Planches de bois 36 (wooden planks)
# Parpaing 37 (cinder block)
# Autre (à préciser)......... 96 (other, to be specified)
|
b58d0cf50daffd22bd6144aacf212cf29072d2f5
|
dea51706c221e2b2813d62513b295ac99f6f82ae
|
/projects/2xmammals/tree.R
|
f1a4ca100273071133c15a6a2d24aa81ef82f291
|
[] |
no_license
|
gjuggler/greg-ensembl
|
47bcb82e29013866735b5af398965b83e02b6088
|
5564fc09bbb0e586c9a1999f21e713c119c490db
|
refs/heads/master
| 2016-09-11T12:37:14.557777
| 2013-12-18T13:31:46
| 2013-12-18T13:31:46
| 275,374
| 2
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 15,985
|
r
|
tree.R
|
require(ape)
# Extracts the NHX annotations from a tree and returns a list with the annotations and the
# tree string (with NHX stuff removed).
#
# Input `str` is either a filename or a Newick/NHX string. Each NHX comment
# block ([&&NHX:key=value:...]) is stripped; the labelled node is temporarily
# renamed to a unique placeholder ("zzz<i>zzz") so it can be located again
# after parsing; the tag lists are then attached to the returned phylo object
# as `$.tags`, a list indexed by node number.
# NOTE(review): calls `node.with.label` and `tree.is.leaf`, which are not
# defined in this chunk (a `tree.node.with.label` IS defined below -- the
# bare name here may be a typo); confirm they exist elsewhere in the file.
tree.read.nhx.old <- function(str) {
  if (file.exists(str)) {
    # We're reading a file -- load the file into a string.
    str <- readLines(str)
  }
  # Locate every "label[:bl][&&NHX...]" occurrence in the tree string.
  nhx.matches <- gregexpr("[,\\)\\( ]?([^,\\)\\(]+)?(:?\\d*\\.?\\d*)?\\[.*?\\]", str)
  matches <- nhx.matches[[1]]
  match.pos <- as.numeric(matches)
  match.len <- attr(matches, 'match.length')
  if (match.len[1] == -1) {
    # No NHX annotations at all: fall back to a plain Newick parse.
    return(tree.read(text=str))
  }
  # Skip the leading delimiter character captured by the pattern.
  match.pos <- match.pos + 1
  match.len <- match.len - 1
  #print(match.pos)
  nhx.strings <- substring(str, match.pos, match.pos+match.len-1)
  #print(nhx.strings)
  labels <- gsub("\\[&&NHX.*", "", nhx.strings)
  labels.no.bl <- gsub(":.*", "", labels)
  #print(labels.no.bl)
  # Go through and splice the stripped-down labels back into the string.
  for (i in 1:length(match.pos)) {
    new.label <- gsub(labels.no.bl[i], paste('zzz',i,'zzz',sep=''), labels[i])
    str <- paste(substr(str, 0, match.pos[i]-1 ), new.label, substr(str, match.pos[i] + match.len[i], nchar(str)), sep='')
    # Later match positions shift by the change in this label's length.
    match.pos <- match.pos - match.len[i] + nchar(new.label)
  }
  # Parse the Phylo object from the cleaned-up string.
  #print(str)
  tree <- tree.read(text=str)
  #print(str(tree))
  # Create a list of NHX annotations keyed by the node ID.
  tag.values <- gsub(".*\\[&&NHX:(.*)\\]", "\\1", nhx.strings)
  tagval.list <- strsplit(tag.values, ":")
  names(tagval.list) <- labels.no.bl
  library(plyr)
  # Turn each vector of "key=value" strings into a named list of tag values.
  map.list <- llply(tagval.list, function(x) {
    list.out <- list()
    cur.list <- strsplit(x, "=")
    if (length(cur.list) > 0) {
      for (i in 1:length(cur.list)) {
        vec <- cur.list[[i]]
        list.out[vec[1]] = vec[2]
      }
    }
    return(list.out)
  })
  # Replace the placeholder labels with the true labels and attach the tags.
  tree$.tags <- list()
  for (i in 1:(tree$Nnode+length(tree$tip.label))) {
    tree$.tags[[i]] <- list()
  }
  for (i in 1:length(match.pos)) {
    cur.node <- node.with.label(tree, paste('zzz', i, 'zzz', sep=''))
    leaf <- tree.is.leaf(tree, cur.node)
    real.node.name <- names(map.list)[i]
    if (leaf) {
      tree$tip.label[cur.node] <- real.node.name
    } else {
      tree$node.label[cur.node-length(tree$tip.label)] <- real.node.name
    }
    tree$.tags[[cur.node]] <- map.list[[i]]
  }
  #tree$.tags <- map.list
  return(tree)
}
# Write `phylo` to file `f` in Newick format; thin wrapper around ape's
# write.tree().
tree.write <- function(phylo, f) {
  write.tree(phylo, f)
}
# Look up a single NHX tag value stored on the given node of a tagged tree
# (see tree.read.nhx.old). Returns "" when the node has no tag of that name.
tree.get.tag <- function(phylo, node, tag) {
  value <- phylo$.tags[[node]][[tag]]
  if (is.null(value)) "" else value
}
# All NHX tags stored on `node`, as a named list; an empty list when the
# node carries no tags.
tree.get.tags <- function(phylo, node) {
  stored <- phylo$.tags[[node]]
  if (is.null(stored)) list() else stored
}
# Apply `fn(phylo, node)` to every selected node of the tree, in node-number
# order: tips first (1..Ntip), then internal nodes (Ntip+1..Ntip+Nnode).
# `leaves` / `nodes` control whether tips and/or internal nodes are visited.
# Called for its side effects (callbacks typically accumulate via <<-).
tree.foreach <- function(phylo, fn, leaves=T, nodes=T) {
  n.leaves <- length(phylo$tip.label)
  n.internals <- phylo$Nnode
  indices <- c()
  if (leaves) {
    indices <- c(indices, 1:n.leaves)
  }
  if (nodes) {
    indices <- c(indices, (n.leaves+1):(n.leaves+n.internals))
  }
  for (node in indices) {
    # Bug fix: the original `do.call(fn, as.list(c(phylo, node)))` spliced
    # the phylo object's own elements (edge, Nnode, tip.label, ...) into the
    # argument list, so a two-argument callback fn(phylo, node) errored with
    # "unused arguments". Call the callback directly instead.
    fn(phylo, node)
  }
}
# Flatten a tagged tree into a data frame with one row per node: every NHX
# tag present on the node, plus Label (node's label), Depth (number of
# leaves beneath the node) and id (node number).
# NOTE(review): depends on plyr::rbind.fill and on tree.order.nodes(), which
# is not defined in this chunk; uses <<- to accumulate across the
# tree.foreach callbacks.
tree.as.data.frame <- function(tree, order.cladewise=T) {
  # First pass: collect tag names. NOTE(review): `tags` is never used below.
  tags <- c()
  tree.foreach(tree, function(phylo, node) {
    cur.tags <- tree.get.tags(phylo, node)
    tags <<- c(tags, names(cur.tags))
  })
  # Second pass: build one row per node; rbind.fill pads missing tags with NA.
  tree.df <- data.frame(stringsAsFactors=F)
  tree.foreach(tree, function(phylo, node) {
    cur.tags <- tree.get.tags(phylo, node)
    cur.tags[['Label']] <- tree.label.for.node(phylo, node)
    cur.tags[['Depth']] <- tree.leaves.beneath(phylo, node)
    cur.tags[['id']] <- node
    tree.df <<- rbind.fill(tree.df, as.data.frame(cur.tags, stringsAsFactors=F))
  })
  if (order.cladewise) {
    tree.df <- tree.df[tree.order.nodes(tree, include.internals=T),]
  }
  tree.df
}
## read.tree.R (2010-09-27)
## Read Tree Files in Parenthetic Format
## Copyright 2002-2010 Emmanuel Paradis, Daniel Lawson and Klaus Schliep
## This file is part of the R-package `ape'.
## See the file ../COPYING for licensing issues.
# Build a "phylo" object from a single Newick string `tp` that contains
# branch lengths (colons). Adapted from ape's read.tree internals (see the
# ape copyright header above): scans the parenthesis/comma "skeleton" of the
# string and fills the edge matrix incrementally via the three closures
# below, which mutate the enclosing frame with <<-.
tree.build <- function(tp)
{
  # Open a new internal node below current.node.
  add.internal <- function() {
    edge[j, 1] <<- current.node
    edge[j, 2] <<- current.node <<- node <<- node + 1L
    index[node] <<- j # set index
    j <<- j + 1L
  }
  # Attach the next tip (label + branch length) to current.node.
  add.terminal <- function() {
    edge[j, 1] <<- current.node
    edge[j, 2] <<- tip
    index[tip] <<- j # set index
    X <- unlist(strsplit(tpc[k], ":"))
    tip.label[tip] <<- X[1]
    edge.length[j] <<- as.numeric(X[2])
    k <<- k + 1L
    tip <<- tip + 1L
    j <<- j + 1L
  }
  # Close the current clade: record its label/branch length, move up a level.
  go.down <- function() {
    l <- index[current.node]
    X <- unlist(strsplit(tpc[k], ":"))
    node.label[current.node - nb.tip] <<- X[1]
    edge.length[l] <<- as.numeric(X[2])
    k <<- k + 1L
    current.node <<- edge[l, 1]
  }
  # Special case: a single-tip tree has no comma.
  if (!length(grep(",", tp))) {
    obj <- list(edge = matrix(c(2L, 1L), 1, 2))
    tp <- unlist(strsplit(tp, "[\\(\\):;]"))
    obj$edge.length <- as.numeric(tp[3])
    obj$Nnode <- 1L
    obj$tip.label <- tp[2]
    if (tp[4] != "") obj$node.label <- tp[4]
    class(obj) <- "phylo"
    return(obj)
  }
  tpc <- unlist(strsplit(tp, "[\\(\\),;]"))
  tpc <- tpc[nzchar(tpc)]
  ## the following 2 lines are (slightly) faster than using gsub()
  tsp <- unlist(strsplit(tp, NULL))
  skeleton <- tsp[tsp %in% c("(", ")", ",", ";")]
  nsk <- length(skeleton)
  nb.node <- sum(skeleton == ")")
  nb.tip <- sum(skeleton == ",") + 1
  ## We will assume there is an edge at the root;
  ## if so, it will be removed and put into a vector
  nb.edge <- nb.node + nb.tip
  node.label <- character(nb.node)
  tip.label <- character(nb.tip)
  edge.length <- numeric(nb.edge)
  edge <- matrix(0L, nb.edge, 2)
  current.node <- node <- as.integer(nb.tip + 1) # node number
  edge[nb.edge, 2] <- node
  index <- numeric(nb.edge + 1) # hash index to avoid which
  index[node] <- nb.edge
  ## j: index of the line number of edge
  ## k: index of the line number of tpc
  ## tip: tip number
  j <- k <- tip <- 1L
  for (i in 2:nsk) {
    if (skeleton[i] == "(") add.internal() # add an internal branch (on top)
    if (skeleton[i] == ",") {
      if (skeleton[i - 1] != ")") add.terminal() # add a terminal branch
    }
    if (skeleton[i] == ")") {
      if (skeleton[i - 1] != ")") { # add a terminal branch and go down one level
        add.terminal()
        go.down()
      }
      if (skeleton[i - 1] == ")") go.down() # go down one level
    }
  }
  # Strip the assumed root edge; keep its length in root.edge if present.
  edge <- edge[-nb.edge, ]
  obj <- list(edge = edge, Nnode = nb.node, tip.label = tip.label)
  root.edge <- edge.length[nb.edge]
  edge.length <- edge.length[-nb.edge]
  if (!all(is.na(edge.length))) # added 2005-08-18
    obj$edge.length <- edge.length
  if (is.na(node.label[1])) node.label[1] <- ""
  if (any(nzchar(node.label))) obj$node.label <- node.label
  if (!is.na(root.edge)) obj$root.edge <- root.edge
  class(obj) <- "phylo"
  obj
}
# Read one or more Newick trees from `file` or from the `text` string.
# Modified copy of ape's read.tree(): adds the `remove.whitespace` option
# (ape's original always stripped spaces/tabs, which broke labels containing
# spaces). Returns a single "phylo" object, or a "multiPhylo" list when the
# input contains several trees (or keep.multi = TRUE).
# NOTE(review): trees without branch lengths are dispatched to clado.build,
# which is not defined in this chunk -- presumably ape's, confirm.
tree.read <- function(file = "", text = NULL, tree.names = NULL, skip = 0, remove.whitespace=F,
                      comment.char = "#", keep.multi = FALSE, ...)
{
  # Split "name(tree...);" into c(name, tree-text).
  unname <- function(treetext) {
    nc <- nchar(treetext)
    tstart <- 1
    while (substr(treetext, tstart, tstart) != "(" && tstart <= nc)
      tstart <- tstart + 1
    if (tstart > 1)
      return(c(substr(treetext, 1, tstart - 1),
               substr(treetext, tstart, nc)))
    return(c("", treetext))
  }
  if (!is.null(text)) {
    if (!is.character(text))
      stop("argument `text' must be of mode character")
    tree <- text
  } else {
    tree <- scan(file = file, what = "", sep = "\n", quiet = TRUE,
                 skip = skip, comment.char = comment.char, ...)
  }
  ## Suggestion from Eric Durand and Nicolas Bortolussi (added 2005-08-17):
  if (identical(tree, character(0))) {
    warning("empty character string.")
    return(NULL)
  }
  if (remove.whitespace) {
    tree <- gsub("[ \t]", "", tree)
  }
  # One tree per ";"; x/y are the start/end character indices of each tree.
  tree <- unlist(strsplit(tree, NULL))
  y <- which(tree == ";")
  Ntree <- length(y)
  x <- c(1, y[-Ntree] + 1)
  ## Suggestion from Olivier François (added 2006-07-15):
  if (is.na(y[1])) return(NULL)
  STRING <- character(Ntree)
  for (i in 1:Ntree)
    STRING[i] <- paste(tree[x[i]:y[i]], sep = "", collapse = "")
  tmp <- unlist(lapply(STRING, unname))
  tmpnames <- tmp[c(TRUE, FALSE)]
  STRING <- tmp[c(FALSE, TRUE)]
  if (is.null(tree.names) && any(nzchar(tmpnames)))
    tree.names <- tmpnames
  # Trees containing ":" have branch lengths -> tree.build; otherwise
  # treat as cladograms -> clado.build.
  colon <- grep(":", STRING)
  if (!length(colon)) {
    obj <- lapply(STRING, clado.build)
  } else if (length(colon) == Ntree) {
    obj <- lapply(STRING, tree.build)
  } else {
    obj <- vector("list", Ntree)
    obj[colon] <- lapply(STRING[colon], tree.build)
    nocolon <- (1:Ntree)[!1:Ntree %in% colon]
    obj[nocolon] <- lapply(STRING[nocolon], clado.build)
  }
  for (i in 1:Ntree) {
    ## Check here that the root edge is not incorrectly represented
    ## in the object of class "phylo" by simply checking that there
    ## is a bifurcation at the root
    ROOT <- length(obj[[i]]$tip.label) + 1
    if(sum(obj[[i]]$edge[, 1] == ROOT) == 1 && dim(obj[[i]]$edge)[1] > 1)
      stop(paste("There is apparently two root edges in your file: cannot read tree file.\n  Reading Newick file aborted at tree no.", i, sep = ""))
  }
  if (Ntree == 1 && !keep.multi) obj <- obj[[1]] else {
    if (!is.null(tree.names)) names(obj) <- tree.names
    class(obj) <- "multiPhylo"
  }
  obj
}
# Drop internal-node labels from a tree, returning the modified copy.
tree.remove.node.labels <- function(phylo) {
  phylo[["node.label"]] <- NULL
  phylo
}
# Replace all branch lengths with synthetic "cladogram" lengths.
# push.to.tips = FALSE: every internal branch gets length 1, and each leaf
#   branch is padded so all leaves end up at the same total depth.
# push.to.tips = TRUE: each branch gets the difference in leaf counts
#   between the parent's clade and this node's clade.
# NOTE(review): depends on tree.parent.node(), which is not defined in this
# chunk -- presumably it returns -1 (or less) for the root; confirm.
tree.remove.branchlengths <- function(phylo, push.to.tips=F) {
  n.leaves <- length(phylo$tip.label)
  n.nodes <- length(phylo$tip.label)+phylo$Nnode
  # Deepest node in edge counts from the root (branch lengths ignored).
  max.depth <- 0
  for (i in 1:n.nodes) {
    depth <- tree.depth.to.root(phylo, i)
    max.depth <- max(depth, max.depth)
  }
  max.depth <- max.depth + 1
  for (i in 1:n.nodes) {
    # NOTE(review): cur.depth is computed but unused.
    cur.depth <- tree.depth.to.root(phylo,i)
    parent.node <- tree.parent.node(phylo, i)
    edge.index <- which(phylo$edge[,2]==i)
    is.leaf <- i <= n.leaves
    if (is.leaf) {
      cur.count <- 1
    } else {
      cur.count <- tree.leaves.beneath(phylo, i)
    }
    if (parent.node > -1) {
      parent.count <- tree.leaves.beneath(phylo, parent.node)
      if (push.to.tips) {
        count.diff <- parent.count - cur.count
        #print(paste(i, count.diff))
        phylo$edge.length[edge.index] <- count.diff
      } else {
        if (is.leaf) {
          # Branch length should equal diff. between depth to root and max depth.
          cur.d <- tree.depth.to.root(phylo, i)
          phylo$edge.length[edge.index] <- max.depth - cur.d
        } else {
          phylo$edge.length[edge.index] <- 1
        }
      }
    } else {
      # Root node: no incoming edge, so this assigns to an empty index set.
      phylo$edge.length[edge.index] <- 0
    }
  }
  phylo
}
# Number of leaf (tip) nodes in the clade rooted at `node`.
#
# Args:
#   phylo: a tree of class "phylo".
#   node: node ID within `phylo`.
# Returns: 1 for a tip, otherwise the tip count of the subtree.
tree.leaves.beneath <- function(phylo, node) {
  # BUG FIX: previously called is.leaf(phylo, node).  is.leaf() (stats,
  # for dendrograms) takes a single argument, so the two-argument call was
  # wrong; this file's tree.is.leaf(phylo, node) was clearly intended.
  if (tree.is.leaf(phylo, node)) {
    return(1)
  }
  cld <- tree.extract.clade(phylo, node)
  length(cld$tip.label)
}
# Number of edges on the path from the root to `node`.
# `node` may be a numeric node ID or a tip label; the root has depth 0.
tree.depth.to.root <- function(phylo,node) {
  idx <- if (is.character(node)) which(phylo$tip.label == node) else node
  edges <- phylo$edge
  depth <- 0
  # Walk parent edges upward until no edge points to the current node
  # (i.e. we reached the root).
  repeat {
    parent.row <- which(edges[, 2] == idx)
    if (length(parent.row) == 0) {
      break
    }
    depth <- depth + 1
    idx <- edges[parent.row, 1]
  }
  return(depth)
}
# Find the node ID(s) carrying the given label.  Tip labels come first,
# then internal node labels, matching phylo node numbering.
tree.node.with.label <- function(tree,label) {
  every.label <- c(tree$tip.label, tree$node.label)
  which(every.label %in% label)
}
# Label for a node ID: a tip label for IDs 1..n.tips, an internal node
# label for IDs up to n.tips + Nnode, otherwise NULL (invisibly).
tree.label.for.node <- function(tree, node) {
  n.tips <- length(tree$tip.label)
  if (node <= n.tips) {
    tree$tip.label[node]
  } else if (node <= n.tips + tree$Nnode) {
    tree$node.label[node - n.tips]
  }
}
# Length of the branch directly above `node`.  Returns 0 when the node is
# the root (no parent edge) or the stored length is NA.
tree.branch.length <- function(phylo,node) {
  len <- phylo$edge.length[which(phylo$edge[, 2] == node)]
  if (length(len) == 0 || is.na(len)) {
    return(0)
  }
  len
}
# Multiply every branch length in `phylo` by `factor`.
tree.scale.by <- function(phylo, factor) {
  phylo[["edge.length"]] <- phylo[["edge.length"]] * factor
  phylo
}
# Uniformly rescale branch lengths so the total tree length equals
# `total.length`.
tree.scale.to <- function(phylo, total.length) {
  factor <- total.length / tree.total.branch.length(phylo)
  tree.scale.by(phylo, factor)
}
# Sum of all branch lengths in the tree.
tree.total.branch.length <- function(phylo) {
  sum(phylo[["edge.length"]])
}
# The maximum root-to-tip length in the tree.
#
# Uses branch lengths when available; when tree.lengths.to.root() yields
# NA (e.g. no tips / no usable lengths), falls back to the deepest tip
# measured in edge counts.
tree.max.length.to.root <- function(phylo) {
  max.length <- max(tree.lengths.to.root(phylo))
  if (is.na(max.length)) {
    max.depth <- 0
    # FIX: seq_along() instead of 1:length(...), which iterated over
    # c(1, 0) when the tree had zero tips.
    for (i in seq_along(phylo$tip.label)) {
      cur.depth <- tree.depth.to.root(phylo, i)
      max.depth <- max(max.depth, cur.depth)
    }
    return(max.depth)
  }
  max.length
}
# Mean root-to-tip path length across all tips.
tree.mean.path.length <- function(phylo) {
  root.to.tip <- tree.lengths.to.root(phylo)
  mean(root.to.tip)
}
# Root-to-tip path length for every tip of `phylo`.
#
# Returns a numeric vector with one entry per tip (in tip-ID order), or NA
# when the tree has no tips (preserved sentinel behavior).
tree.lengths.to.root <- function(phylo) {
  n.tips <- length(phylo$tip.label)
  if (n.tips == 0) {
    return(NA)
  }
  # vapply over seq_len replaces the original grow-by-index loop and
  # guarantees a numeric result.
  vapply(seq_len(n.tips),
         function(i) tree.length.to.root(phylo, i),
         numeric(1))
}
# Sum of branch lengths on the path from the root to `node`.
# `node` may be a numeric node ID or a tip label.  Trees without an
# $edge.length component are treated as having unit-length branches;
# missing/NA lengths contribute 0.
tree.length.to.root <- function(phylo,node) {
  idx <- if (is.character(node)) which(phylo$tip.label == node) else node
  edges <- phylo$edge
  branch.lengths <- phylo$edge.length
  if (is.null(branch.lengths)) {
    branch.lengths <- rep(1, nrow(edges))
  }
  total <- 0
  # Walk parent edges upward, accumulating lengths, until the root.
  repeat {
    edge.row <- which(edges[, 2] == idx)
    if (length(edge.row) == 0) {
      break
    }
    step <- branch.lengths[edge.row]
    if (length(step) == 0 || is.na(step)) {
      step <- 0
    }
    total <- total + step
    idx <- edges[edge.row, 1]
  }
  return(total)
}
# Remove tip `x` (label or index) from `tree` via ape's drop.tip().
tree.remove.leaf <- function(tree, x) {
  drop.tip(tree, x)
}
# Prune `tree` down to only the tips listed in `x` by dropping every
# other tip.
tree.extract.subtree <- function(tree, x) {
  tips.to.drop <- setdiff(tree$tip.label, x)
  drop.tip(tree, tips.to.drop)
}
# TRUE when `node` is a tip: phylo numbers tips 1..n.tips.
tree.is.leaf <- function(phylo,node) {
  node <= length(phylo$tip.label)
}
# Extracts the child node IDs of `node`.  A leaf yields the sentinel pair
# c(-1, -1).
#
# The result is doubly nested: the children vector is wrapped in list()
# and then return(list(nodes)) wraps it again, so callers must unwrap
# twice (result[[1]][[1]]).
# NOTE(review): the double list() wrapping looks accidental -- confirm
# against callers before simplifying.
tree.child.nodes <- function(phylo,node) {
  edge.indices <- which(phylo$edge[,1]==node)
  nodes <- phylo$edge[edge.indices,2]
  if (length(nodes)==0) {
    # Leaf: sentinel pair instead of an empty vector.
    nodes <- list(c(-1,-1))
  } else {
    nodes <- list(nodes)
  }
  return(list(nodes))
}
# Parent node ID of `node`, or -1 when `node` is the root (has no
# incoming edge).
tree.parent.node <- function(phylo,node) {
  parent <- phylo$edge[which(phylo$edge[, 2] == node), 1]
  if (length(parent) == 0) {
    return(-1)
  }
  parent
}
# Node IDs ordered by their vertical position in a cladewise layout.
#
# Args:
#   phylo: tree of class "phylo".
#   include.internals: when FALSE, only tip (leaf) nodes are returned.
# Returns: vector of node IDs sorted by the layout's y coordinate.
tree.order.nodes <- function(phylo, include.internals=TRUE) {
  phylo <- reorder(phylo, order="cladewise");
  df.list <- phylo.layout.df(phylo,
    layout.ancestors=TRUE
  )
  nodes <- df.list$nodes
  if (!include.internals) {
    # BUG FIX: this previously kept is.leaf==FALSE rows, i.e. it dropped
    # the leaves and kept the internals -- the opposite of what
    # include.internals=FALSE asks for.  Assumes the layout's is.leaf
    # column marks tip nodes; confirm against phylo.layout.df().
    nodes <- subset(nodes, is.leaf==TRUE)
  }
  nodes[order(nodes$y), 'node']
}
# Reorder the rows of `tree.df` (matched on its $id column) to follow the
# layout order of the nodes in `tree`.
sort.df.by.tree <- function(tree.df, tree) {
  node.order <- tree.order.nodes(tree)
  tree.df[match(node.order, tree.df$id), ]
}
# TRUE when `x` looks like a usable tree: it has an edge matrix and at
# least one internal node.
#
# isTRUE() both replaces the redundant if/else-return-TRUE/FALSE shape and
# guards against an NA Nnode, which would have made the original `if`
# condition throw an error.
is.tree <- function(x) {
  isTRUE(!is.null(x$edge) && x$Nnode > 0)
}
|
e84c7b500bb1429455d0076d715f94838889c4cc
|
101e8c42522268a33ccb8b9be44140c77ffcf129
|
/server.R
|
fa14ff16fceb16b199dfad3af3ac16b833af9b54
|
[] |
no_license
|
li-xiaohui/developing-data-products
|
590573969931ea0e28801aa53707f2e6e9910459
|
3820d1db3198a1ed40cda7d3c1194e6f94878286
|
refs/heads/master
| 2021-01-10T08:39:01.059711
| 2017-06-07T02:48:03
| 2017-06-07T02:48:03
| 43,115,393
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 996
|
r
|
server.R
|
# app2: Shiny server exploring the PimaIndiansDiabetes dataset.
library(mlbench)   # provides PimaIndiansDiabetes
library(caret)     # NOTE(review): not referenced in this file -- confirm it is needed
library(dplyr)
library(ggplot2)

data("PimaIndiansDiabetes")

shinyServer(
  function(input, output) {
    # Dodged bar chart of diabetes outcome counts by age, restricted to
    # patients younger than the user-selected cutoff (input$inputAge).
    output$plot2 <- renderPlot({
      inputAge <- input$inputAge
      counts <- PimaIndiansDiabetes %>%
        filter(age < inputAge) %>%
        group_by(diabetes, age) %>%
        summarise(count=n())
      ggplot(counts, aes(x= age, y=count, fill=diabetes)) + geom_bar(stat="identity", position=position_dodge()) +
        xlab("Age") + ylab("Count")
      # , counts, type='l' , xlab='age', col='blue', main='Pos Cases by Age')
    })
    # Dodged bar chart of diabetes outcome counts by number of
    # pregnancies, restricted below the user-selected cutoff (input$preg).
    output$pregPlot <- renderPlot({
      preg <- input$preg
      pregCounts <- PimaIndiansDiabetes %>%
        filter(pregnant < preg) %>%
        group_by(diabetes, pregnant) %>%
        summarise(count=n())
      ggplot(pregCounts, aes(x=pregnant, y=count, fill=diabetes)) + geom_bar(stat="identity", position=position_dodge())+
        xlab("Number of times pregnant") + ylab("Count")
    })
  })
|
f1541ce47945540d48ce87e9957f76c013dad763
|
b24fc860c61f03e43e39491b7bf094910d04796a
|
/analyse deprivation.R
|
d7754022f1cf303253e7e7766ac9b11b9ca5c823
|
[
"MIT"
] |
permissive
|
matthewgthomas/covid-19-regional-deprivation
|
b42024021abaadfde491974b7030eb5e4c5864bf
|
80769119012d1086d3392bb1ead02b3adbb518f5
|
refs/heads/main
| 2023-03-07T21:31:12.480015
| 2021-02-20T10:49:03
| 2021-02-20T10:49:03
| 340,119,529
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 4,884
|
r
|
analyse deprivation.R
|
library(tidyverse)
library(rstanarm)

# Let Stan use all available cores for sampling.
options(mc.cores = parallel::detectCores())

# COVID-19 death rates joined to deprivation measures at two geographies
# (local authority and MSOA).
deaths_msoa <- read_csv("data/deaths-msoa.csv")
deaths_la <- read_csv("data/deaths-la.csv")

# Bayesian hierarchical model: death rate on deprivation Extent, with
# region-level (RGN19NM) varying intercepts and slopes.
m_la <- stan_lmer(DeathRate ~ Extent + (Extent | RGN19NM), data = deaths_la,
                  prior_intercept = normal(0, 5), prior = normal(0,2), prior_covariance = decov(regularization=2),
                  cores = 1, chains = 4)

saveRDS(m_la, "data/la-model.rds")

# Interactive MCMC diagnostics (blocks until the Shiny app is closed).
launch_shinystan(m_la, ppd = FALSE)
# ---- Obtain regional varying slopes ----
# Summarise the posterior of each region's slope for `param`:
# region slope = population-level slope draw + region-level deviation draw.
#
# Args:
#   m: a fitted rstanarm model with an RGN19NM grouping term.
#   param: name of the slope parameter (default "Extent").
# Returns: a data.frame (one row per region) with columns
#   a_mean, a_sd, Q2.5, Q50, Q97.5.
get_slopes <- function(m, param = "Extent") {
  # Posterior draws for the fixed (population-level) slope.
  fixed_draws <- as.matrix(m, pars = param)
  # Posterior draws for the region-level slope deviations.
  region_draws <- as.matrix(m,
                            regex_pars = paste0("b\\[", param, " RGN19NM\\:"))
  # Region-specific slopes, draw by draw.
  slope_draws <- as.numeric(fixed_draws) + region_draws

  # Posterior median and 95% credible interval per region.
  quant <- t(apply(slope_draws, 2, quantile, probs = c(0.025, 0.50, 0.975)))
  quant <- data.frame(quant)
  names(quant) <- c("Q2.5", "Q50", "Q97.5")

  # Column names a_mean/a_sd are preserved for downstream consumers.
  data.frame(a_mean = colMeans(slope_draws),
             a_sd = apply(slope_draws, 2, sd),
             quant)
}
# ---- Plot predicted death rates ----
# Prediction grid over the observed Score range, crossed with all regions.
# NOTE(review): m_la was fitted on Extent, but this grid supplies Score --
# posterior_predict() would need an Extent column unless deaths_la carries
# both and this is intentional; confirm.
new_data <- expand_grid(
  Score = seq(min(deaths_la$Score), max(deaths_la$Score)),
  RGN19NM = unique(deaths_la$RGN19NM)
)

# Posterior predictive draws (includes observation noise) and expected
# values (linear predictor only).
post <- posterior_predict(m_la, new_data)
pred <- posterior_epred(m_la, new_data)

quants <- apply(post, 2, quantile, probs = c(0.025, 0.5, 0.975)) # quantiles over mcmc samples
quants2 <- apply(pred, 2, quantile, probs = c(0.025, 0.5, 0.975)) # quantiles over mcmc samples

row.names(quants) <- c("sim.lwr", "sim.med", "sim.upr")
row.names(quants2) <- c("lwr", "DeathRate.pred", "upr")

# Attach both interval sets to the prediction grid for plotting.
new_data <- cbind(new_data, t(quants), t(quants2))

# Per-region fitted curves with 95% expectation ribbons.
new_data %>%
  ggplot(aes(x = Score, y = DeathRate.pred)) +
  geom_line(aes(colour = RGN19NM)) +
  geom_ribbon(aes(ymin = lwr, ymax = upr, fill = RGN19NM), alpha = 0.2) +
  # geom_blank(data = ylims) + # plot dummy data to give facets the same y limits by country
  facet_wrap(~RGN19NM) +
  theme_classic()
# ---- MSOA model ----
# Same hierarchical specification as m_la, at MSOA geography.
m_msoa <- stan_lmer(DeathRate ~ Extent + (Extent | RGN19NM), data = deaths_msoa,
                    prior_intercept = normal(0, 5), prior = normal(0,2), prior_covariance = decov(regularization=2),
                    cores = 1, chains = 4)

saveRDS(m_msoa, "data/msoa-model.rds")

launch_shinystan(m_msoa, ppd = FALSE)

# ---- Make tibble with slope estimates from both models ----
slopes_la <- get_slopes(m_la)
slopes_msoa <- get_slopes(m_msoa)

# Stack both geographies and derive a readable Region name from the raw
# Stan parameter label.
slopes <- bind_rows(
  slopes_la %>% rownames_to_column(var = "Parameter") %>% mutate(Model = "LA"),
  slopes_msoa %>% rownames_to_column(var = "Parameter") %>% mutate(Model = "MSOA")
) %>%
  mutate(Region = Parameter %>%
           str_remove("b\\[Extent RGN19NM:") %>%
           str_remove("\\]") %>%
           str_replace_all("_", " "))

# Save slopes
write_csv(slopes, "data/regression-slopes.csv")

# ---- Fit models with IMD score rather than extent ----
# Standardise scores
deaths_la$Score_z = as.numeric(scale(deaths_la$Score))
deaths_msoa$Score_z = as.numeric(scale(deaths_msoa$Score))

m_score_la <- stan_lmer(DeathRate ~ Score_z + (Score_z | RGN19NM), data = deaths_la,
                        prior_intercept = normal(0, 5), prior = normal(0,2), prior_covariance = decov(regularization=2),
                        cores = 1, chains = 4)

m_score_msoa <- stan_lmer(DeathRate ~ Score_z + (Score_z | RGN19NM), data = deaths_msoa,
                          prior_intercept = normal(0, 5), prior = normal(0,2), prior_covariance = decov(regularization=2),
                          cores = 1, chains = 4)

write_rds(m_score_la, "data/la-score-model.rds")
write_rds(m_score_msoa, "data/msoa-score-model.rds")

launch_shinystan(m_score_la, ppd = FALSE)
launch_shinystan(m_score_msoa, ppd = FALSE)

# Slope summaries for the Score_z models, labelled as above.
slopes_la <- get_slopes(m_score_la, param = "Score_z")
slopes_msoa <- get_slopes(m_score_msoa, param = "Score_z")

slopes <- bind_rows(
  slopes_la %>% rownames_to_column(var = "Parameter") %>% mutate(Model = "LA"),
  slopes_msoa %>% rownames_to_column(var = "Parameter") %>% mutate(Model = "MSOA")
) %>%
  mutate(Region = Parameter %>%
           str_remove("b\\[Score_z RGN19NM:") %>%
           str_remove("\\]") %>%
           str_replace_all("_", " "))

# Save slopes
write_csv(slopes, "data/regression-slopes-scores.csv")
|
3441c90a20c16332e2421724ef338e5442561e7a
|
679ec7dfb61aa31a6a86aaff76bc7e9f277eacd2
|
/input/gcam-data-system/aglu-processing-code/level1/LB113.bio_Yield_R_AEZ.R
|
6c5e5e3cc30dc90f91ca5b0a2cba28a2889853e0
|
[
"ECL-2.0",
"LicenseRef-scancode-unknown-license-reference"
] |
permissive
|
Randynat/gcam-core
|
1f607b95a760c0e81bd625ae2e614df08eda1b53
|
174872a6fe4af92bb622ca6a56ea0cef39a308b9
|
refs/heads/master
| 2021-08-06T17:40:57.446707
| 2017-11-06T14:02:59
| 2017-11-06T14:02:59
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 8,574
|
r
|
LB113.bio_Yield_R_AEZ.R
|
# Before we can load headers we need some paths defined. They
# may be provided by a system environment variable or just
# having already been set in the workspace
if( !exists( "AGLUPROC_DIR" ) ){
  if( Sys.getenv( "AGLUPROC" ) != "" ){
    AGLUPROC_DIR <- Sys.getenv( "AGLUPROC" )
  } else {
    stop("Could not determine location of aglu processing scripts, please set the R var AGLUPROC_DIR to the appropriate location")
  }
}

# Universal header file - provides logging, file support, etc.
source(paste(AGLUPROC_DIR,"/../_common/headers/GCAM_header.R",sep=""))
source(paste(AGLUPROC_DIR,"/../_common/headers/AGLU_header.R",sep=""))
logstart( "LB113.bio_Yield_R_AEZ.R" )
# Register header files as dependencies for the build system.
adddep(paste(AGLUPROC_DIR,"/../_common/headers/GCAM_header.R",sep=""))
adddep(paste(AGLUPROC_DIR,"/../_common/headers/AGLU_header.R",sep=""))
printlog( "Biomass yield by region / AEZ, base year" )

# -----------------------------------------------------------------------------
# 1. Read files
# Shared assumption constants (cellulosic_crops, AEZs, conversion factors...).
sourcedata( "COMMON_ASSUMPTIONS", "A_common_data", extension = ".R" )
sourcedata( "COMMON_ASSUMPTIONS", "unit_conversions", extension = ".R" )
sourcedata( "AGLU_ASSUMPTIONS", "A_aglu_data", extension = ".R" )
# Input tables: region mapping, EPIC biomass yields, harvested area and
# production by region / commodity / year / AEZ.
iso_GCAM_regID <- readdata( "COMMON_MAPPINGS", "iso_GCAM_regID" )
EPIC_bio_Yield <- readdata( "AGLU_LEVEL0_DATA", "EPIC_bio_Yield" )
L103.ag_HA_bm2_R_C_Y_AEZ <- readdata( "AGLU_LEVEL1_DATA", "L103.ag_HA_bm2_R_C_Y_AEZ" )
L104.ag_Prod_Mt_R_C_Y_AEZ <- readdata( "AGLU_LEVEL1_DATA", "L104.ag_Prod_Mt_R_C_Y_AEZ" )
# -----------------------------------------------------------------------------
# 2. Perform computations
# Subset only the relevant (cellulosic) crops from the production and harvested area databases
printlog( "Aggregating yield data for crops being used for computing regional bioenergy indices" )
L113.ag_Prod_Mt_R_Ccell_fby_AEZ <- L104.ag_Prod_Mt_R_C_Y_AEZ[ L104.ag_Prod_Mt_R_C_Y_AEZ$GCAM_commodity %in% cellulosic_crops, c( R_C_AEZ, X_bio_yield_year ) ]
L113.ag_Prod_Mt_R_Ccell_fby <- aggregate( L113.ag_Prod_Mt_R_Ccell_fby_AEZ[ X_bio_yield_year ], by=as.list( L113.ag_Prod_Mt_R_Ccell_fby_AEZ[ R_C ] ), sum )
L113.ag_HA_bm2_R_Ccell_fby_AEZ <- L103.ag_HA_bm2_R_C_Y_AEZ[ L103.ag_HA_bm2_R_C_Y_AEZ$GCAM_commodity %in% cellulosic_crops, c( R_C_AEZ, X_bio_yield_year ) ]
L113.ag_HA_bm2_R_Ccell_fby <- aggregate( L113.ag_HA_bm2_R_Ccell_fby_AEZ[ X_bio_yield_year ], by=as.list( L113.ag_HA_bm2_R_Ccell_fby_AEZ[ R_C ] ), sum )

# Regional index calculation using cellulosic crops
# (yield = production / harvested area, by region and crop)
L113.ag_Index_R_Ccell <- data.frame( L113.ag_Prod_Mt_R_Ccell_fby[ R_C ],
  Prod_Mt = L113.ag_Prod_Mt_R_Ccell_fby[[ X_bio_yield_year ]],
  HA_bm2 = L113.ag_HA_bm2_R_Ccell_fby[[ X_bio_yield_year ]],
  Yield_kgm2 = L113.ag_Prod_Mt_R_Ccell_fby[[ X_bio_yield_year ]] / L113.ag_HA_bm2_R_Ccell_fby[[ X_bio_yield_year ]] )

# Index each region's yield to the USA, and weight by the production
USA_regID <- iso_GCAM_regID$GCAM_region_ID[ iso_GCAM_regID$iso == "usa" ]
L113.ag_Index_R_Ccell$Index <- L113.ag_Index_R_Ccell$Yield_kgm2 / L113.ag_Index_R_Ccell$Yield_kgm2[
  match( paste( USA_regID, L113.ag_Index_R_Ccell$GCAM_commodity ),
         paste( L113.ag_Index_R_Ccell$GCAM_region_ID, L113.ag_Index_R_Ccell$GCAM_commodity ) ) ]
L113.ag_Index_R_Ccell$ProdxIndex <- L113.ag_Index_R_Ccell$Index * L113.ag_Index_R_Ccell$Prod_Mt

# Aggregate by crop to compute regional indices (to region 1)
L113.ag_bioYieldIndex_R <- aggregate( L113.ag_Index_R_Ccell[ c( "Prod_Mt", "ProdxIndex" ) ],
  by=as.list( L113.ag_Index_R_Ccell[ R ] ), sum )

# Set regional index to a maximum of 1 (no region can be > USA)
L113.ag_bioYieldIndex_R$bioYieldIndex <- pmin( 1, L113.ag_bioYieldIndex_R$ProdxIndex / L113.ag_bioYieldIndex_R$Prod_Mt )

# AEZ indexing using cellulosic crops
printlog( "Aggregating yield data for crops being used for computing AEZ bioenergy indices" )
L113.ag_Prod_Mt_Ccell_fby_AEZ <- aggregate( L113.ag_Prod_Mt_R_Ccell_fby_AEZ[ X_bio_yield_year ],
  by=as.list( L113.ag_Prod_Mt_R_Ccell_fby_AEZ[ C_AEZ ] ), sum )
L113.ag_HA_bm2_Ccell_fby_AEZ <- aggregate( L113.ag_HA_bm2_R_Ccell_fby_AEZ[ X_bio_yield_year ],
  by=as.list( L113.ag_HA_bm2_R_Ccell_fby_AEZ[ C_AEZ ] ), sum )

# Create a single table for computation of the indices
printlog( "Calculating AEZ bioenergy indices" )
L113.ag_Index_Ccell_AEZ <- data.frame(
  L113.ag_Prod_Mt_Ccell_fby_AEZ[ C_AEZ ],
  Prod_Mt = L113.ag_Prod_Mt_Ccell_fby_AEZ[[ X_bio_yield_year ]],
  HA_bm2 = L113.ag_HA_bm2_Ccell_fby_AEZ[[ X_bio_yield_year ]],
  Yield_kgm2 = L113.ag_Prod_Mt_Ccell_fby_AEZ[[ X_bio_yield_year ]] / L113.ag_HA_bm2_Ccell_fby_AEZ[[ X_bio_yield_year ]] )
# Zero harvested area produces NaN yields; treat those as zero.
L113.ag_Index_Ccell_AEZ$Yield_kgm2[ is.na( L113.ag_Index_Ccell_AEZ$Yield_kgm2 ) ] <- 0

# Index each AEZ's yield to the "index AEZ", and weight by production
L113.ag_Index_Ccell_AEZ$Index <- L113.ag_Index_Ccell_AEZ$Yield_kgm2 / L113.ag_Index_Ccell_AEZ$Yield_kgm2[
  match( paste( L113.ag_Index_Ccell_AEZ$GCAM_commodity, Index_AEZ ),
         paste( L113.ag_Index_Ccell_AEZ$GCAM_commodity, L113.ag_Index_Ccell_AEZ$AEZ ) ) ]
L113.ag_Index_Ccell_AEZ$ProdxIndex <- L113.ag_Index_Ccell_AEZ$Index * L113.ag_Index_Ccell_AEZ$Prod_Mt

# Aggregate by AEZ to compute each AEZ's production-weighted index
L113.ag_bioYieldIndex_AEZ <- aggregate( L113.ag_Index_Ccell_AEZ[ c( "Prod_Mt", "ProdxIndex" ) ], by=as.list( L113.ag_Index_Ccell_AEZ[ AEZ ] ), sum )
L113.ag_bioYieldIndex_AEZ$Index_Ccell <- L113.ag_bioYieldIndex_AEZ$ProdxIndex / L113.ag_bioYieldIndex_AEZ$Prod_Mt
L113.ag_bioYieldIndex_AEZ$Index_Ccell[ is.na( L113.ag_bioYieldIndex_AEZ$Index_Ccell ) ] <- 0

# NOTE: Setting AEZ11's AEZ index equal to AEZ10
L113.ag_bioYieldIndex_AEZ$Index_Ccell[ L113.ag_bioYieldIndex_AEZ$AEZ == "AEZ11" ] <-
  L113.ag_bioYieldIndex_AEZ$Index_Ccell[ L113.ag_bioYieldIndex_AEZ$AEZ == "AEZ10" ]

# Cast by AEZs and repeat by number of regions
L113.ag_bioYieldIndex_R_AEZ <- repeat_and_add_vector( L113.ag_bioYieldIndex_AEZ, R, sort( unique( iso_GCAM_regID$GCAM_region_ID ) ) )
L113.ag_bioYieldIndex_R_AEZ <- dcast( L113.ag_bioYieldIndex_R_AEZ, GCAM_region_ID ~ AEZ, value.var = "Index_Ccell" )

# Build table of yield indices by region and AEZ
printlog( "Multiplying regional bioenergy indices by EPIC-adjusted AEZ bioenergy indices" )
L113.ag_bioYieldIndex_R_AEZ[ AEZs ] <- L113.ag_bioYieldIndex_R$bioYieldIndex * L113.ag_bioYieldIndex_R_AEZ[ AEZs ]

# Convert units and adjust yields in EPIC yield table
printlog( "Converting units from EPIC and adjusting upwards for consistency with literature estimates" )
EPIC_bio_Yield$MEAN_kgm2 <- EPIC_bio_Yield$MEAN * bio_GJt / conv_Ha_m2
EPIC_bio_Yield$MEAN_SD_kgm2 <- ( EPIC_bio_Yield$MEAN + EPIC_bio_Yield$STD ) * bio_GJt / conv_Ha_m2
EPIC_bio_Yield$MEAN_mult_kgm2 <- EPIC_bio_Yield$MEAN * bio_yield_mult * bio_GJt / conv_Ha_m2

# Multiply table of indices by region and AEZ by base yield in index AEZ. Arid AEZs use a different base yield.
printlog( "Multiplying base year yields by region and AEZ bioenergy indices for two scenarios" )
L113.ag_bioYield_GJm2_R_AEZ_ref <- L113.ag_bioYieldIndex_R_AEZ
L113.ag_bioYield_GJm2_R_AEZ_ref[ AEZs ] <-
  L113.ag_bioYieldIndex_R_AEZ[ AEZs ] * EPIC_bio_Yield$MEAN_SD_kgm2[ EPIC_bio_Yield$AEZ == Index_AEZ ]
L113.ag_bioYield_GJm2_R_AEZ_ref[ AEZs_arid ] <-
  L113.ag_bioYieldIndex_R_AEZ[ AEZs_arid ] * EPIC_bio_Yield$MEAN_kgm2[ EPIC_bio_Yield$AEZ == Index_AEZ ]

# Set yields to zero in regions with no agricultural data
L113.ag_bioYield_GJm2_R_AEZ_ref[ is.na( L113.ag_bioYield_GJm2_R_AEZ_ref ) ] <- 0

# For "hi" scenario, arid AEZs use the same yield base
L113.ag_bioYield_GJm2_R_AEZ_hi <- L113.ag_bioYieldIndex_R_AEZ
L113.ag_bioYield_GJm2_R_AEZ_hi[ AEZs ] <-
  L113.ag_bioYieldIndex_R_AEZ[ AEZs ] * EPIC_bio_Yield$MEAN_mult_kgm2[ EPIC_bio_Yield$AEZ == Index_AEZ ]
L113.ag_bioYield_GJm2_R_AEZ_hi[ is.na( L113.ag_bioYield_GJm2_R_AEZ_hi ) ] <- 0
# -----------------------------------------------------------------------------
# 3. Output
# Add descriptive comments to output tables (written into the file headers).
comments.L113.ag_bioYield_GJm2_R_AEZ_ref <- c( "Reference base year bioenergy yields by GCAM region / AEZ","Unit = GJ.m2" )
comments.L113.ag_bioYield_GJm2_R_AEZ_hi <- c( "High base year bioenergy yields by GCAM region / AEZ","Unit = GJ.m2" )

writedata( L113.ag_bioYield_GJm2_R_AEZ_ref, domain="AGLU_LEVEL1_DATA", fn="L113.ag_bioYield_GJm2_R_AEZ_ref", comments=comments.L113.ag_bioYield_GJm2_R_AEZ_ref )
writedata( L113.ag_bioYield_GJm2_R_AEZ_hi, domain="AGLU_LEVEL1_DATA", fn="L113.ag_bioYield_GJm2_R_AEZ_hi", comments=comments.L113.ag_bioYield_GJm2_R_AEZ_hi )

# Every script should finish with this line
logstop()
|
d3189e8c1e27b3c1ea07e99100e9bc5e8011ad0a
|
93fef68695ec291350e728b928c608f6cb9e09eb
|
/Archive/coexpression_activepathways_setup_May16.R
|
3e470ea20f0679bdcfdaee1669ac51d5b4ff3c43
|
[] |
no_license
|
HongyuanWu/lncRNAs_TCGA
|
ae4fa9202704545fc59a9dae19dabeeda2b7cb34
|
cbfe2356f8d65b20672dcc378fe7de309eec3dba
|
refs/heads/master
| 2023-07-28T00:11:29.152750
| 2021-09-09T13:33:01
| 2021-09-09T13:33:01
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 8,890
|
r
|
coexpression_activepathways_setup_May16.R
|
#top5_cancers_median5fpkm_specificFind.R
#Karina Isaev
#August 28th, 2017

#Purpose: using the top 5 cancers selected for analysis,
#run survival analysis in a pancancer approach with cancer
#type as covariate as Neat1 is highly expressed in all cancers

#++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
#RNA files used here obtained from script:
#pcawg_analysis_July2017/top5_cancers_extraction_script3.R
##++++++++++++++++++++++++++++++++++++++++++++++++++++++++++

#Preamble#-------------------------------------------------
options(stringsAsFactors=F)
# Provides rna, pcg and other shared objects used below.
source("universal_LASSO_survival_script.R")
#Later on - incorporate FANTOM5 and CRISPRi lncRNAs

#Libraries#------------------------------------------------
library(data.table)
library(survival)
library(ggplot2)
library(ggfortify)
library(cluster)
library(qqman)
library(dplyr)
library(rafalib)
library(RColorBrewer)
#library(genefilter)
library(gplots) ##Available from CRAN
library(survminer)
library(MASS)
library(Hmisc)
library(gProfileR)
library(wesanderson)
library(ggsci)
library(gridExtra)
library(ggpubr)
library(ggthemes)
library(plyr)
mypal = pal_npg("nrc", alpha = 0.7)(10)

#Data-------------------------------------------------------
# Elastic-net survival candidates; keep only the TCGA-derived ones.
allCands = readRDS("final_candidates_TCGA_PCAWG_results_100CVsofElasticNet_June15.rds")
allCands = filter(allCands, data=="TCGA") #175 unique lncRNA-cancer combos, #166 unique lncRNAs

#UCSC gene info
ucsc <- fread("UCSC_hg19_gene_annotations_downlJuly27byKI.txt", data.table=F)
#z <- which(ucsc$hg19.ensemblSource.source %in% c("antisense", "lincRNA", "protein_coding"))
#ucsc <- ucsc[z,]
# Drop rows whose column-8 ID is a duplicate.
# NOTE(review): if z is empty, ucsc[-z,] selects ZERO rows (negative
# indexing with integer(0)) -- confirm duplicates always exist here.
z <- which(duplicated(ucsc[,8]))
ucsc <- ucsc[-z,]

#fantom
fantom <- fread("lncs_wENSGids.txt", data.table=F) #6088 lncRNAs
# Strip the trailing version suffix from an Ensembl gene ID taken from the
# first element of a table row (e.g. "ENSG00000123456.7" -> "ENSG00000123456").
extract3 <- function(row){
  raw.id <- as.character(row[[1]])
  sub("\\..*", "", raw.id)
}
# Normalise fantom's Ensembl IDs (strip version suffixes).
fantom[,1] <- apply(fantom[,1:2], 1, extract3)
#remove duplicate gene names (gene names with multiple ensembl ids)
# Removes ALL occurrences of any duplicated name, not just the extras.
# NOTE(review): if z is empty, fantom[-z,] selects ZERO rows (negative
# indexing with integer(0)) -- confirm duplicates always exist here.
z <- which(duplicated(fantom$CAT_geneName))
rm <- fantom$CAT_geneName[z]
z <- which(fantom$CAT_geneName %in% rm)
fantom <- fantom[-z,]

#Combined into one dataframe because need to get ranks
# NOTE(review): `all` shadows base::all() for the rest of this script.
all <- merge(rna, pcg, by = c("patient", "Cancer"))
all = all[,1:25170]

#------------------------------------------------------------------
#Within each tissue type, rank lncRNAs by which percentile of
#expression they fall into to then compare with PCAWG lncRNAS exp
#------------------------------------------------------------------

#1. log1p
# Log-transform all expression columns (Ensembl-ID columns).
z = which(str_detect(colnames(all), "ENSG"))
all[,z] <- log1p(all[,z])

#2. Get lncRNA - median within each tissue type
tissues <- unique(allCands$cancer)
#tissues <- tissues[c(7,9,12,13)]

####TEST
#tissues = tissues[1:4]
####TEST

#3. Want ranking seperatley for high lncRNA expression group versus low lncRNA expression group

#Function 1
#input: tissue
#output: list of dataframes by tissue
# Subset the combined expression table to one cancer type.
get_tissue_specific <- function(tissue){
  tis <- all[all$Cancer==tissue,]
  return(tis)
}
tissues_data <- llply(tissues, get_tissue_specific, .progress="text")
#Function 2
#For each lncRNA candidate in one cancer type, label every patient as
#lncRNA-risk or non-risk based on median expression and the candidate's
#hazard ratio.
#
# Args:
#   dat: one cancer type's expression data frame (patients x genes), as
#     produced by get_tissue_specific().
# Returns: a data frame stacking, for every candidate lncRNA, the
#   patients' lncRNA expression, PCG expression, median group
#   ("High"/"Low"), and risk group ("RISK"/"noRISK").
# Uses globals: allCands (candidates with $gene, $cancer, $HR), pcg.
get_lnc_canc = function(dat){
  canc = dat$Cancer[1]
  # Candidate lncRNAs for this cancer.
  lncs = as.character(unique(subset(allCands, cancer == canc)$gene))
  evaluate_each_lnc = function(lnc){
    # Keep this lncRNA's column plus all protein-coding-gene columns.
    pcgs = colnames(pcg)[2:19351]
    dat_keep = dat[,which(colnames(dat) %in% c("patient", lnc, pcgs))]
    rownames(dat_keep) = dat_keep$patient
    dat_keep$patient = NULL
    #figure out which patients are high risk and which patients low risk
    dat_keep$median <- ""
    median2 <- quantile(as.numeric(dat_keep[,1]), 0.5)
    if(median2 ==0){
      #if median = 0 then anyone greater than zero is 1
      l1 = which(dat_keep[,1] > 0)
      l2 = which(dat_keep[,1] ==0)
      dat_keep$median[l1] = 1
      dat_keep$median[l2] = 0
    }
    if(!(median2 ==0)){
      # Otherwise split at the median (>= median counts as "high").
      l1 = which(dat_keep[,1] >= median2)
      l2 = which(dat_keep[,1] < median2)
      dat_keep$median[l1] = 1
      dat_keep$median[l2] = 0
    }
    #which one is high risk --> need survival data
    dat_keep$patient = rownames(dat_keep)
    dat_keep$median[dat_keep$median ==0] = "Low"
    dat_keep$median[dat_keep$median==1] = "High"
    #cox ph
    # The candidate's hazard ratio decides which expression group is the
    # risk group: HR < 1 -> low expression is risky; HR > 1 -> high is.
    z = which((allCands$gene == lnc) & (allCands$cancer == canc))
    HR = as.numeric(allCands$HR[z])
    if(HR <1){
      risk = "Low"
      dat_keep$risk = ""
      dat_keep$risk[dat_keep$median=="High"] ="noRISK"
      dat_keep$risk[dat_keep$median=="Low"] ="RISK"
    }
    if(HR >1){
      risk = "High"
      dat_keep$risk = ""
      dat_keep$risk[dat_keep$median=="High"] ="RISK"
      dat_keep$risk[dat_keep$median=="Low"] ="noRISK"
    }
    dat_keep$lnc = colnames(dat_keep)[1]
    dat_keep$canc = canc
    colnames(dat_keep)[1] = "lncRNA"
    return(dat_keep)
  }#end function evaluate_each_lnc
  results_lncs = llply(lncs, evaluate_each_lnc, .progress="text")
  results_lncs1 = as.data.frame(do.call("rbind", results_lncs))
}
# Label patients by risk group for every candidate lncRNA in every cancer.
all_canc_lnc_data = llply(tissues_data, get_lnc_canc, .progress="text")

#Function 3
#Within each cancer, calculate for each lncRNA which PCGs have enriched
#expression in the high risk group (linear model of PCG expression on the
#risk-group factor).
#
# Args:
#   dat: one cancer's stacked output from get_lnc_canc().
# Returns: data frame with columns lnc, pcg, canc, mean_diff, pvalue --
#   one row per lncRNA/PCG pair tested.
get_pcg_enrichment = function(dat){
  lncs = unique(dat$lnc)
  get_pcgs_high = function(lncrna){
    newdat = subset(dat, lnc == lncrna)
    #which PCGs have higher expression in the high risk group
    # Drop PCGs whose median (log1p) expression is at or below log1p(5).
    # NOTE(review): if no PCG is filtered out, newdat[,-z] with empty z
    # would select zero columns -- confirm this cannot happen.
    z = which(str_detect(colnames(newdat), "ENSG"))
    meds = apply(newdat[,z], 2, median)
    z = which(meds <= log1p(5))
    rm = names(meds)[z]
    z = which(colnames(newdat) %in% rm)
    newdat = newdat[,-z]
    pcgs = colnames(newdat)[which(str_detect(colnames(newdat), "ENSG"))]
    pcgs_exps = newdat[,c(pcgs)]
    #medians = apply(pcgs_exps, 2, median)
    #pcgs = names(medians[medians > 2])
    print(length(pcgs))
    #lnc_pcg_results = as.data.frame(matrix(ncol=5)) ; colnames(lnc_pcg_results) = c("lnc", "pcg", "canc", "mean_diff", "pvalue")
    #pcgs = pcgs[1:10]
    # Fit pcgExp ~ risk per PCG; record the risk coefficient's p-value and
    # the raw mean expression difference (RISK minus noRISK).
    get_correlation = function(pcg){
      p = pcg
      z = which(colnames(newdat) %in% p)
      lncpcg = newdat[,c(z, 1, (ncol(newdat)-4):ncol(newdat))]
      colnames(lncpcg)[1] = "pcgExp"
      order = c("RISK", "noRISK")
      lncpcg$risk <- factor(lncpcg$risk, levels = order)
      fit <- lm(pcgExp ~ risk, data=lncpcg)
      #get p-value and generate boxplot with wilcoxon p-value
      fit_pval = summary(fit)$coefficients[2,4]
      #which group is it higher in?
      mean_diff = mean(lncpcg$pcgExp[lncpcg$risk == "RISK"]) - mean(lncpcg$pcgExp[lncpcg$risk == "noRISK"])
      #if higher than 0 --> more expressed in risk group, less than 0 --> more expressed in low risk group
      #g = ggboxplot(lncpcg, x = "risk", y="pcgExp", color="median", title=paste(lncpcg$lnc[1], p, lncpcg$canc[1]))
      #g = g + stat_compare_means()
      #print(g)
      row = c(lncpcg$lnc[1], p, lncpcg$canc[1], mean_diff, fit_pval)
      return(row)
      #names(row) = colnames(lnc_pcg_results)
      #lnc_pcg_results = rbind(lnc_pcg_results, row)
    }#end get_correlation function
    pcgs_results = llply(pcgs, get_correlation, .progress="text")
    pcgs_results1 = as.data.frame(do.call("rbind", pcgs_results))
    colnames(pcgs_results1) = c("lnc", "pcg", "canc", "mean_diff", "pvalue")
    return(pcgs_results1)
  } #end get_pcgs_high function
  results_lncs = llply(lncs, get_pcgs_high, .progress="text")
  results_lncs1 = as.data.frame(do.call("rbind", results_lncs))
  #results for all lncRNA-PCG correlations in a single cancer type
  return(results_lncs1)
}
#all_canc_lnc_data = all_canc_lnc_data[1:2] ###TEST CASE -------------------------------------------------------------
# Run enrichment for every cancer and stack the per-cancer results.
all_canc_lnc_data = llply(all_canc_lnc_data, get_pcg_enrichment, .progress="text")
all_canc_lnc_data1 = as.data.frame(do.call("rbind", all_canc_lnc_data))

# Disambiguate lncRNAs appearing in several cancers by appending the
# cancer name.
# NOTE(review): cands_dups is not defined in this file -- presumably set
# by the sourced script; confirm.
z = which(all_canc_lnc_data1$lnc %in% cands_dups)
if(!(length(z))==0){
  all_canc_lnc_data1$lnc[z] = paste(all_canc_lnc_data1$lnc[z], all_canc_lnc_data1$canc[z], sep="_")
}

saveRDS(all_canc_lnc_data1, file="all_results_for_each_cancer_from_coexpression_analysis_july19_allCands.rds")

#For each cancer type, for each lncRNA ...
#Summarize #of PCGs upregulated in risk group and #of PCGs upregulated in non-risk group

#divide into high risk and low risk lncRNAs
high_risk = subset(all_canc_lnc_data1, mean_diff >=1.5) #should set higher mean difference threshold?
low_risk = subset(all_canc_lnc_data1, mean_diff <=0.75) #should set higher mean difference threshold?

library(reshape2)

#pcgs enriched in high risk lncRNAs
# PCG x lncRNA matrices of p-values ("na" where the pair was not tested).
high_risk_matrix = acast(high_risk, pcg ~ lnc, function(x) {sort(as.character(x))[1]},
  value.var = 'pvalue', fill = 'na')

#pcgs enriched in low risk lncRNAS
low_risk_matrix = acast(low_risk, pcg ~ lnc, function(x) {sort(as.character(x))[1]},
  value.var = 'pvalue', fill = 'na')

#columns are lncRNAs and rows are PCGs
saveRDS(high_risk_matrix, file="high_risk_matrix_lncRNA_candidates_June6.rds")
saveRDS(low_risk_matrix, file="low_risk_matrix_lncRNA_candidates_June6.rds")
|
4a5e80b39f702a68c3348017d4a8a13f2ace0fd3
|
a47ce30f5112b01d5ab3e790a1b51c910f3cf1c3
|
/B_analysts_sources_github/MarkEdmondson1234/autoGoogleAPI/servicecontrol_objects.R
|
7a8c8efb3f06b06a4a69287911df2a5d9cee6a08
|
[] |
no_license
|
Irbis3/crantasticScrapper
|
6b6d7596344115343cfd934d3902b85fbfdd7295
|
7ec91721565ae7c9e2d0e098598ed86e29375567
|
refs/heads/master
| 2020-03-09T04:03:51.955742
| 2018-04-16T09:41:39
| 2018-04-16T09:41:39
| 128,578,890
| 5
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 38,980
|
r
|
servicecontrol_objects.R
|
#' Google Service Control API Objects
#' Google Service Control provides control plane functionality to managed services, such as logging, monitoring, and status checks.
#'
#' Auto-generated code by googleAuthR::gar_create_api_objects
#' at 2017-03-05 20:12:13
#' filename: /Users/mark/dev/R/autoGoogleAPI/googleservicecontrolv1.auto/R/servicecontrol_objects.R
#' api_json: api_json
#'
#' Objects for use by the functions created by googleAuthR::gar_create_api_skeleton
#' AuditLog object
#'
#' Common audit log format for Google Cloud Platform API operations.
#' Autogenerated via \code{\link[googleAuthR]{gar_create_api_objects}}.
#'
#' @param AuditLog.response The \link{AuditLog.response} object or list of objects
#' @param AuditLog.request The \link{AuditLog.request} object or list of objects
#' @param AuditLog.serviceData The \link{AuditLog.serviceData} object or list of objects
#' @param numResponseItems The number of items returned from a List or Query API method
#' @param status The status of the overall operation
#' @param authenticationInfo Authentication information
#' @param serviceName The name of the API service performing the operation
#' @param response The operation response
#' @param methodName The name of the service method or operation
#' @param authorizationInfo Authorization information
#' @param resourceName The resource or collection that is the target of the operation
#' @param request The operation request
#' @param requestMetadata Metadata about the operation
#' @param serviceData Other service-specific data about the request and response
#'
#' @return A named list of class \code{gar_AuditLog}.
#'
#' @family AuditLog functions
#' @export
AuditLog <- function(AuditLog.response = NULL, AuditLog.request = NULL, AuditLog.serviceData = NULL,
                     numResponseItems = NULL, status = NULL, authenticationInfo = NULL, serviceName = NULL,
                     response = NULL, methodName = NULL, authorizationInfo = NULL, resourceName = NULL,
                     request = NULL, requestMetadata = NULL, serviceData = NULL) {
  # Collect fields into a named list, then tag it with the API object class.
  out <- list(
    AuditLog.response = AuditLog.response,
    AuditLog.request = AuditLog.request,
    AuditLog.serviceData = AuditLog.serviceData,
    numResponseItems = numResponseItems,
    status = status,
    authenticationInfo = authenticationInfo,
    serviceName = serviceName,
    response = response,
    methodName = methodName,
    authorizationInfo = authorizationInfo,
    resourceName = resourceName,
    request = request,
    requestMetadata = requestMetadata,
    serviceData = serviceData
  )
  class(out) <- "gar_AuditLog"
  out
}

#' AuditLog.response object
#'
#' Free-form operation response payload; returned as an empty list for the
#' caller to populate. Autogenerated via
#' \code{\link[googleAuthR]{gar_create_api_objects}}.
#'
#' @return An empty \code{list}.
#'
#' @family AuditLog functions
#' @export
AuditLog.response <- function() list()

#' AuditLog.request object
#'
#' Free-form operation request payload; returned as an empty list for the
#' caller to populate. Autogenerated via
#' \code{\link[googleAuthR]{gar_create_api_objects}}.
#'
#' @return An empty \code{list}.
#'
#' @family AuditLog functions
#' @export
AuditLog.request <- function() list()

#' AuditLog.serviceData object
#'
#' Holder for other service-specific data about the request/response;
#' returned as an empty list for the caller to populate. Autogenerated via
#' \code{\link[googleAuthR]{gar_create_api_objects}}.
#'
#' @return An empty \code{list}.
#'
#' @family AuditLog functions
#' @export
AuditLog.serviceData <- function() list()
#' LogEntry object
#'
#' An individual log entry.
#' Autogenerated via \code{\link[googleAuthR]{gar_create_api_objects}}.
#'
#' @param LogEntry.labels The \link{LogEntry.labels} object or list of objects
#' @param LogEntry.structPayload The \link{LogEntry.structPayload} object or list of objects
#' @param LogEntry.protoPayload The \link{LogEntry.protoPayload} object or list of objects
#' @param labels User-defined (key, value) data providing additional information
#' @param severity The severity of the log entry
#' @param name Required log name
#' @param insertId A unique ID for the log entry used for deduplication
#' @param structPayload The log entry payload as a JSON-like structure
#' @param textPayload The log entry payload as a Unicode string (UTF-8)
#' @param protoPayload The log entry payload as a protocol buffer
#' @param timestamp The time the event described by the log entry occurred
#'
#' @return A named list of class \code{gar_LogEntry}.
#'
#' @family LogEntry functions
#' @export
LogEntry <- function(LogEntry.labels = NULL, LogEntry.structPayload = NULL, LogEntry.protoPayload = NULL,
                     labels = NULL, severity = NULL, name = NULL, insertId = NULL, structPayload = NULL,
                     textPayload = NULL, protoPayload = NULL, timestamp = NULL) {
  # Collect fields into a named list, then tag it with the API object class.
  out <- list(
    LogEntry.labels = LogEntry.labels,
    LogEntry.structPayload = LogEntry.structPayload,
    LogEntry.protoPayload = LogEntry.protoPayload,
    labels = labels,
    severity = severity,
    name = name,
    insertId = insertId,
    structPayload = structPayload,
    textPayload = textPayload,
    protoPayload = protoPayload,
    timestamp = timestamp
  )
  class(out) <- "gar_LogEntry"
  out
}

#' LogEntry.labels object
#'
#' User-defined (key, value) metadata for a log entry; returned as an empty
#' list for the caller to populate. Autogenerated via
#' \code{\link[googleAuthR]{gar_create_api_objects}}.
#'
#' @return An empty \code{list}.
#'
#' @family LogEntry functions
#' @export
LogEntry.labels <- function() list()

#' LogEntry.structPayload object
#'
#' Structured (JSON object) log payload; returned as an empty list for the
#' caller to populate. Autogenerated via
#' \code{\link[googleAuthR]{gar_create_api_objects}}.
#'
#' @return An empty \code{list}.
#'
#' @family LogEntry functions
#' @export
LogEntry.structPayload <- function() list()

#' LogEntry.protoPayload object
#'
#' Protocol-buffer log payload expressed as a JSON object; returned as an
#' empty list for the caller to populate. Autogenerated via
#' \code{\link[googleAuthR]{gar_create_api_objects}}.
#'
#' @return An empty \code{list}.
#'
#' @family LogEntry functions
#' @export
LogEntry.protoPayload <- function() list()
#' MetricValue object
#'
#' Represents a single metric value.
#' Autogenerated via \code{\link[googleAuthR]{gar_create_api_objects}}.
#'
#' @param MetricValue.labels The \link{MetricValue.labels} object or list of objects
#' @param boolValue A boolean value
#' @param endTime The end of the time period over which this measurement applies
#' @param startTime The start of the time period over which this measurement applies
#' @param moneyValue A money value
#' @param stringValue A text string value
#' @param labels The labels describing the metric value
#' @param doubleValue A double precision floating point value
#' @param int64Value A signed 64-bit integer value
#' @param distributionValue A distribution value
#'
#' @return A named list of class \code{gar_MetricValue}.
#'
#' @family MetricValue functions
#' @export
MetricValue <- function(MetricValue.labels = NULL, boolValue = NULL, endTime = NULL,
                        startTime = NULL, moneyValue = NULL, stringValue = NULL, labels = NULL,
                        doubleValue = NULL, int64Value = NULL, distributionValue = NULL) {
  # Collect fields into a named list, then tag it with the API object class.
  out <- list(
    MetricValue.labels = MetricValue.labels,
    boolValue = boolValue,
    endTime = endTime,
    startTime = startTime,
    moneyValue = moneyValue,
    stringValue = stringValue,
    labels = labels,
    doubleValue = doubleValue,
    int64Value = int64Value,
    distributionValue = distributionValue
  )
  class(out) <- "gar_MetricValue"
  out
}

#' MetricValue.labels object
#'
#' Labels describing a metric value; returned as an empty list for the
#' caller to populate. Autogenerated via
#' \code{\link[googleAuthR]{gar_create_api_objects}}.
#'
#' @return An empty \code{list}.
#'
#' @family MetricValue functions
#' @export
MetricValue.labels <- function() list()
#' EndReconciliationResponse object
#'
#' Response for the EndReconciliation method.
#' Autogenerated via \code{\link[googleAuthR]{gar_create_api_objects}}.
#'
#' @param quotaMetrics Metric values as tracked by One Platform before the adjustment was made
#' @param operationId The same operation_id value used in the EndReconciliationRequest
#' @param reconciliationErrors Indicates the decision of the reconciliation end
#' @param serviceConfigId ID of the actual config used to process the request
#'
#' @return A named list of class \code{gar_EndReconciliationResponse}.
#'
#' @family EndReconciliationResponse functions
#' @export
EndReconciliationResponse <- function(quotaMetrics = NULL, operationId = NULL,
                                      reconciliationErrors = NULL, serviceConfigId = NULL) {
  # Collect fields into a named list, then tag it with the API object class.
  out <- list(
    quotaMetrics = quotaMetrics,
    operationId = operationId,
    reconciliationErrors = reconciliationErrors,
    serviceConfigId = serviceConfigId
  )
  class(out) <- "gar_EndReconciliationResponse"
  out
}
#' Money object
#'
#' Represents an amount of money with its currency type.
#' Autogenerated via \code{\link[googleAuthR]{gar_create_api_objects}}.
#'
#' @param nanos Number of nano (10^-9) units of the amount
#' @param units The whole units of the amount
#' @param currencyCode The 3-letter currency code defined in ISO 4217
#'
#' @return A named list of class \code{gar_Money}.
#'
#' @family Money functions
#' @export
Money <- function(nanos = NULL, units = NULL, currencyCode = NULL) {
  # Collect fields into a named list, then tag it with the API object class.
  out <- list(
    nanos = nanos,
    units = units,
    currencyCode = currencyCode
  )
  class(out) <- "gar_Money"
  out
}
#' Distribution object
#'
#' A frequency distribution of double-valued sample points: population size
#' plus optional mean, min/max, sum of squared deviations, and a histogram.
#' Autogenerated via \code{\link[googleAuthR]{gar_create_api_objects}}.
#'
#' @param explicitBuckets Buckets with arbitrary user-provided width
#' @param maximum The maximum of the population of values
#' @param sumOfSquaredDeviation The sum of squared deviations from the mean
#' @param exponentialBuckets Buckets with exponentially growing width
#' @param minimum The minimum of the population of values
#' @param linearBuckets Buckets with constant width
#' @param mean The arithmetic mean of the samples in the distribution
#' @param count The total number of samples in the distribution
#' @param bucketCounts The number of samples in each histogram bucket
#'
#' @return A named list of class \code{gar_Distribution}.
#'
#' @family Distribution functions
#' @export
Distribution <- function(explicitBuckets = NULL, maximum = NULL, sumOfSquaredDeviation = NULL,
                         exponentialBuckets = NULL, minimum = NULL, linearBuckets = NULL,
                         mean = NULL, count = NULL, bucketCounts = NULL) {
  # Collect fields into a named list, then tag it with the API object class.
  out <- list(
    explicitBuckets = explicitBuckets,
    maximum = maximum,
    sumOfSquaredDeviation = sumOfSquaredDeviation,
    exponentialBuckets = exponentialBuckets,
    minimum = minimum,
    linearBuckets = linearBuckets,
    mean = mean,
    count = count,
    bucketCounts = bucketCounts
  )
  class(out) <- "gar_Distribution"
  out
}
#' ExplicitBuckets object
#'
#' Describes histogram buckets with arbitrary user-provided width.
#' Autogenerated via \code{\link[googleAuthR]{gar_create_api_objects}}.
#'
#' @param bounds A list of strictly increasing boundaries between buckets
#'
#' @return A named list of class \code{gar_ExplicitBuckets}.
#'
#' @family ExplicitBuckets functions
#' @export
ExplicitBuckets <- function(bounds = NULL) {
  # Single-field object: wrap and tag with the API object class.
  out <- list(bounds = bounds)
  class(out) <- "gar_ExplicitBuckets"
  out
}
#' ExponentialBuckets object
#'
#' Describes histogram buckets with exponentially growing width.
#' Autogenerated via \code{\link[googleAuthR]{gar_create_api_objects}}.
#'
#' @param numFiniteBuckets The number of finite buckets
#' @param growthFactor Growth factor of the i'th exponential bucket interval
#' @param scale Scale of the i'th exponential bucket interval
#'
#' @return A named list of class \code{gar_ExponentialBuckets}.
#'
#' @family ExponentialBuckets functions
#' @export
ExponentialBuckets <- function(numFiniteBuckets = NULL, growthFactor = NULL, scale = NULL) {
  # Collect fields into a named list, then tag it with the API object class.
  out <- list(
    numFiniteBuckets = numFiniteBuckets,
    growthFactor = growthFactor,
    scale = scale
  )
  class(out) <- "gar_ExponentialBuckets"
  out
}
#' AuthorizationInfo object
#'
#' Authorization information for the operation.
#' Autogenerated via \code{\link[googleAuthR]{gar_create_api_objects}}.
#'
#' @param resource The resource being accessed, as a REST-style string
#' @param granted Whether or not authorization for `resource` and `permission` was granted
#' @param permission The required IAM permission
#'
#' @return A named list of class \code{gar_AuthorizationInfo}.
#'
#' @family AuthorizationInfo functions
#' @export
AuthorizationInfo <- function(resource = NULL, granted = NULL, permission = NULL) {
  # Collect fields into a named list, then tag it with the API object class.
  out <- list(
    resource = resource,
    granted = granted,
    permission = permission
  )
  class(out) <- "gar_AuthorizationInfo"
  out
}
#' StartReconciliationResponse object
#'
#' Response for the StartReconciliation method.
#' Autogenerated via \code{\link[googleAuthR]{gar_create_api_objects}}.
#'
#' @param quotaMetrics Metric values as tracked by One Platform before the start of reconciliation
#' @param operationId The same operation_id value used in the StartReconciliationRequest
#' @param reconciliationErrors Indicates the decision of the reconciliation start
#' @param serviceConfigId ID of the actual config used to process the request
#'
#' @return A named list of class \code{gar_StartReconciliationResponse}.
#'
#' @family StartReconciliationResponse functions
#' @export
StartReconciliationResponse <- function(quotaMetrics = NULL, operationId = NULL,
                                        reconciliationErrors = NULL, serviceConfigId = NULL) {
  # Collect fields into a named list, then tag it with the API object class.
  out <- list(
    quotaMetrics = quotaMetrics,
    operationId = operationId,
    reconciliationErrors = reconciliationErrors,
    serviceConfigId = serviceConfigId
  )
  class(out) <- "gar_StartReconciliationResponse"
  out
}
#' QuotaProperties object
#'
#' Properties needed for quota operations. Cost overrides are supplied via
#' the metric_value_sets field in the Operation message; costs must be >= 0
#' and a cost of 0 causes no quota check.
#' Autogenerated via \code{\link[googleAuthR]{gar_create_api_objects}}.
#'
#' @param QuotaProperties.limitByIds The \link{QuotaProperties.limitByIds} object or list of objects
#' @param limitByIds LimitType IDs that should be used for checking quota
#' @param quotaMode Quota mode for this operation
#'
#' @return A named list of class \code{gar_QuotaProperties}.
#'
#' @family QuotaProperties functions
#' @export
QuotaProperties <- function(QuotaProperties.limitByIds = NULL, limitByIds = NULL,
                            quotaMode = NULL) {
  # Collect fields into a named list, then tag it with the API object class.
  out <- list(
    QuotaProperties.limitByIds = QuotaProperties.limitByIds,
    limitByIds = limitByIds,
    quotaMode = quotaMode
  )
  class(out) <- "gar_QuotaProperties"
  out
}

#' QuotaProperties.limitByIds object
#'
#' Map of LimitType name to the ID used for quota checking (deprecated in the
#' API in favour of the servicecontrol.googleapis.com/user label); returned
#' as an empty list for the caller to populate. Autogenerated via
#' \code{\link[googleAuthR]{gar_create_api_objects}}.
#'
#' @return An empty \code{list}.
#'
#' @family QuotaProperties functions
#' @export
QuotaProperties.limitByIds <- function() list()
#' LinearBuckets object
#'
#' Describes histogram buckets with constant width.
#' Autogenerated via \code{\link[googleAuthR]{gar_create_api_objects}}.
#'
#' @param offset Offset of the i'th linear bucket interval
#' @param numFiniteBuckets The number of finite buckets
#' @param width Width of the i'th linear bucket interval
#'
#' @return A named list of class \code{gar_LinearBuckets}.
#'
#' @family LinearBuckets functions
#' @export
LinearBuckets <- function(offset = NULL, numFiniteBuckets = NULL, width = NULL) {
  # Collect fields into a named list, then tag it with the API object class.
  out <- list(
    offset = offset,
    numFiniteBuckets = numFiniteBuckets,
    width = width
  )
  class(out) <- "gar_LinearBuckets"
  out
}
#' AuthenticationInfo object
#'
#' Authentication information for the operation.
#' Autogenerated via \code{\link[googleAuthR]{gar_create_api_objects}}.
#'
#' @param principalEmail The email address of the authenticated user making the request
#' @param authoritySelector The authority selector specified by the requestor, if any
#'
#' @return A named list of class \code{gar_AuthenticationInfo}.
#'
#' @family AuthenticationInfo functions
#' @export
AuthenticationInfo <- function(principalEmail = NULL, authoritySelector = NULL) {
  # Collect fields into a named list, then tag it with the API object class.
  out <- list(
    principalEmail = principalEmail,
    authoritySelector = authoritySelector
  )
  class(out) <- "gar_AuthenticationInfo"
  out
}
#' AllocateQuotaResponse object
#'
#' Response message for the AllocateQuota method.
#' Autogenerated via \code{\link[googleAuthR]{gar_create_api_objects}}.
#'
#' @param operationId The same operation_id value used in the AllocateQuotaRequest
#' @param serviceConfigId ID of the actual config used to process the request
#' @param allocateErrors Indicates the decision of the allocate
#' @param quotaMetrics Quota metrics to indicate the result of allocation
#'
#' @return A named list of class \code{gar_AllocateQuotaResponse}.
#'
#' @family AllocateQuotaResponse functions
#' @export
AllocateQuotaResponse <- function(operationId = NULL, serviceConfigId = NULL,
                                  allocateErrors = NULL, quotaMetrics = NULL) {
  # Collect fields into a named list, then tag it with the API object class.
  out <- list(
    operationId = operationId,
    serviceConfigId = serviceConfigId,
    allocateErrors = allocateErrors,
    quotaMetrics = quotaMetrics
  )
  class(out) <- "gar_AllocateQuotaResponse"
  out
}
#' ReleaseQuotaRequest object
#'
#' Request message for the ReleaseQuota method.
#' Autogenerated via \code{\link[googleAuthR]{gar_create_api_objects}}.
#'
#' @param serviceConfigId Which version of service configuration should process the request
#' @param releaseOperation Operation that describes the quota release
#'
#' @return A named list of class \code{gar_ReleaseQuotaRequest}.
#'
#' @family ReleaseQuotaRequest functions
#' @export
ReleaseQuotaRequest <- function(serviceConfigId = NULL, releaseOperation = NULL) {
  # Collect fields into a named list, then tag it with the API object class.
  out <- list(
    serviceConfigId = serviceConfigId,
    releaseOperation = releaseOperation
  )
  class(out) <- "gar_ReleaseQuotaRequest"
  out
}
#' QuotaError object
#'
#' Describes an error encountered while processing a quota operation.
#' Autogenerated via \code{\link[googleAuthR]{gar_create_api_objects}}.
#'
#' @param subject Subject to whom this error applies
#' @param description Free-form text that provides details on the cause of the error
#' @param code Error code
#'
#' @return A named list of class \code{gar_QuotaError}.
#'
#' @family QuotaError functions
#' @export
QuotaError <- function(subject = NULL, description = NULL, code = NULL) {
  # Collect fields into a named list, then tag it with the API object class.
  out <- list(
    subject = subject,
    description = description,
    code = code
  )
  class(out) <- "gar_QuotaError"
  out
}
#' RequestMetadata object
#'
#' Metadata about the request.
#' Autogenerated via \code{\link[googleAuthR]{gar_create_api_objects}}.
#'
#' @param callerIp The IP address of the caller
#' @param callerSuppliedUserAgent The user agent of the caller
#'
#' @return A named list of class \code{gar_RequestMetadata}.
#'
#' @family RequestMetadata functions
#' @export
RequestMetadata <- function(callerIp = NULL, callerSuppliedUserAgent = NULL) {
  # Collect fields into a named list, then tag it with the API object class.
  out <- list(
    callerIp = callerIp,
    callerSuppliedUserAgent = callerSuppliedUserAgent
  )
  class(out) <- "gar_RequestMetadata"
  out
}
#' CheckInfo object
#'
#' Feedback data about a Check request.
#' Autogenerated via \code{\link[googleAuthR]{gar_create_api_objects}}.
#'
#' @param unusedArguments A list of fields and label keys that are ignored by the server
#'
#' @return A named list of class \code{gar_CheckInfo}.
#'
#' @family CheckInfo functions
#' @export
CheckInfo <- function(unusedArguments = NULL) {
  # Single-field object: wrap and tag with the API object class.
  out <- list(unusedArguments = unusedArguments)
  class(out) <- "gar_CheckInfo"
  out
}
#' ReleaseQuotaResponse object
#'
#' Response message for the ReleaseQuota method.
#' Autogenerated via \code{\link[googleAuthR]{gar_create_api_objects}}.
#'
#' @param quotaMetrics Quota metrics to indicate the result of release
#' @param operationId The same operation_id value used in the ReleaseQuotaRequest
#' @param serviceConfigId ID of the actual config used to process the request
#' @param releaseErrors Indicates the decision of the release
#'
#' @return A named list of class \code{gar_ReleaseQuotaResponse}.
#'
#' @family ReleaseQuotaResponse functions
#' @export
ReleaseQuotaResponse <- function(quotaMetrics = NULL, operationId = NULL,
                                 serviceConfigId = NULL, releaseErrors = NULL) {
  # Collect fields into a named list, then tag it with the API object class.
  out <- list(
    quotaMetrics = quotaMetrics,
    operationId = operationId,
    serviceConfigId = serviceConfigId,
    releaseErrors = releaseErrors
  )
  class(out) <- "gar_ReleaseQuotaResponse"
  out
}
#' AllocateQuotaRequest object
#'
#' Request message for the AllocateQuota method.
#' Autogenerated via \code{\link[googleAuthR]{gar_create_api_objects}}.
#'
#' @param allocationMode Allocation mode for this operation
#' @param serviceConfigId Which version of service configuration should process the request
#' @param allocateOperation Operation that describes the quota allocation
#'
#' @return A named list of class \code{gar_AllocateQuotaRequest}.
#'
#' @family AllocateQuotaRequest functions
#' @export
AllocateQuotaRequest <- function(allocationMode = NULL, serviceConfigId = NULL,
                                 allocateOperation = NULL) {
  # Collect fields into a named list, then tag it with the API object class.
  out <- list(
    allocationMode = allocationMode,
    serviceConfigId = serviceConfigId,
    allocateOperation = allocateOperation
  )
  class(out) <- "gar_AllocateQuotaRequest"
  out
}
#' MetricValueSet object
#'
#' A set of metric values for the same metric; each value should have a
#' unique combination of start time, end time, and label values.
#' Autogenerated via \code{\link[googleAuthR]{gar_create_api_objects}}.
#'
#' @param metricValues The values in this metric
#' @param metricName The metric name defined in the service configuration
#'
#' @return A named list of class \code{gar_MetricValueSet}.
#'
#' @family MetricValueSet functions
#' @export
MetricValueSet <- function(metricValues = NULL, metricName = NULL) {
  # Collect fields into a named list, then tag it with the API object class.
  out <- list(
    metricValues = metricValues,
    metricName = metricName
  )
  class(out) <- "gar_MetricValueSet"
  out
}
#' ReportError object
#'
#' Represents the processing error of one `Operation` in the request.
#' Autogenerated via \code{\link[googleAuthR]{gar_create_api_objects}}.
#'
#' @param operationId The Operation id
#' @param status Details of the error when processing the `Operation`
#'
#' @return A named list of class \code{gar_ReportError}.
#'
#' @family ReportError functions
#' @export
ReportError <- function(operationId = NULL, status = NULL) {
  # Collect fields into a named list, then tag it with the API object class.
  out <- list(
    operationId = operationId,
    status = status
  )
  class(out) <- "gar_ReportError"
  out
}
#' StartReconciliationRequest object
#'
#' Request for the StartReconciliation method.
#' Autogenerated via \code{\link[googleAuthR]{gar_create_api_objects}}.
#'
#' @param reconciliationOperation Operation that describes the quota reconciliation
#' @param serviceConfigId Which version of service configuration should process the request
#'
#' @return A named list of class \code{gar_StartReconciliationRequest}.
#'
#' @family StartReconciliationRequest functions
#' @export
StartReconciliationRequest <- function(reconciliationOperation = NULL, serviceConfigId = NULL) {
  # Collect fields into a named list, then tag it with the API object class.
  out <- list(
    reconciliationOperation = reconciliationOperation,
    serviceConfigId = serviceConfigId
  )
  class(out) <- "gar_StartReconciliationRequest"
  out
}
#' CheckError object
#'
#' Defines the errors returned in
#' google.api.servicecontrol.v1.CheckResponse.check_errors.
#' Autogenerated via \code{\link[googleAuthR]{gar_create_api_objects}}.
#'
#' @param detail Free-form text providing details on the cause of the error
#' @param code The error code
#'
#' @return A named list of class \code{gar_CheckError}.
#'
#' @family CheckError functions
#' @export
CheckError <- function(detail = NULL, code = NULL) {
  # Collect fields into a named list, then tag it with the API object class.
  out <- list(
    detail = detail,
    code = code
  )
  class(out) <- "gar_CheckError"
  out
}
#' QuotaInfo object
#'
#' Quota information for a quota check response.
#' Autogenerated via \code{\link[googleAuthR]{gar_create_api_objects}}.
#'
#' @param QuotaInfo.quotaConsumed The \link{QuotaInfo.quotaConsumed} object or list of objects
#' @param quotaConsumed Map of quota group name to the actual number of tokens consumed
#' @param quotaMetrics Quota metrics to indicate the usage
#' @param limitExceeded Quota metrics that have exceeded quota limits
#'
#' @return A named list of class \code{gar_QuotaInfo}.
#'
#' @family QuotaInfo functions
#' @export
QuotaInfo <- function(QuotaInfo.quotaConsumed = NULL, quotaConsumed = NULL,
                      quotaMetrics = NULL, limitExceeded = NULL) {
  # Collect fields into a named list, then tag it with the API object class.
  out <- list(
    QuotaInfo.quotaConsumed = QuotaInfo.quotaConsumed,
    quotaConsumed = quotaConsumed,
    quotaMetrics = quotaMetrics,
    limitExceeded = limitExceeded
  )
  class(out) <- "gar_QuotaInfo"
  out
}

#' QuotaInfo.quotaConsumed object
#'
#' Map of quota group name to tokens consumed (deprecated in the API in
#' favour of quota_metrics); returned as an empty list for the caller to
#' populate. Autogenerated via
#' \code{\link[googleAuthR]{gar_create_api_objects}}.
#'
#' @return An empty \code{list}.
#'
#' @family QuotaInfo functions
#' @export
QuotaInfo.quotaConsumed <- function() list()
#' CheckRequest object
#'
#' Request message for the Check method.
#' Autogenerated via \code{\link[googleAuthR]{gar_create_api_objects}}.
#'
#' @param serviceConfigId Which version of service configuration should process the request
#' @param skipActivationCheck Indicates if the service activation check should be skipped
#' @param operation The operation to be checked
#' @param requestProjectSettings Requests the project settings to be returned in the check response
#'
#' @return A named list of class \code{gar_CheckRequest}.
#'
#' @family CheckRequest functions
#' @export
CheckRequest <- function(serviceConfigId = NULL, skipActivationCheck = NULL,
                         operation = NULL, requestProjectSettings = NULL) {
  # Collect fields into a named list, then tag it with the API object class.
  out <- list(
    serviceConfigId = serviceConfigId,
    skipActivationCheck = skipActivationCheck,
    operation = operation,
    requestProjectSettings = requestProjectSettings
  )
  class(out) <- "gar_CheckRequest"
  out
}
#' QuotaOperation object
#'
#' Represents information regarding a quota operation.
#' Autogenerated via \code{\link[googleAuthR]{gar_create_api_objects}}.
#'
#' @param QuotaOperation.labels The \link{QuotaOperation.labels} object or list of objects
#' @param labels Labels describing the operation
#' @param consumerId Identity of the consumer for whom this quota operation is performed
#' @param operationId Identity of the operation
#' @param methodName Fully qualified name of the API method for this quota operation
#' @param quotaMode Quota mode for this operation
#' @param quotaMetrics Represents information about this operation
#'
#' @return A named list of class \code{gar_QuotaOperation}.
#'
#' @family QuotaOperation functions
#' @export
QuotaOperation <- function(QuotaOperation.labels = NULL, labels = NULL, consumerId = NULL,
                           operationId = NULL, methodName = NULL, quotaMode = NULL,
                           quotaMetrics = NULL) {
  # Collect fields into a named list, then tag it with the API object class.
  out <- list(
    QuotaOperation.labels = QuotaOperation.labels,
    labels = labels,
    consumerId = consumerId,
    operationId = operationId,
    methodName = methodName,
    quotaMode = quotaMode,
    quotaMetrics = quotaMetrics
  )
  class(out) <- "gar_QuotaOperation"
  out
}

#' QuotaOperation.labels object
#'
#' Labels describing the quota operation; returned as an empty list for the
#' caller to populate. Autogenerated via
#' \code{\link[googleAuthR]{gar_create_api_objects}}.
#'
#' @return An empty \code{list}.
#'
#' @family QuotaOperation functions
#' @export
QuotaOperation.labels <- function() list()
#' EndReconciliationRequest object
#'
#' Request for the EndReconciliation method.
#' Autogenerated via \code{\link[googleAuthR]{gar_create_api_objects}}.
#'
#' @param serviceConfigId Which version of service configuration should process the request
#' @param reconciliationOperation Operation that describes the quota reconciliation
#'
#' @return A named list of class \code{gar_EndReconciliationRequest}.
#'
#' @family EndReconciliationRequest functions
#' @export
EndReconciliationRequest <- function(serviceConfigId = NULL, reconciliationOperation = NULL) {
  # Collect fields into a named list, then tag it with the API object class.
  out <- list(
    serviceConfigId = serviceConfigId,
    reconciliationOperation = reconciliationOperation
  )
  class(out) <- "gar_EndReconciliationRequest"
  out
}
#' ReportInfo object
#'
#' Per-operation information returned by the Report method.
#' Autogenerated via \code{\link[googleAuthR]{gar_create_api_objects}}.
#'
#' @param operationId The Operation id
#' @param quotaInfo Quota usage info when processing the `Operation`
#'
#' @return A named list of class \code{gar_ReportInfo}.
#'
#' @family ReportInfo functions
#' @export
ReportInfo <- function(operationId = NULL, quotaInfo = NULL) {
  # Collect fields into a named list, then tag it with the API object class.
  out <- list(
    operationId = operationId,
    quotaInfo = quotaInfo
  )
  class(out) <- "gar_ReportInfo"
  out
}
#' ReportResponse object
#'
#' Response message for the Report method.
#' Autogenerated via \code{\link[googleAuthR]{gar_create_api_objects}}.
#'
#' @param reportInfos Quota usage for each quota release `Operation` request
#' @param serviceConfigId The actual config id used to process the request
#' @param reportErrors Partial failures, one for each `Operation` in the request that failed
#'
#' @return A named list of class \code{gar_ReportResponse}.
#'
#' @family ReportResponse functions
#' @export
ReportResponse <- function(reportInfos = NULL, serviceConfigId = NULL, reportErrors = NULL) {
  # Collect fields into a named list, then tag it with the API object class.
  out <- list(
    reportInfos = reportInfos,
    serviceConfigId = serviceConfigId,
    reportErrors = reportErrors
  )
  class(out) <- "gar_ReportResponse"
  out
}
#' Operation Object
#'
#' @details
#' Autogenerated via \code{\link[googleAuthR]{gar_create_api_objects}}
#' Represents information regarding an operation.
#'
#' @param Operation.labels The \link{Operation.labels} object or list of objects
#' @param Operation.userLabels The \link{Operation.userLabels} object or list of objects
#' @param quotaProperties Represents the properties needed for quota check
#' @param consumerId Identity of the consumer who is using the service
#' @param operationId Identity of the operation
#' @param endTime End time of the operation
#' @param operationName Fully qualified name of the operation
#' @param startTime Required
#' @param importance DO NOT USE
#' @param resourceContainer The resource name of the parent of a resource in the resource hierarchy
#' @param labels Labels describing the operation
#' @param logEntries Represents information to be logged
#' @param userLabels User defined labels for the resource that this operation is associated
#' @param metricValueSets Represents information about this operation
#'
#' @return Operation object
#'
#' @family Operation functions
#' @export
Operation <- function(Operation.labels = NULL, Operation.userLabels = NULL, quotaProperties = NULL,
    consumerId = NULL, operationId = NULL, endTime = NULL, operationName = NULL,
    startTime = NULL, importance = NULL, resourceContainer = NULL, labels = NULL,
    logEntries = NULL, userLabels = NULL, metricValueSets = NULL) {
    # Collect every field (NULLs included, so slots are always present) into a
    # named list, then tag it with the API object class.
    fields <- list(
        Operation.labels = Operation.labels,
        Operation.userLabels = Operation.userLabels,
        quotaProperties = quotaProperties,
        consumerId = consumerId,
        operationId = operationId,
        endTime = endTime,
        operationName = operationName,
        startTime = startTime,
        importance = importance,
        resourceContainer = resourceContainer,
        labels = labels,
        logEntries = logEntries,
        userLabels = userLabels,
        metricValueSets = metricValueSets
    )
    structure(fields, class = "gar_Operation")
}
#' Operation.labels Object
#'
#' @details
#' Autogenerated via \code{\link[googleAuthR]{gar_create_api_objects}}
#' Labels describing the operation. Only the following labels are allowed:- Labels describing monitored resources as defined in the service configuration.- Default labels of metric values. When specified, labels defined in the metric value override these default.- The following labels defined by Google Cloud Platform: - `cloud.googleapis.com/location` describing the location where the operation happened, - `servicecontrol.googleapis.com/user_agent` describing the user agent of the API request, - `servicecontrol.googleapis.com/service_agent` describing the service used to handle the API request (e.g. ESP), - `servicecontrol.googleapis.com/platform` describing the platform where the API is served (e.g. GAE, GCE, GKE).
#'
#'
#'
#' @return Operation.labels object
#'
#' @family Operation functions
#' @export
# This API object carries no fixed fields; it is represented as an empty list.
Operation.labels <- function() list()
#' Operation.userLabels Object
#'
#' @details
#' Autogenerated via \code{\link[googleAuthR]{gar_create_api_objects}}
#' User defined labels for the resource that this operation is associatedwith.
#'
#'
#'
#' @return Operation.userLabels object
#'
#' @family Operation functions
#' @export
# User-defined labels have no fixed schema; the object is an empty list.
Operation.userLabels <- function() list()
#' CheckResponse Object
#'
#' @details
#' Autogenerated via \code{\link[googleAuthR]{gar_create_api_objects}}
#' Response message for the Check method.
#'
#' @param operationId The same operation_id value used in the CheckRequest
#' @param checkErrors Indicate the decision of the check
#' @param checkInfo Feedback data returned from the server during processing a Check request
#' @param quotaInfo Quota information for the check request associated with this response
#' @param serviceConfigId The actual config id used to process the request
#'
#' @return CheckResponse object
#'
#' @family CheckResponse functions
#' @export
CheckResponse <- function(operationId = NULL, checkErrors = NULL, checkInfo = NULL,
    quotaInfo = NULL, serviceConfigId = NULL) {
    # Assemble the response fields, then assign the API object class.
    out <- list(
        operationId = operationId,
        checkErrors = checkErrors,
        checkInfo = checkInfo,
        quotaInfo = quotaInfo,
        serviceConfigId = serviceConfigId
    )
    class(out) <- "gar_CheckResponse"
    out
}
#' Status Object
#'
#' @details
#' Autogenerated via \code{\link[googleAuthR]{gar_create_api_objects}}
#' The `Status` type defines a logical error model that is suitable for differentprogramming environments, including REST APIs and RPC APIs. It is used by[gRPC](https://github.com/grpc). The error model is designed to be:- Simple to use and understand for most users- Flexible enough to meet unexpected needs# OverviewThe `Status` message contains three pieces of data: error code, error message,and error details. The error code should be an enum value ofgoogle.rpc.Code, but it may accept additional error codes if needed. Theerror message should be a developer-facing English message that helpsdevelopers *understand* and *resolve* the error. If a localized user-facingerror message is needed, put the localized message in the error details orlocalize it in the client. The optional error details may contain arbitraryinformation about the error. There is a predefined set of error detail typesin the package `google.rpc` which can be used for common error conditions.# Language mappingThe `Status` message is the logical representation of the error model, but itis not necessarily the actual wire format. When the `Status` message isexposed in different client libraries and different wire protocols, it can bemapped differently. For example, it will likely be mapped to some exceptionsin Java, but more likely mapped to some error codes in C.# Other usesThe error model and the `Status` message can be used in a variety ofenvironments, either with or without APIs, to provide aconsistent developer experience across different environments.Example uses of this error model include:- Partial errors. If a service needs to return partial errors to the client, it may embed the `Status` in the normal response to indicate the partial errors.- Workflow errors. A typical workflow has multiple steps. Each step may have a `Status` message for error reporting purpose.- Batch operations. 
If a client uses batch request and batch response, the `Status` message should be used directly inside batch response, one for each error sub-response.- Asynchronous operations. If an API call embeds asynchronous operation results in its response, the status of those operations should be represented directly using the `Status` message.- Logging. If some API errors are stored in logs, the message `Status` could be used directly after any stripping needed for security/privacy reasons.
#'
#' @param details A list of messages that carry the error details
#' @param code The status code, which should be an enum value of google
#' @param message A developer-facing error message, which should be in English
#'
#' @return Status object
#'
#' @family Status functions
#' @export
Status <- function(details = NULL, code = NULL, message = NULL) {
    # Bundle the google.rpc.Status fields and tag with the API object class.
    status <- list(details = details, code = code, message = message)
    structure(status, class = "gar_Status")
}
#' ReportRequest Object
#'
#' @details
#' Autogenerated via \code{\link[googleAuthR]{gar_create_api_objects}}
#' Request message for the Report method.
#'
#' @param operations Operations to be reported
#' @param serviceConfigId Specifies which version of service config should be used to process the
#'
#' @return ReportRequest object
#'
#' @family ReportRequest functions
#' @export
ReportRequest <- function(operations = NULL, serviceConfigId = NULL) {
    # Wrap the report payload in a list and assign the API object class.
    req <- list(operations = operations, serviceConfigId = serviceConfigId)
    class(req) <- "gar_ReportRequest"
    req
}
|
b7e4d0d1884b4cfca6fcfd23357d0993a9bcac3e
|
a9528637237fd53ee9bb1ca0c36e16bd8e6d2849
|
/Exam_2/Exam_2_README.txt
|
508935098c22908c8ab96cc57608f501ad93c208
|
[] |
no_license
|
EmilyNaylor/Data_Course_NAYLOR
|
1fba538b1a52f7842e88e35243dde47526b9ffc2
|
caca0adf5289fb0d4367ac03ca25a29a6490caf5
|
refs/heads/master
| 2023-01-30T13:54:22.605340
| 2020-12-17T04:50:03
| 2020-12-17T04:50:03
| 290,574,036
| 2
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 3,054
|
txt
|
Exam_2_README.txt
|
SKILLS TEST 2
_____________
Do a fresh "git pull" to get the skills test files.
The files you just got from your "git pull" are:
README.txt # this file containing the test prompts
landdata-states.csv # Land and home price values over time, broken down into state and region
unicef-u5mr.csv # UNICEF data regarding child mortality rates for children under 5 years old
# This is a time series (by year)
# It covers 196 countries and 68 years
# The first column lists the country name, subsequent columns are each year
fig1.png # Need to recreate this figure (see instructions)
fig2.png # Need to recreate this figure (see instructions)
fig3.png # Need to recreate this figure (see instructions)
fig4.png # Need to recreate this figure (see instructions)
################################################################################
# Create a new directory in YOUR data course repository called Exam_2 #
# Create a new Rproject in this new directory and copy all exam files to it #
# Complete the tasks below in a script called LASTNAME_Skills_Test_2.R #
# Be sure that your file paths are relative to your new Rproject #
################################################################################
Tasks:
I. Load the landdata-states.csv file into R
Re-create the graph shown in "fig1.png"
Export it to your Exam_2 folder as LASTNAME_Fig_1.jpg (note, that's a jpg, not a png)
To change the y-axis values to plain numeric, add options(scipen = 999) to your script
II. What is "NA Region???"
Write some code to show which state(s) are found in the "NA" region
III. The rest of the test uses another data set. The unicef-u5mr.csv data. Get it loaded and take a look.
It's not exactly tidy. You had better tidy it!
IV. Re-create the graph shown in fig2.png
Export it to your Exam_2 folder as LASTNAME_Fig_2.jpg (note, that's a jpg, not a png)
IV. Re-create the graph shown in fig3.png
Note: This is a line graph of average mortality rate over time for each continent
(i.e., all countries in each continent, yearly average), this is NOT a geom_smooth()
Export it to your Exam_2 folder as LASTNAME_Fig_3.jpg (note, that's a jpg, not a png)
V. Re-create the graph shown in fig4.png
Note: The y-axis shows proportions, not raw numbers
This is a scatterplot, faceted by region
Export it to your Exam_2 folder as LASTNAME_Fig_4.jpg (note, that's a jpg, not a png)
VI. Commit and push all your code and files to GitHub. I'll pull your repository at 9:30pm sharp and grade what I find.
## Grading ##
Re-create fig1 20pts
What states are in region "NA"? 10pts
Tidy the UNICEF data for plotting 10pts
Re-create fig2 20pts
Re-create fig3 20pts
Re-create fig4 20pts
|
68ea67ce4f8c4356305e976ae8ecf0d8b1b65c4e
|
6e37f91fb6448ee19a3ba5c70e277f124a6c8844
|
/R/load_biom.R
|
565a09ea8f4d34604ada774448a4fc60a1120418
|
[] |
no_license
|
sirusb/metagenomeSeq
|
91c7d7e3521176d1521296d713f2cbf6d35352d1
|
17c90b8e254564bd6a99b29a588307ddab0bbf5e
|
refs/heads/master
| 2021-01-17T20:55:02.053059
| 2015-10-02T14:15:35
| 2015-10-02T14:15:35
| 43,594,131
| 1
| 0
| null | 2015-10-03T09:27:42
| 2015-10-03T09:27:41
| null |
UTF-8
|
R
| false
| false
| 592
|
r
|
load_biom.R
|
#' Load objects organized in the Biome format.
#'
#' Wrapper to load Biome formatted object.
#'
#' @param file The biome object filepath.
#' @return A MRexperiment object.
#' @seealso \code{\link{load_meta}} \code{\link{load_phenoData}} \code{\link{newMRexperiment}} \code{\link{biom2MRexperiment}}
#' @examples
#'
#' #library(biom)
#' #rich_dense_file = system.file("extdata", "rich_dense_otu_table.biom", package = "biom")
#' #x = load_biome(rich_dense_file)
#' #x
load_biom <- function(file){
  # Read a BIOM-format file and convert it to an MRexperiment object.
  #
  # Args:
  #   file: Path to the biom-formatted file.
  # Returns:
  #   An MRexperiment object (via biom2MRexperiment).
  #
  # Check for the 'biom' dependency without attaching it to the search path;
  # library() inside a function is a hidden side effect for the caller.
  if (!requireNamespace("biom", quietly = TRUE)) {
    stop("Package 'biom' is required for load_biom(); please install it.",
         call. = FALSE)
  }
  x <- biom::read_biom(file)
  biom2MRexperiment(x)
}
|
ed5d4783b9b88c7c6c1418d67fa1076fae734860
|
8a0dfba9208eb531bee92f5c91dec522b004148b
|
/prepare_MaP_events.R
|
74bc8c771bd1c155cb2ca86c869b4057395ddac5
|
[] |
no_license
|
NNander/CizelHouben
|
e4aab259af39bf18efc18949bf57d2acaae1ad82
|
3115eff3550bba934fde35880d3caa9c2925aaed
|
refs/heads/master
| 2020-05-09T18:36:22.015369
| 2019-04-18T09:46:58
| 2019-04-18T09:46:58
| 181,348,227
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,396
|
r
|
prepare_MaP_events.R
|
##' Build a long-format table of macro-prudential policy events.
##'
##' Reads the prudential-instruments spreadsheet (version 1), maps each
##' (year, quarter) to the last day of the quarter, melts the indicator
##' columns to long format, keeps only non-zero events (dropping cumulative
##' "cum", interaction "int" and composite "PruC" columns) and labels each
##' event "Tighten" (positive value) or "Relax" (negative value).
##'
##' @return A long-format data.table with columns ifscode, date, variable,
##'   value and type.
##' @import openxlsx readxl pipeR data.table dplyr lubridate
##' @export
prepare_MaP_events <- function(){
  # FILE.XL <- system.file('./data/original/prudential_ind_1.xlsx',
  #                         package = 'Projects2016.Macropru')
  # NOTE(review): hard-coded absolute path only works on the original
  # author's machine; prefer system.file() or a function argument.
  FILE.XL <- '/Users/Nander/Desktop/inst-data-original/prudential_ind_1.xlsx'
  read_excel(FILE.XL) %>>%
    data.table %>>%
    mutate(
      # Last day of the quarter: first day of the quarter's final month,
      # plus one month, minus one day (pipeR lambda syntax).
      date = sprintf("%s-%s-%s",
                     year,
                     quarter*3,
                     1) %>>%
        as.Date %>>%
        (x ~ x + months(1) - 1)
    ) %>>%
    select(
      - year,
      - quarter,
      - qdate,
      - country,
      - biscode
    ) %>>%
    melt.data.table(
      id.vars = c('ifscode','date')
    ) %>>%
    subset(
      # Keep actual events only: non-zero values, excluding cumulative,
      # interaction and composite indicator columns.
      value != 0 &
        (! variable %like% "cum") &
        (! variable %like% "int") &
        (! variable %like% "PruC")
    ) %>>%
    mutate(
      type = ifelse(value>0,"Tighten","Relax")
    ) ->
    mapevents
  return(mapevents)
}
##' Build the macro-prudential events table from the updated spreadsheet.
##'
##' Same logic as prepare_MaP_events() but reads version 3 of the data and
##' additionally drops "core_country" indicator columns.
##'
##' @return A long-format data.table with columns ifscode, date, variable,
##'   value and type.
##' @export
prepare_MaP_events_update <- function(){
  # FILE.XL <- system.file('./data/original/Prudential policy instruments_new/prudential_ind_3.xlsx',
  #                         package = 'Projects2016.Macropru')
  # NOTE(review): hard-coded absolute path, same portability issue as above.
  FILE.XL <- '/Users/Nander/Desktop/inst-data-original/prudential_ind_3.xlsx'
  read_excel(FILE.XL) %>>%
    data.table %>>%
    mutate(
      # Last day of the quarter (pipeR lambda syntax).
      date = sprintf("%s-%s-%s",
                     year,
                     quarter*3,
                     1) %>>%
        as.Date %>>%
        (x ~ x + months(1) - 1)
    ) %>>%
    data.table %>>%
    select(
      - year,
      - quarter,
      - qdate,
      - country,
      - biscode
    ) %>>%
    melt.data.table(
      id.vars = c('ifscode','date')
    ) %>>%
    subset(
      # Non-zero events only; also excludes "core_country" columns, unlike
      # prepare_MaP_events().
      value != 0 &
        (! variable %like% "cum") &
        (! variable %like% "int") &
        (! variable %like% "PruC") &
        (! variable %like% "core_country")
    ) %>>%
    mutate(
      type = ifelse(value>0,"Tighten","Relax")
    ) ->
    mapevents
  return(mapevents)
}
prepare_MaP_events() -> events
|
1d614a47dc3b6bbc1ba3aa70a50a7cb9b419a5cb
|
70b1a3a9f52d6788c76aed11897e4f8725141f21
|
/plot3.R
|
1e48d23a2873ab80ed9b9446e0a57ac763e954f9
|
[] |
no_license
|
CarenM/Exploratory_Data_Analysis
|
5ab2bdeb07621c49772f3be29541c3ca9a34006d
|
a7963baba9103eb8ab73a98975d831a4a036c3cc
|
refs/heads/master
| 2021-01-19T09:12:52.607188
| 2015-08-09T16:18:31
| 2015-08-09T16:18:31
| 40,441,447
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,414
|
r
|
plot3.R
|
# Exploratory Data Analysis, plot 3: sub-metering time series for 1-2 Feb 2007.
#
# Set working directory
# NOTE(review): setwd() in a script is fragile; prefer an RStudio project or
# relative paths from the invocation directory.
setwd("./4_Exploratory_Data_Analysis")
# Check size of object
# NOTE(review): object.size() on a string measures the character vector, not
# the file on disk -- file.size() is likely what was intended here.
object.size("household_power_consumption.txt")
# Read and subset the data for 1/2/2007 and 2/2/2007
data <- subset(read.table("household_power_consumption.txt", header=TRUE, sep=";", colClasses="character"), Date == "1/2/2007" | Date == "2/2/2007")
# Paste Date and Time into new field and change to format that R recognizes
data$datetime <- paste(data$Date, data$Time)
data$datetime <- strptime(data$datetime, format = "%d/%m/%Y %H:%M:%S")
# Add the Day of the Week
# NOTE(review): `day` is computed but never used by the plot below.
data$day <- weekdays(data$datetime, abbreviate = TRUE)
# Change Submetering class to numeric (columns were read as character above)
data$Sub_metering_1 <- as.numeric(data$Sub_metering_1)
data$Sub_metering_2 <- as.numeric(data$Sub_metering_2)
data$Sub_metering_3 <- as.numeric(data$Sub_metering_3)
# Create plot and save as png with defined size: empty frame first, then one
# line per sub-meter, then the legend.
png("plot3.png", width = 480 , height = 480)
with(data, plot(datetime, Sub_metering_1, xlab = " ", ylab = "Energy sub metering", type = "n"))
with(data, lines(datetime, Sub_metering_1, col="black"))
with(data, lines(datetime, Sub_metering_2, col="red"))
with(data, lines(datetime, Sub_metering_3, col="blue"))
legend("topright", col = c("black", "red", "blue"), lty = 1, legend = c("Sub_metering_1", "Sub_metering_2", "Sub_metering_3"), cex = 0.75)
dev.off()
### Please do not wonder that my weekdays are in German
# Do = Thu, Fr = Fri, Sa = Sat
|
da72c3c3c564050c0fde102d46ee66ac3ef37afc
|
7044839eae96eaec4642a9fd4be56ceca84ca657
|
/tests/testthat/helper-clustering.R
|
785b7f02af289cf50d3bddc9ab3ea51e70c1b886
|
[
"MIT"
] |
permissive
|
cugliari/iecclust
|
bb58e11c35bece256d613f0824ad98967e4e441c
|
1b6e97a0c317a8f1959b5d927a0787a36726a4de
|
refs/heads/master
| 2021-08-19T19:50:24.825273
| 2017-11-27T09:01:53
| 2017-11-27T09:01:53
| 110,530,261
| 3
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 323
|
r
|
helper-clustering.R
|
# Distortion of a clustering: for every series (column), take the smallest
# length-normalized squared distance to any medoid column, average those
# minima over all series, and return the square root.
computeDistortion <- function(series, medoids)
{
  len <- nrow(series)
  n_series <- ncol(series)
  # Minimum normalized squared distance to a medoid, per series column.
  # (matrix - vector recycles column-wise, matching sweep over rows.)
  closest <- vapply(seq_len(n_series), function(j) {
    min(colSums((medoids - series[, j])^2) / len)
  }, numeric(1))
  sqrt(sum(closest) / n_series)
}
|
28e64aae794e650370aa09ca46177ee18fb1314c
|
1983479ed9c5ffc0d302ee200ebde5613d6625ba
|
/inCountry.R
|
6d53f41eddcef70d5d05786224bb325562141db1
|
[] |
no_license
|
kloessa/Shiny_APP
|
9237203e5acb12e2255ebd0aa56a3217b93fdae0
|
ab0547260eab8aacc28b833829e52642102c789c
|
refs/heads/master
| 2021-01-10T08:26:11.700862
| 2016-03-29T16:27:37
| 2016-03-29T16:27:37
| 54,989,220
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 744
|
r
|
inCountry.R
|
# Decide whether the point (lon, lat) lies inside `country`, with a margin
# of 50 km outside the border.
#
# Args:
#   country: Country name as used by maps::map("world", ...).
#   lon, lat: Point coordinates in degrees.
# Returns:
#   TRUE if the point maps to the country or lies within 50 km of its
#   border, FALSE otherwise.
inCountry <- function(country, lon, lat) {
  # Country outline as a two-column (lon, lat) matrix; drop NA separators.
  border <- map("world", country, plot = FALSE)
  border <- na.omit(cbind(border$x, border$y))
  # Direct hit: the point already maps to the requested country, so skip the
  # (expensive) distance-to-border computation entirely.
  if (toString(map.where("world", lon, lat)) == toString(country)) {
    return(TRUE)
  }
  # Otherwise accept points within 50 km of the border; with distHaversine,
  # dist2Line reports the distance in metres.
  dist <- dist2Line(c(lon, lat), border[, 1:2], distfun = distHaversine)
  dist[1, 1] <= 50000
}
|
44de7007600e71f30ff47d517e6fd92c8312b1b7
|
6a3deb38999bb284e8dfa27e3612ea73e92fe1ed
|
/src/main/python/RScripts/name-disambiguation/wmh-parameter-trimming/02 badanie użycia SVM/przetwarzanie AND/01 wczytanie AND.R
|
ed2bc4f196309cffd4a283f28aee309fbc92f654
|
[] |
no_license
|
pszostek/research-python-backup
|
c7d1bf56dc4b1ab9412fcecb1a9222f2a8dbe6ca
|
e38508de91f8a7bda3096c6f0a361734207357a5
|
refs/heads/master
| 2020-05-20T08:50:05.306294
| 2012-09-28T13:24:43
| 2012-09-28T13:24:43
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,950
|
r
|
01 wczytanie AND.R
|
# Exploratory script: load author-name-disambiguation (AND) hint data and
# balance the positive/negative classes.
#
# Reference signature of read.csv, kept for the author's convenience:
# read.csv(file, header = FALSE, sep = "", quote = "\"'",
#          dec = ".", row.names, col.names,
#          as.is = !stringsAsFactors,
#          na.strings = "NA", colClasses = NA, nrows = -1,
#          skip = 0, check.names = TRUE, fill = !blank.lines.skip,
#          strip.white = FALSE, blank.lines.skip = TRUE,
#          comment.char = "#",
#          allowEscapes = FALSE, flush = FALSE,
#          stringsAsFactors = default.stringsAsFactors(),
#          fileEncoding = "", encoding = "unknown", text)
# NOTE(review): hard-coded absolute path and header=T (use TRUE, never T).
file <- read.csv(file="/home/pdendek/WMH/00 badania/01 import wskazówek do CSV/and_9135418604463356.csv",header=T, quote="\"",sep=" ")
View(file)
#Order data by "id"
f2 <- file[with(file, order(id)), ]
View(f2)
#Delete additional column f2$row.names is not needed,
#it is only phantom column, not seen in the data frame
#Cast -1 to NA
#changing -1 to NA
fna <- f2
# NOTE(review): the next statement assigns the (invisible NULL) result of the
# for loop to `max`, shadowing base::max -- almost certainly unintended.
max <-
for(i in 2:(ncol(f2)-1)){
  # NOTE(review): this chained assignment evaluates the inner subassignment
  # and then assigns its value (NA) to `fna`, destroying the data frame.
  # The intended statement is probably: fna[fna[,i] == -1, i] <- NA
  fna = fna[fna[,i]==-1,i]=NA
}
head(fna)
summary(fna)
# Remove unneeded hints (fully null, or mostly null).
f3 <-f2
f3$f0 <- NULL
f3$f1 <- NULL
f3$f5 <- NULL
f3$f6 <- NULL
# Check for erroneous values, e.g. a year difference larger than 1000 years
# (a value of 1999 means one of the two years was mistakenly zero).
tmp <- f3[,]
tmp
f3 <- f3[ f3$f7<1000 , ]
summary(f3)
# Hints still in play:
#3CoContribution
#4CoClassif
#5CoKeywordPhrase
#8Year
# NOTE(review): `d` is not defined anywhere above in this file -- this line
# will fail unless `d` exists in the workspace from an earlier session.
f <- d[,-1]
# summary(f)
# Split into positive (samePerson) and negative pairs.
f_true <- f[ f$samePerson!=FALSE ,]
summary(f_true)
f_false <-f[ f$samePerson==FALSE ,]
summary(f_false)
print("False ratio [%]")
100*nrow(f_false)/nrow(f)
100*nrow(f_true)/nrow(f)
# Downsample the majority (positive) class to roughly match the negatives.
rhv <- runif(nrow(f_true))
f_true_sel <- f_true[rhv<nrow(f_false)/nrow(f_true),]
nrow(f_false)/nrow(f_true_sel)
# Take a small (~0.1%) random subsample of each class and recombine.
rhv <- runif(nrow(f_true_sel))
fs_t <- f_true_sel[rhv<0.001,]
summary(fs_t)
rhv <- runif(nrow(f_false))
fs_f <- f_false[rhv<0.001,]
summary(fs_f)
f_new <- rbind(fs_t, fs_f)
head(f_new)
d <- f_new
|
6c68d26c14c712eba29b59bd6221b9c212e7b8c9
|
88fbd53f171e9ac0ab1cfd10865c7fb6bbd4d32b
|
/functions/functions_data_prep.R
|
fafd4b875700b686b953462224f32a948abd77ec
|
[] |
no_license
|
RaihaTuiTaura/covid-hawkes-paper
|
b296ac34267d1313c43d9041aec7e80a81e285a2
|
e497f57e4333366db26a91bff3e3092ede361578
|
refs/heads/master
| 2023-04-27T07:09:41.374135
| 2023-04-15T15:23:28
| 2023-04-15T15:23:28
| 308,214,814
| 1
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 4,388
|
r
|
functions_data_prep.R
|
##### Data functions #####
# Function to filter data
# Prepare per-country COVID event data for Hawkes-process modelling around a
# change point (cps) and, optionally, a cross-validation cutoff (i_star).
#
# Args:
#   country: Country name matched against covid19_bycountry_dailycounts.df.
#   cps:     Change point(s); only the length-1 case is handled below.
#   i_star:  Optional cross-validation cutoff index (in days from t1).
# Returns:
#   NULL (invisibly). All results are exported as side effects.
#
# NOTE(review): this function communicates exclusively by assign()-ing into
# .GlobalEnv, and it reads covid19_bycountry_dailycounts.df from the calling
# environment -- consider returning a named list instead.
filter_data_cp_peak = function(country, cps, i_star=NULL){
  # Filter data to country of interest
  modelling_data.df = covid19_bycountry_dailycounts.df %>%
    filter(Country == country)
  # Start observation period once there is 10 events
  t1_ind = as.numeric(which(cumsum(modelling_data.df$n) >=10)[1])
  # Calculate number of days in sample; a few countries get fixed end dates.
  start_date = modelling_data.df$Date[t1_ind]
  if (!(country %in% c("China", "US", "Spain", "India"))){
    end_date = max(modelling_data.df$Date)
  } else if (country == "China"){
    end_date = as.Date("2020-04-13")
  } else if (country == "Spain"){
    end_date = as.Date("2020-06-15")
  } else if (country == "India"){
    end_date = as.Date("2020-06-12")
  } else if (country == "US"){
    end_date = as.Date("2020-06-21")
  }
  max_T = as.numeric(difftime(end_date, start_date, units="days")) + 1
  # Modelling data: day index (1-based from start_date) and daily count.
  modelling_data.df %<>%
    filter(Date <= end_date) %>%
    mutate(time = as.numeric(difftime(Date, start_date, units="days") + 1)) %>%
    select(time, n)
  # Calculate individual events and their previous events: for each day t,
  # the ages of all earlier event days with positive counts, and the counts
  # on those days.
  event_count = as.matrix(sapply(modelling_data.df, as.numeric))
  decay_times = lapply(1:max_T, function(t) as.numeric(t- event_count[event_count[,1] <t & event_count[,2] >0 ,1]))
  decay_times_counts = lapply(1:max_T, function(t) as.numeric(event_count[event_count [,1] <t & event_count[,2] >0 ,2]))
  # Create global variables
  assign("start_date", start_date, envir = .GlobalEnv)
  assign("event_count", event_count, envir = .GlobalEnv)
  if (length(cps)==1){
    # Upward trajectory: days 1..cps[1].
    max_T_upward = cps[1]
    event_count_upward = event_count[t1_ind:(max_T_upward+t1_ind-1),]
    decay_times_upward = decay_times[1:max_T_upward]
    decay_times_counts_upward = decay_times_counts[1:max_T_upward]
    # Create global variables
    assign("event_count_upward", event_count_upward, envir = .GlobalEnv)
    assign("decay_times_upward", decay_times_upward, envir = .GlobalEnv)
    assign("decay_times_counts_upward", decay_times_counts_upward, envir = .GlobalEnv)
    # Downward trajectory (not available for Brazil/India).
    if (!(country %in% c("Brazil", "India"))){
      event_count_downward = event_count[(max_T_upward+t1_ind):(max_T+t1_ind-1),]
      decay_times_downward = decay_times[(max_T_upward+1):max_T]
      decay_times_counts_downward = decay_times_counts[(max_T_upward+1):max_T]
      assign("decay_times_downward", decay_times_downward, envir = .GlobalEnv)
      assign("decay_times_counts_downward", decay_times_counts_downward, envir = .GlobalEnv)
      assign("event_count_downward", event_count_downward, envir = .GlobalEnv)
    }
    # If doing cross validation: truncate both trajectories at i_star.
    if (!is.null(i_star)){
      if (i_star <= cps[1]){
        # Cutoff falls in the upward trajectory only.
        event_count_upward_val = event_count_upward[1:i_star,]
        decay_times_upward_val = decay_times_upward[1:i_star]
        decay_times_counts_upward_val = decay_times_counts_upward[1:i_star]
      } else {
        event_count_upward_val = event_count_upward
        decay_times_upward_val = decay_times_upward
        decay_times_counts_upward_val = decay_times_counts_upward
        if (!(country %in% c("Brazil", "India"))){
          # Downward trajectory: truncate at i_star - cps[1] days.
          # The t() below keeps a single selected row in matrix form.
          if (i_star == (cps[1]+1)){
            event_count_downward_val = t(event_count_downward[1:(i_star-cps[1]),])
          } else {
            event_count_downward_val = event_count_downward[1:(i_star-cps[1]),]
          }
          decay_times_downward_val = decay_times_downward[1:(i_star-cps[1])]
          decay_times_counts_downward_val = decay_times_counts_downward[1:(i_star-cps[1])]
          # Create global variables
          assign("decay_times_downward_val", decay_times_downward_val, envir = .GlobalEnv)
          assign("decay_times_counts_downward_val", decay_times_counts_downward_val, envir = .GlobalEnv)
          assign("event_count_downward_val", event_count_downward_val, envir = .GlobalEnv)
        }
      }
      # Create global variables
      assign("decay_times_upward_val", decay_times_upward_val, envir = .GlobalEnv)
      assign("decay_times_counts_upward_val", decay_times_counts_upward_val, envir = .GlobalEnv)
      assign("event_count_upward_val", event_count_upward_val, envir = .GlobalEnv)
    }
  }
  return()
}
|
7fae0d23a872f6fffb728f7504332255f950bec7
|
e909b3bc3f95d4656c2d9a263768b372551e9bf8
|
/ui.R
|
82a35254f6de9f5d88dcbcabf79b61c3fd3e28fc
|
[] |
no_license
|
VivianBailey/Mutplot
|
ff93c3263643b9c0f96baa8dfa6e41d8ea0fd661
|
39ce3d6cacd8c68647654bc75b2e92087a035246
|
refs/heads/master
| 2022-07-21T18:10:47.437001
| 2022-06-27T21:29:38
| 2022-06-27T21:29:38
| 166,429,105
| 15
| 30
| null | 2022-02-24T23:56:17
| 2019-01-18T15:46:51
|
R
|
UTF-8
|
R
| false
| false
| 1,097
|
r
|
ui.R
|
# Shiny UI for "mutplot": upload a mutation table, pick a gene / frequency
# threshold / image format, and download the resulting lollipop plot and data.
library(shiny)
# Gene list used to populate the "Gene" selector.
# NOTE(review): header=T uses T; prefer the unabbreviated TRUE.
genelist<-read.table("UniProt.txt",header=T)
ui <-
  fluidPage(
    # Banner image plus app title.
    h3(
      div(style="display:inline-block;",img(src="mutplot.2.png",height=150,width=700,style="left;")),
      div(id="title","mutplot")
    ),
    sidebarLayout(
      sidebarPanel(
        # Input file plus parsing options (header row, field separator).
        fileInput("file1","Select File",accept=c("text","csv",".txt",".csv")),
        checkboxInput("header","Header",TRUE),
        radioButtons("sep","Separator",c(Tab="\t",Comma=",")),
        #radioButtons("gene","Gene",c("AR"="AR","BRCA1"='BRCA1',TP53="TP53"),inline=T)
        selectInput("gene","Gene",genelist[,1],selectize=FALSE),
        selectInput("AAfreq","Amino acid frequency threshold for highlight",c("Frequency",1:4,seq(5,30,by=5))),
        selectInput("plotformat","Image file figure format",c("format","jpeg","pdf","png","svg")),
        #actionButton("plot","Plot"),
        downloadButton("downloadplot","Download Plot"),
        downloadButton("downloaddata","Download Data"),
        downloadButton("downloaddomain","Download Domain")
      ),
      mainPanel(
        # Preview of the uploaded file, rendered by the server as "freq".
        h4("An example for the selected file:"),
        tableOutput("freq")
        #plotOutput("plot")
      )
    )
  )
|
c54ad649aafe15c3f606a80b869f4307354c136a
|
acbff8b08d01821d70bd4054fb70af1b531a118e
|
/man/stat_vline.Rd
|
982898592f4df679f327fcb39566aa5320f8afec
|
[] |
no_license
|
djmurphy420/ggplot2
|
e1d01de07282f4f4360e83299e1b53909ca0d15a
|
1f50afc1f6e57f17a69012698b12835eb9029b3c
|
refs/heads/master
| 2021-01-18T09:31:42.735717
| 2011-12-30T03:41:19
| 2011-12-30T03:41:19
| 3,041,453
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 264
|
rd
|
stat_vline.Rd
|
\name{stat_vline}
\alias{stat_vline}
\title{Add a vertical line}
\usage{
stat_vline(mapping = NULL, data = NULL, geom = "vline",
position = "identity", intercept, ...)
}
\description{
Add a vertical line
}
\examples{
# see geom_vline
}
\keyword{internal}
|
db9a6b25591be58900b85a9e918205061283bf10
|
4f51e567d6b9ffd8622640ba67ddb72d92dbf397
|
/predictionModel/dataHunt.R
|
555e53965093c933989fdaa264a2aba74f5a9c1b
|
[] |
no_license
|
breenbo/DS_10_capstone
|
259afbb193b3b776c75db8904bdb6cd89deb908a
|
e322d8d51224d4a31f377572184583e2bf58a2bf
|
refs/heads/master
| 2021-01-09T06:51:35.257804
| 2017-03-04T13:53:04
| 2017-03-04T13:53:04
| 80,879,735
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,776
|
r
|
dataHunt.R
|
# Read the three raw English corpora (Twitter, blogs, news), skipping
# embedded NUL bytes.
twit <- readLines("../text/en_US.twitter.txt", skipNul=TRUE)
blog <- readLines("../text/en_US.blogs.txt", skipNul=TRUE)
news <- readLines("../text/en_US.news.txt", skipNul=TRUE)
# Take fixed-seed 100k-line samples because the full datasets are too big.
# NOTE(review): replace=F uses F; prefer the unabbreviated FALSE.
set.seed(2704)
twitTest <- sample(twit, 100000, replace=F)
blogTest <- sample(blog, 100000, replace=F)
newsTest <- sample(news, 100000, replace=F)
# Clean a character vector of raw text in preparation for n-gram building.
#
# Args:
#   texte:      Character vector of raw lines.
#   lang:       Language code. NOTE(review): currently unused in the body.
#   rmStopword: If TRUE, remove stopwords with qdap::rm_stopwords.
#   stemwords:  If TRUE, stem words with qdap::stemmer.
# Returns:
#   A tibble with columns `line` (index) and `text` (cleaned line).
cleanText <- function(texte, lang="en", rmStopword=FALSE, stemwords=FALSE) {
  # take a text file, and return a possibly clean without stopwords and stemmed
  # tibble depending if chosen options
  # NOTE(review): library() calls inside a function attach packages as a
  # side effect for the caller; prefer pkg:: or requireNamespace().
  library(data.table)
  library(dplyr)
  library(qdap)
  library(hunspell)
  library(stringr)
  # Normalise sentences: expand contractions, lower-case, strip numbers,
  # ordinals, symbols and abbreviations.
  texte <- texte %>%
    replace_contraction() %>%
    tolower() %>%
    replace_number(remove=TRUE) %>%
    replace_ordinal(remove=TRUE) %>%
    replace_symbol() %>% replace_abbreviation()
  # Drop non-ASCII characters.
  texte <- iconv(x = texte, from = "ASCII", sub="")
  # Remove words hunspell flags as misspelled (i.e. non-English).
  # better to use hunspell than qdap because qdap causes errors with some chars
  missReplace <- function(string){
    miss <- unlist(hunspell(string))
    for(m in miss){
      # NOTE(review): str_replace removes only the FIRST occurrence of each
      # flagged word -- confirm str_replace_all is not what was intended.
      string <- str_replace(string, m, "")
    }
    string
  }
  texte <- lapply(texte, missReplace)
  # Optional stopword removal.
  if(rmStopword==TRUE){
    texte <- rm_stopwords(texte, separate=FALSE)
  }
  # Optional stemming.
  if(stemwords==TRUE){
    texte <- stemmer(texte)
  }
  # Convert the list of cleaned lines into a two-column tibble.
  # NOTE(review): the empty tibble() on the next line is immediately
  # overwritten and could be deleted; also 1:len misbehaves when len == 0
  # (seq_len(len) would be safe).
  len <- length(texte)
  textFile <- tibble()
  textFile <- tibble(line=1:len, text=unlist(texte))
  textFile
}
# create ngrams
# Tokenise a tibble with a `text` column into an `ngram` column of n-grams
# of the requested order (default: unigrams).
tidyNGram <- function(texte, n.gram=1){
  library(tidytext)
  unnest_tokens(texte, ngram, text, token="ngrams", n=n.gram)
}
############################################################
# Clean the three samples, combine them, and cache to disk.
# NOTE(review): library(data.table) is loaded twice in this file, and fwrite
# is called both bare and as data.table::fwrite -- pick one style.
library(data.table)
cleanTwit <- cleanText(twitTest)
cleanBlog <- cleanText(blogTest)
cleanNews <- cleanText(newsTest)
cleanTexts <- rbind(cleanTwit, cleanBlog, cleanNews)
fwrite(cleanTexts, 'cleanTexts.csv')
############################################################
# Reload the cached corpus and write out 1- through 5-gram tables.
# NOTE(review): stringsAsFactors=F uses F; prefer FALSE.
library(data.table)
cleanTexts <- fread("cleanTexts.csv", stringsAsFactors=F)
monograms <- tidyNGram(cleanTexts)
fwrite(monograms, 'monograms.csv')
bigrams <- tidyNGram(cleanTexts, n.gram=2)
data.table::fwrite(bigrams, 'bigrams.csv')
trigrams <- tidyNGram(cleanTexts, n.gram=3)
fwrite(trigrams, 'trigrams.csv')
quadgrams <- tidyNGram(cleanTexts, n.gram=4)
fwrite(quadgrams, 'quadgrams.csv')
pentagrams <- tidyNGram(cleanTexts, n.gram=5)
fwrite(pentagrams, 'pentagrams.csv')
|
f9e3f77249bc6acf9d720cfa04767caf7bf1b701
|
d89f9fca31a1faf07bc869ce3ffa9c52a110909f
|
/model estimate.R
|
ba6b5936f03cc30cc50a01ead8803591adf72e92
|
[] |
no_license
|
costlysignalling/Mate-choice-consistency-2
|
f3139e93f2f5435edab490bef17016dfc2b6f9ee
|
6c02e5551ffca08c3e7dfa0fcf20f9cbbc4335e4
|
refs/heads/master
| 2020-04-03T02:20:27.157476
| 2018-10-27T10:55:11
| 2018-10-27T10:55:11
| 154,953,471
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 360
|
r
|
model estimate.R
|
# Regress focal-person variance (as a percentage) on passive effect size and
# print approximate 95% confidence intervals for intercept and slope.
results<-read.delim("raw.results.txt")
View(results)
ef.pas<-results$effect.size
foc<-results$focal.person.var
model<-lm((foc*100)~ef.pas)
model
sumaz<-summary(model)
# sumaz[[4]] indexes into the summary by position.
# NOTE(review): prefer coef(sumaz) over sumaz[[4]] -- positional indexing is
# fragile; also confint(model) gives exact t-based intervals, while 1.96 is
# the normal approximation.
# Intercept: estimate +/- 1.96 * std. error.
inter<-sumaz[[4]][1,1]
stder<-sumaz[[4]][1,2]
inter+stder*1.96
inter-stder*1.96
# Slope: same variables reused for the second coefficient row.
inter<-sumaz[[4]][2,1]
stder<-sumaz[[4]][2,2]
inter+stder*1.96
inter-stder*1.96
|
621c46499cd30112a96ca92b0a86ac142358c694
|
9d208ad12fd033b2c018057d256fbe390b05583a
|
/cnidaria/man/distribute.vector.Rd
|
b68b48d337ae54a4368ed88be263f747f5477612
|
[
"Apache-2.0"
] |
permissive
|
smartinsightsfromdata/cnidaria
|
99832d38c9280bf0d8e851013d3f9c70fbb47d67
|
dcfe311b83b4df961349ff7676e781af490e9020
|
refs/heads/master
| 2021-01-17T05:55:35.657761
| 2014-03-06T22:34:42
| 2014-03-06T22:34:42
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 488
|
rd
|
distribute.vector.Rd
|
\name{distribute.vector}
\alias{distribute.vector}
\title{Create a distributed vector from a native R vector}
\usage{
distribute.vector(vec, block.size = 1000)
}
\arguments{
\item{vec}{The vector to distribute}
\item{block.size}{The size of a block of a distributed
vector}
\item{constructor}{A constructor for the vector type to
create}
}
\value{
a new dist.vector object
}
\description{
This function allows you to easily create new distributed
vectors from a local one.
}
|
2c34aa912864e8a2fee7423960932b36c419bc7f
|
661e6ba0b86bf35b3ea9c0fa8f94043471bb3729
|
/rideshare/rideshare.R
|
af9ee7fb2787d28e51cb7260b50a28fae9e8db3c
|
[] |
no_license
|
devinsimmons/HONR238V_project
|
8c21143cdb30a35f7acd46e00e5585e73a5c405c
|
a2742e094a141af3daca46e2193b7332a20d0c61
|
refs/heads/master
| 2020-04-28T06:25:54.825901
| 2019-05-06T18:34:33
| 2019-05-06T18:34:33
| 175,057,910
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 463
|
r
|
rideshare.R
|
# Ride Share Data Processing
library(dplyr)
library(lubridate)
# set working directory
# NOTE(review): setwd() with a machine-specific path makes the script
# non-portable; prefer an RStudio project or relative paths.
setwd("~/COLLEGE/Spring 2019/HONR238V/Final Project")
# loading files
uber <- read.csv("uber-raw-data-janjune-15.csv")
# uber data: parse the pickup timestamp, rename columns, drop incomplete rows.
uber$Pickup_date <- ymd_hms(uber$Pickup_date)
colnames(uber) <- c("dispatching_base_num","Date","affiliated_base_num","location_ID")
uber <- na.omit(uber)
# NOTE(review): the upper bound "2015-06-01" excludes nearly all of June even
# though the source file covers Jan-June -- confirm the intended cutoff.
uber <- subset(uber,Date>="2015-01-01" & Date<="2015-06-01")
uber <- select(uber,Date)
|
c9d91b19fc436f8e38f168e3c5d38ea520627e4a
|
127ca883eb836064c2a02d176f05f2242894ae73
|
/R/datadescriptions.R
|
8a80d03b0cb1654d6ff9d3447a0ddc4607890a2a
|
[] |
no_license
|
lindesaysh/MRSea
|
3598a8431eb9fbfc6eb8abbee0c9841a626c018f
|
4ecc35843e49bf51a6e979f35e2544b3b315f341
|
refs/heads/master
| 2023-09-04T03:16:04.660394
| 2023-08-30T15:48:17
| 2023-08-30T15:48:17
| 13,676,222
| 5
| 3
| null | 2023-03-22T11:20:01
| 2013-10-18T11:22:55
|
R
|
UTF-8
|
R
| false
| false
| 13,136
|
r
|
datadescriptions.R
|
#' Line transect data with no post-impact consequence
#'
#' A simulated dataset containing the observed perpendicular distances, the effort data and other variables of
#' segmented line transect data. The variables are as follows:
#'
#' \itemize{
#' \item \code{transect.id} Identifier for the individual visits to the transects
#' \item \code{transect.label} Labels for transects
#' \item \code{season} Numerical indicator for the four different seasons
#' \item \code{impact} Numerical indicator for before (0) and after (1) impact
#' \item \code{segment.id} Identifier for individual visits to the segment
#' \item \code{segment.label} Label for segments
#' \item \code{length} Length of segment in km
#' \item \code{x.pos} spatial location in the horizontal axis in UTMs
#' \item \code{y.pos} spatial location in the vertical axis in UTMs
#' \item \code{depth} Depth in m
#' \item \code{object} Id for detected object
#' \item \code{distance} Perpendicular distance from the line
#' }
#'
#' @docType data
#' @keywords datasets
#' @format A data frame with 10771 rows and 12 variables
#'
#' @name dis.data.no
NULL
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
#' Line transect data with decrease post-impact
#'
#' A simulated dataset containing the observed perpendicular distances, the effort data and other variables of
#' segmented line transect data. The variables are as follows:
#'
#' \itemize{
#' \item \code{transect.id} Identifier for the individual visits to the transects
#' \item \code{transect.label} Labels for transects
#' \item \code{season} Numerical indicator for the four different seasons
#' \item \code{impact} Numerical indicator for before (0) and after (1) impact
#' \item \code{segment.id} Identifier for individual visits to the segment
#' \item \code{segment.label} Label for segments
#' \item \code{length} Length of segment in km
#' \item \code{x.pos} spatial location in the horizontal axis in UTMs
#' \item \code{y.pos} spatial location in the vertical axis in UTMs
#' \item \code{depth} Depth in m
#' \item \code{object} Id for detected object
#' \item \code{distance} Perpendicular distance from the line
#' }
#'
#' @docType data
#' @keywords datasets
#' @format A data frame with 10759 rows and 12 variables
#'
#' @name dis.data.de
NULL
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
#' Line transect data with redistribution post-impact
#'
#' A simulated dataset containing the observed perpendicular distances, the effort data and other variables of
#' segmented line transect data. The variables are as follows:
#'
#' \itemize{
#' \item \code{transect.id} Identifier for the individual visits to the transects
#' \item \code{transect.label} Labels for transects
#' \item \code{season} Numerical indicator for the four different seasons
#' \item \code{impact} Numerical indicator for before (0) and after (1) impact
#' \item \code{segment.id} Identifier for individual visits to the segment
#' \item \code{segment.label} Label for segments
#' \item \code{length} Length of segment in km
#' \item \code{x.pos} spatial location in the horizontal axis in UTMs
#' \item \code{y.pos} spatial location in the vertical axis in UTMs
#' \item \code{depth} Depth in m
#' \item \code{object} Id for detected object
#' \item \code{distance} Perpendicular distance from the line
#' }
#'
#' @docType data
#' @keywords datasets
#' @format A data frame with 10951 rows and 12 variables
#'
#' @name dis.data.re
NULL
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
#' Knot grid data for nearshore example
#'
#' @name knotgrid.ns
#' @docType data
NULL
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
#' Knot grid data for offshore example
#'
#' @name knotgrid.off
#' @docType data
NULL
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
#' Prediction grid data for no post-impact consequence
#'
#' A simulated dataset containing the true number of birds, the effort data and other variables of
#' prediction grid data. The variables are as follows:
#'
#' \itemize{
#' \item \code{area} area surveyed in the gridcell in km squared
#' \item \code{x.pos} spatial location in the horizontal axis in UTMs
#' \item \code{y.pos} spatial location in the vertical axis in UTMs
#' \item \code{depth} depth in m
#' \item \code{segment.id} Identifier for individual visits to the segment
#' \item \code{season} Numerical indicator for the four different seasons
#' \item \code{impact} Numerical indicator for before (0) and after (1) impact
#' \item \code{truth} number of birds
#' }
#'
#' @docType data
#' @keywords datasets
#' @format A data frame with 37928 rows and 8 variables
#'
#' @name predict.data.no
NULL
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
#' Prediction grid data for post-impact decrease
#'
#' A simulated dataset containing the true number of birds, the effort data and other variables of
#' prediction grid data. The variables are as follows:
#'
#' \itemize{
#' \item \code{area} area surveyed in the gridcell in km squared
#' \item \code{x.pos} spatial location in the horizontal axis in UTMs
#' \item \code{y.pos} spatial location in the vertical axis in UTMs
#' \item \code{depth} depth in m
#' \item \code{segment.id} Identifier for individual visits to the segment
#' \item \code{season} Numerical indicator for the four different seasons
#' \item \code{impact} Numerical indicator for before (0) and after (1) impact
#' \item \code{truth} number of birds
#' }
#'
#' @docType data
#' @keywords datasets
#' @format A data frame with 37928 rows and 8 variables
#'
#' @name predict.data.de
NULL
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
#' Prediction grid data for post-impact redistribution
#'
#' A simulated dataset containing the true number of birds, the effort data and other variables of
#' prediction grid data. The variables are as follows:
#'
#' \itemize{
#' \item \code{area} area surveyed in the gridcell in km squared
#' \item \code{x.pos} spatial location in the horizontal axis in UTMs
#' \item \code{y.pos} spatial location in the vertical axis in UTMs
#' \item \code{depth} depth in m
#' \item \code{segment.id} Identifier for individual visits to the segment
#' \item \code{season} Numerical indicator for the four different seasons
#' \item \code{impact} Numerical indicator for before (0) and after (1) impact
#' \item \code{truth} number of birds
#' }
#'
#' @docType data
#' @keywords datasets
#' @format A data frame with 37928 rows and 8 variables
#'
#' @name predict.data.re
NULL
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
#' Nearshore data with no effect of impact
#'
#' A simulated dataset containing the observed counts, the effort data and other variables of
#' grid data. The variables are as follows:
#'
#' \itemize{
#' \item \code{x.pos} spatial location in the horizontal axis in UTMs
#' \item \code{y.pos} spatial location in the vertical axis in UTMs
#' \item \code{area} area surveyed in the gridcell in km squared
#' \item \code{floodebb} 3 level factor covariate for tides
#' \item \code{observationhour} hour of observation
#' \item \code{GridCode} identifier for the different grids that were surveyed
#' \item \code{Year} Year of the survey
#' \item \code{DavOfMonth} Day of the survey
#' \item \code{MonthOfYear} Month of the survey
#' \item \code{impact} numerical indicator for before (0) and after (1) impact
#' \item \code{birds} observed number of birds
#' \item \code{cellid} identifier for the individual records
#' }
#'
#' @docType data
#' @keywords datasets
#' @format A data frame with 27798 rows and 12 variables
#'
#' @name ns.data.no
NULL
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
#' Nearshore data with decrease post-impact
#'
#' A simulated dataset containing the observed counts, the effort data and other variables of
#' grid data. The variables are as follows:
#'
#' \itemize{
#' \item \code{x.pos} spatial location in the horizontal axis in UTMs
#' \item \code{y.pos} spatial location in the vertical axis in UTMs
#' \item \code{area} area surveyed in the gridcell in km squared
#' \item \code{floodebb} 3 level factor covariate for tides
#' \item \code{observationhour} hour of observation
#' \item \code{GridCode} identifier for the different grids that were surveyed
#' \item \code{Year} Year of the survey
#' \item \code{DavOfMonth} Day of the survey
#' \item \code{MonthOfYear} Month of the survey
#' \item \code{impact} numerical indicator for before (0) and after (1) impact
#' \item \code{birds} observed number of birds
#' \item \code{cellid} identifier for the individual records
#' }
#'
#' @docType data
#' @keywords datasets
#' @format A data frame with 27798 rows and 12 variables
#'
#' @name ns.data.de
NULL
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
#' Nearshore data with redistribution post-impact
#'
#' A simulated dataset containing the observed counts, the effort data and other variables of
#' grid data. The variables are as follows:
#'
#' \itemize{
#' \item \code{x.pos} spatial location in the horizontal axis in UTMs
#' \item \code{y.pos} spatial location in the vertical axis in UTMs
#' \item \code{area} area surveyed in the gridcell in km squared
#' \item \code{floodebb} 3 level factor covariate for tides
#' \item \code{observationhour} hour of observation
#' \item \code{GridCode} identifier for the different grids that were surveyed
#' \item \code{Year} Year of the survey
#' \item \code{DavOfMonth} Day of the survey
#' \item \code{MonthOfYear} Month of the survey
#' \item \code{impact} numerical indicator for before (0) and after (1) impact
#' \item \code{birds} observed number of birds
#' \item \code{cellid} identifier for the individual records
#' }
#'
#' @docType data
#' @keywords datasets
#' @format A data frame with 27798 rows and 12 variables
#'
#' @name ns.data.re
NULL
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
#' Prediction grid data for nearshore with no effect of impact
#'
#' A simulated prediction dataset containing the true counts, the effort data and other variables of
#' grid data. The variables are as follows:
#'
#' \itemize{
#' \item \code{x.pos} spatial location in the horizontal axis in UTMs
#' \item \code{y.pos} spatial location in the vertical axis in UTMs
#' \item \code{area} Area surveyed in the gridcell in km squared
#' \item \code{floodebb} 3 level factor covariate for tide state
#' \item \code{observationhour} hour of observation
#' \item \code{GridCode} identifier for the different grids that were surveyed
#' \item \code{Year} Year of the survey
#' \item \code{DavOfMonth} Day of the survey
#' \item \code{MonthOfYear} Month of the survey
#' \item \code{impact} numerical indicator for before (0) and after (1) impact
#' \item \code{birds} true density of birds
#' }
#'
#' @docType data
#' @keywords datasets
#' @format A data frame with 27798 rows and 11 variables
#'
#' @name ns.predict.data.no
NULL
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
#' Prediction grid data for nearshore post-impact decrease
#'
#' A simulated prediction dataset containing the true counts, the effort data and other variables of
#' grid data. The variables are as follows:
#'
#' \itemize{
#' \item \code{x.pos} spatial location in the horizontal axis in UTMs
#' \item \code{y.pos} spatial location in the vertical axis in UTMs
#' \item \code{area} Area surveyed in the gridcell in km squared
#' \item \code{floodebb} 3 level factor covariate for tide state
#' \item \code{observationhour} hour of observation
#' \item \code{GridCode} identifier for the different grids that were surveyed
#' \item \code{Year} Year of the survey
#' \item \code{DavOfMonth} Day of the survey
#' \item \code{MonthOfYear} Month of the survey
#' \item \code{impact} numerical indicator for before (0) and after (1) impact
#' \item \code{birds} true density of birds
#' }
#'
#' @docType data
#' @keywords datasets
#' @format A data frame with 27798 rows and 11 variables
#'
#' @name ns.predict.data.de
NULL
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
#' Prediction grid data for nearshore post-impact redistribution
#'
#' A simulated prediction dataset containing the true counts, the effort data and other variables of
#' grid data. The variables are as follows:
#'
#' \itemize{
#' \item \code{x.pos} spatial location in the horizontal axis in UTMs
#' \item \code{y.pos} spatial location in the vertical axis in UTMs
#' \item \code{area} Area surveyed in the gridcell in km squared
#' \item \code{floodebb} 3 level factor covariate for tide state
#' \item \code{observationhour} hour of observation
#' \item \code{GridCode} identifier for the different grids that were surveyed
#' \item \code{Year} Year of the survey
#' \item \code{DavOfMonth} Day of the survey
#' \item \code{MonthOfYear} Month of the survey
#' \item \code{impact} numerical indicator for before (0) and after (1) impact
#' \item \code{birds} true density of birds
#' }
#'
#' @docType data
#' @keywords datasets
#' @format A data frame with 27798 rows and 11 variables
#'
#' @name ns.predict.data.re
NULL
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
|
0a43482358dc7268a14ee03d22d618ad700bb414
|
ffdea92d4315e4363dd4ae673a1a6adf82a761b5
|
/data/genthat_extracted_code/surveyplanning/examples/optsize.Rd.R
|
f292e4f99b55842f0b788d3768f9586d74208d20
|
[] |
no_license
|
surayaaramli/typeRrh
|
d257ac8905c49123f4ccd4e377ee3dfc84d1636c
|
66e6996f31961bc8b9aafe1a6a6098327b66bf71
|
refs/heads/master
| 2023-05-05T04:05:31.617869
| 2019-04-25T22:10:06
| 2019-04-25T22:10:06
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 579
|
r
|
optsize.Rd.R
|
library(surveyplanning)
### Name: optsize
### Title: Optimal sample size allocation
### Aliases: optsize
### Keywords: surveyplanning
### ** Examples
data <- data.table(H = 1 : 3,
s2h=10 * runif(3),
s2h2 = 10 * runif(3),
poph = 8 * 1 : 3,
Rh = rep(1, 3),
dd = c(1, 1, 1))
vars <- optsize(H = "H",
s2h=c("s2h", "s2h2"),
n = 10, poph = "poph",
Rh = "Rh",
fullsampleh = NULL,
dataset = data)
vars
|
433930284d37eb238b31bdcee47cb1cc97b425ef
|
d53fa8725ada45c2f436733fc82d4585f91ebc8e
|
/scripts/additional_network_functions.r
|
b7a1df27fbab5018e40764a91641baf353920f33
|
[] |
no_license
|
tanaylab/embflow
|
ecab97e6e88fef0af5b8375f9bea7606ee928e00
|
9f0c2a704d586bad09a6022b363fc02ba61f4f9f
|
refs/heads/main
| 2023-04-18T14:01:17.542958
| 2022-11-09T11:39:35
| 2022-11-09T11:39:35
| 346,700,550
| 11
| 5
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,160
|
r
|
additional_network_functions.r
|
# Compare metacell gene expression on the two sides of a type1 -> type2
# transition in an mct flow network.
#
# Arguments:
#   mct      - mct network object carrying @mc_id (metacell db id) and
#              @network (edge table with time1/time2, type1/type2, mc1/mc2,
#              flow columns)
#   min_time - keep only edges with time1 >= min_time
#   max_time - keep only edges with time2 <= max_time
#   type1    - type label required of the edge's source metacell
#   type2    - type label required of the edge's target metacell
#   mc_type  - optional per-metacell type vector indexed by metacell id;
#              defaults to the metacell color annotation
#
# Returns a data.frame with one row per expression-matrix row: the
# flow-weighted mean expression over source metacells (src), over target
# metacells (targ), and a regularized log2 fold change targ vs src (lf).
mctnetwork_get_egc_on_cluster_transition = function(mct, min_time, max_time, type1, type2, mc_type=NULL) {
mc = scdb_mc(mct@mc_id)
# Expression matrix whose columns are indexed by metacell name below
# (rows are presumably genes — confirm against the metacell object).
e_gc = mc@e_gc
net = mct@network
if(is.null(mc_type)) {
# Default typing: one color per metacell, named "1".."n" so the vector can
# be indexed by the mc1/mc2 ids stored in the network table.
mc_type = mc@colors
names(mc_type) = as.character(1:length(mc_type))
}
#	flow_mm = mctnetwork_get_flow_mat(mct, time, max_time=time)
# Keep only non-growth edges that link two consecutive time points inside
# the requested [min_time, max_time] window.
f_t = net$time1 >= min_time & net$time2 <= max_time &
net$time1 == net$time2-1 &
net$type1 != "growth" & net$type2!="growth"
net = net[f_t,]
# ...and among those, only edges whose source metacell is of type1 and whose
# target metacell is of type2.
f_types = mc_type[as.numeric(net$mc1)]==type1 & mc_type[as.numeric(net$mc2)]==type2
net = net[f_types,]
# Total flow leaving each source metacell / entering each target metacell,
# then normalized into weights that sum to 1 on each side.
src_mc_wgt = tapply(net$flow, net$mc1, sum)
targ_mc_wgt = tapply(net$flow, net$mc2, sum)
src_mc_wgt_n = as.vector(src_mc_wgt/sum(src_mc_wgt))
names(src_mc_wgt_n) = names(src_mc_wgt)
targ_mc_wgt_n = as.vector(targ_mc_wgt/sum(targ_mc_wgt))
names(targ_mc_wgt_n) = names(targ_mc_wgt)
# Flow-weighted average expression on each side: columns of e_gc are picked
# by metacell name, scaled by the matching weight, and summed per row.
src_e_gc = colSums(t(e_gc[,names(src_mc_wgt_n)]) * src_mc_wgt_n)
targ_e_gc = colSums(t(e_gc[,names(targ_mc_wgt_n)]) * targ_mc_wgt_n)
# The 1e-5 pseudo-count keeps the log fold change finite when a gene is
# silent on one side.
return(data.frame(src = src_e_gc, targ = targ_e_gc, lf = log2(1e-5+targ_e_gc)-log2(1e-5+src_e_gc)))
}
|
d1c1385bd41f03d33d623c8930dd1761c39e2800
|
6ff1395ddeedf486f7af75c21cbf546e0622b6c1
|
/man/create_weights_censoc_dmf.Rd
|
9a7dde21c0c48ed830cf6e2b9c7e73e1a98e0516
|
[] |
no_license
|
demographer/censocdev
|
3add321053ba8b4de2595a34ddb8c51519962234
|
6387f4b8f63d047835929cb59c03144f463db90e
|
refs/heads/master
| 2022-07-18T11:03:06.062343
| 2020-05-13T22:37:12
| 2020-05-13T22:37:12
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 685
|
rd
|
create_weights_censoc_dmf.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/create_weights_censoc_dmf.R
\name{create_weights_censoc_dmf}
\alias{create_weights_censoc_dmf}
\title{Create weights
This function will weight the numident censoc.dmf up to HMD lexis triangles
Restrict data to certain cohorts and ages at deaths}
\usage{
create_weights_censoc_dmf(censoc.dmf, cohorts = c(1895:1939),
death_ages = c(65:100))
}
\arguments{
\item{data}{data.frame with birth and death info}
}
\value{
data.frame
}
\description{
Create weights
This function will weight the numident censoc.dmf up to HMD lexis triangles
Restrict data to certain cohorts and ages at deaths
}
\keyword{internal}
|
de4e4ebc4135689e17c5bb77e68c975888a11771
|
a1a68bf3675c0d01bef17bc2ab2291529b718644
|
/.tmp/hrapgc.ToExcel.R
|
fdf1d09111c90d95826db0dbb670bde01d4daa14
|
[] |
no_license
|
Tuxkid/Gems
|
3d3c1aac6df3f11cf209cf33da23c214b26ff5c6
|
c0c5dfe02c826030e6e1c97ab8cd8191caacd33f
|
refs/heads/master
| 2021-01-10T15:17:41.468718
| 2020-08-13T03:15:14
| 2020-08-13T03:15:14
| 50,632,804
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,306
|
r
|
hrapgc.ToExcel.R
|
ToExcel <-
structure(function(dfs = ls(patt = "df", pos = 1), groupid = "", out = "out",
                   rm.csv = TRUE, RN = TRUE)
{
  ## Purpose: Build one Excel workbook from a set of data frames by dumping
  ##          each to a CSV file and handing the group of CSVs to the
  ##          external WriteExcel.pl script.
  ## ----------------------------------------------------------------------
  ## Arguments:
  ##  dfs: names of the data frames to export (looked up in the caller)
  ##  groupid: character tag that keeps this group's CSV files distinct
  ##  out: prefix for the resulting .xls file name
  ##  RN: logical -- include row names in the CSVs?
  ##  rm.csv: logical -- delete the intermediate CSV files afterwards?
  ## ----------------------------------------------------------------------
  ## Author: Patrick Connolly, Creation date: 26 Sep 2007, 11:11
  ## Revisions:- 27/10/11: copied RN argument from ToExcelF
  ## One CSV per data frame; "df" in each name is swapped for "CSV".
  for (df_name in dfs) {
    csv_file <- ppaste(groupid, sub("df", "CSV", df_name))
    ## get() resolves the name in the caller's frame, not inside ToExcel.
    write.csv(get(df_name, parent.frame()), csv_file, quote = FALSE,
              na = "", row.names = RN)
  }
  ## Assemble and run the external converter, then optionally tidy up.
  xls_file <- ppaste(out, ".xls")
  cmd <- ppaste("WriteExcel.pl --CSVfiles=", groupid, "*CSV ", xls_file)
  system(cmd)
  if (rm.csv) system(ppaste("rm ", groupid, "*CSV"))
}
, comment = "27/10/2011")
|
9d477686dd606e5a887d562e91377c18f036a331
|
b9fd6018f5431e48090842f89ed3b71b8532b4f7
|
/Chapter 5 Time series regression models.R
|
2c8c0dc68cba538487dd64e5b7205fb1ca906ee9
|
[] |
no_license
|
Agewerc/forecasting_principles_practices
|
dfbe90c6b1e999d0f85620724d1fbdd1de984add
|
5655fb4842039cf3ee78d4642546355d7c8f3dc9
|
refs/heads/master
| 2023-01-24T03:06:19.309785
| 2020-12-05T02:25:07
| 2020-12-05T02:25:07
| 318,045,621
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 8,778
|
r
|
Chapter 5 Time series regression models.R
|
##############################################
# Chapter 5 - Time series regression models ##
##############################################
# Introduction from the Book
####################################################################################
#In this chapter we discuss regression models. The basic concept is that we
# forecast the time series of interest y assuming that it has a linear
# relationship with other time series x. For example, we might wish to forecast
# monthly sales y using total advertising spend x as a predictor.
# Or we might forecast daily electricity demand y using temperature x1
# and the day of week x2 as predictors. The forecast variable y is sometimes
# also called the regressand, dependent or explained variable. The
# predictor variables x are sometimes also called the regressors,
# independent or explanatory variables. In this book we will always refer to them
# as the “forecast” variable and “predictor” variables.
####################################################################################
# load libraries
library(forecast)
library(ggplot2)
library(fpp2)
##############################################
# 5.1 - The linear model
###############
# Percentage changes in personal consumption expenditure and personal income for the US
autoplot(uschange[,c("Consumption","Income")]) +
ylab("% change") + xlab("Year")
# Scatterplot of quarterly changes in consumption expenditure versus
# quarterly changes in personal income and the fitted regression line.
uschange %>%
as.data.frame() %>%
ggplot(aes(x=Income, y=Consumption)) +
ylab("Consumption (quarterly % change)") +
xlab("Income (quarterly % change)") +
geom_point() +
geom_smooth(method="lm", se=FALSE)
#> `geom_smooth()` using formula 'y ~ x'
tslm(Consumption ~ Income, data=uschange)
# A scatterplot matrix of US consumption expenditure and the four predictors
uschange %>%
as.data.frame() %>%
GGally::ggpairs()
##############################################
# 5.2 - The linear model
###############
# information about the fitted model
fit.consMR <- tslm(
Consumption ~ Income + Production + Unemployment + Savings,
data=uschange)
summary(fit.consMR)
# Time plot of actual US consumption expenditure and
# predicted US consumption expenditure.
autoplot(uschange[,'Consumption'], series="Data") +
autolayer(fitted(fit.consMR), series="Fitted") +
xlab("Year") + ylab("") +
ggtitle("Percent change in US consumption expenditure") +
guides(colour=guide_legend(title=" "))
# Actual US consumption expenditure plotted against predicted US consumption expenditure
cbind(Data = uschange[,"Consumption"],
Fitted = fitted(fit.consMR)) %>%
as.data.frame() %>%
ggplot(aes(x=Data, y=Fitted)) +
geom_point() +
ylab("Fitted (predicted values)") +
xlab("Data (actual values)") +
ggtitle("Percent change in US consumption expenditure") +
geom_abline(intercept=0, slope=1)
##############################################
# 5.3 - Evaluating the regression model
###############
# Analysing the residuals from a regression model for US quarterly consumption
checkresiduals(fit.consMR)
# Scatterplots of residuals versus each predictor
df <- as.data.frame(uschange)
df[,"Residuals"] <- as.numeric(residuals(fit.consMR))
p1 <- ggplot(df, aes(x=Income, y=Residuals)) +
geom_point()
p2 <- ggplot(df, aes(x=Production, y=Residuals)) +
geom_point()
p3 <- ggplot(df, aes(x=Savings, y=Residuals)) +
geom_point()
p4 <- ggplot(df, aes(x=Unemployment, y=Residuals)) +
geom_point()
gridExtra::grid.arrange(p1, p2, p3, p4, nrow=2)
# Scatterplots of residuals versus fitted values
cbind(Fitted = fitted(fit.consMR),
Residuals=residuals(fit.consMR)) %>%
as.data.frame() %>%
ggplot(aes(x=Fitted, y=Residuals)) + geom_point()
##############################################
# 5.4- Some useful predictors
###############
# Australian quarterly beer production
beer2 <- window(ausbeer, start=1992)
autoplot(beer2) + xlab("Year") + ylab("Megalitres")
fit.beer <- tslm(beer2 ~ trend + season)
summary(fit.beer)
# Time plot of beer production and predicted beer production
autoplot(beer2, series="Data") +
autolayer(fitted(fit.beer), series="Fitted") +
xlab("Year") + ylab("Megalitres") +
ggtitle("Quarterly Beer Production")
# Actual beer production plotted against predicted beer production
cbind(Data=beer2, Fitted=fitted(fit.beer)) %>%
as.data.frame() %>%
ggplot(aes(x = Data, y = Fitted,
colour = as.factor(cycle(beer2)))) +
geom_point() +
ylab("Fitted") + xlab("Actual values") +
ggtitle("Quarterly beer production") +
scale_colour_brewer(palette="Dark2", name="Quarter") +
geom_abline(intercept=0, slope=1)
# fourier
fourier.beer <- tslm(beer2 ~ trend + fourier(beer2, K=2))
summary(fourier.beer)
##############################################
# 5.5 - Forecasting with regression
###############
# Forecasts from the regression model for beer production. The dark shaded
# region shows 80% prediction intervals and the light shaded region shows
# 95% prediction intervals.
beer2 <- window(ausbeer, start=1992)
fit.beer <- tslm(beer2 ~ trend + season)
fcast <- forecast(fit.beer)
autoplot(fcast) +
ggtitle("Forecasts of beer production using regression") +
xlab("Year") + ylab("megalitres")
# Forecasting percentage changes in personal consumption
# expenditure for the US under scenario based forecasting.
fit.consBest <- tslm(
Consumption ~ Income + Savings + Unemployment,
data = uschange)
h <- 4
newdata <- data.frame(
Income = c(1, 1, 1, 1),
Savings = c(0.5, 0.5, 0.5, 0.5),
Unemployment = c(0, 0, 0, 0))
fcast.up <- forecast(fit.consBest, newdata = newdata)
newdata <- data.frame(
Income = rep(-1, h),
Savings = rep(-0.5, h),
Unemployment = rep(0, h))
fcast.down <- forecast(fit.consBest, newdata = newdata)
autoplot(uschange[, 1]) +
ylab("% change in US consumption") +
autolayer(fcast.up, PI = TRUE, series = "increase") +
autolayer(fcast.down, PI = TRUE, series = "decrease") +
guides(colour = guide_legend(title = "Scenario"))
# Prediction intervals if income is increased by its historical mean of
# 0.72 % versus an extreme increase of 5%.
fit.cons <- tslm(Consumption ~ Income, data = uschange)
h <- 4
fcast.ave <- forecast(fit.cons,
newdata = data.frame(
Income = rep(mean(uschange[,"Income"]), h)))
fcast.up <- forecast(fit.cons,
newdata = data.frame(Income = rep(5, h)))
autoplot(uschange[, "Consumption"]) +
ylab("% change in US consumption") +
autolayer(fcast.ave, series = "Average increase",
PI = TRUE) +
autolayer(fcast.up, series = "Extreme increase",
PI = TRUE) +
guides(colour = guide_legend(title = "Scenario"))
# Projecting forecasts from a linear,
# exponential, piecewise linear trends and a cubic spline for the
# Boston marathon winning times
h <- 10
fit.lin <- tslm(marathon ~ trend)
fcasts.lin <- forecast(fit.lin, h = h)
fit.exp <- tslm(marathon ~ trend, lambda = 0)
fcasts.exp <- forecast(fit.exp, h = h)
t <- time(marathon)
t.break1 <- 1940
t.break2 <- 1980
tb1 <- ts(pmax(0, t - t.break1), start = 1897)
tb2 <- ts(pmax(0, t - t.break2), start = 1897)
fit.pw <- tslm(marathon ~ t + tb1 + tb2)
t.new <- t[length(t)] + seq(h)
tb1.new <- tb1[length(tb1)] + seq(h)
tb2.new <- tb2[length(tb2)] + seq(h)
newdata <- cbind(t=t.new, tb1=tb1.new, tb2=tb2.new) %>%
as.data.frame()
fcasts.pw <- forecast(fit.pw, newdata = newdata)
fit.spline <- tslm(marathon ~ t + I(t^2) + I(t^3) +
I(tb1^3) + I(tb2^3))
fcasts.spl <- forecast(fit.spline, newdata = newdata)
##############################################
# 5.8 - Nonlinear regression
###############
autoplot(marathon) +
autolayer(fitted(fit.lin), series = "Linear") +
autolayer(fitted(fit.exp), series = "Exponential") +
autolayer(fitted(fit.pw), series = "Piecewise") +
autolayer(fitted(fit.spline), series = "Cubic Spline") +
autolayer(fcasts.pw, series="Piecewise") +
autolayer(fcasts.lin, series="Linear", PI=FALSE) +
autolayer(fcasts.exp, series="Exponential", PI=FALSE) +
autolayer(fcasts.spl, series="Cubic Spline", PI=FALSE) +
xlab("Year") + ylab("Winning times in minutes") +
ggtitle("Boston Marathon") +
guides(colour = guide_legend(title = " "))
|
0a0e6801212a2010eb329310b25cd1a84b7fcf95
|
cfcf2291aafea8e6212ad8897807f6fec818da71
|
/cachematrix.R
|
e059415a6c6a8d4cc614b7d7a6a6e139e81ba428
|
[] |
no_license
|
sivasunnam/ProgrammingAssignment2
|
be01af65ddd86411057bb2dfcde9c57d26199ca0
|
b9f9f82099ef473c61e35ab2704b0e65c4466306
|
refs/heads/master
| 2021-01-17T21:49:22.215838
| 2015-09-24T08:09:33
| 2015-09-24T08:09:33
| 41,148,923
| 0
| 0
| null | 2015-08-21T09:57:28
| 2015-08-21T09:57:27
| null |
UTF-8
|
R
| false
| false
| 1,306
|
r
|
cachematrix.R
|
## This script returns an inverse of a matrix from cache or by calculating it
## This function create getters and setters for a matrix and its inverse and stores it in a list
## Build a cache-aware wrapper around a matrix. The returned list exposes
## getters/setters for the matrix itself and for its (lazily computed)
## inverse; replacing the matrix invalidates any cached inverse.
makeCacheMatrix <- function(x = matrix())
{
  inv <- NULL
  ## Replace the wrapped matrix and clear the stale cached inverse.
  set <- function(value) {
    x <<- value
    inv <<- NULL
  }
  ## Return the wrapped matrix.
  get <- function() x
  ## Store a computed inverse in the cache.
  setinv <- function(solved) inv <<- solved
  ## Return the cached inverse (NULL if nothing cached yet).
  getinv <- function() inv
  list(set = set,
       get = get,
       setinv = setinv,
       getinv = getinv)
}
## Return the inverse of the matrix held by a makeCacheMatrix() object,
## reusing a previously cached inverse when one exists. Extra arguments are
## forwarded to solve(). Non-square matrices are reported and NULL returned.
cacheSolve <- function(x, ...)
{
  inv <- x$getinv()
  if (is.null(inv)) {
    mat <- x$get()
    if (nrow(mat) != ncol(mat)) {
      ## solve() can only invert square matrices; leave the cache empty.
      message("Not a Square Matrix")
    } else {
      ## Compute the inverse once and remember it for later calls.
      inv <- solve(mat, ...)
      x$setinv(inv)
    }
  } else {
    ## Cache hit: skip the computation entirely.
    message("getting cached data")
  }
  inv
}
|
22995526bb9e1cd86d97921c905d75724530b848
|
d2f39a2258dbe6253bc28fd00717a67b131751f4
|
/man/scale_brewer.Rd
|
151d0591e424982b727b7576f35676c328906f1a
|
[
"BSD-3-Clause",
"BSD-2-Clause"
] |
permissive
|
andrewzm/MVST
|
6e5d9d5c84ba0d28e38fdb69b12cfa8ba1bcc45f
|
2bf0835e66e04e120f78fe8673afe3dd9d6f42c0
|
refs/heads/master
| 2022-09-29T23:40:39.048820
| 2022-09-15T21:37:50
| 2022-09-15T21:37:50
| 20,478,703
| 10
| 9
| null | 2018-10-18T14:50:36
| 2014-06-04T10:13:03
|
R
|
UTF-8
|
R
| false
| true
| 5,937
|
rd
|
scale_brewer.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/scale-brewer.r
\name{scale_colour_brewer}
\alias{scale_colour_brewer}
\alias{scale_fill_brewer}
\alias{scale_colour_distiller}
\alias{scale_fill_distiller}
\title{Sequential, diverging and qualitative colour scales from colorbrewer.org}
\usage{
scale_colour_brewer(..., type = "seq", palette = 1)
scale_fill_brewer(..., type = "seq", palette = 1)
scale_colour_distiller(..., type = "seq", palette = 1, values = NULL,
space = "Lab", na.value = "grey50")
scale_fill_distiller(..., type = "seq", palette = 1, values = NULL,
space = "Lab", na.value = "grey50", reverse = F)
}
\arguments{
\item{...}{Arguments passed on to \code{discrete_scale}
\describe{
\item{breaks}{One of:
\itemize{
\item \code{NULL} for no breaks
\item \code{waiver()} for the default breaks computed by the
transformation object
\item A character vector of breaks
\item A function that takes the limits as input and returns breaks
as output
}}
\item{limits}{A character vector that defines possible values of the scale
and their order.}
\item{drop}{Should unused factor levels be omitted from the scale?
The default, \code{TRUE}, uses the levels that appear in the data;
\code{FALSE} uses all the levels in the factor.}
\item{na.translate}{Unlike continuous scales, discrete scales can easily show
missing values, and do so by default. If you want to remove missing values
from a discrete scale, specify \code{na.translate = FALSE}.}
\item{na.value}{If \code{na.translate = TRUE}, what value aesthetic
value should missing be displayed as? Does not apply to position scales
where \code{NA} is always placed at the far right.}
\item{scale_name}{The name of the scale}
\item{palette}{A palette function that when called with a single integer
argument (the number of levels in the scale) returns the values that
they should take}
\item{name}{The name of the scale. Used as axis or legend title. If
\code{waiver()}, the default, the name of the scale is taken from the first
mapping used for that aesthetic. If \code{NULL}, the legend title will be
omitted.}
\item{labels}{One of:
\itemize{
\item \code{NULL} for no labels
\item \code{waiver()} for the default labels computed by the
transformation object
\item A character vector giving labels (must be same length as \code{breaks})
\item A function that takes the breaks as input and returns labels
as output
}}
\item{expand}{Vector of range expansion constants used to add some
padding around the data, to ensure that they are placed some distance
away from the axes. Use the convenience function \code{\link[=expand_scale]{expand_scale()}}
to generate the values for the \code{expand} argument. The defaults are to
expand the scale by 5\% on each side for continuous variables, and by
0.6 units on each side for discrete variables.}
\item{guide}{A function used to create a guide or its name. See
\code{\link[=guides]{guides()}} for more info.}
\item{position}{The position of the axis. "left" or "right" for vertical
scales, "top" or "bottom" for horizontal scales}
\item{super}{The super class to use for the constructed scale}
}}
\item{type}{One of seq (sequential), div (diverging) or qual (qualitative)}
\item{palette}{If a string, will use that named palette. If a number, will
index into the list of palettes of appropriate \code{type}}
\item{values}{if colours should not be evenly positioned along the gradient
this vector gives the position (between 0 and 1) for each colour in the
\code{colours} vector. See \code{\link[=rescale]{rescale()}} for a convience function
to map an arbitrary range to between 0 and 1.}
\item{space}{colour space in which to calculate gradient. Must be "Lab" -
other values are deprecated.}
\item{na.value}{Colour to use for missing values}
}
\description{
Create colour scales based on ColorBrewer colours.
}
\details{
Note: this is an offshoot from ggplot2 with some added functions. I am not sure who the author is as the forum on which this appeared
is now down. The functions of interest here are the bottom two, scale_colour_distiller and scale_fill_distiller which
allow for nice brewer plots on continuous scales
ColorBrewer provides sequential, diverging and qualitative colour schemes
which are particularly suited and tested to display discrete values (levels
of a factor) on a map. ggplot2 can use those colours in discrete scales. It
also allows to smoothly interpolate the colours to a continuous scale,
although the original colour schemes (particularly the qualitative ones)
were not intended for this. The perceptual result is left to the
appreciation of the user.
See \url{http://colorbrewer2.org} for more information.
}
\examples{
dsamp <- diamonds[sample(nrow(diamonds), 1000), ]
(d <- qplot(carat, price, data=dsamp, colour=clarity))
# Change scale label
d + scale_colour_brewer()
d + scale_colour_brewer("clarity")
d + scale_colour_brewer(expression(clarity[beta]))
# Select brewer palette to use, see ?scales::brewer_pal for more details
d + scale_colour_brewer(type="seq")
d + scale_colour_brewer(type="seq", palette=3)
d + scale_colour_brewer(palette="Blues")
d + scale_colour_brewer(palette="Set1")
# scale_fill_brewer works just the same as
# scale_colour_brewer but for fill colours
ggplot(diamonds, aes(x=price, fill=cut)) +
geom_histogram(position="dodge", binwidth=1000) +
scale_fill_brewer()
# Generate map data
library(reshape2) # for melt
volcano3d <- melt(volcano)
names(volcano3d) <- c("x", "y", "z")
# Basic plot
v <- ggplot() + geom_tile(aes(x=x, y=y, fill=z), data=volcano3d)
v
v + scale_fill_distiller()
v + scale_fill_distiller(palette=2)
v + scale_fill_distiller(type="div")
v + scale_fill_distiller(palette="Spectral")
v + scale_fill_distiller(palette="Spectral", trans="reverse")
v + scale_fill_distiller(type="qual")
# Not appropriate for continuous data, issues a warning
}
\concept{colour scales}
|
f4e369d0af18a2ddefa1b8f611984ae38e695ef7
|
b8756cf7e224eed7291700aa98c4a4afe05381b3
|
/man/alt_barproj.Rd
|
94e68b2d1aa24ef79906a204c16029d6cf8ed1a1
|
[] |
no_license
|
jonasbhend/NRMgraphics
|
879e8cff100d1a570453cc294c504db2d4dc7a7c
|
503f11fe95d562d3ce5c157b0d377ded1a8c499c
|
refs/heads/master
| 2016-08-04T20:58:24.326426
| 2014-11-05T10:20:02
| 2014-11-05T10:20:02
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 906
|
rd
|
alt_barproj.Rd
|
% Generated by roxygen2 (4.0.2): do not edit by hand
\name{alt_barproj}
\alias{alt_barproj}
\title{Plot grouped projection bars with inner shading}
\usage{
alt_barproj(meanchange, meansd = NULL, ylim = NULL, col = hcl(c(240, 0), l
= 60, c = 50), col2 = hcl(c(240, 0), l = 30, c = 80), lwd = 10,
siglevel = 0.2, lty = 1, distance = 0.5)
}
\arguments{
\item{meanchange}{list of percentile changes}
\item{meansd}{list of standard dev. or percentile changes for nat. var.}
\item{ylim}{vertical extent of plot}
\item{col,col2}{colours for positive and negative segments of bars}
\item{lwd}{bar width}
\item{siglevel}{significance level to convert sdev. to shading ranges}
\item{lty}{line type for horizontal grid lines}
\item{distance}{relative distance between group of bars}
}
\description{
Bar plots grouped by scneario or seas with inner shading reflecting
natural variability.
}
\keyword{plot}
|
7f2e01c21c3a72f12c21a02bc1132b50beebc689
|
1030b5169e3f13a0fc72eefb2b2e5614567a621e
|
/code/Rank_Profit.R
|
05e4f2eb3af5fdc19af86210c9014a27513c6748
|
[] |
no_license
|
SeanGRichardson/20174489_Assignment
|
c18c3aebca726a476d4c7fdd425077ac1531356f
|
dd886815635cb89f43d81df4183cda0b68eb48f7
|
refs/heads/main
| 2023-04-21T11:24:53.312388
| 2021-05-24T19:04:33
| 2021-05-24T19:04:33
| 370,394,237
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 247
|
r
|
Rank_Profit.R
|
# Rank studios by mean profitability.
#
# Drops rows with a missing Profitability, computes the mean profitability
# per Lead.Studio, and sorts studios from most to least profitable.
#
# @param Data A data frame with at least the columns `Lead.Studio` and
#   `Profitability`.
# @return A data frame with one row per studio and the columns
#   `Lead.Studio` and `Mean_Studio_Profitability`, sorted descending.
Rank_Profit <- function(Data) {
  Data %>%
    # idiomatic NA test instead of the original `is.na(Profitability) == 0`
    filter(!is.na(Profitability)) %>%
    group_by(Lead.Studio) %>%
    summarise(Mean_Studio_Profitability = mean(Profitability)) %>%
    arrange(desc(Mean_Studio_Profitability))
}
|
b67005686fa3c9cadc2805971ab0e1ce268f20bf
|
9b5aaacd59506bae1ab810c6ae1695a968903476
|
/ui.R
|
467be0e5b76a1489e6f819b1c5884224b9f1fe15
|
[] |
no_license
|
darrendonhardt/DataProducts-Assignment
|
05cae6e79eb38c4b91c2246795fa4427abd9349f
|
97486a9d43b3afb057f9dac6e9be838ba05cf82c
|
refs/heads/master
| 2016-09-08T07:23:42.197993
| 2014-08-24T14:02:05
| 2014-08-24T14:02:05
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 870
|
r
|
ui.R
|
# ui.R
#
# Shiny UI definition: a sidebar of four range sliders (cylinders,
# displacement, weight, quarter-mile time) that filter the data shown in
# the main panel's plot (rendered by the companion server.R).

# Sidebar: heading plus the four filter sliders.
filter_panel <- sidebarPanel(
  "FILTER OPTIONS",
  br(),
  br(),
  sliderInput("cyl.range",
              label = "Number of cylinders:",
              min = 4, max = 8, value = c(4, 8)),
  sliderInput("disp.range",
              label = "Displacement (cu.in.):",
              min = 71, max = 472, value = c(71, 472)),
  sliderInput("wt.range",
              label = "Weight (lb/1000):",
              min = 1, max = 6, value = c(1, 6), step = 0.5),
  sliderInput("qsec.range",
              label = "Quarter Mile time (seconds):",
              min = 14, max = 23, value = c(14, 23), step = 1)
)

shinyUI(fluidPage(
  titlePanel("Data Products Assignment"),
  sidebarLayout(
    filter_panel,
    mainPanel(plotOutput("plot"))
  )
))
|
0f2eceab7df5591cf5538d6892e0f56cafac12ff
|
68b8fb9056be04cb9b5a387e8e09da45553bfe9d
|
/man/ml_bilog.Rd
|
0ddb7c3fbd4fac980ef052be4a1b9944052895b2
|
[] |
no_license
|
muguangyuze/mgpd
|
b42aa03ebe15aa5c7689362ccffffb11f87c9935
|
46fbfada83fe27e90f911f8c97ef7a1ebf4fc08f
|
refs/heads/master
| 2020-12-25T05:17:10.404600
| 2012-03-15T00:00:00
| 2012-03-15T00:00:00
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 3,445
|
rd
|
ml_bilog.Rd
|
\name{ml_bilog}
\alias{ml_bilog}
%- Also NEED an '\alias' for EACH other topic documented here.
\title{
internal
}
\description{
internal use only
}
\usage{
ml_bilog(param, dat, mlmax = 1e+15, fixed = FALSE, ...)
}
%- maybe also 'usage' for other objects documented here.
\arguments{
\item{param}{
%% ~~Describe \code{param} here~~
}
\item{dat}{
%% ~~Describe \code{dat} here~~
}
\item{mlmax}{
%% ~~Describe \code{mlmax} here~~
}
\item{fixed}{
%% ~~Describe \code{fixed} here~~
}
\item{\dots}{
%% ~~Describe \code{\dots} here~~
}
}
\details{
internal use only
}
\value{
internal use only
%% If it is a LIST, use
%% \item{comp1 }{Description of 'comp1'}
%% \item{comp2 }{Description of 'comp2'}
%% ...
}
\references{
internal use only
}
\author{
P. Rakonczai
}
\note{
internal use only
}
%% ~Make other sections like Warning with \section{Warning }{....} ~
\seealso{
internal use only
}
\examples{
##---- Should be DIRECTLY executable !! ----
##-- ==> Define data, use random,
##-- or do help(data=index) for the standard data sets.
## The function is currently defined as
function (param, dat, mlmax = 1e+15, fixed = FALSE, ...)
{
loglik = mlmax
hxy = NA
x = dat[, 1]
y = dat[, 2]
error = FALSE
mux = param[1]
muy = param[4]
sigx = param[2]
sigy = param[5]
gamx = param[3]
gamy = param[6]
a = param[7]
b = param[8]
if (sigx < 0 | sigy < 0 | a < 0 | b < 0 | a > 1 | b > 1)
error = TRUE
if (fixed == TRUE) {
mux = 0
}
if (error)
loglik = mlmax
if (!error) {
tx = (1 + gamx * (x - mux)/sigx)^(1/gamx)
ty = (1 + gamy * (y - muy)/sigy)^(1/gamy)
tx0 = (1 + gamx * (-mux)/sigx)^(1/gamx)
ty0 = (1 + gamy * (-muy)/sigy)^(1/gamy)
dtx = (1/sigx) * pmax((1 + gamx * (x - mux)/sigx), 0)^(1/gamx -
1)
dty = (1/sigy) * pmax((1 + gamy * (y - muy)/sigy), 0)^(1/gamy -
1)
w = tx/(tx + ty)
l = length(w)
gma1 = rep(NA, l)
if (any(is.na(w)))
loglik = mlmax
else for (i in 1:l) {
eqn = function(z) (1 - a) * (1 - w[i]) * (1 - z)^b -
(1 - b) * w[i] * z^a
if (w[i] == 0)
gma1[i] = 0
else if (w[i] == 1)
gma1[i] = 1
else gma1[i] = uniroot(eqn, lower = 0, upper = 1,
tol = .Machine$double.eps^0.5)$root
}
hdens = function(w, gma = gma1) ((1 - a) * (1 - gma) *
gma^(1 - a))/((1 - w) * w^2 * ((1 - gma) * a + gma *
b))
dxdymu = function(x1, y1) -(x1 + y1)^(-3) * hdens(x1/(x1 +
y1))
c0 = log(pbvevd(c(0, 0), model = "bilog", mar1 = c(mux,
sigx, gamx), mar2 = c(muy, sigy, gamy), alpha = a,
beta = b))
hxy = 1/c0 * dxdymu(tx, ty) * dtx * dty
hxy = as.numeric(hxy * (1 - ((x < 0) * (y < 0))))
loglik = -sum(log(hxy))
}
if (min(1 + gamx * (x - mux)/sigx) < 0)
loglik = mlmax
if (min(1 + gamy * (y - muy)/sigy) < 0)
loglik = mlmax
loglik
}
}
% Add one or more standard keywords, see file 'KEYWORDS' in the
% R documentation directory.
\keyword{ internal }
%% \keyword{ ~kwd2 }% __ONLY ONE__ keyword per line
|
2f98a5038b758a2ac2acf49a8a3b5384b1d4f01b
|
e59bfb6100c4ef2de8ba99b865f510db5a47cb52
|
/scripts/plotting/004_plot_deltas.R
|
efecd04e3033d432310b0f328aac343b4bbd946a
|
[] |
no_license
|
jmmarzolino/CCII_BOZ
|
0330a1a8abe2249e4412ad73d7309d451c6d8f0a
|
bca16e6fde90c989a7c1b90c9a65cf000d1677c2
|
refs/heads/master
| 2021-12-11T18:00:35.595392
| 2021-11-30T23:01:15
| 2021-11-30T23:01:15
| 214,494,990
| 1
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 7,850
|
r
|
004_plot_deltas.R
|
#!/usr/bin/env Rscript
#SBATCH --ntasks=1
#SBATCH --mem=60G
#SBATCH --time=02:00:00
#SBATCH --job-name='delta_AF'
#SBATCH --output=/rhome/jmarz001/bigdata/CCII_BOZ/scripts/delta_AF.stdout
#SBATCH -p koeniglab
# Set up environment.
# BUG FIX: the rest of this script uses dplyr verbs (%>%, group_by,
# mutate, left_join, ...) and ggplot2, but only readr was loaded; load
# them here so the script runs non-interactively under sbatch.
setwd("/bigdata/koeniglab/jmarz001/CCII_BOZ/results")
library(readr)
library(dplyr)
library(ggplot2)
options(stringsAsFactors = FALSE)
##############################################################################
#####################Read In Dataset##########################################
##############################################################################
# Load tab-delimited files with the change in allele frequencies
# (delta AF) already calculated: parents-to-all, Davis, and Bozeman.
delta_AF_F1toALL <- read_delim("delta_AF_F1toALL", "\t", col_names = TRUE, trim_ws = TRUE)
delta_AF_DAVIS <- read_delim("delta_AF_DAVIS", "\t", col_names = TRUE, trim_ws = TRUE)
delta_AF_BOZ <- read_delim("delta_AF_BOZ", "\t", col_names = TRUE, trim_ws = TRUE)
###########################################################################
# Average change in allele frequency between Davis F18 and Bozeman F27
###########################################################################
# Some quick exploratory plotting to get an idea what the data look like.
# Each hist() shows the distribution of absolute allele-frequency change
# for one comparison column (data columns start at column 3).
# NOTE(review): rowSums() over a single selected column appears to be used
# only to coerce the one-column tibble to a plain numeric vector.
## Parents
hist(rowSums(abs(delta_AF_F1toALL[,3])),ylim=c(0,2000000))
hist(rowSums(abs(delta_AF_F1toALL[,4])),ylim=c(0,2000000))
hist(rowSums(abs(delta_AF_F1toALL[,5])),ylim=c(0,2000000))
hist(rowSums(abs(delta_AF_F1toALL[,6])),ylim=c(0,2000000))
hist(rowSums(abs(delta_AF_F1toALL[,7])),ylim=c(0,2000000))
## Davis
hist(rowSums(abs(delta_AF_DAVIS[,3])))
hist(rowSums(abs(delta_AF_DAVIS[,4])))
hist(rowSums(abs(delta_AF_DAVIS[,5])))
## Bozeman
hist(rowSums(abs(delta_AF_BOZ[,3])))
hist(rowSums(abs(delta_AF_BOZ[,4])))
hist(rowSums(abs(delta_AF_BOZ[,5])))
## Mean & median allele frequency change per pop.
# Print summary statistics (mean, median, max, min) for every column of a
# numeric matrix; the column name labels each printed line.  Extracted as
# a helper because the identical loop was copy-pasted three times
# (Bozeman, Davis, parents).
print_col_summaries <- function(mat) {
  for (i in seq_len(ncol(mat))) {
    col_name <- colnames(mat)[i]
    # default quantile(): 0/25/50/75/100% -> [1] min, [3] median, [5] max
    qs <- quantile(mat[, i], na.rm = TRUE)
    print(paste(col_name, "mean is", mean(mat[, i], na.rm = TRUE)))
    print(paste(col_name, "median is", qs[3]))
    print(paste(col_name, "max is", qs[5]))
    print(paste(col_name, "min is", qs[1]))
  }
}
## BOZEMAN
BOZ_matrix <- data.matrix(delta_AF_BOZ[, 3:ncol(delta_AF_BOZ)])
print_col_summaries(BOZ_matrix)
## DAVIS
DAV_matrix <- data.matrix(delta_AF_DAVIS[, 3:ncol(delta_AF_DAVIS)])
print_col_summaries(DAV_matrix)
## PARENTS
F1_matrix <- data.matrix(delta_AF_F1toALL[, 3:ncol(delta_AF_F1toALL)])
print_col_summaries(F1_matrix)
# The printout could be made tidier by collecting these statistics into
# rows of a data frame instead of printing strings.
###########################################################################
# Plot positive and negative delta AF
###########################################################################
# ... across the genome.
# Bind all per-population delta-AF columns into one table; columns 1-2
# (chromosome, position) come from delta_AF_F1toALL.
df <- cbind.data.frame(delta_AF_F1toALL,delta_AF_DAVIS[,3:5],delta_AF_BOZ[,3:5])
#write_delim(df,"delta_AF_all",delim="\t",col_names=T)
# Name the chromosome/position columns used below.
names(df)[1]<-"CHR"
names(df)[2]<-"POS"
# Format for plotting: build a cumulative genome-wide base-pair
# coordinate (BPcum) so all chromosomes share one x axis -- the standard
# Manhattan-plot layout.
df$BP<-as.numeric(df$POS)
result <- df %>%
  # Compute chromosome size
  group_by(CHR) %>%
  summarise(chr_len=max(BP)) %>%
  # Calculate cumulative position of each chromosome (offset of its start)
  mutate(tot=cumsum(as.numeric(chr_len))-as.numeric(chr_len)) %>%
  select(-chr_len) %>%
  # Add this info to the initial dataset
  left_join(df, ., by=c("CHR"="CHR")) %>%
  # Add a cumulative position of each SNP
  arrange(CHR, BP) %>%
  mutate(BPcum=BP+tot)
#head(result)
# Chromosome midpoints, used to place one axis label per chromosome.
axisresult = result %>% group_by(CHR) %>% summarize(center=( max(BPcum) + min(BPcum) ) / 2 )
# One Manhattan-style plot per data column (columns 3-13 of `result`).
# Trick: the current column is temporarily renamed to "Y" so a fixed
# aes(y = Y) can be used, then renamed to a placeholder ("X<i>")
# afterwards so later iterations do not see two columns called "Y".
for (x in 3:13){
OutName <- paste0(colnames(result)[x],"_deltaAF")
xlab <- colnames(result)[x]
names(result)[x]<-"Y"
g <- ggplot(result, aes(x=BPcum, y=Y)) +
    # Show all points
    geom_point(aes(color=as.factor(CHR)), alpha=0.5, size=1.3) +
    # Alternate black/grey per chromosome
    scale_color_manual(values = rep(c("black", "grey"), 22 )) +
    # custom X axis: chromosome labels at chromosome centers
    scale_x_continuous(label = axisresult$CHR, breaks= axisresult$center) +
    scale_y_continuous(expand = c(0, 0.5)) + # remove space between plot area and x axis
    # Customize the theme:
    theme_classic() +
    theme(legend.position="none",
    panel.border = element_blank(),
    panel.grid.major.x = element_blank(),
    panel.grid.minor.x = element_blank(),
    text=element_text(size=16)) +
    xlab(xlab) +
    ylab("Allele Frequency")+
    ylim(0,1)
OutName2<-paste0(OutName, "_manhattan.jpeg")
ggsave(OutName2, g, width=8, height=5, units="in")
# Restore a unique placeholder column name for this iteration's column.
trash <- paste0("X",x)
names(result)[x]<- trash
}
###########################################################################
# Plot magnitude of delta AF (absolute value) across the genome
###########################################################################
# Re-read the combined table and take absolute values so the plots show
# the magnitude of the allele-frequency change regardless of direction.
# NOTE(review): this read assumes the write_delim("delta_AF_all") call
# commented out above has been run at some point -- confirm the file
# exists on disk before running this section.
df <- read_delim("delta_AF_all",delim="\t",col_names=T)
df[,3:ncol(df)] <- abs(df[,3:ncol(df)])
df$BP<-as.numeric(df$POS)
# Same cumulative-coordinate computation as in the previous section.
result <- df %>%
  # Compute chromosome size
  group_by(CHR) %>%
  summarise(chr_len=max(BP)) %>%
  # Calculate cumulative position of each chromosome
  mutate(tot=cumsum(as.numeric(chr_len))-as.numeric(chr_len)) %>%
  select(-chr_len) %>%
  # Add this info to the initial dataset
  left_join(df, ., by=c("CHR"="CHR")) %>%
  # Add a cumulative position of each SNP
  arrange(CHR, BP) %>%
  mutate(BPcum=BP+tot)
#head(result)
axisresult = result %>% group_by(CHR) %>% summarize(center=( max(BPcum) + min(BPcum) ) / 2 )
# One plot per data column, using the same rename-to-"Y" trick as above.
for (x in 3:13){
OutName <- paste0(colnames(result)[x],"_deltaAF_abs")
xlab <- colnames(result)[x]
names(result)[x]<-"Y"
g <- ggplot(result, aes(x=BPcum, y=Y)) +
    # Show all points
    geom_point(aes(color=as.factor(CHR)), alpha=0.5, size=1.3) +
    scale_color_manual(values = rep(c("black", "grey"), 22 )) +
    # custom X axis:
    scale_x_continuous(label = axisresult$CHR, breaks= axisresult$center) +
    scale_y_continuous(expand = c(0, 0.5)) + # remove space between plot area and x axis
    # Customize the theme:
    theme_classic() +
    theme(legend.position="none",
    panel.border = element_blank(),
    panel.grid.major.x = element_blank(),
    panel.grid.minor.x = element_blank(),
    text=element_text(size=16)) +
    xlab(xlab) +
    ylab("Change in Allele Frequency")+
    ylim(0,1)
OutName2<-paste0(OutName, "_manhattan.jpeg")
ggsave(OutName2, g, width=10, height=6, units="in")
trash <- paste0("X",x)
names(result)[x]<- trash
}
###########################################################################
# Plot actual AF changes as distributions
###########################################################################
# Histogram of the allele-frequency changes in one chosen column (here
# column 5) of the combined table.
x <- 5
gen <- colnames(df)[x]
xlab <- paste("Allele Frequency Change", gen)
OutName <- paste("delta_AF", gen, sep = "_")
g <- ggplot(df, aes(get(gen))) +
  geom_histogram(bins = 40) +
  theme_minimal() +
  xlab(xlab) +
  ylab("Frequency")
OutName2 <- paste0(OutName, "_distribution.jpeg")
# BUG FIX: ggsave() takes the filename first and the plot second
# (ggsave(filename, plot, ...)); the original call passed the plot object
# as the filename, which errors.  The corrected order also matches every
# other ggsave() call in this script.
ggsave(OutName2, g, width = 10, height = 6, units = "in")
###########################################################################
# Plot absolute AF changes as distributions
###########################################################################
|
d80a57a05589fa63ff29169cb3b5cdeee347b47a
|
156e540389dca2dae31639da3d0e766273726fe6
|
/R/read_model.r
|
e2815ba3bb1929ef749d80853695c90de99c9302
|
[] |
no_license
|
zhanglj37/blcfa
|
3f653105b129e7c1fb158349d40569c05a35608e
|
800fa582cd8646e8d213db1f2dc30c9cccd298f5
|
refs/heads/master
| 2023-04-13T03:12:59.951267
| 2023-03-22T22:03:01
| 2023-03-22T22:03:01
| 187,756,358
| 5
| 1
| null | 2019-08-08T13:22:34
| 2019-05-21T03:38:57
|
R
|
UTF-8
|
R
| false
| false
| 2,503
|
r
|
read_model.r
|
### function---read_model----
## Output factors and indicators
#
# Parse a lavaan-style measurement-model string, one specification per
# line in the form "factor =~ ind1 + ind2 + ...", into its factors and
# indicators.
#
# @param myModel A single character string; lines are separated by '\n'.
# @return A list `mmvar` where element 1 is the character vector of all
#   factor names, and element k (k >= 2) is the character vector of
#   indicator names belonging to the (k-1)-th factor.
read_model<-function(myModel)
{
# Split the model string into individual lines.
mmsplit<-strsplit(myModel,'\n')
mmsplit<-mmsplit[[1]]
mmlength<-length(mmsplit)
# loc_blank[i] is set to 9 when line i is "blank" (all spaces or empty).
loc_blank = rep(0, mmlength)
## Delete blank line
for (i in 1:mmlength)
{
tempstr<-mmsplit[i]
tempsplit<-strsplit(tempstr,' ')
tempsplit<-tempsplit[[1]]
templength<-length(tempsplit)
## Delete blank line
# Build `tempempty`, a string of as many spaces as the line has tokens;
# a line equal to that string (or empty, when templength == 0) contains
# nothing but spaces and is flagged as blank below.
if (templength > 0)
{
tempempty<-NULL
for (j in 1:length(tempsplit))
{
tempempty=paste0(tempempty," ")
}
}else{
tempempty = tempstr
}
if(tempstr == tempempty)
{
loc_blank[i] = 9
}
}
# Drop the flagged blank lines, if any.
if (sum(loc_blank)>0)
{
mm2split<-mmsplit[-which(loc_blank==9)]
}else{
mm2split<-mmsplit
}
mm2length<-length(mm2split)
mmvar<-NULL
numw<-1 ## num of latent variables(w); element 1 of mmvar is reserved for factor names
factorname<-NULL
## Process each line of characters separately
for (i in 1:mm2length)
{
tempstr<-mm2split[i]
tempsplit<-strsplit(tempstr,' ')
tempsplit<-tempsplit[[1]]
templength<-length(tempsplit)
## fault tolerance
## Remove multi-entered spaces
# Drop empty tokens produced by runs of spaces; `loc` compensates for
# the elements already removed from temp2split as j advances.
temp2split<-tempsplit
loc<-0
for (j in 1:templength)
{
if (tempsplit[j] == "")
{
temp2split<-temp2split[-j+loc]
loc<-loc+1
}
}
## Locate the mathematical symbols in the model to locate each variable
# cfa_loc records the token positions of "=~" and "+"; each variable
# name sits one token to the right of such a symbol.
# NOTE(review): this local variable shadows the cfa_loc() helper
# function defined below in this file.
templength<-length(temp2split)
locj<-1
cfa_loc<-rep(0,templength)
for (j in 1:templength)
{
if (temp2split[j] == "=~" || temp2split[j] == "+")
{
cfa_loc[locj]<-j
locj=locj+1
}
}
# When no symbol was found, locj == 1 and this keeps only the leading 0,
# so the line is treated as a non-factor line by the check below.
cfa_loc<-cfa_loc[1:(locj-1)]
## If there is a =~ symbol, the number of latent variables +1.
if (cfa_loc[1] != 0)
{
numw=numw+1
cfa_loc1<-cfa_loc+1
mmvar[[numw]]<-temp2split[cfa_loc1]
}
# The first token of the line is recorded as the current factor's name.
factorname[(numw-1)]<-temp2split[1]
}
mmvar[[1]]<-factorname
return(mmvar)
}
## Calculate the location of each indicator in the corresponding dataset
#
# For each character vector of variable names (as produced by
# read_model(): element 1 holds factor names, elements 2..n the
# indicators of each factor), find the column positions of those names
# in `data`.
#
# @param mmvar A list of character vectors of variable names.
# @param data A data frame whose column names are matched exactly
#   (case-sensitive) against the names in `mmvar`.
# @return A list parallel to `mmvar`; each element is a vector of column
#   positions, with 0 for names not present in `data` (preserving the
#   original's 0 default for unmatched indicators).
cfa_loc <- function(mmvar, data) {
  data_names <- colnames(data)
  # match() replaces the original O(names x columns) triple loop; data
  # frame column names are unique, so first-match semantics are safe.
  lapply(mmvar, function(vars) match(vars, data_names, nomatch = 0L))
}
|
a2d7263f6d42f28b3ea5857f3306689789230f7e
|
227c4849fe0107829ea98614d5113f58f9a082fb
|
/R/grid-silent.R
|
64103cee25b5777226a1b7a188158ec0f4cc09db
|
[] |
no_license
|
VijayEluri/memoryagents
|
b52fb75b9af9f7911b1abf2f1c257a34eef44ab5
|
0289908f17335b28e080d01ee3594f8cbbd14ea2
|
refs/heads/master
| 2020-05-20T11:03:57.570093
| 2012-02-12T21:33:28
| 2012-02-12T21:33:28
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 140
|
r
|
grid-silent.R
|
# Regenerate the "grid agents without communication" figure.
source("magents_experiments.R")  # defines generateSingleGraph() -- TODO confirm; loads experiment data
generateSingleGraph("grid_silent", "Grid agents without communication")
# Saves the most recently drawn ggplot (presumably produced by
# generateSingleGraph above) as an EPS file.
ggsave(file="grid_silent.eps")
|
f7fa2adc241abaa9ee001fc54bc7bb6f8862c988
|
e243b6f069b685384198fc1396c10d719b8d351a
|
/man/graph.blackwhite.max.Rd
|
09332e252df6e5efd8950708e8bb384d79239c5d
|
[] |
no_license
|
cran/spnet
|
e96438c422d28ead493862e77ac23e4649717d69
|
5aeac6549ba8bcf40a71da823a4f876496dfbae4
|
refs/heads/master
| 2021-01-21T21:53:58.142646
| 2016-02-22T14:33:39
| 2016-02-22T14:33:39
| 22,459,348
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,049
|
rd
|
graph.blackwhite.max.Rd
|
% Generated by roxygen2 (4.1.1): do not edit by hand
% Please edit documentation in R/SpatialNetwork.r
\docType{methods}
\name{graph.blackwhite.max}
\alias{graph.blackwhite.max}
\alias{graph.blackwhite.max,SpatialNetwork-method}
\alias{graph.blackwhite.max<-,SpatialNetwork,numeric-method}
\title{Get the black and white mode maximal gray value of a \code{SpatialNetwork} object}
\usage{
graph.blackwhite.max(object)
\S4method{graph.blackwhite.max}{SpatialNetwork}(object)
\S4method{graph.blackwhite.max}{SpatialNetwork,numeric}(object) <- value
}
\arguments{
\item{object}{a \code{SpatialNetwork} object.}
\item{value}{a \code{logical}, the black and white mode maximal gray value.}
}
\description{
This generic method intends to extract the black and white mode maximal gray value (from 0 to 1) of a \code{SpatialNetwork} object.
}
\section{Methods (by class)}{
\itemize{
\item \code{SpatialNetwork}: method for \code{SpatialNetwork} objects.
\item \code{object = SpatialNetwork,value = numeric}: method for \code{SpatialNetwork} objects.
}}
|
a708e9f840c32bc43855b0761c2fcb87c084f85c
|
e7f8624e8a1b420755d2e55586527cddd7c8b1de
|
/R/Gibbs.sample.coeff.gp.R
|
a2156c16c8d68127e901b58d29bbba3567c25136
|
[] |
no_license
|
cran/spectralGP
|
a65ac1c8214590212ce65b0194ea24bdc6ce5212
|
281be08965e89b7bed467ad2771b9a8f735d64a9
|
refs/heads/master
| 2020-05-20T09:15:19.754934
| 2015-06-30T00:00:00
| 2015-06-30T00:00:00
| 17,700,012
| 1
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,403
|
r
|
Gibbs.sample.coeff.gp.R
|
# Gibbs update of the Fourier-basis coefficients of a spectral GP,
# following Wikle (2002): the full conditional of the coefficients given
# the data is Gaussian, so all coefficients can be sampled jointly via a
# single FFT of the standardized observations.
#
# Args:
#   object         - a spectralGP object; this function reads $gridsize,
#                    $variances (prior spectral variances), $d (dimension,
#                    1 or 2) and $const.fixed, and overwrites $coeff.
#   z              - observation vector on the full grid.
#   sig2e          - error (nugget) variance of the observations.
#   meanVal, sdVal - mean and sd used to standardize z before the update.
#   returnHastings - if TRUE, return the log proposal density of the
#                    sampled coefficients (for Metropolis-Hastings
#                    ratios); otherwise return NULL.
"Gibbs.sample.coeff.gp" <-
function(object,z,sig2e,meanVal=0,sdVal=1,returnHastings=FALSE,...){
  # takes a Gibbs sample, following the approach of Wikle (2002)
  m1=object$gridsize[1]
  m2=object$gridsize[2]
  # Data precision contribution to each coefficient.  The self-conjugate
  # frequencies (DC and Nyquist entries) are real-valued rather than
  # complex, hence the halved precision at those entries -- presumably
  # per Wikle (2002); TODO confirm against the paper.
  sig2e.precMatrix=matrix(2*sdVal*sdVal/sig2e,nrow=m1,ncol=m2)
  sig2e.precMatrix[1,1]=(1/2)*sig2e.precMatrix[1,1]
  sig2e.precMatrix[(m1/2+1),1]=(1/2)*sig2e.precMatrix[(m1/2+1),1]
  if(object$d==2){
    sig2e.precMatrix[(m1/2+1),(m2/2+1)]=(1/2)*sig2e.precMatrix[(m1/2+1),(m2/2+1)]
    sig2e.precMatrix[1,(m2/2+1)]=(1/2)*sig2e.precMatrix[1,(m2/2+1)]
  }
  # Posterior (full conditional) variance and mean of each coefficient:
  # precision = prior precision + data precision.
  coeff.var=1/(1/object$variances+sig2e.precMatrix)
  coeff.mean=coeff.var*sig2e.precMatrix*fft(matrix((z-meanVal)/sdVal,nrow=m1,ncol=m2,byrow=FALSE), inverse = FALSE)/(sqrt(m1*m2)) # division by sqrt(m1*m2) ensures proper scaling
  # Draw real and imaginary parts independently from their Gaussians.
  object$coeff=matrix(rnorm(m1*m2,Re(coeff.mean),sqrt(c(coeff.var))),nrow=m1,ncol=m2)+(0+1i)*matrix(rnorm(m1*m2,Im(coeff.mean),sqrt(c(coeff.var))),nrow=m1,ncol=m2)
  # Enforce the constraints required for a real-valued process: the DC
  # coefficient is real (or fixed to 0), the Nyquist entries are real,
  # and the remaining coefficients satisfy Hermitian (complex-conjugate)
  # symmetry.
  if(object$const.fixed){
    object$coeff[1,1]=0
  } else{
    object$coeff[1,1]=Re(object$coeff[1,1])
  }
  object$coeff[(m1/2+1),1]=Re(object$coeff[(m1/2+1),1])
  object$coeff[m1:(m1/2+2),1]=Conj(object$coeff[2:(m1/2),1])
  if(object$d==2){
    object$coeff[1,(m2/2+1)]=Re(object$coeff[1,(m2/2+1)])
    object$coeff[(m1/2+1),(m2/2+1)]=Re(object$coeff[(m1/2+1),(m2/2+1)])
    object$coeff[1,m2:(m2/2+2)]=Conj(object$coeff[1,2:(m2/2)])
    object$coeff[m1:(m1/2+2),m2:(m2/2+1)]=Conj(object$coeff[2:(m1/2),2:(m2/2+1)])
    object$coeff[(m1/2+1):2,m2:(m2/2+2)]=Conj(object$coeff[(m1/2+1):m1,2:(m2/2)])
  }
  # Recompute the process values from the updated coefficients (mutates
  # `object` -- spectralGP objects appear to use reference semantics).
  updateprocess(object)
  if(!returnHastings){
    return(NULL)
  } else{
    # this block of code determines which coefficients are actually proposed and not just determined as complex conjugates
    # screenr/screeni mask the real/imaginary parts that were freely
    # sampled (1) vs. determined by symmetry or fixed (0).
    screenr=matrix(1,nrow=m1,ncol=m2)
    screenr[m1:(m1/2+2),1]=0
    if(object$d==2){
      screenr[(m1/2+2):m1,(m2/2+1):m2]=0
      screenr[1:(m1/2+1),(m2/2+2):m2]=0
    }
    if(object$const.fixed){
      screenr[1,1]=0
    }
    # Imaginary parts additionally exclude the real-only (self-conjugate)
    # frequencies.
    screeni=screenr
    screeni[1,1]=0
    screeni[(m1/2+1),1]=0
    if(object$d==2){
      screeni[1,(m2/2+1)]=0
      screeni[(m1/2+1),(m2/2+1)]=0
    }
    tmpr=c(dnorm(Re(object$coeff),Re(coeff.mean),sqrt(coeff.var),log=T))
    tmpi=c(dnorm(Im(object$coeff),Im(coeff.mean),sqrt(coeff.var),log=T))
    return(sum(tmpr[c(screenr==1)])+sum(tmpi[c(screeni==1)])) # calculate logdensity of proposal
  }
}
|
d5b55956873557739ae9649c729dd11b4417c772
|
50ab7b34847636f4efa7bc5b7e7ee0c4d01d68f5
|
/ABtesteval/simulation.R
|
92129d802bb8abccdaa99eb7f5175a276e08578f
|
[] |
no_license
|
lubospernis/exponea-assignment
|
10d529348803d2ecc429f89fdf0239359c87c652
|
4305713fa49238d849f75fab071f315335e6dac2
|
refs/heads/master
| 2021-09-06T23:53:42.541139
| 2018-02-13T17:42:13
| 2018-02-13T17:42:13
| 121,394,834
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,340
|
r
|
simulation.R
|
library(dplyr)
library(tidyr)
library(magrittr)
library(ggplot2)

# Simulated A/B test: 834 control customers (no banner) vs 1020 treated
# customers (banner shown); only a handful of customers in each arm make
# a purchase, everyone else has zero revenue.
# NOTE(review): no set.seed() call, so results differ between runs --
# consider seeding if reproducibility matters.
customers <- data.frame(banner = rep(0, 834), reven = rep(0, 834))
customers$reven[1:22] <- rnorm(mean = 189.07, sd = 30, n = 22) # create data for the control group
customersb <- data.frame(banner = rep(1, 1020), reven = rep(0, 1020))
customersb$reven[1:30] <- rnorm(mean = 191.88, sd = 30, n = 30) # create data for variant A
customers <- rbind(customers, customersb) # bind data together

# OLS of revenue on the banner indicator; the banner coefficient is the
# estimated treatment effect.
model1 <- lm(data = customers, reven ~ banner) # run OLS model
summary(model1) # the coefficient for banner is insignificant

## Visualisation of the banner coefficient with its 95% confidence interval.
plot.df <- data.frame(
  y = model1$coefficients["banner"],
  x = 0,
  lb = confint(model1, "banner", level = 0.95)[1],
  ub = confint(model1, "banner", level = 0.95)[2]
)
plot.df %>% ggplot(aes(x, y)) + geom_point() + geom_errorbar(aes(ymin= lb, ymax=ub, width=0.1)) + geom_hline(aes(yintercept = 0), linetype="dashed") + scale_x_continuous(limits = c(-1, 1)) + labs(y= "Coef value", title= "OLS coef for banner with 95% CI", x="")+ theme(axis.ticks.y= element_blank(), axis.text.y = element_blank())+coord_flip()

## Distorting the data by adding 6 purchases of 250 EUR for the customers
## who were not shown the banner.
# BUG FIX: the original rbind()-ed a length-12 vector (rep(c(0, 250), 6))
# onto a 2-column data frame, which fails the column-count check of
# rbind.data.frame instead of adding 6 rows; build the six
# (banner = 0, reven = 250) rows explicitly.
customers_new_purchases <- rbind(
  customers,
  data.frame(banner = rep(0, 6), reven = rep(250, 6))
)
model2 <- lm(data = customers_new_purchases, reven ~ banner)
summary(model2)
|
8ae7038f57b11025788b244eed74e2b304d4319a
|
48e949a36254d8544c2a066f492b98f575aa1655
|
/man/redownload_blm.Rd
|
669a167384edb44633adac10c975e72d4a900aa5
|
[] |
no_license
|
Vittoriabrown/democracyData
|
d51d3e77c53fd44b81ca28e1e2c87dd0f6e033aa
|
348df1c7ef5dde48aa365dfa2862d1d475f38965
|
refs/heads/master
| 2023-05-04T00:21:52.220382
| 2021-05-30T20:56:53
| 2021-05-30T20:56:53
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 11,760
|
rd
|
redownload_blm.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/redownload_functions.R
\name{redownload_blm}
\alias{redownload_blm}
\alias{redownload_anckar}
\alias{redownload_bmr}
\alias{redownload_bnr}
\alias{redownload_gwf}
\alias{redownload_pacl}
\alias{redownload_peps}
\alias{redownload_utip}
\alias{redownload_wahman_teorell_hadenius}
\alias{redownload_polyarchy_original}
\alias{redownload_polyarchy_dimensions}
\alias{redownload_magaloni}
\alias{redownload_svmdi}
\alias{redownload_ulfelder}
\alias{redownload_pipe}
\alias{redownload_bti}
\alias{redownload_pacl_update}
\title{Download and process various democracy datasets.}
\source{
Bowman, Kirk, Fabrice Lehoucq, and James Mahoney. 2005. Measuring
Political Democracy: Case Expertise, Data Adequacy, and Central America.
Comparative Political Studies 38 (8): 939-970.
\url{http://cps.sagepub.com/content/38/8/939}. Data available at
\url{http://www.blmdemocracy.gatech.edu/}.
Anckar, Carsten and Cecilia Fredriksson (2018). "Classifying
political regimes 1800-2016: a typology and a new dataset." European
Political Science, doi: 10.1057/s41304-018-0149-8. Data, article, and
codebook available at: \doi{10.1057/s41304-018-0149-8}
Boix, Carles, Michael Miller, and Sebastian Rosato. 2012. A Complete
Data Set of Political Regimes, 1800-2007. Comparative Political Studies 46
(12): 1523-1554. Available at \url{https://dataverse.harvard.edu/dataset.xhtml?persistentId=doi:10.7910/DVN/FJLMKT}
Michael Bernhard, Timothy Nordstrom, and Christopher Reenock,
"Economic Performance, Institutional Intermediation and Democratic
Breakdown," Journal of Politics 63:3 (2001), pp. 775-803. Data and coding
description available at
\url{http://users.clas.ufl.edu/bernhard/content/data/data.htm}
Barbara Geddes, Joseph Wright, and Erica Frantz.
2014. "Autocratic Breakdown and Regime Transitions: A New Data Set."
Perspectives on Politics 12(2): 313-331. The full data and codebook can be
downloaded here \url{http://sites.psu.edu/dictators/}.
Cheibub, Jose Antonio, Jennifer Gandhi, and James
Raymond Vreeland. 2010. "Democracy and Dictatorship Revisited." Public
Choice, vol. 143, no. 2-1, pp. 67-101. DOI: 10.1007/s11127-009-9491-2. The
full data and codebook can be downloaded here
\url{https://sites.google.com/site/joseantoniocheibub/datasets/democracy-and-dictatorship-revisited}
Bruce E. Moon, Jennifer Harvey Birdsall, Sylvia Ceisluk, Lauren M. Garlett,
Joshua J. Hermias, Elizabeth Mendenhall, Patrick D. Schmid, and Wai Hong Wong
(2006) "Voting Counts: Participation in the Measurement of Democracy" Studies
in Comparative International Development 42, 2 (Summer, 2006). The complete
dataset is available here:
\url{http://www.lehigh.edu/~bm05/democracy/Obtain_data.htm}.
The University of Texas Inequality Project Categorical Dataset of Political
Regimes. Described in Sara Hsu, "The Effect of Political Regimes on
Inequality, 1963-2002," UTIP Working Paper No. 53 (2008),
\url{http://utip.gov.utexas.edu/papers/utip_53.pdf}. Data available for
download at
\url{http://utip.gov.utexas.edu/data/}
Wahman, Michael, Jan Teorell, and Axel Hadenius. 2013. Authoritarian
regime types revisited: updated data in comparative perspective.
Contemporary Politics 19 (1): 19-34. The dataset and codebook can be
downloaded from
\url{https://sites.google.com/site/authoritarianregimedataset/data}
Michael Coppedge and Wolfgang Reinicke, "Measuring Polyarchy," Studies in
Comparative International Development 25:1 (Spring 1990): 51-72. Data
available at \url{http://www3.nd.edu/~mcoppedg/crd/datacrd.htm}
Michael Coppedge, Angel Alvarez, and Claudia
Maldonado, "Two Persistent Dimensions of Democracy: Contestation and
Inclusiveness," Journal of Politics 70:3 (July 2008): 632-647.
Magaloni, Beatriz, Jonathan Chu, and Eric Min.
2013. Autocracies of the World, 1950-2012 (Version 1.0). Dataset, Stanford
University. Original data and codebook available at
\url{http://cddrl.fsi.stanford.edu/research/autocracies_of_the_world_dataset/}.
Grundler, Klaus, and Tommy Krieger. 2018. "Machine Learning
Indicators, Political Institutions, and Economic Development." CESifo
Working Paper. Original data available at
\url{https://www.dropbox.com/s/a7yqs5txt3qpwn0/Index\%20Upload.xlsx?dl=0}. Working paper available at
\url{https://www.cesifo-group.de/DocDL/cesifo1_wp6930.pdf}
Jay Ulfelder. 2012. Democracy/Autocracy Data Set. \url{http://hdl.handle.net/1902.1/18836}.
Adam Przeworski. 2013 \emph{Political Institutions and Political Events
(PIPE) Data Set}. Data set.
\url{https://sites.google.com/a/nyu.edu/adam-przeworski/home/data}.
Transformation Index of the Bertelsmann Stiftung 2020. Bertelsmann
Stiftung. Available at
\url{https://www.bti-project.org/en/index/political-transformation.html}
Bjornskov, C. and M. Rode (2020). "Regime types and regime change: A
new dataset on democracy, coups, and political institutions." The Review of
International Organizations 15(2): 531-551. Available at
\url{http://www.christianbjoernskov.com/bjoernskovrodedata/}
}
\usage{
redownload_blm(url, verbose = TRUE, return_raw = FALSE, ...)
redownload_anckar(url, verbose = TRUE, return_raw = FALSE, ...)
redownload_bmr(url, verbose = TRUE, return_raw = FALSE, ...)
redownload_bnr(url, verbose = TRUE, extend = FALSE, return_raw = FALSE, ...)
redownload_gwf(
url,
verbose = TRUE,
extend = FALSE,
dataset = c("all", "autocratic only"),
return_raw = FALSE,
...
)
redownload_pacl(url, verbose = TRUE, return_raw = FALSE, ...)
redownload_peps(url, verbose = TRUE, return_raw = FALSE, ...)
redownload_utip(url, verbose = TRUE, return_raw = FALSE, ...)
redownload_wahman_teorell_hadenius(
url,
verbose = TRUE,
return_raw = FALSE,
...
)
redownload_polyarchy_original(url, verbose = TRUE, return_raw = FALSE, ...)
redownload_polyarchy_dimensions(url, verbose = TRUE, return_raw = FALSE, ...)
redownload_magaloni(
url,
verbose = TRUE,
extend = FALSE,
return_raw = FALSE,
...
)
redownload_svmdi(
url,
release_year = 2020,
verbose = TRUE,
return_raw = FALSE,
...
)
redownload_ulfelder(
url,
verbose = TRUE,
return_raw = FALSE,
extend = FALSE,
...
)
redownload_pipe(url, verbose = TRUE, return_raw = FALSE, ...)
redownload_bti(url, verbose = TRUE, return_raw = FALSE, ...)
redownload_pacl_update(url, verbose = TRUE, return_raw = FALSE, ...)
}
\arguments{
\item{url}{The URL of the dataset. This defaults to:
\itemize{
\item For \link{anckar}:
\url{https://static-content.springer.com/esm/art\%3A10.1057\%2Fs41304-018-0149-8/MediaObjects/41304_2018_149_MOESM2_ESM.xlsx}
\item For \link{blm}:
\url{http://www.blmdemocracy.gatech.edu/blm\%20final\%20data.xls}
\item For \link{bmr}:
\url{https://dataverse.harvard.edu/api/access/datafile/3130643}
\item For \link{bnr}:
\url{http://users.clas.ufl.edu/bernhard/content/data/meister1305.dta}
\item For \link{bti}:
\url{https://www.bti-project.org/content/en/downloads/data/BTI\%202006-2020\%20Scores.xlsx}
\item For \link{gwf_all} and \link{gwf_autocratic}:
\url{http://sites.psu.edu/dictators/wp-content/uploads/sites/12570/2016/05/GWF-Autocratic-Regimes-1.2.zip}
\item For \link{LIED}:
\url{https://dataverse.harvard.edu/api/access/datafile/:persistentId?persistentId=doi:10.7910/DVN/29106/SXRLK1}
\item For \link{pacl}:
\url{https://uofi.box.com/shared/static/bba3968d7c3397c024ec.dta}
\item For \link{pacl_update}
\url{http://www.christianbjoernskov.com/wp-content/uploads/2020/09/Bj\%C3\%B8rnskov-Rode-integrated-dataset-v3.2.xlsx}
\item For \link{peps}:
\url{http://www.lehigh.edu/~bm05/democracy/PEPS1pub.dta}
\item For \link{svmdi}:
\url{https://www.dropbox.com/s/a7yqs5txt3qpwn0/Index\%20Upload.xlsx?dl=1}.
For the 2016 release, it defaults to
\url{http://www.wiwi.uni-wuerzburg.de/fileadmin/12010400/Data.dta}
\item For \link{utip}:
\url{http://utip.lbj.utexas.edu/data/political\%20regime\%20data\%20set\%20RV.xls}
\item For \link{wahman_teorell_hadenius}:
\url{https://sites.google.com/site/authoritarianregimedataset/data/ARD_V6.dta?attredirects=0&d=1}
\item For \link{polyarchy}:
\url{https://www3.nd.edu/~mcoppedg/crd/poly8500.sav}
\item For \link{polyarchy_dimensions}:
\url{http://www3.nd.edu/~mcoppedg/crd/DahlDims.sav}
\item For \link{magaloni}:
\url{https://fsi-live.s3.us-west-1.amazonaws.com/s3fs-public/res/Data_Set.xls}
\item For \link{ulfelder}:
\url{https://dataverse.harvard.edu/api/access/datafile/2420018}
\item For \link{PIPE}:
\url{https://sites.google.com/a/nyu.edu/adam-przeworski/home/data}
}}
\item{verbose}{Whether to print a running commentary of what the function is
doing while processing the data.}
\item{return_raw}{Whether to return the raw data, without any processing.
Default is \code{FALSE}.}
\item{...}{Other parameters passed to \link{country_year_coder}.}
\item{extend}{(Only for \link{redownload_bnr}, \link{redownload_gwf},
\link{redownload_magaloni}, and \link{redownload_ulfelder}). Whether to
extend the dataset back in time using a full panel of independent countries
(for the \link{redownload_bnr} case) or the appropriate duration variable
(\code{gwf_duration}, \code{duration_nr}, or \code{rgjdura} and
\code{rgjdurd}, respectively, for \link{redownload_gwf},
\link{redownload_magaloni}, and \link{redownload_ulfelder}). For example,
the United States enters the GWF dataset in 1946, where \code{gwf_duration}
is already 75; one can extend the dataset to indicate that the country was
classified as a democracy from 1872. Default is \code{FALSE}.}
\item{dataset}{(Only for \link{redownload_gwf}). The dataset to output. Geddes, Wright, and Frantz provide
two country-year files, one with autocratic regimes only ("autocratic only"), and one with both
democratic and non-democratic regimes ("all"). Default is "all".}
\item{release_year}{(Only in \link{redownload_svmdi}). The year of the release
to be downloaded. For \link{svmdi}, it can be 2016 or 2020.}
}
\value{
A \link{tibble} with the processed dataset, unless \code{return_raw}
is \code{TRUE}, in which case the function returns the raw data without
processing.
}
\description{
Download and process various democracy datasets. Note that the datasets
returned by the \code{redownload_*} family of functions (\link{blm},
\link{bmr}, \link{bnr}, \link{bti}, \link{gwf_autocratic},
\link{gwf_autocratic_extended}, \link{gwf_all}, \link{gwf_all_extended},
\link{LIED}, \link{magaloni}, \link{pacl}, \link{pacl_update}, \link{PIPE}, \link{peps},
\link{polityIV}, \link{polyarchy}, \link{polyarchy_dimensions}, \link{uds_2014},
\link{uds_2010}, \link{uds_2011}, \link{ulfelder}, \link{utip},
\link{wahman_teorell_hadenius}, \link{anckar}, \link{svmdi}) are all available
directly from this package and are unlikely to have changed since the package
was installed. Access the respective dataset by typing its name, and refer to
their documentation for details. You will not normally need to redownload
them, unless you want to process the raw data yourself (set \code{return_raw =
TRUE}) or suspect they have changed since the package was installed.
}
\examples{
\dontrun{
blm <- redownload_blm()
blm
}
\dontrun{
redownload_anckar()}
\dontrun{
bmr <- redownload_bmr()
bmr
}
\dontrun{
redownload_bnr()
redownload_bnr(full_panel = FALSE)}
\dontrun{
gwf <- redownload_gwf()
gwf
}
\dontrun{
redownload_pacl()}
\dontrun{
redownload_peps()}
\dontrun{
redownload_utip()}
\dontrun{
redownload_wahman_teorell_hadenius()}
\dontrun{
redownload_polyarchy_original()}
\dontrun{
redownload_polyarchy_dimensions()}
\dontrun{
redownload_magaloni()}
\dontrun{
redownload_svmdi(release_year = 2016)
redownload_svmdi() # For release year 2020}
\dontrun{
redownload_ulfelder()}
\dontrun{
redownload_pipe()}
\dontrun{
bti <- redownload_bti()
bti
}
\dontrun{
redownload_pacl_update()}
}
|
371f55129e9d9e4c81523bb90c90ef6a8d63649c
|
8ff0e39c82a85c8addccfa99e15c5fb708a65d2c
|
/run_analysis.R
|
e7526b9dece6dd68b92daa5ae359f22a538b274d
|
[] |
no_license
|
evayan16915/Getting-and-Cleaning-Data-Run-Analysis
|
0dc50d361a7cc9821c7d91c8817955ca0de103fe
|
a8a0a1eec0d7916e1ea76a5ad13710bff5a041f2
|
refs/heads/master
| 2020-04-06T07:08:39.717215
| 2016-08-24T04:38:09
| 2016-08-24T04:38:09
| 62,289,597
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,687
|
r
|
run_analysis.R
|
library(dplyr)

## ---------------------------------------------------------------------------
## Getting & Cleaning Data course project.
## Downloads the UCI HAR dataset, merges the training and test sets, keeps the
## mean/std measurements, labels activities, and writes two tidy outputs:
##   ./tidydata.csv - one row per observation
##   ./average.csv  - average of each variable per subject/activity
## ---------------------------------------------------------------------------

## fetch data
if (!file.exists("./project")) {
  dir.create("./project")
}
url <- "https://d396qusza40orc.cloudfront.net/getdata%2Fprojectfiles%2FUCI%20HAR%20Dataset.zip"
download.file(url, destfile = "./project/activity.csv.zip")
list.files("./project")

## Unzip into ./project so the read paths below actually exist (the previous
## version extracted into the working directory but then read from
## "./project./UCI HAR Dataset", which only resolved by accident, if at all).
RawdData <- unzip("./project/activity.csv.zip", exdir = "./project")
base_dir <- file.path(".", "project", "UCI HAR Dataset")

## All data files are whitespace-separated with NO header row, so they must be
## read with read.table(); the previous read.csv() calls silently consumed the
## first observation as a header, and the script then papered over the
## off-by-one by duplicating the *last* row (corrupting the labels).
trainingSet    <- read.table(file.path(base_dir, "train", "X_train.txt"))
testSet        <- read.table(file.path(base_dir, "test", "X_test.txt"))
activity_label <- read.table(file.path(base_dir, "activity_labels.txt"))
features       <- read.table(file.path(base_dir, "features.txt"))
features_info  <- readLines(file.path(base_dir, "features_info.txt"))
readme         <- readLines(file.path(base_dir, "README.txt"))

## Sanity check: the measurement sets should contain no missing values.
sum(is.na(trainingSet))
sum(is.na(testSet))

## 1. Merge the training and the test sets to create one data set.
##    Training first -- every per-row vector below must use the same order.
df <- rbind(trainingSet, testSet)

## 4. Label the data set with descriptive (syntactically valid, unique)
##    variable names taken from features.txt.
ta <- t(features$V2)
valid_column_names <- make.names(names = ta, unique = TRUE, allow_ = TRUE)
names(df) <- valid_column_names

## 2. Extract only the measurements on the mean and standard deviation.
pos_mean_std <- grep("mean|std", names(df))
mean_std <- select(df, all_of(pos_mean_std))

## 3. Activity codes for every row (train first, matching `df`), mapped to
##    descriptive activity names via activity_labels.txt.
ytrain <- read.table(file.path(base_dir, "train", "y_train.txt"))
ytest  <- read.table(file.path(base_dir, "test", "y_test.txt"))
activityinfo <- rbind(ytrain, ytest)
names(activityinfo) <- "activityinfo"
activityinfo$activityinfo <- factor(activityinfo$activityinfo,
                                    levels = activity_label$V1,
                                    labels = activity_label$V2)

## Subject ids for every row. NOTE: must be bound train-then-test to stay
## aligned with `df`; the previous version bound test before train, which
## misassigned every measurement to the wrong subject.
subject_train <- read.table(file.path(base_dir, "train", "subject_train.txt"))
subject_test  <- read.table(file.path(base_dir, "test", "subject_test.txt"))
subject <- rbind(subject_train, subject_test)
names(subject) <- "subject"

## final tidy data set
tidy_df <- cbind(subject, activityinfo, mean_std)

## 5. Independent tidy data set with the average of each variable for each
##    activity and each subject (summarise_each()/funs() are deprecated).
df2 <- tidy_df %>%
  group_by(subject, activityinfo) %>%
  summarise(across(everything(), mean), .groups = "drop")

## data output
write.csv(tidy_df, "./tidydata.csv", row.names = FALSE)
write.csv(df2, "./average.csv", row.names = FALSE)
## end of script
|
e4abda5818056c2a134e21208c8b608b6490c068
|
22ce2ea2e94599f026faf51ee6625a0fb11d7758
|
/R/plotters.R
|
cbee4eeb67fc149b16966fe23d886b52fcc41ab4
|
[
"Artistic-2.0"
] |
permissive
|
hillarykoch/locdiffr
|
41eac9468c4b0db08651ea5355587d8aaa6b6902
|
2d30c09c7a1e4d14f91bdc35d17b805bcb0d0aa5
|
refs/heads/master
| 2023-04-09T21:20:19.503378
| 2021-04-19T13:53:22
| 2021-04-19T13:53:22
| 179,743,564
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 12,223
|
r
|
plotters.R
|
# Plot, for every window size, the observed SCC scan statistics of each
# replicate, the posterior mean function, and the average sampled NNGP
# ("z_star"), overlaying the locations rejected by each testing criterion.
#
# scc_scan_file:      path to an .rds list indexed by window size; each
#                     element is a list of per-replicate data frames with a
#                     `crd` coordinate column plus the scan statistic.
# mcmc_fit_file:      path to an .rds list of MCMC fits per window size, each
#                     with design matrix $X and a $chain$beta sample matrix.
# sampled_nngps_file: path to the NNGP sampler output; only its $predictions
#                     element is kept here.
# rejection_files:    paths to .rds rejection sets (output of the testing
#                     functions), one per criterion.
# rejection_names:    optional display names for the criteria; defaults to
#                     the file paths.
#
# Returns a list of ggplot objects, one per window size.
plot_rejections_along_process <-
  function(scc_scan_file,
           mcmc_fit_file,
           sampled_nngps_file,
           rejection_files,
           rejection_names = NULL) {
    z <- readRDS(scc_scan_file)
    preds <- readRDS(sampled_nngps_file)$predictions
    fits <- readRDS(mcmc_fit_file)
    # Read every rejection set and label it with its criterion name.
    if(!is.null(rejection_names)) {
      rej <- purrr::map(rejection_files, readRDS) %>%
        setNames(rejection_names)
    } else {
      rej <- purrr::map(rejection_files, readRDS) %>%
        setNames(rejection_files)
    }
    winsizes <- names(z)

    p <- list()
    for(i in seq_along(z)) {
      winsize <- winsizes[i]

      #-----------------------------------------------------------------------
      # Prep data about observed scc scan statistics, estimated mean
      # function, and average sampled nngps
      #-----------------------------------------------------------------------
      # Long-format observed statistics, restricted to the coordinates that
      # were actually tested (taken from the first criterion's rejections).
      z_df <- purrr::imap(z[[i]], ~ mutate(.x, "process" = .y)) %>%
        bind_rows %>%
        mutate(scc = NULL) %>%
        setNames(c("crd", "val", "process")) %>%
        dplyr::filter(crd %in% rej[[1]][[i]]$crd)
      # Pointwise average of the sampled NNGP draws.
      pred_df <-
        tidyr::tibble("crd" = z[[i]]$z1$crd,
                      "val" = rowMeans(preds[[i]]),
                      "process" = "z_star") %>%
        dplyr::filter(crd %in% rej[[1]][[i]]$crd)
      # Posterior mean function X %*% E[beta].
      # NOTE(review): `preds` holds only the $predictions element of the .rds,
      # yet `preds$stationary_iterations` is used below; that lookup returns
      # NULL unless $predictions itself carries that name -- confirm against
      # the writer of `sampled_nngps_file`.
      mean_process_df <- tidyr::tibble(
        "crd" = z[[i]]$z1$crd,
        "val" = as.vector(fits[[i]]$X %*% colMeans(fits[[i]]$chain$beta[preds$stationary_iterations,])),
        "process" = "mean_function"
      ) %>%
        dplyr::filter(crd %in% rej[[1]][[i]]$crd)
      line_df <- as_tibble(bind_rows(z_df, pred_df, mean_process_df)) %>%
        mutate(process = as.factor(process))

      #-----------------------------------------------------------------------
      # Prep data about rejection locations
      #-----------------------------------------------------------------------
      # One row per rejected coordinate per criterion, plotted at the height
      # of the average sampled process (pred_df$val).
      rej_df <- map(rej, i) %>%
        map2(.y = names(rej), ~ mutate(.x, "criterion" = .y)) %>%
        bind_rows() %>%
        dplyr::mutate(val = rep(pred_df$val, length(rej))) %>%
        dplyr::filter(reject) %>%
        mutate(reject = NULL)

      #-----------------------------------------------------------------------
      # Plot rejections along the process
      #-----------------------------------------------------------------------
      p[[i]] <- ggplot(line_df, aes(x = crd, y = val)) +
        geom_line(aes(color = process)) +
        geom_point(data = rej_df, aes(x = crd, y = val, shape = criterion), fill = "goldenrod3", size = 3) +
        scale_shape_manual(values = c(22, 2, 24, 8, 23, 21, 4, 3)) +
        ggtitle(paste0("Window size = ", winsize)) +
        labs(y = "z", x = "loc") +
        theme_minimal()
    }
    return(p)
  }
# Side-by-side heatmap of two conditions on a single matrix: condition 1 in
# the lower triangle, condition 2 in the upper triangle, both on a
# log2(count + 1) color scale.
# THIS FUNCTION DOWNSAMPLES THE DATA TO EQUAL READS FOR MORE ACCURATE COMPARISON
# infiles1, infiles2: vector/list of paths to input files for both conditions
#   (headerless 3-column TSVs: position 1, position 2, count; assumed
#   lower-triangular, i.e. X2 >= X1 -- TODO confirm with upstream format).
# resolution: resolution of the data in base pairs, e.g. 50000
# condition_names: names for axes of the heatmap
# offset: are the data 0-indexed?
# sub_range: bp interval (start, end) to plot; NULL plots everything
# Relies on downsample_to_equal_reads(), defined elsewhere in this package.
# Returns a ggplot object.
plot_cond_vs_cond <-
  function(infiles1,
           infiles2,
           resolution,
           offset = TRUE,
           condition_names = NULL,
           sub_range = NULL) {
    if(is.null(condition_names)) {
      condition_names <- c("condition 1", "condition 2")
    }
    # Read every replicate of both conditions, tagging names by condition so
    # they can be split back apart below.
    d <- c(
      purrr::map(infiles1,
                 ~ readr::read_tsv(.x, col_names = FALSE)) %>%
        setNames(paste0("cond1_rep", seq_along(infiles1))),
      purrr::map(infiles2,
                 ~ readr::read_tsv(.x, col_names = FALSE)) %>%
        setNames(paste0("cond2_rep", seq_along(infiles2)))
    )
    cond1 <- grep("cond1", names(d))
    cond2 <- grep("cond2", names(d))

    # Ensure consistent dimensions across replicates
    offset <- as.numeric(offset)
    maxdim <-
      max(map_dbl(d, ~ max(.x$X1, .x$X2))) / resolution + offset

    # Convert the requested bp range into bin indices.
    if(is.null(sub_range)) {
      sub_range <- 1:maxdim
    } else {
      sub_range <- round(sub_range[1] / resolution):round(sub_range[2] / resolution)
    }

    # must be lower-tri
    dmat <-
      purrr::map(
        d,
        ~ Matrix::sparseMatrix(
          i = .x$X2 / resolution + offset,
          j = .x$X1 / resolution + offset,
          x = round(.x$X3),
          dims = rep(maxdim, 2)
        )
      )
    # Pool replicates within each condition, then downsample so both
    # conditions carry an equal number of reads.
    d1 <- Reduce(`+`, dmat[cond1])
    d2 <- Reduce(`+`, dmat[cond2])
    rm(d)
    rm(dmat)
    dd <- downsample_to_equal_reads(list(d1, d2))

    # Long format for ggplot, keeping only nonzero cells inside sub_range.
    m1 <- reshape2::melt(as.matrix(dd[[1]])) %>%
      dplyr::filter(value != 0) %>%
      dplyr::filter(Var1 %in% sub_range & Var2 %in% sub_range)
    m2 <- reshape2::melt(as.matrix(dd[[2]])) %>%
      dplyr::filter(value != 0) %>%
      dplyr::filter(Var1 %in% sub_range & Var2 %in% sub_range)
    rm(d1)
    rm(d2)
    rm(dd)

    # Remove redundant diagonal information
    m1 <- dplyr::filter(m1, Var1 != Var2)
    # Make condition 1 lower-tri, condition 2 upper-tri
    colnames(m2) <- c("Var2", "Var1", "value")
    m2 <- m2[,c(2,1,3)]
    df <- bind_rows(m1, m2)

    p <- ggplot(data = df, aes(x = Var1, y = Var2, fill = log2(value + 1))) +
      geom_tile() +
      scale_fill_gradientn(colours = colorRampPalette(c("white", "red"))(10000), na.value = "white") +
      # scale_fill_gradient(low = "white", high = "red", na.value = "white") +
      labs(x = condition_names[1], y = condition_names[2]) +
      theme_minimal()
    return(p)
  }
# Heatmap comparing the (absolute or signed) downsampled count differences
# between two conditions (lower triangle) against the regions rejected by the
# testing procedure (upper triangle).
# THIS FUNCTION DOWNSAMPLES THE DATA TO EQUAL READS FOR MORE ACCURATE COMPARISON
# infiles1, infiles2: vector/list of paths to input files for both conditions
#   (headerless 3-column TSVs: position 1, position 2, count)
# rejections_file: file output from test_with_FDR or test_with_FDX
# resolution: resolution of the data in base pairs, e.g. 50000
# condition_names: names for axes of the heatmap
# offset: are the data 0-indexed?
# sub_range: bp interval (start, end) to plot; NULL plots everything
# absolute: plot |cond1 - cond2| (TRUE) or the signed difference (FALSE)
# Relies on downsample_to_equal_reads() and cpopulate_rejected_differences(),
# defined elsewhere in this package.
# Returns a ggplot object; stops with an error if no rejections exist.
plot_rej_vs_diffs <-
  function(infiles1,
           infiles2,
           rejections_file,
           resolution,
           condition_names = NULL,
           offset = TRUE,
           sub_range = NULL,
           absolute = FALSE) {
    if (is.null(condition_names)) {
      condition_names <- c("condition 1", "condition 2")
    }
    # Read every replicate of both conditions, tagging names by condition.
    d <- c(
      purrr::map(infiles1,
                 ~ readr::read_tsv(.x, col_names = FALSE)) %>%
        setNames(paste0("cond1_rep", seq_along(infiles1))),
      purrr::map(infiles2,
                 ~ readr::read_tsv(.x, col_names = FALSE)) %>%
        setNames(paste0("cond2_rep", seq_along(infiles2)))
    )
    cond1 <- grep("cond1", names(d))
    cond2 <- grep("cond2", names(d))

    # Ensure consistent dimensions across replicates
    offset <- as.numeric(offset)
    maxdim <-
      max(map_dbl(d, ~ max(.x$X1, .x$X2))) / resolution + offset
    # Convert the requested bp range into bin indices.
    if (is.null(sub_range)) {
      sub_range <- 1:maxdim
    } else {
      sub_range <-
        round(sub_range[1] / resolution):round(sub_range[2] / resolution)
    }

    # must be lower-tri
    dmat <-
      purrr::map(
        d,
        ~ Matrix::sparseMatrix(
          i = .x$X2 / resolution + offset,
          j = .x$X1 / resolution + offset,
          x = round(.x$X3),
          dims = rep(maxdim, 2)
        )
      )
    # Pool replicates within each condition, downsample to equal depth, and
    # take the (absolute) difference matrix.
    d1 <- Reduce(`+`, dmat[cond1])
    d2 <- Reduce(`+`, dmat[cond2])
    rm(d)
    rm(dmat)
    dd <- downsample_to_equal_reads(list(d1, d2))
    if(absolute) {
      abs_diffs <- abs(dd[[1]] - dd[[2]])
    } else {
      abs_diffs <- dd[[1]] - dd[[2]]
    }

    rejections <- readRDS(rejections_file)
    winsizes <- as.numeric(names(rejections))
    if(!any(map_lgl(map(rejections, "reject"), any))) {
      stop(paste0("No rejections are contained in file ", rejections_file, "."))
    } else {
      # Seed a sparse rejection-indicator matrix with the first rejected
      # coordinate, then mark every rejected window at every window size.
      init_idx <- which(map_lgl(map(rejections, "reject"), any))[1]
      rej_mat <- Matrix::sparseMatrix(
        i = rejections[[init_idx]]$crd[which(rejections[[init_idx]]$reject)[1]],
        j = rejections[[init_idx]]$crd[which(rejections[[init_idx]]$reject)[1]] + 1,
        x = 1,
        dims = rep(maxdim, 2)
      )
      for(j in seq_along(rejections)) {
        if (any(rejections[[j]]$reject)) {
          win_size <- winsizes[j]
          rej_mat <- cpopulate_rejected_differences(
            rej_mat,
            rejections[[j]]$crd[rejections[[j]]$reject] - 1, win_size
          )
        }
      }

      #-------------------------------------------------------------------
      # Make triangular (without diagonal) to make accurate comparisons
      # Lower tri is the absolute differences between conditions,
      # upper tri are the rejections by given method
      rej_mat <- Matrix::triu(rej_mat, 1)
      diff_molten <- reshape2::melt(abs_diffs) %>%
        dplyr::filter(Var1 %in% sub_range & Var2 %in% sub_range)
      rej_molten <- reshape2::melt(as.matrix(rej_mat)) %>%
        dplyr::filter(Var1 %in% sub_range & Var2 %in% sub_range)

      #-------------------------------------------------------------------
      # plot
      # Axis labels reflect whether the differences are absolute or signed.
      if(absolute) {
        labels <- c(paste0("|", paste0(condition_names, collapse = "-"), "|"), "rejections")
      } else {
        labels <- c(paste0(condition_names, collapse = "-"), "rejections")
      }
      if(absolute) {
        # Grey scale for rejections (upper tri), white-to-red for |diffs|.
        rej_p <- ggplot(data = dplyr::filter(rej_molten, Var1 < Var2), aes(
          x = Var1,
          y = Var2,
          fill = value
        )) +
          geom_tile() +
          scale_fill_distiller(limits = c(0,1), palette = "Greys", direction = 1) +
          ggnewscale::new_scale("fill") +
          geom_tile(
            mapping = aes(
              x = Var1,
              y = Var2,
              fill = log2(value + 1)
            ),
            data = dplyr::filter(diff_molten, Var1 >= Var2)
          ) +
          coord_fixed() +
          scale_fill_gradient(low = "white", high = "red", na.value = "white") +
          theme_minimal() +
          labs(x = labels[1], y = labels[2])
      } else {
        # Signed differences: symmetric log2 transform so gains/losses map
        # onto a diverging color scale.
        diff_molten[diff_molten$value < 0,"value"] <- -log2(abs(diff_molten[diff_molten$value < 0,"value"]))
        diff_molten[diff_molten$value > 0,"value"] <- log2(diff_molten[diff_molten$value > 0,"value"])
        rej_p <- ggplot(data = dplyr::filter(rej_molten, Var1 < Var2), aes(
          x = Var1,
          y = Var2,
          fill = value
        )) +
          geom_tile() +
          scale_fill_distiller(limits = c(0,1), palette = "Greys", direction = 1) +
          ggnewscale::new_scale("fill") +
          geom_tile(
            mapping = aes(
              x = Var1,
              y = Var2,
              fill = value
            ),
            data = dplyr::filter(diff_molten, Var1 >= Var2)
          ) +
          coord_fixed() +
          scale_fill_distiller(palette = "RdBu", na.value = "white") +
          theme_minimal() +
          labs(x = labels[1], y = labels[2])
      }
      return(rej_p)
    }
  }
|
bcdd883b6f207f63c263d1e223f5d7837d886bd9
|
7fab445d3cf2ce3865c28e13f35f62f490b1cecc
|
/run_analysis.R
|
ef41f10761cf498a4a31cf996a9792613631f6fa
|
[] |
no_license
|
nrodriguezh/GetCleanDataRepo
|
2e46ef6143177f13a1e15cdc03f15688330e0c29
|
b68907369e46b69f6df5997098d0ece53dc1c953
|
refs/heads/master
| 2020-12-24T14:56:36.627463
| 2014-06-22T17:09:47
| 2014-06-22T17:09:47
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 3,149
|
r
|
run_analysis.R
|
#### Run Analysis ####
# Getting & Cleaning Data course project script.  It:
#   1. Merges the training and the test sets to create one data set.
#   2. Extracts only the measurements on the mean and standard deviation.
#   3. Uses descriptive activity names to name the activities in the data set.
#   4. Appropriately labels the data set with descriptive variable names.
#   5. Creates a second, independent tidy data set with the average of each
#      variable for each activity and each subject.

## Load required packages (reshape2 supplies melt()/dcast()).
require(reshape2)

## Set wd and set objects to help in loading files.
## NOTE(review): the working directory is hard-coded to one machine, and the
## zip is re-downloaded on every run (originally downloaded 6/22/2014);
## anyone else running this must edit `mainwd`.
mainwd <- "C:/Users/Nahir/Documents/CleaningData"
setwd(mainwd)
rawdata <- "UCI HAR Dataset"
train <- "train"
test <- "test"
url <- "https://d396qusza40orc.cloudfront.net/getdata%2Fprojectfiles%2FUCI%20HAR%20Dataset.zip"
download.file(url, file.path(mainwd,paste(rawdata,".zip",sep="")))
unzip(file.path(mainwd,paste(rawdata,".zip",sep="")))

## Load test files (whitespace-separated, no header row).
filename <- file.path(mainwd,rawdata,test,"X_test.txt")
x_test <- read.table(filename)
filename <- file.path(mainwd,rawdata,test,"y_test.txt")
y_test <- read.table(filename)
filename <- file.path(mainwd,rawdata,test,"subject_test.txt")
subject_test <- read.table(filename)

## Load train files
filename <- file.path(mainwd,rawdata,train,"X_train.txt")
x_train <- read.table(filename)
filename <- file.path(mainwd,rawdata,train,"y_train.txt")
y_train <- read.table(filename)
filename <- file.path(mainwd,rawdata,train,"subject_train.txt")
subject_train <- read.table(filename)

## Load features (V1 = feature index, V2 = feature name).
filename <- file.path(mainwd,rawdata,"features.txt")
features <- read.table(filename)

## activity_labels (V1 = activity code, V2 = descriptive name).
filename <- file.path(mainwd,rawdata,"activity_labels.txt")
activity_labels <- read.table(filename)

## Assign colnames; in the case of x_test and x_train also subset the desired
## columns -- those containing "mean()" and "std()".  `fixed = TRUE` keeps
## the parentheses from being treated as regex metacharacters.
colnames(x_test) <- features[,c("V2")]
colnames(x_train) <- features[,c("V2")]
## Note that this relates to:
## Extracts only the measurements on the mean and standard deviation for each measurement.
x_test <- x_test[,(grepl("mean()", colnames(x_test),fixed = TRUE) | grepl("std()", colnames(x_test),fixed = TRUE))]
x_train <- x_train[,(grepl("mean()", colnames(x_train),fixed = TRUE) | grepl("std()", colnames(x_train),fixed = TRUE))]
colnames(y_test) <- "activity"
colnames(y_train) <- "activity"
colnames(subject_test) <- "subject"
colnames(subject_train) <- "subject"

## Merge test data (columns: activity, subject, then the measurements).
testdb <- cbind(y_test,subject_test,x_test)
## Merge train data
traindb <- cbind(y_train,subject_train,x_train)
## Merge test and train data
db <- rbind(testdb,traindb)

## Assign factors to activity (codes 1:6 -> descriptive labels).
db$activity <- factor(db$activity,levels = 1:6,labels = activity_labels$V2)

## Second tidy data set: average of each variable per activity and subject.
dbmelted <- melt(db,id.vars = c("activity","subject"))
dbcasted <- dcast(dbmelted, activity + subject ~ variable,mean)

## Write Results
tidydataoutput <- dbcasted
write.csv(tidydataoutput, file = "tidydataoutput.txt",row.names = FALSE)
|
e64d2d04eb314caec9f11b48ab97647e77a7d97f
|
4f602cda9f8e260cb05da937a318b9b621d4dc17
|
/man/highlight.Rd
|
72d40fad92062b9540943ae252ea0eadf531e05b
|
[
"MIT"
] |
permissive
|
jeevanyue/highlighter
|
5c2dbce8dffc42de33367bd44e1f9205ba0cd01c
|
031cc731f36e1ec5f6f3fd1b3768fab967c8a736
|
refs/heads/master
| 2023-01-18T16:32:14.756035
| 2020-11-10T05:42:57
| 2020-11-10T05:42:57
| 304,838,028
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 1,018
|
rd
|
highlight.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/highlight.R
\name{highlight}
\alias{highlight}
\title{Highlight code}
\usage{
highlight(
code = NULL,
file = NULL,
language = "r",
style = "default",
width = "100\%",
height = "100\%",
elementId = NULL
)
}
\arguments{
\item{code}{The original raw code.}
\item{file}{The file to highlight.}
\item{language}{Language name, supports r, python and sql. The default is r.}
\item{style}{Highlight style. The default is default.}
\item{width, height}{Must be a valid CSS unit (like \code{'100\%'},
\code{'400px'}, \code{'auto'}) or a number, which will be coerced to a
string and have \code{'px'} appended. The default is \code{'100\%'}.}
\item{elementId}{Use an explicit element ID for the widget (rather than an automatically generated one). The default is NULL.}
}
\description{
Highlight code using highlight.js
}
\examples{
highlight(code = "df <- head(mtcars)")
highlight(code = "df <- head(mtcars)", style = "dark")
}
|
be1c354d7b9ed3382715c676b0729f436389edbf
|
d146e87c2ab4b39c7ef2cdd536f1fc2eb0430446
|
/Functional_Blackjack_code.R
|
5809e4f7b0ed90d8fbbdba57e2515531e886b7f2
|
[] |
no_license
|
jankroon/Blackjack
|
ef15b202607840347bce5d9331ead9a070f1c055
|
ce75d21cb866304aa688703902cd002a1255d02e
|
refs/heads/main
| 2023-02-22T17:55:59.226702
| 2021-01-25T10:30:32
| 2021-01-25T10:30:32
| 317,189,567
| 0
| 0
| null | 2021-01-21T15:21:07
| 2020-11-30T10:35:25
|
R
|
UTF-8
|
R
| false
| false
| 9,686
|
r
|
Functional_Blackjack_code.R
|
source("userInputFunctions.R")
source("printHand.R")
Blackjack <- function(){
  # Interactively configure a new game.  Prompts on the console via the
  # sourced askForInt()/askForYN() helpers and readline(), and publishes the
  # settings into the enclosing Blackjack() environment via `<<-`:
  # starting_bankroll, number_of_player, PLAYER (player/bankroll/virtual
  # table, with an infinite-bankroll Bank in row 1), enable_bots,
  # number_of_decks and confirm_player_choice.
  game_config <- function(){
    # Set starting bankroll for every human player.
    starting_bankroll <<- askForInt("Insert starting bankroll", 10, Inf)
    # Set number of (human) players.
    number_of_player <<- askForInt("Insert number of physical players", 1, 8)
    # The Bank is a virtual player with unlimited bankroll, always row 1.
    PLAYER <<- data.frame(player=c("Bank"), bankroll= as.numeric(Inf), virtual = TRUE)
    # Ask each human player for a name; they occupy rows 2..(n+1).
    for (i in 1:number_of_player) {
      PLAYER[i+1,1] <<- readline("Insert a name : ")
      PLAYER[i+1,2] <<- starting_bankroll
      PLAYER[i+1,3] <<- FALSE
    }
    # Optionally append the four built-in bots as virtual players.
    enable_bots <<- askForYN("Enable bots")
    if(enable_bots) {
      PLAYER <<- rbind(PLAYER, data.frame(player=c("copycat", "randomrat", "riskbot", "cowardbot"), bankroll=starting_bankroll, virtual=TRUE ))
    } else {
      NULL
    }
    # Set number of decks in the shoe.
    number_of_decks <<- askForInt("Insert number of decks", 1, 10)
    # Confirm the settings (stored for the caller to act on).
    confirm_player_choice <<- askForYN("Do you want to continue with these settings")
  }
  # Start a new game: collect bets, build the HANDS table, shuffle and deal.
  # Prompts for the table minimum, then for each non-Bank hand keeps asking
  # (humans) or drawing uniformly in [min, 100*min] (bots) until a numeric
  # bet of at least the minimum is obtained; bets are deducted from
  # PLAYER$bankroll.  Mutates HANDS, PLAYER and deck in the enclosing
  # Blackjack() environment via `<<-`.  ace_control() and check_for_winner()
  # are defined elsewhere in Blackjack().
  new_game <- function(){
    minimum_bet <- as.numeric(readline("Insert minimum bet "))
    # One row per player; `cards` starts as a placeholder string and is
    # replaced with a list-column of dealt cards in deal_cards().  The Bank
    # (row 1) effectively bets Inf so the bet loop below skips it.
    HANDS <<- data.frame("cards"= rep("nothing", times = 1 * nrow(PLAYER)),
                         player= PLAYER$player,
                         bet= c(as.numeric(Inf), rep(0, times = 1 * (nrow(PLAYER)-1))),
                         score= rep(0, times = 1 * nrow(PLAYER)),
                         virtual=PLAYER$virtual)
    for (i in 2:nrow(HANDS)){
      repeat {
        # Inner loop: re-prompt until the input parses to a number.
        repeat{
          if(HANDS$virtual[i] == TRUE) {
            HANDS$bet[i] <<- runif(1, minimum_bet, (minimum_bet * 100))
          } else {
            HANDS$bet[i] <<- as.numeric(readline(paste("Place your bet,", HANDS$player[i], " " ) ))
          }
          if (!is.na(HANDS$bet[i])) {break}
        }
        # Outer loop: enforce the table minimum.
        if (HANDS$bet[i]>= minimum_bet) {break}
      }
      PLAYER$bankroll[i] <<- PLAYER$bankroll[i] - HANDS$bet[i]
    }
    make_cards()
    deal_cards()
    ace_control()
    # An initial 21 (blackjack) settles the game immediately.
    if (21 %in% HANDS$score) {
      check_for_winner()
    }
  }
  # Play out one complete round: loop until no hand's score changes, letting
  # the Bank and bots act first, then each surviving human, then settle the
  # round via check_for_winner().  Mutates HANDS, deck, card, complete_data
  # and bots_alive in the enclosing Blackjack() environment via `<<-`.
  # printHand() is sourced at the top of the file; ace_control() and
  # check_for_winner() are defined elsewhere in Blackjack().
  new_round <- function() {
    # Draw the top card into `card` and remove it from the deck.
    hit <- function() {
      card <<- deck[1,]
      deck <<- deck[-1,]
    }
    repeat {
      printHand(HANDS,"all")
      complete_data <<- merge(HANDS, PLAYER, sort = FALSE)
      # Hands still in play (score <= 21), split into humans and bots/Bank.
      players_alive <- complete_data$player[complete_data$virtual == FALSE & complete_data$score <= 21]
      bots_alive <<- complete_data$player[complete_data$virtual == TRUE & complete_data$score <= 21]
      move <- NULL
      if (length(bots_alive) >= 1) {
        for (q in 1:length(bots_alive)) {
          # Each strategy appends its decision to `move`; a busted bot
          # appends nothing (NULL), so move[q] only lines up with
          # bots_alive[q] while no bot has busted.
          # Bank: hit below 17, stand on 17-21.
          strategy_bank <- function() {
            if (HANDS$score[HANDS$player=="Bank"] < 17) {
              move <<- append(move, "hit")
            } else if (HANDS$score[HANDS$player=="Bank"] > 21) {
              move <<- append(move, NULL)
            } else {
              move <<- append(move, "pass")
            }
          }
          # copycat: mimics the Bank's 17 threshold.
          strategy_copycat <- function() {
            if (HANDS$score[HANDS$player == "copycat"] < 17) {
              move <<- append(move, "hit")
            } else if (HANDS$score[HANDS$player == "copycat"] > 21) {
              move <<- append(move, NULL)
            } else {
              move <<- append(move, "pass")
            }
          }
          # cowardbot: stands early, from 14.
          strategy_cowardbot <- function() {
            if (HANDS$score[HANDS$player == "cowardbot"] < 14) {
              move <<- append(move, "hit")
            } else if (HANDS$score[HANDS$player == "cowardbot"] > 21) {
              move <<- append(move, NULL)
            } else {
              move <<- append(move, "pass")
            }
          }
          # riskbot: keeps hitting up to 19.
          strategy_riskbot <- function() {
            if (HANDS$score[HANDS$player == "riskbot"] < 19) {
              move <<- append(move, "hit")
            } else if (HANDS$score[HANDS$player == "riskbot"] > 21) {
              move <<- append(move, NULL)
            } else {
              move <<- append(move, "pass")
            }
          }
          # randomrat: hits or passes at random below 21.
          strategy_randomrat <- function() {
            if (HANDS$score[HANDS$player == "randomrat"] < 21) {
              move <<- append(move, sample(c("hit", "pass"), 1))
            } else if (HANDS$score[HANDS$player == "randomrat"] > 21) {
              move <<- append(move, NULL)
            } else if (HANDS$score[HANDS$player == "randomrat"] == 21) {
              move <<- append(move, "pass")
            }
          }
          strategy_bank()
          if("copycat" %in% HANDS$player) {
            strategy_copycat()
          }
          if("randomrat" %in% HANDS$player) {
            strategy_randomrat()
          }
          if("cowardbot" %in% HANDS$player) {
            strategy_cowardbot()
          }
          if("riskbot" %in% HANDS$player) {
            strategy_riskbot()
          }
          if (is.null(move)){
            print("Bots are out")
          } else if (move[q] == "pass"){
            print(paste(bots_alive[q], "chose to",move[q]))
          } else if (move[q] == "hit"){
            print(paste(bots_alive[q], "chose to",move[q]))
            hit()
            # Append the drawn card to this bot's hand: HANDS[[1]] is the
            # `cards` list-column, indexed by the bot's row number.
            HANDS[[1]][[(as.numeric(row.names(subset(HANDS, player==bots_alive[q]))))]] <<- rbind(HANDS[[1]][[(as.numeric(row.names(subset(HANDS, player==bots_alive[q]))))]], card)
            ace_control()
          }
        }
      }
      #in progress start
      # NOTE(review): `length(players_alive >= 1)` looks like a typo for
      # `length(players_alive) >= 1`; it happens to behave identically,
      # since length() of the elementwise comparison equals
      # length(players_alive), and if() treats any nonzero count as TRUE.
      if (length(players_alive >= 1)) {
        for (i in 1:length(players_alive)) {
          # Re-prompt until the player types a recognized move.
          repeat {
            move <- tolower(readline(paste("What is your move,", players_alive[i], "? ")))
            if (move == "pass") {
              print(paste(players_alive[i], "chose to", move))
              break
            } else if (move == "hit") {
              print(paste(players_alive[i], "chose to", move))
              hit()
              HANDS[[1]][[(as.numeric(row.names(subset(HANDS, player==players_alive[i]))))]] <<- rbind(HANDS[[1]][[(as.numeric(row.names(subset(HANDS, player==players_alive[i]))))]], card)
              ace_control()
              break
            } else {
              print("Non-existing move, did you spell it correctly?")
            }
          }
        }
      }
      # break if player dies or everyone passes, i.e. no score changed
      # between the pre-round snapshot (complete_data) and now.
      if ( (all( complete_data$score == HANDS$score )) ==TRUE){
        break
      }
    }
    check_for_winner()
  }
# create cards
# Build and shuffle the global `deck`: `number_of_decks` standard 52-card
# decks (Ace = 11, court cards = 10) stored as a face/suit/value data frame.
make_cards <- function() {
  face_names <- c("Ace", "King", "Queen", "Jack", "Ten",
                  "9", "8", "7", "6", "5", "4", "3", "2")
  suit_marks <- c("\U2660", "\U2663", "\U2666", "\U2665")
  face_points <- c(11, 10, 10, 10, 10, 9, 8, 7, 6, 5, 4, 3, 2)
  # Cycling 13 faces against 4 suits pairs every face with every suit exactly
  # once per 52 rows (13 and 4 are coprime), so each deck copy is complete.
  ordered_deck <- data.frame(
    face = rep(face_names, times = 4 * number_of_decks),
    suit = rep(suit_marks, times = 13 * number_of_decks),
    value = rep(face_points, times = 4 * number_of_decks)
  )
  shuffled_rows <- sample(nrow(ordered_deck))
  deck <<- ordered_deck[shuffled_rows, ]
}
# deal cards
# Deal two cards from the top of the global `deck` to every row of the global
# `HANDS` table, record each hand's starting score, and remove the dealt
# cards from the deck.
deal_cards <- function() {
  n_players <- nrow(HANDS)
  # Player p receives deck rows start_row[p] and start_row[p] + 1.
  start_row <- seq(1, n_players * 2, by = 2)
  for (p in seq_len(n_players)) {
    HANDS$cards[p] <<- list(deck[start_row[p]:(start_row[p] + 1), ])
    # cards[[c(p, 3)]] recursively indexes the value column of hand p.
    HANDS$score[p] <<- sum(HANDS$cards[[c(p, 3)]])
  }
  deck <<- deck[-seq_len(n_players * 2), ]
}
# Determine the round winner(s), pay out winnings, and prompt whether to continue
# Resolve the round: determine the winner(s), pay each winner 1.5x their bet,
# show all hands, and ask whether to play another round.
#
# Reads the globals HANDS (player/cards/score/bet) and PLAYER (bankrolls);
# writes PLAYER and endgame. Calls printHand() and continue_game(), which are
# defined elsewhere in this file.
check_for_winner <- function() {
  # BUG FIX: the original compared whole columns with `&&`
  # (HANDS$player=="Bank" && HANDS$score==21), which errors on length > 1
  # conditions in R >= 4.3 and previously used only the first element.
  # Compare against the single Bank row's scalar score instead.
  bank.score <- HANDS$score[HANDS$player == "Bank"]
  if (bank.score == 21) {
    # Bank blackjack beats everyone (ties included).
    winner <- "Bank"
  } else if (bank.score > 21) {
    # Bank busts: every hand still at or under 21 wins.
    winner <- HANDS$player[HANDS$score <= 21]
  } else if (21 %in% HANDS$score) {
    winner <- HANDS$player[HANDS$score == 21]
  } else {
    # Otherwise every non-busted hand that beat the bank wins.
    winner <- HANDS$player[HANDS$score > bank.score & HANDS$score <= 21]
    if (length(winner) < 1) {
      winner <- "Bank"
    }
  }
  if (length(winner) >= 1) {
    print(paste("Congratulations, The winner is ", winner ,"!"))
    # Pay each winner 1.5x their bet.
    # NOTE(review): when "Bank" wins, no PLAYER row matches, so the payout
    # loop is a no-op for the bank -- presumably intended; confirm.
    for (i in seq_along(winner)) {
      PLAYER$bankroll[PLAYER$player==winner[i]] <<- PLAYER$bankroll[PLAYER$player==winner[i]] + ( HANDS$bet[HANDS$player==winner[i]]*1.5)
    }
  } else {
    # Only reachable if the bank busts and every player busted too.
    print("It's a draw, nobody wins")
  }
  printHand(HANDS,"all")
  # Prompt until the user gives a valid yes/no answer.
  repeat {
    continue <- tolower(readline("Would you like to continue? "))
    if (continue == "yes") {
      endgame <<- FALSE
      continue_game()
      break
    } else if (continue == "no") {
      endgame <<- TRUE
      break
    } else {
      print("Choose between yes or no")
    }
  }
}
# continue game
# Start another session: re-run game setup and deal a fresh round.
# Called from check_for_winner() when the user answers "yes".
# new_game() and new_round() are defined elsewhere in this file.
continue_game <- function() {
  new_game()
  new_round()
}
# Flag polled by the top-level driver: set truthy once the player quits.
endgame <<- 0
# Vectorized test for "effectively an integer": TRUE where each element of x
# is within floating-point tolerance `tol` of a whole number.
is.wholenumber <- function(x, tol = .Machine$double.eps^0.5) {
  abs(x - round(x)) < tol
}
# ace control
# Revalue aces in every hand and refresh each hand's total.
# HANDS[[1]] is the list-column of per-hand card data frames
# (columns: face, suit, value); HANDS[[4]] is the score column.
ace_control <<- function(){
  for ( i in 1:nrow(HANDS)){
    if ( "Ace" %in% HANDS[[1]][[i]]$face ){
      # First ace is valued 11, any further aces are valued 1.
      # NOTE(review): the first ace is always forced to 11 even when that
      # busts the hand (> 21); standard blackjack would demote it to 1 --
      # confirm this is intended.
      # NOTE(review): the df[3][mask][1] chain indexes a one-column frame
      # with a row-length logical -- verify it selects the intended cells.
      HANDS[[1]][[i]][3][HANDS[[1]][[i]][1]=="Ace"][1] <<- 11
      HANDS[[1]][[i]][3][HANDS[[1]][[i]][1]=="Ace"][-1] <<- 1
    }
    # Recompute the hand total from the (possibly revalued) card values.
    HANDS[[4]][[i]] <<- sum(HANDS[[1]][[i]][3])
  }
}
game_config()
new_game()
if (endgame) {
print("Thank you for playing")
} else if (endgame==FALSE) {
new_round()
if (endgame==TRUE) {
print("Thank you for playing")
} else {
print("something went wrong with endgame")
}
} else {
print("something went wrong with endgame")
}
##### things to do
# to do, add more moves
# to do, what to do with person with to low bankroll ?
}
|
b258b047df91ccfe3f4afc99d5989f83c454f552
|
9b0e9c1e919b9abc7e1b321cc5d7e2a14451037b
|
/R/set-theme.R
|
d0084a47c108eb15eaa1a440fdb6c4acf3128141
|
[] |
no_license
|
atlas-aai/ratlas
|
abbcf9050885c6189e8bec2d6dfa42bff301664b
|
7e88f6a826b6e0d855d49e960c84ef3182b64aa8
|
refs/heads/main
| 2023-07-24T20:21:59.886545
| 2023-06-29T21:41:00
| 2023-06-29T21:41:00
| 150,454,937
| 30
| 10
| null | 2023-06-29T21:41:01
| 2018-09-26T16:14:04
|
R
|
UTF-8
|
R
| false
| false
| 2,661
|
r
|
set-theme.R
|
#' Set default ggplot2 theme
#'
#' Sets the default color schemes, fonts, and theme for ggplot2 plots. The
#' default color scheme for continuous variables is the
#' [viridis](https://cran.r-project.org/web/packages/viridis/index.html)
#' color palette, and the default color scheme for discrete variables is the
#' [Okabe Ito](http://jfly.iam.u-tokyo.ac.jp/color/) palette.
#'
#' @param font The base font family to be used in plots.
#' @param discrete Color palette for discrete colors. One of "okabeito"
#' (default), "atlas", or "ggplot2".
#' @param continuous Color palette for continuous scales. One of "magma",
#' "inferno", "plasma", "viridis" (default), or "cividis", or "ggplot2".
#' @param ... Additional arguments to pass to theme functions.
#'
#' @examples
#' set_theme("Arial Narrow")
#'
#' @export
set_theme <- function(font = "Arial Narrow",
                      discrete = c("okabeito", "atlas", "ggplot2"),
                      continuous = c("viridis", "magma", "inferno", "plasma",
                                     "cividis", "ggplot2"),
                      ...) {
  # Validate the palette arguments against their allowed values.
  discrete <- match.arg(discrete)
  continuous <- match.arg(continuous)
  # viridis "option" letter for the chosen continuous palette; NULL when the
  # user asked for the ggplot2 default ("ggplot2").
  cont_option <- switch(continuous,
                        magma = "A",
                        inferno = "B",
                        plasma = "C",
                        viridis = "D",
                        cividis = "E")
  # Reference palette object for the discrete choice; NULL for "ggplot2".
  disc_option <- switch(discrete,
                        okabeito = palette_okabeito,
                        atlas = palette_atlas)
  # Install the package theme and matching geom font defaults.
  ggplot2::theme_set(theme_atlas(base_family = font, ...))
  update_geom_font_defaults(family = font)
  if (is.null(disc_option)) {
    # Fall back to ggplot2's built-in discrete scales.
    options(ggplot2.discrete.fill = NULL)
    options(ggplot2.discrete.colour = NULL)
  } else {
    options(ggplot2.discrete.fill = switch(discrete,
                                           okabeito = scale_fill_okabeito,
                                           atlas = scale_fill_atlas))
    options(ggplot2.discrete.colour = switch(discrete,
                                             okabeito = scale_colour_okabeito,
                                             atlas = scale_colour_atlas))
  }
  if (is.null(cont_option)) {
    # Fall back to ggplot2's built-in continuous scales.
    options(ggplot2.continuous.fill = NULL)
    options(ggplot2.continuous.colour = NULL)
  } else {
    # Wrap the viridis scales so the chosen option letter is the default.
    make_scale <- function(scale_fun) {
      function(..., option = cont_option) {
        scale_fun(..., option = option, type = "viridis")
      }
    }
    options(ggplot2.continuous.fill = make_scale(ggplot2::scale_fill_continuous))
    options(ggplot2.continuous.colour = make_scale(ggplot2::scale_colour_continuous))
  }
}
|
2b3135a10f57ad5e5d8e99442c50a597ab6c57da
|
579c2e56ace8f4206c18edcced2f58af706eaae0
|
/R/plot.blca.boot.R
|
ac121caf009f984ab82b8a5355c6403e8df9dcf5
|
[] |
no_license
|
cran/BayesLCA
|
dc89cd0e31b1e298267d1d80fe768a0c839d47c6
|
67f6bd1b5eb343c2df288a5a01cdc2e6cfd7549e
|
refs/heads/master
| 2021-05-21T11:42:04.674719
| 2020-05-06T16:20:02
| 2020-05-06T16:20:02
| 17,678,033
| 2
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 291
|
r
|
plot.blca.boot.R
|
# S3 plot method for "blca.boot" objects.
#
# Delegates to the parent blca plot method via NextMethod(), after removing
# option 5 (the diagnostic plot) from `which`, since no diagnostic plot is
# defined for the bootstrapping fit method.
#
# Args:
#   x:     a fitted "blca.boot" object.
#   which: integer vector selecting which of plots 1-4 to draw.
#   main:  plot title, passed through to the parent method.
#   ...:   further graphical arguments for the parent method.
plot.blca.boot <-
function(x, which=1L, main="", ...){
	#class(x)<- "blca"
	#print("NextMethodUsed")
	# Convert `which` into a logical mask over the 5 possible plot panels.
	show<- rep(FALSE, 5)
	show[which]<- TRUE
	if(show[5]){
	warning("No diagnostic plot for bootstrapping method")
	show[5]<- FALSE
	}
	# Reassigning the formal argument `which` here changes the value that
	# NextMethod() forwards to the parent plot method.
	which<- c(1:4)[show]
	NextMethod("plot")
	}
|
d3f29fece2f451f4f642ae2bc52391d3f4fd95d3
|
50beb4502893bf218cd7282584946c5fd3e8218a
|
/FDA_Analysis.R
|
864bf1f49f61d1799b36c1b02b534c2baf06734d
|
[] |
no_license
|
jwisch/Actigraph
|
38077bfde928b43e53d5d6ba15f6970f71b665e0
|
cc30f0c4047e65c8ec5ca332187d3666517014fa
|
refs/heads/master
| 2020-12-21T21:32:06.409513
| 2020-01-27T19:04:53
| 2020-01-27T19:04:53
| 236,569,082
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 7,670
|
r
|
FDA_Analysis.R
|
library(nparACT)
library(Actigraphy)
library(lattice)
library(dplyr)
source("C:/Users/julie.wisch/Documents/nparACT_auxfunctions1.R")
source("C:/Users/julie.wisch/Documents/nparACT_auxfunctions2.R")
source("C:/Users/julie.wisch/Documents/npar_Base.R")
source("C:/Users/julie.wisch/Documents/Actigraphy_otherData/CommonFuncs.R")
#######
# Load every .agd actigraphy export, run SetUp() on each, and stack the
# results tagged with participant id and study timepoint.
file_list <- list.files(path="C:/Users/julie.wisch/Documents/Actigraphy/", pattern="*.agd")
# Drop specific file indices (presumably excluded/bad recordings -- confirm).
file_list<-file_list[c(1:19, 21:94, 96:117, 119:133, 137:143)]
setwd("C:/Users/julie.wisch/Documents/Actigraphy/")
myfiles = lapply(file_list, readActigraph)
datalist = list()
for (i in 1:length(file_list)) {
  result<-data.frame(SetUp(myfiles[[i]]))
  # Filenames look like "HECSxxxxx_T1.agd": chars 1-9 = id, 11-12 = timepoint.
  result$id <- substr(file_list[i], start = 1, stop = 9)
  result$Timepoint <- substr(file_list[i], start = 11, stop = 12)
  datalist[[i]] <- result
}
result = do.call(rbind, datalist)
# Split by study timepoint and reshape each subset with GetToActFormat().
df_T1<-result[result$Timepoint == "T1",]
df_T2<-result[result$Timepoint == "T2",]
df_T3<-result[result$Timepoint == "T3",]
df_hold_T1<-GetToActFormat(df_T1)
df_hold_T2<-GetToActFormat(df_T2)
df_hold_T3<-GetToActFormat(df_T3)
#Then have to drop participants that aren't in all the datasets
# (T3 has the fewest participants, so its column set is the common subset).
df_hold_T1<-select(df_hold_T1, names(df_hold_T3))
df_hold_T2<-select(df_hold_T2, names(df_hold_T3))
#Getting their cohort info
Info<-read.csv("C:/Users/julie.wisch/Documents/Actigraphy_otherData/HECS_Data_021219.csv")
Info<-Info[,c("related_study_id", "Group")]
# Rebuild the participant id to match the actigraphy files ("HECS" + 5 chars).
Info$id<-paste("HECS", substr(Info$related_study_id, 1, 5), sep = "")
Info<-Info[,c("id", "Group")]
Info<-Info[!duplicated(Info$id),]
# Group 1 participants are the exercise arm; keep the shared "time" column too.
Exercisers<-Info[Info$Group == 1,"id"]
Exercisers<-Exercisers[!is.na(Exercisers)]
Exercisers<-c("time", Exercisers)
##############################################################################
#Setting things up and getting data frames for analysis together
#Have data frames with only the exercisers in them
df_T1<-df_hold_T1[,colnames(df_hold_T1) %in% Exercisers]
df_T2<-df_hold_T2[,colnames(df_hold_T2) %in% Exercisers]
df_T2_stretch<-df_hold_T2[,!(colnames(df_hold_T2) %in% Exercisers)]
df_T3<-df_hold_T3[,colnames(df_hold_T3) %in% Exercisers]
# Covariate table: -1 = exercise arm, 1 = stretch arm (timepoint T2 only).
clinic<-data.frame(c(names(df_T2[-1]),names(df_T2_stretch[-1])), c(rep(-1, length(df_T2[-1])), rep(1, length(df_T2_stretch[-1]))))
colnames(clinic)<-c("id", "Timepoint")
data<-data.frame(cbind(df_T2, df_T2_stretch[,-1]))
#############################################################################
#############################################################################
#############################################################################
#############################################################################
#############################################################################
# Smooth the activity curves with a Fourier basis and plot group means.
# NOTE(review): the first matchid assignment is immediately overwritten by
# the "factor" call on the next line -- presumably leftover from testing.
matchid <- fda.matchid(data[,-1], clinic, "contin") #Can analyze relative to a continuous measure as well
matchid<-fda.matchid(data[,-1], clinic, type = "factor", grouplab = c("excer", "stretch"))
FDcont <- fda.smoothdata(matchid)
geftFDcont <- flm_cate(FDcont)
par(mfrow=c(1,1))
### Smooth the Results
ts.plot(predict(FDcont$fd$fd, c(1:length(data[,1]))), main="Smoothed Activity Data")
result<-flm_cate(FDcont, basistype="fourier", nbasis=9, norder=4)
# Recode the group covariate to -1/1 and fit the functional linear model.
ahidatav2<-fda.matchid(data[,-1], clinic, type = "factor", grouplab = c("excer", "stretch"))
tempv2 <- ahidatav2[[2]]
tempv2[,3] <- ifelse(tempv2[,3] == 0, -1, 1)
ahidatav2$cov <- data.frame(id=tempv2$id, mean=1, ahi=tempv2[,3])
# Line colors: 4 (blue) = exercise (-1), 2 (red) = stretch (1).
colv2 <- ifelse(tempv2[,3] == -1, 4, 2)
smoothDatav2 <- fda.smoothdata(ahidatav2)
par(mfrow=c(1,1))
ts.plot(predict(smoothDatav2$fd$fd, c(1:length(data[,1]))), main="Smoothed Activity Data", ylim = c(0, 2800))
geftahiv2 <- flm_cate(smoothDatav2, basistype="fourier", nbasis=9, norder=4)
# Estimated coefficient functions: overall mean and the group effect.
meanefv2 <- geftahiv2$freg$betaestlist[[1]]
ahiefv2 <- geftahiv2$freg$betaestlist[[2]]
### Plot Options and Parameters
# L = 1440 minutes in a day; x-axis labelled at 6-hour intervals.
L <- 1440
xat <- c(0, L/4, L/2, 3*L/4, L)
lb <- c("Midnight", "6AM", "Noon", "6PM", "Midnight")
### Plot Figure
ts.plot(predict(smoothDatav2$fd$fd, c(1:length(data[,1]))), main="Smoothed Activity Data")
par(mfrow=c(2,1), mar=c(4,4,3,1))
plot(0, 0, xlim=c(0,L), ylim=c(600,2600), xaxt="n", xlab="(a)", ylab="Activity", type="n", main="Smoothed Circadian Activity Curves ")
for(i in 1:(length(data)-1)){lines(predict(smoothDatav2$fd$fd, c(1:L))[,i], col=colv2[i], lwd = 0.2, lty = 2) }
### Plot the group mean activities
lines(meanefv2$fd-ahiefv2$fd, col=4, lwd=3)
lines(meanefv2$fd+ahiefv2$fd, col=2, lwd=3)
### Plot the overall mean
lines(meanefv2$fd, col=1, lwd=1)
axis(1, at=xat, labels=lb)
plot(0, 0, xlim=c(0,L), ylim=c(600,2600), xaxt="n", xlab="(a)", ylab="Activity", type="n", main="Average Circadian Activity Curves ")
### Plot the group mean activities
lines(meanefv2$fd-ahiefv2$fd, col=4, lwd=3)
lines(meanefv2$fd+ahiefv2$fd, col=2, lwd=3)
### Add the axis and legend to finish the plot
axis(1, at=xat, labels=lb)
#legend("topleft", c("AHI High Curves", "AHI High Mean", "AHI Low Curves", "AHI Low Mean ", "Overall Mean"),
#lty=1, col=c(4,4,2,2,1), lwd=c(1,3,1,3,3), cex=.8)
### F Test
# Permutation F-test for a group effect on the smoothed activity curves.
cov2 <- smoothDatav2$cov[, -1]
grp2 <- ncol(cov2)
fd <- smoothDatav2$fd
L <- length(fd$argvals)
npt <- ncol(fd$y)
fbase <- create.fourier.basis(rangeval=c(0, length(data[,1])), nbasis=9)
fpar <- fdPar(fbase)
# One covariate list entry per column; "+ 0" coerces to plain numeric.
xfdlist <- vector("list", grp2)
xfdlist[[1]] <- cov2[, 1] + 0
for(i in 2:grp2){
xfdlist[[i]] <- cov2[, i] + 0}
betalist <- xfdlist
for(i in 1:grp2){
betalist[[i]] <- fpar}
freg2 <- fRegress(fd$fd, xfdlist, betalist)
preact2 <- predict(freg2$yhatfdobj, c(1:L))
# Residual covariance feeds the standard-error estimate below.
resid2 <- fd$y - preact2[, 1:npt]
sigma2 <- cov(t(resid2))
fregstd2 <- fRegress.stderr(freg2, fd$y2cMap, sigma2)
# NOTE(review): `data$T1` does not appear to be a column of `data` built
# above, so length(data$T1) may be 0 and argvals = c(1, 0) -- verify.
Fratio <- Ftest(fd$fd, xfdlist, betalist, argvals = c(1:length(data$T1)), nperm=1000, xaxt="n")
##############Plotting options follow
#################################################################################################
#################################################################################################
#################################################################################################
###This block lets you smooth multiple files from an individual and create a plot that shows their activity for each of the times they were sampled
library(PhysicalActivity)
library(RSQLite)
plots<-IndividualPlots("HECS80001", 3)
grid.arrange(plots[[1]], plots[[2]], plots[[3]], ncol = 1)
########################################################################################
########################################################################################
########################################################################################
########################################################################################
#This is how to make 3 plots - one smoothed by mins, one smoothed by hours, one showing each day of the week
PARTICIPANTID<-"HECS80001"
df1<-readActigraph(paste("C:/Users/julie.wisch/Documents/Actigraphy/", PARTICIPANTID, "_T1.agd", sep= ""))
df1<-df1[,c("TimeStamp", "vm")]
# Collapse vector-magnitude counts into 60-second epochs.
df1<-dataCollapser(df1, TS = "TimeStamp", col = "vm", by = 60)
bin_hr <- 60
SR<-1/60
cutoff<-1
# NOTE(review): `T` here is the reassignable alias for TRUE -- prefer TRUE.
nparACT_plot(df1, SR, cutoff = 1, plot = T, fulldays = T)
rm(df1)
########################################################################################
########################################################################################
########################################################################################
########################################################################################
|
99e2001c1c9c03a78f5afd500652c9dbefbcd3c2
|
a15c82b3af65adf5161fc4b08dc7ce3751bdd1f7
|
/cachematrix.R
|
c72dcd60b176d5290c2458fe6d56e14f89db9aac
|
[] |
no_license
|
RoliMu/ProgrammingAssignment2
|
0709125f9a29de96cfbd28359242dda8df10972a
|
50457d51a6dff848821f564094b74e281bd10048
|
refs/heads/master
| 2020-12-11T02:03:20.146917
| 2016-05-07T22:56:14
| 2016-05-07T22:56:14
| 58,231,471
| 0
| 0
| null | 2016-05-06T19:53:43
| 2016-05-06T19:53:43
| null |
UTF-8
|
R
| false
| false
| 1,914
|
r
|
cachematrix.R
|
## Programming Assignment 2: Lexical Scoping
## 07/05(2016)
## Starting with:'Matrix inversion is usually a costly
## computation and there may be some benefit to caching the
## inverse of a matrix rather than computing it repeatedly'
## The assignment is to write a pair of functions that cache
## the inverse of a matrix.
## the first function makeCacheMatrix is to create a special
## matrix object that can cache its inverse.
## the second function cacheSolve is to compute the inverse of
## the special matrix returned by makeCacheMatrix above. if the
## inverse has already been calculated (and the matrix has not
## changed), then this second function cacheSolve should retrieve
## the inverse from the cache.
## solve() function computes the inverse of a square matrix (if
## invertible).
## for this assignment it is assumed that the input matrix is
## always convertible!
## makeCacheMatrix: build a special "matrix" object that can cache its inverse.
# Create a special "matrix" wrapper that can cache its inverse.
#
# Returns a list of four accessor closures sharing one environment:
#   set(y)          store a new matrix and drop any cached inverse
#   get()           return the stored matrix
#   setInverse(inv) cache a computed inverse
#   getInverse()    return the cached inverse, or NULL if not yet cached
makeCacheMatrix <- function(x = matrix()) {
  cached_inverse <- NULL
  list(
    set = function(y) {
      x <<- y
      cached_inverse <<- NULL   # new data invalidates the cache
    },
    get = function() x,
    setInverse = function(inverse) cached_inverse <<- inverse,
    getInverse = function() cached_inverse
  )
}
## The function cacheSolve returns the inverse of a matrix A created with
## the makeCacheMatrix function.
## If the cached inverse is available, cacheSolve retrieves it, while if
## not, it computes, caches, and returns it.
# Return the inverse of the special "matrix" created by makeCacheMatrix().
# A previously cached inverse is reused when available; otherwise the inverse
# is computed with solve(), stored in the cache, and returned.
cacheSolve <- function (x=matrix(), ...) {
  cached <- x$getInverse()
  if (is.null(cached)) {
    # Cache miss: compute the inverse, store it, and fall through to return.
    cached <- solve(x$get(), ...)
    x$setInverse(cached)
  } else {
    message("getting cached data")
  }
  cached
}
|
113fdddfc06c83acdd4cede04bfdbfc40fb02965
|
934d29dd215c4d76fe299e4514100277ef5213d0
|
/DissertationChapter/FAOvGTAplot.R
|
bd755f28ceda2795ede11483ec764b408565bb0d
|
[] |
no_license
|
kmblincow/GlobalSeafoodTrade
|
d0dc45905ea4514141d065394a91dc1ae3906a17
|
a6475718656c04f0ccaba38127bbac20a7c856c3
|
refs/heads/main
| 2023-04-15T21:50:21.836901
| 2022-10-10T15:40:08
| 2022-10-10T15:40:08
| 343,531,329
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,079
|
r
|
FAOvGTAplot.R
|
#Kayla Blincow
#5/27/21
#Comparing FAO and GTA data
#clear my workspace
# NOTE(review): rm(list = ls()) in a script is an anti-pattern (wipes the
# caller's workspace) -- consider removing.
rm(list = ls())
#load packages
library(tidyverse)
#load data
FAO <- read.csv("ConsumptionAngle/Final_FAOFMI.csv", header = T)
GTA <- read.csv("ConsumptionAngle/GTA_FAOmatch.csv", header = T)
#Plot total imports for each country in each dataset
# GTA: total live-weight imports per reporting country.
dGTA <- GTA %>% group_by(Reporter, Direction) %>%
  summarize(Quantity = sum(livewgt)) %>%
  filter(Direction == "Import")
# FAO: total imports per country.
dFAO <- FAO %>%
  group_by(Country) %>%
  summarize(Imports = sum(Imports))
# Join the two sources; Mozambique and Ecuador are excluded (reason not
# stated here -- confirm).
d <- left_join(dFAO, dGTA, by = c("Country" = "Reporter")) %>%
  filter(Country != "Mozambique" & Country != "Ecuador")
# Scatter of FAO vs GTA totals; the 1:1 line marks perfect agreement.
p1 <- ggplot(d, aes(x = Imports, y = Quantity)) +
  geom_point() +
  geom_abline(slope = 1, intercept = 0) +
  labs(x = "FAO Imports (2012-2017)", y = "GTA Imports (2012-2017)",
       title = "Comparison of Total Import Data by Country") +
  theme_classic()
# Write the figure to disk.
png(filename="ConsumptionAngle/Figures/FAOvGTA.png",
    units="in",
    width=5,
    height=5,
    res=400)
p1
dev.off()
|
0b461985c47c1adbc9bf332067cef0669c99057c
|
385657ba6ae43b8d9ee35b0691909ab0ff3dafb2
|
/R/plot_r0_profile_fromcsv.R
|
17906b2aa5760277226191aa392b6a5fed08ed8c
|
[] |
no_license
|
fecor21/ssdiags
|
0c800a3b2bc4ddd974d8eb725553209e25c0cb85
|
083da546c7859ca9a534ac8d9ce83a96f191b02f
|
refs/heads/master
| 2020-05-26T14:50:54.346651
| 2019-05-23T15:25:53
| 2019-05-23T15:25:53
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 4,136
|
r
|
plot_r0_profile_fromcsv.R
|
#rm(list=ls())
library(r4ss)
library(colorRamps)
library(ggplot2)
library(dplyr); library(reshape2); require(gridExtra)
# setwd("C:/Users/felip/Dropbox/MLS_2019_1994/R0_Profile_Model_382")
# Read the R0 profile results and identify the likelihood-component columns.
setwd("C:/Users/mkapur/Documents/GitHub/ssdiags")
df <- read.csv("./R/r0profile_20190328_0858.csv")
profile.label <- expression(log(italic(R)[0]))
# Columns named like "F*_" are fleets; "S0*_" are surveys.
fleetmatch <-which(grepl("F.*_", names(df)))
survmatch <- which(grepl("S0.*_", names(df)))
## catch likelihoods -- all cols after "ALL.2"
catchmatch <- grep('ALL.2',names(df)):length(names(df))
tomatch <- c(which(grepl("F.*_", names(df))),
             which(grepl("S0.*_", names(df))))
FleetNames <- names(df)[tomatch][!is.na(names(df)[tomatch])]
## Totally Raw Plotting Reboot -- sorry
plist <- list()
## R0 color by component ---
# Piner plot: change in total and component log-likelihoods across log(R0).
plist[[1]] <- df[,1:5] %>% melt(id = c('SR_LN.R0.')) %>%
  ggplot(., aes(x = SR_LN.R0., y = value, color = variable,pch = variable)) +
  theme_classic() +
  theme(panel.grid = element_blank(),
        legend.position = c(0.8,0.8))+
  geom_line(lwd = 1.1) +
  geom_point() +
  scale_color_manual(values=c('black',rainbow(3))) +
  labs(x = profile.label, pch = '', color = '',
       y = 'Change in Log-Likelihood')
ggsave(plot = last_plot(), file = paste0("PinerPlot.tiff"),
       width = 4, height = 6, units = 'in', dpi = 480)
## R0 color by fleet ---
# Index (survey) likelihood profile, one line per survey plus the total.
# NOTE(review): filter(df$Label == ...) tests the *original* df's column, not
# the piped cbind result -- this relies on row orders matching; verify.
plist[[2]] <- cbind(df$Label, df$SR_LN.R0.,df[,survmatch],df$ALL) %>%
  filter(df$Label == 'Surv_like') %>%
  # select(-c("df$Label") )%>%
  plyr::rename(c("df$SR_LN.R0." = "SR_LN.R0.", "df$ALL"='ALL')) %>%
  melt(id = c('SR_LN.R0.')) %>%
  ggplot(., aes(x = SR_LN.R0., y = value, color = variable,pch = variable)) +
  theme_classic() +
  theme(panel.grid = element_blank(),
        legend.position = c(0.5,0.8))+
  geom_line(lwd = 1.1) +
  geom_point() +
  scale_color_manual(values=c(rainbow(length(survmatch)),'black')) +
  labs(x = profile.label, pch = '', color = '',
       y = 'Change in Log-Likelihood',
       title = 'Changes in Index Likelihood by Fleet')
ggsave(plot = last_plot(), file = paste0("Index Likelihood by Fleet.tiff"),
       width = 4, height = 6, units = 'in', dpi = 480)
## LengthLike color by fleet ----
## these are denoted by ".1"
# Length-composition likelihood profile, one line per fleet plus the total.
plist[[3]] <- cbind(df$Label.1, df$SR_LN.R0.,df[,fleetmatch],df$ALL.1) %>%
  filter(df$Label.1 == 'Length_like') %>%
  select(-"df$Label.1") %>%
  plyr::rename(c("df$SR_LN.R0." = "SR_LN.R0.", "df$ALL.1"='ALL.1')) %>%
  melt(id = c('SR_LN.R0.')) %>%
  ggplot(., aes(x = SR_LN.R0., y = value, color = variable)) +
  theme_classic() +
  theme(panel.grid = element_blank(),
        legend.position = c(0.5,0.6),
        legend.background = element_blank())+
  geom_line(lwd = 1.1) +
  scale_color_manual(values=c(rainbow(length(fleetmatch)),'black')) +
  labs(x = profile.label, color = '',
       y = 'Change in Log-Likelihood',
       title = 'Changes in Length_Like by Fleet')
ggsave(plot = last_plot(), file = paste0("Length_Like by Fleet.tiff"),
       width = 4, height = 6, units = 'in', dpi = 480)
## Changes in catchlike by fleet ----
## denoted by ".2"
# Catch likelihood profile, total ('ALL.2') plus one line per catch column.
plist[[4]] <-cbind(df$Label.2, df$SR_LN.R0.,df[,catchmatch],df$ALL.2) %>%
  filter(df$Label.2 == 'Catch_like') %>%
  select(-"df$Label.2") %>%
  plyr::rename(c("df$SR_LN.R0." = "SR_LN.R0.", "df$ALL.2"='ALL.2')) %>%
  melt(id = c('SR_LN.R0.')) %>%
  ggplot(., aes(x = SR_LN.R0., y = value, color = variable,pch = variable)) +
  theme_classic() +
  theme(panel.grid = element_blank(),
        legend.position = c(0.5,0.8),
        legend.background = element_blank())+
  geom_line(lwd = 1.1) +
  geom_point() +
  scale_color_manual(values=c('black', rainbow(length(catchmatch)))) +
  labs(x = "log(R0)", pch = '', color = '',
       y = 'Change in Log-Likelihood',
       title = 'Changes in Catch Likelihood by Fleet')
ggsave(plot = last_plot(), file = paste0("Catch Likelihood by Fleet.tiff"),
       width = 4, height = 6, units = 'in', dpi = 480)
## save it ----
# Arrange the four likelihood-profile panels in a 2x2 grid and write the
# combined figure: panel 1 top-left, 2 bottom-left, 3 top-right, 4 bottom-right.
lay <- cbind(c(1, 2),
             c(3, 4))
# BUG FIX: the original ggsave call had an unterminated string literal
# (paste0("all_ikelihoods_from_csv.tiff) -- missing closing quote), so the
# script did not parse. Closed the quote and corrected the filename typo.
combined <- grid.arrange(grobs = plist, layout_matrix = lay)
ggsave(plot = combined, file = "all_likelihoods_from_csv.tiff",
       width = 8, height = 12, units = 'in', dpi = 480)
|
76ac6b7270f6c5f79db1cd56abb3d9e52c07bdd5
|
a895bbebd37b76348aac2ebd8fdd0ef2ffc7829b
|
/R/ENMnulls.test.R
|
8686bcd614e741a48918a149cefa3cd19ca49a1d
|
[] |
no_license
|
johnsonojeda/ENMeval
|
9a34ee7476034bfa2c25ff0f169600f1e28f6a06
|
7132a42879b24d58fa35028cef42a4b3db59556d
|
refs/heads/master
| 2023-08-03T04:19:24.207693
| 2023-03-28T23:20:00
| 2023-03-28T23:20:00
| 528,523,469
| 0
| 0
| null | 2022-08-24T17:20:02
| 2022-08-24T17:20:02
| null |
UTF-8
|
R
| false
| false
| 25,906
|
r
|
ENMnulls.test.R
|
#' @title Compare model accuracy metrics of Ecological Niche Models (ENMs) built with different set of predictors.
#' @description \code{ENMnulls.test()} iteratively builds null ENMs for "k" sets of user-specified model
#' settings based on "k" input ENMevaluation objects, from which all other analysis
#' settings are extracted. Summary statistics of the performance metrics for the null ENMs
#' are taken (averages and standard deviations) and effect sizes and p-values are calculated by
#' comparing these summary statistics to the empirical values of the performance metrics
#' (i.e., from the model built with the empirical data). This is an extension of {ENMnulls()} for comparisons
#' of two or more predictor variable sets.
#'
#' @param e.list: list of ENMevaluation objects to be compared
#' @param mod.settings.list named list: model settings corresponding to ENMevaluation objects in e.list
#' that specify the settings used for building null models
#' @param no.iter numeric: number of null model iterations.
#' @param eval.stats character: model accuarcy metrics that will be used to calculate null model statistics.
#' Can be one of “auc”, “cbi”, “or.mtp”, “or.10p”.
#' @param user.enm ENMdetails object: if implementing a user-specified model.
#' @param user.eval.type character: if implementing a user-specified model -- either "knonspatial", "kspatial",
#' "testing" or "none".
#' @param alternative a character string
#' @param userStats.signs named list: user-defined evaluation statistics attributed with either 1 or -1
#' to designate whether the expected difference between empirical and null models is positive or negative;
#' this is used to calculate the p-value of the z-score when comparing two predictor variable sets. Default is NULL.
#' @param removeMxTemp boolean: if TRUE, delete all temporary data generated when using maxent.jar for modeling
#' @param parallel boolean: if TRUE, use parallel processing.
#' @param numCores numeric: number of cores to use for parallel processing; if NULL, all available cores will be used.
#' @param parallelType character: either "doParallel" or "doSNOW" (default: "doSNOW").
#' @param quiet boolean: if TRUE, silence all function messages (but not errors).
#'
#' #' @details This null ENM technique extends the implementation in Bohl \emph{et al.} (2019)and Kass \emph{et al.} (2020),
#' which follows the original methodology of Raes & ter Steege (2007). Here we evaluate if observed differences in accuracy metric values
#' (e.g., AUC, omission rates, CBI) of empirical models built with different sets of predictor variable are greater than expected
#' at random. This is done by building the null distributions of the difference in accuracy metrics
#' employing the same withheld validation data used to evaluate the empirical models. Please see the vignette for a brief example.
#'
#' This function avoids using raster data to speed up each iteration, and instead samples null occurrences from the
#' partitioned background records. Thus, you should avoid running this when your background records are not well
#' sampled across the study extent, as this limits the extent that null occurrences can be sampled from.
#'
#' @references
#' Bohl, C. L., Kass, J. M., & Anderson, R. P. (2019). A new null model approach to quantify performance and significance for ecological niche models of species distributions. \emph{Journal of Biogeography}, \bold{46}: 1101-1111. \url{https://doi.org/10.1111/jbi.13573}
#'
#' Kass, J. M., Anderson, R. P., Espinosa-Lucas, A., Juárez-Jaimes, V., Martínez-Salas, E., Botello, F., Tavera, G., Flores-Martínez, J. J., & Sánchez-Cordero, V. (2020). Biotic predictors with phenological information improve range estimates for migrating monarch butterflies in Mexico. \emph{Ecography}, \bold{43}: 341-352. \url{https://doi.org/10.1111/ecog.04886}
#'
#' Raes, N., & ter Steege, H. (2007). A null-model for significance testing of presence-only species distribution models. \emph{Ecography}, \bold{30}: 727-736. \url{https://doi.org/10.1111/j.2007.0906-7590.05041.x}
#'
#' @return An \code{ENMnull} object with slots containing evaluation summary statistics
#' for the null models and their cross-validation results, as well as differences in results between the
#' empirical and null models. This comparison table includes T-statistics for pairwise comparisons (T-test)
#' and F-statistic (ANOVA) of these differences and their associated p-values (under a normal distribution).
ENMnulls.test <- function(e.list, mod.settings.list, no.iter,
eval.stats = c("auc", "cbi","or.mtp","or.10p"),
user.enm = NULL, user.eval.type = NULL,
alternative = "two.sided",
userStats.signs = NULL,
removeMxTemp = TRUE, parallel = FALSE, numCores = NULL,
parallelType = "doSnow", quiet = FALSE){
#loading dependencies
require(doParallel)
require(doSNOW)
require(tidyr)
require(dplyr)
require(rstatix)
## checks
#more than one input enm
if(length(e.list) == 1){
stop("Please input more than one ENM to run comparisons.")
}
# model settings are all single entries for each enm treatment
for(k in 1:length(mod.settings.list)){
if(!all(sapply(mod.settings.list[[k]], length) == 1)){
stop("Please input a single set of model settings.")
}
}
# model settings are correct for input algorithm and are entered in the right order --
#if not, put them in the right order, else indexing models later will fail because the model
# name will be incorrect
for(k in 1:length(e.list)){
if(e.list[[k]]@algorithm %in% c("maxent.jar", "maxnet")){
if(length(mod.settings.list[[k]]) != 2){
stop("Please input two complexity settings (fc [feature classes] and rm [regularization
multipliers]) for mod.settings for maxent.jar and maxnet models.")
}
if(all(names(mod.settings.list[[k]]) %in% c("fc", "rm"))) {
if(!all(names(mod.settings.list[[k]]) == c("fc", "rm"))) {
mod.settings.list[[k]] <- mod.settings.list[[k]][c("fc", "rm")]
}
}else{
stop('Please input only "fc" (feature classes) and "rm" (regularization multipliers) for
mod.settings for maxent.jar and maxnet models.')
}
}else if(e.list[[1]]@algorithm == "bioclim") {
if(length(mod.settings.list[[k]]) != 1) {
stop("Please input one complexity setting (tails) for mod.settings for BIOCLIM models.")
}
if(!all(names(mod.settings.list[[k]]) == "tails")) {
stop('Please input only "tails" for mod.settings for BIOCLIM models.')
}
}
}
# assign evaluation type based on partition method
if(is.null(user.eval.type)) {
eval.type <- switch(e.list[[1]]@partition.method,
randomkfold = "knonspatial",
jackknife = "knonspatial",
block = "kspatial",
checkerboard1 = "kspatial",
checkerboard2 = "kspatial",
testing = "testing",
none = "none")
}else{
eval.type <- user.eval.type
}
# assign directionality of sign for evaluation stats in post-hoc tests
signs <- c(list("auc.val.avg" = 1, "auc.train.avg" = 1, "cbi.val.avg" = 1, "cbi.train.avg" = 1,
"auc.diff.avg" = -1, "or.10p.avg" = -1, "or.mtp.avg" = -1), userStats.signs)
# record start time
start.time <- proc.time()
##############################
## 1. Create null occurrences
##############################
#Create list of "i" sets of null "n" occurrences for each ENMevaluation object in e.list
# assign the number of cross validation iterations
# each ENMevaluation object in e.list should be built using the same parameters
nk <- max(as.numeric(as.character(e.list[[1]]@occs.grp)))
# get number of occurrence points by partition
occs.grp.tbl.list <- lapply(e.list, function(e){table(e@occs.grp)})
# if more than one background partition exists, assume spatial CV and
# keep existing partitions
#Create list of background SWD dataframes for each ENM treatment
null.samps.list <- lapply(e.list, function(e){
#Get occ points env values from ENMevaluation object
null.samps <- cbind(rbind(e@occs, e@bg), grp = c(e@occs.grp, e@bg.grp))
#Get bg points env values from ENMevaluation object
return(null.samps)
})
for(k in 1:length(e.list)){
if(e.list[[k]]@algorithm == "maxent.jar") {
# create temp directory to store maxent.jar output, for potential removal later
tmpdir <- paste(tempdir(), runif(1,0,1), sep = "/")
dir.create(tmpdir, showWarnings = TRUE, recursive = FALSE)
}
}
# assign user algorithm if provided
if(!is.null(user.enm)) {
for(k in 1:length(e.list)){
e.list[[k]]@algorithm <- user.enm
}
}
#################################################
## 2. Specify empirical model evaluation metrics
#################################################
# Pull, for each ENM treatment, the results row of the empirical model that
# matches the chosen tuning settings (mod.settings.list).
emp.mod.res.list<- mapply(function(e, m){
  # build the tune.args key, e.g. "fc.L_rm.1", from the settings list
  mod.tune.args <- paste(names(m), m, collapse = "_", sep = ".")
  # NOTE(review): emp.mod is assigned but not used below -- looks vestigial
  emp.mod <- e@models[[mod.tune.args]]
  emp.mod.res <- e@results %>% dplyr::filter(tune.args == mod.tune.args)
},e.list, mod.settings.list)
# mapply simplifies to a matrix (one column per treatment); convert back to a
# list of one-row data frames, one per treatment
emp.mod.res.list <- lapply(1:ncol(emp.mod.res.list), function(x){
  as.data.frame(t(emp.mod.res.list))[x,]})
#########################################
## 3. Build null models
#########################################
#Iteratively apply ENMulls for each ENMevaluation object in e.list
if(quiet == FALSE) message(paste("Building and evaluating null ENMs with", no.iter, "iterations..."))
if(quiet == FALSE) pb <- txtProgressBar(0, no.iter, style = 3)
# set up parallel processing functionality
# NOTE(review): a cluster is created and a doParallel/doSNOW backend registered
# here, but no foreach loop is visible in this section (the driver loop below is
# sequential), and the cluster is never stopped with stopCluster() -- confirm
# whether parallel execution is wired up elsewhere or this is dead setup code
if(parallel == TRUE) {
  allCores <- parallel::detectCores()
  if (is.null(numCores)) {
    numCores <- allCores
  }
  cl <- parallel::makeCluster(numCores, setup_strategy = "sequential")
  if(quiet != TRUE) progress <- function(n) setTxtProgressBar(pb, n)
  if(parallelType == "doParallel") {
    doParallel::registerDoParallel(cl)
    opts <- NULL
  } else if(parallelType == "doSNOW") {
    doSNOW::registerDoSNOW(cl)
    # doSNOW supports a progress callback; doParallel does not
    if(quiet != TRUE) opts <- list(progress=progress) else opts <- NULL
  }
  numCoresUsed <- foreach::getDoParWorkers()
  if(quiet != TRUE) message(paste0("\nOf ", allCores, " total cores using ", numCoresUsed, "..."))
  if(quiet != TRUE) message(paste0("Running in parallel using ", parallelType, "..."))
}
#Specify clamping directions: reuse each empirical model's clamp directions,
#or NULL when the slot is empty
clamp.directions.list <- lapply(e.list, function(e){
  if(length(e@clamp.directions) == 0){
    clamp.directions.i <- NULL
  }else{
    clamp.directions.i <- e@clamp.directions
  }
})
# define function to run null model for iteration i
null_i <- function(i, e, null.samps,
occs.grp.tbl,
mod.settings,
clamp.directions.i) {
null.occs.ik <- list()
if(eval.type == "kspatial") {
# randomly sample the same number of training occs over each k partition
# of envs; if kspatial evaluation, only sample over the current spatial
# partition of envs.z
for(k in 1:nk) {
# sample null occurrences only from
# the records in partition group k
null.samps.k <- null.samps %>% dplyr::filter(grp == k)
# randomly sample n null occurrences, where n equals the number
# of empirical occurrence in partition group k
samp.k <- sample(1:nrow(null.samps.k), occs.grp.tbl[k])
null.occs.ik[[k]] <- null.samps.k[samp.k, ]
}
}else if(eval.type == "knonspatial") {
for(k in 1:nk) {
# randomly sample n null occurrences, where n equals the number
# of empirical occurrence in partition group k
samp.k <- sample(1:nrow(null.samps), occs.grp.tbl[k])
null.occs.ik[[k]] <- null.samps[samp.k, ]
}
}else if(eval.type %in% c("testing", "none")) {
samp.test <- sample(1:nrow(null.samps), occs.grp.tbl)
null.occs.ik[[1]] <- null.samps[samp.test, ]
}
# bind rows together to make full null occurrence dataset
null.occs.i.df <- dplyr::bind_rows(null.occs.ik)
if(eval.type == "knonspatial") {
if(e@partition.method == "randomkfold") null.occs.i.df$grp <- get.randomkfold(null.occs.i.df, e@bg, kfolds = e@partition.settings$kfolds)$occs.grp
if(e@partition.method == "jackknife") null.occs.i.df$grp <- get.jackknife(null.occs.i.df, e@bg)$occs.grp
}
null.occs.i.z <- null.occs.i.df %>% dplyr::select(-grp)
# shortcuts
categoricals <- names(which(sapply(e@occs, is.factor)))
if(length(categoricals) == 0) categoricals <- NULL
if(eval.type %in% c("testing", "none")) {
partitions <- eval.type
user.grp <- NULL
user.val.grps <- NULL
}else{
# assign the null occurrence partitions as user partition settings, but
# keep the empirical model background partitions
user.grp <- list(occs.grp = null.occs.i.df$grp, bg.grp = e@bg.grp)
# assign user validation partitions to those used in the empirical model
user.val.grps <- cbind(e@occs, grp = e@occs.grp)
partitions <- "user"
}
# check if ecospat is installed, and if not, prevent CBI calculations
if(requireNamespace("ecospat", quietly = TRUE)) {
e@other.settings$ecospat.use <- TRUE
}else{
e@other.settings$ecospat.use <- FALSE
}
args.i <- list(occs = null.occs.i.z, bg = e@bg, tune.args = mod.settings, categoricals = categoricals, partitions = partitions,
algorithm = e@algorithm, other.settings = e@other.settings, partition.settings = e@partition.settings,
occs.testing = e@occs.testing, user.val.grps = user.val.grps, user.grp = user.grp,
doClamp = e@doClamp, clamp.directions = clamp.directions.i, quiet = TRUE)
null.e.i <- tryCatch({
do.call(ENMevaluate, args.i)
}, error = function(cond) {
if(quiet != TRUE) message(paste0("\n", cond, "\n"))
# Choose a return value in case of error
return(NULL)
})
if(is.null(null.e.i)) {
results.na <- e@results[1,] %>% dplyr::mutate(dplyr::across(auc.train:ncoef, ~NA))
mod.settings.i <- paste(names(mod.settings), mod.settings, collapse = "_", sep = ".")
if(nrow(e@results.partitions) > 0) {
results.partitions.na <- e@results.partitions %>% dplyr::filter(tune.args == mod.settings.i) %>% dplyr::mutate(dplyr::across(3:ncol(.), ~NA)) %>% dplyr::mutate(iter = i)
}else{
results.partitions.na <- e@results.partitions
}
out <- list(results = results.na, results.partitions = results.partitions.na)
}else{
out <- list(results = null.e.i@results,
results.partitions = null.e.i@results.partitions %>% dplyr::mutate(iter = i) %>% dplyr::select(iter, dplyr::everything()))
# restore NA row if partition evaluation is missing (model was NULL)
if(eval.type != "testing") {
allParts <- unique(user.grp$occs.grp) %in% out$results.partitions$fold
if(!all(allParts)) {
inds <- which(allParts == FALSE)
newrow <- out$results.partitions[1,]
newrow[,4:ncol(newrow)] <- NA
for(ind in inds) {
out$results.partitions <- dplyr::bind_rows(out$results.partitions, newrow %>% dplyr::mutate(fold = ind))
}
out$results.partitions <- dplyr::arrange(out$results.partitions, fold)
}
}
}
return(out)
}
#Run null models
# For each ENM treatment x, run no.iter null iterations via null_i() and
# collect the per-iteration results in null.i.list[[x]].
null.i.list <- list()
for(x in seq_along(e.list)) {
  # progress output honors the quiet flag, consistent with the rest of the
  # function (the original bare cat() calls were leftover debug output)
  if(quiet != TRUE) cat("Doing e", x, "\n")
  e <- e.list[[x]]
  null.samps <- null.samps.list[[x]]
  occs.grp.tbl <- occs.grp.tbl.list[[x]]
  mod.settings <- mod.settings.list[[x]]
  clamp.directions.i <- clamp.directions.list[[x]]
  null.i.list[[x]] <- lapply(seq_len(no.iter), function(i){
    # advance the progress bar created in section 3 (it was never updated before)
    if(quiet != TRUE) setTxtProgressBar(pb, i)
    null_i(i, e, null.samps, occs.grp.tbl, mod.settings,
           clamp.directions.i)
  })
}
#######################################################
## 4. Calculate differences in model evaluation metrics
#######################################################
#Extract relevant model accuracy metrics &
#Calculate pairwise differences among empirical and null model treatments
#NOTE: WE NEED TO WORK IN A WAY TO MAKE THE TEST DIRECTIONAL
# assemble null evaluation statistics and take summaries
# nulls: one data frame per treatment of overall null-model metrics
# (AIC columns dropped -- not comparable across null iterations)
nulls <- lapply(null.i.list, function(x){
  null.stat <- dplyr::bind_rows(lapply(x, function(y) y$results)) %>%
    dplyr::select(-dplyr::contains("AIC"))
})
# nulls.grp: per-partition null-model metrics, kept for reference
nulls.grp <- lapply(null.i.list, function(x){
  null.stat <- dplyr::bind_rows(lapply(x, function(y) y$results.partitions))
})
if(eval.type %in% c("testing", "none")) {
  # no CV: compare on training statistics (columns like "<stat>.train")
  nulls <- lapply(nulls, function(x){
    dif <- x %>% dplyr::select(dplyr::contains(eval.stats) & dplyr::ends_with("train")) %>%
      tibble::rownames_to_column(var = "iter")
  })
  # label each treatment A, B, C, ... so pivot_wider makes one column per treatment
  for(x in 1:length(nulls)){
    nulls[[x]] <- nulls[[x]] %>% dplyr::mutate(model.treatment = rep(LETTERS[x], nrow(.)))
  }
  # NOTE(review): this assigns `stat`, but section 6 below reads `statistic`,
  # which is only defined in the else branch -- on this path section 6 would
  # fail with "object 'statistic' not found". Also, `nulls` is still an
  # unnamed list here, so names(nulls) is NULL and `stat` is character(0).
  # TODO confirm intent and align the variable names.
  stat <- grep(eval.stats, names(nulls), value = T)
  nulls <- nulls %>% dplyr::bind_rows(.)%>%
    tidyr::pivot_wider(names_from = model.treatment, values_from = grep(eval.stats, names(.), value = T))
  # get empirical model evaluation statistics for comparison
  emp.dif <- lapply(emp.mod.res.list, function(emp){
    emp %>% dplyr::select(dplyr::contains(eval.stats) & dplyr::ends_with("train"))
  })
  # one row, one column per treatment (A, B, ...), matching the nulls layout
  emp.dif <- emp.dif %>% dplyr::bind_rows(.) %>%
    dplyr::mutate(model.treatment = LETTERS[1:nrow(.)]) %>%
    tidyr::pivot_wider(names_from = model.treatment, values_from = grep(eval.stats, names(.), value = T))%>%
    tidyr::unnest(cols = colnames(.))
}else{
  # CV partitions exist: compare on cross-validation averages; auc/cbi use the
  # ".val.avg" suffix, omission rates use ".avg"
  nulls <- lapply(nulls, function(x){
    if("auc" %in% eval.stats | "cbi" %in% eval.stats){
      dif <- x %>% dplyr::select(paste0(eval.stats, ".val.avg")) %>% tibble::rownames_to_column(var = "iter")
    } else if("or.mtp" %in% eval.stats | "or.10p" %in% eval.stats){
      dif <- x %>% dplyr::select(paste0(eval.stats, ".avg")) %>% tibble::rownames_to_column(var = "iter")
    }
  })
  # label each treatment A, B, C, ... so pivot_wider makes one column per treatment
  for(x in 1:length(nulls)){
    nulls[[x]] <- nulls[[x]] %>% dplyr::mutate(model.treatment = rep(LETTERS[x], nrow(.)))
  }
  nulls <- nulls %>% dplyr::bind_rows(.)%>%
    tidyr::pivot_wider(names_from = model.treatment, values_from = grep(eval.stats, names(.), value = T))
  # record which statistic column is being compared (used in section 6 output)
  if("auc" %in% eval.stats | "cbi" %in% eval.stats){
    statistic <- paste0(eval.stats, ".val.avg")
  } else if ("or.mtp" %in% eval.stats | "or.10p" %in% eval.stats){
    statistic <- paste0(eval.stats, ".avg")
  }
  # get empirical model evaluation statistics for comparison
  emp.dif <- lapply(emp.mod.res.list, function(emp){
    if("auc" %in% eval.stats | "cbi" %in% eval.stats){
      emp <- emp %>% dplyr::select(paste0(eval.stats, ".val.avg"))
    } else if("or.mtp" %in% eval.stats | "or.10p" %in% eval.stats){
      emp <- emp %>% dplyr::select(paste0(eval.stats, ".avg"))
    }
  })
  # one row, one column per treatment (A, B, ...), matching the nulls layout
  emp.dif <- emp.dif %>% dplyr::bind_rows(.) %>%
    dplyr::mutate(model.treatment = LETTERS[1:nrow(.)]) %>%
    tidyr::pivot_wider(names_from = model.treatment, values_from = grep(eval.stats, names(.), value = T))%>%
    tidyr::unnest(cols = colnames(.))
}
#calculate model metrics pairwise differences among treatments
# nulls has one "iter" column plus one column per treatment (A, B, ...).
# Two treatments (ncol == 3): compute the single B-A difference directly;
# more treatments: compute every pairwise combination via combn().
# For a two-sided test we keep only the magnitude of each difference.
if(ncol(nulls) == 3){
  if(alternative == "two.sided"){
    nulls.dif <- nulls %>% dplyr::transmute(`null.B-A` = abs(B - A)) %>% dplyr::mutate(iter = 1:nrow(.))
  } else {
    nulls.dif <- nulls %>% dplyr::transmute(`null.B-A` = B - A) %>% dplyr::mutate(iter = 1:nrow(.))
  }
}else if(ncol(nulls) > 3){
  # BUG FIX: was `ncol(nulls > 3)` -- the comparison sat inside ncol(), making
  # the condition effectively "TRUE whenever nulls has columns"
  #Obtaining all pairwise model treatment combinations
  comb <- nulls %>% dplyr::select(-iter)
  comb <- combn(colnames(comb), 2)
  #calculating pairwise differences among treatments & merging in single dataframe
  nulls.dif.list <- list()
  for(i in 1:ncol(comb)){
    # column name records the direction of the difference, e.g. "null.B-A"
    name <- paste0("null.",comb[,i][2], "-", comb[,i][1])
    if(alternative == "two.sided"){
      nulls.dif.list[[i]] <- nulls %>%
        dplyr::transmute(abs(nulls[grep(comb[,i][2], colnames(.))] - nulls[grep(comb[,i][1], colnames(.))]))
      names(nulls.dif.list[[i]]) <- name
    }else{
      nulls.dif.list[[i]] <- nulls %>%
        dplyr::transmute(nulls[grep(comb[,i][2], colnames(.))] - nulls[grep(comb[,i][1], colnames(.))])
      names(nulls.dif.list[[i]]) <- name
    }
  }
  nulls.dif <- dplyr::bind_cols(nulls.dif.list)%>% dplyr::mutate(iter = 1:nrow(.))
}
#calculate model metrics pairwise differences among treatments
# emp.dif has one column per treatment (A, B, ...) and a single row.
# Mirror the null-difference computation above: direct B-A for two treatments,
# all combn() pairs otherwise; two-sided tests keep only magnitudes.
if(ncol(emp.dif) == 2){
  if(alternative == "two.sided"){
    emp.dif <- emp.dif %>% dplyr::transmute(`emp.B-A` = abs(B - A))
  } else {
    emp.dif <- emp.dif %>% dplyr::transmute(`emp.B-A` = B - A)
  }
}else if(ncol(emp.dif) > 2){
  # BUG FIX: was `ncol(emp.dif > 2)` -- the comparison sat inside ncol(),
  # making the condition effectively "TRUE whenever emp.dif has columns"
  #Obtaining all pairwise model treatment combinations
  comb <- emp.dif
  comb <- combn(colnames(comb), 2)
  #calculating pairwise differences among treatments & merging in single dataframe
  emp.dif.list <- list()
  if(alternative == "two.sided"){
    for(i in 1:ncol(comb)){
      # column name records the direction of the difference, e.g. "emp.B-A"
      name <- paste0("emp.",comb[,i][2], "-", comb[,i][1])
      emp.dif.list[[i]] <- emp.dif %>%
        dplyr::transmute(abs(emp.dif[grep(comb[,i][2], colnames(.))] - emp.dif[grep(comb[,i][1], colnames(.))]))
      names(emp.dif.list[[i]]) <- name
    }
  }else{
    for(i in 1:ncol(comb)){
      name <- paste0("emp.",comb[,i][2], "-", comb[,i][1])
      emp.dif.list[[i]] <- emp.dif %>%
        dplyr::transmute(emp.dif[grep(comb[,i][2], colnames(.))] - emp.dif[grep(comb[,i][1], colnames(.))])
      names(emp.dif.list[[i]]) <- name
    }
  }
  emp.dif <- dplyr::bind_cols(emp.dif.list)
}
# per-pair mean and SD of the null differences, used for z-scores in section 6
nulls.dif.avg <- nulls.dif %>% dplyr::select(-iter) %>% dplyr::summarise_all(mean, na.rm = T)
nulls.dif.sd <- nulls.dif %>% dplyr::select(-iter) %>% dplyr::summarise_all(sd, na.rm = T)
##################################################################
## 5. Estimate statistical differences among null model treatments
##################################################################
#Run a one-way repeated measures ANOVA on null model differences
#shaping data into the correct format for anova testing
# BUG FIX: nulls.l was only created inside `if(ncol(nulls) > 3)`, leaving it
# undefined for the two-treatment case (ncol == 3) and crashing anova_test()
# below with "object 'nulls.l' not found". The long format is valid for any
# number of treatments, so build it unconditionally.
nulls.l<- tidyr::pivot_longer(nulls, cols= -iter, names_to = "predictor",
                              values_to = eval.stats)%>%
  mutate(iter = as.factor(iter))
#using the rstatix package to implement repeated measures anova
# (iter is the within-subject id; predictor = treatment is the within factor)
anova.nulls <- rstatix::anova_test(data = nulls.l,
                                   dv = eval.stats,
                                   wid = iter,
                                   within = predictor)
#post-hoc tests to examine pairwise differences among predictor sets
# formula is built from the evaluation statistic, e.g. auc.val.avg ~ predictor
pairwise.mod <- as.formula(paste(eval.stats, "predictor", sep = "~"))
pairwise.nulls <- nulls.l %>%
  rstatix::pairwise_t_test(pairwise.mod, paired = TRUE,
                           p.adjust.method = "bonferroni",
                           alternative = alternative)
####################################################################
## 6. Estimate statistical differences between real and null models
####################################################################
#One sample t-test between empirical and null differences for each treatment combination
#NOTE: NEED TO CHECK HOW TO INCORPORATE DIRECTIONALITY.
# For each treatment pair: z-score = (empirical difference - mean null
# difference) / SD of null differences, converted to a p-value via pnorm().
empNull.stats.list <- list()
for(i in 1:ncol(emp.dif)){
  #create output dataframe
  empNull.stats.list[[i]] <- as.data.frame(matrix(nrow = 1, ncol = 7))
  names(empNull.stats.list[[i]]) <- c("treatment.dif","statistic","emp.dif.mean", "null.dif.mean", "null.dif.sd", "zscore", "pvalue")
  #fill in slots
  empNull.stats.list[[i]]$treatment.dif <- names(emp.dif[i])
  # NOTE(review): `statistic` is only defined in the CV branch of section 4;
  # on the testing/none path (which assigns `stat` instead) this line would
  # fail unless `statistic` is defined earlier in the function -- TODO confirm
  empNull.stats.list[[i]]$statistic <- statistic
  empNull.stats.list[[i]]$emp.dif.mean <- as.numeric(as.data.frame(emp.dif[i]))
  empNull.stats.list[[i]]$null.dif.mean <- as.numeric(as.data.frame(nulls.dif.avg[i]))
  empNull.stats.list[[i]]$null.dif.sd <- as.numeric(as.data.frame(nulls.dif.sd[i]))
  empNull.stats.list[[i]]$zscore <- as.numeric(as.data.frame(emp.dif[i] - nulls.dif.avg[i]) / nulls.dif.sd[i])
  # find statistics that need a positive pnorm, and those that need a negative pnorm
  # (`signs` is defined outside this chunk; presumably maps each statistic name
  # to +1 for higher-is-better metrics and -1 for lower-is-better -- verify)
  p.pos <- names(signs[sapply(signs, function(x) x == 1)])
  p.neg <- names(signs[sapply(signs, function(x) x == -1)])
  if(empNull.stats.list[[i]]$statistic %in% p.pos){
    # higher-is-better statistic: p = P(null difference >= observed)
    empNull.stats.list[[i]]$pvalue <- pnorm(empNull.stats.list[[i]]$zscore, lower.tail = FALSE)
  }else if(empNull.stats.list[[i]]$statistic %in% p.neg){
    # lower-is-better statistic: p = P(null difference <= observed)
    empNull.stats.list[[i]]$pvalue <- pnorm(empNull.stats.list[[i]]$zscore)
  }
}
#Consider removing bind_rows
empNull.stats <- dplyr::bind_rows(empNull.stats.list)
# return ANOVA across null treatments, post-hoc pairwise tests, and the
# empirical-vs-null comparison table
return(list(anova.nulls = anova.nulls, pair.nulls = pairwise.nulls,
            emp.nulls = empNull.stats))
}
# NOTE: removed trailing non-R text ("Subsets and Splits" web-page artifact)
# that had been appended after the closing brace; it was not valid R code.