blob_id
stringlengths 40
40
| directory_id
stringlengths 40
40
| path
stringlengths 2
327
| content_id
stringlengths 40
40
| detected_licenses
listlengths 0
91
| license_type
stringclasses 2
values | repo_name
stringlengths 5
134
| snapshot_id
stringlengths 40
40
| revision_id
stringlengths 40
40
| branch_name
stringclasses 46
values | visit_date
timestamp[us]date 2016-08-02 22:44:29
2023-09-06 08:39:28
| revision_date
timestamp[us]date 1977-08-08 00:00:00
2023-09-05 12:13:49
| committer_date
timestamp[us]date 1977-08-08 00:00:00
2023-09-05 12:13:49
| github_id
int64 19.4k
671M
⌀ | star_events_count
int64 0
40k
| fork_events_count
int64 0
32.4k
| gha_license_id
stringclasses 14
values | gha_event_created_at
timestamp[us]date 2012-06-21 16:39:19
2023-09-14 21:52:42
⌀ | gha_created_at
timestamp[us]date 2008-05-25 01:21:32
2023-06-28 13:19:12
⌀ | gha_language
stringclasses 60
values | src_encoding
stringclasses 24
values | language
stringclasses 1
value | is_vendor
bool 2
classes | is_generated
bool 2
classes | length_bytes
int64 7
9.18M
| extension
stringclasses 20
values | filename
stringlengths 1
141
| content
stringlengths 7
9.18M
|
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
1350c19783b91677f13dd909065e143ee505c453
|
5ac5920bc54c456669b9c1c1d21ce5d6221e27eb
|
/facebook/delphiFacebook/integration-tests/testthat/setup-run.R
|
33d016b1b473699ab40f1cc417bd7e845943cf55
|
[
"MIT"
] |
permissive
|
alexcoda/covidcast-indicators
|
50e646efba61fbfe14fd2e78c6cf4ffb1b9f1cf0
|
0c0ca18f38892c850565edf8bed9d2acaf234354
|
refs/heads/main
| 2023-08-13T04:26:36.413280
| 2021-09-16T18:16:08
| 2021-09-16T18:16:08
| 401,882,787
| 0
| 0
|
MIT
| 2021-09-01T00:41:47
| 2021-09-01T00:41:46
| null |
UTF-8
|
R
| false
| false
| 714
|
r
|
setup-run.R
|
library(tibble)
# Integration-test setup: run each pipeline once per checked-in parameter set
# so individual tests can inspect the generated outputs.
# NOTE(review): run_facebook() and run_contingency_tables_many_periods() come
# from the package under test; return values are discarded, so they appear to
# be run purely for their side effects -- confirm against the package docs.
run_facebook(relativize_params(read_params(test_path("params-test.json"))))
run_facebook(relativize_params(read_params(test_path("params-full.json"))))
# Aggregation spec: one row per output measure -- output name, input metric,
# grouping variables, compute function, and post-processing function
# (I = identity, i.e. no post-processing).
aggs <- tribble(
~name, ~metric, ~group_by, ~compute_fn, ~post_fn,
"freq_anxiety", "mh_anxious", c("gender"), compute_binary, I,
"pct_hh_fever", "hh_fever", c("gender"), compute_binary, I,
"pct_heartdisease", "comorbidheartdisease", c("gender"), compute_binary, I
)
# Contingency-table pipeline, again for both parameter sets.
params <- relativize_params(read_contingency_params(test_path("params-test.json")))
run_contingency_tables_many_periods(params, aggs)
params <- relativize_params(read_contingency_params(test_path("params-full.json")))
run_contingency_tables_many_periods(params, aggs)
|
9837470c2a47d786241aa84773a5bd932c00b684
|
bb9504bfabd84b5c2b662095b33188cf35264343
|
/man/predict.regression_model_fit.Rd
|
7edc93ac1fe5525d24f99d1f3d1a09e9a8e4f1ff
|
[] |
no_license
|
bbuchsbaum/rMVPA
|
5f34425859cc6f0a69f223992b43e12a2e9c7f11
|
bd178d4967a70c8766a606810f5de6f3e60b9c22
|
refs/heads/master
| 2023-05-25T16:08:02.323323
| 2023-05-01T01:43:15
| 2023-05-01T01:43:15
| 18,340,070
| 14
| 11
| null | null | null | null |
UTF-8
|
R
| false
| true
| 1,120
|
rd
|
predict.regression_model_fit.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/model_fit.R
\name{predict.regression_model_fit}
\alias{predict.regression_model_fit}
\title{Predict continuous values for a new dataset using a regression model}
\usage{
\method{predict}{regression_model_fit}(object, newdata, sub_indices = NULL, ...)
}
\arguments{
\item{object}{A fitted model object of class \code{regression_model_fit}.}
\item{newdata}{New data to predict on, either as a matrix or a \code{NeuroVec} or \code{NeuroSurfaceVector} object.}
\item{sub_indices}{A vector of indices used to subset rows of `newdata` (optional).}
\item{...}{Additional arguments to be passed to the underlying prediction function.}
}
\value{
A list containing predicted continuous values with class attributes "regression_prediction", "prediction", and "list".
}
\description{
This function predicts continuous values for new data using a fitted regression model.
}
\examples{
# Assuming `fitted_model` is a fitted model object of class `regression_model_fit`
new_data <- iris_dataset$test_data
predictions <- predict(fitted_model, new_data)
}
|
f7cc93cc7f8e437da2a87400636433d381d04718
|
82eb68f90a8a54a1a22a7443fbd1d565ee1c29de
|
/app_c4/server_0_9.R
|
62a71150c3bcb42521e1484696c44b3fbd2ae678
|
[] |
no_license
|
Ry2an/connect_4_chess
|
b68f794b7332f5792130f789bc34b414990c8b3f
|
b64b533ee087a15c7ed9bb94c1437ce2783d0cd6
|
refs/heads/master
| 2020-12-09T02:48:39.211789
| 2020-01-30T18:00:13
| 2020-01-30T18:00:13
| 233,167,934
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 6,996
|
r
|
server_0_9.R
|
####fake server####
library(ggplot2)
# Render the 6 x 7 playing area of the padded 12 x 13 board as a ggplot of
# colored dots.  Cell codes: -1 / 1 are the two players, -5 is an empty cell
# (drawn fully transparent), -9 is the border padding (never plotted because
# only rows/cols 4..9 x 4..10 of the board are read).
# Returns the ggplot object.
draw_map <- function(
  # Default: empty padded board, equal in value to the original literal.
  current_map_temp = local({
    m <- matrix(-9, nrow = 12, ncol = 13)
    m[4:9, 4:10] <- -5
    m
  })
) {
  # Flatten the playing area into long format.  expand.grid varies x_ax
  # fastest, matching the original inner-j / outer-i loop order.
  cells <- expand.grid(x_ax = 1:6, y_ax = 1:7)
  # Index the padded board with a two-column index matrix; values become
  # character so the manual color/alpha scales can match them by name.
  cells$value_temp <- as.character(
    current_map_temp[cbind(cells$x_ax + 3, cells$y_ax + 3)]
  )
  output_img <- ggplot(cells, aes(x = y_ax, y = x_ax, color = value_temp, alpha = value_temp)) +
    geom_point(shape = 19, size = 10) +
    scale_color_manual(values = c("-1" = "#1E9F94", "1" = "#E91773", "-5" = "#0B0838")) +
    scale_alpha_manual(values = c("-1" = 1, "1" = 1, "-5" = 0)) +
    theme(legend.position = "none") +
    theme(axis.title = element_blank())
  return(output_img)
}
# Return 1 if `vec_temp` contains a run of four or more consecutive equal
# values (a connect-four line), otherwise 0.
#
# Fixes over the original counter-scan:
#   * vectors shorter than 2 no longer error (2:length(vec_temp) counted
#     down, indexing past the end); anything shorter than 4 is simply 0.
#   * run detection uses the standard run-length encoding rle().
win_test <- function(vec_temp = c(0,0,0,0,0,0,0)){
  if (length(vec_temp) < 4) {
    return(0)
  }
  if (max(rle(vec_temp)$lengths) >= 4) {
    return(1)
  }
  return(0)
}
# Check whether playing-field position (x, y) -- padded board cell
# [x + 3, y + 3] -- lies on a line of four or more equal values in any of the
# four directions.  Returns 1 on a win, 0 otherwise.
#
# Fixes over the original:
#   * the unused `win_1` variable is removed;
#   * win_test() is evaluated exactly once per direction (it was computed
#     twice for every direction in the original condition).
find_array <- function(
  x = 1, y = 1,
  # Default: empty padded board, equal in value to the original literal
  # (-9 border three cells thick, -5 empty playing cells).
  current_map_temp = local({
    m <- matrix(-9, nrow = 12, ncol = 13)
    m[4:9, 4:10] <- -5
    m
  })
){
  # Seven-cell windows centered on the padded cell [x + 3, y + 3].
  # Row through the cell (constant row index).
  vec_1 <- current_map_temp[x + 3, y:(y + 6)]
  # Column through the cell (constant column index).
  vec_2 <- current_map_temp[x:(x + 6), y + 3]
  # Anti-diagonal: row index decreasing while column index increases.
  vec_3 <- current_map_temp[cbind((x + 6):x, y:(y + 6))]
  # Main diagonal: row and column indices increasing together.
  vec_4 <- current_map_temp[cbind(x:(x + 6), y:(y + 6))]
  for (vec in list(vec_1, vec_2, vec_3, vec_4)) {
    if (win_test(vec) == 1) {
      return(1)
    }
  }
  return(0)
}
# Drop a piece of `color_temp` (-1 or 1) into playing-field column
# `col_num_temp`.  The piece lands in the first empty cell (value -5)
# scanned from playing-field row 1 to 6.  Game state is stored in the
# border cells of column 13:
#   [12, 13] next player, [11, 13] drop-success flag, [10, 13] win flag.
# The updated board is persisted via save_map(); always returns 1.
#
# Fix over the original: the failure flags were rewritten on every
# filled-cell iteration of the loop; they are now set once, after the
# scan, when no empty cell was found.  Final board state is identical.
add_dot <- function(col_num_temp = 1,
  color_temp = -1,
  # Default: empty padded board, equal in value to the original literal.
  current_map_temp = local({
    m <- matrix(-9, nrow = 12, ncol = 13)
    m[4:9, 4:10] <- -5
    m
  })
){
  add_success <- 0
  win_temp <- 0
  for (i in 1:6) {
    if (current_map_temp[i + 3, col_num_temp + 3] == -5) {
      current_map_temp[i + 3, col_num_temp + 3] <- color_temp
      add_success <- 1
      win_temp <- find_array(x = i, y = col_num_temp, current_map_temp = current_map_temp)
      # Hand the turn to the other player and record the successful drop.
      current_map_temp[12, 13] <- color_temp * (-1)
      current_map_temp[11, 13] <- 1
      break
    }
  }
  if (add_success == 0) {
    # Column full: same player keeps the turn, drop flagged as failed.
    current_map_temp[12, 13] <- color_temp
    current_map_temp[11, 13] <- 0
  }
  current_map_temp[10, 13] <- win_temp
  save_map(current_map_temp)
  return(1)
}
# Reset the shared game to a fresh board and persist it to main_map.csv.
# The board is the 12 x 13 padded matrix: -9 border, -5 empty playing
# cells (rows 4-9, cols 4-10), with state flags in column 13:
# win flag 0 at [10, 13], drop-success 1 at [11, 13], next player -1 at
# [12, 13].  Always returns 1.
restart <- function(){
  current_map_temp <- matrix(-9, nrow = 12, ncol = 13)
  current_map_temp[4:9, 4:10] <- -5
  current_map_temp[10, 13] <- 0   # win flag
  current_map_temp[11, 13] <- 1   # drop-success flag
  current_map_temp[12, 13] <- -1  # next player
  write.csv(current_map_temp, file = "main_map.csv", row.names = FALSE)
  return(1)
}
# Read the shared game-state board back from main_map.csv.
# NOTE(review): read.csv() returns a data.frame, not a matrix; callers
# indexing the result like the original matrix rely on data.frame
# [row, col] indexing behaving equivalently -- confirm at the call sites.
load_map <- function(){
return(read.csv(file = "main_map.csv", header = T))
}
# Persist the game-state board to main_map.csv so the other session can
# pick it up.  Always returns 1.
save_map <- function(
  # Default: empty padded board, equal in value to the original literal
  # (-9 border three cells thick, -5 empty playing cells).
  current_map_temp = local({
    m <- matrix(-9, nrow = 12, ncol = 13)
    m[4:9, 4:10] <- -5
    m
  })
){
  write.csv(current_map_temp, file = "main_map.csv", row.names = FALSE)
  return(1)
}
|
2647e2e2dc3546e5e341a4b031fad824a98cfe1d
|
bd14617e30d84c330d25b5b797d76b45c701bb18
|
/R/cluspop.nb.R
|
ecca7f1a52c663e53225d1c5916f4b236fca8815
|
[] |
no_license
|
cran/prabclus
|
28747636b75a4917741451fa88dbc93372963249
|
ab34ba6e8f1c67fb3ad2b7fa27b783315db71a4c
|
refs/heads/master
| 2021-01-23T18:56:39.980448
| 2020-01-08T22:00:39
| 2020-01-08T22:00:39
| 17,698,701
| 1
| 1
| null | null | null | null |
UTF-8
|
R
| false
| false
| 11,116
|
r
|
cluspop.nb.R
|
"cluspop.nb" <-
function(neighbors,p.nb=0.5,n.species,clus.specs,reg.group,
grouppf=10, n.regions=length(neighbors),
vector.species=rep(1,n.species),
pdf.regions=rep(1/n.regions,n.regions),count=TRUE,
pdfnb=FALSE){
# print(vector.species)
out <- matrix(0,ncol=n.species,nrow=n.regions)
if (pdfnb)
{
for (i in 1:n.regions)
pdf.regions[i] <- pdf.regions[i]/max(1,length(neighbors[[i]]))
pdf.regions <- pdf.regions/sum(pdf.regions)
}
pdf.group <- pdf.complement <- pdf.regions
pdf.groupstart <- pdf.cstart <- rep(0,n.regions)
sp <- spc <- spr <- 0
prob.group <- sum(pdf.group[reg.group])
pdf.complement[reg.group] <- pdf.regions[reg.group]/grouppf
spc <- sum(pdf.complement[reg.group])
sp <- spc*grouppf
reg.c <- (1:n.regions)[-reg.group]
pdf.group[reg.c] <- pdf.regions[reg.c]/grouppf
pdf.cstart[reg.c] <- pdf.regions[reg.c]/(1-sp)
pdf.complement[reg.c] <- pdf.cstart[reg.c]*(1-spc)
spr <- sum(pdf.group[reg.c])
pdf.groupstart[reg.group] <- pdf.regions[reg.group]/sp
pdf.group[reg.group] <- pdf.groupstart[reg.group]*(1-spr)
cdf.local <- cdf.regions <- cdf.groupstart <- cdf.cstart <- c()
for (i in 1:n.regions){
cdf.regions[i] <- sum(pdf.regions[1:i])
cdf.groupstart[i] <- sum(pdf.groupstart[1:i])
cdf.cstart[i] <- sum(pdf.cstart[1:i])
}
# print(pdf.groupstart)
# print(cdf.groupstart)
# regular species
for (i in 1:(n.species-clus.specs))
{
if(count)
cat("Species ",i,"\n")
spec.regind <- spec.neighb <- rep(FALSE,n.regions)
nsize <- vector.species[1+floor(length(vector.species)*runif(1))]
# print(nsize)
r1 <- runif(1)
reg <- 1+sum(r1>cdf.regions)
# print(reg)
spec.regind[reg] <- TRUE
for (k in neighbors[[reg]])
spec.neighb[k] <- TRUE
out[reg,i] <- 1
if(nsize>1)
for (j in 2:nsize)
if ((sum(spec.neighb)==0) | (sum(pdf.regions[spec.neighb])<1e-8) |
(sum(spec.neighb | spec.regind)==n.regions))
# no further neighbors or only neighbors, i.e., next region is drawn from all
# remaining
{
nreg <- sum(!spec.regind)
pdf.local <- pdf.regions[!spec.regind]
pdf.local <- pdf.local/sum(pdf.local)
for (l in 1:nreg)
cdf.local[l] <- sum(pdf.local[1:l])
# cat(nreg, "\n")
r1 <- runif(1)
zz <- 1+sum(r1>cdf.local[1:nreg])
# cat(zz,"\n")
reg <- (1:n.regions)[!spec.regind][zz]
# cat("reg, all ",reg,"\n")
spec.regind[reg] <- TRUE
spec.neighb[reg] <- FALSE
for (k in neighbors[[reg]])
spec.neighb[k] <- !(spec.regind[k])
out[reg,i] <- 1
}
else
if (runif(1)<p.nb)
# next region is drawn from non-neighbors (jump)
{
regs <- !(spec.regind | spec.neighb)
nreg <- sum(regs)
pdf.local <- pdf.regions[regs]
pdf.local <- pdf.local/sum(pdf.local)
for (l in 1:nreg)
cdf.local[l] <- sum(pdf.local[1:l])
r1 <- runif(1)
zz <- 1+sum(r1>cdf.local[1:nreg])
# cat(nreg," ",zz,"\n")
reg <- (1:n.regions)[regs][zz]
# cat("reg, jump ",reg,"\n")
spec.regind[reg] <- TRUE
for (k in neighbors[[reg]])
spec.neighb[k] <- !(spec.regind[k])
out[reg,i] <- 1
# if (sum(out[,i])!=sum(spec.regind))
# cat("error: sum= ",sum(out[,i])," ind=",sum(spec.regind),"\n")
}
else
{
# next region is drawn from neighbors
nreg <- sum(spec.neighb)
pdf.local <- pdf.regions[spec.neighb]
# print(pdf.local)
pdf.local <- pdf.local/sum(pdf.local)
for (l in 1:nreg)
cdf.local[l] <- sum(pdf.local[1:l])
# print(cdf.local)
r1 <- runif(1)
zz <- 1+sum(r1>cdf.local[1:nreg])
# cat("nreg= ",nreg," zz =",zz,"\n")
reg <- (1:n.regions)[spec.neighb][zz]
# cat("reg, neighbor ",reg,"\n")
spec.regind[reg] <- TRUE
spec.neighb[reg] <- FALSE
for (k in neighbors[[reg]])
spec.neighb[k] <- !(spec.regind[k])
out[reg,i] <- 1
# if (sum(out[,i])!=sum(spec.regind))
# cat("error: sum= ",sum(out[,i])," ind=",sum(spec.regind),"\n")
}
# end if nsize>1 for j
# cat("out=",sum(out[,i])," ind=",sum(spec.regind)," nb=",sum(spec.neighb),
# " nni=",sum(!(spec.regind | spec.neighb)),"\n")
} # for i - regular species
# species from reg.group
for (i in 1:clus.specs)
{
ind <-i+n.species-clus.specs
if(count)
cat("Clustered species ",ind,"\n")
groupind <- runif(1)<prob.group
if (groupind){
spec.regind <- spec.neighb <- rep(FALSE,n.regions)
nsize <- vector.species[1+floor(length(vector.species)*runif(1))]
# print(nsize)
r1 <- runif(1)
reg <- 1+sum(r1>cdf.groupstart)
# print(reg)
spec.regind[reg] <- TRUE
for (k in neighbors[[reg]])
spec.neighb[k] <- TRUE
out[reg,ind] <- 1
if(nsize>1)
for (j in 2:nsize)
if ((sum(spec.neighb)==0) | (sum(pdf.group[spec.neighb])<1e-8) |
(sum(spec.neighb | spec.regind)==n.regions))
# no further neighbors or only neighbors, i.e., next region is drawn from all
# remaining
{
nreg <- sum(!spec.regind)
pdf.local <- pdf.group[!spec.regind]
pdf.local <- pdf.local/sum(pdf.local)
for (l in 1:nreg)
cdf.local[l] <- sum(pdf.local[1:l])
# cat(nreg, "\n")
r1 <- runif(1)
zz <- 1+sum(r1>cdf.local[1:nreg])
# cat(zz,"\n")
reg <- (1:n.regions)[!spec.regind][zz]
# cat("reg, all ",reg,"\n")
spec.regind[reg] <- TRUE
spec.neighb[reg] <- FALSE
for (k in neighbors[[reg]])
spec.neighb[k] <- !(spec.regind[k])
out[reg,ind] <- 1
}
else
if (runif(1)<p.nb)
# next region is drawn from non-neighbors (jump)
{
regs <- !(spec.regind | spec.neighb)
nreg <- sum(regs)
pdf.local <- pdf.group[regs]
pdf.local <- pdf.local/sum(pdf.local)
for (l in 1:nreg)
cdf.local[l] <- sum(pdf.local[1:l])
r1 <- runif(1)
zz <- 1+sum(r1>cdf.local[1:nreg])
# cat(nreg," ",zz,"\n")
reg <- (1:n.regions)[regs][zz]
# cat("reg, jump ",reg,"\n")
spec.regind[reg] <- TRUE
for (k in neighbors[[reg]])
spec.neighb[k] <- !(spec.regind[k])
out[reg,ind] <- 1
# if (sum(out[,i])!=sum(spec.regind))
# cat("error: sum= ",sum(out[,i])," ind=",sum(spec.regind),"\n")
}
else
{
# next region is drawn from neighbors
nreg <- sum(spec.neighb)
pdf.local <- pdf.group[spec.neighb]
# print(pdf.local)
pdf.local <- pdf.local/sum(pdf.local)
for (l in 1:nreg)
cdf.local[l] <- sum(pdf.local[1:l])
# print(cdf.local)
r1 <- runif(1)
zz <- 1+sum(r1>cdf.local[1:nreg])
# cat("nreg= ",nreg," zz =",zz,"\n")
reg <- (1:n.regions)[spec.neighb][zz]
# cat("reg, neighbor ",reg,"\n")
spec.regind[reg] <- TRUE
spec.neighb[reg] <- FALSE
for (k in neighbors[[reg]])
spec.neighb[k] <- !(spec.regind[k])
out[reg,ind] <- 1
# if (sum(out[,i])!=sum(spec.regind))
# cat("error: sum= ",sum(out[,i])," ind=",sum(spec.regind),"\n")
}
# end if nsize>1 for j
# cat("out=",sum(out[,i])," ind=",sum(spec.regind)," nb=",sum(spec.neighb),
# " nni=",sum(!(spec.regind | spec.neighb)),"\n")
} # if groupind
# species from complement
else{
spec.regind <- spec.neighb <- rep(FALSE,n.regions)
nsize <- vector.species[1+floor(length(vector.species)*runif(1))]
# print(nsize)
r1 <- runif(1)
reg <- 1+sum(r1>cdf.cstart)
# print(reg)
spec.regind[reg] <- TRUE
for (k in neighbors[[reg]])
spec.neighb[k] <- TRUE
out[reg,ind] <- 1
if(nsize>1)
for (j in 2:nsize)
if ((sum(spec.neighb)==0) | (sum(pdf.complement[spec.neighb])<1e-8) |
(sum(spec.neighb | spec.regind)==n.regions))
# no further neighbors or only neighbors, i.e., next region is drawn from all
# remaining
{
nreg <- sum(!spec.regind)
pdf.local <- pdf.complement[!spec.regind]
pdf.local <- pdf.local/sum(pdf.local)
for (l in 1:nreg)
cdf.local[l] <- sum(pdf.local[1:l])
# cat(nreg, "\n")
r1 <- runif(1)
zz <- 1+sum(r1>cdf.local[1:nreg])
# cat(zz,"\n")
reg <- (1:n.regions)[!spec.regind][zz]
# cat("reg, all ",reg,"\n")
spec.regind[reg] <- TRUE
spec.neighb[reg] <- FALSE
for (k in neighbors[[reg]])
spec.neighb[k] <- !(spec.regind[k])
out[reg,ind] <- 1
}
else
if (runif(1)<p.nb)
# next region is drawn from non-neighbors (jump)
{
regs <- !(spec.regind | spec.neighb)
nreg <- sum(regs)
pdf.local <- pdf.complement[regs]
pdf.local <- pdf.local/sum(pdf.local)
for (l in 1:nreg)
cdf.local[l] <- sum(pdf.local[1:l])
r1 <- runif(1)
zz <- 1+sum(r1>cdf.local[1:nreg])
# cat(nreg," ",zz,"\n")
reg <- (1:n.regions)[regs][zz]
# cat("reg, jump ",reg,"\n")
spec.regind[reg] <- TRUE
for (k in neighbors[[reg]])
spec.neighb[k] <- !(spec.regind[k])
out[reg,ind] <- 1
# if (sum(out[,i])!=sum(spec.regind))
# cat("error: sum= ",sum(out[,i])," ind=",sum(spec.regind),"\n")
}
else
{
# next region is drawn from neighbors
nreg <- sum(spec.neighb)
pdf.local <- pdf.complement[spec.neighb]
# print(pdf.local)
pdf.local <- pdf.local/sum(pdf.local)
for (l in 1:nreg)
cdf.local[l] <- sum(pdf.local[1:l])
# print(cdf.local)
r1 <- runif(1)
zz <- 1+sum(r1>cdf.local[1:nreg])
# cat("nreg= ",nreg," zz =",zz,"\n")
reg <- (1:n.regions)[spec.neighb][zz]
# cat("reg, neighbor ",reg,"\n")
spec.regind[reg] <- TRUE
spec.neighb[reg] <- FALSE
for (k in neighbors[[reg]])
spec.neighb[k] <- !(spec.regind[k])
out[reg,ind] <- 1
# if (sum(out[,i])!=sum(spec.regind))
# cat("error: sum= ",sum(out[,i])," ind=",sum(spec.regind),"\n")
}
# end if nsize>1 for j
# cat("out=",sum(out[,i])," ind=",sum(spec.regind)," nb=",sum(spec.neighb),
# " nni=",sum(!(spec.regind | spec.neighb)),"\n")
} # else (complement)
} # for i
out
}
|
4e8d78cc383fa023e359301b5053446f1ca7393c
|
e2e6fe9504c3e5fc6af558c45234bbb9eabfd9d1
|
/tests/testthat/test_getAnimalAlleles.R
|
2d7e196e0b21218e2585d6daaeb09092bc50221b
|
[
"MIT"
] |
permissive
|
rmsharp/parentfindr
|
2ebca196d8ff9627874738cb469d4f93807415e2
|
052c42a50f835fe1321dd93dc4cd21b844218e73
|
refs/heads/master
| 2023-01-13T23:04:28.613212
| 2020-11-23T02:44:23
| 2020-11-23T02:44:23
| 266,241,084
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,244
|
r
|
test_getAnimalAlleles.R
|
#' Copyright(c) 2020 R. Mark Sharp
#' This file is part of parentfindr
library(testthat)
context("getAnimalAlleles")
library(parentfindr)
library(stringi)
# Both fixture files (tab-delimited text and Excel) contain the same 31
# animals, so the CSV and Excel tests assert the same expectations.
# FIX: the first description previously read "extracts data CSV file"
# (missing "from"), inconsistent with the parallel Excel test below.
test_that("getAnimalAlleles correctly extracts data from CSV file", {
alleleFile <- system.file("testdata", "snp-animal-alleles.txt",
package = "parentfindr")
animalAlleles <- getAnimalAlleles(alleleFile, dateType = "YYYYMMDD")
expect_equal(animalAlleles[[1]]$refId, "43336")
expect_equal(animalAlleles[[31]][["refId"]], "48799")
expect_equal(length(animalAlleles), 31)
expect_true(all(is.na(animalAlleles[[1]]$alleles[["AGTR11303"]])))
expect_equal(animalAlleles[[1]]$alleles[["AK53266"]], c("1", "1"))
expect_equal(animalAlleles[[3]]$alleles[["AKAP33344"]],
c("1", "1"))
expect_equal(animalAlleles[[31]]$alleles[["X98874013.14.D8YOWMI02JQVIE"]],
c("1", "1"))
expect_equal(animalAlleles[[1]]$birthDate, as.Date("2008-04-28"))
})
test_that("getAnimalAlleles correctly extracts data from Excel file", {
alleleFile <- system.file("testdata", "snp-animal-alleles.xlsx",
package = "parentfindr")
animalAlleles <- getAnimalAlleles(alleleFile, dateType = "YYYYMMDD")
expect_equal(animalAlleles[[1]]$refId, "43336")
expect_equal(animalAlleles[[31]][["refId"]], "48799")
expect_equal(length(animalAlleles), 31)
expect_true(all(is.na(animalAlleles[[1]]$alleles[["AGTR11303"]])))
expect_equal(animalAlleles[[1]]$alleles[["AK53266"]], c("1", "1"))
expect_equal(animalAlleles[[3]]$alleles[["AKAP33344"]],
c("1", "1"))
expect_equal(animalAlleles[[31]]$alleles[["X98874013.14.D8YOWMI02JQVIE"]],
c("1", "1"))
expect_equal(animalAlleles[[1]]$birthDate, as.Date("2008-04-28"))
})
# system.file() returns "" for a file that does not exist in the package;
# getAnimalAlleles() is expected to report that clearly rather than crash.
test_that(
paste0("getAnimalAlleles provides informative error messages and ",
"stop when a path to a non-existent file is provided."), {
alleleFile <- system.file("testdata", "snp-animal-alleles.csv",
package = "parentfindr")
expect_error(getAnimalAlleles(alleleFile, dateType = "YYYYMMDD"),
paste0("The animal allele file cannot be found. ",
"The file name provided is"))
})
|
981fa7750e9872926ee889fd0c942f91c463f1e9
|
05de83161571e4d5c080f0342d07f213984804e3
|
/analyses/empirical_considerations_polarized/src/stochastic_map_sim.R
|
add49e8e5156d2541a195b79e681e2b27a703cd1
|
[] |
no_license
|
mikeryanmay/marattiales_supplemental
|
22b470aa58f687aecd78861fe48470826245f2fb
|
8c38c4b3075c9f864de37381576be51727d3e7d9
|
refs/heads/master
| 2023-03-19T10:47:56.300246
| 2020-09-25T15:52:05
| 2020-09-25T15:52:05
| 292,940,697
| 1
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 7,955
|
r
|
stochastic_map_sim.R
|
# Command-line interface ------------------------------------------------
# Required flags: --data, --samples, --output.
# Optional flags: --nsim (default 1000), --ncores (default 1),
#                 --overwrite true|false (default false).
args = commandArgs(trailingOnly = TRUE)
# Example invocation (kept for reference):
# args = c("--data", "data",
#          "--samples", "output/tree_model_variable_rate_fbd_mixture_diversification_ingroup_sub_model_GTR_I_G_mol_clock_UCLN_morph_mat_F81_MIX_G_morph_rel_unlinked_morph_clock_linked/",
#          "--output", "output_stoch_map/tree_model_variable_rate_fbd_mixture_diversification_ingroup_sub_model_GTR_I_G_mol_clock_UCLN_morph_mat_F81_MIX_G_morph_rel_unlinked_morph_clock_linked/",
#          "--nsim", 1000)
# get the data directory
if ( "--data" %in% args ) {
  data_dir = args[which(args == "--data") + 1]
} else {
  stop("Must provide a --data argument!")
}
# get the sample directory
# FIX: the error message previously said "--sample" although the flag
# actually parsed is "--samples".
if ( "--samples" %in% args ) {
  sample_dir = args[which(args == "--samples") + 1]
} else {
  stop("Must provide a --samples argument!")
}
# get the output directory
if ( "--output" %in% args ) {
  output_dir = args[which(args == "--output") + 1]
} else {
  stop("Must provide an --output argument!")
}
# the number of simulations
nsim = 1000
if ( "--nsim" %in% args ) {
  nsim = as.numeric(args[which(args == "--nsim") + 1])
}
ncores = 1
if ( "--ncores" %in% args ) {
  ncores = as.numeric(args[which(args == "--ncores") + 1])
}
# get the overwrite flag (FIX: "Invalided" typo in the error message)
overwrite = FALSE
if ( "--overwrite" %in% args ) {
  if ( tolower(args[which(args == "--overwrite") + 1]) == "true" ) {
    overwrite = TRUE
  } else if ( tolower(args[which(args == "--overwrite") + 1]) == "false" ) {
    overwrite = FALSE
  } else {
    stop("Invalid --overwrite value!")
  }
}
cat("Stochastic mapping for:\n ", sample_dir,"\n", sep="")
# check for tree files
tree_fn = paste0(sample_dir, "/morph_phylogram_combined_MCC.tre")
tt_fn = paste0(sample_dir, "/tree_combined_MCC.tre")
if ( file.exists(tree_fn) == FALSE | file.exists(tt_fn) == FALSE ) {
cat("Tree files do not exist.")
q()
}
# install packages
cat("Checking for packages.\n")
if ( "ape" %in% rownames(installed.packages()) == FALSE ) {
install.packages("ape")
}
library(ape)
if ( "phytools" %in% rownames(installed.packages()) == FALSE ) {
install.packages("phytools")
}
library(phytools)
if ( "phangorn" %in% rownames(installed.packages()) == FALSE ) {
install.packages("phangorn")
}
library(phangorn)
if ( "rncl" %in% rownames(installed.packages()) == FALSE ) {
install.packages("rncl")
}
library(rncl)
if ( "stringr" %in% rownames(installed.packages()) == FALSE ) {
install.packages("stringr")
}
if ( "parallel" %in% rownames(installed.packages()) == FALSE ) {
install.packages("parallel")
}
library(parallel)
if ( "plotrix" %in% rownames(installed.packages()) == FALSE ) {
install.packages("plotrix")
}
library(plotrix)
if ( "RColorBrewer" %in% rownames(installed.packages()) == FALSE ) {
install.packages("RColorBrewer")
}
library(RColorBrewer)
cbPalette <- c("#E69F00", "#56B4E9", "#009E73", "#F0E442", "#999999", "#D55E00", "#0072B2", "#CC79A7")
# source the appropriate scripts (provide simulate_stochastic_map,
# make_tree_bifurcating, readDataAsProbs, plot_simmap used below)
source("src/pps_functions.R")
source("src/plot_simmap.R")
# Tip-label translation table: names(rename) are labels in the time tree,
# values are the labels used in the morphology tree / data.
rename = c(
"Marattia_asiatica" = "Marattiopsis_asiatica",
"Marattia_anglica" = "Marattiopsis_anglica",
"Marattia_aganzhenensis" = "Marattiopsis_aganzhenensis",
"Scolecopteris_alta_A" = "Scolecopteris_alta",
"Scolecopteris_antarctica_L" = "Scolecopteris_antarctica",
"Scolecopteris_calicifolia_L" = "Scolecopteris_calicifolia",
"Scolecopteris_charma_O" = "Scolecopteris_charma",
"Scolecopteris_fragilis_L" = "Scolecopteris_fragilis",
"Scolecopteris_incisifolia_L" = "Scolecopteris_incisifolia",
"Scolecopteris_iowensis_O" = "Scolecopteris_iowensis",
"Scolecopteris_latifolia_L" = "Scolecopteris_latifolia",
"Scolecopteris_majopsis_O" = "Scolecopteris_majopsis",
"Scolecopteris_mamayi_L" = "Scolecopteris_mamayi",
"Scolecopteris_minor_M" = "Scolecopteris_minor",
"Scolecopteris_monothrix_L" = "Scolecopteris_monothrix",
"Scolecopteris_nigra_A" = "Scolecopteris_nigra",
"Scolecopteris_oliveri_O" = "Scolecopteris_oliveri",
"Scolecopteris_parkerensis_L" = "Scolecopteris_parkerensis",
"Scolecopteris_saharaensis_M" = "Scolecopteris_saharaensis",
"Scolecopteris_vallumii_L" = "Scolecopteris_vallumii"
)
# read tree and samples
tree = make_tree_bifurcating(read.nexus(tree_fn))
time_tree = make_tree_bifurcating(read.nexus(tt_fn))
# drop sampled ancestors (tips attached by zero-length branches)
time_tree = drop.tip(time_tree, tree$tip.label[tree$edge[tree$edge.length == 0,2]])
tree = drop.tip(tree, tree$tip.label[tree$edge[tree$edge.length == 0,2]])
# rename the time tree so its tip labels match the morphology tree
for(i in 1:length(rename)) {
this_rename = rename[i]
time_tree$tip.label[time_tree$tip.label == names(this_rename)] = this_rename
}
# read observed data
obs_data = readDataAsProbs(paste0(data_dir, "/morpho.nex"))
# read the character information
char_data = read.csv(paste0(data_dir, "/char_table.tsv"), check.names=FALSE, sep="\t", stringsAsFactors=FALSE)
# read the parameters and average them over the posterior samples
# (the nested paste0 is redundant but harmless)
param_files = paste0(paste0(sample_dir, "/params_combined.log"))
samples = do.call(rbind, lapply(param_files, read.table, header=TRUE, sep="\t", stringsAsFactors=TRUE, check.names=FALSE))
samples_mean = colMeans(samples)
# compute the number of partitions; obs_data is assumed to be named by the
# number of character states per partition
num_partitions = length(obs_data)
num_states = as.numeric(names(obs_data))
# create the output directory
dir.create(output_dir, recursive=TRUE, showWarnings=FALSE)
# One stochastic-mapping job per data partition, run in parallel; each job
# writes one PDF per character as a side effect.
mclapply(1:num_partitions, function(i){
# simulate stochastic maps for this partition under the mean parameters
sims = simulate_stochastic_map(obs_data[[i]], tree, samples_mean, part=i, num_states=num_states[i], nsims=nsim)
# characters belonging to this partition (matched by state count)
this_char_data = char_data[char_data$num_states == num_states[i],]
# state labels 0..k-1; note operator precedence: 1:num_states[i]-1 is
# (1:k) - 1, not 1:(k-1)
labels = as.character(1:num_states[i]-1)
# colors = brewer.pal(9,"Set1")[1:num_states[i]]
colors = cbPalette[1:num_states[i]]
for(j in 1:nrow(this_char_data)) {
# get this character
this_char_name = this_char_data$names[j]
this_char_title = gsub("_"," ", this_char_name)
this_char_id = this_char_data$index[j]
these_sims = sims[[j]]
state_labels = strsplit(this_char_data$states[j],",")[[1]]
# plot this character to its own PDF
this_fig_name = paste0(output_dir,"/stoch_map_", this_char_id,"_",this_char_name,".pdf")
pdf(this_fig_name)
par(mar=c(2,0,1,0), lend=2)
plot_simmap(time_tree, tree, obs_data[[i]][,j,], these_sims, labels, colors=colors, nt=2001, show.tip.label=TRUE, lwd=2, edge.width=3, lend=2, pie_size=3.5, label.offset=10, label.cex=0.5)
legend("bottomleft", legend=gsub("_"," ",state_labels), fill=colors, bty="n")
mtext(this_char_title)
axisPhylo()
dev.off()
}
}, mc.cores=ncores, mc.preschedule=FALSE)
# for(i in 1:num_partitions) {
#
# # simulate maps
# sims = simulate_stochastic_map(obs_data[[i]], tree, samples_mean, part=i, num_states=num_states[i], nsims=nsim)
#
# # get char info
# this_char_data = char_data[char_data$num_states == num_states[i],]
#
# # make colors
# labels = as.character(1:num_states[i]-1)
# colors = brewer.pal(9,"Set1")[1:num_states[i]]
#
# for(j in 1:nrow(this_char_data)) {
#
# # get this character
# this_char_name = this_char_data$names[j]
# this_char_title = gsub("_"," ", this_char_name)
# this_char_id = this_char_data$index[j]
# these_sims = sims[[j]]
# state_labels = strsplit(this_char_data$states[j],",")[[1]]
#
# # plot this character
# this_fig_name = paste0(output_dir,"/stoch_map_", this_char_id,"_",this_char_name,".pdf")
#
# pdf(this_fig_name)
# par(mar=c(2,0,1,0), lend=2)
# plot_simmap(time_tree, tree, obs_data[[i]][,j,], these_sims, labels, colors=colors, nt=2001, show.tip.label=TRUE, lwd=2, edge.width=3, lend=2, pie_size=3.5, label.offset=10, label.cex=0.5)
# legend("bottomleft", legend=gsub("_"," ",state_labels), fill=colors, bty="n")
# mtext(this_char_title)
# axisPhylo()
# dev.off()
#
# }
#
# }
|
53ed20887b06bdc734e67a0fd2df0aa5a3e364e9
|
8617936d92c16346a0eb263aa8f249928a9492c0
|
/server.R
|
b86092d87e4e333fa838ce81d6b43020f4a387ef
|
[] |
no_license
|
qntkhvn/epl_goal_time
|
25acf12f2dafde60613bb3ba425c559f243242d0
|
e03ca60146a89a1c85928d65aab6d4801175b547
|
refs/heads/main
| 2023-07-17T16:29:40.672892
| 2021-08-17T00:47:04
| 2021-08-17T00:47:04
| 396,600,653
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,601
|
r
|
server.R
|
library(tidyverse)
# Shiny server for the EPL goal-time app.  Loaded once per R process:
# read the goal-by-goal data, resolve which club scored each goal, and turn
# the minute column into an ordered factor so stoppage-time minutes
# ("45+1".."45+7", "90+1".."90+12") sort after their regular minute.
data <- read_csv("goal_min.csv") %>%
select(-goal_score) %>%
mutate(goal_club = ifelse(goal_club == "home", home_club, away_club),
minute = factor(minute, levels = c(1:45, paste("45", 1:7, sep = "+"),
46:90, paste("90", 1:12, sep = "+"))))
# group_by(season, matchweek, home_club, away_club) %>%
# arrange(minute, .by_group = TRUE) %>%
# mutate(is_home = cumsum(as.numeric(home_club == goal_club)),
# is_away = cumsum(as.numeric(away_club == goal_club))) %>%
# filter(str_detect(minute, "90")) %>%
# add_count(season, matchweek, home_club, away_club)
# The file's value is this anonymous server function (shiny sources it via
# server.R); it filters the table by the three dropdowns and serves a CSV
# download of the (unfiltered) dataset.
function(input, output) {
datasetInput <- reactive(data)
output$table <- DT::renderDataTable(DT::datatable({
# Each filter applies only when its dropdown is not "All"; the local
# `data` shadows the global copy, so the global is never modified.
if (input$season != "All") {
data <- data[data$season == input$season, ]
}
if (input$matchweek != "All") {
data <- data[data$matchweek == input$matchweek, ]
}
if (input$goal_club != "All") {
data <- data[data$goal_club == input$goal_club, ]
}
data
},
rownames = FALSE,
style = "bootstrap",
colnames = c("Season", "Matchweek", "Home Club", "Away Club",
"Final Score", "Goal Club", "Goal Scorer", "Minute"),
options = list(pageLength = 25)))
# NOTE(review): the download serves datasetInput() (the full dataset), not
# the filtered view shown in the table -- confirm this is intentional.
output$downloadData <- downloadHandler(
filename = function() {
paste(input$data, "goals.csv", sep = "")
},
content = function(file) {
write_csv(datasetInput(), file)
}
)
}
|
8a026a72568c470bf1df1a965f3c501e8713ea8d
|
75a5de8a713432d4ea6e2c1214b7c3b04dfbbd66
|
/man/GenerateSelfCovarName.Rd
|
b4937cf84f439acae06a9aab277029eb5b191893
|
[] |
no_license
|
andrewhaoyu/bc3
|
25038d6942e2165ac1de154cba07fb7798ac2fd2
|
262e78031350c78eeccde2f7beb352719daf8d0f
|
refs/heads/master
| 2021-06-25T18:35:30.888944
| 2020-11-06T01:41:49
| 2020-11-06T01:41:49
| 155,858,459
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 336
|
rd
|
GenerateSelfCovarName.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/GenerateSelfCovarName.R
\name{GenerateSelfCovarName}
\alias{GenerateSelfCovarName}
\title{Title}
\usage{
GenerateSelfCovarName(x.self.design, baselineonly, additive,
pairwise.interaction, saturated)
}
\arguments{
\item{saturated}{}
}
\description{
Title
}
|
9576a3484cbaf629aad38d0dd5776ce5f7a1eb41
|
e518a260b8b5d38c4a3f9237f3ef3ff8a3c635b4
|
/03_summarize-AIS-data-cargo-transits.R
|
d78cd3f92bd731980b8cf98373fcbf00b866e3f5
|
[] |
no_license
|
fjoyce/cargo_AIS_tracks
|
a546ad28b96e891adce8b6608bcf1a82903d74d5
|
1a2819af1b5d3c19729ef64ac0aec0f2579deb8e
|
refs/heads/master
| 2020-03-19T23:37:17.116144
| 2018-06-09T04:09:21
| 2018-06-09T04:09:21
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 7,136
|
r
|
03_summarize-AIS-data-cargo-transits.R
|
#03_summarize-AIS-data-cargo-transits
#This R script aims to organzie and summarize AIS data cargo transits in the SB channel, including identifying cargo transits, summarizing patterns
#and visualizing such data
#load libraries
library(pacman) #I heard about this package at an R meeting - essentially if you load this package and then do p_load for the rest of the libraries then if it's not installed it will automatically install it for you, otherwise it just loads it
p_load(dplyr)
p_load(ggplot2)
p_load(sp)
p_load(rgdal)
p_load(maps)
p_load(maptools)
p_load(rgeos)
#load function script (defines transit_map() used later in this script)
source("04_Functions-AIS-data.R")
#load cleaned Class A AIS data (data frame `ships_channel_2`; one row per AIS
#position report -- assumed columns include mmsi, lat, lon, ship_type,
#datetime, heading, speed, lane -- TODO confirm against 01_* script)
ships_channel_2 <- readRDS("01_SBARC-AIS_201801-03_ClassA_lanes.Rds")
# Keep only defined cargo ships and tankers (AIS ship-type codes 70-89) -------
cargo_tankers_channel <- ships_channel_2 %>%
  filter(ship_type >= 70 & ship_type < 90)

# Identify MMSIs observed in the bounding boxes at the northern and southern
# ends of the shipping lanes.
# FIX: the original upper longitude bounds were written as positive values
# (120.8 and 118.265). For west-coast (western hemisphere) data every lon is
# negative, so `lon <= 120.8` was always TRUE and the "box" degenerated to a
# half-open strip. Both bounds must be negative.
# Ships in bbox at the northern beginning of the shipping lane
mmsi_lanes_north <- cargo_tankers_channel %>%
  filter(lat <= 34.47 & lat >= 34.36 & lon >= -120.9 & lon <= -120.8) %>%
  group_by(mmsi, name) %>%
  summarize(AIS_points_n = n())

# Ships in bbox at the southern end of the shipping lane
mmsi_lanes_south <- cargo_tankers_channel %>%
  filter(lat <= 33.68 & lat >= 33.576 & lon >= -118.326 & lon <= -118.265) %>%
  group_by(mmsi, name) %>%
  summarize(AIS_points_s = n())

# MMSIs present in BOTH end boxes = vessels that traversed the full lane.
# (full_join + na.omit keeps only rows matched on both sides, i.e. the
# intersection of the two MMSI sets.)
join_lanes <- mmsi_lanes_south %>%
  full_join(mmsi_lanes_north, by = "mmsi") %>%
  na.omit() %>%
  select(mmsi)
#get just AIS points for mmsis that were in both bboxes of the cargo lane, and then create transit numbers for each transit
cargo_tankers_lane_transits <- cargo_tankers_channel %>%
right_join(join_lanes, by = "mmsi") %>%
group_by(mmsi) %>%
arrange(datetime_PST)%>%
# mmsi_seq: per-ship point counter; time_diff_hrs: gap to previous point in
# hours (first point per ship seeded with 0 so cumsum below works)
mutate(mmsi_seq = row_number(), time_diff_hrs = (c(0, diff(datetime)) / #have to start the first point for each ship with 0 (can't use NA otherwise creating new transits doesn't work)
(3600))) %>%
mutate(trip = cumsum(time_diff_hrs > 12) + 1L) %>% #this creates a new trip/transit for all ships, it starts once the time time difference between two points is > 12 hours
mutate(mmsi_trip = paste0(mmsi, "-", trip)) %>% #creates a unique transit identifier based on the mmsi and trip number, essentially MMSI-TRIP
mutate(heading_diff=(c(0,diff(heading))))
#create a dataframe with mmsi trips we don't want (starting from scratch you'd have to create the summary table first, plot the lines and figure out which ones you don't want and then come back here and remove them) - ideally I will figure out a way so that we don't have to do this based but I'm obviously not there yet
mmsi_trip_remove <- c("211327410-1", "211327410-3", "356872000-5", "355717000-1", "636014557-6", "352776000-2")
mmsi_trip_remove_df <- data.frame(mmsi_trip_remove, stringsAsFactors=FALSE)
#dataframe of shipping companies (CSV maps mmsi -> company; RDS provides an
#anonymised company label `company_x`)
shipping_companies <- read.csv("cargo_vessels_shipping_company.csv")
shipping_companies_anon <-readRDS("shipping_companies_anonymous.Rds")
shipping_companies_edit <- shipping_companies %>%
left_join(shipping_companies_anon, by="company")%>%
mutate(mmsi=as.factor(mmsi))%>%
select(mmsi, company, company_x)
#create summary table for each trip: one row per (ship, transit) with timing,
#speed, spatial-extent and heading-change statistics, then keep only transits
#that span the full lane.
cargo_tankers_lane_transits_summary <- cargo_tankers_lane_transits %>%
ungroup()%>%
filter(speed < 30)%>%  # drop implausible speed fixes (>30 kn assumed noise -- TODO confirm units)
anti_join(mmsi_trip_remove_df, by=c("mmsi_trip"="mmsi_trip_remove"))%>%  # drop manually flagged bad transits
left_join(shipping_companies_edit, by="mmsi")%>%
#filter((heading < 140 & heading > 75) | (heading > 250 & heading < 320))%>%
group_by(mmsi, name, ship_type, mmsi_trip, company, company_x) %>%
#get max/mins for various columns
summarize(
min_time = min(datetime_PST),
max_time = max(datetime_PST),
median_time = median(datetime_PST),
avg_speed = mean(speed),
max_speed = max(speed),
min_speed = min(speed),
AIS_points = n(),
AIS_points_lane = sum(!is.na(lane)),
max_lon = max(lon),
min_lon = min(lon),
max_lat = max(lat),
min_lat = min(lat),
max_heading =max(heading),
min_heading= min(heading),
max_heading_diff=max(heading_diff),
# counts of points with large heading jumps (possible GPS/heading glitches)
heading_diff_50=length(mmsi_seq[heading_diff>50]),
heading_diff_100=length(mmsi_seq[heading_diff>100])) %>%
#calculate the length of the transit (time_diff) as well as the differences between the max and min lat/lon (respectively).
mutate(
time_diff = as.numeric(difftime(max_time, min_time, units="hours")),
lon_diff = max_lon - min_lon,
lat_diff = max_lat - min_lat,
prop_AIS_in_lane = AIS_points_lane/AIS_points
) %>%
#essentially we are making sure that the transits are the full length (2.4 decimal degrees in lon and 0.7 degrees in lat) of the the shipping lane (although this doesn't always seem to be the case...)
filter(lon_diff > 2.4 & lat_diff > .7 & prop_AIS_in_lane > 0.95)
saveRDS(cargo_tankers_lane_transits_summary, file="cargo_tankers_lane_transits_summary.RDS")
#get these mmsis from cargo tanker transits, and filter AIS point to only these tranists (from the summary table)
cargo_tanker_refined_mmsi <- unique(cargo_tankers_lane_transits_summary$mmsi_trip)
cargo_tanker_lane_transits_refined <- subset(cargo_tankers_lane_transits, mmsi_trip %in% cargo_tanker_refined_mmsi)
cargo_tanker_lane_transits_refined_30 <- subset(cargo_tanker_lane_transits_refined, speed <= 30)
saveRDS(cargo_tanker_lane_transits_refined_30, file="cargo_tanker_lane_transits_refined_30.RDS")
#create polylines based on individual transits
#make sure no NAs in dataframe (printed for interactive inspection only)
colSums(is.na(cargo_tanker_lane_transits_refined_30))
#use function (from Function script) to create polylines of each transit
test <- transit_map(data = cargo_tanker_lane_transits_refined_30)
#plot the polylines
plot(test)
# Attach the per-transit summary attributes to the SpatialLines and export as
# a shapefile for GIS work.
cargo_tanker_transit_lines <- sp::merge(test, cargo_tankers_lane_transits_summary, by.x = "mmsi_trip", by.y="mmsi_trip")
writeOGR(cargo_tanker_transit_lines, dsn="." ,layer="cargo_tanker_transit_lines_test2",driver="ESRI Shapefile")
#March Summary Transit Table ---------------------
#From 03-summary AIS data
# NOTE(review): max_time is compared against the string
# "2018-03-01 -00:00:00 PST" (note the stray " -"); this relies on implicit
# POSIXct/character coercion -- confirm it selects the intended dates.
Mar_cargo_transits <- cargo_tankers_lane_transits_summary %>%
filter(max_time >="2018-03-01 -00:00:00 PST" & prop_AIS_in_lane > .9)
#explore cargo transits errant points----
#this looks at individual trips to see where the points are in that trip - I've been saving it and then exploring the file in QGIS to get a better sense of what's going on with some of the weird errant points in a vessel's trip
Resolute_bay_1 <- cargo_tanker_lane_transits_refined %>%
filter(mmsi_trip=="232005179-1")
# Promote to SpatialPointsDataFrame and assign WGS84 CRS before export
coordinates(Resolute_bay_1) <- ~lon + lat
WG84_crs <- CRS("+proj=longlat +datum=WGS84 +no_defs +ellps=WGS84 +towgs84=0,0,0")
proj4string(Resolute_bay_1) <- WG84_crs
writeOGR(Resolute_bay_1, dsn="." ,layer="Resolute_bay_1",driver="ESRI Shapefile")
|
bafeae830c5ec0d471839b855fadd80ecdc098a8
|
fd7596e9133063578fd1f0b71d3fdaa4e1c4ee63
|
/File1.R
|
4536a923999b2c65d243d68e5f72a3e182c099b9
|
[] |
no_license
|
ishaparasramka/Analytics
|
3536f43b1b478c7bb430c11fcce6c54918ecf9d1
|
cac195b330e662938a5b4edac2cb2ed965c2f068
|
refs/heads/master
| 2020-03-30T06:38:36.243834
| 2018-10-02T17:31:18
| 2018-10-02T17:31:18
| 150,877,076
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 108
|
r
|
File1.R
|
# Print the built-in `women` dataset (height/weight of American women);
# typing its name at top level echoes it when the script is run.
women
# FIX: the lines below are shell commands, not R code -- sourcing them as R
# raises a syntax error. They are kept as comments; run them in a terminal:
# git config --global user.email "um18089@stu.ximb.ac.in"
# git config --global user.name "ishaparasramka"
|
8da5207cd4fd9cf1b523f5b7358b668a72407fe5
|
58b943c940514e5c08106b97da156404ba6345cf
|
/R/summary.treeshape.R
|
c393191a71ed4b94c3b75538141096340cbe8d0b
|
[] |
no_license
|
bcm-uga/apTreeshape
|
1dd209ae8e4a0a30d22b906a99f98f459ef992f7
|
369236069399b25f462bc109db305cbdb4a6113c
|
refs/heads/master
| 2020-04-09T18:54:00.858207
| 2019-09-25T12:08:34
| 2019-09-25T12:08:34
| 124,239,795
| 2
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 557
|
r
|
summary.treeshape.R
|
# S3 summary method for objects of class "treeshape": prints the number of
# tips, the Colless imbalance statistic of the tree, and its expected value
# and standard deviation under the Yule and PDA null models.
"summary.treeshape" <- function(object, ...) {
  n_tips <- nrow(object$merge) + 1  # internal merge rows + 1 = tip count
  obj_label <- deparse(substitute(object))
  cat("\n")
  cat("Phylogenetic tree shape: ")
  cat("object ", obj_label, " of class 'treeshape'\n")
  cat("\n")
  cat("Number of tips:", n_tips, "\n\n")
  cat("Colless' shape statistic:", colless(object), "\n")
  # Yule expectation: n*log(n) + (gamma - 1 - log 2)*n, gamma = Euler-Mascheroni
  cat("Expected value (Yule model): ", n_tips * log(n_tips) + (0.57721566 - 1 - log(2)) * n_tips, " ")
  cat("Standard Deviation: ", sqrt(3 - pi^2 / 6 - log(2)) * n_tips, "\n")
  cat("Expected value (PDA model): ", sqrt(pi) * n_tips^(3 / 2), " ")
  cat("Standard Deviation: ", sqrt(10 / 3 - pi) * n_tips^(3 / 2), "\n")
}
|
cad5b7f0f3508f5f62513951ab801a6a3c8ef79f
|
2573b2b226e922302accf53ae5b7f055c2951d6e
|
/R/download_to_file.R
|
1eedd1f8669afd6e0aa7fc393b15bbdefffcec37
|
[] |
no_license
|
hieuqtran/ALA4R
|
442c32e9b5d2c750b748856d6dc7aaa4723e2529
|
ec240a434d8ba86c0c5c0a14961629ed313d705e
|
refs/heads/master
| 2020-08-24T01:51:35.699192
| 2019-08-01T04:48:08
| 2019-08-01T04:48:08
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 3,105
|
r
|
download_to_file.R
|
# Internal function used to download results to a file.
#
# Streams `url` straight to disk (via httr::GET + write_disk) rather than
# loading the response into memory, honouring the package-level cache policy.
#
# Args:
#   url            URL to fetch (must be a non-empty string).
#   outfile        destination path; defaults to the cache filename for `url`.
#   binary_file    currently unused in this body -- TODO confirm caller intent.
#   caching        one of the package caching modes ("off"/"refresh"/...).
#   verbose        emit progress messages?
#   on_redirect/on_client_error/on_server_error  handlers forwarded to
#                  check_status_code().
#   ...            unused here -- presumably reserved for future options.
# Returns: the path of the (possibly cached) downloaded file.
download_to_file <- function(url, outfile, binary_file=FALSE, caching=ala_config()$caching, verbose=ala_config()$verbose, on_redirect=NULL, on_client_error=NULL, on_server_error=NULL, ...) {
    assert_that(is.notempty.string(url))
    ## download from a URL using RCurl to a file
    ## we do this directly using RCurl to file, rather than reading into R memory and then dumping to file
    ## (NOTE(review): comment says RCurl but the code uses httr::GET)
    if (missing(outfile)) {
        outfile <- ala_cache_filename(url)
    } else {
        assert_that(is.string(outfile), is.dir(dirname(outfile))) ## check that outfile is a string and that it points to a valid directory
    }
    assert_that(is.flag(verbose))
    ## first check for zero-size cached files
    if (file.exists(outfile) && !(file.info(outfile)$size>0)) {
        ## file exists but is zero sized: treat as a failed previous download
        unlink(outfile)
    }
    if (nchar(url)>getOption("ALA4R_server_config")$server_max_url_length) warning("URL length may be longer than is allowed by the server")
    ## are we using cached results?
    if ((caching %in% c("off", "refresh")) || (! file.exists(outfile))) {
        if (verbose && (caching != "off")) message(sprintf("Caching %s to file %s", url, outfile))
        ## either we are not using caching, or we want to refresh the cache, or the file doesn't exist in the cache
        if (verbose) {
            get <- GET(url, write_disk(outfile, overwrite=TRUE), user_agent(ala_config()$user_agent), verbose(data_out=FALSE, data_in=FALSE, info=FALSE, ssl=FALSE))
        } else {
            get <- GET(url, write_disk(outfile, overwrite=TRUE), user_agent(ala_config()$user_agent)) }
        status_code <- status_code(get)
        ## check http status here
        ## if unsuccessful, delete the file from the cache first, after checking if there's any useful info in the file body
        diag_message <- ""
        if ((substr(status_code, 1, 1)=="5") || (substr(status_code, 1, 1)=="4")) {
            headers <- headers(get)
            if (exists("content-length",where=headers) && (as.numeric(headers["content-length"][1])<10000)) {
                ## if the file body is not too big, check to see if there's any useful diagnostic info in it
                diag_message <- get_diag_message(outfile)
            }
            unlink(outfile)
        }
        ## check status code of response. Note that we execute the on_redirect etc functions, but we don't capture the output. might wish to implement this differently?
        check_status_code(status_code, on_redirect=on_redirect, on_client_error=on_client_error, on_server_error=on_server_error, extra_info=diag_message)
    } else {
        if (verbose) message(sprintf("Using cached file %s for %s", outfile, url))
    }
    outfile
}
# Try to pull a human-readable "message" field out of a JSON error body.
# Returns "" when the file cannot be read, is not valid JSON, or has no
# message field -- never errors.
get_diag_message <- function(jsonfile) {
    msg <- tryCatch({
        raw_lines <- suppressWarnings(readLines(jsonfile))
        jsonlite::fromJSON(raw_lines)$message
    }, error = function(e) "")
    if (is.null(msg)) msg <- ""   # fromJSON succeeded but no "message" key
    msg
}
|
839bb0aa962826c2330adbc341d35f226b0a1566
|
fdb4e5c640b7951153f5bf67f21b136db07f8925
|
/R/endpoint.R
|
4d30c5026cdc931c0adc08bf8116cf5046ccd43a
|
[] |
no_license
|
cran/AzureQstor
|
5db4ef900aedb199afa81bbf926345879773ea5f
|
1cad45eb870b619a98a20680f3fa682039ef14f6
|
refs/heads/master
| 2022-12-28T15:20:13.428445
| 2020-10-15T22:00:08
| 2020-10-15T22:00:08
| 269,042,909
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,930
|
r
|
endpoint.R
|
#' Create a queue endpoint object
#'
#' @param endpoint The URL (hostname) for the endpoint, of the form `http[s]://{account-name}.queue.{core-host-name}`. On the public Azure cloud, endpoints will be of the form `https://{account-name}.queue.core.windows.net`.
#' @param key The access key for the storage account.
#' @param token An Azure Active Directory (AAD) authentication token. This can be either a string, or an object of class AzureToken created by [AzureRMR::get_azure_token]. The latter is the recommended way of doing it, as it allows for automatic refreshing of expired tokens.
#' @param sas A shared access signature (SAS) for the account.
#' @param api_version The storage API version to use when interacting with the host. Defaults to `"2019-07-07"`.
#'
#' @details
#' This is the queue storage counterpart to the endpoint functions defined in the AzureStor package.
#' @return
#' An object of class `queue_endpoint`, inheriting from `storage_endpoint`.
#' @seealso
#' [`AzureStor::storage_endpoint`], [`AzureStor::blob_endpoint`], [`storage_queue`]
#' @examples
#' \dontrun{
#'
#' # obtaining an endpoint from the storage account resource object
#' AzureRMR::get_azure_login()$
#'     get_subscription("sub_id")$
#'     get_resource_group("rgname")$
#'     get_storage_account("mystorage")$
#'     get_queue_endpoint()
#'
#' # creating an endpoint standalone
#' queue_endpoint("https://mystorage.queue.core.windows.net/", key="access_key")
#'
#' }
#' @export
queue_endpoint <- function(endpoint, key=NULL, token=NULL, sas=NULL,
                           api_version=getOption("azure_storage_api_version"))
{
    # Soft validation only: warn (don't stop) if the URL doesn't look like a
    # queue endpoint, matching the behaviour of the other *_endpoint functions.
    if(!is_endpoint_url(endpoint, "queue"))
        warning("Not a recognised queue endpoint", call.=FALSE)

    # Bundle the credentials and API version; class vector makes this a
    # queue_endpoint that also inherits storage_endpoint behaviour.
    structure(
        list(url=endpoint, key=key, token=token, sas=sas, api_version=api_version),
        class=c("queue_endpoint", "storage_endpoint")
    )
}
|
64cf9f4039ea0f7f121ffde225caeb5d866cee13
|
cda88a3dad4eda02f75f2e63c3f1cb659fa4d942
|
/t.r
|
c80e900d826c5c0c13d9dde249ad6807a9c4b07c
|
[] |
no_license
|
cassimahmedattia/hello-r
|
bb3a32f6b809eba9f3b6cb926fdd49df6246135f
|
6b7fc259f23fa0c6059af80d870f59dc03ba97a5
|
refs/heads/master
| 2023-06-16T16:02:37.135063
| 2021-07-15T14:02:15
| 2021-07-15T14:02:15
| 385,812,269
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 27
|
r
|
t.r
|
# Testing
# Minimal smoke-test script: prints a fixed message to confirm R runs.
print("Testing")
|
d5b03353f327ccbef65add9c34c321ce0726e139
|
3835a1026861b876ef5a82de6e18ed54f65391bb
|
/code/test runs/analysis2.R
|
1af324c55d7bc582fc010736cfe670579654e618
|
[] |
no_license
|
rhnfzl/optimize-targeted-ads
|
25ff583f73dd056b8b10c570a7f3067ed1ee8e50
|
50fa15a12ae93ebfef54d2ca6db6f9fdeda1cc99
|
refs/heads/main
| 2023-07-22T08:44:57.866039
| 2021-09-05T16:13:17
| 2021-09-05T16:13:17
| 403,282,923
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,233
|
r
|
analysis2.R
|
require(ggplot2)

# Inspect the fitted linear model (my_lm_modelm2 is created elsewhere --
# TODO confirm it is in scope when this script runs).
summary(my_lm_modelm2)
summary(my_lm_modelm2)$coefficient
coef(my_lm_modelm2)[1]
coef(my_lm_modelm2)[3]
coef(my_lm_modelm2)[2]

# NOTE(review): installing packages inside an analysis script is a side
# effect better moved to a one-off setup script; kept to preserve the
# original workflow.
install.packages("devtools")
devtools::install_github("cardiomoon/moonBook")
devtools::install_github("cardiomoon/moonBook2")
require(moonBook)
require(moonBook2)
require(ggplot2)

# ANCOVA-style interactive visualisation of the model
ggAncova(my_lm_modelm2, interactive = TRUE)

# Prediction equation assembled from the model coefficients.
# NOTE(review): the argument `x` is unused and the terms reference global
# variables (seriesH, moviesH, genereA, ...) -- confirm these exist in the
# calling environment before relying on this function.
equation = function(x){coef(my_lm_modelm2)[2]*seriesH+
coef(my_lm_modelm2)[3]*moviesH+
coef(my_lm_modelm2)[4]*genereA+
coef(my_lm_modelm2)[5]*genereC+
coef(my_lm_modelm2)[6]*genereDo+
coef(my_lm_modelm2)[7]*genereDr+
coef(my_lm_modelm2)[1]}

library(ggiraphExtra)
ggPredict(my_lm_modelm2, se = TRUE, interactive = TRUE)

# Scatter plot with the model-derived curve overlaid.
# FIX: in the original, the `+` before stat_function() started a new line, so
# the ggplot expression ended at geom_point() and the orphaned
# `+ stat_function(...)` failed at runtime (unary plus on a layer). In R a
# continuation `+` must end the previous line.
ggplot(training_dsetm2, aes(y = cont_subs, x = series_hour + movie_hour)) +
  geom_point() +
  stat_function(fun = equation, geom = "line", color = scales::hue_pal()(2)[1])

# Per-genre linear fits with the raw points on top
ggplot(data = training_dsetm2, aes(x = series_hour + movie_hour, y = cont_subs, color=genre)) + #passing the mod table to create the plot
  geom_smooth(method = "lm") + # creating linear regression line in the plot layer
  geom_point() # adding the points in the graph
|
9d32225748d7fd601d4fd0b61937c482dd23839f
|
82f6f1253b3a45533b2e017f1d3b381357f65b2b
|
/plot1.R
|
3f632964cfca29f532de20a01c6ec6321e755fdb
|
[] |
no_license
|
ramkumar88/ExData_Plotting1
|
1999a0ef4322d87f29d398218df8a672a1fd2ebf
|
61e441c21d81f2636758815ae47547e89a7dc6b0
|
refs/heads/master
| 2021-01-18T00:46:52.126270
| 2014-05-11T23:11:31
| 2014-05-11T23:11:31
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 823
|
r
|
plot1.R
|
## Read all lines from the data file
f <- readLines("../household_power_consumption.txt")
## Get first row -> used for data-frame (header line)
firstRow <- f[1]
## Only load rows from 1st and 2nd February 2007
## NOTE(review): the character class [1,2] also matches a literal comma
## (harmless in this data, but [12] was probably intended). Also the pattern
## assumes the Date column is in m/d/Y order, matching the strptime format
## below -- confirm the source file actually uses that ordering.
rowsToEvaluate <- c(firstRow,grep("^2/[1,2]/2007;+",f,value=TRUE))
## Load rows to evaluate into a data-frame (semicolon-separated)
powerData <- read.table(textConnection(rowsToEvaluate),sep=";",header=TRUE)
## Create a new DateTime column from the date and time columns
powerData <- transform(powerData, DateTime = as.POSIXct(strptime(paste(Date,Time,sep=" "), "%m/%d/%Y %H:%M:%S")))
## Create the plot: histogram of global active power
hist(powerData$Global_active_power,col="Red",main="Global Active Power",xlab="Global Active Power (kilowatts)")
## Copy the plot to PNG of size 480 by 480
dev.copy(png, file = "plot1.png",width=480,height=480)
## close the PNG device
dev.off()
|
ca60d47984e509968b04462a0f4d68ed386c8746
|
8d62402a4fb63be4021371d3d5535677306af9e9
|
/plot1.R
|
05bebeb2099be4fb79d1f0c0be0544d39d7df183
|
[] |
no_license
|
aryalsohan0/ExData_Plotting1
|
6485b9ed2e5d6442141c67017e13fd9b1414a756
|
6c9fcdb0095e664629bc815cba4b9e3009ddff0f
|
refs/heads/master
| 2022-12-09T06:23:06.164051
| 2020-08-10T16:13:56
| 2020-08-10T16:13:56
| 286,503,373
| 0
| 0
| null | 2020-08-10T14:52:09
| 2020-08-10T14:52:08
| null |
UTF-8
|
R
| false
| false
| 662
|
r
|
plot1.R
|
# Load required packages up front (was loaded mid-script in the original)
library(tidyverse)

# Reading Data: semicolon-separated, "?" marks missing values.
# FIX: the original passed `na = "?"`, which only worked via partial argument
# matching to `na.strings`; spell the argument out.
epcData <- read.csv("household_power_consumption.txt",
                    sep = ";", na.strings = "?")

# Changing Date and Time column type; DateTime combines both for time plots
epcData$Date <- as.Date(epcData$Date, format = "%d/%m/%Y")
epcData$DateTime <- strptime(paste(epcData$Date, epcData$Time), '%Y-%m-%d %H:%M:%S')

# Keep only 1-2 February 2007 (character dates are coerced to Date here)
epc_filtered <- epcData %>%
  filter((Date >= "2007-02-01") & (Date <= "2007-02-02"))

# Making Plot 1: histogram of global active power, written straight to PNG
png("plot1.png", width = 480, height = 480)
hist(epc_filtered$Global_active_power, main = "Global Active Power",
     xlab = "Global Active Power (kilowatts)", ylab = "Frequency", col = "Red")
dev.off()
|
23e385083aaff0b458cb9e5503c40409a3833649
|
8b26dee1f2c44a9383a1e4de39e2023b64e6bd5f
|
/tests/testthat/test-getMetaGenomeSummary.R
|
e3be72076c9f6e832f0aa677711dc61fa2e6a293
|
[] |
no_license
|
arpankbasak/biomartr
|
4e95661b00a6b3068b7a584c82d22547806a9c92
|
ebd78af060e99d1c60edf965f0c02eb8307e4e9d
|
refs/heads/master
| 2020-03-19T17:58:48.067441
| 2018-06-09T13:26:44
| 2018-06-09T13:26:44
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 139
|
r
|
test-getMetaGenomeSummary.R
|
context("Test: getMetaGenomeSummary()")

# Smoke test: only verifies that the call completes without error.
# NOTE(review): there is no expect_*() assertion here, so modern testthat
# will flag this as an empty/expectation-less test; consider wrapping the
# call in an explicit expectation.
test_that("The getMetaGenomeSummary() interface works properly..",{
getMetaGenomeSummary()
})
|
16c9c1633966fe35ae91be0c57235b156a587a7d
|
c84427b4e9293e4d7326e309b33f23ceef732623
|
/src/logarithmic_axes.R
|
a7f15274a354b2cd65fb3d2deda27001df979e10
|
[] |
no_license
|
pmavrodiev/hornets
|
1978b1c4a9a67f4194513c9511e624cbc09735b1
|
3eed8906e1435edd91fd0f1697d279307e145dc0
|
refs/heads/master
| 2020-05-30T04:41:01.498459
| 2015-04-25T09:59:05
| 2015-04-25T09:59:05
| 16,486,778
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 629
|
r
|
logarithmic_axes.R
|
# --- x axis: decade labels and 1..9 minor-tick candidates on a log10 scale ---
axp <- par("xaxp") #-> powers of 10 *inside* 'usr'
axp[3] <- 1 # such that only 10^. are labeled
aX <- axTicks(1, axp = 10^axp)
xu <- 10 ^ par("usr")[1:2]                 # x-range in data units (usr is log10)
e10 <- c(-1,1) + round(axp[1:2]) ## exponents of 10 *outside* 'usr'
v <- c(outer(1:9, e10[1]:e10[2], function(x,E) x * 10 ^ E))
v <- v[xu[1] <= v & v <= xu[2]]            # keep minor ticks inside the x-range

# --- y axis: same construction ---
ayp <- par("yaxp") #-> powers of 10 *inside* 'usr'
ayp[3] <- 1 # such that only 10^. are labeled
aY <- axTicks(2, axp = ayp)
# FIX: par("usr") is c(x1, x2, y1, y2); the y-range is elements 3:4.
# The original used [1:2], i.e. the x-range, for yu.
yu <- 10 ^ par("usr")[3:4]
# NOTE(review): the x branch passes 10^axp to axTicks() but this branch
# passes ayp untransformed -- confirm which form is intended for axis 2.
e10 <- c(-1,1) + round(ayp[1:2]) ## exponents of 10 *outside* 'usr'
w <- c(outer(1:9, e10[1]:e10[2], function(x,E) x * 10 ^ E))
#w <- w[yu[1] <= w & w <= yu[2]]
|
4867b4c922d83d2c98d7ed8d82cc764bf33d9a7a
|
53010da0027d6b6b7a44309d36e26010e0852681
|
/pcvpc_paperBW.R
|
4b7cda7ba0b7ab1e50ee3de535b40fcf8af5781e
|
[] |
no_license
|
jhhughes256/LEN_PK
|
7575ea8e92ecf2638de2ffdf641b9c1b052c7280
|
6f19454fc314728c536e94ccd762748d603132e2
|
refs/heads/master
| 2022-01-07T14:43:38.819059
| 2019-06-14T02:22:56
| 2019-06-14T02:22:56
| 62,599,627
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 10,656
|
r
|
pcvpc_paperBW.R
|
# Generating VPCs for each of the three models using the external validation dataset
# -----------------------------------------------------------------------------
# Prepare work environment
# Remove any previous objects in the workspace
# NOTE(review): rm(list=ls()) and setwd() in a script are side effects on the
# calling session; kept to preserve the original workflow.
rm(list=ls(all=TRUE))
graphics.off()
# Set the working directory
master.dir <- "E:/Hughes/Data/PK/FLAG"
setwd(master.dir)
# Load required packages
library(ggplot2)
library(Hmisc)
library(doBy)
library(plyr)
library(scales)
# Source functions (defines processSIMdata() used below)
source("E:/Hughes/functions_utility.r")
# Customize ggplot2 theme - R 2.15.3
theme_bw2 <- theme_set(theme_bw(base_size = 22))
theme_bw2 <- theme_update(plot.margin = unit(c(1, 0.5, 3, 0.5), "lines"),
  axis.title.x = element_text(size = 18, vjust = 0),
  axis.title.y = element_text(size = 18, vjust = 0, angle = 90),
  strip.text.x = element_text(size = 16),
  strip.text.y = element_text(size = 16, angle = 90))
# Confidence intervals - from function utility
# Empirical 90%/95% interval bounds (5th/95th and 2.5th/97.5th percentiles)
CI90lo <- function(x) quantile(x, probs = 0.05)
CI90hi <- function(x) quantile(x, probs = 0.95)
CI95lo <- function(x) quantile(x, probs = 0.025)
CI95hi <- function(x) quantile(x, probs = 0.975)
# -----------------------------------------------------------------------------
# Read in data for plotting
# Process the simulated *.fit files (one simulated dataset per model)
setwd("E:/Hughes/Data/PK/FLAG/COV15")
runname1 <- "RUN016_CL_CRCL2_FFM_VPC"
# processSIMdata(paste(runname1,".ctl",sep=""))
SIM.data1 <- read.csv(paste(runname1, ".nm7/", runname1, ".fit.csv", sep = ""),
  stringsAsFactors = F, na.strings = ".")
# Keep observation records only (MDV == 0 means a measured concentration)
SIM.data1 <- SIM.data1[SIM.data1$MDV == 0, ]
setwd(master.dir)
runname2 <- "RUN028_CELGENE"
# processSIMdata(paste(runname2,".ctl",sep=""))
SIM.data2 <- read.csv(paste(runname2, ".nm7/", runname2, ".fit.csv", sep = ""),
  stringsAsFactors = F, na.strings = ".")
SIM.data2 <- SIM.data2[SIM.data2$MDV == 0, ]
# Model 2 was fitted on the log scale: back-transform DV/PRED/IPRED
SIM.data2$DV <- exp(SIM.data2$DV)
SIM.data2$PRED <- exp(SIM.data2$PRED)
SIM.data2$IPRED <- exp(SIM.data2$IPRED)
runname3 <- "RUN029_LOPEZ"
# processSIMdata(paste(runname3,".ctl",sep=""))
SIM.data3 <- read.csv(paste(runname3, ".nm7/", runname3, ".fit.csv", sep = ""),
  stringsAsFactors = F, na.strings = ".")
SIM.data3 <- SIM.data3[SIM.data3$MDV == 0, ]
# Read in the original (observed) data and keep usable observations
ORG.data <- read.csv("nmprep_flagged.csv", stringsAsFactors = F, na.strings = ".")
names(ORG.data)[names(ORG.data) == "X.ID"] <- "ID"
ORG.data <- ORG.data[ORG.data$MDV == 0 & ORG.data$FLAG == 0, ]
# -----------------------------------------------------------------------------
# Assign factors to covariates
# Time binning: bin time-after-dose (TAD) into intervals; cut2 with
# levels.mean = T labels each bin by its mean, which is then parsed back to
# numeric so TADBIN can be used as a plotting coordinate.
bin_cuts <- c(0.52, 1.02, 2.02, 3.02, 5.02, 9.02, 49)
ORG.data$TADBIN <- cut2(ORG.data$TAD, cuts = bin_cuts, levels.mean = T)
ORG.data$TADBIN <- as.numeric(paste(ORG.data$TADBIN))
# with(ORG.data, table(TADBIN))
SIM.data1$TADBIN <- cut2(SIM.data1$TAD, cuts = bin_cuts, levels.mean = T)
SIM.data1$TADBIN <- as.numeric(paste(SIM.data1$TADBIN))
SIM.data2$TADBIN <- cut2(SIM.data2$TAD, cuts = bin_cuts, levels.mean = T)
SIM.data2$TADBIN <- as.numeric(paste(SIM.data2$TADBIN))
SIM.data3$TADBIN <- cut2(SIM.data3$TAD, cuts = bin_cuts, levels.mean = T)
SIM.data3$TADBIN <- as.numeric(paste(SIM.data3$TADBIN))
# Covariates: factor versions of ID, sex and creatinine-clearance category
ORG.data$IDf <- as.factor(ORG.data$ID)
ORG.data$SEXf <- factor(ORG.data$SEX, labels = c("F", "M"))
ORG.data$CRCLf <- factor(ifelse(ORG.data$CRCL2 <= 60, 1, 2),
  labels = c("CrCl <60mL/min", "CrCl >60mL/min"))
SIM.data1$IDf <- as.factor(SIM.data1$ID)
SIM.data2$IDf <- as.factor(SIM.data2$ID)
SIM.data3$IDf <- as.factor(SIM.data3$ID)
SIM.data1$SEXf <- factor(SIM.data1$SEX, labels = c("F", "M"))
SIM.data2$SEXf <- factor(SIM.data2$SEX, labels = c("F", "M"))
SIM.data3$SEXf <- factor(SIM.data3$SEX, labels = c("F", "M"))
SIM.data1$CRCLf <- factor(ifelse(SIM.data1$CRCL2 <= 60, 1, 2),
  labels = c("CrCl <60mL/min", "CrCl >60mL/min"))
SIM.data2$CRCLf <- factor(ifelse(SIM.data2$CRCL2 <= 60, 1, 2),
  labels = c("CrCl <60mL/min", "CrCl >60mL/min"))
SIM.data3$CRCLf <- factor(ifelse(SIM.data3$CRCL2 <= 60, 1, 2),
  labels = c("CrCl <60mL/min", "CrCl >60mL/min"))
# -----------------------------------------------------------------------------
# Plot an Uppsala-style pcVPC
# -----------------------------------------------------------------------------
# PRED Correction
# Bergstrand et al 2011 - Prediction-Corrected Visual Predictive Checks for
# Diagnosing Nonlinear Mixed-Effects Model
# - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
# Calculate the median PRED for each TADBIN (per model)
SIM.data1$PRED <- as.numeric(SIM.data1$PRED)
SIM.data1BIN <- summaryBy(PRED ~ TADBIN, SIM.data1, FUN = median, na.rm = T)
SIM.data2$PRED <- as.numeric(SIM.data2$PRED)
SIM.data2BIN <- summaryBy(PRED ~ TADBIN, SIM.data2, FUN = median, na.rm = T)
SIM.data3$PRED <- as.numeric(SIM.data3$PRED)
SIM.data3BIN <- summaryBy(PRED ~ TADBIN, SIM.data3, FUN = median, na.rm = T)
# Merge median PREDs into simulated dataset matching for their TIMEBIN,
# then sort both simulated and observed data identically.
SIM.data1 <- merge(SIM.data1, SIM.data1BIN, by = c("TADBIN"), all = T)
names(SIM.data1)[names(SIM.data1) == "PRED.median"] <- "PREDMED"
SIM.data1 <- SIM.data1[with(SIM.data1,
  order(SIM.data1$SIM, SIM.data1$ID, SIM.data1$TAD, SIM.data1$TADBIN)), ]
ORG.data1 <- ORG.data[with(ORG.data,
  order(ORG.data$ID, ORG.data$TAD, ORG.data$TADBIN)), ]
SIM.data2 <- merge(SIM.data2, SIM.data2BIN, by = c("TADBIN"), all = T)
names(SIM.data2)[names(SIM.data2) == "PRED.median"] <- "PREDMED"
SIM.data2 <- SIM.data2[with(SIM.data2,
  order(SIM.data2$SIM, SIM.data2$ID, SIM.data2$TAD, SIM.data2$TADBIN)), ]
ORG.data2 <- ORG.data[with(ORG.data,
  order(ORG.data$ID, ORG.data$TAD, ORG.data$TADBIN)), ]
SIM.data3 <- merge(SIM.data3, SIM.data3BIN, by = c("TADBIN"), all = T)
names(SIM.data3)[names(SIM.data3) == "PRED.median"] <- "PREDMED"
SIM.data3 <- SIM.data3[with(SIM.data3,
  order(SIM.data3$SIM, SIM.data3$ID, SIM.data3$TAD, SIM.data3$TADBIN)), ]
ORG.data3 <- ORG.data[with(ORG.data,
  order(ORG.data$ID, ORG.data$TAD, ORG.data$TADBIN)), ]
# Subset for one simulation of the same length of the original dataset
SIM.data1ONE <- SIM.data1[SIM.data1$SIM == 1, ]
SIM.data2ONE <- SIM.data2[SIM.data2$SIM == 1, ]
SIM.data3ONE <- SIM.data3[SIM.data3$SIM == 1, ]
# Add median PRED for each TIMEBIN to the orignal dataset.
# NOTE(review): this positional assignment assumes the sorted ORG.dataX rows
# align one-to-one with SIM.dataXONE rows -- verify the sort keys guarantee
# identical row order in both frames.
ORG.data1$PREDMED <- SIM.data1ONE$PREDMED
ORG.data1$PRED <- SIM.data1ONE$PRED
ORG.data2$PREDMED <- SIM.data2ONE$PREDMED
ORG.data2$PRED <- SIM.data2ONE$PRED
ORG.data3$PREDMED <- SIM.data3ONE$PREDMED
ORG.data3$PRED <- SIM.data3ONE$PRED
# Calculate the prediction corrected observed and simulated DVs
# (pcY = DV * binned-median-PRED / individual PRED; Bergstrand eq. 1)
ORG.data1$pcY <- (ORG.data1$DV)*(ORG.data1$PREDMED)/(ORG.data1$PRED)
SIM.data1$pcY <- (SIM.data1$DV)*(SIM.data1$PREDMED)/(SIM.data1$PRED)
ORG.data2$pcY <- (ORG.data2$DV)*(ORG.data2$PREDMED)/(ORG.data2$PRED)
SIM.data2$pcY <- (SIM.data2$DV)*(SIM.data2$PREDMED)/(SIM.data2$PRED)
ORG.data3$pcY <- (ORG.data3$DV)*(ORG.data3$PREDMED)/(ORG.data3$PRED)
SIM.data3$pcY <- (SIM.data3$DV)*(SIM.data3$PREDMED)/(SIM.data3$PRED)
# Combine the 3 SIM.data's and 3 ORG.datat's into a single SIM.data and ORG.data
SIM.data1$MODEL <- 1
SIM.data2$MODEL <- 2
SIM.data3$MODEL <- 3
SIM.data <- rbind(SIM.data1, SIM.data2, SIM.data3)
SIM.data$MODEL <- factor(SIM.data$MODEL)
levels(SIM.data$MODEL) <- c("Present Model", "Connarn et al. (2017)", "Guglieri-Lopez et al. (2017)")
ORG.data1$MODEL <- 1
ORG.data2$MODEL <- 2
ORG.data3$MODEL <- 3
ORG.data <- rbind(ORG.data1, ORG.data2, ORG.data3)
ORG.data$MODEL <- factor(ORG.data$MODEL)
levels(ORG.data$MODEL) <- c("Present Model", "Connarn et al. (2017)", "Guglieri-Lopez et al. (2017)")
# -----------------------------------------------------------------------------
# Create pcVPC using Xpose method
# Plot the confidence interval for the simulated data's percentiles for each bin
# (for each simulated data set compute the percentiles for each bin, then, from
# all of the percentiles from all of the simulated datasets compute the 95% CI
# of these percentiles).
# http://www.inside-r.org/packages/cran/xpose4specific/docs/xpose.VPC
# - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
# Calculate 5, 50 and 95 percentiles for each simulated study (S)
SIM.data.bystudy <- ddply(SIM.data, .(MODEL, SIM, TADBIN), function(x) {
  data.frame(
    medianS = median(x$pcY),
    loCI90S = CI90lo(x$pcY),
    hiCI90S = CI90hi(x$pcY)
  )
})

# Build plot object: grey ribbons = 95% CI of the simulated percentiles,
# black lines = median of those percentiles, grey lines = observed percentiles.
titletext <- "VPC - Uppsala Style\n"
p <- NULL
p <- ggplot(data = ORG.data)
# p <- p + ggtitle(titletext)
p <- p + stat_summary(aes(x = TADBIN, y = medianS, group = MODEL), data = SIM.data.bystudy,
  geom = "ribbon", fun.ymin = "CI95lo", fun.ymax = "CI95hi", fill = "grey")
p <- p + stat_summary(aes(x = TADBIN, y = loCI90S, group = MODEL), data = SIM.data.bystudy,
  geom = "ribbon", fun.ymin = "CI95lo", fun.ymax = "CI95hi", fill = "grey")
p <- p + stat_summary(aes(x = TADBIN, y = hiCI90S, group = MODEL), data = SIM.data.bystudy,
  geom = "ribbon", fun.ymin = "CI95lo", fun.ymax = "CI95hi", fill = "grey")
p <- p + geom_point(aes(x = TADBIN, y = pcY, group = MODEL), colour = "#999999", shape = 1)
p <- p + stat_summary(aes(x = TADBIN, y = medianS, group = MODEL), data = SIM.data.bystudy,
  fun.y = median, geom = "line", colour = "black", size = 1)
p <- p + stat_summary(aes(x = TADBIN, y = pcY, group = MODEL), fun.y = median,
  geom = "line", colour = "grey40", size = 1)
p <- p + stat_summary(aes(x = TADBIN, y = pcY, group = MODEL), fun.y = CI90lo,
  geom = "line", colour = "grey40", linetype = "dashed", size = 1)
p <- p + stat_summary(aes(x = TADBIN, y = pcY, group = MODEL), fun.y = CI90hi,
  geom = "line", colour = "grey40", linetype = "dashed", size = 1)
p <- p + stat_summary(aes(x = TADBIN, y = loCI90S, group = MODEL), data = SIM.data.bystudy,
  fun.y = median, geom = "line", colour = "black", linetype = "dashed", size = 1)
p <- p + stat_summary(aes(x = TADBIN, y = hiCI90S, group = MODEL), data = SIM.data.bystudy,
  fun.y = median, geom = "line", colour = "black", linetype = "dashed", size = 1)
p <- p + scale_y_log10("Prediction Corrected\nConcentration (mg/L)\n", labels = comma)
p <- p + scale_x_continuous("\nTime (hours)", breaks = 0:8*3)
# FIX: removed the empty trailing argument in the original
# coord_cartesian(ylim = ..., ) -- it only worked by being positionally
# matched as a missing `xlim`.
p <- p + coord_cartesian(ylim = c(0.00001, 100))
p <- p + facet_wrap(~MODEL, nrow = 3)
p
ggsave("pcvpc_paperBW.png", width = 17.4, height = 23.4, units = c("cm"))
ggsave("pcvpc_paperBW.eps", width = 17.4, height = 23.4, units = c("cm"),
  dpi = 1200, device = cairo_ps, fallback_resolution = 1200)
|
34dacdbd6bddeed73f66137b2d28dcf85e584625
|
98d83e9e525cabadfa812b88568e2f02ad30a941
|
/man/linreg.Rd
|
337a857ba50d638f1d9cdb38d145b46103409044
|
[] |
no_license
|
priku577/boxlinre
|
bc70fb5c3f9a38aa715e7083c0a969e4fb01c39c
|
3f99b225906497e9785e54ee979f8e395cffb5b5
|
refs/heads/master
| 2021-08-11T07:42:30.210879
| 2017-11-13T10:22:01
| 2017-11-13T10:22:01
| 106,527,302
| 0
| 0
| null | 2017-10-11T08:33:26
| 2017-10-11T08:33:26
| null |
UTF-8
|
R
| false
| true
| 379
|
rd
|
linreg.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/linreg.R
\name{linreg}
\alias{linreg}
\title{Multiple Linear Regression.}
\usage{
linreg(formula, data)
}
\arguments{
\item{formula}{A formula specifying the model to be fitted (e.g. y ~ x).}
\item{data}{data frame, dataset attached to the algorithm.}
}
\value{
a list.
}
\description{
Multiple Linear Regression.
}
|
d56baf49514301a5c32c54e41fa580bd686f62a2
|
146251d0f22e2d438474281587804686bcc36f8c
|
/run_all_analysis_scripts.R
|
b91783b3f6ee6c93a08be545e5e8ffee6e88ae2f
|
[
"MIT"
] |
permissive
|
MichaelHoltonPrice/price_et_al_tikal_rc
|
9c533aae49ad29e77495632c602809db0fef1388
|
3ac1e35f4277ef878f8e3aac3d05159928a09a2b
|
refs/heads/master
| 2023-07-01T06:58:50.155602
| 2021-08-09T03:34:52
| 2021-08-09T03:34:52
| 314,018,711
| 6
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 264
|
r
|
run_all_analysis_scripts.R
|
# Run the full analysis pipeline. Each script is executed for its side
# effects (figures, simulation outputs, processed data).
# NOTE(review): presumably later scripts consume files written by earlier
# ones, so the order below matters -- confirm before reordering.
source("create_Fig1.R")
source("create_identif_results_exp.R")
source("create_identif_results_gm.R")
source("do_simulations.R")
source("create_simulation_plots.R")
source("preprocess_mesorad_dataset.R")
source("do_tikal_inference.R")
source("create_tikal_plots.R")
|
f9a730b60c4cf829d04e130d0d82dffb3565b025
|
8f0beeea9d1bdaa9c361bb7228972ae8abddafb6
|
/lat_phys_ratios/compute_ratio.R
|
771349176a3b0d2c38dc1960dbed71ca089e8ccd
|
[] |
no_license
|
kostrzewa/misc_R_scripts
|
86b3e7ff0cef773393ac79e12260754bc573012a
|
f81d1264ba99147418b3d322bc325d5eda4b4d40
|
refs/heads/master
| 2020-04-12T06:35:41.028319
| 2019-08-16T12:12:43
| 2019-08-16T12:12:43
| 9,692,321
| 0
| 1
| null | null | null | null |
UTF-8
|
R
| false
| false
| 366
|
r
|
compute_ratio.R
|
# Divide one measured quantity by another, propagating their uncertainties.
#
# Args:
#   dividend, divisor  lists with numeric elements $val (value) and
#                      $dval (standard error).
#   name               label attached to the result (default "").
#   debug              if TRUE, print the label and the result.
# Returns: a list with $val, $dval (first-order error propagation for a
#   quotient) and $name.
compute_ratio <- function(dividend, divisor, name = "", debug = FALSE) {
  quotient <- dividend$val / divisor$val
  # sigma_r^2 = (sigma_a / b)^2 + (a * sigma_b / b^2)^2
  quotient_err <- sqrt((dividend$dval / divisor$val)^2 +
                       (divisor$dval * dividend$val / divisor$val^2)^2)
  result <- list(val = quotient, dval = quotient_err, name = name)
  if (debug) {
    print(sprintf("compute_ratio: %s", as.character(name)))
    print(result)
  }
  result
}
|
61eef6baf3653b9fb185af270ad8a517cfaac105
|
f714d5a98e3527823a14511e3357ebe32c17d055
|
/Code_hwdefinitions.R
|
409fc614d7488174066a2dc00e73e1b87a797372
|
[] |
no_license
|
ericamartinez/Temperatures_mortality
|
1922781bbe9552bdc15e04ab1b19f3763624a7ad
|
c75cbed170350a3e33f02a6c416eb25e3696514f
|
refs/heads/master
| 2021-01-04T14:10:32.106634
| 2018-10-01T20:29:20
| 2018-10-01T20:29:20
| 88,835,362
| 0
| 0
| null | null | null | null |
IBM852
|
R
| false
| false
| 25,922
|
r
|
Code_hwdefinitions.R
|
################################################################################
# "Impact of ambient temperatures on mortality in Spain (1993-2013)"
#
# CODE (INTERACTION TERM) AND HEAT WAVES INDICATOR
#
# ISGlobal
# Januray 2018
#
#
################################################################################
################################################################################
# PREPARE THE DATA
################################################################################
# LOAD THE PACKAGES
library(dlnm) ; library(mvmeta) ; library(splines) ; library(tsModel);library(mgcv)
library(foreach); library(doSNOW);
library(Epi); library(metafor)
#base_path <- "//fs.isglobal.lan/temperature/Mortality/02_Stata/03_data"
#base_path <- "/media/eloi/Linux Mint 17.1 Xfce 64-bit"
#setwd(paste0(base_path, "/doctorat/Mortality/02_Stata/03_data"))
#load(paste0("G:/doctorat/Mortality/02_Stata/03_data/tempDEATHS.Rdata"))
#load("//fs.isglobal.lan/temperature/Mortality/02_Stata/03_data/tempDEATHS.Rdata")
# Load the daily temperature/mortality series into data frame `tempDEATHS`.
# NOTE(review): hard-coded network path -- confirm it exists before running.
load("T:\\xavi\\etec\\erica\\mortality\\tempDEATHS.Rdata")
# Day of the year (1:365)
tempDEATHS$doy <- as.numeric(strftime(tempDEATHS$date, format = "%j"))
tempDEATHS$dow <- as.factor(tempDEATHS$dow)
# We exclude 2003
tempDEATHS <- subset(tempDEATHS, tempDEATHS$yyyy!=2003)
### Pick holidays (summer and Christmas): indicator column `phday`
tempDEATHS$phday <- 0
# First three weeks of August
tempDEATHS[tempDEATHS$dd < 22 & tempDEATHS$mm == 8, 'phday'] <- 1
# From the 23rd of December until the 6th of January
tempDEATHS[(tempDEATHS$dd > 22 & tempDEATHS$mm == 12) |
(tempDEATHS$dd < 7 & tempDEATHS$mm == 1), 'phday'] <- 1
# Binary variables for before (int2, <2004) and after (int1, >=2004) periods
tempDEATHS$int1 <- 0
tempDEATHS[tempDEATHS$yyyy >= 2004, 'int1'] <- 1
tempDEATHS$int2 <- 0
tempDEATHS[tempDEATHS$yyyy < 2004, 'int2'] <- 1
# The 50 Spanish provinces, used below to name the per-province data list
provincies_n_total <- list("Alava", "Albacete", "Alicante", "Almeria", "Avila",
"Badajoz", "Illes Balears", "Barcelona", "Burgos", "Caceres", "Cadiz",
"Castellon", "Ciudad Real", "Cordoba", "A Coruna", "Cuenca", "Girona",
"Granada", "Guadalajara", "Guipuzcoa", "Huelva", "Huesca", "Jaen", "Leon",
"Lleida", "La Rioja", "Lugo", "Madrid", "Malaga", "Murcia", "Navarra",
"Ourense", "Asturias", "Palencia", "Las Palmas", "Pontevedra", "Salamanca",
"Santa Cruz de Tenerife", "Cantabria", "Segovia", "Sevilla", "Soria",
"Tarragona", "Teruel", "Toledo", "Valencia", "Valladolid", "Vizcaya",
"Zamora", "Zaragoza")
# Select months when the Plan is activated (1 June to 15 September)
tempDEATHS <- subset(tempDEATHS, (tempDEATHS$mm>=6 & tempDEATHS$mm<=8) | (tempDEATHS$mm==9 & tempDEATHS$dd<=15))
# ARRANGE THE DATA AS A LIST OF DATA SETS (one data frame per province)
provinces_total <- as.character(unique(tempDEATHS$province)) # My provinces
dlist_total <- lapply(provinces_total,function(x) tempDEATHS[tempDEATHS$province==x,])
# Create a list with 50 provinces
# (takes the data frame and turns it into a list with one element per province)
names(dlist_total) <- provincies_n_total
# PARAMETERS FOR THE EXPOSURE-RESPONSE FUNCTION
# 2 internal knots placed at 75th and 90th percentiles of
# location-specific temperature distribution
varper <- c(75,90)
vardegree <- 2
# SPECIFICATION OF THE LAG FUNCTION (distributed lag up to 21 days)
lag <- 21
lagnk <- 2 #Number of knots for lag model
arglag<- list(knots=logknots(lag,lagnk))
# DEGREES OF FREEDOM
dfseas <- 4 #Seasonality
dftrend <- 1 #Long-term trend
# Heat-wave indicator for a temperature series: day i is flagged (1) when
# the temperature has been at or above threshold `thr` on `dur` consecutive
# days, computed within each level of `group` (callers pass the year).
# Uses Lag() (grouped lagging; loaded above via tsModel) on the exceedance
# vector.
fun.hw.thr <- function(x, thr, dur, group = NULL) {
  exceed <- Lag(x >= thr, 0:(dur - 1), group = group)
  consec <- apply(exceed, 1, sum, na.rm = TRUE)
  as.numeric(consec > (dur - 1))
}
# CREATE THE MATRICES TO STORE THE RESULTS
# (one column per heat-wave definition: durations 2/3/4 days crossed with
# the 90/92.5/95/97.5th temperature percentiles = 12 definitions)
hw.N <- hw1.N <- hw2.N <- matrix(NA,length(dlist_total),12, dimnames=list(provincies_n_total,
paste("hw",rep(c(2,3,4),each=4),rep(c(90,925,95,975),2),sep=".")))
main.eff <- added.eff <- matrix(NA,length(provincies_n_total),24,
dimnames=list(provincies_n_total,paste("hw",rep(c(2,3,4),each=8),
rep(c(90,925,95,975),each=2),c("est","sd"),sep=".")))
main.eff_before <- added.eff_before <- main.eff_after <- added.eff_after <- main.eff
### CODE FOR DIFFERENT HEAT WAVES DEFINITIONS
# First stage: for every province and definition, fit a quasi-Poisson DLNM
# separately for the periods before (<2003) and after (>2003) the Plan.
for(i in seq(length(dlist_total))) {
# PRINT progress
cat(i,"")
# FIRST STAGE
data <- dlist_total[[i]]
percentiles <- quantile(data$tempmax_compl,c(75,90,92.5,95,97.5)/100,na.rm=T)
range <- round(range(data$tempmax_compl,na.rm=T),0)
# HW DEFINITIONS: column 1 = temperature threshold, column 2 = duration
hw.def <- cbind(rep(percentiles[2:5],3),rep(c(2,3,4),c(4,4,4)))
# RUN THE MODEL FOR EACH DEFINITION
for(k in 1:nrow(hw.def)) {
data1 <- subset(data, data$yyyy < 2003)
data2 <- subset(data, data$yyyy > 2003)
# CREATE HEATWAVE INDICATOR FOR THE SPECIFIC HW DEFINITION BEFORE
hw <- fun.hw.thr(data$tempmax_compl,hw.def[k,1],hw.def[k,2],data$yyyy)
hw.N[i,k] <- sum(hw)
hw1 <- fun.hw.thr(data1$tempmax_compl,hw.def[k,1],hw.def[k,2],data1$yyyy)
hw1.N[i,k] <- sum(hw1)
hw2 <- fun.hw.thr(data2$tempmax_compl,hw.def[k,1],hw.def[k,2],data2$yyyy)
hw2.N[i,k] <- sum(hw2)
# models can only be fitted when both periods contain heat-wave days
if (sum(hw2)>0 & sum(hw1)>0){
###### BEFORE
# DEFINE THE CROSSBASIS
argvar <- list(fun="bs",degree=2,knots=quantile(data1$tempmax_compl,varper/100,na.rm=T))
# We define 2 internal knots placed at 75,90th percentiles
cb1 <- crossbasis(data1$tempmax_compl, lag=lag, argvar=argvar,
arglag=arglag)
data1$t=1:dim(data1)[1]
model1 <- glm(adeath ~ hw1 + cb1 + dow + hday + phday + total_influenza_h +
ns (doy,df=dfseas): factor(yyyy) +
ns(t, df=round(length(unique(yyyy))/dftrend/10)),
data1, family=quasipoisson, na.action="na.exclude")
######## AFTER
# DEFINE THE CROSSBASIS
argvar <- list(fun="bs",degree=2,knots=quantile(data2$tempmax_compl,varper/100,na.rm=T))
# We define 2 internal knots placed at 75,90th percentiles
cb2 <- crossbasis(data2$tempmax_compl, lag=lag, argvar=argvar,
arglag=arglag)
data2$t=1:dim(data2)[1]
model2 <- glm(adeath ~ hw2 + cb2 + dow + hday + phday + total_influenza_h +
ns (doy,df=dfseas): factor(yyyy) +
ns(t, df=round(length(unique(yyyy))/dftrend/10)),
data2, family=quasipoisson, na.action="na.exclude")
}
# SAVE MAIN EFFECT: prediction at the median heat-wave temperature,
# centred at the 75th percentile
if(sum(hw2)>0 & sum(hw1)>0) {
tmedian <- median(data$tempmax_compl[hw==1],na.rm=T)
pred.before <- crosspred(cb1,model1,at=c((range[1]+1):(range[2]-1),tmedian),
cen=percentiles[1])
pred.after <- crosspred(cb2,model2,at=c((range[1]+1):(range[2]-1),tmedian),
cen=percentiles[1])
main.eff_before[i,c(k*2-1,k*2)] <- cbind(pred.before$allfit,
pred.before$allse)[as.character(tmedian),]
main.eff_after[i,c(k*2-1,k*2)] <- cbind(pred.after$allfit,
pred.after$allse)[as.character(tmedian),]
# SAVE ADDED EFFECT: coefficient of the heat-wave indicator itself
added.eff_before[i,c(k*2-1,k*2)] <- ci.lin(model1)["hw1",1:2]
added.eff_after[i,c(k*2-1,k*2)] <- ci.lin(model2)["hw2",1:2]
} else {main.eff_before[i,c(k*2-1,k*2)] <- c(NA,NA)
main.eff_after[i,c(k*2-1,k*2)] <- c(NA,NA)
}
}
}
########################################################################################################
# SECOND-STAGE ANALYSIS: UNIVARIATE META-ANALYSIS
# Pool the province-specific main and added effects (before and after the
# Plan) with random-effects meta-analysis (metafor::rma.uni).
########################################################################################################
label <- paste("hw",rep(c(2,3,4),each=4),rep(c(90,92.5,95,97.5),2),sep=".")
table1.before <- table1.after <- matrix(NA,12,7,dimnames=list(label,
c("N comm","Est.main","95%CI.main","P-het.added","Est.added",
"95%CI.added","P-het.added")))
for(i in 1:12) {
# SET TO MISSING IF NO ESTIMATE FOR ADDED EFFECT
added.eff_before[added.eff_before[,2*i]==0,c(2*i-1,2*i)] <- NA
main.eff_before[is.na(added.eff_before[,2*i]),c(2*i-1,2*i)] <- NA
added.eff_after[added.eff_after[,2*i]==0,c(2*i-1,2*i)] <- NA
main.eff_after[is.na(added.eff_after[,2*i]),c(2*i-1,2*i)] <- NA
# RUN THE META-ANALYSIS
pool.main.before <- rma.uni(yi=main.eff_before[,2*i-1],sei=main.eff_before[,2*i])
pool.added.before <- rma.uni(yi=added.eff_before[,2*i-1],sei=added.eff_before[,2*i])
pool.main.after <- rma.uni(yi=main.eff_after[,2*i-1],sei=main.eff_after[,2*i])
pool.added.after <- rma.uni(yi=added.eff_after[,2*i-1],sei=added.eff_after[,2*i])
# FILL TABLE1: estimates reported as percent change, exp(b)*100-100,
# with normal-approximation 95% CIs and heterogeneity p-values
table1.before[i,] <- c(sum(!is.na(added.eff_before[,2*i-1])),
round(exp(pool.main.before$b)*100-100,1),
paste(round(exp(pool.main.before$b-1.96*pool.main.before$se)*100-100,1),"to",
round(exp(pool.main.before$b+1.96*pool.main.before$se)*100-100,1)),
round(pool.main.before$QEp,3),
round(exp(pool.added.before$b)*100-100,1),
paste(round(exp(pool.added.before$b-1.96*pool.added.before$se)*100-100,1),"to",
round(exp(pool.added.before$b+1.96*pool.added.before$se)*100-100,1)),
round(pool.added.before$QEp,3))
table1.after[i,] <- c(sum(!is.na(added.eff_after[,2*i-1])),
round(exp(pool.main.after$b)*100-100,1),
paste(round(exp(pool.main.after$b-1.96*pool.main.after$se)*100-100,1),"to",
round(exp(pool.main.after$b+1.96*pool.main.after$se)*100-100,1)),
round(pool.main.after$QEp,3),
round(exp(pool.added.after$b)*100-100,1),
paste(round(exp(pool.added.after$b-1.96*pool.added.after$se)*100-100,1),"to",
round(exp(pool.added.after$b+1.96*pool.added.after$se)*100-100,1)),
round(pool.added.after$QEp,3))
}
table1.before
table1.after
########################################################################################################
# ATTRIBUTABLE MEASURES (WITH ADDED EFFECTS)
########################################################################################################
#setwd("//fs.isglobal.lan/temperature/Mortality/02_Stata/01_do/R code/Temperatures_mortality-master/Functions - heatwaves/Erica")
#setwd("G:/doctorat/Mortality/02_Stata/01_do/R code/Temperatures_mortality-master/Functions - heatwaves/Erica")
setwd("T:\\Xavi\\ETEC\\Erica\\mortality\\separate models\\heatwaves\\erica")
setwd("H:/doctorat/Mortality/02_Stata/01_do/R code/Temperatures_mortality-master/separate models/heat waves/erica")
source("04_blups_per_attrm_hwdefinitions.R")
# For some reason the function call alone does not work, but running the
# body of the function manually does.
blupsattr_plan <- blups_plan(provinces_total,provincies_n_total,dlist_total,varper,lag,arglag,"adeath",dfseas,dftrend)
save(blupsattr_plan,file="T:\\Xavi\\ETEC\\Erica\\mortality\\separate models\\heatwaves\\erica\\blupsattr_plan.RData")
save(blupsattr_plan,file="H:/doctorat/Mortality/02_Stata/01_do/R code/Temperatures_mortality-master/separate models/heat waves/erica/blupsattr_plan.RData")
# load(file="T:\\Xavi\\ETEC\\Erica\\mortality\\separate models\\heatwaves\\erica\\blupsattr_plan.RData")
# Unpack the 96-element BLUP list into the individually named objects the
# attributable-risk code below expects.  The list stores, for each of the
# 12 heat-wave definitions (k = 1..12), eight consecutive elements:
#   +1 coef_blupP1_hw_k    +2 coef_blupP1_cb_hw_k
#   +3 coef_blupP2_hw_k    +4 coef_blupP2_cb_hw_k
#   +5 vcov_blupP1_hw_k    +6 vcov_blupP1_cb_hw_k
#   +7 vcov_blupP2_hw_k    +8 vcov_blupP2_cb_hw_k
local(for (k in 1:12) {
  offset <- 8 * (k - 1)
  assign(paste0("coef_blupP1_hw_", k),    blupsattr_plan[[offset + 1]], envir = .GlobalEnv)
  assign(paste0("coef_blupP1_cb_hw_", k), blupsattr_plan[[offset + 2]], envir = .GlobalEnv)
  assign(paste0("coef_blupP2_hw_", k),    blupsattr_plan[[offset + 3]], envir = .GlobalEnv)
  assign(paste0("coef_blupP2_cb_hw_", k), blupsattr_plan[[offset + 4]], envir = .GlobalEnv)
  assign(paste0("vcov_blupP1_hw_", k),    blupsattr_plan[[offset + 5]], envir = .GlobalEnv)
  assign(paste0("vcov_blupP1_cb_hw_", k), blupsattr_plan[[offset + 6]], envir = .GlobalEnv)
  assign(paste0("vcov_blupP2_hw_", k),    blupsattr_plan[[offset + 7]], envir = .GlobalEnv)
  assign(paste0("vcov_blupP2_cb_hw_", k), blupsattr_plan[[offset + 8]], envir = .GlobalEnv)
})
# changed by XB:
#source("attrdl_effectiveness_added_effect_hwdef.R")
# XB: I made several changes to the function:
source("attrdl_effectiveness_added_effect_hwdef_XB.R")
# CREATE THE VECTORS TO STORE THE TOTAL MORTALITY (ACCOUNTING FOR MISSING)
totdeath <- rep(NA,length(provincies_n_total))
names(totdeath) <- provincies_n_total
totdeath_1 <- totdeath_2 <- totdeath
# CREATE THE MATRIX TO STORE THE ATTRIBUTABLE INJURIES (simulations)
# Attention:
# P1 corresponds to the attributable numbers with risk P1 and
# temperatures P2
# P2 corresponds to the attributable numbers with risk P2 and
# temperatures P2
matsim <- lapply(1:12, function(x) matrix(NA, nrow=50, ncol=2, dimnames=list(provincies_n_total,
c("Added_effect_P1", "Added_effect_P2"))))
# NUMBER OF SIMULATION RUNS FOR COMPUTING EMPIRICAL CI
nsim <- 1000
# CREATE THE ARRAY TO STORE THE CI OF ATTRIBUTABLE INJURIES
# arraysim1 <- array(NA,dim=c(length(provincies_n_total),2,nsim),dimnames=list(provincies_n_total,
# c("Added_effect_P1", "Added_effect_P2")))
# arraysim2 <- arraysim3 <- arraysim4 <- arraysim5 <- arraysim6 <- arraysim7 <- arraysim8 <- arraysim9 <-
# arraysim10 <- arraysim11 <- arraysim12 <- arraysim1
arraysim <- lapply(1:12, function(x) array(NA, dim=c(length(provincies_n_total),2,nsim),dimnames=list(provincies_n_total,
c("Added_effect_P1", "Added_effect_P2"))))
# RUN THE LOOP over provinces (i) and heat-wave definitions (k)
for(i in 1:length(dlist_total)){
# PRINT progress
cat(i,"")
# EXTRACT THE DATA
data_tot <- dlist_total[[i]]
# percentiles <- quantile(data$tempmax_compl,c(75,90,92.5,95,97.5)/100,na.rm=T)
# range <- round(range(data$tempmax_compl,na.rm=T),0)
# changed by XB:
percentiles <- quantile(data_tot$tempmax_compl,c(75,90,92.5,95,97.5)/100,na.rm=T)
range <- round(range(data_tot$tempmax_compl,na.rm=T),0)
# HW DEFINITIONS (threshold, duration) - must mirror the first-stage grid
# hw.def <- cbind(rep(percentiles[2:5],2),rep(c(2,3,4),c(4,4,4)))
# Changed by XB:
hw.def <- cbind(rep(percentiles[2:5],3),rep(c(2,3,4),c(4,4,4)))
# RUN THE MODEL FOR EACH DEFINITION
for(k in 1:nrow(hw.def)) {
# COMPUTE THE ATTRIBUTABLE MORTALITY
# NB: THE REDUCED COEFFICIENTS ARE USED HERE
### With added effect
# Before and after
# XB: in all calls to attrdl_effectiveness_plan_added_effect I made changes in the hw.def argument
# get() retrieves the definition-specific BLUP objects unpacked above
matsim[[k]][i,"Added_effect_P1"] <- attrdl_effectiveness_plan_added_effect(data_tot,coef1=get(paste0("coef_blupP1_cb_hw_",k))[[i]],
vcov1=get(paste0("vcov_blupP1_cb_hw_",k))[[i]],
coef2=get(paste0("coef_blupP2_cb_hw_",k))[[i]],
vcov2=get(paste0("vcov_blupP2_cb_hw_",k))[[i]],
coef_addeff_1=get(paste0("coef_blupP1_hw_",k))[[i]],
coef_addeff_2=get(paste0("coef_blupP2_hw_",k))[[i]],
vcov_addeff_1=get(paste0("vcov_blupP1_hw_",k))[[i]],
vcov_addeff_2=get(paste0("vcov_blupP2_hw_",k))[[i]],
outcome="adeath",type="an",dir="back",tot=TRUE,
threshold=hw.def[k,1], hw.def=hw.def[k,], range=NULL,sim=FALSE)[1]
matsim[[k]][i,"Added_effect_P2"] <- attrdl_effectiveness_plan_added_effect(data_tot,coef1=get(paste0("coef_blupP1_cb_hw_",k))[[i]],
vcov1=get(paste0("vcov_blupP1_cb_hw_",k))[[i]],
coef2=get(paste0("coef_blupP2_cb_hw_",k))[[i]],
vcov2=get(paste0("vcov_blupP2_cb_hw_",k))[[i]],
coef_addeff_1=get(paste0("coef_blupP1_hw_",k))[[i]],
coef_addeff_2=get(paste0("coef_blupP2_hw_",k))[[i]],
vcov_addeff_1=get(paste0("vcov_blupP1_hw_",k))[[i]],
vcov_addeff_2=get(paste0("vcov_blupP2_hw_",k))[[i]],
outcome="adeath",type="an",dir="back",tot=TRUE,
threshold=hw.def[k,1], hw.def=hw.def[k,], range=NULL,sim=FALSE)[2]
# COMPUTE EMPIRICAL OCCURRENCES OF THE ATTRIBUTABLE INJURIES
# USED TO DERIVE CONFIDENCE INTERVALS
# With added effect
# Before and after
arraysim[[k]][i,"Added_effect_P1",] <- attrdl_effectiveness_plan_added_effect(data_tot,coef1=get(paste0("coef_blupP1_cb_hw_",k))[[i]],
vcov1=get(paste0("vcov_blupP1_cb_hw_",k))[[i]],
coef2=get(paste0("coef_blupP2_cb_hw_",k))[[i]],
vcov2=get(paste0("vcov_blupP2_cb_hw_",k))[[i]],
coef_addeff_1=get(paste0("coef_blupP1_hw_",k))[[i]],
coef_addeff_2=get(paste0("coef_blupP2_hw_",k))[[i]],
vcov_addeff_1=get(paste0("vcov_blupP1_hw_",k))[[i]],
vcov_addeff_2=get(paste0("vcov_blupP2_hw_",k))[[i]],
outcome="adeath",type="an",dir="back",tot=TRUE,
threshold=hw.def[k,1],hw.def=hw.def[k,],sim=T,nsim=nsim)[1:nsim]
arraysim[[k]][i,"Added_effect_P2",] <- attrdl_effectiveness_plan_added_effect(data_tot,coef1=get(paste0("coef_blupP1_cb_hw_",k))[[i]],
vcov1=get(paste0("vcov_blupP1_cb_hw_",k))[[i]],
coef2=get(paste0("coef_blupP2_cb_hw_",k))[[i]],
vcov2=get(paste0("vcov_blupP2_cb_hw_",k))[[i]],
coef_addeff_1=get(paste0("coef_blupP1_hw_",k))[[i]],
coef_addeff_2=get(paste0("coef_blupP2_hw_",k))[[i]],
vcov_addeff_1=get(paste0("vcov_blupP1_hw_",k))[[i]],
vcov_addeff_2=get(paste0("vcov_blupP2_hw_",k))[[i]],
outcome="adeath",type="an",dir="back",tot=TRUE,
threshold=hw.def[k,1],hw.def=hw.def[k,],sim=T,nsim=nsim)[(nsim+1):(nsim*2)]
# STORE THE TOTAL INJURIES (ACCOUNTING FOR MISSING)
totdeath[i] <- sum(data_tot$adeath,na.rm=T)
totdeath_1[i] <- sum(data_tot$adeath[which(data_tot$bef_aft==0)],na.rm=T)
totdeath_2[i] <- sum(data_tot$adeath[which(data_tot$bef_aft==1)],na.rm=T)
}
}
################################################################################
# ATTRIBUTABLE NUMBERS
#### OVERALL PERIOD ####
# CITY-SPECIFIC point estimates with empirical 95% CI limits, one matrix
# (province x {Added_effect_P1, Added_effect_P2}) per heat-wave definition.
ancity <- matsim
ancitylow <- lapply(1:12, function(j) apply(arraysim[[j]], c(1, 2), quantile, 0.025))
ancityhigh <- lapply(1:12, function(j) apply(arraysim[[j]], c(1, 2), quantile, 0.975))
# TOTAL
# NB: FIRST SUM THROUGH CITIES, then take quantiles of the summed simulations
antot <- lapply(1:12, function(j) apply(ancity[[j]], 2, sum))
antotlow <- lapply(1:12, function(j) apply(apply(arraysim[[j]], c(2, 3), sum), 1, quantile, 0.025))
antothigh <- lapply(1:12, function(j) apply(apply(arraysim[[j]], c(2, 3), sum), 1, quantile, 0.975))
################################################################################
# TOTAL INJURIES
# BY COUNTRY. OVERALL
totdeathtot <- sum(totdeath_2)
# BEFORE PERIOD
totdeathtot_1 <- sum(tempDEATHS$adeath[which(tempDEATHS$bef_aft==0)])
# AFTER PERIOD
totdeathtot_2 <- sum(tempDEATHS$adeath[which(tempDEATHS$bef_aft==1)])
################################################################################
# ATTRIBUTABLE FRACTIONS (attributable numbers as a percentage of deaths)
#### OVERALL PERIOD ####
# CITY-SPECIFIC
afcity <- lapply(1:12, function(j) ancity[[j]] / totdeath_2 * 100)
afcitylow <- lapply(1:12, function(j) ancitylow[[j]] / totdeath_2 * 100)
afcityhigh <- lapply(1:12, function(j) ancityhigh[[j]] / totdeath_2 * 100)
# TOTAL
aftot <- lapply(1:12, function(j) antot[[j]] / totdeathtot_2 * 100)
aftotlow <- lapply(1:12, function(j) antotlow[[j]] / totdeathtot_2 * 100)
aftothigh <- lapply(1:12, function(j) antothigh[[j]] / totdeathtot_2 * 100)
# Format "estimate (low, high)" strings, one row per heat-wave definition,
# for the before (column 1) and after (column 2) periods
mat.af <- round(matrix(unlist(aftot), ncol = 2, byrow = TRUE), 3)
mat.af.low <- round(matrix(unlist(aftotlow), ncol = 2, byrow = TRUE), 3)
mat.af.high <- round(matrix(unlist(aftothigh), ncol = 2, byrow = TRUE), 3)
res.before <- matrix(paste0(mat.af[, 1], " (", mat.af.low[, 1], ", ", mat.af.high[, 1], ")"), ncol = 1)
res.after <- matrix(paste0(mat.af[, 2], " (", mat.af.low[, 2], ", ", mat.af.high[, 2], ")"), ncol = 1)
print.data.frame(data.frame(res.before), quote = FALSE, row.names = FALSE)
print.data.frame(data.frame(res.after), quote = FALSE, row.names = FALSE)
|
77f0fa2e4f472d74aa2d380aeb1e4a91f5559b32
|
28024b6265d05dd41c646fdac57426bfa5f275e9
|
/tests/testthat.R
|
361ed9cac7431778cfd71aba9e9823e7b2ceb61f
|
[
"MIT"
] |
permissive
|
jonthegeek/human
|
5bfa288ea2784c0449f3e1fe976b4fae91342e8b
|
bf276ef05ffd5ca6ebaf6f6b7d6ba43d074a7bcd
|
refs/heads/master
| 2020-12-02T07:26:39.439972
| 2019-12-30T15:10:52
| 2019-12-30T15:19:55
| 230,933,432
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 54
|
r
|
testthat.R
|
# Test runner for the `human` package: loads testthat and the package,
# then discovers and executes all unit tests under tests/testthat/.
library(testthat)
library(human)
test_check("human")
|
dc6045994955d88099d3c902522c2c9ff9bfc8a4
|
29585dff702209dd446c0ab52ceea046c58e384e
|
/mapmisc/R/legendBreaks.R
|
6c08f835144d2b9331c34dd1305c9d0098b13c39
|
[] |
no_license
|
ingted/R-Examples
|
825440ce468ce608c4d73e2af4c0a0213b81c0fe
|
d0917dbaf698cb8bc0789db0c3ab07453016eab9
|
refs/heads/master
| 2020-04-14T12:29:22.336088
| 2016-07-21T14:01:14
| 2016-07-21T14:01:14
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 4,278
|
r
|
legendBreaks.R
|
# Draw a legend of colour breaks (e.g. for a raster plot), optionally in
# the outer figure margins.
#
# pos:     position keyword ("topleft", ...) or coordinates, passed on to
#          graphics::legend().
# breaks:  optional; a factor, a Raster* with a factor levels table, or a
#          list/data.frame holding the labels under one of
#          legend/label/level/breaks/ID and optionally colours under `col`.
# col:     symbol colours; defaults to 'black' or breaks$col when present.
# legend:  labels; derived from `breaks` when missing.
# rev:     reverse the order of colours and labels before drawing.
# outer:   allow drawing outside the plot region (par(xpd=NA)) and adjust
#          `inset` so a position keyword refers to the device region.
# width:   wrap labels longer than this many characters (uses trim()).
# lines:   keep at most this many lines per wrapped label.
# Returns (invisibly) the list returned by graphics::legend().
legendBreaks = function(pos,
    breaks,
    col,
    legend,
    rev=TRUE,
    outer=TRUE,
    pch=15,
    bg='white',
    cex=par('cex'),
    pt.cex=2.5*cex,
    text.col=par('fg'),
    title=NULL,
    inset=0.05,
    title.col=text.col,
    adj=0,
    width=Inf, lines=Inf,
    y.intersp,
    ...){

  # a factor (or categorical raster) supplies its levels as the labels
  if(!missing(breaks)){
    if(is.factor(breaks)){
      if(length(grep("^Raster",class(breaks)))){
        breaks = levels(breaks)[[1]]
      } else {
        breaks=list(legend=levels(breaks))
      }
    }
  }

  if( missing(legend) & missing(breaks))
    warning("legend or breaks must be supplied")

  # pull labels out of a breaks list when legend was not given
  if(missing(legend)&!missing(breaks)) {
    if(is.list(breaks)){
      legendCol = intersect(
          c('legend','label','level','breaks','ID'),
          names(breaks)
      )
      if(!length(legendCol)){
        warning("can't find legend in breaks")
      }
      legend = breaks[[ legendCol[1] ]]
    } else { # breaks isn't a list (or df)
      legend=breaks
    }
  }

  if(missing(col)){
    col='black'
    if(!missing(breaks)) {
      if(is.list(breaks)) {
        if(any(names(breaks)=='col'))
          col = breaks[['col']]
      }
    }
  }

  if(rev){
    col=rev(col)
    legend=rev(legend)
  }

  # diffyMult controls the half-row shift applied when labels mark the
  # boundaries between colours (one more label than colours).
  # BUGFIX: this initialisation was previously assigned to the misspelled
  # name 'diffYmult', leaving 'diffyMult' undefined on the else branch.
  diffyMult = 0
  if(length(col) == (length(legend)-1)) {
    # one more legend item than colours: labels sit on colour boundaries
    col = c(NA, col)
    pch = c(NA,
        pch[round(seq(1, length(pch), len=length(legend)-1))]
    )
    diffyMult=1
    # draw labels invisibly now; they are redrawn shifted below
    theTextCol = '#FFFFFF00'
  } else { # same number of colours as legend entries
    theTextCol = text.col
    # get rid of entries where col is NA
    theNA = is.na(col)
    if(any(theNA)){
      col = col[!theNA]
      legend = legend[!theNA]
    }
  }

  # line wrapping for legend labels
  if(any(nchar(as.character(legend)) > width)) {
    legend = trim(
        gsub(
            paste('(.{1,', width, '})(\\s|/|$)' ,sep=''),
            '\\1\n ',
            as.character(legend)
        )
    )
  }

  # remove excess lines beyond `lines` per label
  theNewLines = gregexpr('\n', as.character(legend))
  toCrop = which(unlist(lapply(theNewLines, length)) >= lines)
  if(length(toCrop)) {
    cropPos = unlist(lapply(theNewLines[toCrop], function(qq) qq[lines]))
    legend = as.character(legend)
    legend[toCrop] =
        trim(substr(legend[toCrop], 1, cropPos))
  }

  # default vertical spacing grows with the number of wrapped lines
  if(missing(y.intersp)){
    if(is.character(legend)) {
      theNewLines = gregexpr('\n', as.character(legend))
      y.intersp=max(
          c(0.5, unlist(lapply(theNewLines, function(qq) sum(qq>0))))
      ) - 0.25
    } else {
      y.intersp = 1
    }
  }
  if(all(is.na(y.intersp))){
    y.intersp=0
  }

  adj = rep_len(adj, 2)
  adj[2] = adj[2] + y.intersp/3

  # get rid of transparency (alpha channel) in 8-digit hex colours
  withTrans = grep("^#[[:xdigit:]]{8}$", col)
  col[withTrans] = gsub("[[:xdigit:]]{2}$", "", col[withTrans])

  # BUGFIX: capture the clipping state unconditionally.  It was previously
  # stored only inside if(outer), so the final par(xpd=oldxpd) failed with
  # "object 'oldxpd' not found" whenever outer=FALSE.
  oldxpd = par("xpd")
  if(outer){
    par(xpd=NA)
    fromEdge = matrix(par("plt"), 2, 2,
        dimnames=list(c("min","max"), c("x","y")))
    propIn = apply(fromEdge, 2, diff)
    if(is.character(pos)) {
      # shift the inset so the legend can sit in the figure margins
      forInset = c(0,0)
      if(length(grep("left$", pos))){
        forInset[1] = -fromEdge["min","x"]
      } else if(length(grep("right$", pos))){
        forInset[1] = fromEdge["max","x"]-1
      }
      if(length(grep("^top", pos))){
        forInset[2] = -fromEdge["min","y"]
      } else if(length(grep("^bottom", pos))){
        forInset[2] = fromEdge["max","y"]-1
      }
      inset = forInset/propIn + inset
    }
  }

  result=legend(
      pos,
      legend=as.character(legend),
      bg=bg,
      col=col,
      pch=pch,
      pt.cex=pt.cex,
      inset=inset,
      cex=cex,
      text.col=theTextCol,
      title.col=title.col,
      title=title,
      y.intersp=y.intersp,
      adj=adj,
      ...
  )

  # boundary-label case: redraw the labels in the real colour, shifted
  # down half a row (they were drawn transparently above)
  if(text.col != theTextCol) {
    diffy = diff(result$text$y)/2
    diffy = c(
        diffy,diffy[length(diffy)]
    )*diffyMult
    result$text$y = result$text$y + diffy
    if(par("xlog")) result$text$x = 10^result$text$x
    if(par("ylog")) result$text$y = 10^result$text$y
    text(result$text$x, result$text$y,
        legend, col=text.col,adj=adj, cex=cex)
  }
  par(xpd=oldxpd)
  return(invisible(result))
}
|
74bb006ff9f80b72736048c67272b42752ffc4f2
|
c414a3d6d466ca68f9076d6cbe125e485485fea8
|
/CIPinnipedAnalysis/man/getdead_ch.Rd
|
feec0f309ec60ee30e40bc91bc977c117c5ac565
|
[] |
no_license
|
jlaake/CIPinnipedAnalysis
|
1b35cafa3865a8cacfbc1c68fe9bf1ce21debc72
|
0c0d8f28a45253e7227ebc902f06819a0ecdd437
|
refs/heads/master
| 2021-01-17T09:26:37.164597
| 2019-04-12T02:28:11
| 2019-04-12T02:28:11
| 2,009,438
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 1,481
|
rd
|
getdead_ch.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/getdead_ch.r
\name{getdead_ch}
\alias{getdead_ch}
\title{Dead pup capture histories}
\usage{
getdead_ch(island, year, development = "fullterm", merge = TRUE)
}
\arguments{
\item{island}{("SMI" or "SNI")}
\item{year}{four digit numeric year}
\item{development}{either "premie" or "fullterm"}
\item{merge}{if TRUE, merges disparate area codes into "PTS","SCV","WCV","NWC"}
}
\value{
\preformatted{dataframe containing
ch(capture history),
Carcass condition: F-fresh, D- decomposing, P- pancake,
Position: A (Above) or B (Below)
Substrate: N (non-consolidated - sandy), C (consolidated - rocky)
AreaCode - beach area"}
}
\description{
Creates capture histories for tagged and untagged dead pups for POPAN analysis
}
\details{
Extracts data for a particular year, island and pup development stage (premie/fullterm) from
Zc dead tag initial and Zc dead tag resight for tagged dead pups,
and Zc Cu dead pup census for untagged stacked dead pups. Creates a capture history
for each pup and uses -1 for freq for any stacked pup. Includes initial substrate, carcass
condition and beach position as covariates. Note FP (flood pond) has been used infrequently
and has been converted to A (above beach crest) versus B (below beach crest).
It reports any mismatches (id in resights with no initial) and any duplicate initial data
records.
}
\author{
Jeff Laake
}
|
9b5862cc666e92adec0f82b03519548849d03607
|
a28bcddae647acfa4fabe04be2a5ef6846b637ee
|
/ui.R
|
8c4e0753a5e2475e0d8f7dfb8a8c793e71c05a2a
|
[] |
no_license
|
Samdsc001/DevelopingDataProducts
|
02a39c01ca756bbdb009e38e41f243191fc2c473
|
c821b05204a3a42f7ce7695982cc46fd1539f65f
|
refs/heads/master
| 2021-01-10T15:35:29.643045
| 2015-11-22T18:30:58
| 2015-11-22T18:30:58
| 46,662,901
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 864
|
r
|
ui.R
|
library(shiny)
# Define UI for the miles-per-gallon application: a sidebar with a
# grouping-variable selector and an outlier checkbox, plus a main panel
# showing a caption and a plot (both filled in by the matching server
# component, not shown here).
shinyUI(fluidPage(
# title
titlePanel("Miles Per Gallon Analysis"),
h5('Change the plot by selecting different drop down values and using the check box'),
p('This is a dynamic visual interpretation of the relationship between different variables on the miles per gallon. The variables analyzed are:'),
p('
1) Cylinders
2) Automatic vs. Manual
3) Number of Gears'),
sidebarLayout(
sidebarPanel(
# input "variable": which variable to plot against MPG
selectInput("variable", "Select the Variables to Plot:",
c("Cylinders" = "cyl",
"Transmission" = "am",
"Gears" = "gear")),
# input "outliers": whether the plot should display outliers
checkboxInput("outliers", "Show outliers", FALSE)
),
# mpg outputs rendered by the server
mainPanel(
h3(textOutput("caption")),
plotOutput("mpgPlot")
)
)
))
|
84913f2828d24e71b5a002c7d56b521494661b65
|
609e12295b740d904c45896444e71c7c40215c8e
|
/R/testph.R
|
c7c9e0b338b8717d1a1621191be8a5c4950dc823
|
[] |
no_license
|
stranda/holoSimCell
|
a7f68cec17f0c9f599b94fb08344819de40ecd2e
|
f1570c73345c1369ed3bf3aad01a404c1c5ab3a2
|
refs/heads/master
| 2023-08-20T09:08:13.503681
| 2023-07-20T19:09:41
| 2023-07-20T19:09:41
| 189,275,434
| 1
| 2
| null | 2021-03-19T19:32:01
| 2019-05-29T18:08:16
|
R
|
UTF-8
|
R
| false
| false
| 6,220
|
r
|
testph.R
|
##
## Check that every genetically sampled site in landscape$sampdf occupies
## its own raster cell.  Returns TRUE when all cells are unique; otherwise
## emits a message naming the clashing sites and returns FALSE.
##
uniqueSampled <- function(landscape)
{
    samples <- landscape$sampdf
    samples <- samples[order(samples$cell), ]
    runs <- rle(samples$cell)
    shared <- runs$values[runs$lengths > 1]
    if (length(shared) > 0)
    {
        message(paste("This landscape combines mutiple sampled populations into one raster cell",
                      paste(samples$abbrev[samples$cell %in% shared], collapse=", ")))
        return(FALSE)
    }
    TRUE
}
#' Test pophist object before coalescent simulation
#'
#' Function to take output from getpophist_cells and def_grid and test for cell occupancy
#'
#' @param ph a pophist object, output by getpophist2.cells()
#' @param landscape the landscape object used in the forward simulation
#'
#' @details
#' All cells with population genetic samples must be colonized during the forward simulation. Simulations that do not fully colonize sampled populations are discarded. This function tests three aspects of the landscape produced in the forward demographic simulation:
#' \itemize{
#' \item{All populations with genetic samples are colonized during the forward simulation.}
#' \item{Habitat suitability is non-zero at the last simulation time step in all cells with genetic samples.}
#' \item{The spatial extent of the simulated landscape matches that of the genetic samples.}
#' }
#'
#' @return
#' Returns a logical (T or F) indicating whether all sampled cells are occupied at the end of the forward simulation.
#'
#' @examples
#' library(holoSimCell)
#' parms <- drawParms(control = system.file("extdata/ashpaper","Ash_priors.csv",package="holoSimCell"))
#' load(file=paste0(system.file(package="holoSimCell"),"/extdata/landscapes/",pollenPulls[[1]]$file))
#' refpops <- pollenPulls[[1]]$refs
#' avgCellsz <- mean(c(res(landscape$sumrast)))
#'
#' ph = getpophist2.cells(h = landscape$details$ncells, xdim = landscape$details$x.dim, ydim = landscape$details$y.dim,
#' landscape=landscape,
#' refs=refpops,
#' refsz=parms$ref_Ne,
#' lambda=parms$lambda,
#' mix=parms$mix,
#' shortscale=parms$shortscale*avgCellsz,
#' shortshape=parms$shortshape,
#' longmean=parms$longmean*avgCellsz,
#' ysz=res(landscape$sumrast)[2],
#' xsz=res(landscape$sumrast)[1],
#' K = parms$Ne)
#' testPophist(ph, landscape)
#'
#' @seealso \code{\link{ashSetupLandscape}}, \code{\link{getpophist2.cells}}, \code{\link{make.gmap}}, \code{\link{pophist.aggregate}}, \code{\link{runFSC_step_agg3}}
#'
#' @export
testPophist <- function(ph,landscape)
{
    ## start from success; each failed check flips this to FALSE
    ok <- TRUE
    popdf <- landscape$sampdf
    ## 1) every sampled cell must hold a population of size >= 1 at the
    ##    final time step of the forward simulation
    nonfilled <- popdf[which(ph$Nvec[popdf$cell,ncol(ph$Nvec)]<1),]
    if (nrow(nonfilled)>0)
    {
        ok <- FALSE
        message(paste("These pops have size < 1 at end of simulation",paste(nonfilled$abbrev,collapse=",")))
        message(paste("Sizes, respectively:", paste(ph$Nvec[nonfilled$cell,ncol(ph$Nvec)],collapse=",")))
    }
    ## 2) habitat suitability at the most recent time step (last row of
    ##    hab_suit) must be non-zero for every sampled cell
    sampsuit <- landscape$hab_suit[,popdf$cell]
    if (min(sampsuit[nrow(landscape$hab_suit),])==0)
    {
        ok <- FALSE
        message(paste("habitat suitability for these sites zero at current time",
                      popdf$cell[which(sampsuit[nrow(landscape$hab_suit),]==0)])
                )
    }
    ## 3) the raster extents of the simulated landscape and the sample
    ##    locations must agree.
    ## BUGFIX: all.equal() returns a character vector (not FALSE) when the
    ## objects differ, and negating a character vector is an error, so the
    ## original `!all.equal(...)` crashed on mismatch.  isTRUE() makes the
    ## mismatch report instead.
    if (!isTRUE(all.equal(extent(landscape$sumrast),extent(landscape$samplocsrast))))
    {
        message("the extent of the landscape and the samples is different")
        ok <- FALSE
    }
    ok
}
#' Tests aggregation scheme before coalescent simulation
#'
#' Test the gmap aggregation scheme against a landscape to see if cells with genetic samples are being combined
#'
#' @param gmap data frame that maps forward time populations to genetic populations (from \code{make.gmap()})
#' @param landscape the landscape object used in the forward simulation
#'
#' @details
#' Avoid combining cells with two separate genetic samples during cell aggregation
#'
#' @return
#' Returns a logical (TRUE or FALSE) indicating whether the aggregation scheme specified by gmap would combine two cells with genetic samples. Combining genetic samples can result in problems during subsequent coalescent simulations. If FALSE, no issues are expected.
#'
#' @examples
#' library(holoSimCell)
#' parms <- drawParms(control = system.file("extdata/ashpaper","Ash_priors.csv",package="holoSimCell"))
#' load(file=paste0(system.file(package="holoSimCell"),"/extdata/landscapes/",pollenPulls[[1]]$file))
#' refpops <- pollenPulls[[1]]$refs
#' avgCellsz <- mean(c(res(landscape$sumrast)))
#'
#' ph = getpophist2.cells(h = landscape$details$ncells, xdim = landscape$details$x.dim, ydim = landscape$details$y.dim,
#' landscape=landscape,
#' refs=refpops,
#' refsz=parms$ref_Ne,
#' lambda=parms$lambda,
#' mix=parms$mix,
#' shortscale=parms$shortscale*avgCellsz,
#' shortshape=parms$shortshape,
#' longmean=parms$longmean*avgCellsz,
#' ysz=res(landscape$sumrast)[2],
#' xsz=res(landscape$sumrast)[1],
#' K = parms$Ne)
#'
#' gmap=make.gmap(ph$pophist,
#' xnum=2, #number of cells to aggregate in x-direction
#' ynum=2) #number of aggregate in the y-direction
#'
#' doesGmapCombine(gmap, landscape)
#'
#' @seealso \code{\link{getpophist2.cells}}, \code{\link{make.gmap}}, \code{\link{pophist.aggregate}}, \code{\link{runFSC_step_agg3}}
#' @export
doesGmapCombine <- function(gmap, landscape)
{
    ## TRUE will mean "this aggregation scheme is problematic".
    ok <- FALSE
    sdf <- landscape$sampdf
    ## Restrict the aggregation map to the cells that carry genetic samples.
    tgm <- gmap[sdf$cell, ]
    tgm <- tgm[order(tgm$gpop), ]
    ## After sorting by gpop, a run of length > 1 means two or more sampled
    ## cells were assigned to the same aggregated genetic population.
    r <- rle(tgm$gpop)
    if (max(r$lengths) > 1)
    {
        ok <- TRUE
        pops <- r$values[which(r$lengths > 1)]
        ## Use %in% (not ==) so the report is correct when more than one gpop
        ## absorbs multiple sampled cells; `==` silently recycled `pops`
        ## against gmap$gpop and named the wrong populations.
        message(paste("this gmap combines populations",
                      paste(sdf$abbrev[sdf$cell %in% gmap$pop[gmap$gpop %in% pops]],
                            collapse = ", ")))
    }
    ok
}
|
544d41115ac21a5f99c15890b417f3ee088168c7
|
3299bb7303681f7305c0b2996f48f145fa5bb98b
|
/R/reexports.R
|
fd5246842cf45e307939e792f9101cc6d3296306
|
[] |
no_license
|
paulponcet/tribe
|
a64be02c1f342cc5f2907dbbb8a710c52f590a98
|
0c47c0a79ee9870207ad66c1f3df39006a0d40bc
|
refs/heads/master
| 2020-06-14T09:14:21.926723
| 2019-11-23T22:29:40
| 2019-11-23T22:29:40
| 75,429,883
| 5
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 56
|
r
|
reexports.R
|
# Re-export rlang's attribute-access operator `%@%` so that users of this
# package can use it without attaching rlang themselves.
#' @importFrom rlang "%@%"
#' @export
#'
rlang::"%@%"
|
53fc6063440863286cb69be9f61535e8d8b74036
|
dd93d8ca4fd860d4187ebff8385fe9b3c29b5084
|
/r_scripts/unops_scratch.R
|
be04e843dffa27e6f859fce2e9bb059ed3c78f88
|
[
"MIT"
] |
permissive
|
ricpie/E2E-Data-Analysis
|
4e2d9f1fd55f4f23416bf950ad32229977644eba
|
75b7cc45c25950ae5253ae1dca29425f1bbba27c
|
refs/heads/master
| 2020-12-03T18:47:11.544931
| 2020-12-01T23:52:02
| 2020-12-01T23:52:02
| 231,430,828
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 12,956
|
r
|
unops_scratch.R
|
# unops_ap[, sum_TraditionalManufactured_minutes:=as.numeric(sum_TraditionalManufactured_minutes)]
# unops_ap[, sum_TraditionalManufactured_minutes:=as.numeric(sum_TraditionalManufactured_minutes)]
# unops_ap[, sum_charcoal_jiko_minutes:=as.numeric(sum_charcoal_jiko_minutes)]
# unops_ap[, sum_traditional_non_manufactured:=as.numeric(sum_traditional_non_manufactured)]
# unops_ap[, sum_lpg:=as.numeric(sum_lpg)]
# unops_ap[sum_TraditionalManufactured_minutes==0, sum_TraditionalManufactured_minutes:=0.001]
# unops_ap[sum_TraditionalManufactured_minutes == 0, sum_TraditionalManufactured_minutes := 0.001]
# unops_ap[sum_charcoal_jiko_minutes == 0, sum_charcoal_jiko_minutes := 0.001]
# unops_ap[sum_traditional_non_manufactured == 0, sum_traditional_non_manufactured := 0.001]
# unops_ap[sum_lpg == 0, sum_lpg := 0.001]
# unops_ap[meanCO_ppmKitchen == 0, meanCO_ppmKitchen := unops_ap[meanCO_ppmKitchen!=0, min(meanCO_ppmKitchen, na.rm=T)]]
# unops_ap[meanCO_ppmLivingRoom == 0, meanCO_ppmLivingRoom := unops_ap[meanCO_ppmLivingRoom!=0, min(meanCO_ppmLivingRoom, na.rm=T)]]
# unops_ap[meanCO_ppmAmbient == 0, meanCO_ppmAmbient := unops_ap[meanCO_ppmAmbient!=0, min(meanCO_ppmAmbient, na.rm=T)]]
# unops_ap[meanCO_indirect_nearest == 0, meanCO_indirect_nearest := unops_ap[meanCO_indirect_nearest!=0, min(meanCO_indirect_nearest, na.rm=T)]]
# unops_ap[meanCO_indirect_nearest_threshold ==0, meanCO_indirect_nearest_threshold := unops_ap[meanCO_indirect_nearest_threshold!=0, min(meanCO_indirect_nearest_threshold, na.rm=T)]]
#shift the 0 sums to 0.0001
#kitchen sink
# k <- lm(cook_pm_log ~ co2 + kit_pm + lr_pm + amb_pm + cook_co + kit_co + lr_co + amb_co + trad_mins + lpg_mins + walls_with_eaves + kit_vol + door_win_area + ses + aer + primary_stove + day_of_week, data = unops_ap)
# summary(k)
# model_performance(k)
# check_model(k)
# k_optimum <- ols_step_best_subset(k)
# alarm()
# alarm()
# alarm()
# alarm()
# alarm()
# alarm()
# alarm()
# alarm()
# alarm()
#unops scratch
m1 <- lm(log_meanPM25Cook ~ unique_primary_stove, data = unops_log)
model_performance(m1)
# check_model(m1)
m2 <- lm(log(meanPM25Cook) ~ unique_primary_stove + score, data = unops_log)
model_performance(m2)
# check_model(m2)
m3 <- lm(log(meanPM25Cook) ~ log(meanPM25Kitchen), data = unops_log)
model_performance(m3)
m4 <- lm(log(meanPM25Cook) ~ log(meanPM25Kitchen) + unique_primary_stove, data = unops_log)
model_performance(m4)
m5 <- lm(log(meanPM25Cook) ~ log(meanPM25Kitchen) + unique_primary_stove + score, data = unops_log)
model_performance(m5)
m6 <- lm(log(meanPM25Cook) ~ log(meanPM25Kitchen) + unique_primary_stove + score + aer, data = unops_log)
model_performance(m6)
m7 <- lm(log(meanPM25Cook) ~ log(meanPM25Kitchen) + unique_primary_stove + score + walls_w_eaves_n + volume + door_win_m2, data = unops_log)
model_performance(m7)
compare_performance(m1, m2, m3, m4, m5, m6, m7, rank = TRUE)
m8 <- lm(log(meanPM25Cook) ~ unique_primary_stove + score + walls_w_eaves_n + volume + door_win_m2 +sum_TraditionalManufactured_minutes + sum_traditional_non_manufactured, data = all_merged_summary)
model_performance(m8)
summary(m8)
m9 <- lm(log(meanPM25Cook) ~ log(meanpm25_indirect_nearest), data = all_merged_summary)
summary(m9)
m10 <- lm(log(meanPM25Cook) ~ log(meanpm25_indirect_nearest_threshold) + unique_primary_stove + score , data = all_merged_summary)
summary(m10)
m11 <- lm(log(meanPM25Cook) ~ log(meanCO_ppmCook) + unique_primary_stove + aer + score, data = all_merged_summary)
summary(m11)
m12 <- lm(log_meanPM24Cook ~ log_meanPM25Kitchen + log_meanCO_ppmCook + log_meanpm25_indirect_nearest_threshold80 + walls_w_eaves_n + volume + door_win_m2 + score + aer + unique_primary_stove, data = all_merged_summary)
k <- ols_step_all_possible(m12)
plot(ols_step_best_subset(m12))
# One origami fold: fit a linear model on the fold's training split,
# predict on its validation split, and return the fitted coefficients
# plus the per-observation squared prediction errors.
cv_lm <- function(fold, data, reg_form) {
  # the outcome is the first whitespace-delimited token of the formula
  # string, e.g. "y" in "y ~ x1 + x2"
  response <- as.character(unlist(str_split(reg_form, " "))[1])
  response_col <- as.numeric(which(colnames(data) == response))
  # origami exposes this fold's row split through training()/validation()
  fit <- lm(as.formula(reg_form), data = training(data))
  held_out <- validation(data)
  yhat <- predict(fit, newdata = held_out)
  # squared errors on the held-out rows (named SE for downstream averaging)
  sq_err <- (yhat - held_out[, response_col])^2
  list(coef = data.frame(t(coef(fit))), SE = sq_err)
}
library(origami)
# Add the log-exposure outcome used as the CV regression target.
# NOTE(review): the name says PM24 but the value is log(meanPM25Cook);
# the typo is used consistently elsewhere in this script, so it is kept.
all_merged_summary[, log_meanPM24Cook := log(meanPM25Cook)]
# V-fold cross-validation of the linear model via origami::cross_validate(),
# delegating the per-fold fit/predict to cv_lm() defined above.
folds <- make_folds(as.data.frame(all_merged_summary))
cvlm_results <- cross_validate(cv_fun = cv_lm, folds = folds, data = as.data.frame(all_merged_summary),
                               reg_form = "log_meanPM24Cook ~ unique_primary_stove + score + aer")
# Mean cross-validated squared error across all held-out observations.
mean(cvlm_results$SE, na.rm=TRUE)
# check_model(m1)
#separate frames for datastreams
pm_pollution <- c('PATS_1m', 'PATS_2m', 'PATS_Kitchen', 'PATS_LivingRoom', 'PATS_Ambient', 'pm_compliant', 'PM25Cook', 'PM25Kitchen', 'ECM_kitchen', 'pm25_conc_beacon_nearest_ecm', 'pm25_conc_beacon_nearestthreshold_ecm', 'pm25_conc_beacon_nearestthreshold_ecm80')
co_pollution <- c('CO_ppm1m', 'CO_ppm2m', 'CO_ppmCook', 'CO_ppmKitchen', 'CO_ppmLivingRoom', 'CO_ppmAmbient', 'co_estimate_beacon_nearest', 'co_estimate_beacon_nearest_threshold')
metadata <- c('datetime', 'date', 'HHID', 'HHIDnumeric', 'stovetype', 'pm_primary_stove')
sums <- c('sumstraditional_non_manufactured', 'sumslpg', 'sumstraditional_manufactured', 'sumscharcoal.jiko')
#drop wonky household, spare pain
unops <- unops[HHID!='KE238-KE06']
### CRONCHY DATA TABLES MMM
#pm
pm <- unops[, c(metadata, pm_pollution), with = F]
pm_long <- melt(pm, id.var = c('datetime', 'date', 'HHID', 'HHIDnumeric', 'stovetype', 'pm_primary_stove', 'pm_compliant'))
pm_long[, rollmean := frollmean(value, n = 15), by = 'HHID,HHIDnumeric,pm_primary_stove,variable']
pm_long_summary <- pm_long[, list(
compliance = length(pm_compliant[!is.na(pm_compliant)]),
avg = mean(value, na.rm = T),
max_15m = max(rollmean),
sd = sd(value, na.rm = T),
n = length(value[!is.na(value)]),
start = min(datetime),
stop = max(datetime)
), by = 'HHID,HHIDnumeric,pm_primary_stove,variable']
setkey(pm_long_summary, HHID, date)
pm_long_summary[, c(NA, diff(date)), by ='HHID'][V1>1]
pm_long_summary[HHID == 'KE238-KE06']
pm_summary_wide <- dcast.data.table(pm_long_summary, HHID + HHIDnumeric + pm_primary_stove + compliance + start + stop ~ variable, value.var = c('avg', 'max_15m', 'sd', 'n'))
#fin
#co
co <- unops[, c(metadata, co_pollution), with = F]
co_long <- melt(co, id.var = c('datetime', 'date', 'HHID', 'HHIDnumeric', 'stovetype', 'pm_primary_stove'))
co_long[, rollmean := frollmean(value, n = 15), by = 'HHID,HHIDnumeric,pm_primary_stove,variable']
co_long_summary <- co_long[, list(
avg = mean(value, na.rm = T),
max_15m = max(rollmean),
sd = sd(value, na.rm = T),
n = length(value[!is.na(value)]),
start = min(datetime),
stop = max(datetime)
), by = 'HHID,HHIDnumeric,pm_primary_stove,variable']
setkey(co_long_summary, HHID, date)
co_long_summary[, c(NA, diff(date)), by ='HHID'][V1>1]
co_summary_wide <- dcast.data.table(co_long_summary, HHID + HHIDnumeric + pm_primary_stove + start + stop ~ variable, value.var = c('avg', 'max_15m', 'sd', 'n'))
#fin
###SUMS
#extract variable
sums <- unops[, c(metadata, sums), with = F]
#make long
sums_long <- melt(sums, id.var = c('datetime', 'date', 'HHID', 'HHIDnumeric', 'stovetype', 'pm_primary_stove'))
#turn boolean into numeric
sums_long[, value:=as.numeric(value)]
#unique stoves
sums_long[, unique(pm_primary_stove)]
#make long summary
sums_long_summary <- sums_long[, list(
mins = sum(value, na.rm = T),
n = length(value[!is.na(value)]),
start = min(datetime),
stop = max(datetime)
), by = 'HHID,HHIDnumeric,pm_primary_stove,variable']
#make wide summary
sums_wide_summary <- dcast.data.table(sums_long_summary, HHID + HHIDnumeric + pm_primary_stove + start + stop ~ variable, value.var = c("mins", "n"))
#fin
# remerge all
setkey(pm_summary_wide)
setkey(co_summary_wide)
setkey(sums_wide_summary)
unops_ap <- merge(merge(pm_summary_wide, co_summary_wide), sums_wide_summary)[HHID!='KE238-KE06']
# SES
ses <- as.data.table(readRDS('~/Dropbox/UNOPS emissions exposure/E2E Data Analysis/Processed Data/predicted_ses.rds'))
ses[, id := NULL]
merge(unops_ap, ses, by = "HHID", all.x = T)
corr_var(unops_summary[,-c('Date','Start_datetime','End_datetime','meanPM25Cook')], # name of dataset
cook_log, # name of variable to focus on
top = 25, logs = T # display top 5 correlations
)
#impute by median
#model hierarchy
#PE ~ stove type / fuel type only
#PE ~ stove type + survey data only
#PE ~ kitchen PM
#PE ~ kitchen PM * 0.742
#PE ~ kitchen CO
#PE ~ kitchen PM + kitchen CO
# ...
#PE ~ beacon
#PE ~ beacon + survey
# SES score - keep it valued
# For all 2000 households
# all major assets, household features (owning vs renting), housing characteristics
# emissions - volume, eaves
#
# meta emissions
# lascar 1.5m monitor
# supplementing with TSI if available
# HHID-full
#all_merged_summary
#build linear models. plug in values for long term monitoring. around 20 households were intensive.
#compare single measure to long term measure.
#kitchen * .742; compare with personal
# meanpm25_indirect_nearest
# meanpm25_indirect_nearest_threshold - didn't perform as well as threshold 80. The more loose algorithm performed better than the more strict one.
# meanCO_indirect_nearest
# meanCO_indirect_nearest_threshold
#coefficients R2s
# overview <- melt.data.table(
# data.table(
# predictors = 1:19,
# `Adj R2` = summary(final_subset_lm)$adjr2,
# Cp = summary(final_subset_lm)$cp,
# BIC = summary(final_subset_lm)$bic
# ), id.var = 'predictors')
# ggplot(aes(predictors, value), data = overview) +
# geom_line(aes(color = variable), show.legend = F) +
# theme_bw() +
# geom_point(aes(shape = variable, color = variable), show.legend = F) +
# facet_wrap( ~ variable, ncol = 4, scales = 'free_y') +
# theme_bw() +
# theme(
# strip.background = element_blank(),
# strip.text = element_text(face = 'bold'),
# panel.border = element_blank()
# ) +
# labs(x = 'Number of Predictors', y = 'Value')
for(j in 1:k){
best_subset <- regsubsets(cook_pm ~ primary_stove + co2 + kit_pm + lr_pm + amb_pm + cook_co + kit_co + lr_co + amb_co + bcn_pm_thres80 + trad_mins + lpg_mins + walls_with_eaves + kit_vol + door_win_area + ses + aer + day_of_week, data = unops_ap[folds != j, ], nvmax = 22)
for(i in 1:n_vars){
pred_x <- predict.regsubsets(best_subset, unops_ap[folds == j, ], id = i)
cv_errors[j, i] <- mean((unops_ap$cook_pm_log[folds == j] - pred_x)^2)
}
}
mean_cv_errors <- colMeans(cv_errors, na.rm=TRUE)
plot(mean_cv_errors, type = "b")
final_subset <- regsubsets(cook_pm ~ primary_stove + co2 + kit_pm + lr_pm + amb_pm + cook_co + kit_co + lr_co + amb_co + bcn_pm_thres80 + trad_mins + lpg_mins + walls_with_eaves + kit_vol + door_win_area + ses + aer + day_of_week, data = unops_ap, nvmax = 22)
coef(final_subset, 6)
summary(lm(cook_pm ~ primary_stove + cook_co + kit_co + lr_co + bcn_pm_thres80 + kit_vol, data = unops_ap))
overview <- melt.data.table(
data.table(
predictors = 1:22,
`Adj R2` = summary(final_subset)$adjr2,
Cp = summary(final_subset)$cp,
BIC = summary(final_subset)$bic
), id.var = 'predictors')
ggplot(aes(predictors, value), data = overview) + geom_line(aes(color = variable), show.legend = F) + theme_bw() + geom_point(aes(shape = variable, color = variable), show.legend = F) + facet_wrap( ~ variable, ncol = 4, scales = 'free_y') + theme_bw() +
theme(strip.background = element_blank(), strip.text = element_text(face = 'bold'), panel.border = element_blank()) +
labs(x = 'Number of Predictors', y = 'Value')
unops_ap[, predicted := exp(predict.regsubsets(final_subset, unops_ap, id = 7))]
unops_ap[, predicted2 := exp(predict.regsubsets(final_subset, unops_ap, id = 22))]
with(unops_ap, Metrics::rmse(cook_pm, predicted2))
ggplot(aes(cook_pm, predicted), data=unops_ap) + geom_point() + theme_bw() + labs(y = expression(paste("Predicted PM"[2.5], " Exposure ", ("μg/m"^{3}))), x = expression(paste("Measured PM"[2.5], " Exposure ", ("μg/m"^{3})))) + scale_x_log10() + scale_y_log10() + geom_smooth(method = 'lm') + geom_abline(linetype = 'dashed') + ggtitle(expression("Relationship between predicted and measured personal exposures to PM"[2.5]))
ggplot(aes(cook_pm, predicted), data=unops_ap) + geom_point() + theme_bw() + labs(x = expression(paste("Predicted PM"[2.5], " Exposure ", ("μg/m"^{3}))), y = expression(paste("Measured PM"[2.5], " Exposure ", ("μg/m"^{3})))) + scale_x_continuous(limits = c(0, 700), breaks = seq(0,700,50)) + scale_y_continuous(limits = c(0, 700), breaks = seq(0,700,50)) + geom_smooth(method = 'lm') + geom_abline(linetype = 'dashed')
|
cd484a1753b02f114e7449a2df9e33774b0f7086
|
77157987168fc6a0827df2ecdd55104813be77b1
|
/palm/inst/testfiles/euc_distances/libFuzzer_euc_distances/euc_distances_valgrind_files/1612969129-test.R
|
af17eaca7816d183f42e5e29350cc15ef848941b
|
[] |
no_license
|
akhikolla/updatedatatype-list2
|
e8758b374f9a18fd3ef07664f1150e14a2e4c3d8
|
a3a519440e02d89640c75207c73c1456cf86487d
|
refs/heads/master
| 2023-03-21T13:17:13.762823
| 2021-03-20T15:46:49
| 2021-03-20T15:46:49
| 349,766,184
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,719
|
r
|
1612969129-test.R
|
testlist <- list(x1 = Inf, x2 = NaN, y1 = c(2.7502984733185e-135, NaN, 5.48545699190191e+303, NaN, 3.9886384405544e-91, 1.13923782404367e-305, 2.75909158067388e-306, 1.38523901781317e-309, NaN, 3.3487723144616e-303, 1.18182126306977e-125, 4.13000185147604e-27, 8.37116099366851e+298, 7.51095820725777e+105, NaN, -5.82900682309329e+303, NaN, 4.98669010876345e-312, NaN, -2.11899199031169e+307, NaN, NaN, NaN, NaN, 5.43189477256323e-312, 1.67635632281809e-260, -3.52444640470551e+196, 2.81776912248137e-202, 2.81776900841821e-202, 2.81776900841821e-202, 4.48896483198783e-260, 5.38128466814806e-202, NaN, 7.99173712248595e-308, 2.7744800176236e+180, NaN, 2.77448001762442e+180, NaN, 2.12455197126707e+183, -0.000153938424773514, 0, NaN, NaN, 5.48545699190191e+303, 1.39889278681467e-306, NaN, 7.15131437095871e-304, 2.39021688577355e-310, -1.41149075388308e+204, NaN, 7.29023190852836e-304, 5.43225836212213e-312, -5.48612406879369e+303, 9.33678471000373e-313, 3.10412454864856e+168, 2.7744800222921e+180, 2.77448001762442e+180, 2.77448001762435e+180, 2.77448001762435e+180, 2.77447923525493e+180, 9.52157095844336e-307, 2.77447923395664e+180, 2.77448001762435e+180, 2.77448001762435e+180, 2.77448001761244e+180, 2.82010673669098e+179, 2.77448001762435e+180, 2.81776900886378e-202, -5.00181487693708e+204, NaN, 1.03833873673291e-309, 0, NaN, -3.73056436677971e+305, -1.48791703540521e+193, 2.01042830921278e+180, -4.69526163440602e+305, -5.77007384969805e+303, 2.77427939097716e+180, 2.83962612749882e+238, 2.8396262443943e+238, 2.8396262443943e+238, 2.8396262443943e+238, 2.8396262443943e+238, 2.8396262443943e+238, 2.77448121255285e+180, 6.8990322149929e-310, NaN, NaN), y2 = c(-2.25310239761229e+204, 5.7537465036642e-49, 1.15163156848979e-309, -4.65661287307739e-10, NaN, 6.45271418143364e-307, 2.81025057772722e-202, 6.48024597825776e-198, NaN, -6.54705712752652e-21, 2.77447996824898e+180, -2.56842573317787e+207, 1.98937072828034e+87, NaN, NaN, -1.54223965309304e+206, NaN, 10843961477955214, 
9.26419250662757e+159, 2.81776900883407e-202, 2.77428335534071e+180, 2.77448001762435e+180, 2.77448001762435e+180, -1.78179153423514e+92, NaN, 2.77448001764258e+180, 2.77448001762435e+180, 2.77448001762435e+180, 2.77448001762435e+180, 2.85729382227919e-202, 2.77432333142229e+180, -1.55510104271851e+206, NaN, 2.12439534740444e+183, 2.77448001762435e+180, 1.7572552928111e+159, 2.81773963955111e-202, -3.9851050691209e-08, -3.73056234457184e+305, NaN, 2.77448002229187e+180, -1.31666977453567e+305, 2.81776900842191e-202, 2.93180384159114e-202, NaN, 2.82524440463152e-202, 3.64710345055108e+178, 5.3803634585478e-202, NaN))
result <- do.call(palm:::euc_distances,testlist)
str(result)
|
5cd1b8d69159b7d604032e6d79cac94b76356846
|
a6e33678f7f3e3cfd6e9d4d1fe94a568bcc561d2
|
/paragrapq20.R
|
d45e33ea85dee3ce00c604c6c7a58e03599f979e
|
[] |
no_license
|
ibradley711/36-350
|
e95577d72ef52c9b4fe3e3343aa711f75ab7a029
|
7656f128bbfee23e5d91cb0f7c503fda09838bfa
|
refs/heads/master
| 2021-08-28T00:55:29.769182
| 2017-12-10T23:59:29
| 2017-12-10T23:59:29
| 109,596,842
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 733
|
r
|
paragrapq20.R
|
On Earth we have places called universities, where students learn information about various academic fields. One such university is Carnegie Mellon, where one of the academic fields is Statistics, in which professors teach various classes. One of those classes is called 36-350 Statistical Computing; I am writing this paragraph for that class. On Earth we also have things called sports, where people play a certain game with rules against an opponent; the best
sports to watch are soccer, football, and basketball. Arsenal is the best soccer team, and the Cubs, a baseball team from Chicago, a city (a collection of people and buildings) on Earth, won a World Series Championship after not winning one for over 100 years.
|
05a3d97d27867458fec5009168e1e39f7a97282b
|
77eee0a81f69d7273c604aa27a35ed96f4cc031c
|
/Plotting/Base package/20150328 Error Distribution Plot.R
|
90115831015e7318051e0c82e4946f74d0b48a03
|
[] |
no_license
|
rsankowski/usefulRcodes
|
ef1afd11a173407c8521c23d57786a36723b6a9d
|
efa9a116ef6789e0a81496d796f9c1b03cf6999a
|
refs/heads/master
| 2021-01-19T05:48:29.617304
| 2015-05-24T17:47:24
| 2015-05-24T17:47:24
| 35,740,218
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,183
|
r
|
20150328 Error Distribution Plot.R
|
## Strain-specific error-count density plot for the clock-maze data.
## NOTE(review): scratch/analysis script with interactive leftovers
## (head(), str(), ?plot); paths are absolute to one author's machine.
setwd("/Users/romansankowski/Dropbox/Writing/My Authorships/20141213 Clockmaze Priject/strategies/")
DT <- read.csv("/Users/romansankowski/Dropbox/Writing/My Authorships/20141213 Clockmaze Priject/strategies/errors.csv", header = T)
head(DT)
## NOTE(review): DT2 is used below but never defined in this script --
## presumably it was created interactively; confirm before re-running
## the script end-to-end.
head(DT2)
write.table(DT2, "/Users/romansankowski/Dropbox/Writing/My Authorships/20141213 Clockmaze Priject/strategies/summary stats.txt", sep="\t")
str(DT)
## Drop rows with missing values, then pull out one error-count column
## per mouse strain.
DF<- na.omit(DT)
SWISS <- DF[,1]
Balbc <- DF$Balbc
C57 <- DF$C57
NR2AKO <- DF$DUN
head(NR2AKO)
tail(SWISS)
## Overlay kernel density estimates of the number of errors, one line
## style/colour combination per strain.
plot(density(C57), col="black", lwd = 3, main = "Strain-specific Number of Errors Distribution",
     xlab="Number of Errors", ylab = "Density")
lines(density(Balbc), col = "grey", lwd = 3)
lines(density(NR2AKO), col="black", lwd = 3, lty = "dashed")
lines(density(SWISS), col="grey", lwd = 3, lty = "dotted")
legend("topright", # places a legend at the appropriate place
       c("C57 (n=49)","Balbc (n=60)", "NR2aKO (n=53)", "SWISS (n=39)"), # puts text in the legend
       lty=c("solid", "solid", "dashed", "dotted"), # gives the legend appropriate symbols (lines)
       lwd=c(3,3,3,3),col=c("black","grey","black","grey"), title="Strain")
?plot
boxplot(DF)
sd(NR2AKO)
|
770858fe29ef211b992cc7c073adfcf3c3809db1
|
33c215081baf0ac47554b2708a6cdd79930d59ae
|
/refine ellis fmps_1_scaleNaomi.R
|
3745dd8ccfe37f62a7127ab6074d53f9096aa224
|
[] |
no_license
|
hugh1li/provat_ufp_lur
|
6a8f618ae2059a1413d5d2f6588595201777566e
|
b2d52ec7c906c9cb6d8de020185654f2c8779f01
|
refs/heads/master
| 2020-03-27T21:04:05.120195
| 2019-03-31T02:10:38
| 2019-03-31T02:10:38
| 147,112,963
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 4,416
|
r
|
refine ellis fmps_1_scaleNaomi.R
|
library(tidyverse)
library(rgdal)
library(lubridate)
# gpx read
# I see backup folder
# i think they are the same, and i just load the files in the normal folder (and assume backup maximum size == the normal one)
#1.1 GPS----
# GPS speed units in m/s!
setwd('data/FMPS_winter_ellis/gps_raw/')
GPS_raw <- list.files(pattern = ".gpx")
# Read one GPX track file `x` and return a tibble of elevation, timestamp
# (parsed from the GPX time field), speed (m/s, per the note above), and
# lon/lat coordinates.
#
# Bug fix: the layer list was previously queried from one hard-coded file
# name, so the function only worked while every GPX export happened to share
# that file's layer ordering; it now inspects the file it is actually given.
GPS_transform <- function(x){
  layers <- ogrListLayers(x)
  # layer 5 holds the track points in these BadElf exports
  x %>% readOGR(layer = layers[5], stringsAsFactors = FALSE) %>% as_tibble() %>%
    select(Elev = ele, DateTime = time, Speed = badelf_speed, Lon = coords.x1, Lat = coords.x2) %>%
    mutate(DateTime = parse_date_time(DateTime, "%Y/%m/%d %H:%M:%Sz"))
}
GPS_UTC <- GPS_raw %>% purrr::map_df(GPS_transform)
GPS_EST <- GPS_UTC %>% mutate(DateTime = with_tz(DateTime, tz = "EST"))
# fix some weird speed issues or weird GPS latitudes, lon.
GPS_EST_f <- GPS_EST %>% filter(Speed < 200, Lat > 40, Lat < 41, Lon > -81, Lon < -79, year(DateTime) %in% c(2016, 2017, 2018), Elev > 0) %>% distinct(DateTime, .keep_all = TRUE)
# ?change speed limit (17 m/s ~ 100), nope, coz you can jsut explain it's the GPS accuracy, and it makes me lost 4.6 hours of data (pretty much they are just stationary data).
# second(GPS_EST_f$DateTime[1]), don't need to round
# need to remove the duplicates
GPS_EST_f1 <- distinct(GPS_EST_f, DateTime, .keep_all = TRUE)
GPS_EST_f2 <- distinct(GPS_EST_f)
#well, I will adopt the f1 one (i saw several numbers missing in f2 is because that second is in previous rows)
setwd("../../..")
write_rds(GPS_EST_f2, 'data/refined_data/WinterGPS.rds')
GPS_shapefile <- as.data.frame(GPS_EST_f2)
coordinates(GPS_shapefile) <- ~Lon + Lat
proj4string(GPS_shapefile) <- "+proj=longlat +datum=WGS84"
write_rds(GPS_EST_f2, 'data/refined_data/WinterGPS.rds')
write_csv(GPS_EST_f2, 'data/refined_data/WinterGPS.csv')
# Write to shapefile to verify my sp method is right
writeOGR(obj = GPS_shapefile, dsn = "data/gps_shape", layer = "GPS_new_winter", driver = 'ESRI Shapefile')
# 1.2 fmps extract and recal----
# i eyeball every file, removed two trip files. lul
# MY PREVIOUS experience
# check how many files have two trips inside. Then you have to rename files
# then FMPS, I see txt and fmps specific file. I think i can just use txt. coz they correspond one to one (FMPS file)
setwd("data/FMPS_winter_ellis/fmps_raw/")
fmps_files <- list.files(pattern ='*.txt')
all_lines <- purrr::map(fmps_files, ~read_lines(.x, n_max = 2))
Alldates <- purrr::map(seq(length(fmps_files)), ~all_lines[[.]][str_detect(all_lines[[.]], pattern = "^Date")])
# use this one, FMPS change the date time for manual one trip file.
# Read the i-th FMPS export (global `fmps_files`) and attach a 1-Hz DateTime
# column derived from that file's header "Date" line (global `Alldates`).
# Returns the tibble of 32 numeric size-bin columns plus DateTime (EST).
FMPS_timeToDatetime <- function(i){
  # column spec: skip the first column, read 32 size bins as doubles, skip
  # the last (34 raw columns in total)
  coltype <- paste(c("-", rep("d", 32), "-"), collapse = "") # Ellis's files total concentration directly follows the last size bin + datetime column
  # though i was not quite sure why only ignore one column after rep(d, 32)
  test <- read_tsv(fmps_files[i], col_names = FALSE, col_types = coltype, skip = 15) # some only need to skip 14 lines, but one more is fine
  # parse the header date (month-day-year hms) in EST
  test_Dates <- Alldates[[i]]
  test_dt <- mdy_hms(test_Dates, tz = "EST")
  # stamp consecutive one-second timestamps starting one second after the
  # header time, one per data row
  test$DateTime <- seq.POSIXt(from = test_dt + seconds(1), by = "sec", length.out = nrow(test))
  # mdy_hms('01-02-2018 23:59:59') + seconds(1)
  # "2018-01-03 UTC" # don't need to worry about becoming another day
  test
}
FMPS_winter <- seq(1:length(fmps_files)) %>% purrr::map_df(FMPS_timeToDatetime) # with stationary data as well.
FMPS_winter1 <- select(FMPS_winter, DateTime, everything())
FMPS_correctSize_names <- unlist(str_split("DateTime,F6.04,F6.98,F8.06,F9.31,F10.8,F12.4,F14.3,F16.5,F19.1,F22.1,F25.5,F29.4,F34,F39.2,F45.3,F52.3,F60.4,F69.8,F92.5,F114.1,F138.9,F167.6,F200.8,F239.1,F283.3,F334.4,F393.3,F461.5,F540,F630.8,F735.8,F856.8,", pattern =","))[-34] # -35 is for removing that blank
names(FMPS_winter1) <- FMPS_correctSize_names
# reallign because retention in sampling lines
FMPS_winter2 <- FMPS_winter1 %>% mutate(DateTime = DateTime - seconds(7))
setwd("../../..")
# resize and calibrate
FMPS_resize <- read_csv("data/FMPS resize.csv",
col_types = cols(Intercept = col_double()))
FMPS_winter3 <- FMPS_winter2 # make a copy here...
for(i in seq(32)){
FMPS_winter3[,i+1] = FMPS_winter3[,i+1]*FMPS_resize$Slope[i] + FMPS_resize$Intercept[i] # naomi paper correction
}
write_rds(FMPS_winter3, "data/refined_data/FMPS_winter_ellis.rds")
|
4c1f5d366d23b32527fbbb603166a711ce261b64
|
d8e31459ccc57a5d0f9453798eb684eefc70f1d5
|
/plot3.R
|
22a145d68e116de02fad8a5c0dd8bdd6f703901b
|
[] |
no_license
|
milkmansrevenge/ExData_Plotting1
|
01ffa172eb799e852197a10eddfe556d1584363a
|
0408e4eeae5ff6a54bef983f46818dfd17127776
|
refs/heads/master
| 2021-01-15T20:57:33.518143
| 2016-01-10T14:19:51
| 2016-01-10T14:19:51
| 47,937,280
| 0
| 0
| null | 2015-12-13T21:33:23
| 2015-12-13T21:33:22
| null |
UTF-8
|
R
| false
| false
| 1,121
|
r
|
plot3.R
|
## plot3.R: overlay the three energy sub-metering series for the two-day
## window 2007-02-01..2007-02-02 and write the figure to plot3.png.
## Read data
data_file <- "..\\household_power_consumption.txt"
power_data <- read.table(data_file,
                         header = TRUE,
                         sep = ";",
                         na.strings = '?',
                         stringsAsFactors = TRUE)
## Set date format and extract the time period we are interested in
power_data$Date <- as.Date(power_data$Date,
                           format="%d/%m/%Y")
power_data <- subset(power_data,
                     subset=(Date >= "2007-02-01" & Date <= "2007-02-02"))
## Combine date and time into one POSIXct timestamp for the x-axis
power_data$Date_time <- as.POSIXct(paste(power_data$Date, power_data$Time))
## Plot: three line series in black/red/blue with a matching legend
png(file = "plot3.png", height = 480, width = 480, bg = "transparent")
with(power_data,{
    plot(Date_time, Sub_metering_1, type = "l",
         ylab = "Energy sub metering", xlab = "");
    lines(Date_time, Sub_metering_2, col = "red");
    lines(Date_time, Sub_metering_3, col = "blue");
    legend("topright",
           legend = c("Sub_metering_1", "Sub_metering_2", "Sub_metering_3"),
           lty = c(1, 1),
           col = c("black", "red", "blue"));
})
dev.off()
|
d2c843a7c7c8d59e775fd60c36236dba77e64359
|
82cb848b8d7e5dcdb52ba62578f094141eeb8bd2
|
/setup.R
|
270aa6b7443faaea6c8adb946782b3a03c41f727
|
[] |
no_license
|
anonymnous2023-lab/IRGA
|
0c19aae26142953bba9d712eba4f61f8710fa1df
|
5119a911fb7a793ec514dc97847db6e9d78c10e9
|
refs/heads/master
| 2023-05-14T01:03:03.901867
| 2020-12-15T04:04:20
| 2020-12-15T04:04:20
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 165
|
r
|
setup.R
|
# Specify the maximum number of CPU cores to be used
# NOTE(review): presumably consumed by the sourced scripts below -- confirm.
n_cores = 8
# Load the functions that contain the methods considered
source("BVS_IRGA.R")
source("BVS_other.R")
|
5af21f21a7cb6121173ab45337be8b3ef3c40ca0
|
d6c0595084b6f9f3a541df39d7e54ad2cdd29d8e
|
/R/convert.R
|
a8543b1c67734f3c6b49f33d9d45db5f517f8a36
|
[] |
no_license
|
cran/phenopix
|
2b6e5b2ea601de51c312e692e04ec050529bf5e8
|
9220b65ba06c6e08e1df76a365db0b78364ed684
|
refs/heads/master
| 2023-08-19T07:31:53.401802
| 2023-08-09T13:50:02
| 2023-08-09T15:30:47
| 94,452,244
| 7
| 5
| null | null | null | null |
UTF-8
|
R
| false
| false
| 330
|
r
|
convert.R
|
# Coerce a time-indexed object (e.g. a zoo series) to a data frame, adding a
# `doy` column taken from the object's index and, when `year` is supplied, a
# POSIXct `time` column built from "year-doy".
#
# The guard now uses the scalar form !(a && b) (De Morgan-equivalent to the
# original !a | !b) -- `if` takes a scalar condition, so the scalar operators
# are the idiomatic choice.
# NOTE(review): the guard is TRUE unless x inherits *both* "ts" and "zoo";
# when it inherits both, `time.new` is never assigned and the function errors
# below.  The original intent may have been `||` on positive inherits() tests;
# behavior is preserved here pending confirmation.
convert <- function(x, year=NULL) {
  if (!(inherits(x, 'ts') && inherits(x, 'zoo'))) {
    time.new <- index(x)
  }
  x.df <- as.data.frame(x)
  rownames(x.df) <- NULL
  x.df$doy <- time.new
  if (!is.null(year)) {
    # day-of-year plus year -> calendar timestamp
    time.posix <- as.POSIXct(strptime(paste(year, time.new, sep='-'), format='%Y-%j'))
    x.df$time <- time.posix
  }
  return(x.df)
}
|
fef746b7715d096114fb8451c4823ebe1786d2d6
|
26455464b8fd0bc5516f5af293dcbb9d35509295
|
/quiz-results/john_bonney/textables/R/title_row.R
|
de5646e347f9546b85dd7c4642843de0bd5b52ee
|
[
"MIT"
] |
permissive
|
PercyUBC/r-tutorial
|
dd1dd5f03dea414ea8ce38409032e7cadde98370
|
cb2b6d8f8ae8d1d2cbe1f667f93e5f997ebb4732
|
refs/heads/master
| 2022-01-26T12:13:39.214680
| 2018-08-02T21:05:12
| 2018-08-02T21:05:12
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 756
|
r
|
title_row.R
|
#' Function to create title row in LaTeX document.
#'
#' @description
#' This function creates a title row in LaTeX.
#'
#' @param panel The name of the panel (character).
#' @param title The title of the row (character).
#' @param num (numeric).
#' Build a LaTeX table title row.
#'
#' Produces "\\textbf{panel} & \\multicolumn{num}{c}{\\textbf{title}} \\\\",
#' i.e. a bold panel label followed by a bold title spanning `num` columns.
#'
#' @param panel The name of the panel (character).
#' @param title The title of the row (character).
#' @param num Number of columns the title spans (numeric).
titlerow <- function(panel, title, num) {
  if (!is.character(panel)){
    stop(sprintf("Input `panel` must be character but is %s. \n", class(panel)))
  }
  if (!is.character(title)){
    stop(sprintf("Input `title` must be character but is %s. \n", class(title)))
  }
  if (!is.numeric(num)){
    stop(sprintf("Input `array` must be numeric but is %s. \n", class(num)))
  }
  # single sprintf template instead of paste0/sprintf interleaving
  sprintf("\\textbf{%s} & \\multicolumn{%d}{c}{\\textbf{%s}} \\\\",
          panel, num, title)
}
|
7693ed2abde47153e6e776769a173d7ceef002bd
|
59833e9fe4bd6e26984a9d06b76aa15e118d462a
|
/R/ch_13_S3.R
|
12225dfc41a7b7edd0799cf1195c6c2a4051eda1
|
[] |
no_license
|
josefondrej/advanced-r
|
37aa90bac6db1ab7108f2d565a90df8d43cde1e5
|
b68c205a422dba410e13eb3f4c02cc0ac206ddc4
|
refs/heads/master
| 2023-02-24T04:09:42.819980
| 2021-01-21T20:40:26
| 2021-01-21T20:40:26
| 324,523,463
| 1
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 3,168
|
r
|
ch_13_S3.R
|
# Title : S3 Objects
# Created by: Josef Ondrej (www.josefondrej.com)
# Created on: 28.12.20
# functions are typically generic
# WARNING str is also generic
time = strptime(c("2017-01-01", "2020-05-04 03:21"), "%Y-%m-%d")
str(time)
str(unclass(time))
sloop::s3_dispatch(str(time))
# Methods have dot in name or use ftype to check
sloop::ftype(strptime)
x = list(1, 2, 3)
attr(x, "class") = "foo"
class(x) = "foo"
inherits(x, "foo")
# Low-level constructor: tag `x` with the "myclass" S3 class attribute.
new_myclass = function(x) {
  structure(x, class = "myclass")
}
new_myclass(list(1, 3, 4))
validate_myclass = function(object) {
is_valid = class(object) == "myclass"
return(is_valid)
}
myclass = function(x) {
return(new_myclass(x))
}
# Generics
# s3_methods_generic("mean") -- list them all
# Bound by naming convention to class generic_name.method_name
# sometimes the dot is used even if the function is not method
print.myclass = function(x) {
print(paste0("[MyClass] ", x))
}
print(myclass(5))
print_generics_methods = s3_methods_generic("print") # there should be row print.myclass somewhere
# if we want to define own generic
my_new_generic <- function(x) {
base::UseMethod("my_new_generic") # -- this basically just finds function called my_new_generic.class
}
# find out if function is generic
body(t)
body(t.test)
data.frame(a = c(1, 2, 3, 4), b = c(1, 2))
data.frame(a = c(1, 2, 3, 4), b = c(1, 2, 3))
# Inheritance
print.foo = function(x) {
print(paste0("[foo] ", toString(x)))
}
print.bar = function(x) {
print(paste0("[bar] ", toString(x)))
}
obj = structure(list(1, 2, "a"), class = c("foo", "bar"))
print(obj) # calls print.foo
# We’ll say that ordered is a subclass of factor because it always appears before it in the class vector, and,
# conversely, we’ll say factor is a superclass of ordered.
# The base type of the subclass should be that same as the superclass.
# The attributes of the subclass should be a superset of the attributes of the superclass.
print.bar = function(x) {
print(paste0("[bar] ", toString(x)))
NextMethod(print, x)
}
obj = structure(list(1, 2, "a"), class = c("bar", "foo"))
print(obj) # calls print.foo
# When you create a class, you need to decide if you want to allow subclasses,
# because it requires some changes to the constructor and careful thought in your methods.
# To allow subclasses, the parent constructor needs to have ... and class arguments:
new_secret = function(x, ..., class = character()) {
stopifnot(is.double(x))
secret = structure(
x,
...,
class = c(class, "secret")
)
return(secret)
}
new_secret(1.0)
# Resolving based on subclass type
# in python we would do classmethod and use cls
new_supersecret = function(x) {
new_secret(x, class = "supersecret")
}
`[.secret` <- function(x, ...) {
new_secret(NextMethod())
}
# we would like the `[.secret` to return not secret class but supersecret class
# somehow magically by default
vec_restore.secret <- function(x, to, ...) new_secret(x)
vec_restore.supersecret <- function(x, to, ...) new_supersecret(x)
`[.secret` <- function(x, ...) {
vctrs::vec_restore(NextMethod(), x)
}
# this ^^ is the solution
|
cb70fe17d92ea4d5b836942875d3c6f156b30340
|
e3f07dbce78c54e7e4d2f3946d089c0f11a789f3
|
/Plot1.R
|
d7b7b7016822d304dd0e88521f7547a8394a6f10
|
[] |
no_license
|
pmpergon/ExploratoryDataAnalysis
|
b90f4638f7996424e36451633a6b7038e3b94c4e
|
18645171354c1f412de189c4d032350a230d8b58
|
refs/heads/master
| 2021-01-01T06:11:08.547478
| 2014-10-10T22:07:07
| 2014-10-10T22:07:07
| null | 0
| 0
| null | null | null | null |
WINDOWS-1252
|
R
| false
| false
| 2,069
|
r
|
Plot1.R
|
## Exploratory Data Analysis
# Project Assignment 1 PLot #1
# Data: https://d396qusza40orc.cloudfront.net/exdata%2Fdata%2Fhousehold_power_consumption.zip
library(Defaults) #http://stackoverflow.com/questions/13022299/specify-date-format-for-colclasses-argument-in-read-table-read-csv
library(plyr)
## NOTE: household_power_consumption.txt FILE IN DIRECTORY "./ExplAnalData"
# We will only be using data from the dates 2007-02-01 and 2007-02-02.
# One alternative is to read the data from just those dates rather than reading
# in the entire dataset and subsetting to those dates.
# · Note that in this dataset missing values are coded as ?.
setDefaults('as.Date.character', format = '%d/%m/%Y')
hpc<- read.table("./ExplAnalData/household_power_consumption.txt", header=TRUE, sep=";",na.strings="?",colClasses= c("Date","character","numeric","numeric","numeric","numeric","numeric","numeric","numeric"))
# We will only be using data from the dates 2007-02-01 and 2007-02-02.
datesub0 <- (as.POSIXlt(hpc$Date)$year) == 107
hpc.sub0<- hpc[datesub0,]
datesub1 <- (as.POSIXlt(hpc.sub0$Date)$yday) == 31
datesub2 <- (as.POSIXlt(hpc.sub0$Date)$yday) == 32
hpc.sub1 <- hpc.sub0[datesub1,]
hpc.sub2 <- hpc.sub0[datesub2,]
hpc.sub <- rbind(hpc.sub1,hpc.sub2)
colnames(hpc.sub) <- colnames(hpc.sub0)
hpc.sub <- hpc.sub [complete.cases(hpc.sub),]
rm(list="hpc")
library (datasets)
# You may find it useful to convert the Date and Time variables to Date/Time classes
# in R using the strptime() and as.Date() functions.
# Merge Date & Time data
# http://stackoverflow.com/questions/11609252/r-tick-data-merging-date-and-time-into-a-single-object
hpc.sub <- mutate(hpc.sub, DateTime = as.POSIXct(paste(hpc.sub$Date, hpc.sub$Time), format="%Y-%m-%d %H:%M:%S"))
## PLOT #1 Histogram of Global Active Power: plot1.png
plot.new()
dev.cur()
dev.copy (png, file ="./ExplAnalData/plot1.png")
hist(hpc.sub$Global_active_power, col=2, main = "Global Active Power", xlab="Global Active Power (kilowatts)" )
dev.off()
|
72e5a6e0103ee9176c5e7636c467861c8f3fe96f
|
5db34fe55462f237703358e5ead7c80299de3d02
|
/R/plot.tlm.R
|
833a55d87dce3c3b0e2216fb997972bef4ddad53
|
[] |
no_license
|
cran/tlm
|
687fe4cb6d25a1086f46e61afb5faa898037f9e2
|
4a399dc84a6b38f8681ef4709c14115d89505f27
|
refs/heads/master
| 2021-01-17T07:11:00.175043
| 2017-04-10T12:15:19
| 2017-04-10T12:15:19
| 23,803,445
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 5,557
|
r
|
plot.tlm.R
|
plot.tlm <-
function(x, type = c("original", "transformed", "diagnosis"), observed = FALSE, xname = "x", yname = "y", level = 0.95, ...)
{
if (!inherits(x, "tlm"))
stop("argument 'x' must be of class 'tlm'")
if(any(is.na(coef(x$model))))
stop("plot is not available for models with any missing estimated coefficient")
### 'type' control:
type <- match.arg(type)
### 'xname' control:
if (!is.null(xname) && (!inherits(xname, "character") || is.na(xname) || length(xname) != 1))
stop("the name for the explanatory variable X 'xname' must be a character")
### 'yname' control:
if (!is.null(yname) && (!inherits(yname, "character") || is.na(yname) || length(yname) != 1))
stop("the name for the response variable Y 'yname' must be a character")
### 'level' control:
if (!inherits(level, "numeric") || level <= 0 || level >= 1 || length(level) != 1)
stop("'level' must be a number in (0, 1)")
if (type == "diagnosis")
{
par(mfrow = c(2, 2), ...)
# Diagnosis plot for the fitted model (transformed space):
plot(x$model, ...)
} else {
mod <- x$model
family <- family(mod)$family
mf <- model.frame(mod)
mt <- attr(mf, "terms")
Xclass <- attr(mt, "dataClasses")[2]
if (missing(xname)) xlabel <- names(Xclass) else xlabel <- xname
if (missing(yname)) yname <- names(mf)[1]
if (Xclass == "factor")
{
if (observed)
warning("the observations are not shown in the plot if the explanatory variable is categorical")
dat <- MY(x, space = type, level = level)
M <- dat$M
ymin <- min(M[, -1])
ymax <- max(M[, -1])
nlevels <- nrow(M)
if (type == "original")
{
ylabelpre <- switch(dat$ymeasure,
"geometric mean" = "Geometric mean",
"mean" = "Mean",
"median" = "Median",
"probability" = "Probability")
ylabel <- paste(ylabelpre, "of", yname)
} else {
ylabel <- switch(family,
"gaussian" = paste("Mean of", yname),
"binomial" = paste("Log(Odds of ", yname, ")", sep = ""),
"poisson" = paste("Log(Mean of ", yname, ")", sep = ""))
if (x$ypow == 0)
ylabel <- paste("Mean of log(", yname, ")", sep = "")
if (x$ypow != 1 & x$ypow != 0)
ylabel <- substitute("Mean of " * ynam * phantom("")^power, list(ynam = yname, power = attr(x, "ypowlabel")))
}
delta <- 0.2
plot(c(1 - delta, nlevels + delta), xlim = c(1 - delta, nlevels + delta), ylim = c(ymin, ymax), type = "n", xaxt = "n", xlab = xlabel, ylab = ylabel, ...)
axis(1, at = 1:nlevels, labels = levels(mf[, 2]))
lines(1:nlevels, M[, 2], lty = 2, col = "black")
points(1:nlevels, M[, 2], pch = 19, ...)
segments(1:nlevels, M[, 3], 1:nlevels, M[, 4], lwd = 1.5, ...)
} else {
dat <- MY(x, npoints = 500, space = type, level = level)
M <- dat$M
if (type == "original")
{
ylabelpre <- switch(dat$ymeasure,
"geometric mean" = "Geometric mean",
"mean" = "Mean",
"median" = "Median",
"probability" = "Probability")
ylabel <- paste(ylabelpre, "of", yname)
} else {
ylabel <- switch(family,
"gaussian" = paste("Mean of", yname),
"binomial" = paste("Log(Odds of ", yname, ")", sep = ""),
"poisson" = paste("Log(Mean of ", yname, ")", sep = ""))
if (x$xpow == 0)
xlabel <- paste("Log(", xname, ")", sep = "")
if (x$xpow != 1 & x$xpow != 0)
xlabel <- substitute(xnam * phantom("")^power, list(xnam = xname, power = attr(x, "xpowlabel")))
if (x$ypow == 0)
ylabel <- paste("Mean of log(", yname, ")", sep = "")
if (x$ypow != 1 & x$ypow != 0)
ylabel <- substitute("Mean of " * ynam * phantom("")^power, list(ynam = yname, power = attr(x, "ypowlabel")))
}
if (observed)
{
if (family != "gaussian")
warning("the observations are not shown in the plot for models different than the linear regression model (i.e., family 'gaussian'")
if (family == "gaussian")
{
# Plot with observations:
yobs <- model.response(mf)
xobs <- model.frame(mod)[, 2]
if (type == "original")
{
if (x$ypow == 0) yobs <- exp(yobs) else yobs <- yobs^(1 / x$ypow)
if (x$xpow == 0) xobs <- exp(xobs) else xobs <- xobs^(1 / x$xpow)
}
ymin <- min(M[, -1], yobs)
ymax <- max(M[, -1], yobs)
x <- xobs
y <- yobs
#plot(x, y, type = "p", col = "gray", pch = 19, cex = 0.6, ylim = c(ymin, ymax), xlab = xlabel, ylab = ylabel, ...)
plot(x, y, type = "p", col = "gray", ylim = c(ymin, ymax), xlab = xlabel, ylab = ylabel, ...)
lines(M[, 1], M[, 2], ...)
lines(M[, 1], M[, 3], lty = 2, ...)
lines(M[, 1], M[, 4], lty = 2, ...)
}
} else {
# Plot with no observations:
ymin <- min(M[, -1])
ymax <- max(M[, -1])
plot(M[, 1], M[, 2], type = "l", ylim = c(ymin, ymax), xlab = xlabel, ylab = ylabel, ...)
lines(M[, 1], M[, 3], lty = 2, ...)
lines(M[, 1], M[, 4], lty = 2, ...)
}
}
}
}
|
9e75cbeec6bf099f23215a7a4ed9446882875c77
|
f7200e99e92c04b4237a2f4055f73f98b196bbad
|
/R/resARMod_nlin1.R
|
aea90d3fca9bcbf7909f28e0eb77a40931374952
|
[] |
no_license
|
ChrisKust/rexpar
|
812d10a7eb8c3be26de46a44b4bf36d1bfc64abb
|
f2c3c7be835233729a0d12b723888ffaf0ef2a91
|
refs/heads/master
| 2020-04-06T07:01:50.286427
| 2016-07-28T12:02:59
| 2016-07-28T12:02:59
| 25,927,083
| 1
| 4
| null | 2016-06-22T17:22:10
| 2014-10-29T15:22:43
|
R
|
UTF-8
|
R
| false
| false
| 168
|
r
|
resARMod_nlin1.R
|
# Residuals of a first-order nonlinear AR model:
#   x[t+1] = x[t] + theta[1] * x[t]^theta[2] + e[t]
# so the residual is e[t] = x[t+1] - theta[1] * x[t]^theta[2] - x[t].
resARMod_nlin1 <- function(theta, dat) {
  x_prev <- dat[1:(length(dat) - 1)]
  x_next <- dat[2:length(dat)]
  x_next - theta[1] * x_prev^theta[2] - x_prev
}
|
9a5f59aab1cb5cfba715b5b1e6ae5531ee961dec
|
b2f61fde194bfcb362b2266da124138efd27d867
|
/code/dcnf-ankit-optimized/Results/QBFLIB-2018/A1/Database/Sauer-Reimer/ITC99/b15_PR_5_20/b15_PR_5_20.R
|
9cb1c340b0b389e94e4f9527e9bdc0b181a03a4a
|
[] |
no_license
|
arey0pushpa/dcnf-autarky
|
e95fddba85c035e8b229f5fe9ac540b692a4d5c0
|
a6c9a52236af11d7f7e165a4b25b32c538da1c98
|
refs/heads/master
| 2021-06-09T00:56:32.937250
| 2021-02-19T15:15:23
| 2021-02-19T15:15:23
| 136,440,042
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 64
|
r
|
b15_PR_5_20.R
|
aa4c54a1a16bdcf3e81fcaf6010e8be5 b15_PR_5_20.qdimacs 19981 59275
|
f4abb9db67aabd822d29f34672bc0774ffc8e19e
|
c7daf6d40483ed719ec1cfa9a6b6590df5bcdd5c
|
/products/Introduction_to_Computational_Science_Modules/02_System_Dynamics/R/R_Computational_Toolbox_Files/Ants_R/sense.R
|
adbc56c56b7f6186dc91daf0aa21fa398a7ec37c
|
[] |
no_license
|
wmmurrah/computationalScience
|
b28ab2837c157c9afcf432cc5c41caaff6fd96d1
|
a4d7df6b50f2ead22878ff68bfe39c5adb88bbbb
|
refs/heads/main
| 2023-06-01T17:40:29.525021
| 2021-06-20T01:42:54
| 2021-06-20T01:42:54
| 332,249,589
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 722
|
r
|
sense.R
|
#' Choose the direction an ant should move, based on neighbor pheromone levels.
#'
#' @param site Contents of the ant's current cell: 0 = empty; values 1..4
#'   encode the direction the ant came from (N, E, S, W).
#' @param na,ea,sa,wa Ant occupancy of the north/east/south/west neighbor
#'   cells (> 0 means another ant is there).
#' @param np,ep,sp,wp Pheromone value of the north/east/south/west neighbors.
#' @return 0 (EMPTY) if the current cell is empty; 5 (STAY) if every neighbor
#'   is blocked; otherwise a direction index 1..4, ties broken uniformly at
#'   random.
#'
#' Note: the original called utils::globalVariables() inside the function
#' body; that helper is meant for package top-level declarations (to silence
#' R CMD check), not for runtime use, so the call has been removed.
sense <- function(site, na, ea, sa, wa, np, ep, sp, wp) {
  EMPTY <- 0
  STAY <- 5
  # Nothing to sense from an empty cell.
  if (site == EMPTY) {
    return(EMPTY)
  }
  vals <- c(np, ep, sp, wp)
  # Don't allow the ant to turn back to its previous cell, so make that
  # option artificially unattractive.
  if (site < STAY) {
    vals[site] <- -2
  }
  # Don't allow the ant to move onto a cell occupied by another ant.
  occupied <- c(na, ea, sa, wa) > 0
  vals[occupied] <- -2
  best <- max(vals)
  if (best < 0) {
    # Every neighbor is blocked or forbidden: stay put.
    return(STAY)
  }
  # Break ties uniformly at random among the best-scoring directions.
  candidates <- which(vals == best)
  candidates[ceiling(runif(1, 0, length(candidates)))]
}
|
23e4f96e466d5f292bbdd05bd54164a25f89d8cd
|
d58c2100367455fa7247bd01f09b06c60a1552f1
|
/data/investment/stock_v1.R
|
9101b41b4947a300d7e2f3776cc0fb50d8529ee1
|
[] |
no_license
|
jinbin/lifebook
|
52cb8d8ca787631ecd29cb0143cf65f53c8e71e8
|
b1ed4cda692e70aff9909fb97232ff1ed7f020d2
|
refs/heads/master
| 2020-12-26T04:17:13.138709
| 2020-08-18T03:57:33
| 2020-08-18T03:57:33
| 18,483,302
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 109
|
r
|
stock_v1.R
|
#!/usr/bin/env RScript
library(hash)
stock <- read.csv("/Users/jinbin/lifebook/data/investment/stock.csv")
|
19079906af130238a53b49c500457bb3e0bca893
|
466c277d1035041e2306c65f4bf285e3edf505bb
|
/MCDS for 2 stage model.R
|
312878e4d03b51ad9d43f156a4e9ebbeed1fa44c
|
[] |
no_license
|
tmclaren1/tmclaren1
|
b935593c595625f846c4a674ccd3334f031e6b16
|
084843e688912a9a76eae6710dd7f59396fc1983
|
refs/heads/main
| 2023-07-05T14:16:47.748405
| 2021-08-12T16:09:07
| 2021-08-12T16:09:07
| 371,488,242
| 0
| 0
| null | 2021-05-27T20:30:34
| 2021-05-27T19:51:28
|
R
|
UTF-8
|
R
| false
| false
| 1,762
|
r
|
MCDS for 2 stage model.R
|
###MCDS for 2 stage model
library(Distance)
###reformat effort
distance.dat$o_effort<-distance.dat$Effort
distance.dat$Effort<-1
distance.dat$Effort[which(distance.dat$survey_period == "8")] = "2"
distance.dat$Effort<-as.integer(distance.dat$Effort)
distance.dat$o_sample.label<-distance.dat$Sample.Label
distance.dat$Sample.Label<-distance.dat$survey
str(distance.dat)
hist(CLNUdist_revised2$distance, xlab = "Distance (km)", main = "Histogram of detections, 2020")
plot(CDS3, pdf = TRUE)
distance.dat[,'survey_period']<-factor(distance.dat[,'survey_period'])
str(distance.dat)
dist.bins5<-c(0.0,0.3,0.4,0.45)
MCDS2<-ds(distance.dat, formula = ~Forest_type, transect = "point", key = "hn",
truncation = .45, cutpoints = dist.bins5)
plot(MCDS2, pdf = TRUE)
summary(MCDS2)
MCDS3<-ds(distance.dat, formula = ~Forest_type, transect = "point", key = "hr",
truncation = .45, cutpoints = dist.bins5)
plot(MCDS3, pdf = TRUE)
summary(MCDS3)
O_MCDS4<-ds(distance.dat, formula = ~survey_period, transect = "point", key = "hn",
truncation = .45, cutpoints = dist.bins5)
summary(O_MCDS4)
plot(MCDS4,pdf = TRUE)
MCDS5<-ds(distance.dat, formula = ~survey_period, transect = "point", key = "hr",
truncation = .45, cutpoints = dist.bins5)
summary(MCDS5)
MCDS6<-ds(distance.dat, formula = ~Observers, transect = "point", key = "hn",
truncation = .45, cutpoints = dist.bins5)
summary(MCDS6)
plot(MCDS6,pdf = TRUE)
MCDS7<-ds(distance.dat, formula = ~Observers, transect = "point", key = "hr",
truncation = .45, cutpoints = dist.bins5)
summary(MCDS7)
plot(MCDS7, pdf = TRUE)
MCDStable<-summarize_ds_models(CDS1,CDS2,CDS3,MCDS2,MCDS3,MCDS4,MCDS5)
MCDStable
|
8a45563abb50625da8ab97a3fb63b7ac0db0fe15
|
ef4c19bbcd2bb4915aa17654c7760fe0e6e14e5c
|
/man/pipeline.Rd
|
1a3e8ccd4f1605a0356bdf9c39a87ef04ac26e03
|
[] |
no_license
|
3shmawei/faraway
|
ee4f1442e5b28de2c4e436c57a7c34f2f3065a2b
|
dc770de1a2d55d31a217e14c911495644ea17b3f
|
refs/heads/master
| 2020-03-23T18:10:35.105916
| 2016-02-15T15:07:03
| 2016-02-15T15:07:03
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,003
|
rd
|
pipeline.Rd
|
\name{pipeline}
\alias{pipeline}
\docType{data}
\title{NIST data on ultrasonic measurements of defects in the Alaska pipeline}
\description{Researchers at National Institutes of Standards and Technology
(NIST) collected data on ultrasonic measurements
of the depths of defects in the Alaska pipeline in the field. The
depth of the defects were then remeasured in the laboratory. These
measurements were performed in six different batches. The laboratory
measurements are more
accurate than the in-field measurements, but more time consuming and
expensive.
}
\usage{data(pipeline)}
\format{
A data frame with 107 observations on the following 3 variables.
\describe{
\item{Field}{measurement of depth of defect on site}
\item{Lab}{measurement of depth of defect in the lab}
\item{Batch}{the batch of measurements}
}
}
\source{
Office of the Director of the Institute of Materials Research (now the
Materials Science and Engineering Laboratory) of NIST}
\keyword{datasets}
|
6f51d10df07de5961cfed4cf6430bb2b46cdf18a
|
56fe483e66c45da3505e883c3821db8c32685945
|
/R/setup.R
|
e8de5e737ba3370324f77baeb854e7b140e90b25
|
[
"Apache-2.0"
] |
permissive
|
bcgov/statscan-taxdata-tidying
|
28840781704b762403970fc375906085b89516fa
|
275d9ae6a9075aa8e2b1164ff3a5f9ed5cf747e4
|
refs/heads/master
| 2021-06-09T10:06:09.002358
| 2021-05-12T19:10:24
| 2021-05-12T19:10:24
| 172,797,380
| 3
| 1
|
Apache-2.0
| 2020-04-23T00:15:19
| 2019-02-26T21:59:11
|
R
|
UTF-8
|
R
| false
| false
| 1,900
|
r
|
setup.R
|
# Copyright 2019 Province of British Columbia
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and limitations under the License.

## Install Packages/dependencies
library("plyr")
library("janitor")
library("tibble")
library("stringr")
library("tidyr")
library("here")
library("readr")
library("purrr")
library("readxl")
library("dplyr")
library("data.table")

#-------------------------------------------------------------------------------
## Make new directories if they do not exist.
## Fixed: the original used exists(), which tests for an R *object* whose name
## is the path string, not for a file-system path; dir.exists() is the correct
## check for directories.
dirs <- c(
  "data-raw", "data-raw/fam", "data-raw/ind", "data-raw/ind13",
  "data-tidy", "data-tidy/fam", "data-tidy/ind", "data-tidy/ind13",
  "data-output"
)
for (d in dirs) {
  if (!dir.exists(here(d))) {
    dir.create(here(d), showWarnings = FALSE, recursive = TRUE)
  }
}

#-------------------------------------------------------------------------------
## Object signalling that this setup script has been sourced
.setup_sourced <- TRUE
|
b9ee92f6a8524785f8fd8651a2cffedc75658ff6
|
c858d45451d067679e34bf1b5d4d594db929f0cb
|
/R/newGame.R
|
e62f6993099cf27968034ebcf78ecfcd6fc8cfbe
|
[] |
no_license
|
mit5u/petanque
|
88a36ab22bc45b7cd2cd61248c2b48ed70924e10
|
4e45772a451e0cf0069ec10ca75a20f0bb74c013
|
refs/heads/master
| 2021-11-07T09:50:02.657442
| 2021-10-26T13:07:09
| 2021-10-26T13:07:09
| 208,126,170
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 850
|
r
|
newGame.R
|
#' Create a new petanque game.
#' @return Data.frame with ball positions.
newGame <- function() {
drawField(newPlot = TRUE)
drawHuman("orange")
# define the data frame which tracks the positions
posDF <- data.frame(x = numeric(7),
id = 1:7,
type = c("target", rep(c("p1", "p2"), 3)),
width = c(0.2, rep(0.4, 6)),
thrown = c(TRUE, rep(FALSE, 6)),
color = c("red", rep(c("orange", "blue"), 3)),
y = rep(0.05, 7),
travelDist = rep(0, 7))
posDF$color <- as.character(posDF$color)
# determine little ball positions
posDF$x[1] <- runif(1, 3, 7)
draw.circle(x = posDF$x[1], y = posDF$y[1], col = oaColors(posDF$color[1]),
radius = posDF$width[1]/2, nv = 120, border = oaColors(posDF$color[1]))
#points(x = posDF$x[1], y = 0.05, col = oaColors("red"), pch = 19, cex = 1.5)
return(posDF)
}
|
feeb6e749c003136905a78321a43c99e16c26f6a
|
5e93db89d08a1a3da82b7ac548a5f5a9c6fd6626
|
/Tutorials/class14.R
|
6a1cb1b3c0296c7c8f38d60e9cfaacb7abcd0863
|
[] |
no_license
|
msandim/tum-data-analysis-r
|
48b3687bf564221ca5d33e1bd49d8939407cfb3c
|
cf721e0f4d0e6e11a7dba8bd05369eed05ce4da9
|
refs/heads/master
| 2021-01-19T20:50:43.925466
| 2017-03-13T10:51:52
| 2017-03-13T10:52:13
| 72,001,688
| 1
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,019
|
r
|
class14.R
|
pima <- read.csv("data/pima-indians-diabetes.csv")
model <- glm(class ~ ., data=pima, family="binomial")
# Question 3
predictions <- predict(model, pima)
predictions_class <- predictions >= 0
real <- ifelse(pima$class == "1", TRUE, FALSE)
# Confusion-matrix counts and derived rates for logical vectors of
# ground truth (`real`) and predicted labels (`predicted`).
getMeasures <- function(real, predicted) {
  tp <- sum(real & predicted)
  fp <- sum(!real & predicted)
  tn <- sum(!real & !predicted)
  fn <- sum(real & !predicted)
  list(
    truePositives  = tp,
    falsePositives = fp,
    trueNegatives  = tn,
    falseNegatives = fn,
    sensitivity    = tp / sum(real),   # true positive rate
    specificity    = tn / sum(!real),  # true negative rate
    PPV            = tp / sum(predicted)  # positive predictive value
  )
}
getMeasures(real, predictions_class)
# Question 4
library(pracma)
getROC <- function(score, class)
{
x_values <- sapply(sort(unique(score)), function(x)
{
measures <- getMeasures(class, score > x)
return(1 - measures$specificity)
})
y_values <- sapply(sort(unique(score)), function(x)
{
measures <- getMeasures(class, score > x)
return(measures$sensitivity)
})
-trapz(x_values, y_values)
}
getROC(predictions, real)
# Question 5
getMetricCV <- function(data, f = getROC, nrFolds = 5)
{
# generate array containing fold-number for each sample (row)
folds <- sample(rep_len(1:nrFolds, nrow(data)), nrow(data))
rocs <- c()
# actual cross validation
for(k in 1:nrFolds)
{
# actual split of the data
fold <- which(folds == k)
data.train <- data[-fold,]
data.test <- data[fold,]
model <- glm(class ~ ., data=data.train, family="binomial")
preds <- predict(model, data.test)
real <- ifelse(data.test$class == "1", TRUE, FALSE)
rocs[k] <- getROC(preds, real)
}
return(rocs)
}
cv_values <- getMetricCV(pima, nrFolds=10)
boxplot(cv_values)
|
06a70c71a360c02d75ba149b0cace2683619e7d3
|
a85e536f8cbe2af99fab307509920955bd0fcf0a
|
/R/resample.R
|
d8910f785bf8453b0b917ef84cf227b7d069e704
|
[] |
no_license
|
ProjectMOSAIC/mosaic
|
87ea45d46fb50ee1fc7088e42bd35263e3bda45f
|
a64f2422667bc5f0a65667693fcf86d921ac7696
|
refs/heads/master
| 2022-12-13T12:19:40.946670
| 2022-12-07T16:52:46
| 2022-12-07T16:52:46
| 3,154,501
| 71
| 27
| null | 2021-02-17T21:52:00
| 2012-01-11T14:58:31
|
HTML
|
UTF-8
|
R
| false
| false
| 12,897
|
r
|
resample.R
|
##############################################
# coin toss
#
#' Tossing Coins
#'
#' These functions simplify simulating coin tosses for those (students primarily)
#' who are not yet familiar with the binomial distributions or just like this syntax
#' and verbosity better.
#'
#' @rdname rflip
#' @return for `rflip`, a cointoss object
#' @param n the number of coins to toss
#' @param prob probability of heads on each toss
#' @param quiet a logical. If `TRUE`, less verbose output is used.
#' @param verbose a logical. If `TRUE`, more verbose output is used.
#' @param summarize if `TRUE`, return a summary (as a data frame).
#' @param summarise alternative spelling for `summarize`.
#'
#' @examples
#' rflip(10)
#' rflip(10, prob = 1/6, quiet = TRUE)
#' rflip(10, prob = 1/6, summarize = TRUE)
#' do(5) * rflip(10)
#' @export
rflip <- function(n=1, prob=.5, quiet=FALSE, verbose = !quiet, summarize = FALSE,
                  summarise = summarize) {
  # Tolerate swapped arguments: rflip(prob, n) with an integer toss count.
  if (is.integer(prob) && prob > 1) {
    swap <- n
    n <- prob
    prob <- swap
  }
  if (summarise) {
    # Summary form: a one-row data frame of counts.
    num_heads <- rbinom(1, n, prob)
    data.frame(n = n, heads = num_heads, tails = n - num_heads, prob = prob)
  } else {
    # Verbose form: the head count carrying the full toss sequence and
    # display settings as attributes, printed by print.cointoss().
    tosses <- rbinom(n, 1, prob)
    structure(
      sum(tosses),
      n = n,
      prob = prob,
      sequence = c("T", "H")[tosses + 1],
      verbose = verbose,
      class = "cointoss"
    )
  }
}
#' @rdname rflip
#' @param x an object
#' @param \dots additional arguments
#' @export
print.cointoss <- function(x, ...) {
heads <- as.numeric(x)
other <- attributes(x)
if (other$verbose) {
cat(paste('\nFlipping ',
other$n,
' coin', ifelse( other$n > 1, "s", ""),
' [ Prob(Heads) = ', other$prob, ' ] ...\n', sep=""))
}
if (attributes(x)$verbose) {
cat('\n')
#print(other$sequence)
cat(paste(
strwrap( paste(other$sequence, collapse=" ") ),
collapse = "\n"))
cat('\n')
cat(paste('\nNumber of Heads: ', heads, ' [Proportion Heads: ', heads/other$n, ']\n\n', sep=""))
}
}
#' @rdname rflip
#' @return for `nflip`, a numeric vector
#' @examples
#' as.numeric(rflip(10))
#' nflip(10)
#' @export
nflip <- function(n=1, prob=.5, ...) {
  # Strip the cointoss attributes, leaving just the number of heads.
  as.numeric(rflip(n = n, prob = prob, ...))
}
#' More Random Samples
#'
#' These functions simplify and unify sampling in various ways.
#'
#' @rdname resample
#'
#' @param x Either a vector of one or more elements from which to choose, or a positive integer.
#' @param size a non-negative integer giving the number of items to choose.
#' @param replace Should sampling be with replacement?
#' @param prob A vector of probability weights for obtaining the elements of the vector being sampled.
#'
#' @details These functions are wrappers around [sample()] providing different defaults and
#' natural names.
#' @examples
#' # 100 Bernoulli trials -- no need for replace=TRUE
#' resample(0:1, 100)
#' tally(resample(0:1, 100))
#' if (require(mosaicData)) {
#' Small <- sample(KidsFeet, 10)
#' resample(Small)
#' tally(~ sex, data=resample(Small))
#' tally(~ sex, data=resample(Small))
#' # fixed marginals for sex
#' tally(~ sex, data=Small)
#' tally(~ sex, data=resample(Small, groups=sex))
#' # shuffled can be used to reshuffle some variables within groups
#' # orig.id shows where the values were in original data frame.
#' Small <- mutate(Small,
#' id1 = paste(sex,1:10, sep=":"),
#' id2 = paste(sex,1:10, sep=":"))
#' resample(Small, groups=sex, shuffled=c("id1","id2"))
#' }
#' @export
resample <- function(..., replace=TRUE) {
  # Identical to sample(), except sampling WITH replacement is the default.
  sample(..., replace = replace)
}
#' @rdname resample
#' @export
#' @examples
#' deal(Cards, 13) # A Bridge hand
deal <- function(...) {
  # Dealing is sampling without replacement, which is sample()'s default.
  sample(...)
}
#' @rdname resample
#' @export
#' @examples
#' shuffle(Cards)
shuffle <- function(x, replace=FALSE, prob=NULL, groups=NULL, orig.ids=FALSE)
{
if (!is.null(groups)){
return( .shuffle_within(x, groups=groups, replace=replace) )
}
return( sample(x, replace=replace, prob=prob, groups=groups) )
}
##############################################
# override base::sample with something fancier
#
#' @rdname resample
#' @export
sample <- function (x, size, replace=FALSE, ...) {
UseMethod('sample')
}
.shuffle_within = function( x, groups=NULL, replace=FALSE, prob=NULL, orig.ids=FALSE, ... ){
if (is.null(groups)) {
stop("Must specify groups to resample within.")
}
# force groups to have the right size, recycling as needed.
if (is.null(dim(x))) {
groups <- rep(groups, length.out=length(x))
} else {
groups <- rep(groups, length.out=nrow(x))
}
groups = as.factor(groups)
flag = c()
levs = levels(groups);
for (lev in levs) { # k in 1:length(levs) ) {
ids = which( groups==lev )
if (length(ids)==1 ) { flag = c(lev) }
rids = sample(ids, replace=replace, orig.ids=orig.ids)
if( is.null(dim(x))) {
x[ ids] = x[rids]}
else {
if( is.data.frame(x) | is.matrix(x) ) {
x[ids,] = x[rids,]
} else {
x[ids] = x[rids]
}
}
}
if (length(flag) > 0) {
message <- paste ("The following groups had only 1 member and can't be shuffled: ", flag)
warning(message)
}
return(x)
}
#' @rdname resample
#' @export
sample.default <- function(x, size, replace=FALSE, prob=NULL,
groups=NULL, orig.ids=FALSE, ...) {
missingSize <- missing(size)
haveGroups <- ! is.null(groups)
if (length(x) == 1L && is.numeric(x) && x >= 1) {
n <- x
x <- 1:n
if (missingSize) size <- n
} else {
n <- length(x)
if (missingSize) size <- length(x)
}
if (haveGroups && size != n) {
warning("'size' is ignored when using groups.")
size <- n
}
ids <- 1:n
if (haveGroups) {
groups <- rep( groups, length.out=size) # recycle as needed
result <- aggregate( ids, by=list(groups), FUN=base::sample,
simplify=FALSE,
replace=replace, prob=prob)
result <- unlist(result$x)
if (orig.ids) { nms <- ids[result] }
result <- x[result]
if (orig.ids) { names(result) <- nms }
return(result)
}
result <- base::sample(x, size, replace=replace, prob=prob)
return(result)
}
#' @rdname resample
#' @param groups a vector (or variable in a data frame) specifying
#' groups to sample within. This will be recycled if necessary.
#' @param orig.ids a logical; should original ids be included in returned data frame?
#' @param \dots additional arguments passed to
#' [base::sample()]
#' or [mosaic::sample()].
#' @param shuffled a vector of column names.
#' these variables are reshuffled individually (within groups if `groups` is
#' specified), breaking associations among these columns.
#' examples.
#' @param fixed a vector of column names. These variables are shuffled en masse,
#' preserving associations among these columns.
#' @param invisibly.return a logical, should return be invisible?
#' @param drop.unused.levels a logical, should unused levels be dropped?
#' @export
sample.data.frame <- function(x, size, replace = FALSE, prob = NULL, groups=NULL,
orig.ids = TRUE, fixed = names(x), shuffled = c(),
invisibly.return = NULL, ...) {
if( missing(size) ) size = nrow(x)
if( is.null(invisibly.return) ) invisibly.return = size>50
shuffled <- intersect(shuffled, names(x))
fixed <- setdiff(intersect(fixed, names(x)), shuffled)
n <- nrow(x)
ids <- 1:n
groups <- eval( substitute(groups), x )
newids <- sample(n, size, replace=replace, prob=prob, groups=groups, ...)
origids <- ids[newids]
result <- x[newids, , drop=FALSE]
idsString <- as.character(origids)
for (column in shuffled) {
cids <- sample(newids, groups=groups[newids])
result[,column] <- x[cids,column]
idsString <- paste(idsString, ".", cids, sep="")
}
result <- result[ , union(fixed,shuffled), drop=FALSE]
if (orig.ids) result$orig.id <- idsString
if (invisibly.return) { return(invisible(result)) } else {return(result)}
}
#' @rdname resample
#' @export
sample.matrix <- function(x, size, replace = FALSE, prob = NULL, groups=NULL, orig.ids=FALSE, ...) {
if (! is.null(groups) ) {
return(
.shuffle_within(x, replace=replace, prob=prob, groups=groups, orig.ids=orig.ids)
)
}
n <- nrow(x)
ids <- base::sample(n, size, replace=replace, prob=prob)
data <- x [ ids, , drop=FALSE]
names(data) <- names(x)
if (orig.ids) {
attr(data,'orig.row') <- ids
}
if (length(ids) < 50) { return(data) } else {return(invisible(data))}
}
#' @rdname resample
#' @export
sample.factor <- function(x, size, replace = FALSE, prob = NULL, groups=NULL, orig.ids=FALSE,
drop.unused.levels = FALSE, ...) {
if (! is.null(groups) ) {
return(
.shuffle_within(x, replace=replace, prob=prob, groups=groups, orig.ids=orig.ids)
)
}
n <- length(x)
ids <- base::sample(n, size, replace=replace, prob=prob)
if (drop.unused.levels) {
data <- factor( x [ ids ] )
} else {
data <- factor( x [ ids ], levels=levels(x) )
}
return(data)
}
#' @rdname resample
#' @param parametric A logical indicating whether the resampling should be done parametrically.
#' @param transformation NULL or a function providing a transformation to be applied to the
#' synthetic responses. If NULL, an attempt it made to infer the appropriate transformation
#' from the original call as recorded in `x`.
#'
#' @export
#' @examples
#' model <- lm(width ~length * sex, data = KidsFeet)
#' KidsFeet %>% head()
#' resample(model) %>% head()
#' Boot <- do(500) * lm(width ~ length * sex, data = resample(KidsFeet))
#' df_stats(~ Intercept + length + sexG + length.sexG, data = Boot, sd)
#' head(Boot)
#' summary(coef(model))
#'
#'
# `sample` method for "lm" objects: residual bootstrap.  Returns a copy of the
# model's data in which the response has been replaced by fitted values plus
# resampled (or parametrically simulated) residuals, so refitting the model on
# the result yields one bootstrap replication.
# Note: `orig.ids` and `drop.unused.levels` are accepted for signature
# compatibility with the other sample()/resample() methods but are not used
# in this method's body.
sample.lm <-
  function(
    x, size, replace = FALSE, prob = NULL, groups=NULL,
    orig.ids=FALSE, drop.unused.levels = FALSE,
    parametric = FALSE,
    transformation = NULL,
    ...) {
    # `prob` and `groups` are likewise accepted but unused here.
    if (!is.null(prob)) {
      warning("Unused argument: prob")
    }
    if (!is.null(groups)) {
      warning("Unused argument: groups")
    }
    # Only resampling (replace = TRUE) is meaningful for a fitted model.
    if (! replace) {
      stop("Only resampling supported for linear model objects.")
    }
    # replace == TRUE
    # Recover the fitting data by re-evaluating the `data` argument of the
    # stored call in the environment of the model formula.
    orig_data <- eval( x$call[["data"]], environment(formula(x)) )
    # Keep only the variables that appear in the formula; restrict to
    # complete cases so rows line up with fitted()/resid().
    dfx <- orig_data[, all.vars(formula(x))]
    complete_idx <- which(complete.cases(dfx))
    res <- dfx[complete_idx, ]
    # `size` must equal the number of complete cases; it is otherwise ignored.
    if (! missing(size)) {
      if (size != nrow(res)) {
        stop ("Invalid value for `size'.")
      }
      warning("`size' is ignored when resampling an `lm' object.")
    }
    size <- nrow(res)
    res$resid <- resid(x)
    if (parametric) {
      # Parametric bootstrap: draw new residuals from N(0, sigma^2).
      res$new_resid <- rnorm(size, mean = 0, sd = summary(x)$sigma)
    } else {
      # Non-parametric: resample the observed residuals and flip their signs
      # at random (+1/-1 with equal probability).
      res$new_resid <-
        (1 - 2 * rbinom(size, 1, 0.5)) * resample(resid(x))
    }
    res$new_response <- fitted(x) + res$new_resid
    # The response may have been transformed in the formula (e.g. log(y) ~ x);
    # apply the supplied or inferred inverse transformation.
    if (is.null(transformation)) {
      transformation <- mosaicCore::infer_transformation(formula(x))
    }
    # NOTE(review): assumes the first column of `res` (the first variable of
    # all.vars(formula(x))) is the response variable -- confirm for unusual
    # formulas.
    res[[1]] <- do.call(transformation, list(res$new_response))
    # remove "scratch columns"
    res <- res %>%
      #select_(.dots = setdiff(names(res), c("resid", "new_resid", "new_response")))
      select(any_of(setdiff(names(res), c("resid", "new_resid", "new_response"))))
    res
  }
#' Resample a Linear Model
#'
#' Fit a new model to data created using `resample(model)`.
#'
#'
#' @param model a linear model object produced using [lm()].
#' @param ... additional arguments passed through to [resample()].
#' @param envir an environment in which to (re)evaluate the linear model.
#' @seealso `resample()`
#'
#' @examples
#' mod <- lm(length ~ width, data = KidsFeet)
#' do(1) * mod
#' do(3) * relm(mod)
#' # use residual resampling to estimate standard error (very crude because so few replications)
#' Boot <- do(100) * relm(mod)
#' sd(~ width, data = Boot)
#' # standard error as produced by summary() for comparison
#' mod %>% summary() %>% coef()
#'
#' @export
relm <- function(model, ..., envir = environment(formula(model))) {
  # Rebuild the original lm() call, but point its `data` argument at a
  # residual-resampled copy of the data, then re-evaluate the call to
  # obtain a freshly fitted bootstrap model.
  refit_call <- model$call
  refit_call[["data"]] <- resample(model, ...)
  eval(refit_call, envir)
}
#'
#' Simulate spinning a spinner
#'
#' This is essentially `rmultinom` with a different interface.
#'
#' @param n number of spins of spinner
#' @param probs a vector of probabilities. If the sum is not 1, the
#' probabilities will be rescaled.
#' @param labels a character vector of labels for the categories
#' @export
#' @examples
#' rspin(20, prob=c(1,2,3), labels=c("Red", "Blue", "Green"))
#' do(2) * rspin(20, prob=c(1,2,3), labels=c("Red", "Blue", "Green"))
# Simulate n spins of a spinner with sector weights `probs`.
#
# Returns a one-row data frame of counts per sector, with columns named by
# `labels`.  Weights are rescaled to sum to 1 if they do not already.
rspin <- function(n, probs, labels = seq_along(probs)) {
  # Validate the spinner definition before normalising.
  if (length(probs) == 0)
    stop("probs must contain at least one value.")
  if (any(probs < 0))
    stop("All probs must be non-negative.")
  total <- sum(probs)
  # Guard against division by zero (the original produced NaN probabilities).
  if (total == 0)
    stop("At least one prob must be positive.")
  probs <- probs / total
  # One multinomial draw of n spins, transposed into a 1-row data frame.
  res <- as.data.frame(t(rmultinom(1, n, probs)))
  names(res) <- labels
  res
}
|
7934a5ca82553039542dbed5259b2c0df769484b
|
2629760084e472f389b13f5e8b9c6f690d8b5477
|
/man/parmafil.Rd
|
4664525c0e01e59dc00961d25549489ce92f47a0
|
[] |
no_license
|
cran/perARMA
|
b75018137817798ceb7a890e7f6e189478ebfe20
|
d10c97281cf83cb0e4b500b0185db0a4c1a02a19
|
refs/heads/master
| 2021-01-19T04:13:25.379842
| 2016-02-25T22:49:40
| 2016-02-25T22:49:40
| 17,698,467
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,640
|
rd
|
parmafil.Rd
|
\name{parmafil}
\alias{parmafil}
\title{
PARMA filtration
}
\description{
Procedure \code{parmafil} filters the vector \code{x} according to matrices \code{a, b} containing PARMA model parameters.
The function returns series \code{y} such that
\eqn{a(n,1)*y(n) = b(n,1)*x(n) + b(n,2)*x(n-1) + \ldots + b(n,nb+1)*x(n-nb)- a(n,2)*y(n-1) - \ldots - a(n,na+1)*y(n-na)}.
}
\usage{
parmafil(b, a, x)
}
\arguments{
\item{b}{matrix of size \eqn{T \times (nb+1)}, which elements satisfy \eqn{b(n,j)=b(n+T,j)}, usually in the literature \code{b} is called the periodic MA parameters and \eqn{nb}
is denoted by \eqn{q}.
}
\item{a}{matrix of size \eqn{T \times na}, which elements satisfy \eqn{a(n,j)=a(n+T,j)}, usually in the literature \code{a} is called the periodic AR parameters
and \eqn{na} is denoted \eqn{p}. If \eqn{a(n,1)} is not equal to 1 for all \eqn{n}, the values of \eqn{a(n,j)} are normalized by \eqn{a(n,j)=a(n,j)/a(n,1)}.
}
\item{x}{
input time series.
}
}
\value{
Filtered signal \code{y}.
}
\author{
Harry Hurd
}
\note{
To filter using the convention \eqn{\phi(t,B)x(t) = \theta(t,B) \xi(t)}
with \eqn{\phi(t,B)=1 - \phi(t,1)B - ... - \phi(t,p)B^p},
\eqn{\theta(t,B)=del(t,1) + \theta(t,1)B + ... + \theta(t,q)B^q}
set \code{a=[ones(T,1),-phi]}, \code{b=[theta]}, then \code{x=parmafil(b,a,xi)}.
}
\seealso{
\code{\link{loglikec}}, \code{\link{loglikef}}, \code{\link{makeparma}}
}
\examples{
b=matrix(c(1,1,0,0,.5,.5),2,3)
a=matrix(c(1,1,.5,.5),2,2)
s=sample(1:100,50, replace=TRUE)
x=matrix(s,50,1)
parmafil_out<-parmafil(b,a,x)
y=parmafil_out$y
plot(y,type="l")
}
|
43b4c298f45a6d44d7d5dfb9390785d64d3aaaac
|
9cfb7407d2ce87df982afda33ff9ac7f512e8e82
|
/etgKpi/weeklyKPI.R
|
9266e0cf09cea96a98e2ee371ef94d0f6a7c42da
|
[] |
no_license
|
loganscalder/EliteTechGear
|
3edacc595d2c00c56f01dd62389db69679390228
|
e3b0068c937f4afc43ce4166049fde325db88348
|
refs/heads/master
| 2021-01-10T21:47:06.455191
| 2017-12-06T14:44:57
| 2017-12-06T14:51:33
| 40,363,642
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,875
|
r
|
weeklyKPI.R
|
# Weekly KPI report: read per-ASIN weekly sales exports, clean the percentage
# and currency columns, and chart items/sales per day by SKU and week.
rm(list = ls())
require(plyr)
# NOTE(review): "04-12--04-18" appears twice below, so that week's file is
# read twice and one week of the sequence is missing -- confirm the intended
# eighth range (possibly "05-03--05-09") with the data owner.
dateRanges = c(
  "03-15--03-21",
  "03-22--03-28",
  "03-29--04-04",
  "04-05--04-11",
  "04-12--04-18",
  "04-12--04-18",
  "04-19--04-25",
  "04-26--05-02"
)
weeklyList = list()
length(weeklyList) = 8
names(weeklyList) = dateRanges
# Read each weekly export and clean it: strip "%" from percentage columns,
# strip "$" and "," from sales, and derive per-day rates (7-day weeks).
for (i in 1:length(weeklyList)){
  weeklyList[[i]] = read.csv(paste0("salesByAsin",dateRanges[i],".csv"),
                             stringsAsFactors = F)
  weeklyList[[i]] =
    mutate(weeklyList[[i]],
           # Take out %
           Session.Percentage = as.numeric(gsub("%", "", Session.Percentage)),
           # Take out %
           Page.Views.Percentage = as.numeric(gsub("%", "", Page.Views.Percentage)),
           # Take out %
           Buy.Box.Percentage = as.numeric(gsub("%", "", Buy.Box.Percentage)),
           # Take out %
           Unit.Session.Percentage = as.numeric(gsub("%", "", Unit.Session.Percentage)),
           # Take out $ and ,
           Ordered.Product.Sales =
             as.numeric(gsub("\\$", "",
                             gsub(",", "", Ordered.Product.Sales))),
           # Calculate data
           itemsPerDay = Units.Ordered/7,
           salesPerDay = Ordered.Product.Sales/7,
           week = dateRanges[i])
} # read in the weekly data, to each data frame, cbind the date range
# should the dates be turned into factors? ..Or the time series class...?
head(weeklyList[[1]])
names(weeklyList[[1]])
names(weeklyList)
lapply(weeklyList, summary)
# Keep only the columns needed for the report.
relevantList = lapply(weeklyList,
                      function(x){
                        x[,c("SKU", "Total.Order.Items", "Units.Ordered", "Ordered.Product.Sales",
                             "itemsPerDay", "salesPerDay", "week")]
                      })
lapply(relevantList, head)
# Stack the weekly frames into one long table keyed by week + SKU.
relevantData = do.call(rbind, relevantList)
head(relevantData)
rownames(relevantData) <- with(relevantData, paste0(week,SKU))
# ToDo:
# Pull relevant Data (list?)
# Export 56, 28, 14, and 7 day report (this takes priority, cuz, I should prove we can automate what we have)
# Make Graphics :) this is the fun part.
# What I want to do is plot the sales per day, items per day for every sku and every week
require(lattice)
barchart(itemsPerDay~week | SKU, data = relevantData,
         main = "items per day each week")
barchart(salesPerDay~week | SKU, data = relevantData,
         main = "sales per day each week")
# OOO, you don't need this, I think
# Per-week totals and average per-day rates across SKUs.
byWeek =
  ddply(relevantData,
        .var = .(week),
        .fun = summarise,
        totalOrderItems = sum(Total.Order.Items),
        unitsOrdered = sum(Units.Ordered),
        sales = sum(Ordered.Product.Sales),
        itemsPerDay = sum(itemsPerDay)/length(week),
        salesPerDay = sum(salesPerDay)/length(week)
  )
byWeek
|
2a3634dfe7b73ae6b87ea278901f2d959bd0c20d
|
82b1c5655856b660c053d18ec7ad94f3aa30a964
|
/tests/testthat/test-function-print_replacement_template.R
|
9c217d31a06c0c8e99f31dc2dd732b6d3fe24163
|
[
"MIT"
] |
permissive
|
KWB-R/kwb.fakin
|
9792dfa732a8dd1aaa8d2634630411119604757f
|
17ab0e6e9a63a03c6cb40ef29ee3899c2b2724a0
|
refs/heads/master
| 2022-06-09T22:25:09.633343
| 2022-06-08T21:24:14
| 2022-06-08T21:24:14
| 136,065,795
| 1
| 0
|
MIT
| 2021-03-15T10:55:17
| 2018-06-04T18:21:30
|
R
|
UTF-8
|
R
| false
| false
| 159
|
r
|
test-function-print_replacement_template.R
|
test_that("print_replacement_template() works", {
  # Grab the unexported function under test.
  print_template <- kwb.fakin:::print_replacement_template
  # No arguments must fail; a character vector must produce printed output.
  expect_error(print_template())
  expect_output(print_template(c("x", "y", "z")))
})
|
7d52c2ff973b9d22656d5f0a125b56b023b3f15e
|
a61eea3652bf5379ffc4a44d4ddba9ad189a4d56
|
/plot2.R
|
5eae1463639cb939a130fb8b32aa653b89850fbd
|
[] |
no_license
|
mahdaa/ExData_Plotting1
|
2adba1bfe5d26d9ecdee99675194b6efa39afc7d
|
87fe1b2787bd853dc09c0b968e056c6cc55fb877
|
refs/heads/master
| 2021-01-17T14:37:39.076047
| 2014-11-09T16:16:09
| 2014-11-09T16:16:09
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 603
|
r
|
plot2.R
|
# Course plot 2: Global Active Power over 1-2 Feb 2007, saved to plot2.png.
# NOTE(review): hard-coded setwd() to a personal path -- the script only runs
# on the original author's machine as written.
setwd("C:/Users/mahda/Documents/Rcoursera")
# Read only the first rows with header=TRUE to capture the column names.
hdata <-read.table("household_power_consumption.txt",nrows=4,header = T, sep=";")
head=names(hdata)
# Skip 46 days' worth of minute rows and read a 4000-row window -- presumably
# tuned so the window covers 1-2 Feb 2007 in this particular file; verify if
# the source file changes.
mahdata <-read.table("household_power_consumption.txt",skip=46*24*60,nrows=4000, sep=";")
names(mahdata)<-head
# Keep only the two target dates (d/m/Y format in the raw file).
mahdata <-mahdata[mahdata$Date=="2/2/2007" | mahdata$Date=="1/2/2007",]
da <- as.Date(mahdata$Date, format="%d/%m/%Y")
dt<-paste(da, mahdata$Time)
ts<-strptime(dt, "%Y-%m-%d %H:%M:%S")
plot(ts,mahdata$Global_active_power, type="l",xlab="", ylab="Global Active Power (kilowatts)")
# NOTE(review): dev.copy() from the screen device can render slightly
# differently than plotting directly into png(); consider opening png()
# before plot() instead.
dev.copy(png, file="plot2.png", width=480, height=480)
dev.off()
|
57fd75c058cd5eaffcfeebbc305527e4cede83b6
|
3d5382f04c70b0f57d035ed5a50c977cf827c2b0
|
/data_visualization/data_type.R
|
59f3eca4c1ef5a83a41a1ed41ad150536e3ef1b9
|
[] |
no_license
|
Pongsasit/HavardX_data_sciece
|
56ffb84f45ac33deaddb516cfdc035037e7f2618
|
cd95720a2195f2d365fe2b6e89334785ef2f1309
|
refs/heads/master
| 2020-05-18T17:54:36.446168
| 2019-06-16T13:39:47
| 2019-06-16T13:39:47
| 184,569,792
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 285
|
r
|
data_type.R
|
# Explore the dslabs `heights` data set: how many distinct height values
# occur, and how many of them occur exactly once.
library(dslabs)
data(heights)
names(heights)

# The height measurements as a plain numeric vector.
ht <- heights$height

# Distinct values taken by the height variable, and how many there are.
unique(ht)
length(unique(ht))

# Frequency of each distinct value ...
freq_table <- table(ht)

# ... and the number of values reported exactly once.
sum(freq_table == 1)
|
fc78f9757a39caab97515d4b6d1bebaff45096b1
|
1e42b9829b85bc37d112ec5b8efa1682264297b2
|
/R/filter_throughput_time.R
|
26d6ae840557175d56cf070433a0aef931c79e4c
|
[] |
no_license
|
strategist922/edeaR
|
ca83bf91f58e685bc9333f4db3bfea3d8c019343
|
ad96118cccfdc90a7bed94f5aef2ee0cfab3aac8
|
refs/heads/master
| 2021-07-05T04:30:35.286640
| 2017-09-27T12:25:04
| 2017-09-27T12:25:04
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 3,755
|
r
|
filter_throughput_time.R
|
#' @title Filter: Throughput Time
#'
#' @description Filters cases based on their throughput time.
#'
#' @param eventlog The event log to be used. An object of class
#' \code{eventlog}.
#'
#' @param lower_threshold The lower duration threshold, specified in number of days.
#' When \code{reverse} is FALSE, all cases with a lower duration are discarded.
#'
#' @param upper_threshold The upper duration threshold, specified in number of days.
#' When \code{reverse} is FALSE, all cases with a higher duration are discarded.
#'
#' @param percentile_cut_off Alternatively to providing thresholds, a percentile cut off can be provided.
#' A percentile cut off value of 0.9 will return the 90\% shortest cases.
#' When \code{reverse} is set to TRUE, it will return the 10\% longest cases.
#'
#' @param reverse A logical parameter depicting whether the selection should be reversed.
#'
#' @param units The time unit used for filtering.
#'
#' @export filter_throughput_time
#'
filter_throughput_time <- function(eventlog,
                                   lower_threshold = NULL,
                                   upper_threshold = NULL,
                                   percentile_cut_off = NULL,
                                   reverse = FALSE,
                                   units = "days") {
  # Filter the cases of an event log by throughput time, either with explicit
  # duration thresholds or with a percentile cut off (see roxygen above).
  stop_eventlog(eventlog)
  # Exactly one selection mode must be used: thresholds OR a percentile.
  # (Scalar conditions, so && / || rather than vectorised & / |.)
  if (is.null(lower_threshold) && is.null(upper_threshold) && is.null(percentile_cut_off))
    stop("At least one threshold or a percentile cut off must be provided.")
  if ((!is.null(lower_threshold) && !is.null(percentile_cut_off)) ||
      (!is.null(upper_threshold) && !is.null(percentile_cut_off)))
    stop("Cannot filter on both thresholds and percentile cut off simultaneously.")
  if (!is.null(percentile_cut_off)) {
    # Percentile mode: keep the `percentile_cut_off` fraction of shortest
    # cases (longest cases when `reverse` is TRUE).
    filter_throughput_time_percentile(eventlog,
                                      percentile_cut_off = percentile_cut_off,
                                      reverse = reverse)
  } else {
    # Threshold mode: keep cases whose duration (in `units`) lies between
    # the lower and upper thresholds.
    filter_throughput_time_threshold(eventlog,
                                     lower_threshold = lower_threshold,
                                     upper_threshold = upper_threshold,
                                     reverse = reverse,
                                     units = units)
  }
}
#' @rdname filter_throughput_time
#' @export ifilter_throughput_time
ifilter_throughput_time <- function(eventlog) {
  # Interactive (shiny gadget) front-end for filter_throughput_time(): the
  # user chooses interval vs. percentile filtering, time units and whether
  # to reverse the filter; the filtered log is returned via stopApp().
  # BUGFIX: corrected the user-visible typos "Througput" / "Throguhput"
  # -> "Throughput" in the title bar, slider label and dialog title.
  ui <- miniPage(
    gadgetTitleBar("Filter Throughput Time"),
    miniContentPanel(
      fillCol(
        fillRow(
          radioButtons("filter_type", "Filter type:",
                       choices = c("Interval" = "int", "Use percentile cutoff" = "percentile")),
          radioButtons("units", "Time units: ",
                       choices = c("weeks", "days", "hours", "mins"), selected = "hours"),
          radioButtons("reverse", "Reverse filter: ",
                       choices = c("Yes", "No"), selected = "No")
        ),
        uiOutput("filter_ui")
      )
    )
  )
  server <- function(input, output, session) {
    # Render the mode-specific control: a duration interval slider whose
    # upper bound follows the longest observed case, or a percentile slider.
    output$filter_ui <- renderUI({
      if (input$filter_type == "int") {
        sliderInput("interval_slider", "Throughput time interval",
                    min = 0,
                    max = max(eventlog %>% throughput_time("case", units = input$units) %>% pull(throughput_time)),
                    value = c(0, 1))
      } else if (input$filter_type == "percentile") {
        sliderInput("percentile_slider", "Percentile cut off:", min = 0, max = 100, value = 80)
      }
    })
    observeEvent(input$done, {
      # Apply the selected filter and hand the result back to the caller.
      if (input$filter_type == "int") {
        filtered_log <- filter_throughput_time(eventlog,
                                               lower_threshold = input$interval_slider[1],
                                               upper_threshold = input$interval_slider[2],
                                               reverse = input$reverse == "Yes",
                                               units = input$units)
      } else if (input$filter_type == "percentile") {
        filtered_log <- filter_throughput_time(eventlog,
                                               percentile_cut_off = input$percentile_slider / 100,
                                               reverse = input$reverse == "Yes",
                                               units = input$units)
      }
      stopApp(filtered_log)
    })
  }
  runGadget(ui, server, viewer = dialogViewer("Filter Throughput Time", height = 400))
}
|
9a312faea7ce88bbf38ae387e0a8ad75a899eb36
|
ffdea92d4315e4363dd4ae673a1a6adf82a761b5
|
/data/genthat_extracted_code/MLZ/examples/summary-MLZ_model-method.Rd.R
|
08580adda7b5ea15fdfadd45f11059cf0698e76c
|
[] |
no_license
|
surayaaramli/typeRrh
|
d257ac8905c49123f4ccd4e377ee3dfc84d1636c
|
66e6996f31961bc8b9aafe1a6a6098327b66bf71
|
refs/heads/master
| 2023-05-05T04:05:31.617869
| 2019-04-25T22:10:06
| 2019-04-25T22:10:06
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 299
|
r
|
summary-MLZ_model-method.Rd.R
|
library(MLZ)
### Name: summary,MLZ_model-method
### Title: 'summary' method for S4 class 'MLZ_model'
### Aliases: summary,MLZ_model-method
### ** Examples
## Not run:
##D data(Goosefish)
##D goose.model <- ML(Goosefish, ncp = 2, grid.search = FALSE)
##D summary(goose.model)
## End(Not run)
|
38d6818e6c63bc669a48e10ea017fb862ca3669b
|
4e84a2a31e0762eccd3b513461f94e605adbe6df
|
/Homework_1_Thomas/CH2_PROB4.R
|
cf773645e13188a1aff9599916945b576c3b52ce
|
[] |
no_license
|
TaraAnn/Econometrics-UTA-Coursework
|
324715f3ca452d3202a0414c6c62e5963629488e
|
3d62a54f9d33c6979f0021d414a9efb756d9e5e9
|
refs/heads/master
| 2021-04-15T15:40:34.662733
| 2018-03-21T19:43:54
| 2018-03-21T19:43:54
| 126,229,515
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 4,511
|
r
|
CH2_PROB4.R
|
# Homework script: Wooldridge Chapter 2, Computer Exercise C4 (wage ~ IQ).
# The bare string literals below are the assignment text, kept as-is.
"Woolridge Chapter 2 C4"
"Use the data in WAGE2 to estimate a simple regression explaining monthly salary (wage) in terms of
IQ score (IQ).
(i) Find the average salary and average IQ in the sample. What is the sample standard deviation of
IQ? (IQ scores are standardized so that the average in the population is 100 with a standard deviation
equal to 15.)
(ii) Estimate a simple regression model where a one-point increase in IQ changes wage by a constant
dollar amount. Use this model to find the predicted increase in wage for an increase in
IQ of 15 points. Does IQ explain most of the variation in wage?
(iii)Now, estimate a model where each one-point increase in IQ has the same percentage effect on
wage. If IQ increases by 15 points, what is the approximate percentage increase in predicted wage?"
# Clear the workspace in R
# NOTE(review): rm(list=ls()), setwd() to a personal path and
# install.packages() inside a script are anti-patterns -- the script is not
# portable and reinstalls packages on every run.
rm(list=ls(all=TRUE))
#FYI Ctr+L clears the console
#Set the working directory
setwd("C:/Folders/Education/Course Work/Sem 2/Econometrics/Course Material/Homework/Homework 3")
"Following are the packages that we will require to perform operations like OLS, plotting,
max likelihood application, etc. Now this is just a set of library's we required for our course.
So there are a lot of library's that wont be used in this code."
install.packages(c("plm","mvtnorm","sandwich","lmtest",
                   "foreign","arm","rms", "readxl", "Hmisc",
                   "haven", "car", "compactr", "multcomp", "splines"))
# Load libraries
library(mvtnorm)
library(sandwich) #to help get Heteroskedasticity robust standard errors
library(lmtest) #lmtest provides a large collection of diagnostic tests
library(foreign)
library(arm) #to help estimated interaction terms
library(rms) #for OLS regressions
library(readxl) #to read excel files
library(Hmisc) #for adding variable descriptions
library(haven) #need for uploading stata files
library(car) #companion to applied regression
library(compactr)
library(multcomp) #we need this to run the Generalized least squares. This will help
#performing statistical tests using Robust Standard errors.
library(plm) #Helps to use the Wald test for joint restrictions on coefficients
library(splines) #if you need to perform a spline regressions
#########################################################################################
#Reading a csv file
#wage2 <- read.csv("wage2.csv")
#wage <- wage2[,1]
wage2 <- read_excel("wage2.xls")
wage <- wage2$wage
# When column names aren't present in the file, R assigns default column names
#wage <- wage2$X__1
#or
#names(wage2)[1]<- "wage"
wage
#part (i)
summary(wage)
#sum(wage)
#length(wage)
#The summary function already gives you the mean. However, if you need to just
#find the mean, then the mean function can be used
mean(wage)
#Manual calculation
#avg_sal <- (sum(sal)/length(sal))
#avg_sal
iq <- wage2$IQ
#or for csv files
#iq <- wage2[,3]
summary(iq)
mean(iq)
#Standard Deviation
sd(iq)
# Part (ii)
"So we want to estimate the following model
where we regress wage on IQ
wage_i = Beta0 + Beta1*iq_i + u_i"
m1 <- lm(wage~iq)
summary(m1)
"Once again, summary gives you everything you need but it just displays the result
and doesn't store any of the coefficients. We can extract the coeffs as follows."
#here are the coefficients
coefficients(m1)
coefficients(m1)[2]
coeffs <- coefficients(m1)
coeffs
"On regressing, we obtain the following fitted model
wagehat_i = 117.597300 + 8.298354*iq_i"
#Determine predicted increase in wage for a 15 point increase in iq
wagehat_delta = coeffs[2]* 15
wagehat_delta
"In order to determine how much of the variation in wage is explained by IQ,
we need to look at the Rsquare value in the summary we obtained after regression.
Or, we can calculate it manually!"
summary(m1)$r.squared
#Part (iii)
"Well, this is a log-level model. In a log-level model, each one unit increase
in the explanatory variable has the same percentage effect on the dependent
variable. So we run the following regression."
lnwage <- log(wage) #natural log
# Or we can use the lwage column in our file
m2 <- lm(lnwage~iq)
summary(m2)
coeffs1 <- coefficients(m2)
"And obtain the new estimated model
ln(wagehat_i) = 5.887301564 + 0.008804767*iq_i"
#Determine predicted percentage increase in wage for a 15 point increase in iq
wagehat_delta_per = coeffs1[2]* 15*100
wagehat_delta_per
|
088cf05c2fffea2884ca1c0281bc92a197a8ba99
|
a1e8befe54df1c88406ddb75015eaf499929b3ed
|
/chapter4_1.R
|
ae33307cb9ca13ec54435848d9c06725eff0177a
|
[] |
no_license
|
mk102/datamining_nyumon
|
f67027d33dd532e8f0f0c83b419ed4118b97e393
|
b6bb3cee7bda622453b4560b69cef7320fdbf7f1
|
refs/heads/master
| 2021-07-22T08:10:25.765780
| 2017-11-01T04:24:16
| 2017-11-01T04:24:16
| 108,372,491
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 532
|
r
|
chapter4_1.R
|
# Chapter 4.1: self-organising map (SOM) of animal attribute data.
library(som)
# Load the animal data; columns 2-14 hold the attributes used for training.
動物データ1 <- read.csv("animal1.csv",header=T)
# Normalise the attribute columns (column-wise, since byrow=F).
標準化動物データ <- normalize(動物データ1[,2:14], byrow=F)
# Train a 10x10 rectangular-topology SOM on the normalised attributes.
動物SOM <- som(標準化動物データ, xdim=10, ydim=10, topol="rect")
plot(動物SOM)
head(動物SOM$visual)
# Small Gaussian jitter so animals mapped to the same node do not overlap.
乱数 <- cbind(rnorm(nrow(動物データ1), 0, 0.15), rnorm(nrow(動物データ1), 0, 0.15))
動物マップ <- 動物SOM$visual[,1:2] + 乱数 + 0.5
# Plot map positions and label each point with the animal name column.
plot(動物マップ, xlim=c(0,10), ylim=c(0,10))
text(動物マップ[,1], 動物マップ[,2], 動物データ1$動物名)
|
61d87d776d7e7164b5b6237b07b60252119dd01b
|
f6528c07bea30b85de4ea788f649bbe3a4b0df08
|
/01fun_Explore_temp_data_functions.R
|
ac4873415d273674ee7545bde24790a2f3332f27
|
[] |
no_license
|
DagHjermann/get-fresh-temp
|
217785b0e284b3caf97b7104e13518a062e888a2
|
52d8aaa5045ff8c247af35d159d800bc22ce76e0
|
refs/heads/master
| 2020-04-20T04:38:58.723950
| 2019-02-01T13:46:19
| 2019-02-01T13:46:19
| 168,634,025
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,013
|
r
|
01fun_Explore_temp_data_functions.R
|
# Split `param_values` into chunks of at most 1000 values and collapse each
# chunk into one comma-separated string (single-quoted when
# `values_are_text`).  NAs are dropped with a warning.
#
# Returns a list with one collapsed string per chunk (empty list for empty
# input).
create_sequence_list <- function (param_values, values_are_text = FALSE) {
  sel_na <- is.na(param_values)
  param_values <- param_values[!sel_na]
  if (sum(sel_na) > 0)
    warning(paste("Supplied values contained", sum(sel_na),
                  "missing value(s)"))
  L <- length(param_values)
  # Robustness fix: the original crashed on empty input because
  # seq(1, 0, 1000) errors with "wrong sign in 'by' argument".
  if (L == 0)
    return(list())
  chunk_first <- seq(1, L, 1000)
  chunk_last <- c(chunk_first[-1] - 1, L)
  if (values_are_text) {
    # sQuote() must emit plain ASCII quotes.  Restore the option even on
    # error (the original restored it only on the success path).
    old.o <- options(useFancyQuotes = FALSE)
    on.exit(options(old.o), add = TRUE)
  }
  sequence_list <- vector("list", length(chunk_first))
  for (i in seq_along(chunk_first)) {
    values <- param_values[seq(chunk_first[i], chunk_last[i])]
    if (values_are_text)
      values <- sQuote(values)
    sequence_list[[i]] <- paste0(values, collapse = ",")
  }
  sequence_list
}
# Tests:
# create_sequence_list(5:15)
# create_sequence_list(letters[1:10], values_are_text = TRUE)
# test <- create_sequence_list(1:2005)
# str(test, 1)
|
e762173e1fde1704ea3dbe4aae03981457025070
|
29585dff702209dd446c0ab52ceea046c58e384e
|
/DNMF/R/DNMF.R
|
895650e6566a2bfcb19f78b9e1a299f0b58659f6
|
[] |
no_license
|
ingted/R-Examples
|
825440ce468ce608c4d73e2af4c0a0213b81c0fe
|
d0917dbaf698cb8bc0789db0c3ab07453016eab9
|
refs/heads/master
| 2020-04-14T12:29:22.336088
| 2016-07-21T14:01:14
| 2016-07-21T14:01:14
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 5,885
|
r
|
DNMF.R
|
#' Discriminant Non-Negative Matrix Factorization.
#'
#' Discriminant Non-Negative Matrix Factorization, DNMF, is to extend the Non-negative Matrix Factorization algorithm in
#' order to extract features that enforce not only the spatial locality, but
#' also the separability between classes in a discriminant manner.
#'
#' The main algorithm is based on
#' \href{http://www.ncbi.nlm.nih.gov/pubmed/16722172}{Zafeiriou, S., et al.
#' (2006) Exploiting discriminant information in
#' nonnegative matrix factorization with application to frontal face
#' verification, IEEE transactions on neural networks, 17, 683-695},
#' with some \strong{CORRECTIONs}.
#'
#' @param data a matrix, like expression profilings of some samples. the columns are samples and the rows are gene's expression.
#' @param trainlabel a numeric vector of sample type of all the samples, this vector should ONLY contain 1 and 2 so far and length of it should equal the column (sample) size of data.
#' @param r the dimension of expected reduction dimension, with the default value 2.
#' @param gamma the tradeoff value for the within scatter matrix, with the default value 0.1.
#' @param delta the tradeoff value for the between scatter matrix, with the default value 1e-4.
#' @param maxIter the maximum iteration of update rules, with the default value 1000.
#' @param log log2 data. Default is TRUE.
#' @param tol the toleration of coverange, with the default value 1e-7.
#' @param plotit whether plot H (V=WH). Default: FALSE.
#' @param checkH whether or not check H. Default: TRUE. This parameter aims to
#' check whether or not the H safisfy the discriminant metagenes. Usually, this
#' should be TRUE.
#' @param ... to gplots::heatmap.2
#' @import gplots
#' @author Zhilong Jia and Xiang Zhang
#' @export
#' @examples
#' dat <- rbind(matrix(c(rep(3, 16), rep(8, 24)), ncol=5),
#' matrix(c(rep(5, 16), rep(5, 24)), ncol=5),
#' matrix(c(rep(18, 16), rep(7, 24)), ncol=5)) +
#' matrix(runif(120,-1,1), ncol=5)
#' trainlabel <- c(1,1,2,2,2)
#'
#' DNMF_result <- DNMF(dat, trainlabel, r=2)
#'
#'
#' \dontrun{
#' # Gene ranking. dat is the raw read count maatrix with sample in column.
#'
#' #normalising dat
#' Sizefactors <- DESeq::estimateSizeFactorsForMatrix(dat)
#' dat = sweep(dat, 2, Sizefactors, `/`)
#'
#' res <- DNMF(dat, trainlabel, r=2)
#' rnk <- res$rnk
#'
#' #The end of gene ranking exmaples
#'
#' #Other exmaples
#' DNMF_result <- DNMF(dat, trainlabel, r=2, gamma=0.1, delta=0.0001, plotit=TRUE)
#' }
#'
# Discriminant NMF (see roxygen above): factor `data` (features x samples)
# into W (features x r metagenes) and H (r x samples) with multiplicative
# updates on a KL objective penalised by within-/between-class scatter.
DNMF <- function(data, trainlabel, r = 2, gamma = 0.1, delta = 0.0001, maxIter = 1000,
                 tol = 1e-7, log = TRUE, plotit = FALSE, checkH = TRUE, ...) {
  data <- as.matrix(data)
  # Replace exact zeros before the (optional) log2 transform.
  data[which(data == 0)] <- 1
  if (log) {
    data <- log2(data + 2)
  }
  nFea <- nrow(data)
  nSmp <- ncol(data)
  eps <- .Machine$double.eps
  # --- Initialise H and W -------------------------------------------------
  H <- matrix(runif(r * nSmp, eps), r, nSmp)
  # Bias row i of H towards the samples of class i, so row 1 tracks the
  # first class (down-regulated metagene) and row 2 the second (up-regulated).
  for (i in 1:r) {
    H[i, which(trainlabel == names(table(trainlabel))[i])] <-
      H[i, which(trainlabel == names(table(trainlabel))[i])] + sum(H)
  }
  H <- pmax(H, 0)
  W <- matrix(runif(nFea * r, eps), nFea, r)
  # BUGFIX: normalise each *column* of W to sum to 1.  The original
  # `W/colSums(W)` recycled the length-r divisor down the rows in
  # column-major order, which does not normalise columns.
  W <- sweep(W, 2, colSums(W), "/")
  # KL divergence between data and the reconstruction W %*% H.
  b <- pmax(abs(W %*% H), eps)
  obj0 <- sum(data * log((data + eps) / (b - eps)) - data + b)
  obj1 <- obj0
  ##########################
  E <- matrix(1, nFea, nSmp)
  # N holds 1/Nr (inverse within-class sample counts), the weighting matrix.
  SmpCount_withinClass <- as.vector(table(trainlabel)[trainlabel])
  N <- matrix(1 / SmpCount_withinClass, r, nSmp, byrow = TRUE)
  final <- Inf
  count <- 1
  Hclass <- H
  obj_stack <- vector(mode = "numeric", length = maxIter)
  while (final > tol && count <= maxIter) {
    # Update H: closed-form positive root of the discriminant KL objective.
    for (i in unique(trainlabel)) {
      Hclass[, which(trainlabel == i)] <-
        matrix(rep(rowSums(H[, which(trainlabel == i)]),
                   length(which(trainlabel == i))),
               r, length(which(trainlabel == i)))
    }
    Hclass <- Hclass - H
    Hsum <- matrix(rep(rowSums(H), ncol(H)), r, ncol(H)) - H
    tmp_a <- 4 * gamma + 4 * (delta / nSmp - (gamma + delta) * N)   # 2a
    tmp_b <- 1 + 2 * delta / nSmp * Hsum - 2 * (delta + gamma) * N * Hclass
    tmp_c <- -t(W) %*% (data / (W %*% H + eps)) * H
    H <- (sqrt(tmp_b^2 - 2 * tmp_a * tmp_c) - tmp_b) / tmp_a
    H <- pmax(H, eps)
    #######################################
    # Update W, renormalise its columns and rescale H to compensate.
    W <- (W / (E %*% t(H))) * (data / (W %*% H)) %*% t(H)
    H <- diag(colSums(W)) %*% H
    W <- W / matrix(rep(colSums(W), each = nrow(W)), nrow(W), ncol(W))
    W <- pmax(W, eps)
    obj2 <- obj1
    b <- pmax(abs(W %*% H), eps)
    obj1 <- sum(data * log((data + eps) / (b - eps)) - data + b)
    # Relative objective change drives convergence.
    final <- abs(obj1 - obj2) / abs(obj1 - obj0)
    obj_stack[count] <- obj1
    count <- count + 1
  }
  # to plot H
  if (plotit) {
    gplots::heatmap.2(H, scale = "row", trace = "none", density.info = "none",
                      keysize = 1, cexCol = 0.8, srtCol = 30, ...)
  }
  # Orient the metagenes: row 1 of H should be the down-regulated metagene
  # and row 2 the up-regulated one; swap both H and W if they came out
  # reversed.
  l1 <- apply(H[, which(trainlabel == names(table(trainlabel))[1])], 1, mean)
  l2 <- apply(H[, which(trainlabel == names(table(trainlabel))[2])], 1, mean)
  if (checkH) {
    if (l1[1] < l2[1] && l1[2] > l2[2]) {
      H <- rbind(H[2, ], H[1, ])
      # BUGFIX: the original assigned the swapped columns to an unused
      # variable `W1`, so rnk = W[,2] - W[,1] was computed from the
      # unswapped W even though H had been swapped.  Swap W itself.
      W <- cbind(W[, 2], W[, 1])
    } else if ((l1[1] < l2[1] && l1[2] < l2[2]) || (l1[1] > l2[1] && l1[2] > l2[2])) {
      stop("Failed. Run DNMF again after restart R.")
    }
  }
  list(V = data, W = W, H = H, rnk = W[, 2] - W[, 1], trainlabel = trainlabel,
       delta = delta, gamma = gamma, count = count, final = final,
       obj_stack = obj_stack, r = r, call = match.call())
}
|
b32a0a2b0cbe1507e3f7ab81afe0fa91401f9005
|
96148d4bcf4dc8c4d67fac7f2e0774c68f2d8516
|
/plot2.R
|
2ab72825862755c93cabb710b4e9bc01cbb19ebe
|
[] |
no_license
|
rmullinnix/ExpData_ProgAssign2
|
0a8b2adbfddab268b219ad27fbe225639d48ee39
|
ac088499c50c121fc86ce95e80dabc3ba7b1b426
|
refs/heads/master
| 2016-09-05T19:05:28.146823
| 2014-10-24T21:48:44
| 2014-10-24T21:48:44
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 648
|
r
|
plot2.R
|
##
# Plot total PM2.5 emissions for Baltimore City (fips 24510) by year.
library(dplyr)

# Bail out early if the summary data is not in the working directory.
if (!file.exists("summarySCC_PM25.rds"))
	stop("Summary PM25 file not found")

# Read the summary data and aggregate Baltimore City emissions per year.
total_by_year <- readRDS("summarySCC_PM25.rds") %>%
	tbl_df() %>%
	filter(fips == "24510") %>%
	group_by(year) %>%
	summarise(arr = sum(Emissions, na.rm = TRUE))

png("plot2.png")
plot(total_by_year$year, total_by_year$arr, type = "b",
     main = expression('Baltimore City Total Emissions from PM'[2.5]),
     xlab = "Year",
     ylab = expression("PM"[2.5]))
dev.off()
|
4465de61e103f537f7a8df1613aaac0762a8d9c7
|
7f82da6b91a8fda7d9570ae1c0f8057d0a277c7f
|
/2_2_model_drivers/src/met_downscale/plot_downscaled_met.R
|
72dc2e34e9f2e5f40c30126a134a9ff1481adb4c
|
[
"CC0-1.0",
"LicenseRef-scancode-public-domain-disclaimer",
"LicenseRef-scancode-warranty-disclaimer"
] |
permissive
|
jzwart/da_pgdl_stream_temp
|
fa4f031cefbdac6f545a4baa01517633d2402bc3
|
8aafc4251c6c5e173df2480af519a11429d9198b
|
refs/heads/main
| 2023-04-01T12:01:31.296401
| 2021-04-14T14:11:36
| 2021-04-14T14:11:36
| 311,776,664
| 0
| 1
|
CC0-1.0
| 2021-01-11T19:28:28
| 2020-11-10T20:28:03
|
R
|
UTF-8
|
R
| false
| false
| 750
|
r
|
plot_downscaled_met.R
|
# Plot downscaled meteorology ensembles: one PDF page per variable in
# `VarNames`, each showing every ensemble member's trajectory over time.
# The PDF is written to `working_directory`/downscaled_met_plots.pdf.
plot_downscaled_met <- function(met_file_names, VarNames, working_directory){
  # Read every ensemble file into one long data frame, tagging member i with
  # `ens = i - 1`.  The first element of `met_file_names` is skipped, as in
  # the original loop (presumably a reference/observed file rather than an
  # ensemble member -- TODO confirm with callers).
  # Building the list first and rbind-ing once avoids the O(n^2) grow-by-
  # rbind of the original loop; it also behaves correctly when fewer than
  # two file names are supplied (the original's 2:length() would misfire).
  full.data <- do.call(rbind, lapply(seq_along(met_file_names)[-1], function(i) {
    read.csv(met_file_names[i]) %>%
      dplyr::mutate(ens = i - 1)
  }))
  pdf(paste0(working_directory, "/downscaled_met_plots.pdf"))
  for (i in seq_along(VarNames)) {
    print(ggplot(data = full.data, aes(x = time)) +
            geom_line(aes(y = get(paste(VarNames[i])), color = "Downscaled", group = ens), alpha = 0.3) +
            ylab(paste(VarNames[i])) +
            xlab("time") +
            theme_linedraw() +
            theme(text = element_text(size = 14)) +
            scale_color_manual(values = c("firebrick2", "black")))
  }
  dev.off()
}
|
d1d4d97176db9e59bc3a7220b2dce34754ac9fe7
|
f606dc05a68407496aa3dccd0f56cf147d4bad82
|
/rprog_data_specdata/pollutantmean.R
|
ef76a49f8979aa4414244ad4135e226787dcda71
|
[] |
no_license
|
Osirisis/RProgramming
|
4fe68662b0779928bc683d00d773d900c1cb6cd6
|
95c94e78255e7dfa0e748eec93f1e2d3758e99da
|
refs/heads/master
| 2021-01-09T20:16:47.580362
| 2016-06-11T18:53:59
| 2016-06-11T18:53:59
| 60,922,725
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 533
|
r
|
pollutantmean.R
|
# Mean of a pollutant across monitor CSV files.
#
# `directory` is a folder under the working directory containing files named
# "001.csv", "002.csv", ...; `pollutant` is the column to average; `id` the
# monitor numbers to include.  Prints (and invisibly returns) the mean with
# NAs removed.
pollutantmean <- function(directory, pollutant, id = 1:332) {
  # Zero-pad monitor ids to three digits (e.g. 1 -> "001") -- replaces the
  # original's manual if/else padding chain.
  padded_ids <- sprintf("%03d", id)
  files <- file.path(getwd(), directory, paste0(padded_ids, ".csv"))
  # http://stackoverflow.com/questions/11433432/importing-multiple-csv-files-into-r
  mydata <- do.call(rbind, lapply(files, function(x) read.csv(x, stringsAsFactors = FALSE)))
  print(mean(mydata[[pollutant]], na.rm = TRUE), digits = 4)
}
|
28264b80ea949743eda547cf07eebe2b11a44499
|
490e0fcca0ae7b0eb4922ee2aeaa8258690c669d
|
/Extra/Full R Scripts/Total N Full.R
|
0c948b3f72a7281e171ceb503c5de79e754d2c07
|
[] |
no_license
|
nezirii/Thesis
|
ef73556f1c2e0887118cccadb2e15ad7112f5586
|
507926945501af539ed893e789f225e0637d9451
|
refs/heads/master
| 2021-01-15T10:43:30.050932
| 2020-08-18T01:45:49
| 2020-08-18T01:45:49
| 99,593,185
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 10,550
|
r
|
Total N Full.R
|
#load data
sm<-read.table(file="soil.data.3.csv", header=T, sep=",")
#set factors
str(sm)
sm$f.time<-factor(sm$time)
sm$f.plot<-factor(sm$plot)
sm$nest <- with(sm, factor(paste(location,f.plot)))
#install packages
install.packages("nlme")
install.packages("lme4")
install.packages("lmerTest")
install.packages("dplyr")
install.packages("nortest")
install.packages("ggplot2")
library(nlme)
library(lme4)
library(lmerTest)
library(dplyr)
library(nortest)
library(ggplot2)
#Look at mixed effects model
#start without random factor
M0<-gls(TN.inorg ~ impact+f.time,
na.action=na.omit, data=sm, method="ML")
#add random factor - refer to chapter 5 of zuur
M1<-lme(TN.inorg ~ impact+f.time,
random=~ 1 | location, na.action=na.omit, data=sm, method="ML")
M2<-lme(TN.inorg ~ impact+f.time, random=~1|nest,
na.action=na.omit, data=sm, method="ML")
anova(M1,M2)
#M1 looks the best with no nesting and no random effect
#Look at residuals
E1<-residuals(M1)
plot(filter(sm, !is.na(TN.inorg)) %>%dplyr::select(location),
E1, xlab="Location", ylab="Residuals")
plot(filter(sm, !is.na(TN.inorg)) %>%dplyr::select(impact),
E1, xlab="Location", ylab="Residuals")
qqnorm(residuals(M1))
qqline(residuals(M1))
ad.test(residuals(M1))
x<-sm$TN.inorg[!is.na(sm$TN.inorg)]#removes na values from column
E1<-residuals(M1,type="normalized")
plot(M1) #residuals vs fitted values
plot(x, E1)
#try alternate variance structures
# Candidate heteroscedasticity structures for gls(): varIdent (separate
# variance per stratum), varPower/varExp/varConstPower (variance as a
# function of fitted values, optionally per stratum).
vf1=varIdent(form=~1|impact)
vf2=varIdent(form=~1|f.time)
vf3=varPower(form=~ fitted(.))
vf4=varExp(form=~ fitted(.))
vf5=varConstPower(form=~ fitted(.))
vf6=varPower(form = ~ fitted (.)|impact)
vf7=varPower(form = ~ fitted (.)|f.time)
vf8=varExp(form=~fitted(.)|impact)
vf9=varExp(form=~fitted(.)|f.time)
vf10=varConstPower(form=~ fitted(.)|impact)
vf11=varConstPower(form=~ fitted(.)|f.time)
# Refit the base model with REML for valid comparison of variance structures.
M1<-gls(TN.inorg ~ impact+f.time,
na.action=na.omit, data=sm, method="REML")
M1.1<-gls(TN.inorg ~ impact+f.time,
na.action=na.omit, data=sm, weights=vf1)
M1.2<-gls(TN.inorg ~ impact+f.time,
na.action=na.omit, data=sm, weights=vf2)
# NOTE(review): "M0.3" breaks the M1.x naming pattern — presumably meant M1.3;
# it is excluded from the anova below either way.
M0.3<-gls(TN.inorg ~ impact+f.time,
na.action=na.omit, data=sm, weights=vf3)
#No Convergence
M1.4<-gls(TN.inorg ~ impact+f.time,
na.action=na.omit, data=sm, weights=vf4)
#No Convergence
M1.5<-gls(TN.inorg ~ impact+f.time,
na.action=na.omit, data=sm, weights=vf5)
M1.6<-gls(TN.inorg ~ impact+f.time,
na.action=na.omit, data=sm, weights=vf6)
M1.7<-gls(TN.inorg ~ impact+f.time,
na.action=na.omit, data=sm, weights=vf7)
M1.8<-gls(TN.inorg ~ impact+f.time,
na.action=na.omit, data=sm, weights=vf8)
#No Convergence
M1.9<-gls(TN.inorg ~ impact+f.time,
na.action=na.omit, data=sm, weights=vf9)
#No Convergence
M1.10<-gls(TN.inorg ~ impact+f.time,
na.action=na.omit, data=sm, weights=vf10)
#No Convergence
M1.11<-gls(TN.inorg ~ impact+f.time,
na.action=na.omit, data=sm, weights=vf11)
# Compare only the fits that converged (non-converging fits omitted above).
anova(M1, M1.1, M1.2, M1.5, M1.6, M1.7, M1.11)
#M1.2 is best with varIdent as a function of time
E1.2<-residuals(M1.2)
plot(filter(sm, !is.na(TN.inorg)) %>%dplyr::select(location),
E1.2, xlab="Location", ylab="Residuals")
plot(filter(sm, !is.na(TN.inorg)) %>%dplyr::select(impact),
E1.2, xlab="Location", ylab="Residuals")
qqnorm(residuals(M1.2))
qqline(residuals(M1.2))
ad.test(residuals(M1.2))
x<-sm$TN.inorg[!is.na(sm$TN.inorg)]#removes na values from column
E1.2<-residuals(M1.2,type="normalized")
plot(M1.2) #residuals vs fitted values
plot(x, E1.2)
summary(M1.2)
#Auto Correlation Plot
# Re-insert residuals into a full-length vector (NA where the response was
# missing) so acf() respects the original time ordering.
E1.2<-residuals(M1.2)
# NOTE(review): the NA mask is taken from pct.moisture, not TN.inorg — this
# only aligns correctly if the two columns share the same missing rows; verify.
x<-!is.na(sm$pct.moisture)
Efull<-vector(length=length(sm$pct.moisture))
# NOTE(review): this overwrites the preallocated vector with a length-1 NA;
# the logical-index assignment below then re-extends it (positions not
# assigned stay NA). It works, but looks accidental.
Efull<-NA
Efull[x]<-E1.2
acf(Efull, na.action=na.pass,
main="Auto-correlation plot for residuals")
#one option is to group by graph.interval instead of time
#but we will group by categorical time for graphing purposes
# Mean, SD, n and standard error of TN.inorg per impact x sampling-time cell.
x <- group_by(sm, impact, f.time) %>% # Grouping function causes subsequent functions to aggregate by season and reach
summarize(TN.inorg.mean = mean(TN.inorg, na.rm = TRUE), # na.rm = TRUE to remove missing values
TN.inorg.sd=sd(TN.inorg, na.rm = TRUE), # na.rm = TRUE to remove missing values
n = sum(!is.na(TN.inorg)), # of observations, excluding NAs.
TN.inorg.se=TN.inorg.sd/sqrt(n))
#this code defines graphing.interval as date format, but we won't use it for now
#x$graph.interval <-as.Date(as.character(x$graph.interval), format="%m/%d/%Y")
#make a new vector with the categorical times
cat.time<-c("Sep 15", "Oct 15", "Nov 15", "Apr 16", "Jun 16", "Aug 16", "Sep 16", "Nov 16")
#force the new vector to be characters
# NOTE(review): x has one row per impact x time combination, so the 8 labels
# are recycled across rows; this relies on rows being sorted by impact then
# time so each recycled label lands on the matching f.time — confirm.
x$cat.time<-as.character(cat.time)
#force the new vector to be ordered in the order you gave it instead of alphabetical
x$cat.time<-factor(x$cat.time, levels=unique(x$cat.time))
pd=position_dodge(0.1)
# Mean +/- SE per sampling event, one line per budworm-impact group, with
# model p-values annotated on the panel.
ggplot(x, aes(x=cat.time, y=TN.inorg.mean)) +
geom_errorbar(aes(ymin=TN.inorg.mean-TN.inorg.se, ymax=TN.inorg.mean+TN.inorg.se), color="black", width=0.1, position=pd) +
geom_line(position=pd, color="black", aes(group=impact)) +
geom_point(size=3, pch=21, aes(fill=impact)) +
xlab("Sample Month and Year") +
ylab(expression(Total~Soil~Inorganic~N~(mg~N~g^{-1}~soil))) +
scale_fill_manual(name="Budworm Activity", values=c("white", "black")) +
expand_limits(y=.03) +
annotate("Text", x=6, y=.05, label="Interaction: P<0.0001", size=4) +
annotate("Text", x=6, y=.048, label="Budworm Impact: P=0.4361", size=4) +
annotate("Text", x=6, y=.046, label="Sampling Event: P<0.0001", size=4) +
theme_bw() +
theme(legend.justification=c(0.03,0.6),
legend.position=c(0.03,0.88),
axis.text=element_text(size=12),
axis.title=element_text(size=12),
legend.title=element_text(size= 12),
legend.text=element_text(size=12),
axis.text.x = element_text(angle = 45, hjust = 1),
panel.grid.major=element_blank(),
panel.grid.minor=element_blank())
#this will save the file
# Write the most recent ggplot to a high-resolution compressed TIFF.
ggsave('tn inorg.tiff',
units="in",
width=5,
height=4.5,
dpi=1200,
compression="lzw")
#Auto Correlation
# NOTE(review): this section models NH4 although the file is "Total N" —
# it appears copied from the NH4 script; confirm which response was intended.
# Compound-symmetry and AR(1) temporal correlation structures, gls first.
M3<-gls(NH4 ~ impact+f.time,
na.action=na.omit, data=sm, correlation=corCompSymm(form=~f.time))
M4<-gls(NH4 ~ impact+f.time,
na.action=na.omit, data=sm, correlation=corAR1(form=~f.time))
#Doesn't work
# Mixed models combining the vf7 variance structure (defined earlier) with
# temporal correlation; M1.12b drops the variance weights.
M1.12<-lme(NH4 ~ impact+f.time,
random=~ 1 | location, na.action=na.omit, data=sm, weights=vf7, correlation=corCompSymm(form=~f.time))
M1.12b<-lme(NH4 ~ impact+f.time,
random=~ 1 | location, na.action=na.omit, data=sm, correlation=corCompSymm(form=~f.time))
M1.13<-lme(NH4 ~ impact+f.time,
random=~ 1 | location, na.action=na.omit, data=sm, weights=vf7, correlation=corAR1(form=~f.time))
#Doesn't Work
# ARMA correlation structures: AR(1) with phi=0.2, AR(2) with phi=(0.3,-0.3).
cs1<-corARMA(c(0.2), p=1, q=0)
cs2<-corARMA(c(0.3, -0.3), p=2, q=0)
M1.14<-lme(NH4 ~ impact+f.time,
random=~ 1 | location, na.action=na.omit, data=sm, weights=vf7, correlation=cs1)
M1.15<-lme(NH4 ~ impact+f.time,
random=~ 1 | location, na.action=na.omit, data=sm, weights=vf7, correlation=cs2)
anova(M1.7,M1.12,M1.12b,M1.14,M1.15,M3)
#M1.15 is slightly better
# Residual diagnostics for the selected AR(2) model.
E1.15<-residuals(M1.15)
plot(filter(sm, !is.na(NH4)) %>%dplyr::select(location),
E1.15, xlab="Location", ylab="Residuals")
plot(filter(sm, !is.na(NH4)) %>%dplyr::select(impact),
E1.15, xlab="Location", ylab="Residuals")
qqnorm(residuals(M1.15))
qqline(residuals(M1.15))
ad.test(residuals(M1.15))
#Log Normalized data
#start without random factor
M0<-gls(log.TN.inorg ~ impact+f.time,
na.action=na.omit, data=sm, method="ML")
M1<-lme(log.TN.inorg ~ impact+f.time,
random=~ 1 | location, na.action=na.omit, data=sm, method="ML")
M2<-lme(log.TN.inorg ~ impact+f.time, random=~1|nest,
na.action=na.omit, data=sm, method="ML")
anova(M1,M0)
#M1 looks the best with no nesting and no random effect
#Look at residuals
E1<-residuals(M1)
plot(filter(sm, !is.na(log.TN.inorg)) %>%dplyr::select(location),
E1, xlab="Location", ylab="Residuals")
plot(filter(sm, !is.na(log.TN.inorg)) %>%dplyr::select(impact),
E1, xlab="Location", ylab="Residuals")
qqnorm(residuals(M1))
qqline(residuals(M1))
ad.test(residuals(M1))
x<-sm$log.TN.inorg[!is.na(sm$log.TN.inorg)]#removes na values from column
E1<-residuals(M1,type="normalized")
plot(M1) #residuals vs fitted values
plot(x, E1)
# FIX: this refit was previously written as gls(..., random = ~1|location),
# but gls() has no 'random' argument and errors on it; lme() matches the M1
# fit above and the weighted lme() fits below.
M1<-lme(log.TN.inorg ~ impact+f.time,
random=~ 1 | location, na.action=na.omit, data=sm, method="REML")
M1.1<-lme(log.TN.inorg ~ impact+f.time,
random=~ 1 | location, na.action=na.omit, data=sm, weights=vf1)
M1.2<-lme(log.TN.inorg ~ impact+f.time,
random=~ 1 | location, na.action=na.omit, data=sm, weights=vf2)
M1.3<-lme(log.TN.inorg ~ impact+f.time,
random=~ 1 | location, na.action=na.omit, data=sm, weights=vf3)
M1.4<-lme(log.TN.inorg ~ impact+f.time,
random=~ 1 | location, na.action=na.omit, data=sm, weights=vf4)
M1.5<-lme(log.TN.inorg ~ impact+f.time,
random=~ 1 | location, na.action=na.omit, data=sm, weights=vf5)
M1.6<-lme(log.TN.inorg ~ impact+f.time,
random=~ 1 | location, na.action=na.omit, data=sm, weights=vf6)
M1.7<-lme(log.TN.inorg ~ impact+f.time,
random=~ 1 | location, na.action=na.omit, data=sm, weights=vf7)
M1.8<-lme(log.TN.inorg ~ impact+f.time,
random=~ 1 | location, na.action=na.omit, data=sm, weights=vf8)
M1.9<-lme(log.TN.inorg ~ impact+f.time,
random=~ 1 | location, na.action=na.omit, data=sm, weights=vf9)
M1.10<-lme(log.TN.inorg ~ impact+f.time,
random=~ 1 | location, na.action=na.omit, data=sm, weights=vf10)
M1.11<-lme(log.TN.inorg ~ impact+f.time,
random=~ 1 | location, na.action=na.omit, data=sm, weights=vf11)
anova(M1.1,M1.2,M1.3,M1.4,M1.5,M1.6,M1.7,M1.8,M1.9,M1.10,M1.11)
#Try M1.2,M1.3,M1.4,M1.6,M1.7,M1.8,M1.9,M1.10,M1.11
#Base model with no random factor in GLS was no good either
#Log normalizing does not work
E1.11<-residuals(M1.11)
# FIX: these two plots previously reused the stale E1.2 residuals (from the
# earlier untransformed model) and filtered on TN.inorg; use the log-model
# residuals and the log-response NA pattern consistently.
plot(filter(sm, !is.na(log.TN.inorg)) %>%dplyr::select(location),
E1.11, xlab="Location", ylab="Residuals")
plot(filter(sm, !is.na(log.TN.inorg)) %>%dplyr::select(impact),
E1.11, xlab="Location", ylab="Residuals")
qqnorm(residuals(M1.11))
qqline(residuals(M1.11))
ad.test(residuals(M1.11))
# NOTE(review): the lines below look copy-pasted from the untransformed
# section — they use TN.inorg and M1.2, but M1.2 was just overwritten by the
# log-model fit above, so this no longer summarizes the model chosen earlier.
x<-sm$TN.inorg[!is.na(sm$TN.inorg)]#removes na values from column
E1.2<-residuals(M1.2,type="normalized")
plot(M1.2) #residuals vs fitted values
plot(x, E1.2)
summary(M1.2)
|
d4e53c58e66a7046d0eb153958170a8a2560566c
|
76f73ae8939ca7c1998a8270ae4907601159693b
|
/code/dv_tables.R
|
2b0335e22ba115ce2884a5f37e9407f553a5490b
|
[
"BSD-3-Clause",
"BSD-2-Clause"
] |
permissive
|
muuankarski/attributions
|
b9a003eed79556f92aa5ec2a846c2cfab714601d
|
fa9ac65c936fdfb917541266225a1947b93b0d26
|
refs/heads/master
| 2021-01-01T18:22:57.062986
| 2015-01-14T11:38:35
| 2015-01-14T11:38:35
| 16,984,436
| 1
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 3,647
|
r
|
dv_tables.R
|
load("~/workspace/lits/clone/attributions/data/lits.RData")
library(survey)
d.df <- svydesign(id = ~SerialID,
weights = ~weight,
data = df)
t <- data.frame(prop.table(svytable(~group_general+cntry+q309, d.df), 2)*100)
t$Freq <- round(t$Freq, 1)
t <- subset(t, Freq > 0)
### testailua
# maa <- subset(t, q309 == "Not stated")
# maa <- maa[order(maa$group_general),]
# t$cntry <- factor(t$cntry, levels=maa$cntry)
# library(ggplot2)
# ggplot(t, aes(x=cntry,y=Freq,group=cntry)) +
# geom_bar(stat="identity", position="dodge") +
# facet_wrap(~q309) + coord_flip()
####
library(reshape2)
t2 <- dcast(t, group_general+cntry ~ q309, value.var="Freq")
names(t2) <- c("group_general","country","socialBlame","individualBlame",
"individualFate","socialFate","notStated","dontKnow")
t2 <- t2[,c(1,2,3,4,6,5,7,8)]
t2 <- t2[order(-t2$socialBlame),]
########
#t2$group_general <- NULL
#---------------------------------------------------#
#---------------------------------------------------#
#---------------------------------------------------#
#---------------------------------------------------#
# t <- data.frame(prop.table(svytable(~group_general+cntry+poverty, d.df), 2)*100)
# t$Freq <- round(t$Freq, 2)
# t <- subset(t, Freq > 0)
# t2 <- dcast(t, group_general+cntry ~ poverty, value.var="Freq")
# names(t2) <- c("group_general","country","socialBlame","individualBlame","individualFate","socialFate")
# t2 <- t2[order(t2$group,-t2$socialBlame),]
########
y <- data.frame(prop.table(svytable(~group_general+q309, d.df), 1)*100)
y$Freq <- round(y$Freq, 2)
y$country <- c("CEE mean","CIS mean","Western Europe mean")
y2 <- dcast(y, group_general+country ~ q309, value.var="Freq")
names(y2) <- c("group_general","country","socialBlame","individualBlame",
"individualFate","socialFate","notStated","dontKnow")
y2 <- y2[,c(1,2,3,4,6,5,7,8)]
#
#
library(plyr)
co.var<-function(x)(100*sd(x)/mean(x))
#
func.CV <- function(t2)
{
return(data.frame(CV.sb = co.var(t2$socialBlame)))
}
df.CV.sb <- ddply(t2, .(group_general), func.CV)
#
func.CV <- function(t2)
{
return(data.frame(CV.ib = co.var(t2$individualBlame)))
}
df.CV.ib <- ddply(t2, .(group_general), func.CV)
#
func.CV <- function(t2)
{
return(data.frame(CV.sf = co.var(t2$socialFate)))
}
df.CV.sf <- ddply(t2, .(group_general), func.CV)
#
func.CV <- function(t2)
{
return(data.frame(CV.if = co.var(t2$individualFate)))
}
df.CV.if <- ddply(t2, .(group_general), func.CV)
#
func.CV <- function(t2)
{
return(data.frame(CV.nt = co.var(t2$notStated)))
}
df.CV.nt <- ddply(t2, .(group_general), func.CV)
#
func.CV <- function(t2)
{
return(data.frame(CV.dk = co.var(t2$dontKnow)))
}
df.CV.dk <- ddply(t2, .(group_general), func.CV)
##
df.CV <- merge(df.CV.sb,df.CV.ib,by="group_general")
df.CV <- merge(df.CV,df.CV.sf,by="group_general")
df.CV <- merge(df.CV,df.CV.if,by="group_general")
df.CV <- merge(df.CV,df.CV.nt,by="group_general")
df.CV <- merge(df.CV,df.CV.dk,by="group_general")
##
# Label the CV rows so they can sit alongside the country rows in the final
# combined table.
df.CV$group_general <- as.character(df.CV$group_general)
df.CV$group_general[df.CV$group_general == 'CEE'] <- 'CV CEE'
df.CV$group_general[df.CV$group_general == 'CIS'] <- 'CV CIS'
df.CV$group_general[df.CV$group_general == 'Western Europe'] <- 'CV Western Europe'
##
# NOTE(review): assigning 7 names here also renames the old group_general
# column to "country" (and the CV.* columns to the answer names) before a
# fresh group_general column is appended below — intentional but subtle.
names(df.CV) <- c("country","socialBlame","individualBlame","socialFate",
"individualFate","notStated","dontKnow")
df.CV$group_general <- c("CV","CV","CV")
# Move the new group_general column to the front to match t2/y2.
df.CV <- df.CV[,c(8,1,2,3,4,5,6,7)]
# Final table: per-country rows, regional means, then CV rows.
tbl5 <- rbind(t2,y2,df.CV)
#t5 <- arrange(t5, group_general,country)
tbl5$group_general <- NULL
tbl5[,2:7] <- round(tbl5[,2:7],1)
|
48398f066c3a6002ac7e41dbda14f56681b44af6
|
7e56e97392d5a48f2f9425462a1fed9453742bf7
|
/FunctionsLoads/Functions/calculateDet.R
|
ac5a5a11638959856b5f59cd478947dcd57d09a0
|
[] |
no_license
|
teamsoccult/CRUST-1
|
2d0f328254b1d2d099956ee8a78d96a1f9a794af
|
1668e732459132e240fb80c6b87737f4b1c447f9
|
refs/heads/master
| 2020-05-03T09:45:56.279755
| 2020-02-16T15:38:42
| 2020-02-16T15:38:42
| 178,562,903
| 0
| 0
| null | 2019-03-30T13:36:19
| 2019-03-30T13:36:19
| null |
UTF-8
|
R
| false
| false
| 928
|
r
|
calculateDet.R
|
################
##
## @description Calculate the deterministic part of the model
##
## @param model Model in matrix format
## @param xset X values randomly generated
## @param weights Beta weights
## @param betas Random betas
##
## @return Deterministic value of the model
##
## @lastChange 2018-03-01
##
## @changes
## Fixed the use of wrong beta values [2018-03-01]
## Included parameter weights [2018-03-01]
## Adjust model [2017-02-13]
##
################
################
##
## @description Calculate the deterministic part of the model
##
## @param model Model in matrix format (one row per term, 0/1 entries per
##        predictor); a single term may be supplied as a plain 0/1 vector
## @param xset X values randomly generated (one column per predictor)
## @param weights Matrix/data.frame whose first column encodes each term's
##        0/1 pattern as a base-10 integer key, row-aligned with betas
## @param betas Random betas, one per row of weights
##
## @return Deterministic value of the model: numeric vector with one value
##         per row of xset
##
################
calculateDet <- function(model, xset, weights, betas){
  # A single term may arrive as a plain vector; promote it to a 1-row matrix.
  if (!is.matrix(model)) {
    model <- t(as.matrix(model))
  }
  k <- ncol(model)
  deterministic <- 0
  # Base-10 place values used to encode a 0/1 term pattern as an integer key.
  f <- 10^((k - 1):0)
  for (r in seq_len(nrow(model))) {
    # Row-wise product of the predictors included in this term. Base-R
    # apply() replaces matrixStats::rowProds, removing a hidden dependency
    # (identical result, including the empty-selection case where prod() of
    # zero columns yields 1).
    x <- apply(as.matrix(xset[, model[r, ] == 1]), 1, prod)
    # Locate the beta whose key matches this term's encoded pattern.
    index <- weights[, 1] == sum(as.numeric(model[r, ] == 1) * f)
    deterministic <- deterministic + (betas[which(index)] * x)
  }
  deterministic
}
|
e3f8888eef83c482125b6558a3da7ec03dca2f00
|
a58702fdc93e352ee8a1f88a65e4b6bec8b165c8
|
/dataset.R
|
429e45fe5d280ddd64eee656ed924eee5e1fe07f
|
[] |
no_license
|
baleeiro17/HarvardX-s-Data-Science-
|
6a7feeb266f68597df6ffa39d642a1a3ded4ea19
|
c8b8a7d934a00d41761721e876c820d88c25089b
|
refs/heads/master
| 2022-11-22T21:13:57.986278
| 2020-07-22T19:42:25
| 2020-07-22T19:42:25
| 280,945,003
| 1
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,271
|
r
|
dataset.R
|
# Tutorial script: creating data frames, ordering/ranking, and handling NA
# values, using the dslabs example datasets.
library(dslabs)
data("murders")
data(na_example)
# Create a data frame from two parallel vectors:
temp <- c(35, 88, 42, 84, 81, 30)
city <- c("Beijing", "Lagos", "Paris", "Rio de Janeiro", "San Juan", "Toronto")
# Combine the vectors into named columns:
city_temps <- data.frame(name = city, temperature = temp)
city_temps
# Another example of building a data frame from an existing dataset:
# Define a variable states to be the state names from the murders data frame
states <- murders$state
# Define a variable ranks to determine the population size ranks
ranks <- rank(murders$population)
# Define a variable ind to store the indexes needed to order the population values
ind <- order(murders$population)
# Create a data frame my_df with the state name and its rank, ordered from least populous to most
my_df <- data.frame( name = states[ind] , rank = ranks[ind] )
my_df
# Note what we can do with the ! operator (negate a logical index)
x <- c(1, 2, 3)
ind <- c(FALSE, TRUE, FALSE)
x[!ind]
# Show the structure of the NA example dataset
str(na_example)
# Use is.na to create a logical index ind that tells which entries are NA
ind<- is.na(na_example)
# Determine how many NA values ind has using the sum function
sum(ind)
# Use ! to exclude the NA entries:
# Compute the average for entries of na_example that are not NA
mean( na_example[!ind] )
|
5bf0922675af489489e64a53ed4727b4a15a5e35
|
150ddbd54cf97ddf83f614e956f9f7133e9778c0
|
/man/symmetrize.Rd
|
50192d79f1182c105fe226a3e9eb5b87cf03e8d5
|
[
"CC-BY-4.0"
] |
permissive
|
debruine/webmorphR
|
1119fd3bdca5be4049e8793075b409b7caa61aad
|
f46a9c8e1f1b5ecd89e8ca68bb6378f83f2e41cb
|
refs/heads/master
| 2023-04-14T22:37:58.281172
| 2022-08-14T12:26:57
| 2022-08-14T12:26:57
| 357,819,230
| 6
| 4
|
CC-BY-4.0
| 2023-02-23T04:56:01
| 2021-04-14T07:47:17
|
R
|
UTF-8
|
R
| false
| true
| 1,189
|
rd
|
symmetrize.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/symmetrise.R
\name{symmetrize}
\alias{symmetrize}
\alias{symmetrise}
\title{Symmetrize Images}
\usage{
symmetrize(stimuli, shape = 1, color = 1, tem_id = "frl", ...)
symmetrise(stimuli, shape = 1, color = 1, tem_id = "frl", ...)
}
\arguments{
\item{stimuli}{list of stimuli}
\item{shape, color}{amount of symmetry (0 for none, 1.0 for perfect)}
\item{tem_id}{template ID to be passed to \code{\link[=tem_def]{tem_def()}} (usually "frl" or "fpp106")}
\item{...}{Additional arguments to pass to \code{\link[=trans]{trans()}}}
}
\value{
list of stimuli with symmetrised images and templates
}
\description{
Use webmorph.org to make faces symmetric in shape and/or colour.
}
\examples{
\donttest{
if (webmorph_up()) {
stimuli <- demo_stim(1)
sym_both <- symmetrize(stimuli)
sym_shape <- symmetrize(stimuli, color = 0)
sym_color <- symmetrize(stimuli, shape = 0)
sym_anti <- symmetrize(stimuli, shape = -1.0, color = 0)
}
}
}
\seealso{
WebMorph.org functions
\code{\link{avg}()},
\code{\link{continuum}()},
\code{\link{loop}()},
\code{\link{trans}()},
\code{\link{webmorph_up}()}
}
\concept{webmorph}
|
7d5c16d70d9bc286c07afe6d44a333f3ac8f4b81
|
320bb31bba3f88ad0e940553db4f7f54e0c4c920
|
/man/BIOMOD_CrossValidation-deprecated.Rd
|
bc14124e8005cc7485f4710fa8eca38459414cec
|
[] |
no_license
|
biomodhub/biomod2
|
519d120381332c719fc23d1a5d0a4d1030fd2a01
|
ee9734d7dd9455cc8b76a000f74785512a119e2f
|
refs/heads/master
| 2023-08-31T03:29:40.910990
| 2023-08-28T14:10:59
| 2023-08-28T14:10:59
| 122,992,854
| 61
| 21
| null | 2023-09-12T12:29:52
| 2018-02-26T15:55:28
|
R
|
UTF-8
|
R
| false
| true
| 434
|
rd
|
BIOMOD_CrossValidation-deprecated.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/DEPRECATED.R
\name{BIOMOD_CrossValidation-deprecated}
\alias{BIOMOD_CrossValidation-deprecated}
\title{BIOMOD_CrossValidation}
\usage{
BIOMOD_CrossValidation(...)
}
\arguments{
\item{...}{Additional arguments}
}
\description{
Deprecated function name for
\code{\link{bm_CrossValidation}}
}
\seealso{
\code{\link{biomod2-deprecated}}
}
\keyword{internal}
|
8d0b4bd82defd58341f240d351231aeeb6b7de97
|
a5ebc917557b5a8a1a889859befe1d31b20b408c
|
/man/check_tx.Rd
|
e33ed1b8822a5706f972cc05a68c26a4aed8d0d6
|
[
"BSD-2-Clause"
] |
permissive
|
BUStools/BUSpaRse
|
f81e246ca905ace2bd947958f0235222e4a6c10f
|
5b23c9b609ea20259110eb2592720a6019751a90
|
refs/heads/master
| 2022-09-19T15:48:26.420285
| 2022-04-26T15:58:37
| 2022-04-26T15:58:37
| 161,709,230
| 7
| 2
|
BSD-2-Clause
| 2020-04-24T05:45:14
| 2018-12-14T00:04:47
|
R
|
UTF-8
|
R
| false
| true
| 689
|
rd
|
check_tx.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/velocity.R
\name{check_tx}
\alias{check_tx}
\title{Check if transcript ID in transcriptome and annotation match}
\usage{
check_tx(tx_annot, tx)
}
\arguments{
\item{tx_annot}{Character vector of transcript IDs from the annotation.}
\item{tx}{Character vector of transcript IDs from the transcriptome.}
}
\value{
Character vector of the overlapping transcript IDs.
}
\description{
This function throws an error if transcript IDs in transcriptome and
annotation do not overlap. If they do overlap, this function will give a
message about transcript IDs that do not agree in the transcriptome and the
annotation
}
|
99995a99864e46dd7e598750244a17e618bacefd
|
a8b6f5cbc0c677b0bb317b0740fac7e78938ab9e
|
/man/ex11.04.Rd
|
e8043426dfddb40a50a00447d9f666bb11005591
|
[] |
no_license
|
cran/Devore5
|
784f86cb950d39003a797b309bde9ba5ea239795
|
3c6de7a6447f3be4b54d00832e23995ea9655aa5
|
refs/heads/master
| 2020-12-24T14:36:21.931350
| 2004-10-03T00:00:00
| 2004-10-03T00:00:00
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 504
|
rd
|
ex11.04.Rd
|
\name{ex11.04}
\alias{ex11.04}
\non_function{}
\title{data from exercise 11.4}
\description{
The \code{ex11.04} data frame has 12 rows and 3 columns.
}
\format{
This data frame contains the following columns:
\describe{
\item{Coverage}{
a numeric vector
}
\item{Roller}{
a numeric vector
}
\item{Paint}{
a numeric vector
}
}
}
\details{
}
\source{
Devore, J. L. (2000) \emph{Probability and Statistics for Engineering and the Sciences (5th ed)}, Duxbury
}
\examples{
data(ex11.04)
}
\keyword{datasets}
|
52f1d600dcbcd1cff5d21a597f1a5b338d6b2e36
|
bf6e1b0aad04eeb1b47000ffeeb654432385cd7b
|
/Plot2.R
|
aa06d59860d31d936414f6e1587108906b02eb8c
|
[] |
no_license
|
Edilmo/ExploratoryDataAnalysisCP2
|
f439d707548389fc60cbb3d9dbcb4c05403e425c
|
eb1d0e6f5f3446c5ee4a98f789421b7dfb186d50
|
refs/heads/master
| 2021-01-10T17:11:41.750320
| 2015-09-27T20:40:48
| 2015-09-27T20:40:48
| 43,228,048
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 542
|
r
|
Plot2.R
|
# Plot 2
# Have total emissions from PM2.5 decreased in the Baltimore City, Maryland
# (fips == "24510") from 1999 to 2008? Use the base plotting system to make
# a plot answering this question.
NEI <- readRDS("summarySCC_PM25.rds")
# NOTE(review): SCC is read here but never used by this plot — presumably
# kept for symmetry with the other assignment scripts; confirm.
SCC <- readRDS("Source_Classification_Code.rds")
# Open the PNG device so all subsequent drawing goes to plot2.png.
png(filename = "plot2.png",width = 480,height = 480)
# Keep only Baltimore City rows, then total the emissions for each year.
data <- subset(NEI, fips == "24510", select = c(Emissions, year))
data <- aggregate(list(Emissions = data$Emissions), list(Year = data$year), sum)
barplot(data$Emissions, names.arg = data$Year)
# Close the device to flush the image to disk.
dev.off()
|
3a3915c732fd3cfda85f971e280958e637012704
|
9ca35958aee8e1d16e78b64b03a4cbd3ae1dc586
|
/man/getDESeqResults.Rd
|
7421059aa2be891b1df2954dc41a5b814b2e1e61
|
[] |
no_license
|
mdeber/BRGenomics
|
df68e7f6cf01e36db2a5dc1003abe8bf8f21c9f2
|
b89c4fd9fff3fd3e795be5d382617473a2358d05
|
refs/heads/master
| 2023-04-28T17:29:07.075368
| 2023-04-25T15:16:35
| 2023-04-25T15:16:35
| 228,493,638
| 8
| 3
| null | null | null | null |
UTF-8
|
R
| false
| true
| 7,260
|
rd
|
getDESeqResults.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/deseq_functions.R
\name{getDESeqResults}
\alias{getDESeqResults}
\title{Get DESeq2 results using reduced dispersion matrices}
\usage{
getDESeqResults(
dds,
contrast.numer,
contrast.denom,
comparisons = NULL,
sizeFactors = NULL,
alpha = 0.1,
lfcShrink = FALSE,
args.DESeq = NULL,
args.results = NULL,
args.lfcShrink = NULL,
ncores = getOption("mc.cores", 2L),
quiet = FALSE
)
}
\arguments{
\item{dds}{A DESeqDataSet object, produced using either
\code{\link[BRGenomics:getDESeqDataSet]{getDESeqDataSet}} from this package
or \code{\link[DESeq2:DESeqDataSet]{DESeqDataSet}} from \code{DESeq2}. If
\code{dds} was not created using \code{getDESeqDataSet}, \code{dds} must be
made with \code{design = ~condition} such that a unique \code{condition}
level exists for each sample/treatment condition.}
\item{contrast.numer}{A string naming the \code{condition} to use as the
numerator in the DESeq2 comparison, typically the perturbative condition.}
\item{contrast.denom}{A string naming the \code{condition} to use as the
denominator in the DESeq2 comparison, typically the control condition.}
\item{comparisons}{As an optional alternative to supplying a single
\code{contrast.numer} and \code{contrast.denom}, users can supply a list of
character vectors containing numerator-denominator pairs, e.g.
\code{list(c("B", "A"), c("C", "A"), c("C", "B"))}. \code{comparisons} can
also be a dataframe in which each row is a comparison, the first column
contains the numerators, and the second column contains the denominators.}
\item{sizeFactors}{A vector containing DESeq2 \code{sizeFactors} to apply to
each sample. Each sample's readcounts are \emph{divided} by its respective
DESeq2 \code{sizeFactor}. A warning will be generated if the
\code{DESeqDataSet} already contains \code{sizeFactors}, and the previous
\code{sizeFactors} will be over-written.}
\item{alpha}{The significance threshold passed to \code{DESeqResults}, which
is used for independent filtering of results (see DESeq2 documentation).}
\item{lfcShrink}{Logical indicating if log2FoldChanges and their standard
errors should be shrunk using \code{\link[DESeq2:lfcShrink]{lfcShrink}}.
LFC shrinkage is very useful for making fold-change values meaningful, as
low-expression/high variance genes are given low fold-changes.
Set to \code{FALSE} by default.}
\item{args.DESeq}{Additional arguments passed to
\code{\link[DESeq2:DESeq]{DESeq}}, given as a list of argument-value pairs,
e.g. \code{list(fitType = "local", useT = TRUE)}. All arguments given here
will be passed to \code{DESeq} except for \code{object} and
\code{parallel}. If no arguments are given, all defaults will be used.}
\item{args.results}{Additional arguments passed to
\link[DESeq2:results]{DESeq2::results}, given as a list of argument-value
pairs, e.g. \code{list(altHypothesis = "greater", lfcThreshold = 1.5)}. All
arguments given here will be passed to \code{results} except for
\code{object}, \code{contrast}, \code{alpha}, and \code{parallel}. If no
arguments are given, all defaults will be used.}
\item{args.lfcShrink}{Additional arguments passed to
\code{\link[DESeq2:lfcShrink]{lfcShrink}}, given as a list of
argument-value pairs. All arguments given here will be passed to
\code{lfcShrink} except for \code{dds}, \code{coef}, \code{contrast}, and
\code{parallel}. If no arguments are given, all defaults will be used.}
\item{ncores}{The number of cores to use for parallel processing. Multicore
processing is only used if more than one comparison is being made (i.e.
argument \code{comparisons} is used), and the number of cores utilized will
not be greater than the number of comparisons being performed.}
\item{quiet}{If \code{TRUE}, all output messages from calls to \code{DESeq}
and \code{results} will be suppressed, although passing option \code{quiet}
in \code{args.DESeq} will supersede this option for the call to
\code{DESeq}.}
}
\value{
For a single comparison, the output is the \code{DESeqResults} result
table. If a \code{comparisons} is used to make multiple comparisons, the
output is a named list of \code{DESeqResults} objects, with elements named
following the pattern \code{"X_vs_Y"}, where \code{X} is the name of the
numerator condition, and \code{Y} is the name of the denominator condition.
}
\description{
This function calls \code{\link[DESeq2:DESeq]{DESeq2::DESeq}} and
\code{\link[DESeq2:results]{DESeq2::results}} on a pre-existing
\code{DESeqDataSet} object and returns a \code{DESeqResults} table for one or
more pairwise comparisons. However, unlike a standard call to
\code{DESeq2::results} using the \code{contrast} argument, this function
subsets the dataset so that DESeq2 only estimates dispersion for the samples
being compared, and not for all samples present.
}
\section{Errors when \code{ncores > 1}}{
If this function returns an error,
set \code{ncores = 1}. Whether or not this occurs can depend on whether
users are using alternative BLAS libraries (e.g. OpenBLAS or Apple's
Accelerate framework) and/or how DESeq2 was installed. This is because some
DESeq2 functions (e.g. \code{\link[DESeq2:nbinomWaldTest]{
nbinomWaldTest}}) use C code that can be compiled to use parallelization,
and this conflicts with our use of process forking (via the
\code{\link[parallel:parallel-package]{parallel package}}) when
\code{ncores > 1}.
}
\examples{
#--------------------------------------------------#
# getDESeqDataSet
#--------------------------------------------------#
suppressPackageStartupMessages(require(DESeq2))
data("PROseq") # import included PROseq data
data("txs_dm6_chr4") # import included transcripts
# divide PROseq data into 6 toy datasets
ps_a_rep1 <- PROseq[seq(1, length(PROseq), 6)]
ps_b_rep1 <- PROseq[seq(2, length(PROseq), 6)]
ps_c_rep1 <- PROseq[seq(3, length(PROseq), 6)]
ps_a_rep2 <- PROseq[seq(4, length(PROseq), 6)]
ps_b_rep2 <- PROseq[seq(5, length(PROseq), 6)]
ps_c_rep2 <- PROseq[seq(6, length(PROseq), 6)]
ps_list <- list(A_rep1 = ps_a_rep1, A_rep2 = ps_a_rep2,
B_rep1 = ps_b_rep1, B_rep2 = ps_b_rep2,
C_rep1 = ps_c_rep1, C_rep2 = ps_c_rep2)
# make flawed dataset (ranges in txs_dm6_chr4 not disjoint)
# this means there is double-counting
# also using discontinuous gene regions, as gene_ids are repeated
dds <- getDESeqDataSet(ps_list,
txs_dm6_chr4,
gene_names = txs_dm6_chr4$gene_id,
ncores = 1)
dds
#--------------------------------------------------#
# getDESeqResults
#--------------------------------------------------#
res <- getDESeqResults(dds, "B", "A")
res
reslist <- getDESeqResults(dds, comparisons = list(c("B", "A"), c("C", "A")),
ncores = 1)
names(reslist)
reslist$B_vs_A
# or using a dataframe
reslist <- getDESeqResults(dds, comparisons = data.frame(num = c("B", "C"),
den = c("A", "A")),
ncores = 1)
reslist$B_vs_A
}
\seealso{
\code{\link[BRGenomics:getDESeqDataSet]{getDESeqDataSet}},
\code{\link[DESeq2:results]{DESeq2::results}}
}
\author{
Mike DeBerardine
}
|
acf36c40d94d51e5b831e779da91aaea8acd94d8
|
bbf3e37780ca496b568544cd89d94f886c139e2f
|
/man/bc_neighbours.Rd
|
35609091401006138a9163822006144087762ef6
|
[
"Apache-2.0"
] |
permissive
|
bcgov/bcmaps
|
e361ea94fb5fffb9280d01950d5436429e44152f
|
fa9e23e9c63bb414ce383f7e8bee38f5d8dc40d9
|
refs/heads/main
| 2023-08-31T21:51:26.418245
| 2023-08-23T21:26:31
| 2023-08-23T21:26:31
| 36,751,057
| 69
| 20
|
Apache-2.0
| 2023-08-22T20:13:03
| 2015-06-02T17:51:25
|
R
|
UTF-8
|
R
| false
| true
| 843
|
rd
|
bc_neighbours.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/bc_bound.R
\name{bc_neighbours}
\alias{bc_neighbours}
\title{Boundary of British Columbia, provinces/states and the portion of the Pacific Ocean that borders British Columbia}
\source{
\code{bcdata::bcdc_get_data('b9bd93e1-0226-4351-b943-05c6f80bd5da')}
}
\usage{
bc_neighbours(ask = interactive(), force = FALSE)
}
\arguments{
\item{ask}{Should the function ask the user before downloading the data to a cache? Defaults to the value of interactive().}
\item{force}{Should you force download the data?}
}
\value{
The spatial layer of \code{bc_neighbours} as an \code{sf} object
}
\description{
Boundary of British Columbia, provinces/states and the portion of the Pacific Ocean that borders British Columbia
}
\examples{
\dontrun{
my_layer <- bc_neighbours()
}
}
|
5c1f0a7ed5dc90e99d308288fa6451f73c2a86d5
|
13a430d486c1a52fc2ac470775012f2c7e13baae
|
/man/whale_sightings.Rd
|
d511ef4844f142194d7829746adbf3272a5ad911
|
[] |
no_license
|
ericmkeen/bangarang
|
66fe538e19d42c2b6c93b29cb8e31adb3230a36a
|
db710584811e108646c350f70fc025c8563f0fff
|
refs/heads/master
| 2023-03-15T16:42:09.605054
| 2023-03-09T16:47:09
| 2023-03-09T16:47:09
| 51,127,882
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 496
|
rd
|
whale_sightings.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/data-doc-whale_sightings.R
\docType{data}
\name{whale_sightings}
\alias{whale_sightings}
\title{Metadata for 5km segments of Bangarang effort, 2013 - 2015.}
\format{
An object of class \code{spec_tbl_df} (inherits from \code{tbl_df}, \code{tbl}, \code{data.frame}) with 549 rows and 15 columns.
}
\usage{
whale_sightings
}
\description{
Metadata for 5km segments of Bangarang effort, 2013 - 2015.
}
\keyword{datasets}
|
8f6876f57548a90c9b551320d8b1faa53601ab6b
|
7f8466c3b57f17d6ca409e4b06287b69f2122b59
|
/Bayesian Stats/hw2part1.R
|
182134bef196562db2671da73a1b4b097d27394c
|
[] |
no_license
|
jlin99/Bayesian-Stats
|
7c362e12e63cdfd10badcbe5d51bf81562c0eeb0
|
f13bd753b95e780e454369d8fae60192ebc17f72
|
refs/heads/master
| 2022-03-22T02:33:32.241095
| 2019-12-02T08:53:51
| 2019-12-02T08:53:51
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 560
|
r
|
hw2part1.R
|
# Discrete-prior Bayesian update for a binomial proportion (TeachBayes).
# NOTE(review): install.packages() inside a script reinstalls on every run;
# consider guarding with requireNamespace().
install.packages("TeachBayes")
library(TeachBayes)
# Discrete prior over candidate proportions p = 0.3, ..., 0.8.
bayes_table <- data.frame(p = seq(.3, .8, by=.1),
Prior = c(0.125, 0.125, 0.250,
0.250, 0.125, 0.125))
# Likelihood of observing 10 successes in 30 trials at each candidate p.
bayes_table$Likelihood <- dbinom(10, size=30, prob=bayes_table$p)
# bayesian_crank() multiplies Prior by Likelihood and normalizes to Posterior.
bayesian_crank(bayes_table) -> bayes_table
bayes_table
# Posterior probability that p equals 0.3.
sum(bayes_table$Posterior[bayes_table$p == 0.3])
# Posterior probability that p exceeds 0.5.
sum(bayes_table$Posterior[bayes_table$p > 0.5])
# Posterior probability that p lies in (0.2, 0.4].
sum(bayes_table$Posterior[bayes_table$p <= 0.4]) -
sum(bayes_table$Posterior[bayes_table$p <= 0.2])
|
c615e14e3e26379af70e4ee925df5b6d6a54c18a
|
32000ddb834992aafb85c5954d520f7c1c930f75
|
/cachematrix.R
|
791345e0900c3df28eaf4dc5d7ffa185ccbbc1c9
|
[] |
no_license
|
Ethosien/ProgrammingAssignment2
|
a9ea3dfa86e829e7234d872f2296117fe56496b3
|
42e424fb3043e7aa920f93fc3daf85e6b662e018
|
refs/heads/master
| 2021-01-18T15:42:49.981101
| 2015-10-22T15:03:48
| 2015-10-22T15:03:48
| 44,702,070
| 0
| 0
| null | 2015-10-21T20:23:15
| 2015-10-21T20:23:15
| null |
UTF-8
|
R
| false
| false
| 1,886
|
r
|
cachematrix.R
|
# Coursera RProgramming Course (rprog-033) assignment 2 by Jeff Swartzel, 10-22-15
# This script contains 2 functions for programming assignment 2.
# Together these functions can be used to cache the potentially
# time consuming calculation of inverting a matrix. If the inverse has already
# been calculated the result will be retrieved. The inverse will be calculated
# if there isn't a cached result already available.
#############
# This function, makeCacheMatrix(), will create a list containing 4 separate functions
# that are used to create special objects that will store a matrix and cache its inverse.
# 1. set() will set the value of the matrix
# 2. get() will get the value of the matrix
# 3. getInverse() will get the value of the inverse matrix
# 4. setInverse() will set the value of the inverse matrix
# Build a matrix wrapper that can cache its inverse. Returns a list of four
# accessor closures sharing one enclosing environment:
#   set(y)          - replace the stored matrix and invalidate the cache
#   get()           - return the stored matrix
#   setInverse(inv) - store a computed inverse in the cache
#   getInverse()    - return the cached inverse, or NULL if none yet
makeCacheMatrix <- function(x = matrix()) {
  cached_inverse <- NULL
  replace_matrix <- function(y) {
    x <<- y
    # The stored matrix changed, so any cached inverse is now stale.
    cached_inverse <<- NULL
  }
  list(
    set = replace_matrix,
    get = function() x,
    setInverse = function(inverse) cached_inverse <<- inverse,
    getInverse = function() cached_inverse
  )
}
#############
# This function, cacheSolve(), calculates the inverse of the
# special matrix created by makeCacheMatrix. However, it will first check to see
# if the inverse has already been calculated. If the inverse has already been
# calculated, it will retrieve the already calculated inverse.
cacheSolve <- function(x, ...) {
m <- x$getInverse()
if(!is.null(m)){
message("getting cached inverse")
return(m)
}
data <- x$get()
m <- solve(data,...)
x$setInverse(m)
# Return a matrix that is the inverse of 'x'
m
}
#############
# This code will create a large, invertible matrix called "myMatrix".
# myMatrix<-matrix(runif(3000^2),3000)
# This can be used to test the functions.
#############
#this is my 4th commit
|
b8016336d2181d99829e72930e1a11729025acc5
|
73c89b343bb8530b01b0ad0498fd8e4f2b12c9c4
|
/R/Rmonkey.R
|
ff8d3935dec5981409f7cfa6166170199a173ba3
|
[] |
no_license
|
sfirke/Rmonkey
|
c2516d5660cf49152cf852cabd34c0a4172e72b0
|
4f5d1233362626ebf782cd4e273b3930b2e220b2
|
refs/heads/master
| 2021-01-18T16:01:54.185725
| 2017-04-26T02:58:26
| 2017-04-26T02:58:26
| 86,704,605
| 0
| 2
| null | 2017-03-30T13:17:06
| 2017-03-30T13:17:06
| null |
UTF-8
|
R
| false
| false
| 302
|
r
|
Rmonkey.R
|
#' janitor
#'
#' Rmonkey provides access to the SurveyMonkey API from R.
#' #'
#' @docType package
#' @name Rmonkey
#' @importFrom dplyr %>%
#' @keywords internal
NULL
## quiets concerns of R CMD check re: the .'s that appear in pipelines
if(getRversion() >= "2.15.1") utils::globalVariables(c("."))
|
babdfbbd13922098f9140ecb6104e09713b19279
|
0a906cf8b1b7da2aea87de958e3662870df49727
|
/distr6/inst/testfiles/C_EmpiricalMVPdf/libFuzzer_C_EmpiricalMVPdf/C_EmpiricalMVPdf_valgrind_files/1610035463-test.R
|
88e71593654aeb33055e338e20651df59e37fb29
|
[] |
no_license
|
akhikolla/updated-only-Issues
|
a85c887f0e1aae8a8dc358717d55b21678d04660
|
7d74489dfc7ddfec3955ae7891f15e920cad2e0c
|
refs/heads/master
| 2023-04-13T08:22:15.699449
| 2021-04-21T16:25:35
| 2021-04-21T16:25:35
| 360,232,775
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 299
|
r
|
1610035463-test.R
|
testlist <- list(data = structure(0, .Dim = c(1L, 1L)), x = structure(c(1.0650783571628e-255, 1.26575142481473e-309, 4.7847988524451e-304, 1.33613446060136e-309, 6.89903271560023e-310, 1.18480732817791e-303, 0), .Dim = c(7L, 1L)))
result <- do.call(distr6:::C_EmpiricalMVPdf,testlist)
str(result)
|
c0ea4e7c5a6e8709f7b671080918953dc47a5627
|
ca840df364b973c0ea2664c9fd0a4f30cf791f07
|
/Implementation/SVD_Iris.R
|
6452565e7a9022ac2ef13714710f47190029ad8b
|
[] |
no_license
|
Prachi-Agr/Dimensionality-Reduction
|
73751b4467810de0b3e0a5f2e328cc00c826453e
|
3f53be928eaff4ce5ad18fae063c8c4d6456114e
|
refs/heads/master
| 2022-11-24T07:27:28.199767
| 2020-07-24T06:30:07
| 2020-07-24T06:30:07
| 257,893,856
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,405
|
r
|
SVD_Iris.R
|
library(tidyverse)
dat <- read_csv("../input/iris.csv")
dat<-as.matrix(dat)
svd.mod <- svd(dat)
U <- dat %*% svd.mod$v %*% diag(1./svd.mod$d)\
U.reduced <-dat %*% svd.mod$v[,1:7,drop=FALSE] %*% diag((svd.mod$d)[1:7,drop=FALSE])
typeof(U.reduced)
U.reduced
dimReduce <- function(x, k=floor(ncol(x)/2), supplemental.cols=NULL) {
colIdxs <- which(colnames(x) %in% supplemental.cols)
colNames <- names(x[,-colIdxs])
sol <- svd(x[,-colIdxs])
sol.U <- as.matrix(x[,-colIdxs]) %*% (sol$v)[,1:k,drop=FALSE] %*%
diag((sol$d)[1:k,drop=FALSE])
sol.U = sol.U@data
res <- cbind(sol.U,x[,colIdxs,drop=FALSE])
names(res) <- c(names(sol.U@data),names(x[,colIdxs]))
res
}
dat <- read_csv("../input/iris.csv")
dat$ID <- seq_len(nrow(dat))
ore.drop("IRIS2")
ore.create(dat,table="IRIS2")
row.names(IRIS2) <- IRIS2$ID
IRIS2[1:5,]
IRIS2.reduced <- dimReduce(IRIS2, 2, supplemental.cols=c("ID","Species"))
dim(IRIS2.reduced)
#Decision tree classifier
library(rpart)
m1 <- rpart(Species~.,iris)
res1 <- predict(m1,iris,type="class")
table(res1,iris$Species)
dat2 <- ore.pull(IRIS2.reduced)
m2 <- rpart(Species~.-ID,dat2)
res2 <- predict(m2,dat2,type="class")
table(res2,iris$Species)
m2.1 <- ore.odmDT(Species~.-ID, IRIS2.reduced)
res2.1 <- predict(m2.1,IRIS2.reduced,type="class",supplemental.cols = "Species")
table(res2.1$PREDICTION, res2.1$Species)
|
a11e3c377ca6f077a9e7af209806dbc7d6c48b2d
|
038795d15a01edf43666d2cce836612660a50ad0
|
/R/tokens.R
|
0aec63f50704d0304cb0a81aedbefbf4f0304bc1
|
[
"MIT"
] |
permissive
|
akgold/onelogin
|
1d919d9863ed9877d1a2b2ef583136e1159dcaae
|
3d5a5f26f8893e2a3faa6ab24d73d9f4b23d7e79
|
refs/heads/master
| 2020-06-23T03:17:25.428226
| 2019-08-29T21:08:57
| 2019-08-29T21:08:57
| 198,491,663
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,078
|
r
|
tokens.R
|
# Manage API Access OAuth Tokens
#' Generate a 'OneLogin' token
#'
#' @param con a 'OneLogin' connection
#'
#' @return A 'OneLogin' connection with auth token
#' @export
#'
#' @examples
#' if(interactive()) ol_token_get(onelogin())
ol_token_get <- function(con) {
con$generate_token()
}
#' Refresh 'OneLogin' auth token
#'
#' @inheritParams ol_token_get
#'
#' @return A 'OneLogin' connection with refreshed auth token
#' @export
#'
#' @examples
#' if(interactive()) ol_token_refresh(onelogin())
ol_token_refresh <- function(con) {
con$get_refresh_token()
}
#' Revoke `OneLogin` access token
#'
#' @inheritParams ol_token_get
#'
#' @return A tibble of response status
#' @export
#'
#' @examples
#' #' if(interactive()) ol_token_revoke(onelogin())
ol_token_revoke <- function(con) {
con$revoke_token()
}
#' Get 'OneLogin' API rate limit
#'
#' @inheritParams ol_token_get
#'
#' @return A tibble of rate limit data
#' @export
#'
#' @examples
#' if(interactive()) ol_token_get_rate_limit(onelogin())
ol_token_get_rate_limit <- function(con) {
con$GET("auth/rate_limit")
}
|
6292a2742702d3ef0e59051a805af530a4fe8446
|
b8fb3ca5ed4ec1d87b5293ff9c2f11e361a9fd87
|
/plot2.R
|
17f8edb494b8c417f8d69134ee42e8be730c614e
|
[] |
no_license
|
shenqiny/Exploratory_Data_Analysis_project2
|
70a1f512638641ba1d54e0266fe162317aea9ade
|
7d539ab16b234a897814b25d6cd5b9033e57d4c0
|
refs/heads/master
| 2022-11-11T14:17:19.386590
| 2020-06-28T23:38:00
| 2020-06-28T23:38:00
| 275,686,413
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,111
|
r
|
plot2.R
|
library(dplyr)
###Data download
url<-"https://d396qusza40orc.cloudfront.net/exdata%2Fdata%2FNEI_data.zip"
download.file(url, destfile = "exdata_data_FNEI_data.zip", method="curl")
##unzip and save files in the "assignment2" folder
if(!file.exists("assignment2")) {
unzip("exdata_data_FNEI_data.zip", exdir="./assignment2")
}
NEI <- readRDS("assignment2/summarySCC_PM25.rds")
SCC <- readRDS("assignment2/Source_Classification_Code.rds")
#Assignment
#The overall goal of this assignment is to explore the National Emissions Inventory database and see what it say about fine particulate matter pollution in the United states over the 10-year period 1999–2008.
#Have total emissions from PM2.5 decreased in the Baltimore City, Maryland (fips == "24510") from 1999 to 2008?
#Use the base plotting system to make a plot answering this question.
baltimore<-subset(NEI, fips=="24510")
sum_emission<-tapply(baltimore$Emissions, baltimore$year, sum)
png("plot2.png", height = 480, width = 480)
barplot(sum_emission, main = "Sum of PM2.5 emission-Baltimore", xlab = "Years", ylab="PM2.5 emitted (tons)")
dev.off()
|
1fe68780a0364313f3a8e5fc1791d13595957262
|
b76b8dfaa1f99ae26efd062cd8f1b443c6cbe900
|
/resources/app/source/FunFunc.R
|
4aa360489e99ce7067b4699d86fdc82117e6671d
|
[
"LicenseRef-scancode-unknown-license-reference",
"CC0-1.0",
"LicenseRef-scancode-public-domain",
"LicenseRef-scancode-fftpack-2004",
"LicenseRef-scancode-unicode",
"LGPL-3.0-only",
"NTP",
"BSD-4-Clause-UC",
"mif-exception",
"LicenseRef-scancode-warranty-disclaimer",
"GPL-2.0-or-later",
"Unicode-TOU",
"APSL-2.0",
"LicenseRef-scancode-x11-doc",
"FTL",
"BSD-Protection",
"LGPL-2.0-or-later",
"blessing",
"HPND-sell-variant",
"HPND",
"BSD-4.3TAHOE",
"LicenseRef-scancode-x11-dec1",
"LicenseRef-scancode-ibm-dhcp",
"CC-BY-4.0",
"GPL-1.0-or-later",
"curl",
"LicenseRef-scancode-google-patent-license-webm",
"LicenseRef-scancode-pcre",
"LGPL-2.1-or-later",
"GPL-3.0-only",
"ISC",
"CPL-1.0",
"NAIST-2003",
"SGI-B-2.0",
"NPL-1.1",
"LicenseRef-scancode-protobuf",
"Bitstream-Vera",
"MIT-open-group",
"LicenseRef-scancode-tekhvc",
"LicenseRef-scancode-bsd-x11",
"SMLNJ",
"BSD-3-Clause",
"GPL-2.0-only",
"MPL-2.0",
"bzip2-1.0.6",
"LicenseRef-scancode-xfree86-1.0",
"MIT-Modern-Variant",
"Minpack",
"Martin-Birgmeier",
"Libpng",
"CC-BY-3.0",
"LGPL-2.1-only",
"MS-PL",
"LicenseRef-scancode-android-sdk-license",
"AFL-2.1",
"BSD-4-Clause",
"ICU",
"LicenseRef-scancode-other-permissive",
"LicenseRef-scancode-x11-xconsortium-veillard",
"X11",
"NCSA",
"LGPL-3.0-or-later",
"IJG",
"LicenseRef-scancode-proprietary-license",
"Zlib",
"OpenSSL",
"Artistic-1.0",
"LicenseRef-scancode-openssl",
"dtoa",
"APSL-1.0",
"Classpath-exception-2.0",
"AFL-2.0",
"LicenseRef-scancode-ssleay-windows",
"LicenseRef-scancode-unrar",
"BSL-1.0",
"OFL-1.1",
"BSD-Source-Code",
"MIT",
"SunPro",
"Apache-2.0",
"LicenseRef-scancode-patent-disclaimer",
"MPL-1.1",
"LGPL-2.0-only",
"LicenseRef-scancode-khronos",
"BSD-2-Clause",
"LicenseRef-scancode-free-unknown",
"LicenseRef-scancode-other-copyleft",
"BSD-1-Clause",
"LicenseRef-scancode-mit-veillard-variant",
"Unlicense",
"LicenseRef-scancode-public-domain-disclaimer"
] |
permissive
|
tim7en/SedSatV2_electron
|
7e2ccd3b8d2927f2d6207123a6daa88e81c0cca9
|
244024b9b70ac0204bb84d86c29016bf20f80463
|
refs/heads/master
| 2022-12-22T16:59:01.704475
| 2020-03-29T00:46:33
| 2020-03-29T00:46:33
| 250,918,541
| 0
| 0
|
MIT
| 2022-12-10T23:23:44
| 2020-03-28T23:59:51
|
C++
|
UTF-8
|
R
| false
| false
| 1,641
|
r
|
FunFunc.R
|
FunFunc <- function() {
inputTrain <- NULL
inputValidate <- NULL
# dat_transform <- function(x) {
# if (all(x < 0)) {
# x <- x * (-1)
# } else if (any(x <= 0)) {
# const <- abs(min(x))
# formulas <- strsplit(negGlob, ',')
# x <- eval(parse(text = formulas[[1]][1]))
# x
# } else {
# x
# }
# }
for (i2 in seq(1, length(uniSource))) {
dat <- x[which(x[, 2] == uniSource[i2]), ]
print ('running convert')
dat <- convert (dat, negGlob,zeroConstant)
print ('finished convert')
train_index <- sample(1:nrow(dat), nrow(dat) * sourceSplitProportion)
training_dat <- dat[train_index, ]
validate_dat <- dat[-train_index, ]
inputTrain <- rbind(inputTrain, training_dat)
inputValidate <- rbind(inputValidate, validate_dat)
}
datas <- getSubsetmean(inputTrain[, -1])
DFA <- DFA[(which(colnames(DFA) %in% colnames(datas)))]
DFA <- DFA[, colSums(DFA != 0) > 0]
target <- target[, which(names(target) %in% colnames(DFA))]
datas <- datas[, which(colnames(datas) %in% colnames(DFA))]
dat <- inputValidate [, -c(1, 2)]
dat <- dat[, which(names(dat) %in% colnames(DFA))]
matchNames <- match(colnames(dat), colnames(target))
dat <- rbind(dat, target[matchNames])
#dat <- apply(dat, 2, dat_transform)
dat <- data.matrix(dat)
target <- dat[nrow(dat), ]
rownames(dat) <- c(as.character(inputValidate[, 1]), as.character(targetD[i, 1]))
d <- UseUnMixing(target, datas, DFA, method = "Nelder-Mead")
d <- round(d, 4)
d <- c(d, targetD[i, 1])
names(d) <- NULL
names(d) <- c(rownames(datas), "GOF", "target")
return(d)
}
|
f1686a95981a1b12e201e9ff3cd0ba99e3ee457d
|
f5a2a55b917190b5d6ef6c5f6cb4bfea47f43f59
|
/data/dataCleaning.R
|
f47c3b7fdd07a2ce8e39c0acb1604e3d5e669ca6
|
[] |
no_license
|
wjlnfgd/cpln692-final
|
a4a40822f41aed8f3e9c599aaa05381a2dd569be
|
aec38f4229c89262c0f617e313bf67f32478c7c2
|
refs/heads/master
| 2020-05-21T21:25:34.508457
| 2019-05-14T18:05:59
| 2019-05-14T18:05:59
| 186,150,599
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 894
|
r
|
dataCleaning.R
|
library(tidyverse)
library(sf)
library(geojsonio)
library(spdplyr)
library(rmapshaper)
rawData<-read.csv("C:/Users/wjlnf/Desktop/test/final/data/Final_CoffeeDatasetforR.csv")
########## Select the Brands ###########
chainStore<-
rawData %>%
group_by(CONAME)%>%
summarize(n = n()) %>%
filter(n>10) %>%
mutate(NAME = CONAME)
######## Clean the Dataset ########
cleanData<-
rawData %>%
filter(CONAME==chainStore$NAME[1])
for (i in 2:5){
cleanData=cleanData%>%rbind(rawData %>%
filter(CONAME == chainStore$NAME[i]))
}
cleanData <-
cleanData %>%
mutate(NAME = CONAME,ID = c(1:751)) %>%
select(-ISCODE)
######### for Geojson ########
shp<-st_as_sf(cleanData,coords = c("longitude","latitude"),crs=4326)
coffeeJson<-geojson_json(shp)
geojson_write(coffeeJson,file = "C:/Users/wjlnf/Desktop/test/final/data/coffeePA.geojson")
|
e06f0b38fc784aca17eeb3b3867a180cd53e2217
|
42dedcc81d5dc9a61a79dbcea9bdd7363cad97be
|
/nki/03_post-process/60_remove_global_signal.R
|
ace9f4da1cdb0016e45f2038790317312338b885
|
[] |
no_license
|
vishalmeeni/cwas-paper
|
31f4bf36919bba6caf287eca2abd7b57f03d2c99
|
7d8fe59e68bc7c242f9b3cfcd1ebe6fe6918225c
|
refs/heads/master
| 2020-04-05T18:32:46.641314
| 2015-09-02T18:45:10
| 2015-09-02T18:45:10
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,615
|
r
|
60_remove_global_signal.R
|
#!/usr/bin/env Rscript
# For each subject, this script will
# 1. read in the time-series data
# 2. extract the global signal
# 3. save the global signal
# 4. regress out the global signal
# 5. save the new GSR corrected functional
suppressPackageStartupMessages(library(connectir))
library(biganalytics)
library(tools)
# Read in arg
args <- commandArgs(trailingOnly = TRUE)
if (length(args) != 1) {
msg <- paste(
"usage: 60_remove_global_signal.R scan",
"scan: short, medium, or long",
sep="\n"
)
stop(msg)
}
scan <- as.character(args[1])
if (!(scan %in% c("short", "medium", "long"))) {
stop("incorrect argument")
} else {
cat(sprintf("Running scan: %s\n", scan))
}
subdir <- "/home2/data/Projects/CWAS/share/nki/subinfo/40_Set1_N104"
pathsfile <- file.path(subdir, sprintf("%s_compcor_funcpaths_4mm_fwhm08.txt", scan))
funcfiles <- as.character(read.table(pathsfile)[,])
# Main function where all the magic happens
remove_global_signal <- function(funcfile, overwrite=F) {
# 0. Background
cat("...background\n")
funcname <- sub(".nii.gz", "", basename(funcfile))
funcdir <- dirname(funcfile)
maskfile <- file.path(dirname(funcdir), "functional_brain_mask_to_standard_4mm.nii.gz")
tsdir <- file.path(funcdir, "ts")
gsrts <- file.path(tsdir, "global.1d")
gsrfile <- file.path(funcdir, sprintf("%s_global.nii.gz", funcname))
if (file.exists(gsrfile)) {
if (overwrite) {
cat("...overwriting existing file\n")
file.remove(gsrfile)
} else {
cat("...skipping since it already exists\n")
}
}
# 1. Read in the time-series data
cat("...read\n")
func <- read.big.nifti4d(funcfile)
mask <- read.mask(maskfile)
func_masked <- do.mask(func, mask)
rm(func)
# 2. Extract the global signal
cat("...extract\n")
gsr_ts <- rowMeans(func_masked[,])
# 3. Save global
cat("...save global ts\n")
write.table(gsr_ts, row.names=F, col.names=F, quote=F, file=gsrts)
# 4. Regress out global
cat("...regress out global\n")
X <- cbind(rep(1,length(gsr_ts)), gsr_ts)
X <- as.big.matrix(X)
resids <- qlm_residuals(func_masked, X)
# 5. Save new global corrected file
cat("...save new file\n")
resids <- as.big.nifti4d(resids, func_masked@header, func_masked@mask)
resids@header$fname <- gsrfile; resids@header$iname <- gsrfile
write.nifti(resids, outfile=gsrfile)
}
for (funcfile in funcfiles) {
cat(sprintf("Running: %s\n", funcfile))
remove_global_signal(funcfile)
}
|
b83998253185e004eebf4800fa803d89188d5964
|
29585dff702209dd446c0ab52ceea046c58e384e
|
/Rmpfr/R/Math.R
|
0abb0a6f12c542cec956c4a66ddbafbcc8dcbaa0
|
[] |
no_license
|
ingted/R-Examples
|
825440ce468ce608c4d73e2af4c0a0213b81c0fe
|
d0917dbaf698cb8bc0789db0c3ab07453016eab9
|
refs/heads/master
| 2020-04-14T12:29:22.336088
| 2016-07-21T14:01:14
| 2016-07-21T14:01:14
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 11,175
|
r
|
Math.R
|
#### Define mpfr methods for Math and Math2 group functions
#### ====== =====
### "Arith", "Compare",..., are in ./Arith.R
### ---- ~~~~~~~
## [1] "abs" "sign" "sqrt" "ceiling" "floor" "trunc" "cummax"
## [8] "cummin" "cumprod" "cumsum" "exp" "expm1" "log" "log10"
##[15] "log2" "log1p" "cos" "cosh" "sin" "sinh" "tan"
##[22] "tanh" "acos" "acosh" "asin" "asinh" "atan" "atanh"
##[29] "gamma" "lgamma" "digamma" "trigamma"
if(FALSE) ## here are the individual function
dput(getGroupMembers("Math"))
## Uniform interface to C:
##
## Pass integer code to call and do the rest in C
## Codes from ~/R/D/r-devel/R/src/main/names.c :
.Math.codes <-
c(
"floor" = 1,
"ceiling" = 2,
"sqrt" = 3,
"sign" = 4,
"exp" = 10,
"expm1" = 11,
"log1p" = 12,
"cos" = 20,
"sin" = 21,
"tan" = 22,
"acos" = 23,
"asin" = 24,
"cosh" = 30,
"sinh" = 31,
"tanh" = 32,
"acosh" = 33,
"asinh" = 34,
"atanh" = 35,
"lgamma" = 40,
"gamma" = 41,
"digamma" = 42,
"trigamma" = 43,
## R >= 3.1.0 :
"cospi" = 47,
"sinpi" = 48,
"tanpi" = 49
)
.Math.gen <- getGroupMembers("Math")
## Those "Math" group generics that are not in the do_math1 table above
.Math.codes <-
c(.Math.codes,
"trunc" = 0, "atan" = 25, # "abs" has own method!
"log" = 13, "log2" = 14, "log10" = 15,
"cummax" = 71, "cummin" = 72, "cumprod" = 73, "cumsum" = 74,
## These are *NOT* in R's Math group, but 1-argument math functions
## available in the mpfr - library:
"erf" = 101, "erfc" = 102, "zeta" = 104, "Eint" = 106, "Li2" = 107,
"j0" = 111, "j1" = 112, "y0" = 113, "y1" = 114,
"Ai" = 120) # Airy function (new in mpfr 3.0.0)
storage.mode(.Math.codes) <- "integer"
if(FALSE)
.Math.gen[!(.Math.gen %in% names(.Math.codes))]
## "abs" -- only one left
## A few ones have a very simple method:
## Note that the 'sign' slot is from the C-internal struct
## and is always +/- 1 , but R's sign(0) |--> 0
.getSign <- function(x) vapply(getD(x), slot, 1L, "sign")
.mpfr.sign <- function(x) {
r <- numeric(length(x))# all 0
not0 <- !mpfrIs0(x)
r[not0] <- .getSign(x[not0])
r
}
setMethod("sign", "mpfr", .mpfr.sign)
## R version, no longer used:
.abs.mpfr <- function(x) {
## FIXME: faster if this happened in a .Call
xD <- getDataPart(x) # << currently [2011] *faster* than x@Data
for(i in seq_along(x))
slot(xD[[i]], "sign", check=FALSE) <- 1L
setDataPart(x, xD, check=FALSE) ## faster than x@.Data <- xD
}
setMethod("abs", "mpfr",
function(x) .Call(Rmpfr_abs, x))
## Simple methods for "complex" numbers, just so "they work"
setMethod("Re", "mpfr", function(z) z)
setMethod("Im", "mpfr", function(z) 0*z)
setMethod("Conj","mpfr", function(z) z)
setMethod("Mod", "mpfr", function(z) abs(z))
setMethod("Arg", "mpfr", function(z) {
prec <- .getPrec(z)
r <- mpfr(0, prec)
neg <- !mpfrIs0(z) & .getSign(z) == -1
r[neg] <- Const("pi", prec = prec[neg])
r
})
## Note that factorial() and lfactorial() automagically work through [l]gamma()
## but for the sake of "exact for integer"
setMethod("factorial", "mpfr",
function(x) {
r <- gamma(x + 1)
isi <- .mpfr.is.whole(x)
r[isi] <- round(r[isi])
r
})
## The "real" thing is to use the MPFR-internal function:
factorialMpfr <- function(n, precBits = max(2, ceiling(lgamma(n+1)/log(2))),
rnd.mode = c('N','D','U','Z','A'))
{
stopifnot(n >= 0)
new("mpfr", .Call(R_mpfr_fac, n, precBits, match.arg(rnd.mode)))
}
##' Pochhammer rising factorial = Pochhammer(a,n) {1 of 2 definitions!}
##' we use the *rising* factorial for Pochhamer(a,n), i.e.,
##' the definition that the GSL and Mathematica use as well.
##' We want to do this well for *integer* n, only the general case is using
##' P(a,x) := Gamma(a+x)/Gamma(x)
pochMpfr <- function(a, n, rnd.mode = c('N','D','U','Z','A')) {
stopifnot(n >= 0)
if(!is(a, "mpfr")) ## use a high enough default precision (and recycle ..)
a <- mpfr(a, precBits = pmax(1,n)*getPrec(a))
else if((ln <- length(n)) != 1 && ln != length(a))
a <- a + 0*n
## a@.Data[] <- .Call(R_mpfr_poch, a, n)
## a
setDataPart(a, .Call(R_mpfr_poch, a, n, match.arg(rnd.mode)))
}
##' Binomial Coefficient choose(a,n)
##' We want to do this well for *integer* n
chooseMpfr <- function(a, n, rnd.mode = c('N','D','U','Z','A')) {
stopifnot(n >= 0)
if(!is(a, "mpfr")) { ## use high enough default precision
lc <- lchoose(a,n)
precB <- if(any(iF <- is.finite(lc))) ceiling(max(lc[iF])/log(2)) else 0
## add n bits for the n multiplications (and recycle {a,n} to same length)
a <- mpfr(a, precBits = n + max(2, precB))
} else if((ln <- length(n)) != 1 && ln != length(a))
a <- a + 0*n
## a@.Data[] <- .Call(R_mpfr_choose, a, n)
## a
setDataPart(a, .Call(R_mpfr_choose, a, n, match.arg(rnd.mode)))
}
chooseMpfr.all <- function(n, precBits=NULL, k0=1, alternating=FALSE) {
## return chooseMpfr(n, k0:n) or (-1)^k * choose... "but smartly"
if(!is.numeric(n) || (n <- as.integer(n)) < 1)
stop("n must be integer >= 1")
stopifnot(is.numeric(n. <- k0), n. == (k0 <- as.integer(k0)),
k0 <= n)
sig <- if(alternating) (-1)^(k0:n) else rep.int(1, (n-k0+1))
if(n == 1) return(mpfr(sig, 32))
## else : n >= 2
n2 <- n %/% 2 # >= 1
prec <- ceiling(lchoose(n,n2)/log(2)) # number of bits needed in result
precBxtr <- max(2, n2 + prec) # need more for cumprod(), and division
n2. <- mpfr(n2, precBxtr)
r <- cumprod(seqMpfr(mpfr(n, precBxtr), n+1-n2., length.out=n2)) /
cumprod(seqMpfr(1, n2., length.out=n2))
prec <- max(2,prec)
if(is.numeric(precBits) && (pB <- as.integer(round(precBits))) > prec)
prec <- pB
r <- roundMpfr(r, precBits = prec)
##
ii <- c(seq_len(n2-1+(n%%2)), n2:1)
if(k0 >= 2) ii <- ii[-seq_len(k0 - 1)]
one <- .d2mpfr1(1, precBits=prec)
r <- c(if(k0 == 0) one, getD(r)[ii], one)
if(alternating) {
for(i in seq_along(r)) if(sig[i] == -1)
slot(r[[i]], "sign", check=FALSE) <- - 1L
}
new("mpfr", r)
}## {chooseMpfr.all}
## http://en.wikipedia.org/wiki/N%C3%B6rlund%E2%80%93Rice_integral
## also deals with these alternating binomial sums
##'
##' version 1: already using the 'alternating' arg in chooseMpfr.all()
sumBinomMpfr.v1 <- function(n, f, n0=0, alternating=TRUE, precBits = 256)
{
## Note: n0 = 0, or 1 is typical, and hence chooseMpfr.all() makes sense
stopifnot(0 <= n0, n0 <= n, is.function(f))
sum(chooseMpfr.all(n, k0=n0, alternating=alternating) *
f(mpfr(n0:n, precBits=precBits)))
}
##' version 2: chooseZ()*(-1)^(.) is considerably faster than chooseMpfr.all()
sumBinomMpfr.v2 <- function(n, f, n0=0, alternating=TRUE, precBits = 256,
f.k = f(mpfr(k, precBits=precBits)))
{
## Note: n0 = 0, or 1 is typical..
stopifnot(0 <= n0, n0 <= n,
is.function(f) || (is(f.k, "mpfr") && length(f.k) == n-n0+1))
k <- n0:n
sum(if(alternating) chooseZ(n, k) * (-1)^(n-k) * f.k
else chooseZ(n, k) * f.k)
}
## NB: pbetaI() in ./special-fun.R uses a special version..
## --- if we do this *fast* in C -- do pbetaI() as well.
sumBinomMpfr <- sumBinomMpfr.v2
##' Rounding to binary bits, not decimal digits. Closer to the number
##' representation, this also allows to increase or decrease a number's precBits
##' @title Rounding to binary bits, "mpfr-internally"
##' @param x an mpfr number (vector)
##' @param precBits integer specifying the desired precision in bits.
##' @return an mpfr number as \code{x} but with the new 'precBits' precision
##' @author Martin Maechler
roundMpfr <- function(x, precBits, rnd.mode = c('N','D','U','Z','A')) {
stopifnot(is(x, "mpfr"))
setDataPart(x, .Call(R_mpfr_round, x, precBits, match.arg(rnd.mode)))
}
## "log" is still special with its 'base' :
setMethod("log", signature(x = "mpfr"),
function(x, base) {
if(!missing(base) && base != exp(1))
stop("base != exp(1) is not yet implemented")
setDataPart(x, .Call(Math_mpfr, x, .Math.codes[["log"]]))
})
setMethod("Math", signature(x = "mpfr"), function(x)
setDataPart(x, .Call(Math_mpfr, x, .Math.codes[[.Generic]])))
setMethod("Math2", signature(x = "mpfr"),
function(x, digits) {
## NOTA BENE: vectorized in 'x'
if(any(ret.x <- !is.finite(x) | mpfrIs0(x))) {
if(any(ok <- !ret.x))
x[ok] <- callGeneric(x[ok], digits=digits)
return(x)
}
if(!missing(digits)) {
digits <- as.integer(round(digits))
if(is.na(digits)) return(x + digits)
} ## else: default *depends* on the generic
## now: both x and digits are finite
pow10 <- function(d) mpfr(rep.int(10., length(d)),
precBits = ceiling(log2(10)*as.numeric(d)))^ d
rint <- function(x) { ## have x >= 0 here
sml.x <- (x < .Machine$integer.max)
r <- x
if(any(sml.x)) {
x.5 <- x[sml.x] + 0.5
ix <- as.integer(x.5)
## implement "round to even" :
if(any(doDec <- (abs(x.5 - ix) < 10*.Machine$double.eps & (ix %% 2))))
ix[doDec] <- ix[doDec] - 1L
r[sml.x] <- ix
}
if(!all(sml.x)) { ## large x - no longer care for round to even
r[!sml.x] <- floor(x[!sml.x] + 0.5)
}
r
}
neg.x <- x < 0
x[neg.x] <- - x[neg.x]
sgn <- ifelse(neg.x, -1, +1)
switch(.Generic,
"round" = { ## following ~/R/D/r-devel/R/src/nmath/fround.c :
if(missing(digits) || digits == 0)
sgn * rint(x)
else if(digits > 0) {
p10 <- pow10(digits)
intx <- floor(x)
sgn * (intx + rint((x-intx) * p10) / p10)
}
else { ## digits < 0
p10 <- pow10(-digits)
sgn * rint(x/p10) * p10
}
},
"signif" = { ## following ~/R/D/r-devel/R/src/nmath/fprec.c :
if(missing(digits)) digits <- 6L
if(digits > max(.getPrec(x)) * log10(2))
return(x)
if(digits < 1) digits <- 1L
l10 <- log10(x)
e10 <- digits - 1L - floor(l10)
r <- x
pos.e <- (e10 > 0) ##* 10 ^ e, with e >= 1 : exactly representable
if(any(pos.e)) {
p10 <- pow10(e10[pos.e])
r[pos.e] <- sgn[pos.e]* rint(x[pos.e]*p10) / p10
}
if(any(neg.e <- !pos.e)) {
p10 <- pow10(-e10[neg.e])
r[neg.e] <- sgn[neg.e]* rint(x[neg.e]/p10) * p10
}
r
},
stop(gettextf("Non-Math2 group generic '%s' -- should not happen",
.Generic)))
})
##---- mpfrArray / mpfrMatrix --- methods -----------------
## not many needed: "mpfrArray" contain "mpfr",
## i.e., if the above methods are written "general enough", they apply directly
setMethod("sign", "mpfrArray",
function(x) structure(.mpfr.sign(x),
dim = dim(x),
dimnames = dimnames(x)))
|
71c4bc7a374c636f2348c32fc56e738bc8b84630
|
4368637a1c42bcf59789e314d373ebd63bc15347
|
/Fused Adaptive Lasso Project/Rcode/function/QR.spat.R
|
3e748da0f74dcfc1bd8589627532a372392d2b2c
|
[] |
no_license
|
vickywang1628/Projects
|
726812321cb3d8a94af750239889821189403830
|
36c29b27d47c4d9d05e9492878f86ea0fd7df524
|
refs/heads/master
| 2021-01-20T13:50:27.408084
| 2017-02-21T19:05:01
| 2017-02-21T19:05:01
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 484
|
r
|
QR.spat.R
|
QR.temp = function(y, tau, beta, lambda, maxit=200, toler=1e-3)
{
n=length(y)
phi=
L=matrix(rep(1,n*n),n,n)
L[lower.tri(L)] <- 0
if(missing(beta)){
#beta=lm(y~L-1)$coef
beta=rep(1,n)
}
W = rep(1, n)
beta1 = abs(QRMM(L,y,tau,beta,W,lambda,toler,maxit))^{-1}
beta1[beta1>sqrt(n)] <- sqrt(n)
W = beta1;
betah = QRMM(L,y,tau, beta,W,lambda,toler,maxit)
#beta = matrix(betah[-1])
#b=betah[1]
return(beta=betah);
}
|
2c2de0c1ab5e53d14417650a1444c430ed5958a5
|
72d9009d19e92b721d5cc0e8f8045e1145921130
|
/eBsc/R/get_lambda.R
|
a33b239e6e62e6af627869f7a401c1b749e7e5c5
|
[] |
no_license
|
akhikolla/TestedPackages-NoIssues
|
be46c49c0836b3f0cf60e247087089868adf7a62
|
eb8d498cc132def615c090941bc172e17fdce267
|
refs/heads/master
| 2023-03-01T09:10:17.227119
| 2021-01-25T19:44:44
| 2021-01-25T19:44:44
| 332,027,727
| 1
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 407
|
r
|
get_lambda.R
|
get_lambda <- function(zero_range, coefs, eigens, rhos, neBsc, q){
eps <- .Machine$double.eps
aux <- try(uniroot(Elambda, zero_range, coefs = coefs, eigens = eigens, rhos = rhos, neBsc = neBsc, q = q, tol = eps, maxiter = 10^5, extendInt = "yes")$root)
if(class(aux)=="try-error"){
out <- 5; message("Warning: non-optimal smoothing parameter.")
}else{
out <- aux}
out
}
|
5914249a44c2828d2ae3f7d64ce521ce7119d245
|
562320d778b7c33103b2dbd2b30a687a413d2f64
|
/man/xanyy.Rd
|
ad926032d27e5c1740038f1e9bbb6bc9c7a50a80
|
[] |
no_license
|
joshuatimm/rtweet
|
6b39f5e0d2c7a6ebc283369bf3b437a4535f7305
|
e2a8bac542e6570ce2d45ecfcdf4e0cc1caba73a
|
refs/heads/master
| 2021-06-08T18:30:51.205508
| 2016-12-07T16:33:08
| 2016-12-07T16:33:08
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 607
|
rd
|
xanyy.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/utils.R
\name{xanyy}
\alias{\%any\%}
\alias{xanyy}
\title{xanyy}
\usage{
x \%any\% y
}
\arguments{
\item{x}{Character, name of variable of interest.}
\item{y}{Named object on which to search for \code{x}.}
}
\description{
Returns logical value indicating whether named
object includes var name. Functions do the following:
}
\details{
\itemize{
\item \code{\%xy\%} returns logical for each value of x
\item \code{\%any\%} returns TRUE if y contains any of x
\item \code{\%all\%} returns TRUE if y contains all of x
}
}
|
2aac948e9ef7a9ceadf036146c0c26db4dda3c86
|
9ec66856984869b5a28edb91d1ff6763de647a16
|
/R/katehelpr-package.r
|
db9f8106abe6fe480cdb7665fac4e7c2238464ea
|
[] |
no_license
|
ksedivyhaley/katehelpr
|
08f0d36fb39c20901457860fa915514f36cbaa0c
|
b8f147fd3f9bb146aa97e1fa158090f3c8b1361a
|
refs/heads/master
| 2020-06-11T12:13:55.014008
| 2016-12-05T22:05:21
| 2016-12-05T22:05:21
| 75,670,460
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,709
|
r
|
katehelpr-package.r
|
#' katehelpr: Tools for Analysis of Two-Variable Data
#'
#' This package can be used to analyse any data in which observations are
#' described by a primary and secondary categorical variable, and a
#' numeric measurement or "response". Within groups defined by the secondary
#' variable, it compares observations with different primary variables to a
#' reference group. For example, the package could be used to compare mouse weights
#' (response) when mice are fed different diets (secondary variable), comparing
#' mutant mice to wild type (primary variable with reference level).
#'
#' Included functions:
#'
#' - tidy_pol(), which tidies data from a specific human-friendly format
#'
#' - analyse_pol(), which calculates mean & standard deviation of groups and
#' performs t-tests.
#'
#' - get_pstar(), which converts numeric p-values to star symbols for graphing
#'
#' - plot_pol(), which graphs data in a format produced by analyse_pol()
#'
#' tidy_pol(), analyse_pol(), and plot_pol() can be used in a pipe to go from
#' untidy data to a finished plot.
#'
#' The tools were originally developed for studies on macrophage polarization,
#' in which a polarizing prestimulation is followed by a secondary stimulation,
#' and the effect of polarization on response to the secondary stimulation is of
#' interest.
#'
#'
#' @name katehelpr
#' @docType package
NULL
## quiets concerns of R CMD check re: the .'s that appear in pipelines
if(getRversion() >= "2.15.1") {
utils::globalVariables(c("data", "data.x", "data.y", "p.star",
"p.value", "primary", "ref_data", "sd",
"secondary", "stim", "t.test", "test",
"tidy"))
}
|
0d9a2cdaf350973b322d6fb34d7c566e747e8edc
|
71d55d78f21dd85258d134acfa87af754def3aa8
|
/man/ID-translation.Rd
|
d0c6bd435fce6cd9ccefe99ee8022e3da4a12555
|
[] |
no_license
|
ewail/TCGAutils
|
f1ffd38d25a37ccc0460db04670da7504a16c1eb
|
8d0aab2de733e24d172289ab7df82e12429a7b28
|
refs/heads/master
| 2020-03-21T05:51:04.838224
| 2018-06-20T15:28:20
| 2018-06-20T15:28:20
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 4,098
|
rd
|
ID-translation.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/ID-translation.R
\name{ID-translation}
\alias{ID-translation}
\alias{UUIDtoBarcode}
\alias{UUIDtoUUID}
\alias{barcodeToUUID}
\alias{filenameToBarcode}
\title{Translate study identifiers from barcode to UUID and vice versa}
\usage{
UUIDtoBarcode(id_vector, id_type = c("case_id", "file_id"),
end_point = "participant", legacy = FALSE)
UUIDtoUUID(id_vector, to_type = c("case_id", "file_id"), legacy = FALSE)
barcodeToUUID(barcodes, id_type = c("case_id", "file_id"), legacy = FALSE)
filenameToBarcode(filenames, legacy = FALSE)
}
\arguments{
\item{id_vector}{A \code{character} vector of UUIDs corresponding to
either files or cases (default assumes case_ids)}
\item{id_type}{Either \code{case_id} or \code{file_id} indicating the type of
\code{id_vector} entered (default "case_id")}
\item{end_point}{The cutoff point of the barcode that should be returned,
only applies to \code{file_id} type queries. See details for options.}
\item{legacy}{(logical default FALSE) whether to search the legacy archives}
\item{to_type}{The desired UUID type to obtain, can either be "case_id" or
"file_id"}
\item{barcodes}{A \code{character} vector of TCGA barcodes}
\item{filenames}{A \code{character} vector of filenames obtained from
the GenomicDataCommons}
}
\value{
A \code{data.frame} of TCGA barcode identifiers and UUIDs
}
\description{
These functions allow the user to enter a character vector of
identifiers and use the GDC API to translate from TCGA barcodes to
Universally Unique Identifiers (UUID) and vice versa. These relationships
are not one-to-one. Therefore, a \code{data.frame} is returned for all
inputs. The UUID to TCGA barcode translation only applies to file and case
UUIDs. Two-way UUID translation is available from 'file_id' to 'case_id'
and vice versa. Please double check any results before using these
features for analysis. Case / submitter identifiers are translated by
default, see the \code{id_type} argument for details. All identifiers are
converted to lower case.
}
\details{
The \code{end_point} options reflect endpoints in the Genomic Data Commons
API. These are summarized as follows:
\itemize{
\item{participant}: This default snippet of information includes project,
tissue source site (TSS), and participant number
(barcode format: TCGA-XX-XXXX)
\item{sample}: This adds the sample information to the participant barcode
(TCGA-XX-XXXX-11X)
\item{portion, analyte}: Either of these options adds the portion and
analyte information to the sample barcode (TCGA-XX-XXXX-11X-01X)
\item{plate, center}: Additional plate and center information is returned,
i.e., the full barcode (TCGA-XX-XXXX-11X-01X-XXXX-XX)
}
Only these keywords need to be used to target the specific barcode endpoint.
These endpoints only apply to "file_id" type translations to TCGA barcodes
(see \code{id_type} argument).
}
\examples{
## Translate UUIDs >> TCGA Barcode
uuids <- c("0001801b-54b0-4551-8d7a-d66fb59429bf",
"002c67f2-ff52-4246-9d65-a3f69df6789e",
"003143c8-bbbf-46b9-a96f-f58530f4bb82")
UUIDtoBarcode(uuids, id_type = "file_id", end_point = "sample")
UUIDtoBarcode("ae55b2d3-62a1-419e-9f9a-5ddfac356db4", id_type = "case_id")
## Translate file UUIDs >> case UUIDs
uuids <- c("0001801b-54b0-4551-8d7a-d66fb59429bf",
"002c67f2-ff52-4246-9d65-a3f69df6789e",
"003143c8-bbbf-46b9-a96f-f58530f4bb82")
UUIDtoUUID(uuids)
## Translate TCGA Barcode >> UUIDs
fullBarcodes <- c("TCGA-B0-5117-11A-01D-1421-08",
"TCGA-B0-5094-11A-01D-1421-08",
"TCGA-E9-A295-10A-01D-A16D-09")
sample_ids <- TCGAbarcode(fullBarcodes, sample = TRUE)
barcodeToUUID(sample_ids)
participant_ids <- c("TCGA-CK-4948", "TCGA-D1-A17N",
"TCGA-4V-A9QX", "TCGA-4V-A9QM")
barcodeToUUID(participant_ids)
library(GenomicDataCommons)
fquery <- files() \%>\%
filter(~ cases.project.project_id == "TCGA-COAD" &
data_category == "Copy Number Variation" &
data_type == "Copy Number Segment")
fnames <- results(fquery)$file_name[1:6]
filenameToBarcode(fnames)
}
\author{
Sean Davis, M. Ramos
}
|
b1a50e71e76f12219ecab69d64c0481ba9c76011
|
6855ac1106597ae48483e129fda6510354efa2bd
|
/tests/testthat/test-supplementary_add.R
|
3e02cc441f3601894f5715adb0aec0df4cccb8b7
|
[
"MIT"
] |
permissive
|
rOpenGov/iotables
|
ad73aae57b410396995635d1c432744c06db32db
|
91cfdbc1d29ac6fe606d3a0deecdb4c90e7016b9
|
refs/heads/master
| 2022-10-02T13:03:54.563374
| 2022-09-24T11:47:20
| 2022-09-24T11:47:20
| 108,267,715
| 19
| 8
|
NOASSERTION
| 2021-12-17T15:09:35
| 2017-10-25T12:35:47
|
R
|
UTF-8
|
R
| false
| false
| 1,673
|
r
|
test-supplementary_add.R
|
context("Adding a supplementary row")

de_io <- iotable_get()

# Emission coefficients by industry group; the reference values come from
# the Eurostat Manual of Supply, Use and Input-Output Tables, page 494.
co2_row <- data.frame(
  iotables_row            = "CO2_coefficients",
  agriculture_group       = 0.2379,
  industry_group          = 0.5172,
  construction            = 0.0456,
  trade_group             = 0.1320,
  business_services_group = 0.0127,
  other_services_group    = 0.0530
)

ch4_row <- data.frame(
  iotables_row            = "CH4_coefficients",
  agriculture_group       = 0.0349,
  industry_group          = 0.0011,
  construction            = 0,
  trade_group             = 0,
  business_services_group = 0,
  other_services_group    = 0.0021
)

de_coeff <- input_coefficient_matrix_create(iotable_get())

emissions <- rbind(co2_row, ch4_row)
supplementary_data <- emissions

extended <- supplementary_add(
  data_table         = de_io,
  supplementary_data = emissions
)

# Check against The Eurostat Manual page 494
test_that("correct data is returned", {
  co2_idx <- which(extended[, 1] == "CO2_coefficients")
  ch4_idx <- which(extended[, 1] == "CH4_coefficients")
  expect_equal(extended$construction[co2_idx], 0.0456, tolerance = 1e-6)
  expect_equal(extended$other_services_group[co2_idx], 0.0530, tolerance = 1e-6)
  expect_equal(extended$other_services_group[ch4_idx], 0.0021, tolerance = 1e-6)
})
|
d20a013f9ab2be311edf45d23040432eb7303733
|
d19d8da8b0d88f94c2db20002adcaabb5c6ba620
|
/MattsNotes/Sampling_05-04.R
|
8bd4079163fb5e4d3cd419428a99e81a7a55eac5
|
[
"MIT"
] |
permissive
|
wzhang43/ST-599-Project-2-Group-1
|
75baf5b779e8fd1a0dd4592c6cbe34f23c31e493
|
ecac2b2ae4c7a9d446230b1ed90099bc2d85bcd8
|
refs/heads/master
| 2016-08-04T18:12:23.754227
| 2014-05-12T04:11:08
| 2014-05-12T04:11:08
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 9,191
|
r
|
Sampling_05-04.R
|
## 5.4 - matt sampling.
## i have concerns about what was done.
# NOTE(review): exploratory scratch script. It relies on a dplyr table
# `flights` backed by a database connection that is created elsewhere
# (not in this file) -- confirm the connection setup before re-running.
# `%.%` is the chain operator from early dplyr, long since replaced by `%>%`.
# explore Nandhita's sampling results file.
n.samp <- read.csv("data/samp13_14.csv", header=T)
n.samp
#e-mail & file says "2013 & 2014" but results say "2003 & 2004"
sum(n.samp$n_flights)
# what she did, was sample 10,000 flights over the entire year, which ended with some months/regions getting 1 flight. not acceptable
# ---------------------------------------------------------------------------------------#
# To query by region, which is our sampling unit, we should be able to do something like:
# WHERE (origin IN <list> )
library(dplyr)
iata.region = read.csv("data/iata_by_region.csv", header=T, stringsAsFactors=F)
# fiddling with pulling regional lists of airports
west.list <- iata.region%.%filter(Region=="West")%.%select(origin)
by.reg <- iata.region%.%group_by(Region)%.%summarise(n=n())
sum(by.reg$n) # that is the 360 airports we expect.
# summarise(iata.region)
## I want a list of lists for regions
region.df <- tbl_df(read.csv("data/iata_by_region.csv", header=T, stringsAsFactors=F))
region.df
nw.list <- region.df[which(region.df$Region=="Northwest"),1]
r.list = list()
r.list = c(r.list, nw.list)
w.list <- region.df[which(region.df$Region=="West"),1]
r.list = c(r.list, w.list) ## This does not work
r.list
str(nw.list)
str(w.list)
r.list <- list(nw.list, w.list) # this does work. now, do i have to do each region by hand? or can I loop it?
# it will be quicker if i just do each region by hand.
# one single-column tbl of airport codes (origin) per region:
ak.list <- region.df[which(region.df$Region=="Alaska"),1]
c.list <- region.df[which(region.df$Region=="Central"),1]
hi.list <- region.df[which(region.df$Region=="Hawaii"),1]
nr.list <- region.df[which(region.df$Region=="NorthRockies"),1]
ne.list <- region.df[which(region.df$Region=="Northeast"),1]
nw.list <- region.df[which(region.df$Region=="Northwest"),1]
so.list <- region.df[which(region.df$Region=="South"),1]
se.list <- region.df[which(region.df$Region=="Southeast"),1]
sw.list <- region.df[which(region.df$Region=="Southwest"),1]
uk.list <- region.df[which(region.df$Region=="Unknown"),1]
um.list <- region.df[which(region.df$Region=="UpperMidwest"),1]
we.list <- region.df[which(region.df$Region=="West"),1]
r.list <- list(ak.list, c.list, hi.list, nr.list, ne.list, nw.list, so.list, se.list, sw.list, uk.list, um.list, we.list)
o.list <- r.list[[1]]
# Summarise weather delays per airport for one region/month/year. The whole
# pipeline is lazy: it is translated to SQL and only runs at explain()/collect().
# NOTE(review): `yr` is not defined earlier in this file; it must already
# exist in the workspace for this filter to evaluate.
qry = flights %.% group_by(year, month, origin) %.%
  summarise( ttl_del=sum(weatherdelay), avg_del=mean(weatherdelay), sd_del=sd(weatherdelay),
             n_flights=n(),
             n_wdelay=sum(if(weatherdelay>0) {1} else {0})
  ) %.%
  filter(year==yr, month==10, origin %in% o.list )
explain(qry)
### this is generating a valid query. lets see what happens
system.time(reg.dat <- collect(qry))
sum(reg.dat$n_flights) ## 16994
sum(reg.dat$n_wdelay) # 47
## it works!
# --------------------------------------------------------- ##
# Updated to sample within a region for a specific month and year, since that is the sampling unit we discussed.
# Nandhita's code also is sampling from the Data.csv? which is a 19mb file which looks like it has every flight in it from 2003/2004. That's not going to work for what we need. We need to sample in the database, rather than pulling all the data, sampling from that then going back to the DB. That's horribly inefficent, and kind of the opposite of what we're trying to learn, IMO
#
samp.data = data.frame()
years = as.character(seq(2003, 2013,1))
# Make a list of lists of airport by region
region.df <- tbl_df(read.csv("data/iata_by_region.csv", header=T, stringsAsFactors=F))
# individual regions
ak.list <- region.df[which(region.df$Region=="Alaska"),1]
c.list <- region.df[which(region.df$Region=="Central"),1]
hi.list <- region.df[which(region.df$Region=="Hawaii"),1]
nr.list <- region.df[which(region.df$Region=="NorthRockies"),1]
ne.list <- region.df[which(region.df$Region=="Northeast"),1]
nw.list <- region.df[which(region.df$Region=="Northwest"),1]
so.list <- region.df[which(region.df$Region=="South"),1]
se.list <- region.df[which(region.df$Region=="Southeast"),1]
sw.list <- region.df[which(region.df$Region=="Southwest"),1]
uk.list <- region.df[which(region.df$Region=="Unknown"),1]
um.list <- region.df[which(region.df$Region=="UpperMidwest"),1]
we.list <- region.df[which(region.df$Region=="West"),1]
# combine into a list of lists
r.list <- list(ak.list, c.list, hi.list, nr.list, ne.list, nw.list, so.list, se.list, sw.list, uk.list, um.list, we.list)
# how to reference
# o.list <- r.list[[1]]
# j = 12 months
# k = 12 regions
# test:
i<-j<-k<-2
# ignoring the for loop, explain(qry) query was good for these years
# test 1 - got "error: index out of bounds". removed "random()<0.001" from filter and tried again
# that was successful.
# NOTE(review): `weather.data` is never initialized anywhere in this script,
# so the rbind() inside this loop fails on the first iteration -- presumably
# it existed in the workspace from an earlier session. Also, seq_along(years)
# is the safe idiom here; 1:length(years) misbehaves for empty vectors.
for(i in 1:length(years)){
  yr = years[i]
  mo = j
  o.list = r.list[[k]]
  qry = flights %.% group_by(year, month, origin) %.%
    summarise( ttl_del=sum(weatherdelay), avg_del=mean(weatherdelay), sd_del=sd(weatherdelay),
               n_flights=n(),
               n_wdelay=sum(if(weatherdelay>0) { 1} else {0})
    ) %.%
    filter(year==yr, month==mo, origin %in% o.list )
  qry.rand = arrange(qry, random())
  system.time(wd.new <- head(qry.rand, n=1000L))
  weather.data = rbind(weather.data, wd.new)
}
system.time(cen.Feb2002 <- collect(qry))
## this is essentially summarizing, and then attempting to take a random sample of the summaries, only there aren't enough of them.
# i think we're going to have to sample individual flights, then summarize on our side.
yr = years[i]
mo = j
o.list = r.list[[k]]
# random() is not an R function here: dplyr passes it through verbatim to the
# database, where it is evaluated per row (presumably SQL RANDOM() -- confirm
# against the backend), so the sampling fraction is only approximate.
qry = flights %.% select(year, month, origin, weatherdelay) %.%
  filter(year==yr, month==mo, origin %in% o.list, random() < 0.01)
system.time(wd.new <- collect(qry)) # 12 sec
# that only gave me 106 observations out of 96058
sum(cen.Feb2002$n_flights)
wd.new %.% group_by(origin) %.% summarise(n=n())
# 1000/96058 = 0.01
qry = flights %.% select(year, month, origin, weatherdelay) %.%
  filter(year==yr, month==mo, origin %in% o.list, random() < 0.01)
system.time(wd.new <- collect(qry)) # 22 sec, this time 974 observations.
qry = flights %.% select(year, month, origin, weatherdelay) %.%
  filter(year==yr, month==mo, origin %in% o.list)
qry.rnd <- arrange(qry, random())
system.time(cen.s3 <- head(qry.rnd, n=1000L)) #28.07
# I think we're going to have to go with the 2nd one, because otherwise, we can't control how many observations we get.
## test a for loop
rand.data <- data.frame()
n.samp <- 1000L
#for(i in 1:length(years)){ # years
for(i in 1:1){
  for(j in 1:12){ # months
    for(k in 1:12) { # Regions
      yr = years[i]
      mo = j
      o.list = r.list[[k]]
      qry = flights %.% select(year, month, origin, weatherdelay) %.%
        filter(year==yr, month==mo, origin %in% o.list)
      qry.rnd <- arrange(qry, random())
      dat.temp <- head(qry.rnd, n=n.samp) # can adjust the size if we want to stratify proportionally
      dat.summ <- dat.temp %.% group_by(origin, year, month) %.%
        summarise(
          ttl_del=sum(weatherdelay, na.rm=T),
          avg_del=mean(weatherdelay, na.rm=T),
          sd_del=sd(weatherdelay, na.rm=T),
          n_flights=n(),
          n_wdelay=sum(weatherdelay > 0) # we might want to leave the summarization till later, and only retain the original observations
        )
      rand.data <- rbind(rand.data, dat.summ)
    }
  }
}
## this worked, I didn't time it, but expect it took over a minute.
ita.reg <- read.csv("data/iata_by_region.csv", header=T, stringsAsFactors=F)
rand.data <- left_join(rand.data, ita.reg, by="origin")
rand.data %.% ungroup() %.% group_by(Region, year, month) %.%
  summarise(ttl_del=sum(ttl_del), avg_del=mean(avg_del, na.rm=T), n_flights=sum(n_flights),
            n_wdelay=sum(n_wdelay, na.rm=T))
## this gives me a 144 row table, which is 12 months x 12 Regions.
# ---------------------------------- #
# modify the loop to pull 2.5% of observations per strata, and to not summarize.
rand.data <- data.frame()
n.samp <- 1000L
#for(i in 1:length(years)){ # years
for(i in 9:11){
  for(j in 1:12){ # months
    for(k in 1:12) { # Regions
      yr = years[i]
      mo = j
      o.list = r.list[[k]]
      qry = flights %.% select(year, month, origin, weatherdelay) %.%
        filter(year==yr, month==mo, origin %in% o.list, random()<0.0025)
      dat.temp <- collect(qry)
      rand.data <- rbind(rand.data, dat.temp)
    }
  }
}
# running for 1 year 2003, got 160,000 lines? that's a weeee bit more than the 14,000 I was expecting
head(rand.data)
# NOTE(review): left_join() takes `by=`, not `on=`; as written the `on`
# argument is silently ignored and the join falls back to all shared columns.
rand.2003 <- left_join(rand.data, iata.region, on="origin")
summ.2003 <- rand.2003 %.% group_by(Region) %.% summarise(n=n())
#looks like we pulled around 30% on average, instead of 2%
# replacing "random() < 0.025" with "random < 0.0025"
## more like expected.
# run for yrs 2-4 - have 70276 obs.
# run for yrs 5-8 - have 139,033
# run for yrs 9-11 - now 185,527 obs total.
write.csv(rand.data, "data/sample_data.csv", row.names=F)
|
7fc49e21c83515093a9b8fca49c86f6af93b2b8a
|
c15f74ad13ec0656df788d0ab9de922ca35206b0
|
/R/gridgraphics.R
|
a200aa248840bc5e9249c1265eb661b5e02ff1b9
|
[] |
no_license
|
aliciatb/fars
|
f4d409207c60efeaa8feb07c9f393867add81cb6
|
7341de93fb0e85022fcf992ff3924fccea69aa5f
|
refs/heads/master
| 2021-03-22T00:11:54.828201
| 2018-03-24T18:36:08
| 2018-03-24T18:36:08
| 114,496,015
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 8,064
|
r
|
gridgraphics.R
|
# Exploratory walk-through of the grid graphics system: grobs, gTrees,
# viewports, coordinate systems, and combining grid output with ggplot2.
# NOTE(review): every grid.draw()/plot call renders to the active graphics
# device in order, so this script is meant to be run interactively, top to
# bottom, inspecting the device after each step.
library(dplyr)
library(faraway)
library(ggmap)
library(ggplot2)
library(grid)
library(gridExtra)
# ---- Basic grobs: create graphical objects, then draw them explicitly ----
my_circle <- circleGrob(x = 0.5, y = 0.5, r = 0.5,
                        gp = gpar(col = "gray", lty = 3))
grid.draw(my_circle)
my_rect <- rectGrob(x = 0.5, y = 0.5, width = 0.8, height = 0.3)
grid.draw(my_rect)
# NOTE(review): grid.edit() looks grobs up by name on the display list, but
# `my_circle` above was created without a `name = "my_circle"` argument, so
# this edit will likely fail to find it -- confirm.
grid.edit("my_circle", gp = gpar(col = "red", lty = 1))
# ---- ggplot objects are grobs too: they can be drawn and edited via grid ----
data("worldcup")
wc_plot <- ggplot(worldcup, aes(x = Time, y = Passes)) +
  geom_point()
grid.draw(wc_plot)
grid.draw(wc_plot)
grid.draw(my_circle)
wc_plot
grid.force()
grid.ls()
# NOTE(review): the numeric suffixes in these grob names (e.g. ".1400") are
# session-specific; they come from the grid.ls() listing above and will
# differ on a fresh run.
grid.edit("geom_point.points.1400", gp = gpar(col = "red"))
grid.edit("GRID.text.1419", gp = gpar(fontface = "bold"))
# ---- Composite grobs: a gTree groups child grobs into one object ----
candy <- circleGrob(r = 0.1, x = 0.5, y = 0.6)
stick <- segmentsGrob(x0 = 0.5, x1 = 0.5, y0 = 0, y1 = 0.5)
lollipop <- gTree(children = gList(candy, stick))
grid.draw(lollipop)
grid.ls(lollipop)
# ---- Viewports: rectangular drawing regions pushed onto / popped off a stack ----
grid.draw(rectGrob())
sample_vp <- viewport(x = 0.5, y = 0.5,
                      width = 0.5, height = 0.5,
                      just = c("left", "bottom"))
pushViewport(sample_vp)
grid.draw(roundrectGrob())
grid.draw(lollipop)
popViewport()
grid.draw(rectGrob())
sample_vp <- viewport(x = 0.5, y = 0.5,
                      width = 0.5, height = 0.5,
                      just = c("center", "center"))
pushViewport(sample_vp)
grid.draw(roundrectGrob())
grid.draw(lollipop)
popViewport()
grid.draw(rectGrob())
sample_vp <- viewport(x = 0.75, y = 0.75,
                      width = 0.25, height = 0.25,
                      just = c("left", "bottom"))
pushViewport(sample_vp)
grid.draw(roundrectGrob())
grid.draw(lollipop)
popViewport()
# Two sibling viewports drawn one after the other (pop back between them)
grid.draw(rectGrob())
sample_vp_1 <- viewport(x = 0.75, y = 0.75,
                        width = 0.25, height = 0.25,
                        just = c("left", "bottom"))
pushViewport(sample_vp_1)
grid.draw(roundrectGrob())
grid.draw(lollipop)
popViewport()
sample_vp_2 <- viewport(x = 0, y = 0,
                        width = 0.5, height = 0.5,
                        just = c("left", "bottom"))
pushViewport(sample_vp_2)
grid.draw(roundrectGrob())
grid.draw(lollipop)
popViewport()
# Nested viewports: the inner one is positioned relative to the outer one,
# so popViewport(2) unwinds both levels at once.
grid.draw(rectGrob())
sample_vp_1 <- viewport(x = 0.5, y = 0.5,
                        width = 0.5, height = 0.5,
                        just = c("left", "bottom"))
sample_vp_2 <- viewport(x = 0.1, y = 0.1,
                        width = 0.4, height = 0.4,
                        just = c("left", "bottom"))
pushViewport(sample_vp_1)
grid.draw(roundrectGrob(gp = gpar(col = "red")))
pushViewport(sample_vp_2)
grid.draw(roundrectGrob())
grid.draw(lollipop)
popViewport(2)
grid.draw(rectGrob())
sample_vp_1 <- viewport(x = 0.5, y = 0.5,
                        width = 0.5, height = 0.5,
                        just = c("left", "bottom"))
pushViewport(sample_vp_1)
grid.draw(roundrectGrob())
grid.draw(lollipop)
popViewport()
grid.ls()
worldcup %>%
  ggplot(aes(x = Time, y = Passes)) +
  geom_point()
grid.force()
grid.ls()
# ---- Map inset example: draw a Maryland overview map inside a Baltimore map ----
balt_counties <- map_data("county", region = "maryland") %>%
  mutate(our_counties = subregion %in% c("baltimore", "baltimore city"))
# NOTE(review): get_map() downloads tiles from an online service and, in
# current ggmap versions, requires an API key -- confirm before re-running.
balt_map <- get_map("Baltimore County", zoom = 10) %>%
  ggmap(extent = "device") +
  geom_polygon(data = filter(balt_counties, our_counties == TRUE),
               aes(x = long, y = lat, group = group),
               fill = "red", color = "darkred", alpha = 0.2)
maryland_map <- balt_counties %>%
  ggplot(aes(x = long, y = lat, group = group, fill = our_counties)) +
  geom_polygon(color = "black") +
  scale_fill_manual(values = c("white", "darkred"), guide = FALSE) +
  theme_void() +
  coord_map()
grid.draw(ggplotGrob(balt_map))
md_inset <- viewport(x = 0, y = 0,
                     just = c("left", "bottom"),
                     width = 0.35, height = 0.35)
pushViewport(md_inset)
grid.draw(rectGrob(gp = gpar(alpha = 0.5, col = "white")))
grid.draw(rectGrob(gp = gpar(fill = NA, col = "black")))
grid.draw(ggplotGrob(maryland_map))
popViewport()
# ---- "native" units: positions interpreted in the viewport's x/y scales ----
ex_vp <- viewport(x = 0.5, y = 0.5,
                  just = c("center", "center"),
                  height = 0.8, width = 0.8,
                  xscale = c(0, 100), yscale = c(0, 10))
pushViewport(ex_vp)
grid.draw(rectGrob())
grid.draw(circleGrob(x = unit(20, "native"), y = unit(5, "native"),
                     r = 0.1, gp = gpar(fill = "lightblue")))
grid.draw(circleGrob(x = unit(85, "native"), y = unit(8, "native"),
                     r = 0.1, gp = gpar(fill = "darkred")))
popViewport()
# ---- gridExtra: arrange multiple grobs / ggplots, embed a table grob ----
grid.arrange(lollipop, circleGrob(),
             rectGrob(), lollipop,
             ncol = 2)
time_vs_shots <- ggplot(worldcup, aes(x = Time, y = Shots)) +
  geom_point()
player_positions <- ggplot(worldcup, aes(x = Position)) +
  geom_bar()
grid.arrange(time_vs_shots, player_positions, ncol = 2)
grid.arrange(time_vs_shots, player_positions,
             layout_matrix = matrix(c(1, 2, 2), ncol = 3))
grid.arrange(time_vs_shots, player_positions,
             layout_matrix = matrix(c(1, NA, NA, NA, 2, 2),
                                    byrow = TRUE, ncol = 3))
worldcup_table <- worldcup %>%
  filter(Team %in% c("Germany", "Spain", "Netherlands", "Uruguay")) %>%
  group_by(Team) %>%
  dplyr::summarize(`Average time` = round(mean(Time), 1),
                   `Average shots` = round(mean(Shots), 1)) %>%
  tableGrob()
grid.draw(ggplotGrob(time_vs_shots))
wc_table_vp <- viewport(x = 0.22, y = 0.85,
                        just = c("left", "top"),
                        height = 0.1, width = 0.2)
pushViewport(wc_table_vp)
grid.draw(worldcup_table)
popViewport()
## Quiz
# The ggplot2 package is built on top of grid graphics, so the grid graphics system “plays well” with ggplot2 objects.
# Grid graphics and R’s base graphics are two separate systems.
# You cannot easily edit a plot created using base graphics with grid graphics functions.
# If you have to integrate output from these two systems, you may be able to using the gridBase package,
# Possible grobs that can be created using functions in the grid package include circles, rectangles, points, lines,
# polygons, curves, axes, rasters, segments, and plot frames
# The gridExtra package, provide functions that can be used to create addition grobs beyond those provided by the grid package.
# For example, the gridExtra package includes a function called tableGrob to create a table grob that can be added
# to grid graphics objects.
# The grob family of functions also includes a parameter called gp for setting graphical parameters like color, fill, line type, line width, etc.,
# for grob objects. The input to this function must be a gpar object, which can be created using the gpar function
# Aesthetics that you can set by specifying a gpar object for the gp parameter of a grob include color (col), fill (fill),
# transparency (alpha), line type (lty), line width (lwd), line end and join styles (lineend and linejoin, respectively),
# and font elements (fontsize, fontface, fontfamily).
# In many ways, ggplot objects can be treated as grid graphics grobs. For example, you can use the grid.draw function from grid to write
# a ggplot object to the current graphics device
# Viewports are the plotting windows that you can move into and out of to customize plots using grid graphics.
# You can use ggplot objects in plots with viewports.
# The native unit is often the most useful when creating extensions for ggplot2, for example. The npc units are also often
# useful in designing new plots– these set the x- and y-ranges to go from 0 to 1, so you can use these units if you need
# to place an object in, for example, the exact center of a viewport (c(0.5, 0.5) in npc units), or create a viewport in the
# top right quarter of the plot region. Grid graphics also allows the use of some units with absolute values, including
# inches (inches), centimeters (cm), and millimeters (mm).
# The grid.arrange function from the gridExtra package makes it easy to create a plot with multiple grid objects plotted on it.
# For example, you can use it to write out one or more grobs you’ve created to a graphics device:
|
3809fa5be2d785e2e13fe221a80e43cd89bee1bb
|
83dafb5709eb3ce48b4f33c61b496a3faa759f99
|
/06 svm v2.R
|
b52a9800f09656ece8c6be34913c90214b0ddd2e
|
[] |
no_license
|
Coelacanss/C_Adv_BA
|
1b75e80130c8f02c043426e03d088eb8b3a02e2e
|
623d6e032a03f400ed7c39b9af9aa8f2b90d0017
|
refs/heads/master
| 2020-09-28T08:16:01.874051
| 2016-08-17T17:04:38
| 2016-08-17T17:04:38
| 65,925,725
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 8,313
|
r
|
06 svm v2.R
|
# First, some simple experiments with svm
# SVM experiments with e1071: linear kernels and the cost parameter on toy
# data (df1, df2), then kernel / cost / class-weight tuning on the Default
# credit data set. NOTE(review): file.choose() opens an interactive file
# picker, so this script cannot run unattended; the chosen .Rda files must
# provide df1, df2, and Default.
library(e1071)
load(file.choose()) # load in df1 from L06/06 svm.Rda
library(ggplot2)
#plot the points
ggplot(df1, aes(x1,x2)) +
  geom_point(aes(color=y), shape=20, size=3)
#fit the hyperplane
# try a very large cost factor, this is like a hard margin, separating hyperplane
fit.df1 <- svm(y~., data=df1, kernel='linear', cost=100, scale=F) ### General scale = T
summary(fit.df1)
plot(fit.df1, df1) ### Xs are Support Vectors
# line equations
# hyperplane: coef1 x1 + coef2 x2 = rho
# margins: coef1 x1 + coef2 x2 = rho +/- 1
# if scale=T then use fit.df1$SV instead of df1
# fit.df1$index is the set of support vectors ### in this case, number 3,4,16 are SVs
# Recover the weight-vector components from the support vectors:
# w_j = sum over SVs of (coefs * x_j).
coef1 = sum(fit.df1$coefs*df1[fit.df1$index,1])
coef2 = sum(fit.df1$coefs*df1[fit.df1$index,2])
# sv flags which of the 20 observations are support vectors (for plot shapes)
sv = c(rep(0,20))
sv[fit.df1$index] = 1
sv = as.factor(sv)
ggplot(df1, aes(x1,x2)) + geom_point(aes(color=y, shape=sv), size=3) +
  geom_abline(intercept = fit.df1$rho/coef2, slope= -coef1/coef2) +
  geom_abline(intercept = (fit.df1$rho+1)/coef2, slope= -coef1/coef2, lty=2) +
  geom_abline(intercept = (fit.df1$rho-1)/coef2, slope= -coef1/coef2, lty=2)
# decreasing the cost factor
fit.df1 <- svm(y~., data=df1, kernel='linear', cost=1, scale=F)
coef1 = sum(fit.df1$coefs*df1[fit.df1$index,1])
coef2 = sum(fit.df1$coefs*df1[fit.df1$index,2])
sv = c(rep(0,20))
sv[fit.df1$index] = 1
sv = as.factor(sv)
ggplot(df1, aes(x1,x2)) + geom_point(aes(color=y, shape=sv), size=3) +
  geom_abline(intercept = fit.df1$rho/coef2, slope= -coef1/coef2) +
  geom_abline(intercept = (fit.df1$rho+1)/coef2, slope= -coef1/coef2, lty=2) +
  geom_abline(intercept = (fit.df1$rho-1)/coef2, slope= -coef1/coef2, lty=2)
# decreasing the cost factor
fit.df1 <- svm(y~., data=df1, kernel='linear', cost=.1, scale=F)
coef1 = sum(fit.df1$coefs*df1[fit.df1$index,1])
coef2 = sum(fit.df1$coefs*df1[fit.df1$index,2])
sv = c(rep(0,20))
sv[fit.df1$index] = 1
sv = as.factor(sv)
ggplot(df1, aes(x1,x2)) + geom_point(aes(color=y, shape=sv), size=3) +
  geom_abline(intercept = fit.df1$rho/coef2, slope= -coef1/coef2) +
  geom_abline(intercept = (fit.df1$rho+1)/coef2, slope= -coef1/coef2, lty=2) +
  geom_abline(intercept = (fit.df1$rho-1)/coef2, slope= -coef1/coef2, lty=2)
# decreasing the cost factor
fit.df1 <- svm(y~., data=df1, kernel='linear', cost=.01, scale=F)
coef1 = sum(fit.df1$coefs*df1[fit.df1$index,1])
coef2 = sum(fit.df1$coefs*df1[fit.df1$index,2])
sv = c(rep(0,20))
sv[fit.df1$index] = 1
sv = as.factor(sv)
ggplot(df1, aes(x1,x2)) + geom_point(aes(color=y, shape=sv), size=3) +
  geom_abline(intercept = fit.df1$rho/coef2, slope= -coef1/coef2) +
  geom_abline(intercept = (fit.df1$rho+1)/coef2, slope= -coef1/coef2, lty=2) +
  geom_abline(intercept = (fit.df1$rho-1)/coef2, slope= -coef1/coef2, lty=2)
# use the tune function in svm
# tune() grid-searches the given cost values using cross-validation
# (presumably e1071's 10-fold default -- see ?tune.control).
set.seed(1)
tune.df1 <- tune(svm, y ~x1+x2, data=df1, kernel='linear',
                 ranges=list(cost=c(0.001,0.01,0.1,1,5,10,100)))
summary(tune.df1)
plot(tune.df1)
# let us try log scale using ggplot
tune.df1$performances #df of performance
ggplot(tune.df1$performance, aes(x=cost, y=error)) +
  geom_line() + scale_x_log10() +
  theme(text = element_text(size=20))
#
# now look at df2
#
ggplot(df2, aes(x1, x2, color = y)) + geom_point()
set.seed(1)
tune.df2 <- tune(svm, y ~x1+x2, data=df2, kernel='linear',
                 ranges=list(cost=c(0.001,0.01,0.1,1,10,100, 1000)))
ggplot(tune.df2$performance, aes(x=cost, y=error)) +
  geom_line() + scale_x_log10() +
  theme(text = element_text(size=20))
# pretty bad!
# need a different kernel - radial
fit.df2 <- svm(y~x1+x2, data=df2, kernel='radial', cost=.1, scale=F)
plot(fit.df2, df2)
fit.df2 <- svm(y~x1+x2, data=df2, kernel='radial', cost=1, scale=F)
plot(fit.df2, df2)
fit.df2 <- svm(y~x1+x2, data=df2, kernel='radial', cost=10, scale=F)
plot(fit.df2, df2)
fit.df2 <- svm(y~x1+x2, data=df2, kernel='radial', cost=100, scale=F)
plot(fit.df2, df2)
fit.df2 <- svm(y~x1+x2, data=df2, kernel='radial', cost=1000, scale=F)
plot(fit.df2, df2)
fit.df2 <- svm(y~x1+x2, data=df2, kernel='radial', cost=10000, scale=F)
plot(fit.df2, df2)
set.seed(1)
tune.df2 <- tune(svm, y ~x1+x2, data=df2, kernel='radial',
                 ranges=list(cost=c(0.001,0.01,0.1,1,10,100, 1000)))
ggplot(tune.df2$performance, aes(x=cost, y=error)) +
  geom_line() + scale_x_log10() +
  theme(text = element_text(size=20))
summary(tune.df2)
#
# ==== now experiment with default data
#
load(file.choose()) # load in L05/default.rda
#create test and train samples: 2/3 train, 1/3 test
set.seed(100)
train = sample(1:nrow(Default),nrow(Default)*0.667)
Default.train = Default[train,]
Default.test = Default[-train,]
# let us visualize the data
ggplot(Default.train, aes(balance, income)) +
  geom_point(aes(color=default, shape=student))
# first with default options kernel: radial
fit.def = svm(default ~ student + balance + income,
              data=Default.train)
summary(fit.def)
plot(fit.def, Default.train, balance~income) #need formula since more than 2 predictors
# now linear kernel
fit.def = svm(default ~ student + balance + income,
              data=Default.train,
              kernel='linear')
summary(fit.def)
plot(fit.def, Default.train, balance~income) #need formula since more than 2 predictors
# now polynomial kernel
fit.def = svm(default ~ student + balance + income,
              data=Default.train,
              kernel='polynomial')
summary(fit.def)
plot(fit.def, Default.train, balance~income)
#tuning the polynomial kernel
set.seed(1)
tune.def <- tune(svm, default ~ student + balance + income,
                 data=Default.train,
                 kernel='polynomial',
                 ranges=list(cost=c(0.001,0.01,0.1,1,10,100, 1000)))
ggplot(tune.def$performance, aes(x=cost, y=error)) +
  geom_line() + scale_x_log10() +
  theme(text = element_text(size=20))
# cost = 10 seems to be effective
fit.def = svm(default ~ student + balance + income,
              data=Default.train,
              kernel='polynomial', cost=10,
              degree=3, coef0=50, gamma=1/2)
summary(fit.def)
plot(fit.def, Default.train, balance~income)
# class weights
# for unequal classes, weights can be used to weigh the errors in an unequal fashion
# we are ok with more error on the no class to reduce the error on the yes class
#tuning the polynomial kernel with class weights
# ranges= here searches the cross product of cost and class.weights settings
set.seed(1)
tune.def <- tune(svm, default ~ student + balance + income,
                 data=Default.train,
                 kernel='polynomial',
                 degree=3, coef0=50, gamma=1/2,
                 # class.weights=c(No=0.1, Yes=1),
                 ranges=list(cost=c(0.1,1,10,100),
                             class.weights=list(c(No=0.1, Yes=1),
                                                c(No=0.2, Yes=1))))
ggplot(tune.def$performance, aes(x=cost, y=error)) +
  geom_line() + scale_x_log10() +
  theme(text = element_text(size=20))
# now let us check this one out in detail
fit.def = svm(default ~ student + balance + income,
              data=Default.train,
              kernel='polynomial', cost=1,
              degree=3, coef0=50, gamma=1/2,
              class.weights=c(No=0.1, Yes=1))
summary(fit.def)
plot(fit.def, Default.train, balance~income)
# now for profit evaluation
#
# profit function
#
# lend $100 to every one predicted negative
# get back $110 from true negative, get back $50 from false negative
# profit = 10* TN - 50* FN
# Expected lending profit implied by a confusion matrix.
#
# cm      2x2 confusion matrix with rows = predicted class and columns =
#         actual class, both ordered (No, Yes).  cm[1, 1] is therefore the
#         true-negative count (predicted No, actually No) and cm[1, 2] the
#         false-negative count (predicted No, actually Yes).
# gain_tn Profit per true negative.  Default 10: lend $100, get $110 back.
# loss_fn Loss per false negative.  Default 50: lend $100, recover only $50.
#
# Returns a single number: gain_tn * TN - loss_fn * FN.  Defaults reproduce
# the original hard-coded 10/50 payoff, so existing calls are unchanged.
profit <- function(cm, gain_tn = 10, loss_fn = 50) {
  # Guard against a malformed matrix (e.g. a table with a missing level).
  stopifnot(nrow(cm) >= 1, ncol(cm) >= 2)
  gain_tn * cm[1, 1] - loss_fn * cm[1, 2]
}
# let us set up the cm matrix if we use the null classifier of
# classifying all observations as non-default
summary(Default.test)
# test has 3,233 actual no and 97 actual yes
# Null-classifier confusion matrix: everything is predicted "No", so row 1
# (predicted No) holds all 3,233 actual-No and 97 actual-Yes cases while
# row 2 (predicted Yes) is empty.
cm = matrix(c(3233,97,0,0),
            nrow=2, # number of rows
            ncol=2, # number of columns
            byrow = TRUE) # matrix(c(), nrow=2)
profit.null = profit(cm)
profit.null # $27,480
# Profit lift over the null classifier, for comparison across models:
# tree 29020 - 27480 = $1,540
# linear regression 29420 - 27480 = $1,940
# logistic regression 29530 - 27480 = $2,050
# for SVM:
# Drop column 1 (the actual `default` labels) before predicting.
default.pred = predict(fit.def, Default.test[,-1])
# Confusion matrix: rows = predicted class, columns = actual class.
cm = table(default.pred, Default.test$default)
cm
profit(cm) # $29,460
profit(cm) - profit.null # $1,980
|
2402e7289deabfbdf598dea680ec9c1a0a97a55b
|
e726b667d365a72cdf0fcd739aa830470f06d20a
|
/R/haven-sas.R
|
9d5472b31329b47dde9f56501b8dac9ef74afa98
|
[
"MIT"
] |
permissive
|
tidyverse/haven
|
49e3a1bc82a766612eec1ff0c4e5d56a26e42a66
|
9b3b21b5e9b64867eb53818faa7e9a22480f347d
|
refs/heads/main
| 2023-08-30T19:02:26.976602
| 2023-06-30T20:44:03
| 2023-06-30T20:44:03
| 30,308,523
| 283
| 115
|
NOASSERTION
| 2023-09-04T06:31:40
| 2015-02-04T16:28:17
|
C
|
UTF-8
|
R
| false
| false
| 7,540
|
r
|
haven-sas.R
|
#' Read SAS files
#'
#' `read_sas()` supports both sas7bdat files and the accompanying sas7bcat files
#' that SAS uses to record value labels.
#'
#' @param data_file,catalog_file Path to data and catalog files. The files are
#'   processed with [readr::datasource()].
#' @param encoding,catalog_encoding The character encoding used for the
#'   `data_file` and `catalog_encoding` respectively. A value of `NULL` uses the
#'   encoding specified in the file; use this argument to override it if it is
#'   incorrect.
#' @inheritParams tibble::as_tibble
#' @param col_select One or more selection expressions, like in
#'   [dplyr::select()]. Use `c()` or `list()` to use more than one expression.
#'   See `?dplyr::select` for details on available selection options. Only the
#'   specified columns will be read from `data_file`.
#' @param skip Number of lines to skip before reading data.
#' @param n_max Maximum number of lines to read.
#' @param cols_only `r lifecycle::badge("deprecated")` `cols_only` is no longer
#'   supported; use `col_select` instead.
#' @return A tibble, data frame variant with nice defaults.
#'
#'   Variable labels are stored in the "label" attribute of each variable. It is
#'   not printed on the console, but the RStudio viewer will show it.
#'
#'   `write_sas()` returns the input `data` invisibly.
#' @export
#' @examples
#' path <- system.file("examples", "iris.sas7bdat", package = "haven")
#' read_sas(path)
read_sas <- function(data_file, catalog_file = NULL,
                     encoding = NULL, catalog_encoding = encoding,
                     col_select = NULL, skip = 0L, n_max = Inf, cols_only = deprecated(),
                     .name_repair = "unique") {
  # Fold the deprecated `cols_only` character vector into the `col_select`
  # quosure so a single selection mechanism flows downstream.
  if (lifecycle::is_present(cols_only)) {
    lifecycle::deprecate_warn("2.2.0", "read_sas(cols_only)", "read_sas(col_select)")
    stopifnot(is.character(cols_only)) # used to only work with a char vector
    # guarantee a quosure to keep NULL and tidyselect logic clean downstream
    col_select <- quo(c(!!!cols_only))
  } else {
    # Capture the caller's selection expression for lazy tidyselect evaluation.
    col_select <- enquo(col_select)
  }
  # "" asks the parser to use the encoding recorded in the file itself.
  if (is.null(encoding)) {
    encoding <- ""
  }
  # Resolve the selection into the set of columns the parser should skip.
  cols_skip <- skip_cols(read_sas, !!col_select, data_file, encoding = encoding)
  n_max <- validate_n_max(n_max)
  spec_data <- readr::datasource(data_file)
  # Without a catalog file there are no value labels to merge in.
  if (is.null(catalog_file)) {
    spec_cat <- list()
  } else {
    spec_cat <- readr::datasource(catalog_file)
  }
  # Dispatch on the datasource subclass: a path on disk vs. an in-memory raw
  # vector. Any other datasource type is rejected.
  switch(class(spec_data)[1],
    source_file = df_parse_sas_file(spec_data, spec_cat, encoding = encoding, catalog_encoding = catalog_encoding, cols_skip = cols_skip, n_max = n_max, rows_skip = skip, name_repair = .name_repair),
    source_raw = df_parse_sas_raw(spec_data, spec_cat, encoding = encoding, catalog_encoding = catalog_encoding, cols_skip = cols_skip, n_max = n_max, rows_skip = skip, name_repair = .name_repair),
    cli_abort("This kind of input is not handled.")
  )
}
#' Write SAS files
#'
#' @description
#' `r lifecycle::badge("deprecated")`
#'
#' `write_sas()` creates sas7bdat files. The SAS file format is complex and
#' undocumented, which makes `write_sas()` unreliable: in most cases SAS
#' cannot read the files it produces.
#'
#' Prefer [write_xpt()], which writes the open SAS transport format. That
#' format has limitations but is reliably read by SAS.
#'
#' @param data Data frame to write.
#' @param path Path to file where the data will be written.
#' @keywords internal
#' @export
write_sas <- function(data, path) {
  # Soft-deprecated since haven 2.5.2 in favour of write_xpt().
  lifecycle::deprecate_warn("2.5.2", "write_sas()", "write_xpt()")
  validate_sas(data)
  # Strip timezones so date-times round-trip as displayed, then hand off to
  # the low-level writer. mustWork = FALSE: the target may not exist yet.
  adjusted <- adjust_tz(data)
  target <- normalizePath(path, mustWork = FALSE)
  write_sas_(adjusted, target)
  # Return the original input invisibly so the call pipes cleanly.
  invisible(data)
}
#' Read and write SAS transport files
#'
#' The SAS transport format is an open format, as is required for submission
#' of the data to the FDA.
#'
#' @inheritParams read_spss
#' @return A tibble, data frame variant with nice defaults.
#'
#'   Variable labels are stored in the "label" attribute of each variable.
#'   It is not printed on the console, but the RStudio viewer will show it.
#'
#'   If a dataset label is defined, it will be stored in the "label" attribute
#'   of the tibble.
#'
#'   `write_xpt()` returns the input `data` invisibly.
#' @export
#' @examples
#' tmp <- tempfile(fileext = ".xpt")
#' write_xpt(mtcars, tmp)
#' read_xpt(tmp)
read_xpt <- function(file, col_select = NULL, skip = 0, n_max = Inf, .name_repair = "unique") {
  # Resolve the tidyselect expression into the columns the parser must skip.
  skipped <- skip_cols(read_xpt, {{ col_select }}, file)
  max_rows <- validate_n_max(n_max)
  src <- readr::datasource(file)
  # Pick the parser for this datasource subclass (file path vs. raw vector);
  # any other datasource type is rejected.
  parser <- switch(class(src)[1],
    source_file = df_parse_xpt_file,
    source_raw = df_parse_xpt_raw,
    cli_abort("This kind of input is not handled.")
  )
  parser(src, skipped, max_rows, skip, name_repair = .name_repair)
}
#' @export
#' @rdname read_xpt
#' @param version Version of transport file specification to use: either 5 or 8.
#' @param name Member name to record in file. Defaults to file name sans
#'   extension. Must be <= 8 characters for version 5, and <= 32 characters
#'   for version 8.
#' @param label Dataset label to use, or `NULL`. Defaults to the value stored in
#'   the "label" attribute of `data`.
#'
#'   Note that although SAS itself supports dataset labels up to 256 characters
#'   long, dataset labels in SAS transport files must be <= 40 characters.
#' @param adjust_tz Stata, SPSS and SAS do not have a concept of time zone,
#'   and all [date-time] variables are treated as UTC. `adjust_tz` controls
#'   how the timezone of date-time values is treated when writing.
#'
#'   * If `TRUE` (the default) the timezone of date-time values is ignored, and
#'     they will display the same in R and Stata/SPSS/SAS, e.g.
#'     `"2010-01-01 09:00:00 NZDT"` will be written as `"2010-01-01 09:00:00"`.
#'     Note that this changes the underlying numeric data, so use caution if
#'     preserving between-time-point differences is critical.
#'   * If `FALSE`, date-time values are written as the corresponding UTC value,
#'     e.g. `"2010-01-01 09:00:00 NZDT"` will be written as
#'     `"2009-12-31 20:00:00"`.
write_xpt <- function(data, path, version = 8, name = NULL, label = attr(data, "label"), adjust_tz = TRUE) {
  # Only the v5 and v8 transport specifications are implemented.
  if (!version %in% c(5, 8)) {
    cli_abort("SAS transport file version {.val {version}} is not currently supported.")
  }
  # Default the member name to the file's base name without its extension.
  if (is.null(name)) {
    name <- tools::file_path_sans_ext(basename(path))
  }
  # Validation order matters for which error the user sees first:
  # name, then label, then the data frame itself.
  name <- validate_xpt_name(name, version)
  label <- validate_xpt_label(label)
  out <- validate_sas(data)
  # Optionally drop timezones so date-times round-trip as displayed.
  if (isTRUE(adjust_tz)) {
    out <- adjust_tz(out)
  }
  target <- normalizePath(path, mustWork = FALSE)
  write_xpt_(out, target, version = version, name = name, label = label)
  # Return the untouched input invisibly so the call pipes cleanly.
  invisible(data)
}
# Validation --------------------------------------------------------------
# Assert that `data` is a data frame before it reaches the low-level SAS
# writers; errors via stopifnot() otherwise. Returns its input invisibly so
# callers can use the validated value directly.
validate_sas <- function(data) {
  stopifnot(is.data.frame(data))
  invisible(data)
}
# Enforce the member-name length limits of the SAS transport format:
# version 5 allows at most 8 characters, later versions at most 32.
# `call` lets the error report the user-facing caller. Returns `name`
# unchanged when it is within the limit.
validate_xpt_name <- function(name, version, call = caller_env()) {
  if (version == 5 && nchar(name) > 8) {
    cli_abort("{.arg name} must be 8 characters or fewer.", call = call)
  }
  if (version != 5 && nchar(name) > 32) {
    cli_abort("{.arg name} must be 32 characters or fewer.", call = call)
  }
  name
}
# Validate a transport-file dataset label: `NULL` (no label) passes through,
# otherwise it must be a single string of at most 40 characters — the limit
# imposed by the transport format. Returns `label` unchanged on success.
validate_xpt_label <- function(label, call = caller_env()) {
  if (is.null(label)) {
    return(label)
  }
  stopifnot(is.character(label), length(label) == 1)
  if (nchar(label) > 40) {
    cli_abort("{.arg label} must be 40 characters or fewer.", call = call)
  }
  label
}
|
98fd8be9d03eae97020cdbf845ed0ace17d690ff
|
f46ebf88f0274c36709a80be04e93eec25ebc416
|
/nubip/AWR/swork01/make-frame.R
|
764cd050ac27568564fbc402c3c70f78c94fcf52
|
[
"MIT"
] |
permissive
|
youryharchenko/r
|
e62fd2b4166ffad08ba4671ffea59e94d084459d
|
40e0dfbfb11e347cbdf8c9fd62c9c3a55c1a3fe5
|
refs/heads/master
| 2023-02-15T16:43:17.418520
| 2021-01-01T07:44:51
| 2021-01-01T07:44:51
| 298,995,455
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 294
|
r
|
make-frame.R
|
# Look up World Bank indicator metadata with the WDI package.
library(WDI)

# Download the indicator/country cache once and reuse it for both searches.
wdi_cache <- WDIcache()

# First 30 matches on the indicator code for total population.
head(
  WDIsearch(
    string = "SP.POP.TOTL",
    field = "indicator",
    short = TRUE,
    cache = wdi_cache
  ),
  n = 30L
)

# First 30 matches on the indicator code for GDP per capita, PPP.
head(
  WDIsearch(
    string = "NY.GDP.PCAP.PP.CD",
    field = "indicator",
    short = TRUE,
    cache = wdi_cache
  ),
  n = 30L
)
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.