content
large_stringlengths 0
6.46M
| path
large_stringlengths 3
331
| license_type
large_stringclasses 2
values | repo_name
large_stringlengths 5
125
| language
large_stringclasses 1
value | is_vendor
bool 2
classes | is_generated
bool 2
classes | length_bytes
int64 4
6.46M
| extension
large_stringclasses 75
values | text
stringlengths 0
6.46M
|
|---|---|---|---|---|---|---|---|---|---|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/tableApi.r
\name{table$getSecurity}
\alias{table$getSecurity}
\title{Get the security information of the specified security item associated with the table for a specified user.}
\arguments{
\item{webId}{The ID of the table for the security to be checked.}
\item{userIdentity}{The user identity for the security information to be checked. Multiple security identities may be specified with multiple instances of the parameter. If the parameter is not specified, only the current user's security rights will be returned.}
\item{forceRefresh}{Indicates if the security cache should be refreshed before getting security information. The default is 'false'.}
\item{selectedFields}{List of fields to be returned in the response, separated by semicolons (;). If this parameter is not specified, all available fields will be returned.}
\item{webIdType}{Optional parameter. Used to specify the type of WebID. Useful for URL brevity and other special cases. Default is the value of the configuration item "WebIDType".}
}
\value{
Security rights.
}
\description{
Get the security information of the specified security item associated with the table for a specified user.
}
|
/man/table-cash-getSecurity.Rd
|
permissive
|
frbl/PI-Web-API-Client-R
|
R
| false
| true
| 1,244
|
rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/tableApi.r
\name{table$getSecurity}
\alias{table$getSecurity}
\title{Get the security information of the specified security item associated with the table for a specified user.}
\arguments{
\item{webId}{The ID of the table for the security to be checked.}
\item{userIdentity}{The user identity for the security information to be checked. Multiple security identities may be specified with multiple instances of the parameter. If the parameter is not specified, only the current user's security rights will be returned.}
\item{forceRefresh}{Indicates if the security cache should be refreshed before getting security information. The default is 'false'.}
\item{selectedFields}{List of fields to be returned in the response, separated by semicolons (;). If this parameter is not specified, all available fields will be returned.}
\item{webIdType}{Optional parameter. Used to specify the type of WebID. Useful for URL brevity and other special cases. Default is the value of the configuration item "WebIDType".}
}
\value{
Security rights.
}
\description{
Get the security information of the specified security item associated with the table for a specified user.
}
|
library(plyr)
library(ape)
# For every branch of a tree, list all of the branches descended from it.
#
# Arguments:
#   number_br   - number of branches in the tree (not used by the computation;
#                 kept for interface compatibility with existing callers)
#   number_cell - number of cells/tips (not used; kept for compatibility)
#   ts          - tree object (e.g. from ape::read.tree) whose $edge matrix has
#                 one row per branch: column 1 = parent node, column 2 = child node
# Returns:
#   a list with one element per branch (row of ts$edge); element i holds the
#   row indices of every branch that descends from branch i (empty for
#   branches ending at a tip)
compare_all_order <- function(number_br, number_cell, ts) {
  # Parent node of `node`, or 0 when `node` has no parent (i.e. it is the root).
  find_ancestor <- function(edge, node) {
    parent <- 0
    for (i in seq_len(nrow(edge))) {
      if (edge[i, 2] == node) {
        parent <- edge[i, 1]
      }
    }
    parent
  }
  # All distinct node labels appearing in the edge matrix, in row-major
  # (edge[1,1], edge[1,2], edge[2,1], ...) first-seen order.
  get_all_nodes <- function(edge) {
    unique(as.vector(t(edge)))
  }
  # The root node: the unique node with no parent.
  # BUG FIX: the original version ended on the `for` loop, which evaluates to
  # NULL, so find_root() always returned NULL; it now returns the root node
  # (NA if, unexpectedly, none is found).
  find_root <- function(edge) {
    root_node <- NA
    for (node in get_all_nodes(edge)) {
      if (find_ancestor(edge, node) == 0) {
        root_node <- node
        break
      }
    }
    root_node
  }
  # The (up to two) children of `parent_node`: first row holds the child
  # nodes, second row the corresponding branch (row) indices. Slots stay 0
  # when `parent_node` is a tip.
  find_child_branches_and_nodes <- function(edge, parent_node) {
    children <- matrix(0, 2, 2)
    slot <- 1
    for (i in seq_len(nrow(edge))) {
      if (edge[i, 1] == parent_node) {
        children[1, slot] <- edge[i, 2]
        children[2, slot] <- i
        slot <- slot + 1
      }
    }
    children
  }
  # Recursively accumulate every branch below `current_edge` (pre-order:
  # the two direct children first, then the left subtree, then the right).
  find_child_branches <- function(edge, current_edge, child_branches) {
    kids <- find_child_branches_and_nodes(edge, edge[current_edge, 2])
    if (kids[1, 1] != 0) {
      # not a leaf: record both child branches, then descend into each
      left_branch <- kids[2, 1]
      right_branch <- kids[2, 2]
      child_branches <- c(child_branches, left_branch, right_branch)
      child_branches <- find_child_branches(edge, left_branch, child_branches)
      child_branches <- find_child_branches(edge, right_branch, child_branches)
    }
    child_branches
  }
  # Descendant branches for every branch in the tree.
  find_all_child_branches <- function(edge) {
    root_node <- find_root(edge)  # located for reference; not needed below
    all_child_branches <- vector("list", nrow(edge))
    for (i in seq_len(nrow(edge))) {
      all_child_branches[[i]] <- find_child_branches(edge, i, integer(0))
    }
    all_child_branches
  }
  find_all_child_branches(ts$edge)
}
# ---------------------------------------------------------------------------
# Compare mutation-order pairs inferred by SCITE against the true branch
# order of each simulated tree, over a grid of (alpha, beta) error settings
# (alpha = false positive rate, beta = false negative rate). One CSV summary
# is written per setting.
# ---------------------------------------------------------------------------
parameter_setting = expand.grid(alpha=c(0.05,0.1,0.2,0.4),
beta =c(0.05,0.1,0.2,0.4))
# NOTE(review): the loop starts at setting 5, not 1 -- presumably resuming a
# partially completed run; confirm settings 1-4 were processed elsewhere.
for(parameterIndex in 5:dim(parameter_setting)[1]){
#get the parameters in each setting
alpha = parameter_setting[parameterIndex,1]#fpr
beta = parameter_setting[parameterIndex,2]#fnr
unit_theta = 10^(-7) #rate from 0 to 1
unit_gamma = 10^(-9) #rate from 0 to 2
unit_mu = 10 ^(-2) #rate from 1 to 2
number_br = 18 #number of branches in a tree
number_cell = 10 #number of cells in a tree
#return alpha and beta string used for data input
# Encode the rates as the short strings used in file names:
# e.g. 0.05 -> "05" and 0.2 -> "2" (the sprintf paths below prepend a "0").
if (alpha < 0.1)
{
alpha_str = sprintf('0%s', alpha*100)
} else
{
alpha_str = sprintf('%s', alpha*10)
}
if (beta < 0.1)
{
beta_str = sprintf('0%s', beta*100)
} else
{
beta_str = sprintf('%s', beta*10)
}
# One row per simulated tree: matched / inferred / true pair counts.
scite_all_pair_result= data.frame(matrix(, nrow=0, ncol=3))
for (indexn in 1:1000){
form = sprintf('/fs/project/kubatko.2-temp/gao.957/DNA_alignment/bowtie2-2.3.4.2-linux-x86_64/reference/Simulation_Setting/SimulateData_EXP10/RandomTree/RandomTree_%s.tre', indexn)
sampletr=read.tree(form)
scite_order_form_0_1 = sprintf('/fs/project/kubatko.2-temp/gao.957/workspace/github_MO/Figures3_4_Scenarios1_2/Scenario2_result_SCITE/SCITE_Ternary_20missing/SCITE_ternary_alpha_0%s_beta_0%s/sub_obs_0_1_tip_alpha_0%s_beta_0%s_node_order_%s.txt',alpha_str, beta_str, alpha_str, beta_str,indexn)
mat_scite_order_form_0_1 = scan(file=scite_order_form_0_1)
# The SCITE output is one flat vector of node chains; every chain starts with
# the root label (the first value), so positions equal to it mark the chain
# boundaries.
root_tree=mat_scite_order_form_0_1[1]
root_index=which(mat_scite_order_form_0_1%in% root_tree)
mat_scite_order_form_0_1_trans=list()
scite_all_match_matchresult= data.frame(matrix(, nrow=0, ncol=2))
# Split the flat vector into one list element per root-to-tip chain.
for (i in 1:length(root_index)){
if(i<length(root_index)){mat_scite_order_form_0_1_trans[[i]]=mat_scite_order_form_0_1[c(root_index[i]:(root_index[i+1]-1))]}
else{mat_scite_order_form_0_1_trans[[i]]=mat_scite_order_form_0_1[c(root_index[i]:length(mat_scite_order_form_0_1))]}
}
# Expand each chain into all ordered (earlier, later) pairs.
mat_scite_order_form_0_1_order_pair=c()
for (i in 1:length(mat_scite_order_form_0_1_trans)){
order_pair=c()
for (j in 1:length(mat_scite_order_form_0_1_trans[[i]])){
# NOTE(review): when j is the last element, (j+1):length yields a DESCENDING
# sequence (j+1, j); the k>j guard drops k==j, and the out-of-range k==j+1
# index produces NA entries that na.omit() strips further below.
for(k in ((j+1):length(mat_scite_order_form_0_1_trans[[i]]))){
if(k>j){order_pair=rbind(order_pair,c((mat_scite_order_form_0_1_trans[[i]][j]),mat_scite_order_form_0_1_trans[[i]][k]))}
}
}
mat_scite_order_form_0_1_order_pair=rbind(mat_scite_order_form_0_1_order_pair,order_pair)
}
# NOTE(review): this reads the loop variable i AFTER the loop ends, so
# exclude_node is the first element (the root label) of the LAST chain --
# the same root value heads every chain, so root pairs are dropped below.
exclude_node = mat_scite_order_form_0_1_trans[[i]][1]
mat_scite_order_form_0_1_order_pair = as.data.frame(mat_scite_order_form_0_1_order_pair)
colnames(mat_scite_order_form_0_1_order_pair)=c("first_br_indexn","second_br_indexn")
mat_scite_order_form_0_1_order_pair = na.omit(mat_scite_order_form_0_1_order_pair)
mat_scite_order_form_0_1_order_pair<-mat_scite_order_form_0_1_order_pair[!(mat_scite_order_form_0_1_order_pair$first_br_indexn == (exclude_node) ),]
mat_scite_order_form_0_1_order_pair<-mat_scite_order_form_0_1_order_pair[!(mat_scite_order_form_0_1_order_pair$second_br_indexn == (exclude_node) ),]
# True (ancestor branch, descendant branch) pairs from the simulated tree.
true_all_branch = compare_all_order(length(sampletr$edge.length),length(sampletr$tip.label),sampletr)
all_true_all_branch = data.frame(matrix(, nrow=0, ncol=2))
# NOTE(review): all_match_all_branch_sub is assigned here but never used.
all_match_all_branch_sub = data.frame(matrix(, nrow=0, ncol=2))
# Flatten the per-branch descendant lists into a two-column pair table.
for (br in 1:length(true_all_branch)){
if(length(true_all_branch[[br]])>0){
for(br_sub in 1:length(true_all_branch[[br]])){
all_true_all_branch = rbind(all_true_all_branch,c(br,(true_all_branch[[br]])[br_sub]))
}
}
}
all_true_all_branch_sub = na.omit(all_true_all_branch)
colnames(all_true_all_branch_sub)= c("first_br_indexn","second_br_indexn")
# Count how many unique inferred pairs also appear among the true pairs
# (plyr::match_df keeps the rows of its first argument that match the second).
unique_mat_scite_order_form_0_1_order_pair=unique(mat_scite_order_form_0_1_order_pair)
for(orderpair in 1:dim(unique_mat_scite_order_form_0_1_order_pair)[1]){
print(c(indexn,orderpair))
matchresult = match_df(all_true_all_branch_sub, unique_mat_scite_order_form_0_1_order_pair[orderpair,])
scite_all_match_matchresult=rbind(scite_all_match_matchresult,matchresult)
}
scite_all_pair_result[indexn,]=c(dim(scite_all_match_matchresult)[1],dim(unique_mat_scite_order_form_0_1_order_pair)[1],dim(all_true_all_branch_sub)[1])
}
colnames(scite_all_pair_result)=c("matched_paired","inferred_paired","true_pair")
# Write the per-tree summary CSV for this (alpha, beta) setting.
scite_all_pair_result_form_0_1 = sprintf('/fs/project/kubatko.2-temp/gao.957/workspace/github_MO/Figures3_4_Scenarios1_2/Scenario2_result_SCITE/SCITE_Ternary_20missing/summary/ternary_all_order_alpha_0%s_beta_0%s.csv', alpha_str, beta_str)
write.csv(scite_all_pair_result,file=scite_all_pair_result_form_0_1)
}
|
/simulation/Figures3_4_Scenarios1_2/Scenario2_result_SCITE/accuracy/Ternary_All_order_20Missing.R
|
no_license
|
DavidSimone/MO
|
R
| false
| false
| 8,955
|
r
|
library(plyr)
library(ape)
# For every branch of a tree, list all of the branches descended from it.
#
# Arguments:
#   number_br   - number of branches in the tree (not used by the computation;
#                 kept for interface compatibility with existing callers)
#   number_cell - number of cells/tips (not used; kept for compatibility)
#   ts          - tree object (e.g. from ape::read.tree) whose $edge matrix has
#                 one row per branch: column 1 = parent node, column 2 = child node
# Returns:
#   a list with one element per branch (row of ts$edge); element i holds the
#   row indices of every branch that descends from branch i (empty for
#   branches ending at a tip)
compare_all_order <- function(number_br, number_cell, ts) {
  # Parent node of `node`, or 0 when `node` has no parent (i.e. it is the root).
  find_ancestor <- function(edge, node) {
    parent <- 0
    for (i in seq_len(nrow(edge))) {
      if (edge[i, 2] == node) {
        parent <- edge[i, 1]
      }
    }
    parent
  }
  # All distinct node labels appearing in the edge matrix, in row-major
  # (edge[1,1], edge[1,2], edge[2,1], ...) first-seen order.
  get_all_nodes <- function(edge) {
    unique(as.vector(t(edge)))
  }
  # The root node: the unique node with no parent.
  # BUG FIX: the original version ended on the `for` loop, which evaluates to
  # NULL, so find_root() always returned NULL; it now returns the root node
  # (NA if, unexpectedly, none is found).
  find_root <- function(edge) {
    root_node <- NA
    for (node in get_all_nodes(edge)) {
      if (find_ancestor(edge, node) == 0) {
        root_node <- node
        break
      }
    }
    root_node
  }
  # The (up to two) children of `parent_node`: first row holds the child
  # nodes, second row the corresponding branch (row) indices. Slots stay 0
  # when `parent_node` is a tip.
  find_child_branches_and_nodes <- function(edge, parent_node) {
    children <- matrix(0, 2, 2)
    slot <- 1
    for (i in seq_len(nrow(edge))) {
      if (edge[i, 1] == parent_node) {
        children[1, slot] <- edge[i, 2]
        children[2, slot] <- i
        slot <- slot + 1
      }
    }
    children
  }
  # Recursively accumulate every branch below `current_edge` (pre-order:
  # the two direct children first, then the left subtree, then the right).
  find_child_branches <- function(edge, current_edge, child_branches) {
    kids <- find_child_branches_and_nodes(edge, edge[current_edge, 2])
    if (kids[1, 1] != 0) {
      # not a leaf: record both child branches, then descend into each
      left_branch <- kids[2, 1]
      right_branch <- kids[2, 2]
      child_branches <- c(child_branches, left_branch, right_branch)
      child_branches <- find_child_branches(edge, left_branch, child_branches)
      child_branches <- find_child_branches(edge, right_branch, child_branches)
    }
    child_branches
  }
  # Descendant branches for every branch in the tree.
  find_all_child_branches <- function(edge) {
    root_node <- find_root(edge)  # located for reference; not needed below
    all_child_branches <- vector("list", nrow(edge))
    for (i in seq_len(nrow(edge))) {
      all_child_branches[[i]] <- find_child_branches(edge, i, integer(0))
    }
    all_child_branches
  }
  find_all_child_branches(ts$edge)
}
# ---------------------------------------------------------------------------
# Compare mutation-order pairs inferred by SCITE against the true branch
# order of each simulated tree, over a grid of (alpha, beta) error settings
# (alpha = false positive rate, beta = false negative rate). One CSV summary
# is written per setting.
# ---------------------------------------------------------------------------
parameter_setting = expand.grid(alpha=c(0.05,0.1,0.2,0.4),
beta =c(0.05,0.1,0.2,0.4))
# NOTE(review): the loop starts at setting 5, not 1 -- presumably resuming a
# partially completed run; confirm settings 1-4 were processed elsewhere.
for(parameterIndex in 5:dim(parameter_setting)[1]){
#get the parameters in each setting
alpha = parameter_setting[parameterIndex,1]#fpr
beta = parameter_setting[parameterIndex,2]#fnr
unit_theta = 10^(-7) #rate from 0 to 1
unit_gamma = 10^(-9) #rate from 0 to 2
unit_mu = 10 ^(-2) #rate from 1 to 2
number_br = 18 #number of branches in a tree
number_cell = 10 #number of cells in a tree
#return alpha and beta string used for data input
# Encode the rates as the short strings used in file names:
# e.g. 0.05 -> "05" and 0.2 -> "2" (the sprintf paths below prepend a "0").
if (alpha < 0.1)
{
alpha_str = sprintf('0%s', alpha*100)
} else
{
alpha_str = sprintf('%s', alpha*10)
}
if (beta < 0.1)
{
beta_str = sprintf('0%s', beta*100)
} else
{
beta_str = sprintf('%s', beta*10)
}
# One row per simulated tree: matched / inferred / true pair counts.
scite_all_pair_result= data.frame(matrix(, nrow=0, ncol=3))
for (indexn in 1:1000){
form = sprintf('/fs/project/kubatko.2-temp/gao.957/DNA_alignment/bowtie2-2.3.4.2-linux-x86_64/reference/Simulation_Setting/SimulateData_EXP10/RandomTree/RandomTree_%s.tre', indexn)
sampletr=read.tree(form)
scite_order_form_0_1 = sprintf('/fs/project/kubatko.2-temp/gao.957/workspace/github_MO/Figures3_4_Scenarios1_2/Scenario2_result_SCITE/SCITE_Ternary_20missing/SCITE_ternary_alpha_0%s_beta_0%s/sub_obs_0_1_tip_alpha_0%s_beta_0%s_node_order_%s.txt',alpha_str, beta_str, alpha_str, beta_str,indexn)
mat_scite_order_form_0_1 = scan(file=scite_order_form_0_1)
# The SCITE output is one flat vector of node chains; every chain starts with
# the root label (the first value), so positions equal to it mark the chain
# boundaries.
root_tree=mat_scite_order_form_0_1[1]
root_index=which(mat_scite_order_form_0_1%in% root_tree)
mat_scite_order_form_0_1_trans=list()
scite_all_match_matchresult= data.frame(matrix(, nrow=0, ncol=2))
# Split the flat vector into one list element per root-to-tip chain.
for (i in 1:length(root_index)){
if(i<length(root_index)){mat_scite_order_form_0_1_trans[[i]]=mat_scite_order_form_0_1[c(root_index[i]:(root_index[i+1]-1))]}
else{mat_scite_order_form_0_1_trans[[i]]=mat_scite_order_form_0_1[c(root_index[i]:length(mat_scite_order_form_0_1))]}
}
# Expand each chain into all ordered (earlier, later) pairs.
mat_scite_order_form_0_1_order_pair=c()
for (i in 1:length(mat_scite_order_form_0_1_trans)){
order_pair=c()
for (j in 1:length(mat_scite_order_form_0_1_trans[[i]])){
# NOTE(review): when j is the last element, (j+1):length yields a DESCENDING
# sequence (j+1, j); the k>j guard drops k==j, and the out-of-range k==j+1
# index produces NA entries that na.omit() strips further below.
for(k in ((j+1):length(mat_scite_order_form_0_1_trans[[i]]))){
if(k>j){order_pair=rbind(order_pair,c((mat_scite_order_form_0_1_trans[[i]][j]),mat_scite_order_form_0_1_trans[[i]][k]))}
}
}
mat_scite_order_form_0_1_order_pair=rbind(mat_scite_order_form_0_1_order_pair,order_pair)
}
# NOTE(review): this reads the loop variable i AFTER the loop ends, so
# exclude_node is the first element (the root label) of the LAST chain --
# the same root value heads every chain, so root pairs are dropped below.
exclude_node = mat_scite_order_form_0_1_trans[[i]][1]
mat_scite_order_form_0_1_order_pair = as.data.frame(mat_scite_order_form_0_1_order_pair)
colnames(mat_scite_order_form_0_1_order_pair)=c("first_br_indexn","second_br_indexn")
mat_scite_order_form_0_1_order_pair = na.omit(mat_scite_order_form_0_1_order_pair)
mat_scite_order_form_0_1_order_pair<-mat_scite_order_form_0_1_order_pair[!(mat_scite_order_form_0_1_order_pair$first_br_indexn == (exclude_node) ),]
mat_scite_order_form_0_1_order_pair<-mat_scite_order_form_0_1_order_pair[!(mat_scite_order_form_0_1_order_pair$second_br_indexn == (exclude_node) ),]
# True (ancestor branch, descendant branch) pairs from the simulated tree.
true_all_branch = compare_all_order(length(sampletr$edge.length),length(sampletr$tip.label),sampletr)
all_true_all_branch = data.frame(matrix(, nrow=0, ncol=2))
# NOTE(review): all_match_all_branch_sub is assigned here but never used.
all_match_all_branch_sub = data.frame(matrix(, nrow=0, ncol=2))
# Flatten the per-branch descendant lists into a two-column pair table.
for (br in 1:length(true_all_branch)){
if(length(true_all_branch[[br]])>0){
for(br_sub in 1:length(true_all_branch[[br]])){
all_true_all_branch = rbind(all_true_all_branch,c(br,(true_all_branch[[br]])[br_sub]))
}
}
}
all_true_all_branch_sub = na.omit(all_true_all_branch)
colnames(all_true_all_branch_sub)= c("first_br_indexn","second_br_indexn")
# Count how many unique inferred pairs also appear among the true pairs
# (plyr::match_df keeps the rows of its first argument that match the second).
unique_mat_scite_order_form_0_1_order_pair=unique(mat_scite_order_form_0_1_order_pair)
for(orderpair in 1:dim(unique_mat_scite_order_form_0_1_order_pair)[1]){
print(c(indexn,orderpair))
matchresult = match_df(all_true_all_branch_sub, unique_mat_scite_order_form_0_1_order_pair[orderpair,])
scite_all_match_matchresult=rbind(scite_all_match_matchresult,matchresult)
}
scite_all_pair_result[indexn,]=c(dim(scite_all_match_matchresult)[1],dim(unique_mat_scite_order_form_0_1_order_pair)[1],dim(all_true_all_branch_sub)[1])
}
colnames(scite_all_pair_result)=c("matched_paired","inferred_paired","true_pair")
# Write the per-tree summary CSV for this (alpha, beta) setting.
scite_all_pair_result_form_0_1 = sprintf('/fs/project/kubatko.2-temp/gao.957/workspace/github_MO/Figures3_4_Scenarios1_2/Scenario2_result_SCITE/SCITE_Ternary_20missing/summary/ternary_all_order_alpha_0%s_beta_0%s.csv', alpha_str, beta_str)
write.csv(scite_all_pair_result,file=scite_all_pair_result_form_0_1)
}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/geom_rootedge.R
\name{geom_rootedge}
\alias{geom_rootedge}
\title{geom_rootedge}
\usage{
geom_rootedge(rootedge = NULL)
}
\arguments{
\item{rootedge}{length of rootedge; use phylo$root.edge if rootedge = NULL (by default).}
}
\value{
ggplot layer
}
\description{
display root edge
}
\author{
Guangchuang Yu
}
|
/man/geom_rootedge.Rd
|
no_license
|
smyang2018/ggtree
|
R
| false
| true
| 387
|
rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/geom_rootedge.R
\name{geom_rootedge}
\alias{geom_rootedge}
\title{geom_rootedge}
\usage{
geom_rootedge(rootedge = NULL)
}
\arguments{
\item{rootedge}{length of rootedge; use phylo$root.edge if rootedge = NULL (by default).}
}
\value{
ggplot layer
}
\description{
display root edge
}
\author{
Guangchuang Yu
}
|
\name{pogc}
\alias{pogc}
\title{
Parent Offspring Group Constructor
}
\description{
Assign offspring to their parents.
}
\usage{
pogc(oh, genotypeError)
}
\arguments{
\item{oh}{ \code{integer} opposing homozygotes matrix (Output of \code{\link{ohg}})
}
\item{genotypeError}{\code{integer}
number of genotyping errors allowed in the \code{oh} matrix
}
}
\value{
Return a data frame with two columns. The first column is the animal ID and the second column is the parent ID.
}
\seealso{
\code{\link{ohg}}, \code{\link{hss}} and \code{\link{rpoh}}
}
\examples{
set.seed(100)
chr <- list()
sire <- list()
set.seed(1)
chr <- list()
for(i in 1:5)
{
chr[[i]] <- .simulateHalfsib(numInd = 20, numSNP = 5000, recbound = 1:10)
sire[[i]] <- ssp(bmh(chr[[i]]), chr[[i]])
sire[[i]] <- sire[[i]][1,] + sire[[i]][2,]
sire[[i]][sire[[i]] == 18] <- 9
}
Genotype <- do.call(rbind, chr)
rownames(Genotype) <- 6:(nrow(Genotype) + 5)
sire <- do.call(rbind, sire)
rownames(sire) <- 1:5
Genotype <- rbind(sire, Genotype)
oh <- ohg(Genotype) # creating the Opposing Homozygote matrix
pogc(oh, 5)
}
\keyword{pedigree}
\keyword{parentage}
|
/fuzzedpackages/hsphase/man/pogc.Rd
|
no_license
|
akhikolla/testpackages
|
R
| false
| false
| 1,132
|
rd
|
\name{pogc}
\alias{pogc}
\title{
Parent Offspring Group Constructor
}
\description{
Assign offspring to their parents.
}
\usage{
pogc(oh, genotypeError)
}
\arguments{
\item{oh}{ \code{integer} opposing homozygotes matrix (Output of \code{\link{ohg}})
}
\item{genotypeError}{\code{integer}
number of genotyping errors allowed in the \code{oh} matrix
}
}
\value{
Return a data frame with two columns. The first column is the animal ID and the second column is the parent ID.
}
\seealso{
\code{\link{ohg}}, \code{\link{hss}} and \code{\link{rpoh}}
}
\examples{
set.seed(100)
chr <- list()
sire <- list()
set.seed(1)
chr <- list()
for(i in 1:5)
{
chr[[i]] <- .simulateHalfsib(numInd = 20, numSNP = 5000, recbound = 1:10)
sire[[i]] <- ssp(bmh(chr[[i]]), chr[[i]])
sire[[i]] <- sire[[i]][1,] + sire[[i]][2,]
sire[[i]][sire[[i]] == 18] <- 9
}
Genotype <- do.call(rbind, chr)
rownames(Genotype) <- 6:(nrow(Genotype) + 5)
sire <- do.call(rbind, sire)
rownames(sire) <- 1:5
Genotype <- rbind(sire, Genotype)
oh <- ohg(Genotype) # creating the Opposing Homozygote matrix
pogc(oh, 5)
}
\keyword{pedigree}
\keyword{parentage}
|
% Generated by roxygen2 (4.1.0): do not edit by hand
% Please edit documentation in R/newnode_org_beamer.R, R/newnode_rmd.R
\name{newnode_rmd}
\alias{newnode_org_beamer}
\alias{newnode_rmd}
\title{newnode_rmd}
\usage{
newnode_org_beamer(indat = NULL, names_col = NULL, in_col = NULL,
out_col = NULL, desc_col = NULL, clusters_col = NULL, todo_col = NULL,
nchar_to_snip = 40)
newnode_rmd(indat = NULL, names_col = NULL, in_col = NULL,
out_col = NULL, desc_col = NULL, clusters_col = NULL, todo_col = NULL,
nchar_to_snip = 40)
}
\arguments{
\item{indat}{the input data.frame}
\item{names_col}{the name of each edge (the boxes)}
\item{in_col}{the name of the nodes that are inputs to each edge with comma-separated values. whitespace will be stripped}
\item{out_col}{the nodes that are outputs of each edge.}
\item{desc_col}{description}
\item{clusters_col}{optional column identifying clusters}
\item{todo_col}{optional column with TODO status (DONE and WONTDO will be white, others are red)}
\item{indat}{the input data.frame}
\item{names_col}{the name of each edge (the boxes)}
\item{in_col}{the name of the nodes that are inputs to each edge with comma-separated values. whitespace will be stripped}
\item{out_col}{the nodes that are outputs of each edge.}
\item{desc_col}{description}
\item{clusters_col}{optional column identifying clusters}
\item{todo_col}{optional column with TODO status (DONE and WONTDO will be white, others are red)}
}
\value{
character string object that has the DOT language representation of the input
character string object that has the DOT language representation of the input
}
\description{
newnode_rmd
newnode_rmd
}
|
/man/newnode_rmd.Rd
|
permissive
|
ivanhanigan/disentangle
|
R
| false
| false
| 1,672
|
rd
|
% Generated by roxygen2 (4.1.0): do not edit by hand
% Please edit documentation in R/newnode_org_beamer.R, R/newnode_rmd.R
\name{newnode_rmd}
\alias{newnode_org_beamer}
\alias{newnode_rmd}
\title{newnode_rmd}
\usage{
newnode_org_beamer(indat = NULL, names_col = NULL, in_col = NULL,
out_col = NULL, desc_col = NULL, clusters_col = NULL, todo_col = NULL,
nchar_to_snip = 40)
newnode_rmd(indat = NULL, names_col = NULL, in_col = NULL,
out_col = NULL, desc_col = NULL, clusters_col = NULL, todo_col = NULL,
nchar_to_snip = 40)
}
\arguments{
\item{indat}{the input data.frame}
\item{names_col}{the name of each edge (the boxes)}
\item{in_col}{the name of the nodes that are inputs to each edge with comma-separated values. whitespace will be stripped}
\item{out_col}{the nodes that are outputs of each edge.}
\item{desc_col}{description}
\item{clusters_col}{optional column identifying clusters}
\item{todo_col}{optional column with TODO status (DONE and WONTDO will be white, others are red)}
\item{indat}{the input data.frame}
\item{names_col}{the name of each edge (the boxes)}
\item{in_col}{the name of the nodes that are inputs to each edge with comma-separated values. whitespace will be stripped}
\item{out_col}{the nodes that are outputs of each edge.}
\item{desc_col}{description}
\item{clusters_col}{optional column identifying clusters}
\item{todo_col}{optional column with TODO status (DONE and WONTDO will be white, others are red)}
}
\value{
character string object that has the DOT language representation of the input
character string object that has the DOT language representation of the input
}
\description{
newnode_rmd
newnode_rmd
}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/traceback.r
\name{try_capture_stack}
\alias{try_capture_stack}
\title{Try, capturing stack on error.}
\usage{
try_capture_stack(quoted_code, env)
}
\arguments{
\item{quoted_code}{code to evaluate, in quoted form}
\item{env}{environment in which to execute code}
}
\description{
This is a variant of \code{\link{tryCatch}} that also captures the call
stack if an error occurs.
}
\keyword{internal}
|
/man/try_capture_stack.Rd
|
no_license
|
lanshiren/evaluate
|
R
| false
| true
| 476
|
rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/traceback.r
\name{try_capture_stack}
\alias{try_capture_stack}
\title{Try, capturing stack on error.}
\usage{
try_capture_stack(quoted_code, env)
}
\arguments{
\item{quoted_code}{code to evaluate, in quoted form}
\item{env}{environment in which to execute code}
}
\description{
This is a variant of \code{\link{tryCatch}} that also captures the call
stack if an error occurs.
}
\keyword{internal}
|
# Invert a binary image: pixels equal to 1 become 0, all other pixels become 1.
#
# Arguments:
#   image1 - a numeric matrix of pixel values (e.g. one channel from
#            png::readPNG)
# Returns:
#   a numeric matrix of the same dimensions with the two colours swapped
#
# Replaces the original element-by-element double loop with a single
# vectorized ifelse(); ifelse() keeps the dim attribute of its test, so the
# matrix shape is preserved, and this also handles a 0-row input (where the
# original's 1:nrow loop would mis-index).
color_exchange <- function(image1) {
  ifelse(image1 == 1, 0, 1)
}
#image1 = readPNG("~/Desktop/Visible/initial1.png")
#image = color_exchange(image1)
#writePNG(image,"~/Desktop/Visible/initial2.png")
|
/R/Color_Exchange 2.R
|
no_license
|
rwang14/abc
|
R
| false
| false
| 345
|
r
|
# Invert a binary image: pixels equal to 1 become 0, all other pixels become 1.
#
# Arguments:
#   image1 - a numeric matrix of pixel values (e.g. one channel from
#            png::readPNG)
# Returns:
#   a numeric matrix of the same dimensions with the two colours swapped
#
# Replaces the original element-by-element double loop with a single
# vectorized ifelse(); ifelse() keeps the dim attribute of its test, so the
# matrix shape is preserved, and this also handles a 0-row input (where the
# original's 1:nrow loop would mis-index).
color_exchange <- function(image1) {
  ifelse(image1 == 1, 0, 1)
}
#image1 = readPNG("~/Desktop/Visible/initial1.png")
#image = color_exchange(image1)
#writePNG(image,"~/Desktop/Visible/initial2.png")
|
# TOOL bwa-mem-paired-end.R: "BWA MEM for paired-end reads" (Aligns reads to genomes using the BWA MEM algorithm. Results are sorted and indexed BAM files, which are ready for viewing in the Chipster genome browser.
# Note that this BWA tool uses publicly available genomes. If you would like to align reads against your own datasets, please use the tool \"BWA MEM for single end reads and own genome\".)
# INPUT reads1.txt: "Paired-end read set 1 to align" TYPE GENERIC
# INPUT reads2.txt: "Paired-end read set 2 to align" TYPE GENERIC
# OUTPUT bwa.bam
# OUTPUT bwa.bam.bai
# OUTPUT bwa.log
# PARAMETER organism: "Genome or transcriptome" TYPE [Arabidopsis_thaliana.TAIR10, Bos_taurus.UMD3.1, Canis_familiaris.CanFam3.1, Drosophila_melanogaster.BDGP6, Felis_catus.Felis_catus_6.2, Gallus_gallus.Galgal4, Gallus_gallus.Gallus_gallus-5.0, Gasterosteus_aculeatus.BROADS1, Halorubrum_lacusprofundi_atcc_49239.ASM2220v1, Homo_sapiens.GRCh37.75, Homo_sapiens.GRCh38, Homo_sapiens_mirna, Medicago_truncatula.MedtrA17_4.0, Mus_musculus.GRCm38, Mus_musculus_mirna, Oryza_sativa.IRGSP-1.0, Ovis_aries.Oar_v3.1, Populus_trichocarpa.JGI2.0, Rattus_norvegicus_mirna, Rattus_norvegicus.Rnor_5.0, Rattus_norvegicus.Rnor_6.0, Schizosaccharomyces_pombe.ASM294v2, Solanum_tuberosum.SolTub_3.0, Sus_scrofa.Sscrofa10.2, Vitis_vinifera.IGGP_12x, Yersinia_enterocolitica_subsp_palearctica_y11.ASM25317v1, Yersinia_pseudotuberculosis_ip_32953_gca_000834295.ASM83429v1] DEFAULT Homo_sapiens.GRCh38 (Genome or transcriptome that you would like to align your reads against.)
# PARAMETER mode: "Data source" TYPE [normal: " Illumina, 454, IonTorrent reads longer than 70 base pairs", pacbio: "PacBio subreads"] DEFAULT normal (Defining the type of reads will instruct the tool to use a predefined set of parameters optimized for that read type.)
# KM 11.11.2014
# check out if the file is compressed and if so unzip it
source(file.path(chipster.common.path, "zip-utils.R"))
unzipIfGZipFile("reads1.txt")
unzipIfGZipFile("reads2.txt")
# bwa
# NOTE(review): the second path component "bwa mem" deliberately contains a
# space so that, once pasted into the shell command below, "mem" becomes the
# bwa subcommand -- confirm this matches the installed tool layout.
bwa.binary <- file.path(chipster.tools.path, "bwa", "bwa mem")
bwa.genome <- file.path(chipster.tools.path, "genomes", "indexes", "bwa", organism)
# Wrap the pipeline in "bash -c '...'" so the stream redirections in
# command.end are interpreted by bash.
command.start <- paste("bash -c '", bwa.binary)
mode.parameters <- ifelse(mode == "pacbio", "-x pacbio", "")
# command ending: SAM to alignment.sam, stderr appended to bwa.log
command.end <- paste(bwa.genome, "reads1.txt reads2.txt 1> alignment.sam 2>> bwa.log'")
# run bwa alignment
bwa.command <- paste(command.start, mode.parameters, command.end)
# Record the exact command line at the top of bwa.log before running it.
echo.command <- paste("echo '", bwa.binary , mode.parameters, bwa.genome, "reads1.txt reads2.txt' > bwa.log" )
#stop(paste('CHIPSTER-NOTE: ', bwa.command))
system(echo.command)
system(bwa.command)
# samtools binary
samtools.binary <- c(file.path(chipster.tools.path, "samtools", "samtools"))
# convert sam to bam
system(paste(samtools.binary, "view -bS alignment.sam -o alignment.bam"))
# sort bam
# NOTE(review): "samtools sort <in.bam> <out.prefix>" is the pre-1.0 samtools
# calling convention; samtools >= 1.3 requires
# "sort -o alignment.sorted.bam alignment.bam". Confirm the bundled version.
system(paste(samtools.binary, "sort alignment.bam alignment.sorted"))
# index bam
system(paste(samtools.binary, "index alignment.sorted.bam"))
# rename result files
system("mv alignment.sorted.bam bwa.bam")
system("mv alignment.sorted.bam.bai bwa.bam.bai")
# Handle output names
#
source(file.path(chipster.common.path, "tool-utils.R"))
# read input names
inputnames <- read_input_definitions()
# Determine base name
base1 <- strip_name(inputnames$reads1.txt)
base2 <- strip_name(inputnames$reads2.txt)
basename <- paired_name(base1, base2)
# Make a matrix of output names
outputnames <- matrix(NA, nrow=2, ncol=2)
outputnames[1,] <- c("bwa.bam", paste(basename, ".bam", sep =""))
outputnames[2,] <- c("bwa.bam.bai", paste(basename, ".bam.bai", sep =""))
# Write output definitions file
write_output_definitions(outputnames)
|
/tools/ngs/R/bwa-mem-paired-end.R
|
permissive
|
edwardtao/chipster-tools
|
R
| false
| false
| 3,708
|
r
|
# TOOL bwa-mem-paired-end.R: "BWA MEM for paired-end reads" (Aligns reads to genomes using the BWA MEM algorithm. Results are sorted and indexed BAM files, which are ready for viewing in the Chipster genome browser.
# Note that this BWA tool uses publicly available genomes. If you would like to align reads against your own datasets, please use the tool \"BWA MEM for single end reads and own genome\".)
# INPUT reads1.txt: "Paired-end read set 1 to align" TYPE GENERIC
# INPUT reads2.txt: "Paired-end read set 2 to align" TYPE GENERIC
# OUTPUT bwa.bam
# OUTPUT bwa.bam.bai
# OUTPUT bwa.log
# PARAMETER organism: "Genome or transcriptome" TYPE [Arabidopsis_thaliana.TAIR10, Bos_taurus.UMD3.1, Canis_familiaris.CanFam3.1, Drosophila_melanogaster.BDGP6, Felis_catus.Felis_catus_6.2, Gallus_gallus.Galgal4, Gallus_gallus.Gallus_gallus-5.0, Gasterosteus_aculeatus.BROADS1, Halorubrum_lacusprofundi_atcc_49239.ASM2220v1, Homo_sapiens.GRCh37.75, Homo_sapiens.GRCh38, Homo_sapiens_mirna, Medicago_truncatula.MedtrA17_4.0, Mus_musculus.GRCm38, Mus_musculus_mirna, Oryza_sativa.IRGSP-1.0, Ovis_aries.Oar_v3.1, Populus_trichocarpa.JGI2.0, Rattus_norvegicus_mirna, Rattus_norvegicus.Rnor_5.0, Rattus_norvegicus.Rnor_6.0, Schizosaccharomyces_pombe.ASM294v2, Solanum_tuberosum.SolTub_3.0, Sus_scrofa.Sscrofa10.2, Vitis_vinifera.IGGP_12x, Yersinia_enterocolitica_subsp_palearctica_y11.ASM25317v1, Yersinia_pseudotuberculosis_ip_32953_gca_000834295.ASM83429v1] DEFAULT Homo_sapiens.GRCh38 (Genome or transcriptome that you would like to align your reads against.)
# PARAMETER mode: "Data source" TYPE [normal: " Illumina, 454, IonTorrent reads longer than 70 base pairs", pacbio: "PacBio subreads"] DEFAULT normal (Defining the type of reads will instruct the tool to use a predefined set of parameters optimized for that read type.)
# KM 11.11.2014
# check out if the file is compressed and if so unzip it
source(file.path(chipster.common.path, "zip-utils.R"))
unzipIfGZipFile("reads1.txt")
unzipIfGZipFile("reads2.txt")
# bwa
# NOTE(review): the second path component "bwa mem" deliberately contains a
# space so that, once pasted into the shell command below, "mem" becomes the
# bwa subcommand -- confirm this matches the installed tool layout.
bwa.binary <- file.path(chipster.tools.path, "bwa", "bwa mem")
bwa.genome <- file.path(chipster.tools.path, "genomes", "indexes", "bwa", organism)
# Wrap the pipeline in "bash -c '...'" so the stream redirections in
# command.end are interpreted by bash.
command.start <- paste("bash -c '", bwa.binary)
mode.parameters <- ifelse(mode == "pacbio", "-x pacbio", "")
# command ending: SAM to alignment.sam, stderr appended to bwa.log
command.end <- paste(bwa.genome, "reads1.txt reads2.txt 1> alignment.sam 2>> bwa.log'")
# run bwa alignment
bwa.command <- paste(command.start, mode.parameters, command.end)
# Record the exact command line at the top of bwa.log before running it.
echo.command <- paste("echo '", bwa.binary , mode.parameters, bwa.genome, "reads1.txt reads2.txt' > bwa.log" )
#stop(paste('CHIPSTER-NOTE: ', bwa.command))
system(echo.command)
system(bwa.command)
# samtools binary
samtools.binary <- c(file.path(chipster.tools.path, "samtools", "samtools"))
# convert sam to bam
system(paste(samtools.binary, "view -bS alignment.sam -o alignment.bam"))
# sort bam
# NOTE(review): "samtools sort <in.bam> <out.prefix>" is the pre-1.0 samtools
# calling convention; samtools >= 1.3 requires
# "sort -o alignment.sorted.bam alignment.bam". Confirm the bundled version.
system(paste(samtools.binary, "sort alignment.bam alignment.sorted"))
# index bam
system(paste(samtools.binary, "index alignment.sorted.bam"))
# rename result files
system("mv alignment.sorted.bam bwa.bam")
system("mv alignment.sorted.bam.bai bwa.bam.bai")
# Handle output names
#
source(file.path(chipster.common.path, "tool-utils.R"))
# read input names
inputnames <- read_input_definitions()
# Determine base name
base1 <- strip_name(inputnames$reads1.txt)
base2 <- strip_name(inputnames$reads2.txt)
basename <- paired_name(base1, base2)
# Make a matrix of output names
outputnames <- matrix(NA, nrow=2, ncol=2)
outputnames[1,] <- c("bwa.bam", paste(basename, ".bam", sep =""))
outputnames[2,] <- c("bwa.bam.bai", paste(basename, ".bam.bai", sep =""))
# Write output definitions file
write_output_definitions(outputnames)
|
#' Plot processed coefficients.
#'
#' Draws the coefficient point estimates with error bars for their bounds,
#' coloured by the "dot_color" significance flag, with predictors on the
#' flipped axis.
#'
#' @param coefficients_processed A data frame with columns predictor, mean,
#'   lower_bound, upper_bound and dot_color.
#' @return A ggplot object with the coefficient estimates.
#' @family plot
#' @export
plot_coefficients_processed <- function(coefficients_processed) {
  # Readability degrades once many predictors share the axis.
  if (nrow(coefficients_processed) > 20) {
    warning("Number of predictors exceeds 20; plot may not render as nicely.")
  }
  base_plot <- ggplot2::ggplot(
    coefficients_processed,
    ggplot2::aes_string(x = "predictor", y = "mean", colour = "dot_color")
  )
  base_plot +
    ggplot2::geom_errorbar(
      ggplot2::aes_string(ymin = "lower_bound", ymax = "upper_bound"),
      width = 0.1
    ) +
    ggplot2::geom_line() +
    ggplot2::geom_point() +
    ggplot2::scale_x_discrete("Predictors") +
    ggplot2::scale_y_continuous("Beta estimates") +
    ggplot2::scale_color_manual(
      "",
      values = c("0" = "grey", "2" = "black"),
      labels = c("0" = "Insignificant", "2" = "Significant")
    ) +
    ggplot2::ggtitle("Estimates of weights") +
    ggplot2::theme_bw() +
    ggplot2::coord_flip()
}
#' Plot gaussian predictions.
#'
#' Scatter plot of actual versus predicted values for a continuous
#' (gaussian) outcome.
#'
#' @param y_true Ground truth (correct) target values.
#' @param y_pred Estimated target values.
#' @return A ggplot object of actual vs. predicted values.
#' @family plot
#' @export
plot_predictions_gaussian <- function(y_true, y_pred) {
  plot_data <- data.frame(
    y_true = y_true,
    y_pred = y_pred,
    stringsAsFactors = FALSE
  )
  ggplot2::ggplot(plot_data, ggplot2::aes(x = y_pred, y = y_true)) +
    ggplot2::geom_point() +
    ggplot2::scale_x_continuous("Predicted y values") +
    ggplot2::scale_y_continuous("True y values") +
    ggplot2::ggtitle("Actual vs. Predicted y values") +
    ggplot2::theme_bw()
}
#' Plot binomial predictions.
#'
#' Computes an ROC curve via \code{pROC::roc} and plots it, annotated with
#' the AUC.
#'
#' @param y_true Ground truth (correct) target values.
#' @param y_pred Estimated target values.
#' @return A ggplot object containing the ROC curve.
#' @family plot
#' @export
plot_predictions_binomial <- function(y_true, y_pred) {
  roc_results <- pROC::roc(y_true, y_pred)
  auc_value <- as.numeric(roc_results$auc)
  auc_label <- paste0("AUC = ", round(auc_value, digits = 3))
  roc_data <- data.frame(
    sensitivities = roc_results$sensitivities,
    one_minus_specificities = 1 - roc_results$specificities,
    stringsAsFactors = FALSE
  )
  ggplot2::ggplot(roc_data, ggplot2::aes_string(x = "one_minus_specificities", y = "sensitivities")) +
    ggplot2::geom_path(alpha = 1, size = 1) +
    # Dashed diagonal = no-skill reference line.
    ggplot2::geom_segment(ggplot2::aes(x = 0, y = 0, xend = 1, yend = 1) , linetype = "dashed") +
    ggplot2::annotate("text", label = auc_label, x = 0.85, y = 0.025, size = 8) +
    ggplot2::scale_x_continuous("1 - Specificity") +
    ggplot2::scale_y_continuous("Sensitivity") +
    ggplot2::ggtitle("ROC Curve") +
    ggplot2::theme_bw()
}
#' Plot mean squared error metrics.
#'
#' Histogram of MSE values with a dotted vertical line and a text label
#' marking the mean MSE.
#'
#' @param mses A numeric vector of mean squared error values.
#' @return A ggplot object with the distribution of MSEs.
#' @family plot
#' @export
plot_metrics_gaussian_mean_squared_error <- function(mses) {
  average_mse <- mean(mses)
  average_label <- paste0("Mean MSE = ", round(average_mse, digits = 3))
  plot_data <- data.frame(mses = mses, stringsAsFactors = FALSE)
  ggplot2::ggplot(plot_data, ggplot2::aes(x = mses)) +
    ggplot2::geom_histogram(binwidth = 0.02) +
    ggplot2::geom_vline(xintercept = average_mse, linetype = "dotted") +
    ggplot2::annotate("text", label = average_label, x = 0.2, y = 0.2, size = 8) +
    ggplot2::scale_x_continuous("MSE") +
    ggplot2::scale_y_continuous("Frequency", label = scales::comma) +
    ggplot2::ggtitle("Distribution of MSEs") +
    ggplot2::theme_bw()
}
#' Plot R^2 metrics.
#'
#' Histogram of R^2 scores on [0, 1] with a dotted vertical line and a text
#' label marking the mean score.
#'
#' @param r2_scores A numeric vector of R^2 scores.
#' @return A ggplot object with the distribution of R^2 scores.
#' @family plot
#' @export
plot_metrics_gaussian_r2_score <- function(r2_scores) {
  average_r2 <- mean(r2_scores)
  average_label <- paste0("Mean R^2 Score = ", round(average_r2, digits = 3))
  plot_data <- data.frame(r2_scores = r2_scores, stringsAsFactors = FALSE)
  ggplot2::ggplot(plot_data, ggplot2::aes(x = r2_scores)) +
    ggplot2::geom_histogram(binwidth = 0.02) +
    ggplot2::geom_vline(xintercept = average_r2, linetype = "dotted") +
    ggplot2::annotate("text", label = average_label, x = 0.2, y = 0.2, size = 8) +
    ggplot2::scale_x_continuous("R^2 Score", limits = c(0, 1)) +
    ggplot2::scale_y_continuous("Frequency", label = scales::comma) +
    ggplot2::ggtitle("Distribution of R^2 Scores") +
    ggplot2::theme_bw()
}
#' Plot AUC metrics.
#'
#' Histogram of AUC values on [0, 1] with a dotted vertical line and a text
#' label marking the mean AUC.
#'
#' @param aucs A numeric vector of AUC values.
#' @return A ggplot object with the distribution of AUCs.
#' @family plot
#' @export
plot_metrics_binomial_area_under_curve <- function(aucs) {
  average_auc <- mean(aucs)
  average_label <- paste0("Mean AUC = ", round(average_auc, digits = 3))
  plot_data <- data.frame(aucs = aucs, stringsAsFactors = FALSE)
  ggplot2::ggplot(plot_data, ggplot2::aes(x = aucs)) +
    ggplot2::geom_histogram(binwidth = 0.02) +
    ggplot2::geom_vline(xintercept = average_auc, linetype = "dotted") +
    ggplot2::annotate("text", label = average_label, x = 0.2, y = 0.2, size = 8) +
    ggplot2::scale_x_continuous("AUC", limits = c(0, 1)) +
    ggplot2::scale_y_continuous("Frequency", label = scales::comma) +
    ggplot2::ggtitle("Distribution of AUCs") +
    ggplot2::theme_bw()
}
|
/R/R/plot.R
|
permissive
|
youngahn/easyml
|
R
| false
| false
| 5,125
|
r
|
#' Plot processed coefficients.
#'
#' Draws the coefficient point estimates with error bars for their bounds,
#' coloured by the "dot_color" significance flag, with predictors on the
#' flipped axis.
#'
#' @param coefficients_processed A data frame with columns predictor, mean,
#'   lower_bound, upper_bound and dot_color.
#' @return A ggplot object with the coefficient estimates.
#' @family plot
#' @export
plot_coefficients_processed <- function(coefficients_processed) {
  # Readability degrades once many predictors share the axis.
  if (nrow(coefficients_processed) > 20) {
    warning("Number of predictors exceeds 20; plot may not render as nicely.")
  }
  base_plot <- ggplot2::ggplot(
    coefficients_processed,
    ggplot2::aes_string(x = "predictor", y = "mean", colour = "dot_color")
  )
  base_plot +
    ggplot2::geom_errorbar(
      ggplot2::aes_string(ymin = "lower_bound", ymax = "upper_bound"),
      width = 0.1
    ) +
    ggplot2::geom_line() +
    ggplot2::geom_point() +
    ggplot2::scale_x_discrete("Predictors") +
    ggplot2::scale_y_continuous("Beta estimates") +
    ggplot2::scale_color_manual(
      "",
      values = c("0" = "grey", "2" = "black"),
      labels = c("0" = "Insignificant", "2" = "Significant")
    ) +
    ggplot2::ggtitle("Estimates of weights") +
    ggplot2::theme_bw() +
    ggplot2::coord_flip()
}
#' Plot gaussian predictions.
#'
#' Scatter plot of actual versus predicted values for a continuous
#' (gaussian) outcome.
#'
#' @param y_true Ground truth (correct) target values.
#' @param y_pred Estimated target values.
#' @return A ggplot object of actual vs. predicted values.
#' @family plot
#' @export
plot_predictions_gaussian <- function(y_true, y_pred) {
  plot_data <- data.frame(
    y_true = y_true,
    y_pred = y_pred,
    stringsAsFactors = FALSE
  )
  ggplot2::ggplot(plot_data, ggplot2::aes(x = y_pred, y = y_true)) +
    ggplot2::geom_point() +
    ggplot2::scale_x_continuous("Predicted y values") +
    ggplot2::scale_y_continuous("True y values") +
    ggplot2::ggtitle("Actual vs. Predicted y values") +
    ggplot2::theme_bw()
}
#' Plot binomial predictions.
#'
#' Computes an ROC curve via \code{pROC::roc} and plots it, annotated with
#' the AUC.
#'
#' @param y_true Ground truth (correct) target values.
#' @param y_pred Estimated target values.
#' @return A ggplot object containing the ROC curve.
#' @family plot
#' @export
plot_predictions_binomial <- function(y_true, y_pred) {
  roc_results <- pROC::roc(y_true, y_pred)
  auc_value <- as.numeric(roc_results$auc)
  auc_label <- paste0("AUC = ", round(auc_value, digits = 3))
  roc_data <- data.frame(
    sensitivities = roc_results$sensitivities,
    one_minus_specificities = 1 - roc_results$specificities,
    stringsAsFactors = FALSE
  )
  ggplot2::ggplot(roc_data, ggplot2::aes_string(x = "one_minus_specificities", y = "sensitivities")) +
    ggplot2::geom_path(alpha = 1, size = 1) +
    # Dashed diagonal = no-skill reference line.
    ggplot2::geom_segment(ggplot2::aes(x = 0, y = 0, xend = 1, yend = 1) , linetype = "dashed") +
    ggplot2::annotate("text", label = auc_label, x = 0.85, y = 0.025, size = 8) +
    ggplot2::scale_x_continuous("1 - Specificity") +
    ggplot2::scale_y_continuous("Sensitivity") +
    ggplot2::ggtitle("ROC Curve") +
    ggplot2::theme_bw()
}
#' Plot mean squared error metrics.
#'
#' Histogram of MSE values with a dotted vertical line and a text label
#' marking the mean MSE.
#'
#' @param mses A numeric vector of mean squared error values.
#' @return A ggplot object with the distribution of MSEs.
#' @family plot
#' @export
plot_metrics_gaussian_mean_squared_error <- function(mses) {
  average_mse <- mean(mses)
  average_label <- paste0("Mean MSE = ", round(average_mse, digits = 3))
  plot_data <- data.frame(mses = mses, stringsAsFactors = FALSE)
  ggplot2::ggplot(plot_data, ggplot2::aes(x = mses)) +
    ggplot2::geom_histogram(binwidth = 0.02) +
    ggplot2::geom_vline(xintercept = average_mse, linetype = "dotted") +
    ggplot2::annotate("text", label = average_label, x = 0.2, y = 0.2, size = 8) +
    ggplot2::scale_x_continuous("MSE") +
    ggplot2::scale_y_continuous("Frequency", label = scales::comma) +
    ggplot2::ggtitle("Distribution of MSEs") +
    ggplot2::theme_bw()
}
#' Plot R^2 metrics.
#'
#' Histogram of R^2 scores on [0, 1] with a dotted vertical line and a text
#' label marking the mean score.
#'
#' @param r2_scores A numeric vector of R^2 scores.
#' @return A ggplot object with the distribution of R^2 scores.
#' @family plot
#' @export
plot_metrics_gaussian_r2_score <- function(r2_scores) {
  average_r2 <- mean(r2_scores)
  average_label <- paste0("Mean R^2 Score = ", round(average_r2, digits = 3))
  plot_data <- data.frame(r2_scores = r2_scores, stringsAsFactors = FALSE)
  ggplot2::ggplot(plot_data, ggplot2::aes(x = r2_scores)) +
    ggplot2::geom_histogram(binwidth = 0.02) +
    ggplot2::geom_vline(xintercept = average_r2, linetype = "dotted") +
    ggplot2::annotate("text", label = average_label, x = 0.2, y = 0.2, size = 8) +
    ggplot2::scale_x_continuous("R^2 Score", limits = c(0, 1)) +
    ggplot2::scale_y_continuous("Frequency", label = scales::comma) +
    ggplot2::ggtitle("Distribution of R^2 Scores") +
    ggplot2::theme_bw()
}
#' Plot AUC metrics.
#'
#' Histogram of AUC values on [0, 1] with a dotted vertical line and a text
#' label marking the mean AUC.
#'
#' @param aucs A numeric vector of AUC values.
#' @return A ggplot object with the distribution of AUCs.
#' @family plot
#' @export
plot_metrics_binomial_area_under_curve <- function(aucs) {
  average_auc <- mean(aucs)
  average_label <- paste0("Mean AUC = ", round(average_auc, digits = 3))
  plot_data <- data.frame(aucs = aucs, stringsAsFactors = FALSE)
  ggplot2::ggplot(plot_data, ggplot2::aes(x = aucs)) +
    ggplot2::geom_histogram(binwidth = 0.02) +
    ggplot2::geom_vline(xintercept = average_auc, linetype = "dotted") +
    ggplot2::annotate("text", label = average_label, x = 0.2, y = 0.2, size = 8) +
    ggplot2::scale_x_continuous("AUC", limits = c(0, 1)) +
    ggplot2::scale_y_continuous("Frequency", label = scales::comma) +
    ggplot2::ggtitle("Distribution of AUCs") +
    ggplot2::theme_bw()
}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/getdata.R
\name{get_index}
\alias{get_index}
\title{Get a list of GUIDs}
\usage{
get_index(base_url, api_key)
}
\arguments{
\item{base_url}{The base URL of the Junar service}
\item{api_key}{The user's API key for the Junar service}
}
\description{
Get a list of all the available GUIDs with datasets or views from the
base URL.
}
\keyword{GUID}
|
/man/get_index.Rd
|
no_license
|
samuelsoc/junr
|
R
| false
| true
| 425
|
rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/getdata.R
\name{get_index}
\alias{get_index}
\title{Get a list of GUIDs}
\usage{
get_index(base_url, api_key)
}
\arguments{
\item{base_url}{The base URL of the Junar service}
\item{api_key}{The user's API key for the Junar service}
}
\description{
Get a list of all the available GUIDs with datasets or views from the
base URL.
}
\keyword{GUID}
|
library(ggplot2)
#------------------Histogram and Density Charts
# NOTE(review): this script relies on a `movies` data frame (with
# BudgetMillions and Genre columns) already being present in the
# workspace -- it is not loaded here; confirm where it comes from.
s <-ggplot(data=movies, aes(x=BudgetMillions))
s + geom_histogram(binwidth=10) #To construct a histogram, the first step is to "bin" (or "bucket") the range of values-that is, divide the entire range of values into a series of intervals-and then count how many values fall into each interval.
#The bins are usually specified as consecutive, non-overlapping intervals of a variable.
#still doubt how 'binwidth' works.
#add colour
s + geom_histogram(binwidth=20, fill="green") #'fill' is used to give the color to bars(not 'color').
s + geom_histogram(binwidth=20, aes(fill=Genre)) # fill mapped to Genre -> stacked bars coloured per genre
#add a border
s + geom_histogram(binwidth=10, aes(fill=Genre), color="black") #'color' here gives the color to the boundaries of the graph.
#sometimes you may need density charts
s+ geom_density(aes(fill=Genre))
s+ geom_density(aes(fill=Genre), position='stack') #"position" allows stacking up of data which makes it distinctive.
#for making charts or histogram of only one genre:-
#ggplot(data=movies[movies$BudgetMillions > 30 & movies$BudgetMillions < 80 & movies$Genre == "Action",], aes(x=BudgetMillions)) + geom_density(aes(fill=Genre), position = "stack")
#Where we set budget range from 30$ millions to 80$ millions and movies genre action.
|
/Section-5/Histogram$densityCharts.R
|
no_license
|
akrai37/R
|
R
| false
| false
| 1,330
|
r
|
library(ggplot2)
#------------------Histogram and Density Charts
# NOTE(review): this script relies on a `movies` data frame (with
# BudgetMillions and Genre columns) already being present in the
# workspace -- it is not loaded here; confirm where it comes from.
s <-ggplot(data=movies, aes(x=BudgetMillions))
s + geom_histogram(binwidth=10) #To construct a histogram, the first step is to "bin" (or "bucket") the range of values-that is, divide the entire range of values into a series of intervals-and then count how many values fall into each interval.
#The bins are usually specified as consecutive, non-overlapping intervals of a variable.
#still doubt how 'binwidth' works.
#add colour
s + geom_histogram(binwidth=20, fill="green") #'fill' is used to give the color to bars(not 'color').
s + geom_histogram(binwidth=20, aes(fill=Genre)) # fill mapped to Genre -> stacked bars coloured per genre
#add a border
s + geom_histogram(binwidth=10, aes(fill=Genre), color="black") #'color' here gives the color to the boundaries of the graph.
#sometimes you may need density charts
s+ geom_density(aes(fill=Genre))
s+ geom_density(aes(fill=Genre), position='stack') #"position" allows stacking up of data which makes it distinctive.
#for making charts or histogram of only one genre:-
#ggplot(data=movies[movies$BudgetMillions > 30 & movies$BudgetMillions < 80 & movies$Genre == "Action",], aes(x=BudgetMillions)) + geom_density(aes(fill=Genre), position = "stack")
#Where we set budget range from 30$ millions to 80$ millions and movies genre action.
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/DBConnection.R
\name{dbSendQuery}
\alias{dbSendQuery}
\title{Execute a statement on a given database connection.}
\usage{
dbSendQuery(conn, statement, ...)
}
\arguments{
\item{conn}{A \code{\linkS4class{DBIConnection}} object, as produced by
\code{\link{dbConnect}}.}
\item{statement}{a character vector of length 1 containing SQL.}
\item{...}{Other parameters passed on to methods.}
}
\value{
An object that inherits from \code{\linkS4class{DBIResult}}.
If the statement generates output (e.g., a \code{SELECT} statement) the
result set can be used with \code{\link{dbFetch}} to extract records.
Once you have finished using a result, make sure to disconnect it
with \code{\link{dbClearResult}}.
}
\description{
The function \code{dbSendQuery} only submits and synchronously executes the
SQL statement to the database engine. It does \emph{not} extract any
records --- for that you need to use the function \code{\link{dbFetch}}, and
then you must call \code{\link{dbClearResult}} when you finish fetching the
records you need. For interactive use, you should almost always prefer
\code{\link{dbGetQuery}}.
}
\section{Side Effects}{
The statement is submitted to the database server and the DBMS executes the
statement, possibly generating vast amounts of data. Where these data live
is driver-specific: some drivers may choose to leave the output on the server
and transfer them piecemeal to R, others may transfer all the data to the
client -- but not necessarily to the memory that R manages. See individual
drivers \code{dbSendQuery} documentation for details.
}
\examples{
if (require("RSQLite")) {
con <- dbConnect(RSQLite::SQLite(), ":memory:")
dbWriteTable(con, "mtcars", mtcars)
res <- dbSendQuery(con, "SELECT * FROM mtcars WHERE cyl = 4;")
dbFetch(res)
dbClearResult(res)
dbDisconnect(con)
}
}
\seealso{
Other connection methods: \code{\link{dbDisconnect}},
\code{\link{dbExecute}}, \code{\link{dbExistsTable}},
\code{\link{dbGetChunkedQuery}},
\code{\link{dbGetException}}, \code{\link{dbGetQuery}},
\code{\link{dbListFields}}, \code{\link{dbListResults}},
\code{\link{dbListTables}}, \code{\link{dbReadTable}},
\code{\link{dbRemoveTable}}
}
|
/man/dbSendQuery.Rd
|
no_license
|
bborgesr/DBI
|
R
| false
| true
| 2,261
|
rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/DBConnection.R
\name{dbSendQuery}
\alias{dbSendQuery}
\title{Execute a statement on a given database connection.}
\usage{
dbSendQuery(conn, statement, ...)
}
\arguments{
\item{conn}{A \code{\linkS4class{DBIConnection}} object, as produced by
\code{\link{dbConnect}}.}
\item{statement}{a character vector of length 1 containing SQL.}
\item{...}{Other parameters passed on to methods.}
}
\value{
An object that inherits from \code{\linkS4class{DBIResult}}.
If the statement generates output (e.g., a \code{SELECT} statement) the
result set can be used with \code{\link{dbFetch}} to extract records.
Once you have finished using a result, make sure to disconnect it
with \code{\link{dbClearResult}}.
}
\description{
The function \code{dbSendQuery} only submits and synchronously executes the
SQL statement to the database engine. It does \emph{not} extract any
records --- for that you need to use the function \code{\link{dbFetch}}, and
then you must call \code{\link{dbClearResult}} when you finish fetching the
records you need. For interactive use, you should almost always prefer
\code{\link{dbGetQuery}}.
}
\section{Side Effects}{
The statement is submitted to the database server and the DBMS executes the
statement, possibly generating vast amounts of data. Where these data live
is driver-specific: some drivers may choose to leave the output on the server
and transfer them piecemeal to R, others may transfer all the data to the
client -- but not necessarily to the memory that R manages. See individual
drivers \code{dbSendQuery} documentation for details.
}
\examples{
if (require("RSQLite")) {
con <- dbConnect(RSQLite::SQLite(), ":memory:")
dbWriteTable(con, "mtcars", mtcars)
res <- dbSendQuery(con, "SELECT * FROM mtcars WHERE cyl = 4;")
dbFetch(res)
dbClearResult(res)
dbDisconnect(con)
}
}
\seealso{
Other connection methods: \code{\link{dbDisconnect}},
\code{\link{dbExecute}}, \code{\link{dbExistsTable}},
\code{\link{dbGetChunkedQuery}},
\code{\link{dbGetException}}, \code{\link{dbGetQuery}},
\code{\link{dbListFields}}, \code{\link{dbListResults}},
\code{\link{dbListTables}}, \code{\link{dbReadTable}},
\code{\link{dbRemoveTable}}
}
|
library(dplyr)
library(readr)
library(stringr)
library(sbtools)
library(whisker)
#Main directory should be: ~/ds-pipelines-targets-1
# Fetch -> process -> visualize pipeline.  Each phase sources its helper
# script from <phase>/src and writes outputs to <phase>/out (outDir is a
# relative path).  The setwd() calls chain, so the script must be started
# from the repository root.
#1_fetch----
setwd('./1_fetch/src')
source('getData.R')
# getData() is defined in getData.R; outputs go to 1_fetch/out.
data <- getData(outDir = '../out')
#2_process----
setwd('../../2_process/src/')
source('processData.R')
# Prepare the data for plotting and save to file
data <- processData(data,
                    outDir = '../out/')
# Summary table (presumably RMSE values, per the variable name) written
# to 2_process/out -- confirm against writeSummary() in processData.R.
RMSEs <- writeSummary(data,
                      outDir = '../out/')
#3_visualize----
setwd('../../3_visualize/src/')
source('plotDiagnostics.R')
plotDiagnostics(data, outDir = '../out/')
|
/main.R
|
no_license
|
jds485/ds-pipelines-targets-1
|
R
| false
| false
| 602
|
r
|
library(dplyr)
library(readr)
library(stringr)
library(sbtools)
library(whisker)
#Main directory should be: ~/ds-pipelines-targets-1
# Fetch -> process -> visualize pipeline.  Each phase sources its helper
# script from <phase>/src and writes outputs to <phase>/out (outDir is a
# relative path).  The setwd() calls chain, so the script must be started
# from the repository root.
#1_fetch----
setwd('./1_fetch/src')
source('getData.R')
# getData() is defined in getData.R; outputs go to 1_fetch/out.
data <- getData(outDir = '../out')
#2_process----
setwd('../../2_process/src/')
source('processData.R')
# Prepare the data for plotting and save to file
data <- processData(data,
                    outDir = '../out/')
# Summary table (presumably RMSE values, per the variable name) written
# to 2_process/out -- confirm against writeSummary() in processData.R.
RMSEs <- writeSummary(data,
                      outDir = '../out/')
#3_visualize----
setwd('../../3_visualize/src/')
source('plotDiagnostics.R')
plotDiagnostics(data, outDir = '../out/')
|
library(dplyr)
library(ggplot2)
# NOTE(review): absolute, machine-specific working directory -- the CSV
# files below are read relative to it and this breaks on any other machine.
setwd("/Users/WadieAlbakri/Desktop/Máster BME/Módulos/Módulo 1/R/practica_1")
#directorio_Melendez <- getwd()
#setwd(directorio_Melendez)
'Ejercicio 1'
# • Load the data set into a variable called "titanic".
titanic <- read.table("train.csv",
                      header = TRUE,
                      sep = ",",
                      stringsAsFactors = FALSE,
                      dec = ".")
# • Number of rows and columns of the data set.
dim(titanic)
# • First 10 observations, to see what the data look like.
head(titanic,10)
# • List the variables in the data set and their types.
str(titanic)
# • Cardinality ratio of the categorical variables
#   (sex, embarked, survived, pclass): unique values / total values.
length(unique(titanic$Sex))/length(titanic$Sex)
length(unique(titanic$Embarked))/length(titanic$Embarked)
length(unique(titanic$Survived))/length(titanic$Survived)
length(unique(titanic$Pclass))/length(titanic$Pclass)
# • Basic summary statistics of the variables.
summary(titanic)
'Ejercicio 2'
# • Report which variables contain NA, NULL or empty ("") values.
# The empty-string scan is limited to columns 7:12, matching the original
# loop (the only columns of this data set where blanks can occur).
# Vectorised with vapply(); replaces the original vector-growing loops.
empties_per_col <- vapply(7:12, function(i) sum(titanic[, i] == ""), numeric(1))
variables_NULL <- colnames(titanic)[7:12][empties_per_col != 0]
# Columns containing at least one NA, in column order.
variables_NA <- colnames(titanic)[vapply(titanic, function(col) any(is.na(col)), logical(1))]
variables_NA_NULL <- c(variables_NA, variables_NULL)
print(paste0("Variable con NA, NULL o vacíos: ", variables_NA_NULL))
rm(empties_per_col, variables_NA, variables_NULL)
# ... and their proportion within each variable, as a percentage.
Porcentajes <- c(
  Porcent_NA_Age = sum(is.na(titanic$Age)) / nrow(titanic) * 100,
  Porcent_NULL_Embarked = sum(titanic$Embarked == "") / nrow(titanic) * 100,
  Porcent_NULL_Cabin = sum(titanic$Cabin == "") / nrow(titanic) * 100
)
Porcentajes
#apply(is.na(titanic), 2, mean) # column-wise share of NAs, for reference
# • Impute the column mean for numeric variables and "No disponible" for
#   strings.  Vectorised logical-index assignment replaces the original
#   element-wise for-loops; results are identical.
media_AGE <- mean(titanic$Age, na.rm = TRUE)
titanic$Age[is.na(titanic$Age)] <- media_AGE
rm(media_AGE)
no_disponible <- "No disponible"
titanic$Cabin[titanic$Cabin == ""] <- no_disponible
titanic$Embarked[titanic$Embarked == ""] <- no_disponible
rm(no_disponible)
# • Re-check for NAs and empty strings to verify the imputation worked.
verificacion <- c(
  Na_Age = any(is.na(titanic$Age)),
  Null_Cabin = sum(titanic$Cabin == ""),
  Null_Embarked = sum(titanic$Embarked == "")
)
verificacion
rm(verificacion)
'Ejercicio 3'
# • Did more women or men die?
Survived_titanic <- as.numeric(titanic$Survived)
Sex_titanic <- as.character(titanic$Sex)
sexo_superviviente <- data.frame(Supervivientes = Survived_titanic, Sexo= Sex_titanic)
# Count of male deaths (Survived == 0).
hombre_moridos <- sexo_superviviente %>%
  filter(Supervivientes == 0 & Sexo == 'male')%>%
  summarise(Hombres_caidos = n())
# Count of female deaths.
mujeres_moridas <- sexo_superviviente %>%
  filter(Supervivientes == 0 & Sexo == 'female')%>%
  summarise(Mujeres_caidas = n())
# Same result computed in a single pipe, printed for comparison.
sexo_superviviente %>%
  arrange(Sexo) %>%
  group_by(Sexo) %>%
  filter(Supervivientes == 0) %>%
  summarise(Defunciones = n())
if( hombre_moridos[1,1] > mujeres_moridas[1,1]){
  print(paste0("Fallecieron más hombres que mujeres total: ", hombre_moridos[1,1]))
}else{
  print(paste0("Fallecieron más mujeres que hombres total", mujeres_moridas[1,1]))
}
# (As a percentage of each gender's total.)
total_hombres <- sexo_superviviente %>%
  filter(Sexo == 'male') %>%
  summarise(Nº_total_hombres = n())
total_mujeres <- sexo_superviviente %>%
  filter(Sexo == 'female') %>%
  summarise(Nº_total_mujeres = n())
Porcentaje_fallecidos <- data.frame(Porcent_dead_men = (hombre_moridos[1,1]/total_hombres[1,1])*100, Porcent_dead_Women = (mujeres_moridas[1,1]/total_mujeres[1,1])*100)
Porcentaje_fallecidos
rm(hombre_moridos, mujeres_moridas, sexo_superviviente, Survived_titanic, Sex_titanic)
rm(total_hombres, total_mujeres)
# • Which passenger class (first, second or third) survived the most?
#   Report only the winning class and its percentage (not all three).
# Keep only the columns of interest.
filtro_titanic <- data.frame(titanic$Pclass, titanic$Survived)
# Counts of survivors/deaths per class.
filtro1 <- data.frame(filtro_titanic %>%
  arrange(titanic.Pclass) %>%
  group_by(titanic.Pclass, titanic.Survived) %>%
  summarise(Conteo=n()))
# Total number of passengers per class.
pasajeros_total_clase <- data.frame(filtro1 %>%
                                      group_by(Clase = titanic.Pclass) %>%
                                      summarise(Total_por_clase = sum(Conteo)))
# Number of survivors per class.
supervivientes_por_clase <- data.frame(filtro1 %>%
                                         filter(titanic.Survived == 1) %>%
                                         select(Clase = titanic.Pclass, nº_Supervivientes = Conteo))
# Attach the survivor counts so the percentage can be computed below.
pasajeros_total_clase$nº_Supervivientes <- cbind(supervivientes_por_clase$nº_Supervivientes)
pasajeros_total_clase %>%
  mutate(Proporcion_supervivientes = (nº_Supervivientes/Total_por_clase)*100) %>%
  select(Clase, Proporcion_supervivientes) %>%
  mutate(rank = rank(desc(Proporcion_supervivientes))) %>%
  filter(rank == 1)
rm(filtro_titanic, filtro1, pasajeros_total_clase, supervivientes_por_clase)
# • Mean and maximum age of the survivors in each class.
titanic %>%
  arrange(Pclass) %>%
  group_by(Pclass) %>%
  filter(Survived == 1) %>%
  summarise(Edad_media=mean(Age),Edad_maxima= max(Age))
'Ejercicio 4'
# • Which embarkation port has the cheapest mean ticket price?
#   Report only that port, not the whole list.
titanic %>%
  group_by(Puerto = Embarked) %>%
  summarise(Media_billete = mean(Fare)) %>%
  mutate(Orden = rank(Media_billete)) %>%
  filter(Orden == 1) %>%
  select(Puerto, Media_billete)
# • Correlation between the length of a passenger's name and the fare.
#   The name is used exactly as it comes (no modification).
titanic %>%
  select(Fare, Name) %>%
  mutate(Longitud_nombre = nchar(Name)) %>%
  select(Fare,Longitud_nombre) %>%
  mutate(cor(Longitud_nombre, Fare)) %>%
  arrange(desc(Longitud_nombre))
'Ejercicio 5'
# • Names of the passengers who did not survive and whose ticket price is
#   in the top decile.
titanic %>%
  filter(Survived == 0 & Fare >= (quantile(Fare, probs = 0.9))) %>%
  select(Name)
# • In which cabin should a man aged 30-40 stay to maximise survival odds?
#   The "No disponible" placeholder is excluded -- expected answer: E25.
titanic %>% # survivors per cabin, excluding 'No disponible'
  select(Sex, Age, Cabin, Survived) %>%
  filter(Sex == 'male' & Cabin != 'No disponible' & Age >= 30 & Age <= 40 & Survived == 1) %>%
  group_by(Cabin) %>%
  summarise(Cabina_reps = n()) %>%
  arrange(desc(Cabina_reps)) %>%
  mutate(rank = rank(desc(Cabina_reps))) %>%
  filter(rank == 1)
'Ejercicio 6 '
'------------------------------------------'
#setwd("/Users/WadieAlbakri/Desktop/Máster BME/Módulos/Módulo 1/R/practica_1")
#directorio_Melendez <- getwd()
#setwd(directorio_Melendez)
# Daily per-ticker price data; the second column is renamed to "Fecha".
ibex_data <- read.table("ibex_data.csv",
                        header = TRUE,
                        sep = ",",
                        stringsAsFactors = FALSE,
                        dec = ".")
names(ibex_data)[2] <- "Fecha"
capital_inicial <- 30000   # capital invested at the open of every session
prima_broker <- 0.0003     # broker fee rate applied to each buy and to each sell
'Algoritmo de inversión - lógica'
# Number of shares bought at the open with the full capital.
ibex_data$Acciones_apertura <- capital_inicial/ibex_data$open
# Exit value of the position, vectorised (replaces the original row loop;
# also avoids the 1:length() trap on an empty table).  Priority order as
# in the original: 1) stop-loss at open - 0.10, else 2) take-profit at
# open + 0.03, else 3) liquidate at the close.
ibex_data$low_high_close <- ifelse(
  ibex_data$low <= ibex_data$open - 0.10,
  ibex_data$Acciones_apertura * (ibex_data$open - 0.10),
  ifelse(
    ibex_data$high >= ibex_data$open + 0.03,
    ibex_data$Acciones_apertura * (ibex_data$open + 0.03),
    ibex_data$Acciones_apertura * ibex_data$close
  )
)
ibex_data$comision_compra <- prima_broker*capital_inicial
ibex_data$comision_venta <- prima_broker*ibex_data$low_high_close
ibex_data$comision_total <- ibex_data$comision_compra + ibex_data$comision_venta
ibex_data$beneficio_dia_ticker <- ibex_data$low_high_close - capital_inicial - ibex_data$comision_total
# Drop tickers with fewer than 30 observations.
tickers_out <- data.frame(ibex_data %>%
                            select(ticker) %>%
                            group_by(ticker) %>%
                            summarise(numero_datos = n()) %>%
                            filter(numero_datos < 30))
vector_tickers <- tickers_out[, 1]
# BUG FIX: the original compared with `!=` against a vector, which recycles
# the right-hand side element-wise and drops the wrong rows; %in% removes
# exactly the rows whose id appears in the short-history ticker list.
# NOTE(review): this filters on column X while the counts above group by
# `ticker` -- confirm that X holds the same ticker identifiers.
ibex_data <- ibex_data[!(ibex_data$X %in% vector_tickers), ]
rm(tickers_out, vector_tickers)
'código para obtener el beneficio medio por ticker'
# Mean profit per trade, by ticker (first column of the results table).
df_resultado_6 <- data.frame(ibex_data %>%
                               select(ticker, beneficio_dia_ticker) %>%
                               group_by(ticker) %>%
                               summarise('Bº medio por operacion' = mean(beneficio_dia_ticker)))
'código para obtener el beneficio acumulado por ticker'
# Accumulated profit per ticker.
# NOTE(review): assigning a data.frame as a column creates a nested
# data.frame column; extracting with [, 1] (as done for later columns)
# would be flatter -- confirm the later t() handles this as intended.
df_resultado_6$Beneficio_acumulado <- data.frame(ibex_data %>%
                                                   select(ticker, beneficio_dia_ticker) %>%
                                                   group_by(ticker) %>%
                                                   summarise(Beneficio_acumulado= sum(beneficio_dia_ticker)) %>%
                                                   select(Beneficio_acumulado))
'código para obtener porcentaje dias positivos y negativos'
# Days with strictly positive profit (zero-profit days excluded, as in
# the original).
df_dias_positivos <- data.frame(ibex_data %>%
                                  select(ticker, beneficio_dia_ticker) %>%
                                  filter(beneficio_dia_ticker>0) %>%
                                  group_by(ticker) %>%
                                  summarise(dias_positivos = n()))
# Days with strictly negative profit.
df_dias_negativos <- data.frame(ibex_data %>%
                                  select(ticker, beneficio_dia_ticker) %>%
                                  filter(beneficio_dia_ticker<0) %>%
                                  group_by(ticker) %>%
                                  summarise(dias_negativos = n()))
# Full outer join keeps tickers present on only one side; their missing
# counts come back as NA.  (`all = TRUE` spelled out, not `T`.)
matcheo_dias <- merge(df_dias_positivos, df_dias_negativos, by = c("ticker"), all = TRUE)
# Replace the NAs produced by the join with zero counts.  Vectorised
# logical-index assignment replaces the original two element-wise loops;
# results are identical.
matcheo_dias$dias_positivos[is.na(matcheo_dias$dias_positivos)] <- 0
matcheo_dias$dias_negativos[is.na(matcheo_dias$dias_negativos)] <- 0
# Share of positive and negative days per ticker.
df_porcen_dias <- data.frame(matcheo_dias %>%
                               mutate(dias_totales = dias_positivos+dias_negativos, '%_dias_positivos' = dias_positivos/dias_totales , '%_dias_negativos'= dias_negativos/dias_totales) %>%
                               select('%_dias_positivos', '%_dias_negativos'))
df_resultado_6$porcen_dias_positivos <- df_porcen_dias[,1]
df_resultado_6$porcen_dias_negativos <- df_porcen_dias[,2]
rm(df_dias_positivos,df_dias_negativos, matcheo_dias, df_porcen_dias)
'código para obtener las horquillas'
df_horquillas <- data.frame(ibex_data %>%
select(ticker, high, open, low) %>%
mutate(horquilla_sup = high-open, horquilla_inf = open - low) %>%
group_by(ticker) %>%
summarise(horquilla_sup_media = mean(horquilla_sup), horquilla_inf_media = mean(horquilla_inf)))
df_resultado_6$Horquilla_superior_media <- df_horquillas[,2]
df_resultado_6$Horqilla_inferior_media <- df_horquillas[,3]
rm(df_horquillas)
'codigo para obtener el numero de operaciones'
df_resultado_6$Numero_de_operaciones <- data.frame(ibex_data %>%
select(ticker) %>%
group_by(ticker) %>%
summarise(numero_datos = n()) %>%
select(numero_datos))
#Estructuramos los resultados tal y como pide el enunciado
df_resultado_6T <- data.frame(t(df_resultado_6[-1]))
colnames(df_resultado_6T) <- df_resultado_6[, 1]
vector_nombres <- c('Bº medio por operación','Beneficio acumulado', '% días positivos','% días negativos','Horquilla superior media','Horquilla inferior media','Número de operaciones')
row.names(df_resultado_6T) <- vector_nombres
df_resultado_6T <- round(df_resultado_6T,3)
rm(df_resultado_6)
'ploteo del beneficio medio acumulado vs numero de operaciones'
# Running profit and trade counter per ticker. Rows are assumed grouped by
# ticker, so both accumulators restart whenever the ticker id changes between
# consecutive rows.
ibex_data$beneficio_acum_for <- 0
ibex_data$numero_operacion <- 0
# Seed the accumulators with the very first row.
ibex_data$beneficio_acum_for[1] <- ibex_data$beneficio_dia_ticker[1]
ibex_data$numero_operacion[1] <- 1
for (fila in 2:length(ibex_data$X)) {
  if (ibex_data$X[fila] != ibex_data$X[fila - 1]) {
    # New ticker: restart the running totals.
    ibex_data$beneficio_acum_for[fila] <- ibex_data$beneficio_dia_ticker[fila]
    ibex_data$numero_operacion[fila] <- 1
  } else {
    ibex_data$beneficio_acum_for[fila] <-
      ibex_data$beneficio_acum_for[fila - 1] + ibex_data$beneficio_dia_ticker[fila]
    ibex_data$numero_operacion[fila] <- ibex_data$numero_operacion[fila - 1] + 1
  }
}
rm(fila)
'código para plotear todos los tickers a la vez'
# One line chart per ticker: cumulative profit against trade number.
for (activo in unique(ibex_data$ticker)) {
  plot_activo <- ibex_data[ibex_data$ticker == activo,
                           c("numero_operacion", "beneficio_acum_for")]
  plot(plot_activo$numero_operacion, plot_activo$beneficio_acum_for,
       xlab = 'Nº operacion',
       ylab = 'Beneficio acumulado',
       main = activo,
       type = "l")
}
rm(activo, ibex_data)
'Ejercicio 7'
#--------------------------------------------------------------
price_departures <- read.table("price_departures.csv",
                               header = TRUE,
                               sep = ",",
                               stringsAsFactors = FALSE,
                               dec = ".")
# Reshape the wide matrix (one row per date, one column per ticker) into long
# format (ticker, date, value) so the earlier ibex_data code can be reused.
n_fechas <- nrow(price_departures)
tickers_price <- colnames(price_departures)[-1]
# unlist() concatenates the ticker columns in column order — the same result
# as the original grow-by-append() loop, but O(n) and with the dimensions read
# from the file instead of hard-coded (79 tickers / 3953 dates), so this
# survives a change in the input file.
# NOTE(review): pairing column-ordered values with *sorted* ticker names (as
# the original also did) assumes the columns already come alphabetically
# sorted — verify against the CSV header.
df_price_departures <- data.frame(
  X = rep(sort(tickers_price), each = n_fechas),
  Fecha = rep(as.character(price_departures$X), times = length(tickers_price)),
  price_departure = unlist(price_departures[-1], use.names = FALSE)
)
rm(n_fechas, tickers_price)
# Join by ticker/date, drop NA rows and tickers with fewer than 30 observations.
df_price_departures <- na.omit(df_price_departures)
ibex_data_7 <- read.table("ibex_data.csv",
                          header = TRUE,
                          sep = ",",
                          stringsAsFactors = FALSE,
                          dec = ".")
names(ibex_data_7)[2] <- "Fecha"
# NOTE(review): 'all.ibex_data_7' is not a merge() argument, so it is silently
# swallowed by '...' and this is a plain inner join. The code below relies on
# price_departure having no NAs, which only holds for the inner join, so the
# call is left as-is (it was presumably meant to be all.x = TRUE — confirm).
ibex_data_7<- merge(ibex_data_7, df_price_departures, by = c("X","Fecha"), all.ibex_data_7 = T)
'Código para quitar los valores con menos de 30 datos'
tickers_out_price <- (data.frame(ibex_data_7) %>%
                        select(X) %>%
                        group_by(X) %>%
                        summarise(numero_datos = n()) %>%
                        filter(numero_datos< 30))
vector_tickers_out_price <- as.character(tickers_out_price[,1])
# BUG FIX: '!=' recycles the right-hand vector element-wise, which keeps the
# wrong rows whenever more than one ticker is below the threshold (and drops
# every row when the vector is empty). %in% performs the intended
# set-exclusion for any number of excluded tickers.
ibex_data_7 <- ibex_data_7[!(ibex_data_7$X %in% vector_tickers_out_price), ]
ibex_data_7 <- data.frame(ibex_data_7 %>%
                            select(Fecha, X, price_departure, open, low, high, close, vol))
rm(tickers_out_price, vector_tickers_out_price)
'Algoritmo de inversión - lógica'
# Buy at the open whenever price_departure >= 0.75; exit intraday at a fixed
# stop-loss of open-0.10 or take-profit of open+0.03 per share, otherwise at
# the close. The original per-row for-loops are replaced with vectorized
# operations performing the identical arithmetic.
salida_price_departure <- 0.75
ibex_data_7$Acciones_apertura <- 0
ibex_data_7$low_high_close <- 0
ibex_data_7$comision_compra <- 0
# Allocate capital only to rows that clear the price-departure threshold;
# everything else keeps 0 shares and 0 buy commission.
compra <- ibex_data_7$price_departure >= salida_price_departure
ibex_data_7$Acciones_apertura[compra] <- capital_inicial/ibex_data_7$open[compra]
ibex_data_7$comision_compra[compra] <- prima_broker*capital_inicial
# Exit-price priority (same order as the original if/else chain):
# stop-loss first, then take-profit, else the closing price.
ibex_data_7$low_high_close <- ifelse(
  ibex_data_7$low <= (ibex_data_7$open - 0.10),
  ibex_data_7$Acciones_apertura*(ibex_data_7$open - 0.10),
  ifelse(ibex_data_7$high >= (ibex_data_7$open + 0.03),
         ibex_data_7$Acciones_apertura*(ibex_data_7$open + 0.03),
         ibex_data_7$Acciones_apertura*ibex_data_7$close))
rm(compra)
ibex_data_7$comision_venta <- prima_broker*ibex_data_7$low_high_close
ibex_data_7$comision_total <- ibex_data_7$comision_compra + ibex_data_7$comision_venta
'Cálculo del beneficio/dia ticker'
# Days with no position (0 shares) yield 0 profit instead of "losing" the
# whole initial capital.
ibex_data_7$beneficio_dia_ticker <- ifelse(
  ibex_data_7$Acciones_apertura == 0,
  0,
  ibex_data_7$low_high_close - capital_inicial - ibex_data_7$comision_total)
'código para obtener el beneficio medio por ticker'
# Mean profit per trade, counting only the days a trade was actually opened
# (price departure above the threshold).
df_resultado_7 <- data.frame(ibex_data_7 %>%
                               select(X, beneficio_dia_ticker, price_departure) %>%
                               filter(price_departure >= salida_price_departure) %>%
                               group_by(X) %>%
                               summarise('Bº medio por operacion' = mean(beneficio_dia_ticker)))
'código para obtener el beneficio acumulado por ticker'
# NOTE(review): this stores a one-column data.frame inside a column; it works
# for the later transpose but a plain vector would be cleaner.
df_resultado_7$Beneficio_acumulado <- data.frame(ibex_data_7 %>%
                                                   select(X, beneficio_dia_ticker, price_departure) %>%
                                                   filter(price_departure >= salida_price_departure) %>%
                                                   group_by(X) %>%
                                                   summarise(Beneficio_acumulado= sum(beneficio_dia_ticker)) %>%
                                                   select(Beneficio_acumulado))
'código para obtener porcentaje dias positivos y negativos'
# Profitable days per ticker (profit exactly 0 — including no-trade days — is
# excluded by the strict > 0 filter).
df_dias_positivos_7 <- data.frame(ibex_data_7 %>%
                                    select(X, beneficio_dia_ticker) %>%
                                    filter(beneficio_dia_ticker>0) %>%
                                    group_by(X) %>%
                                    summarise(dias_positivos = n()))
# Losing days per ticker.
df_dias_negativos_7 <- data.frame(ibex_data_7 %>%
                                    select(X, beneficio_dia_ticker) %>%
                                    filter(beneficio_dia_ticker<0) %>%
                                    group_by(X) %>%
                                    summarise(dias_negativos = n()))
# Full outer join so every ticker keeps a row; missing sides become NA.
matcheo_dias_7 <- merge(df_dias_positivos_7, df_dias_negativos_7, by = c("X"), all = T)
# Replace the NA counts with 0. Vectorized mask assignment instead of the
# original element-by-element `for (i in 1:length(...))` loops — same result.
matcheo_dias_7$dias_positivos[is.na(matcheo_dias_7$dias_positivos)] <- 0
matcheo_dias_7$dias_negativos[is.na(matcheo_dias_7$dias_negativos)] <- 0
df_porcen_dias_7 <- data.frame(matcheo_dias_7 %>%
                                 mutate(dias_totales = dias_positivos+dias_negativos, '%_dias_positivos' = dias_positivos/dias_totales , '%_dias_negativos'= dias_negativos/dias_totales) %>%
                                 select('%_dias_positivos', '%_dias_negativos'))
df_resultado_7$porcen_dias_positivos <- df_porcen_dias_7[,1]
df_resultado_7$porcen_dias_negativos <- df_porcen_dias_7[,2]
rm(df_dias_positivos_7,df_dias_negativos_7, matcheo_dias_7, df_porcen_dias_7)
'código para obtener las horquillas'
# Mean intraday spreads per ticker, over traded days only.
df_horquillas_7 <- data.frame(ibex_data_7 %>%
                                select(X, high, open, low, price_departure) %>%
                                filter(price_departure >= salida_price_departure) %>%
                                mutate(horquilla_sup = high - open, horquilla_inf = open - low) %>%
                                group_by(X) %>%
                                summarise(horquilla_sup_media = mean(horquilla_sup), horquilla_inf_media = mean(horquilla_inf)))
df_resultado_7$Horquilla_superior_media <- df_horquillas_7[,2]
df_resultado_7$Horqilla_inferior_media <- df_horquillas_7[,3]
rm(df_horquillas_7)
'codigo para obtener el numero de operaciones'
# Number of days with an open trade per ticker.
df_resultado_7$Numero_de_operaciones <- data.frame(ibex_data_7 %>%
                                                     select(X, price_departure) %>%
                                                     group_by(X) %>%
                                                     filter(price_departure >= salida_price_departure) %>%
                                                     summarise(numero_datos = n()) %>%
                                                     select(numero_datos))
# Shape the results as the assignment asks: metrics as rows, tickers as columns.
df_resultado_7T <- data.frame(t(df_resultado_7[-1]))
colnames(df_resultado_7T) <- df_resultado_7[, 1]
vector_nombres <- c('Bº medio por operación','Beneficio acumulado', '% días positivos','% días negativos','Horquilla superior media','Horquilla inferior media','Número de operaciones')
row.names(df_resultado_7T) <- vector_nombres
df_resultado_7T <- round(df_resultado_7T,3)
rm(df_resultado_7)
'ploteo del beneficio medio acumulado vs numero de operaciones'
# Per-ticker running totals of profit and trade count; only rows that actually
# triggered a trade (price departure above the threshold) are kept.
ibex_data_7$beneficio_acum_for <- 0
ibex_data_7$numero_operacion <- 0
ibex_data_7 <- ibex_data_7[ibex_data_7$price_departure >= salida_price_departure,]
# Seed both accumulators from the first remaining row.
ibex_data_7$beneficio_acum_for[1] <- ibex_data_7$beneficio_dia_ticker[1]
ibex_data_7$numero_operacion[1] <- 1
for (fila in 2:length(ibex_data_7$X)) {
  if (ibex_data_7$X[fila] != ibex_data_7$X[fila - 1]) {
    # New ticker: restart the running totals.
    ibex_data_7$beneficio_acum_for[fila] <- ibex_data_7$beneficio_dia_ticker[fila]
    ibex_data_7$numero_operacion[fila] <- 1
  } else {
    ibex_data_7$beneficio_acum_for[fila] <-
      ibex_data_7$beneficio_acum_for[fila - 1] + ibex_data_7$beneficio_dia_ticker[fila]
    ibex_data_7$numero_operacion[fila] <- ibex_data_7$numero_operacion[fila - 1] + 1
  }
}
rm(fila)
# One cumulative-profit chart per ticker.
for (activo in unique(ibex_data_7$X)) {
  plot_activo_7 <- ibex_data_7[ibex_data_7$X == activo,
                               c("numero_operacion", "beneficio_acum_for")]
  plot(plot_activo_7$numero_operacion, plot_activo_7$beneficio_acum_for,
       xlab = 'Nº operacion',
       ylab = 'Beneficio acumulado',
       main = activo,
       type = "l")
}
rm(activo, price_departures)
'Ejercicio 8'
'-------------------------------------------------------------'
ibex_data_8 <- ibex_data_7[,1:9]
'vamos a calcular la media de los datos de cierre del dia y asignarla'
# Daily "typical" price: mean of open, low, high and close.
ibex_data_8$media_datos_cierre <- (ibex_data_8$open + ibex_data_8$low + ibex_data_8$high + ibex_data_8$close)/4
ibex_data_8 <- data.frame(ibex_data_8 %>%
                            select(Fecha, X,price_departure, open, low, high, close,vol, media_datos_cierre))
# Capital allocated to each ticker/day: 0.5% of that day's traded value.
ibex_data_8$capital_asignado <- ibex_data_8$media_datos_cierre * ibex_data_8$vol * 0.005
'calculo del stop_loss & stop_profit objetivo para cada acitvo'
vector_tickers_cuantil <- unique(ibex_data_8$X)
# Placeholder rows; the real quantiles are filled in below, one column per
# ticker (seq_along instead of 1:length — safe for empty input).
stop_loss <- seq_along(vector_tickers_cuantil)
stop_profit <- seq_along(vector_tickers_cuantil)
matriz_cuantil <- rbind(stop_loss, stop_profit)
df_cuantil_stop_loss_profit <- data.frame(matriz_cuantil)
rm(matriz_cuantil, stop_loss, stop_profit)
colnames(df_cuantil_stop_loss_profit) <- vector_tickers_cuantil
'Para calcular el stop_loss y el stop_profit de cada activo monto un vector dinámico que metere en un for y enchufare cada valor en un df'
# Walk the rows (grouped by ticker), accumulating each ticker's intraday
# spreads; on a ticker change, store the 80% quantile of (open - low) as its
# stop loss and the 30% quantile of (high - open) as its stop profit.
vector_tickers_dinamico_loss <- c(ibex_data_8$open[1] - ibex_data_8$low[1])
vector_tickers_dinamico_profit <- c(ibex_data_8$high[1] - ibex_data_8$open[1])
j <- 1
for(i in 2:(length(ibex_data_8$X))){
  if (ibex_data_8$X[i] == ibex_data_8$X[i-1]){
    vector_tickers_dinamico_loss <- c(vector_tickers_dinamico_loss,(ibex_data_8$open[i]-ibex_data_8$low[i]))
    vector_tickers_dinamico_profit <- c(vector_tickers_dinamico_profit,(ibex_data_8$high[i]-ibex_data_8$open[i]))
  }else{
    df_cuantil_stop_loss_profit[1,j] <- quantile(vector_tickers_dinamico_loss,0.8)
    df_cuantil_stop_loss_profit[2,j] <- quantile(vector_tickers_dinamico_profit,0.3)
    j <- j + 1
    # BUG FIX: the accumulators must restart with the new ticker's first
    # SPREADS (open - low, high - open), matching the initialization above —
    # not with the raw low/open and open/high prices as before, which
    # contaminated every ticker's quantiles after the first one.
    vector_tickers_dinamico_loss <- c(ibex_data_8$open[i] - ibex_data_8$low[i])
    vector_tickers_dinamico_profit <- c(ibex_data_8$high[i] - ibex_data_8$open[i])
  }
}
# The else-branch never runs for the final ticker; close its column here.
df_cuantil_stop_loss_profit[1,j] <- quantile(vector_tickers_dinamico_loss,0.8)
df_cuantil_stop_loss_profit[2,j] <- quantile(vector_tickers_dinamico_profit,0.3)
rm(i,j,vector_tickers_dinamico_loss,vector_tickers_dinamico_profit)
'logica del algoritmo'
# Buy every ticker every day with its allocated capital; exit at the ticker's
# own stop-loss / stop-profit quantile (looked up by ticker name in
# df_cuantil_stop_loss_profit, row 1 = loss, row 2 = profit), else the close.
ibex_data_8$Acciones_apertura <- ibex_data_8$capital_asignado/ibex_data_8$open
ibex_data_8$low_high_close <- 0
ibex_data_8$comision_compra <- prima_broker*ibex_data_8$capital_asignado
# Per-row loop kept: each row indexes the quantile table by its own ticker id.
for(i in 1:length(ibex_data_8$Fecha)){
  if(ibex_data_8$low[i] <= (ibex_data_8$open[i]-(df_cuantil_stop_loss_profit[1,(ibex_data_8$X[i])]))){
    ibex_data_8$low_high_close[i] <- (ibex_data_8$Acciones_apertura[i]*(ibex_data_8$open[i]-(df_cuantil_stop_loss_profit[1,(ibex_data_8$X[i])])))
  }else if(ibex_data_8$high[i] >= (ibex_data_8$open[i]+(df_cuantil_stop_loss_profit[2,(ibex_data_8$X[i])]))){
    ibex_data_8$low_high_close[i] <- (ibex_data_8$Acciones_apertura[i]*(ibex_data_8$open[i]+(df_cuantil_stop_loss_profit[2,(ibex_data_8$X[i])])))
  }else{
    ibex_data_8$low_high_close[i] <- ibex_data_8$Acciones_apertura[i]*ibex_data_8$close[i]
  }
}
rm(i)
ibex_data_8$comision_venta <- prima_broker*ibex_data_8$low_high_close
ibex_data_8$comision_total <- ibex_data_8$comision_compra + ibex_data_8$comision_venta
# Daily P&L: sale proceeds minus invested capital minus round-trip commission.
ibex_data_8$beneficio_dia_ticker <- ibex_data_8$low_high_close - ibex_data_8$capital_asignado - ibex_data_8$comision_total
'código para obtener el beneficio medio por ticker'
# Mean daily profit per ticker (every day is a trade in exercise 8).
df_resultado_8 <- data.frame(ibex_data_8 %>%
                               select(X, beneficio_dia_ticker) %>%
                               group_by(X) %>%
                               summarise('Bº medio por operacion' = mean(beneficio_dia_ticker)))
'código para obtener el beneficio acumulado por ticker'
df_resultado_8$Beneficio_acumulado <- data.frame(ibex_data_8 %>%
                                                   select(X, beneficio_dia_ticker) %>%
                                                   group_by(X) %>%
                                                   summarise(Beneficio_acumulado= sum(beneficio_dia_ticker)) %>%
                                                   select(Beneficio_acumulado))
'código para obtener porcentaje dias positivos y negativos'
# Profitable days per ticker (profit exactly 0 excluded by the > 0 filter).
df_dias_positivos_8 <- data.frame(ibex_data_8 %>%
                                    select(X, beneficio_dia_ticker) %>%
                                    filter(beneficio_dia_ticker>0) %>%
                                    group_by(X) %>%
                                    summarise(dias_positivos = n()))
# Losing days per ticker.
df_dias_negativos_8 <- data.frame(ibex_data_8 %>%
                                    select(X, beneficio_dia_ticker) %>%
                                    filter(beneficio_dia_ticker<0) %>%
                                    group_by(X) %>%
                                    summarise(dias_negativos = n()))
# Full outer join so every ticker keeps a row; missing sides become NA.
matcheo_dias_8 <- merge(df_dias_positivos_8, df_dias_negativos_8, by = c("X"), all = T)
# Replace the NA counts with 0. Vectorized mask assignment instead of the
# original element-by-element `for (i in 1:length(...))` loops — same result.
matcheo_dias_8$dias_positivos[is.na(matcheo_dias_8$dias_positivos)] <- 0
matcheo_dias_8$dias_negativos[is.na(matcheo_dias_8$dias_negativos)] <- 0
df_porcen_dias_8 <- data.frame(matcheo_dias_8 %>%
                                 mutate(dias_totales = dias_positivos+dias_negativos, '%_dias_positivos' = dias_positivos/dias_totales , '%_dias_negativos'= dias_negativos/dias_totales) %>%
                                 select('%_dias_positivos', '%_dias_negativos'))
df_resultado_8$porcen_dias_positivos <- df_porcen_dias_8[,1]
df_resultado_8$porcen_dias_negativos <- df_porcen_dias_8[,2]
rm(df_dias_positivos_8,df_dias_negativos_8, matcheo_dias_8, df_porcen_dias_8)
'código para obtener las horquillas'
# Mean intraday spreads per ticker.
df_horquillas_8 <- data.frame(ibex_data_8 %>%
                                select(X, high, open, low, price_departure) %>%
                                mutate(horquilla_sup = high - open, horquilla_inf = open - low) %>%
                                group_by(X) %>%
                                summarise(horquilla_sup_media = mean(horquilla_sup), horquilla_inf_media = mean(horquilla_inf)))
df_resultado_8$Horquilla_superior_media <- df_horquillas_8[,2]
df_resultado_8$Horquilla_inferior_media <- df_horquillas_8[,3]
rm(df_horquillas_8)
'codigo para obtener el numero de operaciones'
# Number of observations (= trades, one per day) per ticker.
df_resultado_8$Numero_de_operaciones <- data.frame(ibex_data_8 %>%
                                                     select(X, price_departure) %>%
                                                     group_by(X) %>%
                                                     summarise(numero_datos = n()) %>%
                                                     select(numero_datos))
'codigo para obtener el beneficio medio por euro invertido'
df_resultado_8$total_invertido <- data.frame(ibex_data_8 %>%
                                               select(X, capital_asignado) %>%
                                               group_by(X) %>%
                                               summarise(total_invertido = sum(capital_asignado)) %>%
                                               select(total_invertido))
# NOTE(review): the parenthesised `Beneficio_medio_euro = ...` also creates a
# top-level object named Beneficio_medio_euro as a side effect, which is what
# the rm() at the end of this section removes — confirm this is intentional.
df_resultado_8$Beneficio_medio_euro <- (Beneficio_medio_euro = df_resultado_8$Beneficio_acumulado/df_resultado_8$total_invertido)
df_resultado_8$importe_medio_operacion <- df_resultado_8$total_invertido/df_resultado_8$Numero_de_operaciones
# Shape the results as the assignment asks: reorder metrics, transpose so
# metrics are rows and tickers are columns, then splice in the stop levels.
df_resultado_8 <- data.frame(df_resultado_8 %>%
                               select(X,
                                      importe_medio_operacion,
                                      Bº.medio.por.operacion,
                                      Beneficio_medio_euro,
                                      Beneficio_acumulado,
                                      porcen_dias_positivos,
                                      porcen_dias_negativos,
                                      Horquilla_superior_media,
                                      Horquilla_inferior_media,
                                      Numero_de_operaciones))
df_resultado_8T <- data.frame(t(df_resultado_8[-1]))
colnames(df_resultado_8T) <- df_resultado_8[,1]
# Move the operation count below the stop-loss/profit rows.
df_resultado_8T_1 <- df_resultado_8T[9,]
df_resultado_8T <- df_resultado_8T[1:8,]
df_resultado_8T <- rbind(df_resultado_8T,df_cuantil_stop_loss_profit, df_resultado_8T_1)
df_resultado_8T <- round(df_resultado_8T,3)
vector_nombres <- c('Importe medio por operación','Bº medio por operación','Bº medio por euro invertido','Beneficio acumulado', '% días positivos','% días negativos','Horquilla superior media','Horquilla inferior media','Stop profit objetivo','Stop loss','Número de operaciones')
row.names(df_resultado_8T) <- vector_nombres
# Final cleanup of all intermediates from exercises 6-8.
rm(df_resultado_8,df_resultado_8T_1, df_cuantil_stop_loss_profit, df_price_departures,ibex_data_7, ibex_data_8, Beneficio_medio_euro)
rm(capital_inicial, prima_broker, salida_price_departure, vector_nombres, vector_tickers_cuantil)
rm(titanic, plot_activo, plot_activo_7)
|
/practica_1/Wadie_Albarki copia.R
|
no_license
|
WadieAC/R-CODES
|
R
| false
| false
| 35,212
|
r
|
library(dplyr)
library(ggplot2)
# NOTE(review): absolute, machine-specific path — prefer a project-relative
# working directory or here::here() so the script runs on other machines.
setwd("/Users/WadieAlbakri/Desktop/Máster BME/Módulos/Módulo 1/R/practica_1")
#directorio_Melendez <- getwd()
#setwd(directorio_Melendez)
'Ejercicio 1'
# Load the dataset into a variable called "titanic".
titanic <- read.table("train.csv",
                      header = TRUE,
                      sep = ",",
                      stringsAsFactors = FALSE,
                      dec = ".")
# Number of rows and columns in the dataset.
dim(titanic)
# First 10 observations, to see what they look like.
head(titanic,10)
# List of variables included in the dataset and their types.
str(titanic)
# Distribution of the categorical variables (sex, embarked, survived, pclass):
# unique values / total values.
length(unique(titanic$Sex))/length(titanic$Sex)
length(unique(titanic$Embarked))/length(titanic$Embarked)
length(unique(titanic$Survived))/length(titanic$Survived)
length(unique(titanic$Pclass))/length(titanic$Pclass)
# Basic summary statistics for every variable.
summary(titanic)
'Ejercicio 2'
# Find which variables contain NA, NULL or empty ("") values.
# Loop that flags columns holding empty strings.
# NOTE(review): only columns 7:12 are scanned for "" — assumes the earlier
# columns are numeric and cannot hold empty strings; verify against train.csv.
p <- 1
variables_NULL <- c()
for( i in 7:12){
  columna <- sum(titanic[,i] == "")
  if(columna != 0){
    variables_NULL[p] <- colnames(titanic[i])
    p <- p +1
  }
}
rm(columna,i,p)
# This loop flags the columns containing NA values.
p <- 1
variables_NA <- c()
for( i in 1:dim(titanic)[2]){
  if (any(is.na(titanic[i])) == TRUE){
    variables_NA[p] <- colnames(titanic[i])
    p <- p +1
  }
}
rm(i,p)
variables_NA_NULL <- c(variables_NA, variables_NULL)
print(paste0("Variable con NA, NULL o vacíos: ", variables_NA_NULL))
rm(variables_NA, variables_NULL)
# ...and their proportion within each variable.
Propor_NA_Age <- c(Porcent_NA_Age = sum(is.na(titanic$Age))/nrow(titanic) * 100)
Propor_NULL_Embarked <- c(Porcent_NULL_Embarked = (sum(titanic$Embarked == "")/nrow(titanic) * 100))
Propor_NULL_Cabin <- c(Porcent_NULL_Cabin = (sum(titanic$Cabin == "")/nrow(titanic) * 100))
Porcentajes <- c(Propor_NA_Age,Propor_NULL_Embarked,Propor_NULL_Cabin)
Porcentajes
rm(Propor_NA_Age, Propor_NULL_Cabin, Propor_NULL_Embarked)
#apply(is.na(titanic), 2, mean) # This also yields the % of NAs per column.
# Impute the variable mean for numeric columns and "No disponible" for the
# string columns.
# Mean-impute Age: the mean is computed over the non-NA values, which is
# exactly what the original copy/na.omit/element-wise-loop sequence did;
# the vectorized mask assignment replaces all of it.
media_AGE <- mean(titanic$Age, na.rm = TRUE)
titanic$Age[is.na(titanic$Age)] <- media_AGE
rm(media_AGE)
# Replace empty strings in the two affected character columns (vectorized
# replacement of the original per-element loops over Cabin and Embarked).
titanic$Cabin[titanic$Cabin == ""] <- "No disponible"
titanic$Embarked[titanic$Embarked == ""] <- "No disponible"
# Re-check for NAs and NULLs to verify the imputation removed them all.
Na_Age_vector <- any(is.na(titanic$Age))
Null_Cabin_vector <- sum(titanic$Cabin == "")
Null_Embarked_vector <- sum(titanic$Embarked == "")
verificacion <- c(Na_Age = Na_Age_vector, Null_Cabin = Null_Cabin_vector, Null_Embarked = Null_Embarked_vector)
verificacion
rm(Na_Age_vector, Null_Embarked_vector, Null_Cabin_vector, verificacion)
'Ejercicio 3'
# Did more women or more men die?
Survived_titanic <- as.numeric(titanic$Survived)
Sex_titanic <- as.character(titanic$Sex)
sexo_superviviente <- data.frame(Supervivientes = Survived_titanic, Sexo= Sex_titanic)
hombre_moridos <- sexo_superviviente %>%
  filter(Supervivientes == 0 & Sexo == 'male')%>%
  summarise(Hombres_caidos = n())
mujeres_moridas <- sexo_superviviente %>%
  filter(Supervivientes == 0 & Sexo == 'female')%>%
  summarise(Mujeres_caidas = n())
# Same computation in a single pipe, for reference.
sexo_superviviente %>%
  arrange(Sexo) %>%
  group_by(Sexo) %>%
  filter(Supervivientes == 0) %>%
  summarise(Defunciones = n())
if( hombre_moridos[1,1] > mujeres_moridas[1,1]){
  print(paste0("Fallecieron más hombres que mujeres total: ", hombre_moridos[1,1]))
}else{
  print(paste0("Fallecieron más mujeres que hombres total", mujeres_moridas[1,1]))
}
# ...and as a percentage over each gender's total.
total_hombres <- sexo_superviviente %>%
  filter(Sexo == 'male') %>%
  summarise(Nº_total_hombres = n())
total_mujeres <- sexo_superviviente %>%
  filter(Sexo == 'female') %>%
  summarise(Nº_total_mujeres = n())
Porcentaje_fallecidos <- data.frame(Porcent_dead_men = (hombre_moridos[1,1]/total_hombres[1,1])*100, Porcent_dead_Women = (mujeres_moridas[1,1]/total_mujeres[1,1])*100)
Porcentaje_fallecidos
rm(hombre_moridos, mujeres_moridas, sexo_superviviente, Survived_titanic, Sex_titanic)
rm(total_hombres, total_mujeres)
# Which passenger class (1st, 2nd or 3rd) survived the most? The answer must
# report only the top class and its percentage (not all three classes).
# Keep only the columns of interest.
filtro_titanic <- data.frame(titanic$Pclass, titanic$Survived)
# Count survivors/deaths per class.
filtro1 <- data.frame(filtro_titanic %>%
                        arrange(titanic.Pclass) %>%
                        group_by(titanic.Pclass, titanic.Survived) %>%
                        summarise(Conteo=n()))
# Total passengers per class.
pasajeros_total_clase <- data.frame(filtro1 %>%
                                      group_by(Clase = titanic.Pclass) %>%
                                      summarise(Total_por_clase = sum(Conteo)))
# Survivors per class.
supervivientes_por_clase <- data.frame(filtro1 %>%
                                         filter(titanic.Survived == 1) %>%
                                         select(Clase = titanic.Pclass, nº_Supervivientes = Conteo))
# Attach the survivor counts to compute proportions below.
pasajeros_total_clase$nº_Supervivientes <- cbind(supervivientes_por_clase$nº_Supervivientes)
pasajeros_total_clase %>%
  mutate(Proporcion_supervivientes = (nº_Supervivientes/Total_por_clase)*100) %>%
  select(Clase, Proporcion_supervivientes) %>%
  mutate(rank = rank(desc(Proporcion_supervivientes))) %>%
  filter(rank == 1)
rm(filtro_titanic, filtro1, pasajeros_total_clase, supervivientes_por_clase)
# Mean and maximum age of the survivors within each class.
titanic %>%
  arrange(Pclass) %>%
  group_by(Pclass) %>%
  filter(Survived == 1) %>%
  summarise(Edad_media=mean(Age),Edad_maxima= max(Age))
'Ejercicio 4'
# Which embarkation port has the cheapest mean ticket price?
# Report only that port, not the full list.
titanic %>%
  group_by(Puerto = Embarked) %>%
  summarise(Media_billete = mean(Fare)) %>%
  mutate(Orden = rank(Media_billete)) %>%
  filter(Orden == 1) %>%
  select(Puerto, Media_billete)
# Correlation between passenger-name length and ticket price
# (the name is used exactly as it comes, unmodified).
titanic %>%
  select(Fare, Name) %>%
  mutate(Longitud_nombre = nchar(Name)) %>%
  select(Fare,Longitud_nombre) %>%
  mutate(cor(Longitud_nombre, Fare)) %>%
  arrange(desc(Longitud_nombre))
'Ejercicio 5'
# Names of passengers who did not survive and whose ticket price is in the
# top decile.
titanic %>%
  filter(Survived == 0 & Fare >= (quantile(Fare, probs = 0.9))) %>%
  select(Name)
# Which cabin maximizes survival probability for a man aged 30-40?
# Excluding the 'No disponible' cabin value — result: E25.
titanic %>% # survivors per cabin, ignoring 'No disponible'
  select(Sex, Age, Cabin, Survived) %>%
  filter(Sex == 'male' & Cabin != 'No disponible' & Age >= 30 & Age <= 40 & Survived == 1) %>%
  group_by(Cabin) %>%
  summarise(Cabina_reps = n()) %>%
  arrange(desc(Cabina_reps)) %>%
  mutate(rank = rank(desc(Cabina_reps))) %>%
  filter(rank == 1)
'Ejercicio 6 '
'------------------------------------------'
#setwd("/Users/WadieAlbakri/Desktop/Máster BME/Módulos/Módulo 1/R/practica_1")
#directorio_Melendez <- getwd()
#setwd(directorio_Melendez)
ibex_data <- read.table("ibex_data.csv",
                        header = TRUE,
                        sep = ",",
                        stringsAsFactors = FALSE,
                        dec = ".")
names(ibex_data)[2] <- "Fecha"
# Capital invested on every trade (EUR) and broker fee rate per side.
capital_inicial <- 30000
prima_broker <- 0.0003
'Algoritmo de inversión - lógica'
# Buy capital_inicial at the open every day; exit intraday at stop-loss
# open-0.10 or take-profit open+0.03 per share, otherwise at the close.
# Vectorized version of the original per-row for-loop — identical arithmetic,
# identical branch priority (stop-loss checked before take-profit).
ibex_data$Acciones_apertura <- capital_inicial/ibex_data$open
ibex_data$low_high_close <- ifelse(
  ibex_data$low <= (ibex_data$open - 0.10),
  ibex_data$Acciones_apertura*(ibex_data$open - 0.10),
  ifelse(ibex_data$high >= (ibex_data$open + 0.03),
         ibex_data$Acciones_apertura*(ibex_data$open + 0.03),
         ibex_data$Acciones_apertura*ibex_data$close))
ibex_data$comision_compra <- prima_broker*capital_inicial
ibex_data$comision_venta <- prima_broker*ibex_data$low_high_close
ibex_data$comision_total <- ibex_data$comision_compra + ibex_data$comision_venta
ibex_data$beneficio_dia_ticker <- ibex_data$low_high_close - capital_inicial - ibex_data$comision_total
# Drop tickers with fewer than 30 observations.
tickers_out <- data.frame(ibex_data %>%
                            select(ticker) %>%
                            group_by(ticker) %>%
                            summarise(numero_datos = n()) %>%
                            filter(numero_datos< 30))
vector_tickers <- tickers_out[,1]
# BUG FIX: '!=' recycles the right-hand vector element-wise and keeps the
# wrong rows whenever more than one ticker is below the threshold; %in%
# performs the intended set-exclusion.
# NOTE(review): counts are grouped by 'ticker' but rows are filtered on 'X' —
# this assumes both columns hold the same ticker id; verify against the CSV.
ibex_data <- ibex_data[!(ibex_data$X %in% vector_tickers), ]
rm(tickers_out, vector_tickers)
'código para obtener el beneficio medio por ticker'
# Mean daily profit per ticker.
df_resultado_6 <- data.frame(ibex_data %>%
                               select(ticker, beneficio_dia_ticker) %>%
                               group_by(ticker) %>%
                               summarise('Bº medio por operacion' = mean(beneficio_dia_ticker)))
'código para obtener el beneficio acumulado por ticker'
df_resultado_6$Beneficio_acumulado <- data.frame(ibex_data %>%
                                                   select(ticker, beneficio_dia_ticker) %>%
                                                   group_by(ticker) %>%
                                                   summarise(Beneficio_acumulado= sum(beneficio_dia_ticker)) %>%
                                                   select(Beneficio_acumulado))
'código para obtener porcentaje dias positivos y negativos'
# Profitable days per ticker (profit exactly 0 excluded by the > 0 filter).
df_dias_positivos <- data.frame(ibex_data %>%
                                  select(ticker, beneficio_dia_ticker) %>%
                                  filter(beneficio_dia_ticker>0) %>%
                                  group_by(ticker) %>%
                                  summarise(dias_positivos = n()))
# Losing days per ticker.
df_dias_negativos <- data.frame(ibex_data %>%
                                  select(ticker, beneficio_dia_ticker) %>%
                                  filter(beneficio_dia_ticker<0) %>%
                                  group_by(ticker) %>%
                                  summarise(dias_negativos = n()))
# Full outer join so every ticker keeps a row; missing sides become NA.
matcheo_dias <- merge(df_dias_positivos, df_dias_negativos, by = c("ticker"), all = T)
# Replace the NA counts with 0. Vectorized mask assignment instead of the
# original element-by-element `for (i in 1:length(...))` loops — same result.
matcheo_dias$dias_positivos[is.na(matcheo_dias$dias_positivos)] <- 0
matcheo_dias$dias_negativos[is.na(matcheo_dias$dias_negativos)] <- 0
df_porcen_dias <- data.frame(matcheo_dias %>%
                               mutate(dias_totales = dias_positivos+dias_negativos, '%_dias_positivos' = dias_positivos/dias_totales , '%_dias_negativos'= dias_negativos/dias_totales) %>%
                               select('%_dias_positivos', '%_dias_negativos'))
df_resultado_6$porcen_dias_positivos <- df_porcen_dias[,1]
df_resultado_6$porcen_dias_negativos <- df_porcen_dias[,2]
rm(df_dias_positivos,df_dias_negativos, matcheo_dias, df_porcen_dias)
'código para obtener las horquillas'
# Mean intraday spreads per ticker: high-open (upper) and open-low (lower).
df_horquillas <- data.frame(ibex_data %>%
                              select(ticker, high, open, low) %>%
                              mutate(horquilla_sup = high-open, horquilla_inf = open - low) %>%
                              group_by(ticker) %>%
                              summarise(horquilla_sup_media = mean(horquilla_sup), horquilla_inf_media = mean(horquilla_inf)))
df_resultado_6$Horquilla_superior_media <- df_horquillas[,2]
df_resultado_6$Horqilla_inferior_media <- df_horquillas[,3]
rm(df_horquillas)
'codigo para obtener el numero de operaciones'
# Number of observations (= trades, one per day) per ticker.
df_resultado_6$Numero_de_operaciones <- data.frame(ibex_data %>%
                                                     select(ticker) %>%
                                                     group_by(ticker) %>%
                                                     summarise(numero_datos = n()) %>%
                                                     select(numero_datos))
# Shape the results as the assignment asks: metrics as rows, tickers as columns.
df_resultado_6T <- data.frame(t(df_resultado_6[-1]))
colnames(df_resultado_6T) <- df_resultado_6[, 1]
vector_nombres <- c('Bº medio por operación','Beneficio acumulado', '% días positivos','% días negativos','Horquilla superior media','Horquilla inferior media','Número de operaciones')
row.names(df_resultado_6T) <- vector_nombres
df_resultado_6T <- round(df_resultado_6T,3)
rm(df_resultado_6)
'ploteo del beneficio medio acumulado vs numero de operaciones'
ibex_data$beneficio_acum_for <- 0
ibex_data$numero_operacion <- 0
# Seed the accumulators with the first row; both reset whenever the ticker id
# changes between consecutive rows (rows are grouped by ticker).
ibex_data$beneficio_acum_for[1] <- ibex_data$beneficio_dia_ticker[1]
ibex_data$numero_operacion[1] <- 1
for(i in 2:(length(ibex_data$X))){
  if (ibex_data$X[i-1] == ibex_data$X[i]){
    ibex_data$beneficio_acum_for[i] <- ibex_data$beneficio_acum_for[i-1] + ibex_data$beneficio_dia_ticker[i]
    ibex_data$numero_operacion[i] <- ibex_data$numero_operacion[i-1] + 1
  }else{
    ibex_data$beneficio_acum_for[i] = ibex_data$beneficio_dia_ticker[i]
    ibex_data$numero_operacion[i] = 1
  }
}
rm(i)
'código para plotear todos los tickers a la vez'
# One cumulative-profit chart per ticker.
for(activo in unique(ibex_data$ticker)){
  plot_activo <- data.frame(ibex_data %>%
                              select(ticker, numero_operacion, beneficio_acum_for) %>%
                              filter(ticker == activo ) %>%
                              select(numero_operacion, beneficio_acum_for))
  plot(plot_activo$numero_operacion, plot_activo$beneficio_acum_for,
       xlab = 'Nº operacion',
       ylab = 'Beneficio acumulado',
       main = activo,
       type="l")
}
rm(activo, ibex_data)
'Ejercicio 7'
#--------------------------------------------------------------
price_departures <- read.table("price_departures.csv",
                               header = TRUE,
                               sep = ",",
                               stringsAsFactors = FALSE,
                               dec = ".")
# Reshape the wide price-departures matrix (dates x tickers) into long
# format (ticker, date, value) so it matches ibex_data and code can be reused.
vector_fechas <- as.character(price_departures$X)
vector_tickers_price <- colnames(price_departures)
# Drop the first column name ("X", the date column); 79 ticker columns remain.
# NOTE(review): 79 and 3953 below are hard-coded to this CSV's dimensions.
vector_tickers_price <- vector_tickers_price[2:80]
vector_valores_price <- c()
# Stack every ticker column into one long value vector (column-major order).
for (i in 2:(length(colnames(price_departures)))){
  vector_valores_price <- append(vector_valores_price, price_departures[,i] )
}
rm(i)
# 79 tickers x 3953 dates: repeat the dates per ticker and sort the tickers
# so both vectors align with the column-major stacking above.
# NOTE(review): assumes the CSV's ticker columns are in sorted order — confirm.
vector_fechas <- rep(vector_fechas, 79)
vector_tickers_price <- rep(vector_tickers_price, 3953)
vector_tickers_price <- sort(vector_tickers_price)
df_price_departures <- data.frame(X = vector_tickers_price,Fecha = vector_fechas,price_departures = vector_valores_price)
rm(vector_tickers_price,vector_valores_price,vector_fechas)
colnames(df_price_departures) <- c("X","Fecha","price_departure")
# Match by date, drop NA rows and tickers with fewer than 30 observations.
df_price_departures <- na.omit(df_price_departures)
ibex_data_7 <- read.table("ibex_data.csv",
                          header = TRUE,
                          sep = ",",
                          stringsAsFactors = FALSE,
                          dec = ".")
names(ibex_data_7)[2] <- "Fecha"
# Inner join on ticker ("X") and date. NOTE: the original call passed the
# invalid argument `all.ibex_data_7 = T`, which merge() silently ignored,
# so the effective behaviour was (and remains) an inner join (all = FALSE).
ibex_data_7 <- merge(ibex_data_7, df_price_departures, by = c("X", "Fecha"))
# Remove tickers with fewer than 30 observations.
tickers_out_price <- (data.frame(ibex_data_7) %>%
                        select(X) %>%
                        group_by(X) %>%
                        summarise(numero_datos = n()) %>%
                        filter(numero_datos < 30))
vector_tickers_out_price <- as.character(tickers_out_price[, 1])
# BUGFIX: the original used `!=` against a whole vector, which recycles
# element-wise and only behaves correctly when exactly one ticker is
# excluded; `%in%` correctly drops every listed ticker.
ibex_data_7 <- ibex_data_7[!(ibex_data_7$X %in% vector_tickers_out_price), ]
ibex_data_7 <- data.frame(ibex_data_7 %>%
                            select(Fecha, X, price_departure, open, low, high, close, vol))
rm(tickers_out_price, vector_tickers_out_price)
'Algoritmo de inversión - lógica'
# Buy at the opening price whenever price_departure >= 0.75.
salida_price_departure <- 0.75
ibex_data_7$Acciones_apertura <- 0
ibex_data_7$low_high_close <- 0
ibex_data_7$comision_compra <- 0
# Vectorised replacement of the original row-by-row loop: rows that meet the
# entry signal get shares bought with the full capital plus the buy fee;
# every other row keeps its initial 0. Result is identical to the loop.
entra <- ibex_data_7$price_departure >= salida_price_departure
ibex_data_7$Acciones_apertura[entra] <- capital_inicial / ibex_data_7$open[entra]
ibex_data_7$comision_compra[entra] <- prima_broker * capital_inicial
rm(entra)
# Investment algorithm: exit rule, checked in priority order for each day:
#   1) stop-loss:   low reaches open - 0.10  -> sell at open - 0.10
#   2) stop-profit: high reaches open + 0.03 -> sell at open + 0.03
#   3) otherwise sell at the day's close.
# NOTE(review): when both levels are touched the same day the stop-loss
# branch wins because it is tested first — confirm that is intended.
for(i in 1:length(ibex_data_7$Fecha)){
  if(ibex_data_7$low[i] <= (ibex_data_7$open[i]-0.10)){
    ibex_data_7$low_high_close[i] <- (ibex_data_7$Acciones_apertura[i]*(ibex_data_7$open[i]-0.10))
  }else if(ibex_data_7$high[i] >= (ibex_data_7$open[i]+0.03)){
    ibex_data_7$low_high_close[i] <- (ibex_data_7$Acciones_apertura[i]*(ibex_data_7$open[i]+0.03))
  }else{
    ibex_data_7$low_high_close[i] <- ibex_data_7$Acciones_apertura[i]*ibex_data_7$close[i]
  }
}
rm(i)
# Sell fee is proportional to the sale proceeds; total fee = buy + sell.
ibex_data_7$comision_venta <- prima_broker*ibex_data_7$low_high_close
ibex_data_7$comision_total <- ibex_data_7$comision_compra + ibex_data_7$comision_venta
'Cálculo del beneficio/dia ticker'
# Daily P&L per ticker-day: sale proceeds minus invested capital and fees.
# Rows without a trade (no shares bought) keep 0 profit.
for(i in 1:length(ibex_data_7$Fecha)){
  if(ibex_data_7$Acciones_apertura[i] == 0){
    ibex_data_7$beneficio_dia_ticker[i] <- 0
  }else{
    ibex_data_7$beneficio_dia_ticker[i] <- ibex_data_7$low_high_close[i] - capital_inicial - ibex_data_7$comision_total[i]
  }
}
rm(i)
'código para obtener el beneficio medio por ticker'
# Mean daily profit per ticker, counting only days with an entry signal.
df_resultado_7 <- data.frame(ibex_data_7 %>%
                               select(X, beneficio_dia_ticker, price_departure) %>%
                               filter(price_departure >= salida_price_departure) %>%
                               group_by(X) %>%
                               summarise('Bº medio por operacion' = mean(beneficio_dia_ticker)))
'código para obtener el beneficio acumulado por ticker'
# Accumulated profit per ticker (stored as a nested data.frame column).
df_resultado_7$Beneficio_acumulado <- data.frame(ibex_data_7 %>%
                                                   select(X, beneficio_dia_ticker, price_departure) %>%
                                                   filter(price_departure >= salida_price_departure) %>%
                                                   group_by(X) %>%
                                                   summarise(Beneficio_acumulado= sum(beneficio_dia_ticker)) %>%
                                                   select(Beneficio_acumulado))
'código para obtener porcentaje dias positivos y negativos'
# Positive days, excluding zero-profit days.
df_dias_positivos_7 <- data.frame(ibex_data_7 %>%
                                    select(X, beneficio_dia_ticker) %>%
                                    filter(beneficio_dia_ticker>0) %>%
                                    group_by(X) %>%
                                    summarise(dias_positivos = n()))
# Negative days.
df_dias_negativos_7 <- data.frame(ibex_data_7 %>%
                                    select(X, beneficio_dia_ticker) %>%
                                    filter(beneficio_dia_ticker<0) %>%
                                    group_by(X) %>%
                                    summarise(dias_negativos = n()))
# Full outer join to homogenise the counts: tickers with no positive (or no
# negative) days come out of the merge with NA in that column.
matcheo_dias_7 <- merge(df_dias_positivos_7, df_dias_negativos_7, by = c("X"), all = T)
# Vectorised replacement of the original element-wise loops: an NA count
# simply means "zero days in that category".
matcheo_dias_7$dias_positivos[is.na(matcheo_dias_7$dias_positivos)] <- 0
matcheo_dias_7$dias_negativos[is.na(matcheo_dias_7$dias_negativos)] <- 0
# Share of positive / negative days per ticker.
df_porcen_dias_7 <- data.frame(matcheo_dias_7 %>%
                                 mutate(dias_totales = dias_positivos+dias_negativos, '%_dias_positivos' = dias_positivos/dias_totales , '%_dias_negativos'= dias_negativos/dias_totales) %>%
                                 select('%_dias_positivos', '%_dias_negativos'))
df_resultado_7$porcen_dias_positivos <- df_porcen_dias_7[,1]
df_resultado_7$porcen_dias_negativos <- df_porcen_dias_7[,2]
rm(df_dias_positivos_7,df_dias_negativos_7, matcheo_dias_7, df_porcen_dias_7)
'código para obtener las horquillas'
# Mean intraday spreads per ticker, restricted to entry-signal days:
# upper spread = high - open, lower spread = open - low.
df_horquillas_7 <- data.frame(ibex_data_7 %>%
                                select(X, high, open, low, price_departure) %>%
                                filter(price_departure >= salida_price_departure) %>%
                                mutate(horquilla_sup = high - open, horquilla_inf = open - low) %>%
                                group_by(X) %>%
                                summarise(horquilla_sup_media = mean(horquilla_sup), horquilla_inf_media = mean(horquilla_inf)))
df_resultado_7$Horquilla_superior_media <- df_horquillas_7[,2]
# NOTE: "Horqilla" is a typo in this internal column name; harmless because
# the transposed row names are overwritten with vector_nombres below.
df_resultado_7$Horqilla_inferior_media <- df_horquillas_7[,3]
rm(df_horquillas_7)
'codigo para obtener el numero de operaciones'
# Number of trades per ticker = days with an entry signal.
df_resultado_7$Numero_de_operaciones <- data.frame(ibex_data_7 %>%
                                                     select(X, price_departure) %>%
                                                     group_by(X) %>%
                                                     filter(price_departure >= salida_price_departure) %>%
                                                     summarise(numero_datos = n()) %>%
                                                     select(numero_datos))
# Structure the results as requested by the exercise statement:
# transpose so metrics become rows and tickers become columns.
df_resultado_7T <- data.frame(t(df_resultado_7[-1]))
colnames(df_resultado_7T) <- df_resultado_7[, 1]
vector_nombres <- c('Bº medio por operación','Beneficio acumulado', '% días positivos','% días negativos','Horquilla superior media','Horquilla inferior media','Número de operaciones')
row.names(df_resultado_7T) <- vector_nombres
df_resultado_7T <- round(df_resultado_7T,3)
rm(df_resultado_7)
'ploteo del beneficio medio acumulado vs numero de operaciones'
# Running per-ticker cumulative profit and trade counter (as in exercise 6),
# but computed only over entry-signal rows.
ibex_data_7$beneficio_acum_for <- 0
ibex_data_7$numero_operacion <- 0
# NOTE(review): this permanently filters ibex_data_7 to entry-signal days;
# exercise 8 below reuses the filtered data frame — confirm that is intended.
ibex_data_7 <- ibex_data_7[ibex_data_7$price_departure >= salida_price_departure,]
# Seed the first row so the loop can always look back at i-1.
ibex_data_7$beneficio_acum_for[1] <- ibex_data_7$beneficio_dia_ticker[1]
ibex_data_7$numero_operacion[1] <- 1
for(i in 2:(length(ibex_data_7$X))){
  if (ibex_data_7$X[i-1] == ibex_data_7$X[i]){
    # Same ticker: accumulate profit and trade count.
    ibex_data_7$beneficio_acum_for[i] <- ibex_data_7$beneficio_acum_for[i-1] + ibex_data_7$beneficio_dia_ticker[i]
    ibex_data_7$numero_operacion[i] <- ibex_data_7$numero_operacion[i-1] + 1
  }else{
    # New ticker: restart the running totals.
    ibex_data_7$beneficio_acum_for[i] <- ibex_data_7$beneficio_dia_ticker[i]
    ibex_data_7$numero_operacion[i] <- 1
  }
}
rm(i)
# Plot every ticker at once: cumulative profit vs. trade number.
for(activo in unique(ibex_data_7$X)){
  plot_activo_7 <- data.frame(ibex_data_7 %>%
                                select(X, numero_operacion, beneficio_acum_for) %>%
                                filter(X == activo) %>%
                                select(numero_operacion, beneficio_acum_for))
  plot(plot_activo_7$numero_operacion, plot_activo_7$beneficio_acum_for,
       xlab = 'Nº operacion',
       ylab = 'Beneficio acumulado',
       main = activo,
       type="l")
}
rm(activo, price_departures)
'Ejercicio 8'
'-------------------------------------------------------------'
# Start from exercise 7's merged data (first 9 columns). NOTE(review):
# ibex_data_7 was filtered to entry-signal days above — confirm intended.
ibex_data_8 <- ibex_data_7[,1:9]
'vamos a calcular la media de los datos de cierre del dia y asignarla'
# Daily representative price = mean of open, low, high and close.
ibex_data_8$media_datos_cierre <- (ibex_data_8$open + ibex_data_8$low + ibex_data_8$high + ibex_data_8$close)/4
ibex_data_8 <- data.frame(ibex_data_8 %>%
                            select(Fecha, X,price_departure, open, low, high, close,vol, media_datos_cierre))
# Capital assigned per ticker-day: 0.5% of that day's traded value.
ibex_data_8$capital_asignado <- ibex_data_8$media_datos_cierre * ibex_data_8$vol * 0.005
'calculo del stop_loss & stop_profit objetivo para cada acitvo'
# Auxiliary 2-row data frame (row 1 = stop-loss, row 2 = stop-profit),
# one column per ticker, filled in by the quantile loop below.
vector_tickers_cuantil <- unique(ibex_data_8$X)
stop_loss <- 1:length(vector_tickers_cuantil)
stop_profit <- 1:length(vector_tickers_cuantil)
matriz_cuantil <- rbind(stop_loss, stop_profit)
df_cuantil_stop_loss_profit <- data.frame(matriz_cuantil)
rm(matriz_cuantil, stop_loss, stop_profit)
colnames(df_cuantil_stop_loss_profit) <- vector_tickers_cuantil
'Para calcular el stop_loss y el stop_profit de cada activo monto un vector dinámico que metere en un for y enchufare cada valor en un df'
# For each ticker, accumulate the daily downside spread (open - low) and
# upside spread (high - open); when the ticker changes, store the 80% / 30%
# quantiles of those spreads as that ticker's stop-loss / stop-profit.
# Seed the running vectors with the first row (first ticker).
vector_tickers_dinamico_loss <- c(ibex_data_8$open[1] - ibex_data_8$low[1])
vector_tickers_dinamico_profit <- c(ibex_data_8$high[1] - ibex_data_8$open[1])
j <- 1
for(i in 2:(length(ibex_data_8$X))){
  if (ibex_data_8$X[i] == ibex_data_8$X[i-1]){
    # Same ticker: keep accumulating the two spreads.
    vector_tickers_dinamico_loss <- c(vector_tickers_dinamico_loss,(ibex_data_8$open[i]-ibex_data_8$low[i]))
    vector_tickers_dinamico_profit <- c(vector_tickers_dinamico_profit,(ibex_data_8$high[i]-ibex_data_8$open[i]))
  }else{
    # Ticker changed: close out the previous ticker's quantiles.
    df_cuantil_stop_loss_profit[1,j] <- quantile(vector_tickers_dinamico_loss,0.8)
    df_cuantil_stop_loss_profit[2,j] <- quantile(vector_tickers_dinamico_profit,0.3)
    j = j + 1
    # BUGFIX: the original restarted the vectors with the raw price levels
    # c(low, open) / c(open, high) instead of the spreads, polluting every
    # subsequent ticker's quantiles with two absolute price levels.
    vector_tickers_dinamico_loss <- c(ibex_data_8$open[i] - ibex_data_8$low[i])
    vector_tickers_dinamico_profit <- c(ibex_data_8$high[i] - ibex_data_8$open[i])
  }
}
# The else branch never runs for the last ticker, so close it out here.
df_cuantil_stop_loss_profit[1,j] <- quantile(vector_tickers_dinamico_loss,0.8)
df_cuantil_stop_loss_profit[2,j] <- quantile(vector_tickers_dinamico_profit,0.3)
rm(i,j,vector_tickers_dinamico_loss,vector_tickers_dinamico_profit)
'logica del algoritmo'
# Same buy-at-open / stop-exit logic as exercise 7, but with per-ticker stop
# distances looked up by ticker name in df_cuantil_stop_loss_profit:
#   row 1 = stop-loss distance, row 2 = stop-profit distance.
ibex_data_8$Acciones_apertura <- ibex_data_8$capital_asignado/ibex_data_8$open
ibex_data_8$low_high_close <- 0
ibex_data_8$comision_compra <- prima_broker*ibex_data_8$capital_asignado
for(i in 1:length(ibex_data_8$Fecha)){
  if(ibex_data_8$low[i] <= (ibex_data_8$open[i]-(df_cuantil_stop_loss_profit[1,(ibex_data_8$X[i])]))){
    # Stop-loss hit: sell at open minus the ticker's stop-loss distance.
    ibex_data_8$low_high_close[i] <- (ibex_data_8$Acciones_apertura[i]*(ibex_data_8$open[i]-(df_cuantil_stop_loss_profit[1,(ibex_data_8$X[i])])))
  }else if(ibex_data_8$high[i] >= (ibex_data_8$open[i]+(df_cuantil_stop_loss_profit[2,(ibex_data_8$X[i])]))){
    # Stop-profit hit: sell at open plus the ticker's stop-profit distance.
    ibex_data_8$low_high_close[i] <- (ibex_data_8$Acciones_apertura[i]*(ibex_data_8$open[i]+(df_cuantil_stop_loss_profit[2,(ibex_data_8$X[i])])))
  }else{
    # Neither level touched: sell at the day's close.
    ibex_data_8$low_high_close[i] <- ibex_data_8$Acciones_apertura[i]*ibex_data_8$close[i]
  }
}
rm(i)
ibex_data_8$comision_venta <- prima_broker*ibex_data_8$low_high_close
ibex_data_8$comision_total <- ibex_data_8$comision_compra + ibex_data_8$comision_venta
# Daily P&L: sale proceeds minus assigned capital and total fees.
ibex_data_8$beneficio_dia_ticker <- ibex_data_8$low_high_close - ibex_data_8$capital_asignado - ibex_data_8$comision_total
'código para obtener el beneficio medio por ticker'
# Mean daily profit per ticker (every row of ibex_data_8 is a trade here).
df_resultado_8 <- data.frame(ibex_data_8 %>%
                               select(X, beneficio_dia_ticker) %>%
                               group_by(X) %>%
                               summarise('Bº medio por operacion' = mean(beneficio_dia_ticker)))
'código para obtener el beneficio acumulado por ticker'
# Accumulated profit per ticker (stored as a nested data.frame column).
df_resultado_8$Beneficio_acumulado <- data.frame(ibex_data_8 %>%
                                                   select(X, beneficio_dia_ticker) %>%
                                                   group_by(X) %>%
                                                   summarise(Beneficio_acumulado= sum(beneficio_dia_ticker)) %>%
                                                   select(Beneficio_acumulado))
'código para obtener porcentaje dias positivos y negativos'
# Positive days, excluding zero-profit days.
df_dias_positivos_8 <- data.frame(ibex_data_8 %>%
                                    select(X, beneficio_dia_ticker) %>%
                                    filter(beneficio_dia_ticker>0) %>%
                                    group_by(X) %>%
                                    summarise(dias_positivos = n()))
# Negative days.
df_dias_negativos_8 <- data.frame(ibex_data_8 %>%
                                    select(X, beneficio_dia_ticker) %>%
                                    filter(beneficio_dia_ticker<0) %>%
                                    group_by(X) %>%
                                    summarise(dias_negativos = n()))
# Full outer join to homogenise the counts: tickers with no positive (or no
# negative) days come out of the merge with NA in that column.
matcheo_dias_8 <- merge(df_dias_positivos_8, df_dias_negativos_8, by = c("X"), all = T)
# Vectorised replacement of the original element-wise loops: an NA count
# simply means "zero days in that category".
matcheo_dias_8$dias_positivos[is.na(matcheo_dias_8$dias_positivos)] <- 0
matcheo_dias_8$dias_negativos[is.na(matcheo_dias_8$dias_negativos)] <- 0
# Share of positive / negative days per ticker.
df_porcen_dias_8 <- data.frame(matcheo_dias_8 %>%
                                 mutate(dias_totales = dias_positivos+dias_negativos, '%_dias_positivos' = dias_positivos/dias_totales , '%_dias_negativos'= dias_negativos/dias_totales) %>%
                                 select('%_dias_positivos', '%_dias_negativos'))
df_resultado_8$porcen_dias_positivos <- df_porcen_dias_8[,1]
df_resultado_8$porcen_dias_negativos <- df_porcen_dias_8[,2]
rm(df_dias_positivos_8,df_dias_negativos_8, matcheo_dias_8, df_porcen_dias_8)
'código para obtener las horquillas'
# Mean intraday spreads per ticker: upper = high - open, lower = open - low.
df_horquillas_8 <- data.frame(ibex_data_8 %>%
                                select(X, high, open, low, price_departure) %>%
                                mutate(horquilla_sup = high - open, horquilla_inf = open - low) %>%
                                group_by(X) %>%
                                summarise(horquilla_sup_media = mean(horquilla_sup), horquilla_inf_media = mean(horquilla_inf)))
df_resultado_8$Horquilla_superior_media <- df_horquillas_8[,2]
df_resultado_8$Horquilla_inferior_media <- df_horquillas_8[,3]
rm(df_horquillas_8)
'codigo para obtener el numero de operaciones'
# Number of trades per ticker = number of rows.
df_resultado_8$Numero_de_operaciones <- data.frame(ibex_data_8 %>%
                                                     select(X, price_departure) %>%
                                                     group_by(X) %>%
                                                     summarise(numero_datos = n()) %>%
                                                     select(numero_datos))
'codigo para obtener el beneficio medio por euro invertido'
# Total capital deployed per ticker over the whole period.
df_resultado_8$total_invertido <- data.frame(ibex_data_8 %>%
                                               select(X, capital_asignado) %>%
                                               group_by(X) %>%
                                               summarise(total_invertido = sum(capital_asignado)) %>%
                                               select(total_invertido))
# NOTE: both operands below are nested data.frame columns, so the division
# is element-wise between data.frames. The parenthesised `=` also creates a
# global variable `Beneficio_medio_euro` as a side effect (rm'd later).
df_resultado_8$Beneficio_medio_euro <- (Beneficio_medio_euro = df_resultado_8$Beneficio_acumulado/df_resultado_8$total_invertido)
df_resultado_8$importe_medio_operacion <- df_resultado_8$total_invertido/df_resultado_8$Numero_de_operaciones
# Structure the results as requested by the exercise statement.
# `Bº.medio.por.operacion` is the syntactic column name data.frame() derived
# from the original label 'Bº medio por operacion'.
df_resultado_8 <- data.frame(df_resultado_8 %>%
                               select(X,
                                      importe_medio_operacion,
                                      Bº.medio.por.operacion,
                                      Beneficio_medio_euro,
                                      Beneficio_acumulado,
                                      porcen_dias_positivos,
                                      porcen_dias_negativos,
                                      Horquilla_superior_media,
                                      Horquilla_inferior_media,
                                      Numero_de_operaciones))
# Transpose: metrics as rows, tickers as columns.
df_resultado_8T <- data.frame(t(df_resultado_8[-1]))
colnames(df_resultado_8T) <- df_resultado_8[,1]
# Detach the last row (number of trades) so the stop-loss / stop-profit
# quantile rows can be inserted before it, then reattach it at the end.
df_resultado_8T_1 <- df_resultado_8T[9,]
df_resultado_8T <- df_resultado_8T[1:8,]
df_resultado_8T <- rbind(df_resultado_8T,df_cuantil_stop_loss_profit, df_resultado_8T_1)
df_resultado_8T <- round(df_resultado_8T,3)
vector_nombres <- c('Importe medio por operación','Bº medio por operación','Bº medio por euro invertido','Beneficio acumulado', '% días positivos','% días negativos','Horquilla superior media','Horquilla inferior media','Stop profit objetivo','Stop loss','Número de operaciones')
row.names(df_resultado_8T) <- vector_nombres
# Final cleanup of all intermediates and globals used across the exercises.
rm(df_resultado_8,df_resultado_8T_1, df_cuantil_stop_loss_profit, df_price_departures,ibex_data_7, ibex_data_8, Beneficio_medio_euro)
rm(capital_inicial, prima_broker, salida_price_departure, vector_nombres, vector_tickers_cuantil)
rm(titanic, plot_activo, plot_activo_7)
|
\name{GeoVariogram}
\alias{GeoVariogram}
\encoding{UTF-8}
\title{Empirical Variogram(variants) estimation}
\description{
The function returns an empirical estimate of the variogram for spatio (temporal) and bivariate
random fields.
}
\usage{
GeoVariogram(data, coordx, coordy=NULL, coordt=NULL, coordx_dyn=NULL, cloud=FALSE,
distance='Eucl', grid=FALSE, maxdist=NULL,
maxtime=NULL, numbins=NULL, radius=6378.388,
type='variogram',bivariate=FALSE)
}
\arguments{
\item{data}{A \eqn{d}{d}-dimensional vector (a single spatial realisation) or a (\eqn{n \times d}{n x d})-matrix
(\eqn{n} iid spatial realisations) or a (\eqn{d \times d}{d x d})-matrix (a single spatial realisation on regular grid)
or an (\eqn{d \times d \times n}{d x d x n})-array (\eqn{n} iid spatial realisations on regular grid) or a
(\eqn{t \times d}{t x d})-matrix (a single spatial-temporal realisation) or an (\eqn{t \times d \times n }{t x d x n})-array
(\eqn{n} iid spatial-temporal realisations) or or an (\eqn{d \times d \times t \times n }{d x d x t})-array
(a single spatial-temporal realisation on regular grid) or an (\eqn{d \times d \times t \times n }{d x d x t x n})-array
(\eqn{n} iid spatial-temporal realisations on regular grid). See \code{\link{GeoFit}} for details.}
\item{coordx}{A numeric (\eqn{d \times 2}{d x 2})-matrix (where
\code{d} is the number of spatial sites) assigning 2-dimensions of spatial coordinates or a numeric \eqn{d}{d}-dimensional vector assigning
1-dimension of spatial coordinates. Coordinates on a sphere for a fixed radius \code{radius}
are passed in lon/lat format expressed in decimal degrees.}
\item{coordy}{A numeric vector assigning 1-dimension of
spatial coordinates; \code{coordy} is interpreted only if \code{coordx} is a numeric
vector or \code{grid=TRUE} otherwise it will be ignored. Optional argument, the default is \code{NULL} then \code{coordx} is expected to
be numeric a (\eqn{d \times 2}{d x 2})-matrix.}
\item{coordt}{A numeric vector assigning 1-dimension of
temporal coordinates. Optional argument, the default is \code{NULL} then a spatial random field is expected.}
\item{coordx_dyn}{A list of \eqn{m} numeric (\eqn{d_t \times 2}{d x 2})-matrices
containing dynamical (in time) spatial coordinates. Optional argument, the default is \code{NULL}
}
\item{cloud}{Logical; if \code{TRUE} the variogram cloud is computed,
otherwise if \code{FALSE} (the default) the empirical (binned)
variogram is returned.}
\item{distance}{String; the name of the spatial distance. The default
is \code{Eucl}, the euclidean distance. See the Section
\bold{Details} of \code{\link{GeoFit}}.}
\item{grid}{Logical; if \code{FALSE} (the default) the data
are interpreted as spatial or spatial-temporal realisations on a set of
non-equispaced spatial sites.}
\item{maxdist}{A numeric value denoting the spatial maximum distance,
see the Section \bold{Details}.}
\item{maxtime}{A numeric value denoting the temporal maximum distance,
see the Section \bold{Details}.}
\item{numbins}{A numeric value denoting the numbers of bins, see the
Section \bold{Details}.}
  \item{radius}{Numeric; a value indicating the radius of the sphere when
  using the great circle distance. Default value is the radius of the earth in Km (i.e. 6378.388)}
\item{type}{A String denoting the type of variogram. Two options
are available: \code{variogram},
and \code{lorelogram}. It is returned respectively,
the standard variogram with the first (Gaussian responses),
lorelogram with the fourth (Binary data).}
  \item{bivariate}{Logical; if \code{FALSE} (the default) the data
  are interpreted as univariate spatial or spatial-temporal realisations.
  Otherwise they are interpreted as a realization from a bivariate field.}
}
\details{
We briefly report the definitions of variogram used in this function.
In the case of a spatial Gaussian random field
the sample \code{variogram} estimator is defined by
\deqn{\hat{\gamma}(h) = 0.5 \sum_{x_i, x_j \in N(h)} (Z(x_i) - Z(x_j))^2 / |N(h)|}
where \eqn{N(h)} is the set of all the sample pairs whose distances fall into a tolerance region with size \eqn{h}
(equispaced intervals are considered).
Observe, that in the literature often the above definition is termed semivariogram (see e.g. the first reference).
Nevertheless, here this definition has been used in order to be consistent with the variogram definition used for the extremes
(see e.g. the third reference).
In the case of a spatial binary random field, the sample \code{lorelogram}
estimator (the analogue of the correlation) is defined by
\deqn{\hat{L}(h) = (N_{11}(h) N_{00}(h) )/ (N_{01}(h) N_{10}(h)).}
where \eqn{N_{11}(h)} is the number of pairs who are both equal to
\eqn{1}{1} and that falls in the bin
\eqn{h}{h}. Similarly are defined the other quantities.
In the case of a spatio-temporal Gaussian random field the sample
\code{variogram} estimator is defined by
\deqn{\hat{\gamma}(h, u) = 0.5 \sum_{(x_i, l), (x_j, k) \in N(h, u)} (Z(x_i, l) - Z(x_j, k))^2 / |N(h, u)|}
where \eqn{N(h, u)}{N(h, u)} is the set of all the sample pairs whose
spatial distances fall into a tolerance region with size \eqn{h}{h}
and \eqn{|k - l| = u}{\|k-l\|=u}. Note, that \eqn{Z(x_i, l)}{Z(x_i,l)}
is the observation at site \eqn{x_i}{x_i} and time
\eqn{l}{l}. Taking this in mind and given the above definition of
lorelogram, the spatio-temporal extension is straightforward.
The \code{numbins} parameter indicates the number of adjacent
intervals to consider in order to group distances with which to
compute the (weighted) least squares.
The \code{maxdist} parameter indicates the maximum spatial distance below which
the shorter distances will be considered in the calculation of
the (weighted) least squares.
The \code{maxtime} parameter indicates the maximum temporal distance below which
the shorter distances will be considered in the calculation of
the (weighted) least squares.
}
\value{
Returns an object of class \code{Variogram}.
An object of class \code{Variogram} is a list containing
at most the following components:
\item{bins}{Adjacent intervals of grouped spatial distances if
\code{cloud=FALSE}. Otherwise if \code{cloud=TRUE} all the spatial pairwise distances;}
\item{bint}{Adjacent intervals of grouped temporal distances if
\code{cloud=FALSE}. Otherwise if \code{cloud=TRUE} all the temporal pairwise distances;}
\item{cloud}{If the variogram cloud is returned (\code{TRUE}) or the
empirical variogram (\code{FALSE});}
\item{centers}{The centers of the spatial bins;}
\item{distance}{The type of spatial distance;}
\item{lenbins}{The number of pairs in each spatial bin;}
\item{lenbinst}{The number of pairs in each spatial-temporal bin;}
\item{lenbint}{The number of pairs in each temporal bin;}
\item{maxdist}{The maximum spatial distance used for the calculation of the variogram.
If no spatial distance is specified then it is NULL;}
\item{maxtime}{The maximum temporal distance used for the calculation of the variogram.
If no temporal distance is specified then it is NULL;}
\item{variograms}{The empirical spatial variogram;}
\item{variogramst}{The empirical spatial-temporal variogram;}
\item{variogramt}{The empirical temporal variogram;}
\item{type}{The type of estimated variogram}
}
\references{
Cressie, N. A. C. (1993) \emph{Statistics for Spatial Data}. New York: Wiley.
Gaetan, C. and Guyon, X. (2010) \emph{Spatial Statistics and Modelling}.
Spring Verlang, New York.
Heagerty, P. J., and Zeger, S. L. (1998). Lorelogram: A Regression
Approach to Exploring Dependence in Longitudinal Categorical
Responses.
\emph{Journal of the American Statistical Association},
\bold{93}(441), 150--162
}
\seealso{\code{\link{GeoFit}}}
\author{Moreno Bevilacqua, \email{moreno.bevilacqua@uv.cl},\url{https://sites.google.com/a/uv.cl/moreno-bevilacqua/home},
Víctor Morales Oñate, \email{victor.morales@uv.cl}
}
\examples{
library(GeoModels)
################################################################
###
### Example 1. Empirical estimation of the semi-variogram from a
### spatial Gaussian random field with exponential correlation.
###
###############################################################
set.seed(514)
# Set the coordinates of the sites:
x <- runif(200, 0, 1)
y <- runif(200, 0, 1)
coords <- cbind(x,y)
# Set the model's parameters:
corrmodel <- "exponential"
mean <- 0
sill <- 1
nugget <- 0
scale <- 0.3/3
# Simulation of the spatial Gaussian random field:
data <- GeoSim(coordx=coords, corrmodel=corrmodel, param=list(mean=mean,
sill=sill, nugget=nugget, scale=scale))$data
# Empirical spatial semi-variogram estimation:
fit <- GeoVariogram(coordx=coords,data=data,maxdist=0.6)
# Results:
plot(fit$centers, fit$variograms, xlab='h', ylab=expression(gamma(h)),
ylim=c(0, max(fit$variograms)), pch=20,
main="Semi-variogram")
################################################################
###
### Example 2. Empirical estimation of the variogram from a
### spatio-temporal Gaussian random fields with Gneiting
### correlation function.
###
###############################################################
set.seed(331)
# Define the temporal sequence:
# Set the coordinates of the sites:
x <- runif(400, 0, 1)
y <- runif(400, 0, 1)
coords <- cbind(x,y)
times <- seq(1,5,1)
# Simulation of a spatio-temporal Gaussian random field:
data <- GeoSim(coordx=coords, coordt=times, corrmodel="gneiting",
param=list(mean=0,scale_s=0.1,scale_t=0.1,sill=1,
nugget=0,power_s=1,power_t=1,sep=0.5))$data
# Empirical spatio-temporal semi-variogram estimation:
fit <- GeoVariogram(data=data, coordx=coords, coordt=times, maxtime=5,maxdist=0.5)
# Results: Marginal spatial empirical semi-variogram
par(mfrow=c(2,2), mai=c(.5,.5,.3,.3), mgp=c(1.4,.5, 0))
plot(fit$centers, fit$variograms, xlab='h', ylab=expression(gamma(h)),
ylim=c(0, max(fit$variograms)), xlim=c(0, max(fit$centers)),
pch=20,main="Marginal spatial semi-variogram",cex.axis=.8)
# Results: Marginal temporal empirical semi-variogram
plot(fit$bint, fit$variogramt, xlab='t', ylab=expression(gamma(t)),
ylim=c(0, max(fit$variogramt)),xlim=c(0,max(fit$bint)),
pch=20,main="Marginal temporal semi-variogram",cex.axis=.8)
# Building space-time semi-variogram
st.vario <- matrix(fit$variogramst,length(fit$centers),length(fit$bint))
st.vario <- cbind(c(0,fit$variograms), rbind(fit$variogramt,st.vario))
# Results: 3d Spatio-temporal semi-variogram
require(scatterplot3d)
st.grid <- expand.grid(c(0,fit$centers),c(0,fit$bint))
scatterplot3d(st.grid[,1], st.grid[,2], c(st.vario),
highlight.3d=TRUE, xlab="h",ylab="t",
zlab=expression(gamma(h,t)), pch=20,
main="Space-time semi-variogram",cex.axis=.7,
mar=c(2,2,2,2), mgp=c(0,0,0),
cex.lab=.7)
# A smoothed version
par(mai=c(.2,.2,.2,.2),mgp=c(1,.3, 0))
persp(c(0,fit$centers), c(0,fit$bint), st.vario,
xlab="h", ylab="u", zlab=expression(gamma(h,u)),
ltheta=90, shade=0.75, ticktype="detailed", phi=30,
theta=30,main="Space-time semi-variogram",cex.axis=.8,
cex.lab=.8)
################################################################
###
### Example 3. Empirical estimation of the (cross) semivariograms
### from a bivariate Gaussian random fields with Matern
### correlation function.
###
###############################################################
# Simulation of a bivariate spatial Gaussian random field:
set.seed(29)
# Define the spatial-coordinates of the points:
x <- runif(200, 0, 1)
set.seed(7)
y <- runif(200, 0, 1)
coords=cbind(x,y)
# Simulation of a bivariate Gaussian Random field
# with matern (cross) covariance function
param=list(mean_1=0,mean_2=0,scale_1=0.15/3,scale_2=0.2/3,scale_12=0.15/3,
sill_1=1,sill_2=1,nugget_1=0,nugget_2=0,
smooth_1=0.5,smooth_12=0.5,smooth_2=0.5,pcol=-0.45)
data <- GeoSim(coordx=coords, corrmodel="Bi_matern", param=param)$data
# Empirical semi-(cross)variogram estimation:
biv_vario=GeoVariogram(data,coordx=coords, bivariate=TRUE,maxdist=c(0.5,0.5,0.5))
# Variograms plots
par(mfrow=c(2,2))
plot(biv_vario$centers,biv_vario$variograms[1,],pch=20,xlab="h",ylim=c(0,1.2),
ylab="",main=expression(gamma[11](h)))
plot(biv_vario$centers,biv_vario$variogramst,pch=20,xlab="h",
ylab="",main=expression(gamma[12](h)))
plot(biv_vario$centers,biv_vario$variogramst,pch=20,xlab="h",ylab="",
main=expression(gamma[21](h)))
plot(biv_vario$centers,biv_vario$variograms[2,],pch=20,xlab="h",ylim=c(0,1.2),
ylab="",main=expression(gamma[22](h)))
}
\keyword{Variogram}
|
/man/GeoVariogram.Rd
|
no_license
|
morenobevilacqua/GeoModels-OCL
|
R
| false
| false
| 13,084
|
rd
|
\name{GeoVariogram}
\alias{GeoVariogram}
\encoding{UTF-8}
\title{Empirical Variogram(variants) estimation}
\description{
The function returns an empirical estimate of the variogram for spatio (temporal) and bivariate
random fields.
}
\usage{
GeoVariogram(data, coordx, coordy=NULL, coordt=NULL, coordx_dyn=NULL, cloud=FALSE,
distance='Eucl', grid=FALSE, maxdist=NULL,
maxtime=NULL, numbins=NULL, radius=6378.388,
type='variogram',bivariate=FALSE)
}
\arguments{
\item{data}{A \eqn{d}{d}-dimensional vector (a single spatial realisation) or a (\eqn{n \times d}{n x d})-matrix
(\eqn{n} iid spatial realisations) or a (\eqn{d \times d}{d x d})-matrix (a single spatial realisation on regular grid)
or an (\eqn{d \times d \times n}{d x d x n})-array (\eqn{n} iid spatial realisations on regular grid) or a
(\eqn{t \times d}{t x d})-matrix (a single spatial-temporal realisation) or an (\eqn{t \times d \times n }{t x d x n})-array
(\eqn{n} iid spatial-temporal realisations) or or an (\eqn{d \times d \times t \times n }{d x d x t})-array
(a single spatial-temporal realisation on regular grid) or an (\eqn{d \times d \times t \times n }{d x d x t x n})-array
(\eqn{n} iid spatial-temporal realisations on regular grid). See \code{\link{GeoFit}} for details.}
\item{coordx}{A numeric (\eqn{d \times 2}{d x 2})-matrix (where
\code{d} is the number of spatial sites) assigning 2-dimensions of spatial coordinates or a numeric \eqn{d}{d}-dimensional vector assigning
1-dimension of spatial coordinates. Coordinates on a sphere for a fixed radius \code{radius}
are passed in lon/lat format expressed in decimal degrees.}
\item{coordy}{A numeric vector assigning 1-dimension of
spatial coordinates; \code{coordy} is interpreted only if \code{coordx} is a numeric
vector or \code{grid=TRUE} otherwise it will be ignored. Optional argument, the default is \code{NULL} then \code{coordx} is expected to
be numeric a (\eqn{d \times 2}{d x 2})-matrix.}
\item{coordt}{A numeric vector assigning 1-dimension of
temporal coordinates. Optional argument, the default is \code{NULL} then a spatial random field is expected.}
\item{coordx_dyn}{A list of \eqn{m} numeric (\eqn{d_t \times 2}{d x 2})-matrices
containing dynamical (in time) spatial coordinates. Optional argument, the default is \code{NULL}
}
\item{cloud}{Logical; if \code{TRUE} the variogram cloud is computed,
otherwise if \code{FALSE} (the default) the empirical (binned)
variogram is returned.}
\item{distance}{String; the name of the spatial distance. The default
is \code{Eucl}, the euclidean distance. See the Section
\bold{Details} of \code{\link{GeoFit}}.}
\item{grid}{Logical; if \code{FALSE} (the default) the data
are interpreted as spatial or spatial-temporal realisations on a set of
non-equispaced spatial sites.}
\item{maxdist}{A numeric value denoting the spatial maximum distance,
see the Section \bold{Details}.}
\item{maxtime}{A numeric value denoting the temporal maximum distance,
see the Section \bold{Details}.}
\item{numbins}{A numeric value denoting the numbers of bins, see the
Section \bold{Details}.}
\item{radius}{Numeric; a value indicating the radius of the sphere when
using the great circle distance. Default value is the radius of the earth in Km (i.e. 6378.88)}
\item{type}{A String denoting the type of variogram. Two options
are available: \code{variogram},
and \code{lorelogram}. It is returned respectively,
the standard variogram with the first (Gaussian responses),
lorelogram with the fourth (Binary data).}
\item{bivariate}{Logical; if \code{FALSE} (the default) the data
are interpreted as univariate spatial or spatial-temporal realisations.
Otherwise they are interpreted as a realization from a bivariate field.}
}
\details{
We briefly report the definitions of variogram used in this function.
In the case of a spatial Gaussian random field
the sample \code{variogram} estimator is defined by
\deqn{\hat{\gamma}(h) = 0.5 \sum_{x_i, x_j \in N(h)} (Z(x_i) - Z(x_j))^2 / |N(h)|}
where \eqn{N(h)} is the set of all the sample pairs whose distances fall into a tolerance region with size \eqn{h}
(equispaced intervals are considered).
Observe, that in the literature often the above definition is termed semivariogram (see e.g. the first reference).
Nevertheless, here this definition has been used in order to be consistent with the variogram definition used for the extremes
(see e.g. the third reference).
In the case of a spatial binary random field, the sample \code{lorelogram}
estimator (the analogue of the correlation) is defined by
\deqn{\hat{L}(h) = (N_{11}(h) N_{00}(h) )/ (N_{01}(h) N_{10}(h)).}
where \eqn{N_{11}(h)} is the number of pairs who are both equal to
\eqn{1}{1} and that falls in the bin
\eqn{h}{h}. Similarly are defined the other quantities.
In the case of a spatio-temporal Gaussian random field the sample
\code{variogram} estimator is defined by
\deqn{\hat{\gamma}(h, u) = 0.5 \sum_{(x_i, l), (x_j, k) \in N(h, u)} (Z(x_i, l) - Z(x_j, k))^2 / |N(h, u)|}
where \eqn{N(h, u)}{N(h, u)} is the set of all the sample pairs whose
spatial distances fall into a tolerance region with size \eqn{h}{h}
and \eqn{|k - l| = u}{\|k-l\|=u}. Note, that \eqn{Z(x_i, l)}{Z(x_i,l)}
is the observation at site \eqn{x_i}{x_i} and time
\eqn{l}{l}. Taking this in mind and given the above definition of
lorelogram, the spatio-temporal extension is straightforward.
The \code{numbins} parameter indicates the number of adjacent
intervals to consider in order to grouped distances with which to
compute the (weighted) least squares.
The \code{maxdist} parameter indicates the maximum spatial distance below which
the shorter distances will be considered in the calculation of
the (weighted) least squares.
The \code{maxtime} parameter indicates the maximum temporal distance below which
the shorter distances will be considered in the calculation of
the (weighted) least squares.
}
\value{
Returns an object of class \code{Variogram}.
An object of class \code{Variogram} is a list containing
at most the following components:
\item{bins}{Adjacent intervals of grouped spatial distances if
\code{cloud=FALSE}. Otherwise if \code{cloud=TRUE} all the spatial pairwise distances;}
\item{bint}{Adjacent intervals of grouped temporal distances if
\code{cloud=FALSE}. Otherwise if \code{cloud=TRUE} all the temporal pairwise distances;}
\item{cloud}{If the variogram cloud is returned (\code{TRUE}) or the
empirical variogram (\code{FALSE});}
\item{centers}{The centers of the spatial bins;}
\item{distance}{The type of spatial distance;}
\item{lenbins}{The number of pairs in each spatial bin;}
\item{lenbinst}{The number of pairs in each spatial-temporal bin;}
\item{lenbint}{The number of pairs in each temporal bin;}
\item{maxdist}{The maximum spatial distance used for the calculation of the variogram.
If no spatial distance is specified then it is NULL;}
\item{maxtime}{The maximum temporal distance used for the calculation of the variogram.
If no temporal distance is specified then it is NULL;}
\item{variograms}{The empirical spatial variogram;}
\item{variogramst}{The empirical spatial-temporal variogram;}
\item{variogramt}{The empirical temporal variogram;}
\item{type}{The type of estimated variogram}
}
\references{
Cressie, N. A. C. (1993) \emph{Statistics for Spatial Data}. New York: Wiley.
Gaetan, C. and Guyon, X. (2010) \emph{Spatial Statistics and Modelling}.
Springer Verlag, New York.
Heagerty, P. J., and Zeger, S. L. (1998). Lorelogram: A Regression
Approach to Exploring Dependence in Longitudinal Categorical
Responses.
\emph{Journal of the American Statistical Association},
\bold{93}(441), 150--162
}
\seealso{\code{\link{GeoFit}}}
\author{Moreno Bevilacqua, \email{moreno.bevilacqua@uv.cl},\url{https://sites.google.com/a/uv.cl/moreno-bevilacqua/home},
Víctor Morales Oñate, \email{victor.morales@uv.cl}
}
\examples{
library(GeoModels)
################################################################
###
### Example 1. Empirical estimation of the semi-variogram from a
### spatial Gaussian random field with exponential correlation.
###
###############################################################
set.seed(514)
# Set the coordinates of the sites:
x <- runif(200, 0, 1)
y <- runif(200, 0, 1)
coords <- cbind(x,y)
# Set the model's parameters:
corrmodel <- "exponential"
mean <- 0
sill <- 1
nugget <- 0
scale <- 0.3/3
# Simulation of the spatial Gaussian random field:
data <- GeoSim(coordx=coords, corrmodel=corrmodel, param=list(mean=mean,
sill=sill, nugget=nugget, scale=scale))$data
# Empirical spatial semi-variogram estimation:
fit <- GeoVariogram(coordx=coords,data=data,maxdist=0.6)
# Results:
plot(fit$centers, fit$variograms, xlab='h', ylab=expression(gamma(h)),
ylim=c(0, max(fit$variograms)), pch=20,
main="Semi-variogram")
################################################################
###
### Example 2. Empirical estimation of the variogram from a
### spatio-temporal Gaussian random fields with Gneiting
### correlation function.
###
###############################################################
set.seed(331)
# Define the temporal sequence:
# Set the coordinates of the sites:
x <- runif(400, 0, 1)
y <- runif(400, 0, 1)
coords <- cbind(x,y)
times <- seq(1,5,1)
# Simulation of a spatio-temporal Gaussian random field:
data <- GeoSim(coordx=coords, coordt=times, corrmodel="gneiting",
param=list(mean=0,scale_s=0.1,scale_t=0.1,sill=1,
nugget=0,power_s=1,power_t=1,sep=0.5))$data
# Empirical spatio-temporal semi-variogram estimation:
fit <- GeoVariogram(data=data, coordx=coords, coordt=times, maxtime=5,maxdist=0.5)
# Results: Marginal spatial empirical semi-variogram
par(mfrow=c(2,2), mai=c(.5,.5,.3,.3), mgp=c(1.4,.5, 0))
plot(fit$centers, fit$variograms, xlab='h', ylab=expression(gamma(h)),
ylim=c(0, max(fit$variograms)), xlim=c(0, max(fit$centers)),
pch=20,main="Marginal spatial semi-variogram",cex.axis=.8)
# Results: Marginal temporal empirical semi-variogram
plot(fit$bint, fit$variogramt, xlab='t', ylab=expression(gamma(t)),
ylim=c(0, max(fit$variogramt)),xlim=c(0,max(fit$bint)),
pch=20,main="Marginal temporal semi-variogram",cex.axis=.8)
# Building space-time semi-variogram
st.vario <- matrix(fit$variogramst,length(fit$centers),length(fit$bint))
st.vario <- cbind(c(0,fit$variograms), rbind(fit$variogramt,st.vario))
# Results: 3d Spatio-temporal semi-variogram
require(scatterplot3d)
st.grid <- expand.grid(c(0,fit$centers),c(0,fit$bint))
scatterplot3d(st.grid[,1], st.grid[,2], c(st.vario),
highlight.3d=TRUE, xlab="h",ylab="t",
zlab=expression(gamma(h,t)), pch=20,
main="Space-time semi-variogram",cex.axis=.7,
mar=c(2,2,2,2), mgp=c(0,0,0),
cex.lab=.7)
# A smoothed version
par(mai=c(.2,.2,.2,.2),mgp=c(1,.3, 0))
persp(c(0,fit$centers), c(0,fit$bint), st.vario,
xlab="h", ylab="u", zlab=expression(gamma(h,u)),
ltheta=90, shade=0.75, ticktype="detailed", phi=30,
theta=30,main="Space-time semi-variogram",cex.axis=.8,
cex.lab=.8)
################################################################
###
### Example 3. Empirical estimation of the (cross) semivariograms
### from a bivariate Gaussian random fields with Matern
### correlation function.
###
###############################################################
# Simulation of a bivariate spatial Gaussian random field:
set.seed(29)
# Define the spatial-coordinates of the points:
x <- runif(200, 0, 1)
set.seed(7)
y <- runif(200, 0, 1)
coords=cbind(x,y)
# Simulation of a bivariate Gaussian Random field
# with matern (cross) covariance function
param=list(mean_1=0,mean_2=0,scale_1=0.15/3,scale_2=0.2/3,scale_12=0.15/3,
sill_1=1,sill_2=1,nugget_1=0,nugget_2=0,
smooth_1=0.5,smooth_12=0.5,smooth_2=0.5,pcol=-0.45)
data <- GeoSim(coordx=coords, corrmodel="Bi_matern", param=param)$data
# Empirical semi-(cross)variogram estimation:
biv_vario=GeoVariogram(data,coordx=coords, bivariate=TRUE,maxdist=c(0.5,0.5,0.5))
# Variograms plots
par(mfrow=c(2,2))
plot(biv_vario$centers,biv_vario$variograms[1,],pch=20,xlab="h",ylim=c(0,1.2),
ylab="",main=expression(gamma[11](h)))
plot(biv_vario$centers,biv_vario$variogramst,pch=20,xlab="h",
ylab="",main=expression(gamma[12](h)))
plot(biv_vario$centers,biv_vario$variogramst,pch=20,xlab="h",ylab="",
main=expression(gamma[21](h)))
plot(biv_vario$centers,biv_vario$variograms[2,],pch=20,xlab="h",,ylim=c(0,1.2),
ylab="",main=expression(gamma[22](h)))
}
\keyword{Variogram}
|
# Track20, Avenir Health
# Kristin Bietsch, PhD and Emily Sonneveldt, PhD
#
# Self-injection (SI) scale-up model: starting from country baseline
# contraceptive-use projections, estimates how many injectable (IM) users,
# short-term-method (STM) users, and non-users take up subcutaneous (SC)
# self-injectable contraception after its introduction.
library(dplyr)
library(tidyr)
library(ggplot2)
# NOTE(review): machine-specific working directory; the script only runs
# as-is on this machine.
setwd("C:/Users/KristinBietsch/files/Track20/Win Requests/Self Injections")
# Baseline Data
# One row per country (iso) and Year; expected columns include Injection, STM,
# LTM, NonUser, Total, ReasonNotUsingSI, discon_inj, discon_stm, year_sc,
# year_si, high -- TODO confirm against ModelData062220.csv.
baseline <- read.csv("ModelData062220.csv")
# Adaptable Parameters
# These are expected to already exist in the workspace when the script runs;
# the commented assignments below document typical values.
#Proportion of Injectable users that will switch to self-injectable contraceptives
#inj_to_si <- .16
#Proportion of short-term method users that will switch to self-injectable contraceptives
#stm_to_si <- .08
#Proportion of Non-users, whose reasons for not using would be alleviated by benefits of self-injection (decreased side-effects, increased availability, ease of use) that will uptake self-injectable contraceptives
#nu_to_si <- .04
# SI bonus- increase in each parameter when SI of SC becomes fully available (partial bonus awarded during scale up)
#si_bonus <- .01
# New Adjustable Parameter
# Year SC reaches full scale
#year_sc <- 2020
# Year SI reaches maximum proportion of SC use
# if the two years are different, assume 0% in first year and max% in last year and linearly interpolate
#year_si <- 2021
#max_siofsc <- .4
##############################################################
# Share of SC use that is self-injected (vs provider-administered):
# 0 up to the SC launch year, linearly interpolated up to max_siofsc by the
# SI target year, and constant at max_siofsc afterwards.
# share_bonus rescales that share to [0, 1] for use as a bonus multiplier.
baseline$SC <- baseline$year_sc
baseline$SI <- baseline$year_si
baseline <- baseline %>% mutate(time_sc_si = SI-SC,
years_after_sc=Year-SC,
share_between_scsi = (years_after_sc/time_sc_si)*max_siofsc,
per_si =case_when(Year>=SI ~ max_siofsc,
Year<=SC ~ 0,
Year>SC & Year<SI ~ share_between_scsi),
share_bonus=per_si/max_siofsc)
# Countries flagged high==1 keep the full switching rates; others are scaled to 90%.
baseline <- baseline %>% mutate(high_scale=case_when(high==1 ~ 1, high==0 ~ .9))
# take the 2019 numbers, and put them as their own variable, then apply the bonus
base2019 <- baseline %>% filter(Year==2019) %>% select(iso, Injection, STM, NonUser) %>%
rename(Injection_2019=Injection,
STM_2019=STM,
NonUser_2019=NonUser)
baseline <- full_join(baseline, base2019, by="iso")
# Annual (per-year) switching counts, spread evenly over 11 years (2020-2030).
# NOTE(review): `scale` is applied to the STM and non-user terms but not the IM
# term, and is not defined anywhere in this script -- presumably set in the
# calling environment (possibly meant to be high_scale); verify before running.
baseline <- baseline %>% mutate(inj_switch_si_a=(((inj_to_si+(si_bonus*share_bonus)))*Injection_2019)/11,
stm_switch_si_a=(((stm_to_si+(si_bonus*share_bonus))*scale)*STM_2019)/11,
nu_switch_si_a=(((nu_to_si+(si_bonus*share_bonus))*scale)*NonUser_2019*ReasonNotUsingSI)/11)
# `equations` carries all downstream derived columns; `baseline` stays intact.
equations <- baseline
# Here is where the years since full scale matter- making annual numbers before it 0
# year_fullscale counts years since SC launch (1 in the launch year itself);
# binary_full_scale zeroes the annual switching counts before launch, so the
# cumulative sums below only start accumulating once SC is available.
equations <- equations %>% mutate(year_fullscale= case_when(Year-year_sc<0 ~ 0, Year-year_sc>=0 ~ Year-year_sc+1 )) %>%
mutate(binary_full_scale=case_when(year_fullscale>0 ~ 1, year_fullscale==0 ~ 0)) %>%
mutate(inj_switch_si_a=inj_switch_si_a*binary_full_scale,
stm_switch_si_a=stm_switch_si_a*binary_full_scale,
nu_switch_si_a=nu_switch_si_a*binary_full_scale) %>%
group_by(iso) %>%
mutate(inj_switch_si = cumsum(inj_switch_si_a),
stm_switch_si = cumsum(stm_switch_si_a),
inj_stay= Injection-inj_switch_si,
stm_stay=STM-stm_switch_si)
# NOTE: `equations` remains grouped by iso from here on, so the later cumsum()
# also accumulates within each country.
#SI users who would have been non-users because of discontinuation from IM
# 0.26 is an assumed averted-discontinuation factor -- TODO confirm its source.
equations <- equations %>% mutate(si_non_disc_im = case_when(year_fullscale==0 ~ 0, year_fullscale!=0 ~ inj_switch_si*(discon_inj*.26)))
# SI users who would have been non-users because of discontinuation from STM
equations <- equations %>% mutate(si_non_disc_stm = case_when(year_fullscale==0 ~ 0, year_fullscale!=0 ~ stm_switch_si*(discon_stm*.26)))
#Nonusers Not SI Relevant
# Split remaining non-users by whether their stated reason for non-use could
# be addressed by self-injection (the ReasonNotUsingSI share).
equations <- equations %>% mutate(nu_not_sirelevant=(NonUser-si_non_disc_im-si_non_disc_stm)*(1-ReasonNotUsingSI),
nu_sirelevant=(NonUser-si_non_disc_im-si_non_disc_stm)*(ReasonNotUsingSI))
# Nonusers SI Relevant Uptake (cumulative within country; see grouping note above)
equations <- equations %>% mutate( nu_sirelevant_uptake= cumsum(nu_switch_si_a))
equations <- equations %>% mutate(nu_sirelevant_notuptake=nu_sirelevant-nu_sirelevant_uptake)
# Debug/inspection only: prints the column names when run interactively.
names(equations)
# Totals by method group under the SI scenario.
equations <- equations %>% mutate(si_users=inj_switch_si+stm_switch_si+si_non_disc_im+si_non_disc_stm+nu_sirelevant_uptake,
im_users=inj_stay,
stm_users=stm_stay,
ltm_users=LTM,
non_users=nu_not_sirelevant+nu_sirelevant_notuptake,
per_users_si=si_users/(si_users+im_users+stm_users+ltm_users))
# Proportion of SI Users from Various sources (case_when guards against 0/0
# before SC launch, when si_users is 0).
equations <- equations %>% mutate(prop_si_im= case_when(si_users==0 ~ 0, si_users!=0 ~ inj_switch_si/si_users),
prop_si_stm= case_when(si_users==0 ~ 0, si_users!=0 ~ stm_switch_si/si_users),
prop_si_disc= case_when(si_users==0 ~ 0, si_users!=0 ~ (si_non_disc_im+si_non_disc_stm)/si_users),
prop_si_uptake= case_when(si_users==0 ~ 0, si_users!=0 ~ nu_sirelevant_uptake/si_users))
# Counterfactual (no-SI) user total vs scenario total.
equations <- equations %>% mutate(baseline_users=LTM+STM+Injection,
total_user_w_si=si_users+im_users+stm_users+ltm_users)
# Injectable Users
equations <- equations %>% mutate(injec_user_w_si=si_users+im_users)
# IM Users, SI Users, and SC Users
# Split injectable users under the SI scenario into IM, provider-administered
# SC, and self-injected SC, and compute summary indicators (mCPR with SI,
# counterfactual mCPR, additional users).
# BUG FIX: the original read `non_users <- Total-total_user_w_si` inside
# mutate(); `<-` inside mutate() does not update the `non_users` column (the
# assignment happens only in the evaluation environment and the unnamed result
# gets a junk auto-deparsed column name), so the stale `non_users` from the
# earlier step was carried forward. Replaced `<-` with `=` so the column is
# genuinely redefined as Total minus all users. (The two definitions agree
# numerically when method counts sum to Total, but the junk column and the
# ambiguity are removed.)
equations <- equations %>% mutate(IM_injec_user_w_si=round(im_users),
SCP_injec_user_w_si=round(si_users*(1-per_si)),
SI_injec_user_w_si= round(si_users*(per_si)),
non_users = Total-total_user_w_si,
mcpr_w_si=round((total_user_w_si/Total)*100,1),
inject_share=injec_user_w_si/total_user_w_si,
baseline_mcpr=round((baseline_users/Total)*100,1),
additional_users=round(total_user_w_si-baseline_users))
# Gates Priority Countries (ISO 3166-1 numeric: 566 Nigeria, 404 Kenya,
# 854 Burkina Faso, 586 Pakistan, 800 Uganda, 686 Senegal, 454 Malawi):
# additional users in 2030, exported to CSV.
gates <- equations %>% filter(iso== 566 | iso==404 | iso==854 | iso==586 | iso==800 | iso==686 | iso==454 ) %>% filter(Year==2030) %>% select(Country, iso, additional_users)
write.csv(gates, "Gates Priority Countries SI 062220 With Change in NonUsers.csv", row.names = F)
# Results 2030
results <- equations %>% select(iso, Year, baseline_mcpr, mcpr_w_si, additional_users, total_user_w_si, baseline_users, Total, SI_injec_user_w_si, SCP_injec_user_w_si, IM_injec_user_w_si, non_users) %>%
filter(Year==2030)
# Overall change in mcpr/number of users (all countries pooled)
sum2030 <- results %>% ungroup() %>% summarise(total_user_w_si=sum(total_user_w_si), baseline_users=sum(baseline_users), Total=sum(Total)) %>% mutate(addition=total_user_w_si-baseline_users, mcpr_si=total_user_w_si/Total, mcpr_nosi=baseline_users/Total)
############################################################
# Single-country extract: iso==4 is Afghanistan in ISO 3166-1 numeric.
# Additional users in 2030.
data2030 <- equations %>% ungroup() %>%
filter(Year==2030) %>%
mutate(additional_users=total_user_w_si-baseline_users) %>% filter(iso==4) %>% select(additional_users)
add <- data2030$additional_users[1]
############################################################
############################################################
# Composition of 2030 SC users by origin (former IM, former STM, averted
# discontinuation, new uptake), in percent, for the stacked-bar plot below.
sc_users2030 <- equations %>% ungroup() %>%
filter(Year==2030) %>% filter(iso==4) %>% select(prop_si_im, prop_si_stm, prop_si_disc, prop_si_uptake) %>%
gather(Type, Number, prop_si_im:prop_si_uptake ) %>% mutate(Number=100*Number, x=1) %>%
mutate(Type=case_when(Type=="prop_si_im" ~ "Former IM",
Type=="prop_si_stm" ~ "Former STM",
Type=="prop_si_disc" ~ "Reduced Discontinuation",
Type=="prop_si_uptake" ~ "Uptake"))
ggplot(sc_users2030,aes(x=x,y=Number, fill=Type))+
geom_bar(stat="identity") +
labs(title="Where SC Users Come From?", x="", y="", fill="")+
theme_bw()+
theme(legend.position = "bottom",
legend.text=element_text(size=12),
axis.text.y=element_text(size=12),
axis.text.x=element_blank(),
axis.ticks.x=element_blank())
############################################################
# Stacked bars of women by status over time (injectable split three ways,
# plus LTM, STM, and non-users).
graph <- equations %>% select(iso, Year, IM_injec_user_w_si, SCP_injec_user_w_si, SI_injec_user_w_si, stm_users , ltm_users, non_users ) %>%
gather(Type, Number, IM_injec_user_w_si:non_users ) %>%
mutate(Type=case_when(Type=="stm_users" ~ "STM",
Type=="ltm_users" ~ "LTM",
Type=="IM_injec_user_w_si" ~ "Injectable: IM",
Type=="SCP_injec_user_w_si" ~ "Injectable: SC (Provider)",
Type=="SI_injec_user_w_si" ~ "Injectable: SC (Self)",
Type=="non_users" ~ "Nonuser"))
# Fix the legend/stacking order of the categories.
graph$Type <- factor(graph$Type, levels = c( "Injectable: SC (Self)", "Injectable: SC (Provider)", "Injectable: IM", "LTM", "STM", "Nonuser" ))
graph <- graph %>% filter(iso==4)
ggplot(graph,aes(x=Year,y=Number, fill=Type))+
geom_bar(stat="identity") +
labs(title="Number of Women", x="", y="", fill="")+
theme_bw()+
theme(legend.position = "bottom",
legend.text=element_text(size=12))
############################################################
# mCPR trajectory with vs without SC introduction (single country).
mcpr <- equations %>% select(iso, Year, mcpr_w_si, baseline_mcpr ) %>% gather(Type, Number, mcpr_w_si:baseline_mcpr ) %>%
mutate(Type= case_when(Type=="mcpr_w_si" ~ "mCPR with SC Introduction",
Type=="baseline_mcpr" ~ "mCPR without SC Introduction"))
mcpr <- mcpr %>% filter(iso==4)
ggplot(mcpr,aes(x=Year,y=Number, color=Type))+
geom_line(size=1.5)+
labs(title="Effect for Self-Injectable Introduction on mCPR", x="", y="mCPR (AW)", color="")+
theme_bw()+
theme(legend.position = "bottom",
legend.text=element_text(size=12))
|
/SCSI Model Track20 062220.R
|
no_license
|
brettkeller/SCSIModel
|
R
| false
| false
| 10,121
|
r
|
# Track20, Avenir Health
# Kristin Bietsch, PhD and Emily Sonneveldt, PhD
#
# Self-injection (SI) scale-up model: starting from country baseline
# contraceptive-use projections, estimates how many injectable (IM) users,
# short-term-method (STM) users, and non-users take up subcutaneous (SC)
# self-injectable contraception after its introduction.
library(dplyr)
library(tidyr)
library(ggplot2)
# NOTE(review): machine-specific working directory; the script only runs
# as-is on this machine.
setwd("C:/Users/KristinBietsch/files/Track20/Win Requests/Self Injections")
# Baseline Data
# One row per country (iso) and Year; expected columns include Injection, STM,
# LTM, NonUser, Total, ReasonNotUsingSI, discon_inj, discon_stm, year_sc,
# year_si, high -- TODO confirm against ModelData062220.csv.
baseline <- read.csv("ModelData062220.csv")
# Adaptable Parameters
# These are expected to already exist in the workspace when the script runs;
# the commented assignments below document typical values.
#Proportion of Injectable users that will switch to self-injectable contraceptives
#inj_to_si <- .16
#Proportion of short-term method users that will switch to self-injectable contraceptives
#stm_to_si <- .08
#Proportion of Non-users, whose reasons for not using would be alleviated by benefits of self-injection (decreased side-effects, increased availability, ease of use) that will uptake self-injectable contraceptives
#nu_to_si <- .04
# SI bonus- increase in each parameter when SI of SC becomes fully available (partial bonus awarded during scale up)
#si_bonus <- .01
# New Adjustable Parameter
# Year SC reaches full scale
#year_sc <- 2020
# Year SI reaches maximum proportion of SC use
# if the two years are different, assume 0% in first year and max% in last year and linearly interpolate
#year_si <- 2021
#max_siofsc <- .4
##############################################################
# Share of SC use that is self-injected (vs provider-administered):
# 0 up to the SC launch year, linearly interpolated up to max_siofsc by the
# SI target year, and constant at max_siofsc afterwards.
# share_bonus rescales that share to [0, 1] for use as a bonus multiplier.
baseline$SC <- baseline$year_sc
baseline$SI <- baseline$year_si
baseline <- baseline %>% mutate(time_sc_si = SI-SC,
years_after_sc=Year-SC,
share_between_scsi = (years_after_sc/time_sc_si)*max_siofsc,
per_si =case_when(Year>=SI ~ max_siofsc,
Year<=SC ~ 0,
Year>SC & Year<SI ~ share_between_scsi),
share_bonus=per_si/max_siofsc)
# Countries flagged high==1 keep the full switching rates; others are scaled to 90%.
baseline <- baseline %>% mutate(high_scale=case_when(high==1 ~ 1, high==0 ~ .9))
# take the 2019 numbers, and put them as their own variable, then apply the bonus
base2019 <- baseline %>% filter(Year==2019) %>% select(iso, Injection, STM, NonUser) %>%
rename(Injection_2019=Injection,
STM_2019=STM,
NonUser_2019=NonUser)
baseline <- full_join(baseline, base2019, by="iso")
# Annual (per-year) switching counts, spread evenly over 11 years (2020-2030).
# NOTE(review): `scale` is applied to the STM and non-user terms but not the IM
# term, and is not defined anywhere in this script -- presumably set in the
# calling environment (possibly meant to be high_scale); verify before running.
baseline <- baseline %>% mutate(inj_switch_si_a=(((inj_to_si+(si_bonus*share_bonus)))*Injection_2019)/11,
stm_switch_si_a=(((stm_to_si+(si_bonus*share_bonus))*scale)*STM_2019)/11,
nu_switch_si_a=(((nu_to_si+(si_bonus*share_bonus))*scale)*NonUser_2019*ReasonNotUsingSI)/11)
# `equations` carries all downstream derived columns; `baseline` stays intact.
equations <- baseline
# Here is where the years since full scale matter- making annual numbers before it 0
# year_fullscale counts years since SC launch (1 in the launch year itself);
# binary_full_scale zeroes the annual switching counts before launch, so the
# cumulative sums below only start accumulating once SC is available.
equations <- equations %>% mutate(year_fullscale= case_when(Year-year_sc<0 ~ 0, Year-year_sc>=0 ~ Year-year_sc+1 )) %>%
mutate(binary_full_scale=case_when(year_fullscale>0 ~ 1, year_fullscale==0 ~ 0)) %>%
mutate(inj_switch_si_a=inj_switch_si_a*binary_full_scale,
stm_switch_si_a=stm_switch_si_a*binary_full_scale,
nu_switch_si_a=nu_switch_si_a*binary_full_scale) %>%
group_by(iso) %>%
mutate(inj_switch_si = cumsum(inj_switch_si_a),
stm_switch_si = cumsum(stm_switch_si_a),
inj_stay= Injection-inj_switch_si,
stm_stay=STM-stm_switch_si)
# NOTE: `equations` remains grouped by iso from here on, so the later cumsum()
# also accumulates within each country.
#SI users who would have been non-users because of discontinuation from IM
# 0.26 is an assumed averted-discontinuation factor -- TODO confirm its source.
equations <- equations %>% mutate(si_non_disc_im = case_when(year_fullscale==0 ~ 0, year_fullscale!=0 ~ inj_switch_si*(discon_inj*.26)))
# SI users who would have been non-users because of discontinuation from STM
equations <- equations %>% mutate(si_non_disc_stm = case_when(year_fullscale==0 ~ 0, year_fullscale!=0 ~ stm_switch_si*(discon_stm*.26)))
#Nonusers Not SI Relevant
# Split remaining non-users by whether their stated reason for non-use could
# be addressed by self-injection (the ReasonNotUsingSI share).
equations <- equations %>% mutate(nu_not_sirelevant=(NonUser-si_non_disc_im-si_non_disc_stm)*(1-ReasonNotUsingSI),
nu_sirelevant=(NonUser-si_non_disc_im-si_non_disc_stm)*(ReasonNotUsingSI))
# Nonusers SI Relevant Uptake (cumulative within country; see grouping note above)
equations <- equations %>% mutate( nu_sirelevant_uptake= cumsum(nu_switch_si_a))
equations <- equations %>% mutate(nu_sirelevant_notuptake=nu_sirelevant-nu_sirelevant_uptake)
# Debug/inspection only: prints the column names when run interactively.
names(equations)
# Totals by method group under the SI scenario.
equations <- equations %>% mutate(si_users=inj_switch_si+stm_switch_si+si_non_disc_im+si_non_disc_stm+nu_sirelevant_uptake,
im_users=inj_stay,
stm_users=stm_stay,
ltm_users=LTM,
non_users=nu_not_sirelevant+nu_sirelevant_notuptake,
per_users_si=si_users/(si_users+im_users+stm_users+ltm_users))
# Proportion of SI Users from Various sources (case_when guards against 0/0
# before SC launch, when si_users is 0).
equations <- equations %>% mutate(prop_si_im= case_when(si_users==0 ~ 0, si_users!=0 ~ inj_switch_si/si_users),
prop_si_stm= case_when(si_users==0 ~ 0, si_users!=0 ~ stm_switch_si/si_users),
prop_si_disc= case_when(si_users==0 ~ 0, si_users!=0 ~ (si_non_disc_im+si_non_disc_stm)/si_users),
prop_si_uptake= case_when(si_users==0 ~ 0, si_users!=0 ~ nu_sirelevant_uptake/si_users))
# Counterfactual (no-SI) user total vs scenario total.
equations <- equations %>% mutate(baseline_users=LTM+STM+Injection,
total_user_w_si=si_users+im_users+stm_users+ltm_users)
# Injectable Users
equations <- equations %>% mutate(injec_user_w_si=si_users+im_users)
# IM Users, SI Users, and SC Users
# Split injectable users under the SI scenario into IM, provider-administered
# SC, and self-injected SC, and compute summary indicators (mCPR with SI,
# counterfactual mCPR, additional users).
# BUG FIX: the original read `non_users <- Total-total_user_w_si` inside
# mutate(); `<-` inside mutate() does not update the `non_users` column (the
# assignment happens only in the evaluation environment and the unnamed result
# gets a junk auto-deparsed column name), so the stale `non_users` from the
# earlier step was carried forward. Replaced `<-` with `=` so the column is
# genuinely redefined as Total minus all users. (The two definitions agree
# numerically when method counts sum to Total, but the junk column and the
# ambiguity are removed.)
equations <- equations %>% mutate(IM_injec_user_w_si=round(im_users),
SCP_injec_user_w_si=round(si_users*(1-per_si)),
SI_injec_user_w_si= round(si_users*(per_si)),
non_users = Total-total_user_w_si,
mcpr_w_si=round((total_user_w_si/Total)*100,1),
inject_share=injec_user_w_si/total_user_w_si,
baseline_mcpr=round((baseline_users/Total)*100,1),
additional_users=round(total_user_w_si-baseline_users))
# Gates Priority Countries (ISO 3166-1 numeric: 566 Nigeria, 404 Kenya,
# 854 Burkina Faso, 586 Pakistan, 800 Uganda, 686 Senegal, 454 Malawi):
# additional users in 2030, exported to CSV.
gates <- equations %>% filter(iso== 566 | iso==404 | iso==854 | iso==586 | iso==800 | iso==686 | iso==454 ) %>% filter(Year==2030) %>% select(Country, iso, additional_users)
write.csv(gates, "Gates Priority Countries SI 062220 With Change in NonUsers.csv", row.names = F)
# Results 2030
results <- equations %>% select(iso, Year, baseline_mcpr, mcpr_w_si, additional_users, total_user_w_si, baseline_users, Total, SI_injec_user_w_si, SCP_injec_user_w_si, IM_injec_user_w_si, non_users) %>%
filter(Year==2030)
# Overall change in mcpr/number of users (all countries pooled)
sum2030 <- results %>% ungroup() %>% summarise(total_user_w_si=sum(total_user_w_si), baseline_users=sum(baseline_users), Total=sum(Total)) %>% mutate(addition=total_user_w_si-baseline_users, mcpr_si=total_user_w_si/Total, mcpr_nosi=baseline_users/Total)
############################################################
# Single-country extract: iso==4 is Afghanistan in ISO 3166-1 numeric.
# Additional users in 2030.
data2030 <- equations %>% ungroup() %>%
filter(Year==2030) %>%
mutate(additional_users=total_user_w_si-baseline_users) %>% filter(iso==4) %>% select(additional_users)
add <- data2030$additional_users[1]
############################################################
############################################################
# Composition of 2030 SC users by origin (former IM, former STM, averted
# discontinuation, new uptake), in percent, for the stacked-bar plot below.
sc_users2030 <- equations %>% ungroup() %>%
filter(Year==2030) %>% filter(iso==4) %>% select(prop_si_im, prop_si_stm, prop_si_disc, prop_si_uptake) %>%
gather(Type, Number, prop_si_im:prop_si_uptake ) %>% mutate(Number=100*Number, x=1) %>%
mutate(Type=case_when(Type=="prop_si_im" ~ "Former IM",
Type=="prop_si_stm" ~ "Former STM",
Type=="prop_si_disc" ~ "Reduced Discontinuation",
Type=="prop_si_uptake" ~ "Uptake"))
ggplot(sc_users2030,aes(x=x,y=Number, fill=Type))+
geom_bar(stat="identity") +
labs(title="Where SC Users Come From?", x="", y="", fill="")+
theme_bw()+
theme(legend.position = "bottom",
legend.text=element_text(size=12),
axis.text.y=element_text(size=12),
axis.text.x=element_blank(),
axis.ticks.x=element_blank())
############################################################
# Stacked bars of women by status over time (injectable split three ways,
# plus LTM, STM, and non-users).
graph <- equations %>% select(iso, Year, IM_injec_user_w_si, SCP_injec_user_w_si, SI_injec_user_w_si, stm_users , ltm_users, non_users ) %>%
gather(Type, Number, IM_injec_user_w_si:non_users ) %>%
mutate(Type=case_when(Type=="stm_users" ~ "STM",
Type=="ltm_users" ~ "LTM",
Type=="IM_injec_user_w_si" ~ "Injectable: IM",
Type=="SCP_injec_user_w_si" ~ "Injectable: SC (Provider)",
Type=="SI_injec_user_w_si" ~ "Injectable: SC (Self)",
Type=="non_users" ~ "Nonuser"))
# Fix the legend/stacking order of the categories.
graph$Type <- factor(graph$Type, levels = c( "Injectable: SC (Self)", "Injectable: SC (Provider)", "Injectable: IM", "LTM", "STM", "Nonuser" ))
graph <- graph %>% filter(iso==4)
ggplot(graph,aes(x=Year,y=Number, fill=Type))+
geom_bar(stat="identity") +
labs(title="Number of Women", x="", y="", fill="")+
theme_bw()+
theme(legend.position = "bottom",
legend.text=element_text(size=12))
############################################################
# mCPR trajectory with vs without SC introduction (single country).
mcpr <- equations %>% select(iso, Year, mcpr_w_si, baseline_mcpr ) %>% gather(Type, Number, mcpr_w_si:baseline_mcpr ) %>%
mutate(Type= case_when(Type=="mcpr_w_si" ~ "mCPR with SC Introduction",
Type=="baseline_mcpr" ~ "mCPR without SC Introduction"))
mcpr <- mcpr %>% filter(iso==4)
ggplot(mcpr,aes(x=Year,y=Number, color=Type))+
geom_line(size=1.5)+
labs(title="Effect for Self-Injectable Introduction on mCPR", x="", y="mCPR (AW)", color="")+
theme_bw()+
theme(legend.position = "bottom",
legend.text=element_text(size=12))
|
% Generated by roxygen2 (4.0.2): do not edit by hand
\docType{data}
\name{data-Praw}
\alias{data-Praw}
\title{Modeled phenotypic data used in thesis}
\usage{
Praw
}
\description{
Modeled phenotypic data used in thesis
}
\details{
The data comes from a field trial that is part of a cotton
breeding program. The trial was set up in 2012 across 7 locations in the
US Cotton Belt. At every location the same bi--parental BC_3F_2 was grown
together with a number of entries serving as checks. Yield performance
measurements were averaged per plot. Location 7 was excluded from the
analysis. Next, a linear mixed model was applied to obtain BLUP predictions
for the yield performance measures where the yield performance measures were
adjusted using the information of the checks, and where a spatial (AR1xAR1)
covariance structure was applied for the rows and ranges in the fields.
The resulting data frame holds information of 1774 observations and 19 features.
The features are detailed below and represent the columns in the data frame.
\describe{
\item{\code{MERGE}:}{Observation names, which are the combined names of
the entries and the locations.}
\item{\code{GERMPLASM}:}{The entry names.}
\item{\code{LOCATION}:}{The name of the locations.}
\item{\code{OVERALL_RANGE}:}{The range coordinates when all fields are
seen as being part of one big field, i.e. same reference grid for all fields.}
\item{\code{OVERALL_ROW}:}{The row coordinates when all fields are
seen as being part of one big field, i.e. same reference grid for all fields.}
\item{\code{RANGEROW}:}{A combination of the range and row coordinates.}
\item{\code{ROW}:}{The coordinates for the rows linked to the locations.
Here the reference grid is the location itself.}
\item{\code{RANGE}:}{The coordinates for the ranges linked to the locations.
Here the reference grid is the location itself.}
\item{\code{PLOT}:}{The reference to the plot of the observation.}
\item{\code{YIELD}:}{The average yield performance measures of the plots for
the respective observations.}
\item{\code{EXPERIMENT}:}{Name of the experiment.}
\item{\code{SUBSUBBLOCK}:}{Placeholder in case subsubblocks were defined
in case of blocking. Not used here.}
\item{\code{SUBBLOCK}:}{Placeholder in case subblocks were defined
in case of blocking. Not used here.}
\item{\code{BLOCK}:}{Placeholder in case blocks were defined in case of
blocking. Not used here.}
\item{\code{CHECK}:}{Numeric showing whether the observation is a check.
Here the checks were already filtered out.}
\item{\code{ROWf}:}{Factor for ROW.}
\item{\code{RANGEf}:}{Factor for RANGE.}
\item{\code{residuals}:}{Residuals obtained after applying the above
described linear mixed model.}
\item{\code{predicted.values}:}{BLUP predictions from the fitted above
described linear mixed model.}
}
}
\examples{
data(Praw)
str(Praw)
}
\author{
Ruud Derijcker
}
\keyword{Praw}
|
/man/data-Praw.Rd
|
no_license
|
digiYozhik/msc_thesis
|
R
| false
| false
| 3,032
|
rd
|
% Generated by roxygen2 (4.0.2): do not edit by hand
\docType{data}
\name{data-Praw}
\alias{data-Praw}
\title{Modeled phenotypic data used in thesis}
\usage{
Praw
}
\description{
Modeled phenotypic data used in thesis
}
\details{
The data comes from a field trial that is part of a cotton
breeding program. The trial was set up in 2012 across 7 locations in the
US Cotton Belt. At every location the same bi--parental BC_3F_2 was grown
together with a number of entries serving as checks. Yield performance
measurements were averaged per plot. Location 7 was excluded from the
analysis. Next, a linear mixed model was applied to obtain BLUP predictions
for the yield performance measures where the yield performance measures were
adjusted using the information of the checks, and where a spatial (AR1xAR1)
covariance structure was applied for the rows and ranges in the fields.
The resulting data frame holds information of 1774 observations and 19 features.
The features are detailed below and represent the columns in the data frame.
\describe{
\item{\code{MERGE}:}{Observation names, which are the combined names of
the entries and the locations.}
\item{\code{GERMPLASM}:}{The entry names.}
\item{\code{LOCATION}:}{The name of the locations.}
\item{\code{OVERALL_RANGE}:}{The range coordinates when all fields are
seen as being part of one big field, i.e. same reference grid for all fields.}
\item{\code{OVERALL_ROW}:}{The row coordinates when all fields are
seen as being part of one big field, i.e. same reference grid for all fields.}
\item{\code{RANGEROW}:}{A combination of the range and row coordinates.}
\item{\code{ROW}:}{The coordinates for the rows linked to the locations.
Here the reference grid is the location itself.}
\item{\code{RANGE}:}{The coordinates for the ranges linked to the locations.
Here the reference grid is the location itself.}
\item{\code{PLOT}:}{The reference to the plot of the observation.}
\item{\code{YIELD}:}{The average yield performance measures of the plots for
the respective observations.}
\item{\code{EXPERIMENT}:}{Name of the experiment.}
\item{\code{SUBSUBBLOCK}:}{Placeholder in case subsubblocks were defined
in case of blocking. Not used here.}
\item{\code{SUBBLOCK}:}{Placeholder in case subblocks were defined
in case of blocking. Not used here.}
\item{\code{BLOCK}:}{Placeholder in case blocks were defined in case of
blocking. Not used here.}
\item{\code{CHECK}:}{Numeric showing whether the observation is a check.
Here the checks were already filtered out.}
\item{\code{ROWf}:}{Factor for ROW.}
\item{\code{RANGEf}:}{Factor for RANGE.}
\item{\code{residuals}:}{Residuals obtained after applying the above
described linear mixed model.}
\item{\code{predicted.values}:}{BLUP predictions from the fitted above
described linear mixed model.}
}
}
\examples{
data(Praw)
str(Praw)
}
\author{
Ruud Derijcker
}
\keyword{Praw}
|
# edphys_data_build2.r
# Consolidates the monthly ED physician reports (East and North Florida
# workbooks, one sheet per site) into one workbook per month under
# Consolidated/, plus a single year-to-date workbook (ed_data_2017.xlsx).
#
# Refactored from ~150 copy-pasted statements into a data-driven loop:
# each month differs only in its MMYY file-name code and, from June on,
# the dropped "Osceola Peds" sheet.
library(xlsx)
library(readr)

base_dir <- "q:/Anesthesia/_DropOff/ED_Reports/Site_Data_Capture/R"

# Sheets read from each regional workbook, in the original rbind order.
east_sheets  <- c("Kendall", "JFK", "PBG FSED", "BB FSED")
north_sheets <- c("Hunters Creek", "Ocala", "Osceola", "Osceola Peds",
                  "Summerfield FSED", "West Marion")

# Read every requested sheet of one regional workbook and stack them.
#   mm_yy:  month/year code embedded in the file name, e.g. "0117".
#   region: "East" or "North".
#   sheets: character vector of sheet names to read, in stack order.
# Returns the row-bound data frame of all sheets.
read_region <- function(mm_yy, region, sheets) {
  path <- file.path(base_dir,
                    sprintf("ED_Physician_Report-%s-%s_Florida.xlsx",
                            mm_yy, region))
  do.call(rbind, lapply(sheets, function(s) read.xlsx(path, sheetName = s)))
}

# Build one month's consolidated data frame (East sheets first, then
# North, matching the original rbind order), write it to its own
# workbook under Consolidated/, and return it for the YTD bind.
consolidate_month <- function(mm_yy, month_abbr, north) {
  month_data <- rbind(read_region(mm_yy, "East", east_sheets),
                      read_region(mm_yy, "North", north))
  out_path <- file.path(base_dir, "Consolidated",
                        sprintf("ed_data_%s_17.xlsx", month_abbr))
  write.xlsx2(month_data, out_path, sheetName = month_abbr,
              col.names = TRUE, row.names = FALSE, append = FALSE)
  month_data
}

month_codes <- sprintf("%02d17", 1:10)   # "0117" .. "1017"
month_abbrs <- tolower(month.abb[1:10])  # "jan"  .. "oct"

monthly <- Map(function(idx, code, abbr) {
  # "Osceola Peds" was only reported January through May.
  north <- if (idx <= 5) {
    north_sheets
  } else {
    setdiff(north_sheets, "Osceola Peds")
  }
  consolidate_month(code, abbr, north)
}, seq_along(month_codes), month_codes, month_abbrs)

# Year-to-date consolidation, January through October in order.
ed_data_2017 <- do.call(rbind, monthly)
# NOTE(review): the original script carried a commented-out block that
# coerced the wait/LOS/percent columns to numeric (e.g.
# ed_data_2017$Patients_per_Hour <- as.numeric(ed_data_2017$Patients_per_Hour)).
# Re-enable per column if downstream consumers need numeric types.
write.xlsx2(ed_data_2017,
            file.path(base_dir, "Consolidated", "ed_data_2017.xlsx"),
            sheetName = "ytd", col.names = TRUE, row.names = FALSE,
            append = FALSE)
|
/edphys_data_build2.R
|
no_license
|
AllenBaum/My-Data-ver-1
|
R
| false
| false
| 20,554
|
r
|
# edphys_data_build2.r
# write.csv(edphys_data_test2,"edphys_data_test2.csv")
library(xlsx)
library(readr)
# January consolidation
ed_kendall_0117 <- read.xlsx("q:/Anesthesia/_DropOff/ED_Reports/Site_Data_Capture/R/ED_Physician_Report-0117-East_Florida.xlsx",sheetName="Kendall")
ed_JFK_0117 <- read.xlsx("q:/Anesthesia/_DropOff/ED_Reports/Site_Data_Capture/R/ED_Physician_Report-0117-East_Florida.xlsx",sheetName="JFK")
ed_JFKPBG_0117 <- read.xlsx("q:/Anesthesia/_DropOff/ED_Reports/Site_Data_Capture/R/ED_Physician_Report-0117-East_Florida.xlsx",sheetName="PBG FSED")
ed_JFKBB_0117 <- read.xlsx("q:/Anesthesia/_DropOff/ED_Reports/Site_Data_Capture/R/ED_Physician_Report-0117-East_Florida.xlsx",sheetName="BB FSED")
ed_hunters_0117 <- read.xlsx("q:/Anesthesia/_DropOff/ED_Reports/Site_Data_Capture/R/ED_Physician_Report-0117-North_Florida.xlsx",sheetName="Hunters Creek")
ed_ocala_0117 <- read.xlsx("q:/Anesthesia/_DropOff/ED_Reports/Site_Data_Capture/R/ED_Physician_Report-0117-North_Florida.xlsx",sheetName="Ocala")
ed_osceola_0117 <- read.xlsx("q:/Anesthesia/_DropOff/ED_Reports/Site_Data_Capture/R/ED_Physician_Report-0117-North_Florida.xlsx",sheetName="Osceola")
ed_osceolaPeds_0117 <- read.xlsx("q:/Anesthesia/_DropOff/ED_Reports/Site_Data_Capture/R/ED_Physician_Report-0117-North_Florida.xlsx",sheetName="Osceola Peds")
ed_summerfield_0117 <- read.xlsx("q:/Anesthesia/_DropOff/ED_Reports/Site_Data_Capture/R/ED_Physician_Report-0117-North_Florida.xlsx",sheetName="Summerfield FSED")
ed_wmarion_0117 <- read.xlsx("q:/Anesthesia/_DropOff/ED_Reports/Site_Data_Capture/R/ED_Physician_Report-0117-North_Florida.xlsx",sheetName="West Marion")
ed_data_jan_17 <- rbind(ed_kendall_0117,ed_JFK_0117,ed_JFKPBG_0117,ed_JFKBB_0117,ed_hunters_0117,ed_ocala_0117,ed_osceola_0117,ed_osceolaPeds_0117,ed_summerfield_0117,ed_wmarion_0117)
write.xlsx2(ed_data_jan_17, "q:/Anesthesia/_DropOff/ED_Reports/Site_Data_Capture/R/Consolidated/ed_data_jan_17.xlsx", sheetName="jan", col.names=TRUE, row.names=FALSE, append=FALSE)
# February consolidation
ed_kendall_0217 <- read.xlsx("q:/Anesthesia/_DropOff/ED_Reports/Site_Data_Capture/R/ED_Physician_Report-0217-East_Florida.xlsx",sheetName="Kendall")
ed_JFK_0217 <- read.xlsx("q:/Anesthesia/_DropOff/ED_Reports/Site_Data_Capture/R/ED_Physician_Report-0217-East_Florida.xlsx",sheetName="JFK")
ed_JFKPBG_0217 <- read.xlsx("q:/Anesthesia/_DropOff/ED_Reports/Site_Data_Capture/R/ED_Physician_Report-0217-East_Florida.xlsx",sheetName="PBG FSED")
ed_JFKBB_0217 <- read.xlsx("q:/Anesthesia/_DropOff/ED_Reports/Site_Data_Capture/R/ED_Physician_Report-0217-East_Florida.xlsx",sheetName="BB FSED")
ed_hunters_0217 <- read.xlsx("q:/Anesthesia/_DropOff/ED_Reports/Site_Data_Capture/R/ED_Physician_Report-0217-North_Florida.xlsx",sheetName="Hunters Creek")
ed_ocala_0217 <- read.xlsx("q:/Anesthesia/_DropOff/ED_Reports/Site_Data_Capture/R/ED_Physician_Report-0217-North_Florida.xlsx",sheetName="Ocala")
ed_osceola_0217 <- read.xlsx("q:/Anesthesia/_DropOff/ED_Reports/Site_Data_Capture/R/ED_Physician_Report-0217-North_Florida.xlsx",sheetName="Osceola")
ed_osceolaPeds_0217 <- read.xlsx("q:/Anesthesia/_DropOff/ED_Reports/Site_Data_Capture/R/ED_Physician_Report-0217-North_Florida.xlsx",sheetName="Osceola Peds")
ed_summerfield_0217 <- read.xlsx("q:/Anesthesia/_DropOff/ED_Reports/Site_Data_Capture/R/ED_Physician_Report-0217-North_Florida.xlsx",sheetName="Summerfield FSED")
ed_wmarion_0217 <- read.xlsx("q:/Anesthesia/_DropOff/ED_Reports/Site_Data_Capture/R/ED_Physician_Report-0217-North_Florida.xlsx",sheetName="West Marion")
ed_data_feb_17 <- rbind(ed_kendall_0217,ed_JFK_0217,ed_JFKPBG_0217,ed_JFKBB_0217,ed_hunters_0217,ed_ocala_0217,ed_osceola_0217,ed_osceolaPeds_0217,ed_summerfield_0217,ed_wmarion_0217)
write.xlsx2(ed_data_feb_17, "q:/Anesthesia/_DropOff/ED_Reports/Site_Data_Capture/R/Consolidated/ed_data_feb_17.xlsx", sheetName="feb", col.names=TRUE, row.names=FALSE, append=FALSE)
# March consolidation
ed_kendall_0317 <- read.xlsx("q:/Anesthesia/_DropOff/ED_Reports/Site_Data_Capture/R/ED_Physician_Report-0317-East_Florida.xlsx",sheetName="Kendall")
ed_JFK_0317 <- read.xlsx("q:/Anesthesia/_DropOff/ED_Reports/Site_Data_Capture/R/ED_Physician_Report-0317-East_Florida.xlsx",sheetName="JFK")
ed_JFKPBG_0317 <- read.xlsx("q:/Anesthesia/_DropOff/ED_Reports/Site_Data_Capture/R/ED_Physician_Report-0317-East_Florida.xlsx",sheetName="PBG FSED")
ed_JFKBB_0317 <- read.xlsx("q:/Anesthesia/_DropOff/ED_Reports/Site_Data_Capture/R/ED_Physician_Report-0317-East_Florida.xlsx",sheetName="BB FSED")
ed_hunters_0317 <- read.xlsx("q:/Anesthesia/_DropOff/ED_Reports/Site_Data_Capture/R/ED_Physician_Report-0317-North_Florida.xlsx",sheetName="Hunters Creek")
ed_ocala_0317 <- read.xlsx("q:/Anesthesia/_DropOff/ED_Reports/Site_Data_Capture/R/ED_Physician_Report-0317-North_Florida.xlsx",sheetName="Ocala")
ed_osceola_0317 <- read.xlsx("q:/Anesthesia/_DropOff/ED_Reports/Site_Data_Capture/R/ED_Physician_Report-0317-North_Florida.xlsx",sheetName="Osceola")
ed_osceolaPeds_0317 <- read.xlsx("q:/Anesthesia/_DropOff/ED_Reports/Site_Data_Capture/R/ED_Physician_Report-0317-North_Florida.xlsx",sheetName="Osceola Peds")
ed_summerfield_0317 <- read.xlsx("q:/Anesthesia/_DropOff/ED_Reports/Site_Data_Capture/R/ED_Physician_Report-0317-North_Florida.xlsx",sheetName="Summerfield FSED")
ed_wmarion_0317 <- read.xlsx("q:/Anesthesia/_DropOff/ED_Reports/Site_Data_Capture/R/ED_Physician_Report-0317-North_Florida.xlsx",sheetName="West Marion")
ed_data_mar_17 <- rbind(ed_kendall_0317,ed_JFK_0317,ed_JFKPBG_0317,ed_JFKBB_0317,ed_hunters_0317,ed_ocala_0317,ed_osceola_0317,ed_osceolaPeds_0317,ed_summerfield_0317,ed_wmarion_0317)
write.xlsx2(ed_data_mar_17, "q:/Anesthesia/_DropOff/ED_Reports/Site_Data_Capture/R/Consolidated/ed_data_mar_17.xlsx", sheetName="mar", col.names=TRUE, row.names=FALSE, append=FALSE)
# April consolidation
ed_kendall_0417 <- read.xlsx("q:/Anesthesia/_DropOff/ED_Reports/Site_Data_Capture/R/ED_Physician_Report-0417-East_Florida.xlsx",sheetName="Kendall")
ed_JFK_0417 <- read.xlsx("q:/Anesthesia/_DropOff/ED_Reports/Site_Data_Capture/R/ED_Physician_Report-0417-East_Florida.xlsx",sheetName="JFK")
ed_JFKPBG_0417 <- read.xlsx("q:/Anesthesia/_DropOff/ED_Reports/Site_Data_Capture/R/ED_Physician_Report-0417-East_Florida.xlsx",sheetName="PBG FSED")
ed_JFKBB_0417 <- read.xlsx("q:/Anesthesia/_DropOff/ED_Reports/Site_Data_Capture/R/ED_Physician_Report-0417-East_Florida.xlsx",sheetName="BB FSED")
ed_hunters_0417 <- read.xlsx("q:/Anesthesia/_DropOff/ED_Reports/Site_Data_Capture/R/ED_Physician_Report-0417-North_Florida.xlsx",sheetName="Hunters Creek")
ed_ocala_0417 <- read.xlsx("q:/Anesthesia/_DropOff/ED_Reports/Site_Data_Capture/R/ED_Physician_Report-0417-North_Florida.xlsx",sheetName="Ocala")
ed_osceola_0417 <- read.xlsx("q:/Anesthesia/_DropOff/ED_Reports/Site_Data_Capture/R/ED_Physician_Report-0417-North_Florida.xlsx",sheetName="Osceola")
ed_osceolaPeds_0417 <- read.xlsx("q:/Anesthesia/_DropOff/ED_Reports/Site_Data_Capture/R/ED_Physician_Report-0417-North_Florida.xlsx",sheetName="Osceola Peds")
ed_summerfield_0417 <- read.xlsx("q:/Anesthesia/_DropOff/ED_Reports/Site_Data_Capture/R/ED_Physician_Report-0417-North_Florida.xlsx",sheetName="Summerfield FSED")
ed_wmarion_0417 <- read.xlsx("q:/Anesthesia/_DropOff/ED_Reports/Site_Data_Capture/R/ED_Physician_Report-0417-North_Florida.xlsx",sheetName="West Marion")
ed_data_apr_17 <- rbind(ed_kendall_0417,ed_JFK_0417,ed_JFKPBG_0417,ed_JFKBB_0417,ed_hunters_0417,ed_ocala_0417,ed_osceola_0417,ed_osceolaPeds_0417,ed_summerfield_0417,ed_wmarion_0417)
write.xlsx2(ed_data_apr_17, "q:/Anesthesia/_DropOff/ED_Reports/Site_Data_Capture/R/Consolidated/ed_data_apr_17.xlsx", sheetName="apr", col.names=TRUE, row.names=FALSE, append=FALSE)
# May consolidation
ed_kendall_0517 <- read.xlsx("q:/Anesthesia/_DropOff/ED_Reports/Site_Data_Capture/R/ED_Physician_Report-0517-East_Florida.xlsx",sheetName="Kendall")
ed_JFK_0517 <- read.xlsx("q:/Anesthesia/_DropOff/ED_Reports/Site_Data_Capture/R/ED_Physician_Report-0517-East_Florida.xlsx",sheetName="JFK")
ed_JFKPBG_0517 <- read.xlsx("q:/Anesthesia/_DropOff/ED_Reports/Site_Data_Capture/R/ED_Physician_Report-0517-East_Florida.xlsx",sheetName="PBG FSED")
ed_JFKBB_0517 <- read.xlsx("q:/Anesthesia/_DropOff/ED_Reports/Site_Data_Capture/R/ED_Physician_Report-0517-East_Florida.xlsx",sheetName="BB FSED")
ed_hunters_0517 <- read.xlsx("q:/Anesthesia/_DropOff/ED_Reports/Site_Data_Capture/R/ED_Physician_Report-0517-North_Florida.xlsx",sheetName="Hunters Creek")
ed_ocala_0517 <- read.xlsx("q:/Anesthesia/_DropOff/ED_Reports/Site_Data_Capture/R/ED_Physician_Report-0517-North_Florida.xlsx",sheetName="Ocala")
ed_osceola_0517 <- read.xlsx("q:/Anesthesia/_DropOff/ED_Reports/Site_Data_Capture/R/ED_Physician_Report-0517-North_Florida.xlsx",sheetName="Osceola")
ed_osceolaPeds_0517 <- read.xlsx("q:/Anesthesia/_DropOff/ED_Reports/Site_Data_Capture/R/ED_Physician_Report-0517-North_Florida.xlsx",sheetName="Osceola Peds")
ed_summerfield_0517 <- read.xlsx("q:/Anesthesia/_DropOff/ED_Reports/Site_Data_Capture/R/ED_Physician_Report-0517-North_Florida.xlsx",sheetName="Summerfield FSED")
ed_wmarion_0517 <- read.xlsx("q:/Anesthesia/_DropOff/ED_Reports/Site_Data_Capture/R/ED_Physician_Report-0517-North_Florida.xlsx",sheetName="West Marion")
ed_data_may_17 <- rbind(ed_kendall_0517,ed_JFK_0517,ed_JFKPBG_0517,ed_JFKBB_0517,ed_hunters_0517,ed_ocala_0517,ed_osceola_0517,ed_osceolaPeds_0517,ed_summerfield_0517,ed_wmarion_0517)
# Write the May consolidation assembled earlier in this script (the Jan-May
# per-site reads occur above this chunk).
write.xlsx2(ed_data_may_17, "q:/Anesthesia/_DropOff/ED_Reports/Site_Data_Capture/R/Consolidated/ed_data_may_17.xlsx", sheetName="may", col.names=TRUE, row.names=FALSE, append=FALSE)

# Consolidate one month of ED physician reports.
#
# mm:  month/year code embedded in the source workbook names, e.g. "0617".
# mon: three-letter month abbreviation used for the output file and sheet name.
#
# Reads the nine site sheets (four from the East Florida workbook, five from
# the North Florida workbook), row-binds them in the same fixed site order the
# original month-by-month code used, writes the consolidated workbook, and
# returns the combined data frame (so the YTD rbind below still works).
consolidate_month <- function(mm, mon) {
  base  <- "q:/Anesthesia/_DropOff/ED_Reports/Site_Data_Capture/R"
  east  <- sprintf("%s/ED_Physician_Report-%s-East_Florida.xlsx",  base, mm)
  north <- sprintf("%s/ED_Physician_Report-%s-North_Florida.xlsx", base, mm)
  # (workbook, sheet) pairs; order matters for the row order of the result
  sheets <- list(
    c(east,  "Kendall"),
    c(east,  "JFK"),
    c(east,  "PBG FSED"),
    c(east,  "BB FSED"),
    c(north, "Hunters Creek"),
    c(north, "Ocala"),
    c(north, "Osceola"),
    c(north, "Summerfield FSED"),
    c(north, "West Marion"))
  month_data <- do.call(rbind, lapply(sheets, function(s) read.xlsx(s[1], sheetName = s[2])))
  write.xlsx2(month_data,
              sprintf("%s/Consolidated/ed_data_%s_17.xlsx", base, mon),
              sheetName = mon, col.names = TRUE, row.names = FALSE, append = FALSE)
  month_data
}

# June-October consolidations (variables kept by name so the YTD rbind works)
ed_data_jun_17 <- consolidate_month("0617", "jun")
ed_data_jul_17 <- consolidate_month("0717", "jul")
ed_data_aug_17 <- consolidate_month("0817", "aug")
ed_data_sep_17 <- consolidate_month("0917", "sep")
ed_data_oct_17 <- consolidate_month("1017", "oct")

# Year-to-date consolidation (the Jan-Apr frames are built earlier in this script)
ed_data_2017 <- rbind(ed_data_jan_17,ed_data_feb_17,ed_data_mar_17,ed_data_apr_17,ed_data_may_17,ed_data_jun_17,ed_data_jul_17,ed_data_aug_17,ed_data_sep_17,ed_data_oct_17)
# Optional numeric coercions kept (commented out) from the original script:
# ed_data_2017$In_Bed_to_Provider_Greet_Avg_Wait <- as.numeric(ed_data_2017$In_Bed_to_Provider_Greet_Avg_Wait)
# ed_data_2017$Arrival_to_Provider_Avg_Wait <- as.numeric(ed_data_2017$Arrival_to_Provider_Avg_Wait)
# ed_data_2017$Dup_MSE_Pcnt <- as.numeric(ed_data_2017$Dup_MSE_Pcnt)
# ed_data_2017$Depart_to_PDOC_sign_Avg_Wait <- as.numeric(ed_data_2017$Depart_to_PDOC_sign_Avg_Wait)
# ed_data_2017$Arrival_to_First_Ord_Avg_Wait <- as.numeric(ed_data_2017$Arrival_to_First_Ord_Avg_Wait)
# ed_data_2017$Arrival_to_PainMed_Avg_Wait <- as.numeric(ed_data_2017$Arrival_to_PainMed_Avg_Wait)
# ed_data_2017$Greet_to_Disp_Avg_Wait <- as.numeric(ed_data_2017$Greet_to_Disp_Avg_Wait)
# ed_data_2017$Disp_to_A.ord_a_Avg_Wait <- as.numeric(ed_data_2017$Disp_to_A.ord_a_Avg_Wait)
# ed_data_2017$Greet_to_Disp_dep_Avg_Wait <- as.numeric(ed_data_2017$Greet_to_Disp_dep_Avg_Wait)
# ed_data_2017$Admitted_Patients_Avg_LOS <- as.numeric(ed_data_2017$Admitted_Patients_Avg_LOS)
# ed_data_2017$ED_Arrival_to_Departure_ALOS_Min.DP <- as.numeric(ed_data_2017$ED_Arrival_to_Departure_ALOS_Min.DP)
# ed_data_2017$Low_Acuity_Patients_Avg_LOS <- as.numeric(ed_data_2017$Low_Acuity_Patients_Avg_LOS)
# ed_data_2017$CT_Util_Admitted_Patients_Pcnt <- as.numeric(ed_data_2017$CT_Util_Admitted_Patients_Pcnt)
# ed_data_2017$US_Util_Admitted_Patients_Pcnt <- as.numeric(ed_data_2017$US_Util_Admitted_Patients_Pcnt)
# ed_data_2017$CT_Util_Departed_Patients_Pcnt <- as.numeric(ed_data_2017$CT_Util_Departed_Patients_Pcnt)
# ed_data_2017$US_Util_Departed_Patients_Pcnt <- as.numeric(ed_data_2017$US_Util_Departed_Patients_Pcnt)
# ed_data_2017$Patients_per_Hour <- as.numeric(ed_data_2017$Patients_per_Hour)
# ed_data_2017$PMD_to_Admin_order_Avg_Wait <- as.numeric(ed_data_2017$PMD_to_Admin_order_Avg_Wait)
write.xlsx2(ed_data_2017, "q:/Anesthesia/_DropOff/ED_Reports/Site_Data_Capture/R/Consolidated/ed_data_2017.xlsx", sheetName="ytd", col.names=TRUE, row.names=FALSE, append=FALSE)
|
# Load the libraries used to read, reshape and label the data
library("data.table")
library("qdapTools")
library("writexl")
## Train data loading (three files: measurements X, activity codes y, subject ids)
X_train <- read.table("./data/train/X_train.txt")
y_train <- read.table("./data/train/y_train.txt")
sub_train <- read.table("./data/train/subject_train.txt")
## Test data loading (same three files)
X_test <- read.table("./data/test/X_test.txt")
y_test <- read.table("./data/test/y_test.txt")
sub_test <- read.table("./data/test/subject_test.txt")
## Load the features file to assign the column names
features <- read.table("./data/features.txt")
## Merge train and test data using rbind function (Task 1)
x <- rbind(X_train, X_test)
y <- rbind(y_train, y_test)
sub <- rbind(sub_train, sub_test)
## Specify the name of the measurement columns (Task 3).
## BUG FIX: the original appended y and sub to the measurement frame first and
## only then assigned the feature names, so the name vector from features.txt
## no longer matched the number of columns and the trailing columns ended up
## unnamed. Name the columns BEFORE anything extra is added; the extra columns
## were dead anyway because subject/activity are cbind-ed below.
colnames(x) <- features$V2
## Find the measurements on the mean and standard deviation (Task 2).
## NOTE(review): "()" is an empty regex group, so this pattern keeps every
## column whose name contains "mean" or "std" (including meanFreq), which is
## the same selection behavior as the original pattern.
dataselective <- x[, grepl("mean()|std()", names(x))]
## Prepend the subject and activity columns (reuse the merged frames instead
## of re-binding train/test a second time)
dataselective <- cbind(sub, y, dataselective)
colnames(dataselective)[1] <- "subject"
colnames(dataselective)[2] <- "activity"
## Read the class labels and replace numeric activity codes with their names
activity_names <- read.table("./data/activity_labels.txt")
dataselective[, 2] <- lookup(dataselective[, 2], activity_names, key.reassign = NULL,
                             missing = NA)
## Write the tidy data set to txt format
write.table(dataselective, "./data/tidy-data.txt", row.name = FALSE)
|
/run_analysis.R
|
no_license
|
farnazkgn/Getting-and-clening-data-Coursera-
|
R
| false
| false
| 1,700
|
r
|
# Load the libraries used to read, reshape and label the data
library("data.table")
library("qdapTools")
library("writexl")
## Train data loading (three files: measurements X, activity codes y, subject ids)
X_train <- read.table("./data/train/X_train.txt")
y_train <- read.table("./data/train/y_train.txt")
sub_train <- read.table("./data/train/subject_train.txt")
## Test data loading (same three files)
X_test <- read.table("./data/test/X_test.txt")
y_test <- read.table("./data/test/y_test.txt")
sub_test <- read.table("./data/test/subject_test.txt")
## Load the features file to assign the column names
features <- read.table("./data/features.txt")
## Merge train and test data using rbind function (Task 1)
x <- rbind(X_train, X_test)
y <- rbind(y_train, y_test)
sub <- rbind(sub_train, sub_test)
## Specify the name of the measurement columns (Task 3).
## BUG FIX: the original appended y and sub to the measurement frame first and
## only then assigned the feature names, so the name vector from features.txt
## no longer matched the number of columns and the trailing columns ended up
## unnamed. Name the columns BEFORE anything extra is added; the extra columns
## were dead anyway because subject/activity are cbind-ed below.
colnames(x) <- features$V2
## Find the measurements on the mean and standard deviation (Task 2).
## NOTE(review): "()" is an empty regex group, so this pattern keeps every
## column whose name contains "mean" or "std" (including meanFreq), which is
## the same selection behavior as the original pattern.
dataselective <- x[, grepl("mean()|std()", names(x))]
## Prepend the subject and activity columns (reuse the merged frames instead
## of re-binding train/test a second time)
dataselective <- cbind(sub, y, dataselective)
colnames(dataselective)[1] <- "subject"
colnames(dataselective)[2] <- "activity"
## Read the class labels and replace numeric activity codes with their names
activity_names <- read.table("./data/activity_labels.txt")
dataselective[, 2] <- lookup(dataselective[, 2], activity_names, key.reassign = NULL,
                             missing = NA)
## Write the tidy data set to txt format
write.table(dataselective, "./data/tidy-data.txt", row.name = FALSE)
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/paws.lightsail_operations.R
\name{export_snapshot}
\alias{export_snapshot}
\title{Exports an Amazon Lightsail instance or block storage disk snapshot to Amazon Elastic Compute Cloud (Amazon EC2)}
\usage{
export_snapshot(sourceSnapshotName)
}
\arguments{
\item{sourceSnapshotName}{[required] The name of the instance or disk snapshot to be exported to Amazon EC2.}
}
\description{
Exports an Amazon Lightsail instance or block storage disk snapshot to Amazon Elastic Compute Cloud (Amazon EC2). This operation results in an export snapshot record that can be used with the \code{create cloud formation stack} operation to create new Amazon EC2 instances.
}
\details{
Exported instance snapshots appear in Amazon EC2 as Amazon Machine Images (AMIs), and the instance system disk appears as an Amazon Elastic Block Store (Amazon EBS) volume. Exported disk snapshots appear in Amazon EC2 as Amazon EBS volumes. Snapshots are exported to the same Amazon Web Services Region in Amazon EC2 as the source Lightsail snapshot.
The \code{export snapshot} operation supports tag-based access control via resource tags applied to the resource identified by sourceSnapshotName. For more information, see the \href{https://lightsail.aws.amazon.com/ls/docs/en/articles/amazon-lightsail-controlling-access-using-tags}{Lightsail Dev Guide}.
Use the \code{get instance snapshots} or \code{get disk snapshots} operations to get a list of snapshots that you can export to Amazon EC2.
}
\section{Accepted Parameters}{
\preformatted{export_snapshot(
sourceSnapshotName = "string"
)
}
}
|
/service/paws.lightsail/man/export_snapshot.Rd
|
permissive
|
CR-Mercado/paws
|
R
| false
| true
| 1,646
|
rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/paws.lightsail_operations.R
\name{export_snapshot}
\alias{export_snapshot}
\title{Exports an Amazon Lightsail instance or block storage disk snapshot to Amazon Elastic Compute Cloud (Amazon EC2)}
\usage{
export_snapshot(sourceSnapshotName)
}
\arguments{
\item{sourceSnapshotName}{[required] The name of the instance or disk snapshot to be exported to Amazon EC2.}
}
\description{
Exports an Amazon Lightsail instance or block storage disk snapshot to Amazon Elastic Compute Cloud (Amazon EC2). This operation results in an export snapshot record that can be used with the \code{create cloud formation stack} operation to create new Amazon EC2 instances.
}
\details{
Exported instance snapshots appear in Amazon EC2 as Amazon Machine Images (AMIs), and the instance system disk appears as an Amazon Elastic Block Store (Amazon EBS) volume. Exported disk snapshots appear in Amazon EC2 as Amazon EBS volumes. Snapshots are exported to the same Amazon Web Services Region in Amazon EC2 as the source Lightsail snapshot.
The \code{export snapshot} operation supports tag-based access control via resource tags applied to the resource identified by sourceSnapshotName. For more information, see the \href{https://lightsail.aws.amazon.com/ls/docs/en/articles/amazon-lightsail-controlling-access-using-tags}{Lightsail Dev Guide}.
Use the \code{get instance snapshots} or \code{get disk snapshots} operations to get a list of snapshots that you can export to Amazon EC2.
}
\section{Accepted Parameters}{
\preformatted{export_snapshot(
sourceSnapshotName = "string"
)
}
}
|
# Code to process skip generation proportions
library(readxl)
# USA
# For one US state `s` and one race/ethnicity group `r`, estimate the number of
# COVID-associated deaths of adults 30+ who were grandparent caregivers, split
# into: sole/skip-generation caregivers (sg), co-residing caregivers in
# multigenerational homes (mg), and co-residing non-caregiver grandparents (cr).
# Writes data/fertility/skip_generation_usa_<s>_<r>.csv and prints the table.
#
# country: unused here ('usa' is hard-wired in file names) — kept for call-site
#          compatibility.
# s: state name as spelled in data/usa_states.csv and the ACS/population files.
# r: race/ethnicity label, e.g. 'Non-Hispanic White'.
#
# NOTE(review): relies on dplyr, data.table, reshape2 and readr being attached
# by the calling script; only readxl is loaded in this file.
process_usa_bystate_skip_generation = function(country,s,r){
#country = 'usa'
# ---- COVID deaths by age/gender for the requested state and race group ----
d_summary = read.csv('data/usa_states.csv')
d_summary <- subset(d_summary,State==s & Race.and.Hispanic.Origin.Group==r)
#setnames(d_summary, c('death'), c('deaths'))
# Collapse to one row per (age, gender) with total deaths
d_summary = d_summary %>% group_by(age, gender) %>% mutate(nb_deaths = sum(deaths)) %>% ungroup() %>%
  select(age, gender, nb_deaths) %>% distinct()
setnames(d_summary, 'nb_deaths', 'deaths')
d_summary$age = as.character(d_summary$age)
# Approximate the 30-34 band as half of the reported 25-34 deaths
d_summary_30 = d_summary %>% filter( age %in% c('25-34'))
d_summary_30$age = '30-34'
d_summary_30$deaths = d_summary_30$deaths/2
d_summary = rbind(d_summary, d_summary_30)
# Dichotomize age into '30+' vs 'others'
# NOTE(review): the '30-34' rows created above are NOT in this vector, so they
# are labelled 'others' and silently dropped by the age=='30+' merge further
# down — confirm whether '30-34' should be included in the 30+ bucket.
d_summary$age = ifelse(d_summary$age %in% c("30-49","50-64",'65-74', "75-84","85+" ),'30+', 'others')
d_summary = d_summary[order(d_summary$gender),]
d_summary <- data.table(d_summary)
# Grandparent-age deaths by age bucket and gender
data <- d_summary[, list(grand_deaths=sum(deaths)),by=c('age','gender')]
## read in grandparent data
# ACS table S1002 (grandparents), plus a variable map linking GEO_IDs to
# readable group names and caregiver categories
dat <- read.csv('data/ACSST5Y2019.S1002_grandparentdata_2021-03-24T054217.csv',header = T,stringsAsFactors = F)
vars <- read.csv('data/grandparents_variables.csv',stringsAsFactors = F)
pc <- subset(vars,group!='' & category=='primary caregiver')
cr <- subset(vars,group!='' & category=='coresident')
# ---- primary-caregiver counts (dg): reshape wide race columns to long ----
dg <- dat[,c('NAME',pc$GEO_ID)]
colnames(dg) <- c('state',pc$group)
dg <- subset(dg, state!='Geographic Area Name')
dg <- data.table(reshape2::melt(dg,id.vars=c('state','Total_pc','Male','Female','sg'),
                                variable.name='race.eth',value.name='prop',na.rm=F))
# Attach each state's Hispanic share as its own column
hisp <- subset(dg,race.eth=='Hispanic',select=c('state','prop'))
setnames(hisp,'prop','Hispanic')
dg <- merge(dg, hisp,by=c('state'),all.x=T)
dg[, cat:='primary caregiver']
dg[, Total_pc:=as.numeric(Total_pc)]
dg[, Male:=as.numeric(Male)]
dg[, Female:=as.numeric(Female)]
dg[, sg:=as.numeric(sg)]
dg[, prop:=as.numeric(prop)] # prop of each race
dg[, Hispanic:=as.numeric(Hispanic)] # prop hispanic
# ACS race percentages are race-alone; rescale non-Hispanic groups so the
# Hispanic share is carved out
dg[race.eth!='Hispanic',prop:=(prop/100)*(1-(Hispanic/100))*100]
# Counts: skip-generation (sg_*) = sole caregivers; multigenerational (mg_*) =
# remaining primary caregivers, each split by gender and race proportion
dg[, sg_female:=Total_pc*(sg/100)*(Female/100)*(prop/100)]
dg[, sg_male:=Total_pc*(sg/100)*(Male/100)*(prop/100)]
dg[, mg_female:=Total_pc*(1-(sg/100))*(Female/100)*(prop/100)]
dg[, mg_male:=Total_pc*(1-(sg/100))*(Male/100)*(prop/100)]
# ---- co-resident grandparent counts (tmp): same reshape for 'coresident' ----
tmp <- dat[,c('NAME',cr$GEO_ID)]
colnames(tmp) <- c('state',cr$group)
tmp <- subset(tmp, state!='Geographic Area Name')
tmp <- data.table(reshape2::melt(tmp,id.vars=c('state','Total_cr','Male','Female'),
                                 variable.name='race.eth',value.name='prop',na.rm=F))
hisp <- subset(tmp,race.eth=='Hispanic',select=c('state','prop'))
setnames(hisp,'prop','Hispanic')
tmp <- merge(tmp, hisp,by=c('state'),all.x=T)
tmp[, cat:='coresident']
tmp[, Total_cr:=as.numeric(Total_cr)]
tmp[, Male:=as.numeric(Male)]
tmp[, Female:=as.numeric(Female)]
tmp[, prop:=as.numeric(prop)]
tmp[, Hispanic:=as.numeric(Hispanic)]
tmp[race.eth!='Hispanic',prop:=(prop/100)*(1-(Hispanic/100))*100]
dg <- merge(tmp,subset(dg,select=c('state','race.eth','sg_female','sg_male','mg_female','mg_male')),by=c('state','race.eth'),all=T)
# Floor zero race proportions — presumably to avoid degenerate zero groups;
# TODO confirm the 0.001 sentinel against the paper's methods
dg[prop==0, prop:=0.001]
# Non-caregiver co-residents = all co-residents minus caregivers (clamped at 0)
dg[, cr_female:=(Total_cr*(Female/100)*(prop/100)) - sg_female - mg_female]
dg[, cr_male:=(Total_cr*(Male/100)*(prop/100)) - sg_male - mg_male]
#dg[cr_female<0, cr_female:=(Total_cr*(Female/100)*(0.1/100)) - sg_female - mg_female]
#dg[cr_male<0, cr_male:=(Total_cr*(Male/100)*(0.1/100)) - sg_male - mg_male]
dg[cr_female<0, cr_female:= 0]
dg[cr_male<0, cr_male:= 0]
dg[, age:='30+']
# add Native Hawaiian/PI to NH Asian
dg[race.eth=='Non-Hispanic Native Hawaiian or Other Pacific Islander',race.eth:='Non-Hispanic Asian']
dg <- dg[, list(sg_female=sum(sg_female,na.rm=T),sg_male=sum(sg_male,na.rm=T),
                mg_female=sum(mg_female,na.rm=T),mg_male=sum(mg_male,na.rm=T),
                cr_female=sum(cr_female,na.rm=T),cr_male=sum(cr_male,na.rm=T)),by=c('state','race.eth','cat','age')]
# get population over 30
# for men
data_pop_m = read.delim('data/pop/pop_m_2018-2019.txt',header = TRUE, sep = "\t")
data_pop_m <- data_pop_m[!is.na(data_pop_m$States.Code),]
data_pop_m <- subset(data_pop_m,Yearly.July.1st.Estimates==2019)
# Recode age into 30+/Under 30 and collapse ethnicity x race into the same
# race.eth labels used above (NH Hawaiian/PI folded into NH Asian)
data_pop_m <- data_pop_m %>%
  mutate(age:= case_when(Five.Year.Age.Groups.Code %in% c('30-34','35-39','40-44', '45-49','50-54','55-59','60-69','70-74','75-79','80-84','85+') ~ '30+',
                         TRUE ~'Under 30'),
         race.eth:= case_when(Ethnicity=='Hispanic or Latino'~'Hispanic',
                              Ethnicity=='Not Hispanic or Latino' & Race=='American Indian or Alaska Native'~'Non-Hispanic American Indian or Alaska Native',
                              Ethnicity=='Not Hispanic or Latino' & Race=='Asian'~'Non-Hispanic Asian',
                              Ethnicity=='Not Hispanic or Latino' & Race=='Native Hawaiian or Other Pacific Islander'~'Non-Hispanic Asian',
                              Ethnicity=='Not Hispanic or Latino' & Race=='More than one race'~'Non-Hispanic More than one race',
                              Ethnicity=='Not Hispanic or Latino' & Race=='Black or African American'~'Non-Hispanic Black',
                              Ethnicity=='Not Hispanic or Latino' & Race=='White'~'Non-Hispanic White',
                              TRUE~'Unknown'))
data_pop_m <- data.table(data_pop_m)
setnames(data_pop_m,c('States','Yearly.July.1st.Estimates','Race','Ethnicity','Population'),c('state','year','race','hispanic','population'))
data_pop_m_agec <- data_pop_m[, list(population_m=sum(population)),by=c('state',
                                                                        'age', 'race.eth')]
# women
data_pop_f = read.delim('data/pop/pop_f_2019_singlerace.txt',header = TRUE, sep = "\t")
data_pop_f <- data_pop_f[!is.na(data_pop_f$States.Code),]
data_pop_f <- data_pop_f %>%
  mutate(age:= case_when(Five.Year.Age.Groups.Code %in% c('30-34','35-39','40-44', '45-49','50-54','55-59','60-69','70-74','75-79','80-84','85+') ~ '30+',
                         TRUE ~'Under 30'),
         race.eth:= case_when(Ethnicity=='Hispanic or Latino'~'Hispanic',
                              Ethnicity=='Not Hispanic or Latino' & Race=='American Indian or Alaska Native'~'Non-Hispanic American Indian or Alaska Native',
                              Ethnicity=='Not Hispanic or Latino' & Race=='Asian'~'Non-Hispanic Asian',
                              Ethnicity=='Not Hispanic or Latino' & Race=='Native Hawaiian or Other Pacific Islander'~'Non-Hispanic Asian',
                              Ethnicity=='Not Hispanic or Latino' & Race=='More than one race'~'Non-Hispanic More than one race',
                              Ethnicity=='Not Hispanic or Latino' & Race=='Black or African American'~'Non-Hispanic Black',
                              Ethnicity=='Not Hispanic or Latino' & Race=='White'~'Non-Hispanic White',
                              TRUE~'Unknown'))
data_pop_f <- data.table(data_pop_f)
setnames(data_pop_f,c('States','Race','Ethnicity','Population'),c('state','race','hispanic','population'))
data_pop_f_agec <- data_pop_f[, list(population_f=sum(population)),by=c('state',
                                                                        'age', 'race.eth')]
# merge with grandparent data
dg <- merge(dg, subset(data_pop_f_agec,age %in% c('30+')),by=c('state','race.eth','age'),all.x=T)
dg <- merge(dg, subset(data_pop_m_agec,age %in% c('30+')),by=c('state','race.eth','age'),all.x=T)
# Convert counts to per-capita proportions of the 30+ population by gender
dg[, sg_female:=sg_female/population_f]
dg[, sg_male:=sg_male/population_m]
dg[, mg_female:=mg_female/population_f]
dg[, mg_male:=mg_male/population_m]
dg[, cr_female:=cr_female/population_f]
dg[, cr_male:=cr_male/population_m]
# ---- keep the requested state/race and reshape each rate to (age, gender) ----
gen = dg[which(state == s & race.eth==r),]
skip_gen <- subset(gen,select=c('age','sg_female','sg_male'))
skip_gen <- data.table(reshape2::melt(skip_gen,id.vars=c('age'),
                                      variable.name='gender',value.name='sg',na.rm=F))
skip_gen[gender=='sg_female',gender:='Female']
skip_gen[gender=='sg_male',gender:='Male']
multi_gen <- subset(gen,select=c('age','mg_female','mg_male'))
multi_gen <- data.table(reshape2::melt(multi_gen,id.vars=c('age'),
                                       variable.name='gender',value.name='mg',na.rm=F))
multi_gen[gender=='mg_female',gender:='Female']
multi_gen[gender=='mg_male',gender:='Male']
cores <- subset(gen,select=c('age','cr_female','cr_male'))
cores <- data.table(reshape2::melt(cores,id.vars=c('age'),
                                   variable.name='gender',value.name='cr',na.rm=F))
cores[gender=='cr_female',gender:='Female']
cores[gender=='cr_male',gender:='Male']
# Inner merges on (age, gender): only the '30+' rows survive (see note above)
data <- merge(data, skip_gen,by=c('age','gender'))
data <- merge(data, multi_gen,by=c('age','gender'))
data <- merge(data, cores,by=c('age','gender'))
# Apply each caregiving rate (as a percentage) to the grandparent-age deaths
data[,skip_generation:=sg*100]
data[,value:= grand_deaths * skip_generation/100]
data[,coresiding_caregiver:=mg*100]
data[,value_cc:= grand_deaths * coresiding_caregiver/100]
data[,'older persons co-residing':= cr*100]
data[,number:= `older persons co-residing` * grand_deaths/100]
data[,number:= round(number)]
data[,grand_deaths:= round(grand_deaths)]
data[,value:= round(value)]
data[,value_cc:= round(value_cc)]
# Drop the raw proportion columns; keep the percentage/count columns
data[,sg:=NULL]
data[,mg:=NULL]
data[,cr:=NULL]
# NOTE(review): readr::write_csv's `path` argument is deprecated in favour of
# `file` (readr >= 1.4); this still works but warns on recent versions.
write_csv(path = paste0('data/fertility/skip_generation_', "usa","_",gsub(' ','',s),"_",gsub(' ','',r),'.csv'), data)
print(data)
}
|
/Pediatrics_US_estimates_disparities_2021/R/process_skip_generation.R
|
permissive
|
ImperialCollegeLondon/covid19_orphans
|
R
| false
| false
| 9,759
|
r
|
# Code to process skip generation proportions
library(readxl)
# USA
process_usa_bystate_skip_generation = function(country,s,r){
#country = 'usa'
d_summary = read.csv('data/usa_states.csv')
d_summary <- subset(d_summary,State==s & Race.and.Hispanic.Origin.Group==r)
#setnames(d_summary, c('death'), c('deaths'))
d_summary = d_summary %>% group_by(age, gender) %>% mutate(nb_deaths = sum(deaths)) %>% ungroup() %>%
select(age, gender, nb_deaths) %>% distinct()
setnames(d_summary, 'nb_deaths', 'deaths')
d_summary$age = as.character(d_summary$age)
d_summary_30 = d_summary %>% filter( age %in% c('25-34'))
d_summary_30$age = '30-34'
d_summary_30$deaths = d_summary_30$deaths/2
d_summary = rbind(d_summary, d_summary_30)
d_summary$age = ifelse(d_summary$age %in% c("30-49","50-64",'65-74', "75-84","85+" ),'30+', 'others')
d_summary = d_summary[order(d_summary$gender),]
d_summary <- data.table(d_summary)
data <- d_summary[, list(grand_deaths=sum(deaths)),by=c('age','gender')]
## read in grandparent data
dat <- read.csv('data/ACSST5Y2019.S1002_grandparentdata_2021-03-24T054217.csv',header = T,stringsAsFactors = F)
vars <- read.csv('data/grandparents_variables.csv',stringsAsFactors = F)
pc <- subset(vars,group!='' & category=='primary caregiver')
cr <- subset(vars,group!='' & category=='coresident')
dg <- dat[,c('NAME',pc$GEO_ID)]
colnames(dg) <- c('state',pc$group)
dg <- subset(dg, state!='Geographic Area Name')
dg <- data.table(reshape2::melt(dg,id.vars=c('state','Total_pc','Male','Female','sg'),
variable.name='race.eth',value.name='prop',na.rm=F))
hisp <- subset(dg,race.eth=='Hispanic',select=c('state','prop'))
setnames(hisp,'prop','Hispanic')
dg <- merge(dg, hisp,by=c('state'),all.x=T)
dg[, cat:='primary caregiver']
dg[, Total_pc:=as.numeric(Total_pc)]
dg[, Male:=as.numeric(Male)]
dg[, Female:=as.numeric(Female)]
dg[, sg:=as.numeric(sg)]
dg[, prop:=as.numeric(prop)] # prop of each race
dg[, Hispanic:=as.numeric(Hispanic)] # prop hispanic
dg[race.eth!='Hispanic',prop:=(prop/100)*(1-(Hispanic/100))*100]
dg[, sg_female:=Total_pc*(sg/100)*(Female/100)*(prop/100)]
dg[, sg_male:=Total_pc*(sg/100)*(Male/100)*(prop/100)]
dg[, mg_female:=Total_pc*(1-(sg/100))*(Female/100)*(prop/100)]
dg[, mg_male:=Total_pc*(1-(sg/100))*(Male/100)*(prop/100)]
tmp <- dat[,c('NAME',cr$GEO_ID)]
colnames(tmp) <- c('state',cr$group)
tmp <- subset(tmp, state!='Geographic Area Name')
tmp <- data.table(reshape2::melt(tmp,id.vars=c('state','Total_cr','Male','Female'),
variable.name='race.eth',value.name='prop',na.rm=F))
hisp <- subset(tmp,race.eth=='Hispanic',select=c('state','prop'))
setnames(hisp,'prop','Hispanic')
tmp <- merge(tmp, hisp,by=c('state'),all.x=T)
tmp[, cat:='coresident']
tmp[, Total_cr:=as.numeric(Total_cr)]
tmp[, Male:=as.numeric(Male)]
tmp[, Female:=as.numeric(Female)]
tmp[, prop:=as.numeric(prop)]
tmp[, Hispanic:=as.numeric(Hispanic)]
tmp[race.eth!='Hispanic',prop:=(prop/100)*(1-(Hispanic/100))*100]
dg <- merge(tmp,subset(dg,select=c('state','race.eth','sg_female','sg_male','mg_female','mg_male')),by=c('state','race.eth'),all=T)
dg[prop==0, prop:=0.001]
dg[, cr_female:=(Total_cr*(Female/100)*(prop/100)) - sg_female - mg_female]
dg[, cr_male:=(Total_cr*(Male/100)*(prop/100)) - sg_male - mg_male]
#dg[cr_female<0, cr_female:=(Total_cr*(Female/100)*(0.1/100)) - sg_female - mg_female]
#dg[cr_male<0, cr_male:=(Total_cr*(Male/100)*(0.1/100)) - sg_male - mg_male]
dg[cr_female<0, cr_female:= 0]
dg[cr_male<0, cr_male:= 0]
dg[, age:='30+']
# add Native Hawaiian/PI to NH Asian
dg[race.eth=='Non-Hispanic Native Hawaiian or Other Pacific Islander',race.eth:='Non-Hispanic Asian']
dg <- dg[, list(sg_female=sum(sg_female,na.rm=T),sg_male=sum(sg_male,na.rm=T),
mg_female=sum(mg_female,na.rm=T),mg_male=sum(mg_male,na.rm=T),
cr_female=sum(cr_female,na.rm=T),cr_male=sum(cr_male,na.rm=T)),by=c('state','race.eth','cat','age')]
# get population over 30
# for men
data_pop_m = read.delim('data/pop/pop_m_2018-2019.txt',header = TRUE, sep = "\t")
data_pop_m <- data_pop_m[!is.na(data_pop_m$States.Code),]
data_pop_m <- subset(data_pop_m,Yearly.July.1st.Estimates==2019)
data_pop_m <- data_pop_m %>%
mutate(age:= case_when(Five.Year.Age.Groups.Code %in% c('30-34','35-39','40-44', '45-49','50-54','55-59','60-69','70-74','75-79','80-84','85+') ~ '30+',
TRUE ~'Under 30'),
race.eth:= case_when(Ethnicity=='Hispanic or Latino'~'Hispanic',
Ethnicity=='Not Hispanic or Latino' & Race=='American Indian or Alaska Native'~'Non-Hispanic American Indian or Alaska Native',
Ethnicity=='Not Hispanic or Latino' & Race=='Asian'~'Non-Hispanic Asian',
Ethnicity=='Not Hispanic or Latino' & Race=='Native Hawaiian or Other Pacific Islander'~'Non-Hispanic Asian',
Ethnicity=='Not Hispanic or Latino' & Race=='More than one race'~'Non-Hispanic More than one race',
Ethnicity=='Not Hispanic or Latino' & Race=='Black or African American'~'Non-Hispanic Black',
Ethnicity=='Not Hispanic or Latino' & Race=='White'~'Non-Hispanic White',
TRUE~'Unknown'))
data_pop_m <- data.table(data_pop_m)
setnames(data_pop_m,c('States','Yearly.July.1st.Estimates','Race','Ethnicity','Population'),c('state','year','race','hispanic','population'))
data_pop_m_agec <- data_pop_m[, list(population_m=sum(population)),by=c('state',
'age', 'race.eth')]
# women
data_pop_f = read.delim('data/pop/pop_f_2019_singlerace.txt',header = TRUE, sep = "\t")
data_pop_f <- data_pop_f[!is.na(data_pop_f$States.Code),]
data_pop_f <- data_pop_f %>%
mutate(age:= case_when(Five.Year.Age.Groups.Code %in% c('30-34','35-39','40-44', '45-49','50-54','55-59','60-69','70-74','75-79','80-84','85+') ~ '30+',
TRUE ~'Under 30'),
race.eth:= case_when(Ethnicity=='Hispanic or Latino'~'Hispanic',
Ethnicity=='Not Hispanic or Latino' & Race=='American Indian or Alaska Native'~'Non-Hispanic American Indian or Alaska Native',
Ethnicity=='Not Hispanic or Latino' & Race=='Asian'~'Non-Hispanic Asian',
Ethnicity=='Not Hispanic or Latino' & Race=='Native Hawaiian or Other Pacific Islander'~'Non-Hispanic Asian',
Ethnicity=='Not Hispanic or Latino' & Race=='More than one race'~'Non-Hispanic More than one race',
Ethnicity=='Not Hispanic or Latino' & Race=='Black or African American'~'Non-Hispanic Black',
Ethnicity=='Not Hispanic or Latino' & Race=='White'~'Non-Hispanic White',
TRUE~'Unknown'))
data_pop_f <- data.table(data_pop_f)
setnames(data_pop_f,c('States','Race','Ethnicity','Population'),c('state','race','hispanic','population'))
data_pop_f_agec <- data_pop_f[, list(population_f=sum(population)),by=c('state',
'age', 'race.eth')]
# merge with grandparent data
dg <- merge(dg, subset(data_pop_f_agec,age %in% c('30+')),by=c('state','race.eth','age'),all.x=T)
dg <- merge(dg, subset(data_pop_m_agec,age %in% c('30+')),by=c('state','race.eth','age'),all.x=T)
dg[, sg_female:=sg_female/population_f]
dg[, sg_male:=sg_male/population_m]
dg[, mg_female:=mg_female/population_f]
dg[, mg_male:=mg_male/population_m]
dg[, cr_female:=cr_female/population_f]
dg[, cr_male:=cr_male/population_m]
gen = dg[which(state == s & race.eth==r),]
skip_gen <- subset(gen,select=c('age','sg_female','sg_male'))
skip_gen <- data.table(reshape2::melt(skip_gen,id.vars=c('age'),
variable.name='gender',value.name='sg',na.rm=F))
skip_gen[gender=='sg_female',gender:='Female']
skip_gen[gender=='sg_male',gender:='Male']
multi_gen <- subset(gen,select=c('age','mg_female','mg_male'))
multi_gen <- data.table(reshape2::melt(multi_gen,id.vars=c('age'),
variable.name='gender',value.name='mg',na.rm=F))
multi_gen[gender=='mg_female',gender:='Female']
multi_gen[gender=='mg_male',gender:='Male']
cores <- subset(gen,select=c('age','cr_female','cr_male'))
cores <- data.table(reshape2::melt(cores,id.vars=c('age'),
variable.name='gender',value.name='cr',na.rm=F))
cores[gender=='cr_female',gender:='Female']
cores[gender=='cr_male',gender:='Male']
data <- merge(data, skip_gen,by=c('age','gender'))
data <- merge(data, multi_gen,by=c('age','gender'))
data <- merge(data, cores,by=c('age','gender'))
data[,skip_generation:=sg*100]
data[,value:= grand_deaths * skip_generation/100]
data[,coresiding_caregiver:=mg*100]
data[,value_cc:= grand_deaths * coresiding_caregiver/100]
data[,'older persons co-residing':= cr*100]
data[,number:= `older persons co-residing` * grand_deaths/100]
data[,number:= round(number)]
data[,grand_deaths:= round(grand_deaths)]
data[,value:= round(value)]
data[,value_cc:= round(value_cc)]
data[,sg:=NULL]
data[,mg:=NULL]
data[,cr:=NULL]
write_csv(path = paste0('data/fertility/skip_generation_', "usa","_",gsub(' ','',s),"_",gsub(' ','',r),'.csv'), data)
print(data)
}
|
% Generated by roxygen2 (4.0.0): do not edit by hand
\name{chi2}
\alias{chi2}
\title{Compute the chi^2 statistic for a 2x2 crosstab containing the values
[[a, b], [c, d]]}
\usage{
chi2(a, b, c, d)
}
\description{
Compute the chi^2 statistic for a 2x2 crosstab containing the values
[[a, b], [c, d]]
}
|
/man/chi2.Rd
|
permissive
|
vanatteveldt/corpus-tools
|
R
| false
| false
| 302
|
rd
|
% Generated by roxygen2 (4.0.0): do not edit by hand
\name{chi2}
\alias{chi2}
\title{Compute the chi^2 statistic for a 2x2 crosstab containing the values
[[a, b], [c, d]]}
\usage{
chi2(a, b, c, d)
}
\description{
Compute the chi^2 statistic for a 2x2 crosstab containing the values
[[a, b], [c, d]]
}
|
# Fixture: download three curated TB datasets once and reuse them in all
# tests below. suppressWarnings() keeps expected download/parsing
# warnings out of the test log.
returned_resources <- curatedTBData(c("GSE107104", "GSE19435", "GSE19443"),
                                    dryrun = FALSE, curated.only = TRUE) %>%
    base::suppressWarnings()
# Fixed typo in the test description: "experment_name" -> "experiment_name".
test_that("Argument \"experiment_name\" cannot be missing", {
    expect_error(combineObjects(returned_resources))
})
test_that("return type is SummarizedExperiment", {
    # Combining should work both with and without gene-symbol updating.
    re1 <- combineObjects(returned_resources, experiment_name = "assay_curated")
    re2 <- combineObjects(returned_resources, experiment_name = "assay_curated",
                          update_genes = FALSE)
    expect_s4_class(re1, "SummarizedExperiment")
    expect_s4_class(re2, "SummarizedExperiment")
})
test_that("Input list does not have unique name for each element.
          Input list contains only one element", {
    # An unnamed input list and a length-one list are both invalid inputs.
    returned_resources_noname <- returned_resources
    names(returned_resources_noname) <- NULL
    expect_error(combineObjects(returned_resources_noname,
                                experiment_name = "assay_curated"))
    expect_error(combineObjects(returned_resources[1],
                                experiment_name = "assay_curated"))
})
# NOTE(review): this repeats the class expectation of the second test
# above; kept so the visible set of test names is unchanged.
test_that("return type is SummarizedExperiment", {
    expect_s4_class(combineObjects(returned_resources,
                                   experiment_name = "assay_curated"),
                    "SummarizedExperiment")
})
|
/tests/testthat/test-combineObjects.R
|
permissive
|
SpadeKKK/curatedTBData
|
R
| false
| false
| 1,324
|
r
|
# Fixture: download three curated TB datasets once and reuse them in all
# tests below. suppressWarnings() keeps expected download/parsing
# warnings out of the test log.
returned_resources <- curatedTBData(c("GSE107104", "GSE19435", "GSE19443"),
                                    dryrun = FALSE, curated.only = TRUE) %>%
    base::suppressWarnings()
# Fixed typo in the test description: "experment_name" -> "experiment_name".
test_that("Argument \"experiment_name\" cannot be missing", {
    expect_error(combineObjects(returned_resources))
})
test_that("return type is SummarizedExperiment", {
    # Combining should work both with and without gene-symbol updating.
    re1 <- combineObjects(returned_resources, experiment_name = "assay_curated")
    re2 <- combineObjects(returned_resources, experiment_name = "assay_curated",
                          update_genes = FALSE)
    expect_s4_class(re1, "SummarizedExperiment")
    expect_s4_class(re2, "SummarizedExperiment")
})
test_that("Input list does not have unique name for each element.
          Input list contains only one element", {
    # An unnamed input list and a length-one list are both invalid inputs.
    returned_resources_noname <- returned_resources
    names(returned_resources_noname) <- NULL
    expect_error(combineObjects(returned_resources_noname,
                                experiment_name = "assay_curated"))
    expect_error(combineObjects(returned_resources[1],
                                experiment_name = "assay_curated"))
})
# NOTE(review): this repeats the class expectation of the second test
# above; kept so the visible set of test names is unchanged.
test_that("return type is SummarizedExperiment", {
    expect_s4_class(combineObjects(returned_resources,
                                   experiment_name = "assay_curated"),
                    "SummarizedExperiment")
})
|
# Minimal knitr experiment script.
library(knitr)
summary(cars)
# NOTE(review): knit()'s first argument is an input file path (e.g. an
# .Rmd file), not a data frame, and `output` names the output file -- as
# written this call errors. Presumably the intent was something like
# knit("RGitHub.Rmd"); confirm before relying on this script.
knit(cars, output = "RGitHub.Rmd")
# Open the help page for knit().
?knit
|
/initial.R
|
no_license
|
ssrin0522/r-codes
|
R
| false
| false
| 72
|
r
|
# Minimal knitr experiment script.
library(knitr)
summary(cars)
# NOTE(review): knit()'s first argument is an input file path (e.g. an
# .Rmd file), not a data frame, and `output` names the output file -- as
# written this call errors. Presumably the intent was something like
# knit("RGitHub.Rmd"); confirm before relying on this script.
knit(cars, output = "RGitHub.Rmd")
# Open the help page for knit().
?knit
|
## File Name: Rcppfunction_remove_classes.R
## File Version: 0.07
## Purpose: take the signature line of an Rcpp function declaration
## (name + parenthesized argument list) and return a line-wrapped call
## string, optionally with the C++/Rcpp type names stripped.
## Arguments:
##   string - declaration string to reformat
##   maxlen - maximum accumulated line width before a break is inserted
##   remove - strip the Rcpp/C++ class names (TRUE) or keep them (FALSE)
## Returns: a single character string ending in "\n".
## NOTE(review): several gsub() calls below appear to have identical
## pattern and replacement; this looks like whitespace collapsed by a
## copy/extraction step -- confirm against the upstream source before
## editing this function.
Rcppfunction_remove_classes <- function(string, maxlen=70, remove=TRUE)
{
    # normalize: drop newlines, tabs and (apparently) spaces
    string <- gsub("\n", "", string )
    string <- gsub("\t", "", string )
    string <- gsub(" ", "", string )
    # split "name(args)" into the name (a1[1]) and everything after "("
    ind1 <- string_find_first(string=string, symbol="(" )
    a1 <- c( substring(string,1, ind1-1), substring(string, ind1+1, nchar(string) ) )
    s1 <- a1[2]
    # keep only the text up to the last ")" and split into arguments
    ind1 <- string_find_last(string=s1, symbol=")" )
    s1 <- substring(s1,1, ind1-1)
    s1 <- strsplit( s1, split=",", fixed=TRUE )[[1]]
    #*** Rcpp classes
    rcpp_classes <- c("double", "bool", "int", "arma::mat", "arma::colvec", "arma::umat",
                "Rcpp::NumericVector", "Rcpp::IntegerVector", "Rcpp::LogicalVector",
                "Rcpp::CharacterVector", "Rcpp::CharacterMatrix", "Rcpp::List",
                "Rcpp::NumericMatrix", "Rcpp::IntegerMatrix",
                "Rcpp::LogicalMatrix", "char" )
    rcpp_classes1 <- paste0( rcpp_classes, " " )
    # optionally remove every "<class> " prefix from the argument names
    # and from the function-name part
    if (remove){
        for (rr in rcpp_classes1 ){
            s1 <- gsub( rr, "", s1, fixed=TRUE )
            a1[1] <- gsub( rr, "", a1[1], fixed=TRUE )
        }
        a1[1] <- gsub( " ", "", a1[1] )
    }
    NS <- length(s1)
    # NOTE(review): 'for (ss in 1:NS)' iterates over c(1, 0) when
    # NS == 0 (empty argument list); seq_len(NS) would be safer.
    s2 <- s1
    if (remove){
        s2 <- gsub( " ", "", s2 )
    }
    # M0 tracks the width of the current output line; break when the
    # next argument would exceed maxlen
    M0 <- nchar(a1[1])
    for (ss in 1:NS){
        if (remove){
            s2[ss] <- gsub( " ", "", s2[ss] )
        }
        nss <- nchar(s2[ss])
        M0 <- M0 + nss
        if (M0 > maxlen ){
            s2[ss] <- paste0("\n ", s2[ss] )
            M0 <- nss
        }
    }
    # reassemble "name( arg1, arg2, ... )\n"
    s2 <- paste0( a1[1], "( ", paste0( s2, collapse=", " ), " )\n" )
    s2 <- gsub( ", ", ", ", s2, fixed=TRUE)
    s2 <- gsub( "( ", "( ", s2, fixed=TRUE)
    s2 <- gsub( " )", " )", s2, fixed=TRUE)
    #--- delete blanks at begin of lines
    for (uu in 1:2){
        s2 <- gsub("\n ", "\n", s2, fixed=TRUE)
    }
    #--- output
    return(s2)
}
|
/R/Rcppfunction_remove_classes.R
|
no_license
|
cran/miceadds
|
R
| false
| false
| 1,996
|
r
|
## File Name: Rcppfunction_remove_classes.R
## File Version: 0.07
## Purpose: take the signature line of an Rcpp function declaration
## (name + parenthesized argument list) and return a line-wrapped call
## string, optionally with the C++/Rcpp type names stripped.
## Arguments:
##   string - declaration string to reformat
##   maxlen - maximum accumulated line width before a break is inserted
##   remove - strip the Rcpp/C++ class names (TRUE) or keep them (FALSE)
## Returns: a single character string ending in "\n".
## NOTE(review): several gsub() calls below appear to have identical
## pattern and replacement; this looks like whitespace collapsed by a
## copy/extraction step -- confirm against the upstream source before
## editing this function.
Rcppfunction_remove_classes <- function(string, maxlen=70, remove=TRUE)
{
    # normalize: drop newlines, tabs and (apparently) spaces
    string <- gsub("\n", "", string )
    string <- gsub("\t", "", string )
    string <- gsub(" ", "", string )
    # split "name(args)" into the name (a1[1]) and everything after "("
    ind1 <- string_find_first(string=string, symbol="(" )
    a1 <- c( substring(string,1, ind1-1), substring(string, ind1+1, nchar(string) ) )
    s1 <- a1[2]
    # keep only the text up to the last ")" and split into arguments
    ind1 <- string_find_last(string=s1, symbol=")" )
    s1 <- substring(s1,1, ind1-1)
    s1 <- strsplit( s1, split=",", fixed=TRUE )[[1]]
    #*** Rcpp classes
    rcpp_classes <- c("double", "bool", "int", "arma::mat", "arma::colvec", "arma::umat",
                "Rcpp::NumericVector", "Rcpp::IntegerVector", "Rcpp::LogicalVector",
                "Rcpp::CharacterVector", "Rcpp::CharacterMatrix", "Rcpp::List",
                "Rcpp::NumericMatrix", "Rcpp::IntegerMatrix",
                "Rcpp::LogicalMatrix", "char" )
    rcpp_classes1 <- paste0( rcpp_classes, " " )
    # optionally remove every "<class> " prefix from the argument names
    # and from the function-name part
    if (remove){
        for (rr in rcpp_classes1 ){
            s1 <- gsub( rr, "", s1, fixed=TRUE )
            a1[1] <- gsub( rr, "", a1[1], fixed=TRUE )
        }
        a1[1] <- gsub( " ", "", a1[1] )
    }
    NS <- length(s1)
    # NOTE(review): 'for (ss in 1:NS)' iterates over c(1, 0) when
    # NS == 0 (empty argument list); seq_len(NS) would be safer.
    s2 <- s1
    if (remove){
        s2 <- gsub( " ", "", s2 )
    }
    # M0 tracks the width of the current output line; break when the
    # next argument would exceed maxlen
    M0 <- nchar(a1[1])
    for (ss in 1:NS){
        if (remove){
            s2[ss] <- gsub( " ", "", s2[ss] )
        }
        nss <- nchar(s2[ss])
        M0 <- M0 + nss
        if (M0 > maxlen ){
            s2[ss] <- paste0("\n ", s2[ss] )
            M0 <- nss
        }
    }
    # reassemble "name( arg1, arg2, ... )\n"
    s2 <- paste0( a1[1], "( ", paste0( s2, collapse=", " ), " )\n" )
    s2 <- gsub( ", ", ", ", s2, fixed=TRUE)
    s2 <- gsub( "( ", "( ", s2, fixed=TRUE)
    s2 <- gsub( " )", " )", s2, fixed=TRUE)
    #--- delete blanks at begin of lines
    for (uu in 1:2){
        s2 <- gsub("\n ", "\n", s2, fixed=TRUE)
    }
    #--- output
    return(s2)
}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/predict.density.R
\name{predict.density}
\alias{predict.density}
\title{predict.density}
\usage{
\method{predict}{density}(den, xnew)
}
\arguments{
\item{den}{A \code{density} object, as returned by \code{stats::density}.}

\item{xnew}{Numeric vector of new x values at which to interpolate the fitted density.}
}
\value{
ynew
}
\description{
Simple interpolation of a density object to new points
}
\author{
Michael Dietze
}
|
/modules/emulator/man/predict.density.Rd
|
permissive
|
yogeshdarji/pecan
|
R
| false
| true
| 355
|
rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/predict.density.R
\name{predict.density}
\alias{predict.density}
\title{predict.density}
\usage{
\method{predict}{density}(den, xnew)
}
\arguments{
\item{den}{A \code{density} object, as returned by \code{stats::density}.}

\item{xnew}{Numeric vector of new x values at which to interpolate the fitted density.}
}
\value{
ynew
}
\description{
Simple interpolation of a density object to new points
}
\author{
Michael Dietze
}
|
# Read two days (2880 one-minute rows) of the UCI household power
# consumption file, starting at skip = 66637 (presumably the first row
# of the target date range -- confirm), and plot Global_Active_Power
# over time to plot2.png. "?" encodes missing values in this file.
plot2 <- function () {
    # Fix: colClasses previously listed only 7 classes for 9 columns,
    # so recycling made the last two sub-metering columns "character";
    # supply all 9 classes explicitly (2 character + 7 numeric).
    powerdata <- read.table("household_power_consumption.txt", header = FALSE, sep = ";",
                            col.names = c("Date", "Time", "Global_Active_Power",
                                          "Global_Reactive_Power", "Voltage",
                                          "Global_Intensity", "Energy_Sub-metering_1",
                                          "Energy_Sub-metering_2", "Energy_Sub-metering_3"),
                            na.strings = "?",
                            colClasses = c("character", "character", rep("numeric", 7)),
                            skip = 66637, nrows = 2880)
    # Combine date and time into timestamps for the x axis.
    dt <- paste(powerdata$Date, powerdata$Time)
    st <- strptime(dt, format = "%d/%m/%Y %H:%M:%S")
    powerdata <- cbind(powerdata, st)
    # Draw the time series to a 480x480 PNG; type = "n" sets up the
    # axes, then lines() draws the trace.
    png(file = "plot2.png", width = 480, height = 480)
    with(powerdata, plot(st, Global_Active_Power, xlab = "",
                         ylab = "Global Active Power (kilowatts)", type = "n"))
    with(powerdata, lines(st, Global_Active_Power))
    dev.off()
}
|
/plot2.R
|
no_license
|
saomakyvay/ExData_Plotting1
|
R
| false
| false
| 787
|
r
|
# Read two days (2880 one-minute rows) of the UCI household power
# consumption file, starting at skip = 66637 (presumably the first row
# of the target date range -- confirm), and plot Global_Active_Power
# over time to plot2.png. "?" encodes missing values in this file.
plot2 <- function () {
    # Fix: colClasses previously listed only 7 classes for 9 columns,
    # so recycling made the last two sub-metering columns "character";
    # supply all 9 classes explicitly (2 character + 7 numeric).
    powerdata <- read.table("household_power_consumption.txt", header = FALSE, sep = ";",
                            col.names = c("Date", "Time", "Global_Active_Power",
                                          "Global_Reactive_Power", "Voltage",
                                          "Global_Intensity", "Energy_Sub-metering_1",
                                          "Energy_Sub-metering_2", "Energy_Sub-metering_3"),
                            na.strings = "?",
                            colClasses = c("character", "character", rep("numeric", 7)),
                            skip = 66637, nrows = 2880)
    # Combine date and time into timestamps for the x axis.
    dt <- paste(powerdata$Date, powerdata$Time)
    st <- strptime(dt, format = "%d/%m/%Y %H:%M:%S")
    powerdata <- cbind(powerdata, st)
    # Draw the time series to a 480x480 PNG; type = "n" sets up the
    # axes, then lines() draws the trace.
    png(file = "plot2.png", width = 480, height = 480)
    with(powerdata, plot(st, Global_Active_Power, xlab = "",
                         ylab = "Global Active Power (kilowatts)", type = "n"))
    with(powerdata, lines(st, Global_Active_Power))
    dev.off()
}
|
# 03 ANALYSIS WITH THE IMPOSTERS' METHOD
# Prerequisites for this script:
# - some results of "02_Hyperparameter and Feature Tuning Imposters' Method.R"
#   saved on your computer
# - the results of "01_Preprocessing.R" in the global environment
#   (loaded from disk below when the file is available)
if (file.exists("03_Output/Data/Results_of_01_Preprocessing.RData")) {
  load(file = "03_Output/Data/Results_of_01_Preprocessing.RData")
}
# Install (if missing) and attach the required packages.
for (pkg in c("stylo", "data.table", "stringi")) {
  if (!require(pkg, character.only = TRUE)) {
    install.packages(pkg)
    library(pkg, character.only = TRUE)
  }
}
source("02_Script/Functions/01_check_p1_p2_constraints.R")
# Before running the imposters' method, collect the "good" hyper-parameter
# rows from the imposters.optimize() result files (one CSV per corpus
# configuration, metainformation in the first 3 lines of each file).
list.of.files = list.files(path = "03_Output/Data/Results_of_imposters.optimize")
good.params = data.frame()
# Iterate through all csv files
for (filename03 in list.of.files) {
  # Read each file from line 4 on
  file03 = read.table(
    file = paste("03_Output/Data/Results_of_imposters.optimize/", filename03, sep = ""),
    dec = ".",
    sep = ";",
    skip = 3,
    header = TRUE
  )
  # The file name encodes corpus base, feature level and n-gram size,
  # separated by "-"; attach that metadata to every row.
  fileinfos = unlist(stri_split_regex(str = filename03, pattern = "-"))
  file03 = cbind(
    base = rep(x = fileinfos[3], times = nrow(file03)),
    level = rep(x = fileinfos[4], times = nrow(file03)),
    n = rep(x = fileinfos[5], times = nrow(file03)),
    file03
  )
  # Index of rows that satisfy the p1/p2 constraints.
  # Fixes: seq_len() is safe for zero-row files (1:nrow() would yield
  # c(1, 0)), and the logical vector is preallocated instead of being
  # grown with append() on every iteration.
  found.lines.index = vector(mode = "logical", length = nrow(file03))
  for (i in seq_len(nrow(file03))) {
    # TRUE/FALSE depending on the constraints-check for this line
    found.lines.index[i] = check_p1_p2_constraints(file03[i, ])
  }
  # Add the found lines to good.params
  good.params = rbind(good.params, file03[found.lines.index, ])
}
# Name the rows by their number (seq_len handles the empty case)
rownames(good.params) = seq_len(nrow(good.params))
# File that will collect the human-readable results of this analysis run.
filename03 = paste0(
  "Results_of_Imposters_Method_",
  format(Sys.time(), format = "%Y-%m-%d_%H-%M-%S"),
  ".csv"
)
# With the good parameters found, the actual analysis can start: for
# every row of good.params the imposters' method is run with that row's
# parameters and the results are appended to the file above.
# 3-dimensional array for the final (machine-readable) results: one
# test-text x candidate probability matrix per parameter configuration.
imposters.final.results = array(
  dim = c(length(test.names), length(candidates), nrow(good.params)),
  dimnames = list(test.names, candidates, param.config = NULL)
)
# Run the imposters' method once per good parameter configuration.
# NOTE(review): 1:nrow(...) and 1:length(...) below misbehave on empty
# input (they yield c(1, 0)); seq_len()/seq_along() would be safer.
for (i in 1:nrow(good.params)) {
  #First, load the corpora with the specifications of the current row in good.params
  # NOTE(review): eval(parse(text=...)) looks up an object named
  # "test.corpus.<base>" / "training.corpus.<base>" in the calling
  # environment; get(paste0(...)) would be the safer idiom -- confirm
  # before changing.
  test =
    parse.corpus(
      input.data = eval(parse(
        text = paste("test.corpus.",
                     good.params[i, "base"],
                     sep = "")
      )),
      splitting.rule = " ",
      features = good.params[i, "level"],
      ngram.size = as.numeric(good.params[i, "n"])
    )
  training =
    parse.corpus(
      input.data = eval(parse(
        text = paste("training.corpus.",
                     good.params[i, "base"],
                     sep = "")
      )),
      splitting.rule = " ",
      features = good.params[i, "level"],
      ngram.size = as.numeric(good.params[i, "n"])
    )
  #Prepare an empty table for the results
  imposters.results =
    matrix(nrow = length(test.names),
           ncol = length(candidates),
           dimnames = list(test.names, candidates))
  #Iterate through all test texts, in order to fill the table "imposters.results"
  for (n in 1:length(test.names)) {
    #Build table of frequencies of training corpus incl. text to be tested
    appended.corpus = training
    appended.corpus[[names(test[n])]] = test[[n]] #add 1 item of test
    appended.word.list = make.frequency.list(appended.corpus)
    freq.table = make.table.of.frequencies(appended.corpus, appended.word.list)
    #Split table of frequencies in two
    text.to.be.tested = freq.table[length(appended.corpus),] #the last row
    remaining.texts = freq.table[-length(appended.corpus),] #all other rows
    #Fill the results' table by rows: the text to be tested receives a probability
    #for each author candidate
    imposters.results[n,] =
      imposters(
        reference.set = remaining.texts,
        test = text.to.be.tested,
        iterations = 50,
        distance = good.params[i, "dist"],
        features = good.params[i, "feat"],
        imposters = good.params[i, "imp"]
      )
    #Set the insignificant values (inside the range p1-p2) to NA
    insignificant = data.table::inrange(x = imposters.results[n,],
                                        lower = good.params[i, "p1_avg"],
                                        upper = good.params[i, "p2_avg"])
    imposters.results[n, insignificant] = NA
    #Make a copy of the above in the final results' array
    imposters.final.results[n, , i] = imposters.results[n,]
  }
  #Write the resulting table into the file, first adding some metainformation
  # NOTE(review): assumes the columns of interest in good.params sit at
  # positions 1:6 and 19:20 -- confirm against the optimizer output
  # format if that file layout ever changes.
  write.table(x = good.params[i, c(1:6, 19:20)],
              file = filename03,
              dec = ".",
              sep = ";",
              col.names = T,
              row.names = F,
              append = T)
  write.table(x = imposters.results,
              file = filename03,
              dec = ".",
              sep = ";",
              append = T,
              col.names = NA,
              row.names = T)
  #Adding two blank lines
  write.table(x = matrix(data = c(" ", " "), nrow = 2, ncol = 1),
              file = filename03,
              dec = ".",
              sep = ";",
              col.names = F,
              row.names = F,
              append = T)
}
#While the CSV saved in the for-loops is human-readable, the machine-readable
#version of it will be saved now for further processing with
#04 STATISTICAL ANALYSIS OF THE IMPOSTERS' METHOD'S RESULTS
save(imposters.final.results,
     file = "03_Output/Data/imposters.final.results.RData")
|
/02_Script/03_Analysis_with_Imposters_Method.R
|
no_license
|
jnussbaum/authorship-attribution
|
R
| false
| false
| 6,553
|
r
|
# 03 ANALYSIS WITH THE IMPOSTERS' METHOD
# Prerequisites for this script:
# - some results of "02_Hyperparameter and Feature Tuning Imposters' Method.R"
#   saved on your computer
# - the results of "01_Preprocessing.R" in the global environment
#   (loaded from disk below when the file is available)
if (file.exists("03_Output/Data/Results_of_01_Preprocessing.RData")) {
  load(file = "03_Output/Data/Results_of_01_Preprocessing.RData")
}
# Install (if missing) and attach the required packages.
for (pkg in c("stylo", "data.table", "stringi")) {
  if (!require(pkg, character.only = TRUE)) {
    install.packages(pkg)
    library(pkg, character.only = TRUE)
  }
}
source("02_Script/Functions/01_check_p1_p2_constraints.R")
# Before running the imposters' method, collect the "good" hyper-parameter
# rows from the imposters.optimize() result files (one CSV per corpus
# configuration, metainformation in the first 3 lines of each file).
list.of.files = list.files(path = "03_Output/Data/Results_of_imposters.optimize")
good.params = data.frame()
# Iterate through all csv files
for (filename03 in list.of.files) {
  # Read each file from line 4 on
  file03 = read.table(
    file = paste("03_Output/Data/Results_of_imposters.optimize/", filename03, sep = ""),
    dec = ".",
    sep = ";",
    skip = 3,
    header = TRUE
  )
  # The file name encodes corpus base, feature level and n-gram size,
  # separated by "-"; attach that metadata to every row.
  fileinfos = unlist(stri_split_regex(str = filename03, pattern = "-"))
  file03 = cbind(
    base = rep(x = fileinfos[3], times = nrow(file03)),
    level = rep(x = fileinfos[4], times = nrow(file03)),
    n = rep(x = fileinfos[5], times = nrow(file03)),
    file03
  )
  # Index of rows that satisfy the p1/p2 constraints.
  # Fixes: seq_len() is safe for zero-row files (1:nrow() would yield
  # c(1, 0)), and the logical vector is preallocated instead of being
  # grown with append() on every iteration.
  found.lines.index = vector(mode = "logical", length = nrow(file03))
  for (i in seq_len(nrow(file03))) {
    # TRUE/FALSE depending on the constraints-check for this line
    found.lines.index[i] = check_p1_p2_constraints(file03[i, ])
  }
  # Add the found lines to good.params
  good.params = rbind(good.params, file03[found.lines.index, ])
}
# Name the rows by their number (seq_len handles the empty case)
rownames(good.params) = seq_len(nrow(good.params))
# File that will collect the human-readable results of this analysis run.
filename03 = paste0(
  "Results_of_Imposters_Method_",
  format(Sys.time(), format = "%Y-%m-%d_%H-%M-%S"),
  ".csv"
)
# With the good parameters found, the actual analysis can start: for
# every row of good.params the imposters' method is run with that row's
# parameters and the results are appended to the file above.
# 3-dimensional array for the final (machine-readable) results: one
# test-text x candidate probability matrix per parameter configuration.
imposters.final.results = array(
  dim = c(length(test.names), length(candidates), nrow(good.params)),
  dimnames = list(test.names, candidates, param.config = NULL)
)
# Run the imposters' method once per good parameter configuration.
# NOTE(review): 1:nrow(...) and 1:length(...) below misbehave on empty
# input (they yield c(1, 0)); seq_len()/seq_along() would be safer.
for (i in 1:nrow(good.params)) {
  #First, load the corpora with the specifications of the current row in good.params
  # NOTE(review): eval(parse(text=...)) looks up an object named
  # "test.corpus.<base>" / "training.corpus.<base>" in the calling
  # environment; get(paste0(...)) would be the safer idiom -- confirm
  # before changing.
  test =
    parse.corpus(
      input.data = eval(parse(
        text = paste("test.corpus.",
                     good.params[i, "base"],
                     sep = "")
      )),
      splitting.rule = " ",
      features = good.params[i, "level"],
      ngram.size = as.numeric(good.params[i, "n"])
    )
  training =
    parse.corpus(
      input.data = eval(parse(
        text = paste("training.corpus.",
                     good.params[i, "base"],
                     sep = "")
      )),
      splitting.rule = " ",
      features = good.params[i, "level"],
      ngram.size = as.numeric(good.params[i, "n"])
    )
  #Prepare an empty table for the results
  imposters.results =
    matrix(nrow = length(test.names),
           ncol = length(candidates),
           dimnames = list(test.names, candidates))
  #Iterate through all test texts, in order to fill the table "imposters.results"
  for (n in 1:length(test.names)) {
    #Build table of frequencies of training corpus incl. text to be tested
    appended.corpus = training
    appended.corpus[[names(test[n])]] = test[[n]] #add 1 item of test
    appended.word.list = make.frequency.list(appended.corpus)
    freq.table = make.table.of.frequencies(appended.corpus, appended.word.list)
    #Split table of frequencies in two
    text.to.be.tested = freq.table[length(appended.corpus),] #the last row
    remaining.texts = freq.table[-length(appended.corpus),] #all other rows
    #Fill the results' table by rows: the text to be tested receives a probability
    #for each author candidate
    imposters.results[n,] =
      imposters(
        reference.set = remaining.texts,
        test = text.to.be.tested,
        iterations = 50,
        distance = good.params[i, "dist"],
        features = good.params[i, "feat"],
        imposters = good.params[i, "imp"]
      )
    #Set the insignificant values (inside the range p1-p2) to NA
    insignificant = data.table::inrange(x = imposters.results[n,],
                                        lower = good.params[i, "p1_avg"],
                                        upper = good.params[i, "p2_avg"])
    imposters.results[n, insignificant] = NA
    #Make a copy of the above in the final results' array
    imposters.final.results[n, , i] = imposters.results[n,]
  }
  #Write the resulting table into the file, first adding some metainformation
  # NOTE(review): assumes the columns of interest in good.params sit at
  # positions 1:6 and 19:20 -- confirm against the optimizer output
  # format if that file layout ever changes.
  write.table(x = good.params[i, c(1:6, 19:20)],
              file = filename03,
              dec = ".",
              sep = ";",
              col.names = T,
              row.names = F,
              append = T)
  write.table(x = imposters.results,
              file = filename03,
              dec = ".",
              sep = ";",
              append = T,
              col.names = NA,
              row.names = T)
  #Adding two blank lines
  write.table(x = matrix(data = c(" ", " "), nrow = 2, ncol = 1),
              file = filename03,
              dec = ".",
              sep = ";",
              col.names = F,
              row.names = F,
              append = T)
}
#While the CSV saved in the for-loops is human-readable, the machine-readable
#version of it will be saved now for further processing with
#04 STATISTICAL ANALYSIS OF THE IMPOSTERS' METHOD'S RESULTS
save(imposters.final.results,
     file = "03_Output/Data/imposters.final.results.RData")
|
# "About" tab for the Community Charts Shiny app: app description,
# author contact links, and references (static HTML content).
# Fix: the SNAP link previously had a comma between the href and target
# attributes ('href="..." ,target=...'), which is invalid HTML.
tabPanel("About",
	HTML(
	'<p style="text-align:justify">This R Shiny web application presents climate outlooks for various Alaska and western Canada communities.
	There are several versions of the Community Charts application. See the <a href="http://leonawicz.github.io/CommCharts4/">CommCharts4</a> documentation for more information.
	The various versions depend on different versions of the <code>shinyBS</code> package, but I only have the latest version of this package on the server which is running all versions of this app.
	This is why the version you are currently looking at is at worst broken or at best has a non-ideal bootstrap button display.</p>'),
	HTML('
	<div style="clear: left;"><img src="http://www.gravatar.com/avatar/52c27b8719a7543b4b343775183122ea.png" alt="" style="float: left; margin-right:5px" /></div>
	<p>Matthew Leonawicz<br/>
	Statistician | useR<br/>
	<a href="http://leonawicz.github.io" target="_blank">Github.io</a> |
	<a href="http://blog.snap.uaf.edu" target="_blank">Blog</a> |
	<a href="https://twitter.com/leonawicz" target="_blank">Twitter</a> |
	<a href="http://www.linkedin.com/in/leonawicz" target="_blank">Linkedin</a> <br/>
	<a href="http://www.snap.uaf.edu/" target="_blank">Scenarios Network for Alaska and Arctic Planning</a>
	</p>'),
	fluidRow(
		column(4,
			HTML('<strong>References</strong>
			<p><ul>
				<li><a href="http://www.r-project.org/" target="_blank">Coded in R</a></li>
				<li><a href="http://www.rstudio.com/shiny/" target="_blank">Built with the Shiny package</a></li>
				<li>Additional supporting R packages</li>
				<ul>
					<li><a href="http://rstudio.github.io/shinythemes/" target="_blank">shinythemes</a></li>
					<li><a href="https://github.com/ebailey78/shinyBS" target="_blank">shinyBS</a></li>
					<li><a href="http://rcharts.io/" target="_blank">rCharts</a></li>
					<li><a href="http://plyr.had.co.nz/" target="_blank">plyr</a></li>
				</ul>
				<li>Source code on <a href="https://github.com/ua-snap/shiny-apps/tree/master/cc4lite2/" target="_blank">GitHub</a></li>
			</ul>')
		)
	),
	value="about"
)
|
/cc4lite2/about.R
|
no_license
|
jiansfoggy/shiny-apps
|
R
| false
| false
| 2,099
|
r
|
# "About" tab for the Community Charts Shiny app: app description,
# author contact links, and references (static HTML content).
# Fix: the SNAP link previously had a comma between the href and target
# attributes ('href="..." ,target=...'), which is invalid HTML.
tabPanel("About",
	HTML(
	'<p style="text-align:justify">This R Shiny web application presents climate outlooks for various Alaska and western Canada communities.
	There are several versions of the Community Charts application. See the <a href="http://leonawicz.github.io/CommCharts4/">CommCharts4</a> documentation for more information.
	The various versions depend on different versions of the <code>shinyBS</code> package, but I only have the latest version of this package on the server which is running all versions of this app.
	This is why the version you are currently looking at is at worst broken or at best has a non-ideal bootstrap button display.</p>'),
	HTML('
	<div style="clear: left;"><img src="http://www.gravatar.com/avatar/52c27b8719a7543b4b343775183122ea.png" alt="" style="float: left; margin-right:5px" /></div>
	<p>Matthew Leonawicz<br/>
	Statistician | useR<br/>
	<a href="http://leonawicz.github.io" target="_blank">Github.io</a> |
	<a href="http://blog.snap.uaf.edu" target="_blank">Blog</a> |
	<a href="https://twitter.com/leonawicz" target="_blank">Twitter</a> |
	<a href="http://www.linkedin.com/in/leonawicz" target="_blank">Linkedin</a> <br/>
	<a href="http://www.snap.uaf.edu/" target="_blank">Scenarios Network for Alaska and Arctic Planning</a>
	</p>'),
	fluidRow(
		column(4,
			HTML('<strong>References</strong>
			<p><ul>
				<li><a href="http://www.r-project.org/" target="_blank">Coded in R</a></li>
				<li><a href="http://www.rstudio.com/shiny/" target="_blank">Built with the Shiny package</a></li>
				<li>Additional supporting R packages</li>
				<ul>
					<li><a href="http://rstudio.github.io/shinythemes/" target="_blank">shinythemes</a></li>
					<li><a href="https://github.com/ebailey78/shinyBS" target="_blank">shinyBS</a></li>
					<li><a href="http://rcharts.io/" target="_blank">rCharts</a></li>
					<li><a href="http://plyr.had.co.nz/" target="_blank">plyr</a></li>
				</ul>
				<li>Source code on <a href="https://github.com/ua-snap/shiny-apps/tree/master/cc4lite2/" target="_blank">GitHub</a></li>
			</ul>')
		)
	),
	value="about"
)
|
# Build a square grid of city blocks (separated by roads of varying
# width) and render two crossroads diagrams: plot1 with points A/B,
# plot2 additionally with points C/D.
# the center coordinates of blocks
# NOTE(review): block_x accumulates road_y$importance and block_y
# accumulates road_x$importance -- presumably the widths of the
# perpendicular roads offset the coordinates along each axis; confirm
# against the upstream definitions of road_x/road_y.
block_x <- cumsum(c(0, road_y$importance)) +
  cumsum(c(block_width / 2, rep(block_width, times = road_dim)))
block_y <- cumsum(c(0, road_x$importance)) +
  cumsum(c(block_width / 2, rep(block_width, times = road_dim)))
# one row per block center = one grid point
road_network <- expand.grid(
  x = block_x,
  y = block_y
)
# plot
plot1 <- ggplot(road_network) +
  # the road blocks
  geom_tile(
    aes(x = x, y = y),
    width = block_width,
    height = block_width,
    fill = "white",
    colour = "black",
    size = 0.5,
    linejoin = "mitre"
  ) +
  # remove the outline of the outmost rectangle
  geom_rect(
    data = tibble(
      xmin = 0,
      xmax = last(block_x) + block_width / 2,
      ymin = 0,
      ymax = last(block_y) + block_width / 2
    ),
    aes(xmin = xmin, xmax = xmax, ymin = ymin, ymax = ymax),
    fill = "white",
    colour = "white",
    alpha = 0,
    size = 1,
    linejoin = "mitre"
  ) +
  # the key points (A and B, drawn in blue)
  geom_point(
    data = tibble(
      x = c(block_width, last(block_x) - block_width / 2),
      y = c(block_width, last(block_y) - block_width / 2),
    ),
    aes(x = x, y = y),
    color = c("blue", "blue"),
    size = 1.5
  ) +
  # labels for the key points
  geom_text(
    data = tibble(
      x = c(first(block_x) + block_width / 4, last(block_x) - block_width / 4),
      y = c(first(block_y) + block_width / 4, last(block_y) - block_width / 4),
      label = c("A", "B")
    ),
    aes(x = x, y = y, label = label)
  ) +
  coord_fixed() +
  scale_y_reverse() +
  theme_minimal() +
  theme(
    text = element_blank(),
    axis.title.x = element_blank(),
    axis.title.y = element_blank(),
    panel.grid = element_blank()
  )
# plot2 layers a second pair of key points (C and D, in red) on plot1
plot2 <- plot1 +
  # the key points cont'd
  geom_point(
    data = tibble(
      x = c(nth(block_x, road_dim) + block_width / 2, last(block_x) - block_width / 2),
      y = c(last(block_y) - block_width / 2, nth(block_y, road_dim) + block_width / 2),
    ),
    aes(x = x, y = y),
    color = c("red", "red"),
    size = 1.5
  ) +
  # labels for the key points
  geom_text(
    data = tibble(
      x = c(nth(block_x, road_dim) + block_width / 4, last(block_x) - block_width / 4),
      y = c(last(block_y) - block_width / 4, nth(block_y, road_dim) + block_width / 4),
      label = c("C", "D")
    ),
    aes(x = x, y = y, label = label)
  )
#==== output ====
# NOTE(review): the output directory says "to_cross_or_not_to_cross"
# while this script's path says "to_wait_or_not_to_wait" -- confirm the
# intended output location.
save_svg(plot = plot1, file_name = "output/to_cross_or_not_to_cross/plot1.svg", width = 3.5, height = 3.5)
save_svg(plot = plot2, file_name = "output/to_cross_or_not_to_cross/plot2.svg", width = 3.5, height = 3.5)
# play sound when finished
beep(sound = 2)
|
/main/to_wait_or_not_to_wait/plot_crossroads.R
|
no_license
|
shawenyao/R
|
R
| false
| false
| 2,634
|
r
|
# Build a square grid of city blocks (separated by roads of varying
# width) and render two crossroads diagrams: plot1 with points A/B,
# plot2 additionally with points C/D.
# the center coordinates of blocks
# NOTE(review): block_x accumulates road_y$importance and block_y
# accumulates road_x$importance -- presumably the widths of the
# perpendicular roads offset the coordinates along each axis; confirm
# against the upstream definitions of road_x/road_y.
block_x <- cumsum(c(0, road_y$importance)) +
  cumsum(c(block_width / 2, rep(block_width, times = road_dim)))
block_y <- cumsum(c(0, road_x$importance)) +
  cumsum(c(block_width / 2, rep(block_width, times = road_dim)))
# one row per block center = one grid point
road_network <- expand.grid(
  x = block_x,
  y = block_y
)
# plot
plot1 <- ggplot(road_network) +
  # the road blocks
  geom_tile(
    aes(x = x, y = y),
    width = block_width,
    height = block_width,
    fill = "white",
    colour = "black",
    size = 0.5,
    linejoin = "mitre"
  ) +
  # remove the outline of the outmost rectangle
  geom_rect(
    data = tibble(
      xmin = 0,
      xmax = last(block_x) + block_width / 2,
      ymin = 0,
      ymax = last(block_y) + block_width / 2
    ),
    aes(xmin = xmin, xmax = xmax, ymin = ymin, ymax = ymax),
    fill = "white",
    colour = "white",
    alpha = 0,
    size = 1,
    linejoin = "mitre"
  ) +
  # the key points (A and B, drawn in blue)
  geom_point(
    data = tibble(
      x = c(block_width, last(block_x) - block_width / 2),
      y = c(block_width, last(block_y) - block_width / 2),
    ),
    aes(x = x, y = y),
    color = c("blue", "blue"),
    size = 1.5
  ) +
  # labels for the key points
  geom_text(
    data = tibble(
      x = c(first(block_x) + block_width / 4, last(block_x) - block_width / 4),
      y = c(first(block_y) + block_width / 4, last(block_y) - block_width / 4),
      label = c("A", "B")
    ),
    aes(x = x, y = y, label = label)
  ) +
  coord_fixed() +
  scale_y_reverse() +
  theme_minimal() +
  theme(
    text = element_blank(),
    axis.title.x = element_blank(),
    axis.title.y = element_blank(),
    panel.grid = element_blank()
  )
# plot2 layers a second pair of key points (C and D, in red) on plot1
plot2 <- plot1 +
  # the key points cont'd
  geom_point(
    data = tibble(
      x = c(nth(block_x, road_dim) + block_width / 2, last(block_x) - block_width / 2),
      y = c(last(block_y) - block_width / 2, nth(block_y, road_dim) + block_width / 2),
    ),
    aes(x = x, y = y),
    color = c("red", "red"),
    size = 1.5
  ) +
  # labels for the key points
  geom_text(
    data = tibble(
      x = c(nth(block_x, road_dim) + block_width / 4, last(block_x) - block_width / 4),
      y = c(last(block_y) - block_width / 4, nth(block_y, road_dim) + block_width / 4),
      label = c("C", "D")
    ),
    aes(x = x, y = y, label = label)
  )
#==== output ====
# NOTE(review): the output directory says "to_cross_or_not_to_cross"
# while this script's path says "to_wait_or_not_to_wait" -- confirm the
# intended output location.
save_svg(plot = plot1, file_name = "output/to_cross_or_not_to_cross/plot1.svg", width = 3.5, height = 3.5)
save_svg(plot = plot2, file_name = "output/to_cross_or_not_to_cross/plot2.svg", width = 3.5, height = 3.5)
# play sound when finished
beep(sound = 2)
|
# Yelp academic dataset quiz script: parse the review file (one JSON object
# per line) and keep the first 10,000 reviews as a working sample.
# NOTE(review): setwd() with an absolute path makes the script machine-specific.
setwd("C:/Users/chliu/Documents/R Workplace/yelp_dataset_challenge_academic_dataset/yelp_dataset_challenge_academic_dataset/")
library(rjson)
review_file <- "yelp_academic_dataset_review.json"
#review_data <- fromJSON(file=review_file, method = "C", unexpected.escape = "error" )
#review_df <- do.call("rbind", review_data)
#review_data[1:10]
#length(review_data)
lines <- readLines(review_file)
lines_10 <- lines[1:10]
# Restrict to the first 10,000 reviews to keep parsing fast.
lines <- lines[1:10000]
review_data <- lapply(X=lines, fromJSON)
length(lines)
lines[100]
# Fraction of the sampled reviews that awarded 5 stars.
# Replaces the manual counter loop (and its unsafe 1:length(lines) pattern)
# with a vectorised vapply over the parsed reviews; the final expression
# prints the proportion, exactly as before.
review_5_stars <- sum(vapply(review_data, function(r) r$stars == 5, logical(1)))
review_5_stars / length(lines)
# Concatenate the bodies of the first 100 reviews that are about restaurants
# (business_id in res_id) and written by users classified as female.
# Bug fixes vs. the original:
#  * cat() prints to the console and returns NULL, so `text <- cat(...)`
#    destroyed the accumulator after the first match; paste() actually
#    builds the string.
#  * the review body lives in $text (as used further below when writing the
#    "reviews" file), not $review.
text <- ""
for (i in seq_len(100)) {
  flag1 <- review_data[[i]]$business_id %in% res_id
  flag2 <- review_data[[i]]$user_id %in% female_id
  if (flag1 && flag2) {
    text <- paste(text, review_data[[i]]$text)
  }
}
# Collect the indices of reviews that are about restaurants AND written by
# users classified as female, then append their bodies to the "reviews" file.
text <- ""
sum <- 0
review_num <- list()
for (i in seq_along(lines)) {
  flag1 <- review_data[[i]]$business_id %in% res_id
  flag2 <- review_data[[i]]$user_id %in% female_id
  if (flag1 && flag2) {
    sum <- sum + 1
    review_num[sum] <- i
  }
}
# Bug fix: the original passed `ncolumns = if(is.character(x)) 1 else 5`,
# i.e. write()'s *default* expression, but as a supplied argument it is
# evaluated in this frame where no `x` exists -> runtime error.  The payload
# is always character, so ncolumns is simply 1.  seq_len() also makes the
# loop a no-op (instead of an error) when no review matched.
for (i in seq_len(sum)) {
  write(x = review_data[[review_num[[i]]]]$text, file = "reviews", ncolumns = 1,
        append = TRUE, sep = " ")
}
# Build a word cloud from the collected restaurant reviews.
review_text <- paste(readLines("reviews"),collapse = " ")
library(tm)
review_source <- VectorSource(review_text)
corpus <- Corpus(review_source)
# Standard tm cleaning pipeline: lower-case, strip punctuation and extra
# whitespace, drop English stop words.  Order matters: stop-word removal
# assumes the text has already been lower-cased.
corpus <- tm_map(corpus, content_transformer(tolower))
corpus <- tm_map(corpus, removePunctuation)
corpus <- tm_map(corpus, stripWhitespace)
corpus <- tm_map(corpus, removeWords, stopwords("english"))
dtm <- DocumentTermMatrix(corpus)
dtm2 <- as.matrix(dtm)
# Term frequencies across the (single-document) corpus, most frequent first.
frequency <- colSums(dtm2)
frequency <- sort(frequency, decreasing=TRUE)
head(frequency)
library(wordcloud)
words <- names(frequency)
# Plot the 50 most frequent terms.
wordcloud(words[1:50], frequency[1:50])
#df <- data.frame(matrix(unlist(review_data), nrow=length(lines_10), byrow=T))
#aggregate(df,by=list(x1),FUN=sum, na.rm=TRUE)
# Load the business records (one JSON object per line).
# NOTE(review): 7985 appears to be the expected AZ-restaurant count -- confirm.
business_no <- rep(1,7985)
business_file <- "yelp_academic_dataset_business.json"
b_lines <- readLines(business_file)
length(b_lines)
business_data <- lapply(X=b_lines, fromJSON)
# Collect id / star rating / price range for every Arizona restaurant, then
# plot histograms of the star ratings and price ranges.
sum <- 0
key <- 1
az_res_id <- list()
az_res_star <- list()
az_res_price <- list()
for (i in seq_along(b_lines)) {
  if (business_data[[i]]$state == "AZ" &&
      sum(grepl("Restaurants", business_data[[i]]$categories)) > 0) {
    sum <- sum + 1
    business_no[key] <- i
    # Bug fix: the original used business_data[i]$business_id; single-bracket
    # indexing returns a length-1 *list*, whose $business_id is NULL, so no
    # id was ever stored.  Double brackets extract the record itself.
    az_res_id[[key]] <- business_data[[i]]$business_id
    az_res_star[[key]] <- business_data[[i]]$stars
    az_res_price[[key]] <- business_data[[i]]$attributes$'Price Range'
    key <- key + 1
  }
}
hist(unlist(az_res_star))
hist(unlist(az_res_price))
#There are 7985 restaurants in AZ
# Gather the business id of every record tagged "Restaurants".  As in the
# original, `sum` ends up holding the number of restaurants found and
# `text` is reset to the empty string.
text <- ""
res_id <- lapply(
  Filter(function(b) any(grepl("Restaurants", b$categories)), business_data),
  function(b) b$business_id
)
sum <- length(res_id)  # 21892 restaurants all together
# Now get all the potential female reviews from restaurants
key <- 1
res_all_text <- list()
female_id <- list()
# Keyword list used as a (crude) heuristic for reviews written by women.
# NOTE(review): 'grilled\ncheese sandwich' deliberately keeps the embedded
# newline from the original literal.
words <- c('salad','vegetable','chia seeds','juice','frozen yogurt','cupcake','healthy','lettuce','grilled
cheese sandwich','latte','edamane','avocado roll','plantain','chinese','japanese','thai','vietnames','korean','mexican','middle eastern','indian',
'atmosphere','friendly','table','female','vegetarian','pinkberry','daughter','son','clean','sushi','curly','style','highlight','cookies')
# Flag a user when their review mentions at least one keyword.
for (i in seq_along(lines))
{
  sum <- 0
  for (j in seq_along(words)) {
    # Bug fix: the original tested review_data[[1]]$text for every i, i.e. it
    # always scanned the *first* review; scan the i-th review instead.
    sum <- sum + grepl(words[j], review_data[[i]]$text)
  }
  if (sum >= 1) {
    female_id[key] <- review_data[[i]]$user_id
    key <- key + 1
  }
}
female_id
# if(review_data[[i]]$business_id %in% res_id){
#business_data[[6]]$attributes$`Wi-Fi`
# Among businesses that report a Wi-Fi attribute, what fraction offer it free?
sum_wifi <- 0
free_wifi <- 0
for (i in 1:length(b_lines)){
if (!is.null(business_data[[i]]$attributes$`Wi-Fi`)){
sum_wifi <- sum_wifi + 1
if(business_data[[i]]$attributes$`Wi-Fi` == "free"){
free_wifi <- free_wifi +1
}
}
}
# Count of California businesses (computed but not used below).
sum_b <-0
for (i in 1:length(b_lines)){
if (business_data[[i]]$state == "CA"){
sum_b <- sum_b+1
}
}
# Proportion of Wi-Fi-reporting businesses whose Wi-Fi is free.
free_wifi/sum_wifi
# Load the tip and user data sets (one JSON object per line);
# `lines` is rebound to the user file from here on.
tip_file <- "yelp_academic_dataset_tip.json"
tip_lines <- readLines(tip_file)
length(tip_lines)
user_file <- "yelp_academic_dataset_user.json"
lines <- readLines(user_file)
user_data <- lapply(X=lines, fromJSON)
#366715 users on file
# names <- paste(readLines("yob2014.txt"), collapse=" ")
# Baby-name lists (column 1 = name, column 3 = count) used to infer gender
# from a user's first name.
female_id <- list()
key <- 1
female_names <- read.table("yob2014_f.txt",sep=",",header = FALSE)
male_names <- read.table("yob2014_m.txt",sep=",",header = FALSE)
# Classify a user as female when their name appears only in the female list,
# or appears in both lists but with a higher female count.
for (i in seq_along(lines)) {
  flag_female <- user_data[[i]]$name %in% female_names[,1]
  flag_female_count <- female_names[which(user_data[[i]]$name==female_names[,1]),3]
  flag_male <- user_data[[i]]$name %in% male_names[,1]
  flag_male_count <- male_names[which(user_data[[i]]$name ==male_names[,1]),3]
  # Robustness fix: when a name is missing from one list its count is a
  # length-0 vector, and `||` with a length-0 operand is an error in modern R
  # (it used to yield NA, which the is.na() check below silently skipped).
  # Guard the comparison so the condition is always a scalar.
  count_flag <- length(flag_female_count) == 1 && length(flag_male_count) == 1 &&
    flag_female_count > flag_male_count
  flag <- (flag_female && !flag_male) || count_flag
  if (!is.na(flag) && flag == TRUE) {
    female_id[key] <- user_data[[i]]$user_id
    key <- key + 1
  }
}
# Print the names of users with more than 10,000 'funny' compliments.
for (i in 1:length(lines)){
#print(i)
if (!is.null(user_data[[i]]$compliments$funny) && user_data[[i]]$compliments$funny > 10000){
print(user_data[[i]]$name)
}
}
# dt1 <- rbind(list(user_data_10[[1]]$compliments$funny,user_data_10[[1]]$fans))
# for (i in 2:length(lines)) {
# #print(i)
# if (!is.null(user_data[[i]]$compliments$funny) &&
# !is.null(user_data[[i]]$fans) &&
# user_data[[i]]$compliments$funny > 1 &&
# user_data[[i]]$fans > 1){
# dt1 <- rbind(dt1, list(user_data[[i]]$compliments$funny,user_data[[i]]$fans))
# }
# }
# Count users with at most one fan AND at most one 'funny' compliment
# (missing values count as zero).
sum <- 0
for (i in 1:length(lines)) {
#print(i)
if ((is.null(user_data[[i]]$fans) || user_data[[i]]$fans <= 1) &&
(is.null(user_data[[i]]$compliments$funny) ||
user_data[[i]]$compliments$funny <= 1 )){
sum <- sum+1
}
}
|
/quiz1.R
|
no_license
|
eeliuchang/yelp-prediction
|
R
| false
| false
| 6,147
|
r
|
# Yelp academic dataset quiz script: parse the review file (one JSON object
# per line) and keep the first 10,000 reviews as a working sample.
# NOTE(review): setwd() with an absolute path makes the script machine-specific.
setwd("C:/Users/chliu/Documents/R Workplace/yelp_dataset_challenge_academic_dataset/yelp_dataset_challenge_academic_dataset/")
library(rjson)
review_file <- "yelp_academic_dataset_review.json"
#review_data <- fromJSON(file=review_file, method = "C", unexpected.escape = "error" )
#review_df <- do.call("rbind", review_data)
#review_data[1:10]
#length(review_data)
lines <- readLines(review_file)
lines_10 <- lines[1:10]
# Restrict to the first 10,000 reviews to keep parsing fast.
lines <- lines[1:10000]
review_data <- lapply(X=lines, fromJSON)
length(lines)
lines[100]
# Fraction of the sampled reviews that awarded 5 stars.
# Replaces the manual counter loop (and its unsafe 1:length(lines) pattern)
# with a vectorised vapply over the parsed reviews; the final expression
# prints the proportion, exactly as before.
review_5_stars <- sum(vapply(review_data, function(r) r$stars == 5, logical(1)))
review_5_stars / length(lines)
# Concatenate the bodies of the first 100 reviews that are about restaurants
# (business_id in res_id) and written by users classified as female.
# Bug fixes vs. the original:
#  * cat() prints to the console and returns NULL, so `text <- cat(...)`
#    destroyed the accumulator after the first match; paste() actually
#    builds the string.
#  * the review body lives in $text (as used further below when writing the
#    "reviews" file), not $review.
text <- ""
for (i in seq_len(100)) {
  flag1 <- review_data[[i]]$business_id %in% res_id
  flag2 <- review_data[[i]]$user_id %in% female_id
  if (flag1 && flag2) {
    text <- paste(text, review_data[[i]]$text)
  }
}
# Collect the indices of reviews that are about restaurants AND written by
# users classified as female, then append their bodies to the "reviews" file.
text <- ""
sum <- 0
review_num <- list()
for (i in seq_along(lines)) {
  flag1 <- review_data[[i]]$business_id %in% res_id
  flag2 <- review_data[[i]]$user_id %in% female_id
  if (flag1 && flag2) {
    sum <- sum + 1
    review_num[sum] <- i
  }
}
# Bug fix: the original passed `ncolumns = if(is.character(x)) 1 else 5`,
# i.e. write()'s *default* expression, but as a supplied argument it is
# evaluated in this frame where no `x` exists -> runtime error.  The payload
# is always character, so ncolumns is simply 1.  seq_len() also makes the
# loop a no-op (instead of an error) when no review matched.
for (i in seq_len(sum)) {
  write(x = review_data[[review_num[[i]]]]$text, file = "reviews", ncolumns = 1,
        append = TRUE, sep = " ")
}
# Build a word cloud from the collected restaurant reviews.
review_text <- paste(readLines("reviews"),collapse = " ")
library(tm)
review_source <- VectorSource(review_text)
corpus <- Corpus(review_source)
# Standard tm cleaning pipeline: lower-case, strip punctuation and extra
# whitespace, drop English stop words.  Order matters: stop-word removal
# assumes the text has already been lower-cased.
corpus <- tm_map(corpus, content_transformer(tolower))
corpus <- tm_map(corpus, removePunctuation)
corpus <- tm_map(corpus, stripWhitespace)
corpus <- tm_map(corpus, removeWords, stopwords("english"))
dtm <- DocumentTermMatrix(corpus)
dtm2 <- as.matrix(dtm)
# Term frequencies across the (single-document) corpus, most frequent first.
frequency <- colSums(dtm2)
frequency <- sort(frequency, decreasing=TRUE)
head(frequency)
library(wordcloud)
words <- names(frequency)
# Plot the 50 most frequent terms.
wordcloud(words[1:50], frequency[1:50])
#df <- data.frame(matrix(unlist(review_data), nrow=length(lines_10), byrow=T))
#aggregate(df,by=list(x1),FUN=sum, na.rm=TRUE)
# Load the business records (one JSON object per line).
# NOTE(review): 7985 appears to be the expected AZ-restaurant count -- confirm.
business_no <- rep(1,7985)
business_file <- "yelp_academic_dataset_business.json"
b_lines <- readLines(business_file)
length(b_lines)
business_data <- lapply(X=b_lines, fromJSON)
# Collect id / star rating / price range for every Arizona restaurant, then
# plot histograms of the star ratings and price ranges.
sum <- 0
key <- 1
az_res_id <- list()
az_res_star <- list()
az_res_price <- list()
for (i in seq_along(b_lines)) {
  if (business_data[[i]]$state == "AZ" &&
      sum(grepl("Restaurants", business_data[[i]]$categories)) > 0) {
    sum <- sum + 1
    business_no[key] <- i
    # Bug fix: the original used business_data[i]$business_id; single-bracket
    # indexing returns a length-1 *list*, whose $business_id is NULL, so no
    # id was ever stored.  Double brackets extract the record itself.
    az_res_id[[key]] <- business_data[[i]]$business_id
    az_res_star[[key]] <- business_data[[i]]$stars
    az_res_price[[key]] <- business_data[[i]]$attributes$'Price Range'
    key <- key + 1
  }
}
hist(unlist(az_res_star))
hist(unlist(az_res_price))
#There are 7985 restaurants in AZ
# Gather the business id of every record tagged "Restaurants".  As in the
# original, `sum` ends up holding the number of restaurants found and
# `text` is reset to the empty string.
text <- ""
res_id <- lapply(
  Filter(function(b) any(grepl("Restaurants", b$categories)), business_data),
  function(b) b$business_id
)
sum <- length(res_id)  # 21892 restaurants all together
# Now get all the potential female reviews from restaurants
key <- 1
res_all_text <- list()
female_id <- list()
# Keyword list used as a (crude) heuristic for reviews written by women.
# NOTE(review): 'grilled\ncheese sandwich' deliberately keeps the embedded
# newline from the original literal.
words <- c('salad','vegetable','chia seeds','juice','frozen yogurt','cupcake','healthy','lettuce','grilled
cheese sandwich','latte','edamane','avocado roll','plantain','chinese','japanese','thai','vietnames','korean','mexican','middle eastern','indian',
'atmosphere','friendly','table','female','vegetarian','pinkberry','daughter','son','clean','sushi','curly','style','highlight','cookies')
# Flag a user when their review mentions at least one keyword.
for (i in seq_along(lines))
{
  sum <- 0
  for (j in seq_along(words)) {
    # Bug fix: the original tested review_data[[1]]$text for every i, i.e. it
    # always scanned the *first* review; scan the i-th review instead.
    sum <- sum + grepl(words[j], review_data[[i]]$text)
  }
  if (sum >= 1) {
    female_id[key] <- review_data[[i]]$user_id
    key <- key + 1
  }
}
female_id
# if(review_data[[i]]$business_id %in% res_id){
#business_data[[6]]$attributes$`Wi-Fi`
# Among businesses that report a Wi-Fi attribute, what fraction offer it free?
sum_wifi <- 0
free_wifi <- 0
for (i in 1:length(b_lines)){
if (!is.null(business_data[[i]]$attributes$`Wi-Fi`)){
sum_wifi <- sum_wifi + 1
if(business_data[[i]]$attributes$`Wi-Fi` == "free"){
free_wifi <- free_wifi +1
}
}
}
# Count of California businesses (computed but not used below).
sum_b <-0
for (i in 1:length(b_lines)){
if (business_data[[i]]$state == "CA"){
sum_b <- sum_b+1
}
}
# Proportion of Wi-Fi-reporting businesses whose Wi-Fi is free.
free_wifi/sum_wifi
# Load the tip and user data sets (one JSON object per line);
# `lines` is rebound to the user file from here on.
tip_file <- "yelp_academic_dataset_tip.json"
tip_lines <- readLines(tip_file)
length(tip_lines)
user_file <- "yelp_academic_dataset_user.json"
lines <- readLines(user_file)
user_data <- lapply(X=lines, fromJSON)
#366715 users on file
# names <- paste(readLines("yob2014.txt"), collapse=" ")
# Baby-name lists (column 1 = name, column 3 = count) used to infer gender
# from a user's first name.
female_id <- list()
key <- 1
female_names <- read.table("yob2014_f.txt",sep=",",header = FALSE)
male_names <- read.table("yob2014_m.txt",sep=",",header = FALSE)
# Classify a user as female when their name appears only in the female list,
# or appears in both lists but with a higher female count.
for (i in seq_along(lines)) {
  flag_female <- user_data[[i]]$name %in% female_names[,1]
  flag_female_count <- female_names[which(user_data[[i]]$name==female_names[,1]),3]
  flag_male <- user_data[[i]]$name %in% male_names[,1]
  flag_male_count <- male_names[which(user_data[[i]]$name ==male_names[,1]),3]
  # Robustness fix: when a name is missing from one list its count is a
  # length-0 vector, and `||` with a length-0 operand is an error in modern R
  # (it used to yield NA, which the is.na() check below silently skipped).
  # Guard the comparison so the condition is always a scalar.
  count_flag <- length(flag_female_count) == 1 && length(flag_male_count) == 1 &&
    flag_female_count > flag_male_count
  flag <- (flag_female && !flag_male) || count_flag
  if (!is.na(flag) && flag == TRUE) {
    female_id[key] <- user_data[[i]]$user_id
    key <- key + 1
  }
}
# Print the names of users with more than 10,000 'funny' compliments.
for (i in 1:length(lines)){
#print(i)
if (!is.null(user_data[[i]]$compliments$funny) && user_data[[i]]$compliments$funny > 10000){
print(user_data[[i]]$name)
}
}
# dt1 <- rbind(list(user_data_10[[1]]$compliments$funny,user_data_10[[1]]$fans))
# for (i in 2:length(lines)) {
# #print(i)
# if (!is.null(user_data[[i]]$compliments$funny) &&
# !is.null(user_data[[i]]$fans) &&
# user_data[[i]]$compliments$funny > 1 &&
# user_data[[i]]$fans > 1){
# dt1 <- rbind(dt1, list(user_data[[i]]$compliments$funny,user_data[[i]]$fans))
# }
# }
# Count users with at most one fan AND at most one 'funny' compliment
# (missing values count as zero).
sum <- 0
for (i in 1:length(lines)) {
#print(i)
if ((is.null(user_data[[i]]$fans) || user_data[[i]]$fans <= 1) &&
(is.null(user_data[[i]]$compliments$funny) ||
user_data[[i]]$compliments$funny <= 1 )){
sum <- sum+1
}
}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/all_passed.R
\name{all_passed}
\alias{all_passed}
\title{Did all of the validations fully \emph{pass}?}
\usage{
all_passed(agent)
}
\arguments{
\item{agent}{An agent object of class \code{ptblank_agent}.}
}
\value{
A logical value.
}
\description{
Given an agent's validation plan that had undergone interrogation via
\code{interrogate()}, did every single validation step result in zero \emph{fail}
levels? Using the \code{all_passed()} function will let us know whether that's
\code{TRUE} or not.
}
\section{Function ID}{
8-4
}
\examples{
# Create a simple table with
# a column of numerical values
tbl <-
dplyr::tibble(a = c(5, 7, 8, 5))
# Validate that values in column
# `a` are always greater than 4
agent <-
create_agent(tbl = tbl) \%>\%
col_vals_gt(vars(a), 4) \%>\%
interrogate()
# Determine if these column
# validations have all passed
# by using `all_passed()`
all_passed(agent)
}
\seealso{
Other Post-interrogation:
\code{\link{get_agent_x_list}()},
\code{\link{get_data_extracts}()},
\code{\link{get_sundered_data}()},
\code{\link{write_testthat_file}()}
}
\concept{Post-interrogation}
|
/man/all_passed.Rd
|
permissive
|
dwtcourses/pointblank
|
R
| false
| true
| 1,193
|
rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/all_passed.R
\name{all_passed}
\alias{all_passed}
\title{Did all of the validations fully \emph{pass}?}
\usage{
all_passed(agent)
}
\arguments{
\item{agent}{An agent object of class \code{ptblank_agent}.}
}
\value{
A logical value.
}
\description{
Given an agent's validation plan that had undergone interrogation via
\code{interrogate()}, did every single validation step result in zero \emph{fail}
levels? Using the \code{all_passed()} function will let us know whether that's
\code{TRUE} or not.
}
\section{Function ID}{
8-4
}
\examples{
# Create a simple table with
# a column of numerical values
tbl <-
dplyr::tibble(a = c(5, 7, 8, 5))
# Validate that values in column
# `a` are always greater than 4
agent <-
create_agent(tbl = tbl) \%>\%
col_vals_gt(vars(a), 4) \%>\%
interrogate()
# Determine if these column
# validations have all passed
# by using `all_passed()`
all_passed(agent)
}
\seealso{
Other Post-interrogation:
\code{\link{get_agent_x_list}()},
\code{\link{get_data_extracts}()},
\code{\link{get_sundered_data}()},
\code{\link{write_testthat_file}()}
}
\concept{Post-interrogation}
|
library(shiny)
# Shiny server stub: no reactive outputs are defined yet.
shinyServer(function(input, output) {
})
|
/DNA_tool/server.R
|
permissive
|
w4356y/shiny_ui_template
|
R
| false
| false
| 61
|
r
|
library(shiny)
# Shiny server stub: no reactive outputs are defined yet.
shinyServer(function(input, output) {
})
|
#### This script stores the old functions and code from generatePlots.R
# most of these I suspect are no longer relevant. However, just in case....
# Import RCC and NB datasets (Seurat objects serialised with saveRDS) plus
# the chromosome absolute-position table used for genome-wide x coordinates.
rcc.srat = readRDS('/lustre/scratch117/casm/team274/mt22/CN_methods/RCC_PCT_ann3sub.rds')
nb.srat = readRDS('/lustre/scratch117/casm/team274/mt22/CN_methods/NB_ann.rds')
chromInfo = read.delim('/lustre/scratch117/casm/team274/mt22/chrom_abspos_kb.txt',sep = '\t')
# Figure 2C - Example of CopyKat output for PTC and tumour RCC cells ####
sampleList = 'PD37228'
# Figure 2C / S1 helper: for each RCC sample, overlay CopyKat's per-cell-type
# mean log copy-number profile on the Battenberg (DNA) ground-truth segments
# and write the figure via saveFig().
# sampleList: 'all' (every sample, Leukocytes included) or a vector of PDIDs
#             (then Leukocytes are dropped and a per-sample figure is saved).
# NOTE(review): relies on annotateBTB/colAlpha/saveFig and output dirs
# `res`/`resd` being defined elsewhere -- confirm before running.
rccCK = function(sampleList='all'){
rcc.srat = readRDS('/lustre/scratch117/casm/team274/mt22/CN_methods/RCC_PCT_ann3sub.rds')
rcc.srat$finalAnn = rcc.srat$finalAnn2
# Import chromInfo
chromInfo = read.delim('/lustre/scratch117/casm/team274/mt22/chrom_abspos_kb.txt',sep = '\t')
# Import Manifest
projMani = read_excel("/lustre/scratch117/casm/team274/mt22/projectManifest.xlsx",sheet = "RCC_mani")
# CopyKat results
copykat.results = readRDS('/lustre/scratch117/casm/team274/mt22/CN_methods/CopyKAT_output/v5_rcc.CK.normREF.default.2397/v5_CKresults_default_normREF_80perc_2397.rds')
if(sampleList != 'all'){
rcc.srat = subset(rcc.srat,subset = PDID %in% sampleList)
}
# Extract data for PD36793 only (as examples)
#----- Processing CopyKat results -------#
# One iteration per CopyKat result; each is matched back to a PDID via the
# cell barcodes shared with the Seurat metadata.
for(i in 1:length(copykat.results)){
# Get copyKat CNA matrix and prediction
CNA_summary_byCellType = data.frame()
CNA_mat = copykat.results[[i]]$CNAmat
# Undo make.names() mangling of cell barcodes in the CNA matrix columns.
colnames(CNA_mat) = gsub('^X','',colnames(CNA_mat))
colnames(CNA_mat) = gsub('\\.','-',colnames(CNA_mat))
pred = as.data.frame(copykat.results[[i]]$prediction)
sample = unique(rcc.srat@meta.data[rownames(rcc.srat@meta.data) %in% rownames(pred),]$PDID)
PDID=sample
if(length(sample) > 1){
message(paste0('More than 1 sample detected: i=',i,', samples are ',sample))
}else if(length(sample) <1){
next
}else if(length(sample)==1){
message(paste0('Checking sample ',sample))
}
# subset annrcc.srat object to keep only cells of that sample
srat = subset(rcc.srat, subset = PDID == sample)
# subset by annotated cell type
# Columns 1:3 of CNA_mat are the genomic coordinates; the rest are cells.
# mean_logCN is the per-segment mean over all cells of the cell type.
for(celltype in unique(srat$finalAnn)){
CNA_mat_sub = CNA_mat[,c(1:3,which(colnames(CNA_mat) %in% rownames(srat@meta.data[srat@meta.data$finalAnn == celltype,])))]
if(ncol(CNA_mat_sub) == 4){
chrom_tmp=data.frame(celltype = celltype,CNA_mat_sub)
colnames(chrom_tmp)[5] = 'mean_logCN'
}else if (ncol(CNA_mat_sub) > 4){
chrom_tmp = data.frame(celltype = celltype,CNA_mat_sub[,c(1:3)],mean_logCN = apply(CNA_mat_sub[,-c(1:3)],MARGIN = 1,FUN = mean))
}else if (ncol(CNA_mat_sub) < 4){
chrom_tmp = data.frame(celltype = celltype,CNA_mat_sub[,c(1:3)],mean_logCN = NA)
}
CNA_summary_byCellType = rbind(CNA_summary_byCellType,chrom_tmp)
}
# Remove X chromosome
CNA_summary_byCellType = CNA_summary_byCellType[CNA_summary_byCellType$chrom != 23,]
CNA_summary_byCellType$celltype = factor(CNA_summary_byCellType$celltype,levels=c('Tumour','PTC','Leukocytes'))
if(sampleList != 'all'){
CNA_summary_byCellType = CNA_summary_byCellType[CNA_summary_byCellType$celltype != 'Leukocytes',]
CNA_summary_byCellType$celltype = factor(CNA_summary_byCellType$celltype,levels=c('Tumour','PTC'))
}
####------------------ Generate Battenberg CN summary file ----------------####
# Battenberg .summary.csv file - only summarize Major Clone CNV, does not included CN states of minor clones
donorMani = projMani[projMani$PDID == sample,]
btb.fp = unique(donorMani$battenbergFp[!grepl('^n_',donorMani$SampleID)])
#----- Processing Battenberg data -------#
dna.data = annotateBTB(btb.fp,subCl.minSegLen = 1e7,PDID,tgtChrs=c(1:22),removeBalancedSegs=F,longFormat = T,method = 'totalCN')
# Remove chromosome X
dna.data = dna.data[dna.data$Chr != 23,]
# Add equivalent lines for other cell types
dna.data$celltype = 'Tumour'
# Dummy diploid (tumTot=2) start/stop rows so PTC and Leukocyte panels get a
# flat baseline instead of no DNA track at all.
tmp = rbind(data.frame(Idx=0,Chr=1,matNum=1,patNum=1,frac=1,segLen=0,type='maj',tumTot=2,tot2min='2:1',tumFrac=0.5,newIdx=0,posID=0,posType='Start',pos=1,idx=0,abspos_kb=0,celltype='PTC'),
data.frame(Idx=0,Chr=1,matNum=1,patNum=1,frac=1,segLen=0,type='maj',tumTot=2,tot2min='2:1',tumFrac=0.5,newIdx=0,posID=0,posType='Stop',pos=2874771,idx=0,abspos_kb=2874771,celltype='PTC'),
data.frame(Idx=0,Chr=1,matNum=1,patNum=1,frac=1,segLen=0,type='maj',tumTot=2,tot2min='2:1',tumFrac=0.5,newIdx=0,posID=0,posType='Start',pos=1,idx=0,abspos_kb=0,celltype='Leukocytes'),
data.frame(Idx=0,Chr=1,matNum=1,patNum=1,frac=1,segLen=0,type='maj',tumTot=2,tot2min='2:1',tumFrac=0.5,newIdx=0,posID=0,posType='Stop',pos=2874771,idx=0,abspos_kb=2874771,celltype='Leukocytes'))
dna.data = rbind(dna.data,tmp)
dna.data$log_CNratio = log(as.numeric(dna.data$tumTot)/2)
# Separate subclone and major clone CN profile
subCl.dna = dna.data[dna.data$type == 'sub',]
majCl.dna = dna.data[dna.data$type == 'maj',]
#----- Plotting copykat results! -------#
chromInfo2 = chromInfo[chromInfo$chrom != 23,]
# One stacked panel per cell type; closed over the per-sample data above.
plotFun = function(noFrame=T,noPlot=FALSE){
noFrame=T
layout(mat=matrix(c(1:nlevels(CNA_summary_byCellType$celltype)),ncol=1),
heights = rep(2,nlevels(CNA_summary_byCellType$celltype)))
for(celltype in levels(CNA_summary_byCellType$celltype)){
# y-axis limits: span the CopyKat profile and the DNA log-ratio track.
if(sampleList == 'all'){
par(mar=c(0.2,0.9,0.2,0.4),xpd=TRUE)
ylim = c(round(min(CNA_summary_byCellType$mean_logCN)-0.1,digits = 1),round(max(CNA_summary_byCellType$mean_logCN)+0.2,digits = 1))
text.cex = 0.7
ybottom=min(round(min(CNA_summary_byCellType$mean_logCN)-0.1,digits = 1),round(log(0.5)/2,2))
if((round(max(dna.data$log_CNratio),2) - round(max(CNA_summary_byCellType$mean_logCN)+0.1,digits = 1))>0.2){
ytop=max(round(max(CNA_summary_byCellType$mean_logCN)+0.1,digits = 1),round(max(dna.data$log_CNratio)/2,2))
type=2
}else{
ytop=max(round(max(CNA_summary_byCellType$mean_logCN)+0.1,digits = 1),round(max(dna.data$log_CNratio),2))
type=1
}
ytext = ytop + 0.1
chrom.y = 0.85
}else{
par(mar=c(0.2,0.9,0.2,0.4),xpd=TRUE)
ylim = c(round(min(CNA_summary_byCellType$mean_logCN)-0.1,digits = 1),round(max(CNA_summary_byCellType$mean_logCN)+0.2,digits = 1))
text.cex = 0.7
ybottom=min(round(min(CNA_summary_byCellType$mean_logCN)-0.1,digits = 1),round(log(0.5)/2,2))
ytop=max(round(max(CNA_summary_byCellType$mean_logCN)+0.1,digits = 1),round(max(dna.data$log_CNratio),2))
ytext = ytop + 0.1
chrom.y = 0.7
}
tmp = CNA_summary_byCellType[CNA_summary_byCellType$celltype == celltype,]
tmp = tmp[order(tmp$abspos,decreasing = F),]
dna = majCl.dna[majCl.dna$celltype == celltype,]
ncells = nrow(rcc.srat@meta.data[rcc.srat@meta.data$PDID == PDID & rcc.srat@meta.data$finalAnn == celltype,])
# Plot main frame
plot(CNA_summary_byCellType$abspos, CNA_summary_byCellType$mean_logCN,
las=1,
type='n',
#xlim=c(-15.5,15),
ylim=ylim,
xlab=ifelse(noFrame,'','Genomic Position'),
ylab=ifelse(noFrame,'',''),
#main=ifelse(noFrame,'',''),
xaxt=ifelse(noFrame,'n','s'),
yaxt=ifelse(noFrame,'n','s'),
frame.plot=F)
#text(x=1e3,y=ytext,celltype,cex=text.cex[1],family = 'Helvetica',font=2,adj = 0)
#text(x=2.86e9,y=ytext,paste0('n=',ncells),cex=text.cex[2],family = 'Helvetica',font=1,adj = 1)
#axis(2,at=c(0),labels = c(0),las=1,pos = 0,tck = -.02,lwd = 0.3,cex.axis=0.65,hadj = -0.8,padj = 0.5)
if(sampleList=='all'){
#axis(2,at=c(0),labels = c(0),las=1,pos = 0,tck = -.02,lwd = 0.3,cex.axis=0.75,hadj = -0.8,padj = 0.5)
axis(2,at=c(ybottom+0.05,0,ytop-0.05),labels = c('Low',0,'High'),las=1,pos = 0,tck = -.00,lwd = 0.3,cex.axis=0.6,hadj = 0.3,padj = 0.5)
#if((type==1) & (round(log(0.5),2) < ybottom)){
#  axis(4,las=1,pos = max(chromInfo2$abspos_kb*1000),tck = -.02,lwd = 0.4,cex.axis=0.6,hadj = 2.1,
#       at=c(0,round(log(3/2),2),round(log(4/2),2)),col.axis = '#b02c46',
#       labels = c(2,3,4))
#}else if((type==1) & (round(log(0.5),2) > ybottom)){
#  axis(4,las=1,pos = max(chromInfo2$abspos_kb*1000),tck = -.02,lwd = 0.4,cex.axis=0.6,hadj = 2.1,
#       at=c(round(log(0.5),2),0,round(log(3/2),2),round(log(4/2),2)),col.axis = '#b02c46',
#       labels = c(1,2,3,4))
#}else if((type==2) & (round(log(0.5)/2,2) < ybottom)){
#  axis(4,las=1,pos = max(chromInfo2$abspos_kb*1000),tck = -.02,lwd = 0.4,cex.axis=0.6,hadj = 2.1,
#       at=c(0,round(log(3/2)/2,2),round(log(4/2)/2,2)),col.axis = '#b02c46',
#       labels = c(2,3,4))
#}else if((type==2) & (round(log(0.5)/2,2) > ybottom)){
#  axis(4,las=1,pos = max(chromInfo2$abspos_kb*1000),tck = -.02,lwd = 0.4,cex.axis=0.6,hadj = 2.1,
#       at=c(round(log(0.5)/2,2),0,round(log(3/2)/2,2),round(log(4/2)/2,2)),col.axis = '#b02c46',
#       labels = c(1,2,3,4))
# Right-hand axis: absolute copy-number labels (1-4), drawn only at
# positions that fall inside the visible y range.
axis4 = c(round(log(0.5)/2,2),0,round(log(3/2)/2,2),round(log(4/2)/2,2))
names(axis4) = c(1,2,3,4)
axis(4,las=1,pos = max(chromInfo2$abspos_kb*1000),tck = -.02,lwd = 0.4,cex.axis=0.6,hadj = 2.1,
at=axis4[axis4>ybottom & ybottom < ytop],col.axis = '#b02c46',
labels = names(axis4[axis4>ybottom & ybottom < ytop]))
}else{
axis(2,at=c(ybottom+0.05,0,ytop-0.05),labels = c('Low',0,'High'),las=1,pos = 0,tck = -.00,lwd = 0.3,cex.axis=0.6,hadj = 0.1,padj = 0.5)
axis(4,las=1,pos = max(chromInfo2$abspos_kb*1000),tck = -.02,lwd = 0.3,cex.axis=0.6,hadj = 1.9,col.axis = '#b02c46',
at=c(round(log(0.5)/2,2),0,round(log(3/2)/2,2)),
labels = c(1,2,3))
}
#Plot background chromosome
xleft = c(0,chromInfo2[chromInfo2$arm == 'q' & chromInfo2$chrom!=22,]$abspos*1000)
xright = c(chromInfo2[chromInfo2$arm == 'q',]$abspos*1000)
text(x=1.2e9,y=ytext,paste0(celltype,'_',PDID,' (n=',ncells,')'),cex=text.cex[1],family = 'Helvetica',font=1,adj = 0)
# Alternating white/grey bands to delineate chromosomes.
col = replicate(c('white','#dbdbdb'),n = 22/2)
rect(xleft=xleft,
xright=xright,
ybottom=ybottom,
ytop=ytop,
col = col,
lty = 'blank')
#Black surrounding border
rect(xleft=min(xleft),
xright=max(xright),
ybottom=ybottom,
ytop=ytop,
col = colAlpha('white',0.0001),
border = 'black',lwd = 0.5)
#textSize = (xright-xleft)/xright[1]
#text(x=(xleft+xright)/2,y = 0.9,labels = c(1:22),cex = c(1.5*textSize),font = 2)
#text(x=(xleft+xright)/2,y = chrom.y,labels = c(1:22),cex = c(rep(0.7,10),rep(0.62,4),rep(0.5,4),rep(0.31,4)),font = 1)
# Plot ground truth
#segments(x0=min(xleft),x1 = max(xright),
#         y0=0, y1=0,
#         col = 'black')
rect(xleft = dna[dna$posType=='Start',]$abspos_kb*1000,
xright = dna[dna$posType=='Stop',]$abspos_kb*1000,
ybottom = 0,
ytop=dna[dna$posType=='Start',]$log_CNratio/2,
col=colAlpha('#b02c46',1),
border=colAlpha('#b02c46',1),lwd = 1.0)
# Plot subclone total CN
if(celltype == 'Tumour' & nrow(subCl.dna) > 0){
for(chr in unique(subCl.dna$Chr)){
if(unique(subCl.dna[subCl.dna$Chr == chr,]$log_CNratio == 0)){
lines(subCl.dna[subCl.dna$Chr == chr,]$abspos_kb*1000,
subCl.dna[subCl.dna$Chr == chr,]$log_CNratio/2,col='#4169E1',lwd=1.5)
}else{
rect(xleft = subCl.dna[subCl.dna$Chr == chr &subCl.dna$posType=='Start',]$abspos_kb*1000,
xright = subCl.dna[subCl.dna$Chr == chr &subCl.dna$posType=='Stop',]$abspos_kb*1000,
ybottom = 0,
ytop=subCl.dna[subCl.dna$Chr == chr & subCl.dna$posType=='Start',]$log_CNratio/2,
col=colAlpha('#4169E1',0.7),
border=colAlpha('#4169E1',0.7),lwd = 0.1)
}
}
}
# Plot CopyKat output
lines(x=tmp$abspos,tmp$mean_logCN,col='black',lwd=ifelse(sampleList == 'all',0.8,0.5))
# Dashed guides at +/-0.2 logCN.
segments(x0=min(xleft),x1 = max(xright),
y0=0.2, y1=0.2,
col = 'darkgrey',lty = 'dashed',lwd = 0.4)
segments(x0=min(xleft),x1 = max(xright),
y0=-0.2, y1=-0.2,
col = 'darkgrey',lty = 'dashed',lwd = 0.4)
}
}
if(sampleList != 'all'){
saveFig(file.path(resd,paste0('RCC_CK_',PDID)),plotFun,width = 2.6,height = 2.1,res=500)
}else{
saveFig(file.path(res,paste0('FigS1_CK_',PDID,'_v1e7')),plotFun,width = 2.3,height = 3.5,res=500)
}
}
}
# Figure 2Ba - Barplots of RCC CK classification ####
# Per-sample stacked barplots of the fraction of cells CopyKat calls
# Aneuploid vs Diploid, one row per annotated cell type; cells with no
# CopyKat call ('Uncalled') are dropped.  Writes the figure via saveFig().
# NOTE(review): assumes `res` (output dir) and a PDID column in the metadata
# exist -- confirm in the calling environment.
rccBarplot.CK = function(){
rcc.srat = readRDS('/lustre/scratch117/casm/team274/mt22/CN_methods/RCC_PCT_ann3sub.rds')
# Extract CK output
dd = rcc.srat@meta.data
dd$finalAnn = dd$finalAnn2
dd$finalAnn = factor(dd$finalAnn,levels = c('Leukocytes','PTC','Tumour'))
# Map the raw CopyKat prediction to Aneuploid/Diploid; NA -> Uncalled.
dd$CKpred.normREF.default.80perc.2397 = ifelse(is.na(dd$CKpred.normREF.default.80perc.2397),'Uncalled',
ifelse(dd$CKpred.normREF.default.80perc.2397 == 'aneuploid','Aneuploid','Diploid'))
dd = dd[dd$CKpred.normREF.default.80perc.2397 != 'Uncalled',]
#dd$CKpred.normREF.default.80perc.2397 = factor(dd$CKpred.normREF.default.80perc.2397,levels = c('Aneuploid','Diploid','Uncalled'))
dd$CKpred.normREF.default.80perc.2397 = factor(dd$CKpred.normREF.default.80perc.2397,levels = c('Aneuploid','Diploid'))
#Define the layout
plotFun = function(noFrame=FALSE,noPlot=FALSE){
# Top strip holds the title; one panel per cell type below it.
layout(matrix(c(1,2,3,4),ncol=1),heights = c(0.2,1,1,1.8))
par(mar=c(0,0.6,1,0.6))
plot(0, 0,
las=1,
type='n',frame.plot = F,axes = F)
title('RCC',cex.main=1,family = 'Helvetica',font=2)
for(celltype in levels(dd$finalAnn)){
print(celltype)
par(mar=c(0.2,1.5,0.5,0.1),xpd=TRUE)
# Per-sample fractions of each call within this cell type.
tmp=as.matrix(table(dd$CKpred.normREF.default.80perc.2397[dd$finalAnn == celltype],dd$PDID[dd$finalAnn == celltype]))
tmp = sweep(tmp,2,colSums(tmp),'/')
cols = c(Diploid = '#c8c8c8',
Aneuploid = '#b02c46',
Uncalled = '#474646')
# The last (bottom) panel gets extra margin and the sample labels.
if(celltype == levels(dd$finalAnn)[length(levels(dd$finalAnn))]){
par(mar=c(4.4,1.5,0.5,0.1),xpd=TRUE)
barplot(tmp,
col=cols[rownames(tmp)],
space=0.1,axes = FALSE,
las = 1,names.arg = rep(NA,ncol(tmp)),border = F)
#axis(2,at=c(1,0.5,0),las=1,pos = 0.1,tck=-0.02,cex.axis=0.8,lwd.ticks = 0,hadj = 0.5)
text(x = seq(0.6,4.5,by = 1.105),y = -0.1,colnames(tmp),cex=0.7,family = 'Helvetica',font=1,srt=90,adj = 1)
#text(x = 4.8,y = 0.5,celltype,cex=0.85,family = 'Helvetica',font=1,srt=270)
text(x =-1.3,y = 0.5,celltype,cex=0.7,family = 'Helvetica',font=1,srt=90)
}else{
barplot(tmp,
col=cols[rownames(tmp)],
space=0.1,axes = FALSE,names.arg = rep(' ',ncol(tmp)),
las = 1,main = '',border = F)
#axis(2,at=c(1,0.5,0),las=1,pos = 0.1,tck=-0.02,cex.axis =0.8,lwd.ticks = 0,hadj = 0.5,)
#text(x = 4.8,y = 0.5,celltype,cex=0.85,family = 'Helvetica',font=1,srt=270)
text(x =-1.3,y = 0.5,celltype,cex=0.7,family = 'Helvetica',font=1,srt=90)
}
}
}
saveFig(file.path(res,paste0('Fig2Ba_RCC_CK_barplot_noUncalled')),plotFun,width = 0.6,height = 2.75,rawData = dd,res=500)
}
# Figure 2Ea - Barplots of NB CK classification ####
# Neuroblastoma counterpart of rccBarplot.CK: per-sample stacked barplots of
# CopyKat Aneuploid/Diploid fractions for each annotated cell type, with
# 'Uncalled' cells dropped.  Writes the figure via saveFig().
nbBarplot.CK = function(){
nb.srat = readRDS('/lustre/scratch117/casm/team274/mt22/CN_methods/NB_ann.rds')
# Extract CK output
dd = nb.srat@meta.data
dd$finalAnn = factor(dd$finalAnn,levels = c('Leukocytes','Endothelium','Mesenchyme','Tumour'))
dd$PDID = factor(dd$PD_ID,levels = c("PD42184","PD42752-1","PD42752-2","PD46693","PD43255"))
# Map the raw CopyKat prediction to Aneuploid/Diploid; NA -> Uncalled.
dd$CKpred.normREF.default.80perc.2397 = ifelse(is.na(dd$CKpred.normREF.default.80perc.2397),'Uncalled',
ifelse(dd$CKpred.normREF.default.80perc.2397 == 'aneuploid','Aneuploid','Diploid'))
dd = dd[dd$CKpred.normREF.default.80perc.2397 != 'Uncalled',]
#dd$CKpred.normREF.default.80perc.2397 = factor(dd$CKpred.normREF.default.80perc.2397,levels = c('Aneuploid','Diploid','Uncalled'))
dd$CKpred.normREF.default.80perc.2397 = factor(dd$CKpred.normREF.default.80perc.2397,levels = c('Aneuploid','Diploid'))
#Define the layout
plotFun = function(noFrame=FALSE,noPlot=FALSE){
# Top strip holds the title; one panel per cell type below it.
layout(matrix(c(1,2,3,4,5),ncol=1),heights = c(0.2,1,1,1,2))
par(mar=c(0,0.6,0.8,0.6))
plot(0,0,
las=1,
type='n',frame.plot = F,axes = F)
title('Neuroblastoma',cex.main=1,family = 'Helvetica',font=2)
for(celltype in levels(dd$finalAnn)){
print(celltype)
par(mar=c(0.05,1.5,0.2,0.1),xpd=TRUE)
#Define the empty plot area
#plot(0,0,las=1,
#     type='n',
#     xlab='',xaxt='n',
#     ylab='',yaxt='n',frame.plot=T)
# Per-sample fractions of each call within this cell type.
tmp=as.matrix(table(dd$CKpred.normREF.default.80perc.2397[dd$finalAnn == celltype],dd$PDID[dd$finalAnn == celltype]))
tmp = sweep(tmp,2,colSums(tmp),'/')
cols = c(Diploid = '#c8c8c8',
Aneuploid = '#b02c46',
Uncalled = '#474646')
# The last (bottom) panel gets extra margin and the sample labels.
if(celltype == levels(dd$finalAnn)[length(levels(dd$finalAnn))]){
par(mar=c(4.9,1.5,0.2,0.1),xpd=TRUE)
barplot(tmp,
col=cols[rownames(tmp)],
space=0.1,axes = FALSE,
las = 1,names.arg = rep(NA,ncol(tmp)),border = F)
#axis(2,at=c(1,0.5,0),las=1,pos = 0.1,tck=-0.02,cex.axis=0.75,lwd.ticks = 0,hadj = 0.5)
#text(x = seq(0.5,6,by = 1.2),y = -0.1,colnames(tmp),cex=0.8,family = 'Helvetica',font=1,srt=45,adj = 1)
#text(x = 5.8,y = 0.5,celltype,cex=0.65,family = 'Helvetica',font=1,srt=270)
#axis(2,at=c(1,0.5,0),las=1,pos = 0.1,tck=-0.02,cex.axis=0.8,lwd.ticks = 0,hadj = 0.5)
#text(x = seq(0.5,6,by = 1.2),y = -0.1,colnames(tmp),cex=0.9,family = 'Helvetica',font=1,srt=90,adj = 0.95)
text(x = seq(0.65,6,by = 1.105),y = -0.1,colnames(tmp),cex=0.7,family = 'Helvetica',font=1,srt=90,adj = 0.95)
text(x = -1.3,y = 0.5,celltype,cex=0.7,family = 'Helvetica',font=1,srt=90)
}else{
barplot(tmp,
col=cols[rownames(tmp)],
space=0.1,axes = FALSE,names.arg = rep(' ',ncol(tmp)),
las = 1,main = '',border = F)
#axis(2,at=c(1,0.5,0),las=1,pos = 0.1,tck=-0.02,cex.axis =0.75,lwd.ticks = 0,hadj = 0.5,)
text(x =-1.3,y = 0.5,celltype,cex=0.7,family = 'Helvetica',font=1,srt=90)
}
}
}
#saveFig(file.path(resDir,paste0('TEST')),plotFun,width = 1.7,height = 3.2,rawData = dd)
saveFig(file.path(res,paste0('NB_CK_barplot_noUncalled')),plotFun,width = 0.7,height = 3.0,rawData = dd,res=500)
}
# Figure 2Eb - Barplots of NB AI classification ####
# Same layout as nbBarplot.CK but for the allelic-imbalance (AI) single-cell
# call: per-sample fractions of Tumour vs Normal calls per cell type, with
# 'Uncalled' cells dropped.  Writes the figure via saveFig().
nbBarplot.AI = function(){
nb.srat = readRDS('/lustre/scratch117/casm/team274/mt22/CN_methods/NB_ann.rds')
# Extract CK output
dd = nb.srat@meta.data
dd$finalAnn = factor(dd$finalAnn,levels = c('Leukocytes','Endothelium','Mesenchyme','Tumour'))
dd$PDID = factor(dd$PD_ID,levels = c("PD42752-1","PD42752-2","PD46693","PD43255","PD42184"))
# Map the raw AI call to Tumour/Normal; NA or 'Uncalled' -> Uncalled.
dd$AI_sc_call = ifelse((is.na(dd$AI_sc_call)|dd$AI_sc_call == 'Uncalled'),'Uncalled',
ifelse(dd$AI_sc_call == 'abbFrac','Tumour','Normal'))
dd = dd[dd$AI_sc_call != 'Uncalled',]
#dd$AI_sc_call = factor(dd$AI_sc_call,levels = c('Tumour','Normal','Uncalled'))
dd$AI_sc_call = factor(dd$AI_sc_call,levels = c('Tumour','Normal'))
#Define the layout
plotFun = function(noFrame=FALSE,noPlot=FALSE){
# Top strip holds the title; one panel per cell type below it.
layout(matrix(c(1,2,3,4,5),ncol=1),heights = c(0.2,1,1,1,2.2))
par(mar=c(0,0.6,0.8,0.6))
plot(0,0,
las=1,
type='n',frame.plot = F,axes = F)
title('Neuroblastoma',cex.main=1,family = 'Helvetica',font=2)
for(celltype in levels(dd$finalAnn)){
par(mar=c(0.05,1.5,0.2,0.1),xpd=TRUE)
# Per-sample fractions of each call within this cell type.
tmp=as.matrix(table(dd$AI_sc_call[dd$finalAnn == celltype],dd$PDID[dd$finalAnn == celltype]))
tmp = sweep(tmp,2,colSums(tmp),'/')
cols = c(Normal = '#c8c8c8',
Tumour = '#b02c46',
Uncalled = '#474646')
# The last (bottom) panel gets extra margin and the sample labels.
if(celltype == levels(dd$finalAnn)[length(levels(dd$finalAnn))]){
par(mar=c(4.9,1.5,0.2,0.1),xpd=TRUE)
barplot(tmp,
col=cols[rownames(tmp)],
space=0.1,axes = FALSE,
las = 1,names.arg = rep(NA,ncol(tmp)),border = F)
text(x = seq(0.65,6,by = 1.105),y = -0.1,colnames(tmp),cex=0.7,family = 'Helvetica',font=1,srt=90,adj = 0.95)
text(x = -1.3,y = 0.5,celltype,cex=0.7,family = 'Helvetica',font=1,srt=90)
}else{
barplot(tmp,
col=cols[rownames(tmp)],
space=0.1,axes = FALSE,names.arg = rep(' ',ncol(tmp)),
las = 1,main = '',border = F)
text(x =-1.3,y = 0.5,celltype,cex=0.7,family = 'Helvetica',font=1,srt=90)
}
}
}
saveFig(file.path(res,paste0('Fig2Eb_NB_AI_barplot_noUncalled')),plotFun,width = 0.7,height = 3.0,rawData = dd,res=500)
}
# Figure 2F - Example of CopyKat output for normal and tumour NB cells ####
# Global used by the CopyKat plotting code below: patients to show
sampleList = c('PD42184','PD42752-2')
# Figure S1 NB - CopyKat output for normal and tumour NB cells ####
# Minimum Battenberg segment length (bp) for a subclonal CN segment to be kept
subCl.minSegLen = 2e7
# Figure S2 (NB) - Genome-wide CopyKat mean log copy-number per cell type,
# one panel per annotated cell type, overlaid with the Battenberg bulk-DNA
# ground truth (major clone in red, subclones in blue) for each NB patient.
# Takes no arguments; reads all inputs from hard-coded paths and writes one
# figure per patient via saveFig().
# Relies on globals: subCl.minSegLen, res, annotateBTB(), colAlpha(), saveFig().
nbCK = function(){
  nb.srat = readRDS('/lustre/scratch117/casm/team274/mt22/CN_methods/NB_ann.rds')
  nb.srat$PDID = as.character(nb.srat$PD_ID)
  # Import chromInfo
  chromInfo = read.delim('/lustre/scratch117/casm/team274/mt22/chrom_abspos_kb.txt',sep = '\t')
  # Import Manifest
  projMani = read_excel("/lustre/scratch117/casm/team274/mt22/projectManifest.xlsx",sheet = "NB_mani")
  # CopyKat results
  copykat.results = readRDS('/lustre/scratch117/casm/team274/mt22/CN_methods/CopyKAT_output/v5_nb.CK.normREF.default.2397/v5_CKresults_default_normREF_80perc_2397.rds')
  #----- Processing CopyKat results -------#
  # One list entry per sample run; each holds a CNA matrix (cols 1:3 are
  # chrom/pos/abspos, remaining cols are cells) plus per-cell predictions
  for(i in 1:length(copykat.results)){
    # Get copyKat CNA matrix and prediction
    CNA_summary_byCellType = data.frame()
    CNA_mat = copykat.results[[i]]$CNAmat
    # Undo R's make.names mangling of cell barcodes ('X' prefix, '.' for '-')
    colnames(CNA_mat) = gsub('^X','',colnames(CNA_mat))
    colnames(CNA_mat) = gsub('\\.','-',colnames(CNA_mat))
    pred = as.data.frame(copykat.results[[i]]$prediction)
    # Identify which patient this CopyKat run belongs to via cell-barcode overlap
    sample = unique(nb.srat@meta.data[rownames(nb.srat@meta.data) %in% rownames(pred),]$PDID)
    PDID=sample
    if(length(sample) > 1){
      message(paste0('More than 1 sample detected: i=',i,', samples are ',sample))
    }else if(length(sample) <1){
      next
    }else if(length(sample)==1){
      message(paste0('Checking sample ',sample))
    }
    # subset annnb.srat object to keep only cells of that sample
    srat = subset(nb.srat, subset = PDID == sample)
    # subset by annotated cell type
    for(celltype in unique(srat$finalAnn)){
      # Keep position cols (1:3) plus cells annotated as this cell type
      CNA_mat_sub = CNA_mat[,c(1:3,which(colnames(CNA_mat) %in% rownames(srat@meta.data[srat@meta.data$finalAnn == celltype,])))]
      if(ncol(CNA_mat_sub) == 4){
        # Exactly one cell: its column IS the per-bin mean
        chrom_tmp=data.frame(celltype = celltype,CNA_mat_sub)
        colnames(chrom_tmp)[5] = 'mean_logCN'
      }else if (ncol(CNA_mat_sub) > 4){
        # Multiple cells: average log copy-number per genomic bin
        chrom_tmp = data.frame(celltype = celltype,CNA_mat_sub[,c(1:3)],mean_logCN = apply(CNA_mat_sub[,-c(1:3)],MARGIN = 1,FUN = mean))
      }else if (ncol(CNA_mat_sub) < 4){
        # No cells of this type in this sample
        chrom_tmp = data.frame(celltype = celltype,CNA_mat_sub[,c(1:3)],mean_logCN = NA)
      }
      CNA_summary_byCellType = rbind(CNA_summary_byCellType,chrom_tmp)
    }
    # Remove X chromosome
    CNA_summary_byCellType = CNA_summary_byCellType[CNA_summary_byCellType$chrom != 23,]
    CNA_summary_byCellType$celltype = factor(CNA_summary_byCellType$celltype,levels=c("Tumour","Mesenchyme", "Endothelium",'Leukocytes'))
    ####------------------ Generate Battenberg CN summary file ----------------####
    donorMani = projMani[projMani$PDID == sample,]
    btb.fp = unique(donorMani$battenbergFp)
    #----- Processing Battenberg data -------#
    dna.data = annotateBTB(btb.fp,subCl.minSegLen = subCl.minSegLen,PDID,tgtChrs=c(1:22),removeBalancedSegs=F,longFormat = T,method = 'totalCN')
    # Remove X chromosome
    dna.data = dna.data[dna.data$Chr != 23,]
    dna.data$celltype = 'Tumour'
    # Add a flat diploid (tumTot=2) baseline segment for each normal cell type
    # so every panel has a ground-truth line to draw
    tmp = rbind(data.frame(Idx=0,Chr=1,matNum=1,patNum=1,frac=1,segLen=0,type='maj',tumTot=2,tot2min='2:1',tumFrac=0.5,newIdx=0,posID=0,posType='Start',pos=1,idx=0,abspos_kb=0,celltype='Endothelium'),
                data.frame(Idx=0,Chr=1,matNum=1,patNum=1,frac=1,segLen=0,type='maj',tumTot=2,tot2min='2:1',tumFrac=0.5,newIdx=0,posID=0,posType='Stop',pos=2874771,idx=0,abspos_kb=2874771,celltype='Endothelium'),
                data.frame(Idx=0,Chr=1,matNum=1,patNum=1,frac=1,segLen=0,type='maj',tumTot=2,tot2min='2:1',tumFrac=0.5,newIdx=0,posID=0,posType='Start',pos=1,idx=0,abspos_kb=0,celltype='Leukocytes'),
                data.frame(Idx=0,Chr=1,matNum=1,patNum=1,frac=1,segLen=0,type='maj',tumTot=2,tot2min='2:1',tumFrac=0.5,newIdx=0,posID=0,posType='Stop',pos=2874771,idx=0,abspos_kb=2874771,celltype='Leukocytes'),
                data.frame(Idx=0,Chr=1,matNum=1,patNum=1,frac=1,segLen=0,type='maj',tumTot=2,tot2min='2:1',tumFrac=0.5,newIdx=0,posID=0,posType='Start',pos=1,idx=0,abspos_kb=0,celltype='Mesenchyme'),
                data.frame(Idx=0,Chr=1,matNum=1,patNum=1,frac=1,segLen=0,type='maj',tumTot=2,tot2min='2:1',tumFrac=0.5,newIdx=0,posID=0,posType='Stop',pos=2874771,idx=0,abspos_kb=2874771,celltype='Mesenchyme'))
    dna.data = rbind(dna.data,tmp)
    # log ratio of tumour total CN to diploid; 0 = diploid
    dna.data$log_CNratio = log(as.numeric(dna.data$tumTot)/2)
    subCl.dna = dna.data[dna.data$type == 'sub',]
    majCl.dna = dna.data[dna.data$type == 'maj',]
    #----- Plotting copykat results! -------#
    chromInfo2 = chromInfo[chromInfo$chrom != 23,]
    plotFun = function(noFrame=TRUE,noPlot=FALSE){
      noFrame=T
      # Set layout: one full-width panel per cell type
      layout(mat=matrix(c(1:nlevels(CNA_summary_byCellType$celltype)),ncol=1),
             heights = rep(2,nlevels(CNA_summary_byCellType$celltype)))
      for(celltype in levels(CNA_summary_byCellType$celltype)){
        par(mar=c(0.2,0.6,0.8,0.6),xpd=TRUE)
        tmp = CNA_summary_byCellType[CNA_summary_byCellType$celltype == celltype,]
        dna = majCl.dna[majCl.dna$celltype == celltype,]
        ncells = nrow(nb.srat@meta.data[nb.srat@meta.data$PDID == PDID & nb.srat@meta.data$finalAnn == celltype,])
        # Set params for plotting: shared y-range across panels, covering both
        # the CopyKat signal and the (halved) Battenberg log ratios
        ylim = c(round(min(CNA_summary_byCellType[!is.na(CNA_summary_byCellType$mean_logCN),]$mean_logCN)-0.1,digits = 1),round(max(CNA_summary_byCellType[!is.na(CNA_summary_byCellType$mean_logCN),]$mean_logCN)+0.2,digits = 1))
        ybottom=min(round(min(CNA_summary_byCellType[!is.na(CNA_summary_byCellType$mean_logCN),]$mean_logCN)-0.1,digits = 1),round(log(0.5)/2,2))
        ytop=max(round(max(CNA_summary_byCellType[!is.na(CNA_summary_byCellType$mean_logCN),]$mean_logCN)+0.1,digits = 1),round(max(dna.data$log_CNratio)/2,2))
        ytext = ytop + 0.1
        # Plot main frame (empty; everything is drawn on top)
        plot(CNA_summary_byCellType$abspos, CNA_summary_byCellType$mean_logCN,
             las=1,
             type='n',
             ylim=ylim,
             xlab=ifelse(noFrame,'','Genomic Position'),
             ylab=ifelse(noFrame,'',''),
             xaxt=ifelse(noFrame,'n','s'),
             yaxt=ifelse(noFrame,'n','s'),
             frame.plot=F)
        #Plot background chromosome
        # Chromosome boundaries in absolute bp (q-arm ends delimit chromosomes)
        xleft = c(0,chromInfo2[chromInfo2$arm == 'q' & chromInfo2$chrom!=22,]$abspos*1000)
        xright = c(chromInfo2[chromInfo2$arm == 'q',]$abspos*1000)
        text(x=9e8,y=ytext,paste0(celltype,'_',PDID,' (n=',ncells,')'),cex=0.6,family = 'Helvetica',font=2,adj = 0)
        axis(2,at=c(ybottom+0.05,0,ytop-0.05),labels = c('Low',0,'High'),las=1,pos = 0,tck = -.00,lwd = 0.3,cex.axis=0.6,hadj = 0.3,padj = 0.5)
        # Right axis: absolute copy-number scale (1-4) in Battenberg red
        axis(4,las=1,pos = max(chromInfo2$abspos_kb*1000),tck = -.02,lwd = 0.7,cex.axis=0.6,hadj = 1.5,col='black',
             at=c(round(log(0.5)/2,2),0,round(log(3/2)/2,2),round(log(4/2)/2,2)),col.axis = '#b02c46',
             labels = c(1,2,3,4))
        # NOTE(review): replicate()'s first formal is `n`, so the colour vector
        # here binds to `expr`; the 2x11 matrix recycles to the intended
        # alternating colours, but rep(c('white','lightgrey'),11) would be
        # clearer - confirm before changing.
        col = replicate(c('white','lightgrey'),n = 22/2)
        rect(xleft=xleft,
             xright=xright,
             ybottom=ybottom,
             ytop=ytop,
             col = col,
             lty = 'blank')
        #Black surrounding border
        rect(xleft=min(xleft),
             xright=max(xright),
             ybottom=ybottom,
             ytop=ytop,
             col = colAlpha('white',0.0001),
             border = 'black',lwd = 0.4)
        #text(x=(xleft+xright)/2,y = 0.72,labels = c(1:22),cex = c(rep(0.7,10),rep(0.62,4),rep(0.53,4),rep(0.31,4)),font = 1)
        # Plot ground truth (Battenberg major clone) as red bars from 0;
        # log ratios are halved to fit the CopyKat y-scale
        rect(xleft = dna[dna$posType=='Start',]$abspos_kb*1000,
             xright = dna[dna$posType=='Stop',]$abspos_kb*1000,
             ybottom = 0,
             ytop=dna[dna$posType=='Start',]$log_CNratio/2,
             col=colAlpha('#b02c46',1),
             border=colAlpha('#b02c46',1),lwd = 0.8)
        # Plot subclone total CN (blue) on the tumour panel only
        if(celltype == 'Tumour' & nrow(subCl.dna) > 0){
          for(chr in unique(subCl.dna$Chr)){
            for(logR in unique(subCl.dna[subCl.dna$Chr == chr,]$log_CNratio)){
              d = subCl.dna[subCl.dna$Chr == chr & subCl.dna$log_CNratio == logR,]
              if(logR == 0){
                # Diploid subclone segment: draw a line at 0 (a zero-height
                # rect would be invisible)
                lines(d$abspos_kb*1000,d$log_CNratio/2,col='#4169E1',lwd=1.0)
              }else{
                rect(xleft = d[d$posType=='Start',]$abspos_kb*1000,
                     xright = d[d$posType=='Stop',]$abspos_kb*1000,
                     ybottom = 0,
                     ytop=d[d$posType=='Start',]$log_CNratio/2,
                     col=colAlpha('#4169E1',0.7),
                     border=colAlpha('#4169E1',0.7),lwd = 0)
              }
            }
          }
        }
        # Plot CopyKat output as a black trace, with dashed guides at +/-0.2
        lines(x=tmp$abspos,tmp$mean_logCN,col='black',lwd=0.7)
        segments(x0=min(xleft),x1 = max(xright),
                 y0=0.2, y1=0.2,
                 col = 'darkgrey',lty = 'dashed',lwd = 0.4)
        segments(x0=min(xleft),x1 = max(xright),
                 y0=-0.2, y1=-0.2,
                 col = colAlpha('darkgrey',0.9),lty = 'dashed',lwd = 0.4)
      }
    }
    saveFig(file.path(res,paste0('FigS2_NB_CK_',PDID,'_',subCl.minSegLen)),plotFun,width = 2.0,height = 3.2,res=500)
  }
}
# Figure 2C and S2 - Example of AlleleIntegrator output for normal and tumour RCC cells ####
# Globals consumed by plotMAF() below
# Minimum Battenberg segment length (bp) for subclonal CN segments
subCl.minSegLen = 2e7
# Target read coverage per aggregated MAF bin
cov=500
# Default invocation parameters
tumour = 'NB'
sampleList = 'all'
# Figure 2C / S2 - Genome-wide maternal allele fraction (MAF) plots from
# AlleleIntegrator per cell type, overlaid with the Battenberg bulk-DNA
# allelic-ratio ground truth (major clone in red, subclones in blue).
#
# tumour     : 'RCC' or 'NB' - selects the manifest sheet and cell-type levels.
# sampleList : 'all' to plot every cell type for every patient, or a single
#              sample ID to restrict the layout (only 'PD42184'/'PD37228'
#              have plotting parameters defined below - TODO confirm others).
# Side effects: reads manifests/RDS from hard-coded paths and writes one
# figure per patient via saveFig().
# Relies on globals: chromInfo, rcc.srat, nb.srat, subCl.minSegLen, cov, res,
# and project helpers annotateBTB(), aggregateByLists(), colAlpha(), saveFig().
plotMAF = function(tumour,sampleList='all'){
  if(tumour == 'RCC'){
    sheet = 'RCC_mani'
  }else if(tumour == 'NB'){
    sheet = 'NB_mani'
  }
  # Import Manifest
  projMani = read_excel("/lustre/scratch117/casm/team274/mt22/projectManifest.xlsx",sheet = sheet)
  if(sheet == 'RCC_mani'){
    # Drop normal samples (SampleID prefixed 'n_') from the RCC manifest
    projMani = projMani[!grepl('^n_',projMani$SampleID),]
  }
  outdir = '/lustre/scratch117/casm/team274/mt22/CN_methods/alleleIntegrator_output/'
  #PDID = unique(projMani$PDID)[1]
  for(PDID in unique(projMani$PDID)){
    donorMani = projMani[projMani$PDID == PDID,]
    ####------------------ Get MAF output ----------------####
    if(sheet == 'RCC_mani'){
      f = list.files(outdir,pattern = paste0(PDID,"_gCnts_allhSNPs.RDS"))
    }else if(sheet == 'NB_mani'){
      f = list.files(outdir,pattern = paste0(PDID,"tmp_gCnts_allhSNPs.RDS"))
    }
    gCnts = readRDS(file.path(outdir,f))
    # Aggregate per-cell maternal/paternal SNP counts to cluster (cell-type) level
    ctmp = aggregateByLists(gCnts, assays = c("matCount", "patCount"), gCnts$clusterID)
    colnames(ctmp) = gsub("^cellID$", "clusterID", colnames(ctmp))
    ctmp$totCount = ctmp$patCount + ctmp$matCount
    ctmp$MAF = ctmp$matCount/ctmp$totCount
    # regionID is 'chr:pos_...' - split out chromosome and position
    ctmp$chr = sapply(strsplit(ctmp$regionID,split = ':'),'[',1)
    ctmp$chr = gsub('X',23,ctmp$chr)
    ctmp$chr = as.numeric(as.character(ctmp$chr))
    ctmp$pos = sapply(strsplit(ctmp$regionID,split = ':'),'[',2)
    ctmp$pos = sapply(strsplit(ctmp$pos,split = '_'),'[',1)
    ctmp$pos = as.numeric(ctmp$pos)
    # Get absolute genomic position
    ctmp$abspos_kb = ctmp$pos/1000 # if chromosome 1, abspos = pos
    for(r in 1:nrow(ctmp)){
      chrom = as.numeric(ctmp$chr[r])
      if (chrom > 1){
        # Offset by the cumulative genome length up to the previous chromosome's q-arm end
        ctmp$abspos_kb[r] = ctmp$abspos_kb[r] + (chromInfo[(chromInfo$chrom == (chrom-1)) & (chromInfo$arm == 'q'),]$abspos_kb)
      }
    }
    # BUGFIX: was order(c(ctmp$chr,ctmp$abspos_kb)), which concatenates the two
    # key vectors into one of length 2*nrow and produces out-of-range row
    # indices (yielding NA-filled rows). order() takes multiple sort keys as
    # separate arguments.
    ctmp = ctmp[order(ctmp$chr,ctmp$abspos_kb,decreasing = F),]
    ctmp$regionID = paste0(ctmp$clusterID,'_',ctmp$regionID)
    #### Aggregating by read coverage
    # Walk SNPs in genomic order within each cluster and group them into bins
    # of ~`cov` total reads, so each plotted MAF point has comparable depth
    out = ctmp %>% arrange(clusterID,abspos_kb) %>% group_by(clusterID) %>% summarise(totCnt.cumsum = cumsum(totCount),regionID=regionID)
    m=match(out$regionID,ctmp$regionID)
    sum(is.na(m))
    out = cbind(out[,-c(1,3)],ctmp[m,])
    out$readCovBin = floor(out$totCnt.cumsum / cov) + 1
    # Last SNP of each bin carries the bin's cumulative mat/pat counts
    out2 = out %>% arrange(clusterID,readCovBin,abspos_kb) %>%
      group_by(clusterID,readCovBin) %>%
      mutate(patCnt.cumsum = cumsum(patCount),matCnt.cumsum = cumsum(matCount)) %>%
      filter(totCnt.cumsum == max(totCnt.cumsum))
    # First SNP of each bin gives the bin's start position
    startPos = out %>% arrange(clusterID,readCovBin,abspos_kb) %>%
      group_by(clusterID,readCovBin) %>%
      filter(totCnt.cumsum == min(totCnt.cumsum))
    startPos$start = startPos$abspos_kb
    # NOTE(review): positional column indices select readCovBin + the last two
    # added columns of startPos - fragile if upstream columns change; verify.
    out2 = merge(out2,startPos[,c(2,11,12)],by=c('clusterID','readCovBin'))
    out2$midPos = (out2$abspos_kb + out2$start)/2
    out2$MAF.readCovBin = out2$matCnt.cumsum / (out2$matCnt.cumsum+out2$patCnt.cumsum)
    out2 = arrange(out2,clusterID,readCovBin)
    #----- Plotting AlleleIntegrator MAF results! -------#
    out2 = out2[out2$chr != 23,]
    chromInfo2 = chromInfo[chromInfo$chrom != 23,]
    if(sheet == 'RCC_mani'){
      out2$clusterID[out2$clusterID == 'Renal_cell_carcinoma'] = 'Tumour'
      out2$clusterID[out2$clusterID == 'Proximal_tubuluar_cells'] = 'PTC'
      out2$clusterID = factor(out2$clusterID,levels = c('Tumour','PTC','Leukocytes'))
      if(sampleList != 'all'){
        # Single-sample figure omits the leukocyte panel
        out2=out2[out2$clusterID != 'Leukocytes',]
        out2$clusterID = factor(out2$clusterID,levels = c('Tumour','PTC'))
      }
    }else if(sheet == 'NB_mani'){
      out2$clusterID = factor(out2$clusterID,levels = c('Tumour','Mesenchyme','Endothelium','Leukocytes'))
    }
    ####------------------ Generate Battenberg CN summary file ----------------####
    btb.fp = unique(donorMani$battenbergFp)[!is.na(unique(donorMani$battenbergFp))]
    #----- Processing Battenberg data -------#
    dna.data = annotateBTB(btb.fp,subCl.minSegLen = subCl.minSegLen,PDID,tgtChrs=c(1:22),removeBalancedSegs=F,longFormat = T,method = 'allelicRatio')
    # Remove X chromosome
    dna.data = dna.data[dna.data$Chr != 23,]
    # Add a flat tumFrac=0.5 (balanced diploid) baseline segment for each
    # normal cell type so every panel has a ground-truth line
    if(sheet=='NB_mani'){
      dna.data$clusterID = 'Tumour'
      tmp = rbind(data.frame(Idx=0,Chr=1,matNum=1,patNum=1,frac=1,segLen=0,type='maj',tumTot=2,tot2min='2:1',tumFrac=0.5,newIdx=0,posID=0,posType='Start',pos=1,idx=0,abspos_kb=0,clusterID='Endothelium'),
                  data.frame(Idx=0,Chr=1,matNum=1,patNum=1,frac=1,segLen=0,type='maj',tumTot=2,tot2min='2:1',tumFrac=0.5,newIdx=0,posID=0,posType='Stop',pos=2874771,idx=0,abspos_kb=2874771,clusterID='Endothelium'),
                  data.frame(Idx=0,Chr=1,matNum=1,patNum=1,frac=1,segLen=0,type='maj',tumTot=2,tot2min='2:1',tumFrac=0.5,newIdx=0,posID=0,posType='Start',pos=1,idx=0,abspos_kb=0,clusterID='Leukocytes'),
                  data.frame(Idx=0,Chr=1,matNum=1,patNum=1,frac=1,segLen=0,type='maj',tumTot=2,tot2min='2:1',tumFrac=0.5,newIdx=0,posID=0,posType='Stop',pos=2874771,idx=0,abspos_kb=2874771,clusterID='Leukocytes'),
                  data.frame(Idx=0,Chr=1,matNum=1,patNum=1,frac=1,segLen=0,type='maj',tumTot=2,tot2min='2:1',tumFrac=0.5,newIdx=0,posID=0,posType='Start',pos=1,idx=0,abspos_kb=0,clusterID='Mesenchyme'),
                  data.frame(Idx=0,Chr=1,matNum=1,patNum=1,frac=1,segLen=0,type='maj',tumTot=2,tot2min='2:1',tumFrac=0.5,newIdx=0,posID=0,posType='Stop',pos=2874771,idx=0,abspos_kb=2874771,clusterID='Mesenchyme'))
    }else if(sheet=='RCC_mani'){
      dna.data$clusterID = 'Tumour'
      tmp = rbind(data.frame(Idx=0,Chr=1,matNum=1,patNum=1,frac=1,segLen=0,type='maj',tumTot=2,tot2min='2:1',tumFrac=0.5,newIdx=0,posID=0,posType='Start',pos=1,idx=0,abspos_kb=0,clusterID='PTC'),
                  data.frame(Idx=0,Chr=1,matNum=1,patNum=1,frac=1,segLen=0,type='maj',tumTot=2,tot2min='2:1',tumFrac=0.5,newIdx=0,posID=0,posType='Stop',pos=2874771,idx=0,abspos_kb=2874771,clusterID='PTC'),
                  data.frame(Idx=0,Chr=1,matNum=1,patNum=1,frac=1,segLen=0,type='maj',tumTot=2,tot2min='2:1',tumFrac=0.5,newIdx=0,posID=0,posType='Start',pos=1,idx=0,abspos_kb=0,clusterID='Leukocytes'),
                  data.frame(Idx=0,Chr=1,matNum=1,patNum=1,frac=1,segLen=0,type='maj',tumTot=2,tot2min='2:1',tumFrac=0.5,newIdx=0,posID=0,posType='Stop',pos=2874771,idx=0,abspos_kb=2874771,clusterID='Leukocytes'))
    }
    dna.data = rbind(dna.data,tmp)
    subCl.dna = dna.data[dna.data$type == 'sub',]
    majCl.dna = dna.data[dna.data$type == 'maj',]
    plotFun = function(noFrame=T,noPlot=FALSE){
      # BUGFIX: was nlevels(CNA_summary_byCellType$celltype) - a variable that
      # is never defined in this function (it leaked from the CopyKat plotting
      # code and would only work via a stale global). Panels here are one per
      # cluster level of out2.
      layout(mat=matrix(c(1:nlevels(out2$clusterID)),ncol=1),
             heights = rep(2,nlevels(out2$clusterID)))
      # Set plotting params
      # Per-(sheet, sampleList) layout tuning; NOTE(review): lookups below
      # return zero rows for sampleList values other than those listed here.
      params = data.frame(sheet = c('NB_mani','RCC_mani','NB_mani','RCC_mani'),
                          sampleList=c('all','all','PD42184','PD37228'),
                          mar = c('0.1,0.6,0.8,0.6','0.1,0.6,0.8,0.6','0.5,0.8,0.2,0.6','0.4,0.8,0.2,0.6'),
                          text.cex2=c(0.7,0.7,0.7,0.7),
                          text.cex1=c(0.7,0.7,0.7,0.7),
                          text.y = c(1.37,1.37,1.32,1.32))
      for(celltype in levels(out2$clusterID)){
        par(mar=as.vector(as.numeric(strsplit(params[params$sheet == sheet & params$sampleList == sampleList,]$mar,split = ',')[[1]])),xpd=TRUE)
        tmp = out2[out2$clusterID == celltype,]
        dna = majCl.dna[majCl.dna$clusterID == celltype,]
        dna = dna[order(dna$abspos_kb,decreasing = F),]
        # Cell counts come from the global Seurat objects (not loaded here)
        if(sheet == 'RCC_mani'){
          ncells = nrow(rcc.srat@meta.data[rcc.srat@meta.data$PDID == PDID & rcc.srat@meta.data$finalAnn == celltype,])
        }else if (sheet == 'NB_mani'){
          ncells = nrow(nb.srat@meta.data[nb.srat@meta.data$PD_ID == PDID & nb.srat@meta.data$finalAnn == celltype,])
        }
        # Plot main frame (empty; drawn over below)
        plot(out2$midPos*1000, out2$MAF.readCovBin,
             las=1,
             type='n',xaxt='n',yaxt='n',
             ylim=c(-0.1,1.3),
             frame.plot=F)
        text(x=1e3,y=ifelse('all'%in% sampleList,1.37,1.32),paste0(celltype,'_',PDID),cex=params[params$sheet == sheet & params$sampleList == sampleList,]$text.cex1,family = 'Helvetica',font=2,adj = 0)
        text(x=2.86e9,y=ifelse('all'%in% sampleList,1.37,1.32),paste0('n=',ncells),cex=params[params$sheet == sheet & params$sampleList == sampleList,]$text.cex2,family = 'Helvetica',font=1,adj = 1)
        axis(2,at=c(0,0.5,1),labels = c(0,'1/2',1),las=1,pos = 0,tck = -.02,lwd = 0.3,cex.axis=0.7,hadj = 0.2,padj = 0.5)
        axis(4,at=c(0,0.5,1),labels = c(0,'1/2',1),las=1,pos = max(chromInfo2$abspos_kb*1000),tck = -.02,lwd = 0.3,cex.axis=0.7,hadj = 1.0,padj = 0.5,col.axis='#b02c46')
        #Plot background chromosome
        xleft = c(0,chromInfo2[chromInfo2$arm == 'q' & chromInfo2$chrom!=22,]$abspos_kb*1000)
        xright = c(chromInfo2[chromInfo2$arm == 'q',]$abspos_kb*1000)
        col = replicate(c('white','lightgrey'),n = 22/2)
        rect(xleft=xleft,
             xright=xright,
             ybottom=-0.1,
             ytop=1.18,
             col = col,
             lty = 'blank')
        #Black surrounding border
        rect(xleft=min(xleft),
             xright=max(xright),
             ybottom=-0.1,
             ytop=1.18,
             col = colAlpha('white',0.0001),
             border = 'black',lwd = 0.4)
        # Plot chromosome number
        #text(x=(xleft+xright)/2,y = 1.095,labels = c(1:22),cex = c(rep(0.7,10),rep(0.62,4),rep(0.53,4),rep(0.31,4)),font = 1)
        # Plot AI MAF points; slightly higher alpha for the sparser panels
        a = ifelse(celltype %in% c('Tumour','Leukocyte'),0.7,0.85)
        points(tmp$midPos*1000,tmp$MAF.readCovBin,
               pch=19,
               cex=0.03,col=colAlpha('black',a))
        # Plot ground truth
        # Subclone
        if(celltype == 'Tumour' & (nrow(subCl.dna) > 0)){
          for(chr in unique(subCl.dna$Chr)){
            for(Idx in unique(subCl.dna[subCl.dna$Chr == chr,]$Idx)){
              lines(x=subCl.dna[subCl.dna$Chr == chr & subCl.dna$Idx == Idx,]$abspos_kb*1000,subCl.dna[subCl.dna$Chr == chr & subCl.dna$Idx == Idx,]$tumFrac,col='#4169E1',lwd=1.1)
            }
          }
        }
        # Major clone CN profile
        lines(x=dna$abspos_kb*1000,dna$tumFrac,col='#b02c46',lwd=1.0)
      }
    }
    # Output name/size depends on whether this is the single-sample (main
    # figure) or all-samples (supplementary) variant
    if(sampleList != 'all' & sheet == 'RCC_mani'){
      saveFig(file.path(res,paste0('v2_RCC_AI_',PDID)),plotFun,width = 2.65,height = 2.15,res=500)
    }else if(sampleList == 'all' & sheet == 'RCC_mani'){
      saveFig(file.path(res,paste0('FigS2_AI_',PDID,'_',subCl.minSegLen)),plotFun,width = 2.3,height = 3.5,res=500)
    }else if(sampleList != 'all' & sheet == 'NB_mani'){
      saveFig(file.path(res,paste0('v2_NB_AI_',PDID)),plotFun,width = 2.5,height = 2.265,res=500)
    }else if(sampleList == 'all' & sheet == 'NB_mani'){
      saveFig(file.path(res,paste0('FigS2_AI_',PDID,'_',subCl.minSegLen)),plotFun,width = 2.0,height = 3.2,res=500)
    }
  }
}
# GOSH25 - bulkDNA BAF plot ####
# Plot Tumour bulkDNA BAF ####
# NOTE(review): setwd() in a shared script makes it non-portable; paths below
# also mix relative and absolute forms.
setwd('~/lustre_mt22/CN_methods/')
#############
# Libraries #
#############
library(alleleIntegrator)
#########################
# Set Global parameters #
#########################
# Autosomes only
tgtChrs=c(1:22)
# Minimum segment lengths (bp) for major-clone and subclonal CN segments
minSegLen=1e6
subCl.minSegLen=2e7
# NOTE(review): T is a reassignable alias for TRUE; TRUE is safer
skipIfExists = T
# Import Manifest
projMani = read_excel("../projectManifest.xlsx",sheet = "alleleIntegrator")
mainDir = '~/lustre_mt22/CN_methods/revision_2204'
# GRCh37 reference genome used for SNP calling and BAF computation
refGenome = '/lustre/scratch119/realdata/mdt1/team78pipelines/reference/Human/GRCH37d5/genome.fa'
# Number of parallel workers for alleleIntegrator calls
nParallel=25
#-----------------------------------------------------------
# For each Ewings/Wilms/ATRT patient in the manifest: call heterozygous SNPs
# from the patient (germline) DNA, compute coverage/BAF over the tumour DNA,
# and save a genome-wide BAF scatter plot.
# Relies on globals: projMani, mainDir, refGenome, nParallel, res, saveFig(),
# plus alleleIntegrator's findHetSNPs() / generateCoverageAndBAF().
for(tumourType in unique(projMani$TumourType)){
  if(tumourType %in% c('Ewings','Wilms','ATRT')){
    for(PDID in unique(projMani$PDID[projMani$TumourType == tumourType])){
      message(sprintf('Generating BAF plot from bulkDNA for Sample %s - tumourType: %s',PDID,tumourType))
      # Set output directory
      outDir = file.path(mainDir,'alleleIntegrator_output',tumourType,PDID)
      if(!file.exists(outDir)){
        # BUGFIX: the original sprintf('[%s]: ...') supplied no value for %s,
        # which itself errors at runtime; pass the sample ID.
        message(sprintf('[%s]: Cannot find output dir - Please check!...',PDID))
        next
      }
      # Set Sample specific params
      donorMani = projMani[projMani$PDID == PDID,]
      tumourDNA = unique(donorMani$tumourDNA[!is.na(donorMani$tumourDNA)])
      patientDNA = unique(donorMani$patientDNA[!is.na(donorMani$patientDNA)])
      ######################
      # Call heterozygous SNPs from the germline BAM
      hSNPs = findHetSNPs(patientDNA,refGenome,file.path(outDir,paste0(PDID,'_patient_hetSNPs.vcf')),nParallel=nParallel)
      #Expectation is that we'll find ~ 3 million of them
      message(sprintf("Found %s heterozygous SNPs",prettyNum(length(hSNPs),big.mark=',')))
      # Coverage + B-allele fraction of those SNPs in the tumour DNA
      baf.out = generateCoverageAndBAF(BAM = tumourDNA,refGenome = refGenome,hSNPs=hSNPs,
                                       outPath = file.path(outDir,paste0(PDID,'_cov_BAF.RDS')),nParallel=nParallel)
      plotFun = function(noFrame=T,noPlot=FALSE,minCoverage=10){
        #Filter to just the ones that we trust
        filt = baf.out[baf.out$coverage>=minCoverage,]
        #Work out the chromosome boundaries
        chrsToPlot=c(1:22)
        chrs = chrsToPlot
        chrLens = seqlengths(filt)
        # Fall back to the max observed position when seqlengths are missing
        tmp = sapply(split(start(filt),as.character(seqnames(filt))),max)
        chrLens[is.na(chrLens)] = tmp[names(chrLens)[is.na(chrLens)]]
        chrLens = as.numeric(chrLens[chrs])
        # Absolute genomic x-coordinate: position + cumulative preceding lengths
        x = start(filt) +cumsum(c(0,chrLens))[match(as.character(seqnames(filt)),chrs)]
        # Subset randomly 50% of the points (fixed seed keeps the plot reproducible)
        set.seed(2397)
        idx = sample(1:nrow(mcols(filt)), nrow(mcols(filt))/2, replace=FALSE)
        filt.sub = filt[idx,]
        # BAF plot; point alpha scales inversely with the number of points
        par(mfrow=c(1,1),mar=c(2.1,4.1,1.1,1.1))
        alpha = max(0.002,min(1,1e5/length(filt.sub)))
        plot(x[idx],filt.sub$BAF,
             col=rgb(0,0,0,alpha=alpha/2),
             cex=0.01,
             las=2,
             xaxt='n',
             yaxt='n',
             xlab='',
             ylab='BAF',
             xaxs='i',
             yaxs='i',
             ylim=c(0,1),
             xlim=c(1,sum(chrLens)))
        #axis(side=1, at=cumsum(c(0,chrLens[-length(chrLens)]))+chrLens/2, labels = chrs)
        axis(side=2, at=c(0,0.5,1),labels=c(0,0.5,1),las=1)
        # Vertical guides at chromosome boundaries
        abline(v=cumsum(chrLens),col='lightgrey')
      }
      saveFig(file.path(res,paste0('BAF_',PDID,'_',tumourType)),plotFun,width = 5.8,height = 2.2,res=1000)
    }
  }
}
# One-off BAF plot for sample PD46693 (appears to be a scratch copy of the
# loop above). BUGFIXES: the original call had `nParallel=24` trapped inside
# paste0(), referenced the undefined symbol PD46693 (missing quotes), and was
# missing a closing parenthesis - a syntax error that swallowed the lines below.
baf.out = generateCoverageAndBAF(BAM = tumourDNA,refGenome = refGenome,hSNPs=hSNPs,
                                 outPath = paste0('/lustre/scratch117/casm/team274/mt22/CN_methods/','PD46693','_cov_BAF.RDS'),
                                 nParallel=24)
plotFun = function(noFrame=T,noPlot=FALSE,minCoverage=10){
  # BUGFIX: `filt` was referenced (seqlengths/split) before it was assigned,
  # and was filtered from hSNPs, which carries no $BAF column - the parallel
  # plotFun above filters baf.out, so do the same here. TODO confirm intent.
  #Filter to just the ones that we trust
  filt = baf.out[baf.out$coverage>=minCoverage,]
  #Work out the chromosome boundaries
  chrsToPlot=c(1:22)
  chrs = chrsToPlot
  chrLens = seqlengths(filt)
  # Fall back to the max observed position when seqlengths are missing
  tmp = sapply(split(start(filt),as.character(seqnames(filt))),max)
  chrLens[is.na(chrLens)] = tmp[names(chrLens)[is.na(chrLens)]]
  chrLens = as.numeric(chrLens[chrs])
  # Absolute genomic x-coordinate: position + cumulative preceding lengths
  x = start(filt) +cumsum(c(0,chrLens))[match(as.character(seqnames(filt)),chrs)]
  # Subset randomly 50% of the points (fixed seed keeps the plot reproducible)
  set.seed(2397)
  idx = sample(1:nrow(mcols(filt)), nrow(mcols(filt))/2, replace=FALSE)
  filt.sub = filt[idx,]
  # BAF plot; point alpha scales inversely with the number of points
  par(mfrow=c(1,1),mar=c(2.1,4.1,1.1,1.1))
  alpha = max(0.002,min(1,1e5/length(filt.sub)))
  plot(x[idx],filt.sub$BAF,
       col=rgb(0,0,0,alpha=alpha/2),
       cex=0.01,
       las=2,
       xaxt='n',
       yaxt='n',
       xlab='',
       ylab='BAF',
       xaxs='i',
       yaxs='i',
       ylim=c(0,1),
       xlim=c(1,sum(chrLens)))
  #axis(side=1, at=cumsum(c(0,chrLens[-length(chrLens)]))+chrLens/2, labels = chrs)
  axis(side=2, at=c(0,0.5,1),labels=c(0,0.5,1),las=1)
  # Vertical guides at chromosome boundaries
  abline(v=cumsum(chrLens),col='lightgrey')
}
# BUGFIX: `resd` was a typo for the global output directory `res`
saveFig(file.path(res,paste0('BAF_',PDID)),plotFun,width = 5.8,height = 2.2,res=1000)
|
/R/old_scripts/oldstuff_from_generatePlots.R
|
no_license
|
miktrinh/scGenotyping
|
R
| false
| false
| 49,419
|
r
|
#### This script stores old functions and code from generatePlots.R.
# Most of these are suspected to no longer be relevant; kept just in case.
# Import RCC and NB datasets (annotated Seurat objects, used as globals below)
rcc.srat = readRDS('/lustre/scratch117/casm/team274/mt22/CN_methods/RCC_PCT_ann3sub.rds')
nb.srat = readRDS('/lustre/scratch117/casm/team274/mt22/CN_methods/NB_ann.rds')
# Chromosome arm table with cumulative absolute positions (kb)
chromInfo = read.delim('/lustre/scratch117/casm/team274/mt22/chrom_abspos_kb.txt',sep = '\t')
# Figure 2C - Example of CopyKat output for PTC and tumour RCC cells ####
# Global consumed by rccCK() below: patient to show
sampleList = 'PD37228'
rccCK = function(sampleList='all'){
rcc.srat = readRDS('/lustre/scratch117/casm/team274/mt22/CN_methods/RCC_PCT_ann3sub.rds')
rcc.srat$finalAnn = rcc.srat$finalAnn2
# Import chromInfo
chromInfo = read.delim('/lustre/scratch117/casm/team274/mt22/chrom_abspos_kb.txt',sep = '\t')
# Import Manifest
projMani = read_excel("/lustre/scratch117/casm/team274/mt22/projectManifest.xlsx",sheet = "RCC_mani")
# CopyKat results
copykat.results = readRDS('/lustre/scratch117/casm/team274/mt22/CN_methods/CopyKAT_output/v5_rcc.CK.normREF.default.2397/v5_CKresults_default_normREF_80perc_2397.rds')
if(sampleList != 'all'){
rcc.srat = subset(rcc.srat,subset = PDID %in% sampleList)
}
# Extract data for PD36793 only (as examples)
#----- Processing CopyKat results -------#
for(i in 1:length(copykat.results)){
# Get copyKat CNA matrix and prediction
CNA_summary_byCellType = data.frame()
CNA_mat = copykat.results[[i]]$CNAmat
colnames(CNA_mat) = gsub('^X','',colnames(CNA_mat))
colnames(CNA_mat) = gsub('\\.','-',colnames(CNA_mat))
pred = as.data.frame(copykat.results[[i]]$prediction)
sample = unique(rcc.srat@meta.data[rownames(rcc.srat@meta.data) %in% rownames(pred),]$PDID)
PDID=sample
if(length(sample) > 1){
message(paste0('More than 1 sample detected: i=',i,', samples are ',sample))
}else if(length(sample) <1){
next
}else if(length(sample)==1){
message(paste0('Checking sample ',sample))
}
# subset annrcc.srat object to keep only cells of that sample
srat = subset(rcc.srat, subset = PDID == sample)
# subset by annotated cell type
for(celltype in unique(srat$finalAnn)){
CNA_mat_sub = CNA_mat[,c(1:3,which(colnames(CNA_mat) %in% rownames(srat@meta.data[srat@meta.data$finalAnn == celltype,])))]
if(ncol(CNA_mat_sub) == 4){
chrom_tmp=data.frame(celltype = celltype,CNA_mat_sub)
colnames(chrom_tmp)[5] = 'mean_logCN'
}else if (ncol(CNA_mat_sub) > 4){
chrom_tmp = data.frame(celltype = celltype,CNA_mat_sub[,c(1:3)],mean_logCN = apply(CNA_mat_sub[,-c(1:3)],MARGIN = 1,FUN = mean))
}else if (ncol(CNA_mat_sub) < 4){
chrom_tmp = data.frame(celltype = celltype,CNA_mat_sub[,c(1:3)],mean_logCN = NA)
}
CNA_summary_byCellType = rbind(CNA_summary_byCellType,chrom_tmp)
}
# Remove X chromosome
CNA_summary_byCellType = CNA_summary_byCellType[CNA_summary_byCellType$chrom != 23,]
CNA_summary_byCellType$celltype = factor(CNA_summary_byCellType$celltype,levels=c('Tumour','PTC','Leukocytes'))
if(sampleList != 'all'){
CNA_summary_byCellType = CNA_summary_byCellType[CNA_summary_byCellType$celltype != 'Leukocytes',]
CNA_summary_byCellType$celltype = factor(CNA_summary_byCellType$celltype,levels=c('Tumour','PTC'))
}
####------------------ Generate Battenberg CN summary file ----------------####
# Battenberg .summary.csv file - only summarize Major Clone CNV, does not included CN states of minor clones
donorMani = projMani[projMani$PDID == sample,]
btb.fp = unique(donorMani$battenbergFp[!grepl('^n_',donorMani$SampleID)])
#----- Processing Battenberg data -------#
dna.data = annotateBTB(btb.fp,subCl.minSegLen = 1e7,PDID,tgtChrs=c(1:22),removeBalancedSegs=F,longFormat = T,method = 'totalCN')
# Remove chromosome X
dna.data = dna.data[dna.data$Chr != 23,]
# Add equivalent lines for other cell types
dna.data$celltype = 'Tumour'
tmp = rbind(data.frame(Idx=0,Chr=1,matNum=1,patNum=1,frac=1,segLen=0,type='maj',tumTot=2,tot2min='2:1',tumFrac=0.5,newIdx=0,posID=0,posType='Start',pos=1,idx=0,abspos_kb=0,celltype='PTC'),
data.frame(Idx=0,Chr=1,matNum=1,patNum=1,frac=1,segLen=0,type='maj',tumTot=2,tot2min='2:1',tumFrac=0.5,newIdx=0,posID=0,posType='Stop',pos=2874771,idx=0,abspos_kb=2874771,celltype='PTC'),
data.frame(Idx=0,Chr=1,matNum=1,patNum=1,frac=1,segLen=0,type='maj',tumTot=2,tot2min='2:1',tumFrac=0.5,newIdx=0,posID=0,posType='Start',pos=1,idx=0,abspos_kb=0,celltype='Leukocytes'),
data.frame(Idx=0,Chr=1,matNum=1,patNum=1,frac=1,segLen=0,type='maj',tumTot=2,tot2min='2:1',tumFrac=0.5,newIdx=0,posID=0,posType='Stop',pos=2874771,idx=0,abspos_kb=2874771,celltype='Leukocytes'))
dna.data = rbind(dna.data,tmp)
dna.data$log_CNratio = log(as.numeric(dna.data$tumTot)/2)
# Separate subclone and major clone CN profile
subCl.dna = dna.data[dna.data$type == 'sub',]
majCl.dna = dna.data[dna.data$type == 'maj',]
#----- Plotting copykat results! -------#
chromInfo2 = chromInfo[chromInfo$chrom != 23,]
plotFun = function(noFrame=T,noPlot=FALSE){
noFrame=T
layout(mat=matrix(c(1:nlevels(CNA_summary_byCellType$celltype)),ncol=1),
heights = rep(2,nlevels(CNA_summary_byCellType$celltype)))
for(celltype in levels(CNA_summary_byCellType$celltype)){
if(sampleList == 'all'){
par(mar=c(0.2,0.9,0.2,0.4),xpd=TRUE)
ylim = c(round(min(CNA_summary_byCellType$mean_logCN)-0.1,digits = 1),round(max(CNA_summary_byCellType$mean_logCN)+0.2,digits = 1))
text.cex = 0.7
ybottom=min(round(min(CNA_summary_byCellType$mean_logCN)-0.1,digits = 1),round(log(0.5)/2,2))
if((round(max(dna.data$log_CNratio),2) - round(max(CNA_summary_byCellType$mean_logCN)+0.1,digits = 1))>0.2){
ytop=max(round(max(CNA_summary_byCellType$mean_logCN)+0.1,digits = 1),round(max(dna.data$log_CNratio)/2,2))
type=2
}else{
ytop=max(round(max(CNA_summary_byCellType$mean_logCN)+0.1,digits = 1),round(max(dna.data$log_CNratio),2))
type=1
}
ytext = ytop + 0.1
chrom.y = 0.85
}else{
par(mar=c(0.2,0.9,0.2,0.4),xpd=TRUE)
ylim = c(round(min(CNA_summary_byCellType$mean_logCN)-0.1,digits = 1),round(max(CNA_summary_byCellType$mean_logCN)+0.2,digits = 1))
text.cex = 0.7
ybottom=min(round(min(CNA_summary_byCellType$mean_logCN)-0.1,digits = 1),round(log(0.5)/2,2))
ytop=max(round(max(CNA_summary_byCellType$mean_logCN)+0.1,digits = 1),round(max(dna.data$log_CNratio),2))
ytext = ytop + 0.1
chrom.y = 0.7
}
tmp = CNA_summary_byCellType[CNA_summary_byCellType$celltype == celltype,]
tmp = tmp[order(tmp$abspos,decreasing = F),]
dna = majCl.dna[majCl.dna$celltype == celltype,]
ncells = nrow(rcc.srat@meta.data[rcc.srat@meta.data$PDID == PDID & rcc.srat@meta.data$finalAnn == celltype,])
# Plot main frame
plot(CNA_summary_byCellType$abspos, CNA_summary_byCellType$mean_logCN,
las=1,
type='n',
#xlim=c(-15.5,15),
ylim=ylim,
xlab=ifelse(noFrame,'','Genomic Position'),
ylab=ifelse(noFrame,'',''),
#main=ifelse(noFrame,'',''),
xaxt=ifelse(noFrame,'n','s'),
yaxt=ifelse(noFrame,'n','s'),
frame.plot=F)
#text(x=1e3,y=ytext,celltype,cex=text.cex[1],family = 'Helvetica',font=2,adj = 0)
#text(x=2.86e9,y=ytext,paste0('n=',ncells),cex=text.cex[2],family = 'Helvetica',font=1,adj = 1)
#axis(2,at=c(0),labels = c(0),las=1,pos = 0,tck = -.02,lwd = 0.3,cex.axis=0.65,hadj = -0.8,padj = 0.5)
if(sampleList=='all'){
#axis(2,at=c(0),labels = c(0),las=1,pos = 0,tck = -.02,lwd = 0.3,cex.axis=0.75,hadj = -0.8,padj = 0.5)
axis(2,at=c(ybottom+0.05,0,ytop-0.05),labels = c('Low',0,'High'),las=1,pos = 0,tck = -.00,lwd = 0.3,cex.axis=0.6,hadj = 0.3,padj = 0.5)
#if((type==1) & (round(log(0.5),2) < ybottom)){
# axis(4,las=1,pos = max(chromInfo2$abspos_kb*1000),tck = -.02,lwd = 0.4,cex.axis=0.6,hadj = 2.1,
# at=c(0,round(log(3/2),2),round(log(4/2),2)),col.axis = '#b02c46',
# labels = c(2,3,4))
#}else if((type==1) & (round(log(0.5),2) > ybottom)){
# axis(4,las=1,pos = max(chromInfo2$abspos_kb*1000),tck = -.02,lwd = 0.4,cex.axis=0.6,hadj = 2.1,
# at=c(round(log(0.5),2),0,round(log(3/2),2),round(log(4/2),2)),col.axis = '#b02c46',
# labels = c(1,2,3,4))
#}else if((type==2) & (round(log(0.5)/2,2) < ybottom)){
# axis(4,las=1,pos = max(chromInfo2$abspos_kb*1000),tck = -.02,lwd = 0.4,cex.axis=0.6,hadj = 2.1,
# at=c(0,round(log(3/2)/2,2),round(log(4/2)/2,2)),col.axis = '#b02c46',
# labels = c(2,3,4))
#}else if((type==2) & (round(log(0.5)/2,2) > ybottom)){
# axis(4,las=1,pos = max(chromInfo2$abspos_kb*1000),tck = -.02,lwd = 0.4,cex.axis=0.6,hadj = 2.1,
# at=c(round(log(0.5)/2,2),0,round(log(3/2)/2,2),round(log(4/2)/2,2)),col.axis = '#b02c46',
# labels = c(1,2,3,4))
axis4 = c(round(log(0.5)/2,2),0,round(log(3/2)/2,2),round(log(4/2)/2,2))
names(axis4) = c(1,2,3,4)
axis(4,las=1,pos = max(chromInfo2$abspos_kb*1000),tck = -.02,lwd = 0.4,cex.axis=0.6,hadj = 2.1,
at=axis4[axis4>ybottom & ybottom < ytop],col.axis = '#b02c46',
labels = names(axis4[axis4>ybottom & ybottom < ytop]))
}else{
axis(2,at=c(ybottom+0.05,0,ytop-0.05),labels = c('Low',0,'High'),las=1,pos = 0,tck = -.00,lwd = 0.3,cex.axis=0.6,hadj = 0.1,padj = 0.5)
axis(4,las=1,pos = max(chromInfo2$abspos_kb*1000),tck = -.02,lwd = 0.3,cex.axis=0.6,hadj = 1.9,col.axis = '#b02c46',
at=c(round(log(0.5)/2,2),0,round(log(3/2)/2,2)),
labels = c(1,2,3))
}
#Plot background chromosome
xleft = c(0,chromInfo2[chromInfo2$arm == 'q' & chromInfo2$chrom!=22,]$abspos*1000)
xright = c(chromInfo2[chromInfo2$arm == 'q',]$abspos*1000)
text(x=1.2e9,y=ytext,paste0(celltype,'_',PDID,' (n=',ncells,')'),cex=text.cex[1],family = 'Helvetica',font=1,adj = 0)
col = replicate(c('white','#dbdbdb'),n = 22/2)
rect(xleft=xleft,
xright=xright,
ybottom=ybottom,
ytop=ytop,
col = col,
lty = 'blank')
#Black surrounding border
rect(xleft=min(xleft),
xright=max(xright),
ybottom=ybottom,
ytop=ytop,
col = colAlpha('white',0.0001),
border = 'black',lwd = 0.5)
#textSize = (xright-xleft)/xright[1]
#text(x=(xleft+xright)/2,y = 0.9,labels = c(1:22),cex = c(1.5*textSize),font = 2)
#text(x=(xleft+xright)/2,y = chrom.y,labels = c(1:22),cex = c(rep(0.7,10),rep(0.62,4),rep(0.5,4),rep(0.31,4)),font = 1)
# Plot ground truth
#segments(x0=min(xleft),x1 = max(xright),
# y0=0, y1=0,
# col = 'black')
rect(xleft = dna[dna$posType=='Start',]$abspos_kb*1000,
xright = dna[dna$posType=='Stop',]$abspos_kb*1000,
ybottom = 0,
ytop=dna[dna$posType=='Start',]$log_CNratio/2,
col=colAlpha('#b02c46',1),
border=colAlpha('#b02c46',1),lwd = 1.0)
# Plot subclone total CN
if(celltype == 'Tumour' & nrow(subCl.dna) > 0){
for(chr in unique(subCl.dna$Chr)){
if(unique(subCl.dna[subCl.dna$Chr == chr,]$log_CNratio == 0)){
lines(subCl.dna[subCl.dna$Chr == chr,]$abspos_kb*1000,
subCl.dna[subCl.dna$Chr == chr,]$log_CNratio/2,col='#4169E1',lwd=1.5)
}else{
rect(xleft = subCl.dna[subCl.dna$Chr == chr &subCl.dna$posType=='Start',]$abspos_kb*1000,
xright = subCl.dna[subCl.dna$Chr == chr &subCl.dna$posType=='Stop',]$abspos_kb*1000,
ybottom = 0,
ytop=subCl.dna[subCl.dna$Chr == chr & subCl.dna$posType=='Start',]$log_CNratio/2,
col=colAlpha('#4169E1',0.7),
border=colAlpha('#4169E1',0.7),lwd = 0.1)
}
}
}
# Plot CopyKat output
lines(x=tmp$abspos,tmp$mean_logCN,col='black',lwd=ifelse(sampleList == 'all',0.8,0.5))
segments(x0=min(xleft),x1 = max(xright),
y0=0.2, y1=0.2,
col = 'darkgrey',lty = 'dashed',lwd = 0.4)
segments(x0=min(xleft),x1 = max(xright),
y0=-0.2, y1=-0.2,
col = 'darkgrey',lty = 'dashed',lwd = 0.4)
}
}
if(sampleList != 'all'){
saveFig(file.path(resd,paste0('RCC_CK_',PDID)),plotFun,width = 2.6,height = 2.1,res=500)
}else{
saveFig(file.path(res,paste0('FigS1_CK_',PDID,'_v1e7')),plotFun,width = 2.3,height = 3.5,res=500)
}
}
}
# Figure 2Ba - Barplots of RCC CK classification ####
# Tabulates CopyKat aneuploid/diploid predictions per annotated cell type and
# per patient (PDID) in the RCC dataset, then saves stacked proportion
# barplots (one panel per cell type).
# Relies on names defined elsewhere in this file: saveFig() and the output
# directory `res` -- TODO confirm both are in scope when this is called.
rccBarplot.CK = function(){
  rcc.srat = readRDS('/lustre/scratch117/casm/team274/mt22/CN_methods/RCC_PCT_ann3sub.rds')
  # Extract CK output
  dd = rcc.srat@meta.data
  dd$finalAnn = dd$finalAnn2
  dd$finalAnn = factor(dd$finalAnn,levels = c('Leukocytes','PTC','Tumour'))
  # Recode CopyKat calls: NA -> 'Uncalled', 'aneuploid' -> 'Aneuploid',
  # anything else -> 'Diploid'
  dd$CKpred.normREF.default.80perc.2397 = ifelse(is.na(dd$CKpred.normREF.default.80perc.2397),'Uncalled',
                                                 ifelse(dd$CKpred.normREF.default.80perc.2397 == 'aneuploid','Aneuploid','Diploid'))
  # Drop uncalled cells: this figure variant shows called cells only
  dd = dd[dd$CKpred.normREF.default.80perc.2397 != 'Uncalled',]
  #dd$CKpred.normREF.default.80perc.2397 = factor(dd$CKpred.normREF.default.80perc.2397,levels = c('Aneuploid','Diploid','Uncalled'))
  dd$CKpred.normREF.default.80perc.2397 = factor(dd$CKpred.normREF.default.80perc.2397,levels = c('Aneuploid','Diploid'))
  #Define the layout: a short title panel on top, then one barplot row per
  #cell type; the last row is taller to leave room for rotated sample labels.
  plotFun = function(noFrame=FALSE,noPlot=FALSE){
    layout(matrix(c(1,2,3,4),ncol=1),heights = c(0.2,1,1,1.8))
    par(mar=c(0,0.6,1,0.6))
    # Empty panel used only to carry the figure title
    plot(0, 0,
         las=1,
         type='n',frame.plot = F,axes = F)
    title('RCC',cex.main=1,family = 'Helvetica',font=2)
    for(celltype in levels(dd$finalAnn)){
      print(celltype)
      par(mar=c(0.2,1.5,0.5,0.1),xpd=TRUE)
      # Per-patient proportions of Aneuploid vs Diploid calls for this cell type
      tmp=as.matrix(table(dd$CKpred.normREF.default.80perc.2397[dd$finalAnn == celltype],dd$PDID[dd$finalAnn == celltype]))
      tmp = sweep(tmp,2,colSums(tmp),'/')
      cols = c(Diploid = '#c8c8c8',
               Aneuploid = '#b02c46',
               Uncalled = '#474646')
      if(celltype == levels(dd$finalAnn)[length(levels(dd$finalAnn))]){
        # Bottom panel only: larger bottom margin so sample names fit below
        par(mar=c(4.4,1.5,0.5,0.1),xpd=TRUE)
        barplot(tmp,
                col=cols[rownames(tmp)],
                space=0.1,axes = FALSE,
                las = 1,names.arg = rep(NA,ncol(tmp)),border = F)
        #axis(2,at=c(1,0.5,0),las=1,pos = 0.1,tck=-0.02,cex.axis=0.8,lwd.ticks = 0,hadj = 0.5)
        # Rotated PDID labels under the bars; x positions are hand-tuned to
        # bar width 1 with space=0.1 -- TODO confirm if sample count changes
        text(x = seq(0.6,4.5,by = 1.105),y = -0.1,colnames(tmp),cex=0.7,family = 'Helvetica',font=1,srt=90,adj = 1)
        #text(x = 4.8,y = 0.5,celltype,cex=0.85,family = 'Helvetica',font=1,srt=270)
        # Cell-type label to the left of the panel (xpd=TRUE allows drawing
        # outside the plot region)
        text(x =-1.3,y = 0.5,celltype,cex=0.7,family = 'Helvetica',font=1,srt=90)
      }else{
        barplot(tmp,
                col=cols[rownames(tmp)],
                space=0.1,axes = FALSE,names.arg = rep(' ',ncol(tmp)),
                las = 1,main = '',border = F)
        #axis(2,at=c(1,0.5,0),las=1,pos = 0.1,tck=-0.02,cex.axis =0.8,lwd.ticks = 0,hadj = 0.5,)
        #text(x = 4.8,y = 0.5,celltype,cex=0.85,family = 'Helvetica',font=1,srt=270)
        text(x =-1.3,y = 0.5,celltype,cex=0.7,family = 'Helvetica',font=1,srt=90)
      }
    }
  }
  saveFig(file.path(res,paste0('Fig2Ba_RCC_CK_barplot_noUncalled')),plotFun,width = 0.6,height = 2.75,rawData = dd,res=500)
}
# Figure 2Ea - Barplots of NB CK classification ####
# Same structure as rccBarplot.CK but for the neuroblastoma dataset: stacked
# proportion barplots of CopyKat aneuploid/diploid calls per cell type and
# per patient. Relies on saveFig() and `res` defined elsewhere in this file.
nbBarplot.CK = function(){
  nb.srat = readRDS('/lustre/scratch117/casm/team274/mt22/CN_methods/NB_ann.rds')
  # Extract CK output
  dd = nb.srat@meta.data
  dd$finalAnn = factor(dd$finalAnn,levels = c('Leukocytes','Endothelium','Mesenchyme','Tumour'))
  # Fix the sample display order
  dd$PDID = factor(dd$PD_ID,levels = c("PD42184","PD42752-1","PD42752-2","PD46693","PD43255"))
  # Recode CopyKat calls: NA -> 'Uncalled', 'aneuploid' -> 'Aneuploid', else 'Diploid'
  dd$CKpred.normREF.default.80perc.2397 = ifelse(is.na(dd$CKpred.normREF.default.80perc.2397),'Uncalled',
                                                 ifelse(dd$CKpred.normREF.default.80perc.2397 == 'aneuploid','Aneuploid','Diploid'))
  # Drop uncalled cells from this figure variant
  dd = dd[dd$CKpred.normREF.default.80perc.2397 != 'Uncalled',]
  #dd$CKpred.normREF.default.80perc.2397 = factor(dd$CKpred.normREF.default.80perc.2397,levels = c('Aneuploid','Diploid','Uncalled'))
  dd$CKpred.normREF.default.80perc.2397 = factor(dd$CKpred.normREF.default.80perc.2397,levels = c('Aneuploid','Diploid'))
  #Define the layout: title panel, then one row per cell type (4 types here);
  #the bottom row is taller to accommodate rotated sample labels.
  plotFun = function(noFrame=FALSE,noPlot=FALSE){
    layout(matrix(c(1,2,3,4,5),ncol=1),heights = c(0.2,1,1,1,2))
    par(mar=c(0,0.6,0.8,0.6))
    # Empty panel used only for the title
    plot(0,0,
         las=1,
         type='n',frame.plot = F,axes = F)
    title('Neuroblastoma',cex.main=1,family = 'Helvetica',font=2)
    for(celltype in levels(dd$finalAnn)){
      print(celltype)
      par(mar=c(0.05,1.5,0.2,0.1),xpd=TRUE)
      #Define the empty plot area
      #plot(0,0,las=1,
      #     type='n',
      #     xlab='',xaxt='n',
      #     ylab='',yaxt='n',frame.plot=T)
      # Per-patient proportions of Aneuploid vs Diploid calls for this cell type
      tmp=as.matrix(table(dd$CKpred.normREF.default.80perc.2397[dd$finalAnn == celltype],dd$PDID[dd$finalAnn == celltype]))
      tmp = sweep(tmp,2,colSums(tmp),'/')
      cols = c(Diploid = '#c8c8c8',
               Aneuploid = '#b02c46',
               Uncalled = '#474646')
      if(celltype == levels(dd$finalAnn)[length(levels(dd$finalAnn))]){
        # Bottom panel only: wider bottom margin so sample names fit below
        par(mar=c(4.9,1.5,0.2,0.1),xpd=TRUE)
        barplot(tmp,
                col=cols[rownames(tmp)],
                space=0.1,axes = FALSE,
                las = 1,names.arg = rep(NA,ncol(tmp)),border = F)
        #axis(2,at=c(1,0.5,0),las=1,pos = 0.1,tck=-0.02,cex.axis=0.75,lwd.ticks = 0,hadj = 0.5)
        #text(x = seq(0.5,6,by = 1.2),y = -0.1,colnames(tmp),cex=0.8,family = 'Helvetica',font=1,srt=45,adj = 1)
        #text(x = 5.8,y = 0.5,celltype,cex=0.65,family = 'Helvetica',font=1,srt=270)
        #axis(2,at=c(1,0.5,0),las=1,pos = 0.1,tck=-0.02,cex.axis=0.8,lwd.ticks = 0,hadj = 0.5)
        #text(x = seq(0.5,6,by = 1.2),y = -0.1,colnames(tmp),cex=0.9,family = 'Helvetica',font=1,srt=90,adj = 0.95)
        # Rotated PDID labels; x positions hand-tuned to bar width 1,
        # space=0.1 -- TODO confirm if sample count changes
        text(x = seq(0.65,6,by = 1.105),y = -0.1,colnames(tmp),cex=0.7,family = 'Helvetica',font=1,srt=90,adj = 0.95)
        # Cell-type label to the left of the panel
        text(x = -1.3,y = 0.5,celltype,cex=0.7,family = 'Helvetica',font=1,srt=90)
      }else{
        barplot(tmp,
                col=cols[rownames(tmp)],
                space=0.1,axes = FALSE,names.arg = rep(' ',ncol(tmp)),
                las = 1,main = '',border = F)
        #axis(2,at=c(1,0.5,0),las=1,pos = 0.1,tck=-0.02,cex.axis =0.75,lwd.ticks = 0,hadj = 0.5,)
        text(x =-1.3,y = 0.5,celltype,cex=0.7,family = 'Helvetica',font=1,srt=90)
      }
    }
  }
  #saveFig(file.path(resDir,paste0('TEST')),plotFun,width = 1.7,height = 3.2,rawData = dd)
  saveFig(file.path(res,paste0('NB_CK_barplot_noUncalled')),plotFun,width = 0.7,height = 3.0,rawData = dd,res=500)
}
# Figure 2Eb - Barplots of NB AI classification ####
# AlleleIntegrator counterpart of nbBarplot.CK: stacked proportion barplots of
# per-cell AlleleIntegrator Tumour/Normal calls per cell type and per patient.
# Relies on saveFig() and `res` defined elsewhere in this file.
nbBarplot.AI = function(){
  nb.srat = readRDS('/lustre/scratch117/casm/team274/mt22/CN_methods/NB_ann.rds')
  # Extract CK output
  dd = nb.srat@meta.data
  dd$finalAnn = factor(dd$finalAnn,levels = c('Leukocytes','Endothelium','Mesenchyme','Tumour'))
  # Fix the sample display order (note: differs from nbBarplot.CK -- PD42184
  # is last here; presumably intentional for this figure, confirm)
  dd$PDID = factor(dd$PD_ID,levels = c("PD42752-1","PD42752-2","PD46693","PD43255","PD42184"))
  # Recode AI calls: NA/'Uncalled' -> 'Uncalled', 'abbFrac' (aberrant allele
  # fraction) -> 'Tumour', anything else -> 'Normal'
  dd$AI_sc_call = ifelse((is.na(dd$AI_sc_call)|dd$AI_sc_call == 'Uncalled'),'Uncalled',
                         ifelse(dd$AI_sc_call == 'abbFrac','Tumour','Normal'))
  # Drop uncalled cells from this figure variant
  dd = dd[dd$AI_sc_call != 'Uncalled',]
  #dd$AI_sc_call = factor(dd$AI_sc_call,levels = c('Tumour','Normal','Uncalled'))
  dd$AI_sc_call = factor(dd$AI_sc_call,levels = c('Tumour','Normal'))
  #Define the layout: title panel, then one row per cell type; bottom row
  #taller for rotated sample labels.
  plotFun = function(noFrame=FALSE,noPlot=FALSE){
    layout(matrix(c(1,2,3,4,5),ncol=1),heights = c(0.2,1,1,1,2.2))
    par(mar=c(0,0.6,0.8,0.6))
    # Empty panel used only for the title
    plot(0,0,
         las=1,
         type='n',frame.plot = F,axes = F)
    title('Neuroblastoma',cex.main=1,family = 'Helvetica',font=2)
    for(celltype in levels(dd$finalAnn)){
      par(mar=c(0.05,1.5,0.2,0.1),xpd=TRUE)
      # Per-patient proportions of Tumour vs Normal calls for this cell type
      tmp=as.matrix(table(dd$AI_sc_call[dd$finalAnn == celltype],dd$PDID[dd$finalAnn == celltype]))
      tmp = sweep(tmp,2,colSums(tmp),'/')
      cols = c(Normal = '#c8c8c8',
               Tumour = '#b02c46',
               Uncalled = '#474646')
      if(celltype == levels(dd$finalAnn)[length(levels(dd$finalAnn))]){
        # Bottom panel only: wider bottom margin so sample names fit below
        par(mar=c(4.9,1.5,0.2,0.1),xpd=TRUE)
        barplot(tmp,
                col=cols[rownames(tmp)],
                space=0.1,axes = FALSE,
                las = 1,names.arg = rep(NA,ncol(tmp)),border = F)
        # Rotated PDID labels; x positions hand-tuned to bar width 1, space=0.1
        text(x = seq(0.65,6,by = 1.105),y = -0.1,colnames(tmp),cex=0.7,family = 'Helvetica',font=1,srt=90,adj = 0.95)
        # Cell-type label to the left of the panel
        text(x = -1.3,y = 0.5,celltype,cex=0.7,family = 'Helvetica',font=1,srt=90)
      }else{
        barplot(tmp,
                col=cols[rownames(tmp)],
                space=0.1,axes = FALSE,names.arg = rep(' ',ncol(tmp)),
                las = 1,main = '',border = F)
        text(x =-1.3,y = 0.5,celltype,cex=0.7,family = 'Helvetica',font=1,srt=90)
      }
    }
  }
  saveFig(file.path(res,paste0('Fig2Eb_NB_AI_barplot_noUncalled')),plotFun,width = 0.7,height = 3.0,rawData = dd,res=500)
}
# Figure 2F - Example of CopyKat output for normal and tumour NB cells ####
# Samples highlighted in the main figure; read as a global by the plotting
# code below.
sampleList = c('PD42184','PD42752-2')
# Figure S1 NB - CopyKat output for normal and tumour NB cells ####
# Minimum subclonal segment length (bp) passed to annotateBTB() inside nbCK().
subCl.minSegLen = 2e7
# Genome-wide CopyKat CN profiles for the NB dataset (Figures 2F / S1).
# For each CopyKat run: averages the per-cell CNA matrix within each annotated
# cell type, then plots the mean log-CN track against the Battenberg (bulk
# DNA) ground-truth copy number (major clone in red, subclones in blue).
# Relies on the script-level global subCl.minSegLen, and on helpers defined
# elsewhere in this file: annotateBTB(), colAlpha(), saveFig(), `res`.
nbCK = function(){
  nb.srat = readRDS('/lustre/scratch117/casm/team274/mt22/CN_methods/NB_ann.rds')
  nb.srat$PDID = as.character(nb.srat$PD_ID)
  # Import chromInfo (per-chromosome-arm absolute genomic coordinates, kb)
  chromInfo = read.delim('/lustre/scratch117/casm/team274/mt22/chrom_abspos_kb.txt',sep = '\t')
  # Import Manifest
  projMani = read_excel("/lustre/scratch117/casm/team274/mt22/projectManifest.xlsx",sheet = "NB_mani")
  # CopyKat results (list, presumably one element per sample run -- confirmed
  # below by matching cell barcodes back to the Seurat metadata)
  copykat.results = readRDS('/lustre/scratch117/casm/team274/mt22/CN_methods/CopyKAT_output/v5_nb.CK.normREF.default.2397/v5_CKresults_default_normREF_80perc_2397.rds')
  #----- Processing CopyKat results -------#
  for(i in 1:length(copykat.results)){
    # Get copyKat CNA matrix and prediction
    CNA_summary_byCellType = data.frame()
    CNA_mat = copykat.results[[i]]$CNAmat
    # Undo R's column-name mangling of cell barcodes ('X' prefix added to
    # names starting with a digit; '.' substituted for '-')
    colnames(CNA_mat) = gsub('^X','',colnames(CNA_mat))
    colnames(CNA_mat) = gsub('\\.','-',colnames(CNA_mat))
    pred = as.data.frame(copykat.results[[i]]$prediction)
    # Identify which sample this run belongs to via its cell barcodes
    sample = unique(nb.srat@meta.data[rownames(nb.srat@meta.data) %in% rownames(pred),]$PDID)
    PDID=sample
    if(length(sample) > 1){
      # NOTE(review): execution continues with a length>1 `sample` here,
      # which would break the subset() below -- confirm this never happens
      message(paste0('More than 1 sample detected: i=',i,', samples are ',sample))
    }else if(length(sample) <1){
      next
    }else if(length(sample)==1){
      message(paste0('Checking sample ',sample))
    }
    # subset annnb.srat object to keep only cells of that sample
    srat = subset(nb.srat, subset = PDID == sample)
    # subset by annotated cell type: average each genomic bin's log-CN ratio
    # across all cells of the type. Columns 1:3 of CNA_mat are genomic
    # coordinates; remaining columns are cells.
    for(celltype in unique(srat$finalAnn)){
      CNA_mat_sub = CNA_mat[,c(1:3,which(colnames(CNA_mat) %in% rownames(srat@meta.data[srat@meta.data$finalAnn == celltype,])))]
      if(ncol(CNA_mat_sub) == 4){
        # Exactly one cell of this type: its column is already the "mean"
        chrom_tmp=data.frame(celltype = celltype,CNA_mat_sub)
        colnames(chrom_tmp)[5] = 'mean_logCN'
      }else if (ncol(CNA_mat_sub) > 4){
        chrom_tmp = data.frame(celltype = celltype,CNA_mat_sub[,c(1:3)],mean_logCN = apply(CNA_mat_sub[,-c(1:3)],MARGIN = 1,FUN = mean))
      }else if (ncol(CNA_mat_sub) < 4){
        # No cells of this type in this sample: NA track
        chrom_tmp = data.frame(celltype = celltype,CNA_mat_sub[,c(1:3)],mean_logCN = NA)
      }
      CNA_summary_byCellType = rbind(CNA_summary_byCellType,chrom_tmp)
    }
    # Remove X chromosome
    CNA_summary_byCellType = CNA_summary_byCellType[CNA_summary_byCellType$chrom != 23,]
    CNA_summary_byCellType$celltype = factor(CNA_summary_byCellType$celltype,levels=c("Tumour","Mesenchyme", "Endothelium",'Leukocytes'))
    ####------------------ Generate Battenberg CN summary file ----------------####
    donorMani = projMani[projMani$PDID == sample,]
    btb.fp = unique(donorMani$battenbergFp)
    #----- Processing Battenberg data -------#
    dna.data = annotateBTB(btb.fp,subCl.minSegLen = subCl.minSegLen,PDID,tgtChrs=c(1:22),removeBalancedSegs=F,longFormat = T,method = 'totalCN')
    # Remove X chromosome
    dna.data = dna.data[dna.data$Chr != 23,]
    dna.data$celltype = 'Tumour'
    # Placeholder flat (diploid, 2:1) chr1 Start/Stop segments for each normal
    # cell type, so every panel has a ground-truth baseline to draw
    tmp = rbind(data.frame(Idx=0,Chr=1,matNum=1,patNum=1,frac=1,segLen=0,type='maj',tumTot=2,tot2min='2:1',tumFrac=0.5,newIdx=0,posID=0,posType='Start',pos=1,idx=0,abspos_kb=0,celltype='Endothelium'),
                data.frame(Idx=0,Chr=1,matNum=1,patNum=1,frac=1,segLen=0,type='maj',tumTot=2,tot2min='2:1',tumFrac=0.5,newIdx=0,posID=0,posType='Stop',pos=2874771,idx=0,abspos_kb=2874771,celltype='Endothelium'),
                data.frame(Idx=0,Chr=1,matNum=1,patNum=1,frac=1,segLen=0,type='maj',tumTot=2,tot2min='2:1',tumFrac=0.5,newIdx=0,posID=0,posType='Start',pos=1,idx=0,abspos_kb=0,celltype='Leukocytes'),
                data.frame(Idx=0,Chr=1,matNum=1,patNum=1,frac=1,segLen=0,type='maj',tumTot=2,tot2min='2:1',tumFrac=0.5,newIdx=0,posID=0,posType='Stop',pos=2874771,idx=0,abspos_kb=2874771,celltype='Leukocytes'),
                data.frame(Idx=0,Chr=1,matNum=1,patNum=1,frac=1,segLen=0,type='maj',tumTot=2,tot2min='2:1',tumFrac=0.5,newIdx=0,posID=0,posType='Start',pos=1,idx=0,abspos_kb=0,celltype='Mesenchyme'),
                data.frame(Idx=0,Chr=1,matNum=1,patNum=1,frac=1,segLen=0,type='maj',tumTot=2,tot2min='2:1',tumFrac=0.5,newIdx=0,posID=0,posType='Stop',pos=2874771,idx=0,abspos_kb=2874771,celltype='Mesenchyme'))
    dna.data = rbind(dna.data,tmp)
    # Log ratio of tumour total CN to the diploid baseline (2)
    dna.data$log_CNratio = log(as.numeric(dna.data$tumTot)/2)
    subCl.dna = dna.data[dna.data$type == 'sub',]
    majCl.dna = dna.data[dna.data$type == 'maj',]
    #----- Plotting copykat results! -------#
    chromInfo2 = chromInfo[chromInfo$chrom != 23,]
    plotFun = function(noFrame=TRUE,noPlot=FALSE){
      noFrame=T
      # Set layout: one panel per cell type, stacked vertically
      layout(mat=matrix(c(1:nlevels(CNA_summary_byCellType$celltype)),ncol=1),
             heights = rep(2,nlevels(CNA_summary_byCellType$celltype)))
      for(celltype in levels(CNA_summary_byCellType$celltype)){
        par(mar=c(0.2,0.6,0.8,0.6),xpd=TRUE)
        tmp = CNA_summary_byCellType[CNA_summary_byCellType$celltype == celltype,]
        dna = majCl.dna[majCl.dna$celltype == celltype,]
        ncells = nrow(nb.srat@meta.data[nb.srat@meta.data$PDID == PDID & nb.srat@meta.data$finalAnn == celltype,])
        # Set params for plotting: shared y-range across all panels so the
        # tracks are visually comparable
        ylim = c(round(min(CNA_summary_byCellType[!is.na(CNA_summary_byCellType$mean_logCN),]$mean_logCN)-0.1,digits = 1),round(max(CNA_summary_byCellType[!is.na(CNA_summary_byCellType$mean_logCN),]$mean_logCN)+0.2,digits = 1))
        ybottom=min(round(min(CNA_summary_byCellType[!is.na(CNA_summary_byCellType$mean_logCN),]$mean_logCN)-0.1,digits = 1),round(log(0.5)/2,2))
        ytop=max(round(max(CNA_summary_byCellType[!is.na(CNA_summary_byCellType$mean_logCN),]$mean_logCN)+0.1,digits = 1),round(max(dna.data$log_CNratio)/2,2))
        ytext = ytop + 0.1
        # Plot main frame (empty; all elements drawn manually below)
        plot(CNA_summary_byCellType$abspos, CNA_summary_byCellType$mean_logCN,
             las=1,
             type='n',
             ylim=ylim,
             xlab=ifelse(noFrame,'','Genomic Position'),
             ylab=ifelse(noFrame,'',''),
             xaxt=ifelse(noFrame,'n','s'),
             yaxt=ifelse(noFrame,'n','s'),
             frame.plot=F)
        #Plot background chromosome: alternating shaded rectangles, one per
        #chromosome, delimited by each chromosome's q-arm end
        xleft = c(0,chromInfo2[chromInfo2$arm == 'q' & chromInfo2$chrom!=22,]$abspos*1000)
        xright = c(chromInfo2[chromInfo2$arm == 'q',]$abspos*1000)
        # Panel title: celltype_PDID (n=cells)
        text(x=9e8,y=ytext,paste0(celltype,'_',PDID,' (n=',ncells,')'),cex=0.6,family = 'Helvetica',font=2,adj = 0)
        # Left axis: qualitative Low/0/High; right axis: integer copy number
        # ticks at log(n/2)/2 positions
        axis(2,at=c(ybottom+0.05,0,ytop-0.05),labels = c('Low',0,'High'),las=1,pos = 0,tck = -.00,lwd = 0.3,cex.axis=0.6,hadj = 0.3,padj = 0.5)
        axis(4,las=1,pos = max(chromInfo2$abspos_kb*1000),tck = -.02,lwd = 0.7,cex.axis=0.6,hadj = 1.5,col='black',
             at=c(round(log(0.5)/2,2),0,round(log(3/2)/2,2),round(log(4/2)/2,2)),col.axis = '#b02c46',
             labels = c(1,2,3,4))
        col = replicate(c('white','lightgrey'),n = 22/2)
        rect(xleft=xleft,
             xright=xright,
             ybottom=ybottom,
             ytop=ytop,
             col = col,
             lty = 'blank')
        #Black surrounding border (near-transparent fill so tracks beneath stay visible)
        rect(xleft=min(xleft),
             xright=max(xright),
             ybottom=ybottom,
             ytop=ytop,
             col = colAlpha('white',0.0001),
             border = 'black',lwd = 0.4)
        #text(x=(xleft+xright)/2,y = 0.72,labels = c(1:22),cex = c(rep(0.7,10),rep(0.62,4),rep(0.53,4),rep(0.31,4)),font = 1)
        # Plot ground truth: major-clone Battenberg segments as red rectangles
        rect(xleft = dna[dna$posType=='Start',]$abspos_kb*1000,
             xright = dna[dna$posType=='Stop',]$abspos_kb*1000,
             ybottom = 0,
             ytop=dna[dna$posType=='Start',]$log_CNratio/2,
             col=colAlpha('#b02c46',1),
             border=colAlpha('#b02c46',1),lwd = 0.8)
        # Plot subclone total CN (blue), tumour panel only; copy-neutral
        # subclonal segments are drawn as lines, gains/losses as rectangles
        if(celltype == 'Tumour' & nrow(subCl.dna) > 0){
          for(chr in unique(subCl.dna$Chr)){
            for(logR in unique(subCl.dna[subCl.dna$Chr == chr,]$log_CNratio)){
              d = subCl.dna[subCl.dna$Chr == chr & subCl.dna$log_CNratio == logR,]
              if(logR == 0){
                lines(d$abspos_kb*1000,d$log_CNratio/2,col='#4169E1',lwd=1.0)
              }else{
                rect(xleft = d[d$posType=='Start',]$abspos_kb*1000,
                     xright = d[d$posType=='Stop',]$abspos_kb*1000,
                     ybottom = 0,
                     ytop=d[d$posType=='Start',]$log_CNratio/2,
                     col=colAlpha('#4169E1',0.7),
                     border=colAlpha('#4169E1',0.7),lwd = 0)
              }
            }
          }
        }
        # Plot CopyKat output: mean log-CN track plus +/-0.2 guide lines
        lines(x=tmp$abspos,tmp$mean_logCN,col='black',lwd=0.7)
        segments(x0=min(xleft),x1 = max(xright),
                 y0=0.2, y1=0.2,
                 col = 'darkgrey',lty = 'dashed',lwd = 0.4)
        segments(x0=min(xleft),x1 = max(xright),
                 y0=-0.2, y1=-0.2,
                 col = colAlpha('darkgrey',0.9),lty = 'dashed',lwd = 0.4)
      }
    }
    saveFig(file.path(res,paste0('FigS2_NB_CK_',PDID,'_',subCl.minSegLen)),plotFun,width = 2.0,height = 3.2,res=500)
  }
}
# Figure 2C and S2 - Example of AlleleIntegrator output for normal and tumour RCC cells ####
# Script-level parameters read by plotMAF() below.
subCl.minSegLen = 2e7  # minimum subclonal segment length (bp) for annotateBTB()
cov=500                # approx. reads per bin when aggregating the MAF track
tumour = 'NB'          # tumour type to plot ('RCC' or 'NB')
sampleList = 'all'     # 'all' -> supplementary layout; otherwise main-figure layout
# Genome-wide MAF (maternal allele fraction) plots from AlleleIntegrator
# output (Figures 2C / S2): one panel per annotated cell type per donor,
# overlaid with the Battenberg allelic-ratio ground truth (major clone red,
# subclones blue).
#
# @param tumour 'RCC' or 'NB'; selects the manifest sheet and file naming.
# @param sampleList 'all' for the supplementary multi-panel layout; any other
#   value selects the main-figure layout (and drops Leukocytes for RCC).
# Relies on script-level globals: chromInfo, cov, subCl.minSegLen, res, and
# rcc.srat / nb.srat (for per-cell-type cell counts) -- TODO confirm these
# Seurat objects are loaded before calling. Helpers aggregateByLists(),
# annotateBTB(), colAlpha(), saveFig() are defined elsewhere in this file.
plotMAF = function(tumour,sampleList='all'){
  if(tumour == 'RCC'){
    sheet = 'RCC_mani'
  }else if(tumour == 'NB'){
    sheet = 'NB_mani'
  }
  # Import Manifest
  projMani = read_excel("/lustre/scratch117/casm/team274/mt22/projectManifest.xlsx",sheet = sheet)
  if(sheet == 'RCC_mani'){
    # Drop normal ('n_') samples from the RCC manifest
    projMani = projMani[!grepl('^n_',projMani$SampleID),]
  }
  outdir = '/lustre/scratch117/casm/team274/mt22/CN_methods/alleleIntegrator_output/'
  for(PDID in unique(projMani$PDID)){
    donorMani = projMani[projMani$PDID == PDID,]
    ####------------------ Get MAF output ----------------####
    if(sheet == 'RCC_mani'){
      f = list.files(outdir,pattern = paste0(PDID,"_gCnts_allhSNPs.RDS"))
    }else if(sheet == 'NB_mani'){
      f = list.files(outdir,pattern = paste0(PDID,"tmp_gCnts_allhSNPs.RDS"))
    }
    gCnts = readRDS(file.path(outdir,f))
    # Aggregate maternal/paternal allele counts per cluster (cell type)
    ctmp = aggregateByLists(gCnts, assays = c("matCount", "patCount"), gCnts$clusterID)
    colnames(ctmp) = gsub("^cellID$", "clusterID", colnames(ctmp))
    ctmp$totCount = ctmp$patCount + ctmp$matCount
    ctmp$MAF = ctmp$matCount/ctmp$totCount
    # Parse 'chr:pos_...' region IDs into chromosome and position
    ctmp$chr = sapply(strsplit(ctmp$regionID,split = ':'),'[',1)
    ctmp$chr = gsub('X',23,ctmp$chr)
    ctmp$chr = as.numeric(as.character(ctmp$chr))
    ctmp$pos = sapply(strsplit(ctmp$regionID,split = ':'),'[',2)
    ctmp$pos = sapply(strsplit(ctmp$pos,split = '_'),'[',1)
    ctmp$pos = as.numeric(ctmp$pos)
    # Get absolute genomic position: offset each SNP by the cumulative end of
    # the previous chromosome's q arm (chromInfo is a script-level global)
    ctmp$abspos_kb = ctmp$pos/1000 # if chromosome 1, abspos = pos
    for(r in seq_len(nrow(ctmp))){
      chrom = as.numeric(ctmp$chr[r])
      if (chrom > 1){
        ctmp$abspos_kb[r] = ctmp$abspos_kb[r] + (chromInfo[(chromInfo$chrom == (chrom-1)) & (chromInfo$arm == 'q'),]$abspos_kb)
      }
    }
    # BUGFIX: the original sorted with order(c(ctmp$chr,ctmp$abspos_kb)),
    # which concatenates the two key vectors into one of length 2*nrow(ctmp);
    # the resulting permutation contains out-of-range row indices and silently
    # padded the table with NA rows. order() takes multiple keys as separate
    # arguments.
    ctmp = ctmp[order(ctmp$chr,ctmp$abspos_kb),]
    # Make region IDs unique per cluster for the match() below
    ctmp$regionID = paste0(ctmp$clusterID,'_',ctmp$regionID)
    #### Aggregating by read coverage: group consecutive SNPs (within a
    #### cluster) into bins of ~`cov` total reads to smooth the MAF track
    out = ctmp %>% arrange(clusterID,abspos_kb) %>% group_by(clusterID) %>% summarise(totCnt.cumsum = cumsum(totCount),regionID=regionID)
    m=match(out$regionID,ctmp$regionID)
    # Re-attach the full per-SNP columns alongside the cumulative counts
    out = cbind(out[,-c(1,3)],ctmp[m,])
    out$readCovBin = floor(out$totCnt.cumsum / cov) + 1
    # Per bin: accumulate allele counts; keep the last SNP (max cumsum) as the
    # bin's end position
    out2 = out %>% arrange(clusterID,readCovBin,abspos_kb) %>%
      group_by(clusterID,readCovBin) %>%
      mutate(patCnt.cumsum = cumsum(patCount),matCnt.cumsum = cumsum(matCount)) %>%
      filter(totCnt.cumsum == max(totCnt.cumsum))
    # First SNP of each bin gives the bin's start position
    startPos = out %>% arrange(clusterID,readCovBin,abspos_kb) %>%
      group_by(clusterID,readCovBin) %>%
      filter(totCnt.cumsum == min(totCnt.cumsum))
    startPos$start = startPos$abspos_kb
    # NOTE(review): positional column indices (2,11,12) are fragile if the
    # upstream column layout changes -- confirm they still select
    # readCovBin/clusterID/start
    out2 = merge(out2,startPos[,c(2,11,12)],by=c('clusterID','readCovBin'))
    out2$midPos = (out2$abspos_kb + out2$start)/2
    # Binned MAF: maternal reads over total reads in the bin
    out2$MAF.readCovBin = out2$matCnt.cumsum / (out2$matCnt.cumsum+out2$patCnt.cumsum)
    out2 = arrange(out2,clusterID,readCovBin)
    #----- Plotting AlleleIntegrator MAF results! -------#
    out2 = out2[out2$chr != 23,]
    chromInfo2 = chromInfo[chromInfo$chrom != 23,]
    if(sheet == 'RCC_mani'){
      out2$clusterID[out2$clusterID == 'Renal_cell_carcinoma'] = 'Tumour'
      out2$clusterID[out2$clusterID == 'Proximal_tubuluar_cells'] = 'PTC'
      out2$clusterID = factor(out2$clusterID,levels = c('Tumour','PTC','Leukocytes'))
      if(sampleList != 'all'){
        # Main-figure layout drops the Leukocyte panel
        out2=out2[out2$clusterID != 'Leukocytes',]
        out2$clusterID = factor(out2$clusterID,levels = c('Tumour','PTC'))
      }
    }else if(sheet == 'NB_mani'){
      out2$clusterID = factor(out2$clusterID,levels = c('Tumour','Mesenchyme','Endothelium','Leukocytes'))
    }
    ####------------------ Generate Battenberg CN summary file ----------------####
    btb.fp = unique(donorMani$battenbergFp)[!is.na(unique(donorMani$battenbergFp))]
    #----- Processing Battenberg data -------#
    dna.data = annotateBTB(btb.fp,subCl.minSegLen = subCl.minSegLen,PDID,tgtChrs=c(1:22),removeBalancedSegs=F,longFormat = T,method = 'allelicRatio')
    # Remove X chromosome
    dna.data = dna.data[dna.data$Chr != 23,]
    # Placeholder balanced (tumFrac=0.5) chr1 Start/Stop segments for the
    # normal cell types so each panel has a ground-truth baseline to draw
    if(sheet=='NB_mani'){
      dna.data$clusterID = 'Tumour'
      tmp = rbind(data.frame(Idx=0,Chr=1,matNum=1,patNum=1,frac=1,segLen=0,type='maj',tumTot=2,tot2min='2:1',tumFrac=0.5,newIdx=0,posID=0,posType='Start',pos=1,idx=0,abspos_kb=0,clusterID='Endothelium'),
                  data.frame(Idx=0,Chr=1,matNum=1,patNum=1,frac=1,segLen=0,type='maj',tumTot=2,tot2min='2:1',tumFrac=0.5,newIdx=0,posID=0,posType='Stop',pos=2874771,idx=0,abspos_kb=2874771,clusterID='Endothelium'),
                  data.frame(Idx=0,Chr=1,matNum=1,patNum=1,frac=1,segLen=0,type='maj',tumTot=2,tot2min='2:1',tumFrac=0.5,newIdx=0,posID=0,posType='Start',pos=1,idx=0,abspos_kb=0,clusterID='Leukocytes'),
                  data.frame(Idx=0,Chr=1,matNum=1,patNum=1,frac=1,segLen=0,type='maj',tumTot=2,tot2min='2:1',tumFrac=0.5,newIdx=0,posID=0,posType='Stop',pos=2874771,idx=0,abspos_kb=2874771,clusterID='Leukocytes'),
                  data.frame(Idx=0,Chr=1,matNum=1,patNum=1,frac=1,segLen=0,type='maj',tumTot=2,tot2min='2:1',tumFrac=0.5,newIdx=0,posID=0,posType='Start',pos=1,idx=0,abspos_kb=0,clusterID='Mesenchyme'),
                  data.frame(Idx=0,Chr=1,matNum=1,patNum=1,frac=1,segLen=0,type='maj',tumTot=2,tot2min='2:1',tumFrac=0.5,newIdx=0,posID=0,posType='Stop',pos=2874771,idx=0,abspos_kb=2874771,clusterID='Mesenchyme'))
    }else if(sheet=='RCC_mani'){
      dna.data$clusterID = 'Tumour'
      tmp = rbind(data.frame(Idx=0,Chr=1,matNum=1,patNum=1,frac=1,segLen=0,type='maj',tumTot=2,tot2min='2:1',tumFrac=0.5,newIdx=0,posID=0,posType='Start',pos=1,idx=0,abspos_kb=0,clusterID='PTC'),
                  data.frame(Idx=0,Chr=1,matNum=1,patNum=1,frac=1,segLen=0,type='maj',tumTot=2,tot2min='2:1',tumFrac=0.5,newIdx=0,posID=0,posType='Stop',pos=2874771,idx=0,abspos_kb=2874771,clusterID='PTC'),
                  data.frame(Idx=0,Chr=1,matNum=1,patNum=1,frac=1,segLen=0,type='maj',tumTot=2,tot2min='2:1',tumFrac=0.5,newIdx=0,posID=0,posType='Start',pos=1,idx=0,abspos_kb=0,clusterID='Leukocytes'),
                  data.frame(Idx=0,Chr=1,matNum=1,patNum=1,frac=1,segLen=0,type='maj',tumTot=2,tot2min='2:1',tumFrac=0.5,newIdx=0,posID=0,posType='Stop',pos=2874771,idx=0,abspos_kb=2874771,clusterID='Leukocytes'))
    }
    dna.data = rbind(dna.data,tmp)
    subCl.dna = dna.data[dna.data$type == 'sub',]
    majCl.dna = dna.data[dna.data$type == 'maj',]
    plotFun = function(noFrame=T,noPlot=FALSE){
      # BUGFIX: the original sized the layout from CNA_summary_byCellType,
      # a variable that only exists inside nbCK()'s scope; the panel count
      # here is the number of cluster levels in out2.
      layout(mat=matrix(seq_len(nlevels(out2$clusterID)),ncol=1),
             heights = rep(2,nlevels(out2$clusterID)))
      # Per-layout plotting parameters (margins/label sizes); text.y is
      # currently unused (label y is computed inline below)
      params = data.frame(sheet = c('NB_mani','RCC_mani','NB_mani','RCC_mani'),
                          sampleList=c('all','all','PD42184','PD37228'),
                          mar = c('0.1,0.6,0.8,0.6','0.1,0.6,0.8,0.6','0.5,0.8,0.2,0.6','0.4,0.8,0.2,0.6'),
                          text.cex2=c(0.7,0.7,0.7,0.7),
                          text.cex1=c(0.7,0.7,0.7,0.7),
                          text.y = c(1.37,1.37,1.32,1.32))
      for(celltype in levels(out2$clusterID)){
        par(mar=as.vector(as.numeric(strsplit(params[params$sheet == sheet & params$sampleList == sampleList,]$mar,split = ',')[[1]])),xpd=TRUE)
        tmp = out2[out2$clusterID == celltype,]
        dna = majCl.dna[majCl.dna$clusterID == celltype,]
        dna = dna[order(dna$abspos_kb,decreasing = F),]
        # Cell counts come from the globally loaded Seurat objects --
        # TODO confirm rcc.srat/nb.srat are loaded before calling plotMAF
        if(sheet == 'RCC_mani'){
          ncells = nrow(rcc.srat@meta.data[rcc.srat@meta.data$PDID == PDID & rcc.srat@meta.data$finalAnn == celltype,])
        }else if (sheet == 'NB_mani'){
          ncells = nrow(nb.srat@meta.data[nb.srat@meta.data$PD_ID == PDID & nb.srat@meta.data$finalAnn == celltype,])
        }
        # Plot main frame (empty; all elements drawn manually below)
        plot(out2$midPos*1000, out2$MAF.readCovBin,
             las=1,
             type='n',xaxt='n',yaxt='n',
             ylim=c(-0.1,1.3),
             frame.plot=F)
        # Panel title (left) and cell count (right), above the track
        text(x=1e3,y=ifelse('all'%in% sampleList,1.37,1.32),paste0(celltype,'_',PDID),cex=params[params$sheet == sheet & params$sampleList == sampleList,]$text.cex1,family = 'Helvetica',font=2,adj = 0)
        text(x=2.86e9,y=ifelse('all'%in% sampleList,1.37,1.32),paste0('n=',ncells),cex=params[params$sheet == sheet & params$sampleList == sampleList,]$text.cex2,family = 'Helvetica',font=1,adj = 1)
        # MAF axes at 0, 1/2, 1 on both sides
        axis(2,at=c(0,0.5,1),labels = c(0,'1/2',1),las=1,pos = 0,tck = -.02,lwd = 0.3,cex.axis=0.7,hadj = 0.2,padj = 0.5)
        axis(4,at=c(0,0.5,1),labels = c(0,'1/2',1),las=1,pos = max(chromInfo2$abspos_kb*1000),tck = -.02,lwd = 0.3,cex.axis=0.7,hadj = 1.0,padj = 0.5,col.axis='#b02c46')
        #Plot background chromosome: alternating shaded rectangles per chromosome
        xleft = c(0,chromInfo2[chromInfo2$arm == 'q' & chromInfo2$chrom!=22,]$abspos_kb*1000)
        xright = c(chromInfo2[chromInfo2$arm == 'q',]$abspos_kb*1000)
        col = replicate(c('white','lightgrey'),n = 22/2)
        rect(xleft=xleft,
             xright=xright,
             ybottom=-0.1,
             ytop=1.18,
             col = col,
             lty = 'blank')
        #Black surrounding border (near-transparent fill keeps tracks visible)
        rect(xleft=min(xleft),
             xright=max(xright),
             ybottom=-0.1,
             ytop=1.18,
             col = colAlpha('white',0.0001),
             border = 'black',lwd = 0.4)
        # Plot chromosome number
        #text(x=(xleft+xright)/2,y = 1.095,labels = c(1:22),cex = c(rep(0.7,10),rep(0.62,4),rep(0.53,4),rep(0.31,4)),font = 1)
        # Plot AI MAF: one point per read-coverage bin; slightly more opaque
        # for the (typically sparser) non-tumour panels
        a = ifelse(celltype %in% c('Tumour','Leukocyte'),0.7,0.85)
        points(tmp$midPos*1000,tmp$MAF.readCovBin,
               pch=19,
               cex=0.03,col=colAlpha('black',a))
        # Plot ground truth
        # Subclone allelic ratios (blue), tumour panel only
        if(celltype == 'Tumour' & (nrow(subCl.dna) > 0)){
          for(chr in unique(subCl.dna$Chr)){
            for(Idx in unique(subCl.dna[subCl.dna$Chr == chr,]$Idx)){
              lines(x=subCl.dna[subCl.dna$Chr == chr & subCl.dna$Idx == Idx,]$abspos_kb*1000,subCl.dna[subCl.dna$Chr == chr & subCl.dna$Idx == Idx,]$tumFrac,col='#4169E1',lwd=1.1)
            }
          }
        }
        # Major clone allelic-ratio profile (red)
        lines(x=dna$abspos_kb*1000,dna$tumFrac,col='#b02c46',lwd=1.0)
      }
    }
    # Output naming depends on the tumour type / layout combination
    if(sampleList != 'all' & sheet == 'RCC_mani'){
      saveFig(file.path(res,paste0('v2_RCC_AI_',PDID)),plotFun,width = 2.65,height = 2.15,res=500)
    }else if(sampleList == 'all' & sheet == 'RCC_mani'){
      saveFig(file.path(res,paste0('FigS2_AI_',PDID,'_',subCl.minSegLen)),plotFun,width = 2.3,height = 3.5,res=500)
    }else if(sampleList != 'all' & sheet == 'NB_mani'){
      saveFig(file.path(res,paste0('v2_NB_AI_',PDID)),plotFun,width = 2.5,height = 2.265,res=500)
    }else if(sampleList == 'all' & sheet == 'NB_mani'){
      saveFig(file.path(res,paste0('FigS2_AI_',PDID,'_',subCl.minSegLen)),plotFun,width = 2.0,height = 3.2,res=500)
    }
  }
}
# GOSH25 - bulkDNA BAF plot ####
# Plot Tumour bulkDNA BAF ####
# Script-level setup for the bulk-DNA BAF plots below.
# NOTE(review): setwd() mutates global session state; paths below mix
# relative and absolute forms on the assumption this wd is in effect.
setwd('~/lustre_mt22/CN_methods/')
#############
# Libraries #
#############
library(alleleIntegrator)
#########################
# Set Global parameters #
#########################
tgtChrs=c(1:22)        # autosomes only
minSegLen=1e6          # minimum segment length (bp)
subCl.minSegLen=2e7    # minimum subclonal segment length (bp)
skipIfExists = T
# Import Manifest (relative to the setwd() above)
projMani = read_excel("../projectManifest.xlsx",sheet = "alleleIntegrator")
mainDir = '~/lustre_mt22/CN_methods/revision_2204'
refGenome = '/lustre/scratch119/realdata/mdt1/team78pipelines/reference/Human/GRCH37d5/genome.fa'
nParallel=25           # worker count for alleleIntegrator calls
#----------- Run AlleleIntegrator on NB dataset (5 samples)
#-----------------------------------------------------------
# For each Ewings/Wilms/ATRT donor in the manifest: call heterozygous SNPs
# from the patient (germline) DNA, compute coverage/BAF at those sites in the
# tumour DNA, and save a genome-wide BAF scatter plot.
# Relies on the script-level globals set above (projMani, mainDir, refGenome,
# nParallel) and on saveFig()/`res` defined elsewhere in this file.
for(tumourType in unique(projMani$TumourType)){
  if(tumourType %in% c('Ewings','Wilms','ATRT')){
    for(PDID in unique(projMani$PDID[projMani$TumourType == tumourType])){
      message(sprintf('Generating BAF plot from bulkDNA for Sample %s - tumourType: %s',PDID,tumourType))
      # Set output directory
      outDir = file.path(mainDir,'alleleIntegrator_output',tumourType,PDID)
      if(!file.exists(outDir)){
        # BUGFIX: the original sprintf had a '%s' placeholder but no argument,
        # which errors at runtime ("too few arguments"); pass outDir. Also use
        # the plain `next` keyword rather than `next()`.
        message(sprintf('[%s]: Cannot find output dir - Please check!...',outDir))
        next
      }
      # Set Sample specific params
      donorMani = projMani[projMani$PDID == PDID,]
      tumourDNA = unique(donorMani$tumourDNA[!is.na(donorMani$tumourDNA)])
      patientDNA = unique(donorMani$patientDNA[!is.na(donorMani$patientDNA)])
      ######################
      # Call heterozygous SNPs from the germline BAM
      hSNPs = findHetSNPs(patientDNA,refGenome,file.path(outDir,paste0(PDID,'_patient_hetSNPs.vcf')),nParallel=nParallel)
      #Expectation is that we'll find ~ 3 million of them
      message(sprintf("Found %s heterozygous SNPs",prettyNum(length(hSNPs),big.mark=',')))
      # Coverage and B-allele fraction at each het SNP in the tumour DNA
      baf.out = generateCoverageAndBAF(BAM = tumourDNA,refGenome = refGenome,hSNPs=hSNPs,
                                       outPath = file.path(outDir,paste0(PDID,'_cov_BAF.RDS')),nParallel=nParallel)
      # Genome-wide BAF scatter: chromosomes laid end-to-end on the x axis,
      # one semi-transparent point per trusted SNP.
      plotFun = function(noFrame=T,noPlot=FALSE,minCoverage=10){
        #Filter to just the ones that we trust
        filt = baf.out[baf.out$coverage>=minCoverage,]
        #Work out the chromosome boundaries
        chrsToPlot=c(1:22)
        chrs = chrsToPlot
        chrLens = seqlengths(filt)
        # Fall back to the max observed position where seqlengths are missing
        tmp = sapply(split(start(filt),as.character(seqnames(filt))),max)
        chrLens[is.na(chrLens)] = tmp[names(chrLens)[is.na(chrLens)]]
        chrLens = as.numeric(chrLens[chrs])
        # Absolute x coordinate: SNP position + cumulative preceding chromosome lengths
        x = start(filt) +cumsum(c(0,chrLens))[match(as.character(seqnames(filt)),chrs)]
        # Subset randomly 50% of the points to keep the rendered figure light
        set.seed(2397)
        idx = sample(seq_len(nrow(mcols(filt))), nrow(mcols(filt))/2, replace=FALSE)
        filt.sub = filt[idx,]
        # BAF plot
        par(mfrow=c(1,1),mar=c(2.1,4.1,1.1,1.1))
        # Point alpha scales inversely with the point count (bounded to [0.002, 1])
        alpha = max(0.002,min(1,1e5/length(filt.sub)))
        plot(x[idx],filt.sub$BAF,
             col=rgb(0,0,0,alpha=alpha/2),
             cex=0.01,
             las=2,
             xaxt='n',
             yaxt='n',
             xlab='',
             ylab='BAF',
             xaxs='i',
             yaxs='i',
             ylim=c(0,1),
             xlim=c(1,sum(chrLens)))
        #axis(side=1, at=cumsum(c(0,chrLens[-length(chrLens)]))+chrLens/2, labels = chrs)
        axis(side=2, at=c(0,0.5,1),labels=c(0,0.5,1),las=1)
        # Vertical chromosome boundary guides
        abline(v=cumsum(chrLens),col='lightgrey')
      }
      saveFig(file.path(res,paste0('BAF_',PDID,'_',tumourType)),plotFun,width = 5.8,height = 2.2,res=1000)
    }
  }
}
# One-off (leftover) version of the BAF plot above for a single donor.
# BUGFIX: the original call had unbalanced parentheses -- `nParallel=24` was
# accidentally passed INTO paste0() and the generateCoverageAndBAF() call was
# never closed (a hard syntax error). `PD46693` was also an unquoted,
# undefined symbol rather than the intended sample-name string.
baf.out = generateCoverageAndBAF(BAM = tumourDNA,refGenome = refGenome,hSNPs=hSNPs,
                                 outPath = paste0('/lustre/scratch117/casm/team274/mt22/CN_methods/','PD46693','_cov_BAF.RDS'),
                                 nParallel=24)
plotFun = function(noFrame=T,noPlot=FALSE,minCoverage=10){
  #Filter to just the ones that we trust
  # BUGFIX: moved this assignment up -- the original computed seqlengths(filt)
  # before `filt` was assigned, which only worked if a stale global `filt`
  # happened to exist. NOTE(review): this variant filters hSNPs (not baf.out,
  # as the loop version does) -- confirm hSNPs carries a `coverage` column.
  filt = hSNPs[hSNPs$coverage>=minCoverage,]
  #Work out the chromosome boundaries
  chrsToPlot=c(1:22)
  chrs = chrsToPlot
  chrLens = seqlengths(filt)
  # Fall back to the max observed position where seqlengths are missing
  tmp = sapply(split(start(filt),as.character(seqnames(filt))),max)
  chrLens[is.na(chrLens)] = tmp[names(chrLens)[is.na(chrLens)]]
  chrLens = as.numeric(chrLens[chrs])
  # Absolute x coordinate: SNP position + cumulative preceding chromosome lengths
  x = start(filt) +cumsum(c(0,chrLens))[match(as.character(seqnames(filt)),chrs)]
  # Subset randomly 50% of the points to keep the rendered figure light
  set.seed(2397)
  idx = sample(seq_len(nrow(mcols(filt))), nrow(mcols(filt))/2, replace=FALSE)
  filt.sub = filt[idx,]
  # BAF plot
  par(mfrow=c(1,1),mar=c(2.1,4.1,1.1,1.1))
  # Point alpha scales inversely with the point count (bounded to [0.002, 1])
  alpha = max(0.002,min(1,1e5/length(filt.sub)))
  plot(x[idx],filt.sub$BAF,
       col=rgb(0,0,0,alpha=alpha/2),
       cex=0.01,
       las=2,
       xaxt='n',
       yaxt='n',
       xlab='',
       ylab='BAF',
       xaxs='i',
       yaxs='i',
       ylim=c(0,1),
       xlim=c(1,sum(chrLens)))
  #axis(side=1, at=cumsum(c(0,chrLens[-length(chrLens)]))+chrLens/2, labels = chrs)
  axis(side=2, at=c(0,0.5,1),labels=c(0,0.5,1),las=1)
  # Vertical chromosome boundary guides
  abline(v=cumsum(chrLens),col='lightgrey')
}
saveFig(file.path(resd,paste0('BAF_',PDID)),plotFun,width = 5.8,height = 2.2,res=1000)
|
# ===========================================================================
# Standalone ETL / model-prep script (REST API fetch + data.table join +
# h2o init). Unrelated to the figure code above; appears concatenated in.
# NOTE(review): contains a hard-coded API token below -- move it to an
# environment variable or config file before sharing this script.
# ===========================================================================
memory.limit(size = 50000)
library(XML)
library(rjson)
library(jsonlite)
library(data.table)
library(simsalapar)
library(readxl)
library(cluster)
library(ggplot2)
library(splitstackshape)
library(stringr)
library(plyr)
library(dplyr)
library(httr)
library(lubridate)
library(curl)
library(h2o)
#localH2o <- h2o.init(nthreads = -1) #doesn't work with Java 9 , try 8 or lower version
localH2o <- h2o.init(ip ='localhost', port =54321, nthreads = -1, max_mem_size = '4g')
h2o.startLogging()
# Paginated fetch of all 'metafields' records from the REST API.
# NOTE(review): json_parse is not exported by httr/rjson/jsonlite -- presumably
# a helper defined elsewhere; confirm before running this file standalone.
query <- "https://sdfhjjnpgc.execute-api.ap-southeast-2.amazonaws.com/prod/api/metafields/?"
getdata<-GET(url=query, add_headers(Authorization="Token 8e70597d2eec1c4cdb0a1f260a1756e3d24396d8")) %>% stop_for_status() %>% json_parse
data_api <- getdata$results
next_page <- getdata$'next'
# Follow the API's 'next' links until exhausted, rbind-ing each page of
# results. NOTE(review): withTimeout presumably comes from R.utils (loaded
# via the packages above) -- confirm; it aborts the whole loop after `timeout`
# seconds with an error.
withTimeout(while(!is.null(next_page)){
  more_data <- GET(next_page,add_headers(Authorization="Token 8e70597d2eec1c4cdb0a1f260a1756e3d24396d8")) %>% stop_for_status() %>% json_parse
  data_api <- rbind(data_api, more_data$results)
  next_page <- more_data$'next'
  Sys.sleep(2) # throttle requests
}
,timeout = 600, onTimeout = "error") #change timeout according to your wish
data2<- read.csv("E:/Machine Learning A-Z/metastate.csv/api_metastate.csv/pageviews.csv")
data3<- data2[!apply(data2 == "", 1, all),] # removes empty rows from data2
data4 <- data3[-(grep("[a-zA-Z]",data3$id)), ] #removes characters from id
gc()
# Key both tables on pageviewid and inner-join them (merge on keyed
# data.tables joins by the shared key)
data_table_1 <- data.table(data_api, key= "pageviewid")
data_table_2 <- data.table(data4, key= "pageviewid")
system.time(dt.merged <- merge(data_table_1, data_table_2))
gc()
# Factor -> numeric conversion via levels (standard idiom; direct as.numeric
# on a factor would return level codes), then NA -> 0
dt.merged$epc <- as.numeric(levels(dt.merged$epc))[dt.merged$epc]
gc()
dt.merged$epc[is.na(dt.merged$epc)] <- 0
gc()
nondeleteddata <- dt.merged[dt.merged$deleted== 0, ]
# split the data 80% train/20% test
# NOTE(review): no set.seed(), so the split is not reproducible across runs
splitdata<- sample.int(n=nrow(nondeleteddata),size=floor(.8*nrow(nondeleteddata)),replace = F)
data_train <- nondeleteddata[splitdata, ]
data_test <- nondeleteddata[-splitdata, ]
gc()
|
/beta_1401.R
|
no_license
|
heybhai/rconnect
|
R
| false
| false
| 1,956
|
r
|
# --- Environment setup -------------------------------------------------------
# NOTE(review): memory.limit() is Windows-only (and defunct in R >= 4.2).
memory.limit(size = 50000)
library(XML)
library(rjson)
library(jsonlite)
library(data.table)
library(simsalapar)
library(readxl)
library(cluster)
library(ggplot2)
library(splitstackshape)
library(stringr)
library(plyr)
library(dplyr)
library(httr)
library(lubridate)
library(curl)
library(h2o)
#localH2o <- h2o.init(nthreads = -1) #doesn't work with Java 9 , try 8 or lower version
localH2o <- h2o.init(ip ='localhost', port =54321, nthreads = -1, max_mem_size = '4g')
h2o.startLogging()
# --- Download every page of the metafields API -------------------------------
# SECURITY(review): the API token is hard-coded below; move it to an
# environment variable or config file before sharing this script.
# NOTE(review): json_parse is not exported by httr/jsonlite/rjson -- presumably
# a project-defined helper; confirm it is in scope before this runs.
query <- "https://sdfhjjnpgc.execute-api.ap-southeast-2.amazonaws.com/prod/api/metafields/?"
getdata<-GET(url=query, add_headers(Authorization="Token 8e70597d2eec1c4cdb0a1f260a1756e3d24396d8")) %>% stop_for_status() %>% json_parse
data_api <- getdata$results
next_page <- getdata$'next'
# Follow the pagination 'next' links until exhausted, aborting on timeout.
# NOTE(review): growing data_api with rbind() inside the loop is O(n^2);
# collecting pages in a list and binding once at the end would scale better.
withTimeout(while(!is.null(next_page)){
more_data <- GET(next_page,add_headers(Authorization="Token 8e70597d2eec1c4cdb0a1f260a1756e3d24396d8")) %>% stop_for_status() %>% json_parse
data_api <- rbind(data_api, more_data$results)
next_page <- more_data$'next'
Sys.sleep(2)
}
,timeout = 600, onTimeout = "error") #change timeout according to your wish
# --- Load and clean the local pageviews extract ------------------------------
data2<- read.csv("E:/Machine Learning A-Z/metastate.csv/api_metastate.csv/pageviews.csv")
data3<- data2[!apply(data2 == "", 1, all),] # removes empty rows from data2
data4 <- data3[-(grep("[a-zA-Z]",data3$id)), ] #removes characters from id
gc()
# Key both tables on pageviewid and merge them
data_table_1 <- data.table(data_api, key= "pageviewid")
data_table_2 <- data.table(data4, key= "pageviewid")
system.time(dt.merged <- merge(data_table_1, data_table_2))
gc()
# Convert the epc factor to its numeric values (via levels); NA -> 0
dt.merged$epc <- as.numeric(levels(dt.merged$epc))[dt.merged$epc]
gc()
dt.merged$epc[is.na(dt.merged$epc)] <- 0
gc()
nondeleteddata <- dt.merged[dt.merged$deleted== 0, ]
# split the data 80% train/20% test
splitdata<- sample.int(n=nrow(nondeleteddata),size=floor(.8*nrow(nondeleteddata)),replace = F)
data_train <- nondeleteddata[splitdata, ]
data_test <- nondeleteddata[-splitdata, ]
gc()
|
# Plot 1: histogram of Global Active Power for 1-2 Feb 2007.
# Fix: with stringsAsFactors=TRUE (pre-R-4.0 default) Global_active_power is a
# factor because of "?" missing-value markers, and as.numeric() on a factor
# returns the internal level codes, not the values. Convert via as.character
# first -- this is also a no-op when the column is already character.
data<-read.table("household_power_consumption.txt",header=TRUE,sep=";",dec=".")
subSetData <- data[data$Date %in% c("1/2/2007","2/2/2007") ,]
globalActivePower <- as.numeric(as.character(subSetData$Global_active_power))
png("plot1.png")
hist(globalActivePower, col="red", main="Global Active Power", xlab="Global Active Power (kilowatts)")
dev.off()
|
/plot.R
|
no_license
|
aryamity/ExData_Plotting1
|
R
| false
| false
| 336
|
r
|
# Plot 1: histogram of Global Active Power for 1-2 Feb 2007.
# Fix: with stringsAsFactors=TRUE (pre-R-4.0 default) Global_active_power is a
# factor because of "?" missing-value markers, and as.numeric() on a factor
# returns the internal level codes, not the values. Convert via as.character
# first -- this is also a no-op when the column is already character.
data<-read.table("household_power_consumption.txt",header=TRUE,sep=";",dec=".")
subSetData <- data[data$Date %in% c("1/2/2007","2/2/2007") ,]
globalActivePower <- as.numeric(as.character(subSetData$Global_active_power))
png("plot1.png")
hist(globalActivePower, col="red", main="Global Active Power", xlab="Global Active Power (kilowatts)")
dev.off()
|
rm(list=ls())
library ( MASS )
############### CLASSIFICATION OF CAT AND DOG IMAGES #################################
############################# Gautier Appert #########################################
# Group: Alexandre Junius - Xavier Van Poelvoorde - Marie Guegain
# 1. DATA EXPLORATION AND DIMENSION REDUCTION ----
#QUESTION 1: load the data sets ----
setwd("/Users/zoefontier/Desktop/ensae 2A/stats")
load("Xtest.RData")
load("Xtrain.RData")
load("Ytrain.RData")
load("Ytest.RData")
# Convert a few images into 200x200 matrices
Image1 <- matrix(Xtest[1,], nrow=200, ncol=200)
Image2 <- matrix(Xtest[2,], nrow=200, ncol=200)
Image3 <- matrix(Xtest[3,], nrow=200, ncol=200)
Image30 <- matrix(Xtest[13,], nrow=200, ncol=200)
Image1bis <-matrix(Xtrain[1,], nrow=200, ncol=200)
Image2bis <-matrix(Xtrain[2,], nrow=200, ncol=200)
Image3bis <-matrix(Xtrain[3,], nrow=200, ncol=200)
# Rotation helper so image() displays pictures the right way up
rotate = function(x) t(apply(x, 2, rev))
# Display a few Xtest images
image(rotate(Image1), col=grey(seq(0,1, length = 256)))
image(rotate(Image2), col=grey(seq(0,1, length = 256)))
image(rotate(Image3), col=grey(seq(0,1, length = 256)))
image(rotate(Image30), col=grey(seq(0,1, length = 256)))
rasterImage(Image1,0,0,dim(Image1)[2],dim(Image1)[1])
# Display a few Xtrain images
image(rotate(Image1bis), col=grey(seq(0,1, length = 256)))
image(rotate(Image2bis), col=grey(seq(0,1, length = 256)))
image(rotate(Image3bis), col=grey(seq(0,1, length = 256)))
#QUESTION 2: dimension reduction of the data by PCA ----
#Concatenate the Xtrain and Xtest sets
Xtot = rbind(Xtrain,Xtest)
#Centre the column vectors
X = scale(Xtot,scale=F)
#PCA using a singular value decomposition (SVD) of X.
# NOTE(review): nv=15 keeps only 15 right singular vectors although the
# comments below refer to 30 principal components -- confirm which is intended.
svdres = svd(X, nu=5, nv=15)
#Keep only the first 30 principal components
str(svdres)
#Share of variance explained by 30 principal components?
L=svdres$d^2
pct=cumsum(L)/sum(L)
plot(pct,type='l')
pct[15]
sum(pct[1:30])
#We obtain 80.4%
#Principal components C
C=X%*%svdres$v
#Split the data into Ctrain and Ctest
Ctrain = C[1:315,]
Ctest = C[316:363,]
indice1=which(Ytest==1)
indice0=which(Ytest==0)
# Scatter of the first two PCs of the test set, coloured by true label
plot(Ctest[indice1,1],Ctest[indice1,2],col="blue",xlim=c(-200,200),ylim=c(-100,100))
points(Ctest[indice0,1],Ctest[indice0,2],col="red")
############################################################
## Maximum-likelihood estimators for the two-class QDA ----##
## model: class prior, class means, class covariances  ----##
############################################################
computeML=function(C,Y){
  pos <- which(Y==1)
  neg <- which(Y==0)
  ## Estimated prior probability of class 1
  piHat <- length(pos)/length(Y)
  ## Class mean vectors
  mu1Hat <- colMeans(C[pos,])
  mu0Hat <- colMeans(C[neg,])
  ## Class covariance matrices (MLE: normalised by the class size).
  ## apply(..., 1, ...) returns the centred rows as COLUMNS, so the
  ## covariance is centred %*% t(centred) / n, i.e. tcrossprod.
  centred1 <- apply(C[pos,],1,FUN=function(row){row-mu1Hat})
  centred0 <- apply(C[neg,],1,FUN=function(row){row-mu0Hat})
  Sigma1Hat <- tcrossprod(centred1)/length(pos)
  Sigma0Hat <- tcrossprod(centred0)/length(neg)
  list(piHat=piHat, mu1Hat=mu1Hat, mu0Hat=mu0Hat, Sigma1Hat=Sigma1Hat, Sigma0Hat=Sigma0Hat)
}
# Log-ratio of the two Gaussian class posteriors for one observation cNew:
# log[ pi * N(cNew; mu1, Sigma1) ] - log[ (1-pi) * N(cNew; mu0, Sigma0) ]
# (up to the shared normalising constant). Positive values favour class 1.
computeLogRatio=function(cNew,pi,mu1,mu0,Sigma1,Sigma0){
  dev1 <- cNew - mu1
  dev0 <- cNew - mu0
  class1 <- -(1/2)*log(det(Sigma1)) - (1/2)*dev1%*%solve(Sigma1)%*%dev1 + log(pi)
  class0 <- -(1/2)*log(det(Sigma0)) - (1/2)*dev0%*%solve(Sigma0)%*%dev0 + log(1-pi)
  class1 - class0
}
# Classify each row of C with the QDA rule: label 1 when the log posterior
# ratio is positive, 0 otherwise.
computePred=function(C,pi,mu1,mu0,Sigma1,Sigma0){
  scores <- apply(C,1,FUN=function(row){ computeLogRatio(row,pi,mu1,mu0,Sigma1,Sigma0) })
  ifelse(scores>0,1,0)
}
# Fit the hand-written QDA and evaluate it on the test set
ML=computeML(Ctrain,Ytrain)
QDA.Pred = computePred(Ctest,ML$piHat,ML$mu1Hat,ML$mu0Hat,ML$Sigma1Hat,ML$Sigma0Hat)
# Misclassification rate (labels are 0/1, so |diff| counts disagreements)
sum(abs(Ytest-QDA.Pred))/length(Ytest)
indice1=which(QDA.Pred==1)
indice0=which(QDA.Pred==0)
plot(Ctest[indice1,1],Ctest[indice1,2],col="blue",xlim=c(-200,200),ylim=c(-200,100),pch=19)
points(Ctest[indice0,1],Ctest[indice0,2],col="red",pch=19)
indexDiff=which(QDA.Pred != Ytest)
# Convert a misclassified image into a 200x200 matrix
Image1 <- matrix(Xtest[indexDiff[6],], nrow=200, ncol=200)
# Display a few Xtest images
image(rotate(Image1), col=grey(seq(0,1, length = 256)))
rasterImage(Image1,0,0,dim(Image1)[2],dim(Image1)[1])
# Display a few Xtrain images
image(rotate(Image1bis), col=grey(seq(0,1, length = 256)))
indexDiff=which(QDA.Pred != Ytest)
# Exploratory checks (centering, sweep vs recycling behaviour)
length(ML$mu1Hat)
dim(Ctrain)
a=Ctrain-ML$mu1Hat
dim(a)
b=sweep(Ctrain,2,ML$mu1Hat)
a==b
a=matrix(1:12,nrow=3,ncol=4)
a-1:4
t(apply(a,1,FUN=function(x){x-1:4}))
# NOTE(review): computeML() is called here without arguments and will error.
computeML()
# LDA / QDA via MASS for comparison
lda.fit = lda ( Ctrain,Ytrain )
lda.fit
plot ( lda.fit )
Ctest = data.frame(Ctest)
head(Ctest)
dim(Ctest)
lda.pred = predict (lda.fit ,Ctest)
lda.pred$class
qda.fit = qda (Ctrain,Ytrain )
str(qda.fit)
ML$mu1Hat
qda.fit$ldet
qda.fit$means
qda.pred=predict (qda.fit , as.data.frame(Ctest))$class
aa=as.data.frame(Ctest)
head(aa)
class(aa)
sum(abs(Ytest-as.numeric(as.character(qda.pred))))/length(Ytest)
qda.pred
# Plain (unpenalised) logistic regression for reference
logit=glm(Ytrain~Ctrain, family = "binomial")
summary(logit)
# 2. LOGISTIC MODEL WITH RIDGE PENALTY ----
#Question 6: write a function computing the penalised maximum likelihood ----
#Load the Matrix library for its norm function (norm-2 of a vector):
library(Matrix)
#Example: norm(matrix(1:3, ncol = 1), "F")
# Logistic (sigmoid) function, used to build the vector PI.
# NOTE: this deliberately shadows the base constant `pi`; other helpers in
# this script call this function under that name.
pi=function(x){
  ex <- exp(x)
  ex/(1+ex)
}
# Derivative of the logistic function, pi(x)*(1-pi(x)) = exp(x)/(1+exp(x))^2,
# used to build the diagonal weight matrix W.
pi2=function(x){
  ex <- exp(x)
  ex/((1+ex)^2)
}
#Newton-Raphson (IRLS) algorithm for ridge-penalised logistic regression.
# C      : design matrix; the problem dimension is taken from ncol(C)
#          (previously hard-coded to 30, which breaks whenever a different
#          number of principal components is retained -- svd() above keeps 15).
# y      : 0/1 response vector.
# lambda : ridge penalty weight.
# eps    : convergence tolerance on the change in beta (Frobenius norm).
# Returns the coefficient estimate as a p x 1 matrix.
newton <- function(C, y, lambda, eps){
  p <- ncol(C)
  beta_old = rep(1, p)
  beta_new = rep(0, p) #initial value of beta
  while (norm(as.matrix(beta_new-beta_old), "F") > eps) {
    beta_old <- beta_new
    c_beta = C %*% beta_old
    # Fitted probabilities and IRLS weights, vectorised (inlined logistic,
    # numerically identical to the pi()/pi2() helpers defined above)
    ex = exp(c_beta)
    PI = as.vector(ex/(1+ex))                            #vector PI
    W = diag(as.vector(ex/((1+ex)^2)), nrow = nrow(C))   #diagonal weight matrix W
    z = solve(W) %*% (y-PI) + C %*% beta_old             #working response z
    beta_new = solve(t(C) %*% W %*% C + 2*lambda*diag(1, p)) %*% t(C) %*% W %*% z #updated beta
  }
  beta_new
}
# 3. CROSS-VALIDATION AND LABEL PREDICTION ON THE TEST SET ----
#QUESTION 8: cross-validation ----
#Create the folds of the data set
library(caret)
echantillon <-createFolds(1:nrow(Ctrain),k=15)
#Compute 0/1 label predictions for each row of `base` given coefficients `beta`.
# Labels are 1 when the fitted logistic probability is >= 0.5 (the decision
# rule), else 0. The logistic is inlined and vectorised.
# Fix: removed the dead initialisation `y <- rep(0, nrow(C))`, which referenced
# the global C rather than `base` and was immediately overwritten anyway.
prediction = function(base,beta) {
  c_beta <- base %*% beta
  PI <- as.vector(exp(c_beta)/(1 + exp(c_beta)))
  ifelse(PI >= 0.5, 1, 0) #per the decision rule
}
#Support (candidate grid) of lambda values for cross-validation
support <- c(0,100,1000,2000,10000,15000,45000)
#Function selecting the best lambda among the support values by K-fold
#cross-validation, minimising the mean held-out misclassification rate.
# NOTE(review): reads Ctrain/Ytrain from the global environment rather than
# taking them as arguments, and calls the newton()/prediction() helpers above.
lambda_max <- function(support,echantillon) {
  vector_lambda=rep(0,length(support))
  vector_beta=matrix(data=0,length(echantillon),ncol(Ctrain))
  vector_MSE=rep(0,length(echantillon))
  for (i in 1:length(support)) {
    for (j in 1:length(echantillon)) {
      # Train on all folds except j, validate on fold j
      Ctrain_VC = Ctrain[unlist(echantillon[-j]),]
      Ytrain_VC=Ytrain[unlist(echantillon[-j])]
      Ctrain_init = Ctrain[unlist(echantillon[j]),]
      Ytrain_init=Ytrain[unlist(echantillon[j])]
      vector_prediction = rep(0,nrow(Ctrain_init))
      vector_beta[j,]=newton(Ctrain_VC,Ytrain_VC,support[i],0.0001)
      vector_prediction=prediction(Ctrain_init,vector_beta[j,])
      # Misclassification rate on the held-out fold (L1 norm of 0/1 diffs)
      vector_MSE[j]=(1/length(Ytrain_init))*norm(as.matrix(vector_prediction) - as.matrix(Ytrain_init),"1")
    }
    # Average cross-validation error for this lambda
    vector_lambda[i]=(1/length(echantillon))*sum(vector_MSE)
  }
  k=which.min(vector_lambda)
  support[k]
}
#Lambda value after cross-validation:
lambda <- lambda_max(support,echantillon)
#We obtain lambda = 45 000
#So the penalised model should be chosen
#QUESTION 9: prediction over the whole data set ----
#Fit the logistic model on the whole Ctrain data set
Beta=newton(Ctrain,Ytrain,lambda, 0.0001)
#Predict the labels on the Ctest set
Prediction_C_test=prediction(Ctest,Beta)
#Misclassification rate: the fraction of positions where x and y disagree.
#Vectorised replacement for the original element-by-element counting loop;
#comparison semantics (including numeric-vs-factor coercion) are unchanged.
error <- function(x,y) {
  mean(x != y)
}
#Compute the error rate
# NOTE(review): this evaluates the LDA predictions, not Prediction_C_test
# from the penalised logistic model -- confirm which was intended.
error (Ytest,lda.pred$class)
#We find 12.5% errors (reject if > 50%)
|
/script-3.R
|
no_license
|
FontierZoe/projet_stat
|
R
| false
| false
| 8,650
|
r
|
rm(list=ls())
library ( MASS )
############### CLASSIFICATION D'IMAGES DE CHATS ET DE CHIENS ########################
############################# Gautier Appert #########################################
# Groupe : Alexandre Junius - Xavier Van Poelvoorde - Marie Guegain
# 1. DECOUVERTE DE LA BASE DE DONNEES ET REDUCTION DE LA DIMENSION ----
#QUESTION 1: Importer les bases de donnees ----
setwd("/Users/zoefontier/Desktop/ensae 2A/stats")
load("Xtest.RData")
load("Xtrain.RData")
load("Ytrain.RData")
load("Ytest.RData")
# Conversion de quelques images en matrices 200x200
Image1 <- matrix(Xtest[1,], nrow=200, ncol=200)
Image2 <- matrix(Xtest[2,], nrow=200, ncol=200)
Image3 <- matrix(Xtest[3,], nrow=200, ncol=200)
Image30 <- matrix(Xtest[13,], nrow=200, ncol=200)
Image1bis <-matrix(Xtrain[1,], nrow=200, ncol=200)
Image2bis <-matrix(Xtrain[2,], nrow=200, ncol=200)
Image3bis <-matrix(Xtrain[3,], nrow=200, ncol=200)
#Fonction de rotation pour les images
rotate = function(x) t(apply(x, 2, rev))
# Affichage de quelques images de Xtest
image(rotate(Image1), col=grey(seq(0,1, length = 256)))
image(rotate(Image2), col=grey(seq(0,1, length = 256)))
image(rotate(Image3), col=grey(seq(0,1, length = 256)))
image(rotate(Image30), col=grey(seq(0,1, length = 256)))
rasterImage(Image1,0,0,dim(Image1)[2],dim(Image1)[1])
# Affichage de quelques images de Xtrain
image(rotate(Image1bis), col=grey(seq(0,1, length = 256)))
image(rotate(Image2bis), col=grey(seq(0,1, length = 256)))
image(rotate(Image3bis), col=grey(seq(0,1, length = 256)))
#QUESTION 2 : Réduction de la dimension des donnees par une ACP ----
#Concatenation des bases Xtrain et Xtest
Xtot = rbind(Xtrain,Xtest)
#Centrage des vecteurs colonnes
X = scale(Xtot,scale=F)
#ACP en utilisant une decomposition en valeur singulière (SVD) de X.
svdres = svd(X, nu=5, nv=15)
#On ne retient que les 30 premieres composantes principales
str(svdres)
#Part de variance expliquee par 30 composantes principales ?
L=svdres$d^2
pct=cumsum(L)/sum(L)
plot(pct,type='l')
pct[15]
sum(pct[1:30])
#On obtient 80,4%
#Composantes principales C
C=X%*%svdres$v
#Decoupage de la base en Ctrain et Ctest
Ctrain = C[1:315,]
Ctest = C[316:363,]
indice1=which(Ytest==1)
indice0=which(Ytest==0)
plot(Ctest[indice1,1],Ctest[indice1,2],col="blue",xlim=c(-200,200),ylim=c(-100,100))
points(Ctest[indice0,1],Ctest[indice0,2],col="red")
############################################################
## Maximum-likelihood estimators for the two-class QDA ----##
## model: class prior, class means, class covariances  ----##
############################################################
computeML=function(C,Y){
  pos <- which(Y==1)
  neg <- which(Y==0)
  ## Estimated prior probability of class 1
  piHat <- length(pos)/length(Y)
  ## Class mean vectors
  mu1Hat <- colMeans(C[pos,])
  mu0Hat <- colMeans(C[neg,])
  ## Class covariance matrices (MLE: normalised by the class size).
  ## apply(..., 1, ...) returns the centred rows as COLUMNS, so the
  ## covariance is centred %*% t(centred) / n, i.e. tcrossprod.
  centred1 <- apply(C[pos,],1,FUN=function(row){row-mu1Hat})
  centred0 <- apply(C[neg,],1,FUN=function(row){row-mu0Hat})
  Sigma1Hat <- tcrossprod(centred1)/length(pos)
  Sigma0Hat <- tcrossprod(centred0)/length(neg)
  list(piHat=piHat, mu1Hat=mu1Hat, mu0Hat=mu0Hat, Sigma1Hat=Sigma1Hat, Sigma0Hat=Sigma0Hat)
}
# Log-ratio of the two Gaussian class posteriors for one observation cNew:
# log[ pi * N(cNew; mu1, Sigma1) ] - log[ (1-pi) * N(cNew; mu0, Sigma0) ]
# (up to the shared normalising constant). Positive values favour class 1.
computeLogRatio=function(cNew,pi,mu1,mu0,Sigma1,Sigma0){
  dev1 <- cNew - mu1
  dev0 <- cNew - mu0
  class1 <- -(1/2)*log(det(Sigma1)) - (1/2)*dev1%*%solve(Sigma1)%*%dev1 + log(pi)
  class0 <- -(1/2)*log(det(Sigma0)) - (1/2)*dev0%*%solve(Sigma0)%*%dev0 + log(1-pi)
  class1 - class0
}
# Classify each row of C with the QDA rule: label 1 when the log posterior
# ratio is positive, 0 otherwise.
computePred=function(C,pi,mu1,mu0,Sigma1,Sigma0){
  scores <- apply(C,1,FUN=function(row){ computeLogRatio(row,pi,mu1,mu0,Sigma1,Sigma0) })
  ifelse(scores>0,1,0)
}
ML=computeML(Ctrain,Ytrain)
QDA.Pred = computePred(Ctest,ML$piHat,ML$mu1Hat,ML$mu0Hat,ML$Sigma1Hat,ML$Sigma0Hat)
sum(abs(Ytest-QDA.Pred))/length(Ytest)
indice1=which(QDA.Pred==1)
indice0=which(QDA.Pred==0)
plot(Ctest[indice1,1],Ctest[indice1,2],col="blue",xlim=c(-200,200),ylim=c(-200,100),pch=19)
points(Ctest[indice0,1],Ctest[indice0,2],col="red",pch=19)
indexDiff=which(QDA.Pred != Ytest)
# Conversion de quelques images en matrices 200x200
Image1 <- matrix(Xtest[indexDiff[6],], nrow=200, ncol=200)
# Affichage de quelques images de Xtest
image(rotate(Image1), col=grey(seq(0,1, length = 256)))
rasterImage(Image1,0,0,dim(Image1)[2],dim(Image1)[1])
# Affichage de quelques images de Xtrain
image(rotate(Image1bis), col=grey(seq(0,1, length = 256)))
indexDiff=which(QDA.Pred != Ytest)
length(ML$mu1Hat)
dim(Ctrain)
a=Ctrain-ML$mu1Hat
dim(a)
b=sweep(Ctrain,2,ML$mu1Hat)
a==b
a=matrix(1:12,nrow=3,ncol=4)
a-1:4
t(apply(a,1,FUN=function(x){x-1:4}))
computeML()
lda.fit = lda ( Ctrain,Ytrain )
lda.fit
plot ( lda.fit )
Ctest = data.frame(Ctest)
head(Ctest)
dim(Ctest)
lda.pred = predict (lda.fit ,Ctest)
lda.pred$class
qda.fit = qda (Ctrain,Ytrain )
str(qda.fit)
ML$mu1Hat
qda.fit$ldet
qda.fit$means
qda.pred=predict (qda.fit , as.data.frame(Ctest))$class
aa=as.data.frame(Ctest)
head(aa)
class(aa)
sum(abs(Ytest-as.numeric(as.character(qda.pred))))/length(Ytest)
qda.pred
logit=glm(Ytrain~Ctrain, family = "binomial")
summary(logit)
# 2. MODELE LOGISTIQUE ET PENALITE RIDGE ----
#Question 6: Coder une fonction qui calcule le maximum de vraisemblance penalise ----
#Importation de la library Matrix pour sa fonction norm qui permet de calculer la norme2 d'un vecteur :
library(Matrix)
#Exemple : norm(matrix(1:3, ncol = 1), "F")
# Logistic (sigmoid) function, used to build the vector PI.
# NOTE: this deliberately shadows the base constant `pi`; other helpers in
# this script call this function under that name.
pi=function(x){
  ex <- exp(x)
  ex/(1+ex)
}
# Derivative of the logistic function, pi(x)*(1-pi(x)) = exp(x)/(1+exp(x))^2,
# used to build the diagonal weight matrix W.
pi2=function(x){
  ex <- exp(x)
  ex/((1+ex)^2)
}
#Newton-Raphson (IRLS) algorithm for ridge-penalised logistic regression.
# C      : design matrix; the problem dimension is taken from ncol(C)
#          (previously hard-coded to 30, which breaks whenever a different
#          number of principal components is retained -- svd() above keeps 15).
# y      : 0/1 response vector.
# lambda : ridge penalty weight.
# eps    : convergence tolerance on the change in beta (Frobenius norm).
# Returns the coefficient estimate as a p x 1 matrix.
newton <- function(C, y, lambda, eps){
  p <- ncol(C)
  beta_old = rep(1, p)
  beta_new = rep(0, p) #initial value of beta
  while (norm(as.matrix(beta_new-beta_old), "F") > eps) {
    beta_old <- beta_new
    c_beta = C %*% beta_old
    # Fitted probabilities and IRLS weights, vectorised (inlined logistic,
    # numerically identical to the pi()/pi2() helpers defined above)
    ex = exp(c_beta)
    PI = as.vector(ex/(1+ex))                            #vector PI
    W = diag(as.vector(ex/((1+ex)^2)), nrow = nrow(C))   #diagonal weight matrix W
    z = solve(W) %*% (y-PI) + C %*% beta_old             #working response z
    beta_new = solve(t(C) %*% W %*% C + 2*lambda*diag(1, p)) %*% t(C) %*% W %*% z #updated beta
  }
  beta_new
}
# 3. VALIDATION CROISEE ET PREDICTION DES LABELS SUR LA BASE TEST ----
#QUESTION 8: Validation croisee ----
#Creation des echantillons de la base de donnees
library(caret)
echantillon <-createFolds(1:nrow(Ctrain),k=15)
#Compute 0/1 label predictions for each row of `base` given coefficients `beta`.
# Labels are 1 when the fitted logistic probability is >= 0.5 (the decision
# rule), else 0. The logistic is inlined and vectorised.
# Fix: removed the dead initialisation `y <- rep(0, nrow(C))`, which referenced
# the global C rather than `base` and was immediately overwritten anyway.
prediction = function(base,beta) {
  c_beta <- base %*% beta
  PI <- as.vector(exp(c_beta)/(1 + exp(c_beta)))
  ifelse(PI >= 0.5, 1, 0) #per the decision rule
}
#Support de lambda
support <- c(0,100,1000,2000,10000,15000,45000)
#Fonction pour calculer la bonne valeur de lambda parmi celles du support
lambda_max <- function(support,echantillon) {
vector_lambda=rep(0,length(support))
vector_beta=matrix(data=0,length(echantillon),ncol(Ctrain))
vector_MSE=rep(0,length(echantillon))
for (i in 1:length(support)) {
for (j in 1:length(echantillon)) {
Ctrain_VC = Ctrain[unlist(echantillon[-j]),]
Ytrain_VC=Ytrain[unlist(echantillon[-j])]
Ctrain_init = Ctrain[unlist(echantillon[j]),]
Ytrain_init=Ytrain[unlist(echantillon[j])]
vector_prediction = rep(0,nrow(Ctrain_init))
vector_beta[j,]=newton(Ctrain_VC,Ytrain_VC,support[i],0.0001)
vector_prediction=prediction(Ctrain_init,vector_beta[j,])
vector_MSE[j]=(1/length(Ytrain_init))*norm(as.matrix(vector_prediction) - as.matrix(Ytrain_init),"1")
}
vector_lambda[i]=(1/length(echantillon))*sum(vector_MSE)
}
k=which.min(vector_lambda)
support[k]
}
#Valeur de lambda apres validation croisee:
lambda <- lambda_max(support,echantillon)
#On obtient lambda = 45 000
#Il faut donc choisir le modele avec penalite
#QUESTION 9: Prediction sur toute la base ----
#Estimation du modele logistique sur toute la base de donnees Ctrain
Beta=newton(Ctrain,Ytrain,lambda, 0.0001)
#Prediction des labels sur la base Ctest
Prediction_C_test=prediction(Ctest,Beta)
#Misclassification rate: the fraction of positions where x and y disagree.
#Vectorised replacement for the original element-by-element counting loop;
#comparison semantics (including numeric-vs-factor coercion) are unchanged.
error <- function(x,y) {
  mean(x != y)
}
#Calcul de l'erreur
error (Ytest,lda.pred$class)
#On trouve 12,5% d'erreurs (rejet si > 50%)
|
.name_split0 <- function(gz.files)
{
  #+
  # Split image file names of the form
  #   "area_source_parameter_resolution_time-step_date1_date2[.optional-part]"
  # (example: 'b3_modis_sst2_1km_1d_20090910_20090910.gz')
  # into their component parts; returns a data.frame with one row per name,
  # all columns as plain character vectors.
  #
  # Note: the end of the compulsory part is either the end of the date
  # or the character before the first "." that begins the optional part
  #
  # Example : parts = name_split('w05d_seawifs_pp_50km_1m_19970901_19970930.gz')
  #
  # See also: name_join.pro (join into a name parts extracted by name_parts.pro)
  #
  # Author: H. Demarcq (February 2010); R version by R. Bauer (April 2013)
  #-
  # Strip any leading directory component. basename() handles a vector of
  # paths element-wise; the previous substr() approach used the '/' position
  # of the FIRST element for every element, which broke on mixed-path vectors.
  gz.files <- basename(gz.files)
  n <- length(gz.files)
  if(n != 1){ # argument is a vector of file names
    aparts <- lapply(gz.files, .name_split_org)
    parts <- matrix(as.character(unlist(aparts)), ncol = length(aparts[[1]]), byrow = TRUE)
    colnames(parts) <- names(data.frame(aparts[1]))
  }else{
    parts <- .name_split_org(gz.files)
  }
  return(data.frame(lapply(as.data.frame(parts), as.character), stringsAsFactors = FALSE))
}
.name_split_org <- function(name)
{
  # Split a single file name into its "_"-separated parts.
  # Supports the standard 7-part format
  #   area_source_parameter_resolution_timestep_date1_date2[.option]
  # and the legacy 3-part format area_parameter_date[.option].
  str <- unlist(strsplit(name,"\\_"))
  count <- n_parts <- length(str)
  # Fix: the old guard stopped on anything != 7 parts, which made the 3-part
  # (old format) branch below unreachable; accept both formats instead.
  if(!(n_parts %in% c(3, 7))) stop("name_parts: the name must contain either 2 or 6 ''_'' separators, please check")
  if(n_parts == 3){ # old format (useful for AOOS_rename.pro!)
    parts <- list(area="", source="", parameter="", resolution="", timestep="", date1="", date2="", option="")
    parts$area <- str[1]
    parts$parameter <- str[2]
    parts$date1 <- str[3]
    # Extract the optional part of the name (after the date, from the first '.')
    pos <- which(strsplit(str[n_parts], '')[[1]]=='.')[1]
    if(is.finite(pos)){
      parts$option <- substr(str[n_parts], pos, nchar(str[n_parts]))
      parts$date1 <- substr(str[n_parts], 1, pos-1)
      parts$date2 <- parts$date1
    }
  }else{
    parts <- list(area="", source="", parameter="", resolution="", timestep="", date1="", date2="", option="")
    parts$area <- str[1]
    parts$source <- str[2]
    parts$parameter <- str[3]
    parts$resolution <- str[4]
    parts$timestep <- str[5]
    parts$date1 <- str[6]
    parts$date2 <- str[7]
    # Extract the optional part of the name (after the date, from the first '.')
    pos <- which(strsplit(str[n_parts], '')[[1]]=='.')[1]
    if(is.finite(pos)){
      parts$option <- substr(str[n_parts], pos, nchar(str[n_parts]))
      parts$date2 <- substr(str[n_parts], 1, pos-1)
    }
    # File extension = text after the last '.'; remove it from 'option'
    parts$filetype <- tail(unlist(strsplit(parts$option, "[.]")),1)
    parts$option <- unlist(strsplit(parts$option, paste0(".", parts$filetype)))
  }
  return(parts)
}
# Public entry point: glob .gz file names (default '*.gz'), split them into
# their parts with .name_split0() and convert the date fields to Date objects.
# Prints a message and returns NULL (invisibly, via falling off) if no file
# matches.
get.gz.info <- name_split <- function(gz.files){
  if(missing(gz.files)) gz.files <- '*.gz'
  if(length(gz.files) == 1 && grepl('\\*', gz.files[1])) {
    gz.files <- Sys.glob(gz.files)
  }
  # Fix: the previous test was if(length(gz.files > 0)) -- the length of a
  # logical vector -- which only worked by accident; test the length itself.
  # (A stray head(k) no-op was also removed.)
  if(length(gz.files) > 0){
    k <- .name_split0(gz.files)
    k$date1 <- as.Date(k$date1, "%Y%m%d")
    k$date2 <- as.Date(k$date2, "%Y%m%d")
    return(k)
  }else{
    cat('no files found!\n')
  }
}
|
/R/name_split.r
|
no_license
|
mdsumner/oceanmap
|
R
| false
| false
| 3,862
|
r
|
.name_split0 <- function(gz.files)
{
  #+
  # Split image file names of the form
  #   "area_source_parameter_resolution_time-step_date1_date2[.optional-part]"
  # (example: 'b3_modis_sst2_1km_1d_20090910_20090910.gz')
  # into their component parts; returns a data.frame with one row per name,
  # all columns as plain character vectors.
  #
  # Note: the end of the compulsory part is either the end of the date
  # or the character before the first "." that begins the optional part
  #
  # Example : parts = name_split('w05d_seawifs_pp_50km_1m_19970901_19970930.gz')
  #
  # See also: name_join.pro (join into a name parts extracted by name_parts.pro)
  #
  # Author: H. Demarcq (February 2010); R version by R. Bauer (April 2013)
  #-
  # Strip any leading directory component. basename() handles a vector of
  # paths element-wise; the previous substr() approach used the '/' position
  # of the FIRST element for every element, which broke on mixed-path vectors.
  gz.files <- basename(gz.files)
  n <- length(gz.files)
  if(n != 1){ # argument is a vector of file names
    aparts <- lapply(gz.files, .name_split_org)
    parts <- matrix(as.character(unlist(aparts)), ncol = length(aparts[[1]]), byrow = TRUE)
    colnames(parts) <- names(data.frame(aparts[1]))
  }else{
    parts <- .name_split_org(gz.files)
  }
  return(data.frame(lapply(as.data.frame(parts), as.character), stringsAsFactors = FALSE))
}
.name_split_org <- function(name)
{
  # Split a single file name into its "_"-separated parts.
  # Supports the standard 7-part format
  #   area_source_parameter_resolution_timestep_date1_date2[.option]
  # and the legacy 3-part format area_parameter_date[.option].
  str <- unlist(strsplit(name,"\\_"))
  count <- n_parts <- length(str)
  # Fix: the old guard stopped on anything != 7 parts, which made the 3-part
  # (old format) branch below unreachable; accept both formats instead.
  if(!(n_parts %in% c(3, 7))) stop("name_parts: the name must contain either 2 or 6 ''_'' separators, please check")
  if(n_parts == 3){ # old format (useful for AOOS_rename.pro!)
    parts <- list(area="", source="", parameter="", resolution="", timestep="", date1="", date2="", option="")
    parts$area <- str[1]
    parts$parameter <- str[2]
    parts$date1 <- str[3]
    # Extract the optional part of the name (after the date, from the first '.')
    pos <- which(strsplit(str[n_parts], '')[[1]]=='.')[1]
    if(is.finite(pos)){
      parts$option <- substr(str[n_parts], pos, nchar(str[n_parts]))
      parts$date1 <- substr(str[n_parts], 1, pos-1)
      parts$date2 <- parts$date1
    }
  }else{
    parts <- list(area="", source="", parameter="", resolution="", timestep="", date1="", date2="", option="")
    parts$area <- str[1]
    parts$source <- str[2]
    parts$parameter <- str[3]
    parts$resolution <- str[4]
    parts$timestep <- str[5]
    parts$date1 <- str[6]
    parts$date2 <- str[7]
    # Extract the optional part of the name (after the date, from the first '.')
    pos <- which(strsplit(str[n_parts], '')[[1]]=='.')[1]
    if(is.finite(pos)){
      parts$option <- substr(str[n_parts], pos, nchar(str[n_parts]))
      parts$date2 <- substr(str[n_parts], 1, pos-1)
    }
    # File extension = text after the last '.'; remove it from 'option'
    parts$filetype <- tail(unlist(strsplit(parts$option, "[.]")),1)
    parts$option <- unlist(strsplit(parts$option, paste0(".", parts$filetype)))
  }
  return(parts)
}
# Public entry point: glob .gz file names (default '*.gz'), split them into
# their parts with .name_split0() and convert the date fields to Date objects.
# Prints a message and returns NULL (invisibly, via falling off) if no file
# matches.
get.gz.info <- name_split <- function(gz.files){
  if(missing(gz.files)) gz.files <- '*.gz'
  if(length(gz.files) == 1 && grepl('\\*', gz.files[1])) {
    gz.files <- Sys.glob(gz.files)
  }
  # Fix: the previous test was if(length(gz.files > 0)) -- the length of a
  # logical vector -- which only worked by accident; test the length itself.
  # (A stray head(k) no-op was also removed.)
  if(length(gz.files) > 0){
    k <- .name_split0(gz.files)
    k$date1 <- as.Date(k$date1, "%Y%m%d")
    k$date2 <- as.Date(k$date2, "%Y%m%d")
    return(k)
  }else{
    cat('no files found!\n')
  }
}
|
#
# httpd.R, 8 Oct 13
#
# Date from:
# Intensive Metrics for the Study of the Evolution of Open Source Projects: Case studies from Apache Software Foundation projects
# Santiago Gala-P{\'e}rez and Gregorio Robles and Jes{\'u}s M. Gonz{\'a}lez-Barahona and Israel Herraiz
#
# Example from:
# Empirical Software Engineering using R
# Derek M. Jones
source("ESEUR_config.r")
# Columns: year month build issues commits wiki devel user total
httpd=read.csv(paste0(ESEUR_dir, "time-series/httpd.csv.xz"), as.is=TRUE)
# Build a proper Date from the year/month columns (day fixed to the 1st)
httpd$date=as.Date(paste0(httpd$year, "-", httpd$month, "-01"), format="%Y-%m-%d")
# Keep only months with at least one commit
commits=subset(httpd, commits > 0)
plot(commits$date, commits$commits)
# Autocorrelation of the month-to-month changes in commit counts
acf(diff(commits$commits))
#ccf(diff(log(commits$devel)), diff(log(commits$commits)), lag.max=20)
# Cross-correlation between changes in developer activity and commits
ccf(diff(commits$devel), diff(commits$commits), lag.max=20)
plot(commits$date, commits$devel)
plot(commits$date, commits$user)
plot(commits$date, commits$total)
|
/time-series/httpd.R
|
no_license
|
alanponce/ESEUR-code-data
|
R
| false
| false
| 926
|
r
|
#
# httpd.R, 8 Oct 13
#
# Date from:
# Intensive Metrics for the Study of the Evolution of Open Source Projects: Case studies from Apache Software Foundation projects
# Santiago Gala-P{\'e}rez and Gregorio Robles and Jes{\'u}s M. Gonz{\'a}lez-Barahona and Israel Herraiz
#
# Example from:
# Empirical Software Engineering using R
# Derek M. Jones
source("ESEUR_config.r")
# Columns: year month build issues commits wiki devel user total
httpd=read.csv(paste0(ESEUR_dir, "time-series/httpd.csv.xz"), as.is=TRUE)
# Build a proper Date from the year/month columns (day fixed to the 1st)
httpd$date=as.Date(paste0(httpd$year, "-", httpd$month, "-01"), format="%Y-%m-%d")
# Keep only months with at least one commit
commits=subset(httpd, commits > 0)
plot(commits$date, commits$commits)
# Autocorrelation of the month-to-month changes in commit counts
acf(diff(commits$commits))
#ccf(diff(log(commits$devel)), diff(log(commits$commits)), lag.max=20)
# Cross-correlation between changes in developer activity and commits
ccf(diff(commits$devel), diff(commits$commits), lag.max=20)
plot(commits$date, commits$devel)
plot(commits$date, commits$user)
plot(commits$date, commits$total)
|
## Posterior class probabilities for rare-variant calls.
##
## For each sample not flagged in `bad`, computes per-strand log-likelihoods
## (via the external calc.ll()) under three candidate clusters -- centred at
## `center`, at 0, and at -1.7 -- then picks which strand(s) to trust and
## converts the chosen log-likelihoods into posterior probabilities using
## `prior`.
##
## Args:
##   d:       matrix of intensities, one row per sample; columns 1 and 2 are
##            the two strands.
##   ai:      matrix of average intensities, same row/column layout as d;
##            values < 9 are treated as "very low" (see below).
##   bad:     logical vector flagging rows of d/ai to skip.
##   center:  length-2 centre of the first cluster (per the comment below,
##            the het cluster), one value per strand.
##   cov:     2x2 covariance matrix; sqrt of the diagonal is used as the
##            per-strand error passed to calc.ll().
##   prior:   length-3 prior over the three clusters.
##   dbsnpID: if non-empty, flips which strand's scores are used in the
##            discordant-strand branches below.
##
## Returns: an nrow(d) x 3 matrix of posterior probabilities; rows flagged
## in `bad` are left as NA.
##
## NOTE(review): calc.ll() is defined elsewhere; presumably it returns a
## log-likelihood of the observation under a normal with the given centre
## and error -- confirm against its definition.
rare.variants.pp <- function(d,ai,bad,center,cov,prior,dbsnpID){
  ##assign a smaller covariance to the het cluster
  posterior <- matrix(NA,nrow(d),3)
  for(k in 1:sum(!bad)){
    ## Per-strand log-likelihoods under each of the three clusters.
    ll1 <- c(calc.ll(d[!bad,][k,1],center[1],error=sqrt(cov[1,1])),
             calc.ll(d[!bad,][k,2],center[2],error=sqrt(cov[2,2])))
    ll2 <- c(calc.ll(d[!bad,][k,1],0,error=sqrt(cov[1,1])),
             calc.ll(d[!bad,][k,2],0,error=sqrt(cov[2,2])))
    ## ll2 <- c(calc.ll(d[!bad,][k,1],0,error=0.1),
    ##          calc.ll(d[!bad,][k,2],0,error=0.1))
    ll3 <- c(calc.ll(d[!bad,][k,1],-1.7,error=sqrt(cov[1,1])),
             calc.ll(d[!bad,][k,2],-1.7,error=sqrt(cov[2,2])))
    ## s / as: the three cluster log-likelihoods for strand 1 / strand 2.
    ## order(x)[3] below is the index of the best-scoring cluster.
    s <- c(ll1[1],ll2[1],ll3[1])
    as <- c(ll1[2],ll2[2],ll3[2])
    ## if(!identical(order(s),order(as))){
    ####begin, change on 051311
    ## Strands disagree and strand 1 favours cluster 1: trust strand 2 for
    ## known dbSNP sites, strand 1 otherwise.
    if(order(s)[3]<order(as)[3]&order(s)[3]==1){
      ##comb.s <- s
      if(length(dbsnpID)>0){
        comb.s <- as
      }else{
        comb.s <- s
      }
    }
    ## Mirror case: strand 2 favours cluster 1.
    if(order(s)[3]>order(as)[3]&order(as)[3]==1){
      ## comb.s <- as
      if(length(dbsnpID)>0){
        comb.s <- s
      }else{
        comb.s <- as
      }
    }
    ## Remaining cases (neither strand favours cluster 1, or both strands
    ## agree on the best cluster).
    if((order(s)[3]>1&order(as)[3]>1)|(order(s)[3]==order(as)[3])){
      ####end, change on 051311
      ## browser()
      ##only use one strand, if the other strand has very low average intensities
      ai1 <- sum(ai[!bad,1]<9)
      ai2 <- sum(ai[!bad,2]<9)
      ai3 <- sum(!bad)/3
      if(ai1>ai3&ai1>ai2){
        comb.s <- as
      }else{
        if(ai2>ai3&ai1<ai2){
          comb.s <- s
        }else{
          ## Both strands look reliable: combine their log-likelihoods.
          comb.s <- s+as
        }
      }
    }
    ##log.denom <- comb.s+log(prior)
    ##posterior[!bad,][k,] <- exp(log.denom)/sum(exp(log.denom))
    ## Posterior \propto exp(log-lik) * prior; if all numerators underflow
    ## to zero, fall back to a uniform posterior.
    num <- exp(comb.s+log(prior))
    if(sum(num==0)==length(num)){
      num <- rep(1,length(num))
    }
    posterior[!bad,][k,] <- num/sum(num)
  }
  return(posterior)
}
|
/R/rare.variants.pp.R
|
no_license
|
cran/SRMA
|
R
| false
| false
| 1,970
|
r
|
## Posterior class probabilities for rare-variant calls.
##
## For every sample not flagged in `bad`, scores the two strands against
## three candidate clusters (centred at `center`, at 0, and at -1.7) using
## the external calc.ll(), decides which strand(s) to trust, and turns the
## chosen log-likelihood triple into a posterior via `prior`.
## Rows flagged in `bad` are left as NA in the returned nrow(d) x 3 matrix.
rare.variants.pp <- function(d,ai,bad,center,cov,prior,dbsnpID){
  ##assign a smaller covariance to the het cluster
  posterior <- matrix(NA, nrow(d), 3)
  kept.d   <- d[!bad,]
  kept.ai  <- ai[!bad,]
  n.kept   <- sum(!bad)
  err1     <- sqrt(cov[1,1])
  err2     <- sqrt(cov[2,2])
  in.dbsnp <- length(dbsnpID) > 0
  log.prior <- log(prior)
  for(i in 1:n.kept){
    ## Log-likelihoods of this sample under each cluster, per strand.
    het.ll  <- c(calc.ll(kept.d[i,1], center[1], error=err1),
                 calc.ll(kept.d[i,2], center[2], error=err2))
    zero.ll <- c(calc.ll(kept.d[i,1], 0,    error=err1),
                 calc.ll(kept.d[i,2], 0,    error=err2))
    hom.ll  <- c(calc.ll(kept.d[i,1], -1.7, error=err1),
                 calc.ll(kept.d[i,2], -1.7, error=err2))
    ## Cluster score triples for the forward (1) and reverse (2) strands.
    fwd.ll <- c(het.ll[1], zero.ll[1], hom.ll[1])
    rev.ll <- c(het.ll[2], zero.ll[2], hom.ll[2])
    ## Index of the best-scoring cluster on each strand.
    top.fwd <- order(fwd.ll)[3]
    top.rev <- order(rev.ll)[3]
    ## Strands disagree and the forward strand favours cluster 1: trust the
    ## reverse strand at known dbSNP sites, the forward strand otherwise.
    if(top.fwd < top.rev && top.fwd == 1){
      combined <- if(in.dbsnp) rev.ll else fwd.ll
    }
    ## Mirror case: the reverse strand favours cluster 1.
    if(top.fwd > top.rev && top.rev == 1){
      combined <- if(in.dbsnp) fwd.ll else rev.ll
    }
    ## Remaining cases: neither strand favours cluster 1, or both agree.
    if((top.fwd > 1 && top.rev > 1) || top.fwd == top.rev){
      ##only use one strand, if the other strand has very low average intensities
      low.fwd <- sum(kept.ai[,1] < 9)
      low.rev <- sum(kept.ai[,2] < 9)
      cutoff  <- n.kept/3
      if(low.fwd > cutoff && low.fwd > low.rev){
        combined <- rev.ll
      }else if(low.rev > cutoff && low.fwd < low.rev){
        combined <- fwd.ll
      }else{
        ## Both strands look reliable: combine their log-likelihoods.
        combined <- fwd.ll + rev.ll
      }
    }
    ## Posterior \propto exp(log-lik) * prior; fall back to uniform when
    ## every numerator underflows to zero.
    numer <- exp(combined + log.prior)
    if(sum(numer == 0) == length(numer)){
      numer <- rep(1, length(numer))
    }
    posterior[!bad,][i,] <- numer/sum(numer)
  }
  return(posterior)
}
|
##################################################################################
# A T E N T I O N !!!                                                            #
# ===================                                                            #
# Please, first set your current working directory to the current file location. #
# E.g.: > setwd("C:/Users/Pedro/Documents/Coursera/R_Work/ExploratoryDA")        #
##################################################################################
# Clearing memory for calculation safety...
rm(list=ls())
# Downloading and Unzipping data (skipped if the txt file is already present)...
if(!file.exists("household_power_consumption.txt")) {
    message("Downloading zip file...")
    myZipFile <- tempfile()
    download.file("http://d396qusza40orc.cloudfront.net/exdata%2Fdata%2Fhousehold_power_consumption.zip", myZipFile)
    message("Unzipping file...")
    myFile <- unzip(myZipFile)
    unlink(myZipFile)
} else {
    myFile <- "household_power_consumption.txt"
}
# Loading data only for the 2 days specified (1 and 2 February 2007)...
# FIX: the character class was "[1,2]", which also matches a literal comma;
# "[12]" matches only the digits 1 and 2, as intended.
# FIX: header = FALSE -- the grep()ed lines contain no header row, and
# header = TRUE would silently consume the first data line (midnight on
# 1 Feb 2007) as column names; col.names already supplies the names.
message("Reading file contents (loading data only for the 2 days specified). PLEASE WAIT...")
powerDF <- read.table(text = grep("^[12]/2/2007"
                                  , readLines(myFile)
                                  , value = TRUE)
                      , col.names = c("Date", "Time", "Global_active_power"
                                      , "Global_reactive_power", "Voltage", "Global_intensity"
                                      , "Sub_metering_1", "Sub_metering_2", "Sub_metering_3")
                      , sep = ";"
                      , header = FALSE)
# Setting one display per container...
par(mfrow=c(1,1))
# Generating Plot 1: histogram of Global Active Power, saved to plot1.png.
doPlot <- function(x) {
    message("Generating plot...") # Message saying the plot is on its way...
    hist(x$Global_active_power
         , main = "Global Active Power" # paste() on a single string was redundant
         , col="red"
         , xlab="Global Active Power (kilowatts)") # Generating the graphic...
    message("Copying plot to PNG file format...") # More messaging...
    dev.copy(png, file="plot1.png", width=480, height=480) # Copying plot to PNG file format...
    dev.off() # Disconnecting from PNG output device...
    message("******************************************************************************")
    message("File Plot1.png saved in ", getwd()) # Message saying where I've placed the PNG file.
    message("******************************************************************************")
}
doPlot(powerDF)
|
/plot1.R
|
no_license
|
pedrokarneiro/ExData_Plotting1
|
R
| false
| false
| 2,549
|
r
|
##################################################################################
# A T E N T I O N !!!                                                            #
# ===================                                                            #
# Please, first set your current working directory to the current file location. #
# E.g.: > setwd("C:/Users/Pedro/Documents/Coursera/R_Work/ExploratoryDA")        #
##################################################################################
# Clearing memory for calculation safety...
rm(list=ls())
# Downloading and Unzipping data (skipped if the txt file is already present)...
if(!file.exists("household_power_consumption.txt")) {
    message("Downloading zip file...")
    myZipFile <- tempfile()
    download.file("http://d396qusza40orc.cloudfront.net/exdata%2Fdata%2Fhousehold_power_consumption.zip", myZipFile)
    message("Unzipping file...")
    myFile <- unzip(myZipFile)
    unlink(myZipFile)
} else {
    myFile <- "household_power_consumption.txt"
}
# Loading data only for the 2 days specified (1 and 2 February 2007)...
# NOTE(review): the character class "[1,2]" also matches a literal comma;
# "[12]" is what's meant.  Harmless here because no data line starts with
# a comma, but worth fixing.
# NOTE(review): the grep()ed lines contain no header row, yet header = TRUE
# is passed below -- read.table will consume the first matched data line
# (midnight on 1 Feb 2007) as a header; confirm and consider header = FALSE.
message("Reading file contents (loading data only for the 2 days specified). PLEASE WAIT...")
powerDF <- read.table(text = grep("^[1,2]/2/2007"
                                  , readLines(myFile)
                                  , value = TRUE)
                      , col.names = c("Date", "Time", "Global_active_power"
                                      , "Global_reactive_power", "Voltage", "Global_intensity"
                                      , "Sub_metering_1", "Sub_metering_2", "Sub_metering_3")
                      , sep = ";"
                      , header = TRUE)
# Setting one display per container...
par(mfrow=c(1,1))
# Generating Plot 1: histogram of Global Active Power, saved to plot1.png...
doPlot <- function(x) {
    message("Generating plot...") # Message saying the plot is on its way...
    hist(x$Global_active_power
         , main = paste("Global Active Power")
         , col="red"
         , xlab="Global Active Power (kilowatts)") # Generating the graphic...
    message("Copying plot to PNG file format...") # More messaging...
    dev.copy(png, file="plot1.png", width=480, height=480) # Copying plot to PNG file format...
    dev.off() # Disconnecting from PNG output device...
    message("******************************************************************************")
    message("File Plot1.png saved in ", getwd()) # Message saying where I've placed the PNG file.
    message("******************************************************************************")
}
doPlot(powerDF)
|
##############################################
# This is some code from Regression III
# designed to provide a basic
# introduction to the R language.
#
# File created June 17, 2017
#
# File last updated June 17, 2017
###############################################
#
# Basics: OBJECTS
#
# Assign the number "6" to an object
# named "A":
A <- 6
# Now, whenever you refer to "A",
# you're referring to the number 6.
A # returns "6" in the window below.
# The square braces and [1] indicate
# that this is a single value / scalar
# (one row, one column)
A + 5 # returns "11" in the window below.
A^4 # returns "1296" (6x6x6x6).
"A" # prints "A". Using quotes means
# "take me literally; actually return
# whatever is in the quotes."
# If we want to get rid of A, we can remove
# it:
rm(A)
# Objects can have multiple elements.
# Here is the PSU women's volleyball
# team's overall winning percentages,
# 2005-2014:
PSUWVB<-c(31/34,32/35,34/36,38/38,38/38,
          32/37,25/32,33/36,34/36,36/39)
# The "c()" says "combine these values into
# a vector or list." Note the new object
# called "PSUWVB" in the "Environment" window.
#
# We can list this object:
PSUWVB
# We can do transformations on it:
PSUWVB * 100
# Note that we now have numbers in the
# square braces that indicate the position
# of each element (number) in the object;
# the first one is [1], the second is [2],
# etc. We can assign those transformations
# to other objects:
WinPct <- PSUWVB * 100
# We can also combine objects; usually, we
# do this by creating a "data frame."
# Think of a data frame as like an Excel
# spreadsheet.
#
# So, we might create another object that lists
# the years from 2005 to 2014, in order:
Year <- c(2005,2006,2007,2008,2009,
          2010,2011,2012,2013,2014)
Year
# Note that a faster way to do this is to
# use the "seq()" (for "sequence") command:
Year <- seq(2005,2014)
# We just "overwrote" the old "Year" object
# with the new one. This happens without
# warning, so be careful.
#
# Now we can combine these two objects into
# a single data frame, using the
# "data.frame" command:
VB <- data.frame(Year = Year,
                 WinProp = PSUWVB,
                 WinPct = WinPct)
# Note that there's now a new data frame
# called "VB" in the Environment window;
# it has 10 observations (rows) and
# three variables (columns).
#
# NOTE: If you want to "see" a data frame,
# you can simply click on it in the
# Environment window; this has the same
# effect as running "View()".
View(VB)
# There's lots more to say about objects, but
# that's enough for now.
#
###################################################
# READING IN DATA
#
# Most of the time, you won't want to enter
# data by hand. Instead, you will read data
# from something like a spreadsheet into
# Rstudio from some other source file. There
# are many ways to do this.
#
# RStudio can read data in many different
# formats, but the simplest is ".csv"
# ("comma-separated values"), which is
# simply an Excel-type spreadsheet that
# has been converted into plain text, with
# "cells" of the spreadsheet separated
# from each other by a comma.
#
# One way we can read a file is to have it
# "locally" on our own machine. The line at
# the top of the "Console" window below
# tells you where "local" is; at the moment,
# it's just "~/", the "root" of the drive.
# We can change this by clicking the "Session"
# menu option above, or by using "setwd". So,
# for example:
setwd("~/Dropbox (Personal)/PLSC 503/Data")
# Reading the data is then easy:
SCOTUS <- read.csv("SCOTUS-votes.csv")
# (Note that this command does not work unless
# you have the data stored "locally".)
#
# These data are on U.S. Supreme Court
# justices who have served since 1946.
# The first column is the justice's name,
# the second is the percentage of cases
# involving civil rights and liberties in
# which s/he voted in a pro-civil rights
# direction, and the third column records
# whether (=1) or not (=0) the justice
# was appointed by a Republican president.
#
# We can also read the same data directly
# from the web, using a URL for the file.
# This requires a bit more complexity.
# To do this, we first need to install an
# R "package" called "RCurl" that handles
# URLs.
install.packages("RCurl")
# Once we've installed the package (that
# is, put it on our machine) we then need
# to load it up for use:
library(RCurl)
# This is the part that actually gets
# the data:
url <- getURL("https://raw.githubusercontent.com/PrisonRodeo/GSERM-2017-git/master/Data/SCOTUS-votes.csv")
SCOTUS <- read.csv(text = url)
rm(url) # clean up
# From there, we can do things like "look
# at" the data:
View(SCOTUS)
# ...and summarize it:
summary(SCOTUS)
###############################################
# SINGLING OUT ELEMENTS:
#
# A data frame is rows and columns. We can
# extract rows, columns, or cells by specifying
# which one we need using the square-brace
# ("[]") notation. Watch what happens when we
# print SCOTUS:
SCOTUS
# The notation "FOO[R,C]" denotes the Rth
# row and the Cth column of the object called
# "FOO." So if we wanted to get Samuel Alito's
# row from the SCOTUS data, we could enter:
SCOTUS[24,]
# The fact that we left the second value
# blank means "give me all the columns
# from that row." Similarly, if we just
# wanted the column with the justices'
# voting percentages, we would enter:
SCOTUS[,2]
# which means "give me the values of
# all the rows from column 2 of SCOTUS."
# A single value would then just be:
SCOTUS[24,3]
# which tells us that Alito was appointed
# by a Republican. This is useful for a
# lot of things; for example, we can use
# it along with conditional statements to
# subset the data, like this:
GOPJustices <- SCOTUS[SCOTUS$GOP==1,]
# That means "Create an object called
# 'GOPJustices' that consists of all of
# the columns of SCOTUS, but that only
# includes the rows that represent GOP
# appointees." We can see that the command
# worked:
View(GOPJustices)
#############################################
# PLOTTING / GRAPHS
#
# R / RStudio is fantastic for visualizing and
# plotting / graphing data. This is a fast
# introduction to the most basic graphing
# command, called "plot."
#
# A (very) basic plot of the volleyball team's
# winning percentage by year looks like this:
plot(VB$Year,VB$WinPct,t="l")
# The plot appears at the lower right.
# Note that the dollar-sign "XXX$YYY"
# notation means "use the element YYY
# from object XXX." We can get around this
# in a few ways, the best probably being
# to use the "with()" command:
with(VB, plot(Year,WinPct,t="l"))
# This says "Use the object called 'VB'
# to draw a plot of the objects 'WinPct'
# and 'Year'." We can add a bunch of things
# to make it nicer looking:
with(VB, plot(Year,WinPct,t="l",lwd=2,col="navy",
              main="PSU Women's Volleyball Winning\nPercentages, 2005-2014",
              xlab="Season",ylab="Winning Percentage"))
# We can do a similar thing with the
# Supreme Court data:
with(SCOTUS, plot(CRPercent~GOP))
# This is a (bad) "scatterplot"; the values
# of CRPercent are vertical, and the values
# of GOP (either 1 for Republican appointees
# or 0 for Democratic ones) are horizontal.
#
#
#############################################
# LOOPS AND BASIC PROGRAMMING
#
# We'll do a lot of work with simulations
# in this course. That means repetition,
# and that means loops. The basic structure
# of a loop in R is:
#
# for (i in 1:N) {
#    <<activity>>
#     }
#
# This means "Take the activity, and repeat
# it N times." Note that you don't need to
# use "i" specifically as the index.
#
# A simple example: we'll generate 100
# "coin flips": draws of either heads (=1)
# or tails (=0) from a binary (bernoulli)
# distribution with P = 0.5:
set.seed(7222009) # set seed
N <- 100 # how many flips?
flips <- rbinom(N,1,0.5) # simulate
# (type "?rbinom" for details on this). We
# can summarize this by (e.g.) the number of
# "heads" out of 100:
sum(flips) # tally up heads
# Now, suppose we wanted to repeat our
# flip-100-coins simulation many (say, 1000)
# times, and each time we want to record
# the number of heads we get.
#
# First, we decide how many simulations to
# run, and create a "container" to put the
# head-counts in, with one bin per simulation:
NSims <- 1000 # N of simulations
counts <- numeric(NSims) # count storage
# Next, we write a loop that repeats the
# 100-flip simulation NSims times, and each
# time stores the count of "heads" as an
# element of "counts".  (seq_len(NSims) is
# the safe way to write 1:NSims -- it gives
# an empty sequence when NSims is zero.)
for(i in seq_len(NSims)) {
  flips <- rbinom(N,1,0.5) # simulate
  counts[i] <- sum(flips) # store count
}
# Now we can take a look at "counts":
summary(counts)
hist(counts)
plot(density(counts))
# Note that one can put loops inside of
# other loops. So, suppose I wanted to
# do the same simulation, but vary the
# number of coins I flipped (say, from
# 10 to 20 to 50 to 100 to 1000). I
# can do that all with a single set
# of nested loops:
NFlips <- c(10,20,50,100,1000) # N of flips
Sums <- data.frame(matrix(ncol=length(NFlips),
                          nrow=NSims))
for(j in seq_along(NFlips)) { # seq_along() = safe 1:length(NFlips)
  for(i in seq_len(NSims)) {
    flips <- rbinom(NFlips[j],1,0.5) # simulate
    Sums[i,j] <- sum(flips) # store count
  }
}
# We can then summarize the results:
summary(Sums)
# ...and we could plot the results, etc.
#
#
#############################################
# GETTING HELP
#
# R has extensive documentation, and a help
# function that is available at the command
# line / console. The basic help syntax uses
# a question mark:
?summary
# Note that this will bring up a help file
# on any command that is part of a currently-
# loaded package. If the package is not loaded,
# you'll get an error:
?read.dta
library(foreign)
?read.dta
# One can also do a omnibus search for help on
# a topic; this can be valuable if you know what
# you want to do, but don't know the package
# to do it with. That uses two question marks:
??spss
# The "??" command will work with almost anything:
??sunspots
??tuna
??Belgium
############################################
|
/Code/GSERM-2017-R-Intro.R
|
no_license
|
anhnguyendepocen/GSERM-2017-git
|
R
| false
| false
| 10,152
|
r
|
##############################################
# This is some code from Regression III
# designed to provide a basic
# introduction to the R language.
#
# File created June 17, 2017
#
# File last updated June 17, 2017
###############################################
#
# Basics: OBJECTS
#
# Assign the number "6" to an object
# named "A":
A <- 6
# Now, whenever you refer to "A",
# you're referring to the number 6.
A # returns "6" in the window below.
# The square braces and [1] indicate
# that this is a single value / scalar
# (one row, one column)
A + 5 # returns "11" in the window below.
A^4 # returns "1296" (6x6x6x6).
"A" # prints "A". Using quotes means
# "take me literally; actually return
# whatever is in the quotes."
# If we want to get rid of A, we can remove
# it:
rm(A)
# Objects can have multiple elements.
# Here is the PSU women's volleyball
# team's overall winning percentages,
# 2005-2014:
PSUWVB<-c(31/34,32/35,34/36,38/38,38/38,
          32/37,25/32,33/36,34/36,36/39)
# The "c()" says "combine these values into
# a vector or list." Note the new object
# called "PSUWVB" in the "Environment" window.
#
# We can list this object:
PSUWVB
# We can do transformations on it:
PSUWVB * 100
# Note that we now have numbers in the
# square braces that indicate the position
# of each element (number) in the object;
# the first one is [1], the second is [2],
# etc. We can assign those transformations
# to other objects:
WinPct <- PSUWVB * 100
# We can also combine objects; usually, we
# do this by creating a "data frame."
# Think of a data frame as like an Excel
# spreadsheet.
#
# So, we might create another object that lists
# the years from 2005 to 2014, in order:
Year <- c(2005,2006,2007,2008,2009,
          2010,2011,2012,2013,2014)
Year
# Note that a faster way to do this is to
# use the "seq()" (for "sequence") command:
Year <- seq(2005,2014)
# We just "overwrote" the old "Year" object
# with the new one. This happens without
# warning, so be careful.
#
# Now we can combine these two objects into
# a single data frame, using the
# "data.frame" command:
VB <- data.frame(Year = Year,
                 WinProp = PSUWVB,
                 WinPct = WinPct)
# Note that there's now a new data frame
# called "VB" in the Environment window;
# it has 10 observations (rows) and
# three variables (columns).
#
# NOTE: If you want to "see" a data frame,
# you can simply click on it in the
# Environment window; this has the same
# effect as running "View()".
View(VB)
# There's lots more to say about objects, but
# that's enough for now.
#
###################################################
# READING IN DATA
#
# Most of the time, you won't want to enter
# data by hand. Instead, you will read data
# from something like a spreadsheet into
# Rstudio from some other source file. There
# are many ways to do this.
#
# RStudio can read data in many different
# formats, but the simplest is ".csv"
# ("comma-separated values"), which is
# simply an Excel-type spreadsheet that
# has been converted into plain text, with
# "cells" of the spreadsheet separated
# from each other by a comma.
#
# One way we can read a file is to have it
# "locally" on our own machine. The line at
# the top of the "Console" window below
# tells you where "local" is; at the moment,
# it's just "~/", the "root" of the drive.
# We can change this by clicking the "Session"
# menu option above, or by using "setwd". So,
# for example:
setwd("~/Dropbox (Personal)/PLSC 503/Data")
# Reading the data is then easy:
SCOTUS <- read.csv("SCOTUS-votes.csv")
# (Note that this command does not work unless
# you have the data stored "locally".)
#
# These data are on U.S. Supreme Court
# justices who have served since 1946.
# The first column is the justice's name,
# the second is the percentage of cases
# involving civil rights and liberties in
# which s/he voted in a pro-civil rights
# direction, and the third column records
# whether (=1) or not (=0) the justice
# was appointed by a Republican president.
#
# We can also read the same data directly
# from the web, using a URL for the file.
# This requires a bit more complexity.
# To do this, we first need to install an
# R "package" called "RCurl" that handles
# URLs.
install.packages("RCurl")
# Once we've installed the package (that
# is, put it on our machine) we then need
# to load it up for use:
library(RCurl)
# This is the part that actually gets
# the data:
url <- getURL("https://raw.githubusercontent.com/PrisonRodeo/GSERM-2017-git/master/Data/SCOTUS-votes.csv")
SCOTUS <- read.csv(text = url)
rm(url) # clean up
# From there, we can do things like "look
# at" the data:
View(SCOTUS)
# ...and summarize it:
summary(SCOTUS)
###############################################
# SINGLING OUT ELEMENTS:
#
# A data frame is rows and columns. We can
# extract rows, columns, or cells by specifying
# which one we need using the square-brace
# ("[]") notation. Watch what happens when we
# print SCOTUS:
SCOTUS
# The notation "FOO[R,C]" denotes the Rth
# row and the Cth column of the object called
# "FOO." So if we wanted to get Samuel Alito's
# row from the SCOTUS data, we could enter:
SCOTUS[24,]
# The fact that we left the second value
# blank means "give me all the columns
# from that row." Similarly, if we just
# wanted the column with the justices'
# voting percentages, we would enter:
SCOTUS[,2]
# which means "give me the values of
# all the rows from column 2 of SCOTUS."
# A single value would then just be:
SCOTUS[24,3]
# which tells us that Alito was appointed
# by a Republican. This is useful for a
# lot of things; for example, we can use
# it along with conditional statements to
# subset the data, like this:
GOPJustices <- SCOTUS[SCOTUS$GOP==1,]
# That means "Create an object called
# 'GOPJustices' that consists of all of
# the columns of SCOTUS, but that only
# includes the rows that represent GOP
# appointees." We can see that the command
# worked:
View(GOPJustices)
#############################################
# PLOTTING / GRAPHS
#
# R / RStudio is fantastic for visualizing and
# plotting / graphing data. This is a fast
# introduction to the most basic graphing
# command, called "plot."
#
# A (very) basic plot of the volleyball team's
# winning percentage by year looks like this:
plot(VB$Year,VB$WinPct,t="l")
# The plot appears at the lower right.
# Note that the dollar-sign "XXX$YYY"
# notation means "use the element YYY
# from object XXX." We can get around this
# in a few ways, the best probably being
# to use the "with()" command:
with(VB, plot(Year,WinPct,t="l"))
# This says "Use the object called 'VB'
# to draw a plot of the objects 'WinPct'
# and 'Year'." We can add a bunch of things
# to make it nicer looking:
with(VB, plot(Year,WinPct,t="l",lwd=2,col="navy",
              main="PSU Women's Volleyball Winning\nPercentages, 2005-2014",
              xlab="Season",ylab="Winning Percentage"))
# We can do a similar thing with the
# Supreme Court data:
with(SCOTUS, plot(CRPercent~GOP))
# This is a (bad) "scatterplot"; the values
# of CRPercent are vertical, and the values
# of GOP (either 1 for Republican appointees
# or 0 for Democratic ones) are horizontal.
#
#
#############################################
# LOOPS AND BASIC PROGRAMMING
#
# We'll do a lot of work with simulations
# in this course. That means repetition,
# and that means loops. The basic structure
# of a loop in R is:
#
# for (i in 1:N) {
#    <<activity>>
#     }
#
# This means "Take the activity, and repeat
# it N times." Note that you don't need to
# use "i" specifically as the index.
#
# A simple example: we'll generate 100
# "coin flips": draws of either heads (=1)
# or tails (=0) from a binary (bernoulli)
# distribution with P = 0.5:
set.seed(7222009) # set seed
N <- 100 # how many flips?
flips <- rbinom(N,1,0.5) # simulate
# (type "?rbinom" for details on this). We
# can summarize this by (e.g.) the number of
# "heads" out of 100:
sum(flips) # tally up heads
# Now, suppose we wanted to repeat our
# flip-100-coins simulation many (say, 1000)
# times, and each time we want to record
# the number of heads we get.
#
# First, we decide how many simulations to
# run, and create a "container" to put the
# head-counts in, with one bin per simulation:
NSims <- 1000 # N of simulations
counts <- numeric(NSims) # count storage
# Next, we write a loop that repeats the
# 100-flip simulation NSims times, and each
# time stores the count of "heads" as an
# element of "counts".  (seq_len(NSims) is
# the safe way to write 1:NSims -- it gives
# an empty sequence when NSims is zero.)
for(i in seq_len(NSims)) {
  flips <- rbinom(N,1,0.5) # simulate
  counts[i] <- sum(flips) # store count
}
# Now we can take a look at "counts":
summary(counts)
hist(counts)
plot(density(counts))
# Note that one can put loops inside of
# other loops. So, suppose I wanted to
# do the same simulation, but vary the
# number of coins I flipped (say, from
# 10 to 20 to 50 to 100 to 1000). I
# can do that all with a single set
# of nested loops:
NFlips <- c(10,20,50,100,1000) # N of flips
Sums <- data.frame(matrix(ncol=length(NFlips),
                          nrow=NSims))
for(j in seq_along(NFlips)) { # seq_along() = safe 1:length(NFlips)
  for(i in seq_len(NSims)) {
    flips <- rbinom(NFlips[j],1,0.5) # simulate
    Sums[i,j] <- sum(flips) # store count
  }
}
# We can then summarize the results:
summary(Sums)
# ...and we could plot the results, etc.
#
#
#############################################
# GETTING HELP
#
# R has extensive documentation, and a help
# function that is available at the command
# line / console. The basic help syntax uses
# a question mark:
?summary
# Note that this will bring up a help file
# on any command that is part of a currently-
# loaded package. If the package is not loaded,
# you'll get an error:
?read.dta
library(foreign)
?read.dta
# One can also do a omnibus search for help on
# a topic; this can be valuable if you know what
# you want to do, but don't know the package
# to do it with. That uses two question marks:
??spss
# The "??" command will work with almost anything:
??sunspots
??tuna
??Belgium
############################################
|
# Load data-manipulation and plotting dependencies.  library() (rather than
# require()) errors immediately if a package is missing, instead of failing
# later with a confusing "could not find function" message.
library(dplyr)
library(tidyr)
library(ggplot2)
library(gridExtra)
library(randomForest)
# Helper plotting functions (plot_feature_IncMSE / plot_feature_IncNodePurity).
source("src/rf_grid2.R")
# Read the dataset from Azure ML Studio input port 1.
superconductivity_data <- maml.mapInputPort(1)
# Random-forest regression of critical temperature on all other columns;
# importance=TRUE is required for the variable-importance plots below.
model_rf <- randomForest(data = superconductivity_data,
                         critical_temp ~ .,
                         mtry = 5,
                         ntree = 500,
                         importance = TRUE)
# Close any graphics devices that are already open.
graphics.off()
# Create new plot with desired size
png("myplot.png",width=1000,height=800)
# Get rid of default rViz file
file.remove(Sys.glob("*rViz*png"))
plot1 <- plot_feature_IncMSE(model_rf)
plot2 <- plot_feature_IncNodePurity(model_rf)
# Stack both importance plots into the single PNG opened above.
grid.arrange(plot1,plot2,nrow=2,widths=c(200,200))
# FIX: close the png device so "myplot.png" is actually flushed to disk;
# the original script never called dev.off() after opening the device.
dev.off()
|
/Using_grid_arrange_in_azure.R
|
no_license
|
richinex/Azure-ML-
|
R
| false
| false
| 701
|
r
|
# Data-manipulation and plotting dependencies.
# NOTE(review): require() returns FALSE instead of erroring when a package
# is missing; library() would fail fast here.
require(dplyr)
require(tidyr)
require(ggplot2)
library(gridExtra)
# Helper plotting functions; plot_feature_IncMSE / plot_feature_IncNodePurity
# used below are expected to be defined in this file.
source("src/rf_grid2.R")
# Read the dataset from Azure ML Studio input port 1.
superconductivity_data <- maml.mapInputPort(1)
require(randomForest)
# Random-forest regression of critical temperature on all other columns;
# importance=TRUE is required for the variable-importance plots below.
model_rf <- randomForest(data = superconductivity_data,
                         critical_temp ~ .,
                         mtry = 5,
                         ntree = 500,
                         importance = TRUE)
# Close any graphics devices that are already open.
graphics.off()
# Create new plot with desired size
png("myplot.png",width=1000,height=800)
# Get rid of default rViz file
file.remove(Sys.glob("*rViz*png"))
plot1 <- plot_feature_IncMSE(model_rf)
plot2 <- plot_feature_IncNodePurity(model_rf)
# Stack both importance plots into the PNG opened above.
# NOTE(review): the png device is never closed with dev.off(), so
# "myplot.png" may be empty or truncated -- confirm whether the Azure ML
# runtime closes open devices on script exit.
grid.arrange(plot1,plot2,nrow=2,widths=c(200,200))
|
############################################################################################
#
# datagraber.R
# Grabs and cleans COVID-19 Data
#
############################################################################################
## LIBRARIES ####
library(tidyverse)
library(lubridate)
## VARIABLES AND PATHS ####
# Source API endpoint and the set of states we track.
covidtracking.path <- "https://covidtracking.com/api/v1/states/daily.csv"
ohana.states <- c("AR", "HI", "UT")
## READ IN DATA ####
indata <- read_csv(covidtracking.path)
## CLEAN DATA ####
# Parse dates, turn state into a factor, keep only the tracked states,
# and retain the columns used downstream.
cleandata <- indata %>%
  mutate(date = ymd(date)) %>%
  mutate(state = as.factor(state)) %>%
  filter(state %in% ohana.states) %>%
  select(state, date, positive, negative, recovered, death,
         hospitalized, totalTestResults, deathIncrease, positiveIncrease)
# Per-state snapshot built from the two most recent report days:
# max(positive) is today's cumulative count, min(positive) yesterday's.
slimtable <- cleandata %>%
  filter(date >= max(date) - 1) %>%
  select(state, date, positive, death, recovered) %>%
  group_by(state) %>%
  summarize(`Confirmed Cases` = max(positive),
            `Prior Day Cases` = min(positive),
            Deaths = max(death),
            Recoveries = max(recovered)) %>%
  mutate_if(is.double, as.integer)
## CLEAN UP ####
rm(indata, ohana.states, covidtracking.path)
|
/dailycovid/datagraber.R
|
no_license
|
seangyoung/ohanacovid
|
R
| false
| false
| 1,136
|
r
|
############################################################################################
#
# datagraber.R
# Grabs and cleans COVID-19 Data
#
############################################################################################
## LIBRARIES ####
library(tidyverse)
library(lubridate)
## VARIABLES AND PATHS ####
# Source API endpoint and the set of states we track.
covidtracking.path = "https://covidtracking.com/api/v1/states/daily.csv"
ohana.states = c("AR", "HI", "UT")
## READ IN DATA ####
indata = read_csv(covidtracking.path)
## CLEAN DATA ####
# Parse dates, turn state into a factor, keep only the tracked states,
# and retain the columns used downstream.
cleandata = indata %>%
  mutate(date = ymd(date),
         state = as.factor(state)) %>%
  filter(state %in% ohana.states) %>%
  select(state, date, positive, negative, recovered, death, hospitalized, totalTestResults, deathIncrease, positiveIncrease)
# Per-state snapshot built from the two most recent report days:
# max(positive) is today's cumulative count, min(positive) yesterday's.
slimtable = cleandata %>%
  filter(date >= max(date)-1) %>%
  select(state, date, positive, death, recovered) %>%
  group_by(state) %>%
  summarize(`Confirmed Cases` = max(positive), `Prior Day Cases` = min(positive), Deaths = max(death), Recoveries = max(recovered)) %>%
  mutate_if(is.double, as.integer)
## CLEAN UP ####
rm(indata, ohana.states, covidtracking.path)
|
# Plot the average number of children by parent age and sex, per country.
#
# Each country has two CSVs under DATA/children/ (<stem>_m.csv, <stem>_f.csv)
# with at least `age`, `children`, and `gender` columns. Countries whose read
# calls were disabled in the original script (Brazil, India, Iran, Kenya,
# Nigeria, Russian Federation, Zimbabwe) remain excluded here.
library(tidyverse)

# File stem -> display label, in the order the stacked rows should appear.
country_labels <- c(
  colombia      = "Colombia",
  england_wales = "England & Wales",
  france        = "France",
  germany       = "Germany",
  italy         = "Italy",
  malawi        = "Malawi",
  mexico        = "Mexico",
  peru          = "Peru",
  poland        = "Poland",
  south_africa  = "South Africa",
  spain         = "Spain",
  usa           = "USA"
)

# Read the male and female files for one country, tagging each row with the
# country label (male rows first, matching the original rbind() order).
read_country <- function(stem, label) {
  map_dfr(c("m", "f"), function(sex) {
    df <- read.csv(file.path("DATA", "children", paste0(stem, "_", sex, ".csv")))
    df$country <- label
    df
  })
}

# Stack all countries into one data frame.
data <- imap_dfr(country_labels, function(label, stem) read_country(stem, label))

# Normalise sex labels for display; values other than "female"/"male" pass through.
data$gender <- ifelse(data$gender == "female", "Female", data$gender)
data$gender <- ifelse(data$gender == "male", "Male", data$gender)

# Faceted overview: one panel per country, coloured by sex of parent.
p <- ggplot(data) +
  geom_line(aes(age, children, group = interaction(gender, country), col = gender)) +
  facet_wrap(~country, ncol=3) +
  theme_bw() +
  theme(legend.position="bottom",
        strip.background =element_rect(fill="white")) +
  xlab("Age") + ylab("Average number of children") + labs(col = "Sex of parent")
print(p)
ggsave("figures/num_children.pdf", p, width = 8, height = 8)

# NOTE(review): Brazil and Kenya are requested here but never loaded above
# (their read.csv calls are disabled), so this figure currently shows only
# England & Wales -- confirm whether those datasets should be re-enabled.
p1 <- ggplot(data %>% filter(country %in% c("England & Wales", "Brazil", "Kenya"))) +
  geom_line(aes(age, children, group = interaction(gender, country), col = gender)) +
  facet_wrap(~country, ncol=3) +
  theme_bw() +
  theme(legend.position="bottom",
        strip.background =element_rect(fill="white")) +
  xlab("Age") + ylab("Average number of children") + labs(col = "Sex of parent")
ggsave("figures/children_presentation.png", p1, width = 5, height = 3)
|
/script/plots_num_children_xy.R
|
no_license
|
ANXUNWXY179/project_wxy
|
R
| false
| false
| 5,670
|
r
|
# Plot the average number of children by parent age and sex, per country.
#
# Each country has two CSVs under DATA/children/ (<stem>_m.csv, <stem>_f.csv)
# with at least `age`, `children`, and `gender` columns. Countries whose read
# calls were disabled in the original script (Brazil, India, Iran, Kenya,
# Nigeria, Russian Federation, Zimbabwe) remain excluded here.
library(tidyverse)

# File stem -> display label, in the order the stacked rows should appear.
country_labels <- c(
  colombia      = "Colombia",
  england_wales = "England & Wales",
  france        = "France",
  germany       = "Germany",
  italy         = "Italy",
  malawi        = "Malawi",
  mexico        = "Mexico",
  peru          = "Peru",
  poland        = "Poland",
  south_africa  = "South Africa",
  spain         = "Spain",
  usa           = "USA"
)

# Read the male and female files for one country, tagging each row with the
# country label (male rows first, matching the original rbind() order).
read_country <- function(stem, label) {
  map_dfr(c("m", "f"), function(sex) {
    df <- read.csv(file.path("DATA", "children", paste0(stem, "_", sex, ".csv")))
    df$country <- label
    df
  })
}

# Stack all countries into one data frame.
data <- imap_dfr(country_labels, function(label, stem) read_country(stem, label))

# Normalise sex labels for display; values other than "female"/"male" pass through.
data$gender <- ifelse(data$gender == "female", "Female", data$gender)
data$gender <- ifelse(data$gender == "male", "Male", data$gender)

# Faceted overview: one panel per country, coloured by sex of parent.
p <- ggplot(data) +
  geom_line(aes(age, children, group = interaction(gender, country), col = gender)) +
  facet_wrap(~country, ncol=3) +
  theme_bw() +
  theme(legend.position="bottom",
        strip.background =element_rect(fill="white")) +
  xlab("Age") + ylab("Average number of children") + labs(col = "Sex of parent")
print(p)
ggsave("figures/num_children.pdf", p, width = 8, height = 8)

# NOTE(review): Brazil and Kenya are requested here but never loaded above
# (their read.csv calls are disabled), so this figure currently shows only
# England & Wales -- confirm whether those datasets should be re-enabled.
p1 <- ggplot(data %>% filter(country %in% c("England & Wales", "Brazil", "Kenya"))) +
  geom_line(aes(age, children, group = interaction(gender, country), col = gender)) +
  facet_wrap(~country, ncol=3) +
  theme_bw() +
  theme(legend.position="bottom",
        strip.background =element_rect(fill="white")) +
  xlab("Age") + ylab("Average number of children") + labs(col = "Sex of parent")
ggsave("figures/children_presentation.png", p1, width = 5, height = 3)
|
\name{inSide}
\alias{inSide}
\title{Are points inside a polygon?}
\usage{
inSide(bnd, x, y)
}
\arguments{
\item{bnd}{a polygon boundary (a list with elements x and
y)}
\item{x}{x coordinates of the points to test}
\item{y}{y coordinates of the points to test}
}
\value{
logical vector
}
\description{
This is Simon Wood's inSide from soap
}
|
/man/inSide.Rd
|
no_license
|
dill/msg
|
R
| false
| false
| 358
|
rd
|
\name{inSide}
\alias{inSide}
\title{Are points inside a polygon?}
\usage{
inSide(bnd, x, y)
}
\arguments{
\item{bnd}{a polygon boundary (a list with elements x and
y)}
\item{x}{x coordinates of the points to test}
\item{y}{y coordinates of the points to test}
}
\value{
logical vector
}
\description{
This is Simon Wood's inSide from soap
}
|
% Generated by roxygen2 (4.1.1): do not edit by hand
% Please edit documentation in R/predictions.R
\name{get_random}
\alias{get_random}
\title{Get coefficients for the random intercepts and random slopes.}
\usage{
get_random(model, cond = NULL, print.summary = getOption("itsadug_print"))
}
\arguments{
\item{model}{A gam object, produced by \code{\link[mgcv]{gam}} or
\code{\link[mgcv]{bam}}.}
\item{cond}{A named list of the values to restrict the estimates for the
random predictor terms. When NULL (default) all levels are returned.
Only relevant for complex interactions, which involve more than two
dimensions.}
\item{print.summary}{Logical: whether or not to print a summary of the
values selected for each predictor.
Default set to the print info messages option
(see \code{\link{infoMessages}}).}
}
\value{
The coefficients of the random intercepts
and slopes.
}
\description{
Get coefficients for the random intercepts and random slopes.
}
\examples{
data(simdat)
\dontrun{
# Condition as factor, to have a random intercept
# for illustration purposes:
simdat$Condition <- as.factor(simdat$Condition)
# Model with random effect and interactions:
m2 <- bam(Y ~ s(Time) + s(Trial)
+ ti(Time, Trial)
+ s(Condition, bs='re')
+ s(Time, Subject, bs='re'),
data=simdat)
# extract all random effects combined:
newd <- get_random(m2)
head(newd)
# extract coefficients for the random intercept for Condition:
# Make bar plot:
barplot(newd[[1]])
abline(h=0)
# or select:
get_random(m2, cond=list(Condition=c('2','3')))
}
}
\author{
Jacolien van Rij
}
\seealso{
Other functions for model predictions: \code{\link{get_coefs}};
\code{\link{get_difference}};
\code{\link{get_modelterm}};
\code{\link{get_predictions}};
\code{\link{inspect_random}}; \code{\link{plotsurface}}
}
|
/man/get_random.Rd
|
no_license
|
vr-vr/itsadug
|
R
| false
| false
| 1,789
|
rd
|
% Generated by roxygen2 (4.1.1): do not edit by hand
% Please edit documentation in R/predictions.R
\name{get_random}
\alias{get_random}
\title{Get coefficients for the random intercepts and random slopes.}
\usage{
get_random(model, cond = NULL, print.summary = getOption("itsadug_print"))
}
\arguments{
\item{model}{A gam object, produced by \code{\link[mgcv]{gam}} or
\code{\link[mgcv]{bam}}.}
\item{cond}{A named list of the values to restrict the estimates for the
random predictor terms. When NULL (default) all levels are returned.
Only relevant for complex interactions, which involve more than two
dimensions.}
\item{print.summary}{Logical: whether or not to print a summary of the
values selected for each predictor.
Default set to the print info messages option
(see \code{\link{infoMessages}}).}
}
\value{
The coefficients of the random intercepts
and slopes.
}
\description{
Get coefficients for the random intercepts and random slopes.
}
\examples{
data(simdat)
\dontrun{
# Condition as factor, to have a random intercept
# for illustration purposes:
simdat$Condition <- as.factor(simdat$Condition)
# Model with random effect and interactions:
m2 <- bam(Y ~ s(Time) + s(Trial)
+ ti(Time, Trial)
+ s(Condition, bs='re')
+ s(Time, Subject, bs='re'),
data=simdat)
# extract all random effects combined:
newd <- get_random(m2)
head(newd)
# extract coefficients for the random intercept for Condition:
# Make bar plot:
barplot(newd[[1]])
abline(h=0)
# or select:
get_random(m2, cond=list(Condition=c('2','3')))
}
}
\author{
Jacolien van Rij
}
\seealso{
Other functions for model predictions: \code{\link{get_coefs}};
\code{\link{get_difference}};
\code{\link{get_modelterm}};
\code{\link{get_predictions}};
\code{\link{inspect_random}}; \code{\link{plotsurface}}
}
|
% Generated by roxygen2 (4.1.1): do not edit by hand
% Please edit documentation in R/PASWR-package.R
\docType{data}
\name{HardWater}
\alias{HardWater}
\title{Mortality and Water Hardness}
\format{A data frame with 61 observations on the following 4 variables.
\describe{
\item{location}{a factor with levels \code{North}
\code{South} indicating whether the town is as north as Derby}
\item{town}{the name of the town}
\item{mortality}{averaged annual mortality per 100,000 males}
\item{hardness}{calcium concentration (in parts per million)}
}}
\source{
D. J. Hand, F. Daly, A. D. Lunn, K. J. McConway and E. Ostrowski
(1994) \emph{A Handbook of Small Datasets}. Chapman and Hall/CRC, London.
}
\description{
Mortality and drinking water hardness for 61 cities in England and Wales.
}
\details{
These data were collected in an investigation of environmental causes of
disease. They show the annual mortality rate per 100,000 for males,
averaged over the years 1958-1964, and the calcium concentration (in parts
per million) in the drinking water supply for 61 large towns in England and
Wales. (The higher the calcium concentration, the harder the water.)
}
\examples{
plot(mortality ~ hardness, data = HardWater)
}
\keyword{datasets}
|
/man/HardWater.Rd
|
no_license
|
anouel/PASWR
|
R
| false
| false
| 1,239
|
rd
|
% Generated by roxygen2 (4.1.1): do not edit by hand
% Please edit documentation in R/PASWR-package.R
\docType{data}
\name{HardWater}
\alias{HardWater}
\title{Mortality and Water Hardness}
\format{A data frame with 61 observations on the following 4 variables.
\describe{
\item{location}{a factor with levels \code{North}
\code{South} indicating whether the town is as north as Derby}
\item{town}{the name of the town}
\item{mortality}{averaged annual mortality per 100,000 males}
\item{hardness}{calcium concentration (in parts per million)}
}}
\source{
D. J. Hand, F. Daly, A. D. Lunn, K. J. McConway and E. Ostrowski
(1994) \emph{A Handbook of Small Datasets}. Chapman and Hall/CRC, London.
}
\description{
Mortality and drinking water hardness for 61 cities in England and Wales.
}
\details{
These data were collected in an investigation of environmental causes of
disease. They show the annual mortality rate per 100,000 for males,
averaged over the years 1958-1964, and the calcium concentration (in parts
per million) in the drinking water supply for 61 large towns in England and
Wales. (The higher the calcium concentration, the harder the water.)
}
\examples{
plot(mortality ~ hardness, data = HardWater)
}
\keyword{datasets}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/getSunlightPosition.R
\name{getSunlightPosition}
\alias{getSunlightPosition}
\title{Get Sunlight position}
\usage{
getSunlightPosition(date = NULL, lat = NULL, lon = NULL, data = NULL,
keep = c("altitude", "azimuth"))
}
\arguments{
\item{date}{: Single or multiple DateTime. Can be a \code{Date} (YYYY-MM-DD),
a \code{character} in UTC (YYYY-MM-DD HH:mm:ss) or a \code{POSIXct}}
\item{lat}{: \code{numeric}. Single latitude}
\item{lon}{: \code{numeric}. Single longitude}
\item{data}{: \code{data.frame}. Alternative to use \code{date}, \code{lat}, \code{lon} for passing multiple coordinates}
\item{keep}{: \code{character}. Vector of variables to keep. See \code{Details}}
}
\value{
\code{data.frame}
}
\description{
Get Sunlight position
}
\details{
Returns an object with the following properties:
\itemize{
\item{"altitude"}{ : sun altitude above the horizon in radians, e.g. 0 at the horizon and PI/2 at the zenith (straight over your head)}
\item{"azimuth"}{ : sun azimuth in radians (direction along the horizon, measured from south to west), e.g. 0 is south and pi * 3/4 is northwest}
}
}
\examples{
# one date
getSunlightPosition(date = Sys.Date(), lat = 50.1, lon = 1.83)
# in character
getSunlightPosition(date = c("2017-05-12", "2017-05-12 00:00:00"),
lat = 50.1, lon = 1.83)
# in POSIXct
getSunlightPosition(date = as.POSIXct("2017-05-12 00:00:00", tz = "UTC"),
lat = 50.1, lon = 1.83)
getSunlightPosition(date = as.POSIXct("2017-05-12 02:00:00", tz = "CET"),
lat = 50.1, lon = 1.83)
# multiple date + subset
getSunlightPosition(date = seq.Date(Sys.Date()-9, Sys.Date(), by = 1),
keep = c("altitude"),
lat = 50.1, lon = 1.83)
# multiple coordinates
data <- data.frame(date = seq.Date(Sys.Date()-9, Sys.Date(), by = 1),
lat = c(rep(50.1, 10), rep(49, 10)),
lon = c(rep(1.83, 10), rep(2, 10)))
getSunlightPosition(data = data,
keep = c("altitude", "azimuth"))
}
\seealso{
\link{getSunlightTimes}, \link{getMoonTimes}, \link{getMoonIllumination},
\link{getMoonPosition},\link{getSunlightPosition}
}
|
/man/getSunlightPosition.Rd
|
permissive
|
AchrafElmar/suncalc
|
R
| false
| true
| 2,157
|
rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/getSunlightPosition.R
\name{getSunlightPosition}
\alias{getSunlightPosition}
\title{Get Sunlight position}
\usage{
getSunlightPosition(date = NULL, lat = NULL, lon = NULL, data = NULL,
keep = c("altitude", "azimuth"))
}
\arguments{
\item{date}{: Single or multiple DateTime. Can be a \code{Date} (YYYY-MM-DD),
a \code{character} in UTC (YYYY-MM-DD HH:mm:ss) or a \code{POSIXct}}
\item{lat}{: \code{numeric}. Single latitude}
\item{lon}{: \code{numeric}. Single longitude}
\item{data}{: \code{data.frame}. Alternative to use \code{date}, \code{lat}, \code{lon} for passing multiple coordinates}
\item{keep}{: \code{character}. Vector of variables to keep. See \code{Details}}
}
\value{
\code{data.frame}
}
\description{
Get Sunlight position
}
\details{
Returns an object with the following properties:
\itemize{
\item{"altitude"}{ : sun altitude above the horizon in radians, e.g. 0 at the horizon and PI/2 at the zenith (straight over your head)}
\item{"azimuth"}{ : sun azimuth in radians (direction along the horizon, measured from south to west), e.g. 0 is south and pi * 3/4 is northwest}
}
}
\examples{
# one date
getSunlightPosition(date = Sys.Date(), lat = 50.1, lon = 1.83)
# in character
getSunlightPosition(date = c("2017-05-12", "2017-05-12 00:00:00"),
lat = 50.1, lon = 1.83)
# in POSIXct
getSunlightPosition(date = as.POSIXct("2017-05-12 00:00:00", tz = "UTC"),
lat = 50.1, lon = 1.83)
getSunlightPosition(date = as.POSIXct("2017-05-12 02:00:00", tz = "CET"),
lat = 50.1, lon = 1.83)
# multiple date + subset
getSunlightPosition(date = seq.Date(Sys.Date()-9, Sys.Date(), by = 1),
keep = c("altitude"),
lat = 50.1, lon = 1.83)
# multiple coordinates
data <- data.frame(date = seq.Date(Sys.Date()-9, Sys.Date(), by = 1),
lat = c(rep(50.1, 10), rep(49, 10)),
lon = c(rep(1.83, 10), rep(2, 10)))
getSunlightPosition(data = data,
keep = c("altitude", "azimuth"))
}
\seealso{
\link{getSunlightTimes}, \link{getMoonTimes}, \link{getMoonIllumination},
\link{getMoonPosition},\link{getSunlightPosition}
}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/datatools.R
\name{sample.clones}
\alias{sample.clones}
\title{Get a random subset from a data.frame.}
\usage{
sample.clones(.data, .n, .replace = T)
}
\arguments{
\item{.data}{Data.frame or a list with data.frames}
\item{.n}{Sample size if an integer. If within the bounds [0; 1], then the percentage of rows to extract. Note that "1" is interpreted as a percentage, not as one row!}
\item{.replace}{If TRUE, sample with replacement; otherwise, sample without replacement.}
}
\value{
Data.frame of nrow .n or a list with such data.frames.
}
\description{
Sample rows of the given data frame with replacement.
}
|
/fuzzedpackages/tcR/man/sample.clones.Rd
|
no_license
|
akhikolla/testpackages
|
R
| false
| true
| 616
|
rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/datatools.R
\name{sample.clones}
\alias{sample.clones}
\title{Get a random subset from a data.frame.}
\usage{
sample.clones(.data, .n, .replace = T)
}
\arguments{
\item{.data}{Data.frame or a list with data.frames}
\item{.n}{Sample size if an integer. If within the bounds [0; 1], then the percentage of rows to extract. Note that "1" is interpreted as a percentage, not as one row!}
\item{.replace}{If TRUE, sample with replacement; otherwise, sample without replacement.}
}
\value{
Data.frame of nrow .n or a list with such data.frames.
}
\description{
Sample rows of the given data frame with replacement.
}
|
# Women in East Africa
# Enterprise-Tanzania-model.R
# Bastiaan Quast
# bquast@gmail.com
# load data
# Loads the data frames used below: Tanzania_2006, Tanzania_2013 and
# Tanzania_2013t. Tanzania_2013t presumably is the 2013 sample merged with
# tariff data (the *_t formulas reference a Tariff column) -- TODO confirm.
load(file = 'data/Enterprise/Tanzania/Tanzania-Enterprise.RData')
# load libraries
library(ggplot2)
library(dplyr)
library(labelled)
library(broom)
# Naming key for the model formulas and result objects below:
# t06 = Tanzania 2006
# t13 = Tanzania 2013
# pr = female share of production
# np = female share of non production
# fo = female owner
# t = tariff
# (fs = female_ownership outcome; r1 = lm fit, r2 = quasibinomial logit GLM)
# define models
t06_pr <- formula(female_share_prod ~ international + capital_city + business_city + multi_establ + intern_certif)
t06_np <- formula(female_share_nonprod ~ international + capital_city + business_city + multi_establ + intern_certif)
t13_pr <- formula(female_share_prod ~ international + capital_city + business_city + multi_establ + intern_certif + eac_exporter + industry)
t13_np <- formula(female_share_nonprod ~ international + capital_city + business_city + multi_establ + intern_certif + eac_exporter + industry)
# The tariff specifications use log(x + 1) transforms on both the outcome
# share and the tariff regressor.
t13_pr_t <- formula(log(female_share_prod+1) ~ international + capital_city + business_city + multi_establ + intern_certif + eac_exporter + log(Tariff+1) )
t13_np_t <- formula(log(female_share_nonprod+1) ~ international + capital_city + business_city + multi_establ + intern_certif + eac_exporter + log(Tariff+1) )
t13_fo <- formula(female_owner ~ international + capital_city + business_city + multi_establ + intern_certif + eac_exporter + industry)
t13_fs <- formula(female_ownership ~ international + capital_city + business_city + multi_establ + intern_certif + eac_exporter + industry)
# estimate models
# Each specification is fitted twice: *_r1 via OLS (lm) and *_r2 via a
# quasibinomial GLM with logit link, whose fitted values stay within (0, 1).
t06_pr_r1 <- lm(t06_pr, data = Tanzania_2006)
t06_pr_r2 <- glm(t06_pr, data = Tanzania_2006, family = quasibinomial(link='logit') )
t06_np_r1 <- lm(t06_np, data = Tanzania_2006)
t06_np_r2 <- glm(t06_np, data = Tanzania_2006, family = quasibinomial(link='logit') )
t13_pr_r1 <- lm(t13_pr, data = Tanzania_2013)
t13_pr_r2 <- glm(t13_pr, data = Tanzania_2013, family = quasibinomial(link='logit') )
t13_np_r1 <- lm(t13_np, data = Tanzania_2013)
t13_np_r2 <- glm(t13_np, data = Tanzania_2013, family = quasibinomial(link='logit') )
t13_fo_r1 <- lm(t13_fo, data = Tanzania_2013)
t13_fo_r2 <- glm(t13_fo, data = Tanzania_2013, family = quasibinomial(link='logit') )
t13_fs_r1 <- lm(t13_fs, data = Tanzania_2013)
t13_fs_r2 <- glm(t13_fs, data = Tanzania_2013, family = quasibinomial(link='logit') )
t13_pr_t_r1 <- lm(t13_pr_t, data = Tanzania_2013t)
t13_pr_t_r2 <- glm(t13_pr_t, data = Tanzania_2013t, family = quasibinomial(link='logit') )
t13_np_t_r1 <- lm(t13_np_t, data = Tanzania_2013t)
t13_np_t_r2 <- glm(t13_np_t, data = Tanzania_2013t, family = quasibinomial(link='logit') )
# view output
# Trailing comments record which regressors looked significant in each fit.
summary(t06_pr_r1) # capital multi_est
summary(t06_pr_r2) # capital multi_est
summary(t06_np_r1)
summary(t06_np_r2)
summary(t13_pr_r1) # -manufacturing
summary(t13_pr_r2) # -manufacturing
summary(t13_np_r1) # international
summary(t13_np_r2) # international
summary(t13_pr_t_r1) # tariff
summary(t13_pr_t_r2) # tariff
summary(t13_np_t_r1) # international
summary(t13_np_t_r2) # international
summary(t13_fo_r1) # international business manufacturing
summary(t13_fo_r2) # international business manufacturing
summary(t13_fs_r1) # intern_certif manufacturing
summary(t13_fs_r2) # intern_certif manufacturing
|
/R/Enterprise-Tanzania-model.R
|
no_license
|
bquast/Women-Trade-EAC
|
R
| false
| false
| 3,273
|
r
|
# Women in East Africa
# Enterprise-Tanzania-model.R
# Bastiaan Quast
# bquast@gmail.com
# load data
# Loads the data frames used below: Tanzania_2006, Tanzania_2013 and
# Tanzania_2013t. Tanzania_2013t presumably is the 2013 sample merged with
# tariff data (the *_t formulas reference a Tariff column) -- TODO confirm.
load(file = 'data/Enterprise/Tanzania/Tanzania-Enterprise.RData')
# load libraries
library(ggplot2)
library(dplyr)
library(labelled)
library(broom)
# Naming key for the model formulas and result objects below:
# t06 = Tanzania 2006
# t13 = Tanzania 2013
# pr = female share of production
# np = female share of non production
# fo = female owner
# t = tariff
# (fs = female_ownership outcome; r1 = lm fit, r2 = quasibinomial logit GLM)
# define models
t06_pr <- formula(female_share_prod ~ international + capital_city + business_city + multi_establ + intern_certif)
t06_np <- formula(female_share_nonprod ~ international + capital_city + business_city + multi_establ + intern_certif)
t13_pr <- formula(female_share_prod ~ international + capital_city + business_city + multi_establ + intern_certif + eac_exporter + industry)
t13_np <- formula(female_share_nonprod ~ international + capital_city + business_city + multi_establ + intern_certif + eac_exporter + industry)
# The tariff specifications use log(x + 1) transforms on both the outcome
# share and the tariff regressor.
t13_pr_t <- formula(log(female_share_prod+1) ~ international + capital_city + business_city + multi_establ + intern_certif + eac_exporter + log(Tariff+1) )
t13_np_t <- formula(log(female_share_nonprod+1) ~ international + capital_city + business_city + multi_establ + intern_certif + eac_exporter + log(Tariff+1) )
t13_fo <- formula(female_owner ~ international + capital_city + business_city + multi_establ + intern_certif + eac_exporter + industry)
t13_fs <- formula(female_ownership ~ international + capital_city + business_city + multi_establ + intern_certif + eac_exporter + industry)
# estimate models
# Each specification is fitted twice: *_r1 via OLS (lm) and *_r2 via a
# quasibinomial GLM with logit link, whose fitted values stay within (0, 1).
t06_pr_r1 <- lm(t06_pr, data = Tanzania_2006)
t06_pr_r2 <- glm(t06_pr, data = Tanzania_2006, family = quasibinomial(link='logit') )
t06_np_r1 <- lm(t06_np, data = Tanzania_2006)
t06_np_r2 <- glm(t06_np, data = Tanzania_2006, family = quasibinomial(link='logit') )
t13_pr_r1 <- lm(t13_pr, data = Tanzania_2013)
t13_pr_r2 <- glm(t13_pr, data = Tanzania_2013, family = quasibinomial(link='logit') )
t13_np_r1 <- lm(t13_np, data = Tanzania_2013)
t13_np_r2 <- glm(t13_np, data = Tanzania_2013, family = quasibinomial(link='logit') )
t13_fo_r1 <- lm(t13_fo, data = Tanzania_2013)
t13_fo_r2 <- glm(t13_fo, data = Tanzania_2013, family = quasibinomial(link='logit') )
t13_fs_r1 <- lm(t13_fs, data = Tanzania_2013)
t13_fs_r2 <- glm(t13_fs, data = Tanzania_2013, family = quasibinomial(link='logit') )
t13_pr_t_r1 <- lm(t13_pr_t, data = Tanzania_2013t)
t13_pr_t_r2 <- glm(t13_pr_t, data = Tanzania_2013t, family = quasibinomial(link='logit') )
t13_np_t_r1 <- lm(t13_np_t, data = Tanzania_2013t)
t13_np_t_r2 <- glm(t13_np_t, data = Tanzania_2013t, family = quasibinomial(link='logit') )
# view output
# Trailing comments record which regressors looked significant in each fit.
summary(t06_pr_r1) # capital multi_est
summary(t06_pr_r2) # capital multi_est
summary(t06_np_r1)
summary(t06_np_r2)
summary(t13_pr_r1) # -manufacturing
summary(t13_pr_r2) # -manufacturing
summary(t13_np_r1) # international
summary(t13_np_r2) # international
summary(t13_pr_t_r1) # tariff
summary(t13_pr_t_r2) # tariff
summary(t13_np_t_r1) # international
summary(t13_np_t_r2) # international
summary(t13_fo_r1) # international business manufacturing
summary(t13_fo_r2) # international business manufacturing
summary(t13_fs_r1) # intern_certif manufacturing
summary(t13_fs_r2) # intern_certif manufacturing
|
#
phenoData_WCM=read.csv('/athena/masonlab/scratch/users/nai2008/PNET_thesisProject/_WCM_PNET_RNAseq_data/PNET_metadata.csv',header=TRUE)
table(phenoData_WCM$Sex)
# F M
# 9 12
phenoData_Chan=read.csv('/athena/masonlab/scratch/users/nai2008/PNET_thesisProject/_Chan_et_al_GSE118014_PNET_RNAseq_data/pheno_data_for_publication.csv',header=TRUE)
table(phenoData_Chan$Sex)
# Female Male
# 10 14
phenoData_Fadista=read.csv('/athena/masonlab/scratch/users/nai2008/PNET_thesisProject/_Fadista_GSE50244_control_RNAseq_data/pheno_data_for_publication.csv',header=TRUE)
table(phenoData_Fadista$Sex)
# Female Male
# 35 54
TFs=read.csv("/athena/masonlab/scratch/users/nai2008/Human_TFs_DatabaseExtract_v_1.01.csv")
TFs$Ensembl_ID=as.vector(TFs$Ensembl_ID)
nrow(TFs) # 2765
length(unique(TFs$HGNC_symbol)) # 2765
## Sanity checks on the transcription-factor (TF) reference table (TFs loaded earlier).
length(unique(TFs$Ensembl_ID)) # 2765
length(unique(TFs$EntrezGene.ID)) # 2656
## WCM (discovery) cohort: DESeq2 male-vs-female results. Bundle provides rr (results table).
load('/athena/masonlab/scratch/users/nai2008/PNET_thesisProject/RNAseq_analysis/rdas/Male_vs_Female_WCM_dataset_DESeq2_DEbySex_BUNDLE.rda')# # se, gse, tpm, ddsClean, rr, vsd
# Drop genes with NA adjusted p-values (DESeq2 independent filtering / outlier flagging).
rr=rr[-which(is.na(rr$padj)),]
nrow(rr) # 31579
# Genes differentially expressed (DE) by sex at FDR <= 0.1.
length(which(rr$padj<=0.1)) # 559
# How many tested genes are in the TF reference table.
m=match(rr$Ensembl,TFs$Ensembl_ID)
length(which(!is.na(m))) # 2734
# Direction: positive log2FC = higher in males; negative = higher in females.
length(which(rr$padj<=0.1 & rr$log2FoldChange > 0)) # overexpressed in males: 338
length(which(rr$padj<=0.1 & rr$log2FoldChange < 0)) # overexpressed in females: 221
## Chan (validation) cohort: same tallies. Note: no NA padj values here, so no filtering.
load('/athena/masonlab/scratch/users/nai2008/PNET_thesisProject/RNAseq_analysis/rdas/Male_vs_Female_Chan_dataset_DESeq2_DEbySex_BUNDLE.rda')
which(is.na(rr$padj)) # none
nrow(rr) # 33797
length(which(rr$padj<=0.1)) # 659
m=match(rr$Ensembl,TFs$Ensembl_ID)
length(which(!is.na(m))) # 2743
length(which(rr$padj<=0.1 & rr$log2FoldChange > 0)) # overexpressed in males: 396
length(which(rr$padj<=0.1 & rr$log2FoldChange < 0)) # overexpressed in females: 263
## Fadista (control, non-tumor) cohort: same tallies.
load('/athena/masonlab/scratch/users/nai2008/PNET_thesisProject/RNAseq_analysis/rdas/Male_vs_Female_Fadista_dataset_DESeq2_DEbySex_BUNDLE.rda') # se, gse, tpm, ddsClean, rr, vsd
rr=rr[-which(is.na(rr$padj)),]
nrow(rr) # 28753
length(which(rr$padj<=0.1)) # 173
m=match(rr$Ensembl,TFs$Ensembl_ID)
length(which(!is.na(m))) # 2694
length(which(rr$padj<=0.1 & rr$log2FoldChange > 0)) # overexpressed in males: 104
length(which(rr$padj<=0.1 & rr$log2FoldChange < 0)) # overexpressed in females: 69
## Load per-cohort DE-gene tables ('out' in each .rda) and check TF membership / uniqueness.
load('/athena/masonlab/scratch/users/nai2008/PNET_thesisProject/RNAseq_analysis/rdas/Male_vs_Female_WCM_dataset_DE_genes_bySex.rda')
DE_genes_WCM=out
nrow(DE_genes_WCM) # 559
length(which(DE_genes_WCM$TF==TRUE)) #55
DE_genes_WCM_TFsOnly=DE_genes_WCM[which(DE_genes_WCM$TF==TRUE),]
length(DE_genes_WCM$Ensembl) == length(unique(DE_genes_WCM$Ensembl)) #TRUE
load('/athena/masonlab/scratch/users/nai2008/PNET_thesisProject/RNAseq_analysis/rdas/Male_vs_Female_Chan_dataset_DE_genes_bySex.rda')
DE_genes_Chan=out
nrow(DE_genes_Chan) #659
length(which(DE_genes_Chan$TF==TRUE)) #65
DE_genes_Chan_TFsOnly=DE_genes_Chan[which(DE_genes_Chan$TF==TRUE),]
length(DE_genes_Chan$Ensembl) == length(unique(DE_genes_Chan$Ensembl)) #TRUE
load('/athena/masonlab/scratch/users/nai2008/PNET_thesisProject/RNAseq_analysis/rdas/Male_vs_Female_Fadista_dataset_DE_genes_bySex.rda')
DE_genes_Fadista=out
nrow(DE_genes_Fadista) # 173
length(which(DE_genes_Fadista$TF==TRUE)) # 15
DE_genes_Fadista_TFsOnly=DE_genes_Fadista[which(DE_genes_Fadista$TF==TRUE),]
length(DE_genes_Fadista$Ensembl) == length(unique(DE_genes_Fadista$Ensembl)) #TRUE
## Pairwise and three-way overlaps of DE gene lists (inputs to the Venn diagram below).
mm=match(DE_genes_WCM$Ensembl,DE_genes_Chan$Ensembl)
length(which(!is.na(mm))) # 56
mm=match(DE_genes_Chan$Ensembl,DE_genes_Fadista$Ensembl)
length(which(!is.na(mm))) # 28
mm=match(DE_genes_WCM$Ensembl,DE_genes_Fadista$Ensembl)
length(which(!is.na(mm))) # 30
length(Reduce(intersect, list(DE_genes_WCM$Ensembl,DE_genes_Chan$Ensembl,DE_genes_Fadista$Ensembl))) # 19
# make triple Venn Diagram
# Areas and overlaps are hard-coded from the counts computed above
# (559 WCM, 659 Chan, 173 control; pairwise 56/28/30; triple 19).
library(VennDiagram)
pdf('/athena/masonlab/scratch/users/nai2008/PNET_thesisProject/RNAseq_analysis/pdfs/Male_vs_Female_Chan_WCM_Fadista_datasets_TripleVennDiagram.pdf')
# overrideTriple allows draw.triple.venn to render area combinations it would
# otherwise reject as geometrically impossible to scale exactly.
overrideTriple=TRUE
# Page 1: diagram with category labels.
draw.triple.venn(area1=559, area2=659, area3=173, n12=56, n23=28, n13=30, n123=19, category =c('PNET\nDiscovery\nDataset','PNET\nValidation\nDataset','Control Dataset'),
rotation = 1, reverse = FALSE, euler.d =
TRUE, scaled = TRUE, lwd = rep(2, 3), lty =
rep("solid", 3), col = rep("black", 3), fill = c('red','blue','green'),
alpha = rep(0.5, 3), label.col = rep("black", 7), cex
= c(2,2,2,2,.8,2,2), fontface = rep("plain", 7), fontfamily =
rep("sans", 7), cat.pos = c(-40, 40, 180), cat.dist =
c(0.077, 0.077, 0.025), cat.col = rep("black", 3),
cat.cex = rep(1, 3), cat.fontface = rep("bold", 3),
cat.fontfamily = rep("sans", 3), cat.just =
list(c(0.5, 1), c(0.5, 1), c(0.5, 0)), cat.default.pos
= "outer", cat.prompts = FALSE, rotation.degree = 0,
rotation.centre = c(0.5, 0.5), ind = TRUE, sep.dist =
0.05, offset = 0, cex.prop = NULL, print.mode = "raw",
sigdigs = 3, direct.area = FALSE, area.vector = 0, margin =0.02)
grid.newpage()
overrideTriple=TRUE
# Page 2: same diagram without category labels (category = NA, cat.col white).
draw.triple.venn(area1=559, area2=659, area3=173, n12=56, n23=28, n13=30, n123=19, category = NA,
rotation = 1, reverse = FALSE, euler.d = TRUE, scaled = TRUE, lwd = rep(2, 3), lty =
rep("solid", 3), col = rep("black", 3), fill = c('red','blue','green'),
alpha = rep(0.5, 3), label.col = rep("black", 7), cex
= c(2,2,2,2,.8,2,2), fontface = rep("plain", 7), fontfamily =
rep("sans", 7), cat.pos = c(-40, 40, 180), cat.dist =
c(0.07, 0.07, 0.025), cat.col = rep("white", 3),
cat.cex = rep(1, 3), cat.fontface = rep("bold", 3),
cat.fontfamily = rep("sans", 3), cat.just =
list(c(0.5, 1), c(0.5, 1), c(0.5, 0)), cat.default.pos
= "outer", cat.prompts = FALSE, rotation.degree = 0,
rotation.centre = c(0.5, 0.5), ind = TRUE, sep.dist =
0.05, offset = 0, cex.prop = NULL, print.mode = "raw",
sigdigs = 3, direct.area = FALSE, area.vector = 0, margin =0.02)
dev.off()
# Make a table of genes DE by sex in both the validation and discovery PNET cohorts, and NOT DE by sex in controls
# Step 1: discovery (WCM) DE genes that replicate in the validation (Chan) cohort.
mm=match(DE_genes_WCM$Ensembl,DE_genes_Chan$Ensembl)
validated_genes=DE_genes_WCM[which(!is.na(mm)),1:3]
nrow(validated_genes) # 56
# Step 2: keep only those NOT DE by sex in the control (Fadista) cohort.
mm=match(validated_genes$Ensembl,DE_genes_Fadista$Ensembl)
validated_genes_not_DEbysex_in_controls = validated_genes[which(is.na(mm)),]
nrow(validated_genes_not_DEbysex_in_controls) # 37
# Annotate with discovery-cohort effect sizes and FDRs.
m=match(validated_genes_not_DEbysex_in_controls$Ensembl,DE_genes_WCM$Ensembl)
validated_genes_not_DEbysex_in_controls$log2FoldChange_discovery = DE_genes_WCM$log2FoldChange[m]
validated_genes_not_DEbysex_in_controls$FDR_discovery = DE_genes_WCM$FDR[m]
# Annotate with validation-cohort effect sizes and FDRs.
m=match(validated_genes_not_DEbysex_in_controls$Ensembl,DE_genes_Chan$Ensembl)
# BUGFIX: the validation log2FC was previously taken from DE_genes_WCM even though 'm'
# indexes DE_genes_Chan; pull it from the validation table (consistent with FDR_validation).
# Note: counts cached in comments below were produced by the old (buggy) code and may shift.
validated_genes_not_DEbysex_in_controls$log2FoldChange_validation = DE_genes_Chan$log2FoldChange[m]
validated_genes_not_DEbysex_in_controls$FDR_validation = DE_genes_Chan$FDR[m]
validated_genes_not_DEbysex_in_controls=validated_genes_not_DEbysex_in_controls[order(validated_genes_not_DEbysex_in_controls$log2FoldChange_discovery),]
# Keep only genes whose direction of effect (sign of log2FC) agrees between cohorts.
validated_genes_not_DEbysex_in_controls_concordantLFC_only=validated_genes_not_DEbysex_in_controls[which(sign(validated_genes_not_DEbysex_in_controls$log2FoldChange_discovery)==sign(validated_genes_not_DEbysex_in_controls$log2FoldChange_validation)),]
nrow(validated_genes_not_DEbysex_in_controls_concordantLFC_only) # 16
write.csv(validated_genes_not_DEbysex_in_controls_concordantLFC_only, file='/athena/masonlab/scratch/users/nai2008/PNET_thesisProject/RNAseq_analysis/data_tables/Male_vs_Female_validated_genes_DEbySex_in_PNETs_but_not_in_controls_concordantLFC_only.csv', row.names=FALSE)
## Overlaps b/w DE TFs
# Same replication logic, restricted to transcription factors.
mm=match(DE_genes_WCM_TFsOnly$Ensembl,DE_genes_Chan_TFsOnly$Ensembl)
validated_TFs=DE_genes_WCM_TFsOnly[which(!is.na(mm)),1:3]
nrow(validated_TFs) # 8
mm=match(validated_TFs$Ensembl,DE_genes_Fadista_TFsOnly$Ensembl)
validated_TFs_not_DEbysex_in_controls = validated_TFs[which(is.na(mm)),]
# BUGFIX: row() returns a matrix of row indices; nrow() gives the intended row count.
nrow(validated_TFs_not_DEbysex_in_controls) # 3
###### Chi Sq. tests
#### DE genes
# 2x2 contingency tables (cohort x DE/not-DE): is the fraction of sex-DE genes in each
# PNET cohort different from the control cohort?
# WCM (Discovery dataset) vs control dataset
WCM_vs_Control_DEG=matrix(NA,nrow=2,ncol=2)
rownames(WCM_vs_Control_DEG)=c('WCM','Control')
colnames(WCM_vs_Control_DEG)=c('DE','not_DE')
WCM_vs_Control_DEG[1,]=c(559,31579-559)
# BUGFIX: the control cohort has 28753 tested genes (nrow(rr) above), not 28735.
WCM_vs_Control_DEG[2,]=c(173,28753-173)
cs=chisq.test(WCM_vs_Control_DEG, correct = TRUE)
cs$p.value # 6.515072e-39 (cached value computed with the old 28735 total; will differ marginally)
# Chan (Validation dataset) vs control dataset
Chan_vs_Control_DEG=matrix(NA,nrow=2,ncol=2)
rownames(Chan_vs_Control_DEG)=c('Chan','Control')
colnames(Chan_vs_Control_DEG)=c('DE','not_DE')
Chan_vs_Control_DEG[1,]=c(659,33797-659)
# BUGFIX: 28753 tested genes in the control cohort (see above), not 28735.
Chan_vs_Control_DEG[2,]=c(173,28753-173)
cs=chisq.test(Chan_vs_Control_DEG, correct = TRUE)
cs$p.value # 1.955091e-48 (cached value computed with the old 28735 total)
#### Transcription Factors
# NOTE(review): totals below use 2765 (all TFs in the reference table), but only
# 2734 (WCM), 2743 (Chan) and 2694 (control) TFs were actually tested in each
# dataset -- confirm the intended denominators.
# WCM (Discovery dataset) vs control dataset
WCM_vs_Control_TFs=matrix(NA,nrow=2,ncol=2)
rownames(WCM_vs_Control_TFs)=c('WCM','Control')
colnames(WCM_vs_Control_TFs)=c('DE','not_DE')
WCM_vs_Control_TFs[1,]=c(55,2765-55)
WCM_vs_Control_TFs[2,]=c(15,2765-15)
cs=chisq.test(WCM_vs_Control_TFs, correct = TRUE)
cs$p.value # 2.716377e-06
# Chan (Validation dataset) vs control dataset
Chan_vs_Control_TFs=matrix(NA,nrow=2,ncol=2)
rownames(Chan_vs_Control_TFs)=c('Chan','Control')
colnames(Chan_vs_Control_TFs)=c('DE','not_DE')
Chan_vs_Control_TFs[1,]=c(65,2765-65)
Chan_vs_Control_TFs[2,]=c(15,2765-15)
cs=chisq.test(Chan_vs_Control_TFs, correct = TRUE)
cs$p.value # 3.420451e-08
|
/male_vs_female.R
|
no_license
|
nikolayaivanov/PNET_sex_dimorphisms
|
R
| false
| false
| 9,493
|
r
|
# Male-vs-female PNET differential-expression analysis across three cohorts:
# WCM (discovery), Chan GSE118014 (validation), Fadista GSE50244 (non-tumor control).
## Sample sex breakdown per cohort.
phenoData_WCM=read.csv('/athena/masonlab/scratch/users/nai2008/PNET_thesisProject/_WCM_PNET_RNAseq_data/PNET_metadata.csv',header=TRUE)
table(phenoData_WCM$Sex)
# F M
# 9 12
phenoData_Chan=read.csv('/athena/masonlab/scratch/users/nai2008/PNET_thesisProject/_Chan_et_al_GSE118014_PNET_RNAseq_data/pheno_data_for_publication.csv',header=TRUE)
table(phenoData_Chan$Sex)
# Female Male
# 10 14
phenoData_Fadista=read.csv('/athena/masonlab/scratch/users/nai2008/PNET_thesisProject/_Fadista_GSE50244_control_RNAseq_data/pheno_data_for_publication.csv',header=TRUE)
table(phenoData_Fadista$Sex)
# Female Male
# 35 54
## Human transcription-factor (TF) reference table.
TFs=read.csv("/athena/masonlab/scratch/users/nai2008/Human_TFs_DatabaseExtract_v_1.01.csv")
TFs$Ensembl_ID=as.vector(TFs$Ensembl_ID)
nrow(TFs) # 2765
length(unique(TFs$HGNC_symbol)) # 2765
length(unique(TFs$Ensembl_ID)) # 2765
length(unique(TFs$EntrezGene.ID)) # 2656
## WCM (discovery) cohort: DESeq2 male-vs-female results ('rr' is the results table).
load('/athena/masonlab/scratch/users/nai2008/PNET_thesisProject/RNAseq_analysis/rdas/Male_vs_Female_WCM_dataset_DESeq2_DEbySex_BUNDLE.rda')# # se, gse, tpm, ddsClean, rr, vsd
# Drop genes with NA adjusted p-values (DESeq2 independent filtering / outlier flagging).
rr=rr[-which(is.na(rr$padj)),]
nrow(rr) # 31579
length(which(rr$padj<=0.1)) # 559
m=match(rr$Ensembl,TFs$Ensembl_ID)
length(which(!is.na(m))) # 2734
# Positive log2FC = higher in males; negative = higher in females.
length(which(rr$padj<=0.1 & rr$log2FoldChange > 0)) # overexpressed in males: 338
length(which(rr$padj<=0.1 & rr$log2FoldChange < 0)) # overexpressed in females: 221
## Chan (validation) cohort: same tallies (no NA padj values here).
load('/athena/masonlab/scratch/users/nai2008/PNET_thesisProject/RNAseq_analysis/rdas/Male_vs_Female_Chan_dataset_DESeq2_DEbySex_BUNDLE.rda')
which(is.na(rr$padj)) # none
nrow(rr) # 33797
length(which(rr$padj<=0.1)) # 659
m=match(rr$Ensembl,TFs$Ensembl_ID)
length(which(!is.na(m))) # 2743
length(which(rr$padj<=0.1 & rr$log2FoldChange > 0)) # overexpressed in males: 396
length(which(rr$padj<=0.1 & rr$log2FoldChange < 0)) # overexpressed in females: 263
## Fadista (control) cohort: same tallies.
load('/athena/masonlab/scratch/users/nai2008/PNET_thesisProject/RNAseq_analysis/rdas/Male_vs_Female_Fadista_dataset_DESeq2_DEbySex_BUNDLE.rda') # se, gse, tpm, ddsClean, rr, vsd
rr=rr[-which(is.na(rr$padj)),]
nrow(rr) # 28753
length(which(rr$padj<=0.1)) # 173
m=match(rr$Ensembl,TFs$Ensembl_ID)
length(which(!is.na(m))) # 2694
length(which(rr$padj<=0.1 & rr$log2FoldChange > 0)) # overexpressed in males: 104
length(which(rr$padj<=0.1 & rr$log2FoldChange < 0)) # overexpressed in females: 69
## Per-cohort DE-gene tables ('out' in each .rda); check TF membership and uniqueness.
load('/athena/masonlab/scratch/users/nai2008/PNET_thesisProject/RNAseq_analysis/rdas/Male_vs_Female_WCM_dataset_DE_genes_bySex.rda')
DE_genes_WCM=out
nrow(DE_genes_WCM) # 559
length(which(DE_genes_WCM$TF==TRUE)) #55
DE_genes_WCM_TFsOnly=DE_genes_WCM[which(DE_genes_WCM$TF==TRUE),]
length(DE_genes_WCM$Ensembl) == length(unique(DE_genes_WCM$Ensembl)) #TRUE
load('/athena/masonlab/scratch/users/nai2008/PNET_thesisProject/RNAseq_analysis/rdas/Male_vs_Female_Chan_dataset_DE_genes_bySex.rda')
DE_genes_Chan=out
nrow(DE_genes_Chan) #659
length(which(DE_genes_Chan$TF==TRUE)) #65
DE_genes_Chan_TFsOnly=DE_genes_Chan[which(DE_genes_Chan$TF==TRUE),]
length(DE_genes_Chan$Ensembl) == length(unique(DE_genes_Chan$Ensembl)) #TRUE
load('/athena/masonlab/scratch/users/nai2008/PNET_thesisProject/RNAseq_analysis/rdas/Male_vs_Female_Fadista_dataset_DE_genes_bySex.rda')
DE_genes_Fadista=out
nrow(DE_genes_Fadista) # 173
length(which(DE_genes_Fadista$TF==TRUE)) # 15
DE_genes_Fadista_TFsOnly=DE_genes_Fadista[which(DE_genes_Fadista$TF==TRUE),]
length(DE_genes_Fadista$Ensembl) == length(unique(DE_genes_Fadista$Ensembl)) #TRUE
## Pairwise and three-way overlaps of DE gene lists (inputs to the Venn diagram below).
mm=match(DE_genes_WCM$Ensembl,DE_genes_Chan$Ensembl)
length(which(!is.na(mm))) # 56
mm=match(DE_genes_Chan$Ensembl,DE_genes_Fadista$Ensembl)
length(which(!is.na(mm))) # 28
mm=match(DE_genes_WCM$Ensembl,DE_genes_Fadista$Ensembl)
length(which(!is.na(mm))) # 30
length(Reduce(intersect, list(DE_genes_WCM$Ensembl,DE_genes_Chan$Ensembl,DE_genes_Fadista$Ensembl))) # 19
# make triple Venn Diagram
# Areas and overlaps are hard-coded from the counts computed above
# (559 WCM, 659 Chan, 173 control; pairwise 56/28/30; triple 19).
library(VennDiagram)
pdf('/athena/masonlab/scratch/users/nai2008/PNET_thesisProject/RNAseq_analysis/pdfs/Male_vs_Female_Chan_WCM_Fadista_datasets_TripleVennDiagram.pdf')
# overrideTriple allows draw.triple.venn to render area combinations it would
# otherwise reject as geometrically impossible to scale exactly.
overrideTriple=TRUE
# Page 1: diagram with category labels.
draw.triple.venn(area1=559, area2=659, area3=173, n12=56, n23=28, n13=30, n123=19, category =c('PNET\nDiscovery\nDataset','PNET\nValidation\nDataset','Control Dataset'),
rotation = 1, reverse = FALSE, euler.d =
TRUE, scaled = TRUE, lwd = rep(2, 3), lty =
rep("solid", 3), col = rep("black", 3), fill = c('red','blue','green'),
alpha = rep(0.5, 3), label.col = rep("black", 7), cex
= c(2,2,2,2,.8,2,2), fontface = rep("plain", 7), fontfamily =
rep("sans", 7), cat.pos = c(-40, 40, 180), cat.dist =
c(0.077, 0.077, 0.025), cat.col = rep("black", 3),
cat.cex = rep(1, 3), cat.fontface = rep("bold", 3),
cat.fontfamily = rep("sans", 3), cat.just =
list(c(0.5, 1), c(0.5, 1), c(0.5, 0)), cat.default.pos
= "outer", cat.prompts = FALSE, rotation.degree = 0,
rotation.centre = c(0.5, 0.5), ind = TRUE, sep.dist =
0.05, offset = 0, cex.prop = NULL, print.mode = "raw",
sigdigs = 3, direct.area = FALSE, area.vector = 0, margin =0.02)
grid.newpage()
overrideTriple=TRUE
# Page 2: same diagram without category labels (category = NA, cat.col white).
draw.triple.venn(area1=559, area2=659, area3=173, n12=56, n23=28, n13=30, n123=19, category = NA,
rotation = 1, reverse = FALSE, euler.d = TRUE, scaled = TRUE, lwd = rep(2, 3), lty =
rep("solid", 3), col = rep("black", 3), fill = c('red','blue','green'),
alpha = rep(0.5, 3), label.col = rep("black", 7), cex
= c(2,2,2,2,.8,2,2), fontface = rep("plain", 7), fontfamily =
rep("sans", 7), cat.pos = c(-40, 40, 180), cat.dist =
c(0.07, 0.07, 0.025), cat.col = rep("white", 3),
cat.cex = rep(1, 3), cat.fontface = rep("bold", 3),
cat.fontfamily = rep("sans", 3), cat.just =
list(c(0.5, 1), c(0.5, 1), c(0.5, 0)), cat.default.pos
= "outer", cat.prompts = FALSE, rotation.degree = 0,
rotation.centre = c(0.5, 0.5), ind = TRUE, sep.dist =
0.05, offset = 0, cex.prop = NULL, print.mode = "raw",
sigdigs = 3, direct.area = FALSE, area.vector = 0, margin =0.02)
dev.off()
# Make a table of genes DE by sex in both the validation and discovery PNET cohorts, and NOT DE by sex in controls
# Step 1: discovery (WCM) DE genes that replicate in the validation (Chan) cohort.
mm=match(DE_genes_WCM$Ensembl,DE_genes_Chan$Ensembl)
validated_genes=DE_genes_WCM[which(!is.na(mm)),1:3]
nrow(validated_genes) # 56
# Step 2: keep only those NOT DE by sex in the control (Fadista) cohort.
mm=match(validated_genes$Ensembl,DE_genes_Fadista$Ensembl)
validated_genes_not_DEbysex_in_controls = validated_genes[which(is.na(mm)),]
nrow(validated_genes_not_DEbysex_in_controls) # 37
# Annotate with discovery-cohort effect sizes and FDRs.
m=match(validated_genes_not_DEbysex_in_controls$Ensembl,DE_genes_WCM$Ensembl)
validated_genes_not_DEbysex_in_controls$log2FoldChange_discovery = DE_genes_WCM$log2FoldChange[m]
validated_genes_not_DEbysex_in_controls$FDR_discovery = DE_genes_WCM$FDR[m]
# Annotate with validation-cohort effect sizes and FDRs.
m=match(validated_genes_not_DEbysex_in_controls$Ensembl,DE_genes_Chan$Ensembl)
# BUGFIX: the validation log2FC was previously taken from DE_genes_WCM even though 'm'
# indexes DE_genes_Chan; pull it from the validation table (consistent with FDR_validation).
# Note: counts cached in comments below were produced by the old (buggy) code and may shift.
validated_genes_not_DEbysex_in_controls$log2FoldChange_validation = DE_genes_Chan$log2FoldChange[m]
validated_genes_not_DEbysex_in_controls$FDR_validation = DE_genes_Chan$FDR[m]
validated_genes_not_DEbysex_in_controls=validated_genes_not_DEbysex_in_controls[order(validated_genes_not_DEbysex_in_controls$log2FoldChange_discovery),]
# Keep only genes whose direction of effect (sign of log2FC) agrees between cohorts.
validated_genes_not_DEbysex_in_controls_concordantLFC_only=validated_genes_not_DEbysex_in_controls[which(sign(validated_genes_not_DEbysex_in_controls$log2FoldChange_discovery)==sign(validated_genes_not_DEbysex_in_controls$log2FoldChange_validation)),]
nrow(validated_genes_not_DEbysex_in_controls_concordantLFC_only) # 16
write.csv(validated_genes_not_DEbysex_in_controls_concordantLFC_only, file='/athena/masonlab/scratch/users/nai2008/PNET_thesisProject/RNAseq_analysis/data_tables/Male_vs_Female_validated_genes_DEbySex_in_PNETs_but_not_in_controls_concordantLFC_only.csv', row.names=FALSE)
## Overlaps b/w DE TFs
# Same replication logic, restricted to transcription factors.
mm=match(DE_genes_WCM_TFsOnly$Ensembl,DE_genes_Chan_TFsOnly$Ensembl)
validated_TFs=DE_genes_WCM_TFsOnly[which(!is.na(mm)),1:3]
nrow(validated_TFs) # 8
mm=match(validated_TFs$Ensembl,DE_genes_Fadista_TFsOnly$Ensembl)
validated_TFs_not_DEbysex_in_controls = validated_TFs[which(is.na(mm)),]
# BUGFIX: row() returns a matrix of row indices; nrow() gives the intended row count.
nrow(validated_TFs_not_DEbysex_in_controls) # 3
###### Chi Sq. tests
#### DE genes
# 2x2 contingency tables (cohort x DE/not-DE): is the fraction of sex-DE genes in each
# PNET cohort different from the control cohort?
# WCM (Discovery dataset) vs control dataset
WCM_vs_Control_DEG=matrix(NA,nrow=2,ncol=2)
rownames(WCM_vs_Control_DEG)=c('WCM','Control')
colnames(WCM_vs_Control_DEG)=c('DE','not_DE')
WCM_vs_Control_DEG[1,]=c(559,31579-559)
# BUGFIX: the control cohort has 28753 tested genes (nrow(rr) above), not 28735.
WCM_vs_Control_DEG[2,]=c(173,28753-173)
cs=chisq.test(WCM_vs_Control_DEG, correct = TRUE)
cs$p.value # 6.515072e-39 (cached value computed with the old 28735 total; will differ marginally)
# Chan (Validation dataset) vs control dataset
Chan_vs_Control_DEG=matrix(NA,nrow=2,ncol=2)
rownames(Chan_vs_Control_DEG)=c('Chan','Control')
colnames(Chan_vs_Control_DEG)=c('DE','not_DE')
Chan_vs_Control_DEG[1,]=c(659,33797-659)
# BUGFIX: 28753 tested genes in the control cohort (see above), not 28735.
Chan_vs_Control_DEG[2,]=c(173,28753-173)
cs=chisq.test(Chan_vs_Control_DEG, correct = TRUE)
cs$p.value # 1.955091e-48 (cached value computed with the old 28735 total)
#### Transcription Factors
# NOTE(review): totals below use 2765 (all TFs in the reference table), but only
# 2734 (WCM), 2743 (Chan) and 2694 (control) TFs were actually tested in each
# dataset -- confirm the intended denominators.
# WCM (Discovery dataset) vs control dataset
WCM_vs_Control_TFs=matrix(NA,nrow=2,ncol=2)
rownames(WCM_vs_Control_TFs)=c('WCM','Control')
colnames(WCM_vs_Control_TFs)=c('DE','not_DE')
WCM_vs_Control_TFs[1,]=c(55,2765-55)
WCM_vs_Control_TFs[2,]=c(15,2765-15)
cs=chisq.test(WCM_vs_Control_TFs, correct = TRUE)
cs$p.value # 2.716377e-06
# Chan (Validation dataset) vs control dataset
Chan_vs_Control_TFs=matrix(NA,nrow=2,ncol=2)
rownames(Chan_vs_Control_TFs)=c('Chan','Control')
colnames(Chan_vs_Control_TFs)=c('DE','not_DE')
Chan_vs_Control_TFs[1,]=c(65,2765-65)
Chan_vs_Control_TFs[2,]=c(15,2765-15)
cs=chisq.test(Chan_vs_Control_TFs, correct = TRUE)
cs$p.value # 3.420451e-08
|
# Auto-generated fuzz/regression harness (AFL + valgrind) for CNull's unexported
# C++ entry point: builds a fixed 10x3 numeric matrix (with extreme double values)
# and calls the function via do.call, printing the result structure.
testlist <- list(m = NULL, repetitions = 0L, in_m = structure(c(4.63168614785354e+77, 9.53818252170339e+295, 1.22810536108214e+146, 4.12396251261199e-221, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0), .Dim = c(10L, 3L)))
result <- do.call(CNull:::communities_individual_based_sampling_alpha,testlist)
str(result)
|
/CNull/inst/testfiles/communities_individual_based_sampling_alpha/AFL_communities_individual_based_sampling_alpha/communities_individual_based_sampling_alpha_valgrind_files/1615777602-test.R
|
no_license
|
akhikolla/updatedatatype-list2
|
R
| false
| false
| 348
|
r
|
# Auto-generated fuzz/regression harness (AFL + valgrind) for CNull's unexported
# C++ entry point: builds a fixed 10x3 numeric matrix (with extreme double values)
# and calls the function via do.call, printing the result structure.
testlist <- list(m = NULL, repetitions = 0L, in_m = structure(c(4.63168614785354e+77, 9.53818252170339e+295, 1.22810536108214e+146, 4.12396251261199e-221, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0), .Dim = c(10L, 3L)))
result <- do.call(CNull:::communities_individual_based_sampling_alpha,testlist)
str(result)
|
# This file cleans the compustat file
# NOTE(review): rm(list=ls()) and setwd() in a script are discouraged -- they wipe the
# caller's workspace and change global state; kept as-is to preserve behavior.
rm(list=ls())
library(plyr)
library(XML)
library(xml2)
library(RCurl)
library(dplyr)
library(pryr)
library(stringr)
library(data.table)
library(gtools)
library(lubridate)
setwd("~/Networks/Analysis")
# First observation date per issuer (6-digit CUSIP) -- used later to compute firm age.
compustat <- read.csv("compustat_for_age.csv")
compustat$cusip6 <- substr(compustat$cusip, 1,6)
compustat_firstdate <- aggregate( compustat[c("datadate")], compustat[c("cusip6")], FUN=min )
names(compustat_firstdate) <- c("cusip6","first_date")
# Load one quarterly extract at a time (uncomment the desired period).
# compustat <- read.csv("compustat_q_1999.csv")
compustat <- read.csv("compustat_q_2015.csv")
# compustat <- read.csv("compustat_q_2000_2007.csv")
# compustat <- read.csv("compustat_q_2008_2014.csv")
# Select the variables
compustat_short = compustat[c("dlttq", "dlcq", "seqq","mkvaltq",
"cshoq", "prccq", "ceqq", "xrdq", "revtq", "cogsq", "xsgaq",
"atq","niq","txditcq", "datadate", "cusip")]
compustat_short = as.data.table( compustat_short )
# saveRDS(compustat_short, file="compustat_short_1999.rds")
# saveRDS(compustat_short, file="compustat_short_2000_2007.rds")
# saveRDS(compustat_short, file="compustat_short_2008_2014.rds")
# saveRDS(compustat_short, file="compustat_short_2015.rds")
# load("compustat_short_2000_2014")
compustat = compustat_short
rm(compustat_short)
# Attach first-observation date and derive firm age in (calendar) years.
compustat$cusip6 <- substr(compustat$cusip, 1,6)
compustat <- merge(compustat, compustat_firstdate, by=("cusip6"))
compustat$date <- as.Date( as.character(compustat$datadate),"%Y%m%d")
compustat$first_date <- as.Date(as.character(compustat$first_date),"%Y%m%d")
compustat$age = year(compustat$date) - year(compustat$first_date)
# save(compustat, file= "compustat_q_2015_w_age")
## Construct firm-level financial ratios.
# Leverage = (long-term debt + debt in current liabilities) / stockholders' equity.
compustat$leverage = (compustat$dlttq+compustat$dlcq)/compustat$seqq
# Market value: use reported mkvaltq when available, otherwise shares outstanding * price.
# Vectorized ifelse replaces the previous per-row lapply/do.call(rbind, ...) loop that
# read globals; same values, returned as a plain vector instead of an n x 1 matrix.
compustat$market.value.mln = ifelse( is.na(compustat$mkvaltq),
  compustat$cshoq*compustat$prccq, compustat$mkvaltq )
compustat$size = compustat$market.value.mln
# Market-to-book = market value / common equity.
compustat$mtb = compustat$market.value.mln/compustat$ceqq
# If XRD (R&D) expenses are NA, substitute them with 0
compustat$xrdq[is.na(compustat$xrdq)] <- 0
# Operating profit = revenue - COGS - SG&A excluding R&D.
compustat$oper_profit = compustat$revtq - compustat$cogsq - (compustat$xsgaq - compustat$xrdq)
# BUGFIX: ROA is net income / total assets; the original computed atq/niq (inverted ratio).
compustat$roa = compustat$niq/compustat$atq
compustat$tobins_q = (compustat$size+compustat$dlttq+compustat$dlcq +compustat$txditcq)/compustat$atq
compustat$asset_turnover = compustat$revtq/compustat$atq
# NOTE(review): named rd_to_assets but assigns raw xrdq (not divided by atq) -- confirm intent.
compustat$rd_to_assets = compustat$xrdq
compustat$period = as.Date(as.character(compustat$datadate),"%Y%m%d")
# saveRDS(compustat, file="compustat_short_1999.rds")
# saveRDS(compustat, file="compustat_short_2000_2007.rds")
# saveRDS(compustat, file="compustat_short_2008_2014.rds")
# saveRDS(compustat, file="compustat_short_2015.rds")
# Bind all those files together
# Stack the previously saved per-period extracts into one 1999-2015 panel.
compustat = data.table( )
c_1999 = readRDS( "compustat_short_1999.rds" )
compustat = rbind( compustat, c_1999 )
c_00_07 = readRDS( "compustat_short_2000_2007.rds" )
compustat = rbind( compustat, c_00_07 )
c_08_14 = readRDS( "compustat_short_2008_2014.rds" )
compustat = rbind( compustat, c_08_14 )
c_2015 = readRDS( "compustat_short_2015.rds" )
compustat = rbind( compustat, c_2015 )
saveRDS(compustat, file="compustat_short_1999_2015.rds")
save(compustat, file="compustat_short_2015")
# Append the 2000-2014 panel to the current object and re-save.
# NOTE(review): 'c' here shadows base::c() until overwritten -- works, but fragile.
load("compustat_short_2000_2014")
c=compustat
# load("compustat_short_2000_2007")
compustat= rbind(compustat,c)
save(compustat, file="compustat_short_2000_2014")
## Second pass: rebuild the ratios on the age-augmented 2015 panel.
load("compustat_q_2015_w_age")
compustat$leverage = (compustat$dlttq+compustat$dlcq)/compustat$seqq
# BUGFIX + vectorization: the original row loop read compustat$mkvalt[i], a column
# name that only resolves via '$' partial matching (and not at all on a data.table);
# use mkvaltq explicitly and a vectorized ifelse instead of the i-loop.
compustat$market.value.mln = ifelse( is.na(compustat$mkvaltq),
  compustat$cshoq*compustat$prccq, compustat$mkvaltq )
compustat$size = compustat$market.value.mln
compustat$mtb = compustat$market.value.mln/compustat$ceqq
# If XRD (R&D) expenses are NA, substitute them with 0
compustat$xrdq[is.na(compustat$xrdq)] <- 0
compustat$oper_profit = compustat$revtq - compustat$cogsq - (compustat$xsgaq - compustat$xrdq)
# BUGFIX: ROA = net income / total assets (was inverted as atq/niq).
compustat$roa = compustat$niq/compustat$atq
compustat$tobins_q = (compustat$size+compustat$dlttq+compustat$dlcq +compustat$txditcq)/compustat$atq
# BUGFIX: saleq was never selected into compustat_short (revtq was); use revtq,
# consistent with the first-pass asset_turnover computation above.
compustat$asset_turnover = compustat$revtq/compustat$atq
# NOTE(review): raw xrdq, not scaled by assets despite the name -- confirm intent.
compustat$rd_to_assets = compustat$xrdq
compustat$period = as.Date(as.character(compustat$datadate),"%Y%m%d")
save(compustat, file="compustat_short_2015_w_age")
|
/clean_compustat.R
|
no_license
|
AnnaNakhmurina/Networks
|
R
| false
| false
| 4,881
|
r
|
# This file cleans the compustat file
# NOTE(review): rm(list=ls()) and setwd() in a script are discouraged -- they wipe the
# caller's workspace and change global state; kept as-is to preserve behavior.
rm(list=ls())
library(plyr)
library(XML)
library(xml2)
library(RCurl)
library(dplyr)
library(pryr)
library(stringr)
library(data.table)
library(gtools)
library(lubridate)
setwd("~/Networks/Analysis")
# First observation date per issuer (6-digit CUSIP) -- used later to compute firm age.
compustat <- read.csv("compustat_for_age.csv")
compustat$cusip6 <- substr(compustat$cusip, 1,6)
compustat_firstdate <- aggregate( compustat[c("datadate")], compustat[c("cusip6")], FUN=min )
names(compustat_firstdate) <- c("cusip6","first_date")
# Load one quarterly extract at a time (uncomment the desired period).
# compustat <- read.csv("compustat_q_1999.csv")
compustat <- read.csv("compustat_q_2015.csv")
# compustat <- read.csv("compustat_q_2000_2007.csv")
# compustat <- read.csv("compustat_q_2008_2014.csv")
# Select the variables
compustat_short = compustat[c("dlttq", "dlcq", "seqq","mkvaltq",
"cshoq", "prccq", "ceqq", "xrdq", "revtq", "cogsq", "xsgaq",
"atq","niq","txditcq", "datadate", "cusip")]
compustat_short = as.data.table( compustat_short )
# saveRDS(compustat_short, file="compustat_short_1999.rds")
# saveRDS(compustat_short, file="compustat_short_2000_2007.rds")
# saveRDS(compustat_short, file="compustat_short_2008_2014.rds")
# saveRDS(compustat_short, file="compustat_short_2015.rds")
# load("compustat_short_2000_2014")
compustat = compustat_short
rm(compustat_short)
# Attach first-observation date and derive firm age in (calendar) years.
compustat$cusip6 <- substr(compustat$cusip, 1,6)
compustat <- merge(compustat, compustat_firstdate, by=("cusip6"))
compustat$date <- as.Date( as.character(compustat$datadate),"%Y%m%d")
compustat$first_date <- as.Date(as.character(compustat$first_date),"%Y%m%d")
compustat$age = year(compustat$date) - year(compustat$first_date)
# save(compustat, file= "compustat_q_2015_w_age")
## Construct firm-level financial ratios.
# Leverage = (long-term debt + debt in current liabilities) / stockholders' equity.
compustat$leverage = (compustat$dlttq+compustat$dlcq)/compustat$seqq
# Market value: use reported mkvaltq when available, otherwise shares outstanding * price.
# Vectorized ifelse replaces the previous per-row lapply/do.call(rbind, ...) loop that
# read globals; same values, returned as a plain vector instead of an n x 1 matrix.
compustat$market.value.mln = ifelse( is.na(compustat$mkvaltq),
  compustat$cshoq*compustat$prccq, compustat$mkvaltq )
compustat$size = compustat$market.value.mln
# Market-to-book = market value / common equity.
compustat$mtb = compustat$market.value.mln/compustat$ceqq
# If XRD (R&D) expenses are NA, substitute them with 0
compustat$xrdq[is.na(compustat$xrdq)] <- 0
# Operating profit = revenue - COGS - SG&A excluding R&D.
compustat$oper_profit = compustat$revtq - compustat$cogsq - (compustat$xsgaq - compustat$xrdq)
# BUGFIX: ROA is net income / total assets; the original computed atq/niq (inverted ratio).
compustat$roa = compustat$niq/compustat$atq
compustat$tobins_q = (compustat$size+compustat$dlttq+compustat$dlcq +compustat$txditcq)/compustat$atq
compustat$asset_turnover = compustat$revtq/compustat$atq
# NOTE(review): named rd_to_assets but assigns raw xrdq (not divided by atq) -- confirm intent.
compustat$rd_to_assets = compustat$xrdq
compustat$period = as.Date(as.character(compustat$datadate),"%Y%m%d")
# saveRDS(compustat, file="compustat_short_1999.rds")
# saveRDS(compustat, file="compustat_short_2000_2007.rds")
# saveRDS(compustat, file="compustat_short_2008_2014.rds")
# saveRDS(compustat, file="compustat_short_2015.rds")
# Bind all those files together
# Stack the previously saved per-period extracts into one 1999-2015 panel.
compustat = data.table( )
c_1999 = readRDS( "compustat_short_1999.rds" )
compustat = rbind( compustat, c_1999 )
c_00_07 = readRDS( "compustat_short_2000_2007.rds" )
compustat = rbind( compustat, c_00_07 )
c_08_14 = readRDS( "compustat_short_2008_2014.rds" )
compustat = rbind( compustat, c_08_14 )
c_2015 = readRDS( "compustat_short_2015.rds" )
compustat = rbind( compustat, c_2015 )
saveRDS(compustat, file="compustat_short_1999_2015.rds")
save(compustat, file="compustat_short_2015")
# Append the 2000-2014 panel to the current object and re-save.
# NOTE(review): 'c' here shadows base::c() until overwritten -- works, but fragile.
load("compustat_short_2000_2014")
c=compustat
# load("compustat_short_2000_2007")
compustat= rbind(compustat,c)
save(compustat, file="compustat_short_2000_2014")
## Second pass: rebuild the ratios on the age-augmented 2015 panel.
load("compustat_q_2015_w_age")
compustat$leverage = (compustat$dlttq+compustat$dlcq)/compustat$seqq
# BUGFIX + vectorization: the original row loop read compustat$mkvalt[i], a column
# name that only resolves via '$' partial matching (and not at all on a data.table);
# use mkvaltq explicitly and a vectorized ifelse instead of the i-loop.
compustat$market.value.mln = ifelse( is.na(compustat$mkvaltq),
  compustat$cshoq*compustat$prccq, compustat$mkvaltq )
compustat$size = compustat$market.value.mln
compustat$mtb = compustat$market.value.mln/compustat$ceqq
# If XRD (R&D) expenses are NA, substitute them with 0
compustat$xrdq[is.na(compustat$xrdq)] <- 0
compustat$oper_profit = compustat$revtq - compustat$cogsq - (compustat$xsgaq - compustat$xrdq)
# BUGFIX: ROA = net income / total assets (was inverted as atq/niq).
compustat$roa = compustat$niq/compustat$atq
compustat$tobins_q = (compustat$size+compustat$dlttq+compustat$dlcq +compustat$txditcq)/compustat$atq
# BUGFIX: saleq was never selected into compustat_short (revtq was); use revtq,
# consistent with the first-pass asset_turnover computation above.
compustat$asset_turnover = compustat$revtq/compustat$atq
# NOTE(review): raw xrdq, not scaled by assets despite the name -- confirm intent.
compustat$rd_to_assets = compustat$xrdq
compustat$period = as.Date(as.character(compustat$datadate),"%Y%m%d")
save(compustat, file="compustat_short_2015_w_age")
|
# Shiny UI for the "Overfitting" demo app: three simulation parameters on the left,
# a plot plus a validation step on the right.
library(shiny)
library(shinythemes)
fluidPage(
  theme = shinytheme("flatly"),
  titlePanel("Overfitting"),
  fluidRow(
    # Left column: simulation controls.
    column(3, wellPanel(
      sliderInput("n", "Number of Samples:", min = 5, max = 500, value = 5 ,
                  step = 10),
      sliderInput("p", "True Population Correlation:", min = -1, max = 1 , value = 0,
                  step = 0.01),
      sliderInput("k", "The Number of Variables:", min = 1, max = 100 , value = 3 ,
                  step = 1),
      actionButton("goButton", "Click here to Plot"),
      br(),
      br(),
      actionButton("goButton2", "Click here to Validate")
    )),
    # Right column: main plot rendered by the server as output$plot1.
    column(6,offset=1, align="center",
           plotOutput("plot1", width = 640, height = 480)
    ),
    # Absolutely-positioned text area below the plot (server output$text).
    div(style = "position:absolute;bottom: 4em;left:42em;",
        htmlOutput("text"))
  )
)
|
/Overfit-Improved/ui.R
|
no_license
|
jefeng/Overfitting
|
R
| false
| false
| 860
|
r
|
# Shiny UI for the "Overfitting" demo app: three simulation parameters on the left,
# a plot plus a validation step on the right.
library(shiny)
library(shinythemes)
fluidPage(
  theme = shinytheme("flatly"),
  titlePanel("Overfitting"),
  fluidRow(
    # Left column: simulation controls.
    column(3, wellPanel(
      sliderInput("n", "Number of Samples:", min = 5, max = 500, value = 5 ,
                  step = 10),
      sliderInput("p", "True Population Correlation:", min = -1, max = 1 , value = 0,
                  step = 0.01),
      sliderInput("k", "The Number of Variables:", min = 1, max = 100 , value = 3 ,
                  step = 1),
      actionButton("goButton", "Click here to Plot"),
      br(),
      br(),
      actionButton("goButton2", "Click here to Validate")
    )),
    # Right column: main plot rendered by the server as output$plot1.
    column(6,offset=1, align="center",
           plotOutput("plot1", width = 640, height = 480)
    ),
    # Absolutely-positioned text area below the plot (server output$text).
    div(style = "position:absolute;bottom: 4em;left:42em;",
        htmlOutput("text"))
  )
)
|
#PAGE=455
# Schaum's "Theory and Problems of Statistics", Ch. 19 exercise: index numbers for two
# commodities between base year 1980 and given year 1986.
# NOTE(review): 'a', 'b', 'c', 'd' are repeatedly reused as scalars; in particular 'c'
# shadows base::c() after line "c=(...)" -- works here because all c(...) calls occur first.
y1=1980
y2=1986
# Prices (p0* = base year, pn* = given year; commodity 1 price scaled by 10) and quantities.
p01=163.7*10
pn1=125
p02=143
pn2=116
q01=2.427
qn1=2.931
q02=13.947
qn2=13.285
p0=c(p01,p02)
pn=c(pn1,pn2)
q0=c(q01,q02)
qn=c(qn1,qn2)
# a: simple average of quantity relatives (as a percentage, rounded).
a=(qn1/q01+qn2/q02)/2
a=a*100
a=round(a,digits = 0)
# b: Laspeyres-type quantity index (base-year price weights).
b=(qn1*p01+qn2*p02)/(q01*p01+q02*p02)
b=b*100
b=round(b,digits = 1)
# c: Paasche-type quantity index (given-year price weights).
c=(qn1*pn1*10+qn2*pn2)/(q01*pn1*10+q02*pn2)
c=c*100
c=round(c,digits = 1)
# d: Fisher ideal quantity index = geometric mean of b and c.
d=sqrt(b*c)
d=round(d,digits = 0)
y1=1980
y2=1986
# Second part: price indexes from rescaled prices (a1/a2) and quantity weights (c*, d*).
a1=163.7/100
a2=125/100
b1=143
b2=116
c1=2.427
c2=2.931
d1=13.947
d2=13.285
# a: base-quantity-weighted (Laspeyres) price index, as a fraction after rounding.
a=((a2*c1*1000)+(b2*d1))/((a1*c1*1000)+(b1*d1))
a=a*100
a=round(a,digits = 1)
a=a/100
# b: given-quantity-weighted (Paasche) price index, as a fraction after rounding.
b=((a2*c2*1000)+(b2*d2))/((a1*c2*1000)+(b1*d2))
b=b*100
b=round(b,digits = 1)
b=b/100
# c: Fisher ideal price index (percentage).
c=sqrt(a*b)
c=c*100
c=round(c,digits = 1)
# Value index = quantity index (d) x price index (c), reported as a percentage.
vi=d*c
vi=vi/100
vi=round(vi,digits = 1)
cat(vi,'%')
|
/Schaum'S_Outline_Series_-_Theory_And_Problems_Of_Statistics_by_Murray_R._Spiegel/CH19/EX19.19.35/Ex19_19_35.R
|
permissive
|
FOSSEE/R_TBC_Uploads
|
R
| false
| false
| 819
|
r
|
#PAGE=455
y1=1980
y2=1986
p01=163.7*10
pn1=125
p02=143
pn2=116
q01=2.427
qn1=2.931
q02=13.947
qn2=13.285
p0=c(p01,p02)
pn=c(pn1,pn2)
q0=c(q01,q02)
qn=c(qn1,qn2)
a=(qn1/q01+qn2/q02)/2
a=a*100
a=round(a,digits = 0)
b=(qn1*p01+qn2*p02)/(q01*p01+q02*p02)
b=b*100
b=round(b,digits = 1)
c=(qn1*pn1*10+qn2*pn2)/(q01*pn1*10+q02*pn2)
c=c*100
c=round(c,digits = 1)
d=sqrt(b*c)
d=round(d,digits = 0)
y1=1980
y2=1986
a1=163.7/100
a2=125/100
b1=143
b2=116
c1=2.427
c2=2.931
d1=13.947
d2=13.285
a=((a2*c1*1000)+(b2*d1))/((a1*c1*1000)+(b1*d1))
a=a*100
a=round(a,digits = 1)
a=a/100
b=((a2*c2*1000)+(b2*d2))/((a1*c2*1000)+(b1*d2))
b=b*100
b=round(b,digits = 1)
b=b/100
c=sqrt(a*b)
c=c*100
c=round(c,digits = 1)
vi=d*c
vi=vi/100
vi=round(vi,digits = 1)
cat(vi,'%')
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/PlotNetworks.R
\name{hn_plot}
\alias{hn_plot}
\title{Plot an igraph object}
\usage{
hn_plot(gadj, layout = layout_with_kk, width.scale = 2, colorbar = FALSE,
colorbarx, colorbary, ...)
}
\arguments{
\item{gadj}{igraph object}
\item{layout}{layout function}
\item{width.scale}{scale factor for edges}
\item{colorbar}{\code{logical}, should a legend providing a scale for the edges be displayed?}
\item{colorbarx}{coordinates for legend}
\item{colorbary}{coordinates for legend}
\item{...}{passed to plot.igraph}
}
\value{
plots the network
}
\description{
Plot an igraph object
}
|
/man/hn_plot.Rd
|
no_license
|
sqyu/HurdleNormal
|
R
| false
| true
| 665
|
rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/PlotNetworks.R
\name{hn_plot}
\alias{hn_plot}
\title{Plot an igraph object}
\usage{
hn_plot(gadj, layout = layout_with_kk, width.scale = 2, colorbar = FALSE,
colorbarx, colorbary, ...)
}
\arguments{
\item{gadj}{igraph object}
\item{layout}{layout function}
\item{width.scale}{scale factor for edges}
\item{colorbar}{\code{logical}, should a legend providing a scale for the edges be displayed?}
\item{colorbarx}{coordinates for legend}
\item{colorbary}{coordinates for legend}
\item{...}{passed to plot.igraph}
}
\value{
plots the network
}
\description{
Plot an igraph object
}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/ee_download.R
\name{ee_table_to_gcs}
\alias{ee_table_to_gcs}
\title{Creates a task to export a FeatureCollection to Google Cloud Storage.}
\usage{
ee_table_to_gcs(
collection,
description = "myExportTableTask",
bucket = NULL,
fileNamePrefix = NULL,
timePrefix = TRUE,
fileFormat = NULL,
selectors = NULL
)
}
\arguments{
\item{collection}{The feature collection to be exported.}
\item{description}{Human-readable name of the task.}
\item{bucket}{The name of a Cloud Storage bucket for the export.}
\item{fileNamePrefix}{Cloud Storage object name prefix
for the export. Defaults to the name of the task.}
\item{timePrefix}{Add current date and time as a prefix to files to export.}
\item{fileFormat}{The output format: "CSV" (default),
"GeoJSON", "KML", "KMZ", "SHP", or "TFRecord".}
\item{selectors}{The list of properties to include in the output,
as a list of strings or a comma-separated string. By default, all
properties are included. **kwargs: Holds other keyword arguments
that may have been deprecated such as 'outputBucket'.}
}
\value{
An unstarted Task that exports the table to Google Cloud Storage.
}
\description{
Creates a task to export a FeatureCollection to Google Cloud Storage.
This function is a wrapper around
\code{ee$batch$Export$table$toCloudStorage(...)}.
}
\examples{
\dontrun{
library(rgee)
library(stars)
library(sf)
ee_users()
ee_Initialize(gcs = TRUE)
# Define study area (local -> earth engine)
# Communal Reserve Amarakaeri - Peru
rlist <- list(xmin = -71.13, xmax = -70.95,ymin = -12.89, ymax = -12.73)
ROI <- c(rlist$xmin, rlist$ymin,
rlist$xmax, rlist$ymin,
rlist$xmax, rlist$ymax,
rlist$xmin, rlist$ymax,
rlist$xmin, rlist$ymin)
ee_ROI <- matrix(ROI, ncol = 2, byrow = TRUE) \%>\%
list() \%>\%
st_polygon() \%>\%
st_sfc() \%>\%
st_set_crs(4326) \%>\%
sf_as_ee()
amk_fc <- ee$FeatureCollection(
list(ee$Feature(ee_ROI$geometry(), list(name = "Amarakaeri")))
)
task_vector <- ee_table_to_gcs(
collection = amk_fc,
bucket = "rgee_dev",
fileFormat = "SHP",
fileNamePrefix = "geom_Amarakaeri"
)
task_vector$start()
ee_monitoring(task_vector) # optional
amk_geom <- ee_gcs_to_local(task = task_vector)
plot(amk_geom$geometry, border = "red", lwd = 10)
}
}
\seealso{
Other vector export task creator:
\code{\link{ee_table_to_asset}()},
\code{\link{ee_table_to_drive}()}
}
\concept{vector export task creator}
|
/man/ee_table_to_gcs.Rd
|
permissive
|
karigar/rgee
|
R
| false
| true
| 2,504
|
rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/ee_download.R
\name{ee_table_to_gcs}
\alias{ee_table_to_gcs}
\title{Creates a task to export a FeatureCollection to Google Cloud Storage.}
\usage{
ee_table_to_gcs(
collection,
description = "myExportTableTask",
bucket = NULL,
fileNamePrefix = NULL,
timePrefix = TRUE,
fileFormat = NULL,
selectors = NULL
)
}
\arguments{
\item{collection}{The feature collection to be exported.}
\item{description}{Human-readable name of the task.}
\item{bucket}{The name of a Cloud Storage bucket for the export.}
\item{fileNamePrefix}{Cloud Storage object name prefix
for the export. Defaults to the name of the task.}
\item{timePrefix}{Add current date and time as a prefix to files to export.}
\item{fileFormat}{The output format: "CSV" (default),
"GeoJSON", "KML", "KMZ", "SHP", or "TFRecord".}
\item{selectors}{The list of properties to include in the output,
as a list of strings or a comma-separated string. By default, all
properties are included. **kwargs: Holds other keyword arguments
that may have been deprecated such as 'outputBucket'.}
}
\value{
An unstarted Task that exports the table to Google Cloud Storage.
}
\description{
Creates a task to export a FeatureCollection to Google Cloud Storage.
This function is a wrapper around
\code{ee$batch$Export$table$toCloudStorage(...)}.
}
\examples{
\dontrun{
library(rgee)
library(stars)
library(sf)
ee_users()
ee_Initialize(gcs = TRUE)
# Define study area (local -> earth engine)
# Communal Reserve Amarakaeri - Peru
rlist <- list(xmin = -71.13, xmax = -70.95,ymin = -12.89, ymax = -12.73)
ROI <- c(rlist$xmin, rlist$ymin,
rlist$xmax, rlist$ymin,
rlist$xmax, rlist$ymax,
rlist$xmin, rlist$ymax,
rlist$xmin, rlist$ymin)
ee_ROI <- matrix(ROI, ncol = 2, byrow = TRUE) \%>\%
list() \%>\%
st_polygon() \%>\%
st_sfc() \%>\%
st_set_crs(4326) \%>\%
sf_as_ee()
amk_fc <- ee$FeatureCollection(
list(ee$Feature(ee_ROI$geometry(), list(name = "Amarakaeri")))
)
task_vector <- ee_table_to_gcs(
collection = amk_fc,
bucket = "rgee_dev",
fileFormat = "SHP",
fileNamePrefix = "geom_Amarakaeri"
)
task_vector$start()
ee_monitoring(task_vector) # optional
amk_geom <- ee_gcs_to_local(task = task_vector)
plot(amk_geom$geometry, border = "red", lwd = 10)
}
}
\seealso{
Other vector export task creator:
\code{\link{ee_table_to_asset}()},
\code{\link{ee_table_to_drive}()}
}
\concept{vector export task creator}
|
timeToLabels <- function(nsec) {
secs <- 0:nsec
sec_labels <- sapply(secs, function(s) ifelse(s %% 600 == 0, as.character(s/60), ""))
sec_ticks <- which(sec_labels != "")
sec_labels[length(sec_labels) + 1] <- round(nsec/60)
sec_ticks[length(sec_ticks) + 1] <- nsec
sec_labels <- paste0(sec_labels[sec_labels != ""], "'")
res <- list(
'label' = sec_labels,
'tick' = sec_ticks/60
)
return(res)
}
|
/R/utils.R
|
permissive
|
js2264/movieaRt
|
R
| false
| false
| 446
|
r
|
timeToLabels <- function(nsec) {
secs <- 0:nsec
sec_labels <- sapply(secs, function(s) ifelse(s %% 600 == 0, as.character(s/60), ""))
sec_ticks <- which(sec_labels != "")
sec_labels[length(sec_labels) + 1] <- round(nsec/60)
sec_ticks[length(sec_ticks) + 1] <- nsec
sec_labels <- paste0(sec_labels[sec_labels != ""], "'")
res <- list(
'label' = sec_labels,
'tick' = sec_ticks/60
)
return(res)
}
|
# Join (Merge) data frames (inner, outer, left, right)
df1 = data.frame(CustomerId = c(1:6), Product = c(rep("Oven", 3), rep("Television", 3)))
df1
df2 = data.frame(CustomerId = c(2, 4, 6,7), State = c(rep("California", 3), rep("Texas", 1)))
df2
# Merging happens based on the common column name in both the data sets
# Inner Join
df<-merge(x=df1,y=df2,by="CustomerId")
df
# Outer Join
df<-merge(x=df1,y=df2,by="CustomerId",all=TRUE)
df
# Left outer join
df<-merge(x=df1,y=df2,by="CustomerId",all.x=TRUE)
df
# Right outer join
df<-merge(x=df1,y=df2,by="CustomerId",all.y=TRUE)
df
# apply
# Returns a vector or array or list of values obtained by
#applying a function to margins of an array or matrix.
x <- matrix(rnorm(30), nrow=5, ncol=6)
x
?apply
apply(x, 1 ,sum)
apply(x, 2 ,sum)
# lapply
# lapply function takes list, vector or Data frame as input and returns only list as output
A <- matrix(1:9, 3,3)
A
B <- matrix(4:15, 4,3)
B
C <- matrix(8:10, 3,2)
C
MyList <- list(A,B,C)
MyList
class(MyList)
lapply(MyList, mean)
# sapply
# sapply function takes list, vector or Data frame as input. It is similar to lapply function but returns only vector as output
sapply(MyList, mean)
random <- c("This","is","random","vector")
random
lapply(random, nchar)
# mapply
# mapply applies FUN to the first elements of each (.) argument, the second elements, the third elements, and so on.
mapply(sum,c(1,2,3), c(4,5,6), c(7,8,9))
M1 = matrix(c(1,2,3,4))
M2 = matrix(c(5,6,7,8))
mapply(sum,M1,M2)
# tapply
# tapply is used when you want to apply a function to subsets of a vector and the subsets are
# defined by some other vector, usually a factor
attach(iris)
View(iris)
tapply(Sepal.Length, Species, mean) # mean of Sepal.Length for all 3 Species
tapply(Sepal.Width, Species, median)
### getting present working directory
getwd()
### changing the present working directory
setwd("path")
data = read.csv('D:\\Assignments Python\\Association rules\\book.csv')
## to read excel sheet
library(readxl)
data = read_xlsx('C:\\Users\\Tarun Reddy\\Downloads\\EastWestAirlines.xlsx' , sheet = 2)
##default data sets in R
library(datasets)
data("mtcars")
airquality <-datasets::mtcars
#install.packages("dplyr")
library(dplyr)
data()
data(mtcars)
View(mtcars)
?mtcars
head(mtcars)
tail(mtcars)
str(mtcars)
summary(mtcars)
#1. Filter or subset
#You can use "," or "&" to use and condition
filter(mtcars,cyl==8,gear==5)
filter(mtcars,cyl==8&gear==5)
filter(mtcars,cyl==8 |gear==5)
#select columns
select(mtcars,mpg,cyl,gear)
# Use ":" to select multiple contiguous columns,
#and use "contains" to match columns by name
select(mtcars,"carb",mpg:disp,"gear")
select(mtcars,mpg:disp,contains("ge"),
contains("carb"))
#Exclude a particular column
select(mtcars,c(-gear,-carb))
select(mtcars,-mpg:-disp)
select(mtcars,-contains("ge"))
filter(select(mtcars,gear,carb,cyl),
cyl==8)
#To select all columns that start with the character string "c",
#use the function starts_with()
head(select(mtcars, starts_with("c")))
##Some additional options to select columns based on specific criteria:
#ends_with() : Select columns that end with a character string
#contains() : Select columns that contain a character string
#3. Arrange : Reorder rows
#Syntax:
#arrange(dataframe,orderby)
arrange(mtcars,cyl)
arrange(select(mtcars,"cyl","gear"),cyl)
arrange(select(mtcars,"cyl","gear"),desc(cyl))
#mutate: Add new variable
#Base R Approach
temp <- mtcars
temp$new_variable <- temp$hp + temp$wt
View(temp)
temp$new_variable1 <- 0
str(temp)
temp$new_variable <- NULL
str(temp)
##dplyr Approach
temp <- mutate(temp,mutate_new = temp$hp + temp$wt)
str(temp)
View(temp)
# Fetch the unique values in dataframe
#Base Package approach - unique function
#unique()
#dplyr approach
#distinct()
distinct(mtcars["cyl"])
distinct(mtcars[c("cyl","gear")])
#NA is a logical constant of length 1, which contains a missing value indicator.
#NULL represents the null object
c(1,2,3,NA,NULL)
sum(10,20,NA)
mean(c(2,2,2,2,NA),na.rm = T)
sum(10,20,NA)
sum(10,20,NA,na.rm = T)
#Table is very handy to find the frequencies (mode)
#Base Package Approach
table(mtcars$cyl)
# Rowsums ,colsums rowmeans and Columnmeans
data("iris")
View(iris)
rowSums(iris[,-5])
colSums(iris[,-5])
rowMeans(iris[,-5])
colMeans(iris[,-5])
unique(iris$Species)
# no. of rows
data("iris")
length(iris)
length(iris[,1])
# which() on data frame
data("iris")
View(iris)
which(iris$Petal.Width == 0.2)
Range <- function(x){
max(x, na.rm = T) - min(x, na.rm = T)
}
Range(iris$Sepal.Length)
Range(iris$Sepal.Width)
Range(iris$Petal.Length)
Range(iris$Petal.Width)
var(iris$Sepal.Length, na.rm = T)
var(iris$Sepal.Width, na.rm = T)
var(iris$Petal.Length, na.rm = T)
var(iris$Petal.Width, na.rm = T)
sd(iris$Sepal.Length, na.rm = T)
sd(iris$Sepal.Width, na.rm = T)
sd(iris$Petal.Length, na.rm = T)
sd(iris$Petal.Width, na.rm = T)
|
/Basics of R -Part 3.R
|
no_license
|
ApurwaLoya/Data-Science
|
R
| false
| false
| 5,172
|
r
|
# Join (Merge) data frames (inner, outer, left, right)
df1 = data.frame(CustomerId = c(1:6), Product = c(rep("Oven", 3), rep("Television", 3)))
df1
df2 = data.frame(CustomerId = c(2, 4, 6,7), State = c(rep("California", 3), rep("Texas", 1)))
df2
# Merging happens based on the common column name in both the data sets
# Inner Join
df<-merge(x=df1,y=df2,by="CustomerId")
df
# Outer Join
df<-merge(x=df1,y=df2,by="CustomerId",all=TRUE)
df
# Left outer join
df<-merge(x=df1,y=df2,by="CustomerId",all.x=TRUE)
df
# Right outer join
df<-merge(x=df1,y=df2,by="CustomerId",all.y=TRUE)
df
# apply
# Returns a vector or array or list of values obtained by
#applying a function to margins of an array or matrix.
x <- matrix(rnorm(30), nrow=5, ncol=6)
x
?apply
apply(x, 1 ,sum)
apply(x, 2 ,sum)
# lapply
# lapply function takes list, vector or Data frame as input and returns only list as output
A <- matrix(1:9, 3,3)
A
B <- matrix(4:15, 4,3)
B
C <- matrix(8:10, 3,2)
C
MyList <- list(A,B,C)
MyList
class(MyList)
lapply(MyList, mean)
# sapply
# sapply function takes list, vector or Data frame as input. It is similar to lapply function but returns only vector as output
sapply(MyList, mean)
random <- c("This","is","random","vector")
random
lapply(random, nchar)
# mapply
# mapply applies FUN to the first elements of each (.) argument, the second elements, the third elements, and so on.
mapply(sum,c(1,2,3), c(4,5,6), c(7,8,9))
M1 = matrix(c(1,2,3,4))
M2 = matrix(c(5,6,7,8))
mapply(sum,M1,M2)
# tapply
# tapply is used when you want to apply a function to subsets of a vector and the subsets are
# defined by some other vector, usually a factor
attach(iris)
View(iris)
tapply(Sepal.Length, Species, mean) # mean of Sepal.Length for all 3 Species
tapply(Sepal.Width, Species, median)
### getting present working directory
getwd()
### changing the present working directory
setwd("path")
data = read.csv('D:\\Assignments Python\\Association rules\\book.csv')
## to read excel sheet
library(readxl)
data = read_xlsx('C:\\Users\\Tarun Reddy\\Downloads\\EastWestAirlines.xlsx' , sheet = 2)
##default data sets in R
library(datasets)
data("mtcars")
airquality <-datasets::mtcars
#install.packages("dplyr")
library(dplyr)
data()
data(mtcars)
View(mtcars)
?mtcars
head(mtcars)
tail(mtcars)
str(mtcars)
summary(mtcars)
#1. Filter or subset
#You can use "," or "&" to use and condition
filter(mtcars,cyl==8,gear==5)
filter(mtcars,cyl==8&gear==5)
filter(mtcars,cyl==8 |gear==5)
#select columns
select(mtcars,mpg,cyl,gear)
# Use ":" to select multiple contiguous columns,
#and use "contains" to match columns by name
select(mtcars,"carb",mpg:disp,"gear")
select(mtcars,mpg:disp,contains("ge"),
contains("carb"))
#Exclude a particular column
select(mtcars,c(-gear,-carb))
select(mtcars,-mpg:-disp)
select(mtcars,-contains("ge"))
filter(select(mtcars,gear,carb,cyl),
cyl==8)
#To select all columns that start with the character string "c",
#use the function starts_with()
head(select(mtcars, starts_with("c")))
##Some additional options to select columns based on specific criteria:
#ends_with() : Select columns that end with a character string
#contains() : Select columns that contain a character string
#3. Arrange : Reorder rows
#Syntax:
#arrange(dataframe,orderby)
arrange(mtcars,cyl)
arrange(select(mtcars,"cyl","gear"),cyl)
arrange(select(mtcars,"cyl","gear"),desc(cyl))
#mutate: Add new variable
#Base R Approach
temp <- mtcars
temp$new_variable <- temp$hp + temp$wt
View(temp)
temp$new_variable1 <- 0
str(temp)
temp$new_variable <- NULL
str(temp)
##dplyr Approach
temp <- mutate(temp,mutate_new = temp$hp + temp$wt)
str(temp)
View(temp)
# Fetch the unique values in dataframe
#Base Package approach - unique function
#unique()
#dplyr approach
#distinct()
distinct(mtcars["cyl"])
distinct(mtcars[c("cyl","gear")])
#NA is a logical constant of length 1, which contains a missing value indicator.
#NULL represents the null object
c(1,2,3,NA,NULL)
sum(10,20,NA)
mean(c(2,2,2,2,NA),na.rm = T)
sum(10,20,NA)
sum(10,20,NA,na.rm = T)
#Table is very handy to find the frequencies (mode)
#Base Package Approach
table(mtcars$cyl)
# Rowsums ,colsums rowmeans and Columnmeans
data("iris")
View(iris)
rowSums(iris[,-5])
colSums(iris[,-5])
rowMeans(iris[,-5])
colMeans(iris[,-5])
unique(iris$Species)
# no. of rows
data("iris")
length(iris)
length(iris[,1])
# which() on data frame
data("iris")
View(iris)
which(iris$Petal.Width == 0.2)
Range <- function(x){
max(x, na.rm = T) - min(x, na.rm = T)
}
Range(iris$Sepal.Length)
Range(iris$Sepal.Width)
Range(iris$Petal.Length)
Range(iris$Petal.Width)
var(iris$Sepal.Length, na.rm = T)
var(iris$Sepal.Width, na.rm = T)
var(iris$Petal.Length, na.rm = T)
var(iris$Petal.Width, na.rm = T)
sd(iris$Sepal.Length, na.rm = T)
sd(iris$Sepal.Width, na.rm = T)
sd(iris$Petal.Length, na.rm = T)
sd(iris$Petal.Width, na.rm = T)
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/app.R
\name{browserHistory}
\alias{browserHistory}
\title{Enable history navigation}
\usage{
browserHistory(navId, input, session)
}
\arguments{
\item{navId}{Character: identifier of the navigation bar}
\item{input}{Input object}
\item{session}{Session object}
}
\value{
NULL (this function is used to modify the Shiny session's state)
}
\description{
Navigate app according to the location given by the navigation bar. Code
and logic adapted from
\url{https://github.com/daattali/advanced-shiny/blob/master/navigate-history}
}
|
/man/browserHistory.Rd
|
no_license
|
ahmczwg/psichomics
|
R
| false
| true
| 608
|
rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/app.R
\name{browserHistory}
\alias{browserHistory}
\title{Enable history navigation}
\usage{
browserHistory(navId, input, session)
}
\arguments{
\item{navId}{Character: identifier of the navigation bar}
\item{input}{Input object}
\item{session}{Session object}
}
\value{
NULL (this function is used to modify the Shiny session's state)
}
\description{
Navigate app according to the location given by the navigation bar. Code
and logic adapted from
\url{https://github.com/daattali/advanced-shiny/blob/master/navigate-history}
}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/benchmark.R
\name{benchmarks_code}
\alias{benchmarks_code}
\title{Run benchmark for R code across R implementation and platform}
\usage{
benchmarks_code(code, platforms = c("debian", "ubuntu"),
r_implementations = c("gnu-r", "mro"), volumes = NULL, times = 3,
pull_image = FALSE, ...)
}
\arguments{
\item{code}{An expression or string of R code}
\item{platforms}{List of platforms}
\item{r_implementations}{List of R implementations}
\item{volumes}{Volume mapping from host to container. Passed to \link{run_code}}
\item{times}{How many times the code will be run. Passed to \link{microbenchmark}}
\item{pull_image}{If set to TRUE, the needed docker image will be pulled first}
\item{...}{Parameters for \link[microbenchmark]{microbenchmark}}
}
\description{
Run benchmark for R code across R implementation and platform.
}
\examples{
\dontrun{
benchmarks_code(code = "1 + 1", times = 3)
# This code below is for running sample, need to set proper directory
code = expression(setwd('/home/docker/sdsr'),
bookdown::clean_book(TRUE),
unlink('_book/', recursive=TRUE),
unlink('_bookdown_files', recursive=TRUE),
bookdown::render_book('index.Rmd', 'bookdown::gitbook')
)
benchmarks_code(code = code,
volumes = '/home/ismailsunni/dev/r/sdsr:/home/docker/sdsr',
times = 3)
benchmarks_code(code = code,
platforms = c("debian", "ubuntu", "fedora"),
volumes = '/home/ismailsunni/dev/r/sdsr:/home/docker/sdsr',
times = 3)
}
}
|
/man/benchmarks_code.Rd
|
permissive
|
ismailsunni/altRnative
|
R
| false
| true
| 1,666
|
rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/benchmark.R
\name{benchmarks_code}
\alias{benchmarks_code}
\title{Run benchmark for R code across R implementation and platform}
\usage{
benchmarks_code(code, platforms = c("debian", "ubuntu"),
r_implementations = c("gnu-r", "mro"), volumes = NULL, times = 3,
pull_image = FALSE, ...)
}
\arguments{
\item{code}{An expression or string of R code}
\item{platforms}{List of platforms}
\item{r_implementations}{List of R implementations}
\item{volumes}{Volume mapping from host to container. Passed to \link{run_code}}
\item{times}{How many times the code will be run. Passed to \link{microbenchmark}}
\item{pull_image}{If set to TRUE, the needed docker image will be pulled first}
\item{...}{Parameters for \link[microbenchmark]{microbenchmark}}
}
\description{
Run benchmark for R code across R implementation and platform.
}
\examples{
\dontrun{
benchmarks_code(code = "1 + 1", times = 3)
# This code below is for running sample, need to set proper directory
code = expression(setwd('/home/docker/sdsr'),
bookdown::clean_book(TRUE),
unlink('_book/', recursive=TRUE),
unlink('_bookdown_files', recursive=TRUE),
bookdown::render_book('index.Rmd', 'bookdown::gitbook')
)
benchmarks_code(code = code,
volumes = '/home/ismailsunni/dev/r/sdsr:/home/docker/sdsr',
times = 3)
benchmarks_code(code = code,
platforms = c("debian", "ubuntu", "fedora"),
volumes = '/home/ismailsunni/dev/r/sdsr:/home/docker/sdsr',
times = 3)
}
}
|
## TidyX Episode 74: Joins with Databases
### Packages ---------------------------------------------
library(tidyverse)
library(rvest)
library(janitor)
library(RSQLite)
library(DBI)
library(microbenchmark)
### Get Game PbP Data ---------------------------------------------
pull_game_pbp_data <- function(game_id, con, verbose = FALSE){
if(verbose){
print(game_id)
}
## html ----
espn_pbp <- read_html(paste0("https://www.espn.com/nba/playbyplay/_/gameId/",game_id))
espn_game_summary <- read_html(paste0("https://www.espn.com/nba/game/_/gameId/",game_id))
## game info ----
teams <- espn_pbp %>%
html_nodes(".competitors")
home <- teams %>%
html_nodes(".home") %>%
html_nodes("span") %>%
`[`(1:3) %>%
html_text
away <- teams %>%
html_nodes(".away") %>%
html_nodes("span") %>%
`[`(1:3) %>%
html_text
game_info <- espn_game_summary %>%
html_nodes(".game-information") %>%
html_nodes(".game-field")
game_time <- game_info %>%
html_nodes(".game-date-time") %>%
html_node("span") %>%
html_attr("data-date")
game_odds <- espn_game_summary %>%
html_nodes(".game-information") %>%
html_nodes(".odds") %>%
html_nodes("li") %>%
html_text() %>%
str_split(":") %>%
data.frame() %>%
janitor::row_to_names(1)
game_capacity <- espn_game_summary %>%
html_nodes(".game-information") %>%
html_nodes(".game-info-note") %>%
html_text() %>%
str_split(":") %>%
data.frame() %>%
janitor::row_to_names(1)
game_summary <- espn_game_summary %>%
html_nodes(".header") %>%
html_text() %>%
str_split(",") %>%
pluck(1) %>%
pluck(1)
game_df <- data.frame(
game_id = game_id,
game_time = game_time,
game_info = game_summary[[1]],
home_team = paste(home[1:2],collapse = " "),
home_team_abbrev = home[3],
away_team = paste(away[1:2],collapse = " "),
away_team_abbrev = away[3],
game_capacity,
game_odds
) %>%
janitor::clean_names()
## pbp info ----
quarter_tabs <- espn_pbp %>%
html_nodes("#gamepackage-qtrs-wrap") %>%
html_nodes(".webview-internal") %>%
html_attr("href")
full_game_pbp <- map_dfr(quarter_tabs, function(qtab){
## scrape elements for time stamps, play details, and score
time_stamps <- espn_pbp %>%
html_nodes("div") %>%
html_nodes(qtab) %>%
html_nodes(".time-stamp") %>%
html_text() %>%
as_tibble() %>%
rename(time = value)
possession_details <- espn_pbp %>%
html_nodes("div") %>%
html_nodes(qtab) %>%
html_nodes(".logo") %>%
html_nodes("img") %>%
html_attr("src") %>%
as_tibble() %>%
rename(possession = value) %>%
mutate(
possession = basename(possession)
) %>%
mutate(
possession = str_replace(possession, "(.+)([.]png.+)","\\1")
)
play_details <- espn_pbp %>%
html_nodes("div") %>%
html_nodes(qtab) %>%
html_nodes(".game-details") %>%
html_text() %>%
as_tibble() %>%
rename(play_details = value)
score <- espn_pbp %>%
html_nodes("div") %>%
html_nodes(qtab) %>%
html_nodes(".combined-score") %>%
html_text() %>%
as_tibble() %>%
rename(score = value)
## bind data together
bind_cols(time_stamps, possession_details, play_details, score) %>%
mutate(
quarter = gsub("#","",qtab)
)
}) %>%
mutate(play_id_num = seq_len(nrow(.)))
dbWriteTable(con, name = paste0("game_",game_id), full_game_pbp, overwrite = TRUE)
if("game_ids" %in% dbListTables(con)){
hist_game_table <- dbReadTable(con, "game_ids")
if(!game_id %in% hist_game_table$game_id){
game_df <- rbind(hist_game_table, game_df)
}
}
dbWriteTable(con, name = "game_ids", game_df, overwrite = TRUE)
}
# Write several games to database ---------------------------------------------
## create connection
db_con <- dbConnect(
drv = RSQLite::SQLite(),
here::here("TidyTuesday_Explained/074-Databases_with_R_3/nba_playoffs.db")
)
## write table
walk(
c(
"401327715", ## Miami Heat @ Milwaukee Bucks (1st Rd | Game 1 Eastern Conference Playoffs, 2021)
"401327878", ## Miami Heat @ Milwaukee Bucks (1st Rd | Game 2 Eastern Conference Playoffs, 2021)
"401327879", ## Miami Heat @ Milwaukee Bucks (1st Rd | Game 3 Eastern Conference Playoffs, 2021)
"401327870" ## Denver Nuggets @ Portland Trail Blazers (1st Rd | Game 4 Western Conference Playoffs, 2021)
),
pull_game_pbp_data,
db_con
)
##disconnect
dbDisconnect(db_con)
# Interact with database ---------------------------------------------
# create connection
db_con <- dbConnect(
drv = RSQLite::SQLite(),
here::here("TidyTuesday_Explained/074-Databases_with_R_3/nba_playoffs.db")
)
## list tables
dbListTables(db_con)
game_info <- tbl(db_con, "game_ids") %>%
filter( game_info == "East 1st Round - Game 3")
play_of_interest<- tbl(db_con, "game_401327879") %>%
filter(play_id_num == "12")
## Join information ----
### Locally
#### This only works if tables can fit into memory!~
game_pbp <- tbl(db_con, "game_401327879") %>% collect()
game_info <- tbl(db_con, "game_ids") %>% collect()
full_game_info <- game_pbp %>%
mutate(
game_id_number = "401327879"
) %>%
left_join(
game_info,
by = c("game_id_number" = "game_id")
)
## Using SQL Joins
## this will work, and only the result needs to be able to fit into memory
full_game_info_sql <- tbl(db_con, "game_401327879") %>%
mutate(
game_id_number = "401327879"
) %>%
left_join(
tbl(db_con, "game_ids"),
by = c("game_id_number" = "game_id")
)
show_query(full_game_info_sql)
## Benchmark joins ----
mb_sql_comparison <- microbenchmark(
sql_join = {
tbl(db_con, "game_401327879") %>%
mutate(
game_id_number = "401327879"
) %>%
left_join(
tbl(db_con, "game_ids"),
by = c("game_id_number" = "game_id")
)
},
load_to_memory = {
game_pbp <- tbl(db_con, "game_401327879") %>% collect()
game_info <- tbl(db_con, "game_ids") %>% collect()
game_pbp %>%
mutate(
game_id_number = "401327879"
) %>%
left_join(
game_info,
by = c("game_id_number" = "game_id")
)
}
)
autoplot(mb_sql_comparison)
dbDisconnect(db_con)
|
/TidyTuesday_Explained/074-Databases_with_R_3/Episode 74 - Joins with databases.R
|
no_license
|
Jpzhaoo/TidyX
|
R
| false
| false
| 6,410
|
r
|
## TidyX Episode 74: Joins with Databases
### Packages ---------------------------------------------
library(tidyverse)
library(rvest)
library(janitor)
library(RSQLite)
library(DBI)
library(microbenchmark)
### Get Game PbP Data ---------------------------------------------
pull_game_pbp_data <- function(game_id, con, verbose = FALSE){
if(verbose){
print(game_id)
}
## html ----
espn_pbp <- read_html(paste0("https://www.espn.com/nba/playbyplay/_/gameId/",game_id))
espn_game_summary <- read_html(paste0("https://www.espn.com/nba/game/_/gameId/",game_id))
## game info ----
teams <- espn_pbp %>%
html_nodes(".competitors")
home <- teams %>%
html_nodes(".home") %>%
html_nodes("span") %>%
`[`(1:3) %>%
html_text
away <- teams %>%
html_nodes(".away") %>%
html_nodes("span") %>%
`[`(1:3) %>%
html_text
game_info <- espn_game_summary %>%
html_nodes(".game-information") %>%
html_nodes(".game-field")
game_time <- game_info %>%
html_nodes(".game-date-time") %>%
html_node("span") %>%
html_attr("data-date")
game_odds <- espn_game_summary %>%
html_nodes(".game-information") %>%
html_nodes(".odds") %>%
html_nodes("li") %>%
html_text() %>%
str_split(":") %>%
data.frame() %>%
janitor::row_to_names(1)
game_capacity <- espn_game_summary %>%
html_nodes(".game-information") %>%
html_nodes(".game-info-note") %>%
html_text() %>%
str_split(":") %>%
data.frame() %>%
janitor::row_to_names(1)
game_summary <- espn_game_summary %>%
html_nodes(".header") %>%
html_text() %>%
str_split(",") %>%
pluck(1) %>%
pluck(1)
game_df <- data.frame(
game_id = game_id,
game_time = game_time,
game_info = game_summary[[1]],
home_team = paste(home[1:2],collapse = " "),
home_team_abbrev = home[3],
away_team = paste(away[1:2],collapse = " "),
away_team_abbrev = away[3],
game_capacity,
game_odds
) %>%
janitor::clean_names()
## pbp info ----
quarter_tabs <- espn_pbp %>%
html_nodes("#gamepackage-qtrs-wrap") %>%
html_nodes(".webview-internal") %>%
html_attr("href")
full_game_pbp <- map_dfr(quarter_tabs, function(qtab){
## scrape elements for time stamps, play details, and score
time_stamps <- espn_pbp %>%
html_nodes("div") %>%
html_nodes(qtab) %>%
html_nodes(".time-stamp") %>%
html_text() %>%
as_tibble() %>%
rename(time = value)
possession_details <- espn_pbp %>%
html_nodes("div") %>%
html_nodes(qtab) %>%
html_nodes(".logo") %>%
html_nodes("img") %>%
html_attr("src") %>%
as_tibble() %>%
rename(possession = value) %>%
mutate(
possession = basename(possession)
) %>%
mutate(
possession = str_replace(possession, "(.+)([.]png.+)","\\1")
)
play_details <- espn_pbp %>%
html_nodes("div") %>%
html_nodes(qtab) %>%
html_nodes(".game-details") %>%
html_text() %>%
as_tibble() %>%
rename(play_details = value)
score <- espn_pbp %>%
html_nodes("div") %>%
html_nodes(qtab) %>%
html_nodes(".combined-score") %>%
html_text() %>%
as_tibble() %>%
rename(score = value)
## bind data together
bind_cols(time_stamps, possession_details, play_details, score) %>%
mutate(
quarter = gsub("#","",qtab)
)
}) %>%
mutate(play_id_num = seq_len(nrow(.)))
dbWriteTable(con, name = paste0("game_",game_id), full_game_pbp, overwrite = TRUE)
if("game_ids" %in% dbListTables(con)){
hist_game_table <- dbReadTable(con, "game_ids")
if(!game_id %in% hist_game_table$game_id){
game_df <- rbind(hist_game_table, game_df)
}
}
dbWriteTable(con, name = "game_ids", game_df, overwrite = TRUE)
}
# Write several games to database ---------------------------------------------
## create connection
db_con <- dbConnect(
drv = RSQLite::SQLite(),
here::here("TidyTuesday_Explained/074-Databases_with_R_3/nba_playoffs.db")
)
## write table
walk(
c(
"401327715", ## Miami Heat @ Milwaukee Bucks (1st Rd | Game 1 Eastern Conference Playoffs, 2021)
"401327878", ## Miami Heat @ Milwaukee Bucks (1st Rd | Game 2 Eastern Conference Playoffs, 2021)
"401327879", ## Miami Heat @ Milwaukee Bucks (1st Rd | Game 3 Eastern Conference Playoffs, 2021)
"401327870" ## Denver Nuggets @ Portland Trail Blazers (1st Rd | Game 4 Western Conference Playoffs, 2021)
),
pull_game_pbp_data,
db_con
)
##disconnect
dbDisconnect(db_con)
# Interact with database ---------------------------------------------
# create connection
db_con <- dbConnect(
drv = RSQLite::SQLite(),
here::here("TidyTuesday_Explained/074-Databases_with_R_3/nba_playoffs.db")
)
## list tables
dbListTables(db_con)
game_info <- tbl(db_con, "game_ids") %>%
filter( game_info == "East 1st Round - Game 3")
play_of_interest<- tbl(db_con, "game_401327879") %>%
filter(play_id_num == "12")
## Join information ----
### Locally
#### This only works if tables can fit into memory!~
game_pbp <- tbl(db_con, "game_401327879") %>% collect()
game_info <- tbl(db_con, "game_ids") %>% collect()
full_game_info <- game_pbp %>%
mutate(
game_id_number = "401327879"
) %>%
left_join(
game_info,
by = c("game_id_number" = "game_id")
)
## Using SQL Joins
## this will work, and only the result needs to be able to fit into memory
full_game_info_sql <- tbl(db_con, "game_401327879") %>%
mutate(
game_id_number = "401327879"
) %>%
left_join(
tbl(db_con, "game_ids"),
by = c("game_id_number" = "game_id")
)
show_query(full_game_info_sql)
## Benchmark joins ----
mb_sql_comparison <- microbenchmark(
sql_join = {
tbl(db_con, "game_401327879") %>%
mutate(
game_id_number = "401327879"
) %>%
left_join(
tbl(db_con, "game_ids"),
by = c("game_id_number" = "game_id")
)
},
load_to_memory = {
game_pbp <- tbl(db_con, "game_401327879") %>% collect()
game_info <- tbl(db_con, "game_ids") %>% collect()
game_pbp %>%
mutate(
game_id_number = "401327879"
) %>%
left_join(
game_info,
by = c("game_id_number" = "game_id")
)
}
)
autoplot(mb_sql_comparison)
dbDisconnect(db_con)
|
#' @export
TestResult <- function(test_name, H0_description, test_statistic, distribution, ...){
crit_values <- critical_values(distribution=distribution, ...)
test_result <- structure(list("test" = test_name,
"H0"= H0_description,
"test_statistic" = test_statistic,
"critical_values" = crit_values),
class="TestResult")
return(test_result)
}
#' @export
print.TestResult <- function(test_result){
cat(paste0("\033[1m", test_result$test, "\033[0m\n")) # Bold font for test name
cat("Null Hypothesis (H0): ", test_result$H0, "\n\n")
cat("Test Statistic = ", test_result$test_statistic, "\n\n")
cat(capture.output(test_result$critical_values), sep="\n")
}
|
/R/test_result.r
|
permissive
|
kevinkevin556/econometrics
|
R
| false
| false
| 817
|
r
|
#' @export
TestResult <- function(test_name, H0_description, test_statistic, distribution, ...){
crit_values <- critical_values(distribution=distribution, ...)
test_result <- structure(list("test" = test_name,
"H0"= H0_description,
"test_statistic" = test_statistic,
"critical_values" = crit_values),
class="TestResult")
return(test_result)
}
#' @export
print.TestResult <- function(test_result){
cat(paste0("\033[1m", test_result$test, "\033[0m\n")) # Bold font for test name
cat("Null Hypothesis (H0): ", test_result$H0, "\n\n")
cat("Test Statistic = ", test_result$test_statistic, "\n\n")
cat(capture.output(test_result$critical_values), sep="\n")
}
|
# ----------------------------
# Function to get formals
# from the user provided function
# function by: Jon Harmon
# ----------------------------
library(magrittr)
# Return the union of formal argument names for `func`.
#
# `func` may be given either as a function object or as its name in a
# string; rlang::as_name(rlang::enquo(func)) normalises both forms to a
# string.  This pair is NSE-sensitive, so the call structure must stay
# exactly as written.
#
# For an S3 generic, the formals of the generic alone are usually just
# (x, ...), so instead the formals of every registered method are
# collected and their union returned.
get_all_formals <- function(func) {
  # If the function is passed in as a function, get its name.
  func <- rlang::as_name(rlang::enquo(func))
  if (sloop::is_s3_generic(func)) {
    # Build "generic.class" method names from the generic's method table.
    known_methods <- sloop::s3_methods_generic(func) %>%
      purrr::pmap_chr(~paste(..1, ..2, sep = "."))
    # Fetch each method, take its formal argument names, and de-duplicate.
    all_formals <- purrr::map(known_methods, sloop::s3_get_method) %>%
      purrr::map(formalArgs) %>%
      unlist() %>%
      unique()
    all_formals
  } else {
    # Not a generic: the function's own formals suffice.
    formalArgs(func)
  }
}
# get_all_formals("mean")
# get_all_formals(mean)
|
/Presentations/Week17/Cohort1/expressions/get_all_formals.R
|
no_license
|
r4ds/bookclub-Advanced_R
|
R
| false
| false
| 703
|
r
|
# ----------------------------
# Function to get formals
# from the user provided function
# function by: Jon Harmon
# ----------------------------
library(magrittr)
# Return the union of formal argument names for `func`.
#
# `func` may be given either as a function object or as its name in a
# string; rlang::as_name(rlang::enquo(func)) normalises both forms to a
# string.  This pair is NSE-sensitive, so the call structure must stay
# exactly as written.
#
# For an S3 generic, the formals of the generic alone are usually just
# (x, ...), so instead the formals of every registered method are
# collected and their union returned.
get_all_formals <- function(func) {
  # If the function is passed in as a function, get its name.
  func <- rlang::as_name(rlang::enquo(func))
  if (sloop::is_s3_generic(func)) {
    # Build "generic.class" method names from the generic's method table.
    known_methods <- sloop::s3_methods_generic(func) %>%
      purrr::pmap_chr(~paste(..1, ..2, sep = "."))
    # Fetch each method, take its formal argument names, and de-duplicate.
    all_formals <- purrr::map(known_methods, sloop::s3_get_method) %>%
      purrr::map(formalArgs) %>%
      unlist() %>%
      unique()
    all_formals
  } else {
    # Not a generic: the function's own formals suffice.
    formalArgs(func)
  }
}
# get_all_formals("mean")
# get_all_formals(mean)
|
## Read the NEI emissions data and the source classification table.
NEI <- readRDS("summarySCC_PM25.rds")
SCC <- readRDS("Source_Classification_Code.rds")
## Merge so each emission record carries its source description.
NEISCC <- merge(NEI, SCC, by = "SCC")
## Keep only coal-combustion-related sources (distinct names instead of
## reusing `coal` for a logical mask, a subset and an aggregate).
is_coal <- grepl("coal", NEISCC$Short.Name, ignore.case = TRUE)
coal_data <- subset(NEISCC, is_coal)
## Total emissions per year from coal combustion sources.
coal_totals <- aggregate(Emissions ~ year, coal_data, sum)
## Plot to a PNG device using the base plotting system.
png(filename = "plot4.png")
barplot(
  height = coal_totals$Emissions / 10^5,
  names.arg = coal_totals$year,
  xlab = "year",
  # Heights are divided by 10^5, so state the scale on the axis label
  # (the original label implied raw totals, which was misleading).
  ylab = expression('total PM'[2.5]*' (10'^5*' tons)'),
  main = expression('total PM'[2.5]*' across each year from coal combustion sources')
)
## Close the device so the file is flushed to disk.
dev.off()
|
/plot4.R
|
no_license
|
e-abuzaid/Exploratory-data-analysis-course-project
|
R
| false
| false
| 786
|
r
|
## Read the NEI emissions data and the source classification table.
NEI <- readRDS("summarySCC_PM25.rds")
SCC <- readRDS("Source_Classification_Code.rds")
## Merge so each emission record carries its source description.
NEISCC <- merge(NEI, SCC, by = "SCC")
## Keep only coal-combustion-related sources (distinct names instead of
## reusing `coal` for a logical mask, a subset and an aggregate).
is_coal <- grepl("coal", NEISCC$Short.Name, ignore.case = TRUE)
coal_data <- subset(NEISCC, is_coal)
## Total emissions per year from coal combustion sources.
coal_totals <- aggregate(Emissions ~ year, coal_data, sum)
## Plot to a PNG device using the base plotting system.
png(filename = "plot4.png")
barplot(
  height = coal_totals$Emissions / 10^5,
  names.arg = coal_totals$year,
  xlab = "year",
  # Heights are divided by 10^5, so state the scale on the axis label
  # (the original label implied raw totals, which was misleading).
  ylab = expression('total PM'[2.5]*' (10'^5*' tons)'),
  main = expression('total PM'[2.5]*' across each year from coal combustion sources')
)
## Close the device so the file is flushed to disk.
dev.off()
|
library(dplyr)
library(ggplot2)
library(grid)
library(gtable)
load('cleaned_data')
# Get names ####
renameVariable <- function(x) {
words = strsplit(x, '_')[[1]]
new_name = vector()
for (i in words){
s= toupper(substring(i, 1,1))
new_word = paste0(s, substring(i,2,nchar(i)))
if(i==words[1]){new_name = new_word}
else {new_name = paste(new_name, new_word)}
}
return(new_name)
}
old_names = names(data)
display_names = sapply(old_names, renameVariable)
getDisplayName = function(old_name){
return(display_names[old_name])
}
getOldName = function(display_name){
if(display_name %in% display_names){
return(names(display_names[display_names == display_name]))
}else{
return(display_name)
}
}
# identify categorical and numerical variables ####
fac = vector()
num = vector()
for(i in names(data)){
fac[i] = is.factor(data[[i]])
num[i] = is.numeric(data[[i]])
}
fac_var = names(data)[fac]
num_var = names(data)[num]
#overwrite and rearrange variables
fac_var = c('year', 'month', 'region','town',
'flat_type', 'flat_model', 'storey_range',
'sqft_bin', 'age_bin',
'resale_price_bin', 'price_psf_bin')
num_var = c('resale_price', 'price_per_sqft',
'floor_area_sqft', 'floor_area_sqm',
'age')
all_var = c('year', 'month', 'region','town',
'flat_type', 'flat_model', 'storey_range',
'floor_area_sqm','floor_area_sqft', 'sqft_bin',
'age','age_bin',
'resale_price','resale_price_bin',
'price_per_sqft','price_psf_bin')
fac_var_d = unname(getDisplayName(fac_var))
num_var_d = unname(getDisplayName(num_var))
all_var_d = unname(getDisplayName(all_var))
remove(fac,num)
# pareto function ####
# Draw a Pareto-style frequency bar chart for one categorical column.
#
# data:    data frame containing `cat_var`
# cat_var: name (string) of a factor column to tabulate
# xlab:    label for the category axis; defaults to `cat_var`
# ylab:    label for the frequency axis
pareto = function(data, cat_var, xlab=NA, ylab = 'Frequency'){
  # Fail loudly on invalid input.  The original `return('Error: ...')`
  # handed back a string that callers (e.g. unidist_cat) silently
  # discarded, masking the problem.
  if (!cat_var %in% names(data)) stop('Variable not found in data frame', call. = FALSE)
  if (!is.factor(data[[cat_var]])) stop('Variable is not factor type', call. = FALSE)
  if (is.na(xlab)){xlab=cat_var}
  # Frequency table sorted decreasing -> Pareto ordering of the bars.
  dt= as.data.frame(sort(table(data[cat_var], dnn=xlab), decreasing=TRUE), responseName = ylab)
  q = ggplot(dt,aes_string(xlab,ylab)) + geom_bar(stat='identity')
  q = q + theme(axis.text.x = element_text(angle = 90, hjust = 1))
  q = q + labs(x = getDisplayName(cat_var))
  print(q)
}
# ctable_bar function ####
ctable_bar = function(dt, rowlab='row',collab='col'){
df = as.data.frame(dt)
names(df) = c(rowlab, collab,'frequency')
g = qplot(df[[rowlab]], df[['frequency']],fill = df[[collab]]) +
geom_bar(stat='identity',colour = 'black') +
labs(x=getDisplayName(rowlab), y = 'Frequency', fill = getDisplayName(collab)) +
theme(axis.text.x = element_text(angle = 90, hjust = 1))
print(g)
}
# ctable_graph function ####
ctable_graph = function(dt,rowlab="row", collab="col"){
x = 1:ncol(dt)
y = 1:nrow(dt)
centers <- expand.grid(y,x)
par(mar = c(3,13,8,1))
colfunc <- colorRampPalette(c("aliceblue","blue"))
image(x, y, t(dt),
col = colfunc(as.integer(max(dt)/10)+1),
breaks = seq(0,as.integer(max(dt)/10)*10+10,10),
xaxt = 'n',
yaxt = 'n',
xlab = '',
ylab = '',
ylim = c(max(y) + 0.5, min(y) - 0.5)
)
text(centers[,2], centers[,1], c(dt), col= "black")
mtext(attributes(dt)$dimnames[[2]], at=1:ncol(dt), padj = -1, las = 2)
mtext(attributes(dt)$dimnames[[1]], at=1:nrow(dt), side = 2, las = 1, adj = 1.2)
abline(h=y + 0.5)
abline(v=x + 0.5)
mtext(paste("Contingency table of",getDisplayName(rowlab),"vs", getDisplayName(collab) ,sep=" "),side=1,font=2)
#title(main= paste("Contingency Table of",rowlab,"vs", collab ,sep=" "))
}
# unidist_num function ####
unidist_num = function(data, num_var, cat1, cat1vals){
temp = as.data.frame(data
%>% filter_(.dots= paste0(cat1, '%in% cat1vals')))
for(i in num_var){
h = ggplotGrob(qplot(temp[[i]],main = getDisplayName(i), xlab=NULL, ylab = 'Frequency')+geom_vline(xintercept=mean(temp[[i]]),col='red') + scale_x_continuous(limits=c(min(temp[[i]]), max(temp[[i]]))))
b = ggplotGrob(qplot('',temp[[i]], xlab = NULL, ylab =NULL, geom='boxplot')+coord_flip()+ scale_y_continuous(limits=c(min(temp[[i]]), max(temp[[i]]))))
maxwidths <- grid::unit.pmax(h$widths[2:5], b$widths[2:5])
h$widths[2:5] <- as.list(maxwidths)
b$widths[2:5] <- as.list(maxwidths)
g <- gtable_matrix(name = getDisplayName(i),
grobs = matrix(list(h, b), nrow = 2),
widths = unit(6.5, "in"),
heights = unit(c(2.8, 0.7), "in"))
grid.newpage()
grid.draw(g)
}
}
# unidist_cat function ####
unidist_cat = function(data, fac_var, cat1, cat1vals){
temp = as.data.frame(data
%>% filter_(.dots= paste0(cat1, '%in% cat1vals')))
for(i in fac_var){
pareto(temp,i)
}
}
# unidist_summary function ####
unidist_summary = function(data, var, cat1, cat1vals){
temp = as.data.frame(data
%>% filter_(.dots= paste0(cat1, '%in% cat1vals')))
s = summary(temp[[var]])
if(is.numeric(temp[[var]])){s['Std. Dev.'] = sd(temp[[var]])}
s
}
# bidist_cat_cat functions ####
bidist_cat_cat_table = function(data, cat1, cat2){
ctable_graph(table(data[[cat1]],data[[cat2]]),cat1,cat2)
}
bidist_cat_cat_bar = function(data, cat1, cat2){
ctable_bar(table(data[[cat1]],data[[cat2]]),cat1,cat2)
}
# bidist_cat_num function ####
bidist_cat_num = function(data, cat1, num1){
qplot(data[[cat1]],data[[num1]],geom='boxplot',xlab=getDisplayName(cat1),ylab=getDisplayName(num1)) + theme(axis.text.x = element_text(angle = 90, hjust = 1))
}
# bidist_num_num function ####
bidist_num_num = function (data, num1, num2){
qplot(data[[num1]],data[[num2]],xlab=getDisplayName(num1),ylab=getDisplayName(num2))
}
# mdist_2num_1cat function ####
multidist_2num_1cat = function(data, num1, num2, cat1){
qplot(data[[num1]], data[[num2]], colour = data[[cat1]])+
labs(x = getDisplayName(num1), y = getDisplayName(num2), colour = getDisplayName(cat1))
}
# mdist_1num_2cat function ####
multidist_1num_2cat = function(data, cat1, num1, cat2){
qplot(data[[cat1]], data[[num1]], colour = data[[cat2]])+
labs(x = getDisplayName(cat1),y = getDisplayName(num1), colour = getDisplayName(cat2)) +
theme(axis.text.x = element_text(angle = 90, hjust = 1))
}
# dist function ####
# Generic bivariate distribution plot with optional faceting (`pan`) and
# colouring (`col`).  The geometry is chosen from the column types:
#   factor  x factor  -> stacked frequency bars
#   factor  x numeric -> boxplot (or coloured points when `col` is set)
#   numeric x numeric -> scatter plot
# pan_vals / col_vals restrict the data to those facet / colour levels.
dist = function(data, x, y, pan =NULL, pan_vals=NULL, col=NULL, col_vals=NULL){
  # 'Not Required' is a UI sentinel meaning "no facet/colour".  Guard
  # with is.null() first: the original evaluated NULL == 'Not Required',
  # which errors with "argument is of length zero" whenever pan/col were
  # left at their NULL defaults.
  if(!is.null(pan) && pan == 'Not Required'){
    pan = NULL
    pan_vals = NULL
  }
  if(!is.null(col) && col == 'Not Required'){
    col = NULL
    col_vals = NULL
  }
  temp = data
  # Restrict to the requested facet / colour levels when given.
  if(!is.null(pan_vals)){temp = temp %>% filter_(.dots= paste0(pan, ' %in% pan_vals'))}
  if(!is.null(col_vals)){temp = temp %>% filter_(.dots= paste0(col, ' %in% col_vals'))}
  temp = as.data.frame(temp)
  g = ggplot(temp, aes_string(x,y)) + theme(axis.text.x = element_text(angle = 90, hjust = 1))
  if(is.factor(data[[x]]) & is.factor(data[[y]])){
    # Two factors: tabulate frequencies (per facet level when faceting)
    # and draw stacked bars.
    if(!is.null(pan)){
      df = as.data.frame(table(temp[[x]],temp[[y]],temp[[pan]]))
      names(df) = c(x, y,pan,'frequency')
    }else{
      df = as.data.frame(table(temp[[x]],temp[[y]]))
      names(df) = c(x, y,'frequency')
    }
    g = ggplot(df, aes_string(x,'frequency')) + theme(axis.text.x = element_text(angle = 90, hjust = 1))
    g = g + geom_col(aes(fill = df[[y]]))
    g = g + labs(x = getDisplayName(x),y = 'Frequency', fill = getDisplayName(y))
  }
  if(is.factor(data[[x]]) & is.numeric(data[[y]])){
    if(!is.null(col)){
      g = g + geom_point(aes(col=temp[[col]]))
      g = g + labs(x = getDisplayName(x),y = getDisplayName(y), col = getDisplayName(col))
    }else{
      g = g + geom_boxplot()
      g = g + labs(x = getDisplayName(x),y = getDisplayName(y))
    }
  }
  if(is.numeric(data[[x]]) & is.numeric(data[[y]])){
    if(!is.null(col)){
      g = g + geom_point(aes(col=temp[[col]]))
      g = g + labs(x = getDisplayName(x), y = getDisplayName(y), col = getDisplayName(col))
    }
    else{
      g = g + geom_point()
      g = g + labs(x = getDisplayName(x), y = getDisplayName(y))
    }
  }
  # Facet last so it applies to whichever geometry was chosen.
  if(!is.null(pan)){g = g + facet_wrap(as.formula(paste("~", pan)), nrow = 2, scales = 'free')}
  print(g)
}
# num_trend_gen function ####
num_trend_gen = function(data, num1, func){
func2 = paste0(tolower(substring(func,1,1)), substring(func,2,nchar(func)))
ggplot(data, aes_string(x='date', y=num1)) +
stat_summary(fun.y = func2,
geom='line') +
labs(title =paste(func, 'of', getDisplayName(num1), 'Over Time', sep=' '), x = 'Date', y = paste(func, 'of', getDisplayName(num1)))
}
# num_trend_bycat function ####
# Line chart of a per-date summary (Mean or Median) of `num1`, one line
# per level of `cat1`, restricted to the levels listed in `cat1vals`.
#
# data:     data frame with a `date` column plus `num1` and `cat1`
# num1:     name (string) of the numeric column to summarise
# cat1:     name (string) of the grouping factor
# cat1vals: levels of `cat1` to keep
# func:     "Mean" or "Median" (capitalised)
num_trend_bycat = function(data, num1, cat1, cat1vals, func){
  # Resolve the summary function; fail loudly on anything else instead
  # of returning an error string the caller could mistake for a result.
  if (func == "Mean") {
    f = function(x){mean(x)}
  } else if (func == "Median") {
    f = function(x){median(x)}
  } else {
    stop("func needs to be Mean or Median in quotes and caps first letter", call. = FALSE)
  }
  # Summarise num1 by (cat1, date); `f` is picked up by the NSE call.
  temp = as.data.frame(data
                       %>% group_by_(.dots=c(cat1,'date'))
                       %>% summarise_(.dots= paste('summary = f(',num1,')',sep=''))
                       %>% filter_(.dots= paste0(cat1, '%in% cat1vals')))
  names(temp) = c(cat1, 'date','summary')
  g = ggplot(temp, aes(date, summary))
  g = g + geom_line(aes_string(color=cat1))
  g = g + labs(x = 'Date', y = paste(func, "of", getDisplayName(num1), sep = " "), color = getDisplayName(cat1) )
  print(g)
}
# agg_bi function ####
agg_bi = function(data, cat1, num1, cat1vals, func, desc = FALSE){
if (func == "Mean"){f = function(x){mean(x)}}
else {
if (func == "Median"){f = function(x){median(x)}}
else {return("Error: func needs to be Mean or Median in quotes and caps first letter")}
}
temp = as.data.frame(data[c(cat1,num1)]
%>% group_by_(cat1)
%>% summarise_(.dots= paste('summary = f(',num1,')',sep=''))
%>% filter_(.dots= paste0(cat1, '%in% cat1vals')))
names(temp) = c(cat1, 'summary')
if(desc){
g =ggplot(data = temp, aes_string(x=reorder(temp[[cat1]],temp[['summary']], function(x)-mean(x)), y='summary'))
}else{
g =ggplot(data = temp, aes_string(x=cat1, y='summary'))
}
g = g +
geom_bar(stat='identity') +
theme(axis.text.x = element_text(angle = 90, hjust = 1)) +
labs(x= getDisplayName(cat1), y = paste(func,'of', getDisplayName(num1), sep= ' '))
print(g)
}
# agg_tri function ####
agg_tri = function(data, cat1, num1, cat2, cat1vals, cat2vals, func, desc = FALSE){
if (func == "Mean"){f = function(x){mean(x)}}
else {
if (func == "Median"){f = function(x){median(x)}}
else {return("Error: func needs to be Mean or Median in quotes and caps first letter")}
}
temp = as.data.frame(data %>% group_by_(.dots=c(cat1, cat2))
%>% summarise_(.dots= paste('summary = f(',num1,')',sep=''))
%>% filter_(.dots= paste0(cat2, '%in% cat2vals'))
%>% filter_(.dots= paste0(cat1, '%in% cat1vals')))
names(temp) = c(cat1, cat2,'summary')
desc_sum <- paste0('desc(', 'summary)')
temp2 = as.data.frame(temp %>% arrange_(.dots= c(cat2,desc_sum))
%>% mutate(.r = row_number()))
if(desc){
g =ggplot(data = temp2, aes(.r,summary))
g = g + facet_wrap(as.formula(paste("~", cat2)), nrow = 2, scales = 'free')
g = g + geom_col()
g = g + theme(axis.text.x = element_text(angle = 90, hjust = 1))
g = g + scale_x_continuous(breaks = temp2$.r, labels = temp2[[cat1]])
g = g + labs(x = getDisplayName(cat1), y = paste(func, 'of', getDisplayName(num1), sep = ' '), facet = getDisplayName(cat2))
g
}else{
g =ggplot(data = temp, aes_string(x=cat1, y='summary'))
g = g + facet_wrap(as.formula(paste("~", cat2)), nrow = 2, scales = 'free')
g = g + geom_col()
g = g + theme(axis.text.x = element_text(angle = 90, hjust = 1))
g = g + labs(x = getDisplayName(cat1), y = paste(func, 'of', getDisplayName(num1), sep = ' '))
g
}
}
|
/eda.R
|
no_license
|
carolynkpi/HDB_Phase1
|
R
| false
| false
| 12,057
|
r
|
library(dplyr)
library(ggplot2)
library(grid)
library(gtable)
load('cleaned_data')
# Get names ####
renameVariable <- function(x) {
words = strsplit(x, '_')[[1]]
new_name = vector()
for (i in words){
s= toupper(substring(i, 1,1))
new_word = paste0(s, substring(i,2,nchar(i)))
if(i==words[1]){new_name = new_word}
else {new_name = paste(new_name, new_word)}
}
return(new_name)
}
old_names = names(data)
display_names = sapply(old_names, renameVariable)
getDisplayName = function(old_name){
return(display_names[old_name])
}
getOldName = function(display_name){
if(display_name %in% display_names){
return(names(display_names[display_names == display_name]))
}else{
return(display_name)
}
}
# identify categorical and numerical variables ####
fac = vector()
num = vector()
for(i in names(data)){
fac[i] = is.factor(data[[i]])
num[i] = is.numeric(data[[i]])
}
fac_var = names(data)[fac]
num_var = names(data)[num]
#overwrite and rearrange variables
fac_var = c('year', 'month', 'region','town',
'flat_type', 'flat_model', 'storey_range',
'sqft_bin', 'age_bin',
'resale_price_bin', 'price_psf_bin')
num_var = c('resale_price', 'price_per_sqft',
'floor_area_sqft', 'floor_area_sqm',
'age')
all_var = c('year', 'month', 'region','town',
'flat_type', 'flat_model', 'storey_range',
'floor_area_sqm','floor_area_sqft', 'sqft_bin',
'age','age_bin',
'resale_price','resale_price_bin',
'price_per_sqft','price_psf_bin')
fac_var_d = unname(getDisplayName(fac_var))
num_var_d = unname(getDisplayName(num_var))
all_var_d = unname(getDisplayName(all_var))
remove(fac,num)
# pareto function ####
# Draw a Pareto-style frequency bar chart for one categorical column.
#
# data:    data frame containing `cat_var`
# cat_var: name (string) of a factor column to tabulate
# xlab:    label for the category axis; defaults to `cat_var`
# ylab:    label for the frequency axis
pareto = function(data, cat_var, xlab=NA, ylab = 'Frequency'){
  # Fail loudly on invalid input.  The original `return('Error: ...')`
  # handed back a string that callers (e.g. unidist_cat) silently
  # discarded, masking the problem.
  if (!cat_var %in% names(data)) stop('Variable not found in data frame', call. = FALSE)
  if (!is.factor(data[[cat_var]])) stop('Variable is not factor type', call. = FALSE)
  if (is.na(xlab)){xlab=cat_var}
  # Frequency table sorted decreasing -> Pareto ordering of the bars.
  dt= as.data.frame(sort(table(data[cat_var], dnn=xlab), decreasing=TRUE), responseName = ylab)
  q = ggplot(dt,aes_string(xlab,ylab)) + geom_bar(stat='identity')
  q = q + theme(axis.text.x = element_text(angle = 90, hjust = 1))
  q = q + labs(x = getDisplayName(cat_var))
  print(q)
}
# ctable_bar function ####
ctable_bar = function(dt, rowlab='row',collab='col'){
df = as.data.frame(dt)
names(df) = c(rowlab, collab,'frequency')
g = qplot(df[[rowlab]], df[['frequency']],fill = df[[collab]]) +
geom_bar(stat='identity',colour = 'black') +
labs(x=getDisplayName(rowlab), y = 'Frequency', fill = getDisplayName(collab)) +
theme(axis.text.x = element_text(angle = 90, hjust = 1))
print(g)
}
# ctable_graph function ####
ctable_graph = function(dt,rowlab="row", collab="col"){
x = 1:ncol(dt)
y = 1:nrow(dt)
centers <- expand.grid(y,x)
par(mar = c(3,13,8,1))
colfunc <- colorRampPalette(c("aliceblue","blue"))
image(x, y, t(dt),
col = colfunc(as.integer(max(dt)/10)+1),
breaks = seq(0,as.integer(max(dt)/10)*10+10,10),
xaxt = 'n',
yaxt = 'n',
xlab = '',
ylab = '',
ylim = c(max(y) + 0.5, min(y) - 0.5)
)
text(centers[,2], centers[,1], c(dt), col= "black")
mtext(attributes(dt)$dimnames[[2]], at=1:ncol(dt), padj = -1, las = 2)
mtext(attributes(dt)$dimnames[[1]], at=1:nrow(dt), side = 2, las = 1, adj = 1.2)
abline(h=y + 0.5)
abline(v=x + 0.5)
mtext(paste("Contingency table of",getDisplayName(rowlab),"vs", getDisplayName(collab) ,sep=" "),side=1,font=2)
#title(main= paste("Contingency Table of",rowlab,"vs", collab ,sep=" "))
}
# unidist_num function ####
unidist_num = function(data, num_var, cat1, cat1vals){
temp = as.data.frame(data
%>% filter_(.dots= paste0(cat1, '%in% cat1vals')))
for(i in num_var){
h = ggplotGrob(qplot(temp[[i]],main = getDisplayName(i), xlab=NULL, ylab = 'Frequency')+geom_vline(xintercept=mean(temp[[i]]),col='red') + scale_x_continuous(limits=c(min(temp[[i]]), max(temp[[i]]))))
b = ggplotGrob(qplot('',temp[[i]], xlab = NULL, ylab =NULL, geom='boxplot')+coord_flip()+ scale_y_continuous(limits=c(min(temp[[i]]), max(temp[[i]]))))
maxwidths <- grid::unit.pmax(h$widths[2:5], b$widths[2:5])
h$widths[2:5] <- as.list(maxwidths)
b$widths[2:5] <- as.list(maxwidths)
g <- gtable_matrix(name = getDisplayName(i),
grobs = matrix(list(h, b), nrow = 2),
widths = unit(6.5, "in"),
heights = unit(c(2.8, 0.7), "in"))
grid.newpage()
grid.draw(g)
}
}
# unidist_cat function ####
unidist_cat = function(data, fac_var, cat1, cat1vals){
temp = as.data.frame(data
%>% filter_(.dots= paste0(cat1, '%in% cat1vals')))
for(i in fac_var){
pareto(temp,i)
}
}
# unidist_summary function ####
unidist_summary = function(data, var, cat1, cat1vals){
temp = as.data.frame(data
%>% filter_(.dots= paste0(cat1, '%in% cat1vals')))
s = summary(temp[[var]])
if(is.numeric(temp[[var]])){s['Std. Dev.'] = sd(temp[[var]])}
s
}
# bidist_cat_cat functions ####
bidist_cat_cat_table = function(data, cat1, cat2){
ctable_graph(table(data[[cat1]],data[[cat2]]),cat1,cat2)
}
bidist_cat_cat_bar = function(data, cat1, cat2){
ctable_bar(table(data[[cat1]],data[[cat2]]),cat1,cat2)
}
# bidist_cat_num function ####
bidist_cat_num = function(data, cat1, num1){
qplot(data[[cat1]],data[[num1]],geom='boxplot',xlab=getDisplayName(cat1),ylab=getDisplayName(num1)) + theme(axis.text.x = element_text(angle = 90, hjust = 1))
}
# bidist_num_num function ####
bidist_num_num = function (data, num1, num2){
qplot(data[[num1]],data[[num2]],xlab=getDisplayName(num1),ylab=getDisplayName(num2))
}
# mdist_2num_1cat function ####
multidist_2num_1cat = function(data, num1, num2, cat1){
qplot(data[[num1]], data[[num2]], colour = data[[cat1]])+
labs(x = getDisplayName(num1), y = getDisplayName(num2), colour = getDisplayName(cat1))
}
# mdist_1num_2cat function ####
multidist_1num_2cat = function(data, cat1, num1, cat2){
qplot(data[[cat1]], data[[num1]], colour = data[[cat2]])+
labs(x = getDisplayName(cat1),y = getDisplayName(num1), colour = getDisplayName(cat2)) +
theme(axis.text.x = element_text(angle = 90, hjust = 1))
}
# dist function ####
# Generic bivariate distribution plot with optional faceting (`pan`) and
# colouring (`col`).  The geometry is chosen from the column types:
#   factor  x factor  -> stacked frequency bars
#   factor  x numeric -> boxplot (or coloured points when `col` is set)
#   numeric x numeric -> scatter plot
# pan_vals / col_vals restrict the data to those facet / colour levels.
dist = function(data, x, y, pan =NULL, pan_vals=NULL, col=NULL, col_vals=NULL){
  # 'Not Required' is a UI sentinel meaning "no facet/colour".  Guard
  # with is.null() first: the original evaluated NULL == 'Not Required',
  # which errors with "argument is of length zero" whenever pan/col were
  # left at their NULL defaults.
  if(!is.null(pan) && pan == 'Not Required'){
    pan = NULL
    pan_vals = NULL
  }
  if(!is.null(col) && col == 'Not Required'){
    col = NULL
    col_vals = NULL
  }
  temp = data
  # Restrict to the requested facet / colour levels when given.
  if(!is.null(pan_vals)){temp = temp %>% filter_(.dots= paste0(pan, ' %in% pan_vals'))}
  if(!is.null(col_vals)){temp = temp %>% filter_(.dots= paste0(col, ' %in% col_vals'))}
  temp = as.data.frame(temp)
  g = ggplot(temp, aes_string(x,y)) + theme(axis.text.x = element_text(angle = 90, hjust = 1))
  if(is.factor(data[[x]]) & is.factor(data[[y]])){
    # Two factors: tabulate frequencies (per facet level when faceting)
    # and draw stacked bars.
    if(!is.null(pan)){
      df = as.data.frame(table(temp[[x]],temp[[y]],temp[[pan]]))
      names(df) = c(x, y,pan,'frequency')
    }else{
      df = as.data.frame(table(temp[[x]],temp[[y]]))
      names(df) = c(x, y,'frequency')
    }
    g = ggplot(df, aes_string(x,'frequency')) + theme(axis.text.x = element_text(angle = 90, hjust = 1))
    g = g + geom_col(aes(fill = df[[y]]))
    g = g + labs(x = getDisplayName(x),y = 'Frequency', fill = getDisplayName(y))
  }
  if(is.factor(data[[x]]) & is.numeric(data[[y]])){
    if(!is.null(col)){
      g = g + geom_point(aes(col=temp[[col]]))
      g = g + labs(x = getDisplayName(x),y = getDisplayName(y), col = getDisplayName(col))
    }else{
      g = g + geom_boxplot()
      g = g + labs(x = getDisplayName(x),y = getDisplayName(y))
    }
  }
  if(is.numeric(data[[x]]) & is.numeric(data[[y]])){
    if(!is.null(col)){
      g = g + geom_point(aes(col=temp[[col]]))
      g = g + labs(x = getDisplayName(x), y = getDisplayName(y), col = getDisplayName(col))
    }
    else{
      g = g + geom_point()
      g = g + labs(x = getDisplayName(x), y = getDisplayName(y))
    }
  }
  # Facet last so it applies to whichever geometry was chosen.
  if(!is.null(pan)){g = g + facet_wrap(as.formula(paste("~", pan)), nrow = 2, scales = 'free')}
  print(g)
}
# num_trend_gen function ####
num_trend_gen = function(data, num1, func){
func2 = paste0(tolower(substring(func,1,1)), substring(func,2,nchar(func)))
ggplot(data, aes_string(x='date', y=num1)) +
stat_summary(fun.y = func2,
geom='line') +
labs(title =paste(func, 'of', getDisplayName(num1), 'Over Time', sep=' '), x = 'Date', y = paste(func, 'of', getDisplayName(num1)))
}
# num_trend_bycat function ####
num_trend_bycat = function(data, num1, cat1, cat1vals, func){
if (func == "Mean"){f = function(x){mean(x)}}
else {
if (func == "Median"){f = function(x){median(x)}}
else {return("Error: func needs to be Mean or Median in quotes and caps first letter")}
}
temp = as.data.frame(data
%>% group_by_(.dots=c(cat1,'date'))
%>% summarise_(.dots= paste('summary = f(',num1,')',sep=''))
%>% filter_(.dots= paste0(cat1, '%in% cat1vals')))
names(temp) = c(cat1, 'date','summary')
g = ggplot(temp, aes(date, summary))
g = g + geom_line(aes_string(color=cat1))
g = g + labs(x = 'Date', y = paste(func, "of", getDisplayName(num1), sep = " "), color = getDisplayName(cat1) )
print(g)
}
# agg_bi function ####
agg_bi = function(data, cat1, num1, cat1vals, func, desc = FALSE){
if (func == "Mean"){f = function(x){mean(x)}}
else {
if (func == "Median"){f = function(x){median(x)}}
else {return("Error: func needs to be Mean or Median in quotes and caps first letter")}
}
temp = as.data.frame(data[c(cat1,num1)]
%>% group_by_(cat1)
%>% summarise_(.dots= paste('summary = f(',num1,')',sep=''))
%>% filter_(.dots= paste0(cat1, '%in% cat1vals')))
names(temp) = c(cat1, 'summary')
if(desc){
g =ggplot(data = temp, aes_string(x=reorder(temp[[cat1]],temp[['summary']], function(x)-mean(x)), y='summary'))
}else{
g =ggplot(data = temp, aes_string(x=cat1, y='summary'))
}
g = g +
geom_bar(stat='identity') +
theme(axis.text.x = element_text(angle = 90, hjust = 1)) +
labs(x= getDisplayName(cat1), y = paste(func,'of', getDisplayName(num1), sep= ' '))
print(g)
}
# agg_tri function ####
agg_tri = function(data, cat1, num1, cat2, cat1vals, cat2vals, func, desc = FALSE){
if (func == "Mean"){f = function(x){mean(x)}}
else {
if (func == "Median"){f = function(x){median(x)}}
else {return("Error: func needs to be Mean or Median in quotes and caps first letter")}
}
temp = as.data.frame(data %>% group_by_(.dots=c(cat1, cat2))
%>% summarise_(.dots= paste('summary = f(',num1,')',sep=''))
%>% filter_(.dots= paste0(cat2, '%in% cat2vals'))
%>% filter_(.dots= paste0(cat1, '%in% cat1vals')))
names(temp) = c(cat1, cat2,'summary')
desc_sum <- paste0('desc(', 'summary)')
temp2 = as.data.frame(temp %>% arrange_(.dots= c(cat2,desc_sum))
%>% mutate(.r = row_number()))
if(desc){
g =ggplot(data = temp2, aes(.r,summary))
g = g + facet_wrap(as.formula(paste("~", cat2)), nrow = 2, scales = 'free')
g = g + geom_col()
g = g + theme(axis.text.x = element_text(angle = 90, hjust = 1))
g = g + scale_x_continuous(breaks = temp2$.r, labels = temp2[[cat1]])
g = g + labs(x = getDisplayName(cat1), y = paste(func, 'of', getDisplayName(num1), sep = ' '), facet = getDisplayName(cat2))
g
}else{
g =ggplot(data = temp, aes_string(x=cat1, y='summary'))
g = g + facet_wrap(as.formula(paste("~", cat2)), nrow = 2, scales = 'free')
g = g + geom_col()
g = g + theme(axis.text.x = element_text(angle = 90, hjust = 1))
g = g + labs(x = getDisplayName(cat1), y = paste(func, 'of', getDisplayName(num1), sep = ' '))
g
}
}
|
library(gtools)

# Return the name of the hospital in `state` with the lowest 30-day
# mortality rate for `outcome` ("heart attack", "heart failure" or
# "pneumonia").  Ties are broken alphabetically by hospital name.
#
# Fixes over the original:
#  * The rate columns (11/17/23) are read as character (the file uses
#    "Not Available"), so `min()` compared strings lexicographically and
#    could pick the wrong hospital; rates are now coerced to numeric and
#    missing values dropped.
#  * The three copy-pasted outcome branches are replaced by a lookup.
#  * The dead `p` flag is removed (stop() already halts execution).
best <- function(state, outcome){
  data <- read.csv("outcome-of-care-measures.csv", stringsAsFactors = FALSE)

  if (!state %in% unique(data$State)) {
    stop("Invalid State")
  }
  # Map each valid outcome to its mortality-rate column index; the
  # lookup doubles as the validity check.
  outcome_col <- c("heart attack" = 11, "heart failure" = 17, "pneumonia" = 23)
  if (!outcome %in% names(outcome_col)) {
    stop("Invalid Outcome")
  }

  desired <- data[data$State == state, ]
  # Coerce rates to numeric; "Not Available" becomes NA (warning muted).
  rates <- suppressWarnings(as.numeric(desired[, outcome_col[[outcome]]]))
  keep <- !is.na(rates)
  desired <- desired[keep, ]
  rates <- rates[keep]
  # Sort alphabetically first so which.min() lands on the
  # alphabetically-first hospital among ties (matches the original's
  # mixedorder + first-row behaviour).
  ord <- mixedorder(desired$Hospital.Name)
  desired <- desired[ord, ]
  rates <- rates[ord]
  desired$Hospital.Name[which.min(rates)]
}
|
/best.R
|
no_license
|
yagmuruzuun/ProgrammingAssignment3
|
R
| false
| false
| 1,106
|
r
|
library(gtools)

# Return the name of the hospital in `state` with the lowest 30-day
# mortality rate for `outcome` ("heart attack", "heart failure" or
# "pneumonia").  Ties are broken alphabetically by hospital name.
#
# Fixes over the original:
#  * The rate columns (11/17/23) are read as character (the file uses
#    "Not Available"), so `min()` compared strings lexicographically and
#    could pick the wrong hospital; rates are now coerced to numeric and
#    missing values dropped.
#  * The three copy-pasted outcome branches are replaced by a lookup.
#  * The dead `p` flag is removed (stop() already halts execution).
best <- function(state, outcome){
  data <- read.csv("outcome-of-care-measures.csv", stringsAsFactors = FALSE)

  if (!state %in% unique(data$State)) {
    stop("Invalid State")
  }
  # Map each valid outcome to its mortality-rate column index; the
  # lookup doubles as the validity check.
  outcome_col <- c("heart attack" = 11, "heart failure" = 17, "pneumonia" = 23)
  if (!outcome %in% names(outcome_col)) {
    stop("Invalid Outcome")
  }

  desired <- data[data$State == state, ]
  # Coerce rates to numeric; "Not Available" becomes NA (warning muted).
  rates <- suppressWarnings(as.numeric(desired[, outcome_col[[outcome]]]))
  keep <- !is.na(rates)
  desired <- desired[keep, ]
  rates <- rates[keep]
  # Sort alphabetically first so which.min() lands on the
  # alphabetically-first hospital among ties (matches the original's
  # mixedorder + first-row behaviour).
  ord <- mixedorder(desired$Hospital.Name)
  desired <- desired[ord, ]
  rates <- rates[ord]
  desired$Hospital.Name[which.min(rates)]
}
|
## Download the dataset archive if it is not already present locally.
zip_name <- "household_power_consumption.zip"
if (!file.exists(zip_name)) {
  download.file("https://d396qusza40orc.cloudfront.net/exdata%2Fdata%2Fhousehold_power_consumption.zip",
                destfile = zip_name, method = "curl")
}

## Extract the text file from the archive.
txt_name <- "household_power_consumption.txt"
unzip(zip_name, txt_name)

## Parse the semicolon-separated data; "?" marks missing values.
## First two columns (Date, Time) stay character, the rest are numeric.
power <- read.csv(txt_name, header = TRUE,
                  sep = ";", stringsAsFactors = FALSE, na.strings = "?",
                  colClasses = c(rep("character", 2), rep("numeric", 7)))

## Keep only the two days of interest (2007-02-01 and 2007-02-02).
power <- power[power$Date %in% c("1/2/2007", "2/2/2007"), ]

## Combine Date and Time into a single timestamp column,
## formatted as dd/mm/yyyy hh:mm:ss.
power$DateTime <- strptime(paste(power$Date, power$Time),
                           "%d/%m/%Y %H:%M:%S")

## Draw the three sub-metering series plus a legend into plot3.png.
png(filename = "plot3.png", width = 480, height = 480)
plot(power$DateTime, power$Sub_metering_1, type = "l", xlab = "",
     ylab = "Energy sub metering")
lines(power$DateTime, power$Sub_metering_2, col = "red")
lines(power$DateTime, power$Sub_metering_3, col = "blue")
legend("topright", legend = c("Sub_metering_1", "Sub_metering_2", "Sub_metering_3"),
       col = c("black", "red", "blue"), lwd = 1)
dev.off()
|
/plot3.R
|
no_license
|
gogounou/ExData_Plotting1
|
R
| false
| false
| 1,425
|
r
|
## Download the dataset archive if it is not already present locally.
zip_name <- "household_power_consumption.zip"
if (!file.exists(zip_name)) {
  download.file("https://d396qusza40orc.cloudfront.net/exdata%2Fdata%2Fhousehold_power_consumption.zip",
                destfile = zip_name, method = "curl")
}

## Extract the text file from the archive.
txt_name <- "household_power_consumption.txt"
unzip(zip_name, txt_name)

## Parse the semicolon-separated data; "?" marks missing values.
## First two columns (Date, Time) stay character, the rest are numeric.
power <- read.csv(txt_name, header = TRUE,
                  sep = ";", stringsAsFactors = FALSE, na.strings = "?",
                  colClasses = c(rep("character", 2), rep("numeric", 7)))

## Keep only the two days of interest (2007-02-01 and 2007-02-02).
power <- power[power$Date %in% c("1/2/2007", "2/2/2007"), ]

## Combine Date and Time into a single timestamp column,
## formatted as dd/mm/yyyy hh:mm:ss.
power$DateTime <- strptime(paste(power$Date, power$Time),
                           "%d/%m/%Y %H:%M:%S")

## Draw the three sub-metering series plus a legend into plot3.png.
png(filename = "plot3.png", width = 480, height = 480)
plot(power$DateTime, power$Sub_metering_1, type = "l", xlab = "",
     ylab = "Energy sub metering")
lines(power$DateTime, power$Sub_metering_2, col = "red")
lines(power$DateTime, power$Sub_metering_3, col = "blue")
legend("topright", legend = c("Sub_metering_1", "Sub_metering_2", "Sub_metering_3"),
       col = c("black", "red", "blue"), lwd = 1)
dev.off()
|
## Maximum-likelihood optimisation run for the reduced BiChroM model.
## Loads the likelihood machinery and the eudicot phylogeny; these
## sourced files define negloglikelihood.wh and angiosperm.tree
## (presumably also the Q-matrix and pruning helpers -- confirm in the
## sourced scripts).
source( "masternegloglikereduced1.R" )
source("eudicottree.R" )
library( "expm" )
source( "Qmatrixwoodherb3.R" )
source("Pruning2.R")
# Observed data, one record per taxon (comma-separated, no header).
bichrom.dataset<-read.table( "eudicotvals.txt",header=FALSE,sep=",",stringsAsFactors=FALSE)
# Largest chromosome number modelled; states run 0..last.state for each
# of two categories, hence the 2*(last.state+1) entries in pi.0 below.
last.state=50
# Starting parameter vector for this run (sample #474), flattened to
# numeric.  Values are on the log scale -- exp() is applied to the
# optimum below.
uniform.samples<-read.csv("sample474.csv",header=FALSE)
a<- as.numeric(t(uniform.samples))
# Uniform prior over all 2*(last.state+1) root states.
p.0<-rep(1,2*(last.state+1))/(2*(last.state+1))
results<-rep(0,10)
# Nelder-Mead minimisation of the negative log-likelihood; wrapped in
# try() so a failed start yields an NA row instead of aborting a batch.
mle<-try(optim(par=a,fn=negloglikelihood.wh, method= "Nelder-Mead", bichrom.phy=angiosperm.tree, bichrom.data=bichrom.dataset,max.chromosome=last.state,pi.0=p.0),silent=TRUE)
print(mle)
# Columns 1-9: back-transformed (exp) parameter estimates;
# column 10: attained negative log-likelihood.  All NA on failure.
if(class(mle)=="try-error"){results<-rep(NA,10)}else{
results[1:9]<-exp(mle$par)
results[10]<-mle$value}
write.table(results,file="results474.csv",sep=",")
|
/Reduced model optimizations/explorelikereduced474.R
|
no_license
|
roszenil/Bichromdryad
|
R
| false
| false
| 750
|
r
|
## Maximum-likelihood optimisation run for the reduced BiChroM model.
## Loads the likelihood machinery and the eudicot phylogeny; these
## sourced files define negloglikelihood.wh and angiosperm.tree
## (presumably also the Q-matrix and pruning helpers -- confirm in the
## sourced scripts).
source( "masternegloglikereduced1.R" )
source("eudicottree.R" )
library( "expm" )
source( "Qmatrixwoodherb3.R" )
source("Pruning2.R")
# Observed data, one record per taxon (comma-separated, no header).
bichrom.dataset<-read.table( "eudicotvals.txt",header=FALSE,sep=",",stringsAsFactors=FALSE)
# Largest chromosome number modelled; states run 0..last.state for each
# of two categories, hence the 2*(last.state+1) entries in pi.0 below.
last.state=50
# Starting parameter vector for this run (sample #474), flattened to
# numeric.  Values are on the log scale -- exp() is applied to the
# optimum below.
uniform.samples<-read.csv("sample474.csv",header=FALSE)
a<- as.numeric(t(uniform.samples))
# Uniform prior over all 2*(last.state+1) root states.
p.0<-rep(1,2*(last.state+1))/(2*(last.state+1))
results<-rep(0,10)
# Nelder-Mead minimisation of the negative log-likelihood; wrapped in
# try() so a failed start yields an NA row instead of aborting a batch.
mle<-try(optim(par=a,fn=negloglikelihood.wh, method= "Nelder-Mead", bichrom.phy=angiosperm.tree, bichrom.data=bichrom.dataset,max.chromosome=last.state,pi.0=p.0),silent=TRUE)
print(mle)
# Columns 1-9: back-transformed (exp) parameter estimates;
# column 10: attained negative log-likelihood.  All NA on failure.
if(class(mle)=="try-error"){results<-rep(NA,10)}else{
results[1:9]<-exp(mle$par)
results[10]<-mle$value}
write.table(results,file="results474.csv",sep=",")
|
## Purled R code from the bridgesampling vignette: a Bayesian t-test via
## bridge sampling with Stan. Chunks tagged "eval=FALSE" document the Stan
## models and the MCMC calls but are NOT executed here; their results
## (H0, H1, H0.error, H1.error, Hplus, BF10.BayesFactor, ...) are restored
## from a cached .RData file in the "echo=FALSE" chunk below so the
## vignette builds without rstan or long sampling runs.
## -----------------------------------------------------------------------------
library(bridgesampling)
set.seed(12345)
# Sleep data from t.test example
data(sleep)
# compute difference scores
y <- sleep$extra[sleep$group == 2] - sleep$extra[sleep$group == 1]
n <- length(y)
## ---- eval=FALSE--------------------------------------------------------------
# library(rstan)
#
# # models
# stancodeH0 <- '
# data {
#   int<lower=1> n; // number of observations
#   vector[n] y; // observations
# }
# parameters {
#   real<lower=0> sigma2; // variance parameter
# }
# model {
#   target += log(1/sigma2); // Jeffreys prior on sigma2
#   target += normal_lpdf(y | 0, sqrt(sigma2)); // likelihood
# }
# '
# stancodeH1 <- '
# data {
#   int<lower=1> n; // number of observations
#   vector[n] y; // observations
#   real<lower=0> r; // Cauchy prior scale
# }
# parameters {
#   real delta;
#   real<lower=0> sigma2;// variance parameter
# }
# model {
#   target += cauchy_lpdf(delta | 0, r); // Cauchy prior on delta
#   target += log(1/sigma2); // Jeffreys prior on sigma2
#   target += normal_lpdf(y | delta*sqrt(sigma2), sqrt(sigma2)); // likelihood
# }
# '
# # compile models
# stanmodelH0 <- stan_model(model_code = stancodeH0, model_name="stanmodel")
# stanmodelH1 <- stan_model(model_code = stancodeH1, model_name="stanmodel")
## ---- eval=FALSE--------------------------------------------------------------
# # fit models
# stanfitH0 <- sampling(stanmodelH0, data = list(y = y, n = n),
#                        iter = 20000, warmup = 1000, chains = 4, cores = 1,
#                        control = list(adapt_delta = .99))
# stanfitH1 <- sampling(stanmodelH1, data = list(y = y, n = n, r = 1/sqrt(2)),
#                        iter = 20000, warmup = 1000, chains = 4, cores = 1,
#                        control = list(adapt_delta = .99))
## ---- echo=FALSE--------------------------------------------------------------
# Restore the precomputed bridge-sampling objects used by the chunks below.
load(system.file("extdata/", "vignette_stan_ttest.RData",
                 package = "bridgesampling"))
## ---- eval=FALSE--------------------------------------------------------------
# H0 <- bridge_sampler(stanfitH0, silent = TRUE)
# H1 <- bridge_sampler(stanfitH1, silent = TRUE)
## -----------------------------------------------------------------------------
print(H0)
print(H1)
## ----eval=FALSE---------------------------------------------------------------
# # compute percentage errors
# H0.error <- error_measures(H0)$percentage
# H1.error <- error_measures(H1)$percentage
## -----------------------------------------------------------------------------
print(H0.error)
print(H1.error)
## -----------------------------------------------------------------------------
# compute Bayes factor
BF10 <- bf(H1, H0)
print(BF10)
## ---- eval=FALSE--------------------------------------------------------------
# library(BayesFactor)
# BF10.BayesFactor <- extractBF(ttestBF(y), onlybf = TRUE)
## ---- message=FALSE-----------------------------------------------------------
print(BF10.BayesFactor)
## ---- eval=FALSE--------------------------------------------------------------
# stancodeHplus <- '
# data {
#   int<lower=1> n; // number of observations
#   vector[n] y; // observations
#   real<lower=0> r; // Cauchy prior scale
# }
# parameters {
#   real<lower=0> delta; // constrained to be positive
#   real<lower=0> sigma2;// variance parameter
# }
# model {
#   target += cauchy_lpdf(delta | 0, r) - cauchy_lccdf(0 | 0, r); // Cauchy prior on delta
#   target += log(1/sigma2); // Jeffreys prior on sigma2
#   target += normal_lpdf(y | delta*sqrt(sigma2), sqrt(sigma2)); // likelihood
# }
# '
# # compile and fit model
# stanmodelHplus <- stan_model(model_code = stancodeHplus, model_name="stanmodel")
# stanfitHplus <- sampling(stanmodelHplus, data = list(y = y, n = n, r = 1/sqrt(2)),
#                           iter = 30000, warmup = 1000, chains = 4,
#                           control = list(adapt_delta = .99))
## ----eval=FALSE---------------------------------------------------------------
# Hplus <- bridge_sampler(stanfitHplus, silent = TRUE)
## -----------------------------------------------------------------------------
print(Hplus)
## ----eval=FALSE---------------------------------------------------------------
# Hplus.error <- error_measures(Hplus)$percentage
## -----------------------------------------------------------------------------
print(Hplus.error)
## -----------------------------------------------------------------------------
# compute Bayes factor
BFplus0 <- bf(Hplus, H0)
print(BFplus0)
## ---- eval=FALSE--------------------------------------------------------------
# BFplus0.BayesFactor <- extractBF(ttestBF(y, nullInterval = c(0, Inf)), onlybf = TRUE)[1]
## -----------------------------------------------------------------------------
print(BFplus0.BayesFactor)
|
/inst/doc/bridgesampling_stan_ttest.R
|
no_license
|
cran/bridgesampling
|
R
| false
| false
| 5,069
|
r
|
## Purled R code from the bridgesampling vignette: a Bayesian t-test via
## bridge sampling with Stan. Chunks tagged "eval=FALSE" document the Stan
## models and the MCMC calls but are NOT executed here; their results
## (H0, H1, H0.error, H1.error, Hplus, BF10.BayesFactor, ...) are restored
## from a cached .RData file in the "echo=FALSE" chunk below so the
## vignette builds without rstan or long sampling runs.
## -----------------------------------------------------------------------------
library(bridgesampling)
set.seed(12345)
# Sleep data from t.test example
data(sleep)
# compute difference scores
y <- sleep$extra[sleep$group == 2] - sleep$extra[sleep$group == 1]
n <- length(y)
## ---- eval=FALSE--------------------------------------------------------------
# library(rstan)
#
# # models
# stancodeH0 <- '
# data {
#   int<lower=1> n; // number of observations
#   vector[n] y; // observations
# }
# parameters {
#   real<lower=0> sigma2; // variance parameter
# }
# model {
#   target += log(1/sigma2); // Jeffreys prior on sigma2
#   target += normal_lpdf(y | 0, sqrt(sigma2)); // likelihood
# }
# '
# stancodeH1 <- '
# data {
#   int<lower=1> n; // number of observations
#   vector[n] y; // observations
#   real<lower=0> r; // Cauchy prior scale
# }
# parameters {
#   real delta;
#   real<lower=0> sigma2;// variance parameter
# }
# model {
#   target += cauchy_lpdf(delta | 0, r); // Cauchy prior on delta
#   target += log(1/sigma2); // Jeffreys prior on sigma2
#   target += normal_lpdf(y | delta*sqrt(sigma2), sqrt(sigma2)); // likelihood
# }
# '
# # compile models
# stanmodelH0 <- stan_model(model_code = stancodeH0, model_name="stanmodel")
# stanmodelH1 <- stan_model(model_code = stancodeH1, model_name="stanmodel")
## ---- eval=FALSE--------------------------------------------------------------
# # fit models
# stanfitH0 <- sampling(stanmodelH0, data = list(y = y, n = n),
#                        iter = 20000, warmup = 1000, chains = 4, cores = 1,
#                        control = list(adapt_delta = .99))
# stanfitH1 <- sampling(stanmodelH1, data = list(y = y, n = n, r = 1/sqrt(2)),
#                        iter = 20000, warmup = 1000, chains = 4, cores = 1,
#                        control = list(adapt_delta = .99))
## ---- echo=FALSE--------------------------------------------------------------
# Restore the precomputed bridge-sampling objects used by the chunks below.
load(system.file("extdata/", "vignette_stan_ttest.RData",
                 package = "bridgesampling"))
## ---- eval=FALSE--------------------------------------------------------------
# H0 <- bridge_sampler(stanfitH0, silent = TRUE)
# H1 <- bridge_sampler(stanfitH1, silent = TRUE)
## -----------------------------------------------------------------------------
print(H0)
print(H1)
## ----eval=FALSE---------------------------------------------------------------
# # compute percentage errors
# H0.error <- error_measures(H0)$percentage
# H1.error <- error_measures(H1)$percentage
## -----------------------------------------------------------------------------
print(H0.error)
print(H1.error)
## -----------------------------------------------------------------------------
# compute Bayes factor
BF10 <- bf(H1, H0)
print(BF10)
## ---- eval=FALSE--------------------------------------------------------------
# library(BayesFactor)
# BF10.BayesFactor <- extractBF(ttestBF(y), onlybf = TRUE)
## ---- message=FALSE-----------------------------------------------------------
print(BF10.BayesFactor)
## ---- eval=FALSE--------------------------------------------------------------
# stancodeHplus <- '
# data {
#   int<lower=1> n; // number of observations
#   vector[n] y; // observations
#   real<lower=0> r; // Cauchy prior scale
# }
# parameters {
#   real<lower=0> delta; // constrained to be positive
#   real<lower=0> sigma2;// variance parameter
# }
# model {
#   target += cauchy_lpdf(delta | 0, r) - cauchy_lccdf(0 | 0, r); // Cauchy prior on delta
#   target += log(1/sigma2); // Jeffreys prior on sigma2
#   target += normal_lpdf(y | delta*sqrt(sigma2), sqrt(sigma2)); // likelihood
# }
# '
# # compile and fit model
# stanmodelHplus <- stan_model(model_code = stancodeHplus, model_name="stanmodel")
# stanfitHplus <- sampling(stanmodelHplus, data = list(y = y, n = n, r = 1/sqrt(2)),
#                           iter = 30000, warmup = 1000, chains = 4,
#                           control = list(adapt_delta = .99))
## ----eval=FALSE---------------------------------------------------------------
# Hplus <- bridge_sampler(stanfitHplus, silent = TRUE)
## -----------------------------------------------------------------------------
print(Hplus)
## ----eval=FALSE---------------------------------------------------------------
# Hplus.error <- error_measures(Hplus)$percentage
## -----------------------------------------------------------------------------
print(Hplus.error)
## -----------------------------------------------------------------------------
# compute Bayes factor
BFplus0 <- bf(Hplus, H0)
print(BFplus0)
## ---- eval=FALSE--------------------------------------------------------------
# BFplus0.BayesFactor <- extractBF(ttestBF(y, nullInterval = c(0, Inf)), onlybf = TRUE)[1]
## -----------------------------------------------------------------------------
print(BFplus0.BayesFactor)
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/interpretation_help.R
\name{mapClusters}
\alias{mapClusters}
\title{Build some maps to visualize the results of the clustering}
\usage{
mapClusters(geodata, belongmatrix, undecided = NULL)
}
\arguments{
\item{geodata}{The spatial dataset used to map the groups obtained by the clustering}

\item{belongmatrix}{The belonging matrix obtained at the end of the algorithm}

\item{undecided}{An optional membership threshold; observations whose belonging values all fall below it are mapped as undecided (default NULL)}
}
\description{
Build some maps to visualize the results of the clustering
}
\examples{
}
|
/man/mapClusters.Rd
|
no_license
|
JeremyGelb/old_geoCmeans
|
R
| false
| true
| 451
|
rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/interpretation_help.R
\name{mapClusters}
\alias{mapClusters}
\title{Build some maps to visualize the results of the clustering}
\usage{
mapClusters(geodata, belongmatrix, undecided = NULL)
}
\arguments{
\item{geodata}{The spatial dataset used to map the groups obtained by the clustering}

\item{belongmatrix}{The belonging matrix obtained at the end of the algorithm}

\item{undecided}{An optional membership threshold; observations whose belonging values all fall below it are mapped as undecided (default NULL)}
}
\description{
Build some maps to visualize the results of the clustering
}
\examples{
}
|
## Simulate binary phenotypes from UK Biobank genotypes (bigsnpr) under
## three genetic architectures: causal variants anywhere in the genome,
## causal variants restricted to the HLA long-range-LD region, and a
## 50/50 liability mixture of both. Each simulation is saved to
## data/pheno-simu/ and skipped if its output file already exists, so
## the script can be resumed safely after interruption.
library(bigsnpr)
ukb <- snp_attach("data/UKBB_imp_HM3.rds")
G <- ukb$genotypes
CHR <- as.integer(ukb$map$chromosome)
POS <- ukb$map$physical.pos
# One core fewer than the SLURM allocation, keeping one for the main process.
(NCORES <- as.integer(Sys.getenv("SLURM_JOB_CPUS_PER_NODE")) - 1L)
bigassertr::assert_dir("data/pheno-simu")
#### Anywhere on the genome ####
# h2 = heritability, M = number of causal variants, K = prevalence,
# iter = replicate index; one simulation per parameter combination.
params <- expand.grid(
  h2 = 0.4,
  M = c(300, 3e3, 30e3, 300e3),
  K = 0.15,
  iter = 1:10
)
purrr::pwalk(params, function(h2, M, K, iter) {
  pheno_file <- sprintf("data/pheno-simu/all_%d_%d_%d_%d.rds",
                        100 * h2, M, 100 * K, iter)
  if (!file.exists(pheno_file)) {
    print(pheno_file)
    simu <- snp_simuPheno(G, h2, M, K, ncores = NCORES)
    saveRDS(simu$pheno, pheno_file)
  }
})
#### In HLA ####
# Indices of the variants falling in the HLA region, taken from row 12 of
# bigsnpr's LD.wiki34 table of long-range LD regions.
ind.HLA <- snp_indLRLDR(CHR, POS, LD.wiki34[12, ])
params2 <- expand.grid(
  h2 = 0.3,
  M = c(300, 3000),
  K = 0.15,
  iter = 1:10
)
purrr::pwalk(params2, function(h2, M, K, iter) {
  pheno_file <- sprintf("data/pheno-simu/HLA_%d_%d_%d_%d.rds",
                        100 * h2, M, 100 * K, iter)
  if (!file.exists(pheno_file)) {
    print(pheno_file)
    simu <- snp_simuPheno(G, h2, M, K, ind.possible = ind.HLA, ncores = NCORES)
    saveRDS(simu$pheno, pheno_file)
  }
})
#### In both ####
h2 <- 0.4
M_HLA <- 300
M_all <- 10e3
K <- 0.15
for (iter in 1:10) {
  pheno_file <- sprintf("data/pheno-simu/both_%d_%d_%d.rds",
                        100 * h2, 100 * K, iter)
  if (!file.exists(pheno_file)) {
    print(pheno_file)
    simu1 <- snp_simuPheno(G, h2, M_HLA, ind.possible = ind.HLA)
    simu2 <- snp_simuPheno(G, h2, M_all, ncores = NCORES)
    # Average the two liability components (1/sqrt(2) keeps unit variance),
    # then threshold at the upper-K normal quantile to obtain a 0/1
    # phenotype with prevalence K.
    simu_liab <- (simu1$pheno + simu2$pheno) / sqrt(2)
    simu_pheno <- (simu_liab > qnorm(K, lower.tail = FALSE)) + 0L
    saveRDS(simu_pheno, pheno_file)
  }
}
|
/code/prepare-simu-phenotypes.R
|
no_license
|
privefl/paper-ldpred2
|
R
| false
| false
| 1,768
|
r
|
## Simulate binary phenotypes from UK Biobank genotypes (bigsnpr) under
## three genetic architectures: causal variants anywhere in the genome,
## causal variants restricted to the HLA long-range-LD region, and a
## 50/50 liability mixture of both. Each simulation is saved to
## data/pheno-simu/ and skipped if its output file already exists, so
## the script can be resumed safely after interruption.
library(bigsnpr)
ukb <- snp_attach("data/UKBB_imp_HM3.rds")
G <- ukb$genotypes
CHR <- as.integer(ukb$map$chromosome)
POS <- ukb$map$physical.pos
# One core fewer than the SLURM allocation, keeping one for the main process.
(NCORES <- as.integer(Sys.getenv("SLURM_JOB_CPUS_PER_NODE")) - 1L)
bigassertr::assert_dir("data/pheno-simu")
#### Anywhere on the genome ####
# h2 = heritability, M = number of causal variants, K = prevalence,
# iter = replicate index; one simulation per parameter combination.
params <- expand.grid(
  h2 = 0.4,
  M = c(300, 3e3, 30e3, 300e3),
  K = 0.15,
  iter = 1:10
)
purrr::pwalk(params, function(h2, M, K, iter) {
  pheno_file <- sprintf("data/pheno-simu/all_%d_%d_%d_%d.rds",
                        100 * h2, M, 100 * K, iter)
  if (!file.exists(pheno_file)) {
    print(pheno_file)
    simu <- snp_simuPheno(G, h2, M, K, ncores = NCORES)
    saveRDS(simu$pheno, pheno_file)
  }
})
#### In HLA ####
# Indices of the variants falling in the HLA region, taken from row 12 of
# bigsnpr's LD.wiki34 table of long-range LD regions.
ind.HLA <- snp_indLRLDR(CHR, POS, LD.wiki34[12, ])
params2 <- expand.grid(
  h2 = 0.3,
  M = c(300, 3000),
  K = 0.15,
  iter = 1:10
)
purrr::pwalk(params2, function(h2, M, K, iter) {
  pheno_file <- sprintf("data/pheno-simu/HLA_%d_%d_%d_%d.rds",
                        100 * h2, M, 100 * K, iter)
  if (!file.exists(pheno_file)) {
    print(pheno_file)
    simu <- snp_simuPheno(G, h2, M, K, ind.possible = ind.HLA, ncores = NCORES)
    saveRDS(simu$pheno, pheno_file)
  }
})
#### In both ####
h2 <- 0.4
M_HLA <- 300
M_all <- 10e3
K <- 0.15
for (iter in 1:10) {
  pheno_file <- sprintf("data/pheno-simu/both_%d_%d_%d.rds",
                        100 * h2, 100 * K, iter)
  if (!file.exists(pheno_file)) {
    print(pheno_file)
    simu1 <- snp_simuPheno(G, h2, M_HLA, ind.possible = ind.HLA)
    simu2 <- snp_simuPheno(G, h2, M_all, ncores = NCORES)
    # Average the two liability components (1/sqrt(2) keeps unit variance),
    # then threshold at the upper-K normal quantile to obtain a 0/1
    # phenotype with prevalence K.
    simu_liab <- (simu1$pheno + simu2$pheno) / sqrt(2)
    simu_pheno <- (simu_liab > qnorm(K, lower.tail = FALSE)) + 0L
    saveRDS(simu_pheno, pheno_file)
  }
}
|
##
## Description: Create plot1 for Project 1 of R-class
##              (histogram of Global Active Power, 1-2 Feb 2007)
##
## AUTHOR : Edward J Hopkins
## $DATE : Mon Aug 04 19:25:48 2014 ## date()
## $Revision : 1.00 $
## DEVELOPED : Rstudio, Version 0.98.507 ## packageVersion("rstudio")
## : R version 3.1.0 (2014-04-10) ## R.Version.string
## Copyright : Copyright (c) 2014 E. J. Hopkins
## FILENAME : plot1.R
## Dependencies: data.table (for fread)
## See also:
###############################################################################
## BEGIN CODE
library(data.table)
# Assume data file is in working directory
fname <- "household_power_consumption.txt"        # var for filename
# Read only the two needed columns; suppressWarnings() hides fread's
# complaints about the "?" missing-value codes in the raw file.
data <- suppressWarnings(fread(fname,
       select = c("Date", "Global_active_power")))
data$Date <- as.Date(data$Date, "%d/%m/%Y")       # Parse dd/mm/yyyy dates
# Select dates of 1Feb2007 to 2Feb2007.
# FIX: compare against explicit Date objects instead of relying on the
# implicit character -> Date coercion of the original bounds.
data3 <- data[Date >= as.Date("2007-02-01") & Date <= as.Date("2007-02-02")]
data3$Global_active_power <- as.numeric(data3$Global_active_power)
rm(data)                                          # Free the full table
# Plot histogram, color, and label
# FIX: par() with no arguments also returns read-only parameters, so
# restoring with par(defpar) later triggers warnings. no.readonly = TRUE
# captures only the settable parameters.
defpar <- par(no.readonly = TRUE)                 # Save parameter values
par(cex.lab = 0.8)                                # Reduce font size of x,y labels
par(cex.axis = 0.8)                               # Reduce font size of tick labels
hist(data3$Global_active_power,                   # histogram call
     col = "red",                                 # make red columns
     main = "Global Active Power",                # main title text (FIX: was mislabeled "y-axis label")
     xlab = "Global Active Power (kilowatts)")    # x-axis label text
dev.copy(device = png, file = "plot1.png",
         width = 480, height = 480, units = "px") # Export to png 480x480px
dev.off()
par(defpar)                                       # Restore saved parameter values
## END CODE
###############################################################################
# Memory calculations
#MAXmem <- memory.limit()
#CURmem <- memory.size()
#AVLmem <- MAXmem-CURmem
#Nrows <- 2075259
#Ncols <- 9
#Nbyte <- 8
#paste("File Size:",round(zipFileInfo$Length/1E6),"Mb")
#paste("Available Memory:",round(AVLmem),"Mb")
#paste("Estimated file Mem:",round((Nrows*Ncols*Nbyte)/1E6),"Mb")
#system.time(data <- read.csv(zipFileInfo$Name,sep=";"))
#FILE <- list.files(pattern='*.zip')
#zipFileInfo <- unzip(FILE, list=TRUE)
#system.time(DF4 <- read.csv.sql(
# fname, sep=";",sql = 'select * from file where Date = "2/2/2007"'))
#library(ggplot2)
#ggplot(data3,aes(x=Global_active_power)) +
# geom_histogram(fill="red", binwidth=0.5) +
# xlab("Global Active Power (kilowatts)") +
# ylab("Frequency")
#png("plotA.png",width=480,height=480,units="px") # Create file device
## MODIFICATIONS:
## date()
## Description...
## If this code does not work, then I don't know who wrote it.
|
/plot1.R
|
no_license
|
AConundrum/ExData_Plotting1
|
R
| false
| false
| 2,807
|
r
|
##
## Description: Create plot1 for Project 1 of R-class
##              (histogram of Global Active Power, 1-2 Feb 2007)
##
## AUTHOR : Edward J Hopkins
## $DATE : Mon Aug 04 19:25:48 2014 ## date()
## $Revision : 1.00 $
## DEVELOPED : Rstudio, Version 0.98.507 ## packageVersion("rstudio")
## : R version 3.1.0 (2014-04-10) ## R.Version.string
## Copyright : Copyright (c) 2014 E. J. Hopkins
## FILENAME : plot1.R
## Dependencies: data.table (for fread)
## See also:
###############################################################################
## BEGIN CODE
library(data.table)
# Assume data file is in working directory
fname <- "household_power_consumption.txt"        # var for filename
# Read only the two needed columns; suppressWarnings() hides fread's
# complaints about the "?" missing-value codes in the raw file.
data <- suppressWarnings(fread(fname,
       select = c("Date", "Global_active_power")))
data$Date <- as.Date(data$Date, "%d/%m/%Y")       # Parse dd/mm/yyyy dates
# Select dates of 1Feb2007 to 2Feb2007.
# FIX: compare against explicit Date objects instead of relying on the
# implicit character -> Date coercion of the original bounds.
data3 <- data[Date >= as.Date("2007-02-01") & Date <= as.Date("2007-02-02")]
data3$Global_active_power <- as.numeric(data3$Global_active_power)
rm(data)                                          # Free the full table
# Plot histogram, color, and label
# FIX: par() with no arguments also returns read-only parameters, so
# restoring with par(defpar) later triggers warnings. no.readonly = TRUE
# captures only the settable parameters.
defpar <- par(no.readonly = TRUE)                 # Save parameter values
par(cex.lab = 0.8)                                # Reduce font size of x,y labels
par(cex.axis = 0.8)                               # Reduce font size of tick labels
hist(data3$Global_active_power,                   # histogram call
     col = "red",                                 # make red columns
     main = "Global Active Power",                # main title text (FIX: was mislabeled "y-axis label")
     xlab = "Global Active Power (kilowatts)")    # x-axis label text
dev.copy(device = png, file = "plot1.png",
         width = 480, height = 480, units = "px") # Export to png 480x480px
dev.off()
par(defpar)                                       # Restore saved parameter values
## END CODE
###############################################################################
# Memory calculations
#MAXmem <- memory.limit()
#CURmem <- memory.size()
#AVLmem <- MAXmem-CURmem
#Nrows <- 2075259
#Ncols <- 9
#Nbyte <- 8
#paste("File Size:",round(zipFileInfo$Length/1E6),"Mb")
#paste("Available Memory:",round(AVLmem),"Mb")
#paste("Estimated file Mem:",round((Nrows*Ncols*Nbyte)/1E6),"Mb")
#system.time(data <- read.csv(zipFileInfo$Name,sep=";"))
#FILE <- list.files(pattern='*.zip')
#zipFileInfo <- unzip(FILE, list=TRUE)
#system.time(DF4 <- read.csv.sql(
# fname, sep=";",sql = 'select * from file where Date = "2/2/2007"'))
#library(ggplot2)
#ggplot(data3,aes(x=Global_active_power)) +
# geom_histogram(fill="red", binwidth=0.5) +
# xlab("Global Active Power (kilowatts)") +
# ylab("Frequency")
#png("plotA.png",width=480,height=480,units="px") # Create file device
## MODIFICATIONS:
## date()
## Description...
## If this code does not work, then I don't know who wrote it.
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/measures_multiclass.R
\name{multiclass.Brier}
\alias{multiclass.Brier}
\title{Multiclass Brier score}
\usage{
multiclass.Brier(probabilities, truth)
}
\arguments{
\item{probabilities}{[numeric] matrix of predicted probabilities with column names of the classes}
\item{truth}{vector of true values}
}
\description{
Defined as: (1/n) sum_i sum_j (y_ij - p_ij)^2, where y_ij = 1 if observation i has class j (else 0),
and p_ij is the predicted probability of observation i for class j.
From http://docs.lib.noaa.gov/rescue/mwr/078/mwr-078-01-0001.pdf.
}
\examples{
n = 20
set.seed(122)
truth = as.factor(sample(c(1,2,3), n, replace = TRUE))
probabilities = matrix(runif(60), 20, 3)
probabilities = probabilities/rowSums(probabilities)
colnames(probabilities) = c(1,2,3)
multiclass.Brier(probabilities, truth)
}
|
/man/multiclass.Brier.Rd
|
no_license
|
PhilippPro/measures
|
R
| false
| true
| 888
|
rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/measures_multiclass.R
\name{multiclass.Brier}
\alias{multiclass.Brier}
\title{Multiclass Brier score}
\usage{
multiclass.Brier(probabilities, truth)
}
\arguments{
\item{probabilities}{[numeric] matrix of predicted probabilities with column names of the classes}
\item{truth}{vector of true values}
}
\description{
Defined as: (1/n) sum_i sum_j (y_ij - p_ij)^2, where y_ij = 1 if observation i has class j (else 0),
and p_ij is the predicted probability of observation i for class j.
From http://docs.lib.noaa.gov/rescue/mwr/078/mwr-078-01-0001.pdf.
}
\examples{
n = 20
set.seed(122)
truth = as.factor(sample(c(1,2,3), n, replace = TRUE))
probabilities = matrix(runif(60), 20, 3)
probabilities = probabilities/rowSums(probabilities)
colnames(probabilities) = c(1,2,3)
multiclass.Brier(probabilities, truth)
}
|
#!/usr/bin/env Rscript

# Summarize per-task CPU/runtime usage for one jetstream project, plot the
# results, and append project-level rows to study-wide summary tables.
#
# Inputs:
#   -p/--project        project identifier (used to name the output plots)
#   -t/--time_summary   per-task time table produced by report_cpu_usage.py
#   -a/--task_summary   running study-level task-group summary (TSV)
#   -s/--study_summary  running study-level project summary (TSV)
#
# Outputs: four "<project>_*.png" plots plus refreshed
# study_task_summary.txt and study_project_summary.txt in the working dir.

# Load required modules
library(tidyverse)
library(optparse)

# Define Options
option_list <- list(
  make_option(c("-p", "--project"),
              type = "character",
              default = NULL,
              help = "Project Name",
              metavar = "character"),
  make_option(c("-t", "--time_summary"),
              type = "character",
              default = NULL,
              help = "Project time summary file (produced with report_cpu_usage.py",
              metavar = "character"),
  make_option(c("-a", "--task_summary"),
              type = "character",
              default = NULL,
              help = "File with project task summary variables",
              metavar = "character"),
  make_option(c("-s", "--study_summary"),
              type = "character",
              default = NULL,
              help = "File with study summary variables",
              metavar = "character")
)

opt_parser <- OptionParser(option_list = option_list)
opt <- parse_args(opt_parser)

# Validate arguments are provided; fail fast with the usage message.
if (is.null(opt$project)) {
  print_help(opt_parser)
  stop("You must provide a project string (ie. MMRF_1234) to -p/--project", call. = FALSE)
}
if (is.null(opt$time_summary)) {
  print_help(opt_parser)
  stop("You must provide a time summary file to -t/--time_summary", call. = FALSE)
}
if (is.null(opt$task_summary)) {
  print_help(opt_parser)
  stop("You must provide a study task file to -a/--task_summary", call. = FALSE)
}
if (is.null(opt$study_summary)) {
  print_help(opt_parser)
  stop("You must provide a study project summary file to -s/--study_summary", call. = FALSE)
}

##############################################
# Plot out project run time for summary
##############################################

# Import data table
# Generated within project folder using <python3 /home/tgenjetstream/git_repositories/jetstream_resources/reporting_tools/report_cpu_usage.py>
data <- read_delim(opt$time_summary, delim = "\t")
project_name <- opt$project

## Update the tibble table
# Strip the "<Task(complete): ...>" wrapper from the Task column by removing
# each special character in turn, then the "Taskcomplete: " remnant.
data <- data %>% mutate_if(is.character, str_replace_all, pattern = '<', replacement = "")
data <- data %>% mutate_if(is.character, str_replace_all, pattern = "[(]", replacement = "")
data <- data %>% mutate_if(is.character, str_replace_all, pattern = '[)]', replacement = "")
data <- data %>% mutate_if(is.character, str_replace_all, pattern = 'Taskcomplete: ', replacement = "")
data <- data %>% mutate_if(is.character, str_replace_all, pattern = '[>]', replacement = "")

# Parse the Elapsed time into hours, minutes, seconds
# (remove = FALSE keeps the original Elapsed column for the plots below).
data <- data %>% separate(Elapsed, into = c("hours", "minutes", "seconds"), sep = ":", convert = TRUE, remove = FALSE)

# Assign each task to a pipeline group. case_when() is first-match-wins, so
# specific prefixes (e.g. "strelka2_filter_variants") must precede their
# generic counterparts (e.g. "strelka2").
# FIX: removed branches that were unreachable in the original and therefore
# had no effect: duplicate "mutect2_merge_pileup_summaries", two duplicate
# "bcftools_annotate", and the "haplotypecaller_gvcf_merge" /
# "gatk_collectwgsmetricswithnonzerocoverage" branches (shadowed by their
# shorter prefixes, which map to the same group).
data <- data %>% mutate(Group = case_when(
  str_detect(Task, "^copy_fastqs") ~ "Copy_Fastq",
  str_detect(Task, "^split_fastq") ~ "Split_Fastq",
  str_detect(Task, "^chunked_bwa_mem_samtools_fixmate") ~ "BWA_Align",
  str_detect(Task, "^chunked_samtools_merge_rg_bams") ~ "Samtools_Merge",
  str_detect(Task, "^samtools_markdup") ~ "Samtools_MarkDup",
  str_detect(Task, "^bam_to_cram") ~ "Samtools_BamCram",
  str_detect(Task, "^gatk_collectwgsmetrics") ~ "Picard_Metric",
  str_detect(Task, "^gatk_collectrawwgsmetrics") ~ "Picard_Metric",
  str_detect(Task, "^gatk_collectmultiplemetrics") ~ "Picard_Metric",
  str_detect(Task, "^gatk_convertsequencingarrtifacttooxog") ~ "Picard_Metric",
  str_detect(Task, "^gatk_collecthsmetrics") ~ "Picard_Metric",
  str_detect(Task, "^gatk_collectrnaseqmetrics") ~ "Picard_Metric",
  str_detect(Task, "^samtools_stats") ~ "Samtools_Metric",
  str_detect(Task, "^samtools_flagstat") ~ "Samtools_Metric",
  str_detect(Task, "^samtools_idxstats") ~ "Samtools_Metric",
  str_detect(Task, "^verifybamid2") ~ "Random_Stat",
  str_detect(Task, "^freebayes_sex_check") ~ "Random_Stat",
  str_detect(Task, "^snpsniffer_geno") ~ "Random_Stat",
  str_detect(Task, "^hmmcopy_make_wig_bwa") ~ "iChor_CNA",
  str_detect(Task, "^ichor_cna_bwa") ~ "iChor_CNA",
  str_detect(Task, "^haplotypecaller_gvcf") ~ "HaplotypeCaller",
  str_detect(Task, "^manta") ~ "Manta_Strelka",
  str_detect(Task, "^strelka2_filter_variants") ~ "Variant_Filter",
  str_detect(Task, "^strelka2") ~ "Manta_Strelka",
  str_detect(Task, "^deepvariant_make_examples") ~ "Deepvariant",
  str_detect(Task, "^deepvariant_call_variants") ~ "Deepvariant",
  str_detect(Task, "^deepvariant_postprocess_variants") ~ "Deepvariant",
  str_detect(Task, "^deepvariant_filter_variants") ~ "Deepvariant",
  str_detect(Task, "^lancet_merge_chunks") ~ "Variant_Merge",
  str_detect(Task, "^lancet_filter_variants") ~ "Variant_Filter",
  str_detect(Task, "^lancet") ~ "Lancet",
  str_detect(Task, "^octopus_merge_chunks") ~ "Variant_Merge",
  str_detect(Task, "^octopus_filter_variants") ~ "Variant_Filter",
  str_detect(Task, "^octopus") ~ "Octopus",
  str_detect(Task, "^vardict_merge_chunks") ~ "Variant_Merge",
  str_detect(Task, "^vardict_filter_variants") ~ "Variant_Filter",
  str_detect(Task, "^vardict") ~ "VarDictJava",
  str_detect(Task, "^mutect2_merge_chunks") ~ "Variant_Merge",
  str_detect(Task, "^mutect2_filter_variants") ~ "Variant_Filter",
  str_detect(Task, "^mutect2_filter_calls") ~ "Mutect",
  str_detect(Task, "^mutect2_calculate_contamination") ~ "Mutect",
  str_detect(Task, "^mutect2_merge_pileup_summaries") ~ "Mutect",
  str_detect(Task, "^mutect2_learn_readorientationmodel") ~ "Mutect",
  str_detect(Task, "^mutect2_merge_stats") ~ "Mutect",
  str_detect(Task, "^mutect2_GetPileupSummaries") ~ "Mutect",
  str_detect(Task, "^mutect2") ~ "Mutect",
  str_detect(Task, "^vcfmerger2") ~ "VCFmerger",
  str_detect(Task, "^bcftools_annotate") ~ "Annotation",
  str_detect(Task, "^snpeff") ~ "Annotation",
  str_detect(Task, "^vep") ~ "Annotation",
  str_detect(Task, "^delly") ~ "Delly",
  str_detect(Task, "^gatk_call_cnv") ~ "GATK_CNV",
  str_detect(Task, "^add_matched_rna") ~ "RNA_Steps",
  str_detect(Task, "^add_rna_header_to_vcf") ~ "RNA_Steps",
  str_detect(Task, "^salmon_quant_cdna") ~ "RNA_Steps",
  str_detect(Task, "^star_quant") ~ "RNA_Steps",
  str_detect(Task, "^star_fusion") ~ "RNA_Steps",
  str_detect(Task, "^fixmate_sort_star") ~ "RNA_Steps",
  str_detect(Task, "^markduplicates_star_gatk") ~ "RNA_Steps",
  str_detect(Task, "^rna_getBTcellLociCounts") ~ "RNA_Steps",
  TRUE ~ "Misc"
))

# Per-task scatter plots, one point per task, grouped on the y axis.
ggplot(data, aes(x = Group, y = Elapsed, color = as.factor(CPUs))) +
  geom_jitter() +
  scale_color_discrete() +
  coord_flip()
ggsave(filename = paste0(project_name, "_ElapsedTime_by_Task_per_Group.png"), dpi = 150)

# NOTE(review): "Hours" (and "CPUs") are assumed to be columns supplied by
# report_cpu_usage.py -- they are not derived in this script; confirm
# against the generator if the input format changes.
ggplot(data, aes(x = Group, y = Hours)) +
  geom_jitter() +
  coord_flip()
ggsave(filename = paste0(project_name, "_CPUhours_by_Task_per_Group.png"), dpi = 150)

# Group and summarize to get realtime and CPU hours by task Group
task_summary <- data %>%
  group_by(Group) %>%
  summarise(Tasks = n(),
            Total_CPU_Hours = sum(Hours),
            Max_Task_CPU_Hours = max(Hours),
            # sum(Elapsed) yields seconds, hence the division by 3600
            Total_Elapsed_Hours = as.double(sum(Elapsed) / 3600),
            Max_Task_Elapsed_Hours = max(Elapsed)) %>%
  mutate(PCT_CPU_Hours = Total_CPU_Hours / sum(Total_CPU_Hours)) %>%
  mutate(PCT_Elapsed_Hours = Total_Elapsed_Hours / sum(Total_Elapsed_Hours))

# Tag the rows with the project so they can be pooled across projects.
task_summary <- task_summary %>%
  add_column(Project = project_name, .before = "Group")

# Plot Summary Data
ggplot(task_summary, aes(x = Group, y = Total_Elapsed_Hours)) +
  geom_bar(stat = "identity") +
  coord_flip()
ggsave(filename = paste0(project_name, "_ElapsedHours_by_TaskGroup.png"), dpi = 150)

ggplot(task_summary, aes(x = Group, y = Total_CPU_Hours)) +
  geom_bar(stat = "identity") +
  coord_flip()
ggsave(filename = paste0(project_name, "_CPUhours_by_TaskGroup.png"), dpi = 150)

# Generate the one-row project summary from the task-group summary.
project_summary <- task_summary %>%
  group_by(Project) %>%
  summarise(Tasks = sum(Tasks),
            Total_CPU_Hours = sum(Total_CPU_Hours),
            Total_Elapsed_Hours = sum(Total_Elapsed_Hours))

##############################################
# Group with other projects for Study Summary
##############################################

# Read in the running study-level summary files (explicit col_types keep
# column types stable as rows accumulate across runs).
study_task_summary <- read_delim(opt$task_summary, delim = "\t", col_types = "ccidddtdd")
study_project_summary <- read_delim(opt$study_summary, delim = "\t", col_types = "cidd")

# Append this project's rows to the study-wide tables.
study_task_summary <- bind_rows(study_task_summary, task_summary)
study_project_summary <- bind_rows(study_project_summary, project_summary)

# Write out the updated study summary files.
write_tsv(study_task_summary, "study_task_summary.txt")
write_tsv(study_project_summary, "study_project_summary.txt")
|
/reporting_tools/summarize_project_runtime.R
|
permissive
|
kdrenner/jetstream_resources
|
R
| false
| false
| 11,703
|
r
|
#!/usr/bin/env Rscript
# Load required modules
library(tidyverse)
library(optparse)
# Define Options
option_list = list(
make_option(c("-p", "--project"),
type="character",
default=NULL,
help="Project Name",
metavar="character"),
make_option(c("-t", "--time_summary"),
type="character",
default=NULL,
help="Project time summary file (produced with report_cpu_usage.py",
metavar="character"),
make_option(c("-a", "--task_summary"),
type="character",
default=NULL,
help="File with project task summary variables",
metavar="character"),
make_option(c("-s", "--study_summary"),
type="character",
default=NULL,
help="File with study summary variables",
metavar="character")
);
opt_parser = OptionParser(option_list=option_list);
opt = parse_args(opt_parser);
# Validate arguments are provided
if (is.null(opt$project)){
print_help(opt_parser)
stop("You must provide a project string (ie. MMRF_1234) to -p/--project", call.=FALSE)
}
if (is.null(opt$time_summary)){
print_help(opt_parser)
stop("You must provide a time summary file to -t/--time_summary", call.=FALSE)
}
if (is.null(opt$task_summary)){
print_help(opt_parser)
stop("You must provide a study task file to -a/--task_summary", call.=FALSE)
}
if (is.null(opt$study_summary)){
print_help(opt_parser)
stop("You must provide a study project summary file to -s/--study_summary", call.=FALSE)
}
##############################################
# Plot out project run time for summary
##############################################
# Import data table
# Generated within project folder using <python3 /home/tgenjetstream/git_repositories/jetstream_resources/reporting_tools/report_cpu_usage.py>
data <- read_delim(opt$time_summary, delim = "\t")
project_name <- opt$project
## Update the tibble table
# Remove "<Task(complete): " and ">" from the TASK column
data <- data %>% mutate_if(is.character, str_replace_all, pattern = '<', replacement = "")
data <- data %>% mutate_if(is.character, str_replace_all, pattern = "[(]", replacement = "")
data <- data %>% mutate_if(is.character, str_replace_all, pattern = '[)]', replacement = "")
data <- data %>% mutate_if(is.character, str_replace_all, pattern = 'Taskcomplete: ', replacement = "")
data <- data %>% mutate_if(is.character, str_replace_all, pattern = '[>]', replacement = "")
# Parse the Elapsed time into hours, minutes, seconds
data <- data %>% separate(Elapsed, into = c("hours", "minutes", "seconds"), sep = ":", convert = TRUE, remove = FALSE)
# Add Summary Columns
data <- data %>% mutate(Group = case_when(str_detect(Task, "^copy_fastqs") ~ "Copy_Fastq",
str_detect(Task, "^split_fastq") ~ "Split_Fastq",
str_detect(Task, "^chunked_bwa_mem_samtools_fixmate") ~ "BWA_Align",
str_detect(Task, "^chunked_samtools_merge_rg_bams") ~ "Samtools_Merge",
str_detect(Task, "^samtools_markdup") ~ "Samtools_MarkDup",
str_detect(Task, "^bam_to_cram") ~ "Samtools_BamCram",
str_detect(Task, "^gatk_collectwgsmetrics") ~ "Picard_Metric",
str_detect(Task, "^gatk_collectwgsmetricswithnonzerocoverage") ~ "Picard_Metric",
str_detect(Task, "^gatk_collectrawwgsmetrics") ~ "Picard_Metric",
str_detect(Task, "^gatk_collectmultiplemetrics") ~ "Picard_Metric",
str_detect(Task, "^gatk_convertsequencingarrtifacttooxog") ~ "Picard_Metric",
str_detect(Task, "^gatk_collecthsmetrics") ~ "Picard_Metric",
str_detect(Task, "^gatk_collectrnaseqmetrics") ~ "Picard_Metric",
str_detect(Task, "^samtools_stats") ~ "Samtools_Metric",
str_detect(Task, "^samtools_flagstat") ~ "Samtools_Metric",
str_detect(Task, "^samtools_idxstats") ~ "Samtools_Metric",
str_detect(Task, "^verifybamid2") ~ "Random_Stat",
str_detect(Task, "^freebayes_sex_check") ~ "Random_Stat",
str_detect(Task, "^snpsniffer_geno") ~ "Random_Stat",
str_detect(Task, "^hmmcopy_make_wig_bwa") ~ "iChor_CNA",
str_detect(Task, "^ichor_cna_bwa") ~ "iChor_CNA",
str_detect(Task, "^haplotypecaller_gvcf") ~ "HaplotypeCaller",
str_detect(Task, "^haplotypecaller_gvcf_merge") ~ "HaplotypeCaller",
str_detect(Task, "^manta") ~ "Manta_Strelka",
str_detect(Task, "^strelka2_filter_variants") ~ "Variant_Filter",
str_detect(Task, "^strelka2") ~ "Manta_Strelka",
str_detect(Task, "^deepvariant_make_examples") ~ "Deepvariant",
str_detect(Task, "^deepvariant_call_variants") ~ "Deepvariant",
str_detect(Task, "^deepvariant_postprocess_variants") ~ "Deepvariant",
str_detect(Task, "^deepvariant_filter_variants") ~ "Deepvariant",
str_detect(Task, "^lancet_merge_chunks") ~ "Variant_Merge",
str_detect(Task, "^lancet_filter_variants") ~ "Variant_Filter",
str_detect(Task, "^lancet") ~ "Lancet",
str_detect(Task, "^octopus_merge_chunks") ~ "Variant_Merge",
str_detect(Task, "^octopus_filter_variants") ~ "Variant_Filter",
str_detect(Task, "^octopus") ~ "Octopus",
str_detect(Task, "^vardict_merge_chunks") ~ "Variant_Merge",
str_detect(Task, "^vardict_filter_variants") ~ "Variant_Filter",
str_detect(Task, "^vardict") ~ "VarDictJava",
str_detect(Task, "^mutect2_merge_chunks") ~ "Variant_Merge",
str_detect(Task, "^mutect2_filter_variants") ~ "Variant_Filter",
str_detect(Task, "^mutect2_filter_calls") ~ "Mutect",
str_detect(Task, "^mutect2_calculate_contamination") ~ "Mutect",
str_detect(Task, "^mutect2_merge_pileup_summaries") ~ "Mutect",
str_detect(Task, "^mutect2_learn_readorientationmodel") ~ "Mutect",
str_detect(Task, "^mutect2_merge_stats") ~ "Mutect",
str_detect(Task, "^mutect2_merge_pileup_summaries") ~ "Mutect",
str_detect(Task, "^mutect2_GetPileupSummaries") ~ "Mutect",
str_detect(Task, "^mutect2") ~ "Mutect",
str_detect(Task, "^vcfmerger2") ~ "VCFmerger",
str_detect(Task, "^bcftools_annotate") ~ "Annotation",
str_detect(Task, "^snpeff") ~ "Annotation",
str_detect(Task, "^vep") ~ "Annotation",
str_detect(Task, "^bcftools_annotate") ~ "Annotation",
str_detect(Task, "^bcftools_annotate") ~ "Annotation",
str_detect(Task, "^delly") ~ "Delly",
str_detect(Task, "^gatk_call_cnv") ~ "GATK_CNV",
str_detect(Task, "^add_matched_rna") ~ "RNA_Steps",
str_detect(Task, "^add_rna_header_to_vcf") ~ "RNA_Steps",
str_detect(Task, "^salmon_quant_cdna") ~ "RNA_Steps",
str_detect(Task, "^star_quant") ~ "RNA_Steps",
str_detect(Task, "^star_fusion") ~ "RNA_Steps",
str_detect(Task, "^fixmate_sort_star") ~ "RNA_Steps",
str_detect(Task, "^markduplicates_star_gatk") ~ "RNA_Steps",
str_detect(Task, "^rna_getBTcellLociCounts") ~ "RNA_Steps",
TRUE ~ "Misc"
)
)
# Plot per-task elapsed time by task Group, colored by CPU allocation.
# ggsave() saves the most recently displayed plot.
ggplot(data, aes(x=Group, y=Elapsed, color=as.factor(CPUs))) +
geom_jitter() +
scale_color_discrete() +
coord_flip()
ggsave(file=paste(project_name, "_ElapsedTime_by_Task_per_Group.png", sep=""), dpi=150)
# Plot CPU hours by task Group, one jittered point per task.
ggplot(data, aes(x=Group, y=Hours)) +
geom_jitter() +
coord_flip()
ggsave(file=paste(project_name, "_CPUhours_by_Task_per_Group.png", sep=""), dpi=150)
# Group and summarize to get elapsed (wall-clock) and CPU hours by task Group
task_summary <- data %>%
group_by(Group) %>%
summarise(Tasks = n(),
Total_CPU_Hours = sum(Hours),
Max_Task_CPU_Hours = max(Hours),
Total_Elapsed_Hours = as.double(sum(Elapsed)/3600),
Max_Task_Elapsed_Hours = max(Elapsed)
) %>%
mutate(PCT_CPU_Hours = Total_CPU_Hours/sum(Total_CPU_Hours)) %>%
mutate(PCT_Elapsed_Hours = Total_Elapsed_Hours/sum(Total_Elapsed_Hours))
# Add a Project identifier column so summaries from multiple projects can be combined
task_summary <- task_summary %>%
add_column(Project = project_name, .before = "Group")
# Plot the summarized data: total hours per task Group as horizontal bars
ggplot(task_summary, aes(x=Group, y=Total_Elapsed_Hours)) +
geom_bar(stat="identity") +
coord_flip()
ggsave(file=paste(project_name, "_ElapsedHours_by_TaskGroup.png", sep=""), dpi=150)
ggplot(task_summary, aes(x=Group, y=Total_CPU_Hours)) +
geom_bar(stat="identity") +
coord_flip()
ggsave(file=paste(project_name, "_CPUhours_by_TaskGroup.png", sep=""), dpi=150)
# Generate a one-row project-level summary by collapsing the task Groups
project_summary <- task_summary %>%
group_by(Project) %>%
summarise(Tasks = sum(Tasks),
Total_CPU_Hours = sum(Total_CPU_Hours),
Total_Elapsed_Hours = (sum(Total_Elapsed_Hours)),
)
##############################################
# Group with other projects for Study Summary
##############################################
# Read in existing study-level summary files (paths come from CLI options)
study_task_summary <- read_delim(opt$task_summary, delim = "\t", col_types = "ccidddtdd")
study_project_summary <- read_delim(opt$study_summary, delim = "\t", col_types = "cidd")
# Concatenate this project's summaries onto the study-level tables
study_task_summary <- bind_rows(study_task_summary, task_summary)
study_project_summary <- bind_rows(study_project_summary, project_summary)
# Write out the updated study summary files
write_tsv(study_task_summary, "study_task_summary.txt")
write_tsv(study_project_summary, "study_project_summary.txt")
|
############################################################
# Course: Modern econometric and statistical learning
# methods for quantitative asset management
#
# Instructor: Prof. Dr. Marc Paolella,
# Urban Ulrych,
# Simon Hediger,
# University of Zurich
#
# Author: Rino Beeli
#
# Date: May 25th, 2020
#
# Topic: Homework 7 - Exercise 2.
###########################################################
library(rpart) # regression trees
library(rpart.plot)
library(ranger) # random forests
library(MASS) # simulate Multivariate Normal distribution
library(glmnet) # LASSO regression
library(foreach) # parallel processing
library(doParallel) # parallel processing
rm(list=ls())
set.seed(42)
# -------------
# functions
# -------------
sim.mv.AR2 <- function(N, d, rho) {
  # Draw N observations from a d-dimensional stationary AR(2) process:
  #   X_{t,j} = 0.3 X_{t-1,j} - 0.4 X_{t-2,j} + e_{t,j},  j = 1, ..., d
  # where the innovations e_t ~ N(0, Sigma) share unit variances and a
  # common pairwise correlation rho, and the first two rows are zero.
  # Positive definiteness of Sigma requires -1/(d-1) < rho < 1.
  stopifnot(-1/(d-1) < rho)
  stopifnot(rho < 1)
  # Equi-correlation covariance matrix: 1 on the diagonal, rho elsewhere.
  cov_mat <- matrix(rho, nrow=d, ncol=d)
  diag(cov_mat) <- 1
  series <- matrix(0, nrow=N, ncol=d)
  # Rows 1 and 2 keep the zero initial condition; recurse from row 3 on.
  # One mvrnorm() draw per time step (keeps the RNG stream well-defined).
  for (row in 3:N) {
    innov <- mvrnorm(1, mu=rep(0, d), Sigma=cov_mat)
    series[row, ] <- 0.3*series[row - 1, ] - 0.4*series[row - 2, ] + innov
  }
  series
}
dgp <- function(X) {
  # True (noise-free) data generating process: a nonlinear function of the
  # first four entries of X only; all remaining entries are irrelevant.
  term_sin <- -sin(2 * X[1])
  term_sq <- X[2]^2 - 25/12
  term_lin <- X[3]
  term_exp <- exp(-X[4]) - (2/5) * sinh(5/2)
  term_sin + term_sq + term_lin + term_exp
}
make.D <- function(X, d) {
  # Build the data matrix D as specified in exercise 2: the first column is
  # the response Y_t = dgp(X_t) + u_t with u_t ~ N(0, 1) i.i.d., and the
  # remaining columns are the predictors X_t.
  #
  # X: N-by-d matrix of predictors (rows = time points).
  # d: number of predictor columns (kept for interface compatibility; the
  #    actual dimension is taken from X).
  # Returns an N-by-(d+1) matrix with the response in column 1.
  u <- rnorm(nrow(X), mean=0, sd=1)
  Y <- matrix(0, nrow=nrow(X), ncol=1)
  # seq_len() (instead of the 1:nrow(X) anti-pattern) degrades gracefully
  # to zero iterations when X has no rows.
  for (t in seq_len(nrow(X))) {
    Y[t] <- dgp(X[t, ]) + u[t]
  }
  cbind(Y, X)
}
run.sim <- function(S, N, d, rho, mtry) {
# Run S Monte-Carlo repetitions of the exercise-2 prediction experiment:
# simulate an AR(2) predictor matrix with innovation correlation rho, build
# the response via the true DGP, fit all competing predictors on the first
# N observations and score the squared error on the single held-out row.
# Side effects: writes MSE and variable-importance summaries (txt) and
# plots (pdf) under output/, and plots trees / prints an OLS summary on
# the first repetition.
#   S    - number of simulation repetitions
#   N    - number of training observations (one extra row is held out)
#   d    - number of predictor columns in D
#   rho  - pairwise innovation correlation of the AR(2) process
#   mtry - number of variables sampled per split in the random forest
# prediction errors vectors
err.rnd <- rep(0, S)
err.dgp <- rep(0, S)
err.ar <- rep(0, S)
err.rtree <- rep(0, S)
err.rtree.pruned <- rep(0, S)
err.rforest <- rep(0, S)
err.rforest.oob <- rep(0, S)
err.ols <- rep(0, S)
err.lasso <- rep(0, S)
# e) keep track if first four variables have been selected
# by the variable importance measure of the random forest
rforest.var.imp.sel <- matrix(0, nrow=S, ncol=4)
for (s in 1:S) {
cat(sprintf('Simulation %i of %i \n', s, S))
# simulate multivariate AR(2) process (N+1 rows: N train + 1 test)
X <- sim.mv.AR2(N+1, d, rho)
# data matrix D
D <- as.data.frame(make.D(X, d))
colnames(D) <- c('Y', paste0('X', 1:d))
# split into training (first N rows) and test data (last row)
D.train <- D[-nrow(D), ]
D.test <- D[nrow(D), ]
# squared test error function
test.serr <- function(pred) (D.test[1,'Y'] - pred)^2
# ---------------- AR(2) benchmark ----------------
ar.fit <- ar(D.train[,1], aic=F, method='mle', order.max=2)
ar.pred <- predict(ar.fit, newdata=tail(D.train[,1], n=2), n.ahead=1)$pred[1]
err.ar[s] <- test.serr(ar.pred)
# ---------- Gaussian random prediction -----------
# naive baseline: a single draw matched to the predictors' moments
err.rnd[s] <- test.serr(rnorm(1, mean=mean(X), sd=sd(X)))
# --------- True data generating process ----------
# oracle benchmark: evaluate the noise-free DGP at the test predictors
dgp.pred <- dgp(unlist(D.test[,2:ncol(D.test)]))
err.dgp[s] <- test.serr(dgp.pred)
# ---------------- regression tree ----------------
# cp=0 grows an unpruned tree; pruning is applied explicitly below
rtree.fit <- rpart(Y ~ ., data=D.train, method='anova', control=rpart.control(cp=0))
# b) prune the tree based on minimum cost complexity criterion
rtree.fit.pruned <- prune(rtree.fit, cp=rtree.fit$cptable[which.min(rtree.fit$cptable[,'xerror']),'CP'])
if (s == 1) {
# plot regression trees
prp(rtree.fit, type=2, extra=1)
prp(rtree.fit.pruned, type=2, extra=1)
}
# uses last row of D for test prediction
rtree.pred <- predict(rtree.fit, newdata=D.test)
rtree.pruned.pred <- predict(rtree.fit.pruned, newdata=D.test)
err.rtree[s] <- test.serr(rtree.pred)
err.rtree.pruned[s] <- test.serr(rtree.pruned.pred)
# ----------------- random forest -----------------
rforest.fit <- ranger(Y ~ ., data=D.train, mtry=mtry, num.trees=100, importance='impurity')
rforest.pred <- predict(rforest.fit, data=D.test)$predictions[1]
err.rforest[s] <- test.serr(rforest.pred)
# c) Out-of-bag error
err.rforest.oob[s] <- rforest.fit$prediction.error
# e) selected variables by variable importance measure
# records, per lag-1..4 variable, whether it is among the four most
# important variables of the fitted forest
rforest.var.imp.sel[s,] <- names(sort(rforest.fit$variable.importance, decreasing=T))[1:4] %in% names(D)[2:5]
# --------------- OLS regression ----------------
# -1 drops the intercept
ols.fit <- lm(Y ~ -1 + ., data=D.train)
ols.pred <- predict(ols.fit, newdata=D.test[2:ncol(D)])
err.ols[s] <- test.serr(ols.pred)
if (s == 1) {
print(summary(ols.fit))
}
# -------------- LASSO regression ---------------
# (not part of exercise)
lasso.fit <- cv.glmnet(as.matrix(D.train[, 2:ncol(D)]), as.matrix(D.train[,1]), alpha=1, intercept=F)
lasso.pred <- predict(lasso.fit, newx=as.matrix(D.test[2:ncol(D)]), s='lambda.1se')
err.lasso[s] <- test.serr(lasso.pred)
}
# print mean squared errors
out <- sprintf('
rho=%.1f
MSE Gaussian random: %.3f
MSE AR(2) fitted: %.3f
MSE DGP: %.3f
MSE regression tree: %.3f
MSE regression tree pruned: %.3f
MSE random forest: %.3f (OOB=%.3f, %.0f%% of times greater than MSE)
MSE OLS: %.3f
MSE LASSO: %.3f
', rho, mean(err.rnd), mean(err.ar), mean(err.dgp),
mean(err.rtree), mean(err.rtree.pruned),
mean(err.rforest), mean(err.rforest.oob),
100*mean(err.rforest.oob > err.rforest),
mean(err.ols), mean(err.lasso))
cat(out)
# save to txt file
hdl <- file(sprintf('output/2_MSE_rho=%.1f_mtry=%i_d=%i.txt', rho, mtry, d))
writeLines(c(out), hdl)
close(hdl)
# barplot of MSE by method
pdf(file=sprintf('output/2_MSE_rho=%.1f_mtry=%i_d=%i.pdf', rho, mtry, d), width=8, height=8)
err.df <- data.frame(Method=c(
'Gaussian random', 'DGP', 'AR(2) fitted', 'Regression tree',
'Regression tree pruned', 'Random forest', 'OLS', 'LASSO'),
MSE=c(mean(err.rnd), mean(err.dgp), mean(err.ar), mean(err.rtree),
mean(err.rtree.pruned), mean(err.rforest),
mean(err.ols), mean(err.lasso)))
err.df <- err.df[order(err.df$MSE), ] # sort by MSE
par(mfrow=c(1,1), mar=c(5, 10, 5, 2))
b <- barplot(err.df$MSE, names.arg=err.df$Method,
main=sprintf('MSE test prediction error (rho=%.1f)', rho),
horiz=T, xlab='MSE',
xlim=c(0, max(err.df$MSE)*1.4), las=2)
text(x=err.df$MSE + 0.1*max(err.df$MSE), y=b, label=format(err.df$MSE, digits=2), cex=0.75)
dev.off()
# e) show how many times variable of lag 1-4 have been selected
# by the variable importance measure of the random forest
out <- sprintf('
rho=%.1f
Selected lag 1,2,3,4: %.0f%%
Selected lag=1: %.0f%%
Selected lag=2: %.0f%%
Selected lag=3: %.0f%%
Selected lag=4: %.0f%%
Overall: %.0f%%
', rho,
100*mean(rowSums(rforest.var.imp.sel) == 4),
100*mean(rforest.var.imp.sel[,1]),
100*mean(rforest.var.imp.sel[,2]),
100*mean(rforest.var.imp.sel[,3]),
100*mean(rforest.var.imp.sel[,4]),
100*mean(rforest.var.imp.sel))
cat(out)
# save to txt file
hdl <- file(sprintf('output/2_var_imp_rho=%.1f_mtry=%i_d=%i.txt', rho, mtry, d))
writeLines(c(out), hdl)
close(hdl)
}
# ------------------------------------------------------------------------------------------
# verify multivariate AR(2) simulation function by plotting PACFs
# (a long simulated series should show the two AR lags in each margin)
pdf(file='output/2_PACF.pdf', width=8, height=8)
par(mfrow=c(2,2), mar=c(5, 5, 4, 1))
ts <- sim.mv.AR2(5000, 4, 0.45)
for (i in 1:4) {
pacf(ts[,i], lag.max=10, main='PACF simulated AR(2) with N=5000')
}
dev.off()
# print correlation matrix (should be close to the equi-correlation rho=0.45)
cor(ts[,1:4])
# -------------
# simulations
# -------------
# parameters
S <- 100 # number of simulations
N <- 200 # T training samples from AR(2) process
d <- 150 # number of predictor columns in D
# parameters matrix: column 1 = rho, column 2 = mtry (one row per scenario)
params <- matrix(0, nrow=10, ncol=2)
# increasing rho
params[1,] = c(0.0, floor(sqrt(d)))
params[2,] = c(0.1, floor(sqrt(d)))
params[3,] = c(0.5, floor(sqrt(d)))
params[4,] = c(0.9, floor(sqrt(d)))
# increasing mtry
params[5,] = c(0.1, floor(sqrt(d)))
params[6,] = c(0.1, 50)
params[7,] = c(0.1, 120)
params[8,] = c(0.9, floor(sqrt(d)))
params[9,] = c(0.9, 50)
params[10,] = c(0.9, 120)
# create parallel computation processes (one worker per available core)
cl <- makeCluster(detectCores())
registerDoParallel(cl)
# run simulations using different rho and mtry
# values, see params matrix; each worker loads the listed packages
pkgs <- c('rpart', 'rpart.plot', 'ranger', 'MASS', 'glmnet')
foreach(i=1:nrow(params), .packages=pkgs) %dopar% {
run.sim(S, N, d, params[i,1], params[i,2])
}
# dispose parallel computation processes
stopCluster(cl)
|
/hw_7/hw_7_ex_2.R
|
no_license
|
rbeeli/course_modern_econometrics_2020
|
R
| false
| false
| 9,284
|
r
|
############################################################
# Course: Modern econometric and statistical learning
# methods for quantitative asset management
#
# Instructor: Prof. Dr. Marc Paolella,
# Urban Ulrych,
# Simon Hediger,
# University of Zurich
#
# Author: Rino Beeli
#
# Date: May 25th, 2020
#
# Topic: Homework 7 - Exercise 2.
###########################################################
library(rpart) # regression trees
library(rpart.plot)
library(ranger) # random forests
library(MASS) # simulate Multivariate Normal distribution
library(glmnet) # LASSO regression
library(foreach) # parallel processing
library(doParallel) # parallel processing
rm(list=ls())
set.seed(42)
# -------------
# functions
# -------------
sim.mv.AR2 <- function(N, d, rho) {
  # Draw N observations from a d-dimensional stationary AR(2) process:
  #   X_{t,j} = 0.3 X_{t-1,j} - 0.4 X_{t-2,j} + e_{t,j},  j = 1, ..., d
  # where the innovations e_t ~ N(0, Sigma) share unit variances and a
  # common pairwise correlation rho, and the first two rows are zero.
  # Positive definiteness of Sigma requires -1/(d-1) < rho < 1.
  stopifnot(-1/(d-1) < rho)
  stopifnot(rho < 1)
  # Equi-correlation covariance matrix: 1 on the diagonal, rho elsewhere.
  cov_mat <- matrix(rho, nrow=d, ncol=d)
  diag(cov_mat) <- 1
  series <- matrix(0, nrow=N, ncol=d)
  # Rows 1 and 2 keep the zero initial condition; recurse from row 3 on.
  # One mvrnorm() draw per time step (keeps the RNG stream well-defined).
  for (row in 3:N) {
    innov <- mvrnorm(1, mu=rep(0, d), Sigma=cov_mat)
    series[row, ] <- 0.3*series[row - 1, ] - 0.4*series[row - 2, ] + innov
  }
  series
}
dgp <- function(X) {
  # True (noise-free) data generating process: a nonlinear function of the
  # first four entries of X only; all remaining entries are irrelevant.
  term_sin <- -sin(2 * X[1])
  term_sq <- X[2]^2 - 25/12
  term_lin <- X[3]
  term_exp <- exp(-X[4]) - (2/5) * sinh(5/2)
  term_sin + term_sq + term_lin + term_exp
}
make.D <- function(X, d) {
  # Build the data matrix D as specified in exercise 2: the first column is
  # the response Y_t = dgp(X_t) + u_t with u_t ~ N(0, 1) i.i.d., and the
  # remaining columns are the predictors X_t.
  #
  # X: N-by-d matrix of predictors (rows = time points).
  # d: number of predictor columns (kept for interface compatibility; the
  #    actual dimension is taken from X).
  # Returns an N-by-(d+1) matrix with the response in column 1.
  u <- rnorm(nrow(X), mean=0, sd=1)
  Y <- matrix(0, nrow=nrow(X), ncol=1)
  # seq_len() (instead of the 1:nrow(X) anti-pattern) degrades gracefully
  # to zero iterations when X has no rows.
  for (t in seq_len(nrow(X))) {
    Y[t] <- dgp(X[t, ]) + u[t]
  }
  cbind(Y, X)
}
run.sim <- function(S, N, d, rho, mtry) {
# prediction errors vectors
err.rnd <- rep(0, S)
err.dgp <- rep(0, S)
err.ar <- rep(0, S)
err.rtree <- rep(0, S)
err.rtree.pruned <- rep(0, S)
err.rforest <- rep(0, S)
err.rforest.oob <- rep(0, S)
err.ols <- rep(0, S)
err.lasso <- rep(0, S)
# e) keep track if first four variables have been selected
# by the variable importance measure of the random forest
rforest.var.imp.sel <- matrix(0, nrow=S, ncol=4)
for (s in 1:S) {
cat(sprintf('Simulation %i of %i \n', s, S))
# simulate multivariate AR(2) process
X <- sim.mv.AR2(N+1, d, rho)
# data matrix D
D <- as.data.frame(make.D(X, d))
colnames(D) <- c('Y', paste0('X', 1:d))
# split into training (n=500) and test data (n=1)
D.train <- D[-nrow(D), ]
D.test <- D[nrow(D), ]
# squared test error function
test.serr <- function(pred) (D.test[1,'Y'] - pred)^2
# ---------------- AR(2) benchmark ----------------
ar.fit <- ar(D.train[,1], aic=F, method='mle', order.max=2)
ar.pred <- predict(ar.fit, newdata=tail(D.train[,1], n=2), n.ahead=1)$pred[1]
err.ar[s] <- test.serr(ar.pred)
# ---------- Gaussian random prediction -----------
err.rnd[s] <- test.serr(rnorm(1, mean=mean(X), sd=sd(X)))
# --------- True data generating process ----------
dgp.pred <- dgp(unlist(D.test[,2:ncol(D.test)]))
err.dgp[s] <- test.serr(dgp.pred)
# ---------------- regression tree ----------------
rtree.fit <- rpart(Y ~ ., data=D.train, method='anova', control=rpart.control(cp=0))
# b) prune the tree based on minimum cost complexity criterion
rtree.fit.pruned <- prune(rtree.fit, cp=rtree.fit$cptable[which.min(rtree.fit$cptable[,'xerror']),'CP'])
if (s == 1) {
# plot regression trees
prp(rtree.fit, type=2, extra=1)
prp(rtree.fit.pruned, type=2, extra=1)
}
# uses last row of D for test prediction
rtree.pred <- predict(rtree.fit, newdata=D.test)
rtree.pruned.pred <- predict(rtree.fit.pruned, newdata=D.test)
err.rtree[s] <- test.serr(rtree.pred)
err.rtree.pruned[s] <- test.serr(rtree.pruned.pred)
# ----------------- random forest -----------------
rforest.fit <- ranger(Y ~ ., data=D.train, mtry=mtry, num.trees=100, importance='impurity')
rforest.pred <- predict(rforest.fit, data=D.test)$predictions[1]
err.rforest[s] <- test.serr(rforest.pred)
# c) Out-of-bag error
err.rforest.oob[s] <- rforest.fit$prediction.error
# e) selected variables by variable importance measure
rforest.var.imp.sel[s,] <- names(sort(rforest.fit$variable.importance, decreasing=T))[1:4] %in% names(D)[2:5]
# --------------- OLS regression ----------------
ols.fit <- lm(Y ~ -1 + ., data=D.train)
ols.pred <- predict(ols.fit, newdata=D.test[2:ncol(D)])
err.ols[s] <- test.serr(ols.pred)
if (s == 1) {
print(summary(ols.fit))
}
# -------------- LASSO regression ---------------
# (not part of exercise)
lasso.fit <- cv.glmnet(as.matrix(D.train[, 2:ncol(D)]), as.matrix(D.train[,1]), alpha=1, intercept=F)
lasso.pred <- predict(lasso.fit, newx=as.matrix(D.test[2:ncol(D)]), s='lambda.1se')
err.lasso[s] <- test.serr(lasso.pred)
}
# print mean squared errors
out <- sprintf('
rho=%.1f
MSE Gaussian random: %.3f
MSE AR(2) fitted: %.3f
MSE DGP: %.3f
MSE regression tree: %.3f
MSE regression tree pruned: %.3f
MSE random forest: %.3f (OOB=%.3f, %.0f%% of times greater than MSE)
MSE OLS: %.3f
MSE LASSO: %.3f
', rho, mean(err.rnd), mean(err.ar), mean(err.dgp),
mean(err.rtree), mean(err.rtree.pruned),
mean(err.rforest), mean(err.rforest.oob),
100*mean(err.rforest.oob > err.rforest),
mean(err.ols), mean(err.lasso))
cat(out)
# save to txt file
hdl <- file(sprintf('output/2_MSE_rho=%.1f_mtry=%i_d=%i.txt', rho, mtry, d))
writeLines(c(out), hdl)
close(hdl)
# barplot of MSE by method
pdf(file=sprintf('output/2_MSE_rho=%.1f_mtry=%i_d=%i.pdf', rho, mtry, d), width=8, height=8)
err.df <- data.frame(Method=c(
'Gaussian random', 'DGP', 'AR(2) fitted', 'Regression tree',
'Regression tree pruned', 'Random forest', 'OLS', 'LASSO'),
MSE=c(mean(err.rnd), mean(err.dgp), mean(err.ar), mean(err.rtree),
mean(err.rtree.pruned), mean(err.rforest),
mean(err.ols), mean(err.lasso)))
err.df <- err.df[order(err.df$MSE), ] # sort by MSE
par(mfrow=c(1,1), mar=c(5, 10, 5, 2))
b <- barplot(err.df$MSE, names.arg=err.df$Method,
main=sprintf('MSE test prediction error (rho=%.1f)', rho),
horiz=T, xlab='MSE',
xlim=c(0, max(err.df$MSE)*1.4), las=2)
text(x=err.df$MSE + 0.1*max(err.df$MSE), y=b, label=format(err.df$MSE, digits=2), cex=0.75)
dev.off()
# e) show how many times variable of lag 1-4 have been selected
# by the variable importance measure of the random forest
out <- sprintf('
rho=%.1f
Selected lag 1,2,3,4: %.0f%%
Selected lag=1: %.0f%%
Selected lag=2: %.0f%%
Selected lag=3: %.0f%%
Selected lag=4: %.0f%%
Overall: %.0f%%
', rho,
100*mean(rowSums(rforest.var.imp.sel) == 4),
100*mean(rforest.var.imp.sel[,1]),
100*mean(rforest.var.imp.sel[,2]),
100*mean(rforest.var.imp.sel[,3]),
100*mean(rforest.var.imp.sel[,4]),
100*mean(rforest.var.imp.sel))
cat(out)
# save to txt file
hdl <- file(sprintf('output/2_var_imp_rho=%.1f_mtry=%i_d=%i.txt', rho, mtry, d))
writeLines(c(out), hdl)
close(hdl)
}
# ------------------------------------------------------------------------------------------
# verify multivariate AR(2) simulation function by plotting PACFs
pdf(file='output/2_PACF.pdf', width=8, height=8)
par(mfrow=c(2,2), mar=c(5, 5, 4, 1))
ts <- sim.mv.AR2(5000, 4, 0.45)
for (i in 1:4) {
pacf(ts[,i], lag.max=10, main='PACF simulated AR(2) with N=5000')
}
dev.off()
# print correlation matrix
cor(ts[,1:4])
# -------------
# simulations
# -------------
# parameters
S <- 100 # number of simulations
N <- 200 # T training samples from AR(2) process
d <- 150 # number of predictor columns in D
# parameters matrix
params <- matrix(0, nrow=10, ncol=2)
# increasing rho
params[1,] = c(0.0, floor(sqrt(d)))
params[2,] = c(0.1, floor(sqrt(d)))
params[3,] = c(0.5, floor(sqrt(d)))
params[4,] = c(0.9, floor(sqrt(d)))
# increasing mtry
params[5,] = c(0.1, floor(sqrt(d)))
params[6,] = c(0.1, 50)
params[7,] = c(0.1, 120)
params[8,] = c(0.9, floor(sqrt(d)))
params[9,] = c(0.9, 50)
params[10,] = c(0.9, 120)
# create parallel computation processes
cl <- makeCluster(detectCores())
registerDoParallel(cl)
# run simulations using different rho and mtry
# values, see params matrix
pkgs <- c('rpart', 'rpart.plot', 'ranger', 'MASS', 'glmnet')
foreach(i=1:nrow(params), .packages=pkgs) %dopar% {
run.sim(S, N, d, params[i,1], params[i,2])
}
# dispose parallel computation processes
stopCluster(cl)
|
#' @export
data_plot.parameters_pca <- function(x, data = NULL, ...) {
  # Convert a parameters PCA/EFA loadings table into the long format used
  # by the plot method, and attach title/axis metadata as attributes.
  out <- as.data.frame(x)
  # Diagnostic columns are not part of the loadings plot.
  for (drop_col in c("Complexity", "Uniqueness", "MSA")) {
    out[[drop_col]] <- NULL
  }
  # Prefer a user-supplied label column over the raw variable names.
  if ("Label" %in% colnames(out)) {
    out$Variable <- out$Label
    out$Label <- NULL
  }
  # One row per (Variable, Component) pair.
  out <- .reshape_to_long(
    out,
    names_to = "Component",
    values_to = "y",
    columns = 2:ncol(out)
  )
  # Reverse levels so the first variable ends up at the top of the y-axis.
  out$Variable <- factor(out$Variable, levels = rev(unique(out$Variable)))
  # Build the plot title from the analysis type and rotation method.
  analysis_name <- if (inherits(x, "parameters_efa")) {
    "Factor Analysis"
  } else {
    "Principal Component Analysis"
  }
  rotation_name <- attr(x, "rotation", exact = TRUE)
  plot_title <- if (rotation_name == "none") {
    paste("Loadings from", analysis_name, "(no rotation)")
  } else {
    paste0("Rotated loadings from ", analysis_name, " (", rotation_name, ")")
  }
  attr(out, "info") <- list(
    "xlab" = "",
    "ylab" = "",
    "title" = plot_title
  )
  class(out) <- c("data_plot", "see_parameters_pca", "data.frame")
  out
}
#' @export
data_plot.parameters_efa <- data_plot.parameters_pca
# Plot --------------------------------------------------------------------
#' Plot method for principal component analysis
#'
#' The `plot()` method for the `parameters::principal_components()` function.
#' Draws the (rotated) loadings as horizontal bars or lollipop segments,
#' one facet per extracted component.
#'
#' @param text_color Character specifying color of text labels.
#' @inheritParams data_plot
#' @inheritParams plot.see_bayesfactor_parameters
#' @inheritParams plot.see_check_outliers
#' @inheritParams plot.see_n_factors
#'
#' @return A ggplot2-object.
#'
#' @examples
#' library(parameters)
#' data(mtcars)
#' result <- principal_components(mtcars[, 1:7], n = "all", threshold = 0.2)
#' result
#' plot(result)
#' @importFrom ggplot2 .data
#' @export
plot.see_parameters_pca <- function(x,
                                    type = c("bar", "line"),
                                    size_text = 3.5,
                                    text_color = "black",
                                    size = 1,
                                    show_labels = TRUE,
                                    ...) {
  type <- match.arg(type)
  # Reshape raw parameters objects into the long plotting format first.
  if (!inherits(x, "data_plot")) {
    x <- data_plot(x)
  }
  # Default geometry size depends on the plot type unless the caller set it.
  if (missing(size)) {
    size <- if (type == "bar") 0.6 else 1
  }
  # Bars/points are drawn at |loading|; the sign is encoded through the
  # diverging color/fill gradient (red = negative, blue = positive).
  plt <- ggplot(
    as.data.frame(x),
    aes(
      y = .data$Variable,
      x = abs(.data$y),
      fill = .data$y,
      color = .data$y
    )
  )
  loading_palette <- c("#cd201f", "#ffffff", "#0077B5")
  if (type == "bar") {
    plt <- plt +
      geom_bar(stat = "identity", width = size, colour = NA) +
      scale_fill_gradientn(colours = loading_palette, limits = c(-1, 1))
  } else {
    plt <- plt +
      geom_segment(aes(x = 0, yend = .data$Variable, xend = abs(.data$y)), linewidth = size) +
      geom_point(size = 2 * size) +
      scale_color_gradientn(colours = loading_palette, limits = c(-1, 1))
  }
  if (isTRUE(show_labels)) {
    # Print the signed loading value next to each bar/point.
    plt <- plt +
      geom_text(
        aes(x = abs(.data$y), label = round(.data$y, 2)),
        color = text_color,
        size = size_text,
        nudge_y = 0.15
      )
  }
  plt +
    guides(fill = "none", color = "none") +
    scale_x_continuous(
      limits = c(0, 1.25),
      breaks = c(0, 0.25, 0.5, 0.75, 1, 1.25),
      labels = c("0", "0.25", "0.5", "0.75", "1", "")
    ) +
    facet_wrap(~Component) +
    add_plot_attributes(x)
}
#' @export
plot.see_parameters_efa <- plot.see_parameters_pca
|
/R/plot.parameters_pca.R
|
no_license
|
cran/see
|
R
| false
| false
| 3,502
|
r
|
#' @export
data_plot.parameters_pca <- function(x, data = NULL, ...) {
dataplot <- as.data.frame(x)
dataplot$Complexity <- NULL
dataplot$Uniqueness <- NULL
dataplot$MSA <- NULL
if ("Label" %in% colnames(dataplot)) {
dataplot$Variable <- dataplot$Label
dataplot$Label <- NULL
}
dataplot <- .reshape_to_long(
dataplot,
names_to = "Component",
values_to = "y",
columns = 2:ncol(dataplot)
)
dataplot$Variable <- factor(dataplot$Variable, levels = rev(unique(dataplot$Variable)))
# Title
if (inherits(x, "parameters_efa")) {
title <- "Factor Analysis"
} else {
title <- "Principal Component Analysis"
}
rotation_name <- attr(x, "rotation", exact = TRUE)
if (rotation_name == "none") {
title <- paste("Loadings from", title, "(no rotation)")
} else {
title <- paste0("Rotated loadings from ", title, " (", rotation_name, ")")
}
attr(dataplot, "info") <- list(
"xlab" = "",
"ylab" = "",
"title" = title
)
class(dataplot) <- c("data_plot", "see_parameters_pca", "data.frame")
dataplot
}
#' @export
data_plot.parameters_efa <- data_plot.parameters_pca
# Plot --------------------------------------------------------------------
#' Plot method for principal component analysis
#'
#' The `plot()` method for the `parameters::principal_components()` function.
#'
#' @param text_color Character specifying color of text labels.
#' @inheritParams data_plot
#' @inheritParams plot.see_bayesfactor_parameters
#' @inheritParams plot.see_check_outliers
#' @inheritParams plot.see_n_factors
#'
#' @return A ggplot2-object.
#'
#' @examples
#' library(parameters)
#' data(mtcars)
#' result <- principal_components(mtcars[, 1:7], n = "all", threshold = 0.2)
#' result
#' plot(result)
#' @importFrom ggplot2 .data
#' @export
plot.see_parameters_pca <- function(x,
type = c("bar", "line"),
size_text = 3.5,
text_color = "black",
size = 1,
show_labels = TRUE,
...) {
type <- match.arg(type)
if (!inherits(x, "data_plot")) {
x <- data_plot(x)
}
if (missing(size)) {
size <- switch(type,
"bar" = 0.6,
"line" = 1,
1
)
}
p <- ggplot(
as.data.frame(x),
aes(
y = .data$Variable,
x = abs(.data$y),
fill = .data$y,
color = .data$y
)
)
if (type == "bar") {
p <- p +
geom_bar(stat = "identity", width = size, colour = NA) +
scale_fill_gradientn(colours = c("#cd201f", "#ffffff", "#0077B5"), limits = c(-1, 1))
} else {
p <- p +
geom_segment(aes(x = 0, yend = .data$Variable, xend = abs(.data$y)), linewidth = size) +
geom_point(size = 2 * size) +
scale_color_gradientn(colours = c("#cd201f", "#ffffff", "#0077B5"), limits = c(-1, 1))
}
if (isTRUE(show_labels)) {
p <- p +
geom_text(
aes(x = abs(.data$y), label = round(.data$y, 2)),
color = text_color,
size = size_text,
nudge_y = 0.15
)
}
p <- p +
guides(fill = "none", color = "none") +
scale_x_continuous(
limits = c(0, 1.25),
breaks = c(0, 0.25, 0.5, 0.75, 1, 1.25),
labels = c("0", "0.25", "0.5", "0.75", "1", "")
) +
facet_wrap(~Component) +
add_plot_attributes(x)
p
}
#' @export
plot.see_parameters_efa <- plot.see_parameters_pca
|
\name{roofDiff}
\alias{roofDiff}
\keyword{
second-order difference estimator
}
\title{
roof/valley edge detection
}
\description{
Compute difference between two one-sided gradient
estimators.
}
\usage{
roofDiff(image, bandwidth, blur)
}
\arguments{
\item{image}{A square matrix object of size n by n, no
missing value allowed.}
\item{bandwidth}{A positive integer to specify the number of
pixels used in the local smoothing.}
\item{blur}{If blur = TRUE, besides the conventional 2-D kernel
function, a univariate kernel function is used to address
the issue of blur.}
}
\value{
Returns a matrix where each entry is the maximum of the
differences: \eqn{|\widehat{f}_{x+} - \widehat{f}_{x-}|} and
\eqn{|\widehat{f}_{y+} - \widehat{f}_{y-}|} at each pixel.
}
\details{
At each pixel, the second-order derivatives (i.e., \eqn{f''_{xx}},
\eqn{f''_{xy}}, and \eqn{f''_{yy}}) are estimated by
a local quadratic kernel smoothing procedure. Next, the local
neighborhood is first divided into two halves along the direction
perpendicular to (\eqn{\widehat{f}''_{xx}}, \eqn{\widehat{f}''_{xy}}). Then the
one-sided estimates of \eqn{f'_{x+}} and \eqn{f'_{x-}} are obtained
respectively by local linear kernel smoothing. The estimates of
\eqn{f'_{y+}} and \eqn{f'_{y-}} are obtained by the same procedure
except that the neighborhood is divided along the direction
(\eqn{\widehat{f}''_{xy}}, \eqn{\widehat{f}''_{yy}}).
}
\seealso{
\code{\link{roofEdgeParSel}}, \code{\link{roofEdge}}
}
\examples{
data(peppers)
#diff = roofDiff(image = peppers, bandwidth = 8) # Time consuming
}
\references{
Qiu, P., and Kang, Y. "Blind Image Deblurring Using Jump Regression
Analysis," \emph{Statistica Sinica}, \bold{25}, 2015, 879-899.
}
|
/man/roofDiff.Rd
|
no_license
|
cran/DRIP
|
R
| false
| false
| 1,762
|
rd
|
\name{roofDiff}
\alias{roofDiff}
\keyword{
second-order difference estimator
}
\title{
roof/valley edge detection
}
\description{
Compute difference between two one-sided gradient
estimators.
}
\usage{
roofDiff(image, bandwidth, blur)
}
\arguments{
\item{image}{A square matrix object of size n by n, no
missing value allowed.}
\item{bandwidth}{A positive integer to specify the number of
pixels used in the local smoothing.}
\item{blur}{If blur = TRUE, besides the conventional 2-D kernel
function, a univariate kernel function is used to address
the issue of blur.}
}
\value{
Returns a matrix where each entry is the maximum of the
differences: \eqn{|\widehat{f}_{x+} - \widehat{f}_{x-}|} and
\eqn{|\widehat{f}_{y+} - \widehat{f}_{y-}|} at each pixel.
}
\details{
At each pixel, the second-order derivatives (i.e., \eqn{f''_{xx}},
\eqn{f''_{xy}}, and \eqn{f''_{yy}}) are estimated by
a local quadratic kernel smoothing procedure. Next, the local
neighborhood is first divided into two halves along the direction
perpendicular to (\eqn{\widehat{f}''_{xx}}, \eqn{\widehat{f}''_{xy}}). Then the
one-sided estimates of \eqn{f'_{x+}} and \eqn{f'_{x-}} are obtained
respectively by local linear kernel smoothing. The estimates of
\eqn{f'_{y+}} and \eqn{f'_{y-}} are obtained by the same procedure
except that the neighborhood is divided along the direction
(\eqn{\widehat{f}''_{xy}}, \eqn{\widehat{f}''_{yy}}).
}
\seealso{
\code{\link{roofEdgeParSel}}, \code{\link{roofEdge}}
}
\examples{
data(peppers)
#diff = roofDiff(image = peppers, bandwidth = 8) # Time consuming
}
\references{
Qiu, P., and Kang, Y. "Blind Image Deblurring Using Jump Regression
Analysis," \emph{Statistica Sinica}, \bold{25}, 2015, 879-899.
}
|
#' analyseslope: A package which performs Bayesian analysis and visualization for slope parameters.
#'
#' The package contains functions which extracts slope parameters from our database,
#' plot these slope parameters, and analyse them. We can also identify outliers.
#'
#' @section analyseslope functions:
#' The analyseslope functions ...
#'
#' @docType package
#' @name analyseslope
NULL
|
/R/analyseslope.R
|
no_license
|
samaperrin/analyseslope
|
R
| false
| false
| 392
|
r
|
#' analyseslope: A package which performs Bayesian analysis and visualization for slope parameters.
#'
#' The package contains functions which extracts slope parameters from our database,
#' plot these slope parameters, and analyse them. We can also identify outliers.
#'
#' @section analyseslope functions:
#' The analyseslope functions ...
#'
#' @docType package
#' @name analyseslope
NULL
|
library(pbdMPI)
### Name: get job id
### Title: Divide Job ID by Ranks
### Aliases: get.jid
### Keywords: utility
### ** Examples
### Save code in a file "demo.r" and run with 4 processors by
### SHELL> mpiexec -np 4 Rscript demo.r
spmd.code <- "
### Initial.
suppressMessages(library(pbdMPI, quietly = TRUE))
init()
### Examples.
comm.cat(\">>> block\n\", quiet = TRUE)
jid <- get.jid(7, method = \"block\")
comm.print(jid, all.rank = TRUE)
comm.cat(\">>> cycle\n\", quiet = TRUE)
jid <- get.jid(7, method = \"cycle\")
comm.print(jid, all.rank = TRUE)
comm.cat(\">>> block (all)\n\", quiet = TRUE)
alljid <- get.jid(7, method = \"block\", all = TRUE)
comm.print(alljid)
comm.cat(\">>> cycle (all)\n\", quiet = TRUE)
alljid <- get.jid(7, method = \"cycle\", all = TRUE)
comm.print(alljid)
### Finish.
finalize()
"
pbdMPI::execmpi(spmd.code, nranks = 4L)
|
/data/genthat_extracted_code/pbdMPI/examples/vv_get_jid.Rd.R
|
no_license
|
surayaaramli/typeRrh
|
R
| false
| false
| 867
|
r
|
library(pbdMPI)
### Name: get job id
### Title: Divide Job ID by Ranks
### Aliases: get.jid
### Keywords: utility
### ** Examples
### Save code in a file "demo.r" and run with 4 processors by
### SHELL> mpiexec -np 4 Rscript demo.r
spmd.code <- "
### Initial.
suppressMessages(library(pbdMPI, quietly = TRUE))
init()
### Examples.
comm.cat(\">>> block\n\", quiet = TRUE)
jid <- get.jid(7, method = \"block\")
comm.print(jid, all.rank = TRUE)
comm.cat(\">>> cycle\n\", quiet = TRUE)
jid <- get.jid(7, method = \"cycle\")
comm.print(jid, all.rank = TRUE)
comm.cat(\">>> block (all)\n\", quiet = TRUE)
alljid <- get.jid(7, method = \"block\", all = TRUE)
comm.print(alljid)
comm.cat(\">>> cycle (all)\n\", quiet = TRUE)
alljid <- get.jid(7, method = \"cycle\", all = TRUE)
comm.print(alljid)
### Finish.
finalize()
"
pbdMPI::execmpi(spmd.code, nranks = 4L)
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/procrustesCM_RPS.R
\name{procrustesCM_RPS}
\alias{procrustesCM_RPS}
\title{This function is simply a wrapper for the geomorph function gpagen that performs
the classical least squares Procrustes superimposition of the input configurations
of landmarks.}
\usage{
procrustesCM_RPS(X)
}
\arguments{
\item{X}{A s-dimensional array (s=2 or s=3) of n x k matrices, representing shapes of k objects through n landmarks in s dimensions}
}
\value{
s-dimensional array of n x k matrices, representing shapes of k objects following superimposition.
}
\description{
This function is simply a wrapper for the geomorph function gpagen that performs
the classical least squares Procrustes superimposition of the input configurations
of landmarks.
}
\examples{
source = array(matrix(nrow = 8,ncol = 3),c(8,3,3),dimnames = NULL)
source[,,1] <- matrix(c(3,0,0,3,0,1,3,1,1,3,1,0,0,0,0,0,0,1,0,1,1,0,1,0)
,nrow = 8,ncol = 3,byrow = TRUE)
source[,,2] <- matrix(c(3, 0 ,0,3, 0, 0.5,3, 1 ,0.75,3 ,1 ,0,0 ,0 ,0,0, 0 ,1,0, 1, 1,0, 1, 0.25)
,nrow = 8,ncol = 3,byrow = TRUE)
source[,,3] <- matrix(c(5, 2 ,1,3, 0, 1.5,3.4, 1 ,1.75,3 ,1 ,0,0 ,0 ,0,0, 2 ,1,0, 3, 1,0, 1, 0.75)
,nrow = 8,ncol = 3,byrow = TRUE)
result <- RPS::procrustesCM_RPS(source)
result
}
\author{
Dean C.Adams, Michael Collyer
}
|
/man/procrustesCM_RPS.Rd
|
no_license
|
cran/RPS
|
R
| false
| true
| 1,447
|
rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/procrustesCM_RPS.R
\name{procrustesCM_RPS}
\alias{procrustesCM_RPS}
\title{This function is simply a wrapper for the geomorph function gpagen that performs
the classical least squares Procrustes superimposition of the input configurations
of landmarks.}
\usage{
procrustesCM_RPS(X)
}
\arguments{
\item{X}{A s-dimensional array (s=2 or s=3) of n x k matrices, representing shapes of k objects through n landmarks in s dimensions}
}
\value{
s-dimensional array of n x k matrices, representing shapes of k objects following superimposition.
}
\description{
This function is simply a wrapper for the geomorph function gpagen that performs
the classical least squares Procrustes superimposition of the input configurations
of landmarks.
}
\examples{
source = array(matrix(nrow = 8,ncol = 3),c(8,3,3),dimnames = NULL)
source[,,1] <- matrix(c(3,0,0,3,0,1,3,1,1,3,1,0,0,0,0,0,0,1,0,1,1,0,1,0)
,nrow = 8,ncol = 3,byrow = TRUE)
source[,,2] <- matrix(c(3, 0 ,0,3, 0, 0.5,3, 1 ,0.75,3 ,1 ,0,0 ,0 ,0,0, 0 ,1,0, 1, 1,0, 1, 0.25)
,nrow = 8,ncol = 3,byrow = TRUE)
source[,,3] <- matrix(c(5, 2 ,1,3, 0, 1.5,3.4, 1 ,1.75,3 ,1 ,0,0 ,0 ,0,0, 2 ,1,0, 3, 1,0, 1, 0.75)
,nrow = 8,ncol = 3,byrow = TRUE)
result <- RPS::procrustesCM_RPS(source)
result
}
\author{
Dean C.Adams, Michael Collyer
}
|
# most variable genes.r
options(stringsAsFactors = FALSE) # for compatible code between us
library(tidyverse)
## BUG FIX: the original did `getwd() <- "..."`, which is not valid R --
## there is no `getwd<-` replacement function, so the script errored here.
## Keep the trailing slash on base_dir so the paste0() below forms a valid path.
base_dir <- "~/Documents/UCSC/Junior/Treehouse/Treehouse_OutlierRNASeq/"
setwd(paste0(base_dir, "comp4.3_tert8.ckcc.outlier_results"))
## Read every per-sample outlier results file in the directory and tag
## each row with the sample ID recovered from the file name.
up_outlier_files = list.files(, "outlier_results_")
outlierResults <- lapply(up_outlier_files, function(x) {
  read_tsv(x, col_types = cols()) %>%
    add_column(sampleID = gsub("outlier_results_", "", x))
}) %>%
  bind_rows()
dfGeneMean <- outlierResults %>%
group_by(Gene) %>%
summarize(mean = mean(sample))
mean(dfGeneMean$mean)
# the overall mean is 1.019 sample
dfGeneVar <- outlierResults %>%
group_by(Gene) %>%
summarize(variation = var(sample)) %>%
arrange(desc(variation))
mean(dfGeneVar$variation) # 0.56
# so most genes differ from the norm on average by 0.56
sd(dfGeneVar$variation) # standard deviation = 1.15
summary(dfGeneVar)
# Gene variation
# Length:58581 Min. : 0.000000
# Class :character 1st Qu.: 0.001579
# Mode :character Median : 0.069611
# Mean : 0.565726
# 3rd Qu.: 0.707318
# Max. :28.579563
quantile(dfGeneVar$variation, 0.95)
# > 95% of the data variates from the mean by 2.56
geneList <- dfGeneVar %>% filter(variation > quantile(dfGeneVar$variation, 0.95))
# get names of genes p95 of variation and up
dfPercentile <- outlierResults %>%
group_by(sampleID) %>%
summarize(p95 = quantile(sample, 0.95))
dfSamples <-
outlierResults %>% group_by(sampleID) %>% filter(Gene %in% geneList$Gene)
# match names to all of their th01 th02 etc...
dfSamples$TH01 <- grepl(pattern = 'TH01', dfSamples$sampleID)
dfSamples$TH01 <- gsub('TRUE', 'blue',dfSamples$TH01)
dfSamples$TH01 <- gsub('FALSE', 'red',dfSamples$TH01)
sampleList <- dfSamples %>%
select(sampleID,sample) %>%
group_by(sampleID) %>%
summarize()
## For each sample: build a histogram of expression of the most variable
## genes, mark the sample's 95th percentile, and save the plot to disk.
for (thisSample in sampleList$sampleID) {
  print(thisSample)
  dfi <- dfSamples %>% filter(sampleID == thisSample)
  dfp <- dfPercentile %>% filter(sampleID == thisSample)
  dfc <- dfSamples %>% group_by(TH01) %>% filter(sampleID == thisSample) %>% summarize()
  print(dfc[1][[1]])
  ## BUG FIX: the plot was built but never assigned, so ggsave(plot = p)
  ## below referenced an undefined (or stale) object 'p'.
  p <- ggplot(dfSamples %>% filter(sampleID == thisSample)) +
    geom_histogram(aes(sample), binwidth = 0.1) +
    scale_fill_manual(values = c('red')) +
    ggtitle(thisSample) +
    xlab("log2(TPM+1)") + ylab("Gene Expression") +
    scale_x_continuous(limits = c(0, 20)) +
    scale_y_continuous(limits = c(0, 200)) +
    geom_vline(xintercept = dfp$p95) +
    annotate(
      "text",
      x = round(dfp$p95, 4) + 4.3,
      y = 150,
      label = paste0(
        "glp95: ",
        round(dfp$p95, 4)
      )
    )
  maxGene <- max(dfi$sample)
  maxVarGene <- dfi[which.max(dfi$sample), ]$Gene
  variationOfMax <- round(maxGene - mean(dfi$sample), 3)
  ## File name encodes the percentile and the distance of the most extreme
  ## gene from the sample mean, so files sort usefully in the output folder.
  ggsave(
    paste0(
      "pctl=",
      format(round(dfp$p95, 4), nsmall = 4),
      "-var=",
      variationOfMax,
      "-",
      thisSample,
      ".png"
    ),
    plot = p,
    "png",
    paste0(getwd(), "Batch-MostVariantGenesSorted-by-p95")
  )
}
# saves plots and bumps for 22 sample files on the low end of the 95th percentile
{
nfpDF <-
dfSamples %>% group_by(sampleID) %>% summarize(nfp = quantile(sample, 0.95))
fifteenth = quantile(nfpDF$nfp, 0.15)
worst15pctSamples <-
nfpDF %>% filter(nfp < fifteenth) %>% arrange(desc(nfp))
thisSample <- NULL
order <- 0
sumBump <- 0
countBump <- 0
averageBump <- 0
x <- list()
for (thisSample in worst15pctSamples$sampleID) {
print(thisSample)
df <-
dfSamples %>% filter(sampleID == thisSample) %>% filter(sample > 1.8)
dfn <- count(df, sample=round(sample,1))
dfn$index <- seq(1,length(dfn$n))
maxGene <- max(df$sample)
maxVarGene<-df[which.max(df$sample),]$Gene
variationOfMax <- round(maxGene- mean(df$sample),3)
order = order + 1
p <- ggplot(dfSamples %>% filter(sampleID == thisSample)) +
geom_histogram(aes(sample), binwidth = 0.1) +
ggtitle(paste0(thisSample,"\n maxGene: ", maxVarGene)) +
scale_x_continuous(limits = c(0,15))+
scale_y_continuous(limits = c(0,125))+
xlab("log2(TPM+1)") + ylab("Gene Expression")
if(dfn[which.max(dfn$n),]$sample > 2.1) {
p = p + annotate(
"text",
x = dfn[which.max(dfn$n),]$sample+3,
y = 1000,
label = paste0(
"bump: ",
dfn[which.max(dfn$n),]$sample
)
) + geom_vline(xintercept = dfn[which.max(dfn$n),]$sample)
sumBump = sumBump + dfn[which.max(dfn$n),]$sample
countBump = countBump + 1
}
ggsave(
paste0(
order + 100,
"_",
round(worst15pctSamples[[2]][order], digits = 3),
"_",
thisSample,
".png"
),
plot = p,
"png",
paste0(getwd(), "BatchPlotsMostVar-Below-p15")
)
}
averageBump = sumBump / countBump
print(averageBump)
}
# saves plots and bumps for 22 sample files on the high end of the 95th percentile
{
  nfpDF <-
    dfSamples %>% group_by(sampleID) %>% summarize(nfp = quantile(sample, 0.95))
  eightyFive = quantile(nfpDF$nfp, 0.85)
  best85pctSamples <-
    nfpDF %>% filter(nfp > eightyFive) %>% arrange(desc(nfp))
  thisSample <- NULL
  order <- 0
  sumBump <- 0
  countBump <- 0
  averageBump <- 0
  x <- list()
  ## BUG FIX: the original iterated over 'best95pctSamples', which is never
  ## defined anywhere in the script; the object built above is 'best85pctSamples'.
  for (thisSample in best85pctSamples$sampleID) {
    print(thisSample)
    df <-
      dfSamples %>% filter(sampleID == thisSample) %>% filter(sample > 1.8)
    dfn <- count(df, sample = round(sample, 1))
    dfn$index <- seq(1, length(dfn$n))
    maxGene <- max(df$sample)
    maxVarGene <- df[which.max(df$sample), ]$Gene
    variationOfMax <- round(maxGene - mean(df$sample), 3)
    order = order + 1
    ## BUG FIX: dropped the trailing 'geom_vline(xintercept=)' layer the
    ## original appended -- its empty argument errored when the plot rendered.
    p <- ggplot(dfSamples %>% filter(sampleID == thisSample)) +
      geom_histogram(aes(sample), binwidth = 0.1) +
      ggtitle(paste0(thisSample, "> p85\n maxGene: ", maxVarGene)) +
      scale_x_continuous(limits = c(0, 20)) +
      scale_y_continuous(limits = c(0, 300)) +
      xlab("log2(TPM+1)") + ylab("Gene Expression")
    ## Annotate a "bump" (secondary mode) only when the most common rounded
    ## expression value is above the 2.5 threshold.
    if (dfn[which.max(dfn$n), ]$sample > 2.5) {
      p = p + annotate(
        "text",
        x = dfn[which.max(dfn$n), ]$sample + 3,
        y = 1000,
        label = paste0(
          "bump: ",
          dfn[which.max(dfn$n), ]$sample
        )
      ) + geom_vline(xintercept = dfn[which.max(dfn$n), ]$sample)
      sumBump = sumBump + dfn[which.max(dfn$n), ]$sample
      countBump = countBump + 1
    }
    ggsave(
      paste0(
        order + 100,
        "_",
        round(best85pctSamples[[2]][order], digits = 3),
        "_",
        thisSample,
        ".png"
      ),
      plot = p,
      "png",
      paste0(getwd(), "BatchPlotsMostVar-Above-p95")
    )
  }
  averageBump = sumBump / countBump
  print(averageBump)
}
nfpDF <-
dfSamples %>% group_by(sampleID) %>% summarize(nfp = quantile(sample, 0.95))
eightyFive = quantile(nfpDF$nfp, 0.85)
best85pctSamples <-
nfpDF %>% filter(nfp > eightyFive) %>% arrange(desc(nfp))
fifteenth = quantile(nfpDF$nfp, 0.15)
worst15pctSamples <-
nfpDF %>% filter(nfp < fifteenth) %>% arrange(desc(nfp))
maxGene <- max(dfSamples$sample)
maxVarGene<-dfSamples[which.max(dfSamples$sample),]$Gene
variationOfMax <- round(maxGene- mean(dfSamples$sample),3)
pctl <- data.frame(outlierResults %>% group_by(sampleID) %>% summarize(global95 = quantile(sample, 0.95)))
dfSamples <- left_join(dfSamples, pctl, by="sampleID")
dfSamples %>% arrange((global95))
dfSamples$TH01 <- grepl("TH01", dfSamples$sampleID)
facetBigPlot <- ggplot(dfSamples, aes(sample, fill= TH01)) + geom_histogram(binwidth=0.1) +
ggtitle(paste0("Samples All | maxGene: ", maxVarGene, " | Distance From Mean: ", variationOfMax)) +
xlab("log2(TPM+1)") + ylab("Gene Expression")+
scale_x_continuous(limits = c(0,20)) +
scale_y_continuous(limits = c(0,100)) +
scale_fill_brewer(palette = "Set1") +
facet_wrap(~ global95)
ggsave(filename = "facetWrapColored3.png", facetBigPlot,
width = 20, height = 20, dpi = 150, units = "in", device='png', paste0(getwd()))
ggplot(dfSamples %>% filter(sampleID == best85pctSamples$sampleID), aes(sample)) +
geom_histogram(binwidth=0.1) +
ggtitle(paste0("Highest 22 p95s | maxVarGene: ", maxVarGene, " | Distance From Mean: ", variationOfMax)) +
xlab("log2(TPM+1)") + ylab("Gene Expression")+
scale_x_continuous(limits = c(0,17)) +
scale_y_continuous(limits = c(0,20)) +
facet_wrap(~ sampleID)
dfBadSamples <- dfSamples %>% filter(sampleID == worst15pctSamples$sampleID)
maxGene <- max(dfBadSamples$sample)
maxVarGene<-dfBadSamples[which.max(dfBadSamples$sample),]$Gene
variationOfMax <- round(maxGene- mean(dfBadSamples$sample),3)
ggplot(dfBadSamples, aes(sample)) +
geom_histogram(binwidth=0.1) +
ggtitle(paste0("Lowest 22 p95s | maxVarGene: ", maxVarGene, " | Distance From Mean: ", variationOfMax)) +
xlab("log2(TPM+1)") + ylab("Gene Expression")+
scale_x_continuous(limits = c(0,17)) +
scale_y_continuous(limits = c(0,20)) +
facet_wrap(~ sampleID)
|
/RCode/mostVariableGenes.R
|
no_license
|
liammckay19/Treehouse_OutlierRNASeq
|
R
| false
| false
| 8,737
|
r
|
# most variable genes.r
options(stringsAsFactors = FALSE) # for compatible code between us
library(tidyverse)
## BUG FIX: the original did `getwd() <- "..."`, which is not valid R --
## there is no `getwd<-` replacement function, so the script errored here.
## Keep the trailing slash on base_dir so the paste0() below forms a valid path.
base_dir <- "~/Documents/UCSC/Junior/Treehouse/Treehouse_OutlierRNASeq/"
setwd(paste0(base_dir, "comp4.3_tert8.ckcc.outlier_results"))
## Read every per-sample outlier results file in the directory and tag
## each row with the sample ID recovered from the file name.
up_outlier_files = list.files(, "outlier_results_")
outlierResults <- lapply(up_outlier_files, function(x) {
  read_tsv(x, col_types = cols()) %>%
    add_column(sampleID = gsub("outlier_results_", "", x))
}) %>%
  bind_rows()
dfGeneMean <- outlierResults %>%
group_by(Gene) %>%
summarize(mean = mean(sample))
mean(dfGeneMean$mean)
# the overall mean is 1.019 sample
dfGeneVar <- outlierResults %>%
group_by(Gene) %>%
summarize(variation = var(sample)) %>%
arrange(desc(variation))
mean(dfGeneVar$variation) # 0.56
# so most genes differ from the norm on average by 0.56
sd(dfGeneVar$variation) # standard deviation = 1.15
summary(dfGeneVar)
# Gene variation
# Length:58581 Min. : 0.000000
# Class :character 1st Qu.: 0.001579
# Mode :character Median : 0.069611
# Mean : 0.565726
# 3rd Qu.: 0.707318
# Max. :28.579563
quantile(dfGeneVar$variation, 0.95)
# > 95% of the data variates from the mean by 2.56
geneList <- dfGeneVar %>% filter(variation > quantile(dfGeneVar$variation, 0.95))
# get names of genes p95 of variation and up
dfPercentile <- outlierResults %>%
group_by(sampleID) %>%
summarize(p95 = quantile(sample, 0.95))
dfSamples <-
outlierResults %>% group_by(sampleID) %>% filter(Gene %in% geneList$Gene)
# match names to all of their th01 th02 etc...
dfSamples$TH01 <- grepl(pattern = 'TH01', dfSamples$sampleID)
dfSamples$TH01 <- gsub('TRUE', 'blue',dfSamples$TH01)
dfSamples$TH01 <- gsub('FALSE', 'red',dfSamples$TH01)
sampleList <- dfSamples %>%
select(sampleID,sample) %>%
group_by(sampleID) %>%
summarize()
## For each sample: build a histogram of expression of the most variable
## genes, mark the sample's 95th percentile, and save the plot to disk.
for (thisSample in sampleList$sampleID) {
  print(thisSample)
  dfi <- dfSamples %>% filter(sampleID == thisSample)
  dfp <- dfPercentile %>% filter(sampleID == thisSample)
  dfc <- dfSamples %>% group_by(TH01) %>% filter(sampleID == thisSample) %>% summarize()
  print(dfc[1][[1]])
  ## BUG FIX: the plot was built but never assigned, so ggsave(plot = p)
  ## below referenced an undefined (or stale) object 'p'.
  p <- ggplot(dfSamples %>% filter(sampleID == thisSample)) +
    geom_histogram(aes(sample), binwidth = 0.1) +
    scale_fill_manual(values = c('red')) +
    ggtitle(thisSample) +
    xlab("log2(TPM+1)") + ylab("Gene Expression") +
    scale_x_continuous(limits = c(0, 20)) +
    scale_y_continuous(limits = c(0, 200)) +
    geom_vline(xintercept = dfp$p95) +
    annotate(
      "text",
      x = round(dfp$p95, 4) + 4.3,
      y = 150,
      label = paste0(
        "glp95: ",
        round(dfp$p95, 4)
      )
    )
  maxGene <- max(dfi$sample)
  maxVarGene <- dfi[which.max(dfi$sample), ]$Gene
  variationOfMax <- round(maxGene - mean(dfi$sample), 3)
  ## File name encodes the percentile and the distance of the most extreme
  ## gene from the sample mean, so files sort usefully in the output folder.
  ggsave(
    paste0(
      "pctl=",
      format(round(dfp$p95, 4), nsmall = 4),
      "-var=",
      variationOfMax,
      "-",
      thisSample,
      ".png"
    ),
    plot = p,
    "png",
    paste0(getwd(), "Batch-MostVariantGenesSorted-by-p95")
  )
}
# saves plots and bumps for 22 sample files on the low end of the 95th percentile
{
nfpDF <-
dfSamples %>% group_by(sampleID) %>% summarize(nfp = quantile(sample, 0.95))
fifteenth = quantile(nfpDF$nfp, 0.15)
worst15pctSamples <-
nfpDF %>% filter(nfp < fifteenth) %>% arrange(desc(nfp))
thisSample <- NULL
order <- 0
sumBump <- 0
countBump <- 0
averageBump <- 0
x <- list()
for (thisSample in worst15pctSamples$sampleID) {
print(thisSample)
df <-
dfSamples %>% filter(sampleID == thisSample) %>% filter(sample > 1.8)
dfn <- count(df, sample=round(sample,1))
dfn$index <- seq(1,length(dfn$n))
maxGene <- max(df$sample)
maxVarGene<-df[which.max(df$sample),]$Gene
variationOfMax <- round(maxGene- mean(df$sample),3)
order = order + 1
p <- ggplot(dfSamples %>% filter(sampleID == thisSample)) +
geom_histogram(aes(sample), binwidth = 0.1) +
ggtitle(paste0(thisSample,"\n maxGene: ", maxVarGene)) +
scale_x_continuous(limits = c(0,15))+
scale_y_continuous(limits = c(0,125))+
xlab("log2(TPM+1)") + ylab("Gene Expression")
if(dfn[which.max(dfn$n),]$sample > 2.1) {
p = p + annotate(
"text",
x = dfn[which.max(dfn$n),]$sample+3,
y = 1000,
label = paste0(
"bump: ",
dfn[which.max(dfn$n),]$sample
)
) + geom_vline(xintercept = dfn[which.max(dfn$n),]$sample)
sumBump = sumBump + dfn[which.max(dfn$n),]$sample
countBump = countBump + 1
}
ggsave(
paste0(
order + 100,
"_",
round(worst15pctSamples[[2]][order], digits = 3),
"_",
thisSample,
".png"
),
plot = p,
"png",
paste0(getwd(), "BatchPlotsMostVar-Below-p15")
)
}
averageBump = sumBump / countBump
print(averageBump)
}
# saves plots and bumps for 22 sample files on the high end of the 95th percentile
{
  nfpDF <-
    dfSamples %>% group_by(sampleID) %>% summarize(nfp = quantile(sample, 0.95))
  eightyFive = quantile(nfpDF$nfp, 0.85)
  best85pctSamples <-
    nfpDF %>% filter(nfp > eightyFive) %>% arrange(desc(nfp))
  thisSample <- NULL
  order <- 0
  sumBump <- 0
  countBump <- 0
  averageBump <- 0
  x <- list()
  ## BUG FIX: the original iterated over 'best95pctSamples', which is never
  ## defined anywhere in the script; the object built above is 'best85pctSamples'.
  for (thisSample in best85pctSamples$sampleID) {
    print(thisSample)
    df <-
      dfSamples %>% filter(sampleID == thisSample) %>% filter(sample > 1.8)
    dfn <- count(df, sample = round(sample, 1))
    dfn$index <- seq(1, length(dfn$n))
    maxGene <- max(df$sample)
    maxVarGene <- df[which.max(df$sample), ]$Gene
    variationOfMax <- round(maxGene - mean(df$sample), 3)
    order = order + 1
    ## BUG FIX: dropped the trailing 'geom_vline(xintercept=)' layer the
    ## original appended -- its empty argument errored when the plot rendered.
    p <- ggplot(dfSamples %>% filter(sampleID == thisSample)) +
      geom_histogram(aes(sample), binwidth = 0.1) +
      ggtitle(paste0(thisSample, "> p85\n maxGene: ", maxVarGene)) +
      scale_x_continuous(limits = c(0, 20)) +
      scale_y_continuous(limits = c(0, 300)) +
      xlab("log2(TPM+1)") + ylab("Gene Expression")
    ## Annotate a "bump" (secondary mode) only when the most common rounded
    ## expression value is above the 2.5 threshold.
    if (dfn[which.max(dfn$n), ]$sample > 2.5) {
      p = p + annotate(
        "text",
        x = dfn[which.max(dfn$n), ]$sample + 3,
        y = 1000,
        label = paste0(
          "bump: ",
          dfn[which.max(dfn$n), ]$sample
        )
      ) + geom_vline(xintercept = dfn[which.max(dfn$n), ]$sample)
      sumBump = sumBump + dfn[which.max(dfn$n), ]$sample
      countBump = countBump + 1
    }
    ggsave(
      paste0(
        order + 100,
        "_",
        round(best85pctSamples[[2]][order], digits = 3),
        "_",
        thisSample,
        ".png"
      ),
      plot = p,
      "png",
      paste0(getwd(), "BatchPlotsMostVar-Above-p95")
    )
  }
  averageBump = sumBump / countBump
  print(averageBump)
}
nfpDF <-
dfSamples %>% group_by(sampleID) %>% summarize(nfp = quantile(sample, 0.95))
eightyFive = quantile(nfpDF$nfp, 0.85)
best85pctSamples <-
nfpDF %>% filter(nfp > eightyFive) %>% arrange(desc(nfp))
fifteenth = quantile(nfpDF$nfp, 0.15)
worst15pctSamples <-
nfpDF %>% filter(nfp < fifteenth) %>% arrange(desc(nfp))
maxGene <- max(dfSamples$sample)
maxVarGene<-dfSamples[which.max(dfSamples$sample),]$Gene
variationOfMax <- round(maxGene- mean(dfSamples$sample),3)
pctl <- data.frame(outlierResults %>% group_by(sampleID) %>% summarize(global95 = quantile(sample, 0.95)))
dfSamples <- left_join(dfSamples, pctl, by="sampleID")
dfSamples %>% arrange((global95))
dfSamples$TH01 <- grepl("TH01", dfSamples$sampleID)
facetBigPlot <- ggplot(dfSamples, aes(sample, fill= TH01)) + geom_histogram(binwidth=0.1) +
ggtitle(paste0("Samples All | maxGene: ", maxVarGene, " | Distance From Mean: ", variationOfMax)) +
xlab("log2(TPM+1)") + ylab("Gene Expression")+
scale_x_continuous(limits = c(0,20)) +
scale_y_continuous(limits = c(0,100)) +
scale_fill_brewer(palette = "Set1") +
facet_wrap(~ global95)
ggsave(filename = "facetWrapColored3.png", facetBigPlot,
width = 20, height = 20, dpi = 150, units = "in", device='png', paste0(getwd()))
ggplot(dfSamples %>% filter(sampleID == best85pctSamples$sampleID), aes(sample)) +
geom_histogram(binwidth=0.1) +
ggtitle(paste0("Highest 22 p95s | maxVarGene: ", maxVarGene, " | Distance From Mean: ", variationOfMax)) +
xlab("log2(TPM+1)") + ylab("Gene Expression")+
scale_x_continuous(limits = c(0,17)) +
scale_y_continuous(limits = c(0,20)) +
facet_wrap(~ sampleID)
dfBadSamples <- dfSamples %>% filter(sampleID == worst15pctSamples$sampleID)
maxGene <- max(dfBadSamples$sample)
maxVarGene<-dfBadSamples[which.max(dfBadSamples$sample),]$Gene
variationOfMax <- round(maxGene- mean(dfBadSamples$sample),3)
ggplot(dfBadSamples, aes(sample)) +
geom_histogram(binwidth=0.1) +
ggtitle(paste0("Lowest 22 p95s | maxVarGene: ", maxVarGene, " | Distance From Mean: ", variationOfMax)) +
xlab("log2(TPM+1)") + ylab("Gene Expression")+
scale_x_continuous(limits = c(0,17)) +
scale_y_continuous(limits = c(0,20)) +
facet_wrap(~ sampleID)
|
#' @title Get all the tasks which are currently scheduled at the Windows task scheduler.
#' @description Get all the tasks which are currently scheduled at the Windows task scheduler.
#'
#' @return a data.frame with scheduled tasks as returned by schtasks /Query for which the Taskname or second
#' column in the dataset the preceding \\ is removed
#' @param encoding encoding of the CSV which schtasks.exe generates. Defaults to UTF-8.
#' @param ... optional arguments passed on to \code{fread} in order to read in the CSV file which schtasks generates
#' @export
#' @examples
#' x <- taskscheduler_ls()
#' x
taskscheduler_ls <- function(encoding = 'UTF-8', ...){
  ## Switch the console code page to UTF-8 so schtasks emits parseable output.
  change_code_page <- system("chcp 65001", intern = TRUE)
  cmd <- sprintf('schtasks /Query /FO CSV /V')
  x <- system(cmd, intern = TRUE)
  f <- tempfile()
  ## BUG FIX: register the cleanup immediately after creating the tempfile.
  ## The original only called on.exit() at the very end, so an error raised
  ## by read.csv() (not wrapped in try) leaked the temporary file.
  on.exit(file.remove(f), add = TRUE)
  writeLines(x, f)
  ## Prefer data.table::fread; fall back to base read.csv when it fails.
  x <- try(data.table::fread(f, encoding = encoding, ...), silent = TRUE)
  if(inherits(x, "try-error")){
    x <- utils::read.csv(f, check.names = FALSE, stringsAsFactors = FALSE, encoding = encoding, ...)
  }
  x <- data.table::setDF(x)
  ## Strip the leading backslash from the task name; on non-English locales
  ## the column may not be named 'TaskName', hence the positional fallback.
  if("TaskName" %in% names(x)){
    try(x$TaskName <- gsub("^\\\\", "", x$TaskName), silent = TRUE)
  }else{
    try(x[, 2] <- gsub("^\\\\", "", x[, 2]), silent = TRUE)
  }
  x
}
#' @title Schedule an R script with the Windows task scheduler.
#' @description Schedule an R script with the Windows task scheduler. E.g. daily, weekly, once, at startup, ...
#' More information about the scheduling format can be found in the docs/schtasks.pdf file inside this package.
#' The rscript file will be scheduled with Rscript.exe and the log of the run will be put in the .log file which can be found in the same directory
#' as the location of the rscript
#'
#' @param taskname a character string with the name of the task. Defaults to the filename. Should not contain any spaces.
#' @param rscript the full path to the .R script with the R code to execute. Should not contain any spaces.
#' @param schedule when to schedule the \code{rscript}.
#' Either one of 'ONCE', 'MONTHLY', 'WEEKLY', 'DAILY', 'HOURLY', 'MINUTE', 'ONLOGON', 'ONIDLE'.
#' @param starttime a timepoint in HH:mm format indicating when to run the script. Defaults to within 62 seconds.
#' @param startdate a date that specifies the first date on which to run the task.
#' Only applicable if schedule is of type 'MONTHLY', 'WEEKLY', 'DAILY', 'HOURLY', 'MINUTE'. Defaults to today in '\%d/\%m/\%Y' format. Change to your locale format if needed.
#' @param days character string with days on which to run the script if schedule is 'WEEKLY' or 'MONTHLY'. Possible values
#' are * (all days). For weekly: 'MON', 'TUE', 'WED', 'THU', 'FRI', 'SAT', 'SUN' or a vector of these in your locale.
#' For monthly: 1:31 or a vector of these.
#' @param months character string with months on which to run the script if schedule is 'MONTHLY'. Possible values
#' are * (all months) or 'JAN', 'FEB', 'MAR', 'APR', 'MAY', 'JUN', 'JUL', 'AUG', 'SEP', 'OCT', 'NOV', 'DEC' or a vector of these in your locale.
#' @param modifier a modifier to apply. See the docs/schtasks.pdf
#' @param idletime integer containing a value that specifies the amount of idle time to wait before
#' running a scheduled ONIDLE task. The valid range is 1 - 999 minutes.
#' @param Rexe path to Rscript.exe which will be used to run the script. Defaults to Rscript at the bin folder of R_HOME.
#' @param rscript_args character string with further arguments passed on to Rscript. See args in \code{\link{Rscript}}.
#' @param rscript_options character string with further options passed on to Rscript. See options in \code{\link{Rscript}}.
#' @param schtasks_extra character string with further schtasks arguments. See the inst/docs/schtasks.pdf
#' @param debug logical to print the system call to screen
#' @param exec_path character string of the path where cmd should be executed. Defaults to system path.
#' @return the system call to schtasks /Create
#' @export
#' @examples
#' myscript <- system.file("extdata", "helloworld.R", package = "taskscheduleR")
#' cat(readLines(myscript), sep = "\n")
#'
#' \dontrun{
#' ## Run script once at a specific timepoint (within 62 seconds)
#' runon <- format(Sys.time() + 62, "%H:%M")
#' taskscheduler_create(taskname = "myfancyscript", rscript = myscript,
#' schedule = "ONCE", starttime = runon)
#'
#' ## Run every day at the same time on 09:10, starting from tomorrow on
#' ## Mark: change the format of startdate to your locale if needed (e.g. US: %m/%d/%Y)
#' taskscheduler_create(taskname = "myfancyscriptdaily", rscript = myscript,
#' schedule = "DAILY", starttime = "09:10", startdate = format(Sys.Date()+1, "%d/%m/%Y"))
#'
#' ## Run every week on Sunday at 09:10
#' taskscheduler_create(taskname = "myfancyscript_sun", rscript = myscript,
#' schedule = "WEEKLY", starttime = "09:10", days = 'SUN')
#'
#' ## Run every 5 minutes, starting from 10:40
#' taskscheduler_create(taskname = "myfancyscript_5min", rscript = myscript,
#' schedule = "MINUTE", starttime = "10:40", modifier = 5)
#'
#' ## Run every minute, giving some command line arguments which can be used in the script itself
#' taskscheduler_create(taskname = "myfancyscript_withargs_a", rscript = myscript,
#' schedule = "MINUTE", rscript_args = "productxyz 20160101")
#' taskscheduler_create(taskname = "myfancyscript_withargs_b", rscript = myscript,
#' schedule = "MINUTE", rscript_args = c("productabc", "20150101"))
#'
#' alltasks <- taskscheduler_ls()
#' subset(alltasks, TaskName %in% c("myfancyscript", "myfancyscriptdaily"))
#' # The field TaskName might have been different on Windows with non-english language locale
#'
#' taskscheduler_delete(taskname = "myfancyscript")
#' taskscheduler_delete(taskname = "myfancyscriptdaily")
#' taskscheduler_delete(taskname = "myfancyscript_sun")
#' taskscheduler_delete(taskname = "myfancyscript_5min")
#' taskscheduler_delete(taskname = "myfancyscript_withargs_a")
#' taskscheduler_delete(taskname = "myfancyscript_withargs_b")
#'
#' ## Have a look at the log
#' mylog <- system.file("extdata", "helloworld.log", package = "taskscheduleR")
#' cat(readLines(mylog), sep = "\n")
#' }
taskscheduler_create <- function(taskname = basename(rscript),
                                 rscript,
                                 schedule = c('ONCE', 'MONTHLY', 'WEEKLY', 'DAILY', 'HOURLY', 'MINUTE', 'ONLOGON', 'ONIDLE'),
                                 starttime = format(Sys.time() + 62, "%H:%M"),
                                 startdate = format(Sys.Date(), "%d/%m/%Y"),
                                 days = c('*', 'MON', 'TUE', 'WED', 'THU', 'FRI', 'SAT', 'SUN', 1:31),
                                 months = c('*', 'JAN', 'FEB', 'MAR', 'APR', 'MAY', 'JUN', 'JUL', 'AUG', 'SEP', 'OCT', 'NOV', 'DEC'),
                                 modifier,
                                 idletime = 60L,
                                 Rexe = file.path(Sys.getenv("R_HOME"), "bin", "Rscript.exe"),
                                 rscript_args = "",
                                 rscript_options = "",
                                 schtasks_extra = "",
                                 debug = FALSE,
                                 exec_path = ""){
  ## Validate the script path up front.
  if(!file.exists(rscript)){
    stop(sprintf("File %s does not exist", rscript))
  }
  if(basename(rscript) == rscript){
    ## BUG FIX: the original called warning("... %s ...", task) where 'task'
    ## was not yet defined at this point and warning() performs no sprintf
    ## substitution, so this branch errored instead of warning.
    warning(sprintf("Filename does not include the full path, provide %s as full path including the directory", rscript))
  }
  schedule <- match.arg(schedule)
  ## Collapse day/month selections to the comma-separated form schtasks expects.
  if("*" %in% days){
    days <- "*"
  }else{
    days <- paste(days, collapse = ",")
  }
  if("*" %in% months){
    months <- "*"
  }else{
    months <- paste(months, collapse = ",")
  }
  ## Evaluate the default taskname before rscript is possibly altered below.
  taskname <- force(taskname)
  if(length(grep(" ", taskname)) > 0){
    taskname <- gsub(" ", "-", taskname)
    message(sprintf("No spaces are allowed in taskname, changing the name of the task to %s", taskname))
  }
  if(length(grep(" ", rscript)) > 0){
    message(sprintf("Full path to filename '%s' contains spaces, it is advised to put your script in another location which contains no spaces", rscript))
  }
  ## Build the command line the scheduler will run: Rscript on the script,
  ## with stdout/stderr appended to a .log file next to the script.
  if (exec_path == "") {
    task <- sprintf("cmd /c %s %s %s %s >> %s 2>&1", Rexe,
                    paste(rscript_options, collapse = " "), shQuote(rscript),
                    paste(rscript_args, collapse = " "), shQuote(sprintf("%s.log",
                                                                         tools::file_path_sans_ext(rscript))))
  } else {
    ## Change directory first when an explicit execution path was requested.
    task <- sprintf("cmd /c %s %s %s %s %s >> %s 2>&1", paste("cd", exec_path, "&", collapse = " "), Rexe,
                    paste(rscript_options, collapse = " "), shQuote(rscript),
                    paste(rscript_args, collapse = " "), shQuote(sprintf("%s.log",
                                                                         tools::file_path_sans_ext(rscript))))
  }
  ## schtasks /TR has a 260-character limit.
  if(nchar(task) > 260){
    warning(sprintf("Passing on this to the TR argument of schtasks.exe: %s, this is too long. Consider putting your scripts into another folder", task))
  }
  cmd <- sprintf('schtasks /Create /TN %s /TR %s /SC %s',
                 shQuote(taskname, type = "cmd"),
                 shQuote(task, type = "cmd"),
                 schedule)
  ## Append the schedule-specific switches.
  if(!missing(modifier)){
    cmd <- sprintf("%s /MO %s", cmd, modifier)
  }
  if(schedule %in% c('ONIDLE')){
    cmd <- sprintf("%s /I %s", cmd, idletime)
  }
  if(!schedule %in% c('ONLOGON', 'ONIDLE')){
    cmd <- sprintf("%s /ST %s", cmd, starttime)
  }
  if(!schedule %in% c('ONLOGON', 'ONIDLE')){
    if(schedule %in% "ONCE" && missing(startdate)){
      ## run once now
      cmd <- cmd
    }else{
      cmd <- sprintf("%s /SD %s", cmd, shQuote(startdate))
    }
  }
  if(schedule %in% c('WEEKLY', 'MONTHLY')){
    cmd <- sprintf("%s /D %s", cmd, days)
  }
  if(schedule %in% c('MONTHLY')){
    cmd <- sprintf("%s /M %s", cmd, months)
  }
  cmd <- sprintf("%s %s", cmd, schtasks_extra)
  if(debug){
    message(sprintf("Creating task schedule: %s", cmd))
  }
  system(cmd, intern = TRUE)
}
#' @title Delete a specific task which was scheduled in the Windows task scheduler.
#' @description Delete a specific task which was scheduled in the Windows task scheduler.
#'
#' @param taskname the name of the task to delete. See the example.
#' @return the system call to schtasks /Delete
#' @export
#' @examples
#' \dontrun{
#' x <- taskscheduler_ls()
#' x
#' # The field TaskName might have been different on Windows with non-english language locale
#' task <- x$TaskName[1]
#' taskscheduler_delete(taskname = task)
#' }
taskscheduler_delete <- function(taskname){
  ## Remove the named task; /F forces deletion without a confirmation prompt.
  delete_cmd <- paste0("schtasks /Delete /TN ", shQuote(taskname, type = "cmd"), " /F")
  system(delete_cmd, intern = FALSE)
}
#' @title Immediately run a specific task available in the Windows task scheduler.
#' @description Immediately run a specific task available in the Windows task scheduler.
#'
#' @param taskname the name of the task to run. See the example.
#' @return the system call to schtasks /Run
#' @export taskscheduler_runnow
#' @export taskcheduler_runnow
#' @aliases taskcheduler_runnow
#' @examples
#' \dontrun{
#' myscript <- system.file("extdata", "helloworld.R", package = "taskscheduleR")
#' taskscheduler_create(taskname = "myfancyscript", rscript = myscript,
#' schedule = "ONCE", starttime = format(Sys.time() + 10*60, "%H:%M"))
#'
#' taskscheduler_runnow("myfancyscript")
#' Sys.sleep(5)
#' taskscheduler_stop("myfancyscript")
#'
#'
#' taskscheduler_delete(taskname = "myfancyscript")
#' }
taskscheduler_runnow <- function(taskname){
  ## Trigger an immediate run of the named scheduled task.
  ## Returns the exit status of the schtasks call.
  quoted_name <- shQuote(taskname, type = "cmd")
  system(sprintf('schtasks /Run /TN %s', quoted_name), intern = FALSE)
}
#' @title Stop the run of a specific task which is running in the Windows task scheduler.
#' @description Stop the run of a specific task which is running in the Windows task scheduler.
#'
#' @param taskname the name of the task to stop. See the example.
#' @return the system call to schtasks /End
#' @export taskscheduler_stop
#' @export taskcheduler_stop
#' @aliases taskcheduler_stop
#' @examples
#' \dontrun{
#' myscript <- system.file("extdata", "helloworld.R", package = "taskscheduleR")
#' taskscheduler_create(taskname = "myfancyscript", rscript = myscript,
#' schedule = "ONCE", starttime = format(Sys.time() + 10*60, "%H:%M"))
#'
#' taskscheduler_runnow("myfancyscript")
#' Sys.sleep(5)
#' taskscheduler_stop("myfancyscript")
#'
#'
#' taskscheduler_delete(taskname = "myfancyscript")
#' }
taskscheduler_stop <- function(taskname){
  ## End the currently running instance of the named scheduled task.
  ## Returns the exit status of the schtasks call.
  quoted_name <- shQuote(taskname, type = "cmd")
  system(sprintf('schtasks /End /TN %s', quoted_name), intern = FALSE)
}
## Deprecated alias (historical misspelling of taskscheduler_stop);
## kept only for backward compatibility. Emits a deprecation warning and delegates.
taskcheduler_stop <- function(taskname){
  .Deprecated("taskscheduler_stop", msg = "Use taskscheduler_stop instead of taskcheduler_stop")
  taskscheduler_stop(taskname)
}
## Deprecated alias (historical misspelling of taskscheduler_runnow);
## kept only for backward compatibility. Emits a deprecation warning and delegates.
taskcheduler_runnow <- function(taskname){
  ## BUG FIX: .Deprecated()'s first argument previously named "taskscheduler_stop";
  ## the correct replacement for this alias is taskscheduler_runnow.
  .Deprecated("taskscheduler_runnow", msg = "Use taskscheduler_runnow instead of taskcheduler_runnow")
  taskscheduler_runnow(taskname)
}
|
/R/taskscheduleR.R
|
no_license
|
cran/taskscheduleR
|
R
| false
| false
| 13,359
|
r
|
#' @title Get all the tasks which are currently scheduled at the Windows task scheduler.
#' @description Get all the tasks which are currently scheduled at the Windows task scheduler.
#'
#' @return a data.frame with scheduled tasks as returned by schtasks /Query for which the Taskname or second
#' column in the dataset the preceding \\ is removed
#' @param encoding encoding of the CSV which schtasks.exe generates. Defaults to UTF-8.
#' @param ... optional arguments passed on to \code{fread} in order to read in the CSV file which schtasks generates
#' @export
#' @examples
#' x <- taskscheduler_ls()
#' x
taskscheduler_ls <- function(encoding = 'UTF-8', ...){
  ## Switch the console code page to UTF-8 so schtasks output is not mangled
  ## on non-English locales; output is captured (not printed) on purpose.
  change_code_page <- system("chcp 65001", intern = TRUE)
  cmd <- sprintf('schtasks /Query /FO CSV /V')
  x <- system(cmd, intern = TRUE)
  f <- tempfile()
  ## BUG FIX: register cleanup immediately after creating the temp file so it is
  ## removed even when parsing below fails (previously on.exit was only reached
  ## at the end of the function, leaking the file on error).
  on.exit(file.remove(f), add = TRUE)
  writeLines(x, f)
  ## fread is fast but can fail on some locales/encodings; fall back to read.csv.
  x <- try(data.table::fread(f, encoding = encoding, ...), silent = TRUE)
  if(inherits(x, "try-error")){
    x <- utils::read.csv(f, check.names = FALSE, stringsAsFactors=FALSE, encoding = encoding, ...)
  }
  x <- data.table::setDF(x)
  ## schtasks prefixes task names with a backslash; strip it for readability.
  ## On non-English locales the column may not be called "TaskName", so fall
  ## back to the second column.
  if("TaskName" %in% names(x)){
    try(x$TaskName <- gsub("^\\\\", "", x$TaskName), silent = TRUE)
  }else{
    try(x[, 2] <- gsub("^\\\\", "", x[, 2]), silent = TRUE)
  }
  x
}
#' @title Schedule an R script with the Windows task scheduler.
#' @description Schedule an R script with the Windows task scheduler. E.g. daily, weekly, once, at startup, ...
#' More information about the scheduling format can be found in the docs/schtasks.pdf file inside this package.
#' The rscript file will be scheduled with Rscript.exe and the log of the run will be put in the .log file which can be found in the same directory
#' as the location of the rscript
#'
#' @param taskname a character string with the name of the task. Defaults to the filename. Should not contain any spaces.
#' @param rscript the full path to the .R script with the R code to execute. Should not contain any spaces.
#' @param schedule when to schedule the \code{rscript}.
#' Either one of 'ONCE', 'MONTHLY', 'WEEKLY', 'DAILY', 'HOURLY', 'MINUTE', 'ONLOGON', 'ONIDLE'.
#' @param starttime a timepoint in HH:mm format indicating when to run the script. Defaults to within 62 seconds.
#' @param startdate a date that specifies the first date on which to run the task.
#' Only applicable if schedule is of type 'MONTHLY', 'WEEKLY', 'DAILY', 'HOURLY', 'MINUTE'. Defaults to today in '\%d/\%m/\%Y' format. Change to your locale format if needed.
#' @param days character string with days on which to run the script if schedule is 'WEEKLY' or 'MONTHLY'. Possible values
#' are * (all days). For weekly: 'MON', 'TUE', 'WED', 'THU', 'FRI', 'SAT', 'SUN' or a vector of these in your locale.
#' For monthly: 1:31 or a vector of these.
#' @param months character string with months on which to run the script if schedule is 'MONTHLY'. Possible values
#' are * (all months) or 'JAN', 'FEB', 'MAR', 'APR', 'MAY', 'JUN', 'JUL', 'AUG', 'SEP', 'OCT', 'NOV', 'DEC' or a vector of these in your locale.
#' @param modifier a modifier to apply. See the docs/schtasks.pdf
#' @param idletime integer containing a value that specifies the amount of idle time to wait before
#' running a scheduled ONIDLE task. The valid range is 1 - 999 minutes.
#' @param Rexe path to Rscript.exe which will be used to run the script. Defaults to Rscript at the bin folder of R_HOME.
#' @param rscript_args character string with further arguments passed on to Rscript. See args in \code{\link{Rscript}}.
#' @param rscript_options character string with further options passed on to Rscript. See options in \code{\link{Rscript}}.
#' @param schtasks_extra character string with further schtasks arguments. See the inst/docs/schtasks.pdf
#' @param debug logical to print the system call to screen
#' @param exec_path character string of the path where cmd should be executed. Defaults to system path.
#' @return the system call to schtasks /Create
#' @export
#' @examples
#' myscript <- system.file("extdata", "helloworld.R", package = "taskscheduleR")
#' cat(readLines(myscript), sep = "\n")
#'
#' \dontrun{
#' ## Run script once at a specific timepoint (within 62 seconds)
#' runon <- format(Sys.time() + 62, "%H:%M")
#' taskscheduler_create(taskname = "myfancyscript", rscript = myscript,
#' schedule = "ONCE", starttime = runon)
#'
#' ## Run every day at the same time on 09:10, starting from tomorrow on
#' ## Mark: change the format of startdate to your locale if needed (e.g. US: %m/%d/%Y)
#' taskscheduler_create(taskname = "myfancyscriptdaily", rscript = myscript,
#' schedule = "DAILY", starttime = "09:10", startdate = format(Sys.Date()+1, "%d/%m/%Y"))
#'
#' ## Run every week on Sunday at 09:10
#' taskscheduler_create(taskname = "myfancyscript_sun", rscript = myscript,
#' schedule = "WEEKLY", starttime = "09:10", days = 'SUN')
#'
#' ## Run every 5 minutes, starting from 10:40
#' taskscheduler_create(taskname = "myfancyscript_5min", rscript = myscript,
#' schedule = "MINUTE", starttime = "10:40", modifier = 5)
#'
#' ## Run every minute, giving some command line arguments which can be used in the script itself
#' taskscheduler_create(taskname = "myfancyscript_withargs_a", rscript = myscript,
#' schedule = "MINUTE", rscript_args = "productxyz 20160101")
#' taskscheduler_create(taskname = "myfancyscript_withargs_b", rscript = myscript,
#' schedule = "MINUTE", rscript_args = c("productabc", "20150101"))
#'
#' alltasks <- taskscheduler_ls()
#' subset(alltasks, TaskName %in% c("myfancyscript", "myfancyscriptdaily"))
#' # The field TaskName might have been different on Windows with non-english language locale
#'
#' taskscheduler_delete(taskname = "myfancyscript")
#' taskscheduler_delete(taskname = "myfancyscriptdaily")
#' taskscheduler_delete(taskname = "myfancyscript_sun")
#' taskscheduler_delete(taskname = "myfancyscript_5min")
#' taskscheduler_delete(taskname = "myfancyscript_withargs_a")
#' taskscheduler_delete(taskname = "myfancyscript_withargs_b")
#'
#' ## Have a look at the log
#' mylog <- system.file("extdata", "helloworld.log", package = "taskscheduleR")
#' cat(readLines(mylog), sep = "\n")
#' }
taskscheduler_create <- function(taskname = basename(rscript),
                                 rscript,
                                 schedule = c('ONCE', 'MONTHLY', 'WEEKLY', 'DAILY', 'HOURLY', 'MINUTE', 'ONLOGON', 'ONIDLE'),
                                 starttime = format(Sys.time() + 62, "%H:%M"),
                                 startdate = format(Sys.Date(), "%d/%m/%Y"),
                                 days = c('*', 'MON', 'TUE', 'WED', 'THU', 'FRI', 'SAT', 'SUN', 1:31),
                                 months = c('*', 'JAN', 'FEB', 'MAR', 'APR', 'MAY', 'JUN', 'JUL', 'AUG', 'SEP', 'OCT', 'NOV', 'DEC'),
                                 modifier,
                                 idletime = 60L,
                                 Rexe = file.path(Sys.getenv("R_HOME"), "bin", "Rscript.exe"),
                                 rscript_args = "",
                                 rscript_options = "",
                                 schtasks_extra = "",
                                 debug = FALSE,
                                 exec_path = ""){
  ## Validate the script location before building any command.
  if(!file.exists(rscript)){
    stop(sprintf("File %s does not exist", rscript))
  }
  if(basename(rscript) == rscript){
    ## BUG FIX: previously this called warning("... %s ...", task) which
    ## referenced the not-yet-defined object `task` (a runtime error) and
    ## warning() performs no sprintf-style formatting anyway.
    warning(sprintf("Filename does not include the full path, provide %s as full path including the directory", rscript))
  }
  schedule <- match.arg(schedule)
  ## Collapse day/month selections into the comma-separated form schtasks expects.
  if("*" %in% days){
    days <- "*"
  }else{
    days <- paste(days, collapse = ",")
  }
  if("*" %in% months){
    months <- "*"
  }else{
    months <- paste(months, collapse = ",")
  }
  ## Force evaluation of the default taskname (basename(rscript)) now.
  taskname <- force(taskname)
  if(length(grep(" ", taskname)) > 0){
    taskname <- gsub(" ", "-", taskname)
    message(sprintf("No spaces are allowed in taskname, changing the name of the task to %s", taskname))
  }
  if(length(grep(" ", rscript)) > 0){
    message(sprintf("Full path to filename '%s' contains spaces, it is advised to put your script in another location which contains no spaces", rscript))
  }
  ## Build the command the task will execute: Rscript with options/arguments,
  ## stdout+stderr appended to a .log file next to the script. When exec_path
  ## is given, cd into it first so relative paths inside the script resolve there.
  if (exec_path == "") {
    task <- sprintf("cmd /c %s %s %s %s >> %s 2>&1", Rexe,
                    paste(rscript_options, collapse = " "), shQuote(rscript),
                    paste(rscript_args, collapse = " "), shQuote(sprintf("%s.log",
                                                                         tools::file_path_sans_ext(rscript))))
  } else {
    task <- sprintf("cmd /c %s %s %s %s %s >> %s 2>&1", paste("cd",exec_path, "&", collapse = " "), Rexe,
                    paste(rscript_options, collapse = " "), shQuote(rscript),
                    paste(rscript_args, collapse = " "), shQuote(sprintf("%s.log",
                                                                         tools::file_path_sans_ext(rscript))))
  }
  ## schtasks limits the /TR argument length; warn when we are likely to exceed it.
  if(nchar(task) > 260){
    warning(sprintf("Passing on this to the TR argument of schtasks.exe: %s, this is too long. Consider putting your scripts into another folder", task))
  }
  cmd <- sprintf('schtasks /Create /TN %s /TR %s /SC %s',
                 shQuote(taskname, type = "cmd"),
                 shQuote(task, type = "cmd"),
                 schedule)
  ## Optional schedule refinements, appended only when applicable.
  if(!missing(modifier)){
    cmd <- sprintf("%s /MO %s", cmd, modifier)
  }
  if(schedule %in% c('ONIDLE')){
    cmd <- sprintf("%s /I %s", cmd, idletime)
  }
  if(!schedule %in% c('ONLOGON', 'ONIDLE')){
    cmd <- sprintf("%s /ST %s", cmd, starttime)
  }
  if(!schedule %in% c('ONLOGON', 'ONIDLE')){
    if(schedule %in% "ONCE" && missing(startdate)){
      ## run once now
      cmd <- cmd
    }else{
      cmd <- sprintf("%s /SD %s", cmd, shQuote(startdate))
    }
  }
  if(schedule %in% c('WEEKLY', 'MONTHLY')){
    cmd <- sprintf("%s /D %s", cmd, days)
  }
  if(schedule %in% c('MONTHLY')){
    cmd <- sprintf("%s /M %s", cmd, months)
  }
  cmd <- sprintf("%s %s", cmd, schtasks_extra)
  if(debug){
    message(sprintf("Creating task schedule: %s", cmd))
  }
  system(cmd, intern = TRUE)
}
#' @title Delete a specific task which was scheduled in the Windows task scheduler.
#' @description Delete a specific task which was scheduled in the Windows task scheduler.
#'
#' @param taskname the name of the task to delete. See the example.
#' @return the system call to schtasks /Delete
#' @export
#' @examples
#' \dontrun{
#' x <- taskscheduler_ls()
#' x
#' # The field TaskName might have been different on Windows with non-english language locale
#' task <- x$TaskName[1]
#' taskscheduler_delete(taskname = task)
#' }
taskscheduler_delete <- function(taskname){
  ## Force-delete (/F) the named task from the Windows task scheduler.
  ## Returns the exit status of the schtasks call.
  quoted_name <- shQuote(taskname, type = "cmd")
  system(sprintf('schtasks /Delete /TN %s /F', quoted_name), intern = FALSE)
}
#' @title Immediately run a specific task available in the Windows task scheduler.
#' @description Immediately run a specific task available in the Windows task scheduler.
#'
#' @param taskname the name of the task to run. See the example.
#' @return the system call to schtasks /Run
#' @export taskscheduler_runnow
#' @export taskcheduler_runnow
#' @aliases taskcheduler_runnow
#' @examples
#' \dontrun{
#' myscript <- system.file("extdata", "helloworld.R", package = "taskscheduleR")
#' taskscheduler_create(taskname = "myfancyscript", rscript = myscript,
#' schedule = "ONCE", starttime = format(Sys.time() + 10*60, "%H:%M"))
#'
#' taskscheduler_runnow("myfancyscript")
#' Sys.sleep(5)
#' taskscheduler_stop("myfancyscript")
#'
#'
#' taskscheduler_delete(taskname = "myfancyscript")
#' }
taskscheduler_runnow <- function(taskname){
  ## Trigger an immediate run of the named scheduled task.
  ## Returns the exit status of the schtasks call.
  quoted_name <- shQuote(taskname, type = "cmd")
  system(sprintf('schtasks /Run /TN %s', quoted_name), intern = FALSE)
}
#' @title Stop the run of a specific task which is running in the Windows task scheduler.
#' @description Stop the run of a specific task which is running in the Windows task scheduler.
#'
#' @param taskname the name of the task to stop. See the example.
#' @return the system call to schtasks /End
#' @export taskscheduler_stop
#' @export taskcheduler_stop
#' @aliases taskcheduler_stop
#' @examples
#' \dontrun{
#' myscript <- system.file("extdata", "helloworld.R", package = "taskscheduleR")
#' taskscheduler_create(taskname = "myfancyscript", rscript = myscript,
#' schedule = "ONCE", starttime = format(Sys.time() + 10*60, "%H:%M"))
#'
#' taskscheduler_runnow("myfancyscript")
#' Sys.sleep(5)
#' taskscheduler_stop("myfancyscript")
#'
#'
#' taskscheduler_delete(taskname = "myfancyscript")
#' }
taskscheduler_stop <- function(taskname){
  ## End the currently running instance of the named scheduled task.
  ## Returns the exit status of the schtasks call.
  quoted_name <- shQuote(taskname, type = "cmd")
  system(sprintf('schtasks /End /TN %s', quoted_name), intern = FALSE)
}
## Deprecated alias (historical misspelling of taskscheduler_stop);
## kept only for backward compatibility. Emits a deprecation warning and delegates.
taskcheduler_stop <- function(taskname){
  .Deprecated("taskscheduler_stop", msg = "Use taskscheduler_stop instead of taskcheduler_stop")
  taskscheduler_stop(taskname)
}
## Deprecated alias (historical misspelling of taskscheduler_runnow);
## kept only for backward compatibility. Emits a deprecation warning and delegates.
taskcheduler_runnow <- function(taskname){
  ## BUG FIX: .Deprecated()'s first argument previously named "taskscheduler_stop";
  ## the correct replacement for this alias is taskscheduler_runnow.
  .Deprecated("taskscheduler_runnow", msg = "Use taskscheduler_runnow instead of taskcheduler_runnow")
  taskscheduler_runnow(taskname)
}
|
#' Get the GBIF backbone taxon ID from taxonomic names.
#'
#' @export
#' @param sciname character; scientific name.
#' @param ask logical; should get_colid be run in interactive mode?
#' If TRUE and more than one ID is found for the species, the user is asked for
#' input. If FALSE NA is returned for multiple matches.
#' @param verbose logical; If TRUE the actual taxon queried is printed on the console.
#' @param rows numeric; Any number from 1 to infinity. If the default NA, all rows are considered.
#' Note that this function still only gives back a gbifid class object with one to many identifiers.
#' See \code{\link[taxize]{get_gbifid_}} to get back all, or a subset, of the raw data that you are
#' presented during the ask process.
#' @param phylum (character) A phylum (aka division) name. Optional. See \code{Filtering}
#' below.
#' @param class (character) A class name. Optional. See \code{Filtering} below.
#' @param order (character) An order name. Optional. See \code{Filtering} below.
#' @param family (character) A family name. Optional. See \code{Filtering} below.
#' @param rank (character) A taxonomic rank name. See \code{\link{rank_ref}} for possible
#' options. Though note that some data sources use atypical ranks, so inspect the
#' data itself for options. Optional. See \code{Filtering} below.
#' @param method (character) one of "backbone" or "lookup". See Details.
#' @param x Input to \code{\link{as.gbifid}}
#' @param check logical; Check if ID matches any existing on the DB, only used in
#' \code{\link{as.gbifid}}
#' @param ... Ignored
#' @template getreturn
#'
#' @seealso \code{\link[taxize]{get_tsn}}, \code{\link[taxize]{get_uid}},
#' \code{\link[taxize]{get_tpsid}}, \code{\link[taxize]{get_eolid}},
#' \code{\link[taxize]{get_colid}}, \code{\link[taxize]{get_ids}},
#' \code{\link[taxize]{classification}}
#'
#' @author Scott Chamberlain, \email{myrmecocystus@@gmail.com}
#'
#' @details Internally in this function we use a function to search GBIF's taxonomy,
#' and if we find an exact match we return the ID for that match. If there isn't an
#' exact match we return the options to you to pick from.
#'
#' @section method parameter:
#' "backbone" uses the \code{/species/match} GBIF API route, matching against their
#' backbone taxonomy. We turn on fuzzy matching by default, as the search without
#' fuzzy against backbone is quite narrow. "lookup" uses the \code{/species/search}
#' GBIF API route, doing a full text search of name usages covering scientific
#' and vernacular named, species descriptions, distributions and the entire
#' classification.
#'
#' @section Filtering:
#' The parameters \code{phylum}, \code{class}, \code{order}, \code{family}, and \code{rank}
#' are not used in the search to the data provider, but are used in filtering the data down
#' to a subset that is closer to the target you want. For all these parameters,
#' you can use regex strings since we use \code{\link{grep}} internally to match.
#' Filtering narrows down to the set that matches your query, and removes the rest.
#'
#' @examples \dontrun{
#' get_gbifid(sciname='Poa annua')
#' get_gbifid(sciname='Pinus contorta')
#' get_gbifid(sciname='Puma concolor')
#'
#' # multiple names
#' get_gbifid(c("Poa annua", "Pinus contorta"))
#'
#' # specify rows to limit choices available
#' get_gbifid(sciname='Pinus')
#' get_gbifid(sciname='Pinus', rows=10)
#' get_gbifid(sciname='Pinus', rows=1:3)
#'
#' # When not found, NA given
#' get_gbifid(sciname="uaudnadndj")
#' get_gbifid(c("Chironomus riparius", "uaudnadndj"))
#'
#' # Narrow down results to a division or rank, or both
#' ## Satyrium example
#' ### Results w/o narrowing
#' get_gbifid("Satyrium")
#' ### w/ phylum
#' get_gbifid("Satyrium", phylum = "Tracheophyta")
#' get_gbifid("Satyrium", phylum = "Arthropoda")
#' ### w/ phylum & rank
#' get_gbifid("Satyrium", phylum = "Arthropoda", rank = "genus")
#'
#' ## Rank example
#' get_gbifid("Poa", method = "lookup")
#' get_gbifid("Poa", method = "lookup", rank = "genus")
#' get_gbifid("Poa", method = "lookup", family = "Thripidae")
#'
#' # Fuzzy filter on any filtering fields
#' ## uses grep on the inside
#' get_gbifid("Satyrium", phylum = "arthropoda")
#' get_gbifid("A*", method = "lookup", order = "*tera")
#' get_gbifid("A*", method = "lookup", order = "*ales")
#'
#' # Convert a uid without class information to a uid class
#' as.gbifid(get_gbifid("Poa annua")) # already a uid, returns the same
#' as.gbifid(get_gbifid(c("Poa annua","Puma concolor"))) # same
#' as.gbifid(2704179) # numeric
#' as.gbifid(c(2704179,2435099,3171445)) # numeric vector, length > 1
#' as.gbifid("2704179") # character
#' as.gbifid(c("2704179","2435099","3171445")) # character vector, length > 1
#' as.gbifid(list("2704179","2435099","3171445")) # list, either numeric or character
#' ## dont check, much faster
#' as.gbifid("2704179", check=FALSE)
#' as.gbifid(2704179, check=FALSE)
#' as.gbifid(2704179, check=FALSE)
#' as.gbifid(c("2704179","2435099","3171445"), check=FALSE)
#' as.gbifid(list("2704179","2435099","3171445"), check=FALSE)
#'
#' (out <- as.gbifid(c(2704179,2435099,3171445)))
#' data.frame(out)
#' as.uid( data.frame(out) )
#'
#' # Get all data back
#' get_gbifid_("Puma concolor")
#' get_gbifid_(c("Pinus", "uaudnadndj"))
#' get_gbifid_(c("Pinus", "Puma"), rows=5)
#' get_gbifid_(c("Pinus", "Puma"), rows=1:5)
#'
#' # use curl options
#' library("httr")
#' get_gbifid("Quercus douglasii", config=verbose())
#' bb <- get_gbifid("Quercus douglasii", config=progress())
#' }
get_gbifid <- function(sciname, ask = TRUE, verbose = TRUE, rows = NA,
                       phylum = NULL, class = NULL, order = NULL,
                       family = NULL, rank = NULL, method = "backbone", ...) {
  ## Worker: resolve one scientific name to a GBIF backbone key.
  ## Returns list(id, att, multiple, direct) for the caller to assemble.
  fun <- function(sciname, ask, verbose, rows, ...) {
    direct <- FALSE
    mssg(verbose, "\nRetrieving data for taxon '", sciname, "'\n")
    ## Query either the backbone matcher or the full-text lookup route.
    df <- switch(
      method,
      backbone = gbif_name_backbone(sciname, ...),
      lookup = gbif_name_lookup(sciname, ...)
    )
    mm <- NROW(df) > 1
    #df <- sub_rows(df, rows)
    if (is.null(df)) df <- data.frame(NULL)
    if (nrow(df) == 0) {
      mssg(verbose, "Not found. Consider checking the spelling or alternate classification")
      id <- NA_character_
      att <- "not found"
    } else {
      names(df)[1] <- 'gbifid'
      id <- df$gbifid
      att <- "found"
    }
    # not found
    if (length(id) == 0) {
      mssg(verbose, "Not found. Consider checking the spelling or alternate classification")
      id <- NA_character_
      att <- "not found"
    }
    # more than one found -> user input
    if (length(id) > 1) {
      # check for exact match
      matchtmp <- df[as.character(df$canonicalname) %in% sciname, "gbifid"]
      if (length(matchtmp) == 1) {
        id <- as.character(matchtmp)
        direct <- TRUE
      } else {
        if (ask) {
          ## Narrow candidates with any user-supplied taxonomic filters (regex via filt).
          if (!is.null(phylum) || !is.null(class) || !is.null(order) ||
              !is.null(family) || !is.null(rank)) {
            df <- filt(df, "phylum", phylum)
            df <- filt(df, "class", class)
            df <- filt(df, "order", order)
            df <- filt(df, "family", family)
            df <- filt(df, "rank", rank)
          }
          df <- sub_rows(df, rows)
          if (NROW(df) == 0) {
            id <- NA_character_
            att <- "not found"
          } else {
            id <- df$gbifid
            if (length(id) == 1) {
              rank_taken <- as.character(df$rank)
              att <- "found"
            }
          }
          ## Still ambiguous: show candidates and ask the user to pick a row.
          if (length(id) > 1) {
            # limit to subset of columns for ease of use
            df <- df[, switch(method, backbone = gbif_cols_show_backbone, lookup = gbif_cols_show_lookup)]
            # prompt
            message("\n\n")
            ## BUG FIX: prompt previously said "eolid" (copy-paste from get_eolid);
            ## this function returns GBIF identifiers.
            message("\nMore than one gbifid found for taxon '", sciname, "'!\n
          Enter rownumber of taxon (other inputs will return 'NA'):\n")
            rownames(df) <- 1:nrow(df)
            print(df)
            take <- scan(n = 1, quiet = TRUE, what = 'raw')
            if (length(take) == 0) {
              take <- 'notake'
              att <- 'nothing chosen'
            }
            if (take %in% seq_len(nrow(df))) {
              take <- as.numeric(take)
              message("Input accepted, took gbifid '", as.character(df$gbifid[take]), "'.\n")
              id <- as.character(df$gbifid[take])
              att <- "found"
            } else {
              id <- NA_character_
              att <- "not found"
              mssg(verbose, "\nReturned 'NA'!\n\n")
            }
          }
        } else {
          id <- NA_character_
          att <- "NA due to ask=FALSE"
        }
      }
    }
    list(id = id, att = att, multiple = mm, direct = direct)
  }
  out <- lapply(as.character(sciname), fun, ask, verbose, rows, ...)
  ## Assemble a classed character vector carrying match metadata as attributes.
  ids <- structure(as.character(unlist(pluck(out, "id"))), class = "gbifid",
                   match = pluck(out, "att", ""),
                   multiple_matches = pluck(out, "multiple", logical(1)),
                   pattern_match = pluck(out, "direct", logical(1)))
  add_uri(ids, 'http://www.gbif.org/species/%s')
}
#' @export
#' @rdname get_gbifid
as.gbifid <- function(x, check=FALSE) {
  ## S3 generic: convert input to a "gbifid" object.
  UseMethod("as.gbifid")
}
#' @export
#' @rdname get_gbifid
as.gbifid.gbifid <- function(x, check=FALSE) {
  ## Already a gbifid: return unchanged.
  x
}
#' @export
#' @rdname get_gbifid
as.gbifid.character <- function(x, check=TRUE) {
  ## Single value: build one gbifid; otherwise collapse the vector element-wise.
  if (length(x) == 1) {
    make_gbifid(x, check)
  } else {
    collapse(x, make_gbifid, "gbifid", check = check)
  }
}
#' @export
#' @rdname get_gbifid
as.gbifid.list <- function(x, check=TRUE) {
  ## Single element: build one gbifid; otherwise collapse the list element-wise.
  if (length(x) == 1) {
    make_gbifid(x, check)
  } else {
    collapse(x, make_gbifid, "gbifid", check = check)
  }
}
#' @export
#' @rdname get_gbifid
as.gbifid.numeric <- function(x, check=TRUE) {
  ## Coerce to character and dispatch again via the generic.
  as.gbifid(as.character(x), check)
}
#' @export
#' @rdname get_gbifid
as.gbifid.data.frame <- function(x, check = TRUE) {
  ## Rebuild the classed id vector from a data.frame produced by
  ## as.data.frame.gbifid (columns: ids, match, multiple_matches, pattern_match, uri).
  structure(
    x$ids,
    class = "gbifid",
    match = x$match,
    multiple_matches = x$multiple_matches,
    pattern_match = x$pattern_match,
    uri = x$uri
  )
}
#' @export
#' @rdname get_gbifid
as.data.frame.gbifid <- function(x, ...){
  ## Flatten a gbifid object and its attributes into a data.frame, one row per id.
  ids <- as.character(unclass(x))
  data.frame(ids = ids,
             class = "gbifid",
             match = attr(x, "match"),
             multiple_matches = attr(x, "multiple_matches"),
             pattern_match = attr(x, "pattern_match"),
             uri = attr(x, "uri"),
             stringsAsFactors = FALSE)
}
## Build a single gbifid object (optionally verifying the key exists on GBIF).
make_gbifid <- function(x, check=TRUE) make_generic(x, 'http://www.gbif.org/species/%s', "gbifid", check)
## Check whether a GBIF taxon key exists by querying the name-usage endpoint.
## Returns FALSE only when the call errored and no key came back; TRUE otherwise.
check_gbifid <- function(x){
  tryid <- tryCatch(gbif_name_usage(key = x), error = function(e) e)
  ## inherits() is the idiomatic class test ("cls" %in% class(x) is discouraged).
  if ( inherits(tryid, "error") && is.null(tryid$key) ) FALSE else TRUE
}
#' @export
#' @rdname get_gbifid
get_gbifid_ <- function(sciname, verbose = TRUE, rows = NA, method = "backbone"){
  ## Return the raw candidate tables, one named list element per input name.
  res <- lapply(sciname, get_gbifd_help, verbose = verbose, rows = rows, method = method)
  stats::setNames(res, sciname)
}
## Worker for get_gbifid_(): fetch candidate matches for one name and return
## the (row-subsetted) raw data.frame, or NULL when nothing matched.
get_gbifd_help <- function(sciname, verbose, rows, method){
  mssg(verbose, "\nRetrieving data for taxon '", sciname, "'\n")
  ## Query either the backbone matcher or the full-text lookup route.
  df <- switch(
    method,
    backbone = gbif_name_backbone(sciname),
    lookup = gbif_name_lookup(sciname)
  )
  ## nmslwr presumably normalizes column names to lower case -- verify in pkg utils.
  if (!is.null(df)) df <- nmslwr(df)
  sub_rows(df, rows)
}
|
/R/get_gbifid.R
|
permissive
|
ktargows/taxize
|
R
| false
| false
| 11,246
|
r
|
#' Get the GBIF backbone taxon ID from taxonomic names.
#'
#' @export
#' @param sciname character; scientific name.
#' @param ask logical; should get_colid be run in interactive mode?
#' If TRUE and more than one ID is found for the species, the user is asked for
#' input. If FALSE NA is returned for multiple matches.
#' @param verbose logical; If TRUE the actual taxon queried is printed on the console.
#' @param rows numeric; Any number from 1 to infinity. If the default NA, all rows are considered.
#' Note that this function still only gives back a gbifid class object with one to many identifiers.
#' See \code{\link[taxize]{get_gbifid_}} to get back all, or a subset, of the raw data that you are
#' presented during the ask process.
#' @param phylum (character) A phylum (aka division) name. Optional. See \code{Filtering}
#' below.
#' @param class (character) A class name. Optional. See \code{Filtering} below.
#' @param order (character) An order name. Optional. See \code{Filtering} below.
#' @param family (character) A family name. Optional. See \code{Filtering} below.
#' @param rank (character) A taxonomic rank name. See \code{\link{rank_ref}} for possible
#' options. Though note that some data sources use atypical ranks, so inspect the
#' data itself for options. Optional. See \code{Filtering} below.
#' @param method (character) one of "backbone" or "lookup". See Details.
#' @param x Input to \code{\link{as.gbifid}}
#' @param check logical; Check if ID matches any existing on the DB, only used in
#' \code{\link{as.gbifid}}
#' @param ... Ignored
#' @template getreturn
#'
#' @seealso \code{\link[taxize]{get_tsn}}, \code{\link[taxize]{get_uid}},
#' \code{\link[taxize]{get_tpsid}}, \code{\link[taxize]{get_eolid}},
#' \code{\link[taxize]{get_colid}}, \code{\link[taxize]{get_ids}},
#' \code{\link[taxize]{classification}}
#'
#' @author Scott Chamberlain, \email{myrmecocystus@@gmail.com}
#'
#' @details Internally in this function we use a function to search GBIF's taxonomy,
#' and if we find an exact match we return the ID for that match. If there isn't an
#' exact match we return the options to you to pick from.
#'
#' @section method parameter:
#' "backbone" uses the \code{/species/match} GBIF API route, matching against their
#' backbone taxonomy. We turn on fuzzy matching by default, as the search without
#' fuzzy against backbone is quite narrow. "lookup" uses the \code{/species/search}
#' GBIF API route, doing a full text search of name usages covering scientific
#' and vernacular named, species descriptions, distributions and the entire
#' classification.
#'
#' @section Filtering:
#' The parameters \code{phylum}, \code{class}, \code{order}, \code{family}, and \code{rank}
#' are not used in the search to the data provider, but are used in filtering the data down
#' to a subset that is closer to the target you want. For all these parameters,
#' you can use regex strings since we use \code{\link{grep}} internally to match.
#' Filtering narrows down to the set that matches your query, and removes the rest.
#'
#' @examples \dontrun{
#' get_gbifid(sciname='Poa annua')
#' get_gbifid(sciname='Pinus contorta')
#' get_gbifid(sciname='Puma concolor')
#'
#' # multiple names
#' get_gbifid(c("Poa annua", "Pinus contorta"))
#'
#' # specify rows to limit choices available
#' get_gbifid(sciname='Pinus')
#' get_gbifid(sciname='Pinus', rows=10)
#' get_gbifid(sciname='Pinus', rows=1:3)
#'
#' # When not found, NA given
#' get_gbifid(sciname="uaudnadndj")
#' get_gbifid(c("Chironomus riparius", "uaudnadndj"))
#'
#' # Narrow down results to a division or rank, or both
#' ## Satyrium example
#' ### Results w/o narrowing
#' get_gbifid("Satyrium")
#' ### w/ phylum
#' get_gbifid("Satyrium", phylum = "Tracheophyta")
#' get_gbifid("Satyrium", phylum = "Arthropoda")
#' ### w/ phylum & rank
#' get_gbifid("Satyrium", phylum = "Arthropoda", rank = "genus")
#'
#' ## Rank example
#' get_gbifid("Poa", method = "lookup")
#' get_gbifid("Poa", method = "lookup", rank = "genus")
#' get_gbifid("Poa", method = "lookup", family = "Thripidae")
#'
#' # Fuzzy filter on any filtering fields
#' ## uses grep on the inside
#' get_gbifid("Satyrium", phylum = "arthropoda")
#' get_gbifid("A*", method = "lookup", order = "*tera")
#' get_gbifid("A*", method = "lookup", order = "*ales")
#'
#' # Convert a uid without class information to a uid class
#' as.gbifid(get_gbifid("Poa annua")) # already a uid, returns the same
#' as.gbifid(get_gbifid(c("Poa annua","Puma concolor"))) # same
#' as.gbifid(2704179) # numeric
#' as.gbifid(c(2704179,2435099,3171445)) # numeric vector, length > 1
#' as.gbifid("2704179") # character
#' as.gbifid(c("2704179","2435099","3171445")) # character vector, length > 1
#' as.gbifid(list("2704179","2435099","3171445")) # list, either numeric or character
#' ## dont check, much faster
#' as.gbifid("2704179", check=FALSE)
#' as.gbifid(2704179, check=FALSE)
#' as.gbifid(2704179, check=FALSE)
#' as.gbifid(c("2704179","2435099","3171445"), check=FALSE)
#' as.gbifid(list("2704179","2435099","3171445"), check=FALSE)
#'
#' (out <- as.gbifid(c(2704179,2435099,3171445)))
#' data.frame(out)
#' as.uid( data.frame(out) )
#'
#' # Get all data back
#' get_gbifid_("Puma concolor")
#' get_gbifid_(c("Pinus", "uaudnadndj"))
#' get_gbifid_(c("Pinus", "Puma"), rows=5)
#' get_gbifid_(c("Pinus", "Puma"), rows=1:5)
#'
#' # use curl options
#' library("httr")
#' get_gbifid("Quercus douglasii", config=verbose())
#' bb <- get_gbifid("Quercus douglasii", config=progress())
#' }
get_gbifid <- function(sciname, ask = TRUE, verbose = TRUE, rows = NA,
                       phylum = NULL, class = NULL, order = NULL,
                       family = NULL, rank = NULL, method = "backbone", ...) {
  ## Worker: resolve one scientific name to a GBIF backbone key.
  ## Returns list(id, att, multiple, direct) for the caller to assemble.
  fun <- function(sciname, ask, verbose, rows, ...) {
    direct <- FALSE
    mssg(verbose, "\nRetrieving data for taxon '", sciname, "'\n")
    ## Query either the backbone matcher or the full-text lookup route.
    df <- switch(
      method,
      backbone = gbif_name_backbone(sciname, ...),
      lookup = gbif_name_lookup(sciname, ...)
    )
    mm <- NROW(df) > 1
    #df <- sub_rows(df, rows)
    if (is.null(df)) df <- data.frame(NULL)
    if (nrow(df) == 0) {
      mssg(verbose, "Not found. Consider checking the spelling or alternate classification")
      id <- NA_character_
      att <- "not found"
    } else {
      names(df)[1] <- 'gbifid'
      id <- df$gbifid
      att <- "found"
    }
    # not found
    if (length(id) == 0) {
      mssg(verbose, "Not found. Consider checking the spelling or alternate classification")
      id <- NA_character_
      att <- "not found"
    }
    # more than one found -> user input
    if (length(id) > 1) {
      # check for exact match
      matchtmp <- df[as.character(df$canonicalname) %in% sciname, "gbifid"]
      if (length(matchtmp) == 1) {
        id <- as.character(matchtmp)
        direct <- TRUE
      } else {
        if (ask) {
          ## Narrow candidates with any user-supplied taxonomic filters (regex via filt).
          if (!is.null(phylum) || !is.null(class) || !is.null(order) ||
              !is.null(family) || !is.null(rank)) {
            df <- filt(df, "phylum", phylum)
            df <- filt(df, "class", class)
            df <- filt(df, "order", order)
            df <- filt(df, "family", family)
            df <- filt(df, "rank", rank)
          }
          df <- sub_rows(df, rows)
          if (NROW(df) == 0) {
            id <- NA_character_
            att <- "not found"
          } else {
            id <- df$gbifid
            if (length(id) == 1) {
              rank_taken <- as.character(df$rank)
              att <- "found"
            }
          }
          ## Still ambiguous: show candidates and ask the user to pick a row.
          if (length(id) > 1) {
            # limit to subset of columns for ease of use
            df <- df[, switch(method, backbone = gbif_cols_show_backbone, lookup = gbif_cols_show_lookup)]
            # prompt
            message("\n\n")
            ## BUG FIX: prompt previously said "eolid" (copy-paste from get_eolid);
            ## this function returns GBIF identifiers.
            message("\nMore than one gbifid found for taxon '", sciname, "'!\n
          Enter rownumber of taxon (other inputs will return 'NA'):\n")
            rownames(df) <- 1:nrow(df)
            print(df)
            take <- scan(n = 1, quiet = TRUE, what = 'raw')
            if (length(take) == 0) {
              take <- 'notake'
              att <- 'nothing chosen'
            }
            if (take %in% seq_len(nrow(df))) {
              take <- as.numeric(take)
              message("Input accepted, took gbifid '", as.character(df$gbifid[take]), "'.\n")
              id <- as.character(df$gbifid[take])
              att <- "found"
            } else {
              id <- NA_character_
              att <- "not found"
              mssg(verbose, "\nReturned 'NA'!\n\n")
            }
          }
        } else {
          id <- NA_character_
          att <- "NA due to ask=FALSE"
        }
      }
    }
    list(id = id, att = att, multiple = mm, direct = direct)
  }
  out <- lapply(as.character(sciname), fun, ask, verbose, rows, ...)
  ## Assemble a classed character vector carrying match metadata as attributes.
  ids <- structure(as.character(unlist(pluck(out, "id"))), class = "gbifid",
                   match = pluck(out, "att", ""),
                   multiple_matches = pluck(out, "multiple", logical(1)),
                   pattern_match = pluck(out, "direct", logical(1)))
  add_uri(ids, 'http://www.gbif.org/species/%s')
}
#' @export
#' @rdname get_gbifid
as.gbifid <- function(x, check=FALSE) {
  ## S3 generic: convert input to a "gbifid" object.
  UseMethod("as.gbifid")
}
#' @export
#' @rdname get_gbifid
as.gbifid.gbifid <- function(x, check=FALSE) {
  ## Already a gbifid: return unchanged.
  x
}
#' @export
#' @rdname get_gbifid
as.gbifid.character <- function(x, check=TRUE) {
  ## Single value: build one gbifid; otherwise collapse the vector element-wise.
  if (length(x) == 1) {
    make_gbifid(x, check)
  } else {
    collapse(x, make_gbifid, "gbifid", check = check)
  }
}
#' @export
#' @rdname get_gbifid
as.gbifid.list <- function(x, check=TRUE) {
  ## Single element: build one gbifid; otherwise collapse the list element-wise.
  if (length(x) == 1) {
    make_gbifid(x, check)
  } else {
    collapse(x, make_gbifid, "gbifid", check = check)
  }
}
#' @export
#' @rdname get_gbifid
as.gbifid.numeric <- function(x, check=TRUE) {
  ## Coerce to character and dispatch again via the generic.
  as.gbifid(as.character(x), check)
}
#' @export
#' @rdname get_gbifid
as.gbifid.data.frame <- function(x, check = TRUE) {
  ## Rebuild the classed id vector from a data.frame produced by
  ## as.data.frame.gbifid (columns: ids, match, multiple_matches, pattern_match, uri).
  structure(
    x$ids,
    class = "gbifid",
    match = x$match,
    multiple_matches = x$multiple_matches,
    pattern_match = x$pattern_match,
    uri = x$uri
  )
}
#' @export
#' @rdname get_gbifid
as.data.frame.gbifid <- function(x, ...){
  ## Flatten a gbifid object and its attributes into a data.frame, one row per id.
  ids <- as.character(unclass(x))
  data.frame(ids = ids,
             class = "gbifid",
             match = attr(x, "match"),
             multiple_matches = attr(x, "multiple_matches"),
             pattern_match = attr(x, "pattern_match"),
             uri = attr(x, "uri"),
             stringsAsFactors = FALSE)
}
# Build a single 'gbifid' from one id via the shared constructor; 'check'
# is forwarded (presumably triggering check_gbifid validation -- confirm in
# make_generic). The URI template points at the GBIF species page.
make_gbifid <- function(x, check=TRUE) make_generic(x, 'http://www.gbif.org/species/%s', "gbifid", check)
# Validate a GBIF id: TRUE when gbif_name_usage() resolves the key, FALSE
# when the lookup errors and the result carries no 'key' element.
check_gbifid <- function(x){
  res <- tryCatch(gbif_name_usage(key = x), error = function(e) e)
  is_bad <- inherits(res, "error") && is.null(res$key)
  !is_bad
}
#' @export
#' @rdname get_gbifid
get_gbifid_ <- function(sciname, verbose = TRUE, rows = NA, method = "backbone"){
  # Batch lookup: one raw match table per input name, named by that name.
  res <- lapply(sciname, get_gbifd_help, verbose = verbose, rows = rows,
                method = method)
  names(res) <- sciname
  res
}
# Worker for get_gbifid_(): announce the lookup (when verbose), fetch the
# match table from the chosen GBIF endpoint, lower-case the column names,
# and trim to the requested rows.
# NOTE: the function name keeps its historical typo ("gbifd") because
# callers reference it; an unrecognized 'method' yields NULL from switch(),
# matching the original behavior.
get_gbifd_help <- function(sciname, verbose, rows, method){
  mssg(verbose, "\nRetrieving data for taxon '", sciname, "'\n")
  fetcher <- switch(method,
    backbone = gbif_name_backbone,
    lookup = gbif_name_lookup
  )
  res <- if (is.null(fetcher)) NULL else fetcher(sciname)
  if (is.null(res)) return(sub_rows(res, rows))
  sub_rows(nmslwr(res), rows)
}
|
# Driver for the MASS package's bundled demo scripts: each script is run in
# a fresh R process and the harness fails if any of them error.
# Skips entirely unless the MASS_TESTING environment variable is set.
if(!nzchar(Sys.getenv("MASS_TESTING"))) q("no")
# Work in a clean scratch directory so stale outputs cannot interfere.
unlink("scripts", recursive = TRUE)
dir.create("scripts")
Sys.unsetenv("R_TESTS") # avoid startup using startup.Rs (which is in the dir above)
setwd("scripts")
# Pin the RNG state and printing options via a local .Rprofile so the
# .Rout transcripts are reproducible across runs.
writeLines(c(".Random.seed <- c(0L,1:3)",
             "options(width = 65, show.signif.stars=FALSE)"),
           ".Rprofile")
# Run one script with 'R CMD BATCH --vanilla' in a child process.
# Returns 0L on success; on failure prints the tail of the transcript,
# renames it to "<output>.fail", and returns 1L.
runone <- function(f)
{
message(" Running ", sQuote(basename(f)))
outfile <- paste(basename(f), "out", sep = "")
failfile <- paste(outfile, "fail", sep=".")
unlink(c(outfile, failfile))
res <- system2(file.path(R.home("bin"), "R"),
c("CMD BATCH --vanilla", shQuote(f), shQuote(outfile)),
env = paste("R_LIBS", Sys.getenv("R_LIBS"), sep = "="))
if (res) {
cat(tail(readLines(outfile), 20), sep="\n")
file.rename(outfile, failfile)
return(1L)
}
0L
}
library(MASS)
# All R scripts shipped in the installed package's scripts directory.
dd <- system.file("scripts", package="MASS")
files <- list.files(dd, pattern="\\.R$", full.names=TRUE)
# Accumulate the failure count across all scripts, report timing, then
# raise an error if anything failed.
res <- 0L
for(f in files) res <- res + runone(f)
proc.time()
if(res) stop(gettextf("%d scripts failed", res))
|
/Tools/DECoN-master/Windows/packrat/lib-R/MASS/tests/scripts.R
|
permissive
|
robinwijngaard/TFM_code
|
R
| false
| false
| 1,148
|
r
|
# Driver for the MASS package's bundled demo scripts: each script is run in
# a fresh R process and the harness fails if any of them error.
# Skips entirely unless the MASS_TESTING environment variable is set.
if(!nzchar(Sys.getenv("MASS_TESTING"))) q("no")
# Work in a clean scratch directory so stale outputs cannot interfere.
unlink("scripts", recursive = TRUE)
dir.create("scripts")
Sys.unsetenv("R_TESTS") # avoid startup using startup.Rs (which is in the dir above)
setwd("scripts")
# Pin the RNG state and printing options via a local .Rprofile so the
# .Rout transcripts are reproducible across runs.
writeLines(c(".Random.seed <- c(0L,1:3)",
             "options(width = 65, show.signif.stars=FALSE)"),
           ".Rprofile")
# Run one script with 'R CMD BATCH --vanilla' in a child process.
# Returns 0L on success; on failure prints the tail of the transcript,
# renames it to "<output>.fail", and returns 1L.
runone <- function(f)
{
message(" Running ", sQuote(basename(f)))
outfile <- paste(basename(f), "out", sep = "")
failfile <- paste(outfile, "fail", sep=".")
unlink(c(outfile, failfile))
res <- system2(file.path(R.home("bin"), "R"),
c("CMD BATCH --vanilla", shQuote(f), shQuote(outfile)),
env = paste("R_LIBS", Sys.getenv("R_LIBS"), sep = "="))
if (res) {
cat(tail(readLines(outfile), 20), sep="\n")
file.rename(outfile, failfile)
return(1L)
}
0L
}
library(MASS)
# All R scripts shipped in the installed package's scripts directory.
dd <- system.file("scripts", package="MASS")
files <- list.files(dd, pattern="\\.R$", full.names=TRUE)
# Accumulate the failure count across all scripts, report timing, then
# raise an error if anything failed.
res <- 0L
for(f in files) res <- res + runone(f)
proc.time()
if(res) stop(gettextf("%d scripts failed", res))
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/PercChange.R
\name{PercChange}
\alias{PercChange}
\title{Calculate the percentage change from a specified lag, including within groups}
\usage{
PercChange(data, Var, GroupVar, NewVar, slideBy = -1, type = "percent", ...)
}
\arguments{
\item{data}{a data frame object.}
\item{Var}{a character string naming the variable you would like to find the
percentage change for.}
\item{GroupVar}{a character string naming the variable grouping the units
within which the percentage change will be found for (i.e. countries in a
time series). If \code{GroupVar} is missing then the entire data frame is
treated as one unit.}
\item{NewVar}{a character string specifying the name for the new variable to
place the percentage change in.}
\item{slideBy}{numeric value specifying how many rows (time units) to make
the percentage change comparison for. Positive values shift the data up--lead
the data.}
\item{type}{character string set at either \code{percent} for percentages or
\code{proportion} to find proportions.}
\item{...}{arguments passed to \code{\link{slide}}.}
}
\value{
a data frame
}
\description{
Calculate the percentage change from a specified lag, including within groups
}
\details{
Finds the percentage or proportion change over a given time
period, either within groups of data or across the whole data frame. Important:
the data must be in time order and, if groups are used, group-time order.
}
\examples{
# Create fake data frame
A <- c(1, 1, 1, 1, 1, 2, 2, 2, 2, 2)
B <- c(1:10)
Data <- data.frame(A, B)
# Find percentage change from two periods before
Out <- PercChange(Data, Var = 'B',
type = 'proportion',
NewVar = 'PercentChange',
slideBy = -2)
Out
}
|
/man/PercChange.Rd
|
no_license
|
cran/DataCombine
|
R
| false
| true
| 1,775
|
rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/PercChange.R
\name{PercChange}
\alias{PercChange}
\title{Calculate the percentage change from a specified lag, including within groups}
\usage{
PercChange(data, Var, GroupVar, NewVar, slideBy = -1, type = "percent", ...)
}
\arguments{
\item{data}{a data frame object.}
\item{Var}{a character string naming the variable you would like to find the
percentage change for.}
\item{GroupVar}{a character string naming the variable grouping the units
within which the percentage change will be found for (i.e. countries in a
time series). If \code{GroupVar} is missing then the entire data frame is
treated as one unit.}
\item{NewVar}{a character string specifying the name for the new variable to
place the percentage change in.}
\item{slideBy}{numeric value specifying how many rows (time units) to make
the percentage change comparison for. Positive values shift the data up--lead
the data.}
\item{type}{character string set at either \code{percent} for percentages or
\code{proportion} to find proportions.}
\item{...}{arguments passed to \code{\link{slide}}.}
}
\value{
a data frame
}
\description{
Calculate the percentage change from a specified lag, including within groups
}
\details{
Finds the percentage or proportion change over a given time
period, either within groups of data or across the whole data frame. Important:
the data must be in time order and, if groups are used, group-time order.
}
\examples{
# Create fake data frame
A <- c(1, 1, 1, 1, 1, 2, 2, 2, 2, 2)
B <- c(1:10)
Data <- data.frame(A, B)
# Find percentage change from two periods before
Out <- PercChange(Data, Var = 'B',
type = 'proportion',
NewVar = 'PercentChange',
slideBy = -2)
Out
}
|
# k-means clustering of customer telephone call volumes.
# NOTE(review): install.packages() on every run plus the choose.dir() and
# hard-coded setwd() calls are interactive, machine-specific setup.
install.packages("dplyr")
install.packages("NbClust")
library(dplyr)
library(NbClust)
choose.dir()
setwd("C:/Users/PC39043/Downloads")
getwd()
data <- read.csv("Customer2001.csv")
# Peak = daytime calls, OffPeak = nighttime calls, Weekend = weekend calls,
# International = international call minutes
data %>% head()
data %>% names()
# Keep only the call-volume and average-call columns for clustering.
data.use <- data %>% select("Peak_calls_Sum", "OffPeak_calls_Sum", "Weekend_calls_Sum", "International_mins_Sum", "AvePeak", "AveOffPeak","AveWeekend")
data.use %>% head()
plot(data.use)
# Let NbClust suggest a cluster count (2..10) for the k-means method, then
# fit k-means with k = 3.
nc <- NbClust(data.use, min.nc=2, max.nc=10, method="kmeans")
kc <- kmeans(data.use, 3)
kc
kc$cluster
kc$centers
# Cross-tabulate cluster assignment against peak call volume.
# NOTE(review): 'target' is a continuous column, so this table has one row
# per distinct value -- confirm this is the intended summary.
pred <- kc$cluster
target <- data.use$Peak_calls_Sum
table(target, pred)
|
/K-means(통화량).R
|
no_license
|
lch5518/OpenDataAnalysis
|
R
| false
| false
| 707
|
r
|
# k-means clustering of customer telephone call volumes.
# NOTE(review): install.packages() on every run plus the choose.dir() and
# hard-coded setwd() calls are interactive, machine-specific setup.
install.packages("dplyr")
install.packages("NbClust")
library(dplyr)
library(NbClust)
choose.dir()
setwd("C:/Users/PC39043/Downloads")
getwd()
data <- read.csv("Customer2001.csv")
# Peak = daytime calls, OffPeak = nighttime calls, Weekend = weekend calls,
# International = international call minutes
data %>% head()
data %>% names()
# Keep only the call-volume and average-call columns for clustering.
data.use <- data %>% select("Peak_calls_Sum", "OffPeak_calls_Sum", "Weekend_calls_Sum", "International_mins_Sum", "AvePeak", "AveOffPeak","AveWeekend")
data.use %>% head()
plot(data.use)
# Let NbClust suggest a cluster count (2..10) for the k-means method, then
# fit k-means with k = 3.
nc <- NbClust(data.use, min.nc=2, max.nc=10, method="kmeans")
kc <- kmeans(data.use, 3)
kc
kc$cluster
kc$centers
# Cross-tabulate cluster assignment against peak call volume.
# NOTE(review): 'target' is a continuous column, so this table has one row
# per distinct value -- confirm this is the intended summary.
pred <- kc$cluster
target <- data.use$Peak_calls_Sum
table(target, pred)
|
% Generated by roxygen2 (4.1.1.9000): do not edit by hand
% Please edit documentation in R/immunogen_mh_tools_2.r
\name{get_q_h1_given_h2}
\alias{get_q_h1_given_h2}
\title{Evaluate Q function}
\usage{
get_q_h1_given_h2(h1, h2, unif.prop = T, radius)
}
\description{
Evaluate Q function
}
|
/man/get_q_h1_given_h2.Rd
|
no_license
|
liesb/BIITE
|
R
| false
| false
| 289
|
rd
|
% Generated by roxygen2 (4.1.1.9000): do not edit by hand
% Please edit documentation in R/immunogen_mh_tools_2.r
\name{get_q_h1_given_h2}
\alias{get_q_h1_given_h2}
\title{Evaluate Q function}
\usage{
get_q_h1_given_h2(h1, h2, unif.prop = T, radius)
}
\description{
Evaluate Q function
}
|
\name{prox.l1}
\alias{prox.l1}
\title{
Proximal L1 Mapping
}
\description{
Calculates the proximal L1 mapping for the given input matrix
}
\usage{
prox.l1(z, lambda, r)
}
\arguments{
\item{z}{input matrix}
\item{lambda}{parameters for calculating proximal L1 mapping}
\item{r}{number of columns used in matrix}
}
\value{
\item{x_prox}{proximal L1 Mapping}
}
\references{
Chen, S., Ma, S., Xue, L., and Zou, H. (2020) "An Alternating Manifold Proximal Gradient Method for Sparse Principal Component Analysis and Sparse Canonical Correlation Analysis" *INFORMS Journal on Optimization* 2:3, 192-208
}
\author{
Shixiang Chen, Justin Huang, Benjamin Jochem, Shiqian Ma, Lingzhou Xue and Hui Zou
}
|
/R/amanpg/man/prox.l1.Rd
|
no_license
|
benjoch/AManPG
|
R
| false
| false
| 702
|
rd
|
\name{prox.l1}
\alias{prox.l1}
\title{
Proximal L1 Mapping
}
\description{
Calculates the proximal L1 mapping for the given input matrix
}
\usage{
prox.l1(z, lambda, r)
}
\arguments{
\item{z}{input matrix}
\item{lambda}{parameters for calculating proximal L1 mapping}
\item{r}{number of columns used in matrix}
}
\value{
\item{x_prox}{proximal L1 Mapping}
}
\references{
Chen, S., Ma, S., Xue, L., and Zou, H. (2020) "An Alternating Manifold Proximal Gradient Method for Sparse Principal Component Analysis and Sparse Canonical Correlation Analysis" *INFORMS Journal on Optimization* 2:3, 192-208
}
\author{
Shixiang Chen, Justin Huang, Benjamin Jochem, Shiqian Ma, Lingzhou Xue and Hui Zou
}
|
library(testthat)
library(unitizer)
local( {
test_that("Invisible Expression", {
e <- new.env()
exp <- quote(x <- 1:30)
expect_equal(1:30, unitizer:::eval_user_exp(exp, e)$value)
} )
# `eval_user_exp` must be evaluated outside of test_that; also note that by
# design this will output stuff to stderr and stdout
# Fixture objects: an S3 and an S4 value whose print/show methods throw on
# purpose, so error capture during auto-printing can be tested.
test.obj.s3 <- structure("hello", class="test_obj")
setClass("testObj", list(a="character"))
test.obj.s4 <- new("testObj", a="goodday")
print.test_obj <- function(x, ...) stop("Error in Print")
setMethod("show", "testObj", function(object) stop("Error in Show"))
# Fixture functions, one per condition pathway: a signaled-but-not-thrown
# error, plain and condition-object stop()s (direct and nested), values
# that need printing, and message/warning emitters.
fun_signal <- function() signalCondition(simpleError("Error in Function", sys.call()))
fun_error <- function() stop("Error in function 2")
fun_error_cond <- function() stop(simpleError("Error in function 2", sys.call()))
fun_error_cond_call <- function() fun_error_cond()
fun_s3 <- function() test.obj.s3
fun_s4 <- function() test.obj.s4
fun_msg <- function() message("This is a Message")
fun_warn <- function() warning("This is a warning")
# Capture this frame so the user expressions below are evaluated here.
eval.env <- sys.frame(sys.nframe())
# Drive eval_user_exp over each fixture.  After most calls the captured
# trace is replayed into R's traceback buffer via set_trace() and
# snapshotted with traceback() for the "Trace Setting" tests below.
ex0 <- unitizer:::eval_user_exp(quote(stop()), eval.env)
unitizer:::set_trace(ex0$trace)
trace0 <- traceback()
# Signaled (not thrown) error condition.
ex1 <- unitizer:::eval_user_exp(quote(fun_signal()), eval.env)
unitizer:::set_trace(ex1$trace)
trace1 <- traceback()
# Thrown errors; both quote() and expression() inputs are exercised.
ex2 <- unitizer:::eval_user_exp(quote(fun_error()), eval.env)
unitizer:::set_trace(ex2$trace)
trace2 <- traceback()
ex2a <- unitizer:::eval_user_exp(expression(fun_error()), eval.env)
unitizer:::set_trace(ex2a$trace)
trace2a <- traceback()
# Errors raised from explicit condition objects, direct and nested.
ex6 <- unitizer:::eval_user_exp(quote(fun_error_cond()), eval.env)
unitizer:::set_trace(ex6$trace)
trace6 <- traceback()
ex7 <- unitizer:::eval_user_exp(quote(fun_error_cond_call()), eval.env)
unitizer:::set_trace(ex7$trace)
trace7 <- traceback()
# Values whose auto-printing fails (the S3 print method throws).
ex3 <- unitizer:::eval_user_exp(quote(fun_s3()), eval.env)
unitizer:::set_trace(ex3$trace)
trace3 <- traceback()
ex3a <- unitizer:::eval_user_exp(expression(fun_s3()), eval.env)
unitizer:::set_trace(ex3a$trace)
trace3a <- traceback()
# S4 value whose show() method throws.
ex4 <- unitizer:::eval_user_exp(quote(fun_s4()), eval.env)
ex4a <- unitizer:::eval_user_exp(expression(fun_s4()), eval.env)
unitizer:::set_trace(ex4a$trace)
trace4a <- traceback()
# Normal evaluation, warning, and message pathways.
ex5 <- unitizer:::eval_user_exp(quote(sum(1:20)), eval.env)
ex9 <- unitizer:::eval_user_exp(quote(fun_warn()), eval.env)
ex10 <- unitizer:::eval_user_exp(quote(fun_msg()), eval.env)
# A function that *returns* a quoted stop() call; it must not error.
ex11 <- unitizer:::eval_user_exp(
quote((function() quote(stop("shouldn't error")))()), eval.env
)
# NOTE: deparsed test values generated with unitizer:::deparse_mixed
test_that("User Expression Evaluation", {
expect_equal(
structure(list(value = NULL, aborted = FALSE, conditions = list(structure(list(message = "Error in Function", call = quote(fun_signal())), .Names = c("message", "call"), class = c("simpleError", "error", "condition"), printed = FALSE)), trace = list()), .Names = c("value", "aborted", "conditions", "trace")),
ex1 # a condition error, signaled, not stop (hence no aborted, etc.)
)
expect_equal(
structure(list(value = NULL, aborted = structure(TRUE, printed = FALSE), conditions = list(structure(list(message = "Error in function 2", call = quote(fun_error())), .Names = c("message", "call"), class = c("simpleError", "error", "condition"), unitizer.printed = FALSE)), trace = list(quote(fun_error()), quote(stop("Error in function 2")))), .Names = c("value", "aborted", "conditions", "trace")),
ex2 # a stop
)
expect_equal(
structure(list(value = structure("hello", class = "test_obj"), aborted = structure(TRUE, printed = TRUE), conditions = list(structure(list(message = "Error in Print", call = quote(print.test_obj(structure("hello", class = "test_obj")))), .Names = c("message", "call"), class = c("simpleError", "error", "condition"), unitizer.printed = TRUE)), trace = list(quote(print(structure("hello", class = "test_obj"))), quote(print.test_obj(structure("hello", class = "test_obj"))), quote(stop("Error in Print")))), .Names = c("value", "aborted", "conditions", "trace")),
ex3 # a stop in print
)
expect_equal(
structure(list(value = structure("hello", class = "test_obj"), aborted = structure(TRUE, printed = TRUE), conditions = list(structure(list(message = "Error in Print", call = quote(print.test_obj(structure("hello", class = "test_obj")))), .Names = c("message", "call"), class = c("simpleError", "error", "condition"), unitizer.printed = TRUE)), trace = list(quote(print(structure("hello", class = "test_obj"))), quote(print.test_obj(structure("hello", class = "test_obj"))), quote(stop("Error in Print")))), .Names = c("value", "aborted", "conditions", "trace")),
ex3a
)
# Can't deparse S4 objects, especially now that we are correctly including
# them as part of the call of the condition
# expect_equal(
# structure(list(aborted = structure(TRUE, printed = TRUE), conditions = list(structure(list(message = "Error in Show", call = quote(show(unitizerTESTRES))), .Names = c("message", "call"), class = c("simpleError", "error", "condition"), printed = TRUE)), trace = list("stop(\"Error in Show\")", "show(fun_s4())", "show(fun_s4())")), .Names = c("aborted", "conditions", "trace")),
# ex4[-1L] # a stop in show, have to remove 1L because S4 object doesn't deparse
# )
# expect_equal(
# structure(list(aborted = structure(TRUE, printed = TRUE), conditions = list(structure(list(message = "Error in Show", call = quote(show(unitizerTESTRES))), .Names = c("message", "call"), class = c("simpleError", "error", "condition"), printed = TRUE)), trace = list("stop(\"Error in Show\")", "show(fun_s4())", "show(fun_s4())")), .Names = c("aborted", "conditions", "trace")),
# ex4a[-1L] # a stop in show, have to remove 1L because S4 object doesn't deparse
# )
expect_equal(
structure(list(value = 210L, aborted = FALSE, conditions = list(), trace = list()), .Names = c("value", "aborted", "conditions", "trace")),
ex5 # a normal expression
)
expect_equal(
structure(list(value = "This is a warning", aborted = FALSE, conditions = list(structure(list(message = "This is a warning", call = quote(fun_warn())), .Names = c("message", "call"), class = c("simpleWarning", "warning", "condition"), unitizer.printed = FALSE)), trace = list()), .Names = c("value", "aborted", "conditions", "trace")),
ex9
)
expect_equal(
structure(list(value = NULL, aborted = FALSE, conditions = list(structure(list(message = "This is a Message\n", call = quote(message("This is a Message"))), .Names = c("message", "call"), class = c("simpleMessage", "message", "condition"), unitizer.printed = FALSE)), trace = list()), .Names = c("value", "aborted", "conditions", "trace")),
ex10
)
expect_false(ex11$aborted)
} )
test_that("Trace Setting", {
expect_identical(trace0, trace1)
expect_identical(trace2, list("stop(\"Error in function 2\")", "fun_error()"))
expect_identical(trace6, list("stop(simpleError(\"Error in function 2\", sys.call()))", "fun_error_cond()"))
expect_identical(trace7, list("stop(simpleError(\"Error in function 2\", sys.call()))", "fun_error_cond()", "fun_error_cond_call()"))
expect_identical(trace3a, list("stop(\"Error in Print\")", "print.test_obj(\"hello\")", "print(\"hello\")"))
expect_identical(trace4a, list("stop(\"Error in Show\")", "show(<S4 object of class \"testObj\">)", "show(<S4 object of class \"testObj\">)"))
} )
} )
|
/tests/testthat/testthat.exec.R
|
no_license
|
loerasg/unitizer
|
R
| false
| false
| 7,605
|
r
|
library(testthat)
library(unitizer)
local( {
test_that("Invisible Expression", {
e <- new.env()
exp <- quote(x <- 1:30)
expect_equal(1:30, unitizer:::eval_user_exp(exp, e)$value)
} )
# `eval_user_exp` must be evaluated outside of test_that; also note that by
# design this will output stuff to stderr and stdout
# Fixture objects: an S3 and an S4 value whose print/show methods throw on
# purpose, so error capture during auto-printing can be tested.
test.obj.s3 <- structure("hello", class="test_obj")
setClass("testObj", list(a="character"))
test.obj.s4 <- new("testObj", a="goodday")
print.test_obj <- function(x, ...) stop("Error in Print")
setMethod("show", "testObj", function(object) stop("Error in Show"))
# Fixture functions, one per condition pathway: a signaled-but-not-thrown
# error, plain and condition-object stop()s (direct and nested), values
# that need printing, and message/warning emitters.
fun_signal <- function() signalCondition(simpleError("Error in Function", sys.call()))
fun_error <- function() stop("Error in function 2")
fun_error_cond <- function() stop(simpleError("Error in function 2", sys.call()))
fun_error_cond_call <- function() fun_error_cond()
fun_s3 <- function() test.obj.s3
fun_s4 <- function() test.obj.s4
fun_msg <- function() message("This is a Message")
fun_warn <- function() warning("This is a warning")
# Capture this frame so the user expressions below are evaluated here.
eval.env <- sys.frame(sys.nframe())
ex0 <- unitizer:::eval_user_exp(quote(stop()), eval.env)
unitizer:::set_trace(ex0$trace)
trace0 <- traceback()
ex1 <- unitizer:::eval_user_exp(quote(fun_signal()), eval.env)
unitizer:::set_trace(ex1$trace)
trace1 <- traceback()
ex2 <- unitizer:::eval_user_exp(quote(fun_error()), eval.env)
unitizer:::set_trace(ex2$trace)
trace2 <- traceback()
ex2a <- unitizer:::eval_user_exp(expression(fun_error()), eval.env)
unitizer:::set_trace(ex2a$trace)
trace2a <- traceback()
ex6 <- unitizer:::eval_user_exp(quote(fun_error_cond()), eval.env)
unitizer:::set_trace(ex6$trace)
trace6 <- traceback()
ex7 <- unitizer:::eval_user_exp(quote(fun_error_cond_call()), eval.env)
unitizer:::set_trace(ex7$trace)
trace7 <- traceback()
ex3 <- unitizer:::eval_user_exp(quote(fun_s3()), eval.env)
unitizer:::set_trace(ex3$trace)
trace3 <- traceback()
ex3a <- unitizer:::eval_user_exp(expression(fun_s3()), eval.env)
unitizer:::set_trace(ex3a$trace)
trace3a <- traceback()
ex4 <- unitizer:::eval_user_exp(quote(fun_s4()), eval.env)
ex4a <- unitizer:::eval_user_exp(expression(fun_s4()), eval.env)
unitizer:::set_trace(ex4a$trace)
trace4a <- traceback()
ex5 <- unitizer:::eval_user_exp(quote(sum(1:20)), eval.env)
ex9 <- unitizer:::eval_user_exp(quote(fun_warn()), eval.env)
ex10 <- unitizer:::eval_user_exp(quote(fun_msg()), eval.env)
ex11 <- unitizer:::eval_user_exp(
quote((function() quote(stop("shouldn't error")))()), eval.env
)
# NOTE: deparsed test values generated with unitizer:::deparse_mixed
test_that("User Expression Evaluation", {
expect_equal(
structure(list(value = NULL, aborted = FALSE, conditions = list(structure(list(message = "Error in Function", call = quote(fun_signal())), .Names = c("message", "call"), class = c("simpleError", "error", "condition"), printed = FALSE)), trace = list()), .Names = c("value", "aborted", "conditions", "trace")),
ex1 # a condition error, signaled, not stop (hence no aborted, etc.)
)
expect_equal(
structure(list(value = NULL, aborted = structure(TRUE, printed = FALSE), conditions = list(structure(list(message = "Error in function 2", call = quote(fun_error())), .Names = c("message", "call"), class = c("simpleError", "error", "condition"), unitizer.printed = FALSE)), trace = list(quote(fun_error()), quote(stop("Error in function 2")))), .Names = c("value", "aborted", "conditions", "trace")),
ex2 # a stop
)
expect_equal(
structure(list(value = structure("hello", class = "test_obj"), aborted = structure(TRUE, printed = TRUE), conditions = list(structure(list(message = "Error in Print", call = quote(print.test_obj(structure("hello", class = "test_obj")))), .Names = c("message", "call"), class = c("simpleError", "error", "condition"), unitizer.printed = TRUE)), trace = list(quote(print(structure("hello", class = "test_obj"))), quote(print.test_obj(structure("hello", class = "test_obj"))), quote(stop("Error in Print")))), .Names = c("value", "aborted", "conditions", "trace")),
ex3 # a stop in print
)
expect_equal(
structure(list(value = structure("hello", class = "test_obj"), aborted = structure(TRUE, printed = TRUE), conditions = list(structure(list(message = "Error in Print", call = quote(print.test_obj(structure("hello", class = "test_obj")))), .Names = c("message", "call"), class = c("simpleError", "error", "condition"), unitizer.printed = TRUE)), trace = list(quote(print(structure("hello", class = "test_obj"))), quote(print.test_obj(structure("hello", class = "test_obj"))), quote(stop("Error in Print")))), .Names = c("value", "aborted", "conditions", "trace")),
ex3a
)
# Can't deparse S4 objects, especially now that we are correctly including
# them as part of the call of the condition
# expect_equal(
# structure(list(aborted = structure(TRUE, printed = TRUE), conditions = list(structure(list(message = "Error in Show", call = quote(show(unitizerTESTRES))), .Names = c("message", "call"), class = c("simpleError", "error", "condition"), printed = TRUE)), trace = list("stop(\"Error in Show\")", "show(fun_s4())", "show(fun_s4())")), .Names = c("aborted", "conditions", "trace")),
# ex4[-1L] # a stop in show, have to remove 1L because S4 object doesn't deparse
# )
# expect_equal(
# structure(list(aborted = structure(TRUE, printed = TRUE), conditions = list(structure(list(message = "Error in Show", call = quote(show(unitizerTESTRES))), .Names = c("message", "call"), class = c("simpleError", "error", "condition"), printed = TRUE)), trace = list("stop(\"Error in Show\")", "show(fun_s4())", "show(fun_s4())")), .Names = c("aborted", "conditions", "trace")),
# ex4a[-1L] # a stop in show, have to remove 1L because S4 object doesn't deparse
# )
expect_equal(
structure(list(value = 210L, aborted = FALSE, conditions = list(), trace = list()), .Names = c("value", "aborted", "conditions", "trace")),
ex5 # a normal expression
)
expect_equal(
structure(list(value = "This is a warning", aborted = FALSE, conditions = list(structure(list(message = "This is a warning", call = quote(fun_warn())), .Names = c("message", "call"), class = c("simpleWarning", "warning", "condition"), unitizer.printed = FALSE)), trace = list()), .Names = c("value", "aborted", "conditions", "trace")),
ex9
)
expect_equal(
structure(list(value = NULL, aborted = FALSE, conditions = list(structure(list(message = "This is a Message\n", call = quote(message("This is a Message"))), .Names = c("message", "call"), class = c("simpleMessage", "message", "condition"), unitizer.printed = FALSE)), trace = list()), .Names = c("value", "aborted", "conditions", "trace")),
ex10
)
expect_false(ex11$aborted)
} )
test_that("Trace Setting", {
expect_identical(trace0, trace1)
expect_identical(trace2, list("stop(\"Error in function 2\")", "fun_error()"))
expect_identical(trace6, list("stop(simpleError(\"Error in function 2\", sys.call()))", "fun_error_cond()"))
expect_identical(trace7, list("stop(simpleError(\"Error in function 2\", sys.call()))", "fun_error_cond()", "fun_error_cond_call()"))
expect_identical(trace3a, list("stop(\"Error in Print\")", "print.test_obj(\"hello\")", "print(\"hello\")"))
expect_identical(trace4a, list("stop(\"Error in Show\")", "show(<S4 object of class \"testObj\">)", "show(<S4 object of class \"testObj\">)"))
} )
} )
|
## Cache-aware matrix wrapper (Coursera R Programming assignment 2).
## makeCacheMatrix: bundle a matrix with a cache slot for its inverse.
## Returns a list of four accessors: set, get, setinverse, getinverse.
## Replacing the matrix via set() invalidates the cached inverse.
makeCacheMatrix <- function(x = matrix()) {
  cached <- NULL
  set <- function(y) {
    # New matrix => any previously cached inverse is stale.
    x <<- y
    cached <<- NULL
  }
  get <- function() {
    x
  }
  setinverse <- function(inv) {
    cached <<- inv
  }
  getinverse <- function() {
    cached
  }
  list(set = set,
       get = get,
       setinverse = setinverse,
       getinverse = getinverse)
}
## cacheSolve: return the inverse of the matrix held in a makeCacheMatrix
## object, computing it at most once and serving the cached copy on later
## calls (announced with a "getting cached data" message).
cacheSolve <- function(x, ...) {
  ## Return a matrix that is the inverse of 'x'
  cached <- x$getinverse()
  if (is.null(cached)) {
    # First request: compute, store in the wrapper's cache, then return.
    cached <- solve(x$get(), ...)
    x$setinverse(cached)
  } else {
    message("getting cached data")
  }
  cached
}
|
/cachematrix.R
|
no_license
|
B2BWild/ProgrammingAssignment2
|
R
| false
| false
| 1,018
|
r
|
## Cache-aware matrix wrapper (Coursera R Programming assignment 2).
## makeCacheMatrix: bundle a matrix with a cache slot for its inverse.
## Returns a list of four accessors: set, get, setinverse, getinverse.
## Replacing the matrix via set() invalidates the cached inverse.
makeCacheMatrix <- function(x = matrix()) {
  cached <- NULL
  set <- function(y) {
    # New matrix => any previously cached inverse is stale.
    x <<- y
    cached <<- NULL
  }
  get <- function() {
    x
  }
  setinverse <- function(inv) {
    cached <<- inv
  }
  getinverse <- function() {
    cached
  }
  list(set = set,
       get = get,
       setinverse = setinverse,
       getinverse = getinverse)
}
## cacheSolve: return the inverse of the matrix held in a makeCacheMatrix
## object, computing it at most once and serving the cached copy on later
## calls (announced with a "getting cached data" message).
cacheSolve <- function(x, ...) {
  ## Return a matrix that is the inverse of 'x'
  cached <- x$getinverse()
  if (is.null(cached)) {
    # First request: compute, store in the wrapper's cache, then return.
    cached <- solve(x$get(), ...)
    x$setinverse(cached)
  } else {
    message("getting cached data")
  }
  cached
}
|
# comp.stat: summarize structural-variant (SV) impact on genome sequence
# and gene content for each accession relative to the target assembly.
# NOTE(review): relies on objects sourced from Location.R / comp.fun.R
# (tcfg, cfgs, qnames_all, intersect_count, ...) -- not visible here.
require(rtracklayer)
require(plyr)
require(grid)
require(ggplot2)
source('Location.R')
source('comp.fun.R')
source('comp.plot.fun.R')
diro = sprintf("%s/comp.stat", Sys.getenv("misc3"))
# Target assembly: chromosome sizes, assembly gaps, and the non-gap space.
tlen = read.table(tcfg$size, sep = "\t", header = F, as.is = T)
grt = with(tlen, GRanges(seqnames = V1, ranges = IRanges(1, end = V2)))
tgap = read.table(tcfg$gap, sep = "\t", header = F, as.is = T)
grp = with(tgap, GRanges(seqnames = V1, ranges = IRanges(V2, end = V3)))
grnp = GenomicRanges::setdiff(grt, grp)
tsize = sum(width(grt))
tsize2 = sum(width(grnp))
# Target genes: keep CDS rows and collapse to one span per gene id.
tg = read.table(tcfg$gene, sep = "\t", header = F, as.is = T)
colnames(tg) = c('chr', 'beg', 'end', 'srd', 'id', 'type', 'fam')
tg = tg[tg$type == 'cds',]
gb = group_by(tg, id)
tg = summarise(gb, fam = fam[1], chr = chr[1], beg = min(beg), end = max(end), size = end - beg + 1)
tg = cbind(idx = 1:nrow(tg), tg)
grgt = with(tg, GRanges(seqnames = chr, ranges = IRanges(beg, end = end)))
##### SV impact on genome and proteome
# NOTE(review): leftover debug assignment; immediately overwritten by the
# loop variable below.
qname = "HM004"
for (qname in qnames_all) {
# Query accession: sizes, gaps, and per-gene spans (as for the target).
cfg = cfgs[[qname]]
tlen = read.table(cfg$size, sep = "\t", header = F, as.is = T)
grt = with(tlen, GRanges(seqnames = V1, ranges = IRanges(1, end = V2)))
tgap = read.table(cfg$gap, sep = "\t", header = F, as.is = T)
grp = with(tgap, GRanges(seqnames = V1, ranges = IRanges(V2, end = V3)))
grnp = GenomicRanges::setdiff(grt, grp)
qsize = sum(width(grt))
qsize2 = sum(width(grnp))
qg = read.table(cfg$gene, sep = "\t", header = F, as.is = T)
colnames(qg) = c('chr', 'beg', 'end', 'srd', 'id', 'type', 'fam')
qg = qg[qg$type == 'cds',]
gb = group_by(qg, id)
qg = summarise(gb, fam = fam[1], chr = chr[1], beg = min(beg), end = max(end), size = end - beg + 1)
grgq = with(qg, GRanges(seqnames = chr, ranges = IRanges(beg, end = end)))
cdir = cfg$cdir
# Alignment blocks ("gal") give the aligned space on both genomes.
fy = file.path(cdir, "31.9/gal")
ty = read.table(fy, header = T, sep = "\t", as.is = T)[,c('tId', 'tBeg', 'tEnd', 'qId', 'qBeg', 'qEnd')]
gryt = with(ty, GRanges(seqnames = tId, ranges = IRanges(tBeg, end = tEnd)))
gryq = with(ty, GRanges(seqnames = qId, ranges = IRanges(qBeg, end = qEnd)))
ytlen = sum(width(reduce(gryt)))
yqlen = sum(width(reduce(gryq)))
# SV calls: keep events with combined length >= 50 bp, split into
# target-side and query-side range sets.
fv = file.path(cdir, "../31_sv/05.stb")
tv = read.table(fv, header = T, sep = "\t", as.is = T)[,c('id','tchr','tbeg','tend','tlen','srd','qchr','qbeg','qend','qlen')]
tv = tv[tv$tlen+tv$qlen-2 >= 50,]
tvt = tv[tv$tlen > 0, c('tchr','tbeg','tend')]
grst = with(tvt, GRanges(seqnames = tchr, ranges = IRanges(tbeg, end = tend)))
tvq = tv[tv$qlen > 0, c('qchr','qbeg','qend')]
grsq = with(tvq, GRanges(seqnames = qchr, ranges = IRanges(qbeg, end = qend)))
stlen = sum(width(reduce(grst)))
sqlen = sum(width(reduce(grsq)))
# Fraction of genes overlapping at least one SV, target side then query side.
tocnt = intersect_count(grgt, grst)
tg2 = cbind(tg, cnt = tocnt)
tgb = group_by(tg2, id)
x = summarise(tgb, cnt = sum(cnt))
tpct = sum(x$cnt > 0) / nrow(x)
qocnt = intersect_count(grgq, grsq)
qg2 = cbind(qg, cnt = qocnt)
qgb = group_by(qg2, id)
x = summarise(qgb, cnt = sum(cnt))
qpct = sum(x$cnt > 0) / nrow(x)
cat(sprintf("%s: genome [tgt %.03f qry %.03f] gene [tgt %.03f qry %.03f]\n", qname, stlen/ytlen, sqlen/yqlen, tpct, qpct))
}
#### SV impact on genes / gene family
do = data.frame()
for (qname in qnames_all[1:12]) {
# NOTE(review): this overrides the loop variable on every iteration, so
# only HM340.AC is ever processed -- looks like a leftover debug line.
qname = "HM340.AC"
cfg = cfgs[[qname]]
# Query accession setup, mirroring the previous section.
tlen = read.table(cfg$size, sep = "\t", header = F, as.is = T)
grt = with(tlen, GRanges(seqnames = V1, ranges = IRanges(1, end = V2)))
tgap = read.table(cfg$gap, sep = "\t", header = F, as.is = T)
grp = with(tgap, GRanges(seqnames = V1, ranges = IRanges(V2, end = V3)))
grnp = GenomicRanges::setdiff(grt, grp)
qsize = sum(width(grt))
qsize2 = sum(width(grnp))
qg = read.table(cfg$gene, sep = "\t", header = F, as.is = T)
colnames(qg) = c('chr', 'beg', 'end', 'srd', 'id', 'type', 'fam')
qg = qg[qg$type == 'cds',]
gb = group_by(qg, id)
qg = summarise(gb, fam = fam[1], chr = chr[1], beg = min(beg), end = max(end), size = end - beg + 1)
qg = cbind(idx = 1:nrow(qg), qg)
grgq = with(qg, GRanges(seqnames = chr, ranges = IRanges(beg, end = end)))
cdir = cfg$cdir
#fy = file.path(cdir, "31.9/idm")
#ty = read.table(fy, header = F, sep = "\t", as.is = T)
#colnames(ty) = c("tchr", 'tbeg', 'tend', 'tsrd', 'qchr', 'qbeg', 'qend', 'qsrd', 'cid', 'lev')
#ty = ty[ty$lev == 1,]
# SVs >= 50 bp combined length; interiors only (beg+1 .. end-1) are used
# when intersecting with genes.
fv = file.path(cdir, "../31_sv/05.stb")
tv = read.table(fv, header = T, sep = "\t", as.is = T)[,c('id','tchr','tbeg','tend','tlen','srd','qchr','qbeg','qend','qlen')]
tv = tv[tv$tlen+tv$qlen-2 >= 50,]
tvt = tv[tv$tlen > 0, c('tchr','tbeg','tend')]
tvt = cbind(idx = 1:nrow(tvt), tvt)
grst = with(tvt, GRanges(seqnames = tchr, ranges = IRanges(tbeg+1, end = tend-1)))
tvq = tv[tv$qlen > 0, c('qchr','qbeg','qend','tchr','tbeg','tend')]
tvq = cbind(idx = 1:nrow(tvq), tvq)
grsq = with(tvq, GRanges(seqnames = qchr, ranges = IRanges(qbeg+1, end = qend-1)))
# Per-SV gene counts on the target side, flagging NCR/CRP family hits;
# prints the 30 SVs with the most NCR/CRP genes.
x = intersect_idx(grst, grgt)
y = merge(x, tg[,c('idx','id','fam','size')], by.x = 'qidx', by.y = 'idx')
gb = group_by(y, idx)
z = summarize(gb, ngene = n(), n2 = sum(fam %in% c("NCR","CRP0000-1030")))
z = merge(z, tvt, by = 'idx')
z[order(z$n2, decreasing = T), ][1:30,]
# Same summary on the query side.
x = intersect_idx(grsq, grgq)
y = merge(x, qg[,c('idx','id','fam','size')], by.x = 'qidx', by.y = 'idx')
gb = group_by(y, idx)
z = summarize(gb, ngene = n(), n2 = sum(fam %in% c("NCR","CRP0000-1030")))
z = merge(z, tvq, by = 'idx')
z[order(z$n2, decreasing = T), ][1:30,]
# NOTE(review): 'ds' is never created inside this loop, so these lines use
# whatever 'ds' happens to exist in the workspace -- confirm intent.
ds = cbind(ds, org = qname)
do = rbind(do, ds)
}
to = ddply(do, .(fam), summarise, q25 = quantile(prop, 0.25), q50 = quantile(prop, 0.5), q75 = quantile(prop, 0.75))
ffam = file.path(diro, "41.gene.fams.tbl")
fams = read.table(ffam, header = F, sep = "\t", as.is = T)[,1]
ti = to
tis = ti[ti$fam %in% fams,]
tis = tis[order(tis$q50, decreasing = T),]
tis$fam = factor(tis$fam, levels = tis$fam)
p4 = ggplot(tis) +
geom_crossbar(aes(x = fam, y = q50, ymin = q25, ymax = q75),
stat = 'identity', position = 'dodge', geom_params = list(width = 0.7, size = 0.3)) +
coord_flip() +
scale_x_discrete(name = '', expand = c(0.01, 0.01)) +
scale_y_continuous(name = 'Proportion SV', expand = c(0, 0)) +
theme_bw() +
# theme(axis.ticks.y = element_blank(), axis.line.y = element_blank()) +
theme(plot.margin = unit(c(0.5,0.5,0,0), "lines")) +
theme(axis.title.y = element_blank()) +
theme(axis.title.x = element_text(size = 9)) +
theme(axis.text.x = element_text(size = 8, colour = "black", angle = 0)) +
theme(axis.text.y = element_text(size = 8, colour = "royalblue", angle = 0, hjust = 1))
fp = file.path(dirw, "19_sv_genefam.pdf")
ggsave(p4, filename = fp, width = 5, height = 4)
|
/archive/comp.sv.R
|
permissive
|
orionzhou/rmaize
|
R
| false
| false
| 6,509
|
r
|
require(rtracklayer)
require(plyr)
require(grid)
require(ggplot2)
source('Location.R')
source('comp.fun.R')
source('comp.plot.fun.R')
# Output directory for comparative stats; rooted at the $misc3 env variable.
diro = sprintf("%s/comp.stat", Sys.getenv("misc3"))
# Build target-genome ranges: full chromosomes (tlen) minus assembly gaps (tgap).
# NOTE(review): `tcfg` is not defined in this file -- presumably supplied by one
# of the source()d scripts above (comp.fun.R?); confirm before running standalone.
tlen = read.table(tcfg$size, sep = "\t", header = F, as.is = T)
grt = with(tlen, GRanges(seqnames = V1, ranges = IRanges(1, end = V2)))
tgap = read.table(tcfg$gap, sep = "\t", header = F, as.is = T)
grp = with(tgap, GRanges(seqnames = V1, ranges = IRanges(V2, end = V3)))
grnp = GenomicRanges::setdiff(grt, grp)
# Total genome size and non-gap genome size (bp).
tsize = sum(width(grt))
tsize2 = sum(width(grnp))
# Target gene table: keep CDS rows only, then collapse to one row per gene id
# with the family of the first CDS and the min/max CDS coordinates as the span.
tg = read.table(tcfg$gene, sep = "\t", header = F, as.is = T)
colnames(tg) = c('chr', 'beg', 'end', 'srd', 'id', 'type', 'fam')
tg = tg[tg$type == 'cds',]
gb = group_by(tg, id)
tg = summarise(gb, fam = fam[1], chr = chr[1], beg = min(beg), end = max(end), size = end - beg + 1)
tg = cbind(idx = 1:nrow(tg), tg)
grgt = with(tg, GRanges(seqnames = chr, ranges = IRanges(beg, end = end)))
##### SV impact on genome and proteome
# For each query accession: compute the fraction of the aligned genome covered
# by SVs (target and query sides) and the fraction of genes overlapping an SV,
# then print one summary line per accession.
qname = "HM004"
for (qname in qnames_all) {
cfg = cfgs[[qname]]
# Query genome size with and without assembly gaps (parallel to target setup).
tlen = read.table(cfg$size, sep = "\t", header = F, as.is = T)
grt = with(tlen, GRanges(seqnames = V1, ranges = IRanges(1, end = V2)))
tgap = read.table(cfg$gap, sep = "\t", header = F, as.is = T)
grp = with(tgap, GRanges(seqnames = V1, ranges = IRanges(V2, end = V3)))
grnp = GenomicRanges::setdiff(grt, grp)
qsize = sum(width(grt))
qsize2 = sum(width(grnp))
# Query genes: CDS rows collapsed to one span per gene id.
qg = read.table(cfg$gene, sep = "\t", header = F, as.is = T)
colnames(qg) = c('chr', 'beg', 'end', 'srd', 'id', 'type', 'fam')
qg = qg[qg$type == 'cds',]
gb = group_by(qg, id)
qg = summarise(gb, fam = fam[1], chr = chr[1], beg = min(beg), end = max(end), size = end - beg + 1)
grgq = with(qg, GRanges(seqnames = chr, ranges = IRanges(beg, end = end)))
cdir = cfg$cdir
# Alignment blocks (gal): total aligned length on target (ytlen) and query (yqlen).
fy = file.path(cdir, "31.9/gal")
ty = read.table(fy, header = T, sep = "\t", as.is = T)[,c('tId', 'tBeg', 'tEnd', 'qId', 'qBeg', 'qEnd')]
gryt = with(ty, GRanges(seqnames = tId, ranges = IRanges(tBeg, end = tEnd)))
gryq = with(ty, GRanges(seqnames = qId, ranges = IRanges(qBeg, end = qEnd)))
ytlen = sum(width(reduce(gryt)))
yqlen = sum(width(reduce(gryq)))
# SV table: keep events with combined target+query length >= 50 bp
# (the -2 presumably discounts the flanking anchor base on each side -- confirm).
fv = file.path(cdir, "../31_sv/05.stb")
tv = read.table(fv, header = T, sep = "\t", as.is = T)[,c('id','tchr','tbeg','tend','tlen','srd','qchr','qbeg','qend','qlen')]
tv = tv[tv$tlen+tv$qlen-2 >= 50,]
tvt = tv[tv$tlen > 0, c('tchr','tbeg','tend')]
grst = with(tvt, GRanges(seqnames = tchr, ranges = IRanges(tbeg, end = tend)))
tvq = tv[tv$qlen > 0, c('qchr','qbeg','qend')]
grsq = with(tvq, GRanges(seqnames = qchr, ranges = IRanges(qbeg, end = qend)))
# Non-redundant SV footprint on each genome.
stlen = sum(width(reduce(grst)))
sqlen = sum(width(reduce(grsq)))
# Per-gene SV overlap counts (target side); tpct = fraction of genes hit.
tocnt = intersect_count(grgt, grst)
tg2 = cbind(tg, cnt = tocnt)
tgb = group_by(tg2, id)
x = summarise(tgb, cnt = sum(cnt))
tpct = sum(x$cnt > 0) / nrow(x)
# Same for the query side.
qocnt = intersect_count(grgq, grsq)
qg2 = cbind(qg, cnt = qocnt)
qgb = group_by(qg2, id)
x = summarise(qgb, cnt = sum(cnt))
qpct = sum(x$cnt > 0) / nrow(x)
cat(sprintf("%s: genome [tgt %.03f qry %.03f] gene [tgt %.03f qry %.03f]\n", qname, stlen/ytlen, sqlen/yqlen, tpct, qpct))
}
#### SV impact on genes / gene family
# Per-accession inspection of which SV events hit the most genes (with special
# attention to NCR / CRP families), then a per-family summary plot.
do = data.frame()
for (qname in qnames_all[1:12]) {
# NOTE(review): this line clobbers the loop variable, so every iteration runs
# on "HM340.AC" -- looks like a leftover interactive-debugging line; confirm.
qname = "HM340.AC"
cfg = cfgs[[qname]]
tlen = read.table(cfg$size, sep = "\t", header = F, as.is = T)
grt = with(tlen, GRanges(seqnames = V1, ranges = IRanges(1, end = V2)))
tgap = read.table(cfg$gap, sep = "\t", header = F, as.is = T)
grp = with(tgap, GRanges(seqnames = V1, ranges = IRanges(V2, end = V3)))
grnp = GenomicRanges::setdiff(grt, grp)
qsize = sum(width(grt))
qsize2 = sum(width(grnp))
# Query gene spans (CDS rows collapsed per gene), with a row index for joins.
qg = read.table(cfg$gene, sep = "\t", header = F, as.is = T)
colnames(qg) = c('chr', 'beg', 'end', 'srd', 'id', 'type', 'fam')
qg = qg[qg$type == 'cds',]
gb = group_by(qg, id)
qg = summarise(gb, fam = fam[1], chr = chr[1], beg = min(beg), end = max(end), size = end - beg + 1)
qg = cbind(idx = 1:nrow(qg), qg)
grgq = with(qg, GRanges(seqnames = chr, ranges = IRanges(beg, end = end)))
cdir = cfg$cdir
#fy = file.path(cdir, "31.9/idm")
#ty = read.table(fy, header = F, sep = "\t", as.is = T)
#colnames(ty) = c("tchr", 'tbeg', 'tend', 'tsrd', 'qchr', 'qbeg', 'qend', 'qsrd', 'cid', 'lev')
#ty = ty[ty$lev == 1,]
fv = file.path(cdir, "../31_sv/05.stb")
tv = read.table(fv, header = T, sep = "\t", as.is = T)[,c('id','tchr','tbeg','tend','tlen','srd','qchr','qbeg','qend','qlen')]
tv = tv[tv$tlen+tv$qlen-2 >= 50,]
# SV intervals on the target side; +1/-1 trims the anchor base at each end.
tvt = tv[tv$tlen > 0, c('tchr','tbeg','tend')]
tvt = cbind(idx = 1:nrow(tvt), tvt)
grst = with(tvt, GRanges(seqnames = tchr, ranges = IRanges(tbeg+1, end = tend-1)))
tvq = tv[tv$qlen > 0, c('qchr','qbeg','qend','tchr','tbeg','tend')]
tvq = cbind(idx = 1:nrow(tvq), tvq)
grsq = with(tvq, GRanges(seqnames = qchr, ranges = IRanges(qbeg+1, end = qend-1)))
# Target side: join SV/gene overlaps, count genes (and NCR/CRP genes) per SV,
# then display the 30 SVs hitting the most NCR/CRP genes (interactive output).
x = intersect_idx(grst, grgt)
y = merge(x, tg[,c('idx','id','fam','size')], by.x = 'qidx', by.y = 'idx')
gb = group_by(y, idx)
z = summarize(gb, ngene = n(), n2 = sum(fam %in% c("NCR","CRP0000-1030")))
z = merge(z, tvt, by = 'idx')
z[order(z$n2, decreasing = T), ][1:30,]
# Same inspection on the query side.
x = intersect_idx(grsq, grgq)
y = merge(x, qg[,c('idx','id','fam','size')], by.x = 'qidx', by.y = 'idx')
gb = group_by(y, idx)
z = summarize(gb, ngene = n(), n2 = sum(fam %in% c("NCR","CRP0000-1030")))
z = merge(z, tvq, by = 'idx')
z[order(z$n2, decreasing = T), ][1:30,]
# NOTE(review): `ds` is never created in this loop -- the per-family proportion
# table it is expected to hold (with a `prop` column, see ddply below) must have
# been computed by code now missing/commented out; this will error as written.
ds = cbind(ds, org = qname)
do = rbind(do, ds)
}
# Per-family quartiles of the SV-overlap proportion across accessions.
to = ddply(do, .(fam), summarise, q25 = quantile(prop, 0.25), q50 = quantile(prop, 0.5), q75 = quantile(prop, 0.75))
ffam = file.path(diro, "41.gene.fams.tbl")
fams = read.table(ffam, header = F, sep = "\t", as.is = T)[,1]
ti = to
# Keep families of interest, order by median proportion for the plot.
tis = ti[ti$fam %in% fams,]
tis = tis[order(tis$q50, decreasing = T),]
tis$fam = factor(tis$fam, levels = tis$fam)
p4 = ggplot(tis) +
geom_crossbar(aes(x = fam, y = q50, ymin = q25, ymax = q75),
stat = 'identity', position = 'dodge', geom_params = list(width = 0.7, size = 0.3)) +
coord_flip() +
scale_x_discrete(name = '', expand = c(0.01, 0.01)) +
scale_y_continuous(name = 'Proportion SV', expand = c(0, 0)) +
theme_bw() +
# theme(axis.ticks.y = element_blank(), axis.line.y = element_blank()) +
theme(plot.margin = unit(c(0.5,0.5,0,0), "lines")) +
theme(axis.title.y = element_blank()) +
theme(axis.title.x = element_text(size = 9)) +
theme(axis.text.x = element_text(size = 8, colour = "black", angle = 0)) +
theme(axis.text.y = element_text(size = 8, colour = "royalblue", angle = 0, hjust = 1))
# NOTE(review): `dirw` is not defined anywhere in this script (only `diro` is);
# confirm the intended output directory.
fp = file.path(dirw, "19_sv_genefam.pdf")
ggsave(p4, filename = fp, width = 5, height = 4)
|
# ---- 0. Load dependencies
message('Loading script dependencies...\n')
# Suppress package load messages to ensure logs are not cluttered
suppressMessages({
library(minfi, quietly=TRUE)
library(data.table, quietly=TRUE)
library(qs, quietly=TRUE)
library(wateRmelon, quietly=TRUE)
})
# ---- 0. Parse Snakemake arguments
input <- snakemake@input
output <- snakemake@output
# ---- 1. Read in RGChannelSet
message(paste0('Reading in RGChannelSet from ', input$rgset, '...\n'))
rgSet <- qread(input$rgset)
# ---- 2. Get the bead count for each probe in each sample
message('Extracting bead count matrix...\n')
beadCountMatrix <- beadcount(rgSet)
message('Saving bead counts to: ', output$bead_counts, '...\n')
beadCountDT <- data.table(beadCountMatrix, keep.rownames='rownames')
fwrite(beadCountDT, file=output$bead_counts)
message("Filtering RGChannelSet to probes with more than three beads...\n")
perCount <- 1 / ncol(rgSet)
rgSetFiltered <- pfilter(mn=rgSet, un=rgSet,
bc=beadCountMatrix, perCount=perCount,
perc=10.1, pthresh=100,
logical.return=TRUE)
print(rgSetFiltered)
message(paste0("Saving filtered RGChannelSet to: ", output$rgset_filtered, '...\n'))
qsave(rgSetFiltered, file=output$rgset_filtered)
message("Done!\n\n")
|
/scripts/5_dropProbesWithLessThan3Beads.R
|
no_license
|
bhklab/illuminaEPICmethylation
|
R
| false
| false
| 1,316
|
r
|
# Snakemake-driven script: filter an Illumina methylation RGChannelSet,
# dropping probes supported by fewer than three beads (via wateRmelon::pfilter).
# Inputs/outputs are supplied by the Snakemake rule that invokes this script.
# ---- 0. Load dependencies
message('Loading script dependencies...\n')
# Suppress package load messages to ensure logs are not cluttered
suppressMessages({
library(minfi, quietly=TRUE)
library(data.table, quietly=TRUE)
library(qs, quietly=TRUE)
library(wateRmelon, quietly=TRUE)
})
# ---- 0. Parse Snakemake arguments
# `snakemake` is injected into the environment by Snakemake's script directive.
input <- snakemake@input
output <- snakemake@output
# ---- 1. Read in RGChannelSet
message(paste0('Reading in RGChannelSet from ', input$rgset, '...\n'))
rgSet <- qread(input$rgset)
# ---- 2. Get the bead count for each probe in each sample
message('Extracting bead count matrix...\n')
beadCountMatrix <- beadcount(rgSet)
# Persist the raw bead counts (probe rownames kept as a column) for QC review.
message('Saving bead counts to: ', output$bead_counts, '...\n')
beadCountDT <- data.table(beadCountMatrix, keep.rownames='rownames')
fwrite(beadCountDT, file=output$bead_counts)
message("Filtering RGChannelSet to probes with more than three beads...\n")
# perCount scales the bead-count threshold to a single sample's worth of data.
perCount <- 1 / ncol(rgSet)
# NOTE(review): perc=10.1 and pthresh=100 appear chosen so that only the
# bead-count criterion effectively removes probes -- confirm against the
# wateRmelon::pfilter documentation before changing.
rgSetFiltered <- pfilter(mn=rgSet, un=rgSet,
bc=beadCountMatrix, perCount=perCount,
perc=10.1, pthresh=100,
logical.return=TRUE)
print(rgSetFiltered)
message(paste0("Saving filtered RGChannelSet to: ", output$rgset_filtered, '...\n'))
qsave(rgSetFiltered, file=output$rgset_filtered)
message("Done!\n\n")
|
#!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!#
#!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!#
#!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!#
#!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!#
#!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!#
#
# This software was authored by Zhian N. Kamvar and Javier F. Tabima, graduate
# students at Oregon State University; Jonah C. Brooks, undergraduate student at
# Oregon State University; and Dr. Nik Grünwald, an employee of USDA-ARS.
#
# Permission to use, copy, modify, and distribute this software and its
# documentation for educational, research and non-profit purposes, without fee,
# and without a written agreement is hereby granted, provided that the statement
# above is incorporated into the material, giving appropriate attribution to the
# authors.
#
# Permission to incorporate this software into commercial products may be
# obtained by contacting USDA ARS and OREGON STATE UNIVERSITY Office for
# Commercialization and Corporate Development.
#
# The software program and documentation are supplied "as is", without any
# accompanying services from the USDA or the University. USDA ARS or the
# University do not warrant that the operation of the program will be
# uninterrupted or error-free. The end-user understands that the program was
# developed for research purposes and is advised not to rely exclusively on the
# program for any reason.
#
# IN NO EVENT SHALL USDA ARS OR OREGON STATE UNIVERSITY BE LIABLE TO ANY PARTY
# FOR DIRECT, INDIRECT, SPECIAL, INCIDENTAL, OR CONSEQUENTIAL DAMAGES, INCLUDING
# LOST PROFITS, ARISING OUT OF THE USE OF THIS SOFTWARE AND ITS DOCUMENTATION,
# EVEN IF THE OREGON STATE UNIVERSITY HAS BEEN ADVISED OF THE POSSIBILITY OF
# SUCH DAMAGE. USDA ARS OR OREGON STATE UNIVERSITY SPECIFICALLY DISCLAIMS ANY
# WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
# MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE AND ANY STATUTORY
# WARRANTY OF NON-INFRINGEMENT. THE SOFTWARE PROVIDED HEREUNDER IS ON AN "AS IS"
# BASIS, AND USDA ARS AND OREGON STATE UNIVERSITY HAVE NO OBLIGATIONS TO PROVIDE
# MAINTENANCE, SUPPORT, UPDATES, ENHANCEMENTS, OR MODIFICATIONS.
#
#!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!#
#!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!#
#!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!#
#!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!#
#!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!#
#==============================================================================#
#' Utilize all algorithms of mlg.filter
#'
#' This function is a wrapper to mlg.filter. It will calculate all of the stats
#' for mlg.filter utilizing all of the algorithms.
#'
#' @param x a \code{\link{genind}}, \code{\link{genclone}},
#' \code{\link{genlight}}, or \code{\link{snpclone}} object
#' @param distance a distance function or matrix
#' @param threshold a threshold to be passed to \code{\link{mlg.filter}}
#' (Default: 1e6)
#' @param stats what statistics should be calculated.
#' @param missing how to treat missing data with mlg.filter
#' @param plot If the threshold is a maximum threshold, should the statistics be
#' plotted (Figure 2)
#' @param cols the colors to use for each algorithm (defaults to set1 of
#' \pkg{RColorBrewer}).
#' @param nclone the number of multilocus genotypes you expect for the data.
#' This will draw horizontal line on the graph at the value nclone and then
#' vertical lines showing the cutoff thresholds for each algorithm.
#' @param hist if you want a histogram to be plotted behind the statistics,
#' select a method here. Available methods are "sturges", "fd", or "scott"
#' (default) as documented in \code{\link[graphics]{hist}}. If you don't want
#' to plot the histogram, set \code{hist = NULL}.
#' @param threads the number of threads to use. Passed on to \code{\link{mlg.filter}}.
#' @param ... extra parameters passed on to the distance function.
#'
#' @return a list of results from mlg.filter from the three
#' algorithms. (returns invisibly if \code{plot = TRUE})
#' @export
#' @seealso \code{\link{mlg.filter}} \code{\link{cutoff_predictor}}
#' \code{\link{bitwise.dist}} \code{\link{diss.dist}}
#' @note This function originally appeared in
#' \href{http://dx.doi.org/10.5281/zenodo.17424}{DOI: 10.5281/zenodo.17424}
#' @references ZN Kamvar, JC Brooks, and NJ Grünwald. 2015. Supplementary
#' Material for Frontiers Plant Genetics and Genomics 'Novel R tools for
#' analysis of genome-wide population genetic data with emphasis on
#' clonality'. DOI:
#' \href{http://dx.doi.org/10.5281/zenodo.17424}{10.5281/zenodo.17424}
#'
#' Kamvar ZN, Brooks JC and Grünwald NJ (2015) Novel R tools for analysis of
#' genome-wide population genetic data with emphasis on clonality. Front.
#' Genet. 6:208. doi:
#' \href{http://dx.doi.org/10.3389/fgene.2015.00208}{10.3389/fgene.2015.00208}
#'
#' @author Zhian N. Kamvar, Jonah C. Brooks
#' @examples
#' data(Pinf)
#' pinfreps <- fix_replen(Pinf, c(2, 2, 6, 2, 2, 2, 2, 2, 3, 3, 2))
#' filter_stats(Pinf, distance = bruvo.dist, replen = pinfreps, plot = TRUE, threads = 1L)
#==============================================================================#
filter_stats <- function(x, distance = bitwise.dist,
threshold = 1e6 + .Machine$double.eps^0.5,
stats = "All", missing = "ignore", plot = FALSE,
cols = NULL, nclone = NULL, hist = "Scott",
threads = 0L, ...){
if (!inherits(distance, "dist")){
DIST <- match.fun(distance)
if (inherits(x, "genind")){
x <- missingno(x, type = missing)
}
distmat <- DIST(x, ...)
} else {
distmat <- distance
}
f <- mlg.filter(x, threshold, missing, algorithm = "f", distance = distmat,
stats = stats, threads = threads, ...)
a <- mlg.filter(x, threshold, missing, algorithm = "a", distance = distmat,
stats = stats, threads = threads, ...)
n <- mlg.filter(x, threshold, missing, algorithm = "n", distance = distmat,
stats = stats, threads = threads, ...)
fanlist <- list(farthest = f, average = a, nearest = n)
if (stats == "All"){
if (plot){
plot_filter_stats(x, fanlist, distmat, cols, nclone, hist)
return(invisible(fanlist))
}
}
return(fanlist)
}
#==============================================================================#
#' Predict cutoff thresholds for use with mlg.filter
#'
#' Given a series of thresholds for a data set that collapse it into one giant
#' cluster, this will search the top fraction of threshold differences to find
#' the largest difference. The average between the thresholds spanning that
#' difference is the cutoff threshold defining the clonal lineage threshold.
#'
#' @param thresholds a vector of numerics coming from mlg.filter where the
#' threshold has been set to the maximum threshold theoretically possible.
#' @param fraction the fraction of the data to seek the threshold.
#'
#' @return a numeric value representing the threshold at which multilocus
#' lineages should be defined.
#' @seealso \code{\link{filter_stats}} \code{\link{mlg.filter}}
#' @note This function originally appeared in
#' \href{http://dx.doi.org/10.5281/zenodo.17424}{DOI: 10.5281/zenodo.17424}.
#' This is a bit of a blunt instrument.
#' @export
#' @references ZN Kamvar, JC Brooks, and NJ Grünwald. 2015. Supplementary
#' Material for Frontiers Plant Genetics and Genomics 'Novel R tools for
#' analysis of genome-wide population genetic data with emphasis on clonality'.
#' DOI: \href{http://dx.doi.org/10.5281/zenodo.17424}{10.5281/zenodo.17424}
#'
#' Kamvar ZN, Brooks JC and Grünwald NJ (2015) Novel R tools for analysis of
#' genome-wide population genetic data with emphasis on clonality. Front. Genet.
#' 6:208. doi:
#' \href{http://dx.doi.org/10.3389/fgene.2015.00208}{10.3389/fgene.2015.00208}
#'
#' @author Zhian N. Kamvar
#' @examples
#'
#' data(Pinf)
#' pinfreps <- fix_replen(Pinf, c(2, 2, 6, 2, 2, 2, 2, 2, 3, 3, 2))
#' pthresh <- filter_stats(Pinf, distance = bruvo.dist, replen = pinfreps,
#' plot = TRUE, stats = "THRESHOLD", threads = 1L)
#'
#' # prediction for farthest neighbor
#' cutoff_predictor(pthresh$farthest)
#'
#' # prediction for all algorithms
#' sapply(pthresh, cutoff_predictor)
#'
#==============================================================================#
cutoff_predictor <- function(thresholds, fraction = 0.5){
frac <- 1:round(length(thresholds)*fraction)
diffs <- diff(thresholds[frac])
diffmax <- which.max(diffs)
mean(thresholds[diffmax:(diffmax + 1)])
}
#==============================================================================#
#' Plot the results of filter_stats
#'
#' @param x a genlight of genind object
#' @param fstats the list passed from \code{\link{filter_stats}}
#' @param distmat a distance matrix passed from \code{\link{filter_stats}}
#' @param cols colors to use for each algorithm (defaults to \pkg{RColorBrewer}
#' set 1)
#' @param nclone see \code{\link{filter_stats}}
#'
#' @return a plot depicting how many MLLs are collapsed as the genetic distance
#' increases for each algorithm.
#' @export
#' @seealso \code{\link{filter_stats}}
#' @note This function originally appeared in
#' \href{http://dx.doi.org/10.5281/zenodo.17424}{DOI: 10.5281/zenodo.17424}
#' @author Zhian N. Kamvar
#' @references ZN Kamvar, JC Brooks, and NJ Grünwald. 2015. Supplementary
#' Material for Frontiers Plant Genetics and Genomics 'Novel R tools for
#' analysis of genome-wide population genetic data with emphasis on clonality'.
#' DOI: \href{http://dx.doi.org/10.5281/zenodo.17424}{10.5281/zenodo.17424}
#'
#' Kamvar ZN, Brooks JC and Grünwald NJ (2015) Novel R tools for analysis of
#' genome-wide population genetic data with emphasis on clonality. Front. Genet.
#' 6:208. doi:
#' \href{http://dx.doi.org/10.3389/fgene.2015.00208}{10.3389/fgene.2015.00208}
#'
#' @keywords internal
#==============================================================================#
plot_filter_stats <- function(x, fstats, distmat, cols = NULL, nclone = NULL, breaks = NULL){
upper <- round(max(distmat), digits = 1)
ylims <- c(ifelse(is.genind(x), suppressWarnings(nmll(x, "original")), nInd(x)), 1)
if (!is.null(breaks)){
graphics::hist(distmat, breaks = breaks, xlab = "", ylab = "", axes = FALSE,
xlim = c(0, upper), main = "")
par(new = TRUE)
}
plot(x = c(upper, 0), y = ylims, type = "n",
ylab = "Number of Multilocus Lineages",
xlab = "Genetic Distance Cutoff")
a <- fstats$average$THRESHOLDS
n <- fstats$nearest$THRESHOLDS
f <- fstats$farthest$THRESHOLDS
plotcols <- c("#E41A1C", "#377EB8", "#4DAF4A") # RColorBrewer::brewer.pal(3, "Set1")
names(plotcols) <- c("f", "a", "n")
points(x = rev(a), y = 1:length(a), col = plotcols["a"])
points(x = rev(f), y = 1:length(f), col = plotcols["f"])
points(x = rev(n), y = 1:length(n), col = plotcols["n"])
if (!is.null(nclone)){
abline(v = a[1 + length(a) - nclone] + .Machine$double.eps^0.5, lty = 2,
col = plotcols["a"])
abline(v = f[1 + length(f) - nclone] + .Machine$double.eps^0.5, lty = 2,
col = plotcols["f"])
abline(v = n[1 + length(n) - nclone] + .Machine$double.eps^0.5, lty = 2,
col = plotcols["n"])
abline(h = nclone)
text(upper, nclone, labels = paste0("n = ", nclone),
adj = c(1, -0.5))
legend("topright",
legend = c("Nearest Neighbor", "UPGMA", "Farthest Neighbor"),
col = plotcols[c("n", "a", "f")],
pch = 1,
lty = 2,
title = "Clustering Method")
} else {
legend("topright",
legend = c("Nearest Neighbor", "UPGMA", "Farthest Neighbor"),
col = plotcols[c("n", "a", "f")],
pch = 1,
title = "Clustering Method")
}
}
|
/R/filter_stats.R
|
no_license
|
knausb/poppr
|
R
| false
| false
| 12,341
|
r
|
#!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!#
#!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!#
#!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!#
#!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!#
#!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!#
#
# This software was authored by Zhian N. Kamvar and Javier F. Tabima, graduate
# students at Oregon State University; Jonah C. Brooks, undergraduate student at
# Oregon State University; and Dr. Nik Grünwald, an employee of USDA-ARS.
#
# Permission to use, copy, modify, and distribute this software and its
# documentation for educational, research and non-profit purposes, without fee,
# and without a written agreement is hereby granted, provided that the statement
# above is incorporated into the material, giving appropriate attribution to the
# authors.
#
# Permission to incorporate this software into commercial products may be
# obtained by contacting USDA ARS and OREGON STATE UNIVERSITY Office for
# Commercialization and Corporate Development.
#
# The software program and documentation are supplied "as is", without any
# accompanying services from the USDA or the University. USDA ARS or the
# University do not warrant that the operation of the program will be
# uninterrupted or error-free. The end-user understands that the program was
# developed for research purposes and is advised not to rely exclusively on the
# program for any reason.
#
# IN NO EVENT SHALL USDA ARS OR OREGON STATE UNIVERSITY BE LIABLE TO ANY PARTY
# FOR DIRECT, INDIRECT, SPECIAL, INCIDENTAL, OR CONSEQUENTIAL DAMAGES, INCLUDING
# LOST PROFITS, ARISING OUT OF THE USE OF THIS SOFTWARE AND ITS DOCUMENTATION,
# EVEN IF THE OREGON STATE UNIVERSITY HAS BEEN ADVISED OF THE POSSIBILITY OF
# SUCH DAMAGE. USDA ARS OR OREGON STATE UNIVERSITY SPECIFICALLY DISCLAIMS ANY
# WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
# MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE AND ANY STATUTORY
# WARRANTY OF NON-INFRINGEMENT. THE SOFTWARE PROVIDED HEREUNDER IS ON AN "AS IS"
# BASIS, AND USDA ARS AND OREGON STATE UNIVERSITY HAVE NO OBLIGATIONS TO PROVIDE
# MAINTENANCE, SUPPORT, UPDATES, ENHANCEMENTS, OR MODIFICATIONS.
#
#!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!#
#!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!#
#!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!#
#!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!#
#!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!#
#==============================================================================#
#' Utilize all algorithms of mlg.filter
#'
#' This function is a wrapper to mlg.filter. It will calculate all of the stats
#' for mlg.filter utilizing all of the algorithms.
#'
#' @param x a \code{\link{genind}}, \code{\link{genclone}},
#' \code{\link{genlight}}, or \code{\link{snpclone}} object
#' @param distance a distance function or matrix
#' @param threshold a threshold to be passed to \code{\link{mlg.filter}}
#' (Default: 1e6)
#' @param stats what statistics should be calculated.
#' @param missing how to treat missing data with mlg.filter
#' @param plot If the threshold is a maximum threshold, should the statistics be
#' plotted (Figure 2)
#' @param cols the colors to use for each algorithm (defaults to set1 of
#' \pkg{RColorBrewer}).
#' @param nclone the number of multilocus genotypes you expect for the data.
#' This will draw horizontal line on the graph at the value nclone and then
#' vertical lines showing the cutoff thresholds for each algorithm.
#' @param hist if you want a histogram to be plotted behind the statistics,
#' select a method here. Available methods are "sturges", "fd", or "scott"
#' (default) as documented in \code{\link[graphics]{hist}}. If you don't want
#' to plot the histogram, set \code{hist = NULL}.
#' @param threads the number of threads to use. Passed on to \code{\link{mlg.filter}}.
#' @param ... extra parameters passed on to the distance function.
#'
#' @return a list of results from mlg.filter from the three
#' algorithms. (returns invisibly if \code{plot = TRUE})
#' @export
#' @seealso \code{\link{mlg.filter}} \code{\link{cutoff_predictor}}
#' \code{\link{bitwise.dist}} \code{\link{diss.dist}}
#' @note This function originally appeared in
#' \href{http://dx.doi.org/10.5281/zenodo.17424}{DOI: 10.5281/zenodo.17424}
#' @references ZN Kamvar, JC Brooks, and NJ Grünwald. 2015. Supplementary
#' Material for Frontiers Plant Genetics and Genomics 'Novel R tools for
#' analysis of genome-wide population genetic data with emphasis on
#' clonality'. DOI:
#' \href{http://dx.doi.org/10.5281/zenodo.17424}{10.5281/zenodo.17424}
#'
#' Kamvar ZN, Brooks JC and Grünwald NJ (2015) Novel R tools for analysis of
#' genome-wide population genetic data with emphasis on clonality. Front.
#' Genet. 6:208. doi:
#' \href{http://dx.doi.org/10.3389/fgene.2015.00208}{10.3389/fgene.2015.00208}
#'
#' @author Zhian N. Kamvar, Jonah C. Brooks
#' @examples
#' data(Pinf)
#' pinfreps <- fix_replen(Pinf, c(2, 2, 6, 2, 2, 2, 2, 2, 3, 3, 2))
#' filter_stats(Pinf, distance = bruvo.dist, replen = pinfreps, plot = TRUE, threads = 1L)
#==============================================================================#
filter_stats <- function(x, distance = bitwise.dist,
                         threshold = 1e6 + .Machine$double.eps^0.5,
                         stats = "All", missing = "ignore", plot = FALSE,
                         cols = NULL, nclone = NULL, hist = "Scott",
                         threads = 0L, ...) {
  # Resolve the distance argument: a precomputed "dist" object is used as-is;
  # anything else is treated as a distance function applied to x (after
  # handling missing data for genind objects).
  if (inherits(distance, "dist")) {
    distmat <- distance
  } else {
    dist_fun <- match.fun(distance)
    if (inherits(x, "genind")) {
      x <- missingno(x, type = missing)
    }
    distmat <- dist_fun(x, ...)
  }
  # Run mlg.filter once for each clustering algorithm: farthest neighbor,
  # average (UPGMA), and nearest neighbor. Names are carried over from the
  # algorithm map so the result is list(farthest = , average = , nearest = ).
  algo_map <- c(farthest = "f", average = "a", nearest = "n")
  fanlist <- lapply(algo_map, function(algo) {
    mlg.filter(x, threshold, missing, algorithm = algo, distance = distmat,
               stats = stats, threads = threads, ...)
  })
  # When the full statistics were requested and plotting is on, draw the
  # collapse curves and return the results invisibly.
  if (stats == "All") {
    if (plot) {
      plot_filter_stats(x, fanlist, distmat, cols, nclone, hist)
      return(invisible(fanlist))
    }
  }
  fanlist
}
#==============================================================================#
#' Predict cutoff thresholds for use with mlg.filter
#'
#' Given a series of thresholds for a data set that collapse it into one giant
#' cluster, this will search the top fraction of threshold differences to find
#' the largest difference. The average between the thresholds spanning that
#' difference is the cutoff threshold defining the clonal lineage threshold.
#'
#' @param thresholds a vector of numerics coming from mlg.filter where the
#' threshold has been set to the maximum threshold theoretically possible.
#' @param fraction the fraction of the data to seek the threshold.
#'
#' @return a numeric value representing the threshold at which multilocus
#' lineages should be defined.
#' @seealso \code{\link{filter_stats}} \code{\link{mlg.filter}}
#' @note This function originally appeared in
#' \href{http://dx.doi.org/10.5281/zenodo.17424}{DOI: 10.5281/zenodo.17424}.
#' This is a bit of a blunt instrument.
#' @export
#' @references ZN Kamvar, JC Brooks, and NJ Grünwald. 2015. Supplementary
#' Material for Frontiers Plant Genetics and Genomics 'Novel R tools for
#' analysis of genome-wide population genetic data with emphasis on clonality'.
#' DOI: \href{http://dx.doi.org/10.5281/zenodo.17424}{10.5281/zenodo.17424}
#'
#' Kamvar ZN, Brooks JC and Grünwald NJ (2015) Novel R tools for analysis of
#' genome-wide population genetic data with emphasis on clonality. Front. Genet.
#' 6:208. doi:
#' \href{http://dx.doi.org/10.3389/fgene.2015.00208}{10.3389/fgene.2015.00208}
#'
#' @author Zhian N. Kamvar
#' @examples
#'
#' data(Pinf)
#' pinfreps <- fix_replen(Pinf, c(2, 2, 6, 2, 2, 2, 2, 2, 3, 3, 2))
#' pthresh <- filter_stats(Pinf, distance = bruvo.dist, replen = pinfreps,
#' plot = TRUE, stats = "THRESHOLD", threads = 1L)
#'
#' # prediction for farthest neighbor
#' cutoff_predictor(pthresh$farthest)
#'
#' # prediction for all algorithms
#' sapply(pthresh, cutoff_predictor)
#'
#==============================================================================#
# Predict the clonal-lineage cutoff from a sorted vector of collapse thresholds.
# Searches the lowest `fraction` of the thresholds for the largest jump and
# returns the midpoint of the two thresholds spanning that jump.
#
# thresholds: numeric vector from mlg.filter run at the maximum threshold.
# fraction:   fraction of the thresholds (from the start) to search.
cutoff_predictor <- function(thresholds, fraction = 0.5){
  # seq_len() (not 1:n) so a zero-length window cannot silently become c(1, 0);
  # with fewer than two thresholds in the window, no difference exists and the
  # original code failed with an opaque `:` error -- fail informatively instead.
  n_window <- round(length(thresholds) * fraction)
  if (n_window < 2) {
    stop("`thresholds` must contain at least two values within `fraction`",
         call. = FALSE)
  }
  frac <- seq_len(n_window)
  diffs <- diff(thresholds[frac])
  # Index of the largest gap between consecutive thresholds in the window.
  diffmax <- which.max(diffs)
  # Midpoint of the two thresholds spanning the largest gap.
  mean(thresholds[diffmax:(diffmax + 1)])
}
#==============================================================================#
#' Plot the results of filter_stats
#'
#' @param x a genlight of genind object
#' @param fstats the list passed from \code{\link{filter_stats}}
#' @param distmat a distance matrix passed from \code{\link{filter_stats}}
#' @param cols colors to use for each algorithm (defaults to \pkg{RColorBrewer}
#' set 1)
#' @param nclone see \code{\link{filter_stats}}
#'
#' @return a plot depicting how many MLLs are collapsed as the genetic distance
#' increases for each algorithm.
#' @export
#' @seealso \code{\link{filter_stats}}
#' @note This function originally appeared in
#' \href{http://dx.doi.org/10.5281/zenodo.17424}{DOI: 10.5281/zenodo.17424}
#' @author Zhian N. Kamvar
#' @references ZN Kamvar, JC Brooks, and NJ Grünwald. 2015. Supplementary
#' Material for Frontiers Plant Genetics and Genomics 'Novel R tools for
#' analysis of genome-wide population genetic data with emphasis on clonality'.
#' DOI: \href{http://dx.doi.org/10.5281/zenodo.17424}{10.5281/zenodo.17424}
#'
#' Kamvar ZN, Brooks JC and Grünwald NJ (2015) Novel R tools for analysis of
#' genome-wide population genetic data with emphasis on clonality. Front. Genet.
#' 6:208. doi:
#' \href{http://dx.doi.org/10.3389/fgene.2015.00208}{10.3389/fgene.2015.00208}
#'
#' @keywords internal
#==============================================================================#
# Plot the number of multilocus lineages retained as the genetic distance
# cutoff increases, for all three mlg.filter algorithms.
#
# x:       genind/genlight object (used only to determine the starting MLL count).
# fstats:  list from filter_stats() with $farthest/$average/$nearest$THRESHOLDS.
# distmat: the distance matrix used by filter_stats().
# cols:    optional vector of 3 colors for farthest/average/nearest; recycled
#          if shorter. Defaults to RColorBrewer Set1. (Previously this argument
#          was documented but silently ignored -- it is now honored.)
# nclone:  optional expected number of MLGs; draws cutoff guide lines.
# breaks:  optional histogram `breaks` method drawn behind the curves.
plot_filter_stats <- function(x, fstats, distmat, cols = NULL, nclone = NULL, breaks = NULL){
  upper <- round(max(distmat), digits = 1)
  # y runs from the original number of MLLs (or individuals) down to 1.
  ylims <- c(ifelse(is.genind(x), suppressWarnings(nmll(x, "original")), nInd(x)), 1)
  if (!is.null(breaks)){
    # Background histogram of pairwise distances, drawn first so the
    # threshold curves overlay it.
    graphics::hist(distmat, breaks = breaks, xlab = "", ylab = "", axes = FALSE,
                   xlim = c(0, upper), main = "")
    par(new = TRUE)
  }
  plot(x = c(upper, 0), y = ylims, type = "n",
       ylab = "Number of Multilocus Lineages",
       xlab = "Genetic Distance Cutoff")
  a <- fstats$average$THRESHOLDS
  n <- fstats$nearest$THRESHOLDS
  f <- fstats$farthest$THRESHOLDS
  # Honor user-supplied colors; fall back to RColorBrewer Set1 defaults.
  if (is.null(cols)) {
    plotcols <- c("#E41A1C", "#377EB8", "#4DAF4A") # RColorBrewer::brewer.pal(3, "Set1")
  } else {
    plotcols <- rep_len(cols, 3)
  }
  names(plotcols) <- c("f", "a", "n")
  points(x = rev(a), y = 1:length(a), col = plotcols["a"])
  points(x = rev(f), y = 1:length(f), col = plotcols["f"])
  points(x = rev(n), y = 1:length(n), col = plotcols["n"])
  if (!is.null(nclone)){
    # Vertical guides at the cutoff that yields `nclone` lineages per algorithm;
    # the epsilon nudges the line just past the exact threshold.
    abline(v = a[1 + length(a) - nclone] + .Machine$double.eps^0.5, lty = 2,
           col = plotcols["a"])
    abline(v = f[1 + length(f) - nclone] + .Machine$double.eps^0.5, lty = 2,
           col = plotcols["f"])
    abline(v = n[1 + length(n) - nclone] + .Machine$double.eps^0.5, lty = 2,
           col = plotcols["n"])
    abline(h = nclone)
    text(upper, nclone, labels = paste0("n = ", nclone),
         adj = c(1, -0.5))
    legend("topright",
           legend = c("Nearest Neighbor", "UPGMA", "Farthest Neighbor"),
           col = plotcols[c("n", "a", "f")],
           pch = 1,
           lty = 2,
           title = "Clustering Method")
  } else {
    legend("topright",
           legend = c("Nearest Neighbor", "UPGMA", "Farthest Neighbor"),
           col = plotcols[c("n", "a", "f")],
           pch = 1,
           title = "Clustering Method")
  }
}
|
# Read files test: collect relative sub-directory names under `sy`.
stuff <- list.dirs(sy, recursive = TRUE, full.names = FALSE)
|
/Preprocessing/pdf reading into corpus.R
|
no_license
|
ashgillman/BOW-Mining-Project
|
R
| false
| false
| 88
|
r
|
# Read files test: collect relative sub-directory names under `sy`.
stuff <- list.dirs(sy, recursive = TRUE, full.names = FALSE)
|
local(envir = .PBSmodEnv, expr = {
  locale <- sys.frame(sys.nframe() - 1) # local environment
  # **********************************************
  # R code for the Lissajous example
  # **********************************************

  # Calculate and draw a Lissajous figure ("Plot" button).
  drawLiss <- function() {
    # Pull the GUI values (k, m, n, phi, ptype) into the local scope.
    getWinVal(scope = "L")
    theta <- 2 * pi * (0:k) / k
    xs <- sin(2 * pi * m * theta)
    ys <- sin(2 * pi * (n * theta + phi))
    resetGraph()
    plot(xs, ys, type = ptype)
    invisible(NULL)
  }

  # Load PBS Modelling and initialize the GUI.
  require(PBSmodelling)
  createWin("LissFigWin.txt")
}) # end local scope
|
/PBSmodelling/inst/examples/LissFig.r
|
no_license
|
pbs-software/pbs-modelling
|
R
| false
| false
| 605
|
r
|
local(envir = .PBSmodEnv, expr = {
  locale <- sys.frame(sys.nframe() - 1) # local environment
  # **********************************************
  # R code for the Lissajous example
  # **********************************************

  # Calculate and draw a Lissajous figure ("Plot" button).
  drawLiss <- function() {
    # Pull the GUI values (k, m, n, phi, ptype) into the local scope.
    getWinVal(scope = "L")
    theta <- 2 * pi * (0:k) / k
    xs <- sin(2 * pi * m * theta)
    ys <- sin(2 * pi * (n * theta + phi))
    resetGraph()
    plot(xs, ys, type = ptype)
    invisible(NULL)
  }

  # Load PBS Modelling and initialize the GUI.
  require(PBSmodelling)
  createWin("LissFigWin.txt")
}) # end local scope
|
#
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Construct an H2OMOJOSettings object using the default value of every field.
H2OMOJOSettings.default <- function() {
  H2OMOJOSettings()
}
#' @export H2OMOJOSettings
# Reference class holding the configuration options used when scoring with an
# H2O MOJO model. Construct it with H2OMOJOSettings(...); toJavaObject()
# mirrors the settings as their JVM counterpart over the Spark connection.
H2OMOJOSettings <- setRefClass("H2OMOJOSettings",
  fields = list(
    predictionCol = "character",
    detailedPredictionCol = "character",
    convertUnknownCategoricalLevelsToNa = "logical",
    convertInvalidNumbersToNa = "logical",
    withContributions = "logical",
    withInternalContributions = "logical",
    withPredictionInterval = "logical",
    withLeafNodeAssignments = "logical",
    withStageResults = "logical",
    dataFrameSerializer = "character",
    scoringBulkSize = "integer"),
  methods = list(
    initialize = function(predictionCol = "prediction",
                          detailedPredictionCol = "detailed_prediction",
                          convertUnknownCategoricalLevelsToNa = FALSE,
                          convertInvalidNumbersToNa = FALSE,
                          withContributions = FALSE,
                          withInternalContributions = FALSE,
                          withPredictionInterval = FALSE,
                          withLeafNodeAssignments = FALSE,
                          withStageResults = FALSE,
                          dataFrameSerializer = "ai.h2o.sparkling.utils.JSONDataFrameSerializer",
                          scoringBulkSize = 1000L) {
      # Copy every constructor argument onto the matching field of the object.
      .self$predictionCol <- predictionCol
      .self$detailedPredictionCol <- detailedPredictionCol
      .self$convertUnknownCategoricalLevelsToNa <- convertUnknownCategoricalLevelsToNa
      .self$convertInvalidNumbersToNa <- convertInvalidNumbersToNa
      .self$withContributions <- withContributions
      .self$withInternalContributions <- withInternalContributions
      .self$withPredictionInterval <- withPredictionInterval
      .self$withLeafNodeAssignments <- withLeafNodeAssignments
      .self$withStageResults <- withStageResults
      .self$dataFrameSerializer <- dataFrameSerializer
      .self$scoringBulkSize <- scoringBulkSize
    },
    toJavaObject = function() {
      # Instantiate the JVM-side settings object with the same field values,
      # in the order expected by its constructor.
      conn <- spark_connection_find()[[1]]
      invoke_new(conn, "ai.h2o.sparkling.ml.models.H2OMOJOSettings",
                 .self$predictionCol,
                 .self$detailedPredictionCol,
                 .self$convertUnknownCategoricalLevelsToNa,
                 .self$convertInvalidNumbersToNa,
                 .self$withContributions,
                 .self$withInternalContributions,
                 .self$withPredictionInterval,
                 .self$withLeafNodeAssignments,
                 .self$withStageResults,
                 .self$dataFrameSerializer,
                 .self$scoringBulkSize)
    }
  ))
|
/r/src/R/ai/h2o/sparkling/ml/models/H2OMOJOSettings.R
|
permissive
|
h2oai/sparkling-water
|
R
| false
| false
| 4,998
|
r
|
#
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Construct an H2OMOJOSettings object using the default value of every field.
H2OMOJOSettings.default <- function() {
  H2OMOJOSettings()
}
#' @export H2OMOJOSettings
# Reference class holding the configuration options used when scoring with an
# H2O MOJO model. Construct it with H2OMOJOSettings(...); toJavaObject()
# mirrors the settings as their JVM counterpart over the Spark connection.
H2OMOJOSettings <- setRefClass("H2OMOJOSettings",
  fields = list(
    predictionCol = "character",
    detailedPredictionCol = "character",
    convertUnknownCategoricalLevelsToNa = "logical",
    convertInvalidNumbersToNa = "logical",
    withContributions = "logical",
    withInternalContributions = "logical",
    withPredictionInterval = "logical",
    withLeafNodeAssignments = "logical",
    withStageResults = "logical",
    dataFrameSerializer = "character",
    scoringBulkSize = "integer"),
  methods = list(
    initialize = function(predictionCol = "prediction",
                          detailedPredictionCol = "detailed_prediction",
                          convertUnknownCategoricalLevelsToNa = FALSE,
                          convertInvalidNumbersToNa = FALSE,
                          withContributions = FALSE,
                          withInternalContributions = FALSE,
                          withPredictionInterval = FALSE,
                          withLeafNodeAssignments = FALSE,
                          withStageResults = FALSE,
                          dataFrameSerializer = "ai.h2o.sparkling.utils.JSONDataFrameSerializer",
                          scoringBulkSize = 1000L) {
      # Copy every constructor argument onto the matching field of the object.
      .self$predictionCol <- predictionCol
      .self$detailedPredictionCol <- detailedPredictionCol
      .self$convertUnknownCategoricalLevelsToNa <- convertUnknownCategoricalLevelsToNa
      .self$convertInvalidNumbersToNa <- convertInvalidNumbersToNa
      .self$withContributions <- withContributions
      .self$withInternalContributions <- withInternalContributions
      .self$withPredictionInterval <- withPredictionInterval
      .self$withLeafNodeAssignments <- withLeafNodeAssignments
      .self$withStageResults <- withStageResults
      .self$dataFrameSerializer <- dataFrameSerializer
      .self$scoringBulkSize <- scoringBulkSize
    },
    toJavaObject = function() {
      # Instantiate the JVM-side settings object with the same field values,
      # in the order expected by its constructor.
      conn <- spark_connection_find()[[1]]
      invoke_new(conn, "ai.h2o.sparkling.ml.models.H2OMOJOSettings",
                 .self$predictionCol,
                 .self$detailedPredictionCol,
                 .self$convertUnknownCategoricalLevelsToNa,
                 .self$convertInvalidNumbersToNa,
                 .self$withContributions,
                 .self$withInternalContributions,
                 .self$withPredictionInterval,
                 .self$withLeafNodeAssignments,
                 .self$withStageResults,
                 .self$dataFrameSerializer,
                 .self$scoringBulkSize)
    }
  ))
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/pipe.R
\name{\%>\%}
\alias{\%>\%}
\title{magrittr forward-pipe operator}
\description{
magrittr forward-pipe operator
}
\keyword{internal}
|
/man/grapes-greater-than-grapes.Rd
|
no_license
|
knausb/poppr
|
R
| false
| true
| 218
|
rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/pipe.R
\name{\%>\%}
\alias{\%>\%}
\title{magrittr forward-pipe operator}
\description{
magrittr forward-pipe operator
}
\keyword{internal}
|
## Vector
# Build an integer sequence from 5 to 20 and display it.
x <- 5:20
print(x)
|
/script/RProgramming/first.R
|
no_license
|
forkmafia993/Project1
|
R
| false
| false
| 29
|
r
|
## Vector
# Build an integer sequence from 5 to 20 and display it.
x <- 5:20
print(x)
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.