blob_id stringlengths 40 40 | directory_id stringlengths 40 40 | path stringlengths 2 327 | content_id stringlengths 40 40 | detected_licenses listlengths 0 91 | license_type stringclasses 2 values | repo_name stringlengths 5 134 | snapshot_id stringlengths 40 40 | revision_id stringlengths 40 40 | branch_name stringclasses 46 values | visit_date timestamp[us]date 2016-08-02 22:44:29 2023-09-06 08:39:28 | revision_date timestamp[us]date 1977-08-08 00:00:00 2023-09-05 12:13:49 | committer_date timestamp[us]date 1977-08-08 00:00:00 2023-09-05 12:13:49 | github_id int64 19.4k 671M ⌀ | star_events_count int64 0 40k | fork_events_count int64 0 32.4k | gha_license_id stringclasses 14 values | gha_event_created_at timestamp[us]date 2012-06-21 16:39:19 2023-09-14 21:52:42 ⌀ | gha_created_at timestamp[us]date 2008-05-25 01:21:32 2023-06-28 13:19:12 ⌀ | gha_language stringclasses 60 values | src_encoding stringclasses 24 values | language stringclasses 1 value | is_vendor bool 2 classes | is_generated bool 2 classes | length_bytes int64 7 9.18M | extension stringclasses 20 values | filename stringlengths 1 141 | content stringlengths 7 9.18M |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
a5ce07cd3652510a1557bd5dc61b5777393a9a4a | 81be95b50b4a31539eedbaaad6e595754129cf51 | /man/plot.pedigree.Rd | 7340bf166c96872898e2a4474aba74191f4c09cb | [] | no_license | cran/synbreed | dd023649cd0932a612d1e7a7eb3b2d4c619d93f7 | 21d1ecf6d1936987ac0d68f390ac2ae06a737066 | refs/heads/master | 2020-05-21T03:12:27.630810 | 2018-03-16T04:01:36 | 2018-03-16T04:01:36 | 17,700,320 | 3 | 3 | null | null | null | null | UTF-8 | R | false | false | 2,086 | rd | plot.pedigree.Rd | \name{plot.pedigree}
\alias{plot.pedigree}
%- Also NEED an '\alias' for EACH other topic documented here.
\title{
Visualization of pedigree
}
\description{
A function to visualize pedigree structure by a graph using the \code{igraph} package. Each genotype is represented as a vertex and direct offspring are linked by an edge.
}
\usage{
\method{plot}{pedigree}(x, effect = NULL,\dots)
}
%- maybe also 'usage' for other objects documented here.
\arguments{
\item{x}{
object of class \code{pedigree} or object of class \code{gpData} with element \code{pedigree}
}
\item{effect}{
vector of length \code{nrow(pedigree)} with effects to plot on the x axis
}
\item{...}{
Other arguments for function \code{igraph.plotting}
}
}
\details{
The pedigree is structured top to bottom. The first generation is printed in the first line. Links over more than one generation are possible as well as genotypes with only one (known) parent. Usually, no structure in one generation is plotted. If an \code{effect} is given, the genotypes are ordered by this effect in the horizontal direction and a labeled axis is plotted at the bottom.
}
\value{
A named graph visualizing the pedigree structure. Color is used to distinguish sex.
}
\author{
Valentin Wimmer and Hans-Juergen Auinger
}
\note{
This function uses the plotting method for graphs in the library \code{igraph}
}
%% ~Make other sections like Warning with \section{Warning }{....} ~
\seealso{
\code{\link{create.pedigree}}, \code{\link{simul.pedigree}}
}
\examples{
id <- paste("ID", 1:9, sep="0")
par1 <- paste("ID", c("","","","",1,1,1,4,7), sep="0")
par2 <- paste("ID", c("","","","",2,3,2,5,8), sep="0")
ped1 <- create.pedigree(id,par1,par2,unknown="ID0")
ped1
plot(ped1)
# create 2nd pedigree object
Id <- paste("ID", 10:16, sep="")
Par1 <- paste("ID", c("","",1,1,6,7,7), sep="0")
Par2 <- paste("ID", c("","",10,"08","09",11,14), sep="")
ped2 <- create.pedigree(Id,Par1,Par2,unknown=c("ID0", "ID"))
ped2
ped <- add.pedigree(ped1, ped2)
plot(ped)
}
\keyword{hplot}
|
29517e7cb2c907b64de604e26735f204f7be3efd | 109d8b526470a90abcdd33d5828d59063b9d2d0b | /plot2.r | 64c594366f7151cfca4508a1a026a12bdf1a4103 | [] | no_license | Sertinell/ExData_2 | d649946b632af83130e8b52d0a82f8b305b46b21 | c6f3c9d53233dcbfc18459467e0c40cef4011b7b | refs/heads/master | 2021-01-10T15:35:49.413056 | 2015-11-15T11:20:53 | 2015-11-15T11:20:53 | 45,709,415 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 553 | r | plot2.r | library(dplyr)
# plot2.R -- total yearly PM2.5 emissions for Baltimore City (Coursera EDA project).
# Reads the NEI summary data, sums emissions per year for fips == "24510",
# and writes a line plot to plot2.png.
NEI <- readRDS("summarySCC_PM25.rds")
# NOTE(review): SCC is loaded but never used in this script -- possibly kept
# for symmetry with the other plot scripts; confirm before removing.
SCC <- readRDS("Source_Classification_Code.rds")
# Filter to Baltimore City records (fips code "24510")
baltimore <- filter(NEI, fips == "24510")
# Group by year, sum emissions per year, and plot the totals
baltbyyear <- group_by(baltimore, year)
totals <- summarise(baltbyyear, totalemi = sum(Emissions))
png("plot2.png", width = 480, height = 480)
plot(totals$year, totals$totalemi,
     type= "l",
     main = "Total emissions per year in Baltimore",
     xlab = "Year", ylab = "Total Emissions (ton)")
points(totals$year, totals$totalemi)  # overlay the observed points on the line
dev.off()
|
eb9eedc48e9f9c1a9655504ec78859700c516949 | 371021138aa7cfd67e8d0be335142db0133962e9 | /code/Plot_VDJgene_Freq.R | 1afe1bfc7b2f2cdcb70d9e0a02635e7642149601 | [] | no_license | nicwulab/SARS-CoV-2_Abs | 7032251bce1b4bbd385010d7e936a5eeda8bb500 | 50de02fe345289855868f0758c299e564b56ed08 | refs/heads/master | 2023-04-19T05:34:04.688197 | 2022-05-16T17:49:47 | 2022-05-16T17:49:47 | 390,160,709 | 5 | 1 | null | null | null | null | UTF-8 | R | false | false | 4,129 | r | Plot_VDJgene_Freq.R | #R code
library(ggplot2)
library(scales)
library(RColorBrewer)
library(readr)
library(tidyr)
library(reshape)
library(stringr)
library(dplyr)
library(gridExtra)
library(ggforce)
library(ggbeeswarm)
library(sinaplot)
library(readxl)
require(cowplot)
# Plot V-gene usage per epitope group as a percent bar chart, overlay the
# baseline repertoire range as error bars, and save the figure to file.
#
# Args:
#   data:      long data.frame with columns `gene`, `freq` (proportion, 0-1)
#              and `epitope` (grouping used for the bar fill).
#   ref:       repertoire reference with columns `V_gene`, `range_low`,
#              `range_high` (proportions; drawn as an error bar per gene).
#   p_width:   output figure width in inches (height is fixed at 1.3).
#   graphname: output file path passed to ggsave().
plot_Vgene_usage <- function(data,ref,p_width,graphname){
  palette <- c(brewer.pal(3,"Accent"))
  textsize <- 7
  p <- ggplot() +
    geom_bar(data=data, aes(x=gene, y=freq*100, fill=epitope), stat='identity', width=0.6, position=position_dodge()) +
    scale_fill_manual(values=palette,drop=FALSE) +
    # Baseline repertoire range per gene, drawn over the bars.
    geom_errorbar(data = ref,aes(x=V_gene,ymin=(range_low)*100, ymax=(range_high)*100),
                  size=0.25, width=0.5,position=position_dodge(), color='gray30') +
    theme_cowplot(12) +
    theme(axis.text=element_text(size=textsize-1,face="bold",colour = 'black'),
          axis.text.x=element_text(angle=90,hjust=0.5,vjust=0.5,colour = 'black'),
          axis.text.y=element_text(hjust=0.5,vjust=0.5,colour = 'black'),
          axis.title.x=element_blank(),
          axis.title.y=element_text(size=textsize,face="bold"),
          axis.line = element_line(colour = 'black', size = 0),
          panel.border = element_rect(colour = "black", fill=NA, size=1),
          legend.position = "right",
          legend.title = element_text(size=textsize,face="bold"),
          legend.text=element_text(size=textsize,face="bold"),
          legend.justification='center',
          legend.key.size = unit(0.3,"line")) +
    ylab("frequency (%)")
  ggsave(graphname,p,width=p_width, height=1.3, bg='white', dpi=1200)
}
# Draw the D-gene usage bar chart (grouped by epitope) with the baseline
# repertoire range overlaid as error bars, and write it to `graphname`.
#
# Args:
#   data:      long data.frame with columns `gene`, `freq` (proportion) and
#              `epitope` (bar fill grouping).
#   ref:       reference with columns `D_gene`, `range_low`, `range_high`.
#   graphname: output file path; the figure is fixed at 3 x 1.3 inches and
#              drawn without a legend.
plot_Dgene_usage <- function(data,ref,graphname){
  fill_cols <- brewer.pal(3, "Accent")
  txt <- 7
  # Build the plot incrementally: bars, reference ranges, then theming.
  fig <- ggplot()
  fig <- fig + geom_bar(data = data,
                        aes(x = gene, y = freq * 100, fill = epitope),
                        stat = 'identity', width = 0.6,
                        position = position_dodge())
  fig <- fig + scale_fill_manual(values = fill_cols, drop = FALSE)
  fig <- fig + geom_errorbar(data = ref,
                             aes(x = D_gene,
                                 ymin = range_low * 100,
                                 ymax = range_high * 100),
                             size = 0.25, width = 0.5,
                             position = position_dodge(), color = 'gray30')
  fig <- fig + theme_cowplot(12)
  fig <- fig + theme(
    axis.text = element_text(size = txt, face = "bold", colour = 'black'),
    axis.text.x = element_text(angle = 90, hjust = 0.5, vjust = 0.5, colour = 'black'),
    axis.text.y = element_text(hjust = 0.5, vjust = 0.5, colour = 'black'),
    axis.title.x = element_blank(),
    axis.title.y = element_text(size = txt, face = "bold"),
    axis.line = element_line(colour = 'black', size = 0),
    panel.border = element_rect(colour = "black", fill = NA, size = 1),
    legend.position = "none",
    legend.title = element_text(size = txt, face = "bold"),
    legend.text = element_text(size = txt, face = "bold"),
    legend.justification = 'center',
    legend.key.size = unit(0.3, "line")
  )
  fig <- fig + ylab("frequency (%)")
  ggsave(graphname, fig, width = 3, height = 1.3, bg = 'white', dpi = 1200)
}
# --- Driver: build the V/D gene usage figures -------------------------------
# For each gene class: read the repertoire reference, restrict the observed
# frequencies to genes present in that reference, and fix the x-axis order to
# the sorted gene names before plotting.
set.seed(5)

# Heavy-chain V genes.
HVgene_ref <- read_excel('data/HV_Repertoire_freq.xlsx')
HVgene_level <- sort(HVgene_ref$V_gene)
HVgene_data <- read_tsv('result/HVgene_freq.tsv') %>%
  filter(gene %in% HVgene_level) %>%
  mutate(gene=factor(gene, levels=HVgene_level))  # was `level=` (partial arg matching)
plot_Vgene_usage(HVgene_data, HVgene_ref, 6,'graph/HV_gene_usage.png')

# Light-chain V genes.
LVgene_ref <- read_excel('data/LV_Repertoire_freq.xlsx')
LVgene_level <- sort(LVgene_ref$V_gene)
LVgene_data <- read_tsv('result/LVgene_freq.tsv') %>%
  filter(gene %in% LVgene_level) %>%
  mutate(gene=factor(gene, levels=LVgene_level))
plot_Vgene_usage(LVgene_data, LVgene_ref, 6,'graph/LV_gene_usage.png')

# Diversity (D) genes.
Dgene_ref <- read_excel('data/D_Repertoire_freq.xlsx')
Dgene_level <- sort(Dgene_ref$D_gene)
Dgene_data <- read_tsv('result/Dgene_freq.tsv') %>%
  filter(gene %in% Dgene_level) %>%
  mutate(gene=factor(gene, levels=Dgene_level))
plot_Dgene_usage(Dgene_data, Dgene_ref, 'graph/D_gene_usage.png')
|
efb314204cc94e37439c4361b4fa98dbf3a609ac | e05b54bd53ffc69e63c99bd64e12341d0eb0fa2c | /功能注释.R | db2408fc0a37a82865175e9b82a8eff7af6ba622 | [] | no_license | taoshengqiansui/linux | 55f0de4adcd983b55f6ebc3fe678426d46c25abc | a2245edcac6d1064a32155e69662bda8556f4b45 | refs/heads/main | 2023-07-09T13:54:35.546565 | 2021-08-05T09:18:11 | 2021-08-05T09:18:11 | 368,573,122 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 4,809 | r | 功能注释.R | #ENTREZID SYMBOL ENSEMBLE 的相互转换
# --- ID conversion: ENSEMBL -> SYMBOL / ENTREZID ---------------------------
suppressMessages(library(org.Hs.eg.db))# load the annotation package
keytypes(org.Hs.eg.db) # list the supported key types
#rt=read.table("gene.txt",sep="\t",check.names=F,header=T)
rt <- pname$ENSEMBL  # NOTE(review): `pname` must already exist in the session; not defined in this script
#rt<-as.data.frame(rt[,1])
class(rt)
# select(): `keys` = your genes, `columns` = output ID types, `keytype` = input ID type
#gene_list<-select(org.Hs.eg.db, keys=as.character(rt$`rt[, 1]`), columns=c("SYMBOL","ENTREZID"), keytype="ENSEMBL")
gene_list<-select(org.Hs.eg.db, keys=as.character(symbol), columns=c("SYMBOL","ENTREZID"), keytype="ENSEMBL")
# NOTE(review): the call above references `symbol` (undefined here) and is
# immediately overwritten by the next line -- likely an exploratory leftover.
gene_list<-select(org.Hs.eg.db, keys=rt, columns=c("SYMBOL","ENTREZID"), keytype="ENSEMBL")
# Drop specific rows by position; fragile -- tied to one particular input file.
gene_list <- gene_list[c(-3,-5,-60,-69,-93),]
gene_list[1:4,1:3]  # peek at the first rows
write.table(gene_list,file="a.txt",sep="\t",quote=F,row.names=F)
# --- Gene functional annotation --------------------------------------------
## ID conversion: gene SYMBOL -> Entrez ID
library("org.Hs.eg.db")
rt <- pname$SYMBOL
rt <- na.omit(rt)
rt=read.table("symbol.txt",sep="\t",check.names=F,header=T)
# NOTE(review): the read.table() call overwrites the pname$SYMBOL vector built
# on the two lines above -- only one of the two inputs is actually used.
genes=as.vector(gene_list[,1])  # NOTE(review): assigned but not used below
entrezIDs <- mget(rt, org.Hs.egSYMBOL2EG, ifnotfound=NA)  # map each symbol to its Entrez ID (NA if absent)
entrezIDs <- as.character(entrezIDs)
out=cbind(rt,entrezID=entrezIDs)
dim(out)
out<-out[(out$entrezID!='NA'),] # drop symbols whose lookup failed (string 'NA')
dim(out)
write.table(out,file="id.txt",sep="\t",quote=F,row.names=F)
## 2. GO enrichment analysis
library("clusterProfiler")
library("org.Hs.eg.db")
library("enrichplot")
library("ggplot2")
rt=read.table("id.txt",sep="\t",header=T,check.names=F)
rt=rt[is.na(rt[,"entrezID"])==F,]  # keep only genes with a resolved Entrez ID
gene=rt$entrezID
# GO enrichment over all three ontologies; readable=T maps IDs back to symbols
kk <- enrichGO(gene = gene,
               OrgDb = org.Hs.eg.db,
               pvalueCutoff =0.05,
               qvalueCutoff = 0.05,
               ont="all",
               readable =T)
write.table(kk,file="GO.txt",sep="\t",quote=F,row.names = F)
# Bar plot
tiff(file="GO.tiff",width = 26,height = 20,units ="cm",compression="lzw",bg="white",res=600)
barplot(kk, drop = TRUE, showCategory =10) # single panel, no ontology split
barplot(kk, drop = TRUE, showCategory =10,split="ONTOLOGY") + facet_grid(ONTOLOGY~., scale='free')
dev.off()
# Dot (bubble) plot
tiff(file="dotplot.tiff",width = 26,height = 20,units ="cm",compression="lzw",bg="white",res=600)
dotplot(kk,showCategory = 10,split="ONTOLOGY") + facet_grid(ONTOLOGY~., scale='free')
dev.off()
# Heat plot
tiff(file="heatplot.tiff",width = 40,height = 20,units ="cm",compression="lzw",bg="white",res=600)
# NOTE(review): `cor` resolves to the base-R correlation *function* unless a
# fold-change vector named `cor` was created elsewhere in the session -- verify.
heatplot(kk,showCategory =20, foldChange=cor)
dev.off()
#画柱状图
# Truncate a GO-term label for plotting.
#
# A label is shortened when it has more than `n_word` words or more than
# `n_char` characters; otherwise it is returned unchanged.  Shortened labels
# are first clipped to at most `n_char` characters, then reduced to their
# first `n_word` words, and suffixed with "...".
#
# Args:
#   x:      a single character string (one GO-term name).
#   n_word: maximum number of words kept in a shortened label.
#   n_char: maximum number of characters kept before word-trimming.
#           (Bug fix: the original hard-coded 40 and ignored this argument;
#           it is now honoured, with the same default of 40.)
#
# Returns: the possibly shortened label.
shorten_names <- function(x, n_word = 4, n_char = 40) {
  words <- strsplit(x, " ")[[1]]
  if (length(words) > n_word || nchar(x) > n_char) {
    if (nchar(x) > n_char) {
      x <- substr(x, 1, n_char)
      words <- strsplit(x, " ")[[1]]  # re-split: the clip may cut a word
    }
    x <- paste(paste(words[seq_len(min(length(words), n_word))],
                     collapse = " "), "...", sep = "")
  }
  x
}
data<-read.table("GO.txt",header = T,sep = "\t")
# Replace the ontology codes with their full names
data$ONTOLOGY<-gsub("BP", "biological_process", data$ONTOLOGY)
data$ONTOLOGY<-gsub("CC", "cellular_component", data$ONTOLOGY)
data$ONTOLOGY<-gsub("MF", "molecular_function", data$ONTOLOGY)
class(data)
colnames(data)[3]<-c('GO_term')
data <- subset(data,Count>3) # only needed when there are many terms: keep GO terms with >3 genes
# Shorten the GO-term labels with shorten_names() defined above.
# NOTE(review): the levels()/as.numeric() trick assumes GO_term was read in as a
# factor (stringsAsFactors=TRUE, the pre-R-4.0 default) -- verify on R >= 4.0.
data$GO_term=(sapply(levels(data$GO_term)[as.numeric(data$GO_term)],shorten_names))
data<-data[order(data[,1]),] # sort by ontology
data$GO_term<- as.character(data$GO_term) # first convert to character
data$GO_term<-factor(data$GO_term,levels = c(data$GO_term)) # then refactor to lock the plotting order
COLS <- c("#66C3A5", "#8DA1CB", "#FD8D62")
a<-ggplot(data=data, aes(x=GO_term,y=Count, fill=ONTOLOGY)) +
  geom_bar(stat="identity", width=0.8) + coord_flip() +
  xlab("GO term") + ylab("Num of Genes") +
  scale_fill_manual(values = COLS)+ theme_bw()
ggsave(a, file="go_all.pdf", width=9.03, height=5.74)
# KEGG pathway analysis
library("clusterProfiler")
library("org.Hs.eg.db")
library("enrichplot")
library("ggplot2")
rt=read.table("id.txt",sep="\t",header=T,check.names=F)
rt=rt[is.na(rt[,"entrezID"])==F,]  # keep only genes with a resolved Entrez ID
gene=rt$entrezID
# KEGG enrichment analysis (human: organism "hsa")
kk <- enrichKEGG(gene = gene, organism = "hsa", pvalueCutoff =0.05, qvalueCutoff =0.05)
write.table(kk,file="KEGG.txt",sep="\t",quote=F,row.names = F)
# Bar plot
tiff(file="KEGG.tiff",width = 20,height = 12,units ="cm",compression="lzw",bg="white",res=600)
barplot(kk, drop = TRUE, showCategory = 20)
dev.off()
# Dot (bubble) plot
tiff(file="dotplot.tiff",width = 20,height = 12,units ="cm",compression="lzw",bg="white",res=600)
dotplot(kk, showCategory = 20)
dev.off()
# Heat plot
tiff(file="heatplot.tiff",width = 25,height = 15,units ="cm",compression="lzw",bg="white",res=600)
# NOTE(review): `cor` resolves to the base-R correlation *function* unless a
# fold-change vector named `cor` exists in the session -- verify.
heatplot(kk,showCategory =20, foldChange=cor)
dev.off()
# Pathway diagrams: render every enriched pathway with pathview
library("pathview")
keggxls=read.table("KEGG.txt",sep="\t",header=T)
for(i in keggxls$ID){
  pv.out <- pathview(gene.data = cor, pathway.id = i, species = "hsa", out.suffix = "pathview")
}
|
a2681ed3cfd56a5ae2db1249dbd998edfc0c963e | 91a77be68e5ad1aa16e9a2681ba6fb090c118e4d | /man/justification.Rd | efadeed3b10c03efd82e842ae7db5eaf4fcbc3a3 | [] | no_license | cran/qdap | e42f194e98a38eb02084eb6ac92dd587024b8540 | 5f032a6a8bf41255cd2547b11325ed457a02a72a | refs/heads/master | 2023-05-25T03:10:36.324940 | 2023-05-11T05:10:02 | 2023-05-11T05:10:02 | 17,698,836 | 1 | 0 | null | null | null | null | UTF-8 | R | false | true | 1,201 | rd | justification.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/left_just.R
\name{left_just}
\alias{left_just}
\alias{right_just}
\title{Text Justification}
\usage{
left_just(dataframe, column = NULL, keep.class = FALSE)
right_just(dataframe)
}
\arguments{
\item{dataframe}{A data.frame object with the text column.}
\item{column}{The column to be justified. If \code{NULL} all columns are
justified.}
\item{keep.class}{logical. If \code{TRUE} will attempt to keep the original
classes of the dataframe if the justification is not altered (i.e., numeric
will not be honored but factor may be).}
}
\value{
Returns a dataframe with selected text column left/right justified.
}
\description{
\code{left_just} - Left justifies a text/character column.
\code{right_just} - A means of undoing a left justification.
}
\note{
\code{\link[qdap]{left_just}} inserts spaces to achieve the
justification. This could interfere with analysis and therefore the output
from \code{\link[qdap]{left_just}} should only be used for visualization
purposes, not analysis.
}
\examples{
\dontrun{
left_just(DATA)
left_just(DATA, "state")
left_just(CO2[1:15,])
right_just(left_just(CO2[1:15,]))
}
}
|
c2e4a0ffca7819dc5624d3cd2458dc231cb0f23f | 8be80c32be024ba01f8bd0cf2b4a3128cdaf2947 | /tooltips.R | 6ca2b8d8b9a747d3727495fae4c9bba1ab5b9816 | [] | no_license | mattkumar/r_tooltips | 15cbc60ee62309d58ccdd18074ad390faf339ceb | 32320e9bfe498f498ee4c90ad3c6f10fc0823ed7 | refs/heads/master | 2020-06-17T19:31:35.056777 | 2019-07-09T17:22:50 | 2019-07-09T17:22:50 | 196,026,292 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 2,334 | r | tooltips.R | library(tidyr)
library(dplyr)
library(purrr)
library(highcharter)
# Read in data: WHO measles case counts, one row per country-year with
# monthly columns January..December.
me_raw = read.csv("measles.csv")
# Reshape wide -> long: one row per country-year-month.
me_raw_long = gather(data=me_raw, Month, Total, January:December)
# Up-front filtering: drop 2011 and keep only the two countries of interest.
me_raw_filter = me_raw_long %>%
  filter(!(Year==2011) & Country %in% c('Canada','United States of America'))
# Yearly totals per country (one row per Year x Country).
totals = me_raw_filter %>%
  group_by(Year, Country) %>%
  mutate(Total=sum(na.rm=TRUE,Total)) %>%
  distinct(Year, .keep_all = TRUE) %>%
  select(Year, Country, Total)
# Monthly detail, nested per Year x Country and converted to the list form
# highcharter expects for per-point tooltip mini-charts.
months = me_raw_filter %>%
  select(Year, Country, Month, Total) %>%
  nest(-Year,-Country) %>%
  mutate(
    data = map(data, mutate_mapping, hcaes(categories=Month, y=Total)),
    data = map(data, list_parse)
  ) %>%
  rename(ttdata = data)
# Merge the yearly totals with their nested monthly detail.
all_me <- left_join(totals, months, by = c("Country","Year"))
# Plot: yearly line chart; hovering a point shows a monthly mini-chart tooltip
# built from the nested `ttdata` column.
hchart(all_me, "line", hcaes(x = Year, y = Total, group=Country)) %>%
  hc_add_theme(hc_theme_538()) %>%
  hc_xAxis(allowDecimals=FALSE, title=list(text="Year")) %>%
  hc_yAxis(title=list(text="Counts")) %>%
  hc_title(text = paste("Yearly Measles Count in Canada and the United States of America"),
           margin = 20, align = "left",
           style = list(useHTML = TRUE)) %>%
  hc_subtitle(text = "Source: World Health Organization (WHO)",
              align = "left") %>%
  hc_tooltip(useHTML = TRUE,
             headerFormat = "<b>{point.key}</b>",
             pointFormatter = tooltip_chart(accesor = "ttdata",
                                            width="500",
                                            hc_opts = list(xAxis = list(title=list(text="Month"),
                                                                        labels=list(style=list(color="white")),
                                                                        categories=list(list(categories= c('Jan', 'Feb', 'Mar','Apr','May','Jun','Jul','Aug','Sep','Oct','Nov','Dec')))),
                                                           yAxis = list(labels=list(style=list(color="white")))
                                            )
             )
  )
|
936d78179d116b975ca3f568232cd221626ec798 | d690af5c19bb0d6b723e1b8f1687794b4e0f8830 | /R/safepredict-package.R | 38fd2ba4daf828dc03def887b62ad2b6df1982ba | [
"MIT"
] | permissive | roldanalex/safepredict | 03113c5095518fef7c007c7e98342ecf15c0f9dc | 05c3b9c8770583221a73b7b68f88805402630f5f | refs/heads/master | 2021-10-09T11:32:24.866936 | 2018-12-27T06:11:45 | 2018-12-27T06:11:45 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 3,034 | r | safepredict-package.R | #' @keywords internal
#'
#' @importFrom glue glue
#'
#' @importFrom rlang abort
#' @importFrom rlang arg_match
#'
#' @importFrom stats family
#' @importFrom stats model.frame
#' @importFrom stats model.response
#' @importFrom stats na.pass
#' @importFrom stats predict
#' @importFrom stats qnorm
#' @importFrom stats qt
#'
#' @importFrom tibble as_tibble
#' @importFrom tibble tibble
#'
#'
"_PACKAGE"
#' Safely predict from a model object
#'
#' @param object An object or model you would like to get predictions from.
#'
#' @param new_data **Required**. Data in the same format as required for
#' the `predict()` (or relevant method) for `object`. We do our best to
#' support missing data specified via `NA`s in `new_data` although this
#' is somewhat dependent on the underlying `predict()` method.
#'
#' `new_data`:
#'
#' - does not need to contain the model outcome
#' - can contain additional columns not used for prediction
#' - can have one or more rows when specified via a table-like object such
#' as a [tibble::tibble()], [data.frame()] or [matrix()].
#'
#' @param type A character vector indicating what kind of predictions you
#' would like.
#'
#' Options are:
#'
#' - `"response"`: continuous/numeric predictions
#' - `"class"`: hard class predictions
#' - `"prob"`: class or survival probabilities
#' - `"link"`: predictors on the linear scale (GLMs only)
#' - `"conf_int"`: confidence intervals for means of continuous predictions
#' - `"pred_int"`: prediction intervals for continuous outcomes
#'
#' In most cases, only a subset of these options are available.
#'
#' @template unused_dots
#' @template level
#' @template std_error
#'
#' @template return
#'
#' @template intervals
#' @template novel_factor_levels
#'
#' @section Recommended implementations:
#'
#' The goal of `safepredict` is to make prediction as painless and consistent
#' as possible across a wide variety of model objects. In some cases, the
#' existing intrafrastructure is insufficient to provide a consistent and
#' feature-rich prediction interface. As a result, we support a number of
#' model objects that we do not actually recommend using. In these cases,
#' we try to link to better and more feature-rich implementations.
#'
#' @export
safe_predict <- function(
  object,
  new_data,
  type = NULL,
  ...,
  level = 0.95,
  std_error = FALSE) {
  # Warn the caller about dot-arguments that no downstream method consumes.
  ellipsis::check_dots_used()
  # S3 dispatch on the class of `object`; per-model methods are defined elsewhere.
  UseMethod("safe_predict")
}

#' @export
safe_predict.default <- function(object, ...) {
  # Fallback when no method is registered: fail with the offending class name.
  cls <- class(object)[1]
  glubort("There is no safe_predict() method for objects of class {cls}.")
}

#' Safely predict from many fits at once
#'
#' Experimental and untested, not recommended for general use.
#'
#' @param object An object to predict from; dispatch is on its class.
#'   TODO: document supported classes.
#' @param ... Additional arguments passed on to methods. TODO: document.
multi_predict <- function(object, ...) {
  # Same dispatch pattern as safe_predict(): check dots, then dispatch.
  ellipsis::check_dots_used()
  UseMethod("multi_predict")
}

#' @rdname multi_predict
#' @export
multi_predict.default <- function(object, ...) {
  # Fallback when no method is registered: fail with the offending class name.
  cls <- class(object)[1]
  glubort("There is no multi_predict() method for objects of class {cls}.")
}
|
ed46c8fef45bcb0ada33971f266f92c091495b46 | 73f1d9ef21784abe01dc08bbab8e051f0ad7ba53 | /_workflowr.R | c144401e34ffdfae702ff38590f750fdfaeee572 | [] | no_license | jma1991/EB_KDR | 3e9dd894773e9abd6c3f2608ceacd1649d03b9a8 | 96afa34c41d3a31a8988b5a57a705bd44d8a725a | refs/heads/main | 2023-07-04T07:08:47.234301 | 2021-08-09T17:18:57 | 2021-08-09T17:18:57 | 386,411,921 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 2,923 | r | _workflowr.R | # Step 1: Commit RMD files
# Build script for the workflowr site: stage and commit the Rmd sources,
# render every page, then stage and push the generated site.

# Rmd pages processed by this build, in build order.
pages <- c(
  "about", "index", "license",
  "01-batch-integration", "02-reduced-dimensions", "03-clustering",
  "04-marker-detection", "signaling-analysis", "scenic-analysis",
  "trajectory-analysis", "pseudotime-analysis", "mouse-integration",
  "mouse-trajectory", "mouse-pseudotime", "interactive"
)
# Pages that emit a docs/figure/ directory (the analysis pages only).
figure_pages <- setdiff(pages, c("about", "index", "license", "interactive"))

# Step 1: stage the Rmd sources and commit.
for (p in pages) system(paste0("git add analysis/", p, ".Rmd"))
system("git commit -m 'Build'")

# Step 2: render each page with workflowr.
for (p in pages) wflow_build(paste0("analysis/", p, ".Rmd"))

# Step 3: stage the rendered HTML.
for (p in pages) system(paste0("git add docs/", p, ".html"))

# Step 4: stage the generated figures.
for (p in figure_pages) system(paste0("git add docs/figure/", p, ".Rmd"))

# Step 5: stage the supporting site files.
system("git add docs/site_libs")
system("git add docs/.nojekyll")

# Step 6: commit and push.
system("git commit -m 'Build'")
system("git push origin main")
b08833a3aeb0bc577ea5626c08d95b7268eb8058 | d141ea7f44e4694e0feae2831f36c32d3e482577 | /man/pedigreeSim2PH.Rd | 8b39f141318e5b1a665d917ca7e56d5cdfc4f94f | [] | no_license | cran/PolyHaplotyper | 0185fad52cd908dca321665f67fa83d691031207 | c733cbb69263b2e96635d805bc22c77183d734ef | refs/heads/master | 2023-05-25T10:28:56.696224 | 2021-06-17T13:20:05 | 2021-06-17T13:20:05 | 337,721,550 | 0 | 0 | null | null | null | null | UTF-8 | R | false | true | 2,822 | rd | pedigreeSim2PH.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/PolyHaplotyper.R
\name{pedigreeSim2PH}
\alias{pedigreeSim2PH}
\title{convert the PedigreeSim true haplotypes to marker dosages and
haplotype dosages}
\usage{
pedigreeSim2PH(ps_geno, haploblock, indiv=NULL, dropUnused=TRUE)
}
\arguments{
\item{ps_geno}{the filename of a *_genotypes.dat file as produced by
PedigreeSim, or a data.frame read from such a file (with a column marker,
followed by <ploidy> columns per individual with the marker alleles for each
homolog; the names of these columns must be <indivname>_<homolog number>).
ps_geno should not contain missing data (this will be true unless the
PedigreeSim output is modified)}
\item{haploblock}{a list of character vectors. The names are the names of the
haploblocks, the character vectors have the names of the markers in each
haploblock. Haplotype names are constructed from the haploblock names, which
are used as prefixes to which the (zero-padded) haplotype numbers are are
appended with separator '_'.\cr
All markers in haploblock must be present in ps_geno. If haploblock is NULL
only the marker dosages are generated, not the haplotype dosages.}
\item{indiv}{the names of the individuals to be extracted; default NULL: all
individuals in ps_geno. If not NULL, all indiv must be present in ps_geno}
\item{dropUnused}{TRUE (default) if the returned matrix should only contain
rows for haplotypes that are present; if FALSE matrix contains rows for all
possible haplotypes}
}
\value{
a list with 2 items:\cr
$mrkDosage: a matrix of marker dosages in the input format of inferHaplotypes,
with all markers that occur in haploblock, sorted according to haploblock,
and for the selected indiv in the specified order\cr
$haplist: (only if haploblock is not NULL) a list with one element for each
haploblock, similar to the
inferHaplotypes output). Each element is itself a list, with two components:\cr
$hapdos is a matrix with the haplotype dosages for that haploblock for each
individual\cr
$markers: a vector with the names of
the markers in the haploblock in the output of inferHaplotypes
}
\description{
convert the PedigreeSim true haplotypes to marker dosages and
haplotype dosages
}
\details{
if all alleles are in 0/1, these are kept. If only 2 allele symbols
occur in ps_geno all are converted (alphabetically) to 0/1 in
the same way (e.g. if the alleles are A and B, A -> 0 and B -> 1, even in
markers with only B's). If different allele symbols are used between
markers, per marker the (alphabetically or numerically) lowest -> 0 and
the highest -> 1.
So if more than two different allele symbols occur in ps_geno, and
one marker has only A and another only B alleles, both are converted
to 0's.\cr
in mrkDosage, the dosage of the alleles (converted to) 1 is reported
}
|
614181c5fcd940e4539331e1cdb781a5841c1650 | d5364db161f73c70ee8dec6f9c2229488dbb8649 | /총정리/18-03.R | 181f3cbe65de85903f83753b977efde40d4b267c | [] | no_license | dksktjdrhks2/R_programming | 26ac74f3044d2267f9e5f0b1a90b694b83b16322 | e2a73997fdccecdd4246bd9ed2e0990d913b0ad6 | refs/heads/master | 2020-12-13T19:43:37.929777 | 2020-02-13T07:52:26 | 2020-02-13T07:52:26 | 234,513,120 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 1,535 | r | 18-03.R | ####k-최근접 이웃 분류
# 1. The classification problem: given data whose group labels are known,
#    predict which group a new, unlabelled observation belongs to.
# 2. Method: k-nearest-neighbour (kNN) classification on the iris data
library(class)
# Prepare training and test datasets
tr.idx <- c(1:25,51:75, 101:125) # row indices of the training data (25 per species)
ds.tr <- iris[tr.idx, 1:4] # training dataset (4 numeric features)
ds.ts <- iris[-tr.idx, 1:4] # test dataset
cl.tr <- factor(iris[tr.idx, 5]) # group (species) labels of the training data
str(cl.tr)
cl.ts <- factor(iris[-tr.idx, 5]) # group (species) labels of the test data
str(cl.ts)
# knn(train, test, cl, k, prob): predict the test groups from the k = 7 nearest
# training neighbours; keeping k below sqrt(nrow(train)) is usually a good
# choice.  prob=TRUE attaches the proportion of neighbour votes per prediction.
pred <- knn(ds.tr, ds.ts, cl.tr, k=7, prob=TRUE)
pred
# Predictions for the test data together with their supporting vote proportions
acc <- mean(pred==cl.ts) # prediction accuracy
acc
table(pred,cl.ts) # confusion table: predicted vs. actual groups
|
41cb7082c115cd06f1f740513ac73461c799c35c | 76392c8b75dd02f7b38757512320c9b70e92be36 | /Lesson8.R | 63fb0b2de77e77b4366e24f5ba1277705fe6a7b4 | [] | no_license | YanZeng1989/Lesson8 | 50de7d87184231a6690c22cfa3330d487848d193 | bb437c2012f32ab93dae5362397844531699fa7a | refs/heads/master | 2016-09-05T17:13:42.340018 | 2013-12-05T15:42:22 | 2013-12-05T15:42:22 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 1,981 | r | Lesson8.R | ##author:Yan Zeng
##Date:5-12-2013
##title:function of plot timeseries of different class
##input:modis 23 image of a year,classification image,classification table
update.packages(checkBuilt=TRUE, ask = FALSE)
library(raster)
library(sp)
library(ggplot2)
source('functions.R')
source('Mainfunction.R')
##download data
datdir<-getwd()
data<-dl_from_dropbox('Lesson8_YanZeng.zip','68nn2jigyklz8mh')
unzip(file.path(datdir, "Lesson8_YanZeng.zip"), exdir = datdir)
#
# ##set work directory of r file
#
# ##make classfication table
# ClassTable<-data.frame(classcode=c(1:16))
# ClassTable$class<-c('Evergreen Needleaf forest','evergreen broadleaf forst','deciduous needleleaf forest','deciduous broadleaf forest','mixed forests','closed shrublands','open shrublands','woody savannas','savannas','grasslands','permanent wetlands','croplands','urban and built-up','cropland/natural vegetation mosaic','snow and ice','barren or sparsely vegetated')
# ClassTable$cols <- c("chartreuse", "darkolivegreen1","darkgreen","chartreuse4","darkolivegreen4","brown", "light pink", "yellow","dark grey","chartreuse3","cyan","burlywood","red","orange", "white","beige")
# save(ClassTable,file="ClassTableofModisLandCover.RData")
#
# ##import LandCovermap
# LandCover<-raster('MCD12Q1.A2005001.h18v03.005.2011085003115_Land_Cover_Type_1.tif')
# plot(LandCover)
# ##plot with meaningful legned
# plot(LandCover, col=ClassTable$cols, legend=FALSE)
# legend("topright",legend=ClassTable$class, fill=ClassTable$cols)
# ##make a list of modis data
# list <- list.files(pattern="\\NDVI.tif$", recursive = TRUE) # without directories
# modis<-stack(x=list)
# save(LandCover,file='ModisLandCover.RData')
# save(modis,file='ModisDataofyear2012.RData')
load('ClassTableofModisLandCover.RData')
load('ModisDataofyear2012.RData')
load('ModisLandCover.RData')
##call function
##if the load do not work ,run the commend code
MakeHistofMean(modis,LandCover)
TimeSeriesofModis(modis,LandCover,ClassTable)
|
5499a817589b4e4601eb2e59a5c9f593a5d356ac | 9aafde089eb3d8bba05aec912e61fbd9fb84bd49 | /codeml_files/newick_trees_processed/1724_0/rinput.R | 5fb26a432494bd4425dd5e2896b5f93d44182fa7 | [] | no_license | DaniBoo/cyanobacteria_project | 6a816bb0ccf285842b61bfd3612c176f5877a1fb | be08ff723284b0c38f9c758d3e250c664bbfbf3b | refs/heads/master | 2021-01-25T05:28:00.686474 | 2013-03-23T15:09:39 | 2013-03-23T15:09:39 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 135 | r | rinput.R | library(ape)
# Read the Newick tree for this alignment, remove its root (codeml expects an
# unrooted tree), and write the unrooted tree to a new file.
testtree <- read.tree("1724_0.txt")
unrooted_tr <- unroot(testtree)
write.tree(unrooted_tr, file="1724_0_unrooted.txt")
dc23115e9dc229ed3e039b380dddb0b54d45e0c9 | 2baed5a8a605c42d1e8ba05a341ac1ef63bcfeb2 | /package/man/qc_applications.Rd | 905de52681c3f6c1cefed65866e296403e2fd0f7 | [
"MIT"
] | permissive | dynverse/dynbenchmark | 0c58ef1a9909b589b1cd8b0a0e69c2fa222aeb2b | 8dca4eb841b8ce6edfb62d37ff6cfd4c974af339 | refs/heads/master | 2022-12-08T04:01:13.746186 | 2022-11-25T08:25:50 | 2022-11-25T08:25:50 | 102,579,801 | 171 | 40 | null | 2019-04-07T20:59:59 | 2017-09-06T07:53:11 | R | UTF-8 | R | false | true | 387 | rd | qc_applications.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/data.R
\docType{data}
\name{qc_applications}
\alias{qc_applications}
\title{Metadata on QC applications}
\format{An object of class \code{tbl_df} (inherits from \code{tbl}, \code{data.frame}) with 3 rows and 1 columns.}
\usage{
qc_applications
}
\description{
Metadata on QC applications
}
\keyword{datasets}
|
45ac122b4d3a5fcad0a2565ece8cb731fb347155 | f3d6f1531e8332708e40ad17ef4a4edeb803be51 | /Quiz_Solutions/Week_3_Quiz.R | 260bef2f208de99d98eb87c97f206b3d9298df4b | [] | no_license | daswan1118/Coursera-Practical-Machine-Learning | ba39faf9b01eedf99d84a62edf922990f8e7728d | 9ceb716b69eb2500306ec7d9aab0b5c357b0eb07 | refs/heads/master | 2020-04-01T18:27:10.680516 | 2019-02-08T04:29:01 | 2019-02-08T04:29:01 | 153,493,230 | 1 | 0 | null | null | null | null | UTF-8 | R | false | false | 2,033 | r | Week_3_Quiz.R | ### Problem 1 ###
## Use segmentation data to build a rpart model
library(AppliedPredictiveModeling); data(segmentationOriginal)
library(caret)
library(e1071)
# Split Train/Test
train <- segmentationOriginal[segmentationOriginal$Case == "Train",]
test <- segmentationOriginal[segmentationOriginal$Case == "Test",]
# Predict & observe tree
set.seed(125)
tree_model <- train(Class~., data = train, method = "rpart")
plot(tree_model$finalModel)
text(tree_model$finalModel)
### Problem 3 ###
## Build a rpart model for olive oil data and predict on a new data
# load data
library(pgmm)
data(olive)
olive = olive[,-1]
# build rpart model & predict on new data (the column means of olive)
tree_model <- train(Area~., data = olive, method = "rpart")
newdata = as.data.frame(t(colMeans(olive)))
predict(tree_model, newdata)
### Problem 4 ###
## Fit a GLM model on South Africa Heart Disease Data and 
## calculate misclassification rate on training and test sets
# load data and split train/test
library(ElemStatLearn)
data(SAheart)
set.seed(8484)
train = sample(1:dim(SAheart)[1],size=dim(SAheart)[1]/2,replace=F)
trainSA = SAheart[train,]
testSA = SAheart[-train,]
# fit model and predict on train/test
set.seed(13234)
glm_model <- train(chd~age+alcohol+obesity+tobacco+typea+ldl, data = trainSA, method = "glm", family = "binomial")
prediction_test <- predict(glm_model, testSA)
prediction_train <- predict(glm_model, trainSA)
# calculate misclassification: fraction of cases whose 0.5-thresholded
# prediction disagrees with the observed outcome
missClass = function(values,prediction){
  sum(((prediction > 0.5)*1) != values)/length(values)}
missClass(testSA$chd, prediction_test)
missClass(trainSA$chd, prediction_train)
### Problem 5 ###
## Fit a random forest predictor on vowel data and observe the variable importance
# load data and set y to factor
library(ElemStatLearn)
library(randomForest)
data(vowel.train)
data(vowel.test)
vowel.train$y <- factor(vowel.train$y)
vowel.test$y <- factor(vowel.test$y)
# Fit RF model & observe varimp
set.seed(33833)
rf_model <- train(y~., data = vowel.train, method = "rf")
varImp(rf_model)
|
a982131f68ce17a7aba58e56b043500480783ead | 87044c17b367d5cffb7aa0403d891c54e9d3d433 | /R/loading.R | 89dd6d5e272a40328f81e81ea27742602cb7ad0d | [] | no_license | SeoncheolPark/SphereWaveu | e7637ae2bc56b02606c644c7819bb4b739b018e3 | 9ec18402244b24effd49cd6062375b3ebbd66faa | refs/heads/master | 2021-04-28T14:05:25.299385 | 2018-02-18T14:35:39 | 2018-02-18T14:35:39 | 121,956,441 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 72 | r | loading.R | .First.lib <- function(lib, pkg) library.dynam("SphereWaveu", pkg, lib)
|
77c3bf9f69e66084439ef115f6f9796ab85e4fb8 | 0a906cf8b1b7da2aea87de958e3662870df49727 | /grattan/inst/testfiles/IncomeTax/libFuzzer_IncomeTax/IncomeTax_valgrind_files/1610051769-test.R | 212167755341699ca74e382d472beb125386f922 | [] | no_license | akhikolla/updated-only-Issues | a85c887f0e1aae8a8dc358717d55b21678d04660 | 7d74489dfc7ddfec3955ae7891f15e920cad2e0c | refs/heads/master | 2023-04-13T08:22:15.699449 | 2021-04-21T16:25:35 | 2021-04-21T16:25:35 | 360,232,775 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 367 | r | 1610051769-test.R | testlist <- list(rates = numeric(0), thresholds = c(-5.46354690059085e-108, Inf, -5.46354690059085e-108), x = c(-5.46354689220641e-108, -5.46354690059085e-108, -5.46354690059085e-108, -5.46354690059085e-108, -5.46354690059085e-108, -5.46354690059085e-108, -5.46354690059085e-108, -5.46354690059085e-108 ))
# Auto-generated fuzzer harness: invoke grattan::IncomeTax with the generated
# argument list above (empty `rates`, non-finite/denormal thresholds).
result <- do.call(grattan::IncomeTax,testlist)
str(result) |
06caa2a39d82a6cdb60a21a1abcbb1ea4c2e0ace | 115ec71246ee1dcda3e086d48929e25a777604e9 | /test_dag.R | c9273a84860c733f49de5404f84b481f942dd804 | [] | no_license | georgegui/ExperimentDetection | 8fa27f0ef465b0a7ea32835e65c3c6aa2fd29509 | 8c09ff9942266434f2921d717a0e99c6d6eba2be | refs/heads/master | 2021-09-05T00:31:33.026435 | 2018-01-23T04:59:29 | 2018-01-23T05:01:45 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 591 | r | test_dag.R | digraph boxes_and_circles {
  # a 'graph' statement: global layout attributes
  graph [overlap = true, fontsize = 10]
  # several 'node' statements: observed variables drawn as filled circles
  node [shape = circle,
       fontname = Helvetica, style = filled, colour = gray, width = 0.6]
  X [x = 2, y = 1]
  Yr [label = <Y<SUB>R</SUB>>]
  W1 [label = <W<SUB>1</SUB>>, x = 0, y = 1]
  W2 [label = <W<SUB>2</SUB>>, x = 1, y = 1]
  # unobserved (exogenous) variables drawn as dotted circles
  node [shape = circle,
       fontname = Helvetica,
       style = dotted, width = 0.6]
  Uy [label = <U<SUB>Y</SUB>>]
  Ux [label = <U<SUB>X</SUB>>]
  # subgraph{rank = same; Yr; X}
  # edge statements: X causes Yr; Ux drives {X, W1, W2}; Uy drives Yr
  X->Yr Ux->{X, W1, W2} Uy->Yr
}
|
573630f666f1dceaabe2ec7551476b250c1d4f65 | ed530c731d762e830c8d340e2c540099457b41de | /R/function_plot_heatmap.R | c7de8a622a04cb0eb54d15b7eaea3abef109ddbe | [
"MIT"
] | permissive | MarianSchoen/DTD | af040f0bb08f47b1cfe1b5dbe6182d04d9dd5ce6 | 1d01c0be039a68dcbe65777d702dde530ea9b0dd | refs/heads/master | 2022-05-13T06:52:49.974420 | 2022-04-04T07:33:03 | 2022-04-04T07:33:03 | 182,028,277 | 13 | 1 | null | null | null | null | UTF-8 | R | false | false | 9,870 | r | function_plot_heatmap.R | #' clustered heatmap of diag(g) * X
#'
#' For a DTD.model the 'ggplot_heatmap' function visualizes
#' \deqn{diag(g) * X} on a subset of features as a clustered heatmap.\cr
#' Feature subsetting can either be done by a vector of strings,
#' that match the feature names of X.\cr
#' Alternatively, via 'explained correlation':
#' In order to assess the importance of a feature in the deconvolution process,
#' we can exclude the feature from a trained model, and observe the change of
#' correlaion on a test set. If the correlation e.g. decreases by 1 %, the gene
#' explains 1 % correlation within the deconvolution model.\cr
#' The 'ggplot_heatmap' function iteratively excludes each feature from the
#' trained model, resulting in a ranking for the genes.
#'
#' For an example see section "Explained correlation" in the package vignette `browseVignettes("DTD")`
#'
#' @param DTD.model either a numeric vector with length of nrow(X), or a list
#' returned by \code{\link{train_deconvolution_model}},
#' \code{\link{DTD_cv_lambda_cxx}}, or \code{\link{descent_generalized_fista}}.
#' In the equation above the DTD.model provides the vector g.
#' @param X.matrix numeric matrix, with features/genes as rows,
#' and cell types as column. Each column of X.matrix is a reference
#' expression profile. A trained DTD model includes X.matrix, it has been
#' trained on. Therefore, X.matrix should only be set, if the 'DTD.model'
#' is not a DTD model.
#' @param test.data list, with two entries, a numeric matrix each,
#' named 'mixtures' and 'quantities' For examples see \code{\link{mix_samples}},
#' \code{\link{mix_samples_with_jitter}} or the package vignette
#' `browseVignettes("DTD")`.
#' @param estimate.c.type string, either "non_negative", or "direct".
#' Indicates how the algorithm finds the solution of
#' \eqn{arg min_C ||diag(g)(Y - XC)||_2}.
#' \itemize{
#' \item If 'estimate.c.type' is set to "direct",
#' there is no regularization (see \code{\link{estimate_c}}),
#' \item if 'estimate.c.type' is set to "non_negative",
#' the estimates "C" must not be negative (non-negative least squares)
#' (see (see \code{\link{estimate_nn_c}}))
#' }
#' @param feature.subset numeric or a vector of strings. If it is a numeric,
#' "subset" features will be picked from the explained correlation' ranking
#' (if 'feature.subset' <= 1, this is the fraction of feature,
#' if 'feature.subset' > 1 it is the total amount). If it is a vector of
#' strings, these features will be used (if they intersect with
#' rownames(X.matrix))
#' @param title string, additionally title
#' @param log2.expression logical, in the heatmap, should the values be
#' log transformed?
#'
#' @return ggplot object
#' @export
#'
#' @import ggplot2
ggplot_heatmap <- function(
  DTD.model,
  X.matrix = NA,
  test.data = NULL,
  estimate.c.type = "decide.on.model",
  title = "",
  feature.subset = 100,
  log2.expression = TRUE
  ){
  # safety check: DTD.model -- extract the gene weight vector g ("Tweak")
  # from a cross-validated model list, a plain model list, or a bare numeric
  # vector. If the model carries its own estimate.c.type, prefer it.
  if (is.list(DTD.model)) {
    if ("best.model" %in% names(DTD.model)) {
      tweak <- DTD.model$best.model$Tweak
    } else {
      if (!"Tweak" %in% names(DTD.model)) {
        stop("In ggplot_heatmap: There is no Tweak entry in the 'DTD.model'")
      } else {
        tweak <- DTD.model$Tweak
      }
    }
    if ("estimate.c.type" %in% names(DTD.model)){
      estimate.c.type <- DTD.model$estimate.c.type
    }
  } else {
    if(is.numeric(DTD.model)){
      tweak <- DTD.model
    }else{
      stop("In ggplot_heatmap: DTD.model is neither a list nor a numeric vector")
    }
  }
  # end -> DTD.model
  # safety check: X.matrix -- fall back to the reference matrix stored in the
  # trained model when no usable matrix was passed in.
  if(!is.matrix(X.matrix) || any(is.na(X.matrix)) || !is.numeric(X.matrix)){
    if (is.list(DTD.model) && "reference.X" %in% names(DTD.model)) {
      message("In ggplot_heatmap: provided 'X.matrix' could not be used, therefore used: 'DTD.model$reference.X'")
      X.matrix <- DTD.model$reference.X
    }else{
      stop("In ggplot_heatmap: can't use 'X.matrix'. Neither via X.matrix argument nor included in the DTD.model.")
    }
  }
  # end -> X.matrix
  # safety check: log2.expression (project helper raises on invalid input)
  test <- test_logical(
    test.value = log2.expression,
    output.info = c("ggplot_heatmap", "log2.expression")
  )
  # end -> log2.expression
  # safety check: test.data is moved into the "explained correlation" part,
  # because if 'feature.subset' is a list of genes, test.data is not needed;
  # estimate.c.type is checked there as well.

  # logical flag: TRUE once the feature set has been fixed
  features.set <- FALSE
  # safety check: feature.subset -- either an explicit vector of feature
  # names, or a number (fraction if <= 1, otherwise an absolute count).
  if(length(feature.subset) != 1 && all(is.character(feature.subset))){
    useable.subset <- intersect(feature.subset, rownames(X.matrix))
    if(length(useable.subset) == 0){
      stop("In ggplot_heatmap: 'feature.subset' is provided as vector of character. However, none of them can be found in rownames(X.matrix).")
    }else{
      features <- useable.subset
      features.set <- TRUE
    }
  }else{
    test <- test_numeric(
      feature.subset
      , output.info = c("ggplot_heatmap", "feature.subset")
      , min = 0
      , max = Inf)
    if(feature.subset <= 1){ # fraction is provided
      feature.subset <- round(nrow(X.matrix) * feature.subset)
    }
    # never ask for more features than the matrix has
    feature.subset <- ceiling(min(feature.subset, nrow(X.matrix)))
  }
  # end -> feature.subset
  # safety check: title must be coercible to character.
  useable.title <- try(as.character(title), silent = TRUE)
  # BUG FIX: detect a failed try() via its "try-error" class; the original
  # grepl(pattern = "Error") check also rejected any legitimate title that
  # merely contained the word "Error".
  if (inherits(useable.title, "try-error")) {
    # BUG FIX: the message previously named the wrong function ("ggplot_cv")
    stop("In ggplot_heatmap: provided 'title' can not be used as.character.")
  }
  # end -> title
  # Without test.data the "explained correlation" ranking cannot be built ...
  if(is.null(test.data)){
    # ... therefore use all features (unless an explicit name vector was given)
    if(!features.set){
      features <- rownames(X.matrix)
      features.set <- TRUE
    }
  }
  if(!features.set){
    # safety check: test.data must be a 2-entry list of matrices
    if(is.list(test.data) && length(test.data) == 2){
      if(!all(c("quantities", "mixtures") %in% names(test.data))){
        stop("In ggplot_heatmap: entries of 'test.data' must be named 'quantities' and 'mixtures'.")
      }else{
        if(!is.matrix(test.data$mixtures)){
          stop("In ggplot_heatmap: 'test.data$mixtures' is not a matrix.")
        }
        if(!is.matrix(test.data$quantities)){
          stop("In ggplot_heatmap: 'test.data$quantities' is not a matrix. Therefore, 'test.data' can not be used.")
        }
      }
    }else{
      stop("In ggplot_heatmap: 'test.data' must be provided as a list with two entries: 'quantities' and 'mixtures'.")
    }
    # end -> test.data
    # safety check: estimate.c.type
    test <- test_c_type(test.value = estimate.c.type,
                        output.info = c("ggplot_heatmap", "estimate.c.type"))
    # end -> estimate.c.type
    # Wrapper returning the (per-cell-type normalized) deconvolution loss,
    # i.e. the negative correlation, for a given weight vector.
    DTD.evCor.wrapper <- function(tweak,
                                  X = X.matrix,
                                  train.list = test.data,
                                  esti.c.type = estimate.c.type) {
      Y <- train.list$mixtures
      C <- train.list$quantities
      loss <- evaluate_cor(
        X.matrix = X,
        new.data = Y,
        true.compositions = C,
        DTD.model = tweak,
        estimate.c.type = esti.c.type
      ) / ncol(X)
      return(loss)
    }
    # correlation achieved with all features (loss is negative correlation):
    maximum.cor <- -DTD.evCor.wrapper(tweak = tweak)
    manipulated.cor <- rep(NA, length(tweak))
    names(manipulated.cor) <- names(tweak)
    # Leave-one-feature-out: zero each weight in turn and record how much
    # correlation (in %) the model loses without that feature. A negative
    # value means excluding the gene would actually improve the model.
    for(l.feature in names(tweak)){
      l.tweak <- tweak
      l.tweak[l.feature] <- 0
      manipulated.cor[l.feature] <- (maximum.cor + DTD.evCor.wrapper(tweak = l.tweak))*100
    }
    sorted.manipulated.cor <- sort(manipulated.cor, decreasing = TRUE)
    # keep the top 'feature.subset' features of the ranking
    features <- names(sorted.manipulated.cor)[1:feature.subset]
  }
  # Build diag(g) %*% X, restoring the dimnames that %*% drops.
  tmp.rownames <- rownames(X.matrix)
  tmp.colnames <- colnames(X.matrix)
  X.times.g <- diag(tweak) %*% X.matrix
  colnames(X.times.g) <- tmp.colnames
  rownames(X.times.g) <- tmp.rownames
  # Optionally log2-transform (with +1 pseudo-count); drop = FALSE keeps the
  # matrix shape even for a single selected feature.
  if(log2.expression){
    tmp.X.matrix <- as.matrix(log2(X.times.g[features, , drop = FALSE] + 1))
  }else{
    tmp.X.matrix <- X.times.g[features, , drop = FALSE]
  }
  # Hierarchical clustering (euclidean / average linkage) to order the axes;
  # a single cell type cannot be clustered, so its order is trivially 1.
  if(ncol(tmp.X.matrix) > 1){
    cell.type.cluster.order <- stats::hclust(
      stats::dist(
        x = t(tmp.X.matrix)
        , method = "euclidean")
      , method = "average"
    )$order
  } else {
    cell.type.cluster.order <- 1
  }
  feature.cluster.order <- stats::hclust(
    stats::dist(
      x = tmp.X.matrix
      , method = "euclidean")
    , method = "average"
  )$order
  # Long format for ggplot; Var1 = feature, Var2 = cell type, ordered by the
  # clustering above.
  melted.X <- reshape2::melt(
    data = tmp.X.matrix,
    value.name = "expression"
  )
  melted.X$Var1 <- factor(
    x = melted.X$Var1,
    levels = rownames(tmp.X.matrix)[feature.cluster.order],
    ordered = TRUE
  )
  melted.X$Var2 <- factor(
    x = melted.X$Var2,
    levels = colnames(tmp.X.matrix)[cell.type.cluster.order],
    ordered = TRUE
  )
  if(log2.expression){
    legend.name <- "log2(expr)"
  }else{
    legend.name <- "expr"
  }
  pic0 <- ggplot(melted.X, aes(x = .data$Var1, y = .data$Var2)) +
    geom_tile(aes(fill = expression)) +
    scale_fill_gradient(
      name = legend.name,
      low = "darkblue",
      high = "yellow"
    ) +
    xlab("features") +
    ylab("Cell types") +
    ggtitle(title) +
    theme(
      axis.text.x = element_text(
        angle = 90
        , hjust = 1)
    )
  return(pic0)
}
cda7404de32d10116f2f6ef5b5238200948bf2c6 | ffdea92d4315e4363dd4ae673a1a6adf82a761b5 | /data/genthat_extracted_code/naptime/tests/test-naptime.R | 4a3b2c07e082a72077ccffa614f8277bdb5cbecb | [] | no_license | surayaaramli/typeRrh | d257ac8905c49123f4ccd4e377ee3dfc84d1636c | 66e6996f31961bc8b9aafe1a6a6098327b66bf71 | refs/heads/master | 2023-05-05T04:05:31.617869 | 2019-04-25T22:10:06 | 2019-04-25T22:10:06 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 5,823 | r | test-naptime.R | context("Naptime core functionality")
# Unit tests for naptime(): each test measures elapsed wall time via
# system.time() and asserts it falls in a generous band around the requested
# sleep, plus the warning/error contract for invalid or negative durations.
test_that("test of numeric dispatch", {
  test1 <- system.time(naptime(5))[["elapsed"]]
  expect_gte(test1, 4)
  expect_lte(test1, 6)
  test2 <- system.time(naptime(5L))[["elapsed"]]
  expect_gte(test2, 4)
  expect_lte(test2, 6)
  inf_test <- system.time(expect_error(naptime(Inf)))[["elapsed"]]
  inf_test <- system.time(expect_warning(naptime(Inf, permissive = TRUE)))[["elapsed"]]
  neg_test <- system.time(expect_warning(naptime(-10)))[["elapsed"]]
  expect_gte(inf_test, 0)
  expect_lte(inf_test, 2)
  expect_gte(neg_test, 0)
  expect_lte(neg_test, 2)
  expect_error(naptime(c(1,2)))
})
test_that("numeric dispatch warnings can be disabled", {
  # Disable warnings
  options(naptime.warnings = FALSE)
  expect_silent(naptime(Inf, permissive = TRUE))
  expect_silent(naptime(-10))
  options(naptime.warnings = TRUE)
})
test_that("test of difftime dispatch", {
  difftime_test <- system.time(naptime(difftime("2016-01-01 00:00:05", "2016-01-01 00:00:00")))[["elapsed"]]
  expect_gte(difftime_test, 3)
  expect_lte(difftime_test, 14)
})
test_that("test of POSIXct dispatch", {
  ct_test <- system.time(naptime(as.POSIXct(lubridate::now(tzone = "UTC")+lubridate::seconds(5))))[["elapsed"]]
  expect_gte(ct_test, 3)
  expect_lte(ct_test, 14)
})
test_that("test of difftime error conditions", {
  difftime_test <- system.time(expect_warning(naptime(difftime("2016-01-01 00:00:00", "2016-01-01 00:00:05"))))[["elapsed"]]
  expect_gte(difftime_test, 0)
  expect_lte(difftime_test, 3)
})
test_that("test of NULL dispatch", {
  null_test <- system.time(naptime(NULL))[["elapsed"]]
  expect_gte(null_test, 0)
  expect_lte(null_test, 3)
})
test_that("test of NA dispatch", {
  expect_error(naptime(NA, permissive = FALSE))
  na_test <- system.time(expect_warning(naptime(NA, permissive = TRUE)))[["elapsed"]]
  expect_gte(na_test, 0)
  expect_lte(na_test, 3)
  expect_error(naptime(NA_character_))
  expect_warning(naptime(NA_character_, permissive = TRUE))
})
test_that("test of logical dispatch", {
  true_test <- system.time(naptime(TRUE))[["elapsed"]]
  expect_gte(true_test, 0)
  expect_lte(true_test, 3)
  false_test <- system.time(naptime(FALSE))[["elapsed"]]
  expect_gte(false_test, 0)
  expect_lte(false_test, 3)
  expect_error(naptime(logical(0)))
  expect_warning(naptime(logical(0), permissive = TRUE))
})
test_that("test of no_arg dispatch", {
  no_arg_test <- system.time(naptime())[["elapsed"]]
  expect_gte(no_arg_test, 0)
  expect_lte(no_arg_test, 3)
})
test_that("non-time character produces warning, not an error", {
  testval <- "boo"
  expect_error(naptime(testval))
  expect_warning(naptime(testval, permissive = TRUE))
  testval <- "really long scary text string"
  expect_error(naptime(testval))
  expect_warning(naptime(testval, permissive = TRUE))
  non_time_test <- system.time(expect_warning(naptime(testval, permissive = TRUE)))[["elapsed"]]
  expect_gte(non_time_test, 0)
  expect_lte(non_time_test, 3)
})
test_that("non-valid produces warning, not an error", {
  testval <- pi
  class(testval) <- "bad-class"
  expect_error(naptime(testval))
  expect_warning(naptime(testval, permissive = TRUE))
  non_class_test <- system.time(expect_warning(naptime(testval, permissive = TRUE)))[["elapsed"]]
  expect_gte(non_class_test, 0)
  expect_lte(non_class_test, 3)
})
test_that("period dispatch", {
  period_test <- system.time(naptime(lubridate::seconds(5)))[["elapsed"]]
  expect_gte(period_test, 3)
  expect_lte(period_test, 14)
})
test_that("negative period handling", {
  expect_warning(
    neg_period_test <- system.time(naptime(lubridate::seconds(-1)))[["elapsed"]]
  )
  expect_gte(neg_period_test, 0)
  expect_lte(neg_period_test, getOption("naptime.default_delay", 0.1) + 2)
})
test_that("hour-long negative period handling", {
  expect_warning(
    neg_period_test <- system.time(naptime(lubridate::hours(-1)))[["elapsed"]]
  )
  expect_gte(neg_period_test, 0)
  expect_lte(neg_period_test, getOption("naptime.default_delay", 0.1) + 2)
})
test_that("character date handling: yyyy-mm-dd hh:mm:ss in past", {
  expect_warning(
    neg_period_test <- system.time(
      naptime(as.character(lubridate::now() + lubridate::seconds(-1)))
    )[["elapsed"]]
  )
  expect_gte(neg_period_test, 0)
  expect_lt(neg_period_test, getOption("naptime.default_delay", 0.1) + 3)
})
test_that("character date handling: yyyy-mm-dd hh:mm:ss in future", {
  pos_period_test <- system.time(
    naptime(as.character(lubridate::now() + lubridate::seconds(5)))
  )[["elapsed"]]
  expect_gte(pos_period_test, 3)
  expect_lte(pos_period_test, 14)
})
test_that("generic stop", {
  # actually hits the not-scalar error
  expect_error(naptime(glm(rnorm(5) ~ runif(5)), permissive = FALSE))
  expect_warning(naptime(glm(rnorm(5) ~ runif(5)), permissive = TRUE))
})
test_that("generic warning if permissive", {
  options(naptime.permissive = TRUE)
  expect_warning(naptime(glm(rnorm(5) ~ runif(5))))
  expect_error(naptime(glm(rnorm(5) ~ runif(5)), permissive = FALSE))
  options(naptime.permissive = FALSE)
  expect_error(naptime(glm(rnorm(5) ~ runif(5))))
  expect_warning(naptime(glm(rnorm(5) ~ runif(5)), permissive = TRUE))
})
test_that("zero length custom class produces a warning/error", {
  boo <- integer(0)
  class(boo) <- "moo"
  expect_warning(naptime(boo, permissive = TRUE))
  expect_error(naptime(boo, permissive = FALSE))
})
test_that("Invalid Period throws error", {
  basic_period <- difftime(now()+seconds(2), now())
  attr(basic_period, "units") <- "Invalid Unit"
  options(naptime.permissive = TRUE)
  expect_warning(naptime(basic_period))
  expect_error(naptime(basic_period, permissive = FALSE))
  options(naptime.permissive = FALSE)
  expect_error(naptime(basic_period))
  expect_warning(naptime(basic_period, permissive = TRUE))
})
|
c1f312141099ca6c3f072c50b09ea815ad7eb734 | 3324667e4c8377a4621c34601cff709bc4a3d2b0 | /Scripts/EmptyRasterTest.R | dd752b608b8291376b13b7a07b46593e87a0faeb | [] | no_license | CM26-Climate-Paper/Analysis-code | 6a98cfc31ae6a79eb82ce4d9dc5016b54d1945b1 | 2e5861307caf3759beac0d78696e34b06dfbd694 | refs/heads/master | 2020-06-30T07:35:58.532666 | 2019-09-16T19:54:00 | 2019-09-16T19:54:00 | 74,387,756 | 2 | 1 | null | 2016-12-16T18:47:39 | 2016-11-21T17:24:22 | null | UTF-8 | R | false | false | 540 | r | EmptyRasterTest.R | ## CLIMATE PAPER ROUND 2.
#A. FIRST NEED TO KNOW WHICH RASTERS ARE EMPTY
#B. THEN NEED TO REPROJECT ON THEM BASED ON A MODEL OBJECT
###TESTING IF RASTERS ARE EMPTY
library(raster)
setwd("/Volumes/SeaGate/ClimatePaperCM2.6/Species_Projections_all")
# Walk every species sub-directory and collect projection files whose size on
# disk is under 10 KB -- such files are effectively empty rasters.
# FIX: the original accumulated hits with blank = list(blank, tif), building
# a deeply nested list in O(n^2) that only worked because unlist() happens to
# flatten it; collect the paths as a flat character vector instead.
species_dirs <- file.path(getwd(), list.files())
all_files <- unlist(lapply(species_dirs, list.files, full.names = TRUE))
# final_list: absolute paths of the (near-)empty rasters, same result as the
# original unlist(blank).
final_list <- all_files[file.info(all_files)$size < 10000]
|
e707d32046db33e2e8a7bcd6a5dfa8ee1a174d89 | 05ea9c8ed1d49ae35414d06b6a2b2eb1dd295f9d | /IDEAM1/04_interpolationDay.R | 559a7c14d2f5291e3be09404d4988955648772be | [] | no_license | kika1002/LULC | d776a268c5b89baf01f9c53ac2b3406f7221dfb4 | 2384560109fe672df3a874e7c76bd0f932e3a4dd | refs/heads/master | 2020-06-02T18:28:46.804490 | 2019-06-25T21:35:07 | 2019-06-25T21:35:07 | 191,265,932 | 1 | 0 | null | null | null | null | UTF-8 | R | false | false | 2,432 | r | 04_interpolationDay.R |
# Load libraries ----------------------------------------------------------
require(pacman)
pacman::p_load(stringr, tidyverse, readxl, lubridate, imputeTS, reshape, purrr, readxl, reshape)
# Initial setup ----------------------------------------------------------
g <- gc(reset = TRUE)
rm(list = ls())
options(scipen = 999)
# Functions to use --------------------------------------------------------
# Interpolate missing daily values for one variable/station/year/month slice
# of the global `data` table (long format: variable, stt, year, month, value).
#
# Args:
#   vr: variable name (e.g. 'tmax'); st: station id; yr: year; mn: month
#   abbreviation (e.g. 'Jan').
# Returns:
#   A tibble (variable, stt, year, month, day, value) with NAs filled by
#   imputeTS::na.interpolation, or NULL when the month has 26 or more missing
#   days (too sparse to interpolate reliably).
fillNA <- function(vr, st, yr, mn){
  # e.g. vr <- 'tmax'; st <- 35055010; yr <- 1995; mn <- 'Jan'
  df <- data %>%
    filter(variable == vr,
           stt == st,
           year == yr,
           month == mn)
  ts <- ts(pull(df, value))
  nn <- sum(is.na(ts))
  if(nn < 26){
    int <- as.numeric(na.interpolation(ts))
    dfm <- data.frame(value = int) %>%
      mutate(variable = vr,
             stt = st,
             year = yr,
             month = mn,
             day = 1:nrow(.)) %>%
      as_tibble() %>%
      dplyr::select(variable, stt, year, month, day, value)
  } else {
    # BUG FIX: the original branch only printed a message and then fell
    # through to return(dfm) with `dfm` never assigned, raising
    # "object 'dfm' not found". Return NULL explicitly so callers
    # (e.g. bind_rows over pmap results) can simply drop this slice.
    print('No interpolate, many NAs')
    dfm <- NULL
  }
  print('Done!')
  return(dfm)
}
# Return the rows of the global `data` table matching a single
# variable / station / year / month combination.
myFilter <- function(vr, st, yr, mn){
  # example: vr <- 'tmax'; st <- 35055010; yr <- 1995; mn <- 'Jan'
  matched <- filter(
    data,
    variable == vr,
    stt == st,
    year == yr,
    month == mn
  )
  print('Done')
  matched
}
# Load data ---------------------------------------------------------------
data <- read_csv('ideam/prc/ideam_all.csv')
data <- mutate(data, month = factor(month, levels = month.abb))
# Counting the NAs --------------------------------------------------------
nas <- data %>%
  group_by(variable, stt, year, month) %>%
  summarize(count = sum(is.na(value))) %>%
  ungroup()
# Months with fewer than 26 missing days are interpolable (ntr); the rest
# are flagged as trouble months (trb) and written out separately.
ntr <- filter(nas, count < 26)
trb <- filter(nas, count >= 26)
write.csv(nas, 'ideam/int/count_all_nas.csv', row.names = FALSE)
# Extracting the rows of NAs from data ------------------------------------
data_nas <- inner_join(data, trb, by = c('variable', 'stt', 'year', 'month'))
write.csv(data_nas, 'ideam/int/data_nas.csv', row.names = FALSE)
# Apply the function to fill NAs ------------------------------------------
# NOTE(review): nest(-variable, ...) is the legacy tidyr interface; newer
# tidyr expects nest(data = c(...)) -- confirm the installed tidyr version.
nst <- data %>%
  nest(-variable, -stt, -year, -month) %>%
  mutate(result = pmap(list(variable, stt, year, month), .f = fillNA))
df_days <- bind_rows(nst$result)
write.csv(df_days, 'ideam/int/tbl_int_days.csv', row.names = FALSE)
write.csv(trb, 'ideam/int/trb_days.csv', row.names = FALSE)
f9f6efcafc6930016b02148fa70c3815a5f7a1a0 | 25118c0a012490aec49cd57a246618c8379a877c | /R/SpaDES-modules/disturbanceCalcCLUS/disturbanceCalcCLUS.R | 5215d4185cac0d3ead7e0fa380f11d1db2e7116e | [
"Apache-2.0"
] | permissive | davan690/clus | a3ad79334d028c69793125e07fa362fed2ba33f3 | aead0c11c504a236c465dfb1b2c65b384dd8a3b1 | refs/heads/master | 2022-08-09T15:16:59.232834 | 2020-05-21T20:02:07 | 2020-05-21T20:02:07 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 10,918 | r | disturbanceCalcCLUS.R | # Copyright 2020rsf_ Province of British Columbia
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and limitations under the License.
# SpaDES module metadata: declares the disturbanceCalcCLUS module's
# parameters, expected inputs (supplied by other modules, e.g. dataLoaderCLUS)
# and the outputs it creates on the simList.
defineModule(sim, list(
  name = "disturbanceCalcCLUS",
  description = NA, #"insert module description here",
  keywords = NA, # c("insert key words here"),
  authors = c(person("Kyle", "Lochhead", email = "kyle.lochhead@gov.bc.ca", role = c("aut", "cre")),
              person("Tyler", "Muhly", email = "tyler.muhly@gov.bc.ca", role = c("aut", "cre"))),
  childModules = character(0),
  version = list(SpaDES.core = "0.2.5", disturbanceCalcCLUS = "0.0.1"),
  spatialExtent = raster::extent(rep(NA_real_, 4)),
  timeframe = as.POSIXlt(c(NA, NA)),
  timeunit = "year",
  citation = list("citation.bib"),
  documentation = list("README.txt", "disturbanceCalcCLUS.Rmd"),
  reqdPkgs = list("raster"),
  # '99999' acts as a sentinel meaning "parameter not supplied".
  parameters = rbind(
    defineParameter("criticalHabitatTable", "character", '99999', NA, NA, "Value attribute table that links to the raster and describes the boundaries of the critical habitat"),
    defineParameter("criticalHabRaster", "character", '99999', NA, NA, "Raster that describes the boundaries of the critical habitat"),
    defineParameter("calculateInterval", "numeric", 1, NA, NA, "The simulation time at which disturbance indicators are calculated"),
    defineParameter("permDisturbanceRaster", "character", '99999', NA, NA, "Raster of permanent disturbances"),
    defineParameter("recovery", "numeric", 40, NA, NA, "The age of recovery for disturbances"),
    defineParameter(".plotInitialTime", "numeric", NA, NA, NA, "This describes the simulation time at which the first plot event should occur"),
    defineParameter(".plotInterval", "numeric", NA, NA, NA, "This describes the simulation time interval between plot events"),
    defineParameter(".saveInitialTime", "numeric", NA, NA, NA, "This describes the simulation time at which the first save event should occur"),
    defineParameter(".saveInterval", "numeric", NA, NA, NA, "This describes the simulation time interval between save events"),
    defineParameter(".useCache", "logical", FALSE, NA, NA, "Should this entire module be run with caching activated? This is generally intended for data-type modules, where stochasticity and time are not relevant")
  ),
  inputObjects = bind_rows(
    expectsInput(objectName = "boundaryInfo", objectClass = "character", desc = NA, sourceURL = NA),
    expectsInput(objectName = "clusdb", objectClass = "SQLiteConnection", desc = 'A database that stores dynamic variables used in the RSF', sourceURL = NA),
    expectsInput(objectName = "ras", objectClass = "RasterLayer", desc = "A raster object created in dataLoaderCLUS. It is a raster defining the area of analysis (e.g., supply blocks/TSAs).", sourceURL = NA),
    expectsInput(objectName = "pts", objectClass = "data.table", desc = "Centroid x,y locations of the ras.", sourceURL = NA),
    expectsInput(objectName = "scenario", objectClass = "data.table", desc = 'The name of the scenario and its description', sourceURL = NA),
    expectsInput(objectName ="updateInterval", objectClass ="numeric", desc = 'The length of the time period. Ex, 1 year, 5 year', sourceURL = NA)
  ),
  outputObjects = bind_rows(
    #createsOutput("objectName", "objectClass", "output object description", ...),
    createsOutput("disturbance", "data.table", "Disturbance table for every pixel"),
    createsOutput("disturbanceReport", "data.table", "Summary per simulation year of the disturbance indicators")
  )
))
# SpaDES event dispatcher for disturbanceCalcCLUS.
# "init" runs Init() and schedules the first "analysis" event; "analysis"
# computes the disturbance indicators and re-schedules itself every
# `calculateInterval` simulation years (event priority 9).
doEvent.disturbanceCalcCLUS = function(sim, eventTime, eventType) {
  switch(
    eventType,
    init = {
      sim <- Init (sim) # this function inits
      sim <- scheduleEvent(sim, time(sim) , "disturbanceCalcCLUS", "analysis", 9)
    },
    analysis = {
      sim <- distAnalysis(sim)
      sim <- scheduleEvent(sim, time(sim) + P(sim, "disturbanceCalcCLUS", "calculateInterval"), "disturbanceCalcCLUS", "analysis", 9)
    },
    warning(paste("Undefined event type: '", current(sim)[1, "eventType", with = FALSE],
                  "' in module '", current(sim)[1, "moduleName", with = FALSE], "'", sep = ""))
  )
  return(invisible(sim))
}
# Initialize the module state on the simList:
#  - create the empty disturbanceReport table and the per-pixel disturbance
#    table (seeded from the pixel centroids in sim$pts);
#  - attach the critical-habitat label to every pixel (from the
#    criticalHabRaster + criticalHabitatTable lookup, or a single habitat
#    "1" when no raster is supplied);
#  - ensure the clusdb 'pixels' table has a perm_dist column, populated from
#    the permanent disturbance raster when one is provided.
# Returns the (modified) simList, invisibly.
Init <- function(sim) {
  sim$disturbanceReport<-data.table(scenario = character(), compartment = character(), timeperiod= integer(),
                                    critical_hab = character(), dist500 = numeric(), dist500_per = numeric(), dist = numeric(), dist_per = numeric())
  sim$disturbance<-sim$pts
  # Get the critical habitat ('99999' = sentinel for "no raster supplied")
  if(P(sim, "disturbanceCalcCLUS", "criticalHabRaster") == '99999'){
    sim$disturbance[, critical_hab:= 1]
  }else{
    # Clip the critical-habitat raster to the area of analysis and flatten it
    # to a vector in pixel order.
    bounds <- data.table (c (t (raster::as.matrix(
      RASTER_CLIP2(tmpRast = sim$boundaryInfo[[3]],
                   srcRaster = P(sim, "disturbanceCalcCLUS", "criticalHabRaster"),
                   clipper = sim$boundaryInfo[[1]], # by the area of analysis (e.g., supply block/TSA)
                   geom = sim$boundaryInfo[[4]],
                   where_clause = paste0 (sim$boundaryInfo[[2]], " in (''", paste(sim$boundaryInfo[[3]], sep = "' '", collapse= "'', ''") ,"'')"),
                   conn = NULL)))))
    bounds[,pixelid:=seq_len(.N)] # unique id to ensure the merge lines up
    if(nrow(bounds[!is.na(V1),]) > 0){ # does the aoi overlap the boundary?
      if(!(P(sim, "disturbanceCalcCLUS", "criticalHabitatTable") == '99999')){
        crit_lu<-data.table(getTableQuery(paste0("SELECT cast(value as int) , crithab FROM ",P(sim, "disturbanceCalcCLUS", "criticalHabitatTable"))))
        bounds<-merge(bounds, crit_lu, by.x = "V1", by.y = "value", all.x = TRUE)
      }else{
        # BUG FIX: the parameter was previously looked up under the wrong
        # module name ("rsfCLUS"), a copy-paste error from another module.
        stop(paste0("ERROR: need to supply a lookup table: ", P(sim, "disturbanceCalcCLUS", "criticalHabitatTable")))
      }
    }else{
      stop(paste0(P(sim, "disturbanceCalcCLUS", "criticalHabRaster"), "- does not overlap with aoi"))
    }
    setorder(bounds, pixelid) # sort so rows align with sim$disturbance
    sim$disturbance[, critical_hab:= bounds$crithab]
  }
  # Get the permanent disturbance raster.
  # Is the perm_dist field already present in sim$clusdb?
  if(dbGetQuery (sim$clusdb, "SELECT COUNT(*) as exists_check FROM pragma_table_info('pixels') WHERE name='perm_dist';")$exists_check == 0){
    # add the columns
    dbExecute(sim$clusdb, "ALTER TABLE pixels ADD COLUMN perm_dist integer DEFAULT 0")
    dbExecute(sim$clusdb, "ALTER TABLE pixels ADD COLUMN dist numeric DEFAULT 0")
    # add in the raster values
    if(P(sim, "disturbanceCalcCLUS", "permDisturbanceRaster") == '99999'){
      message("Need to supply a permanent disturbance raster parameter as permDisturbanceRaster = ... defaulting to no permanent disturbances")
      dbExecute(sim$clusdb, "Update pixels set perm_dist = 0;")
    }else{
      # Same clip-and-flatten pattern as for the critical-habitat raster.
      perm_dist <- data.table (c(t(raster::as.matrix(
        RASTER_CLIP2(tmpRast = sim$boundaryInfo[[3]],
                     srcRaster = P(sim, "disturbanceCalcCLUS", "permDisturbanceRaster"),
                     clipper = sim$boundaryInfo[[1]], # by the area of analysis (e.g., supply block/TSA)
                     geom = sim$boundaryInfo[[4]],
                     where_clause = paste0 (sim$boundaryInfo[[2]], " in (''", paste(sim$boundaryInfo[[3]], sep = "' '", collapse= "'', ''") ,"'')"),
                     conn = NULL)))))
      perm_dist[,pixelid:=seq_len(.N)] # unique id to ensure the merge lines up
      setnames(perm_dist, "V1", "perm_dist")
      # write the values into clusdb inside a transaction
      dbBegin(sim$clusdb)
      rs<-dbSendQuery(sim$clusdb, "Update pixels set perm_dist = :perm_dist where pixelid = :pixelid", perm_dist)
      dbClearResult(rs)
      dbCommit(sim$clusdb)
      # clean up
      rm(perm_dist)
      gc()
    }
  }else{
    message("...using existing permanent disturbance raster")
  }
  return(invisible(sim))
}
distAnalysis <- function(sim) {
  # For every pixel in critical habitat, compute the distance to the nearest
  # "disturbed" pixel, persist it to the clusdb pixels table, and append a
  # per-habitat summary row to sim$disturbanceReport.
  #
  # Disturbed = permanently disturbed, roaded within the recovery window, or
  # in a harvested block (blockid > 0) younger than the recovery period.
  dt_select<-data.table(dbGetQuery(sim$clusdb, paste0("SELECT pixelid FROM pixels WHERE perm_dist > 0 or (roadyear >= ", max(0,as.integer(time(sim) - P(sim, "disturbanceCalcCLUS", "recovery"))),") or (blockid > 0 and age BETWEEN 0 AND ",P(sim, "disturbanceCalcCLUS", "recovery"),")"))) #
  if(nrow(dt_select) > 0){
    dt_select[, field := 0] # marker: field == 0 means "disturbed pixel"
    outPts<-merge(sim$disturbance, dt_select, by = 'pixelid', all.x =TRUE)
    # k = 1 nearest disturbed neighbour for each undisturbed habitat pixel.
    # NOTE(review): '!is.na(critical_hab) > 0' parses as
    # '(!is.na(critical_hab)) > 0', i.e. the '> 0' is redundant -- confirm.
    nearNeigh<-RANN::nn2(outPts[field==0 & !is.na(critical_hab), c('x', 'y')],
                         outPts[is.na(field) & !is.na(critical_hab) > 0, c('x', 'y')],
                         k = 1)
    outPts<-outPts[is.na(field) & !is.na(critical_hab), dist:=nearNeigh$nn.dists] # assign the distances
    outPts[is.na(dist) & !is.na(critical_hab), dist:=0] # those that are the distance to pixels, assign
    sim$disturbance<-merge(sim$disturbance, outPts[,c("pixelid","dist")], by = 'pixelid', all.x =TRUE) #sim$rsfcovar contains: pixelid, x,y, population
    #update the pixels table (parameterized statement inside one transaction)
    dbBegin(sim$clusdb)
    rs<-dbSendQuery(sim$clusdb, "UPDATE pixels SET dist = :dist WHERE pixelid = :pixelid;", outPts[,c("pixelid","dist")])
    dbClearResult(rs)
    dbCommit(sim$clusdb)
  }else{
    # No disturbance anywhere: set distances just beyond the 500 m threshold
    # so nothing is counted as "within 500 m" in the summary below.
    sim$disturbance$dist<-501
  }
  #out.ras<-sim$ras
  #out.ras[]<-sim$disturbance$dist
  #writeRaster(out.ras, paste0("dist",time(sim), ".tif"), overwrite = TRUE)
  #Sum the area up > 500 m
  # hab500 = pixels farther than 500 m from disturbance; total = all habitat
  # pixels; hab = pixels with any positive distance to disturbance.
  tempDisturbanceReport<-merge(sim$disturbance[dist > 500, .(hab500 = uniqueN(.I)), by = "critical_hab"], sim$disturbance[!is.na(critical_hab), .(total = uniqueN(.I)), by = "critical_hab"])
  tempDisturbanceReport<-merge(tempDisturbanceReport, sim$disturbance[dist > 0, .(hab = uniqueN(.I)), by = "critical_hab"] )
  tempDisturbanceReport[, c("scenario", "compartment", "timeperiod", "dist500_per", "dist500", "dist_per", "dist") := list(scenario$name,sim$boundaryInfo[[3]],time(sim)*sim$updateInterval,((total - hab500)/total)*100,total - hab500,((total - hab)/total)*100, total - hab)]
  tempDisturbanceReport[, c("total","hab500","hab") := list(NULL, NULL,NULL)]
  sim$disturbanceReport<-rbindlist(list(sim$disturbanceReport, tempDisturbanceReport), use.names=TRUE )
  sim$disturbance[, dist:=NULL] # drop the working column before the next interval
  return(invisible(sim))
}
patchAnalysis <- function(sim) {
  # Placeholder: patch-size distribution analysis is not implemented yet.
  #
  # Planned approach:
  #   * build a graph over pixels for each landscape unit carrying a
  #     patch-size constraint;
  #   * take an induced subgraph of pixelids selected from `pixels`
  #     where age < 40 (igraph::induced_subgraph);
  #   * count distinct patches via igraph::component_distribution.
  #
  # Currently a no-op that hands the simulation object back unchanged.
  invisible(sim)
}
.inputObjects <- function(sim) {
  # No default input objects to supply; return the sim object untouched.
  invisible(sim)
}
|
0d25e97ab26861bbda2c90a2d8f8be941a8eebee | 24e00f71d190b683670e778dd293cf2778ebdede | /membrane-proteins/MembProt.R | 253b240424a42f66913ed483ef58654a9f903c56 | [] | no_license | daniel-wells/wikimedia-graphs | 63653a183a1b4e4b027c9625d332666c9ae1404b | 2ea7d0bc9c4671fde8abfd0fd12321b803f70bfd | refs/heads/master | 2021-01-15T15:25:35.596924 | 2016-07-02T14:47:35 | 2016-07-02T14:47:35 | 44,247,062 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 1,103 | r | MembProt.R | MembProt <- structure(list(Year = 1985:2014, Count = c(1L, 0L, 1L, 0L, 0L,
0L, 0L, 3L, 1L, 3L, 2L, 6L, 8L, 9L, 7L, 11L, 5L, 13L, 18L, 16L,
18L, 24L, 28L, 30L, 41L, 43L, 40L, 65L, 55L, 72L), Cumulative = c(1L,
1L, 2L, 2L, 2L, 2L, 2L, 5L, 6L, 9L, 11L, 17L, 25L, 34L, 41L,
52L, 57L, 70L, 88L, 104L, 122L, 146L, 174L, 204L, 245L, 288L,
328L, 393L, 448L, 520L)), .Names = c("Year", "Count", "Cumulative"
), row.names = c(NA, 30L), class = "data.frame")
library(ggplot2)
library(plyr)

# Drop the most recent (incomplete) year, if present. `Year` is numeric, so
# compare against a number rather than the string "2015" (the original relied
# on implicit character coercion). With data ending in 2014 this is a no-op
# guard that only takes effect once a partial 2015 row is added.
MembProt <- subset(MembProt, Year != 2015)

# Cumulative count of membrane protein structures by year.
ggplot(data = MembProt, aes(x = Year, y = Cumulative)) +
  geom_bar(stat = "identity", size = 1) +
  labs(
    x = "Year",
    y = "Cumulative Number of Structures",
    title = "Cumulative Number of Membrane Protein Structures"
  ) +
  scale_x_continuous(breaks = seq(from = 1985, to = 2014, by = 2)) +
  scale_y_continuous(breaks = seq(from = 0, to = 600, by = 100)) +
  theme_minimal() +
  theme(
    axis.text.x = element_text(angle = 45, hjust = 1),
    axis.line = element_line(size = 0.5, linetype = "solid", colour = "black")
  )
ggsave(file="MembProf.pdf", width=7, height=5) |
5fd1afa57bb09f6fb27323a778dd4fdcb9e9eceb | 71ead3bebcb2007d7bed603749e79263b759f8a0 | /man/DkubeModel.Rd | e0b688d155d20e1dec99cb8dc437497ce0a64e93 | [
"Apache-2.0"
] | permissive | oneconvergence/dkube-R | 3f88ec2ab1baa14e3a690eaa23c04f10e2d87972 | 8a21163e4d6616623d3db066092d4f888696ce97 | refs/heads/main | 2023-02-16T13:22:03.916837 | 2021-01-17T06:18:01 | 2021-01-17T06:18:01 | 329,491,316 | 0 | 0 | null | null | null | null | UTF-8 | R | false | true | 350 | rd | DkubeModel.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/repos.R
\name{DkubeModel}
\alias{DkubeModel}
\title{Dkube Model repo}
\usage{
DkubeModel(user = NULL, name = NULL)
}
\arguments{
\item{user}{username}
\item{name}{repo/resource name}
\item{description}{additional details for the repo}
}
\description{
Dkube Model repo
}
|
d2f8c445dfbce2a74ac45465311bb8c277dd8558 | 7c326a26ffdd5f56d0dbdaf2eff8ceef11c35ae5 | /forecasting_time_series.R | 6e9775fef19d9e7a1651b6b46203d613334cd8b3 | [] | no_license | NiteshTiwari/Forecasting_in_R | ea562a09054deff3938693235ce8bf030468de41 | c6ced0278fba78659fa088d9a4bcb4bf43207cc7 | refs/heads/master | 2021-06-03T23:37:55.368271 | 2016-09-14T12:52:31 | 2016-09-14T12:52:31 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 1,381 | r | forecasting_time_series.R | fit.ex3 <- tslm(consumption ~ income , data=usconsumption)
# Plot both quarterly series (consumption and income) on one set of axes;
# fit.ex3, defined just above, is the tslm() regression of consumption on income.
plot(usconsumption, ylab="% change in consumption and income", plot.type="single", col=1:2, xlab="Year")
legend("topright",legend=c("Consumption", "Income"),lty=1,col=c(1,2), cex=0.9)
# Scatter plot with the fitted regression line overlaid.
plot(consumption~income, data=usconsumption,ylab="% change in consumption", xlab="% change in income")
abline(fit.ex3)
summary(fit.ex3)
#scenario based forecasting. how much would consumption change given 1% increase and decrease in income
fcast <- forecast(fit.ex3, newdata=data.frame(income=c(-1,1)))
plot(fcast, ylab="% change in consumption", xlab="% change in income")
#linear trend example: regress tourist arrivals on a linear time trend
fit.ex4 <- tslm(austa ~ trend)
# 5-step-ahead forecast with 80% and 95% prediction intervals
f <- forecast(fit.ex4, h=5, level=c(80,95))
plot(f,ylab="International tourist arrivals to Australia (millions)", xlab="t")
lines(fitted(fit.ex4),col="blue")
summary(fit.ex4)
#residual autocorrelation: with time series it is highly likely that the value of a variable observed in the current
#time period will be influenced by its value in the previous period. The forecasts from a model with autocorrelated errors
#are still unbiased, but they will usually have larger prediction intervals than they need to
# 2x2 panel: residual series and ACF for each of the two models
par(mfrow=c(2,2))
res3 <- ts(resid(fit.ex3), s=1970.25, f=4) # quarterly ts starting 1970 Q2
plot.ts(res3,ylab="res (Consumption)")
abline(0,0)
Acf(res3)
res4 <- resid(fit.ex4)
plot(res4,ylab="res (Tourism)")
abline(0,0)
Acf(res4)
|
69937747fc01f7a362dea1a02864f4a3a9c63042 | 3fe64eef3db9e5cbb920f8bacacb83029720df04 | /hw02/hw02-6.R | 17fc1df7cb805779c45a1cff8772c56437cfc742 | [] | no_license | mtaylor-semo/klingeman_dylan | f71962848e807afb88e326247f130683c3274aef | 875c4f9b0ff7e523c45ed423dd0af7a01e812edb | refs/heads/master | 2022-11-27T19:39:23.316431 | 2020-08-04T21:01:02 | 2020-08-04T21:01:02 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 2,113 | r | hw02-6.R | # Dylan Klingeman
### 6.1 Introduction to Lists
### 6.2 Create a list
# Numeric vector with numerics from 1 to 6
numbers <- 1:6
# Should make a vector of the numbers 1 through 6
# Logical vector with TRUE and FALSE
boolean <- rep(c(TRUE, FALSE), 4)
boolean
# should report TRUE, FALSE four times (8 values total)
# Letter matrix with the first nine letters of the English alphabet
letters <- matrix(LETTERS[1:9], ncol = 3)
# sorts the first 9 letters of the alphabet into 3 columns (filled column-wise)
# NOTE(review): this shadows base R's built-in `letters` constant.
# First 10 elements of the chickwts data frame
chicks <- chickwts[1:10,]
# Should show the first 10 rows of the data frame
# Use the "list()" function to create "the_list" with the above objects. Use the names of the objects to name the objects
the_list <- list(numbers = numbers, boolean = boolean, letters = letters, chicks = chicks)
# Should form a list of all the data made above
# Display the contents of "the_list"
the_list
# Shows the data within the_list
#
### 6.3 Extract elements from a list
# Add the line to create the horsebean vector
hb_chicks <- chickwts$weight[1:10]
# takes only rows 1-10 (the horsebean group) of the weight column in chickwts
# Add the line to create the linseed vector
ls_chicks <- chickwts$weight[11:20]
# takes only rows 11-20 (the linseed group) of the weight column in chickwts
# Create a list object with the results of the t-test.
chicks_t <- t.test(hb_chicks, ls_chicks, var.equal = TRUE)
# t.test() returns a list (an "htest" object) holding the results
# Run the "str()" function on "chicks_t"
str(chicks_t)
# shows the structure of chicks_t
# Display the numeric value of the "statistic" object from "chicks_t"
chicks_t$statistic
# displays the t-value in chicks_t
# Display the numeric value of the "p.value" object from "chicks_t"
chicks_t$p.value
# displays the p-value in chicks_t
# Display the lower confidence limit
chicks_t$conf.int[1]
# displays the lower confidence limit
# Display the upper confidence limit
chicks_t$conf.int[2]
# displays the upper confidence limit
# Create a text string called "whew" with the value "I did it!"
whew <- "I_did_it!"
whew
# prints the stored string whenever whew is typed
# NOTE(review): the stored value is "I_did_it!" (underscores), not
# "I did it!" as the prompt above asks -- confirm which is intended.
# |
1796430dd4984511d006895b739e60d6fae40721 | 4e67e325a28518d78ae9c6a9d14204410421f9d5 | /man/dat2.Rd | 630a2c38cbe9f2516e98f071e51221f5e24bc9d5 | [
"MIT"
] | permissive | shkonishi/rskodat | 857edc919aae8309d2c46ca61b93c072777f836a | 4247ebac0d5d025ba17c4ef82b791892c8eb1be8 | refs/heads/master | 2020-06-12T02:16:18.859935 | 2019-06-27T21:20:10 | 2019-06-27T21:20:10 | 194,164,286 | 1 | 0 | null | null | null | null | UTF-8 | R | false | true | 355 | rd | dat2.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/dat2.R
\docType{data}
\name{dat2}
\alias{dat2}
\title{dat2}
\format{A data frame with 18 rows and 5 columns.}
\usage{
data(dat2)
}
\description{
Multivariate compression value with three factor attributes.
}
\examples{
data(dat2)
head(dat2)
}
\keyword{datasets}
|
f9d1a0c2a187043e7853cfa63c0ae5066abe01cb | 734405d4e0d6f941c50e5b4862943ffc1cab0aa6 | /script/0627.R | 20e397698a390e870b8a5d5d2f02a99ac3477e48 | [] | no_license | valborgs/RStudy | e98a079a5d0ea7a48f6bf19555630d117df44983 | ac1808fcc1c3362876e8f2d8823d304496f29231 | refs/heads/master | 2020-07-04T14:15:30.309572 | 2019-08-14T08:32:00 | 2019-08-14T08:32:00 | 202,308,511 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 3,997 | r | 0627.R | # 조건문 복습
# Use scan() to read id and pass values from the user.
# Check whether id == 'abc' AND pass == '1234', then print login success or failure.
ID<-scan(what="",n=1,quiet=TRUE) # what="" reads the input as a character string
PASS<-scan(what="",n=1,quiet=TRUE)
if((ID=='abc')&(PASS=='1234')){
print('로그인 성공') # Korean for "login succeeded"
}else{
print('로그인 실패') # Korean for "login failed"
}
# Store "red" in color if the traffic count is >= 500, "yellow" if >= 300,
# "blue" if >= 100, and "white" otherwise.
traffic<-function(){
data<-scan(n=1,quiet=TRUE)
color<-"white"
if(data>=500){
color<-"red"
}else if(data>=300){
color<-"yellow"
}else if(data>=100){
color<-"blue"
}else{
color<-"white"
}
print(color)
}
traffic()
# Print 0 if the user's number is even, 1 if it is odd
n<-scan(n=1,quiet=TRUE)
ifelse(n%%2==0,0,1)
# The for loop
# Used when the same work has to be done several times,
# i.e. when the same operator, function, or statement would appear repeatedly.
# Print 1 through 10
# using print() ten times:
print(1)
print(2)
print(3)
.
.
.
# with a loop instead:
for(i in 1:10){
print(i)
}
# for(counter variable in iteration set){
#   repeated statements
# }
a<-1
for(i in 6:10){
print(a)
a<-a+1
}
a<-c('가','나','다','라','마')
for(i in 1:5){
print(a[i])
}
# Iterating over a vector variable repeats once per element of the vector
for(i in a){
print(i)
}
# The vector variable dt holds the values 5,7,9,3,5,60,70,21.
# Write a loop that prints the sum of all values in dt.
sum1<-0
dt<-c(5,7,9,3,5,60,70,21)
for(i in dt){
sum1<-sum1+i
}
sum1
# How many times does the loop body above run?
# -> 8 times
# Write a statement computing the sum of 25 through 10005
sum2<-0
count<-1
for(i in 25:10005){
sum2<-sum2+i
cat(count,sum2,'\n')
count<-count+1
}
sum2
# Print the multiplication table for 2
for(i in 1:9){
cat(2,"x",i,"=",2*i,"\n")
}
# Read a number from the user and print that multiplication table
dan<-scan(n=1,quiet=TRUE)
for(i in 1:9){
cat(dan,"x",i,"=",dan*i,"\n")
}
# The while loop
# Depending on how it is written, while can do what for does.
# Used when the number of repetitions is not known in advance.
# while(condition){
#   statements
# }
# repeats while the condition is TRUE
n<-0
while(n<5){
print("돌아갑니다") # Korean for "still looping"
n<-n+1
}
while(TRUE){
# an exit statement is required here (otherwise this loops forever)
}
# Write a while loop that prints 1 through 5000,
# but does not print multiples of 3.
n=1
while(n<=5000){
if(n%%3!=0){
print(n)
}
n<-n+1
}
n=0
while(n<=50){
n<-n+1
if(n%%3==0){
next
}
print(n)
}
n=0
while(n<=5000){
n<-n+1
if(n==1000){
break
}
print(n)
}
# break exits the loop
# next skips the rest of this iteration and starts the next one
# Write a loop printing 1 through 10,
# but do not print 4.
n=0
while(n<10){
n<-n+1
if(n==4){
next
}
print(n)
}
# Write a program adding the numbers 1 through 10;
# stop once the running total exceeds 30 and show the number being added
# at the moment it exceeds 30.
n=0
sum=0
while(n<=10){
n<-n+1
sum=sum+n
if(sum>30){
break
}
}
n;sum
# Increment from 1 while accumulating; we want the increment at which the
# running total first exceeds 5500.
n<-0
sum<-0
cond<-FALSE
while(cond){
n<-n+1
sum<-sum+n
if(sum>5500){
break
}
}
sum;n
# When the while condition is FALSE the body never runs, not even once
# The repeat loop
# repeat{
#   if(cond)break
# }
# Difference between while and repeat:
# while runs a minimum of 0 times,
# repeat runs at least once.
# break: exits the loop construct
# next: skips ahead to the next iteration
# usable in every kind of loop
|
c4cab8a7dd73412aebe25865a7c95e4682e9f556 | 2a7e77565c33e6b5d92ce6702b4a5fd96f80d7d0 | /fuzzedpackages/MGDrivE/man/oneDay_eggDM_stochastic_Patch.Rd | 3ef13a635a99a7125841041e51e15b8350f380df | [] | no_license | akhikolla/testpackages | 62ccaeed866e2194652b65e7360987b3b20df7e7 | 01259c3543febc89955ea5b79f3a08d3afe57e95 | refs/heads/master | 2023-02-18T03:50:28.288006 | 2021-01-18T13:23:32 | 2021-01-18T13:23:32 | 329,981,898 | 7 | 1 | null | null | null | null | UTF-8 | R | false | true | 603 | rd | oneDay_eggDM_stochastic_Patch.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/Patch-Simulation.R
\name{oneDay_eggDM_stochastic_Patch}
\alias{oneDay_eggDM_stochastic_Patch}
\title{Stochastic Egg Death and Pupation}
\usage{
oneDay_eggDM_stochastic_Patch()
}
\description{
Daily egg survival is sampled from a binomial distribution, where survival
probability is given by \eqn{1-\mu_{aq}}. \eqn{\mu_{aq}} corresponds
to daily non-density-dependent aquatic mortality. \cr
Eggs transition into larvae at the end of \eqn{T_e}. \cr
See \code{\link{parameterizeMGDrivE}} for how these parameters are derived.
}
|
47f415221164d46d266e920c83c59dd3d7635198 | 4dbf5223f27d3aa0db6800058404fe1ba0c017d1 | /ICH project/SPOTLIGHT_STOP-IT inc_exc.R | 792b0fac8e7b6bd9971870686d5d7232ce1928c4 | [] | no_license | awong13375/r-projects | b07895427228d1ab222435346042121f01b09a7c | e82d96d0dcaf27effa33ca6f8bd27dd153b6105d | refs/heads/master | 2023-07-19T14:19:03.661296 | 2021-08-31T00:32:46 | 2021-08-31T00:32:46 | 198,112,096 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 3,547 | r | SPOTLIGHT_STOP-IT inc_exc.R | library(googlesheets4)
library(googledrive)
library(irr)
library(stringr)
library(blandr)
library(ggplot2)
library(cowplot)
library(data.table)
library(stringr)
# Open data files ----
# Master demographics workbook (sheet "ALL") combining both trials.
fulldb=readxl::read_xlsx("D:/Google Drive/Desktop files/Dal Med/ICH/Archive/GraebCombined_withmRS (Recovered).xlsx", sheet="ALL")
fulldb=as.data.frame(fulldb)
keep_cols=c("V1", "PID", "time","study")
# SPOTLIGHT
# DICOM file-name listings; transpose() turns the single wide row from
# read.csv into one row per file name (column V1).
dcm_add_files=transpose(as.data.frame(read.csv("D:/ICH Files/SPOTLIGHT/dcm_add_files.csv", fileEncoding="UTF-8-BOM", header=FALSE)))
dcm_previous_files=transpose(as.data.frame(read.csv("D:/ICH Files/SPOTLIGHT/dcm_previous_files.csv", fileEncoding="UTF-8-BOM", header=FALSE)))
SL_dcm_combined_files=rbind(dcm_add_files, dcm_previous_files)
# Parse patient-id parts and timepoint from fixed positions in the file name.
# NOTE(review): the substr offsets encode the SPOTLIGHT naming convention --
# verify against an actual file name before reusing this script.
SL_dcm_combined_files$id_1=substr(SL_dcm_combined_files$V1, 11, 13)
SL_dcm_combined_files$id_2=substr(SL_dcm_combined_files$V1, 15, 18)
SL_dcm_combined_files$time=substr(SL_dcm_combined_files$V1, 20, 20)
SL_dcm_combined_files$PID=paste(SL_dcm_combined_files$id_1, SL_dcm_combined_files$id_2, sep="")
SL_dcm_combined_files$study=c("SPOTLIGHT")
SL_dcm_combined_files=SL_dcm_combined_files[keep_cols]
# STOP-IT
dcm_add_files=transpose(as.data.frame(read.csv("D:/ICH Files/STOP-IT/dcm_add_files.csv", fileEncoding="UTF-8-BOM", header=FALSE)))
dcm_previous_files=transpose(as.data.frame(read.csv("D:/ICH Files/STOP-IT/dcm_previous_files.csv", fileEncoding="UTF-8-BOM", header=FALSE)))
SI_dcm_combined_files=rbind(dcm_add_files, dcm_previous_files)
# STOP-IT names encode the id differently; id_1 is a number offset by +10.
# NOTE(review): confirm the +10 offset convention against the study codebook.
SI_dcm_combined_files$id_1=as.numeric(substr(SI_dcm_combined_files$V1, 1, 2))+10
SI_dcm_combined_files$id_2=substr(SI_dcm_combined_files$V1, 4, 5)
SI_dcm_combined_files$id_3=substr(SI_dcm_combined_files$V1, 7, 9)
# Timepoint from the file name: 0 = baseline, 1 = follow-up, 99 = unclassified.
SI_dcm_combined_files$time=c(99)
SI_dcm_combined_files$time[grepl("Base",SI_dcm_combined_files$V1)]=0
SI_dcm_combined_files$time[grepl("Follow",SI_dcm_combined_files$V1)]=1
SI_dcm_combined_files$PID=paste(SI_dcm_combined_files$id_2, SI_dcm_combined_files$id_3, SI_dcm_combined_files$id_1, sep="")
SI_dcm_combined_files$study=c("STOP-IT")
SI_dcm_combined_files=SI_dcm_combined_files[keep_cols]
# SPOTLIGHT STOP IT Merge
SL_SI_dcm_files=rbind(SL_dcm_combined_files, SI_dcm_combined_files)
SL_SI_dcm_files$PID=as.character(SL_SI_dcm_files$PID)
#write.csv(SL_SI_dcm_files, "D:/Google Drive/Desktop files/Dal Med/ICH/SL_SI_dcm_files.csv")
# Included/Excluded Studies ----
# Cross-classify the demographics flag (SPOT_Present) against whether a DICOM
# file actually exists on disk: 0/1 = flag and files agree, 2/3 = mismatches,
# 99 = neither rule matched.
# NOTE(review): assumes SPOT_Present uses 1 = present, 2 = absent -- confirm.
dcm_list=SL_SI_dcm_files$PID
fulldb$inc_exc=c(99)
fulldb$inc_exc[(fulldb$SPOT_Present==2)& !(fulldb$PID %in% dcm_list)]=0
fulldb$inc_exc[(fulldb$SPOT_Present==1)& (fulldb$PID %in% dcm_list)]=1
fulldb$inc_exc[(fulldb$SPOT_Present==1)& !(fulldb$PID %in% dcm_list)]=2
fulldb$inc_exc[(fulldb$SPOT_Present==2)& (fulldb$PID %in% dcm_list)]=3
# Patients not in SL/SI demographics xls at all, but included in our study
dcm_list=SL_SI_dcm_files[,c("PID","study")]
missing_dcm=subset(dcm_list, !(dcm_list$PID %in% fulldb$PID))
missing_pid_list=as.factor(as.character(missing_dcm$PID))
# Identify IVH cases ----
# Flag files whose patient appears on the IVH sheet (1 = IVH, 0 = not).
ivhdb=readxl::read_xlsx("D:/Google Drive/Desktop files/Dal Med/ICH/Archive/GraebCombined_withmRS (Recovered).xlsx", sheet="IVH")
ivhdb=as.data.frame(ivhdb)
ivh_list=as.character(ivhdb$Subject)
SL_SI_dcm_files$IVH=c(99)
SL_SI_dcm_files$IVH[SL_SI_dcm_files$PID %in% ivh_list]=1
SL_SI_dcm_files$IVH[!(SL_SI_dcm_files$PID %in% ivh_list)]=0
#write.csv(SL_SI_dcm_files, "D:/Google Drive/Desktop files/Dal Med/ICH/SL_SI_dcm_files_ivh.csv")
SL_SI_dcm_files_IVH=subset(SL_SI_dcm_files, SL_SI_dcm_files$IVH==1)
print(as.factor(as.character(SL_SI_dcm_files_IVH$PID)))
|
3982ff9d3ff1244665e02fe4f1eba16f959ceae1 | 6eeeb62fe59d2196be375361811c0fd05ca0707a | /man/df_dict.Rd | 922fc2e08464b5ca7df21e9d2528cebb1e8423e4 | [
"CC0-1.0",
"LicenseRef-scancode-unknown-license-reference"
] | permissive | Aariq/mynotebooks | 94a125b913119022df60de4acee4c57154d95f84 | 74172002ad11e724395d3b7ed02099aeecc3828c | refs/heads/master | 2021-09-15T15:54:55.258808 | 2021-09-13T21:56:54 | 2021-09-13T21:56:54 | 199,040,003 | 0 | 0 | null | null | null | null | UTF-8 | R | false | true | 375 | rd | df_dict.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/utils.r
\name{df_dict}
\alias{df_dict}
\title{Insert data dictionary}
\usage{
df_dict(x = NULL)
}
\arguments{
\item{x}{a data frame}
}
\value{
Inserts a data dictionary template as a markdown list at the cursor
}
\description{
Insert data dictionary
}
\examples{
\dontrun{
df_dict(warpbreaks)
}
}
|
9d04ece57a296d1aa4922a6ee30ca477710d9a1b | fcd90210265c2e3a6951dd43a70ce7bd0c8a49c9 | /man/getGAF.Rd | cde7a8396569a0505b1445c371e2058d7f76486d | [
"MIT"
] | permissive | ropensci/BaseSet | 49c6d7db6e7162ae036d2b0afe612b50f849bd37 | 6c492b6a00e54bd40dff195880cabf1cf71ca346 | refs/heads/master | 2023-08-31T19:43:23.008636 | 2023-08-29T22:34:51 | 2023-08-29T22:34:51 | 159,389,093 | 5 | 0 | NOASSERTION | 2021-04-16T13:05:25 | 2018-11-27T19:37:01 | R | UTF-8 | R | false | true | 653 | rd | getGAF.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/obo.R
\name{getGAF}
\alias{getGAF}
\title{Read a GAF file}
\usage{
getGAF(x)
}
\arguments{
\item{x}{A file in GAF format}
}
\value{
A TidySet object
}
\description{
Read a GO Annotation File (GAF) formatted file
}
\examples{
gafFile <- system.file(
package = "BaseSet", "extdata",
"go_human_rna_valid_subset.gaf"
)
gs <- getGAF(gafFile)
head(gs)
}
\references{
The format is defined \href{https://geneontology.org/docs/go-annotation-file-gaf-format-2.1/}{here}.
}
\seealso{
Other IO functions:
\code{\link{getGMT}()},
\code{\link{getOBO}()}
}
\concept{IO functions}
|
fe30a5193822653d649ac5b22409fe873bf00f31 | ae8b51ba12067916f02b44c0360f37327171626e | /DataCamp/Intermediate-R/Utilities.R | cafe4320f6d5bcaa08ce41235f402f2158beb10d | [] | no_license | r-repos-org/R-2 | 621e25fd2a05e1d9272a5d4587c965fb6231b6be | c1ccbc1519a73ae7914b66c4c5ffe65449dcac24 | refs/heads/master | 2022-04-06T17:04:14.810390 | 2018-01-07T16:42:58 | 2018-01-07T16:42:58 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 3,478 | r | Utilities.R | ###### Useful Functions ######
### Mathematical utilities ###
errors <- c(1.9, -2.6, 4.0, -9.5, -3.4, 7.3)
# Sum of absolute rounded values of errors
sum(abs(round(errors)))
vec1 <- c(1.5, 2.5, 8.4, 3.7, 6.3)
vec2 <- rev(vec1)
# Mean of the absolute values of both vectors pooled together
mean(c(abs(vec1), abs(vec2)))
# Odd numbers 1,3,5,7 repeated seven times
rep(seq(from = 1, to = 7, by = 2), times = 7)
### Data Utilities ###
linkedin <- list(16, 9, 13, 5, 2, 17, 14)
facebook <- list(17, 7, 5, 16, 8, 13, 14)
# NOTE(review): as.vector() on a list still returns a list; the actual numeric
# conversion happens only in the as.numeric() call further below.
li_vec <- as.vector(linkedin)
fb_vec <- as.vector(facebook)
# Append fb_vec to li_vec: social_vec
social_vec <- append(li_vec, fb_vec)
# Sort social_vec (descending)
sort(as.numeric(social_vec), decreasing = TRUE)
### Beat Gauss using R ###
# Create first sequence: seq1
seq1 <- seq(1, 500, by = 3)
# Create second sequence: seq2
seq2 <- seq(1200, 900, by = -7)
# sum() accepts multiple vectors and totals them all
sum(seq1, seq2)
###### Regular Expressions ######
### grepl & grep ###
emails <- c("john.doe@ivyleague.edu", "education@world.gov",
            "dalai.lama@peace.org", "invalid.edu",
            "quant@bigdatacollege.edu", "cookie.monster@sesame.tv")
# Use grepl() to match for "edu"
grepl("edu", emails)
# Use grep() to match for "edu", save result to hits
hits <- grep("edu", emails)
# Subset emails using hits
emails[hits]
# Use grepl() to match for .edu addresses more robustly
# (requires an "@", then anything, then a literal ".edu" at the very end)
grepl("@.*\\.edu$", emails )
# Use grep() to match for .edu addresses more robustly, save result to hits
hits <- grep("@.*\\.edu$", emails)
# Subset emails using hits
emails[hits]
### sub & gsub ###
# Use sub() to convert the email domains to datacamp.edu
sub("@.*\\.edu$", "@datacamp.edu", emails)
###### Times and Dates ######
### Right here, right now ###
today <- Sys.Date()
# See what today looks like under the hood (days since 1970-01-01)
unclass(today)
# Get the current time: now
now <- Sys.time()
# See what now looks like under the hood (seconds since 1970-01-01)
unclass(now)
### Create and format dates ###
# Definition of character strings representing dates
str1 <- "May 23, '96"
str2 <- "2012-03-15"
str3 <- "30/January/2006"
# Convert the strings to dates: date1, date2, date3
date1 <- as.Date(str1, format = "%b %d, '%y")
date2 <- as.Date(str2, format = "%Y-%m-%d")
date3 <- as.Date(str3, format = "%d/%B/%Y")
# Convert dates to formatted strings
format(date1, "%A") # full weekday name
format(date2, "%d") # day of month
format(date3, "%b %Y") # abbreviated month plus year
### Create and format times ###
# Definition of character strings representing times
str1 <- "May 23, '96 hours:23 minutes:01 seconds:45"
str2 <- "2012-3-12 14:23:08"
# Convert the strings to POSIXct objects: time1, time2
time1 <- as.POSIXct(str1, format = "%B %d, '%y hours:%H minutes:%M seconds:%S")
time2 <- as.POSIXct(str2)
# Convert times to formatted strings
format(time1, "%M") # minutes
format(time2, "%I:%M %p") # 12-hour clock with AM/PM
### Calculations with Dates ###
# day1..day5 are Date objects supplied by the DataCamp workspace
# (they are not defined in this script).
# Difference between last and first pizza day
day5 - day1
# Create vector pizza
pizza <- c(day1, day2, day3, day4, day5)
# Create differences between consecutive pizza days: day_diff
day_diff <- diff(pizza)
# Average period between two consecutive pizza days
mean(day_diff)
### Calculations with Times ###
# login/logout are POSIXct vectors supplied by the DataCamp workspace.
# Calculate the difference between login and logout: time_online
time_online <- logout - login
sum(time_online)
mean(time_online)
### Time is of the essence ###
# astro/meteo are character date vectors supplied by the DataCamp workspace.
# Convert astro to vector of Date objects: astro_dates
astro_dates <- as.Date(astro, "%d-%b-%Y")
# Convert meteo to vector of Date objects: meteo_dates
meteo_dates <- as.Date(meteo, "%B%d, %y")
# Calculate the maximum absolute difference between astro_dates and meteo_dates
max(abs(astro_dates - meteo_dates))
|
ec8115cbd347d3cb12d590201dca5905e0f917a4 | f38bace9fc1f70068f2119362b4184e3d7122078 | /ensemble_model_training.R | 6e487f209621e78f0cdb1870adb22a3bb56a6dd8 | [] | no_license | thies/paper-uk-vintages | 9aafa11a558be8330cd9c69e5272b3cb432d322c | d31a7930725728efbb367d28d36502d205e58783 | refs/heads/master | 2021-06-24T17:34:10.390676 | 2020-11-14T12:57:46 | 2020-11-14T12:57:46 | 163,590,256 | 5 | 0 | null | null | null | null | UTF-8 | R | false | false | 9,273 | r | ensemble_model_training.R | # load additional libraries
library(spdep)
library(keras)
library(dplyr)
source("~/research/paper-uk-vintages/R/helper.R")
#==================================
# Create 4096 vectors, by stacking 2048 from nearest neighbour
# Most of the time, this won't be necessary....
if(FALSE){
v <- read.csv("~/research/paper-cambridge-vintages/data/groundtruth_image_vectors.csv", as.is=TRUE)
v <- unique(v)
#vv <- subset(v, !is.na(x) & true_era != "x greenery" & true_era != "w walls" )
v$true_era <- factor(v$true_era)
vector.columns <- grepl('^V\\d+$', colnames(v), perl=TRUE)
# find nearest neighbours
k <- 1
col.knn <- knearneigh(as.matrix(v[,c("x","y")]), k=30)
# calculate average vector for n nearest neighbours
indices <- as.matrix(col.knn[[1]])
# only keep neighbours that offer clean view on building,
# exclude greenery and walls
walls.greenery.index <- which(v$true %in% c("x greenery","w walls"))
indices[indices %in% walls.greenery.index] <- NA
nnvec <- list()
for(i in 1:nrow(v)){
if(i %% 200 == 0){ print(i) }
toid.tmp <- as.character( v$toid[i] )
ind.tmp <- indices[i,]
ind.tmp <- ind.tmp[!is.na(ind.tmp)][1]
nnvec[[ toid.tmp ]] <- as.list( v[ ind.tmp , vector.columns])
}
nnvecs <- as.data.frame( do.call( rbind, nnvec ))
toids.tmp <- rownames(nnvecs)
nnvecs <- matrix(unlist(nnvecs), ncol=2048 )
colnames(nnvecs) <- paste("VV", 1:ncol(nnvecs), sep="")
nnvecs <- as.data.frame(nnvecs)
nnvecs$toid <- as.numeric(unlist(toids.tmp))
rm(nnvec)
vvv <- merge(v, nnvecs, by="toid")
rm(nnvecs)
write.csv(vvv, "~/research/paper-cambridge-vintages/data/groundtruth_image_vectors_1nn.csv", row.names = FALSE)
}
#############################################
# train the model(s)
library(spdep)
library(keras)
library(dplyr)
# load stacked Inception vectors
# 2048 from obs (Variables "V123"), 2048 from nearest neighbour ("VV123")
v <- read.csv("~/research/paper-cambridge-vintages/data/groundtruth_image_vectors_1nn.csv", as.is=TRUE)
vv <- v
# rescale inception vectors using one global max over all V/VV columns
vector.columns <- grepl('^V+\\d+$', colnames(vv), perl=TRUE)
vv[,vector.columns] <- vv[,vector.columns]/max(vv[,vector.columns])
# 100 resampling iterations; each trains two classifiers and writes its
# out-of-sample predictions to disk for later majority voting.
for(iteration in 1:100){
  cat(paste("\n\n\n\n\n\n\nIteration: ", iteration, "\n\n"))
  # stratified training sample
  # Exclude observations with bad images (those not showing home)
  # There is no point in training with those
  train.pre <- subset(vv, true_era != "x greenery" & true_era != "w walls" )
  train.pre <- subset(train.pre, !is.na(true_era))
  train <- train.pre %>%
    group_by(true_era) %>%
    sample_n(330)
  # there are not that many Georgians in the sample, unfortunately
  # training sample will be not fully balanced...
  tmp <- subset(train.pre, ! toid %in% unique(train$toid))
  tmp <- subset(tmp, true_era != "a georgian")
  train.tmp <- tmp %>%
    group_by(true_era) %>%
    sample_n(270)
  train <- rbind(train, train.tmp)
  train$true_era <- factor(train$true_era)
  rm(train.pre)
  # test with the rest, true out of sample
  test <- subset(vv, ! toid %in% unique(train$toid))
  ###### TRAIN 2 MODELS
  # - based on one picture only (columns V1..)
  # - based on the nearest neighbour as well (adds columns VV1..)
  # ----------
  model1cols <- colnames(train)[grepl('^V\\d+$', colnames(train), perl=TRUE)]
  model2cols <- colnames(train)[grepl('^V+\\d+$', colnames(train), perl=TRUE)]
  for(vector.columns in list(model1cols, model2cols ) ){
    veclength <- length(vector.columns) # 2048 or 4096; tags the output files
    # define the train and test data
    x_train <- as.matrix( train[,vector.columns] )
    x_test <- as.matrix( test[,vector.columns] )
    y_train <- to_categorical( as.numeric(train$true_era) -1 ) # 0-based class codes
    # define model: two hidden relu layers with dropout, softmax output
    model <- keras_model_sequential()
    model %>%
      layer_dense(units = ncol(x_train)/2, activation = 'relu', input_shape = c(ncol(x_train))) %>%
      layer_dropout(rate = 0.5) %>%
      layer_dense(units = ncol(x_train)/4, activation = 'relu') %>%
      layer_dropout(rate = 0.5) %>%
      layer_dense(units = ncol(y_train), activation = 'softmax')
    # model summary
    summary(model)
    # compile model
    model %>% compile(
      loss = 'categorical_crossentropy',
      optimizer = optimizer_rmsprop(),
      metrics = c('accuracy')
    )
    # train model and plot loss functions/accuracy
    history <- model %>% fit(
      x_train, y_train,
      epochs = 50, batch_size = 128,
      validation_split = 0.2
    )
    jpeg(filename=paste("~/research/paper-cambridge-vintages/data/iterations/training_history",iteration,"_", veclength,".jpg", sep=""), height=400, width=700)
    plot(history)
    dev.off()
    # predict, out of sample; predictions are 0-based class indices
    pred <- model %>% predict_classes(x_test)
    predictions.tmp <- as.data.frame( cbind(test$toid, pred, veclength))
    colnames(predictions.tmp) <- c("toid","pred","model")
    write.csv(predictions.tmp, file = paste( "~/research/paper-uk-vintages/data/derived/k-fold-preds", iteration, "_", veclength,".csv", sep=""), row.names=FALSE)
    rm(predictions.tmp)
  }
}
# find best classification
bestPrediction <- function( model = "4096", num=100){
  # Majority vote across up to `num` per-iteration prediction files for the
  # given model tag ("2048" = single-image model, "4096" = + nearest neighbour).
  # Returns a data frame with columns "toid" and "pred<model>".
  pred <- list()
  for(n in 1:num){
    # try(): iterations whose CSV is missing on disk are skipped silently
    try( pred[[as.character(n)]] <- read.csv(file = paste("~/research/paper-uk-vintages/data/derived/k-fold-preds",n,"_",model,".csv", sep=""), as.is=TRUE)[,c("toid","pred")] )
  }
  pred <- as.data.frame( do.call(rbind, pred))
  # Vote counts: one row per building (toid), one column per predicted class
  tab <- table(pred$toid, pred$pred)
  tab.max <- apply(tab, 1, max)
  pred.max <- rep(NA, nrow(tab))
  for(i in 1:ncol(tab)){
    # NOTE(review): when two classes tie for the max vote count, the LAST
    # class (highest column index) wins, because later iterations overwrite
    # earlier assignments -- confirm this tie-break is intended.
    pred.max[ tab[,i] == tab.max] <- i-1
  }
  tmp <- as.data.frame( cbind(row.names(tab),pred.max))
  colnames(tmp) <- c("toid",paste("pred",model, sep=""))
  return( tmp )
}
distPrediction <- function( model = "4096", num=100){
  # Stack every available k-fold prediction CSV for `model` into one long
  # data frame of (toid, pred) rows; folds whose file is missing on disk
  # are skipped silently via try().
  fold_preds <- list()
  for (fold in 1:num) {
    path <- paste0("~/research/paper-uk-vintages/data/derived/k-fold-preds", fold, "_", model, ".csv")
    try(fold_preds[[as.character(fold)]] <- read.csv(file = path, as.is = TRUE)[, c("toid", "pred")])
  }
  as.data.frame(do.call(rbind, fold_preds))
}
# Tabulate the full distribution of per-fold predictions for the 4096 model
# (rows = toid, columns = predicted class) and save it for the paper.
pred.dist.4096 <- distPrediction(model="4096")
pred.dist.4096.tab <- table(pred.dist.4096$toid, pred.dist.4096$pred)
write.csv(pred.dist.4096.tab, paste("~/research/paper-uk-vintages/data/derived/k-fold-preds_tabulated_4096.csv", sep=""))
# Majority-vote prediction per toid for both classifiers, merged together and
# then joined with the ground-truth era labels from `v` (defined elsewhere in
# this script -- not visible here).
pred.4096 <- bestPrediction(model="4096")
pred.2048 <- bestPrediction(model="2048")
preds <- merge(pred.2048, pred.4096, by="toid")
preds <- merge(preds, v[,c("toid","true_era")], all.x=TRUE)
preds$true_era <- factor(preds$true_era)
lev <- levels(preds$true_era)
# Map the numeric class codes back to era labels via the factor levels.
# NOTE(review): bestPrediction() encodes classes 0-based (i-1), while lev[x]
# indexes 1-based. This only lines up if the pred columns are factors here, so
# that as.numeric() returns 1-based level codes -- confirm (under
# stringsAsFactors = FALSE, class 0 would map to nothing).
preds$pred4096 <- sapply(as.numeric(preds$pred4096), function(x){ return(lev[x])} )
preds$pred2048 <- sapply(as.numeric(preds$pred2048), function(x){ return(lev[x])} )
write.csv(preds, file="~/research/paper-uk-vintages/data/derived/predicted_styles.csv", row.names=FALSE)
#============================================
# create confusion matrix tables
# Confusion matrix for the spatial (4096) classifier:
# rows = predicted era, columns = true era.
# NOTE(review): only this table is truncated to its first 7 columns; the 2048
# table below is not -- confirm this is intentional (e.g. an extra spurious
# class appearing only in the 4096 predictions).
tab <- table(preds[, c("pred4096","true_era")])[,1:7]
# `custom.labels` is defined elsewhere in this script (not visible here).
colnames(tab) <- unlist(custom.labels)
rownames(tab) <- unlist(custom.labels)
# classification accuracy statistics
# recall:    diagonal / column sums  (per true class)
# precision: diagonal / row sums     (per predicted class)
recall <- diag(tab)/colSums(tab)
precision <- diag(tab)/rowSums(tab)
f1scores <- 2*(precision*recall)/(precision+recall)
f1scores.4096 <- f1scores
# Column-normalise the counts to percentages of each true class.
tab.mean <- tab
for(i in 1:ncol(tab)){
tab.mean[,i] <- (tab.mean[,i]/sum(tab.mean[,i]))*100
}
# Stack raw counts, percentages and the three statistics into one table.
tab.4096 <- rbind(tab, tab.mean, recall, precision, f1scores)
# base model
# Same computation for the base (2048) classifier.
tab <- table(preds[, c("pred2048","true_era")])
colnames(tab) <- unlist(custom.labels)
rownames(tab) <- unlist(custom.labels)
# classification accuracy statistics
recall <- diag(tab)/colSums(tab)
precision <- diag(tab)/rowSums(tab)
f1scores <- 2*(precision*recall)/(precision+recall)
f1scores.2048 <- f1scores
tab.mean <- tab
for(i in 1:ncol(tab)){
tab.mean[,i] <- (tab.mean[,i]/sum(tab.mean[,i]))*100
}
tab.2048 <- rbind(tab, tab.mean, recall, precision, f1scores)
library(xtable)
# Per-cell decimal places for xtable: one extra column because xtable's
# `digits` matrix includes the row-name column. Rows 15:17 get 2 decimals --
# presumably the recall/precision/f1 rows appended by rbind() above (7 count
# rows + 7 percentage rows before them); TODO confirm the row count.
confusion.matrices.mean.digits <- matrix(0,nrow=nrow(tab.4096), ncol=ncol(tab.4096)+1)
confusion.matrices.mean.digits[15:17,]<-2
# Write the 4096 confusion matrix as a LaTeX table.
# NOTE(review): in the sanitizer regex ".\\d+$" the leading dot matches ANY
# character, so it also strips the character preceding the digits; presumably
# "\\.\\d+$" (a literal dot before the digit suffix) was intended -- confirm.
print.xtable(xtable(tab.4096,digits=confusion.matrices.mean.digits),
booktabs = TRUE,
sanitize.rownames.function = function(x){
x <- gsub(".\\d+$","", x, perl=TRUE)
x <- gsub("Contemporary","Cont.", x, perl=TRUE)
return( x )
},
size="footnotesize",
type="latex",
file="~/research/paper-uk-vintages/text/confusion_matrix_raw_4096.tex"
)
# Same export for the base (2048) classifier.
print.xtable(xtable(tab.2048, digits=confusion.matrices.mean.digits),
booktabs = TRUE,
sanitize.rownames.function = function(x){
x <- gsub(".\\d+$","", x, perl=TRUE)
x <- gsub("Contemporary","Cont.", x, perl=TRUE)
return( x )
},
size="footnotesize",
type="latex",
file="~/research/paper-uk-vintages/text/confusion_matrix_raw_2048.tex"
)
# Grouped bar chart comparing per-era F1 scores of the two classifiers.
# `colour` is defined elsewhere in this script (not visible here).
jpeg(filename="~/research/paper-uk-vintages/text/figures/barplot_f1scores.jpg", width=700, height=350)
par(mar=c(3.1,3.1,1,1.1))
barplot(t(cbind(f1scores.2048, f1scores.4096))[,1:7], beside=TRUE, col=colour[2:3], names.arg=c("Georg.","Early V.", "Late V./E.","Interw.","Postw.","Cont.","Revival"), legend.text=c("base classifier","spatial classifier"))
box()
dev.off()
|
262b1f8961f9f33c2ba038d973ccade616218da0 | 3525be6e50c10b1bffcf4747d37e459631f48de3 | /Code/R/Var-Cov_UN.R | 8e13d57f7699b63b2176f9eec83093e6995c9aa0 | [] | no_license | sbmeco1991/Simulation_SAS | bc4f6753e9bd30d01c802c6a922017ccd80326d5 | 3dbc0397f6c484f4081f954444910abf3100b150 | refs/heads/main | 2023-03-03T13:17:14.329707 | 2021-02-15T20:51:11 | 2021-02-15T20:51:11 | 331,181,991 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 1,926 | r | Var-Cov_UN.R | # Computing the UN variance-covariance structure based on the UN var-cov provided by Dr. Nichole Carlson for our class
# project and then scaling the values by dividing each off-diagonal value of the matrices by the mean of the diagonal
# so that this matrix remains closer in value to those of the CS and AR(1) var-cov structure
# Empirical unstructured (UN) variance-covariance matrix for 10 repeated
# measures, as provided by Dr. Nichole Carlson for the class project.
# The matrix is symmetric, so each line below is both a row and a column.
R_10 <- matrix(c(6.988, 6.755, 7.0115, 7.1315, 7.3183, 7.4758, 7.7466, 8.1752, 8.3013, 8.3872,
                 6.755, 8.7139, 8.7507, 8.9942, 9.1205, 9.2025, 9.6835, 9.9827, 10.1662, 10.1089,
                 7.0115, 8.7507, 10.3856, 10.2715, 10.4166, 10.5692, 11.0451, 11.2225, 11.6627, 11.811,
                 7.1315, 8.9942, 10.2715, 12.1268, 11.7023, 11.8662, 12.2579, 12.6695, 13.069, 13.2163,
                 7.3183, 9.1205, 10.4166, 11.7023, 13.014, 12.8216, 13.2749, 13.6248, 14.2559, 14.645,
                 7.4758, 9.2025, 10.5692, 11.8662, 12.8216, 14.3973, 14.2043, 14.4938, 14.9599, 15.4169,
                 7.7466, 9.6835, 11.0451, 12.2579, 13.2749, 14.2043, 15.6551, 15.7345, 16.3114, 16.7909,
                 8.1752, 9.9827, 11.2225, 12.6695, 13.6248, 14.4938, 15.7345, 17.8786, 17.8312, 18.3562,
                 8.3013, 10.1662, 11.6627, 13.069, 14.2559, 14.9599, 16.3114, 17.8312, 19.1028, 19.218,
                 8.3872, 10.1089, 11.811, 13.2163, 14.645, 15.4169, 16.7909, 18.3562, 19.218, 20.5045),
               nrow = 10, ncol = 10)
# The 5-measure UN matrix is exactly the leading 5x5 sub-block of R_10
# (the original spelled the same 25 values out literally).
R_5 <- R_10[1:5, 1:5]
# Rescale so these matrices stay comparable to the CS and AR(1) structures:
# multiplying by 2.5 and dividing by the mean diagonal makes the average
# variance equal to 2.5 while preserving the correlation structure.
scale_to_mean_variance <- function(m, target = 2.5) {
  m * target / mean(diag(m))
}
v5_un <- scale_to_mean_variance(R_5)
v10_un <- scale_to_mean_variance(R_10)
# Auto-print the scaled matrices for inspection.
v5_un
v10_un
# Multiplying the matrices by 2.5 then dividing by the mean of the diagonals - Var-Cov structure
# for UN simulation. |
6646cf939bd08d5b6087599db5f227d06c537971 | 32a061c47b1b870514d344b27d95d3b030138353 | /make_latlon_composite.R | 1301f7127cb978ca3b9c2bdbb8651f82a2272a7d | [] | no_license | jedman/meiyu-jet-data | c18be0cbaec26eb0a793febbc759dfaa562a54df | bc910d7300f57c9b27b9f693675a4c6d5df1067f | refs/heads/master | 2021-01-10T21:17:58.336114 | 2015-03-16T17:01:42 | 2015-03-16T17:01:42 | 32,340,662 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 2,522 | r | make_latlon_composite.R | # this function adds up all the jet counts in a given time window and makes composites based on some yearly selection criteria
# warning: mean latitude than calculated from daily means because some days have more jet counts than others
## Build positive- and negative-phase composites of jet counts inside a fixed
## calendar window, pooling across all years whose EOF/PC loading passes a
## threshold.
##
## min, max : month-day bounds as "MM-DD" strings (e.g. '05-01', '05-10');
##            pasted onto each year to form the window <year>-<min>..<year>-<max>.
## lim      : absolute LPC1 loading threshold; years with LPC1 >= lim feed the
##            positive composite, years with LPC1 <= -lim the negative one.
## type     : which loading series to threshold: 'june' (GPCC_jun$LPC1) or
##            'july' (GPCC_julaug$LPC1).
##
## Relies on objects from the calling environment: GPCC_jun, GPCC_julaug and
## data_eastChbig (per-year lists named "yr<year>" with $time/$lat/$lon/$u).
## Returns a list with $pos and $neg, each holding lat/lon/u of the jet counts
## with u > 0, plus $sd computed on the unfiltered u.
## Caveat (kept from the original): mean latitude differs from a mean of daily
## means because some days contribute more jet counts than others.
make_latlon_composite <- function(min, max, lim, type) {
  latlon_ECbig <- list()
  if (type == 'june') {
    limit <- which(GPCC_jun$LPC1 >= lim)
    limit2 <- which(GPCC_jun$LPC1 <= -lim)
  } else if (type == 'july') {
    limit <- which(GPCC_julaug$LPC1 >= lim)
    limit2 <- which(GPCC_julaug$LPC1 <= -lim)
  } else {
    # BUG FIX: the original called break() here, but break() outside a loop is
    # an error ("no loop for break/next") and does not cleanly abort the
    # function. Fail explicitly with an informative message instead.
    stop("type must be 'june' or 'july'")
  }
  for (y in seq(1958, 2001)) {
    datayr <- paste('yr', y, sep = '')
    datemin <- paste(y, '-', min, sep = '')
    datemax <- paste(y, '-', max, sep = '')
    # NOTE(review): year membership is looked up in GPCC_jun$yr for BOTH
    # 'june' and 'july', even though the july thresholds come from
    # GPCC_julaug -- confirm the $yr columns are identical across the two
    # data sets, otherwise indices are mixed between them.
    if (y %in% GPCC_jun$yr[limit]) {
      index <- which(as.Date(data_eastChbig[[datayr]]$time) >= datemin & as.Date(data_eastChbig[[datayr]]$time) <= datemax)
      latlon_ECbig$pos$lat <- append(latlon_ECbig$pos$lat, data_eastChbig[[datayr]]$lat[index])
      latlon_ECbig$pos$lon <- append(latlon_ECbig$pos$lon, data_eastChbig[[datayr]]$lon[index])
      latlon_ECbig$pos$u <- append(latlon_ECbig$pos$u, data_eastChbig[[datayr]]$u[index])
    }
    if (y %in% GPCC_jun$yr[limit2]) {
      index <- which(as.Date(data_eastChbig[[datayr]]$time) >= datemin & as.Date(data_eastChbig[[datayr]]$time) <= datemax)
      latlon_ECbig$neg$lat <- append(latlon_ECbig$neg$lat, data_eastChbig[[datayr]]$lat[index])
      latlon_ECbig$neg$lon <- append(latlon_ECbig$neg$lon, data_eastChbig[[datayr]]$lon[index])
      latlon_ECbig$neg$u <- append(latlon_ECbig$neg$u, data_eastChbig[[datayr]]$u[index])
    }
  }
  # Keep only jet counts with positive wind speed u. The standard deviations
  # are computed BEFORE filtering, so they describe the full composites.
  index <- which(latlon_ECbig$neg$u > 0.)
  index2 <- which(latlon_ECbig$pos$u > 0.)
  latlon_ECbig$neg$sd <- sd(latlon_ECbig$neg$u)
  latlon_ECbig$pos$sd <- sd(latlon_ECbig$pos$u)
  latlon_ECbig$neg$lat <- latlon_ECbig$neg$lat[index]
  latlon_ECbig$neg$lon <- latlon_ECbig$neg$lon[index]
  latlon_ECbig$neg$u <- latlon_ECbig$neg$u[index]
  latlon_ECbig$pos$lat <- latlon_ECbig$pos$lat[index2]
  latlon_ECbig$pos$lon <- latlon_ECbig$pos$lon[index2]
  latlon_ECbig$pos$u <- latlon_ECbig$pos$u[index2]
  return(latlon_ECbig)
}
|
ad18022540f15f92da980638d772b56c66995b5b | f6392b2ae66c3fc295335fd3d88ddb5711f1c294 | /src/3-do/Pipeline/Pipeline_for_survival_multiple_genes.R | 73ca1cbafd0e63bba7a6c6e273b485310262e993 | [] | no_license | Leo-1992/multiomics | 8c19d2ef9b8c3f9afa9971dd6e38dc28dc27120a | 949c6e9cedf4a2526a74fa2db02646631b6a3205 | refs/heads/master | 2020-06-13T16:46:50.110842 | 2018-10-07T18:26:23 | 2018-10-07T18:26:23 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 11,722 | r | Pipeline_for_survival_multiple_genes.R | ##########################################################################################################################
# #This pipeline calculates, gene by gene in an independente way, the correlation between survival and expression data.
# Author: Matias
###########################################################################################################################
#This pipeline do 3 steps:
# Step1: It generates the file (For example 1-TCGAExpressionAndOS.csv) joining the expression and followUpData, keeping just the samples that are in both files and that has the follow up data
# Step2: It generates the file (For example 2-TCGACorrelationBetweenExpressionAndSurvivalForEachGene) that shows the correlation between each gen expression and its follow up date. The file will have one row per gen showing the coxph.socre and the coxph.p.value
# Step3: It generates the pdf (For example 3-TCGAKaplanMeierGeneByGene.pdf) with the kaplanMeier+survdiff p.value (showing if the difference between curves is statistical significance) just with the genes with surv.diff p-value lower than a parameter (now it is set to 0.01)
#
#If you have the file with the fowllowing format you should avoid first step.
# -Row 1: Sample labels
# -Row 2: Surival time (it could be for example relapse free survival time or overall suvival)
# -Row 3: Survival event. 0 means the event did not occur. 1 the event occurred.
# -From Row 4: expression data
# -Column 1: Gene symbol (ejemplo: A1BG, A2M) except in row 1, 2 and 3 that are the labels for the rows descripted below.
#
#If you have two different files: one for the expression data and another one with the clinical data, you can use the "generateExpressionAndSurvivalData" function of this package.
#
#This pipeline example uses TCGA data
###################################################################################################################################
##############################################SOURCE###############################################################################
###################################################################################################################################
#source("D:/desarrollo/workspaces/R/multiomics/3-do/API/API_multiomics_for_survival_gene_by_gene.R", echo=FALSE, encoding="Cp1252")
#source("D:/desarrollo/workspaces/R/BioplatUtils/matrix_utils.R", echo=FALSE, encoding="Cp1252")
# path adonde estan los archivos de entrada
sourceBaseLocation="D:\\desarrollo\\workspaces\\R\\multiomics\\"
source(paste(sourceBaseLocation, "src/3-do/API/API_multiomics_for_survival_gene_by_gene.R",sep=""), echo=FALSE, encoding="Cp1252")# path for the input files
source(paste(sourceBaseLocation, "src/survival.utils/matrix_utils.R",sep=""), echo=FALSE, encoding="Cp1252")
source(paste(sourceBaseLocation, "src/survival.utils/expression_grouping_functions.R",sep=""), echo=FALSE, encoding="Cp1252")
source(paste(sourceBaseLocation, "src/survival.utils/survival_utils.R",sep=""), echo=FALSE, encoding="Cp1252")
source(paste(sourceBaseLocation, "src/3-do/Private/multiomics_private_tcga.R",sep=""), echo=FALSE, encoding="Cp1252")
############################################################################################################################################
##########CONFIG: You should change this variable to set your values########################################################################
############################################################################################################################################
#input.path is the folder in which the expression and clinical file are
#example
input.path="D:\\desarrollo\\workspaces\\R\\multiomics\\examples\\survival_gene_by_gene\\input\\"
input.expression.file.name="input1_expressionMatrix"
input.clinical.file.name="input2_clinicalData.tsv"
input.clinical.survival.column.name="OVERALL_SURVIVAL"
input.clinical.event.column.name="overall_survival_indicator"
input.clinical.file.path=paste(input.path, input.clinical.file.name, sep="")
input.expression.file.path=paste(input.path, input.expression.file.name, sep="")
#output.pat is the folder in which all generated files will be placed.
output.path="D:\\desarrollo\\workspaces\\R\\multiomics\\examples\\survival_gene_by_gene\\output\\"
####################################################################################################################################################################################################################################
############################# Step1 - it Generates the file ExpressionWithOS.csv containing the expression and just the overall suvival data. It takes as input the expression file (in TCGA format) and the clincal data (in TCGA format).
####################################################################################################################################################################################################################################
#-----------------Output of this step---------------------------------------------------------------------------------
#
# It generates a file with the following format
# -Row 1: Sample labels
# -Row 2: Surival time (it could be for example relapse free survival time or overall suvival)
# -Row 3: Survival event. 0 means the event did not occur. 1 the event occurred.
# -From Row 4: expression data
# -Column 1: Gene symbol (ejemplo: A1BG, A2M) except in row 1, 2 and 3 that are the labels for the rows descripted below.
#
# All the samples which has NA for the columns event or survival will be eliminated and they will not be i
#
# Example
# sampleID TCGA-A1-A0SB-01 TCGA-A1-A0SD-01 TCGA-A1-A0SE-01
# OS_event_nature2012 259 437 1320
# OS_Time_nature2012 0 0 0
# ARHGEF10L 108.075 91.657 97.179
#
# If you have a file with the described format, you can avoid this step
#-------------------------------------------------------------------------------------------------------------------------------
#---------CONFIG (you dont need to change it)------------------
output.expression.with.survival.file.name="1-TCGAExpressionAndOS.csv"
output.expression.with.survival.path=paste(output.path, output.expression.with.survival.file.name, sep="")
#----------DO IT - it Generates the file ExpressionWithSurvival.csv taking as input the expression file (in TCGA format) and the clincal data (in TCGA format)#########
generateExpressionAndSurvivalDataFromTCGA(input.expression.file.path, input.clinical.file.path, input.clinical.survival.column.name, input.clinical.event.column.name, output.expression.with.survival.path)
##################################################################################################################################################################
############################# END STEP1 #########################################################################################################################
##################################################################################################################################################################
####################################################################################################################################################################################################################################
#############################STEP2 (multiomics.to.spreadsheet)- It calculates, gene by gene, the correlation between its expression and the follow up data. It uses coxph that is why it is not necessary to do clustering.
####################################################################################################################################################################################################################################
#---------GENERAl Config-----------------
# BUG FIX: this assignment was commented out in the original, but
# input.expression.with.survival.file.path is used by the
# multiomics.to.spreadsheet() call in the "Do it" section below and is defined
# nowhere else before it, so Step 2 would fail with "object not found".
# Point it at the Step 1 output (expression + overall-survival file).
input.expression.with.survival.file.path = output.expression.with.survival.path
# Grouping rule: if ABCC4 and the partner are both overexpressed the sample
# goes into group 1; otherwise it goes into group 2.
expression.grouping.FUN=multiomics.G1PairUpG2PairDown
#---------SpecficConfig-----------------
output.multiomics.to.spreadsheet.file.path<-paste(output.path, "ABCC4.Withpartners.OS.G1UG2D0.7.csv", sep="")
a.maximum.p.value.accepted=1
a.candidate.gene.name="ABCC4"
#partner.genes.of.interest=c("BPESC1","MLLT4","OR7G1","PIGS","PRDM10","PRDM10","SEC63","NDUFS1","PDSS2","TSNAX","VPS41","TAS2R38","GPR107","RAI1","LMTK2","KAL1","KIAA0232","STC1","UBXN7","SRCIN1","LASS6","LOC729082","TAF1L","OR2G2","CMTM4","TMEM106B","SHPRH","RGPD8","PPA2","ZNF611","ZNF616","ASH1L","MYEF2","ATF6","SLC38A2","RAB3GAP1","SDCCAG1","MED17","SYN3","ZNF223","PTPDC1","DLG1","PEX19","ZNF28","C20orf117","CCNT1","EARS2","C18orf25","SLC22A25","C11orf42","AKTIP","UHMK1","AHI1","DYNC1H1","GPR160","OR5T2","AGPAT1","MRE11A","PIP4K2B","BCLAF1","KIAA1826","ZNF322A","ZNF609","COX7B2","DDX52","CDC42BPB","OR11L1","TOR1AIP2","NBAS","LACE1","ZFC3H1","TIRAP","CBX5","MKS1","SNX19","SBNO1","EIF5","ASB4","ASB8","PRPF40B","TMEM184C","REST","MAP3K13","CEACAM6","DHX15","IKBKAP","CNNM4","DIP2B","GRHL2","RBM16","KIAA1731","RPL10L","OR5M11","PDE4A","PHF3")
# Groups with fewer samples than this are discarded from the analysis.
a.minimium.number.of.samples.in.a.group=10
#---------Do it-----------------
# Runs the gene-by-gene survival correlation (surv.diff, concordance index and
# coxph all enabled) for the candidate gene ABCC4.
# NOTE(review): partner.gene.names.to.evaluate=NULL presumably means "evaluate
# all partner genes" -- confirm against multiomics.to.spreadsheet()'s
# definition in the sourced API file.
multiomics.to.spreadsheet(input.expression.with.survival.file.path, number.of.clusters=2, output.file.path=output.multiomics.to.spreadsheet.file.path, maximum.p.value.accepted=a.maximum.p.value.accepted, gene.name=a.candidate.gene.name, grouping.FUN=expression.grouping.FUN, print.surv.diff=TRUE, print.concordance.index=TRUE, print.coxph=TRUE,partner.gene.names.to.evaluate=NULL,minimium.number.of.samples.in.a.group=a.minimium.number.of.samples.in.a.group)
####################################################################################################################################################################################################################################
#############################END STEP2 #############################################################################################################################################################################################
####################################################################################################################################################################################################################################
####################################################################################################################################################################################################################################
#############################STEP3 - It calculates the survival gene by gen
####################################################################################################################################################################################################################################
#-----------------Output of this step---------------------------------------------------------------------------------
# It will generate a file "TCGA_kaplanMeier_geneByGene.pdf".
#
# gene: Gene symbol
# Step 3 reads back the Step 1 output (expression + overall survival) from the
# output folder.
input.expression.with.survival.file.name="1-TCGAExpressionAndOS.csv"
input.expression.with.survival.path=paste(output.path, input.expression.with.survival.file.name, sep="")
output.kaplan.meier.file.name="3-TCGAKaplanMeierGeneByGene.pdf"
# One Kaplan-Meier plot per gene; only genes whose survdiff p-value is below
# maximum.p.value.accepted (0.01, see the pipeline header) are written to the
# PDF. NOTE(review): the positional argument 2 is presumably the number of
# groups for the cut2 split -- confirm against the function's signature in the
# sourced survival_utils file.
kaplanMeierForAGeneUsingCut2GeneByGene(input.expression.with.survival.path, 2, output.path, output.kaplanmeier.file.name=output.kaplan.meier.file.name, x.lab="Time", y.lab="survival", maximum.p.value.accepted=0.01)
a4097d9294a6b13c859a958ccf376a8830953774 | a7747b98c8f7d54c8e3131db624e2d0915e9a496 | /cachematrix.R | 12b512141c52090d9ac3ac606f52df867cc88193 | [] | no_license | manisha1895/ProgrammingAssignment2 | 6095450d6101512b3596babf682722b184311393 | 6d35efd4d9bc8c4291233767fec6c9cd1bcfe923 | refs/heads/master | 2020-12-11T09:04:45.815221 | 2015-10-22T14:48:46 | 2015-10-22T14:48:46 | 44,558,033 | 0 | 0 | null | 2015-10-19T19:20:46 | 2015-10-19T19:20:46 | null | UTF-8 | R | false | false | 870 | r | cachematrix.R | ## My code
## Construct a caching wrapper around a matrix.
##
## x: the matrix to wrap (defaults to an empty 1x1 NA matrix).
##
## Returns a list of four closures sharing the enclosing environment:
##   set(y)        -- replace the stored matrix and drop any stale cached inverse
##   get()         -- return the stored matrix
##   set_inv(inv)  -- store a freshly computed inverse in the cache
##   get_inv()     -- return the cached inverse, or NULL if none stored yet
makeCacheMatrix <- function(x = matrix()) {
  cached_inverse <- NULL
  list(
    set = function(y) {
      x <<- y
      cached_inverse <<- NULL
    },
    get = function() x,
    set_inv = function(inverse) cached_inverse <<- inverse,
    get_inv = function() cached_inverse
  )
}
## Following function finds the inverse of the matrix created by makeCacheMatrix() above
## If the inverse already exists , it retrieves from cache.
## Return the inverse of the special "matrix" created by makeCacheMatrix(),
## computing it with solve() on first use and serving it from the cache on
## subsequent calls.
##
## BUG FIXES vs. the original:
##   1. `return mat_inv` was a parse error in R -- return is a function call,
##      so it must be `return(mat_inv)`.
##   2. `x$get_inv(mat_inv)` called the zero-argument getter (an "unused
##      argument" error) instead of storing the result; the cache was never
##      populated. It must be `x$set_inv(mat_inv)`.
##
## x:   a caching wrapper as returned by makeCacheMatrix()
## ...: further arguments forwarded to solve()
cacheSolve <- function(x, ...) {
  ## Return a matrix that is the inverse of 'x'
  mat_inv <- x$get_inv()
  if (!is.null(mat_inv)) {
    message("getting cached matrix inverse")
    return(mat_inv)
  }
  mat <- x$get()
  mat_inv <- solve(mat, ...)
  x$set_inv(mat_inv)
  mat_inv
}
|
6b0ef5b76a0e809be0407f85bdd3ce728fda770c | f7301f8c0520ea8a00c36b1c7d94235a7347e540 | /man/LoggerList.Rd | 6f6f8ab829bd80521ef41478556c95695b958c07 | [
"MIT"
] | permissive | pfistfl/compboost | 685eaf48f0e4a76da8b568606bdc9bdd459ee713 | 1d5f527aca86459b7d05010feeeaba0f0219f8f9 | refs/heads/master | 2020-06-27T18:51:23.375089 | 2019-07-02T12:36:40 | 2019-07-02T12:36:40 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | true | 1,526 | rd | LoggerList.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/RcppExports.R
\name{LoggerList}
\alias{LoggerList}
\title{Logger list class to collect all loggers}
\format{\code{\link{S4}} object.}
\description{
This class is meant to define all loggers that should be used to track the
progress of the algorithm.
}
\section{Usage}{
\preformatted{
LoggerList$new()
}
}
\section{Fields}{
This class doesn't contain public fields.
}
\section{Methods}{
\describe{
\item{\code{clearRegisteredLogger()}}{Removes all registered logger
from the list. The used logger are not deleted, just removed from the
map.}
\item{\code{getNamesOfRegisteredLogger()}}{Returns the registered logger
names as character vector.}
\item{\code{getNumberOfRegisteredLogger()}}{Returns the number of registered
logger as integer.}
\item{\code{printRegisteredLogger()}}{Prints all registered logger.}
\item{\code{registerLogger(logger)}}{Includes a new \code{logger}
into the logger list with the \code{logger_id} as key.}
}
}
\examples{
# Define logger:
log_iters = LoggerIteration$new("iteration", TRUE, 100)
log_time = LoggerTime$new("time", FALSE, 20, "minutes")
# Create logger list:
logger_list = LoggerList$new()
# Register new loggers:
logger_list$registerLogger(log_iters)
logger_list$registerLogger(log_time)
# Print registered logger:
logger_list$printRegisteredLogger()
# Remove all logger:
logger_list$clearRegisteredLogger()
# Get number of registered logger:
logger_list$getNumberOfRegisteredLogger()
}
|
78cc24e7d96c7d59429636c2fb5e5eea6ae76e02 | 4ecb4c0c7d8f2bdf2304d6fa0726fa1b3e7f18af | /man/getDailyOHLC.Rd | 8085b04f6da33cfb4d301a4f59cb73f4bee9f14b | [] | no_license | hal2001/smarketcrawlR | 7f6ec88eb2f3815d42de8a85e3b766e56c0b0a9e | e6551fd42a35491fbc6530104ce41c25688c2050 | refs/heads/master | 2021-06-17T21:56:37.833874 | 2017-06-15T18:53:15 | 2017-06-15T18:53:15 | 113,870,439 | 0 | 1 | null | 2017-12-11T14:42:40 | 2017-12-11T14:42:40 | null | UTF-8 | R | false | true | 2,722 | rd | getDailyOHLC.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/scrapeData.R
\name{getDailyOHLC}
\alias{getDailyOHLC}
\title{getDailyOHLC}
\usage{
getDailyOHLC(searchDates, isin, input_df)
}
\arguments{
\item{searchDates}{- vector of characters with different date requests (see examples for the formats): e.g. 20.09.2014-22.09.2014 (normal time period between two dates) // 08.2014 (short for one month) // 2014 (short for one year) // 20.08.2014 (short for one day) // 2013-2014 (time period between two years) // 08.2014-09.2014 (time period between two months)}
\item{isin}{- vector of characters with the id number of the stocks (ISIN = International Stock Identification Number)}
\item{input_df}{- data.frame with dates(as character in format dd.mm.yyyy, e.g. 21.02.2008 (%d.%m.%Y)) and isin(as character, e.g. DE0007037145(RWE AG)). Column names and order --> dates and isin are restricted!}
}
\value{
data.frame : date, isin, open, high, low, close, currency, volume, turnover
}
\description{
This method crawls through the ariva.de website and grabs the daily price data out of the html tables.
You can get OHLC data from specific dates for specific stocks (ISIN required).
Either by putting the different stocks/ISINs and the dates for their prices in a data.frame (input_df), which is good for getting prices on different dates for different stocks,
or by declaring a vector of dates/intervals and stocks/ISINs, which is good for comparing the prices of one stock across time periods or with other stocks.
The date must be in the right format (dd.mm.yyyy) as a character. It is not checked if the date is a correct trading day.
It is also not checked if the date intervals are correct.
Status Quo:
- Only Xetra price data
- price data is cleaned up from stock splits (clean_splits)
The method strictly depends on the website structure of ariva and GET request of the csv file!
}
\examples{
# First example for the first search option (input parameter: only input_df)
#' # Build the input parameter data.frame
dates <- c("25.02.2008","27.02.2008")
isin <- c("DE0007037145","DE0007037145")
input_df <- data.frame(dates, isin)
# Get the prices for RWE AG (DE0007037145) of the 25.02.2008 and 27.02.2008
prices1 <- getDailyOHLC(input_df = input_df)
# Second example for the second search option (input parameter: searchDates and isin)
# Build the input parameters
isin <- c("DE0007037145","DE0007664039")
searchDates <- c("20.09.2014-22.09.2014","20.08.2014","06.05.2014-16.05.2014")
# Get the prices for RWE AG (DE0007037145) and VW VZ (DE0007664039) for different time periods or dates (see searchDates parameter above)
prices2 <- getDailyOHLC(searchDates=searchDates, isin=isin)
}
|
0cc5938e8c8bc5b114c9db825276ad5a572ab9fa | 1db71d2f2d70bf7c38320d7bcc9725fbb59cb064 | /data/studies/steinberg_2016_tem1/standardise_steinberg_2016_tem1.R | 3fb0363c407fa1050aaaae38b8c8ae175632d5c7 | [
"Apache-2.0"
] | permissive | ostrokach/aa_subtypes | fda6346f4244dfb5cb15388f742d63087a29fe40 | 5f8e7e7557436d8798c908e94981aead21243755 | refs/heads/master | 2022-12-01T03:09:55.367152 | 2020-08-10T13:34:26 | 2020-08-10T13:34:26 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 808 | r | standardise_steinberg_2016_tem1.R | #!/usr/bin/env Rscript
# Standardise data from Steinberg & Ostermeier 2016 (TEM1)
source('src/config.R')
source('src/study_standardising.R')
# Import and process data
# Study metadata (citation, transform description, etc.) for this dataset.
meta <- read_yaml('data/studies/steinberg_2016_tem1/steinberg_2016_tem1.yaml')
# Import the supplementary Excel table and standardise it:
#  - column names lower-cased with spaces replaced by underscores
#  - rows without a codon position dropped (header/summary rows)
#  - keep position / wt / mut / raw fitness, renamed to the standard scheme
#  - log2-transform the raw fitness, then derive the normalised score and the
#    variant class via normalise_score() / get_variant_class() -- presumably
#    defined in the sourced study_standardising.R (not visible here).
dm_data <- read_xlsx('data/studies/steinberg_2016_tem1/raw/1-s2.0-S0022283616301450-mmc2.xlsx', trim_ws = TRUE) %>%
rename_all(~str_replace_all(str_to_lower(.), ' ', '_')) %>%
drop_na(codon_position) %>%
select(position = codon_position, wt = wt_aa, mut = mutant_aa, raw_score = tem1_amp_fitness) %>%
mutate(transformed_score = log2(raw_score),
score = normalise_score(transformed_score),
class = get_variant_class(wt, mut)) %>%
drop_na(score) # Not all measured
# Save output
standardise_study(dm_data, meta$study, meta$transform)
|
66b2a371e6040051283226714ee4497ec8429b8b | 0500ba15e741ce1c84bfd397f0f3b43af8cb5ffb | /cran/paws.developer.tools/man/codedeploy_get_deployment.Rd | 94e09d4dd4a7802dfb7eb191efb132739cbe956f | [
"Apache-2.0"
] | permissive | paws-r/paws | 196d42a2b9aca0e551a51ea5e6f34daca739591b | a689da2aee079391e100060524f6b973130f4e40 | refs/heads/main | 2023-08-18T00:33:48.538539 | 2023-08-09T09:31:24 | 2023-08-09T09:31:24 | 154,419,943 | 293 | 45 | NOASSERTION | 2023-09-14T15:31:32 | 2018-10-24T01:28:47 | R | UTF-8 | R | false | true | 566 | rd | codedeploy_get_deployment.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/codedeploy_operations.R
\name{codedeploy_get_deployment}
\alias{codedeploy_get_deployment}
\title{Gets information about a deployment}
\usage{
codedeploy_get_deployment(deploymentId)
}
\arguments{
\item{deploymentId}{[required] The unique ID of a deployment associated with the IAM user or Amazon Web
Services account.}
}
\description{
Gets information about a deployment.
See \url{https://www.paws-r-sdk.com/docs/codedeploy_get_deployment/} for full documentation.
}
\keyword{internal}
|
d13a47fd216d4fe7fb8be10c54db422fff9a5ef0 | c53162f4861dffcc985743fa7766d728f66c00ab | /R/Script.R | 2773edae4d749732363b7d303e6d63342914c0e7 | [] | no_license | anampc/coral_antibiotics_physiology | 40d3d90fc335585a4a71666773e93f0e8596f5d1 | 6c84ee4498faf835dbfbf97e564b36754035e443 | refs/heads/master | 2023-03-10T18:48:03.438377 | 2021-02-26T19:24:39 | 2021-02-26T19:24:39 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 11,283 | r | Script.R | ## Load required packages
library(respR)
library(readxl)
## Create function to import these types of file
## (This will be in the next version, out in a week or so)
## Import a multiplate oxygen file exported as .xlsx.
##
## path: path to the Excel file.
##
## The first 10 rows are header metadata and are discarded; the remaining
## rows are returned as read by readxl::read_xlsx().
parse_multiplate <- function(path) {
  readxl::read_xlsx(path, skip = 10)
}
# Experiment parameters ---------------------------------------------------
# These could all be in a single file of course
## Volume - for conversion later this should be in L
## Corrected for the approx. 40% you said the corals take up
## I'll show you later how to calculate this more precisely
# Effective water volume per well in litres: 0.00065 L (650 uL) well, scaled
# by the coral's displacement factor.
# NOTE(review): the comment above says the corals "take up" ~40% of the well,
# which would make the effective WATER volume 60% (x 0.60); the code keeps
# 40% (x 0.40) -- confirm which is intended before converting rates.
volume <- 0.00065 * 0.40
# Water conditions, recorded for later unit conversion (not used in this
# chunk of the script).
temperature <- 24.3
salinity <- 35
## these are not quite what you told me, but i worked it out.
# Wells holding corals (experimental) vs. empty control wells used for
# background correction.
exp_wells <- c("A2", "A4", "A5", "A6", "B2", "B5", "C1", "C2", "C4", "D1", "D3", "D5")
ctl_wells <- c("A1", "A3", "B1", "B3", "B4", "B6", "C3", "C5", "C6", "D2", "D4", "D6")
# Import Data -------------------------------------------------------------
## If you have lots of files you only need to change this path and the code
## should work as long as everything else is the same
## Set file path
path <- "~/Documents/Github repositories/respR Example Analyses/Mike Connelly/pocillopora_368_connelly_032619_b_Oxygen.xlsx"
## import
coral <- parse_multiplate(path)
## Convert well names to column index. This just converts the data and ctrl
## columns to an index (i.e. column numbers) which makes selecting the correct
## columns easier
data_cols <- which(names(coral) %in% exp_wells)
ctrl_cols <- which(names(coral) %in% ctl_wells)
## quick check there are no duplicates - should be FALSE
any(data_cols %in% ctrl_cols)
# Background --------------------------------------------------------------
# The calc_rate.bg function is different to the others in that it will accept
# several columns of oxygen, and return an average rate for all of them (as well
# as individually). Using the average may or may bnot be what you want depending
# on your experiment, but I'm going to use it here.
# save bg
bg <- calc_rate.bg(coral, xcol = 2, ycol = ctrl_cols)
# view
print(bg)
plot(bg)
# Usually bg rates are negative, that is the oxygen decreases because
# microorganisms are using it. Here, it's increasing.... I don't know exactly
# what's going on in your experiment. I presume you don't have anything in there
# that will produce oxygen, so it suggests the wells might not be sealed
# correctly. If you are using water that is not 100% saturated and there is a
# leak it will slowly absorb oxygen and % will increase. Either that or the
# probes are drifting for some reason.
## Anyway, you can see the rates are pretty consistent, so presumably the same
## thing is happening in the experimental wells, so we can still use it to
## correct them. But i would run some trials to try and investigate what is
## going on. If this is going on while your corals are respiring you will be
## underestimating their uptake rate.
## Anyway, we have saved the background rate to the `bg` object (it contains
## both the 12 individual bg rates and a mean) and will use that later to
## correct.
# Inspect and save data ---------------------------------------------------
## this will inspect all data columns
inspect(coral, time = 2, oxygen = data_cols)
## However it doesn't let you look at them individually (depending on respR
## version you are using - that will change in next version), so I'm going to
## inspect and save them as separate objects and save them to a list
## empty list to save results to
## Inspect each experimental well individually and collect the results.
## Preallocating the list and iterating with seq_along() fixes the
## 1:length(x) pitfall (1:0 == c(1, 0) for an empty vector) and avoids
## growing the list one element at a time.
coral_insp <- vector("list", length(data_cols))
for (i in seq_along(data_cols)) {
  coral_insp[[i]] <- inspect(coral,
                             time = 2,
                             oxygen = data_cols[i])
}
## You will see it has plotted each one - too fast for you to see, but you can
## scroll back through plots to view each
## Or use these to look at individual ones
## (double square brackets are for referencing list() elements)
print(coral_insp[[1]])
plot(coral_insp[[1]])
## They all look ok (though see comments at very end). Don't worry about the
## unevenly spaced time warning - it's because you are using decimal minutes.
# Calculate rates ---------------------------------------------------------
## There's a number of different ways you can do this. Depends on your
## experiemnt and what you are looking for. I'll show you a couple.
## First, using calc_rate to use the same time period from each. I picked a time
## after things have appeared to have stabilised a little.
## calc_rate
## calc_rate over the same fixed time window (500-1500 s) for every well,
## i.e. after the initial post-disturbance period has stabilised.
## Preallocated list + seq_along() instead of 1:length(x) (which misbehaves
## for empty vectors) and repeated list growth.
coral_rates_cr <- vector("list", length(data_cols))
for (i in seq_along(data_cols)) {
  coral_rates_cr[[i]] <- calc_rate(coral_insp[[i]],
                                   from = 500,
                                   to = 1500,
                                   by = "time")
}
## Again, it will go too fast to see all the plots, but you can scroll back through
## them, or use these on the list the calc_rate objects are saved to.
print(coral_rates_cr[[1]])
plot(coral_rates_cr[[1]])
## You can view or extract all rates using this
sapply(coral_rates_cr, function(x) x$rate)
## Seem fairly consistent!
## auto_rate
## Secondly using auto_rate which finds the most linear rate in each (see
## documentation and vignette).
## auto_rate on the full inspected traces; preallocated list + seq_along()
## replaces the unsafe 1:length() index loop.
coral_rates_ar <- vector("list", length(data_cols))
for (i in seq_along(data_cols)) {
  coral_rates_ar[[i]] <- auto_rate(coral_insp[[i]])
}
## View all
## Need to add the [1] because auto_rate may identify several linear
## sections, and [1] is the top ranked one (i.e. most linear)
sapply(coral_rates_ar, function(x) x$rate[1])
## Again, you can scroll back through the plots to look at each, or view
## particular ones with print() and plot().
## Which of these methods you use is up to you. It depends on the experiment and
## what you are looking at. For yours, i would be careful with auto_rate. In
## most of your data, you have a clear section up to 500s that looks different.
## My guess is the corals are retracted or less active after the disturbance of
## setting up and starting the experiment, so you don't really want to use these
## sections.
## So there's a good chance auto_rate may select linear regions within these
## regions that you don't want to use. Going back through the auto_rate results,
## you'll see this has happened for at least one -
plot(coral_rates_ar[[10]])
## Not really representative of the experiment!
## However, you can go to the next ranked result using (pos =) to see if
## it fits better. This one is more consistent with the others.
plot(coral_rates_ar[[10]], pos = 2)
print(coral_rates_ar[[10]], pos = 2)
## Another thing you could do is subset out only the regions of data you are
## interested in and run auto_rate on those.
## Same loop-hygiene fixes as above: preallocate and use seq_along().
coral_rates_ar_sub <- vector("list", length(data_cols))
for (i in seq_along(data_cols)) {
  subs <- subset_data(coral_insp[[i]], from = 500, to = 1772, by = "time")
  coral_rates_ar_sub[[i]] <- auto_rate(subs)
}
## these look a lot better, and rates more consistent
sapply(coral_rates_ar_sub, function(x) x$rate[1])
# Correct for background --------------------------------------------------
## I'll use the calc_rate results and correct them. Remember there are 12
## results saved in a list.
## adjust first one - not saved
adjust_rate(coral_rates_cr[[1]], by = bg)
## This just shows you what's happening. Because your bg rate is positive the
## specimen rate actually gets corrected upwards. i.e. get bigger (more
## negative). By default this uses the mean bg rate of all 12 controls, but you
## can enter a different one or any value.
adjust_rate(coral_rates_cr[[1]], by = bg$bgrate[1])
adjust_rate(coral_rates_cr[[1]], by = 0.0005)
## Can adjust them all like this
## (preallocated list + seq_along(), replacing the unsafe 1:length() loop)
coral_rates_cr_adj <- vector("list", length(data_cols))
for (i in seq_along(data_cols)) {
  coral_rates_cr_adj[[i]] <- adjust_rate(coral_rates_cr[[i]], by = bg)
}
## new adjusted rates - make sure you extract the $corrected rate
sapply(coral_rates_cr_adj, function(x) x$corrected)
# Convert -----------------------------------------------------------------
## Ok, last thing is to convert the rates to units.
## One to show you what's happening
convert_rate(coral_rates_cr_adj[[1]],
             o2.unit = "%",
             time.unit = "min",
             volume = volume,
             output.unit = "mg/h",
             t = temperature, # set at start
             S = salinity) # set at start
## All
## (preallocated list + seq_along(), replacing the unsafe 1:length() loop)
coral_rates_cr_adj_conv <- vector("list", length(data_cols))
for (i in seq_along(data_cols)) {
  coral_rates_cr_adj_conv[[i]] <- convert_rate(coral_rates_cr_adj[[i]],
                                               o2.unit = "%",
                                               time.unit = "min",
                                               volume = volume,
                                               output.unit = "mg/h",
                                               t = temperature, # set at start
                                               S = salinity)
}
# view one
print(coral_rates_cr_adj_conv[[4]])
# extract all
final_rates <- sapply(coral_rates_cr_adj_conv, function(x) x$output)
## these are your final rates in mg/h
final_rates
# Comments ----------------------------------------------------------------
## I know this is just a pilot experiment, but one thing with your data that
## stands out is a lot of the curves are getting steeper as the experiment goes
## on. That is they haven't levelled out to a consistent slope. You can see this
## in the bottom panel of the inspect() plots - these show the rate over the
## experiment and in nearly all it is constantly rising. It should level off at
## some point if you are looking for a standard or routine metabolic rate.
## This suggests to me your experiments are not long enough to reach a steady
## state. In other words the corals are using up the available O2 too quickly.
## At 25 mins or so, these experiments are really brief! I know you are in
## tropical temperatures, but my own experiments on inverts generally last
## several hours. The only thing you can do if this is the case (given your
## respirometry system) is try to reduce the size of your coral fragments. That
## means they will take up less proportional volume. This is also a good thing -
## generally you want a much lower specimen:chamber volume ratio than what you
## have at 40%. It's slightly different for mobile inverts, but i generally go
## for the specimen taking up around 10% of the volume, or at most 20% or so. It
## just gives the animal more time to become accustomed to the chamber, handling
## etc. and so revert eventually to routine behaviour. Alternatively you could
## have them in the chambers for a longer time but open within a tank of
## water, and only seal them at the last minute.
## Basically you need to do a lot of trials. What you want is an inspect() plot
## where the rolling rate eventually flattens out.
## For converting mass and density of the corals to volume, and calculating the
## effective volume see my other R package
## https://github.com/nicholascarey/respfun
|
975be4f2d30ea818881e9541edc1291907a031de | 1a372979ef9fd1e1c0f238b3d7e562985d4144a1 | /importCropBeeData.R | 46f43f2b1668af63268bf411289704b09f7287fc | [] | no_license | YDAckerman/EPA-Bee | 9d6b4981c61b5be1ab59a40f1aabda2a6b6ed41d | 7e3309e51064d22e0b28c938e4141ac2dab95b2a | refs/heads/master | 2021-05-04T10:45:16.429799 | 2018-01-19T17:39:07 | 2018-01-19T17:39:07 | 43,094,241 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 1,569 | r | importCropBeeData.R | ## set wd, load libs, source helpers, read data, and define input files
setwd("/Users/Yoni/Documents/ZhangLab/BeePUR/cropBeeData/")
## import libraries
library(xlsx)
library(plyr)
library(dplyr)
## project helpers: provides the `hf` object (hf$removeParens, hf$mgrepl, ...)
source("~/Documents/ZhangLab/R/helper_functions.R")
## get all files to import
files <- list.files(getwd(), full.names = TRUE)
## read each xlsx file and merge into one df
## Sheet 1 of each workbook is read; columns 2:4 are renamed to the bee
## attribute columns, then only the columns of interest are kept.
## NOTE(review): assumes every file in the directory is an .xlsx workbook with
## the same column layout and a `Crop` column -- TODO confirm.
cropAttr <- ldply(files, function(file){
    d <- read.xlsx(file, stringsAsFactors = FALSE,
                   sheetIndex = 1)
    colnames(d)[2:4] <- c("Grp_Description", "HB.Pol", "HB.Nec")
    d %>% select(Crop, HB.Pol, HB.Nec, Grp_Description)
})
## spot corrections
## collapse embedded newlines and strip parenthesised qualifiers from crop names
cropAttr$Crop <- gsub("\n", " ", cropAttr$Crop)
cropAttr$Crop <- hf$removeParens(cropAttr$Crop)
## whoa spot corrections. easy, now.
## Clean the honey-bee pollen/nectar columns: strip digits and newlines, then
## keep only entries containing a "+" or "-" marker (everything else -> NA).
cropAttr[, c("HB.Pol", "HB.Nec")] <-
    llply(cropAttr[, c("HB.Pol", "HB.Nec")], function(y){
        unlist(llply(y, function(x){
            x <- gsub("[0-9]", "", gsub("\n", "", x))
            ifelse(hf$mgrepl(c("\\+", "\\-"), x), x, NA)
        }))})
## Normalize crop names so they match the naming used in the PUR data
## (plural -> singular, alternate spellings, merged categories). The
## replacements are applied sequentially, in the order listed.
crop_renames <- c("Tomatoes"      = "Tomato",
                  "Cherries"      = "Cherry",
                  "Blueberries"   = "Blueberry",
                  "Lemons/ limes" = "Lemons",
                  "Strawberries"  = "Strawberry",
                  "Watermelon s"  = "Watermelons",
                  "Chick peas"    = "Garbanzos")
for (pattern in names(crop_renames)) {
    cropAttr$Crop <- gsub(pattern, crop_renames[[pattern]], cropAttr$Crop)
}
save(cropAttr, file = "~/Documents/ZhangLab/BeePUR/cropBeeAttr.rda")
|
39246d8c64f2665fa3c5e41ac770403c4bac4f37 | c8abc62c98b7d3fae2916baaf998e4ac98e31ade | /GEO/src/eset2help.R | 6649250d859c18b2e8030622f32902024ad634a8 | [
"MIT"
] | permissive | biobakery/arepa | 5ad5ec3062b43350630a1875b8103cb1820ce5f0 | bbd1586b4e73e17ee99feb8e6efcfab8b216bc8b | refs/heads/master | 2022-07-29T22:11:43.877504 | 2020-05-19T18:57:24 | 2020-05-19T18:57:24 | 265,335,461 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 3,955 | r | eset2help.R | ## ARepA: Automated Repository Acquisition
##
## ARepA is licensed under the MIT license.
##
## Copyright (C) 2013 Yo Sup Moon, Daniela Boernigen, Levi Waldron, Eric Franzosa, Xochitl Morgan, and Curtis Huttenhower
##
## Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation
## files (the "Software"), to deal in the Software without restriction, including without limitation the rights to
## use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons
## to whom the Software is furnished to do so, subject to the following conditions:
##
## The above copyright notice and this permission notice shall be included in all copies or
## substantial portions of the Software.
##
## THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED,
## INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
## IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
## WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE
## OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
##R CMD BATCH --vanilla "--args GSE29221.RData GSE29221.Rd" esetToHelp.R
## Look here for an example of the directory structure (on hutlab4):
## /home/lwaldron/hg/curatedovariandata/curation/ovarian/curatedOvarianData
## ignore inst/ tests/ and CHANGELOG. You need man/, data/, NAMESPACE (just copy),
## DESCRIPTION:
## Suggests: survival
## Don't need URL, biocViews, License
## Will also need an equivalent of man/curatedOvarianData-package.Rd. Could be a template for now.
## Parse command-line arguments: args[1] = input .RData file, args[2] = output
## .Rd file, e.g.:
##   R CMD BATCH --vanilla "--args GSE29221.RData GSE29221.Rd" esetToHelp.R
inputargs <- commandArgs(TRUE)
print(inputargs)
file.in <- inputargs[1]
file.out <- inputargs[2]
library(affy)
##file.in <- "GDS104-GPL67.RData"
##file.out <- "GDS104-GPL67.Rd"
## Derive a syntactically valid R object name from the output file name,
## e.g. "GDS104-GPL67.Rd" -> "GDS104.GPL67_eset".
object.name <- sub(".Rd", "", file.out, fixed=TRUE)
object.name <- make.names(basename(object.name))
object.name <- paste0(object.name, "_eset")
## Load the first ExpressionSet object found in an .RData file.
##
## file.in: path to an .RData file containing at least one ExpressionSet.
## Returns: the first ExpressionSet found (by ls() order).
##
## Fixes vs. the original:
##  * the default `file.in = file.in` was self-referential -- evaluating it
##    raises a "promise already under evaluation" error -- so the argument is
##    now simply required (all callers pass it positionally);
##  * objects are loaded into a private environment so ls() cannot pick up
##    function locals;
##  * inherits() replaces class(x) == "ExpressionSet" (class() may return a
##    multi-element vector, breaking the == comparison), and a missing
##    ExpressionSet now fails with a clear message instead of get(NA).
getEset <- function(file.in){
  library(affy)
  env <- new.env(parent = emptyenv())
  load(file.in, envir = env)
  is.eset <- vapply(ls(env),
                    function(x) inherits(get(x, envir = env), "ExpressionSet"),
                    logical(1))
  obj.wanted <- names(is.eset)[is.eset][1]
  if (is.na(obj.wanted))
    stop("No ExpressionSet object found in ", file.in)
  get(obj.wanted, envir = env)
}
##assign(object.name, getEset(file.in))
eset <- getEset(file.in)
## Keep only phenotype columns with at least one non-NA value.
## drop = FALSE keeps a data.frame even when exactly one column survives
## (without it the subset would silently collapse to a vector).
pdata.nonblank <- pData(eset)
pdata.nonblank <- pdata.nonblank[,apply(pdata.nonblank,2,function(x) sum(!is.na(x)) > 0), drop = FALSE]
## Write the .Rd help-page body for this expression set to file.out.
## Everything printed between sink(file.out) and sink(NULL) goes to the file.
sink(file=file.out)
writeLines( paste("\\title{", object.name, "}", sep="") )
writeLines( paste("\\name{", object.name, "}", sep="") )
writeLines( paste("\\alias{", object.name, "}", sep="") )
writeLines( "\\docType{data}")
writeLines( "" )
## NOTE(review): accessionID is computed but never used below.
accessionID <- sub(".RData", "", file.in)
writeLines("\\description{")
writeLines(paste("assayData:", nrow(eset),"features,",ncol(eset),"samples"))
writeLines("")
writeLines(paste("First 6 feature names:", paste(head(featureNames(eset)), collapse=" ")))
writeLines("")
writeLines(paste("First 6 sample names:", paste(head(sampleNames(eset)), collapse=" ")))
writeLines("")
writeLines(paste("Annotation:", eset@annotation))
writeLines("")
writeLines( "--------------------------- ")
writeLines( "Study-level meta-data: ")
writeLines( "--------------------------- ")
writeLines("")
print( experimentData(eset) )
writeLines("")
writeLines( "--------------------------- ")
writeLines( "Sample-level meta-data: ")
writeLines( "--------------------------- ")
writeLines( "")
## Summarise each retained phenotype column; low-cardinality columns are
## converted to factors so summary() shows level counts.
## inherits() replaces class(x) == "data.frame" (class() can return a
## multi-element vector, e.g. for tibbles, which breaks ==), and seq_len() is
## safe when there are zero columns.
if (inherits(pdata.nonblank, "data.frame")){
  for (iCol in seq_len(ncol(pdata.nonblank))){
    if (length(unique(pdata.nonblank[,iCol])) < 6) {
      pdata.nonblank[,iCol] <- factor(pdata.nonblank[,iCol])
    }
    writeLines(paste(colnames(pdata.nonblank)[iCol],": ",sep=""))
    print(summary(pdata.nonblank[,iCol]))
    writeLines("")
  }
}
writeLines("")
writeLines("}")
writeLines("")
sink(NULL)
|
06dc34008881f1a4825215c42b91201744c633c1 | 4225c0ff2720afcb3dfe4bd170229597a81076c7 | /RIsk Parity Port by A-F-P (2012).R | 3d5b562247722339bfda677d5363dec47e3738dd | [] | no_license | big4quants/QAM | fa25f73e9e3ea1f79dbe08a305ef2b5db2578b35 | bd6d70d85be945b119bc8b78816b5f77b96683b6 | refs/heads/master | 2021-01-26T07:33:28.477133 | 2020-02-26T21:42:47 | 2020-02-26T21:42:47 | 243,367,082 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 11,450 | r | RIsk Parity Port by A-F-P (2012).R |
# 1. Construct the equal-weighted bond market return, value-weighted bond market return, and lagged total bond market capitalization using CRSP Bond data 1. Your output should be from January 1926 to December 2018, at a monthly frequency.
library(foreign)
library(data.table)
library(ggplot2)
library(lubridate)
library(moments)
library(readr)
library(zoo)
## Raw CRSP extracts (absolute local paths -- adjust before re-running
## elsewhere). TMTOTOUT is forced to integer so parsing is consistent.
Monthly_CRSP_Stocks <- read_csv("C:/Users/mulin/Desktop/MFE Spring 2019/Quant Asset Management/PS1/Market data.csv")
Monthly_CRSP_Bonds <- read_csv("C:/Users/mulin/Desktop/MFE Spring 2019/Quant Asset Management/PS2/bond data.csv",col_types = cols(TMTOTOUT = col_integer()))
## PS2_Q1: monthly CRSP bond-market portfolios.
##
## universe: raw CRSP bond data with columns MCALDT (m/d/Y date), KYCRSPID
##           (issue id), TMRETNUA (unadjusted return), TMTOTOUT (face value
##           outstanding, used here as market value).
## Returns:  data.table keyed by (Year, Month) with value-weighted return
##           (vwret), equal-weighted return (ewret) and total lagged market
##           value (MV_lag).
PS2_Q1 <- function(universe){
  universe <- as.data.table(universe) # just to make sure
  ## first cleaning step: parse dates and derive Year/Month grouping keys
  universe$MCALDT <- as.Date(universe$MCALDT, "%m/%d/%Y")
  setkey(universe, MCALDT)
  universe[, Year := year(MCALDT)]
  universe[, Month := month(MCALDT)]
  universe[, TMRETNUA := as.numeric(as.character(TMRETNUA))]
  universe <- universe[!TMRETNUA %in% c(-99)]  # drop CRSP missing-return code
  universe[, TMTOTOUT := as.numeric(as.character(TMTOTOUT))]
  ## market value proxy = total amount outstanding; weights use last month's MV
  universe[, MV := TMTOTOUT]
  universe[, MV_lag := shift(MV), by = c("KYCRSPID")]
  ## second cleaning step: drop rows with missing return or lagged MV.
  ## !is.na() replaces the original comparison of numeric columns against the
  ## string "NA" (equivalent for NA rows, but explicit and also drops NaN).
  universe <- universe[!is.na(TMRETNUA)]
  universe <- universe[!is.na(MV_lag)]
  ## value-weighted portfolio (weights = previous month's market value)
  Bond_Vw_ret <- universe[, list(vwret = weighted.mean(TMRETNUA, MV_lag, na.rm = TRUE)), by = list(Year, Month)]
  ## equal-weighted portfolio
  Bond_Ew_ret <- universe[, list(ewret = mean(TMRETNUA)), by = list(Year, Month)]
  ## total market value of the previous month
  Bond_lag_MV <- universe[, list(MV_lag = sum(MV_lag)), by = list(Year, Month)]
  output <- cbind(Bond_Vw_ret, Bond_Ew_ret[, 3], Bond_lag_MV[, 3])
  return(output)
}
PS2_1ans <- PS2_Q1(Monthly_CRSP_Bonds)
# 2. Aggregate stock, bond, and riskless datatables. For each year-month, calculate the lagged market value and excess value-weighted returns for both stocks and bonds. Your output should be from January 1926 to December 2018, at a monthly frequency.
## direct copy from PS_1
## PS1_Q1: monthly CRSP stock-market portfolios.
##
## universe: raw CRSP monthly stock file (PERMNO, date, SHRCD, EXCHCD, RET,
##           DLRET, PRC, SHROUT).
## Returns:  data.table keyed by (Year, Month) with value-weighted return
##           (vwret), equal-weighted return (ewret) and lagged total market
##           equity (ME_lag, in $ millions).
PS1_Q1 <- function(universe){
  universe <- as.data.table(universe) # just to make sure
  ## keep common shares (SHRCD 10/11) on NYSE/AMEX/NASDAQ (EXCHCD 1/2/3)
  universe <- universe[(SHRCD %in% c(10, 11)) & (EXCHCD %in% c(1,2,3)),]
  universe[, date := mdy(date)]
  setkey(universe, date)
  universe[, Year := year(date)]   # year column
  universe[, Month := month(date)] # month column
  ## drop CRSP missing/invalid return codes
  universe <- universe[!RET %in% c(-99,-88,-77,-66,-55,-44)]
  universe <- universe[!DLRET %in% c(-99,-88,-77,-66,-55,-44)]
  universe[, RET := as.numeric(as.character(RET))]
  universe[, DLRET := as.numeric(as.character(DLRET))]
  universe[, PRC := as.numeric(as.character(PRC))]
  universe[, SHROUT := as.numeric(as.character(SHROUT))]
  ## cum-dividend return: combine holding-period and delisting returns
  universe[, `:=`(cumDivRet, ifelse(is.na(DLRET),RET,ifelse(is.na(RET), DLRET, (1+DLRET)*(1+RET)-1 )))]
  ## market equity in dollars; PRC is negative when it is a bid/ask midpoint,
  ## hence abs()
  universe[, `:=`(ME, abs(PRC * SHROUT * 1000))]
  universe[, ME_lag := shift(ME), by = c("PERMNO")]
  ## BUG FIX: the original assigned the PRC filter to `uuniverse` (typo), so
  ## rows with missing PRC were never actually removed.  !is.na() also replaces
  ## the fragile comparison of numeric columns against the string "NA".
  universe <- universe[!is.na(PRC)]
  universe <- universe[!is.na(RET)]
  universe <- universe[!is.na(cumDivRet)]
  universe <- universe[!is.na(ME_lag)]
  value_weight <- universe[, list(vwret = weighted.mean(cumDivRet, ME_lag, na.rm = TRUE)), by = list(Year, Month)]
  equal_weight <- universe[, list(ewret = mean(cumDivRet)), by = list(Year, Month)]
  mkt_cap <- universe[, list(ME_lag = sum(ME_lag)/1000000), by = list(Year, Month)]  # $ millions
  mkt_ret <- cbind(value_weight, equal_weight[, 3], mkt_cap[, 3])
  return(mkt_ret)
}
PS1_1ans <- PS1_Q1(Monthly_CRSP_Stocks)
##############
## 30-day T-bill series used below as the risk-free rate proxy
Monthly_CRSP_Riskless <- read_csv("C:/Users/mulin/Desktop/MFE Spring 2019/Quant Asset Management/PS2/Risk free data.csv")
## PS2_Q2: aggregate stock, bond and riskless tables into one year-month panel
## of lagged market values and excess value-weighted returns.
##
## PS1_1ans: stock table from PS1_Q1 (col 3 = vwret, col 5 = lagged MV)
## PS2_1ans: bond table from PS2_Q1 (col 3 = vwret, col 5 = lagged MV)
## Monthly_CRSP_Riskless: riskless table with caldt and t30ret columns
## Returns: table with Year, Month, lagged MVs and excess vw returns.
##
## NOTE(review): the three tables are aligned purely by row POSITION, and the
## first riskless row is dropped so the series line up -- this assumes the
## riskless file starts exactly one month before the stock/bond series and
## that all tables cover identical month ranges. TODO confirm against the raw
## files; a merge on (Year, Month) would be safer.
PS2_Q2 <- function(PS1_1ans, PS2_1ans, Monthly_CRSP_Riskless){
  rf <- as.data.table(Monthly_CRSP_Riskless)
  rf[,caldt := ymd(caldt)]
  rf[,Year := year(caldt)]
  rf[,Month := month(caldt)]
  rf_table <- cbind(rf$Year,rf$Month,rf$t30ret) # 30-day rf as reference for risk free rate
  rf_table <- rf_table[-1,]
  Stock_Excess_Vw_Ret <- PS1_1ans[,3] - rf_table[,3]
  Bond_Excess_Vw_Ret <- PS2_1ans[,3] - rf_table[,3]
  result <- cbind(PS2_1ans[,c(1,2,5)],Bond_Excess_Vw_Ret, PS1_1ans[,5], Stock_Excess_Vw_Ret)
  colnames(result) <- c("Year", "Month", "Bond_lag_MV", "Bond_Excess_Vw_Ret", "Stock_lag_MV", "Stock_Excess_Vw_Ret")
  return(result)
}
PS2_2ans <- PS2_Q2(PS1_1ans, PS2_1ans, Monthly_CRSP_Riskless)
# 3. Calculate the monthly unlevered and levered risk-parity portfolio returns as defined by Asness, Frazzini, and Pedersen (2012). For the levered risk-parity portfolio, match the value-weighted portfolio's over the longest matched holding period of both. Your output should be from January 1926 to December 2018, at a monthly frequency.
## PS2_Q3: build the 60/40, value-weighted, unlevered RP and levered RP
## excess-return series from the combined stock/bond panel.
##
## Monthly_CRSP_Universe: output of PS2_Q2 (must be a data.table).
## Returns: data.table of all portfolio series, with the first 36 rows dropped
##          (no 36-month rolling volatility is available for them).
##
## NOTE(review): the `:=` assignments below modify the CALLER's data.table by
## reference, so PS2_2ans gains these columns as a side effect of this call.
PS2_Q3 <- function(Monthly_CRSP_Universe){
  # 60-40 portfolio return
  Monthly_CRSP_Universe[,Excess_60_40_Ret := 0.6 * Stock_Excess_Vw_Ret + 0.4 * Bond_Excess_Vw_Ret]
  # vol as estimated by using three-year monthly excess returns up to month (t-1)
  # (rolling 36-month sd, right-aligned, then shifted so month t uses data
  # through t-1; inverse is stored directly as the risk-parity weight basis)
  Monthly_CRSP_Universe[,Stock_inverse_sigma_hat := shift(1/rollapply(Stock_Excess_Vw_Ret,36, sd, fill=NA, align="right"))]
  Monthly_CRSP_Universe[,Bond_inverse_sigma_hat := shift(1/rollapply(Bond_Excess_Vw_Ret,36, sd, fill=NA, align="right"))]
  # unlevered RP where k = 1/sum of inverse of combined sigma
  Monthly_CRSP_Universe[,Unlevered_k := 1/(Stock_inverse_sigma_hat + Bond_inverse_sigma_hat)]
  # weight as defined as k * inverse of sigma
  Monthly_CRSP_Universe[,Excess_Unlevered_RP_Ret := (Unlevered_k * Stock_inverse_sigma_hat * Stock_Excess_Vw_Ret
                                                    + Unlevered_k * Bond_inverse_sigma_hat * Bond_Excess_Vw_Ret)]
  # to construct value weighted portfolio with stocks and bonds
  Monthly_CRSP_Universe[,stock_Vw_Weight := Stock_lag_MV/(Stock_lag_MV + Bond_lag_MV)]
  Monthly_CRSP_Universe[,Excess_Vw_Ret := stock_Vw_Weight * Stock_Excess_Vw_Ret + (1 - stock_Vw_Weight) * Bond_Excess_Vw_Ret]
  # to get the constant levered K: a single full-sample constant chosen so the
  # levered RP portfolio's volatility matches the value-weighted portfolio's
  vol_levered_port <- sd(Monthly_CRSP_Universe$Stock_inverse_sigma_hat * Monthly_CRSP_Universe$Stock_Excess_Vw_Ret
                         + Monthly_CRSP_Universe$Bond_inverse_sigma_hat * Monthly_CRSP_Universe$Bond_Excess_Vw_Ret, na.rm = TRUE)
  Monthly_CRSP_Universe[,Levered_k := sd(Monthly_CRSP_Universe$Excess_Vw_Ret)/vol_levered_port]
  # same as above for unlevered port
  Monthly_CRSP_Universe[,Excess_Levered_RP_Ret := Levered_k * Stock_inverse_sigma_hat * Stock_Excess_Vw_Ret
                        + Levered_k * Bond_inverse_sigma_hat * Bond_Excess_Vw_Ret]
  result <- Monthly_CRSP_Universe[,.(Year, Month, Stock_Excess_Vw_Ret, Bond_Excess_Vw_Ret, Excess_Vw_Ret, Excess_60_40_Ret,
                                     Stock_inverse_sigma_hat, Bond_inverse_sigma_hat, Unlevered_k,
                                     Excess_Unlevered_RP_Ret, Levered_k, Excess_Levered_RP_Ret)]
  # drop the first 36 months: no trailing 36-month volatility estimate exists
  result <- result[-(1:36),]
  return(result)
}
PS2_3ans <- PS2_Q3(PS2_2ans)
# 4. Replicate and report Panel A of Table 2 in Asness, Frazzini, and Pedersen (2012), except for Alpha and t-stat of Alpha columns. Specifically, for all strategies considered, report the annualized average excess returns, t-statistic of the average excess returns, annualized volatility, annualized Sharpe Ratio, skewness, and excess kurtosis. Your sample should be from January 1930 to June 2010, at monthly frequency. Match the format of the table to the extent possible. Discuss the difference between your table and the table reported in the paper. It is zero? If not, justify whether the difference is economically negligible or not. What are the reasons a nonzero difference?
## PS2_Q4: summary-statistics table for all portfolio series.
##
## Port_Rets: output of PS2_Q3 (data.table of monthly excess returns).
## Returns:  6 x 6 numeric matrix, one row per strategy, with annualized mean
##           (%), t-stat, annualized sd (%), Sharpe ratio, skewness and excess
##           kurtosis.
##
## Refactored: the six copy-pasted per-portfolio blocks are replaced by one
## helper, and t.test(...)$statistic is extracted as a plain numeric (the
## original stored t.test(...)[1], a list element, which silently turned the
## result into a matrix of lists).
## NOTE(review): the date-column `:=` below mutates the caller's data.table by
## reference (as in the original).
PS2_Q4 <- function(Port_Rets){
  # restrict sample: January 1930 to June 2010
  Port_Rets[, date:= ymd(paste0(Year,'/',Month,'/01'))] # combined Year and Month
  Port_Rets <- Port_Rets[date %between% c("1930-01-01", "2010-06-01")]
  ## Helper: the six annualized summary statistics for one monthly
  ## excess-return series (returns a plain numeric vector of length 6).
  summ_stats <- function(ret) {
    ann_mean <- mean(ret, na.rm = TRUE) * 12 * 100  # annualized mean, in %
    ann_sd   <- sd(ret, na.rm = TRUE) * sqrt(12) * 100
    c(ann_mean,
      unname(t.test(ret)$statistic),  # t-stat of the mean monthly return
      ann_sd,
      ann_mean / ann_sd,              # annualized Sharpe ratio
      skewness(ret, na.rm = TRUE),
      kurtosis(ret, na.rm = TRUE) - 3)  # excess kurtosis
  }
  result <- rbind(
    crsp_stock             = summ_stats(Port_Rets$Stock_Excess_Vw_Ret),
    crsp_bonds             = summ_stats(Port_Rets$Bond_Excess_Vw_Ret),
    VW_Portfolio           = summ_stats(Port_Rets$Excess_Vw_Ret),
    Excess_60_40_Portfolio = summ_stats(Port_Rets$Excess_60_40_Ret),
    Unlevered_RP_Portfolio = summ_stats(Port_Rets$Excess_Unlevered_RP_Ret),
    Levered_RP_Portfolio   = summ_stats(Port_Rets$Excess_Levered_RP_Ret))
  colnames(result) <- c("Annualized Mean %","t-stat of Annualized Mean","Annualized Standard Deviation %",
                        "Annualized Sharpe Ratio","Skewness","Excess Kurtosis")
  return(result)
}
PS2_4ans <- PS2_Q4(PS2_3ans)
|
423a5fe378c2eeec2df923d4a5cf67bce6f99639 | 72d9009d19e92b721d5cc0e8f8045e1145921130 | /StepSignalMargiLike/man/prior.norm.A.Rd | 57639dcf4d57fad71f66b2411ff0acf0767a1759 | [] | no_license | akhikolla/TestedPackages-NoIssues | be46c49c0836b3f0cf60e247087089868adf7a62 | eb8d498cc132def615c090941bc172e17fdce267 | refs/heads/master | 2023-03-01T09:10:17.227119 | 2021-01-25T19:44:44 | 2021-01-25T19:44:44 | 332,027,727 | 1 | 0 | null | null | null | null | UTF-8 | R | false | false | 1,222 | rd | prior.norm.A.Rd | \name{prior.norm.A}
\alias{prior.norm.A}
\title{prior.norm.A}
\usage{
prior.norm.A(data.x)
}
\arguments{
\item{data.x}{Observed data in vector form where each element represents a single observation.}
}
\value{
A numeric vector of prior parameters, in the order (\eqn{\mu0,
\kappa0, \nu0, \sigma0^2}).
}
\description{
This function computes the Norm-A prior proposed in Du, Kao
and Kou (2015), which is used under the conjugate normal
assumption. The variance \eqn{\sigma^2} is assumed to be
drawn from an inverse Gamma distribution with shape
parameter \eqn{\nu0} and scale parameter \eqn{\sigma0^2},
while the mean is assumed to be drawn from a normal
distribution with mean \eqn{\mu0} and variance
\eqn{\sigma^2/\kappa0}.
}
\details{
See Manual.pdf in "data" folder.
}
\examples{
library(StepSignalMargiLike)
n <- 5
data.x <- rnorm(n, 1, 1)
data.x <- c(data.x, rnorm(n, 10,1))
data.x <- c(data.x, rnorm(n, 2,1))
data.x <- c(data.x, rnorm(n, 10,1))
data.x <- c(data.x, rnorm(n, 1,1))
prior.norm.A(data.x)
}
\references{
Chao Du, Chu-Lan Michael Kao and S. C. Kou (2015),
"Stepwise Signal Extraction via Marginal Likelihood".
Forthcoming in Journal of American Statistical Association.
}
|
8c6a2804bb481a0c5158b2820ebb0056a5d93633 | 387cef68bd1759aa02bffe9c097d45787e862106 | /tests/testthat/test-mHMM.R | 6910feb595bf23eb486e0aacb0afcfb840561acb | [] | no_license | emmekeaarts/mHMMbayes | 85c1f2cbe7c34e94f6b9c463f673007db5d575a2 | 0222eb41d7e143eae02a33199c93364fabd07b13 | refs/heads/master | 2023-08-31T01:12:49.606904 | 2023-08-14T12:01:50 | 2023-08-14T12:01:50 | 167,544,703 | 11 | 10 | null | 2023-07-25T22:37:58 | 2019-01-25T12:33:24 | R | UTF-8 | R | false | false | 14,037 | r | test-mHMM.R | context("function mHMM and S3 print and summary methods")
################
### CREATE OUTPUT OBJECTS TO TEST
################
## general properties tested model
## Small fixture: 3 hidden states, 2 categorical dependent variables (4 and 2
## categories), 10 subjects x 100 occasions, and a deliberately short MCMC
## chain (J = 11, burn_in = 5) so the suite stays fast.
## NOTE(review): the exact numeric expectations in the tests below depend on
## these seeds and on the internal sampling order of sim_mHMM()/mHMM().
n_t <- 100
n <- 10
m <- 3
J = 11
burn_in = 5
n_dep <- 2
q_emiss <- c(4,2)
## group-level transition probability matrix (also used as start values)
gamma <- matrix(c(0.8, 0.1, 0.1,
                  0.2, 0.6, 0.2,
                  0.1, 0.2, 0.7), ncol = m, byrow = TRUE)
## group-level emission distributions, one matrix per dependent variable
emiss_distr <- list(matrix(c(0.45, 0.45, 0.05, 0.05,
                             0.1, 0.05, 0.8, 0.05,
                             0.1, 0.1, 0.2, 0.6), nrow = m, ncol = q_emiss[1], byrow = TRUE),
                    matrix(c(0.7, 0.3,
                             0.9, 0.1,
                             0.8, 0.2), nrow = m, ncol = q_emiss[2], byrow = TRUE)
                    )
set.seed(4231)
data_sim <- sim_mHMM(n_t = n_t, n = n, gen = list(m = m, n_dep = n_dep, q_emiss = q_emiss), gamma = gamma,
                     emiss_distr = emiss_distr, var_gamma = .5, var_emiss = c(.5, 0.5))
colnames(data_sim$obs) <- c("subj", "output_1", "output_2")
# Fit the mHMM on 2 dep variable data
set.seed(3523)
out_2st_simb <- mHMM(s_data = data_sim$obs,
                    gen = list(m = m, n_dep = n_dep, q_emiss = q_emiss),
                    start_val = c(list(gamma), emiss_distr),
                    mcmc = list(J = J, burn_in = burn_in), show_progress = FALSE)
##### Create model with covariates
## xx holds one design matrix per (transition distribution, dep var 1, dep
## var 2); intercept-only by default, here with a covariate on the emissions
xx <- rep(list(matrix(1, ncol = 1, nrow = n)), (n_dep + 1))
for(i in 2:(n_dep + 1)){
  xx[[i]] <- cbind(xx[[i]], nonverbal_cov$std_CDI_change)
}
set.seed(3523)
out_2st_sim_cov1 <- mHMM(s_data = data_sim$obs, xx = xx,
                         gen = list(m = m, n_dep = n_dep, q_emiss = q_emiss),
                         start_val = c(list(gamma), emiss_distr),
                         mcmc = list(J = J, burn_in = burn_in), show_progress = FALSE)
## covariate on the transition distribution (gamma) only
## NOTE(review): `xx_gamma_only[[i]]` relies on i == n_dep + 1 left over from
## the loop above -- works, but fragile.
xx_gamma_only <- rep(list(matrix(1, ncol = 1, nrow = n)), (n_dep + 1))
xx_gamma_only[[1]] <- cbind(xx_gamma_only[[i]], nonverbal_cov$std_CDI_change)
set.seed(3523)
out_2st_sim_cov2 <- mHMM(s_data = data_sim$obs, xx = xx_gamma_only,
                         gen = list(m = m, n_dep = n_dep, q_emiss = q_emiss),
                         start_val = c(list(gamma), emiss_distr),
                         mcmc = list(J = J, burn_in = burn_in), show_progress = FALSE)
## same gamma-only model, but with unused xx elements left NULL
xx_gamma_only_V2 <- rep(list(NULL), (n_dep + 1))
xx_gamma_only_V2[[1]] <- cbind(rep(1,n), nonverbal_cov$std_CDI_change)
set.seed(3523)
out_2st_sim_cov3 <- mHMM(s_data = data_sim$obs, xx = xx_gamma_only_V2,
                         gen = list(m = m, n_dep = n_dep, q_emiss = q_emiss),
                         start_val = c(list(gamma), emiss_distr),
                         mcmc = list(J = J, burn_in = burn_in), show_progress = FALSE)
## covariate on the first dependent variable only
xx_dep1_only <- rep(list(matrix(1, ncol = 1, nrow = n)), (n_dep + 1))
xx_dep1_only[[2]] <- cbind(xx_dep1_only[[i]], nonverbal_cov$std_CDI_change)
set.seed(3523)
out_2st_sim_cov4 <- mHMM(s_data = data_sim$obs, xx = xx_dep1_only,
                         gen = list(m = m, n_dep = n_dep, q_emiss = q_emiss),
                         start_val = c(list(gamma), emiss_distr),
                         mcmc = list(J = J, burn_in = burn_in), show_progress = FALSE)
## deliberately malformed xx inputs used to exercise mHMM's input validation
xx_wrong1 <- rep(list(NULL), (n_dep + 1))
for(i in 2:(n_dep + 1)){
  xx_wrong1[[i]] <- matrix(nonverbal_cov$std_CDI_change, ncol = 1)
}
xx_wrong2 <- rep(list(NULL), (n_dep + 1))
xx_wrong2[[1]] <- matrix(nonverbal_cov$std_CDI_change, ncol = 1)
xx_wrong3 <- list(cbind(rep(1,n), nonverbal_cov$std_CDI_change))
xx_wrong4 <- cbind(rep(1,n), nonverbal_cov$std_CDI_change)
xx_wrong5 <- rep(list(NULL), (n_dep + 1))
xx_wrong5[[1]] <- cbind(rep(1,n), c(rep(2,n/2), rep(1, n/2)))
xx_wrong6 <- rep(list(NULL), (n_dep + 1))
xx_wrong6[[1]] <- data.frame(rep(1,n), as.factor(c(rep(2,n/2), rep(1, n/2))))
####################
## TESTING
###############
## Each malformed input must be rejected with the documented error message;
## the expected strings are partial matches against mHMM()'s stop() messages.
test_that("errors mHMM input", {
  # start_val must have exactly 1 + n_dep elements (gamma + one per dep var)
  expect_error(mHMM(s_data = data_sim$obs,
                    gen = list(m = m, n_dep = n_dep, q_emiss = q_emiss),
                    start_val = list(gamma, list(emiss_distr)),
                    mcmc = list(J = J, burn_in = burn_in), show_progress = FALSE),
               "number of elements in the list start_val")
  # factor columns in s_data are not allowed
  expect_error(mHMM(s_data = data.frame(data_sim$obs[,1], as.factor(data_sim$obs[,2]), data_sim$obs[,3]),
                    gen = list(m = m, n_dep = n_dep, q_emiss = q_emiss),
                    start_val = c(list(gamma), emiss_distr),
                    mcmc = list(J = J, burn_in = burn_in), show_progress = FALSE),
               "factorial variables")
  # design matrices missing the leading intercept column
  expect_error(mHMM(s_data = data_sim$obs, xx = xx_wrong1,
                    gen = list(m = m, n_dep = n_dep, q_emiss = q_emiss),
                    start_val = c(list(gamma), emiss_distr),
                    mcmc = list(J = J, burn_in = burn_in), show_progress = FALSE),
               "first column in each element of xx has to represent the intercept")
  expect_error(mHMM(s_data = data_sim$obs, xx = xx_wrong2,
                    gen = list(m = m, n_dep = n_dep, q_emiss = q_emiss),
                    start_val = c(list(gamma), emiss_distr),
                    mcmc = list(J = J, burn_in = burn_in), show_progress = FALSE),
               "first column in each element of xx has to represent the intercept")
  # xx must be a list with 1 + n_dep elements (not a bare matrix / short list)
  expect_error(mHMM(s_data = data_sim$obs, xx = xx_wrong3,
                    gen = list(m = m, n_dep = n_dep, q_emiss = q_emiss),
                    start_val = c(list(gamma), emiss_distr),
                    mcmc = list(J = J, burn_in = burn_in), show_progress = FALSE),
               "xx should be a list, with the number of elements")
  expect_error(mHMM(s_data = data_sim$obs, xx = xx_wrong4,
                    gen = list(m = m, n_dep = n_dep, q_emiss = q_emiss),
                    start_val = c(list(gamma), emiss_distr),
                    mcmc = list(J = J, burn_in = burn_in), show_progress = FALSE),
               "xx should be a list, with the number of elements")
  # dichotomous covariates must be coded 0/1, factors are rejected outright
  expect_error(mHMM(s_data = data_sim$obs, xx = xx_wrong5,
                    gen = list(m = m, n_dep = n_dep, q_emiss = q_emiss),
                    start_val = c(list(gamma), emiss_distr),
                    mcmc = list(J = J, burn_in = burn_in), show_progress = FALSE),
               "Dichotomous covariates")
  expect_error(mHMM(s_data = data_sim$obs, xx = xx_wrong6,
                    gen = list(m = m, n_dep = n_dep, q_emiss = q_emiss),
                    start_val = c(list(gamma), emiss_distr),
                    mcmc = list(J = J, burn_in = burn_in), show_progress = FALSE),
               "Factors currently cannot be used as covariates")
})
## Fitted objects must carry the "mHMM" S3 class so print/summary dispatch.
test_that("class inherit", {
  expect_s3_class(out_2st_simb, "mHMM")
  expect_s3_class(out_2st_sim_cov2, "mHMM")
})
## NOTE(review): the numeric values matched below (log-likelihood, AIC,
## transition probabilities) come from the seeded fits above; changing seeds,
## J, or mHMM's internal sampling order will invalidate them.
test_that("print mHMM", {
  # 2 dependent var, no covariates
  expect_output(print(out_2st_simb), paste("Number of subjects:", n))
  expect_output(print(out_2st_simb), paste(J, "iterations"))
  expect_output(print(out_2st_simb), paste("likelihood over all subjects:", -170))
  expect_output(print(out_2st_simb), paste("AIC over all subjects:", 377))
  expect_output(print(out_2st_simb), paste("states used:", m))
  expect_output(print(out_2st_simb), paste("dependent variables used:", n_dep))
})
test_that("summary mHMM", {
  # 2 dependent var, no covariates
  expect_output(summary(out_2st_simb), "From state 1 0.762 0.138 0.100")
  expect_output(summary(out_2st_simb), "From state 3 0.095 0.324 0.581")
  expect_output(summary(out_2st_simb), "output_1")
  expect_output(summary(out_2st_simb), "output_2")
})
# --- per-subject posterior draws: dimensions, start values, point estimates ---
test_that("output PD_subj", {
  # test output dimensions
  expect_equal(length(out_2st_simb$PD_subj), n)
  expect_equal(dim(out_2st_simb$PD_subj[[1]]$trans_prob), c(J, m * m ))
  expect_equal(dim(out_2st_simb$PD_subj[[1]]$cat_emiss), c(J, sum(m*q_emiss)))
  expect_equal(dim(out_2st_simb$PD_subj[[1]]$log_likl), c(J, 1))
  expect_equal(length(out_2st_simb$PD_subj[[1]]), length(out_2st_simb$PD_subj[[n]]))
  # start values: iteration 1 must echo the user-supplied emission/transition
  # starting values (row-wise, hence the transposes)
  expect_equal(as.vector(out_2st_simb$PD_subj[[2]]$cat_emiss[1,]), c(as.vector(t(emiss_distr[[1]])),
                                                                     as.vector(t(emiss_distr[[2]]))))
  expect_equal(as.vector(out_2st_simb$PD_subj[[2]]$trans_prob[1,]), as.vector(t(gamma)))
  # calculations: spot-check seeded draws at iteration 10
  expect_equal(as.numeric(out_2st_simb$PD_subj[[2]]$cat_emiss[10,(m*q_emiss[1] + 2)]), 0.4066, tolerance=1e-4)
  expect_equal(as.numeric(out_2st_simb$PD_subj[[2]]$log_likl[10,1]), -181.6811 , tolerance=1e-4)
})
# --- emission covariate effects: present only when emission covariates were
# supplied; otherwise a message string is returned instead of a matrix ---
test_that("output emis_cov_bar", {
  # test output dimensions
  expect_match(out_2st_simb$emiss_cov_bar, "predict the emission probabilities")
  expect_equal(dim(out_2st_sim_cov1$emiss_cov_bar[[1]]), c(J, m * (q_emiss[1] - 1)))
  expect_equal(dim(out_2st_sim_cov1$emiss_cov_bar[[2]]), c(J, m * (q_emiss[2] - 1)))
  expect_match(out_2st_sim_cov2$emiss_cov_bar, "predict the emission probabilities")
  # (an exact duplicate of the cov3 expectation below was removed)
  expect_match(out_2st_sim_cov3$emiss_cov_bar, "predict the emission probabilities")
  expect_equal(dim(out_2st_sim_cov4$emiss_cov_bar[[1]]), c(J, m * (q_emiss[1] - 1)))
  expect_match(out_2st_sim_cov4$emiss_cov_bar[[2]], "predict the emission probabilities")
  # test calculations: spot-check seeded draws at iteration 11
  expect_equal(as.numeric(out_2st_sim_cov1$emiss_cov_bar[[1]][11, m * (q_emiss[1] - 1)]), -1.129246 , tolerance=1e-4)
  expect_equal(as.numeric(out_2st_sim_cov1$emiss_cov_bar[[2]][11, m * (q_emiss[2] - 1)]), 0.07090971 , tolerance=1e-4)
})
# --- group-level emission intercepts (multinomial logit scale) ---
test_that("output emiss_int_bar", {
  # test output dimensions
  expect_equal(dim(out_2st_simb$emiss_int_bar[[1]]), c(J, m * (q_emiss[1] - 1)))
  expect_equal(dim(out_2st_simb$emiss_int_bar[[2]]), c(J, m * (q_emiss[2] - 1)))
  expect_equal(dim(out_2st_sim_cov1$emiss_int_bar[[1]]), c(J, m * (q_emiss[1] - 1)))
  # calculations: spot-check seeded draws
  expect_equal(as.numeric(out_2st_simb$emiss_int_bar[[1]][10,m * (q_emiss[1] - 1)]), 1.03869 , tolerance=1e-4)
  expect_equal(as.numeric(out_2st_simb$emiss_int_bar[[2]][11,m * (q_emiss[2] - 1) - 1]), -1.274551 , tolerance=1e-4)
})
# --- subject-level emission intercepts ---
test_that("output emiss_int_subj", {
  # test output dimensions
  expect_equal(length(out_2st_simb$emiss_int_subj), n)
  expect_equal(dim(out_2st_simb$emiss_int_subj[[1]][[1]]), dim(out_2st_simb$emiss_int_subj[[n]][[1]]))
  expect_equal(dim(out_2st_simb$emiss_int_subj[[1]][[2]]), c(J, m * (q_emiss[2] - 1)))
  # calculations
  expect_equal(as.numeric(out_2st_simb$emiss_int_subj[[1]][[1]][2,1]), -0.8649858 , tolerance = 1e-4)
})
# --- Metropolis acceptance counts for the emission parameters ---
test_that("output emiss_naccept", {
  # test output dimensions (one matrix per dependent variable, J-1 rows)
  expect_equal(length(out_2st_simb$emiss_naccept), n_dep)
  expect_equal(dim(out_2st_simb$emiss_naccept[[1]]), c(J-1, m))
  expect_equal(dim(out_2st_simb$emiss_naccept[[2]]), c(J-1, m))
  # calculations
  expect_equal(out_2st_simb$emiss_naccept[[1]][9,2], 3)
  expect_equal(out_2st_simb$emiss_naccept[[2]][10,3], 3)
})
# --- group-level emission probabilities (probability scale) ---
test_that("output emiss_prob_bar", {
  # test output dimensions
  expect_equal(dim(out_2st_simb$emiss_prob_bar[[1]]),c(J, m * q_emiss[1]))
  expect_equal(dim(out_2st_simb$emiss_prob_bar[[2]]),c(J, m * q_emiss[2]))
  # start values: iteration 1 echoes the supplied starting distributions
  expect_equal(as.vector(out_2st_simb$emiss_prob_bar[[1]][1,]), as.vector(t(emiss_distr[[1]])))
  expect_equal(as.vector(out_2st_simb$emiss_prob_bar[[2]][1,]), as.vector(t(emiss_distr[[2]])))
  # calculations
  expect_equal(as.numeric(out_2st_simb$emiss_prob_bar[[1]][11, q_emiss[1]]), 0.08674569 , tolerance = 1e-4)
  expect_equal(as.numeric(out_2st_sim_cov1$emiss_prob_bar[[2]][10, q_emiss[2]]),0.336074, tolerance = 1e-4)
})
# --- transition covariate effects: matrix only when a gamma covariate exists ---
test_that("output gamma_cov_bar", {
  # test output dimensions
  expect_match(out_2st_simb$gamma_cov_bar, "predict the transition probability")
  expect_match(out_2st_sim_cov1$gamma_cov_bar, "predict the transition probability")
  expect_equal(dim(out_2st_sim_cov2$gamma_cov_bar), c(J, m * (m-1)))
  expect_equal(dim(out_2st_sim_cov3$gamma_cov_bar), c(J, m * (m-1)))
  expect_match(out_2st_sim_cov4$gamma_cov_bar, "predict the transition probability")
  # calculations
  expect_equal(as.numeric(out_2st_sim_cov2$gamma_cov_bar[10, m * (m-1) - 1]), 0.2785782 , tolerance = 1e-4)
})
# --- group-level transition intercepts ---
test_that("output gamma_int_bar", {
  # test output dimensions
  expect_equal(dim(out_2st_simb$gamma_int_bar), c(J, m * (m - 1)))
  expect_equal(dim(out_2st_sim_cov2$gamma_int_bar), c(J, m * (m - 1)))
  # calculations
  expect_equal(as.numeric(out_2st_sim_cov2$gamma_int_bar[11, m - 1]), -1.69139 , tolerance = 1e-4)
})
# --- subject-level transition intercepts ---
test_that("output gamma_int_subj", {
  # test output dimensions
  expect_equal(length(out_2st_simb$gamma_int_subj), n)
  expect_equal(dim(out_2st_simb$gamma_int_subj[[1]]), dim(out_2st_simb$gamma_int_subj[[n]]))
  expect_equal(dim(out_2st_simb$gamma_int_subj[[1]]), c(J, m * (m - 1)))
  # calculations
  expect_equal(as.numeric(out_2st_simb$gamma_int_subj[[1]][11, 2 * (m-1)]), 0.8804161 , tolerance = 1e-4)
})
# --- Metropolis acceptance counts for the transition parameters ---
test_that("output gamma_naccept", {
  # test output dimensions
  expect_equal(dim(out_2st_simb$gamma_naccept), c((J-1), m))
  # calculations
  expect_equal(out_2st_simb$gamma_naccept[8,3], 5)
})
# --- group-level transition probabilities (probability scale) ---
test_that("output gamma_prob_bar", {
  # test output dimensions
  expect_equal(dim(out_2st_simb$gamma_prob_bar),c(J, m * m))
  # start values: iteration 1 echoes the supplied gamma starting values
  expect_equal(as.vector(out_2st_simb$gamma_prob_bar[1,]), as.vector(t(gamma)))
  # calculations
  expect_equal(as.numeric(out_2st_simb$gamma_prob_bar[11,1]),0.8075333 , tolerance = 1e-4)
  expect_equal(as.numeric(out_2st_simb$gamma_prob_bar[10,8]), 0.3415242, tolerance = 1e-4)
})
# --- the $input bookkeeping list records the call's configuration verbatim ---
test_that("output input", {
  # test output dimensions
  expect_equal(length(out_2st_simb$input), 9)
  # test correct output
  expect_equal(out_2st_simb$input[[1]], "categorical")
  expect_equal(out_2st_simb$input[[2]], m)
  expect_equal(out_2st_simb$input[[3]], n_dep)
  expect_equal(out_2st_simb$input[[4]], q_emiss)
  expect_equal(out_2st_simb$input[[5]], J)
  expect_equal(out_2st_simb$input[[6]], burn_in)
  expect_equal(out_2st_simb$input[[7]], n)
  expect_equal(out_2st_simb$input[[8]], rep(n_t, n))
  expect_equal(out_2st_simb$input[[9]], c("output_1", "output_2"))
})
|
7196f3166f8dd5e56117ae7c609b2691127ed106 | 06f2c49e80444cfdbc49357e36c1e1ded3de5526 | /R/NAMESPACE.R | 222b599b13be689ea910718a3314a70f0ee27b75 | [] | no_license | poissonconsulting/juggler2 | 510799c20d17d1e8adb7d7571b2423e40407ee48 | d2372c2bc9d767b60130a5527decd4d6f328255b | refs/heads/master | 2021-07-21T23:35:14.121361 | 2021-02-12T22:48:34 | 2021-02-12T22:48:34 | 68,331,429 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 81 | r | NAMESPACE.R | #' @import datacheckr stringr
#' @importFrom magrittr %<>% %>% add subtract
NULL
|
04d43187012700dc5f920b536001619717c542b4 | ab13454ba79f60279a257a7c6869b85995b1e17e | /oop_with_environments.R | b81c6a798d51bb9d6ea7534b0b9ffe981def3d94 | [] | no_license | mwesthues/workflow | 94101c844fb662b2b9b1c4ea66605b847969ac9a | 8bff5ccc23de49d23ada74fac65b7a85c748afe4 | refs/heads/master | 2021-09-05T17:00:28.584107 | 2018-01-29T20:38:09 | 2018-01-29T20:38:09 | 119,411,736 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 1,232 | r | oop_with_environments.R | # Source: https://rstudio-pubs-static.s3.amazonaws.com/150296_904158e070594471864e89c10c0d14f9.html
# we can create environments
MyEnv <- new.env()
# ...and assign values to their variables...
MyEnv$a <- 5
MyEnv$b <- TRUE
# ...and retrieve them, just like with lists...
MyEnv$a
#... and even loop through them easily
for (el in ls(MyEnv)) {
print(MyEnv[[el]])
}
# we can try to copy an environment
MyEnv_copy <- MyEnv
# ... but it will just create another pointer
MyEnv$a <- 10
MyEnv_copy$a
# you also can't use == to compare, but need the identical function
identical(MyEnv_copy, MyEnv)
##
# Just like we did before with lists, we can also set the class attribute
# of environments. So we get an easy way to create objects that behave like we
# know it from other languages by simply returning a pointer to the environment
# of a function from it. We do this by using the 'environment' function to get
# this pointer to the current environment inside the constructor function.
# with environments we get classes...
# Constructor: returns this call's evaluation environment, tagged with the
# S3 class "MyClass". The environment exposes the (shifted) field `x` and the
# accessor closure `get_x`, which reads `x` by lexical scoping — so later
# writes to obj$x are visible through obj$get_x().
MyClass <- function(x) {
  # "private" initialization: store the input shifted down by one
  x <- x - 1
  # accessor reads `x` from the constructor's own environment
  get_x <- function() {
    return(x)
  }
  structure(environment(), class = "MyClass")
}
#... that are truly classy
MyObject <- MyClass(3)
MyObject$x          # 2: the constructor stored x - 1
MyObject$x <- 5     # mutate the "field" directly in the environment
MyObject$get_x()    # 5: the accessor sees the mutation (reference semantics)
|
5da1473aac6e2df6e539ab306775c2ac6b51ccce | 25449f88edddc74beb261a934964d7d1ce358deb | /man/plot.vpi.Rd | bc064dc4480e3ee3ee8123fe780a7736d857d5eb | [
"MIT"
] | permissive | adokter/bioRad | 53de114ca6e2151743045db8556ffd7a45f90570 | d4935eddaa7cc1c3c50e47278e72967c8bbd980c | refs/heads/master | 2023-09-01T10:49:36.747974 | 2023-07-28T14:12:57 | 2023-07-28T14:12:57 | 59,586,835 | 29 | 21 | NOASSERTION | 2023-09-02T17:36:08 | 2016-05-24T15:49:06 | R | UTF-8 | R | false | true | 3,535 | rd | plot.vpi.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/plot.vpi.R
\name{plot.vpi}
\alias{plot.vpi}
\title{Plot an integrated profile (\code{vpi})}
\usage{
\method{plot}{vpi}(
x,
quantity = "mtr",
xlab = "time",
ylab = "migration traffic rate [#/km/h]",
main = "MTR",
night_shade = TRUE,
elev = -0.268,
lat = NULL,
lon = NULL,
ylim = NULL,
nightshade = TRUE,
...
)
}
\arguments{
\item{x}{An object inheriting from class \code{vpi}, typically the result of a
call to \link[=integrate_profile]{integrate_profile}.}
\item{quantity}{Character string with the quantity to plot, one of
\itemize{
\item \code{vid} (vertically integrated density),
\item \code{vir} (vertically integrated reflectivity),
\item \code{mtr} (migration traffic rate),
\item \code{rtr} (reflectivity traffic rate),
\item \code{mt} ((cumulative) migration traffic),
\item \code{rt} ((cumulative) reflectivity traffic),
\item \code{ff} (height-averaged ground speed)
\item \code{dd} (height-averaged direction)
\item \code{u} (height-averaged u-component of ground speed),
\item \code{v} (height-averaged v-component of ground speed).
}}
\item{xlab}{A title for the x-axis.}
\item{ylab}{A title for the y-axis.}
\item{main}{A title for the plot.}
\item{night_shade}{Logical, whether to plot night time shading.}
\item{elev}{Numeric, sun elevation to use for day/night transition,
see \link{sunrise}.}
\item{lat}{(optional) Latitude in decimal degrees. Overrides the lat
attribute of \code{x}.}
\item{lon}{(optional) Longitude in decimal degrees. Overrides the lon
attribute of \code{x}.}
\item{ylim}{y-axis plot range, numeric atomic vector of length 2.}
\item{nightshade}{Deprecated argument, use night_shade instead.}
\item{...}{Additional arguments to be passed to the low level
\link[graphics:plot.default]{plot} plotting function.}
}
\value{
No return value, side effect is a plot.
}
\description{
Plot an object of class \code{vpi}.
}
\details{
The integrated profiles can be visualized in various related quantities, as specified by
argument \code{quantity}:
\itemize{
\item \code{vid}: Vertically Integrated Density, i.e. the aerial surface density of
individuals. This quantity is dependent on the assumed radar cross section
per individual (RCS)
\item \code{vir}: Vertically Integrated Reflectivity. This quantity is independent of
the value of individual's radar cross section
\item \code{mtr}: Migration Traffic Rate. This quantity is dependent on the assumed
radar cross section (RCS)
\item \code{rtr}: Reflectivity Traffic Rate. This quantity is independent on the
assumed radar cross section (RCS)
\item \code{mt}: Migration Traffic. This quantity is dependent on the assumed radar
cross section (RCS)
\item \code{rt}: Reflectivity Traffic. This quantity is independent on the assumed
radar cross section (RCS)
\item \code{ff}: Horizontal ground speed in m/s
\item \code{dd}: Horizontal ground speed direction in degrees
\item \code{u}: Ground speed component west to east in m/s
\item \code{v}: Ground speed component south to north in m/s
\item \code{height}: Mean flight height (height weighted by reflectivity eta) in m
above sea level
The height-averaged ground speed quantities (ff,dd,u,v) and height are weighted averages by reflectivity eta.
}
}
\examples{
# vertically integrate a vpts object:
vpi <- integrate_profile(example_vpts)
# plot the migration traffic rates
plot(vpi)
# plot the vertically integrated densities, without night shading:
plot(vpi, quantity = "vid", night_shade = FALSE)
}
|
aef8408da866f7599e8d6e1b36ea09839d5cfc55 | a289946a3b069f7e343e9f54590989926ebe7f32 | /tests/testthat/test-search-onset.R | e1c452e8c67a50a5f819738ca7b7b57195f4e0d2 | [] | no_license | hejtmy/navr | ffdfc1e9e5a6b2c54c35d026a34e39c4a3f9a55b | 2bcc4eba8dbed2d1cb4835139966770cfc6b64cb | refs/heads/master | 2023-03-08T16:49:38.487959 | 2023-03-01T02:21:56 | 2023-03-01T02:21:56 | 119,145,726 | 0 | 0 | null | 2020-03-08T04:06:41 | 2018-01-27T08:10:24 | R | UTF-8 | R | false | false | 2,617 | r | test-search-onset.R | context("Onset searching")
# Shared fixture for all tests below: start from the preprocessed navr object,
# drop speeds beyond 3 SDs (recomputing totals), then median-smooth over 11
# points. TRUE is spelled out — `T` is a reassignable binding, not a keyword.
obj <- navr_object_preprocessed
obj <- remove_unreal_speeds(obj, type = "std", cutoff = 3, total_recalculate = TRUE)
obj <- smooth_speed(obj, type = "median", points = 11)
# --- search_onsets: returns time/time_since_start/duration; impossible
# thresholds yield empty results ---
test_that("testing search onset", {
  expect_silent(ls <- search_onsets(obj, speed_threshold = 5, min_duration = 2))
  expect_equal(c("time", "time_since_start", "duration"), names(ls))
  # NOTE(review): expect_equal takes (object, expected, ...); the third length
  # here lands in `...` — presumably all three lengths should be compared. Verify.
  expect_equal(length(ls$time), length(ls$time_since_start), length(ls$duration))
  expect_gt(length(ls$time_since_start), 0)
  expect_silent(ls <- search_onsets(obj, speed_threshold = 5, min_duration = 2, still_duration = 2))
  expect_gt(length(ls$time_since_start), 0)
  # still_speed_threshold defaulting: explicit value must match the default
  expect_silent(ls2 <- search_onsets(obj, speed_threshold = 5, min_duration = 2, still_speed_threshold = 5, still_duration = 2))
  expect_equal(ls, ls2)
  # unreachable thresholds/durations produce zero onsets
  expect_silent(ls <- search_onsets(obj, speed_threshold = 500, min_duration = 2, still_duration = 2))
  expect_length(ls$time_since_start, 0)
  expect_silent(ls <- search_onsets(obj, speed_threshold = 1, min_duration = 200))
  expect_length(ls$time_since_start, 0)
  expect_silent(ls <- search_onsets(obj, speed_threshold = 1, min_duration = 1, still_duration = 200))
  expect_length(ls$time_since_start, 0)
})
# --- pause_duration: allowing pauses finds more onsets; default is 0 ---
test_that("testing search onset with pauses", {
  expect_silent(res <- search_onsets(obj, speed_threshold = 5, min_duration = 2, pause_duration = 1))
  expect_silent(res2 <- search_onsets(obj, speed_threshold = 5, min_duration = 2, pause_duration = 0))
  expect_gt(length(res$time_since_start), length(res2$time_since_start))
  # omitting pause_duration behaves like pause_duration = 0
  res <- search_onsets(obj, speed_threshold = 5, min_duration = 2)
  expect_equal(res, res2)
})
# --- search_stops mirrors search_onsets for below-threshold periods ---
test_that("testing search stops", {
  expect_silent(ls <- search_stops(obj, speed_threshold = 5, min_duration = 2))
  expect_gt(length(ls$time_since_start), 0)
  expect_silent(ls <- search_stops(obj, speed_threshold = 0, min_duration = 0))
  expect_length(ls$time_since_start, 0)
  expect_silent(ls <- search_stops(obj, speed_threshold = 1, min_duration = 1000))
  expect_length(ls$time_since_start, 0)
})
# --- deliberation stops = stops + minimum rotation requirement ---
test_that("testing searching for deliberation stops", {
  # min_rotation = 0 degenerates to plain stop search
  expect_silent(ls <- search_deliberation_stops(obj, speed_threshold = 5, min_duration = 2, min_rotation = 0))
  expect_silent(ls_stop <- search_stops(obj, speed_threshold = 5, min_duration = 2))
  expect_equal(sum(ls_stop$time_since_start - ls$time_since_start), 0)
  expect_warning(ls <- search_deliberation_stops(obj, speed_threshold = 5, min_duration = 2, min_rotation = 10)) #warning because of deprecated add_angle_difference
  expect_gt(length(ls_stop$time_since_start), length(ls$time_since_start))
})
|
bd01307360d36da14ce9d4da77e4ec69bc35ba28 | ebf2d02e0b13ae18614a5dce7203991548d5ea32 | /scripts/spat_mete_ddr.R | fad42f837c380d64753bbc4f89b7cb15138bc15a | [
"MIT"
] | permissive | weecology/mete-spatial | 48d2917afb1b88b9e80f94e5a95b96a0cb1a18ca | 1ed11e0661b70dfb79834058729b3ad5a7df1be8 | refs/heads/master | 2022-01-23T12:07:31.462049 | 2022-01-10T18:12:26 | 2022-01-10T18:12:26 | 7,699,982 | 1 | 0 | null | null | null | null | UTF-8 | R | false | false | 2,344 | r | spat_mete_ddr.R | ## Purpose: to generate the analytical METE DDR predictions
## given the parameters in the empirically observed communities
print('Submitting jobs to compute the analytical METE DDRs, ...')
source('./scripts/spat_functions.R')

# command-line arguments: <server> <site_index | 'all'> <sadType | 'both'>
# (assignment uses <- throughout; `=` at top level is discouraged in R)
clArgs <- commandArgs(trailingOnly=TRUE)
if (length(clArgs) > 0) {
  server <- clArgs[1]
  site_index <- clArgs[2]
  sadType <- clArgs[3]
}
if (sadType == 'both')
  sadType <- c('empirSAD','meteSAD')

# per-site names and spatial-scale parameters read from the data directory
shrtnames <- read.table('./data/shrtnames.txt', colClasses='character')
grain_fine <- as.numeric(read.table('./data/grain_fine.txt'))
bisect_fine <- as.numeric(read.table('./data/bisect_fine.txt'))
bisect_coarse <- as.numeric(read.table('./data/bisect_coarse.txt'))

# 'all' expands to one index per entry of shrtnames; otherwise coerce to integer
if (site_index != 'all')
  site_index <- as.integer(site_index)
if (site_index == 'all') {
  site_index <- seq_len(length(shrtnames))
}

nice <- FALSE # make TRUE to use nice when submitting jobs
wait <- TRUE # make FALSE to submit all the jobs at once
# Build and submit one spat_heap_ddr.py job per (site, SAD type) combination.
# NOTE(review): as displayed, the outer for(i ...) loop has no closing `}`
# before the final print() — verify a brace was not lost; as written this
# would not parse.
for(i in site_index) {
  for(j in sadType) {
    # side length of the fine-grained grid, in map units
    unit_distance = sqrt(grain_fine[i]) * n_pixels_wide(bisect_fine[i])
    abu_file = paste('./data/', shrtnames[i], '_sad.csv', sep='')
    # output/log file names encode the SAD type used
    if (j == 'meteSAD') {
      out_file = paste('./sorensen/', shrtnames[i],
                       '_mete_sor.csv', sep='')
      log_file = paste('./scripts/log_files/', shrtnames[i],
                       '_mete_sor.log', sep='')
    }
    if (j == 'empirSAD') {
      out_file = paste('./sorensen/', shrtnames[i],
                       '_empirSAD_mete_sor.csv', sep='')
      log_file = paste('./scripts/log_files/', shrtnames[i],
                       '_empirSAD_mete_sor.log', sep='')
    }
    # On an LSF cluster, wrap the python call in a bsub submission;
    # otherwise run python directly (optionally under nice).
    # NOTE(review): in the LSF branch cmd is built but system() is only
    # called in the else branch — confirm the LSF jobs actually get submitted.
    if (server == 'LSF') {
      cmd = paste('bsub -q week -M 10 -o', log_file, '-J', shrtnames[i],
                  'python ./scripts/spat_heap_ddr.py',
                  bisect_fine[i], bisect_coarse[i],
                  j, abu_file, out_file,
                  unit_distance, sep=' ')
    }
    else {
      if (nice)
        cmd = 'nice python'
      else
        cmd = 'python'
      cmd = paste(cmd, './scripts/spat_heap_ddr.py',
                  bisect_fine[i], bisect_coarse[i],
                  j, abu_file, out_file,
                  unit_distance, '>', log_file, '2>&1', sep=' ')
      system(cmd, wait=wait)
    }
  }
print('Submitting jobs to compute the analytical METE DDRs, complete!')
|
ccc0ba1178bd709c003a53c7c68d72bf6cf2e799 | da08909e4b6d88a39867bbc08adfaa85d9879009 | /code/boral_withOTUs.R | 36fa2c916bb77f9b6f086c03e6e959cf0ff38ee4 | [] | no_license | neiljun/tutorials_communityAnalyses | fa52706bc45de0ad1fddeca3d7de2c2e04887a92 | 6451697b494a9c0fa7d1aac1c713922e00eae82f | refs/heads/master | 2021-01-08T18:42:12.919070 | 2019-02-19T20:44:28 | 2019-02-19T20:44:28 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 15,086 | r | boral_withOTUs.R |
# boral example
# particularly designed for studies with very diverse communities, e.g. OTU data
# custom functions to display ...
# ... mcmc trace plots
# ... distribution of shared enviroment and residual correlations
# ... strength and direction of shared enviroment and residual correlations
# Load dependencies with library() so a missing package fails immediately
# (require() only warns and returns FALSE, letting the script limp on).
library(boral)
library(dplyr)    # handy dataframe manipulation tools
library(tidyr)    # ditto
library(ggplot2)  # plotting
library(circlize) # chord diagrams
source("code/boral_withOTUs_auxilaryfunctions.R")
# -------------------------------------------------------------------#
# load data --- this is the same as in part 1, just copied over
# this dataset has been modified and simplified... the original version has not yet been published
exdata <- readRDS(file = "data/woodEndophyte_exData.RData")
# here's the OTU matrix (samples x OTUs, read counts)
otus <- exdata[['otus']]
head(otus)
# the OTU matrix has been minimally processed
# it has been quality checked to remove failed samples (i.e. abundance of reads below a threshold) and remove OTUs that clearly are not fungal
dim(otus)
# there are 50 samples (rows) and 1000 OTUS (cols)
# here's the covariate dataframe (with sample metadata too)
covariates <- exdata[['covariates']]
head(covariates)
# -------------------------------------------------------------------#
# Trim out rare OTUs because they don't contribute much information --- this is the same as in part 1, just copied over
# keep only OTUs present in more than minPerc% of samples
minPerc <- 20
numSamps <- dim(otus)[1]
minSamps <- floor( numSamps * (minPerc/100) )
x <- apply(otus > 0, 2, sum) # remove OTUs that don't meet this criteria
otus <- otus[, x > minSamps]
dim(otus)
# now we have 55 OTUs
# select environmental variables you'd like to use to explain OTU composition/abundances
covariates %>%
  select(size, waterperc, density) -> envVars
# -------------------------------------------------------------------#
# Fit model-based constrained and unconstrained ordinations using the boral package
# recode any categorical predictors as a dummy variable
# in this case, it's size which just has 2 levels
# (model.matrix with `- 1` drops the intercept so each level gets a 0/1 column)
dummy <- data.frame(model.matrix( ~ size - 1, data = envVars))
envVars %>%
  mutate(sizesmall = dummy$sizesmall) %>%
  select(-size) -> envVars
# specify row effects in a rowEffs dataframe
# these are typically blocking variables; ie variables that you have good reason to believe will influence OTU abundances and want to account for it
# in this case, I'm including 2 row effects...
# one for each sample (1:50)
# --- this is really important for OTU data because we know there is sample-to-sample variation in OTU abundances during lab processing that we need to account for
# and one for each site from which the samples were collected (1:3)
rowEffs <- data.frame(
  sample = as.numeric(seq_len(nrow(otus))), # one row effect per sample
  site = covariates$site                    # one row effect per site
)
# factors need to be coded as numeric: map each site label to a consecutive
# integer id in order of first appearance. match() reproduces the original
# loop's mapping without the 1:length() pitfall or repeated coercion.
uniqSITE <- unique(rowEffs$site)
rowEffs$site <- as.numeric(match(as.character(rowEffs$site), as.character(uniqSITE)))
# two-column matrix of numeric IDs: col 1 = sample effect, col 2 = site effect
row.ids <- matrix(c(rowEffs$sample, rowEffs$site), ncol = 2)
# set mcmc controls
mcmc.controls <- list(n.burnin = 50,
                      n.iteration = 100,
                      n.thin = 1,
                      seed = 1)
# these values are set so that everything runs relatively quickly
# real values are more like...
#n.burnin = 10000
#n.iteration = 100000
#n.thin = 100
# also, its good practice to do model fitting multiple times (9-12?) with new seed values to make sure you are not stuck in a weird part of parameter space
# set priors (see ?boral prior.control for which model component each
# type/hypparams entry applies to); use <- for assignment, not `=`
prior.controls <- list(type = c("normal","normal","normal","uniform"),
                       hypparams = c(100, 20, 100, 20))
# Fit the model with only latent variables included
# this is analogous to a unconstrained ordination
fit.lv <- boral(y = otus,
                family = "negative.binomial",
                num.lv = 2,
                row.eff = "fixed",
                row.ids = row.ids,
                calc.ics = TRUE, # for model selection
                save.model = TRUE, # to access mcmc samples
                mcmc.control = mcmc.controls,
                prior.control = prior.controls)
# Fit the model with X variation and latent variables included
# this is analogous to a constrained ordination
# (identical to fit.lv except for the X = envVars covariate matrix)
fit.envlv <- boral(y = otus,
                   X = envVars,
                   family = "negative.binomial",
                   num.lv = 2,
                   row.eff = "fixed",
                   row.ids = row.ids,
                   calc.ics = TRUE, # for model selection
                   save.model = TRUE, # to access mcmc samples
                   mcmc.control = mcmc.controls,
                   prior.control = prior.controls)
# -------------------------------------------------------------------#
# Figure out if your models converged
# note that since these models were run using quick mcmc controls, I am pretty durn sure that they didn't converge
# I'm just going to look at fit.envlv in this section for simplicity but you'll also want to check the convergence of fit.lv and any other models too
# Plot a trace of the MCMC chain for each parameter
# extract the mcmc object (remember, this only gets saved if save.model = TRUE)
mcmc.obj <- as.mcmc.list(fit.envlv$jags.model$BUGSoutput)
# remember that there are typically a ton of parameters estimated in a single boral model
dim(mcmc.obj[[1]]) # this model includes 55 OTUs, 3 covariates, 2 latent variables, and 2 row effects which brings us to 539 parameters
# typically, you can use some pre-packaged functions from within coda to plot the traces
library(coda) # library() fails fast if coda is missing; require() only warns
#plot(mcmc.obj) # this will take a while because there are a lot of parameters...
# but as you can see, these boral models tend to have a ton of parameters
# I've made some custom functions that I think work better for visualizing all these parameters
source("code/boral_withOTUs_auxilaryfunctions.R")
plotTrace.lvcoefs(mcmc.obj = mcmc.obj, mcmc.controls = mcmc.controls) # each line is a parameter associated with an OTU
plotTrace.lvs(mcmc.obj = mcmc.obj, mcmc.controls = mcmc.controls) # each line is a parameter associated with an OTU
plotTrace.rowcoefs(mcmc.obj = mcmc.obj, mcmc.controls = mcmc.controls) # each line is a row effect parameter, e.g. there are 3 sites so there are 3 rowID2 lines
plotTrace.Xcoefs(mcmc.obj = mcmc.obj, mcmc.controls = mcmc.controls) # each line is a parameter associated with an OTU, Xcoef 1 = waterperc, 2 = density, 3 = sizesmall, correspond to column positions in the covariate matrix
# interpreting the traces...
# remember that there's only 1 MCMC chain for all boral models (check out boral help for more details on why)
# your parameter estimate (y) has converged if the estimated value remains stable over the iterations (x) (e.g. chain's slope = 0)
# Calculate convergence statistics -- Geweke convergence diagnostic
# this diagnostic is recommended because of the 1 mcmc chain thing; its discussed on the boral() help page
# Geweke (1992) proposed a convergence diagnostic for Markov chains based on a test for equality of the means of the first and last part of a Markov chain (by default the first 10% and the last 50%).
# If the samples are drawn from the stationary distribution of the chain, the two means are equal and Geweke's statistic has an asymptotically standard normal distribution
# So, if the p-value is less than 0.05, there is evidence the MCMC chain has not converged
# calculate z
geweke <- geweke.diag(mcmc.obj, frac1=0.1, frac2=0.5)
geweke.z <- geweke[[1]]$z
geweke.df <- data.frame(param = names(geweke.z), z = geweke.z)
# reformat, convert z to pval, do holm pval adjustment for multiple comparisons
geweke.df %>%
  mutate(pval = 2*pnorm(abs(z), lower.tail = FALSE)) %>%
  mutate(adj.pval = p.adjust(pval, method = "holm")) -> geweke.df
# annotate params: split JAGS names like "X.coefs[3,2]" into type / OTU / index
geweke.df %>%
  separate(param, into = c("paramType","string"), sep = "\\[", fill = "right") %>%
  separate(string, into = c("OTUnum","string2"), sep = "\\,", fill = "right") %>%
  separate(string2, into = c("paramNum", "drop"), sep = "\\]", fill = "right") %>%
  select(paramType, paramNum, OTUnum, z, pval, adj.pval) -> geweke.df.ann
head(geweke.df.ann) # pval that indicates whether each parameter has converged
geweke.df.ann %>%
  group_by(paramType, paramNum) %>%
  summarize(total = length(adj.pval),
            numConverged = sum(adj.pval > 0.05),
            propConverged = numConverged/total) -> summ
summ # proportion of parameters that have converged by parameter type
# -------------------------------------------------------------------#
# Check residuals (Dunn-Smyth residual plots from boral's plot method)
plot(fit.lv)
plot(fit.envlv)
# -------------------------------------------------------------------#
# Visualize as ordinations
par(mfrow = c(2, 1))
# unconstrained model (FALSE spelled out; `F` is a reassignable binding)
lvsplot(fit.lv, biplot = FALSE)
# constrained model
lvsplot(fit.envlv, biplot = FALSE) # I like to turn off the biplot because the OTU names as vectors get to be overwhelming
# if you'd like to make prettier plots, you can access the scaled lv scores...
lvsplot.vals <- lvsplot(fit.envlv, biplot = TRUE, est = "median", return.vals = TRUE)
lvsplot.vals
# -------------------------------------------------------------------#
# Check out the X coef estimates
#make dfs
OTUId <- row.names(fit.envlv$X.coefs.median)
X.df <- data.frame(OTUId = OTUId, fit.envlv$X.coefs.median)
upper.df <- data.frame(OTUId = OTUId, fit.envlv$hpdintervals$X.coefs[,,"upper"])
lower.df <- data.frame(OTUId = OTUId, fit.envlv$hpdintervals$X.coefs[,,"lower"])
#make long
X.df %>%
gather(key = "term", value = "X", -OTUId) -> X.df.l
upper.df %>%
gather(key = "term", value = "X.upper", -OTUId) -> upper.df.l
lower.df %>%
gather(key = "term", value = "X.lower", -OTUId) -> lower.df.l
#join dfs
X.df.l %>%
left_join(upper.df.l) %>%
left_join(lower.df.l) -> Xcoefs.df
head(Xcoefs.df)
# lower and upper refer to the bounds of the 95% highest posterior density (HPD) credible interval
# -------------------------------------------------------------------#
# Check out correlation between OTUs due to...
# (i) similarities in the response to explainatory variables (ie shared enviromental response); see get.enviro.cor() help for more info
# (ii) residual variation accounted for by latent variables; see get.residual.cor() help for more info
enviro.cor <- get.enviro.cor(fit.envlv, prob = 0.95)
residual.cor <- get.residual.cor(fit.envlv, prob = 0.95)
# these objects contain a lot of info, so I wrote a function to extract just the info I want...
source("code/boral_withOTUs_auxilaryfunctions.R")
enviro.cor.df <- extract_cors(corobj = enviro.cor, corType = "enviro")
residual.cor.df <- extract_cors(corobj = residual.cor, corType = "residual")
enviro.cor.df %>%
left_join(residual.cor.df) -> cor.df
head(cor.df)
dim(cor.df)
# this is a dataframe that holds infor each pairwise OTU combination (rows), i.e. there are 1485 unique pairs of 55 OTUs
# the shared environment and residual correlation values are there for each OTU pair,
# along with an categorical factor that indicates whether the correlation is "significant", ie 95% highest posterior interval does not contain zero
# plot the distribution of correlations in your study
#signif factors
signif.order <- c("no","positive","negative")
cor.df$enviro_signif <- factor(cor.df$enviro_signif, levels = signif.order)
cor.df$residual_signif <- factor(cor.df$residual_signif, levels = signif.order)
# Shared-environment correlations: count OTU pairs per significance class,
# compute the percentage of significantly positive/negative pairs, and plot
# a histogram annotated with those percentages.
cor.df %>%
  group_by(enviro_signif) %>%
  summarize(num = length(enviro_signif)) -> tmp
tmp
total <- sum(tmp[,2])
percPos <- round((tmp[tmp$enviro_signif=="positive", "num"]/total)*100, digits = 0)
percNeg <- round((tmp[tmp$enviro_signif=="negative", "num"]/total)*100, digits = 0)
p.env<-ggplot(cor.df, aes(x = enviro_cor, fill = enviro_signif)) +
  geom_histogram() +
  geom_vline(xintercept = 0, linetype=2) +
  xlab("Shared environment correlation") + ylab("Frequency") +
  annotate("text", y=150, x=-0.5, color = 4, label=paste(percNeg, "%", sep="")) +
  annotate("text", y=150, x=0.5, color = 3, label=paste(percPos ,"%", sep=""))
# Shared enviromental correlation between two OTUs suggests that the OTU abundances either respond in the same way (positively covary) or in opposite ways (negatively covary) to the measured X vars included in the model
# This output doesn't mean too much since the model did not converge, but if it did you could say that...
# Many OTU pairs either occupy either very similar (positive correlations, 15%) or very different (negative correlations, 14%) wood habitat as characterized by wood size, waterperc, and density
p.env
# Residual correlations: same summary and histogram for the residual
# (after-covariate) correlation; note `tmp`, `percPos`, `percNeg` are reused.
cor.df %>%
  group_by(residual_signif) %>%
  summarize(num = length(residual_signif)) -> tmp
total <- sum(tmp[,2])
percPos <- round((tmp[tmp$residual_signif=="positive", "num"]/total)*100, digits = 0)
percNeg <- round((tmp[tmp$residual_signif=="negative", "num"]/total)*100, digits = 0)
p.res<-ggplot(cor.df, aes(x = residual_cor, fill = residual_signif)) +
  geom_histogram() +
  geom_vline(xintercept = 0, linetype=2) +
  xlab("Residual correlation") + ylab("Frequency") +
  annotate("text", y=150, x=-0.0005, color=4, label=paste(percNeg,"%", sep="")) +
  annotate("text", y=150, x=0.0005, color=3, label=paste(percPos,"%", sep=""))
# Residual correlation between two OTUs suggests that there was some variable(s) not accounted for in the model that influenced the OTU abundances to positively/negatively covary
# This output doesn't mean too much since the model did not converge, but if it did you could say that...
# Most OTU pairs are not residually correlated after accounting for the measured wood covariates included in the model (100 - (14 + 21) %)
# More OTU pairs have positive (21%) than negative (14%) residual correlations, although the magnitude of those "significant" residual correlations are often weak
p.res
# plot a chord diagram to dig deeper into OTU correlations
# here's an example using the shared environment correlations
# you'll probably be interested in annotating with more informative OTU names, but this will do for now...
# filter out the OTU pairs that are not significantly correlated and color based on whether the correlation is positive or negative
cor.df %>%
  filter(enviro_signif != "no") %>%
  select(otu1, otu2, enviro_cor) %>%
  mutate(col = ifelse(enviro_cor > 0,
                      "black","red")) %>%
  arrange(desc(enviro_cor)) -> env.frame
# start plot: one panel, half the circle height reserved for the track
par(mfrow=c(1,1))
circos.par(track.height=.5)
# columns 1:3 are otu1, otu2, enviro_cor (link weight); link color from `col`
chordDiagram(env.frame[,1:3],
             col = env.frame$col,
             annotationTrack = c("grid"),
             annotationTrackHeight = c(0.01,0.01),
             preAllocateTracks = 1,
             grid.col= 1, reduce = 0)
# add sector (OTU) labels on the pre-allocated outer track, rotated so they
# read outward from the circle
circos.trackPlotRegion(track.index = 1, panel.fun = function(x, y) {
  xlim = get.cell.meta.data("xlim")
  ylim = get.cell.meta.data("ylim")
  sector.name = get.cell.meta.data("sector.index")
  circos.text(mean(xlim), ylim[1] + .001, sector.name,
              facing = "clockwise",
              niceFacing = TRUE, adj = c(0, 0.5), cex=.7)
}, bg.border = NA)
|
475bc2590dfe7a18b53434ecc9a611e15df0fb87 | ca2b7a7b8692aa06cb8f3ad547da8454695b8709 | /R/rSD.R | 601579990beccecc288594801a1dba6eae87ebbc | [] | no_license | jthurner/hydroGOF | 0cc522a508daac8f40fe830aab2ce6a18356abdf | 60fecbfbb094915dff2718abaac5817377864545 | refs/heads/master | 2021-01-19T04:34:08.821128 | 2017-05-01T15:15:08 | 2017-05-01T15:15:08 | 95,649,683 | 0 | 1 | null | 2017-06-28T08:58:30 | 2017-06-28T08:58:30 | null | UTF-8 | R | false | false | 2,550 | r | rSD.R | ################################################
# 'rSD': Ratio of Standard Deviations #
################################################
# 15-Dic-2008; 06-Sep-09 #
###############################
# SD(sim) / SD(obs)
# 'obs' : numeric 'data.frame', 'matrix' or 'vector' with observed values
# 'sim' : numeric 'data.frame', 'matrix' or 'vector' with simulated values
# 'Result': Ratio of Standard Deviations between 'sim' and 'obs',
# when multiplied by 100 its units is %
# S3 generic for the ratio of standard deviations, sd(sim) / sd(obs);
# dispatches on the class of `sim`.
rSD <- function(sim, obs, ...) {
  UseMethod("rSD")
}
# Ratio of standard deviations: sd(sim) / sd(obs).
# Returns NA with a warning when the ratio cannot be computed, i.e. when
# sd(obs) is zero or NA (fewer than two non-missing observations).
rSD.default <- function (sim, obs, na.rm=TRUE, ...){
     denominator <- sd(obs, na.rm = na.rm)
     # Also guard against an NA sd: the original `denominator != 0` test
     # raised "missing value where TRUE/FALSE needed" in that case.
     if (!is.na(denominator) && denominator != 0) {
       # Reuse the already-computed denominator instead of calling sd(obs) twice.
       rSD <- sd(sim, na.rm = na.rm) / denominator
     } else {
          rSD <- NA
          warning("'sd(obs)=0', it is not possible to compute 'rSD'")
     } # ELSE end
     return(rSD)
} # 'rSD.default' end
# Column-wise ratio of standard deviations for matrices of simulated and
# observed values; returns a named numeric vector (one entry per column).
rSD.matrix <- function (sim, obs, na.rm=TRUE, ...){
  # Checking that 'sim' and 'obs' have the same dimensions.
  # `isTRUE(all.equal(...))` replaces `all.equal(...) != TRUE`: all.equal()
  # returns a character vector on mismatch, which `!=` handled only by accident.
  if ( !isTRUE(all.equal(dim(sim), dim(obs))) )
     stop( paste("Invalid argument: dim(sim) != dim(obs) ( [",
           paste(dim(sim), collapse=" "), "] != [",
           paste(dim(obs), collapse=" "), "] )", sep="") )
  # vapply guarantees a numeric(ncol) result (sapply could return a list),
  # and the redundant preallocation of the original is dropped.
  rSD <- vapply(seq_len(ncol(obs)),
                function(i) rSD.default(sim[, i], obs[, i], na.rm = na.rm, ...),
                numeric(1))
  names(rSD) <- colnames(obs)
  return(rSD)
} # 'rSD.matrix' end
# Data-frame method: coerce both inputs to matrices and delegate to the
# matrix method, which computes the ratio column by column.
rSD.data.frame <- function (sim, obs, na.rm=TRUE, ...){
   rSD.matrix(as.matrix(sim), as.matrix(obs), na.rm = na.rm, ...)
} # 'rSD.data.frame' end
################################################################################
# Author: Mauricio Zambrano-Bigiarini #
################################################################################
# Started: 22-Mar-2013 #
# Updates: #
################################################################################
# zoo method: strip the time index from `sim` (and `obs`, when it is also a
# zoo object), then dispatch on the underlying core data -- matrix-like data
# goes to the matrix method, everything else falls through to the default.
rSD.zoo <- function(sim, obs, na.rm=TRUE, ...){
    sim <- zoo::coredata(sim)
    if (is.zoo(obs)) {
      obs <- zoo::coredata(obs)
    }
    if (is.matrix(sim) || is.data.frame(sim)) {
      rSD.matrix(sim, obs, na.rm = na.rm, ...)
    } else {
      NextMethod(sim, obs, na.rm = na.rm, ...)
    }
} # 'rSD.zoo' end
|
4b4a8e1dc6f349ab5c4d07dabbeb8cfdecc3a7c2 | d96bf3b991b61019d82f0d36a3482c9201bfa451 | /aux_aster.r | cf2fed08ddf95e2c5c538248630325bfbad614e4 | [] | no_license | alfcrisci/ASTER_decoding | e82984fd0daf1f847f396f3ca278d4310ab66c81 | 18bfc8090573f8fe04ed974dac1a321a376a2a87 | refs/heads/master | 2021-06-11T06:20:42.842171 | 2016-12-06T14:38:39 | 2016-12-06T14:38:39 | 75,731,283 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 8,797 | r | aux_aster.r | require(gdalUtils)
require(raster)
#------------------------------------------------------------------------------
# Next, define functions for calculations
# Convert an angle (or vector of angles) from degrees to radians.
calc_radians <- function(x) {
  (x * pi) / 180
}
# Convert ASTER digital numbers (DN) to at-sensor radiance:
# radiance = (DN - 1) * ucc, where `ucc` is the band's unit conversion
# coefficient (see the `ucc` lookup table below).
# Bug fix: the original multiplied by the undefined global `ucc1` instead of
# the `ucc` argument, so the parameter was silently ignored.
calc_radiance <- function(x, ucc){(x - 1) * ucc}
# Top-of-atmosphere reflectance from radiance:
# rho = (pi * L * d^2) / (ESUN * sin(sza)), with `sza` the solar elevation
# angle in degrees and `earth_sun_dist` in astronomical units.
# Bug fix: the original divided by the undefined global `irradiance1`
# instead of the `irradiance` argument.
calc_reflectance <- function(x,earth_sun_dist,irradiance,sza){
  (pi * x * (earth_sun_dist^2)) / (irradiance * sin(pi * sza / 180))
}
# Parse an ASTER L1T HDF file's embedded metadata (via gdalinfo) and return a
# list with: UTM zone, half-pixel-adjusted extents for the 15 m (VNIR),
# 30 m (SWIR) and 90 m (TIR) grids, Earth-Sun distance, acquisition
# date/day-of-year, and the per-band gain designations.
# Bug fix: the returned list named `gain_03b` twice; the second entry now
# correctly returns `gain_03n`, which was previously computed but dropped.
# NOTE(review): the acquisition date is read from fixed character positions
# of `file_name`, so this assumes the standard ASTER granule naming
# convention -- confirm before using on renamed files.
read_metadata_ASTER <- function(file_name) {
  meta_data <- gdalinfo(file_name)
  # Corner coordinates (metres) from the LOWERRIGHTM / UPPERLEFTM tags.
  lr_row <- grep('LOWERRIGHTM', meta_data)
  ul_row <- grep('UPPERLEFTM', meta_data)
  lr <- substr(meta_data[lr_row[1]], 15, 50)
  ul <- substr(meta_data[ul_row[1]], 14, 50)
  clip4 <- regexpr(', ' , ul)
  clip5 <- regexpr(', ', lr)
  # Define LR and UL x and y values for 15m VNIR Data (shift by half a pixel)
  ul_y <- as.numeric((substr(ul, 1, (clip4 - 1)))) + 7.5
  ul_x <- as.numeric((substr(ul, (clip4 + 2), 10000))) - 7.5
  lr_y <- as.numeric((substr(lr, 1, (clip5 - 1)))) - 7.5
  lr_x <- as.numeric((substr(lr, (clip5 + 2) , 10000))) + 7.5
  # Define LR and UL x and y values for 30m SWIR Data
  ul_y_30m <- as.numeric((substr(ul, 1, (clip4 - 1)))) + 15
  ul_x_30m <- as.numeric((substr(ul, (clip4 + 2), 10000))) - 15
  lr_y_30m <- as.numeric((substr(lr, 1, (clip5 - 1)))) - 15
  lr_x_30m <- as.numeric((substr(lr, (clip5 + 2) , 10000))) + 15
  # Define LR and UL x and y values for 90m TIR Data
  ul_y_90m <- as.numeric((substr(ul, 1, (clip4 - 1)))) + 45
  ul_x_90m <- as.numeric((substr(ul, (clip4 + 2), 10000))) - 45
  lr_y_90m <- as.numeric((substr(lr, 1, (clip5 - 1)))) - 45
  lr_x_90m <- as.numeric((substr(lr, (clip5 + 2) , 10000))) + 45
  # Define UTM zone
  utm_row <- grep('UTMZONECODE', meta_data)
  utm_zone <- substr(meta_data[utm_row[1]], 1, 50)
  clip6 <- regexpr('=', utm_zone)
  utm_zone <- substr(utm_zone, clip6 + 1, 50)
  # Configure extent properties (15m VNIR)
  y_min <- min(ul_y, lr_y)
  y_max <- max(ul_y, lr_y)
  x_max <- max(ul_x, lr_x)
  x_min <- min(ul_x, lr_x)
  # Configure extent properties (30m SWIR)
  y_min_30m <- min(ul_y_30m, lr_y_30m)
  y_max_30m <- max(ul_y_30m, lr_y_30m)
  x_max_30m <- max(ul_x_30m, lr_x_30m)
  x_min_30m <- min(ul_x_30m, lr_x_30m)
  # Configure extent properties (90m TIR)
  y_min_90m <- min(ul_y_90m, lr_y_90m)
  y_max_90m <- max(ul_y_90m, lr_y_90m)
  x_max_90m <- max(ul_x_90m, lr_x_90m)
  x_min_90m <- min(ul_x_90m, lr_x_90m)
  raster_dims_15m <- extent(x_min, x_max, y_min, y_max)
  raster_dims_30m <- extent(x_min_30m, x_max_30m, y_min_30m, y_max_30m)
  raster_dims_90m <- extent(x_min_90m, x_max_90m, y_min_90m, y_max_90m)
  # grab the date from the filename and convert to day of year
  month <- substr(file_name, 12, 13)
  day <- substr(file_name, 14, 15)
  year <- substr(file_name, 16, 19)
  datep <- paste(year, month, day, sep = '-')
  doy <- as.numeric(strftime(datep, format = '%j'))
  # Remove unneccessary variables
  rm(month, day, year)
  # need SZA--calculate by grabbing solar elevation info
  sza <- meta_data[grep('SOLARDIRECTION=', meta_data)]
  clip3 <- regexpr(', ', sza)
  sza <- as.numeric((substr(sza, (clip3 + 2), 10000)))
  # Need the gain designation for each band
  gain_01 <- substr(meta_data[grep('GAIN=01', meta_data)], 12, 15)
  gain_02 <- substr(meta_data[grep('GAIN=02', meta_data)], 12, 15)
  gain_04 <- substr(meta_data[grep('GAIN=04', meta_data)], 12, 15)
  gain_05 <- substr(meta_data[grep('GAIN=05', meta_data)], 12, 15)
  gain_06 <- substr(meta_data[grep('GAIN=06', meta_data)], 12, 15)
  gain_07 <- substr(meta_data[grep('GAIN=07', meta_data)], 12, 15)
  gain_08 <- substr(meta_data[grep('GAIN=08', meta_data)], 12, 15)
  gain_09 <- substr(meta_data[grep('GAIN=09', meta_data)], 12, 15)
  gain_03b <- substr(meta_data[grep('GAIN=3B', meta_data)], 12, 15)
  gain_03n <- substr(meta_data[grep('GAIN=3N', meta_data)], 12, 15)
  # Calculate Earth Sun Distance (Achard and D'Souza 1994; Eva and Lambin, 1998)
  earth_sun_dist <- (1 - 0.01672 * cos(calc_radians(0.9856 * (doy - 4))))
  return(list(utm_zone = utm_zone,
              raster_dims_15m = raster_dims_15m,
              raster_dims_30m = raster_dims_30m,
              raster_dims_90m = raster_dims_90m,
              earth_sun_dist = earth_sun_dist,
              date = as.Date(datep),
              doy = doy,
              gain_01 = gain_01,
              gain_02 = gain_02,
              gain_04 = gain_04,
              gain_05 = gain_05,
              gain_06 = gain_06,
              gain_07 = gain_07,
              gain_08 = gain_08,
              gain_09 = gain_09,
              gain_03b = gain_03b,
              gain_03n = gain_03n  # was duplicated `gain_03b` in the original
              ))
}
# Extract one subdataset (band) from an ASTER HDF file, write it to a GeoTIFF,
# and return it as a raster with the scene's UTM CRS and extent applied.
#   index_k  : which match of `type_data` within the subdataset list to use
#   nameband : suffix used for the intermediate .tif file name
#   geoextent: extent object (e.g. from read_metadata_ASTER()) to assign
#   remove   : delete the intermediate .tif after reading (default FALSE)
readband_ASTER <- function(file_name, index_k = 4, nameband = "b13",
                           utm_zone, geoextent, type_data = 'TIR_Swath',
                           remove = FALSE) {
  # Fix: the original string literal contained an embedded newline; proj4
  # tokens are separated by plain spaces instead.
  crs_string <- paste('+proj=utm +zone=', utm_zone,
                      ' +datum=WGS84 +units=m +no_defs +ellps=WGS84 +towgs84=0,0,0',
                      sep = '')
  sds <- get_subdatasets(file_name)
  match_tir <- grep(type_data, sds)
  k <- match_tir[index_k]  # subdataset index passed to gdal_translate
  new_file_name <- strsplit(file_name, '.hdf')
  tif_name <- paste(new_file_name, "_", nameband, '.tif', sep = '')
  gdal_translate(file_name, tif_name, sd_index = k, output_Raster = TRUE)
  aster_file <- suppressWarnings(raster(readGDAL(tif_name)))
  proj4string(aster_file) <- CRS(crs_string)
  extent(aster_file) <- geoextent
  # Zero is the ASTER fill value; mask it out.
  aster_file[aster_file == 0] <- NA
  if (isTRUE(remove)) {
    file.remove(tif_name)
  }
  return(aster_file)
}
###########################################################################################
# Build the ASTER lookup tables used for LST retrieval and save them to .rds:
#   ucc         : unit conversion coefficients per band/gain (Abrams, 1999)
#   irradiance  : exo-atmospheric spectral irradiance (Thome et al., MODTRAN)
#   klist_ASTER : Planck constants K1/K2, wavelengths and unit conversions
#                 for the TIR bands B10-B14 (K1 = c1/lambda^5, K2 = c2/lambda,
#                 CODATA 2014 radiation constants)
#   emiss_ASTER : example surface emissivities
# Order for ucc (Abrams, 1999) is: Band 1 high, normal, low; Band 2 h, n, l;
# b3 h, n, l (3N & 3B the same)
bands <- c('1', '2', '3N', '3B', '4', '5', '6', '7', '8', '9')
gain_names <- c('Band', 'High Gain', 'Normal', 'Low Gain 1', 'Low Gain 2')
ucc_vals <- matrix( c(0.676, 1.688, 2.25, 0, 0.708, 1.415, 1.89, 0, 0.423,
                      0.862, 1.15, 0, 0.423, 0.862, 1.15, 0, 0.1087, 0.2174,
                      0.2900, 0.2900, 0.0348, 0.0696, 0.0925, 0.4090, 0.0313,
                      0.0625, 0.0830, 0.3900, 0.0299, 0.0597, 0.0795, 0.3320,
                      0.0209,0.0417, 0.0556, 0.2450, 0.0159, 0.0318, 0.0424,
                      0.2650), nrow = 10, ncol = 4, byrow = TRUE)
ucc <- data.frame( bands, ucc_vals )
names(ucc) <- gain_names
# Thome et al (B) is used, which uses spectral irradiance values from MODTRAN
# Ordered b1, b2, b3N, b4, b5...b9
irradiance <- c(1848,1549,1114,225.4,86.63,81.85,74.85,66.49,59.85)
klist_ASTER <- list()
klist_ASTER$B10$k1 = 3.0236879000387607831e3
klist_ASTER$B10$k2 = 1.73346669879518072289e3
klist_ASTER$B11$k1 = 2.45950033786752380082e3
klist_ASTER$B11$k2 = 1.66332642774566473988e3
klist_ASTER$B12$k1 = 1.9086243591011446439e3
klist_ASTER$B12$k2 = 1.58107402197802197802e3
klist_ASTER$B13$k1 = 8.90016580863773201879e2
klist_ASTER$B13$k2 = 1.35733713207547169811e3
klist_ASTER$B14$k1 = 6.4645039694287387514e2
klist_ASTER$B14$k2 = 1.27325430088495575221e3
klist_ASTER$B10$wavelength = 28.3
klist_ASTER$B11$wavelength = 8.65
klist_ASTER$B12$wavelength = 9.1
klist_ASTER$B13$wavelength = 10.6
klist_ASTER$B14$wavelength = 11.3
klist_ASTER$B10$unitconversion = 6.882e-3
klist_ASTER$B11$unitconversion = 6.780e-3
klist_ASTER$B12$unitconversion = 6.590e-3
klist_ASTER$B13$unitconversion = 5.693e-3
klist_ASTER$B14$unitconversion = 5.225e-3
klist_ASTER$c2micro = 14388
# Bug fix: the original called saveRDS(emiss_ASTER, "klist_ASTER.rds") here,
# before emiss_ASTER existed (and with the wrong object for this file).
saveRDS(klist_ASTER, "klist_ASTER.rds")
emiss_ASTER <- list()
emiss_ASTER$mattoni_rossi$emis = 0.94
emiss_ASTER$rame_lucido$emis = 0.15
emiss_ASTER$marmo_bianco$emis = 0.95
emiss_ASTER$vernice_bianca$emis = 0.89
emiss_ASTER$vernice_nera$emis = 0.95
emiss_ASTER$acciaio_ossidato$emis = 0.94
saveRDS(emiss_ASTER, "emiss_ASTER.rds")
table_ASTER <- list(ucc = ucc,
                    irradiance = irradiance,
                    emiss_ASTER = emiss_ASTER,
                    klist_ASTER = klist_ASTER)
# Remove unneccessary variables
rm(bands, gain_names, ucc_vals, ucc, irradiance)
saveRDS(table_ASTER, "tables_ASTER.rds")
calc_lst=function(rbright,emis=0.92,c2micro=14388,Lwave=10.16,kelvin=F) {
temp=rbright / ( 1 + ( Lwave * (rbright / c2micro)) * log(emis))
if ( kelvin==F) { temp=temp-273.15}
return(temp)
}
calc_tbright=function(r,k1,k2,unitconversion) {
temp=(k1/((r-1)*unitconversion))+1
return((k2/log(temp)))
}
|
e0c3244b45ce869786252a0d7d6b06d0b71522c2 | 56d38fc637ae50fafacf29bc0285e8bf1d3dd819 | /R/assertions.R | dada41d44e45310ee75de85cfc2d6c1338dcf8c1 | [
"MIT"
] | permissive | isabella232/cli-12 | 8d0201e0344089739c24e59adc20f539952a20b9 | b34ae2ceac0716a28df1a6f7b9fc1b24f577d701 | refs/heads/master | 2023-08-02T15:25:46.101332 | 2021-09-07T14:03:37 | 2021-09-07T14:03:37 | 404,305,822 | 0 | 0 | NOASSERTION | 2021-09-08T12:43:13 | 2021-09-08T10:31:27 | null | UTF-8 | R | false | false | 742 | r | assertions.R |
is_string <- function(x) {
is.character(x) && length(x) == 1 && !is.na(x)
}
is_flag <- function(x) {
is.logical(x) && length(x) == 1 && !is.na(x)
}
is_border_style <- function(x) {
is_string(x) && x %in% rownames(box_styles())
}
is_padding_or_margin <- function(x) {
is.numeric(x) && length(x) %in% c(1, 4) && all(!is.na(x)) &&
all(as.integer(x) == x)
}
is_col <- function(x) {
is.null(x) || is_string(x) || is.function(x)
}
is_count <- function(x) {
is.numeric(x) && length(x) == 1 && !is.na(x) && as.integer(x) == x &&
x >= 0
}
is_tree_style <- function(x) {
is.list(x) &&
length(x) == 4 &&
!is.null(names(x)) &&
all(sort(names(x)) == sort(c("h", "v", "l", "j"))) &&
all(sapply(x, is_string))
}
|
d9661d53c567aceded3b0d2a70ae9145e7c72b88 | 6f0b7eba4d96bd84b90e750bea9fd5080d8171eb | /R/make_manifest.R | 072824b430d404352d3cba5330c253a0fe4300b4 | [
"MIT"
] | permissive | wp-hnu/terrainr | 2546084afedd2a3fc7a171326c6efd2eee1df07a | d64ee71271c573f1759e46e9ce89b0eefce8f13a | refs/heads/main | 2023-08-02T21:15:55.355551 | 2021-09-24T13:17:06 | 2021-09-24T13:17:06 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 5,819 | r | make_manifest.R | #' Transform rasters and write manifest file for import into Unity
#'
#' This function crops input raster files into smaller square tiles and then
#' converts them into either .png or .raw files which are ready to be imported
#' into the Unity game engine. It also writes a "manifest" file and importer
#' script which may be used to automatically import the tiles into Unity.
#'
#' @param heightmap File path to the heightmap to transform.
#' @param overlay Optionally, file path to the image overlay to transform.
#' @param output_prefix The file path to prefix output tiles with.
#' @param manifest_path File path to write the manifest file to.
#' @param importer_path File name to write the importer script to. Set to NULL
#' to not copy the importer script. Will overwrite any file at the same path.
#'
#' @return `manifest_path`, invisibly.
#'
#' @examples
#' \dontrun{
#' if (!isTRUE(as.logical(Sys.getenv("CI")))) {
#' simulated_data <- data.frame(
#' id = seq(1, 100, 1),
#' lat = runif(100, 44.04905, 44.17609),
#' lng = runif(100, -74.01188, -73.83493)
#' )
#' simulated_data <- sf::st_as_sf(simulated_data, coords = c("lng", "lat"))
#' output_files <- get_tiles(simulated_data)
#' temptiff <- tempfile(fileext = ".tif")
#' merge_rasters(output_files["elevation"][[1]], temptiff)
#' make_manifest(temptiff, output_prefix = tempfile(), importer_path = NULL)
#' }
#' }
#'
#' @export
make_manifest <- function(heightmap,
                          overlay = NULL,
                          output_prefix = "import",
                          manifest_path = "terrainr.manifest",
                          importer_path = "import_terrain.cs") {
  input_raster <- raster::raster(heightmap)
  # Maximum elevation: written to the manifest as the Unity terrain height.
  max_raster <- raster::cellStats(input_raster, "max")
  # Unity terrain tiles are 4097x4097; work out how many tiles are needed.
  x_tiles <- ceiling(input_raster@ncols / 4097)
  y_tiles <- ceiling(input_raster@nrows / 4097)
  n_tiles <- x_tiles * y_tiles
  # Loop until tempfile() yields n_tiles distinct paths (collisions possible).
  temptiffs <- NULL
  while (length(temptiffs) != n_tiles) {
    temptiffs <- unique(vapply(
      1:(n_tiles),
      function(x) tempfile(fileext = ".tiff"),
      character(1)
    ))
  }
  # One output name per (x, y) tile position.
  file_combos <- expand.grid(
    x = 1:x_tiles,
    y = 1:y_tiles
  )
  file_names <- mapply(
    function(x, y) {
      paste0(
        output_prefix,
        "_",
        x,
        "_",
        y
      )
    },
    file_combos$x,
    file_combos$y
  )
  # Reuse x_tiles/y_tiles as pixel offsets of each tile's origin (0, 4097, ...).
  x_tiles <- 0:(x_tiles - 1)
  x_tiles <- (x_tiles * 4097)
  y_tiles <- 0:(y_tiles - 1)
  y_tiles <- (y_tiles * 4097)
  # Tab-separated manifest consumed by the Unity importer script: one row per
  # tile with its position, size, height scale and (optional) texture name.
  manifest <- data.frame(
    filename = paste0(sort(file_names), ".raw"),
    x_pos = -rep(x_tiles, each = length(file_names) / length(x_tiles)),
    z_pos = rep(y_tiles, length.out = length(file_names)),
    x_length = 4097,
    height = max_raster,
    z_length = 4097,
    resolution = 4097,
    texture = if (is.null(overlay)) "" else paste0(sort(file_names), ".png")
  )
  # Crop the heightmap into per-tile GeoTIFFs, then convert to 16-bit PNG.
  temptiffs <- crop_tif(heightmap, manifest, temptiffs)
  temppngs <- NULL
  while (length(temppngs) != length(temptiffs)) {
    temppngs <- unique(vapply(
      seq_along(temptiffs),
      function(x) tempfile(fileext = ".png"),
      character(1)
    ))
  }
  names(temppngs) <- names(temptiffs)
  convert_to_png(temptiffs, temppngs, max_raster)
  # Mirror each tile horizontally and write it as planar 16-bit RGB .raw-style
  # output expected by Unity heightmap import.
  mapply(
    function(x, y) {
      processing_image <- magick::image_read(x)
      processing_image <- magick::image_flop(processing_image)
      processing_image <- magick::image_convert(processing_image,
        format = "RGB",
        depth = 16,
        interlace = "Plane"
      )
      magick::image_write(processing_image, y)
    },
    temppngs,
    names(temppngs)
  )
  unlink(temppngs)
  # Same pipeline for the optional image overlay, written as flipped PNGs.
  if (!is.null(overlay)) {
    input_overlay <- raster::raster(overlay)
    max_overlay <- raster::cellStats(input_overlay, "max")
    temptiffs <- NULL
    while (length(temptiffs) != n_tiles) {
      temptiffs <- unique(vapply(
        1:(n_tiles),
        function(x) tempfile(fileext = ".tiff"),
        character(1)
      ))
    }
    temptiffs <- crop_tif(overlay, manifest, temptiffs, "texture")
    temppngs <- NULL
    # NOTE(review): here temppngs is set to names(temptiffs) (the final
    # texture file names) rather than fresh tempfiles -- presumably
    # intentional so textures are written in place; confirm.
    temppngs <- names(temptiffs)
    names(temppngs) <- names(temptiffs)
    convert_to_png(temptiffs, temppngs, max_overlay)
    mapply(
      function(x, y) {
        processing_image <- magick::image_read(x)
        processing_image <- magick::image_flip(processing_image)
        processing_image <- magick::image_flop(processing_image)
        magick::image_write(processing_image, y)
      },
      temppngs,
      names(temppngs)
    )
  }
  utils::write.table(manifest,
    manifest_path,
    row.names = FALSE,
    col.names = FALSE,
    sep = "\t",
    quote = FALSE
  )
  # NOTE(review): the roxygen docs say the importer script "will overwrite any
  # file at the same path", but the !file.exists() guard means an existing
  # file is never replaced -- confirm which behaviour is intended.
  if (!is.null(importer_path) && !file.exists(importer_path)) {
    file.copy(
      system.file("import_terrain.cs", package = "terrainr"),
      importer_path,
      overwrite = TRUE
    )
  }
  return(invisible(manifest_path))
}
# Crop `img` into one GeoTIFF per manifest row, writing each tile to the
# corresponding path in `temptiffs` and naming the result after the manifest
# column given by `field` ("filename" for heightmaps, "texture" for overlays).
# Returns `temptiffs` with those names attached.
crop_tif <- function(img, manifest, temptiffs, field = "filename") {
  for (i in seq_len(nrow(manifest))) {
    # changing this to gdalUtilities causes my computer to crash
    # srcwin is "xoff, yoff, xsize, ysize"; x_pos is stored negated in the
    # manifest (Unity coordinates), hence the minus sign here.
    gdalUtils::gdal_translate(img, temptiffs[[i]],
      srcwin = paste0(
        -manifest$x_pos[[i]],
        ", ",
        manifest$z_pos[[i]],
        ", ",
        manifest$x_length[[i]],
        ", ",
        manifest$z_length[[i]]
      )
    )
    names(temptiffs)[[i]] <- manifest[[field]][[i]]
  }
  temptiffs
}
# Convert each cropped GeoTIFF to a 16-bit PNG, rescaling pixel values from
# [0, max_val] to the full UInt16 range [0, 65535]. Deletes the source
# GeoTIFFs when done.
convert_to_png <- function(temptiffs,
                           temppngs,
                           max_val) {
  mapply(
    function(x, y) {
      # changing this to gdalUtils causes errors
      sf::gdal_utils(
        "translate",
        source = x,
        destination = y,
        options = c(
          "-ot", "UInt16",
          "-of", "png",
          "-scale", "0", max_val, "0", "65535"
        )
      )
    },
    temptiffs,
    temppngs
  )
  unlink(temptiffs)
}
|
0a2c3fd7cc0448f9f2455c11ecbfa8e3385973f7 | 897df5b57809f1d13479291bae09e9cef2c0f88d | /R/evaluate_smooth2.R | 960d618492bae944fc831e24642aa1699492aa86 | [
"MIT"
] | permissive | andschar/gratia | 8d7327c1aca5e37d2981ab5b7575e8992192915d | 0bf4f0fcbf6f840b55c12fb67c31e9b85623879c | refs/heads/master | 2023-03-10T16:56:53.957470 | 2021-03-01T00:32:40 | 2021-03-01T00:32:40 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 16,732 | r | evaluate_smooth2.R | ##' New `evaluate_smooth()` alike
##'
##' @param object an object of class `"gam"` or `"gamm"`.
##' @param smooth character; a single smooth to evaluate.
##' @param n numeric; the number of points over the range of the covariate at
##' which to evaluate the smooth.
##' @param data a vector or data frame of points at which to evaluate the
##' smooth.
##' @param unconditional logical; should confidence intervals include the
##' uncertainty due to smoothness selection? If `TRUE`, the corrected Bayesian
##' covariance matrix will be used.
##' @param overall_uncertainty logical; should the uncertainty in the model
##' constant term be included in the standard error of the evaluate values of
##' the smooth?
##' @param dist numeric; if greater than 0, this is used to determine when
##' a location is too far from data to be plotted when plotting 2-D smooths.
##' The data are scaled into the unit square before deciding what to exclude,
##' and `dist` is a distance within the unit square. See
##' [mgcv::exclude.too.far()] for further details.
##' @param ... arguments passed to other methods.
##'
##' @return A data frame (tibble), which is of class `"smooth_estimates"`.
##'
##' @export
##'
##' @rdname smooth_estimates
##'
##' @examples
##' load_mgcv()
##' \dontshow{
##' set.seed(2)
##' op <- options(cli.unicode = FALSE, digits = 6)
##' }
##' dat <- gamSim(1, n = 400, dist = "normal", scale = 2)
##' m1 <- gam(y ~ s(x0) + s(x1) + s(x2) + s(x3), data = dat, method = "REML")
##'
##' ## evaluate all smooths
##' smooth_estimates(m1)
##'
##' ## or selected smooths
##' smooth_estimates(m1, smooth = c("s(x0)", "s(x1)"))
##' \dontshow{options(op)}
## S3 generic: evaluate fitted smooths; see `smooth_estimates.gam()` for the
## main method.
`smooth_estimates` <- function(object, ...) {
    UseMethod("smooth_estimates")
}
##' @export
##' @rdname smooth_estimates
##' @importFrom dplyr bind_rows all_of
##' @importFrom tidyr unnest
## Evaluate each (selected) smooth of a fitted GAM over `n` covariate values
## (or user-supplied `data`) and return a single tibble of class
## "smooth_estimates" with one row per evaluation point.
`smooth_estimates.gam` <- function(object,
                                   smooth = NULL,
                                   n = 100,
                                   data = NULL,
                                   unconditional = FALSE,
                                   overall_uncertainty = TRUE,
                                   dist = 0.1, ...) {
    ## if particular smooths selected, map their labels to smooth indices;
    ## otherwise evaluate every smooth in the model
    smooth_ids <- if (!is.null(smooth)) {
        which_smooths(object, smooth) # which smooths match 'smooth'
    } else {
        seq_len(n_smooths(object))
    }
    ## extract the mgcv.smooth objects
    smooths <- get_smooths_by_id(object, smooth_ids)
    ## loop over the smooths and evaluate them; dispatch on the smooth class
    ## via eval_smooth()
    sm_list <- vector(mode = "list", length = length(smooths))
    for (i in seq_along(sm_list)) {
        sm_list[[i]] <- eval_smooth(smooths[[i]],
                                    model = object,
                                    n = n,
                                    data = data,
                                    unconditional = unconditional,
                                    overall_uncertainty = overall_uncertainty,
                                    dist = dist)
    }
    ## create a single df of all the smooths
    sm_list <- bind_rows(sm_list)
    ## need to unnest the `data` column (covariate values nested per row)
    sm_list <- unnest(sm_list, all_of('data'))
    ## add a class
    class(sm_list) <- c("smooth_estimates", class(sm_list))
    ## return
    sm_list
}
##' @export
`smooth_estimates.gamm` <- function(object, ...) {
    ## A gamm() fit is a list holding the GAM part in component "gam";
    ## delegate to the "gam" method on that component.
    smooth_estimates(object$gam, ...)
}
##' Determine the type of smooth and return it n a human readble form
##'
##' @param smooth an object inheriting from class `mgcv.smooth`.
##'
##' @keywords internal
##' @noRd
##' Map the class of an mgcv smooth object to a human-readable label.
##' The pairs are checked in the same order as the original if/else chain,
##' so more specific classes win over base classes they inherit from.
`smooth_type` <- function(smooth) {
    type_map <- c(
        "tprs.smooth"    = "TPRS",
        "ts.smooth"      = "TPRS (shrink)",
        "cr.smooth"      = "CRS",
        "cs.smooth"      = "CRS (shrink)",
        "cyclic.smooth"  = "Cyclic CRS",
        "pspline.smooth" = "P spline",
        "cp.smooth"      = "Cyclic P spline",
        "Bspline.smooth" = "B spline",
        "duchon.spline"  = "Duchon",
        "fs.interaction" = "Factor smooth",
        "gp.smooth"      = "GP",
        "mrf.smooth"     = "MRF",
        "random.effect"  = "Random effect",
        "sw"             = "Soap (wiggly)",
        "sf"             = "Soap (boundary)",
        "soap.film"      = "Soap",
        "t2.smooth"      = "Tensor (T2)",
        "sos.smooth"     = "SOS",
        "tensor.smooth"  = "Tensor"
    )
    for (cls in names(type_map)) {
        if (inherits(smooth, cls)) {
            return(type_map[[cls]])
        }
    }
    stop("Unknown type of smooth")
}
##' Check user-supplied data for suitability
##'
##' @param data a data frame of variables to be checked.
##' @param vars character; vector of terms.
##'
##' @importFrom tibble tibble
##' @importFrom rlang := !!
##'
##' @keywords internal
##' @noRd
##' Validate user-supplied evaluation data against the variables a smooth
##' needs. Data frames must contain every variable in `vars`; a bare numeric
##' vector is accepted for single-variable smooths and coerced to a tibble.
`check_user_data` <- function(data, vars) {
    if (is.data.frame(data)) {
        found <- vars %in% names(data)
        if (any(!found)) {
            missing_vars <- paste(paste0("'", vars[!found], "'"),
                                  collapse = ", ")
            stop(paste("Variable(s)", missing_vars, "not found in 'data'."),
                 call. = FALSE)
        }
        return(data)
    }
    if (is.numeric(data)) { # vector; coerce to data frame
        if (length(vars) > 1L) {
            stop("'smooth' requires multiple data vectors but only 1 provided.",
                 call. = FALSE)
        }
        return(tibble(!!(vars) := data))
    }
    ## anything else we can't handle; bail out
    stop("'data', if supplied, must be a numeric vector or a data frame.",
         call. = FALSE)
}
##' Evaluate estimated spline values
##'
##' @param smooth currently an object that inherits from class `mgcv.smooth`.
##' @param model a fitted model; currently only [mgcv::gam()] and [mgcv::bam()]
##' models are suported.
##' @param data an optional data frame of values to evaluate `smooth` at.
##'
##' @inheritParams eval_smooth
##'
##' @importFrom tibble tibble add_column
##' @importFrom rlang := !!
##' @importFrom dplyr pull all_of
##' @importFrom tidyr nest unnest
##' @importFrom mgcv PredictMat
##'
##' @keywords internal
##' @noRd
##' Evaluate a single smooth at `data`, returning a tibble with the smooth
##' label, fitted values, standard errors, and the covariate values nested in
##' a `data` list-column.
##' Changes: removed the unused local `nr` and the stale commented-out
##' unnest() call; behaviour is otherwise unchanged.
`spline_values2` <- function(smooth, data, model, unconditional,
                             overall_uncertainty = TRUE) {
    X <- PredictMat(smooth, data)   # prediction matrix
    start <- smooth[["first.para"]]
    end <- smooth[["last.para"]]
    para.seq <- start:end           # columns/coefs belonging to this smooth
    coefs <- coef(model)[para.seq]
    fit <- drop(X %*% coefs)
    label <- smooth_label(smooth)
    ## want full vcov for component-wise CI
    V <- get_vcov(model, unconditional = unconditional)
    ## variables for component-wise CIs for smooths
    column_means <- model[["cmX"]]
    lcms <- length(column_means)
    nc <- ncol(V)
    meanL1 <- smooth[["meanL1"]]
    if (isTRUE(overall_uncertainty) && attr(smooth, "nCons") > 0L) {
        ## include uncertainty in the model constant term: embed X in a
        ## full-width matrix of column means before forming the quadratic form
        if (lcms < nc) {
            column_means <- c(column_means, rep(0, nc - lcms))
        }
        Xcm <- matrix(column_means, nrow = nrow(X), ncol = nc, byrow = TRUE)
        if (!is.null(meanL1)) {
            Xcm <- Xcm / meanL1
        }
        Xcm[, para.seq] <- X
        rs <- rowSums((Xcm %*% V) * Xcm)
    } else {
        rs <- rowSums((X %*% V[para.seq, para.seq]) * X)
    }
    ## pmax() guards against tiny negative variances from numerical error
    se.fit <- sqrt(pmax(0, rs))
    d <- smooth_dim(smooth)
    sm_var <- smooth_variable(smooth)
    ## Return object: nest all covariate columns into a `data` list-column
    out <- if (d == 1L) {
        if (is_fs_smooth(smooth)) {
            tbl <- tibble(smooth = rep(label, nrow(X)),
                          est = fit, se = se.fit)
            tbl <- bind_cols(tbl, data)
            nest(tbl, data = all_of(names(data)))
        } else {
            tbl <- tibble(smooth = rep(label, nrow(X)),
                          est = fit, se = se.fit,
                          !!(sm_var) := pull(data, sm_var))
            nest(tbl, data = !!(sm_var))
        }
    } else {
        tbl <- bind_cols(tibble(smooth = rep(label, nrow(X)),
                                est = fit, se = se.fit),
                         data)
        nest(tbl, data = all_of(names(data)))
    }
    out
}
##' S3 methods to evaluate individual smooths
##'
##' @param smooth currently an object that inherits from class `mgcv.smooth`.
##' @param model a fitted model; currently only [mgcv::gam()] and [mgcv::bam()]
##' models are suported.
##' @param data an optional data frame of values to evaluate `smooth` at.
##' @param ... arguments assed to other methods
##'
##' @inheritParams smooth_estimates
##'
##' @export
## S3 generic: evaluate one smooth; methods dispatch on the mgcv smooth class.
`eval_smooth` <- function(smooth, ...) {
    UseMethod("eval_smooth")
}
##' @rdname eval_smooth
##' @importFrom tibble add_column
##' @export
## Default method for univariate/standard smooths: evaluate the smooth over
## `data` (or `n` generated values) and prepend `type` and `by` columns so
## the result ends up ordered smooth, type, by, est, se, ...
`eval_smooth.mgcv.smooth` <- function(smooth, model, n = 100, data = NULL,
                                      unconditional = FALSE,
                                      overall_uncertainty = TRUE,
                                      ...) {
    by_var <- by_variable(smooth) # even if not a by as we want NA later
    ## deal with data if supplied
    data <- process_user_data_for_eval(data = data, model = model, n = n,
                                       id = which_smooth(model,
                                                         smooth_label(smooth)))
    ## values of spline at data
    eval_sm <- spline_values2(smooth, data = data,
                              unconditional = unconditional,
                              model = model,
                              overall_uncertainty = overall_uncertainty)
    ## add on info regarding by variable
    nr <- nrow(eval_sm)
    eval_sm <- add_column(eval_sm, by = rep(by_var, nr),
                          .after = 1L)
    ## add on spline type info; inserted after column 1 so it lands
    ## before `by`
    sm_type <- smooth_type(smooth)
    eval_sm <- add_column(eval_sm, type = rep(sm_type, nr),
                          .after = 1L)
    ## return
    eval_sm
}
##' Wrapper to `gratia::smooth_data()` and `gratia:::check_user_data()` for use
##' with [gratia::eval_smooth()] methods
##'
##' @param data an optional data frame of values for the smooth
##' @param model a fitted model
##' @param n numeric; the number of new observations to generate. Passed to
##' [gratia::smooth_data()].
##' @param id the number ID of the smooth within `model` to process.
##'
##' @keywords internal
##' @noRd
`process_user_data_for_eval` <- function(data, model, n, id) {
    ## No user data: generate `n` covariate values over the smooth's range.
    if (is.null(data)) {
        return(smooth_data(model = model, n = n, id = id))
    }
    ## Otherwise validate the supplied data against the smooth's covariates,
    ## plus its by-variable when the smooth has one ("NA" means no by).
    sm <- get_smooths_by_id(model, id)[[1L]]
    needed_vars <- smooth_variable(sm)
    by_var <- by_variable(sm)
    if (!identical(by_var, "NA")) {
        needed_vars <- append(needed_vars, by_var)
    }
    check_user_data(data, needed_vars)
}
##' @rdname eval_smooth
##' @export
##' @importFrom tibble add_column
## Factor-smooth interaction method: evaluate over `data` (or `n` generated
## values) and label the result with the smooth's type and by-variable.
`eval_smooth.fs.interaction` <- function(smooth, model, n = 100, data = NULL,
                                         unconditional = FALSE,
                                         overall_uncertainty = TRUE,
                                         ...) {
    sm_id <- which_smooth(model, smooth_label(smooth))
    eval_data <- process_user_data_for_eval(data = data, model = model,
                                            n = n, id = sm_id)
    out <- spline_values2(smooth,
                          data = eval_data,
                          model = model,
                          unconditional = unconditional,
                          overall_uncertainty = overall_uncertainty)
    n_rows <- nrow(out)
    ## by-variable first ("NA" when the smooth has no by), then the type,
    ## both inserted after column 1 so columns end up smooth, type, by, ...
    out <- add_column(out, by = rep(by_variable(smooth), n_rows),
                      .after = 1L)
    out <- add_column(out, type = rep(smooth_type(smooth), n_rows),
                      .after = 1L)
    out
}
##' @rdname eval_smooth
##' @export
##' @importFrom tibble add_column
## Random-effect ("re") smooth method: evaluate the smooth and attach the
## type and by-variable labels used throughout smooth_estimates().
`eval_smooth.random.effect` <- function(smooth, model, n = 100, data = NULL,
                                        unconditional = FALSE,
                                        overall_uncertainty = TRUE,
                                        ...) {
    id <- which_smooth(model, smooth_label(smooth))
    newdata <- process_user_data_for_eval(data = data, model = model,
                                          n = n, id = id)
    res <- spline_values2(smooth,
                          data = newdata,
                          model = model,
                          unconditional = unconditional,
                          overall_uncertainty = overall_uncertainty)
    nrows <- nrow(res)
    ## insert `by` then `type` after column 1, giving smooth, type, by, ...
    res <- add_column(res, by = rep(by_variable(smooth), nrows),
                      .after = 1L)
    res <- add_column(res, type = rep(smooth_type(smooth), nrows),
                      .after = 1L)
    res
}
##' @rdname eval_smooth
##' @export
##' @importFrom tibble add_column
## Markov random field smooths are not yet supported; signal the standard
## "not yet implemented" error so callers get a clear message.
`eval_smooth.mrf.smooth` <- function(smooth, model, n = 100, data = NULL,
                                     unconditional = FALSE,
                                     overall_uncertainty = TRUE,
                                     ...) {
    .NotYetImplemented()
}
##' @rdname eval_smooth
##' @export
##' @importFrom tibble add_column
## T2 tensor-product smooth method; same evaluation pipeline as the default
## method. NOTE(review): `dist` is accepted here (unlike the other methods)
## but is not used in the body -- presumably reserved for excluding points
## far from the data when plotting; confirm intent.
`eval_smooth.t2.smooth` <- function(smooth, model, n = 100, data = NULL,
                                    unconditional = FALSE,
                                    overall_uncertainty = TRUE,
                                    dist = 0.1, ...) {
    by_var <- by_variable(smooth) # even if not a by as we want NA later
    ## deal with data if supplied
    data <- process_user_data_for_eval(data = data, model = model, n = n,
                                       id = which_smooth(model,
                                                         smooth_label(smooth)))
    ## values of spline at data
    eval_sm <- spline_values2(smooth, data = data,
                              unconditional = unconditional,
                              model = model,
                              overall_uncertainty = overall_uncertainty)
    ## add on info regarding by variable
    nr <- nrow(eval_sm)
    eval_sm <- add_column(eval_sm, by = rep(by_var, nr), .after = 1L)
    ## add on spline type info (inserted before `by`, after `smooth`)
    sm_type <- smooth_type(smooth)
    eval_sm <- add_column(eval_sm, type = rep(sm_type, nr), .after = 1L)
    ## return
    eval_sm
}
##' @rdname eval_smooth
##' @export
##' @importFrom tibble add_column
`eval_smooth.tensor.smooth` <- function(smooth, model, n = 100, data = NULL,
unconditional = FALSE,
overall_uncertainty = TRUE,
...) {
by_var <- by_variable(smooth) # even if not a by as we want NA later
## deal with data if supplied
data <- process_user_data_for_eval(data = data, model = model, n = n,
id = which_smooth(model,
smooth_label(smooth)))
## values of spline at data
eval_sm <- spline_values2(smooth, data = data,
unconditional = unconditional,
model = model,
overall_uncertainty = overall_uncertainty)
## add on info regarding by variable
nr <- nrow(eval_sm)
eval_sm <- add_column(eval_sm, by = rep(by_var, nr),
.after = 1L)
## add on spline type info
sm_type <- smooth_type(smooth)
eval_sm <- add_column(eval_sm, type = rep(sm_type, nr),
.after = 1L)
## return
eval_sm
}
|
52318cde1a38aac43acd5e9b4f9b7f02379a23ca | 5fe0acf9140cda57c48a982c6d0521bea366bac8 | /Ativ Prát 2/Ativ Prática 2 RASCUNHO.R | 501335098ed2a30cf4bc330a81c8d1ef2b16c7ba | [] | no_license | bruno-marcelino10/trabalhos-em-econometria | b12e901b4cec5e80e60d236cdfb4062a2d75ef8f | 62c5edac604da06900f52e0ef8f9dd6f242ad590 | refs/heads/master | 2023-04-04T17:06:42.324472 | 2021-04-14T05:23:37 | 2021-04-14T05:23:37 | 357,783,545 | 0 | 1 | null | null | null | null | UTF-8 | R | false | false | 2,478 | r | Ativ Prática 2 RASCUNHO.R | ##### Atividade Prática 2 - Econometria (RASCUNHO) #####
library("MASS")
library("AER")
df <- tibble(Boston)
#### SIGNIFICADOS DAS VARIÁVEIS: -----------------------
# medv = valor medio das casas em uma vizinhanças
# lstat = percentual de moradores de baixo status economico
# chas = variável dummy (1 = perto do rio, 0 = caso contrário)
# old = variável dummy (1 = age>=95, 0 = caso contrário)
# indus = proporção de acres de negócios não varejistas por cidade.
#### Respostas
# ---- Q1 --------------------------
mod <- lm(df$medv ~ df$lstat)
summary(mod)
# ---- Q2 --------------------------
grafico_1 <- ggplot(df, aes(x = lstat , y = medv)) +
geom_point() +
ggtitle("Gráfico 1") +
xlab("% Moradores de Baixa Renda") +
ylab("Valor Médio das Casas") +
geom_smooth(method = "lm", color = "red")
print(grafico_1)
# ---- Q3 --------------------------
log_mod <- lm(df$medv ~ I(log(df$lstat, base = exp(1))))
summary(log_mod)
# ---- Q4 --------------------------
grafico_2 <- ggplot(df, aes(x = lstat , y = medv)) +
geom_point() +
ggtitle("Gráfico 2") +
xlab("Log Neperiano da % de Moradores de Baixa Renda") +
ylab("Valor Médio das Casas") +
geom_smooth(method = lm, formula = y ~ log(x, base = exp(1)), color = "red")
print(grafico_2)
# ---- Q5 --------------------------
log2_mod <- lm(df$medv ~ log(df$lstat) + I(log(df$lstat)^2))
summary(log2_mod)
grafico_3 <- ggplot(df, aes(x = lstat , y = medv)) +
geom_point() +
ggtitle("Gráfico 3") +
geom_smooth(method = lm, formula = y ~ x + I(log(x, base = exp(1))^2), color = "red")
print(grafico_3)
# ---- Q6 --------------------------
## Argumentos
x1 = 10
x2 = 11
# Função que retorna a variação de y em um intervalo de x, que se situa entre por x1 e x2
delta_y <- function(x1, x2){
c = summary(log2_mod)$coefficients[1]
b = summary(log2_mod)$coefficients[2]
a = summary(log2_mod)$coefficients[3]
y1 = b*log(x1, base = exp(1)) + a*log(x1, base = exp(1))^2 + c
y2 = b*log(x2, base = exp(1)) + a*log(x2, base = exp(1))^2 + c
return(y2 - y1)
}
delta_y(x1, x2)
# ---- Q7 --------------------------
df <- df %>%
mutate(old = ifelse(age >= 95, 1, 0))
mod_co <- lm(df$medv ~ df$chas + df$old + I(df$chas*df$old))
summary(mod_co)
# ---- Q8 --------------------------
# ---- Q9 --------------------------
mod_io <- lm(df$medv ~ df$indus + df$old + I(df$indus*df$old))
summary(mod_io)
# ---- Q10 -------------------------
|
18243b46cf7017ceb71e8558205e840c470b27b6 | 2d50491917be033214010ed349f0c6ab5d916271 | /R/all_patterns.R | fb74a92829d552adcc5d0b670be1d168ea304187 | [
"MIT"
] | permissive | ZipZaap/missCompare | 82aa38b388c7c2fd2dbfa9be55bc064fd33bd6b1 | 20ceaf2c16f5e62ef195760b77bb1a5d67e1a35b | refs/heads/master | 2022-11-23T01:44:14.335078 | 2020-05-13T15:01:52 | 2020-05-13T15:01:52 | 282,241,971 | 0 | 0 | NOASSERTION | 2020-07-24T14:34:32 | 2020-07-24T14:34:31 | null | UTF-8 | R | false | false | 4,418 | r | all_patterns.R | #' @title Missing data spike-in in various missing data patterns
#'
#' @description
#' \code{\link{all_patterns}} spikes in missingness using MCAR, MAR, MNAR (default) and MAP (optional) patterns
#'
#' @details
#' This function uses the generated simulated matrix and generates missing datapoints in MCAR, MAR and MNAR patterns.
#' Optionally, in case the user defines an assumed pattern, the \code{\link{all_patterns}} function will also generate
#' a MAP missingness pattern. It is suggested that the user carefully
#' examines the missing data fractions, excludes variables with high missingness using the \code{\link{clean}} function.
#' For more information on the functions that spike in missing data in MCAR, MAR,
#' MNAR and MAP patterns, please see the functions \code{\link{MCAR}}, \code{\link{MAR}},
#' \code{\link{MNAR}} and \code{\link{MAP}}.
#'
#'
#' @param X_hat Simulated matrix with no missingness (Simulated_matrix output from the \code{\link{simulate}} function)
#' @param MD_pattern Missing data pattern in the original dataset (MD_Pattern output from the \code{\link{get_data}} function)
#' @param NA_fraction Fraction of missingness in the original dataset (Fraction_missingness output from the \code{\link{get_data}} function)
#' @param min_PDM All patterns with number of observations less than this number will be removed from the missing data generation. This argument is necessary to be carefully set, as the function will fail or generate erroneous missing data patterns with very complicated missing data patterns. The default is 10, but for large datasets this number needs to be set higher to avoid errors. Please select a value based on the min_PDM_thresholds output from the \code{\link{get_data}} function
#' @param assumed_pattern Vector of missingness types (must be same length as missingness fraction per variable). If this input is specified, the function will spike in missing datapoints in a MAP pattern as well.
#'
#' @name all_patterns
#'
#' @return
#' \item{MCAR_matrix}{Matrix with MCAR pre-defined missingness pattern (default output)}
#' \item{MAR_matrix}{Matrix with MAR pre-defined missingness pattern (default output)}
#' \item{MNAR_matrix}{Matrix with MNAR pre-defined missingness pattern (default output)}
#' \item{MAP_matrix}{Matrix with MAP pre-defined missingness pattern (optional output)}
#'
#' @examples
#' cleaned <- clean(clindata_miss, missingness_coding = -9)
#' metadata <- get_data(cleaned)
#' simulated <- simulate(rownum = metadata$Rows, colnum = metadata$Columns,
#' cormat = metadata$Corr_matrix)
#'
#' miss_list <- all_patterns(simulated$Simulated_matrix,
#' MD_pattern = metadata$MD_Pattern,
#' NA_fraction = metadata$Fraction_missingness,
#' min_PDM = 20)
#'
#' miss_list <- all_patterns(simulated$Simulated_matrix,
#' MD_pattern = metadata$MD_Pattern,
#' NA_fraction = metadata$Fraction_missingness,
#' min_PDM = 10,
#' assumed_pattern = c('MAR', 'MCAR', 'MCAR', 'MAR',
#' 'MNAR', 'MCAR', 'MAR', 'MCAR',
#' 'MCAR', 'MAR', 'MNAR'))
#'
#' @export
### FUNCTION
all_patterns <- function(X_hat, MD_pattern, NA_fraction, min_PDM = 10, assumed_pattern = NA) {
MCAR <- MCAR(X_hat, MD_pattern = MD_pattern, NA_fraction = NA_fraction, min_PDM = min_PDM)
MAR <- MAR(X_hat, MD_pattern = MD_pattern, NA_fraction = NA_fraction, min_PDM = min_PDM)
MNAR <- MNAR(X_hat, MD_pattern = MD_pattern, NA_fraction = NA_fraction, min_PDM = min_PDM)
if (!is.na(assumed_pattern[1]) & (length(assumed_pattern) != ncol(X_hat)))
stop(paste("The number of columns in X_hat (", ncol(X_hat), ") and argument assumed_pattern (",
length(assumed_pattern), ") do not match. Please double-check the arguments of the function.",
sep = ""))
if (!is.na(assumed_pattern[1]))
MAP <- MAP(X_hat, MD_pattern = MD_pattern, NA_fraction = NA_fraction, min_PDM = min_PDM,
assumed_pattern = assumed_pattern)
if (!is.na(assumed_pattern[1]))
list(MCAR_matrix = MCAR$MCAR_matrix, MAR_matrix = MAR$MAR_matrix, MNAR_matrix = MNAR$MNAR_matrix,
MAP_matrix = MAP$MAP_matrix) else list(MCAR_matrix = MCAR$MCAR_matrix, MAR_matrix = MAR$MAR_matrix, MNAR_matrix = MNAR$MNAR_matrix)
}
|
39558cbc2510f5c463e0950c493b716ec83fb228 | 2726f8692e64c8725abf51f9ca0fc5b72cfcd312 | /template/ui/sidebar.R | 7edd1da2021e0c8d66c87c147592e204a2edeb15 | [] | no_license | thiyangt/shiny | a5357072035579cda966caa35d69619d752feb62 | b549d536663d563ba58be4bc2647046296b56068 | refs/heads/master | 2020-06-25T09:46:48.040007 | 2019-07-27T19:07:09 | 2019-07-27T19:07:09 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 165 | r | sidebar.R | sidebar <- dashboardSidebar(
# https://fontawesome.com/icons?d=gallery&m=free
sidebarMenu(
menuItem("Home", tabName = "main_tab", icon = icon("home"))
)
)
|
643f82b481bd45ec954ec1420bccd36a8366c93c | a72a6d84757be8d52b6aa688d9105bdb94d2a709 | /scripts/processSection3.R | 23e8e203567eaf70a95274117ce5575dac1db0f2 | [] | no_license | UrbanInstitute/ed-data | ff192c45d9b1b6abf134a8efc958197f30947322 | 6c4f6361d7be9742f41fc6f046799b9b74dde299 | refs/heads/master | 2020-04-06T19:07:44.973267 | 2017-03-24T20:13:46 | 2017-03-24T20:13:46 | 47,789,463 | 8 | 1 | null | null | null | null | UTF-8 | R | false | false | 29,257 | r | processSection3.R |
library(dplyr)
library(tidyr)
library(jsonlite)
library(openxlsx)
source('~/Documents/ed-data/scripts/createJsons.R')
# Path to Excel file with graph metadata - change to your file path
textpath <- "/Users/vhou/Box Sync/COMM/**Project Folders**/College Affordability (Lumina) Project/**Production/"
#textpath <- "/Users/vivhou/Box Sync/COMM/**Project Folders**/College Affordability (Lumina) Project/**Production/"
graphtext <- readWorkbook(paste(textpath, "GraphText.xlsx", sep=""),sheet = 1)
graphtext$section_number <- as.numeric(graphtext$section_number)
graphtext$multiples <- as.numeric(graphtext$multiples)
graphtext$toggle <- as.numeric(graphtext$toggle)
graphtext <- readWorkbook(paste(textpath,"GraphText.xlsx", sep=""), sheet = 1)
graphtext$section_number <- as.numeric(graphtext$section_number)
graphtext$multiples <- as.numeric(graphtext$multiples)
graphtext$toggle <- as.numeric(graphtext$toggle)
fig3_17 <- read.csv(paste(textpath, "Prices and expenses_forgone earnings/03_0170.csv", sep=""),stringsAsFactors=FALSE)
fig3_17 <- as.data.frame(fig3_17)
#Figure 3-1
fig3_1 <- read.csv(paste(textpath, "Prices and expenses_tuition and fees/03_0010.csv", sep=""),stringsAsFactors=FALSE)
json3_1 <- makeJson(sectionn = 3, graphn = 1, dt = fig3_1$amount, graphtype = "bar",
series = "Amount",
categories = fig3_1$category, tickformat = "dollar", rotated = FALSE, directlabels = TRUE)
#Figure 3-2
fig3_2 <- read.csv(paste(textpath, "Prices and expenses_tuition and fees/03_0020-revised.csv", sep=""),stringsAsFactors=FALSE)
fig3_2$year <- gsub("-", "–", fig3_2$year)
json3_2 <- makeJson(sectionn = 3, graphn = 2, dt = fig3_2, graphtype = "line",
series = c("Private nonprofit four-year", "Public four-year", "Public two-year"),
categories = fig3_2$year, tickformat = "percent", rotated = FALSE, directlabels = TRUE)
#Figure 3-3:
fig3_3 <- read.csv(paste(textpath, "Prices and expenses_tuition and fees/03_0030-ALL.csv", sep=""),stringsAsFactors=FALSE, check.names=FALSE)
json3_3 <- makeJson(sectionn = 3, graphn = 3, dt = fig3_3, graphtype = "bar", series=c("Public and private nonprofit four-year combined", "Public four-year", "Private nonprofit four-year"),
set1=fig3_3[,c("Public and private nonprofit four-year combined")], set2=fig3_3[,c("Public four-year")],set3=fig3_3[,c("Private nonprofit four-year")],
categories = fig3_3$category, tickformat = "percent", rotated = TRUE, directlabels = TRUE)
#Figure 3-4
fig3_4a <- read.csv(paste(textpath, "Prices and expenses_tuition and fees/03_0040.csv", sep=""),stringsAsFactors=FALSE)
fig3_4b <- read.csv(paste(textpath, "Prices and expenses_tuition and fees/03_0041.csv", sep=""),stringsAsFactors=FALSE)
json3_4a <- makeJson(sectionn = 3, graphn = 41, dt = fig3_4a$two, graphtype = "bar",
series = "Amount",
categories = fig3_4a$category, tickformat = "dollar", rotated = TRUE, directlabels = TRUE)
json3_4b <- makeJson(sectionn = 3, graphn = 42, dt = fig3_4b$four, graphtype = "bar",
series = "Amount",
categories = fig3_4b$category, tickformat = "dollar", rotated = TRUE, directlabels = TRUE)
#Figure 3-5
fig3_5 <- read.csv(paste(textpath, "Prices and expenses_room and board/03_0050-Revised.csv", sep=""),stringsAsFactors=FALSE)
json3_5 <- makeJson(sectionn = 3, graphn = 5, dt = fig3_5, graphtype = "bar",
series = c("On campus", "Off campus", "Living with parents"),
categories = fig3_5$Sector, tickformat = "percent", rotated = TRUE, directlabels = TRUE)
#Figure 3-6
fig3_6 <- read.csv(paste(textpath, "Prices and expenses_room and board/03_0060-Revised.csv", sep=""),stringsAsFactors=FALSE)
json3_6 <- makeJson(sectionn = 3, graphn = 6, dt = fig3_6$amount, graphtype = "bar", series="Price",
categories = fig3_6$category, tickformat = "dollar", rotated = FALSE, directlabels = TRUE)
#Figure 3-7
fig3_7 <- read.csv(paste(textpath, "Prices and expenses_room and board/03_0070.csv", sep=""),stringsAsFactors=FALSE)
json3_7 <- makeJson(sectionn = 3, graphn = 7, dt = fig3_7$roomamt, graphtype = "bar", series="Average room and board charges",
categories = fig3_7$state, tickformat = "dollar", rotated = TRUE, directlabels = TRUE)
#Figure 3-8
fig3_8 <- read.csv(paste(textpath, "Prices and expenses_room and board/03_0080.csv", sep=""),stringsAsFactors=FALSE)
json3_8 <- makeJson(sectionn = 3, graphn = 8, dt = fig3_8$difference, graphtype = "bar", series="On-campus housing - average rent",
categories = fig3_8$state, tickformat = "dollar", rotated = TRUE, directlabels = TRUE)
#Figure 3-9
fig3_9 <- read.csv(paste(textpath, "Prices and expenses_room and board/03_0090.csv", sep=""),stringsAsFactors=FALSE)
json3_9 <- makeJson(sectionn = 3, graphn = 9, dt = fig3_9, graphtype = "line", series=c("Private nonprofit four-year", "Public four-year"),
categories = fig3_9$year, tickformat = "dollar", rotated = FALSE, directlabels = TRUE)
#Figure 3-10
#graph 1
fig3_101 <- read.csv(paste(textpath, "Prices and expenses_room and board/03_0100.csv", sep=""),stringsAsFactors=FALSE)
json3_101 <- makeJson(sectionn = 3, graphn = 101, dt = fig3_101, graphtype = "bar", series=c("Room and board", "Tuition and fees"),
categories = fig3_101$category, tickformat = "dollar", rotated = TRUE, directlabels = TRUE)
#graph2
#add to y.tick --> "count": 5,
fig3_102<- read.csv(paste(textpath, "Prices and expenses_room and board/03_0101.csv", sep=""),stringsAsFactors=FALSE)
json3_102 <- makeJson(sectionn = 3, graphn = 102, dt = fig3_102, graphtype = "bar", series=c("Room and board", "Tuition and fees"),
categories = fig3_102$category, tickformat = "percent", rotated = TRUE, directlabels = TRUE)
#Figure 3-11
fig3_11 <- read.csv(paste(textpath, "Prices and expenses_student budgets/03_0110.csv", sep=""),stringsAsFactors=FALSE)
json3_11 <- makeJson(sectionn = 3, graphn = 11, dt = fig3_11, graphtype = "bar", series=c("Lowest quintile", "2nd", "3rd", "4th", "Highest quintile"),
categories = fig3_11$category, tickformat = "dollar", rotated = FALSE, directlabels = FALSE)
#Figure 3-12
fig3_12a <- read.csv(paste(textpath, "Prices and expenses_student budgets/03_0120.csv", sep=""),stringsAsFactors=FALSE)
fig3_12a$year <- gsub("-", "–", fig3_12a$year)
fig3_12b <- read.csv(paste(textpath, "Prices and expenses_student budgets/03_0121.csv", sep=""),stringsAsFactors=FALSE)
fig3_12b$year <- gsub("-", "–", fig3_12b$year)
fig3_12c <- read.csv(paste(textpath, "Prices and expenses_student budgets/03_0122.csv", sep=""),stringsAsFactors=FALSE)
fig3_12c$year <- gsub("-", "–", fig3_12c$year)
fig3_12d <- read.csv(paste(textpath, "Prices and expenses_student budgets/03_0123.csv", sep=""),stringsAsFactors=FALSE)
fig3_12d$year <- gsub("-", "–", fig3_12d$year)
json3_12a <- makeJson(sectionn = 3, graphn = 12, subn= 1, dt = fig3_12a, graphtype = "bar",
series = c("Tuition and fees", "Room and board", "Books and supplies", "Transportation", "Other"),
categories = fig3_12a$year, graphtitle="Public two-year, living off campus", tickformat = "dollar", rotated = FALSE, directlabels = TRUE)
json3_12b <- makeJson(sectionn = 3, graphn = 12, subn= 2, dt = fig3_12b, graphtype = "bar",
series = c("Tuition and fees", "Room and board", "Books and supplies", "Transportation", "Other"),
categories = fig3_12b$year, graphtitle="Public four-year in-state, living on campus", tickformat = "dollar", rotated = FALSE, directlabels = TRUE)
json3_12c <- makeJson(sectionn = 3, graphn = 12, subn= 3, dt = fig3_12c, graphtype = "bar",
series = c("Tuition and fees", "Room and board", "Books and supplies", "Transportation", "Other"),
categories = fig3_12c$year, graphtitle="Private nonprofit four-year, living on campus", tickformat = "dollar", rotated = FALSE, directlabels = TRUE)
json3_12d <- makeJson(sectionn = 3, graphn = 12, subn= 4, dt = fig3_12d, graphtype = "bar",
series = c("Tuition and fees", "Room and board", "Books and supplies", "Transportation", "Other"),
categories = fig3_12d$year, graphtitle="For-profit, living off campus", tickformat = "dollar", rotated = FALSE, directlabels = TRUE)
#Figure 3-13
fig3_13 <- read.csv(paste(textpath, "Prices and expenses_student budgets/03_0130.csv", sep=""),stringsAsFactors=FALSE)
json3_13 <- makeJson(sectionn = 3, graphn = 13, dt = fig3_13, graphtype = "bar", series=c("2001", "2016"),
categories = fig3_14$X, tickformat = "percent", rotated = FALSE, directlabels = TRUE)
#Figure 3-14
#need to add: "line": {"connectNull": true} in JSON
fig3_14 <- read.csv(paste(textpath, "Prices and expenses_student budgets/03_0140.csv", sep=""),stringsAsFactors=FALSE)
fig3_14$X <- gsub("-", "–", fig3_14$X)
json3_14 <- makeJson(sectionn = 3, graphn = 14, dt = fig3_14$course.material.spending, graphtype = "line", series="Annual spending",
categories = fig3_14$X, tickformat = "dollar", rotated = FALSE, directlabels = TRUE)
#Figure 3-15
fig3_15 <- read.csv(paste(textpath, "Prices and expenses_student budgets/03_0150.csv", sep=""),stringsAsFactors=FALSE)
fig3_15$X <- gsub("-", "–", fig3_15$X)
json3_15 <- makeJson(sectionn = 3, graphn = 15, dt = fig3_15, graphtype = "line", series=c("New", "Used"),
categories = fig3_15$X, tickformat = "dollar", rotated = FALSE, directlabels = TRUE)
#Figure 3-16
fig3_16 <- read.csv(paste(textpath, "Prices and expenses_student budgets/03_0160.csv", sep=""),stringsAsFactors=FALSE)
json3_16 <- makeJson(sectionn = 3, graphn = 16, dt = fig3_16, graphtype = "line", series=c("Public two-year", "Public four-year", "Private nonprofit", "For profit"),
categories = fig3_16$X, tickformat = "dollar", rotated = FALSE, directlabels = TRUE)
#Figure 3-17
fig3_17 <- read.csv(paste(textpath, "Prices and expenses_forgone earnings/03_0170.csv", sep=""),stringsAsFactors=FALSE)
fig3_17$age <- gsub("-", "–", fig3_17$age)
json3_17 <- makeJson(sectionn = 3, graphn = 17, dt = fig3_17, graphtype = "bar", xlabel="Age groups",
series = c("Did not work last year", "Part year or part time", "Full year full time"),
categories = fig3_17$age, tickformat = "percent", rotated = TRUE, directlabels = TRUE)
#Figure 3-18:
fig3_18a <- read.csv(paste(textpath, "Prices and expenses_forgone earnings/03_01801.csv", sep=""),stringsAsFactors=FALSE)
fig3_18a$age <- gsub("-", "–", fig3_18a$age)
fig3_18b <- read.csv(paste(textpath, "Prices and expenses_forgone earnings/03_01802.csv", sep=""),stringsAsFactors=FALSE)
fig3_18b$age <- gsub("-", "–", fig3_18b$age)
fig3_18c <- read.csv(paste(textpath, "Prices and expenses_forgone earnings/03_01803.csv", sep=""),stringsAsFactors=FALSE)
fig3_18d <- read.csv(paste(textpath, "Prices and expenses_forgone earnings/03_01804.csv", sep=""),stringsAsFactors=FALSE)
fig3_18e <- read.csv(paste(textpath, "Prices and expenses_forgone earnings/03_01805.csv", sep=""),stringsAsFactors=FALSE)
fig3_18f <- read.csv(paste(textpath, "Prices and expenses_forgone earnings/03_01806.csv", sep=""),stringsAsFactors=FALSE)
#First set
json3_18a <- makeJson(sectionn = 3, graphn = 181, subn=1, dt = fig3_18a, graphtype = "bar",
series = c("25th percentile and below", "25th percentile to Median", "Median to 75th percentile"),
categories = fig3_18a$age, graphtitle="Earnings of men by age", tickformat = "dollar", rotated = TRUE, directlabels = FALSE)
json3_18b <- makeJson(sectionn = 3, graphn = 181, subn=2, dt = fig3_18b, graphtype = "bar",
series = c("25th percentile and below", "25th percentile to Median", "Median to 75th percentile"),
categories = fig3_18b$age, graphtitle="Earnings of women by age", tickformat = "dollar", rotated = TRUE, directlabels = FALSE)
#Second set
json3_18c <- makeJson(sectionn = 3, graphn = 182, subn= 1, dt = fig3_18c, graphtype = "bar",
series = c("25th percentile and below", "25th percentile to Median", "Median to 75th percentile"),
categories = fig3_18c$race, graphtitle="Earnings of men by race", tickformat = "dollar", rotated = TRUE, directlabels = FALSE)
json3_18d <- makeJson(sectionn = 3, graphn = 182, subn= 2, dt = fig3_18d, graphtype = "bar",
series = c("25th percentile and below", "25th percentile to Median", "Median to 75th percentile"),
categories = fig3_18d$race, graphtitle="Earnings of women by race", tickformat = "dollar", rotated = TRUE, directlabels = FALSE)
#Third set
json3_18e <- makeJson(sectionn = 3, graphn = 183, subn= 1, dt = fig3_18e, graphtype = "bar",
series = c("25th percentile and below", "25th percentile to Median", "Median to 75th percentile"),
categories = fig3_18e$race, graphtitle="Earnings of men by race", tickformat = "dollar", rotated = TRUE, directlabels = FALSE)
json3_18f <- makeJson(sectionn = 3, graphn = 183, subn= 2, dt = fig3_18f, graphtype = "bar",
series = c("25th percentile and below", "25th percentile to Median", "Median to 75th percentile"),
categories = fig3_18f$race, graphtitle="Earnings of women by race", tickformat = "dollar", rotated = TRUE, directlabels = FALSE)
#Figure 3-19
#MEN
fig3_19 <- read.csv(paste(textpath, "Prices and expenses_forgone earnings/03_0190.csv", sep=""),stringsAsFactors=FALSE, check.names =FALSE)
json3_19<- makeJson(sectionn = 3, graphn = 191, dt = fig3_19, graphtype = "line", set1= fig3_19[grep("18", fig3_19$category), c("Median", "25th percentile", "75th percentile")], set2= fig3_19[grep("24", fig3_19$category), c("Median", "25th percentile", "75th percentile")],
series = c("Ages 18–23", "Ages 24–34"),
categories = fig3_19$category_label, tickformat = "$s", rotated = FALSE, directlabels = TRUE)
#WOMEN
fig3_19b <- read.csv(paste(textpath, "Prices and expenses_forgone earnings/03_0191.csv", sep=""),stringsAsFactors=FALSE, check.names=FALSE)
json3_19b<- makeJson(sectionn = 3, graphn = 192, dt = fig3_19b, graphtype = "line", set1= fig3_19b[grep("18", fig3_19b$category), c("Median", "25th percentile", "75th percentile")], set2= fig3_19b[grep("24", fig3_19b$category), c("Median", "25th percentile", "75th percentile")],
series = c("Ages 18–23", "Ages 24–34"),
categories = fig3_19b$category_label, tickformat = "$s", rotated = FALSE, directlabels = TRUE)
#Figure 3-20
fig3_20a <- read.csv(paste(textpath, "Prices and expenses_net price/correct csvs/03_0200.csv", sep=""),stringsAsFactors=FALSE)
fig3_20b <- read.csv(paste(textpath, "Prices and expenses_net price/correct csvs/03_0201.csv", sep=""),stringsAsFactors=FALSE)
json3_20a <- makeJson(sectionn = 3, graphn = 201, dt = fig3_20a, graphtype = "bar",
series = c("Grant Aid", "Tuition and fees left over"),
categories = fig3_20a$category, tickformat = "dollar", rotated = TRUE, directlabels = TRUE)
json3_20b <- makeJson(sectionn = 3, graphn = 202, dt = fig3_20b, graphtype = "bar",
series = c("Grant Aid", "Tuition and fees left over"),
categories = fig3_20b$category, tickformat = "percent", rotated = TRUE, directlabels = TRUE)
#Figure 3-21
fig3_21a <- read.csv(paste(textpath, "Prices and expenses_net price/correct csvs/03_02101.csv", sep=""),stringsAsFactors=FALSE)
fig3_21b <- read.csv(paste(textpath, "Prices and expenses_net price/correct csvs/03_02102.csv", sep=""),stringsAsFactors=FALSE)
fig3_21c <- read.csv(paste(textpath, "Prices and expenses_net price/correct csvs/03_02103.csv", sep=""),stringsAsFactors=FALSE)
fig3_21d <- read.csv(paste(textpath, "Prices and expenses_net price/correct csvs/03_02104.csv", sep=""),stringsAsFactors=FALSE)
json3_21a <- makeJson(sectionn = 3, graphn = 21, subn= 1, dt = fig3_21a, graphtype = "bar",
xlabel = "Parental income quartile" series = c("Tuition and fees covered by grant aid", "Remaining (net) tuition and fees", "Living expenses covered by grant aid", "Remaining (net) living expenses"),
categories = fig3_21a$Income, graphtitle="Public four-year", tickformat = "dollar", rotated = FALSE, directlabels = TRUE)
json3_21b <- makeJson(sectionn = 3, graphn = 21, subn= 2, dt = fig3_21b, graphtype = "bar",
xlabel = "Parental income quartile" series = c("Tuition and fees covered by grant aid", "Remaining (net) tuition and fees", "Living expenses covered by grant aid", "Remaining (net) living expenses"),
categories = fig3_21b$Income, graphtitle="Private nonprofit four-year", tickformat = "dollar", rotated = FALSE, directlabels = TRUE)
json3_21c <- makeJson(sectionn = 3, graphn = 21, subn= 3, dt = fig3_21c, graphtype = "bar",
xlabel = "Parental income quartile" series = c("Tuition and fees covered by grant aid", "Remaining (net) tuition and fees", "Living expenses covered by grant aid", "Remaining (net) living expenses"),
categories = fig3_21c$Income, graphtitle="Public two-year", tickformat = "dollar", rotated = FALSE, directlabels = TRUE)
json3_21d <- makeJson(sectionn = 3, graphn = 21, subn= 4, dt = fig3_21d, graphtype = "bar",
xlabel = "Parental income quartile" series = c("Tuition and fees covered by grant aid", "Remaining (net) tuition and fees", "Living expenses covered by grant aid", "Remaining (net) living expenses"),
categories = fig3_21d$Income, graphtitle="For-profit", tickformat = "dollar", rotated = FALSE, directlabels = TRUE)
# Figure 3-22: net price components over time, one panel per parental income
# quartile (plus an "Independent" panel). makeJson() and textpath are defined
# earlier in this script; subn encodes the panel number.
#Figure 3-22
fig3_22a <- read.csv(paste(textpath, "Prices and expenses_net price/correct csvs/03_02201.csv", sep=""),stringsAsFactors=FALSE)
fig3_22b <- read.csv(paste(textpath, "Prices and expenses_net price/correct csvs/03_02202.csv", sep=""),stringsAsFactors=FALSE)
fig3_22c <- read.csv(paste(textpath, "Prices and expenses_net price/correct csvs/03_02203.csv", sep=""),stringsAsFactors=FALSE)
fig3_22d <- read.csv(paste(textpath, "Prices and expenses_net price/correct csvs/03_02204.csv", sep=""),stringsAsFactors=FALSE)
fig3_22e <- read.csv(paste(textpath, "Prices and expenses_net price/correct csvs/03_02205.csv", sep=""),stringsAsFactors=FALSE)
json3_22a <- makeJson(sectionn = 3, graphn = 22, subn= 1, dt = fig3_22a, graphtype = "bar",
                      series = c("Tuition and fees covered by grant aid", "Remaining (net) tuition and fees", "Living expenses covered by grant aid", "Remaining (net) living expenses"),
                      categories = fig3_22a$Year, graphtitle="Lowest income quartile", tickformat = "dollar", rotated = FALSE, directlabels = TRUE)
json3_22b <- makeJson(sectionn = 3, graphn = 22, subn= 2, dt = fig3_22b, graphtype = "bar",
                      series = c("Tuition and fees covered by grant aid", "Remaining (net) tuition and fees", "Living expenses covered by grant aid", "Remaining (net) living expenses"),
                      categories = fig3_22b$Year, graphtitle="Second income quartile", tickformat = "dollar", rotated = FALSE, directlabels = TRUE)
json3_22c <- makeJson(sectionn = 3, graphn = 22, subn= 3, dt = fig3_22c, graphtype = "bar",
                      series = c("Tuition and fees covered by grant aid", "Remaining (net) tuition and fees", "Living expenses covered by grant aid", "Remaining (net) living expenses"),
                      categories = fig3_22c$Year, graphtitle="Third income quartile", tickformat = "dollar", rotated = FALSE, directlabels = TRUE)
json3_22d <- makeJson(sectionn = 3, graphn = 22, subn= 4, dt = fig3_22d, graphtype = "bar",
                      series = c("Tuition and fees covered by grant aid", "Remaining (net) tuition and fees", "Living expenses covered by grant aid", "Remaining (net) living expenses"),
                      categories = fig3_22d$Year, graphtitle="Highest income quartile", tickformat = "dollar", rotated = FALSE, directlabels = TRUE)
json3_22e <- makeJson(sectionn = 3, graphn = 22, subn= 5, dt = fig3_22e, graphtype = "bar",
                      series = c("Tuition and fees covered by grant aid", "Remaining (net) tuition and fees", "Living expenses covered by grant aid", "Remaining (net) living expenses"),
                      categories = fig3_22e$Year, graphtitle="Independent", tickformat = "dollar", rotated = FALSE, directlabels = TRUE)
# Figure 3-22, first alternate grouping (subn 11-15): same chart per panel but
# the "21x" CSV series; panel titles use "25 percent" banding instead of quartiles.
# NOTE(review): this whole section is four near-identical copies of one pattern;
# it could be generated in a loop over the CSV suffix and title vectors.
fig3_221a <- read.csv(paste(textpath, "Prices and expenses_net price/correct csvs/03_02211.csv", sep=""),stringsAsFactors=FALSE)
fig3_221b <- read.csv(paste(textpath, "Prices and expenses_net price/correct csvs/03_02212.csv", sep=""),stringsAsFactors=FALSE)
fig3_221c <- read.csv(paste(textpath, "Prices and expenses_net price/correct csvs/03_02213.csv", sep=""),stringsAsFactors=FALSE)
fig3_221d <- read.csv(paste(textpath, "Prices and expenses_net price/correct csvs/03_02214.csv", sep=""),stringsAsFactors=FALSE)
fig3_221e <- read.csv(paste(textpath, "Prices and expenses_net price/correct csvs/03_02215.csv", sep=""),stringsAsFactors=FALSE)
json3_221a <- makeJson(sectionn = 3, graphn = 22, subn= 11, dt = fig3_221a, graphtype = "bar",
                       series = c("Tuition and fees covered by grant aid", "Remaining (net) tuition and fees", "Living expenses covered by grant aid", "Remaining (net) living expenses"),
                       categories = fig3_221a$Year, graphtitle="Lowest 25 percent", tickformat = "dollar", rotated = FALSE, directlabels = TRUE)
json3_221b <- makeJson(sectionn = 3, graphn = 22, subn= 12, dt = fig3_221b, graphtype = "bar",
                       series = c("Tuition and fees covered by grant aid", "Remaining (net) tuition and fees", "Living expenses covered by grant aid", "Remaining (net) living expenses"),
                       categories = fig3_221b$Year, graphtitle="Lower middle 25 percent", tickformat = "dollar", rotated = FALSE, directlabels = TRUE)
json3_221c <- makeJson(sectionn = 3, graphn = 22, subn= 13, dt = fig3_221c, graphtype = "bar",
                       series = c("Tuition and fees covered by grant aid", "Remaining (net) tuition and fees", "Living expenses covered by grant aid", "Remaining (net) living expenses"),
                       categories = fig3_221c$Year, graphtitle="Upper middle 25 percent", tickformat = "dollar", rotated = FALSE, directlabels = TRUE)
json3_221d <- makeJson(sectionn = 3, graphn = 22, subn= 14, dt = fig3_221d, graphtype = "bar",
                       series = c("Tuition and fees covered by grant aid", "Remaining (net) tuition and fees", "Living expenses covered by grant aid", "Remaining (net) living expenses"),
                       categories = fig3_221d$Year, graphtitle="Highest 25 percent", tickformat = "dollar", rotated = FALSE, directlabels = TRUE)
json3_221e <- makeJson(sectionn = 3, graphn = 22, subn= 15, dt = fig3_221e, graphtype = "bar",
                       series = c("Tuition and fees covered by grant aid", "Remaining (net) tuition and fees", "Living expenses covered by grant aid", "Remaining (net) living expenses"),
                       categories = fig3_221e$Year, graphtitle="Independent", tickformat = "dollar", rotated = FALSE, directlabels = TRUE)
# Figure 3-22, second and third alternate groupings (subn 21-25 and 31-35):
# the same five-panel layout driven by the "22x" and "23x" CSV series.
# NOTE(review): repeated pattern — a candidate for a loop over (suffix, subn, title).
fig3_222a <- read.csv(paste(textpath, "Prices and expenses_net price/correct csvs/03_02221.csv", sep=""),stringsAsFactors=FALSE)
fig3_222b <- read.csv(paste(textpath, "Prices and expenses_net price/correct csvs/03_02222.csv", sep=""),stringsAsFactors=FALSE)
fig3_222c <- read.csv(paste(textpath, "Prices and expenses_net price/correct csvs/03_02223.csv", sep=""),stringsAsFactors=FALSE)
fig3_222d <- read.csv(paste(textpath, "Prices and expenses_net price/correct csvs/03_02224.csv", sep=""),stringsAsFactors=FALSE)
fig3_222e <- read.csv(paste(textpath, "Prices and expenses_net price/correct csvs/03_02225.csv", sep=""),stringsAsFactors=FALSE)
json3_222a <- makeJson(sectionn = 3, graphn = 22, subn= 21, dt = fig3_222a, graphtype = "bar",
                       series = c("Tuition and fees covered by grant aid", "Remaining (net) tuition and fees", "Living expenses covered by grant aid", "Remaining (net) living expenses"),
                       categories = fig3_222a$Year, graphtitle="Lowest 25 percent", tickformat = "dollar", rotated = FALSE, directlabels = TRUE)
json3_222b <- makeJson(sectionn = 3, graphn = 22, subn= 22, dt = fig3_222b, graphtype = "bar",
                       series = c("Tuition and fees covered by grant aid", "Remaining (net) tuition and fees", "Living expenses covered by grant aid", "Remaining (net) living expenses"),
                       categories = fig3_222b$Year, graphtitle="Lower middle 25 percent", tickformat = "dollar", rotated = FALSE, directlabels = TRUE)
json3_222c <- makeJson(sectionn = 3, graphn = 22, subn= 23, dt = fig3_222c, graphtype = "bar",
                       series = c("Tuition and fees covered by grant aid", "Remaining (net) tuition and fees", "Living expenses covered by grant aid", "Remaining (net) living expenses"),
                       categories = fig3_222c$Year, graphtitle="Upper middle 25 percent", tickformat = "dollar", rotated = FALSE, directlabels = TRUE)
json3_222d <- makeJson(sectionn = 3, graphn = 22, subn= 24, dt = fig3_222d, graphtype = "bar",
                       series = c("Tuition and fees covered by grant aid", "Remaining (net) tuition and fees", "Living expenses covered by grant aid", "Remaining (net) living expenses"),
                       categories = fig3_222d$Year, graphtitle="Highest 25 percent", tickformat = "dollar", rotated = FALSE, directlabels = TRUE)
json3_222e <- makeJson(sectionn = 3, graphn = 22, subn= 25, dt = fig3_222e, graphtype = "bar",
                       series = c("Tuition and fees covered by grant aid", "Remaining (net) tuition and fees", "Living expenses covered by grant aid", "Remaining (net) living expenses"),
                       categories = fig3_222e$Year, graphtitle="Independent", tickformat = "dollar", rotated = FALSE, directlabels = TRUE)
fig3_223a <- read.csv(paste(textpath, "Prices and expenses_net price/correct csvs/03_02231.csv", sep=""),stringsAsFactors=FALSE)
fig3_223b <- read.csv(paste(textpath, "Prices and expenses_net price/correct csvs/03_02232.csv", sep=""),stringsAsFactors=FALSE)
fig3_223c <- read.csv(paste(textpath, "Prices and expenses_net price/correct csvs/03_02233.csv", sep=""),stringsAsFactors=FALSE)
fig3_223d <- read.csv(paste(textpath, "Prices and expenses_net price/correct csvs/03_02234.csv", sep=""),stringsAsFactors=FALSE)
fig3_223e <- read.csv(paste(textpath, "Prices and expenses_net price/correct csvs/03_02235.csv", sep=""),stringsAsFactors=FALSE)
json3_223a <- makeJson(sectionn = 3, graphn = 22, subn= 31, dt = fig3_223a, graphtype = "bar",
                       series = c("Tuition and fees covered by grant aid", "Remaining (net) tuition and fees", "Living expenses covered by grant aid", "Remaining (net) living expenses"),
                       categories = fig3_223a$Year, graphtitle="Lowest 25 percent", tickformat = "dollar", rotated = FALSE, directlabels = TRUE)
json3_223b <- makeJson(sectionn = 3, graphn = 22, subn= 32, dt = fig3_223b, graphtype = "bar",
                       series = c("Tuition and fees covered by grant aid", "Remaining (net) tuition and fees", "Living expenses covered by grant aid", "Remaining (net) living expenses"),
                       categories = fig3_223b$Year, graphtitle="Lower Middle 25 percent", tickformat = "dollar", rotated = FALSE, directlabels = TRUE)
json3_223c <- makeJson(sectionn = 3, graphn = 22, subn= 33, dt = fig3_223c, graphtype = "bar",
                       series = c("Tuition and fees covered by grant aid", "Remaining (net) tuition and fees", "Living expenses covered by grant aid", "Remaining (net) living expenses"),
                       categories = fig3_223c$Year, graphtitle="Upper Middle 25 percent", tickformat = "dollar", rotated = FALSE, directlabels = TRUE)
json3_223d <- makeJson(sectionn = 3, graphn = 22, subn= 34, dt = fig3_223d, graphtype = "bar",
                       series = c("Tuition and fees covered by grant aid", "Remaining (net) tuition and fees", "Living expenses covered by grant aid", "Remaining (net) living expenses"),
                       categories = fig3_223d$Year, graphtitle="Highest 25 percent", tickformat = "dollar", rotated = FALSE, directlabels = TRUE)
json3_223e <- makeJson(sectionn = 3, graphn = 22, subn= 35, dt = fig3_223e, graphtype = "bar",
                       series = c("Tuition and fees covered by grant aid", "Remaining (net) tuition and fees", "Living expenses covered by grant aid", "Remaining (net) living expenses"),
                       categories = fig3_223e$Year, graphtitle="Independent", tickformat = "dollar", rotated = FALSE, directlabels = TRUE)
|
16d4b2741ecd01cd6f7cfb35e495b8aed60da5a5 | 5d9ddeaded233e601418f036a1b79728b980a2d5 | /BA Plot function.R | 62e1f36d125a6ce2c2a1c643bb44f2ac620943aa | [] | no_license | nanibyrozu/BA_Plot | 8ae4fc853dee89b16d41eea6e395e5101d10eacd | 6da80b03ae480bc8966b32cd6524cba9a21dde54 | refs/heads/main | 2023-02-15T08:51:02.941327 | 2020-12-30T06:34:11 | 2020-12-30T06:34:11 | 325,473,813 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 2,738 | r | BA Plot function.R | library(ggplot2)
library(BlandAltmanLeh)
#####################NANI BA Plot function################################
# Bland-Altman agreement plots for two paired measurement vectors.
#
# Arguments:
#   datax, datay - numeric vectors of paired measurements (same length).
#
# Side effects: draws a 2x2 panel of plots on the current graphics device:
#   (1) classic BA plot (difference vs. average) with mean difference and
#       mean +/- 1.96*SD limits of agreement,
#   (2) BA plot with the difference expressed as a percentage of the average,
#   (3) non-parametric BA plot using the median and the 5th/95th percentiles,
#   (4) BA plot on the log scale, with limits back-transformed via exp() in the labels.
# Returns nothing useful (the value of the last text() call).
#
# NOTE(review): axis limits (ylim) and label positions (x=16, x=2.8, y offsets)
# are hard-coded for a particular data scale — TODO confirm they suit new data.
baplot<-function(datax, datay){
  # 2x2 panel layout for the four agreement plots
  par(mfrow=c(2,2))
  # average and difference of the paired measurements
  bmean<-(datax + datay)/2.0
  dif<-datax-datay
  # mean difference and 95% limits of agreement (mean +/- 1.96 SD)
  m<-mean(dif)
  s<-sd(dif)
  l=m-1.96*s
  u=m+1.96*s
  line=c(l,m,u)
  plot(bmean, dif, ylim =c(-5.0, 5.0), pch=18, main="BA plot", xlab="Average", ylab="Difference")
  # dashed red lines = lower/upper limits, solid blue = mean difference
  abline(h = line, lty=c(2,1,2), col=c("Red","blue","Red"),
         lwd=c(3,2,3))
  abline(h=0,lty=3,col=c("black"))
  text(x=16, y=0.5+line[1],label=paste("LL", round(line[1], digits=3)))
  text(x=16, y=0.5+line[2],label=paste("CL", round(line[2], digits=3)))
  text(x=16, y=0.5+line[3],label=paste("UL", round(line[3], digits=3)))
  # second panel: difference as a percentage of the pairwise average
  difpc<-((datax- datay) / bmean) * 100
  dm<- mean(difpc)
  dsd<-sd(difpc)
  ll<-dm-1.96*dsd
  ul<-dm+1.96*dsd
  # NOTE(review): the local name `lines` shadows graphics::lines() inside this function
  lines<-c(ll,dm, ul)
  plot(bmean, difpc, ylim =c(-50, 50), pch=18, main="BA plot with differnce as % of average", xlab="Average", ylab="Difference (% of average)")
  abline(h = lines, lty=c(2,1,2), col=c("Red","blue","Red"),
         lwd=c(3,2,3))
  abline(h=0,lty=3,col=c("black"))
  text(x=16, y=2.5+lines[1],label=paste("LL", round(lines[1], digits=3)))
  text(x=16, y=2.5+lines[2],label=paste("CL", round(lines[2], digits=3)))
  text(x=16, y=2.5+lines[3],label=paste("UL", round(lines[3], digits=3)))
  # third panel: non-parametric limits (median and 5th/95th percentiles of the differences)
  dmd<-median(dif)
  d5<-quantile(dif,0.05)
  d95<-quantile(dif,0.95)
  linesmd<-c(d5, dmd, d95)
  plot(bmean, dif, ylim =c(-5.0, 5.0), pch=18, main=" Non Parametric-BA plot", xlab="Average", ylab="Difference")
  abline(h = linesmd, lty=c(2,1,2), col=c("Red","blue","Red"),
         lwd=c(3,2,3))
  abline(h=0,lty=3,col=c("black"))
  text(x=16, y=0.5+linesmd[1],label=paste("LL", round(linesmd[1], digits=3)))
  text(x=16, y=0.5+linesmd[2],label=paste("Median", round(linesmd[2], digits=3)))
  text(x=16, y=0.5+linesmd[3],label=paste("UL", round(linesmd[3], digits=3)))
  # fourth panel: BA plot on the log scale (assumes strictly positive data;
  # NOTE(review): log() of zero/negative inputs would produce -Inf/NaN here)
  dataxl<-log(datax)
  datayl<-log(datay)
  lmean<-(dataxl + datayl)/2.0
  ldif<-dataxl-datayl
  lgm<-mean(ldif)
  lgs<-sd(ldif)
  lgl=lgm-1.96*lgs
  lgu=lgm+1.96*lgs
  lgline=c(lgl,lgm,lgu)
  plot(lmean, ldif, ylim =c(-1.0, 1.0), pch=18, main="Logarithmic BA plot", xlab="log Average", ylab="log Difference")
  abline(h = lgline, lty=c(2,1,2), col=c("Red","blue","Red"),
         lwd=c(3,2,3))
  abline(h=0,lty=3,col=c("black"))
  # limits are exponentiated so the labels report ratios on the original scale
  text(x=2.8, y=0.05+lgline[1],label=paste("LL", round(exp(lgline[1]), digits=3)))
  text(x=2.8, y=0.05+lgline[2],label=paste("CL", round(exp(lgline[2]), digits=3)))
  text(x=2.8, y=0.05+lgline[3],label=paste("UL", round(exp(lgline[3]), digits=3)))
}
################################## |
5d511077b485356624377a77d9eab12ca3240bc3 | 028c8cd561dbc8d7e5f17940c12d86a8fe594836 | /Markov_dyn.R | 3f56289cc217a4f340014946012106e531d34cda | [] | no_license | enderska/Markov-Regime-Switching-Model | 657a3f03a020ecbcc46cb1414523bfcf8bc5152b | 3366f283c8bfd24c93c6782c0af203ae85cf0559 | refs/heads/master | 2020-05-05T04:35:45.651817 | 2019-04-05T16:37:34 | 2019-04-05T16:37:34 | 179,717,443 | 1 | 1 | null | null | null | null | UTF-8 | R | false | false | 4,070 | r | Markov_dyn.R | # Markov Regime Switchin Modelling
# import libraries
library(tseries)
library(MSwM)
library(forecast)
# set working directory
setwd ("/Users/katharinaenders/Documents")
#--------------------------------------Forecasting insample--------------------------------
#-----------------------------------------------------------------------------------------
#create object ifo
#ifo <- read.csv2(file="ifo.csv",header=T)
#ifo <- ts(as.numeric(ifo[,2]), start=c(1991,1), frequency = 12)
#plot(ifo, type="l", col="darkblue")
# Read the Ifo Business Climate index (monthly, 1991:1 - 2017:7) and convert to ts.
ifo <- read.csv2(file="ifo_index.csv",header=T) # ifo object as data.frame
ifo <- ts(as.numeric(ifo[,2]), start=c(1991,1), end=c(2017,7), frequency=12) #ifo as ts
# NOTE(review): after the previous line `ifo` is already a univariate ts, so the
# next line's `ifo[,2]` would raise "incorrect number of dimensions"; it also
# strips the ts class that the later window() calls rely on. It appears to be a
# leftover from an earlier version — verify before running top to bottom.
ifo <- as.numeric(as.matrix(ifo[,2]))
summary(ifo) #descriptive stat
ifo
plot(ifo, main="Ifo Business Climate Index", type="l", col="blue", sub="1991:1 - 2017:7", ylab="Ifo (2005 = 100)")
#Model Lags
# First lag of the series (shift by one observation, pad with NA).
ifo_1 <- c(NA, ifo[-length(ifo)])
#ifo_2 <- c(NA, ifo_1[-length(ifo_1)])
#ifo_3 <- c(NA, ifo_2[-length(ifo_2)])
#ifo_4 <- c(NA, ifo_3[-length(ifo_3)])
#model with 1 Lag
ifodata <- na.omit(cbind(ifo,ifo_1))
#LModel
# Plain AR(1) via OLS; this is the base model handed to msmFit below.
model=lm(ifo ~ ., data=as.data.frame(ifodata))
summary (model)
#MSWM Model
# Two-regime Markov-switching fit; sw=rep(TRUE, 3) lets intercept, AR(1)
# coefficient and residual variance all switch between regimes.
MSmodel = msmFit(model, k=2, sw=rep(TRUE, 3))
summary(MSmodel)
#Run commands for graphical representation of MS:
#plotProb(MSmodel, which =1)
plotProb(MSmodel, which =2) #'Regime 1 Detection'
plotProb(MSmodel, which =3) #'Regime 2 Detection'
plotReg(MSmodel, regime = 2) #'Regime 1&2 Detection'
### Residual Analysis
# To get diagnostic tests for regime 1, run:
#plotDiag(MSmodel, regime=1, which=1)
#plotDiag(MSmodel, regime=1, which=2)
#plotDiag(MSmodel, regime=1, which=3)
# To get diagnostic tests for regime 2, run:
#plotDiag(MSmodel, regime=2, which=1)
#plotDiag(MSmodel, regime=2, which=2)
#plotDiag(MSmodel, regime=2, which=3)
#-----------------------------------Forecasting outsample -------------------------
#---------------------------------------------------------------------------------
# Rolling one-step-ahead out-of-sample forecast: at each step, refit the
# 2-regime MS-AR(1) on the (growing) in-sample window, forecast the next month
# from each regime's AR(1) equation, and combine the two forecasts using the
# smoothed probability of regime 1 at the last in-sample observation. The
# forecast is then appended to the window, so 33 pseudo-out-of-sample values
# (2014:11 - 2017:7) are generated recursively.
# The initial in-sample window (1991:1 - 2014:10) holds 286 observations.
In.sample <- window(ifo, end=c(2014, 10))
Out.sample <- window(ifo, start=c(2014, 11))
i <- 0  # iteration counter
while (i < 33) {
  # lag-1 regressor on the current in-sample window
  ifo_1.In.sample <- c(NA, In.sample[-length(In.sample)])
  ifodata.sample <- na.omit(cbind(In.sample, ifo_1.In.sample))
  # AR(1) via OLS, then the 2-regime Markov-switching refit (all 3 parameters switch)
  model.sample <- lm(In.sample ~ ., data=as.data.frame(ifodata.sample))
  MSmodel.sample <- msmFit(model.sample, k=2, sw=rep(TRUE, 3))
  # Regime coefficient matrix: one row per regime;
  # column 1 = intercept (m), column 2 = AR(1) slope (b).
  In.sample.intercept <- MSmodel.sample@Coef
  R1.m <- In.sample.intercept[1,1]
  R2.m <- In.sample.intercept[2,1]
  R1.b <- In.sample.intercept[1,2]
  R2.b <- In.sample.intercept[2,2]
  #extract transition probabilities values
  #In.sample.trans.prob <- MSmodel.sample@transMat
  # Regime-specific forecasts Y(t+1) = intercept + slope * Y(t).
  # BUG FIX: the original computed (Y.t*R1.b)+R1.b and (Y.t*R2.b)+R2.b, i.e. it
  # added the slope a second time instead of the intercept extracted above.
  Y.t <- In.sample[length(In.sample)]  # last in-sample value (was hard-coded 286+i)
  Y.t_1.s1 <- (Y.t*R1.b)+R1.m #in state 1
  Y.t_1.s2 <- (Y.t*R2.b)+R2.m #in state 2
  # Smoothed probability of regime 1 at the final in-sample observation is used
  # as the mixing weight between the two regime forecasts.
  smoProb <- ts(MSmodel.sample@Fit@smoProb[,1])
  yt.smoProb <- as.numeric(smoProb[length(smoProb)])  # last element (was hard-coded 286+i)
  Y.t_1.w <- (Y.t_1.s1*yt.smoProb)+(Y.t_1.s2*(1-yt.smoProb))
  # Append the combined forecast so it enters the next iteration's window.
  In.sample <- append(In.sample, as.numeric(Y.t_1.w), after = length(In.sample))
  i <- i + 1
}
# Rebuild the (original + forecast) sequence as a monthly ts spanning the full
# sample period, then compare it against the observed series.
forcast.row <- ts(In.sample, start=c(1991,1), end=c(2017,7), frequency=12)
data.row <- ifo
#plot forecast and ifo data
plot(forcast.row)
# Overlay forecast (red) and observed Ifo series (dark blue); seqplot.ts is from tseries.
seqplot.ts(forcast.row, main='Forecast 3', colx="red", coly="darkblue", data.row)
#Quality Test forecast vs ifo data
# Forecast accuracy measures (ME, RMSE, MAE, ...) from the forecast package.
accuracy(forcast.row, data.row, test=NULL, d=NULL, D=NULL)
|
# Priority heuristic (Brandstaetter, Gigerenzer & Hertwig, 2006) for a binary
# choice between two gambles, with a constant implementation-error rate.
#
# Arguments:
#   prospect - list of two gambles; each gamble is a list with components
#              $x (outcomes) and $p (their probabilities, same length as $x).
#   alpha    - error rate: the gamble favoured by the heuristic is chosen with
#              probability 1 - alpha, the other with probability alpha.
#
# Value: length-2 numeric vector of choice probabilities (gamble 1, gamble 2).
#
# Examination order for gain/mixed gambles: minimum outcome, probability of the
# minimum outcome, maximum outcome. Outcome differences are compared against an
# aspiration level of 10% of the maximum outcome; probability differences
# against 10 percentage points. For pure loss gambles the same steps run on the
# outcomes closest to zero ("minimum loss"), scaled by the maximum loss.
# (FIX: the function header line had been garbled in the source file and is
# reconstructed here; the decision logic itself is unchanged.)
priorityHeuristic <- function(prospect,alpha=0.1){
  allx <- c(prospect[[1]]$x, prospect[[2]]$x)
  if (all(allx<0)){ # all negative: pure loss gambles
    # step 1: compare minimum losses (outcomes closest to zero),
    # scaled by the magnitude of the maximum loss
    max_x <- min(allx)
    mins <- c(max(prospect[[1]]$x),max(prospect[[2]]$x))
    if ((abs(mins[1]-mins[2])/abs(max_x))>0.1){
      choice_p <- rep(alpha,2)
      choice_p[which.max(mins)] <- 1-alpha
      return(choice_p)
    }
    # step 2: compare the probabilities of those minimum losses
    p_of_mins <- c(prospect[[1]]$p[which.max(prospect[[1]]$x)],
                   prospect[[2]]$p[which.max(prospect[[2]]$x)])
    if (abs(p_of_mins[1]-p_of_mins[2])>0.1){
      choice_p <- rep(alpha,2)
      choice_p[which.min(p_of_mins)] <- 1-alpha
      return(choice_p)
    }
    # step 3: fall back to comparing maxima.
    # NOTE(review): in this loss branch max(x) is compared again, which is the
    # same quantity as step 1; the heuristic's "maximum loss" would be min(x).
    # Verify against the source publication before changing.
    maxs <- c(max(prospect[[1]]$x),max(prospect[[2]]$x))
    choice_p <- rep(alpha,2)
    choice_p[which.max(maxs)] <- 1-alpha
    return(choice_p)
  } else { # mixed or positive gambles
    # step 1: compare minimum outcomes, scaled by the maximum outcome
    max_x <- max(allx)
    mins <- c(min(prospect[[1]]$x),min(prospect[[2]]$x))
    if ((abs(mins[1]-mins[2])/max_x)>0.1){
      choice_p <- rep(alpha,2)
      choice_p[which.max(mins)] <- 1-alpha
      return(choice_p)
    }
    # step 2: compare the probabilities of the minimum outcomes
    p_of_mins <- c(prospect[[1]]$p[which.min(prospect[[1]]$x)],
                   prospect[[2]]$p[which.min(prospect[[2]]$x)])
    if (abs(p_of_mins[1]-p_of_mins[2])>0.1){
      choice_p <- rep(alpha,2)
      choice_p[which.min(p_of_mins)] <- 1-alpha
      return(choice_p)
    }
    # step 3: compare maximum outcomes
    maxs <- c(max(prospect[[1]]$x),max(prospect[[2]]$x))
    choice_p <- rep(alpha,2)
    choice_p[which.max(maxs)] <- 1-alpha
    return(choice_p)
  }
}
f091c000aadef5ee4bb1691a1586bc5665fe8357 | 3925d6ad6d2c1ff9c07b09e3a1d05cf2258254a7 | /Figures/orthology_annotation.Rd | 475102ae8962597b8808fb5637287c203daedb8b | [] | no_license | wrroberts/Achimenes_transcriptomes | 24dff04bc939b462794b2b28927b81c7090f09f7 | 8712b9e0ac8c185dc14b93e1fed829eb88283768 | refs/heads/master | 2021-01-22T22:13:40.423666 | 2017-03-20T21:20:05 | 2017-03-20T21:20:05 | 56,537,194 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 1,369 | rd | orthology_annotation.Rd | library(ggplot2)
# Stacked-bar figures of (1) protein annotation composition and (2) orthogroup
# composition per species. This block was a pasted console transcript: the
# continuation-prompt "+ " prefixes, an undefined object name (`Annot`), an
# unterminated string (position='fill), and references to `data` instead of
# `data2` have all been repaired. melt() comes from reshape2, which the
# transcript used without loading.
library(reshape2)

# --- Protein annotation: stacked proportions per species ---
Annotation <- read.csv('Annotation.csv', header = TRUE)
data1 <- melt(Annotation)  # FIX: original referenced the undefined object `Annot`
data1$Category <- factor(data1$Category,
                         levels = c('Unannotated', 'Annotated Primary', 'Annotated Alternate'))
ggplot(data1, aes(x = variable, y = value)) +
  geom_bar(aes(alpha = Category), stat = 'identity', width = 0.9, size = 0.5,
           color = 'gray25', position = 'fill') +  # FIX: string 'fill was unterminated
  geom_text(data = data.frame(x = rep(4.5, 3), y = c(0.4, 0.06, 0.85),
                              labels = levels(data1$Category)),
            aes(x, y, label = labels), color = 'black', hjust = 0, cex = 10) +
  labs(x = '', y = 'Percentage', title = 'Protein Annotation') +
  scale_alpha_discrete(range = c(1, 0)) +
  theme_classic(base_size = 30) +
  guides(alpha = FALSE) +
  coord_cartesian(xlim = c(0.5, 6)) +
  theme(axis.title.y = element_text(vjust = 1.5))

# --- Orthogroup composition: stacked counts per species ---
Ortho <- read.csv('Orthologs.csv', header = TRUE)
data2 <- melt(Ortho)
# FIX: the factor releveling was applied to an undefined object `data`,
# so the intended category order never reached the plotted data frame.
data2$Category <- factor(data2$Category,
                         levels = c('Unique', 'Shared others', 'Shared Achimenes', 'Core'))
ggplot(data2, aes(x = variable, y = value)) +
  geom_bar(aes(alpha = Category), stat = 'identity', width = 0.9, size = 0.5, color = 'gray25') +
  geom_text(data = data.frame(x = rep(4.5, 4), y = c(32000, 24000, 20000, 10000),
                              labels = levels(data2$Category)),
            aes(x, y, label = labels), color = 'black', hjust = 0, cex = 10) +
  labs(x = '', y = 'Number of proteins', title = 'Orthogroup composition') +
  scale_alpha_discrete(range = c(1, 0)) +
  theme_classic(base_size = 30) +
  guides(alpha = FALSE) +
  theme(legend.position = 'bottom') +
  coord_cartesian(xlim = c(0.5, 5.8))
632caa33ff2f47d5abc7772e2db96b4afced555d | adf13968c14ecb2f547c8afc96842ffd1b2efa39 | /demo/S_EigenvalueDispersion.R | 3186f18dd2826040abe530879d3a0dd4de89ef1d | [] | no_license | runiaruni/Meucci | 10ed5635d3a756b743a9f75956e247dadb7489ff | 9f0a946631eebe65837c15d05b53e22c5333c25d | refs/heads/master | 2021-05-31T18:07:39.024992 | 2016-05-05T17:17:59 | 2016-05-05T17:17:59 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 1,891 | r | S_EigenvalueDispersion.R | #' This script displays the sample eigenvalues dispersion phenomenon, as described in A. Meucci,
#' "Risk and Asset Allocation", Springer, 2005, Chapter 4.
#'
#' @references
#' A. Meucci - "Exercises in Advanced Risk and Portfolio Management" \url{http://symmys.com/node/170},
#' "E 168 - Sample covariance and eigenvalue dispersion".
#'
#' See Meucci's script for "S_EigenValueDispersion.m"
#'
#' @author Xavier Valls \email{flamejat@@gmail.com}
##################################################################################################################
### Inputs
N = 50;      # dimension of the covariance matrix
nSim = 50;   # simulations averaged per sample length
SampleLenght = seq( N , 10 * N, N)   # sample lengths T = N, 2N, ..., 10N

##################################################################################################################
### Generate mesh and surface
Mu    = matrix( 0, N, 1 );
Sigma = diag( 1, N );   # population covariance = identity, so every true eigenvalue is 1

# compute true eigenvalues (sorted in decreasing order)
Eigen = eigen(Sigma);
Index = order( -( Eigen$values ));
EVec  = Eigen$vectors[ , Index ];
# FIX: the original read the non-existent component "Eigen$Values" (R is
# case-sensitive) and applied matrix indexing [Index, Index] to what eigen()
# returns as a plain numeric vector; use the vector component directly.
EVal  = Eigen$values[ Index ];

# compute eigenvalues of the sample estimator, averaged over nSim draws per T
nSampleLenght = length( SampleLenght );
Store_EVal_Hat = matrix( NaN, nSampleLenght, N ); # preallocation for speed

for( i in 1 : nSampleLenght )
{
    T = SampleLenght[ i ];
    EVal_Hat = 0;
    for( n in 1 : nSim )
    {
        # NOTE(review): rmvnorm() is not in base R — the mvtnorm (or similar)
        # package must be attached earlier in the session for this call to work.
        X = rmvnorm( T, Mu, Sigma );
        Sigma_Hat = cov( X );
        # sorted sample eigenvalues, accumulated across simulations
        L = eigen( Sigma_Hat )$values;
        Index = order(-(L));
        L = L[ Index];
        EVal_Hat = EVal_Hat + L;
    }

    EVal_Hat = EVal_Hat / nSim;
    Store_EVal_Hat[ i, ] = t(EVal_Hat);
}

##################################################################################################################
### Display surface: sample eigenvalues fan out around the true value 1,
### converging as the sample length grows relative to N.
dev.new();
persp( SampleLenght/N, 1 :N , Store_EVal_Hat,
    theta = 7 * 45, phi = 30, expand=0.6, col='lightblue', shade=0.75, ltheta=120,
    ticktype='detailed', xlab = "eigenvalue #", ylab = "sample lenght/N");
b1cffe699b51133b3e80c28809e7a4b070a286ce | d99c28b5c5ab9ec297e0cef2624425aa3a000be8 | /man/armodel.Rd | 56a7fa13cf06f88b343d96ed875b52584ccb7f9f | [
"MIT"
] | permissive | georgiaxiao/newrepo | 7dbe9ecfbfcab54d87e9ebd9193b5502d5e60fee | cbb44cda51aaf0be4506e008acc02369e8867341 | refs/heads/main | 2023-02-02T22:48:33.991786 | 2020-12-11T18:11:40 | 2020-12-11T18:11:40 | 320,641,535 | 0 | 0 | null | null | null | null | UTF-8 | R | false | true | 326 | rd | armodel.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/armodel.R
\name{armodel}
\alias{armodel}
\title{Title armodel}
\usage{
armodel(i)
}
\arguments{
\item{i}{The order (number of lag terms) to use for the AR model.}
}
\value{
}
\description{
Fits an AR model of the chosen order and displays its summary through a simplified interface.
}
|
e062f89b8df6c916365c38205edc1294cf30fc38 | 5e25e0caf98f695fae1da0771d30ec2705974f9b | /Class 3 - Reading and Writing Data.R | 60371c78211d94c6ca45b42e058bfbaa483ee153 | [] | no_license | KarthikJRao/MICA_AMMA_PGP-2 | 4e2553fc2bb1002ca4b587353d553c321958c898 | c9a60427789435dcf02cd8decc9dbe955f69d77a | refs/heads/master | 2021-01-23T05:55:37.320899 | 2017-09-07T20:20:44 | 2017-09-07T20:20:44 | 102,481,789 | 16 | 4 | null | null | null | null | UTF-8 | R | false | false | 8,363 | r | Class 3 - Reading and Writing Data.R |
# Teaching transcript: creating objects in the editor, random numbers, sampling,
# reading CSV files, and working with Date values. Several lines are
# interactive demos and print/inspect objects rather than build pipeline state.
################## Reading Data #########################
## Reading data from program editor
## Create a matrix
input.matrix <- matrix(1:100, ncol = 5)
## See the data
input.matrix # we see that a matrix with 5 columns and 20 rows has been formed
# Uniform Random Numbers between 1 to 1000
rr <-1000*runif(2)
class(rr)
rr<-as.integer(1000*runif(2))
class(rr)
mode(rr)
unif <- as.integer(1000*runif(200000))
plot(unif)
head(unif)
hist(unif)
# Scenario
# Application: 300,000
# Selection: 60,000
Prob <- 60000/300000
Appl.num <- seq(1,300000,1)
head(Appl.num)
# simple random sample of 60,000 application numbers without replacement
# (note: replace = F uses the abbreviation F; prefer the full FALSE, since F is reassignable)
selected <- sample(Appl.num,60000,replace = F)
selected.df <- data.frame(selected)
LETTERS[1:4]
sample(letters[1:4], 10, replace = T)
## Create a Data Frame
# Value = seq(1:5) recycles 1..5 twice to fill the 10 rows
input.df <- data.frame(ID = 1:10,
                       Class = sample(letters[1:4], 10, replace = TRUE),
                       Value = seq(1:5))
## View the data frame
View(input.df)
# we see that the data frame has 3 columns named ID, Class, and Value
# The data frame also has 10 rows of data
## See the column names
colnames(input.df)
names(input.df)
row.names(input.df)
## See the dimensions
dim(input.df)
## Set the Working Directory
getwd()
## Reading data from Comma separated file (csv)
# NOTE(review): "C:\\YYYYYY\\..." is a placeholder path — replace before running.
input_csv.df <- read.csv(file="C:\\YYYYYY\\AMMA 2017\\Data\\data_2017\\binary.csv")
View(input_csv.df)
# reset directory and read file
setwd("C:\\YYYYYY\\AMMA 2017\\Data\\data_2017")
input_csv.df <- read.csv("binary.csv",header = T)
## See the structure of the data
str(input_csv.df)
sum(input_csv.df$gre)
?sum
## Reading Date Values
input_wthdt.df <- read.csv("binary_withdate.csv")
str(input_wthdt.df)
tab <- data.frame(table(input_wthdt.df$application_date))
sum(tab$Freq)
View(tab)
# we see the date field has been read as factor
summary(input_wthdt.df$gre)
# read Date as character
input_wthdt.df <- read.csv("binary_withdate.csv", stringsAsFactors = F)
str(input_wthdt.df)
View(input_wthdt.df)
mode(input_wthdt.df$application_date)
## working with dates
d <-"2004-12-03"
d
class(d)
# as.Date() parses ISO yyyy-mm-dd by default
d1 <- as.Date(d)
class(d1)
mode(d1)
## find system date
s <-Sys.Date()
s
## Current date and time
# NOTE(review): this assigns to `c`, shadowing the base concatenation function c()
c <-date()
c
# reading dates with other than defaul format
# d - Day e.g 1, 2 etc
# m - month
# b - month /Jan, Feb
# B - Month January
# y - 2 digit year
# Y - 4 Digit year
d2 <-as.Date("12-January-2012",format="%d-%B-%Y")
d2
class(d2)
format(d2,"%B")
d3 <-as.Date("12-February-2012",format="%d-%B-%Y")
d3 <-as.Date("12-February-12",format="%d-%B-%y")
d4 <-as.Date("12-12-12",format="%d-%m-%y")
d4
d2
dd <- format(d2,"%d/%B/%Y")
## Calculate age
dob <-as.Date("12-Jan-1983",format="%d-%b-%Y")
dob
# age in days, then converted to (approximate) whole years by dividing by 365
age <- difftime(Sys.Date(),dob,units="days")
as.integer(as.numeric(age)/365)
## Correct the date format
input_wthdt.df$application_date1 <- as.Date(input_wthdt.df$application_date, format="%m/%d/%Y")
str(input_wthdt.df)
## Read the data specifying the Class of the data
# NOTE(review): 'myDate' is a custom colClasses entry; it only works if a
# setClass/setAs coercion for "myDate" was defined earlier in the session.
input_wthdt.df1 <- read.csv("binary_withdate.csv", colClasses=c(application_date = 'myDate'))
str(input_wthdt.df1)
## Read data from the web
## You can directly read a file directly from the internet by specifying the URL
input_webdata.df <- read.table("http://www.stats.ox.ac.uk/pub/datasets/csb/ch11b.dat")
str(input_webdata.df)
# NOTE(review): the next line is an incomplete fragment (`$` with no column
# name); sourcing this file as-is will fail here. Remove or complete it.
input_webdata.df$
## Read the first few records of a dataset
head(input_webdata.df)
## The first 6 rows of data have been displayed
## Ques 1 : What is we need to display the first 10 rows instead?
## Ques 2 : What if we want to display the last few rows?
csv_file <- read.table(file="binary.csv",
                       header = TRUE,
                       sep = ',')
str(csv_file)
## Read a Tab Delimited file
input_tabdlmtd.df <- read.table(file="tab_delimited_data.txt",
                                header = TRUE,
                                sep = '\t')
head(input_tabdlmtd.df)
## We can use the same functions as with a csv file
## to read dates and modify formats with a tab-delimited file as well
# read.table accepts any single-character separator, e.g. '$'
input_dollar.df <- read.table(file="dollar_delimited_data.txt",
                              header = TRUE,
                              sep = '$')
View(input_dollar.df)
## Read SAS dataset Data
install.packages("sas7bdat")
library(sas7bdat)
library(help=sas7bdat)
bank_ins = read.sas7bdat("bank_additional_full.sas7bdat")
names(bank_ins)
str(bank_ins)
library(foreign)
library(help=foreign)
# Read SPSS file
spss <- data.frame(read.spss("p004.sav"))
names(spss)
table(spss$PROTEIN)
# NOTE(review): sqldf is used below but library(sqldf) is never called
# (library(help=sqldf) only opens the help index).
library(help=sqldf)
sum.tb <- sqldf("select PROTEIN,count(*)
                from spss
                where PROTEIN>1
                group by PROTEIN")
## Read Data From Facebook
install.packages(c("Rfacebook","RCurl","rjson"))
install.packages("twitteR")
library(Rfacebook)
library(RCurl)
library(rjson)
library(twitteR)
library(help=Rfacebook)
# connecting to Facebook
#https://developers.facebook.com/tools/explorer
# SECURITY NOTE(review): a live OAuth token is hard-coded below. Tokens and API
# keys must never be committed to source; load them from environment variables
# (Sys.getenv) or a gitignored file, and revoke this one.
accessToken <-"EAACEdEose0cBAM4A6BcQfQjLzaH6pxd47BOPsZBBrRgqhtX2B1fE5W2yoKKCuU0VXgUkQ8YqaLHyD61ZBfp7hH1yYeESEmkH4qL6Bnv2zLv9Nmm7EwtWil0RgXCg9nfOKq6xypZB3FAb8vU6VDihplakJEVJZBiTxL5MZCyl27tQW3qn6AFOwjqBiVypGHhsZD"
# Get data from a company page
flipkartPage <-getPage(page="flipkart",
                       token=accessToken,
                       n=10)
flipkartPage <-getPage(page="flipkart",
                       token=accessToken,
                       n=150)
View(flipkartPage)
teslaPage <-getPage(page="tesla",
                    token=accessToken,
                    n=10)
View(teslaPage)
##Twitter on R
# SECURITY NOTE(review): live Twitter API credentials are hard-coded below —
# same concern as the Facebook token above; rotate and externalise them.
api_key <- "c6MfJko2KyiV1P9W55u2jXmkd"
api_secret <- "nCVQrEU3sx74AGlmCBHIJWZJacyEEam4AJwhRt22IUdMS2v7Yp"
access_token <- "707591447726305280-Y28HU8d677t2zIPSGSPoWf3BjTBXRaN"
access_token_secret <- "5cek9he8B5RcQEgmKe5KOJ74kfhcU5lQxrcuJdcFcl0Wd"
setup_twitter_oauth(api_key,api_secret)
vkohliTwitterpage <- getUser("imVkohli","707591447726305280-Y28HU8d677t2zIPSGSPoWf3BjTBXRaN")
View(vkohliTwitterpage)
Searchresults<-searchTwitter("#", n=10)
head(Searchresults)
one_row <-bank_ins[,3]
bank_ins.smpl <- bank_ins[1:1000,]
## Save the R object
save(bank_ins,file="bank_ins.smpl.Rda")
# remove
rm(bank_ins.smpl)
rm(bank_ins)
## Load the data back
names(bank_ins)
load("bank_ins.smpl.Rda")
## install and load datasets package
install.packages("datasets")
require(datasets)
library(help="datasets")
## Save the data as a csv file
tt <- mtcars
names(tt)
write.csv(mtcars, "mtcars.csv")
tt$carmodel <- row.names(mtcars)
row.names(tt) <- NULL
write.csv(tt, "C:\\Ram\\R for Data Science\\data\\mtcars.csv",
row.names=F
)
## Save the data as a Tab Delimited file
write.table(mtcars, "mtcars.txt", sep = '%', quote = FALSE, row.names=F)
## write to file without column names
write.table(mtcars,
"mtcars_noheader.txt",
sep = '%',
quote = FALSE,
row.names=F,
col.names = F)
write.csv(mtcars,
"mtcars_noheader1.txt",
quote = FALSE,
row.names=F,
col.names = F)
write.table(mtcars,
"mtcars_noheadercsv.csv",
sep = ',',
quote = FALSE,
row.names=F,
col.names = F)
attach()
names(male)
summary(Age)
rm(Age)
Age
mean(male$Age)
attach(male)
mean(Age)
myfile <- read.table(text="MyName Age
Ram 38
Shyam 25", header=T)
write.csv(myfile,file="myfile.csv")
write.table(myfile,
"myfile.txt",
sep = '$',
row.names=F,
col.names = F)
students <- data.frame(Name= c("Ram","Dinesh","Mona","Sona"),
Gender=c("Male","Male","Female","Female"),
Age =rnorm(4,50,20))
View(students)
write.csv(students,
file="students.csv",
row.names = F)
getwd()
# ----------------- Reference --------------------------
# http://dni-institute.in/blogs/read-large-files-into-r/
# Assignment 2
# Q(1): Down a csv file from https://data.oecd.org/healthstat/infant-mortality-rates.htm and read the csv file
# And remove the last variable
# Q(2): Read 2 table from html page https://en.wikipedia.org/wiki/India%E2%80%93Pakistan_cricket_rivalry
# Find number of ODI Matches won by India
|
3feea1f65800e9d2e7978ff7d5f71c531ef3c426 | 1c1ab899dcb552e671d54e2e5f7dc6a1a2b12223 | /length_data_master.graphs.R.R | 5f8a13b826484ec1a6d5611d5df34aa1fb9111e3 | [] | no_license | rholderm/mfish-feeding | 629d67a3839e8528fb6d6291e1bea0fd4a82fefa | 2649a22f9a48fe813a62a4e9babbd619c0b48728 | refs/heads/master | 2021-07-13T01:29:48.516634 | 2017-09-28T00:26:50 | 2017-09-28T00:26:50 | 105,081,496 | 0 | 0 | null | 2017-09-28T00:26:51 | 2017-09-27T23:57:56 | R | UTF-8 | R | false | false | 6,867 | r | length_data_master.graphs.R.R |
# Load fish standard-length measurements and pond environmental covariates,
# then join them by population.
# FIX: the measurement data frame was named `length`, shadowing base::length();
# it is renamed fish.length (all uses are local to this section).
fish.length<-read.csv("C:\\Users\\rhold\\Documents\\UCSC\\Palkovacs Lab Work\\R\\Fish Length Master R.csv")
fish.length$pop<-fish.length$Population
fish.length<-fish.length[,-c(1,6,7)]   # drop unused columns (by position)
env_factors<-read.csv("C:\\Users\\rhold\\Documents\\UCSC\\Palkovacs Lab Work\\R\\Environmental pond data for PCA_R.csv")
# Will need to edit this and pop names once I get Sch lengths
env_factors<-env_factors[-c(4,5,20),]          # drop ponds without length data
env_data<-env_factors[-c(1,4,5,6,7,8,17)]      # keep only the covariates of interest
# 0/1 indicators for bluegill (bg) and bass presence per pond;
# NOTE(review): assumed to follow the pond order of env_data$pop below — confirm.
env_data$bg<-as.numeric(c("0","0","0","0","1","0","0","0","0","1","0","0","0","0","0","0","1","0","1","0","0","0"))
env_data$bass<-as.numeric(c("1","0","0","0","0","0","0","1","0","1","0","0","0","1","0","0","1","0","0","0","0","0"))
env_data$pop<-c("Ant","AW","CaK","Corc","DeA","DeL","DoD","FC","FS","Harkin","K2","K5","Larsen","LAW",
                "LHC","NBLM","NL","Sho","SpH","WatL","WSU","Yolo")
# log-transformed zooplankton/cladoceran abundances (inputs already include +1)
env_data$logzoops<-log(env_factors$total_zoops_plus_1)
env_data$logclad<-log(env_factors$total_cladocerans_plus_1)
library(plyr)
length.data<-join(fish.length,env_data)   # join on the shared `pop` column
length.data$sex<-length.data$Sex
length.data$abundance<-as.numeric(length.data$mfish_abundance)
length.data<-length.data[,-c(4,7)]        # drop the now-duplicated originals
#Remove Yolo
inc.yolo<-which(length.data$pop != "Yolo")
length.data<-length.data[inc.yolo,]
rm(inc.yolo)
library(Rmisc)
### Sex ###
tgc1<-summarySE(length.data, measurevar="Standard.Length", groupvars=c("sex"))
ggplot(tgc1, aes(x=sex,y=Standard.Length))+geom_point(aes(colour = sex), size=4) +
geom_errorbar(aes(ymin=Standard.Length-se, ymax=Standard.Length+se)) +
theme_bw() + theme(panel.grid.major = element_blank(), panel.grid.minor = element_blank()) +
ggtitle("Sex") + theme(plot.title = element_text(hjust = 0.5))
### Remove IM fish ###
inc1<-which(length.data$sex != "IM")
length.data2<-length.data[inc1,]
inc2<-which(length.data2$sex != "IM M")
length.data2<-length.data2[inc2,]
rm(inc1) + rm(inc2)
##### Bass #####
tgc2<-summarySE(length.data2, measurevar="Standard.Length", groupvars=c("sex","bass"))
ggplot(tgc2, aes(x=bass,y=Standard.Length))+geom_point(aes(colour = sex), size=4) +
geom_errorbar(aes(ymin=Standard.Length-se, ymax=Standard.Length+se)) +
theme_bw() + theme(panel.grid.major = element_blank(), panel.grid.minor = element_blank()) +
ggtitle("Bass") + theme(plot.title = element_text(hjust = 0.5))
##### BG #####
tgc3<-summarySE(length.data2, measurevar="Standard.Length", groupvars=c("sex","bg"))
ggplot(tgc3, aes(x=bg,y=Standard.Length))+geom_point(aes(colour = sex), size=4) +
geom_errorbar(aes(ymin=Standard.Length-se, ymax=Standard.Length+se)) +
theme_bw() + theme(panel.grid.major = element_blank(), panel.grid.minor = element_blank()) +
ggtitle("BG") + theme(plot.title = element_text(hjust = 0.5))
##### BG only pops #####
inc.bg<-which(length.data2$bass != 1)
length.data.bg<-length.data2[inc.bg,]
tgc3<-summarySE(length.data2, measurevar="Standard.Length", groupvars=c("sex","bg"))
ggplot(tgc3, aes(x=bg,y=Standard.Length))+geom_point(aes(colour = sex), size=4) +
geom_errorbar(aes(ymin=Standard.Length-se, ymax=Standard.Length+se)) +
theme_bw() + theme(panel.grid.major = element_blank(), panel.grid.minor = element_blank()) +
ggtitle("BG") + theme(plot.title = element_text(hjust = 0.5))
##### Temp #####
tgc4<-summarySE(length.data2, measurevar="Standard.Length", groupvars=c("sex","temp_c"))
ggplot(tgc4, aes(x=temp_c,y=Standard.Length))+geom_point(aes(colour = sex), size=4) +
geom_errorbar(aes(ymin=Standard.Length-se, ymax=Standard.Length+se)) +
theme_bw() + theme(panel.grid.major = element_blank(), panel.grid.minor = element_blank()) +
ggtitle("Temp") + theme(plot.title = element_text(hjust = 0.5))
##### Temp of no pred pops #####
inc3<-which(length.data2$bass == 0)
length.data3<-length.data2[inc3,]
inc4<-which(length.data3$bg == 0)
length.data3<-length.data3[inc4,]
rm(inc3) + rm(inc4)
tgc5<-summarySE(length.data3, measurevar="Standard.Length", groupvars=c("sex","temp_c"))
ggplot(tgc5, aes(x=temp_c,y=Standard.Length))+geom_point(aes(colour = sex), size=4) +
geom_errorbar(aes(ymin=Standard.Length-se, ymax=Standard.Length+se)) +
theme_bw() + theme(panel.grid.major = element_blank(), panel.grid.minor = element_blank()) +
ggtitle("Temp No Pred") + theme(plot.title = element_text(hjust = 0.5))
##### Temp of Bishop ponds #####
inc.B<-which(length.data2$location == "B")
length.data.B<-length.data2[inc.B,]
length.data.B$bass<-as.factor(length.data.B$bass)
tgc6<-summarySE(length.data.B, measurevar="Standard.Length", groupvars=c("sex","temp_c","bass"))
ggplot(tgc6, aes(x=temp_c,y=Standard.Length))+geom_point(aes(colour = sex, shape = bass), size=4) +
geom_errorbar(aes(ymin=Standard.Length-se, ymax=Standard.Length+se)) +
theme_bw() + theme(panel.grid.major = element_blank(), panel.grid.minor = element_blank()) +
ggtitle("Temp B") + theme(plot.title = element_text(hjust = 0.5))
##### Temp of SC ponds #####
inc.SC<-which(length.data2$location == "SC")
length.data.SC<-length.data2[inc.SC,]
tgc7<-summarySE(length.data.SC, measurevar="Standard.Length", groupvars=c("sex","temp_c"))
ggplot(tgc7, aes(x=temp_c,y=Standard.Length))+geom_point(aes(colour = sex), size=4) +
geom_errorbar(aes(ymin=Standard.Length-se, ymax=Standard.Length+se)) +
theme_bw() + theme(panel.grid.major = element_blank(), panel.grid.minor = element_blank()) +
ggtitle("Temp SC") + theme(plot.title = element_text(hjust = 0.5))
##### Abundance #####
tgc8<-summarySE(length.data2, measurevar="Standard.Length", groupvars=c("sex","abundance"))
ggplot(tgc8, aes(x=abundance,y=Standard.Length))+geom_point(aes(colour = sex), size=4) +
geom_errorbar(aes(ymin=Standard.Length-se, ymax=Standard.Length+se)) +
theme_bw() + theme(panel.grid.major = element_blank(), panel.grid.minor = element_blank()) +
ggtitle("Mosquitofish Abundance") + theme(plot.title = element_text(hjust = 0.5))
##### Log zoops #####
tgc9<-summarySE(length.data2, measurevar="Standard.Length", groupvars=c("sex","logzoops"))
ggplot(tgc9, aes(x=logzoops,y=Standard.Length))+geom_point(aes(colour = sex), size=4) +
geom_errorbar(aes(ymin=Standard.Length-se, ymax=Standard.Length+se)) +
theme_bw() + theme(panel.grid.major = element_blank(), panel.grid.minor = element_blank()) +
ggtitle("Zooplankton") + theme(plot.title = element_text(hjust = 0.5))
##### Log cladocerans #####
tgc10<-summarySE(length.data2, measurevar="Standard.Length", groupvars=c("sex","logclad"))
ggplot(tgc10, aes(x=logclad,y=Standard.Length))+geom_point(aes(colour = sex), size=4) +
geom_errorbar(aes(ymin=Standard.Length-se, ymax=Standard.Length+se)) +
theme_bw() + theme(panel.grid.major = element_blank(), panel.grid.minor = element_blank()) +
ggtitle("Cladoceran") + theme(plot.title = element_text(hjust = 0.5))
|
00886e58b2d4f0e1539dc4f7b15ff781615d1512 | 5707fa70258d02ca5981010f078f635d0c5a09ca | /.Rprofile | 5115d004f9da8ac2455e579b8ac6240fb6bacdea | [
"MIT"
] | permissive | cgpu/2019-feature-selection | b60c3a3fe13abcc22836b272dd8bda27d0cb93bf | c7b33508cf92359718c47e13e01688a82d30a7bd | refs/heads/master | 2020-08-07T15:49:53.064371 | 2019-12-10T08:31:56 | 2019-12-21T17:25:19 | 213,512,464 | 0 | 0 | NOASSERTION | 2019-12-21T17:25:20 | 2019-10-08T00:28:15 | null | UTF-8 | R | false | false | 597 | rprofile | .Rprofile | source("renv/activate.R")
## Project .Rprofile: runs at R startup (after the renv activation on the
## previous line) to set project-wide options and load workflow packages.
options(
  drake_make_menu = FALSE,
  Ncpus = 10,
  #repos = BiocManager::repositories(),
  radian.editing_mode = "vi",
  radian.auto_match = TRUE,
  radian.auto_indentation = TRUE,
  radian.tab_size = 2,
  radian.complete_while_typing = TRUE,
  scipen = 999,
  clustermq.scheduler = "slurm",
  clustermq.template = "slurm_clustermq.tmpl"
)
library(drake)
library(magrittr)
# tibble > data.frame
## In interactive sessions (and only when tibble is installed), define a
## global print.data.frame that renders plain data frames via tibble's
## printer.  NOTE(review): this shadows base R's S3 print method for every
## data frame in the session -- intentional here, but worth knowing about.
if (interactive() && "tibble" %in% rownames(utils::installed.packages())) {
  print.data.frame = function(x, ...) {
    tibble:::print.tbl(tibble::as_tibble(x), ...)
  }
}
|
04d146b343a8e419ec5985fe5f32d5ef04062cab | 6fbc0b4555e94e5dbbb1b6ce58ca2cbd98340b43 | /Modelo CEV(pricing option).R | 9383feeb1c2da0dd9d89d7c78ad66b26e58acc45 | [] | no_license | CEAC333/R-repository | c6dc9b43c89ffe2a50fd280c32068bdc4a36bac0 | 79d8d2640c6f29cc92f8d1d1d005f1600c6dbe6b | refs/heads/master | 2021-05-13T19:02:53.602809 | 2018-01-08T01:32:00 | 2018-01-08T01:32:00 | null | 0 | 0 | null | null | null | null | ISO-8859-1 | R | false | false | 576 | r | Modelo CEV(pricing option).R | st<-1.33 #precio actual del activo
## Monte Carlo pricing of a European call under the CEV model
##   dS = r*S*dt + sigma*S^alfa*dW
## discretised with the Euler-Maruyama scheme.
st <- 1.33        # current asset price (spot; also set earlier in the file)
k <- 1.33         # strike price
Te <- 252         # time to maturity, in trading days
r <- .072/100     # daily risk-free interest rate
sigma <- 0.77/100 # daily volatility
alfa <- 1         # elasticity-of-variance exponent (alfa = 1 => lognormal)
N <- 24           # number of Euler time steps
h <- Te/N         # step size (was T/N: `T` is TRUE, i.e. 1, not the maturity)
n <- 10000        # number of simulated paths
## Uncomment for reproducible prices: set.seed(1)
## All n paths are advanced together (vectorised over simulations).  The
## original stepped one path at a time and drew shocks with `randn`, which
## is MATLAB -- in R the Gaussian increments come from rnorm().
x1 <- rep(st, n)
for (j in seq_len(N)) {
  w <- sqrt(h) * rnorm(n)                   # Brownian increments, one per path
  x1 <- x1 + h * r * x1 + sigma * x1^alfa * w
}
## Discount the mean payoff over the full life of the option
## (was exp(-r), which discounts a single day rather than Te days).
callprice <- exp(-r * Te) * mean(pmax(x1 - k, 0))
dd218197e54c255116a74c9304018f592fc6f20c | 43a70fcc3c3558b2fc9296eb178fa4e215d3d756 | /man/incidence_vector.Rd | 81e78c8a8c5efa2e6f45ae038fca5f5de0694bac | [] | no_license | laurettemhlanga/PopulationSimulation | ef1a166b3db12866e8daee739b66a0f290fc3014 | 9e7d4c3df4438f15cf5177612ed4ab551cb7a83b | refs/heads/master | 2021-12-23T15:23:23.017571 | 2021-08-09T15:18:07 | 2021-08-09T15:18:07 | 166,325,128 | 0 | 1 | null | null | null | null | UTF-8 | R | false | true | 1,253 | rd | incidence_vector.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/incidence_matrix.R
\name{incidence_vector}
\alias{incidence_vector}
\title{incidence_vector}
\usage{
incidence_vector(n_age_steps, date_of_birth, incidence, time_step)
}
\arguments{
\item{n_age_steps}{The maximum age attained by each birth cohort (the number of aging steps).}
\item{date_of_birth}{a numeric vector indicating the birth dates in the simulation. Note that date formats are not used.}
\item{incidence}{a function which takes as arguments age and time and returns a numeric rate of incidence for each age and time included in the simulation.
This function can be defined by the user or can be selected from among several default options included in the package.
The user-defined or package-default function should be called by name when included as an argument in the generate_incidence_matrix function.}
\item{time_step}{the time step between consecutive birth dates in the vector list_of_birth_times}
}
\value{
a matrix with one column for each date of birth.
Values stored in the matrix are numeric doubles between 0 and 1, representing the incidence rate at each age and time
}
\description{
a function that returns a matrix of probabilities for each age and time step of the simulation
}
|
3a2bf9422e9fa8fb28c18c2020e8e0ccb7e35ff7 | 5a1aa46ebb7bb936bb60f5b9c78b6e81cf5a1c8c | /Universalbank_Logistic_Regression_Analysis.R | 2d0b008ae2099758e89b05c5b9624b3c0040c9cd | [] | no_license | JasSong/Forecasting-Model | e956bb3629485d856f09f567b29d268a82e2297f | c8932ed17d1ab8066bf8d83b14fbc872d6ea5335 | refs/heads/master | 2021-04-12T09:33:24.491710 | 2018-03-21T09:20:16 | 2018-03-21T09:20:16 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 1,701 | r | Universalbank_Logistic_Regression_Analysis.R | library(data.table)
## Logistic regression on the Universal Bank personal-loan data:
## read the CSV interactively, dummy-code Education, fit a logit model on a
## 70/30 train/test split, and evaluate with confusion matrices and ROC/AUC.
## NOTE(review): the stray empty argument between file.choose() and sep
## looks accidental -- confirm and remove.
rdata <- fread(file.choose(), ,sep = ',', header = TRUE, fill = TRUE)
rdata <- rdata[,1:14]
## Quick illustration of the logistic (sigmoid) link with slope a.
sigmoid <- function(x,a){
  1/(1+exp(-a*x))
}
curve(sigmoid(x,0.5),-15,15)
names(rdata)
View(rdata)
## NOTE(review): install.packages() calls inside an analysis script re-install
## on every run; consider moving installation out of the script.
install.packages('dummies')
library(dummies)
## Recode Education (3 levels) as a labelled factor, then expand it to dummy
## columns Education.und / Education.grd / Education.adv.
rdata$Education <- factor(x= rdata$Education, labels = c('und', 'grd','adv'))
str(rdata)
rdata.dum <- dummy.data.frame(data= rdata, names = 'Education', sep = '.',drop = TRUE)
str(rdata.dum)
## Drop the "und" dummy: it is the reference category for the model below.
rdata.dum$Education.und <- NULL
## 70/30 train/test split (seeded for reproducibility).
set.seed(1234)
index <- sample(1:nrow(rdata.dum), nrow(rdata.dum)*.7, replace = F)
train.rdata <- rdata.dum[index,]
test.rdata <- rdata.dum[-index,]
## Personal_Loan ~ all predictors, logistic regression on the training set.
logit.fml <- formula(Personal_Loan ~ Age + Experience + Income + Family + CCAvg + Mortgage + Securities_Account + CD_Account + Online + CreditCard + Education.grd + Education.adv)
rdata.model <- glm(formula = logit.fml, data = train.rdata, family = 'binomial')
summary(rdata.model)
## McFadden / pseudo-R2 measures for the fitted logit.
install.packages('pscl')
library(pscl)
pR2(rdata.model)
index
confint(rdata.model)
## Classification at a 0.5 probability cutoff, train then test.
install.packages('caret')
library(caret)
prd.train <- predict(rdata.model, newdata = train.rdata, type = 'response')
pred.train.test <- ifelse(prd.train>=.5,1,0)
confusionMatrix(pred.train.test,train.rdata$Personal_Loan, positive= '1')
prd.test <- predict(rdata.model, newdata = test.rdata, type = 'response')
pred.test.test <- ifelse(prd.test>=.5,1,0)
confusionMatrix(pred.test.test,test.rdata$Personal_Loan,positive = '1')
## ROC curve and AUC on the training-set fitted values.
install.packages('ROCR')
library(ROCR)
prd <- fitted(rdata.model)
prd.obj <- prediction(prd, train.rdata$Personal_Loan)
prf <- performance(prd.obj, measure = 'tpr', x.measure = 'fpr')
plot(prf)
auc <- performance(prd.obj, measure = 'auc')
auc <- auc@y.values[[1]]
auc
|
92f78268098c01c214e8eb55140aa40b77a75676 | 36b84cc0599d78c44ee784f4c3784cf62bc79aa8 | /man/sim_multi_stat.Rd | 68959a65947a1dbac206f983117b21fc475df3e0 | [] | no_license | SJTU-CGM/vPanG | 55d077d790906a58342a9aadc714ab9ba17747b8 | 1a2328747aa6c20f6d6222798a55d561465dc0e7 | refs/heads/master | 2023-04-01T20:42:53.658760 | 2021-04-13T05:29:24 | 2021-04-13T05:29:24 | 355,472,284 | 1 | 0 | null | null | null | null | UTF-8 | R | false | true | 667 | rd | sim_multi_stat.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/sim_multi.R
\name{sim_multi_stat}
\alias{sim_multi_stat}
\title{sim_multi_stat}
\usage{
sim_multi_stat(
pav_obj,
phen_name,
n_simulation = 10,
parallel = FALSE,
parallel_n = parallel::detectCores() - 1
)
}
\arguments{
\item{pav_obj}{A PAV object.}
\item{phen_name}{The name of phenotype used for grouping.}
\item{n_simulation}{The number of simulations.}
\item{parallel}{A logical value indicating whether to use parallel computing.}
\item{parallel_n}{The number of CPU cores used for parallel computing.}
}
\description{
Perform genome simulation calculations for groups.
}
|
df33d81b8c3782432b50f53208c47cde20a9f9f2 | ffdea92d4315e4363dd4ae673a1a6adf82a761b5 | /data/genthat_extracted_code/sonar/examples/PLsphericalSpreadingLaw.Rd.R | c76f12d1f9137de01f621f6d488b3dc5def2b0b9 | [] | no_license | surayaaramli/typeRrh | d257ac8905c49123f4ccd4e377ee3dfc84d1636c | 66e6996f31961bc8b9aafe1a6a6098327b66bf71 | refs/heads/master | 2023-05-05T04:05:31.617869 | 2019-04-25T22:10:06 | 2019-04-25T22:10:06 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 210 | r | PLsphericalSpreadingLaw.Rd.R | library(sonar)
### Name: PLsphericalSpreadingLaw
### Title: PL to range r spherical spreading law in logarithmic form
### Aliases: PLsphericalSpreadingLaw
### ** Examples
## Example call: spherical-spreading propagation loss at range 1000
## (units per the sonar package documentation).
PLsphericalSpreadingLaw( 1000 )
|
fe8dd59927a597a5267ff071b7b2740098bca51d | 0bd5f9608e0b892fce6f39067106046e9e446065 | /shiny/ggplot2_interactivity_and_bootstraplib/app_functions.R | e3c934d25ef4a9d10eb3e9d45da41aa37af8b69f | [] | no_license | acastroaraujo/ToolBox | 833e31474dfed4a30dbf5e209eec8446fd0b9fb3 | ad6c63f1deb3069bb0cc50c8108ad09317efff05 | refs/heads/master | 2021-07-01T06:15:04.354051 | 2021-01-15T20:46:35 | 2021-01-15T20:46:35 | 211,227,714 | 0 | 1 | null | null | null | null | UTF-8 | R | false | false | 869 | r | app_functions.R |
# Proportional-symbol map: draws `state_map` as a white-outlined backdrop,
# then one point per row of `data_frame`, sized by the raw value of the
# column named in `var` and coloured/faded by its log (reversed magma
# palette; missing values drawn white). Returns the ggplot object.
gg_dots <- function(data_frame, state_map, var) {
  v <- sym(var)
  backdrop <- geom_sf(data = state_map, color = "white", size = 0.5)
  dots <- geom_sf(
    aes(size = !!v, color = log(!!v), alpha = log(!!v)),
    show.legend = FALSE
  )
  palette <- scale_color_viridis_c(
    option = "magma", direction = -1, na.value = "white"
  )
  ggplot(data_frame) + backdrop + dots + palette
}
# Choropleth map of the column named in `var`, filled on a log10 scale with
# the reversed magma palette. Rows with a missing value are dropped before
# plotting; `state_map` supplies the gray background and the white borders
# drawn on top. Returns the ggplot object.
gg_choropleth <- function(data_frame, state_map, var) {
  v <- sym(var)
  shaded <- filter(data_frame, !is.na(!!v))
  ggplot(shaded) +
    # uniform gray background
    geom_sf(data = state_map, color = "white", size = NA) +
    # one filled polygon per row with a non-missing value
    geom_sf(mapping = aes(fill = !!v), color = "white", size = 0, show.legend = FALSE) +
    # white state lines on top
    geom_sf(data = state_map, fill = NA, color = "white", size = 0.5) +
    scale_fill_viridis_c(option = "magma", direction = -1, na.value = NA, trans = "log10")
}
|
d8c4ede1d0109d469df442c649cbad7ba37c26ca | 0ee50dd399127ebe38bc8f5197114d46d717ccd7 | /man/autism.Rd | 51f27d36e6c76efe53fdc8543ea1e18a0f272a39 | [] | no_license | cran/bapred | 0051d57056f886e6df028255f0c85339c70d66f1 | e24720be3c6f82c2d5422ae97a8f12f5edc15adc | refs/heads/master | 2022-07-07T03:06:58.840921 | 2022-06-22T07:20:13 | 2022-06-22T07:20:13 | 48,076,867 | 0 | 1 | null | null | null | null | UTF-8 | R | false | false | 1,728 | rd | autism.Rd | \name{autism}
\alias{autism}
\docType{data}
\title{
Autism dataset
}
\description{
Total RNA obtained from lymphoblast cell lines derived from 250 individuals, 137 of which suffer from autism and 113 are healthy. The dataset consists of four batches of sizes 101, 96, 45 and 8.
}
\usage{data(autism)}
\format{
1) \code{X} - the covariate matrix: a matrix of dimension 250 x 1000, containing the numerical transcript values
2) \code{batch} - the batch variable: a factor with levels '1', '2', '3' and '4'
3) \code{y} - the target variable: a factor with levels '1' corresponding to 'healthy' and '2' corresponding to 'autism'
}
\details{
The RNA measurements were obtained by the Illumina HumanRef-8 v3.0 Expression BeadChip featuring 24,526 transcripts. To reduce computational burden of potential analyses performed using this dataset we randomly selected 1,000 of these 24,526 transcripts. Moreover, the original dataset consisted of five batches and contained measurements of 439 individuals. Again to reduce computational burden of potential analyses we excluded the biggest batch featuring 189 individuals resulting in the 250 individuals included in the dataset made available in \code{bapred}.
}
\source{
ArrayExpress, accession number: E-GEOD-37772
}
\references{
Luo, R., Sanders, S. J., Tian, Y., Voineagu, I., Huang, N., Chu, S. H., Klei, L., Cai, C., Ou, J., Lowe, J. K., Hurles, M. E., Devlin, B., State, M. W., Geschwind, D. H. (2012). Genome-wide Transcriptome Profiling Reveals the Functional Impact of Rare De Novo and Recurrent CNVs in Autism Spectrum Disorders. The American Journal of Human Genetics 91:38–55, <\doi{10.1016/j.ajhg.2012.05.011}>.
}
\examples{
data(autism)
}
|
947dfd377b444118df164fc5ed1618571a0bcf0e | cf9533db0fec862278310409df04e87c6bc4d56e | /rnaseq/rnaseq_pkgs.R | d805f60b58dd04d07146a0a5803b3ff84557c935 | [
"MIT"
] | permissive | gillsignals/labs | 1642d889b5475795e93f22d32b2bf567f01048ee | 0fa2a1546466566a511df9f2ecfa30700db60b17 | refs/heads/master | 2020-07-04T18:39:36.157145 | 2019-11-12T15:47:06 | 2019-11-12T15:47:06 | 202,377,656 | 1 | 0 | MIT | 2019-08-14T15:27:41 | 2019-08-14T15:27:40 | null | UTF-8 | R | false | false | 837 | r | rnaseq_pkgs.R | # 2 CRAN packages
## One-off setup script: installs everything needed for the RNA-seq class
## from CRAN, GitHub and Bioconductor.  Run once, not as part of analyses.
# 2 CRAN packages
cranpkgs <- c("ggplot2","pheatmap")
install.packages(cranpkgs)
# rafalib from github (not strictly necessary, but useful for plots)
install.packages("devtools")
library(devtools)
install_github("ririzarr/rafalib")
# the rest are Bioconductor packages
biocpkgs <- c("Rsamtools",
              "GenomicFeatures",
              "GenomicAlignments",
              "Rsubread",
              "airway",
              "pasilla",
              "DESeq2",
              "DEXSeq",
              "vsn",
              "sva",
              "org.Hs.eg.db",
              "cummeRbund",
              "pasillaBamSubset",
              "TxDb.Dmelanogaster.UCSC.dm3.ensGene")
## NOTE(review): biocLite is Bioconductor's legacy installer; on current R
## releases installation goes through the BiocManager package instead.
source("http://bioconductor.org/biocLite.R")
biocLite(biocpkgs)
# note that Rsubread does not have a binary for Windows. This package is not required for class.
|
9120b60ae6086832c4c9a3054fd9c8d7abfa074b | d22adc683486fa80d31f601a19d0821b4ca8b810 | /R/summary.gvlmaDel.R | e4cc6c5d20b62f524f4506c9069b99b71adbdc7d | [] | no_license | cran/gvlma | 9c846f5bc1a4c2f43fb334c2348057f48ec60a39 | f425025ccce580c8f387a58ecf5134d5df0787df | refs/heads/master | 2021-06-02T04:54:46.073984 | 2019-01-05T18:30:03 | 2019-01-05T18:30:03 | 17,696,582 | 1 | 0 | null | null | null | null | UTF-8 | R | false | false | 2,893 | r | summary.gvlmaDel.R | "summary.gvlmaDel" <-
function(object, allstats = TRUE, ...)
{
  ## summary() method for "gvlmaDel" objects (global-validation deletion
  ## statistics).  `object` is a data frame with one row per deleted
  ## observation; its odd-numbered columns hold the deletion statistics and
  ## its even-numbered columns hold the matching p-values.
  ## Prints five-number summaries plus the "unusual" observations for each
  ## test statistic, and invisibly returns a logical data frame flagging
  ## which observations are unusual for which statistic.
  gvlmaDelobj <- object
  nobs <- nrow(gvlmaDelobj)
  ## Split columns: odd = statistics, even (everything else) = p-values.
  oddcols <- seq(1, ncol(gvlmaDelobj), by = 2)
  Stats <- gvlmaDelobj[,oddcols]
  nstats <- ncol(Stats)
  pvals <- gvlmaDelobj[,-oddcols]
  ## Column-wise summary() (Min/1st Qu./Median/Mean/3rd Qu./Max) for all
  ## columns, the statistics alone, and the p-values alone.
  AllQs <- lapply(as.list(gvlmaDelobj), summary)
  StatQs <- lapply(as.list(Stats), summary)
  pvalQs <- lapply(as.list(pvals), summary)
  ## Reassemble the per-column summaries into matrices: one row per input
  ## column, one column per summary statistic.
  AllQsMat <- matrix(unlist(AllQs), nrow = ncol(gvlmaDelobj), byrow= TRUE)
  StatQsMat <- matrix(unlist(StatQs), nrow = length(oddcols), byrow = TRUE)
  pvalQsMat <- matrix(unlist(pvalQs), nrow = length(oddcols), byrow = TRUE)
  rownames(AllQsMat) <- names(gvlmaDelobj)
  rownames(StatQsMat) <- names(StatQs)
  rownames(pvalQsMat) <- names(pvalQs)
  colnames(AllQsMat) <- names(AllQs[[1]])
  colnames(StatQsMat) <- names(StatQs[[1]])
  colnames(pvalQsMat) <- names(pvalQs[[1]])
  # unusual observations
  ## Fences at 3 interquartile ranges beyond the quartiles (summary()
  ## columns 2 and 5 are the 1st and 3rd quartiles).
  upperfencestat <- StatQsMat[,5] + 3*(StatQsMat[,5] - StatQsMat[,2])
  lowerfencestat <- StatQsMat[,2] - 3*(StatQsMat[,5] - StatQsMat[,2])
  upperfencepval <- pvalQsMat[,5] + 3*(pvalQsMat[,5] - pvalQsMat[,2])
  lowerfencepval <- pvalQsMat[,2] - 3*(pvalQsMat[,5] - pvalQsMat[,2])
  ## Compare every observation against its column's fences; the fence
  ## vectors are expanded row-wise into nobs x nstats matrices.
  hiStats <- Stats > matrix(upperfencestat,
                            nrow = nobs, ncol = nstats, byrow = TRUE)
  loStats <- Stats < matrix(lowerfencestat,
                            nrow = nobs, ncol = nstats, byrow = TRUE)
  hipvals <- pvals > matrix(upperfencepval,
                            nrow = nobs, ncol = nstats, byrow = TRUE)
  lopvals <- pvals < matrix(lowerfencepval,
                            nrow = nobs, ncol = nstats, byrow = TRUE)
  ## Unusual if either the statistic or its p-value falls outside a fence.
  unusualStats <- hiStats | loStats
  unusualpvals <- hipvals | lopvals
  unusualobs <- unusualStats | unusualpvals
######################################################################
  ## Printed report: the originating lm/gvlma calls, the summary table,
  ## then per-statistic tables of the flagged observations.
  cat("\nGlobal test deletion statistics.\n")
  cat("\nLinear Model:\n",
      deparse(attr(gvlmaDelobj, "lmcall")), "\n")
  cat("\ngvlma call:\n",
      deparse(attr(gvlmaDelobj, "gvlmacall")), "\n")
  cat("\n\nSummary values:\n")
  print(AllQsMat)
  ## allstats = TRUE reports all five tests, otherwise only the global one.
  if (allstats)
    statloop <- c("Global Stat", "Directional Stat1",
                  "Directional Stat2", "Directional Stat3",
                  "Directional Stat4")
  else statloop <- "Global Stat"
  statindx <- 0
  for (nm in statloop)
    {
      statindx <- statindx + 1
      unusualobsStat <- unusualobs[,statindx]
      unusualindx <- which(unusualobsStat)
      ## Two-column table for the flagged rows: "Delta <stat> (%)" and the
      ## corresponding "<stat> p-value" (names built by the paste() below).
      unusualobsprint <- data.frame(Stats[unusualindx, statindx],
                                    pvals[unusualindx, statindx],
                                    row.names = unusualindx)
      names(unusualobsprint) <- paste(c("Delta", ""),
                                      nm, c("(%)", "p-value"), sep = " ")
      cat("\n\nUnusual observations for ", nm, ":\n", sep = "")
      print(unusualobsprint)
    }
  invisible(as.data.frame(unusualobs))
}
|
94a0b5c17d2cce168a797edda3ee7629abe55e8d | 8f3bd5ef1b27757c8c16737d24cbb250e736b2f8 | /man/meshGet.mesh3d.Rd | cb66279794d23c02c2bfe642ec5e28a0fd71e04e | [
"MIT"
] | permissive | tunelipt/model3d | 5ba394a84e2ac77602c310385a7b1f92dd1c3d66 | 2a92047e7f66b3dbf8423e5008cf6e6c861a05cd | refs/heads/master | 2020-07-25T14:02:49.769011 | 2019-09-13T17:41:07 | 2019-09-13T17:41:07 | 208,315,730 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 177 | rd | meshGet.mesh3d.Rd | \name{meshGet.mesh3d}
\alias{meshGet.mesh3d}
\title{Return the ith polygon of a mesh3d.}
\usage{
meshGet.mesh3d(m, i)
}
\description{
Return the ith polygon of a mesh3d.
}
|
0462b783ac3c4242cfbcb995ce31e5d4263409fa | 8dc3e662fdcd64ea0855a67a47be6d42e53e5d0d | /R/zzz.R | 5609c8c648e8ed77e9fec7f4722991df613c552b | [
"MIT"
] | permissive | ylleonv/pack | 2914ed75bfab011af737a83a15d99a51ea5ba9c9 | cb3416a8e230cfbee5a95273c9c6a15184d2458b | refs/heads/master | 2020-12-15T09:54:41.694326 | 2020-06-26T14:10:31 | 2020-06-26T14:10:31 | 233,600,925 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 517 | r | zzz.R | ## usethis namespace: start
#' @import Rcpp
#' @export GLMadj
#' @export GLMcum
#' @export GLMseq
#' @export GLMref
#' @export Discrete_CM
#' @export Predict_Response
#' @export summary.pcglm
#' @export ReferenceF
#' @useDynLib pack, .registration = TRUE
#' @importFrom Rcpp sourceCpp
## usethis namespace: end
loadModule("exportmod", TRUE)
loadModule("cumulativemodule", TRUE)
loadModule("sequentialmodule", TRUE)
loadModule("adjacentmodule", TRUE)
loadModule("referencemodule", TRUE)
# loadModule("fishder", TRUE)
|
c60266ae2de6a8f7109d890b73dcd57f3cd678df | f3116aec6d81f41468b81fdaf9effe006ebd1d98 | /atacAnalysis/forSom_Granges_Code.R | 0bfc44d026ab65878eda4bc03cbaee2256368cf7 | [] | no_license | jk3755/miscLabScripts | fb5be5a91ea568a53c493f1494b91cc235c2abad | cae5faec96a171e6878e2de690fa13c210f78ede | refs/heads/master | 2020-04-20T10:40:27.002754 | 2019-07-18T16:10:52 | 2019-07-18T16:10:52 | 168,795,524 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 2,613 | r | forSom_Granges_Code.R |
## Install the packages you will need
source("https://bioconductor.org/biocLite.R")
biocLite("GenomicRanges", suppressUpdates = TRUE)
biocLite("EnsDb.Hsapiens.v86", suppressUpdates = TRUE) # This is the database from which you will retrieve the human gene annotations. An alternative would be to use TxDb.Hsapiens.UCSC.hg38.knownGene
biocLite("genomation", suppressUpdates = TRUE) # This package allows you to read in a .bed directly to a GRanges object
## Load the packages
library(GenomicRanges)
library(EnsDb.Hsapiens.v86)
library(genomation)
## Step 1 - load the .bed file and convert to GRanges
bedFile <- "C:\\Users\\Jordan\\Documents\\atac\\SNU61-WT-01.all_peaks.narrowPeak"
snu61Peaks <- readBed(bedFile, track.line = FALSE, remove.unusual = FALSE, zero.based = TRUE)
## Remove the entries that are not on standard chromosomes (chr1-22, X, Y), often helps prevent problems downstream
snu61Peaks <- keepStandardChromosomes(snu61Peaks, pruning.mode="coarse")
## Step 2 - Retrieve human gene annotations in GRanges format
edb <- EnsDb.Hsapiens.v86
## Annoyingly, the gene database annotates the chromosomes differently than the standard "chr1, etc...", using only a single integer
## to represent each chromosome in the GRanges object. In order to find the overlaps, you will have to edit the chromosome annotations first
seqlevelsStyle(edb) <- "UCSC"
## Now, retrieve the gene annotations from the database
hg38Genes <- genes(edb)
## Again helpful to keep only the standard entries
hg38Genes <- keepStandardChromosomes(hg38Genes, pruning.mode="coarse")
## Step 3 - Extend the ranges of the peaks in the GRanges objects (+/- 50,000-100,000 bp, depending on how far you want to search from each peak)
## Note that this will almost certainly result in ranges that extend beyond the ends of the chromosome
## This isn't a huge deal, but it will return warnings to you at some point
## There is a method to remove the out of bounds ranges but I can't find my code for it at the moment
extPeaks <- promoters(snu61Peaks, upstream = 50000, downstream = 50000, use.names=TRUE)
## Step 4 - Find the overlaps between the extended peaks and the gene annotations
## This will return a 'SortedByQueryHits' object that gives you the indices of all overlaps
## If you do View(peakGeneOverlaps), you can see a 'from' entry and a 'to' entry'
## The values in the 'from' entry refer to the indices in the first GRanges object (extPeaks)
## And the values in the 'to' entry refer to the indices in the second (hg38Genes)
peakGeneOverlaps <- findOverlaps(extPeaks, hg38Genes)
|
57c196d0a0a19293c21030899e0cf6d619051d1d | 3e77568cc98c9713cf47dc94c0d309b1d5dbd79e | /.Rprofile | 2c22753b63d15d9e4d5d812a2db380fd00b3d32c | [] | no_license | cimentadaj/lexis_plot | 2f39fb3839a153a4c3c915ae39bf33dc6ffc0e70 | f32b89810a832dbef3454c912908f940b25163c9 | refs/heads/master | 2021-06-28T00:36:19.442737 | 2020-05-24T09:28:16 | 2020-05-24T09:28:16 | 98,638,408 | 4 | 1 | null | 2019-11-07T08:48:43 | 2017-07-28T10:22:01 | R | UTF-8 | R | false | false | 210 | rprofile | .Rprofile | source("renv/activate.R")
## Raise rsconnect's deployment limits (both caps set to 3,145,728,000,
## i.e. ~3 GiB bundle size) so the app bundle is not rejected on deploy.
options(rsconnect.max.bundle.size = 3145728000,
        rsconnect.max.bundle.files = 3145728000)
## Startup reminder for anyone opening this project interactively.
cat("To deploy this app, read the section 'Deploy the application' from the README.")
|
a69b17acce64fcfe5699f056580c79970c0de243 | 3f06f78873e8cfabceff52eb2287f1e963b1f682 | /man/load_latest_forecasts_repo.Rd | 7f34a98df43e21b509d8559af656ce68fa3a1f41 | [] | no_license | reichlab/covidHubUtils | 449fdd47b4cd20fb035b520088ff2dbc21464f81 | 7258bc1b146906b31e9d31d19fd13cf73259b5a0 | refs/heads/master | 2023-05-25T02:19:54.033459 | 2023-05-22T17:06:49 | 2023-05-22T17:06:49 | 289,015,121 | 18 | 17 | null | 2023-05-22T16:53:28 | 2020-08-20T13:34:43 | HTML | UTF-8 | R | false | true | 2,155 | rd | load_latest_forecasts_repo.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/load_latest_forecasts_repo.R
\name{load_latest_forecasts_repo}
\alias{load_latest_forecasts_repo}
\title{Load the most recent forecasts of all that were submitted
on \code{forecast_dates} from a local clone of \code{reichlab/covid19-forecast-hub} repo.}
\usage{
load_latest_forecasts_repo(
file_path,
models = NULL,
forecast_dates,
locations = NULL,
types = NULL,
targets = NULL,
hub = c("US", "ECDC"),
verbose = TRUE
)
}
\arguments{
\item{file_path}{path to the data-processed folder within a local clone of the hub repo}
\item{models}{Character vector of model abbreviations.
Default all models that submitted forecasts meeting the other criteria.}
\item{forecast_dates}{A 2 dimensional list of forecast dates to retrieve forecasts.
This function will return the latest forecasts
for each sub-list of dates.
Default to \code{NULL} which would include all valid forecast dates.
The function will throw an error if all dates in this parameter are invalid forecast dates.}
\item{locations}{a vector of strings of fips code or CBSA codes or location names,
such as "Hampshire County, MA", "Alabama", "United Kingdom".
A US county location names must include state abbreviation.
Default to \code{NULL} which would include all locations with available forecasts.}
\item{types}{Character vector specifying type of forecasts to load: \code{"quantile"}
and/or \code{"point"}. Default to all valid forecast types.}
\item{targets}{character vector of targets to retrieve, for example
\code{c('1 wk ahead cum death', '2 wk ahead cum death')}.
Default to \code{NULL} which stands for all valid targets.}
\item{hub}{character vector indicating the hub from which to load forecasts.
Possible options are \code{"US"}, \code{"ECDC"} and \code{"FluSight"}.}
\item{verbose}{logical to print out diagnostic messages. Default is \code{TRUE}}
}
\description{
This function will drop rows with NULLs in value column.
}
\details{
\Sexpr[results=rd, stage=render]{lifecycle::badge("deprecated")}
Please use \code{\link[=load_forecasts_repo]{load_forecasts_repo()}} instead.
}
|
496934815ff573f4ff411498024b2e75478628d4 | 050edfa53f5ec7d76b2321c552266e0f60e4db92 | /R/fish.R | 945e833c3056badef849c7eaea4df1eebf8e91c7 | [] | no_license | placeboo/subgraph | e1ab54fabda52ed4243fdc5cdc2a348b2da6d41c | 37036807aa7bd75aeab90fe224fdd44c126fb3f9 | refs/heads/master | 2021-10-27T15:54:59.877512 | 2019-04-18T08:08:57 | 2019-04-18T08:08:57 | 107,905,890 | 0 | 1 | null | null | null | null | UTF-8 | R | false | false | 1,065 | r | fish.R | #' fish graphs
#' List all possible fish graphs based on a given set of SIX nodes.
#'
#' For every choice of four of the six nodes, all 4-cycles on those nodes are
#' built via \code{c4()}; the remaining two "tail" nodes are each connected to
#' all four cycle nodes, and to each other (edge index via \code{deToIn()}).
#' Every cycle row is crossed with the tail edges to enumerate the graphs.
#'
#' @param x The vector representing nodes; must contain exactly six nodes.
#' @return A character matrix with 7 columns, each row listing the edges of
#'   one fish graph.
#' @examples
#' fish(c(1:6))
fish <- function(x) {
  if (length(x) != 6) {
    stop("The number of nodes should be SIX!")
  }
  pair4 <- combn(x, 4)
  ## Collect the rows for each 4-node choice and bind them once at the end
  ## (the original grew a matrix with rbind() inside the loop, seeded with a
  ## dummy NA row that was stripped afterwards).
  pieces <- vector("list", ncol(pair4))
  for (i in seq_len(ncol(pair4))) {
    pair4.vec <- sort(pair4[, i])
    rst <- x[!x %in% pair4.vec]   # the two nodes left out of the 4-cycle
    c4.mat <- c4(pair4.vec)       # all 4-cycles on the chosen nodes
    ## Tail edges: each leftover node to every cycle node, plus the single
    ## edge joining the two leftover nodes themselves.
    cnct.mat <- cbind(paste(rst[1], '-', pair4.vec, sep = ''),
                      paste(rst[2], '-', pair4.vec, sep = ''),
                      rep(deToIn(rst[1], rst[2]), 4))
    ## Cross every cycle row with every tail row (same replication pattern
    ## as the original: cycle entries repeated, tail block stacked 3x).
    c4.mat2 <- matrix(rep(c4.mat, each = nrow(cnct.mat)),
                      ncol = ncol(c4.mat), byrow = FALSE)
    cnct.mat2 <- rbind(cnct.mat, cnct.mat, cnct.mat)
    pieces[[i]] <- cbind(c4.mat2, cnct.mat2)
  }
  do.call(rbind, pieces)
}
|
7d8fb5f7479d3e816c4a67ecded50fe3067da6e4 | 195032d76a6f0c0258d831bbcbc9b91dd3d085a3 | /inst/shiny/ui.R | 2e9cbf719675cf494fcff913c73bcd18fe009385 | [] | no_license | Wei-V-Zhou/KMPPc | 7bf1fd3a7f300e06f2b75aa1ea97c1dafd859963 | 2b4b8e7b09a26e55c65a4e7579f5b605eed8530e | refs/heads/master | 2021-05-27T13:03:22.220622 | 2020-09-03T05:49:04 | 2020-09-03T05:49:04 | 254,270,761 | 0 | 0 | null | null | null | null | IBM852 | R | false | false | 8,850 | r | ui.R | ######################################################
## KMPPc ##
## Interactive User Interface ##
## UI File ##
## Author:Wei Zhou, xxx ##
## Maintainer:Wei Zhou (247328181@qq.com) ##
######################################################
library("shiny")
# Define UI for K-M Plot viewer application
shinyUI(pageWithSidebar(
# Application title
headerPanel("KMPPc: K-M Plot of Pan-cancer"),
# Sidebar with controls to select a dataset and processing
sidebarPanel(
# Define main menu
wellPanel(
helpText(a("Github help docs", href = "https://github.com/Wei-V-Zhou/KMPPc", target = "_blank")),
radioButtons("MainMenu", "Main Menu",
list("Reading in dataset" = "Input",
"Preprocessing" = "Preprocess",
"K-M Plot" = "kmplot",
"Differential gene analysis (optional)" = "Difftest",
"Filtering targets (optional)" = "Filter"
))
),
# Define secondary menu
conditionalPanel(condition = "input.MainMenu == 'Input'",
wellPanel(
selectInput("CancerType", "Choose Cancer Type:",
choices = c("Breast Cancer", "Pancreatic Cancer", "Glioma")),
submitButton("Update View"),
helpText("Note: Click this when network is not very good"),
numericInput("Datasets", "Choose an dataset:", value = 10, min = 1),
# conditionalPanel(condition = "input.CancerType == 'Pancreatic Cancer'",
# numericInput("Datasets", "Choose a dataset:",
# value = 3, min = 1, max = 5)),
# conditionalPanel(condition = "input.CancerType == 'Breast Cancer'",
# numericInput("Datasets", "Choose a dataset:",
# value = 10, min = 1, max = 12)),
# conditionalPanel(condition = "input.CancerType == 'Glioma'",
# numericInput("Datasets", "Choose a dataset:",
# value = 2, min = 1, max = 6)),
helpText("Note: The value is invalid when bigger than the view items"),
textInput("GeneName", "Choose a target gene:"),
sliderInput("obs", "Choose browser numbers", min = 0, max = 1000, value = 10),
submitButton("Update View")),
wellPanel(
helpText("Save raw clinicaldata"),
selectInput("Clinicaltabletype","Choose file type",
choices = c("csv", "txt")),
downloadButton("Clinicaltable", "Download")
),
wellPanel(
helpText("Save raw exprsetdata"),
selectInput("Exprsettabletype","Choose file type",
choices = c("csv", "txt")),
downloadButton("Exprsettable", "Download")
)
),
conditionalPanel(condition = "input.MainMenu == 'Preprocess'",
wellPanel(
fileInput("file1", "Choose Clinicaldata",
accept = c("text/csv", "text/comma-separated-values, text/plain",
".csv", "text/txt", ".txt")),
fileInput("file2", "Choose Exprsetdata",
accept = c("text/csv", "text/comma-separated-values, text/plain",
".csv", "text/txt", ".txt")),
sliderInput("obser", "Choose browser numbers", min = 0, max = 100, value = 10),
# p(actionButton("Inputreadin","Read in")),
selectInput("TimeType", "Choose Survival Time Type:",
choices = c("DFS_MONTHS", "DSS_MONTHS", "OS_MONTHS", "PFS_MONTHS")),
selectInput("StatusType", "Choose Survival Status Type:",
choices = c("DFS_STATUS", "DSS_STATUS", "OS_STATUS", "PFS_STATUS")),
submitButton("Update View")
# tags$hr()
)),
conditionalPanel(condition = "input.MainMenu == 'Difftest'",
wellPanel(
helpText("Save differential expression data"),
selectInput("Difftestplottype", "Choose plot type", choices = c("pdf", "tiff", "ps")),
textInput("Difftestfilewidth", "Enter plot width", 12),
textInput("Difftestfileheight", "Enter plot height", 12),
###### ? uioutput
uiOutput("Difftestfileheightui"),
downloadButton("Difftestsaveplot"),
tags$hr(),
submitButton("Update view")
)),
conditionalPanel(condition = "input.MainMenu == 'kmplot'",
wellPanel(
helpText("Save KM plot data"),
selectInput("KMplottype", "Choose plot type", choices = c("pdf", "tiff", "ps")),
textInput("KMfilewidth", "Enter plot width", 12),
textInput("KMfileheight", "Enter plot height", 12),
# downloadButton("kmplotsaveplot"),
tags$hr(),
submitButton("Update view")
))
),
# Show the K-M Plot of Pan-cancer
mainPanel(
conditionalPanel(condition = "input.MainMenu == 'kmplot'",
tabsetPanel(
tabPanel("KMTable", tableOutput("KMtable")),
tabPanel("KMPlot", plotOutput("KMplot"))
)
),
conditionalPanel(condition = "input.MainMenu == 'Difftest'",
tabsetPanel(
tabPanel("DifftestPlot", plotOutput("Difftestplot"))
)
),
conditionalPanel(condition = "input.MainMenu == 'Preprocess'",
tabsetPanel(
tabPanel("Datareadin", tableOutput("readin")),
tabPanel("Datafastview", tableOutput("fastview")),
tabPanel("Clinicaldata", tableOutput("clinical")),
tabPanel("Exprsetdata", tableOutput("exprset"))
)
# tableOutput("clinicaldat")
# uiOutput("Inputshowinstructionui"),
# uiOutput("Inputshowsummaryui")
),
conditionalPanel(condition = "input.MainMenu == 'Input'",
checkboxInput("Inputshowinstructiontf", "Show instructions", value = T),
uiOutput("Inputshowinstructionui"),
tabsetPanel(
tabPanel("Datasets", tableOutput("datasets")),
tabPanel("Clinicaldata", tableOutput("clinicaldataview")),
tabPanel("Summary", verbatimTextOutput("summary"), tableOutput("exprsetview"))
))
)
))
# setwd("I:/R-3.6.3/shinyapp/KMPPc")
#
# # STEP 1 ĘC INSTALL RSCONNECT
# library("rsconnect")
#
# # STEP 2 ĘC AUTHORIZE ACCOUNT
# rsconnect::setAccountInfo(name = 'zhouwei',
# token = 'A0106D487B1820ED76BDD00FE9ABC3B0',
# secret = 'KrIwYDBltOEzvbfwtMGu5NjfIuOAZQICGFPam2vw')
#
# # STEP 3 ĘC DEPLOY
# shiny::runApp()
# rsconnect::deployApp(server = "shinyapps.io")
|
51c2ff5c451f3a5a123e4dbc52af65aa931ae1c1 | ba554021696c6ee0286e57cbb8990792551b03a0 | /scripts/eia_cohort_project_script.R | 991bc62b292801646cc6c41ab76d7574da1e2670 | [] | no_license | gelkouh/eia-oeconomica | 404b9aba1612f87ae586c134ff3e8f2d73d2f107 | a9fe189ef55b986dada749c03a9b3491ca73d41e | refs/heads/master | 2023-04-24T00:36:51.205538 | 2021-05-20T05:15:35 | 2021-05-20T05:15:35 | 290,599,196 | 0 | 6 | null | 2021-04-03T15:01:34 | 2020-08-26T20:35:53 | R | UTF-8 | R | false | false | 10,852 | r | eia_cohort_project_script.R | # This snippet of code is a little loop that makes my code work on your computer
root <- getwd()
while(basename(root) != "eia-oeconomica") { # this is the name of your project directory you want to use
root <- dirname(root)
}
# This line runs the script in your data.R file so that each person can have
# their data in a different place because everyone's file structure will be
# a little different
source(file.path(root, "data.R"))
# This is the specific folder we want to access data from
ddir <- file.path(ddir, 'eia cohort project')
# Loading the packages we want
if (!require(tidyverse)) install.packages('tidyverse')
library(tidyverse)
library(readxl)
if (!require(scales)) install.packages('scales')
library(scales)
if (!require(reshape2)) install.packages('reshape2')
library(reshape2)
if (!require(cowplot)) install.packages('cowplot')
library(cowplot)
if (!require(plm)) install.packages('plm')
library(plm) # masks lag from dplyr (so we need to specify dplyr::lag)
if (!require(stargazer)) install.packages('stargazer')
library(stargazer)
if(!require(rvest)) install.packages('rvest')
library(rvest)
if (!require(texreg)) install.packages('texreg')
library(texreg)
if(!require(sjPlot)) install.packages('sjPlot')
library(sjPlot)
theme_set(theme_sjplot())
# We will consider six states:
# California (CA), Texas (TX), Illinois (IL), Massachusetts (MA), New York (NY), Florida (FL)
state_list <- c('CA', 'TX', 'IL', 'MA', 'NY', 'FL')
state_code <- c('06', '12', '17', '25', '36', '48')
# COMETS
# https://www.kauffman.org/entrepreneurship/research/comets/
univ_institutional_info <- read_csv(file.path(ddir, 'spring1/univ_institutional_info_v2.csv'))
univ_system_patents <- read_csv(file.path(ddir, 'spring1/univ_system_patents_v2.csv'))
univ_org_info <- read_csv(file.path(ddir, 'spring1/univ_org_info_v2.csv'))
univ_info <- univ_institutional_info %>%
inner_join(univ_org_info, by = 'unitid') %>%
filter(stabbr %in% state_list)
univ_patents <- univ_info %>%
inner_join(univ_system_patents, by = c('org_id', 'yr')) %>%
rename(GeoName = city, year = yr) %>%
mutate(year = as.factor(year),
State = ifelse(stabbr == 'TX', 'Texas',
ifelse(stabbr == 'CA', 'California',
ifelse(stabbr == 'IL', 'Illinois',
ifelse(stabbr == 'MA', 'Massachusetts',
ifelse(stabbr == 'FL', 'Florida', 'New York')))))) %>%
group_by(instnm, GeoName, State, stabbr, year) %>%
summarize(num_patents = sum(num_patents)) %>%
group_by(GeoName, State, stabbr, year) %>%
summarize(patent_universities_count = n_distinct(instnm), num_patents = sum(num_patents))
# USPTO
# https://www.uspto.gov/web/offices/ac/ido/oeip/taf/univ/org_gr/all_univ_ag.htm
web_address <- 'https://www.uspto.gov/web/offices/ac/ido/oeip/taf/univ/org_gr/all_univ_ag.htm'
webpage_code <- read_html(web_address)
webpage_text <- html_text(html_nodes(webpage_code, '.data , .header'))
index <- 1
# index <- 26
# tail(webpage_text, -25))
for (item in webpage_text) {
webpage_text[index] <- str_trim(gsub(item, pattern = "\n", replacement = ""), side = 'both')
index <- index + 1
}
uspto_patents <- data.frame(matrix(unlist(webpage_text), ncol = 25, byrow = TRUE))
colnames(uspto_patents) <- as.character(uspto_patents[1,])
uspto_patents <- uspto_patents[-1,] %>%
select(-c('Total', 'PRE_1992')) %>%
melt(id=c('Organizational Identifier', 'State')) %>%
rename(year = variable, patent_count = value, NAME = `Organizational Identifier`, STATE = State)
# HIFLD
# https://hifld-geoplatform.opendata.arcgis.com/datasets/colleges-and-universities?geometry=25.454%2C-16.798%2C-24.819%2C72.130&selectedAttribute=NAICS_CODE
univ_locations <- read_csv(file.path(ddir, 'Colleges_and_Universities.csv'))
univ_locations <- univ_locations %>%
select(c('NAME', 'CITY', 'STATE'))
# Error introduced here: only merges on full university names (e.g., see Harvard)
# Maybe do a fuzzy match?
# Or go through by hand and edit names?
univ_uspto_patents <- uspto_patents %>%
inner_join(univ_locations, by = c('NAME', 'STATE')) %>%
filter(STATE %in% state_list) %>%
rename(GeoName = CITY) %>%
mutate(year = as.factor(year),
patent_count = as.numeric(patent_count),
State = ifelse(STATE == 'TX', 'Texas',
ifelse(STATE == 'CA', 'California',
ifelse(STATE == 'IL', 'Illinois',
ifelse(STATE == 'MA', 'Massachusetts',
ifelse(STATE == 'FL', 'Florida', 'New York')))))) %>%
group_by(GeoName, State, STATE, year) %>%
summarize(patent_universities_count = n_distinct(NAME), patent_count = sum(patent_count)) %>%
mutate(patent_universities_count = ifelse(patent_count == 0, 0, patent_universities_count)) %>%
filter(patent_universities_count > 0)
# BEA
# https://apps.bea.gov/regional/downloadzip.cfm
# Deleted last four rows of each of these files by hand in Excel (generally bad practice...)
bea_ca <- read_csv(file.path(ddir, 'CAINC6N/CAINC6N_CA_2001_2019.csv')) %>%
mutate(State = 'California')
bea_tx <- read_csv(file.path(ddir, 'CAINC6N/CAINC6N_TX_2001_2019.csv')) %>%
mutate(State = 'Texas')
bea_il <- read_csv(file.path(ddir, 'CAINC6N/CAINC6N_IL_2001_2019.csv')) %>%
mutate(State = 'Illinois')
bea_ma <- read_csv(file.path(ddir, 'CAINC6N/CAINC6N_MA_2001_2019.csv')) %>%
mutate(State = 'Massachusetts')
bea_ny <- read_csv(file.path(ddir, 'CAINC6N/CAINC6N_NY_2001_2019.csv')) %>%
mutate(State = 'New York')
bea_fl <- read_csv(file.path(ddir, 'CAINC6N/CAINC6N_FL_2001_2019.csv')) %>%
mutate(State = 'Florida')
bea_full <- rbind(bea_ca, bea_tx, bea_il, bea_ny, bea_fl) %>%
filter(GeoFIPS != '"36000"' & GeoFIPS != '"06000"' & GeoFIPS != '"48000"' &
GeoFIPS != '"17000"' & GeoFIPS != '"25000"' & GeoFIPS != '"12000"') %>%
mutate(GeoName = toupper(GeoName)) %>%
select(-c('GeoFIPS', 'Region', 'TableName', 'LineCode')) %>%
melt(id=c('GeoName', 'IndustryClassification', 'Description', 'Unit', 'State')) %>%
rename(year = variable, compensation = value) %>%
separate(GeoName, c('GeoName', 'state_code'), ', ') %>%
select(-c('state_code'))
# Merge datasets
univ_full <- bea_full %>%
inner_join(univ_uspto_patents, by = c('GeoName', 'year', 'State')) %>%
select(!c('State')) %>%
rename(state = STATE, city = GeoName, naics = IndustryClassification, naics_description = Description,
compensation_unit = Unit)
univ_naics_simple <- univ_full %>%
filter(nchar(naics) == 3)
kbi_list <- c(325, 333, 334, 335, 512, 513, 211, 324, 332, 486)
# cross check and flag with KBI list
naics_codes_kbi <- univ_naics_simple %>%
filter(naics != '...') %>%
select(c('naics', 'naics_description')) %>%
distinct() %>%
mutate(naics = as.numeric(naics)) %>%
arrange(naics) %>%
mutate(kbi = ifelse(naics %in% kbi_list, 1, 0)) %>%
select(-naics_description)
`%not_in%` <- purrr::negate(`%in%`)
univ_kbi <- univ_naics_simple %>%
filter(naics != '...') %>%
filter(compensation %not_in% c('(D)', '(NA)', '(NM)')) %>%
mutate(naics = as.numeric(naics), compensation = as.numeric(compensation)) %>%
left_join(naics_codes_kbi, by = 'naics') %>%
select(-c('naics_description', 'compensation_unit')) %>%
group_by(year, city, state) %>%
mutate(total_comp = sum(compensation)) %>%
group_by(year, city, state, kbi) %>%
mutate(kbi_comp = sum(compensation)) %>%
filter(kbi == 1) %>%
ungroup() %>%
mutate(kbi_share = kbi_comp/total_comp) %>%
select(c('year', 'city', 'state', 'patent_universities_count', 'patent_count', 'kbi_comp', 'kbi_share')) %>%
distinct() %>%
filter(kbi_comp > 0) %>%
mutate(ln_kbi_comp = log(kbi_comp)) %>%
mutate(kbi_comp_mil = kbi_comp/1000) %>%
mutate(ln_patent_count = log(patent_count))
univ_kbi_pd <- pdata.frame(x = univ_kbi, index = c('city', 'year'))
# REGRESSIONS
lm_fe_1 <- plm(ln_kbi_comp ~ ln_patent_count,
data = univ_kbi_pd, model = "within", effect = "twoways")
lm_fe_2 <- plm(kbi_comp_mil ~ patent_count,
data = univ_kbi_pd, model = "within", effect = "twoways")
lm_plot_2 <- lm(kbi_comp_mil ~ patent_count + as.factor(year) + as.factor(city), data = univ_kbi)
lm_fe_3 <- plm(kbi_comp_mil ~ ln_patent_count,
data = univ_kbi_pd, model = "within", effect = "twoways")
lm_plot_3 <- lm(kbi_comp_mil ~ ln_patent_count + as.factor(year) + as.factor(city), data = univ_kbi)
reg_table <- texreg(list(lm_fe_2, lm_fe_3), include.ci = FALSE, digits = 3,
custom.coef.map = list('patent_count'= "UnivPatents",
'ln_patent_count' = "ln(UnivPatents)"),
custom.header = list('KBIComp' = 1:2),
custom.model.names = c('(1)', '(2)'),
caption = 'Regression analysis',
include.adjrs = FALSE)
write.table(reg_table, file.path(root, 'docs', 'reg_table.tex'), col.names = FALSE, row.names = FALSE, quote = FALSE)
# FIGURES
model_fig1 <- plot_model(lm_plot_2, type = "pred", terms = "patent_count",
title = 'Model Predicted Knowledge-based industry compensation vs. University patent count | Fixed effects',
axis.title = c('University patent count','Knowledge-based industry compensation in millions'))
ggsave(file.path(root, 'docs', 'model_fig1.png'),
plot = model_fig1, width = 10, height = 7)
model_fig2 <- plot_model(lm_plot_3, type = "pred", terms = "ln_patent_count",
title = 'Model Predicted Knowledge-based industry compensation vs. University patent count | Fixed effects',
axis.title = c('University patent count (log)','Knowledge-based industry compensation in millions'))
ggsave(file.path(root, 'docs', 'model_fig2.png'),
plot = model_fig2, width = 10, height = 7)
patents_fig_df <- univ_kbi %>%
group_by(state, year) %>%
summarise(patent_count)
patents_fig <- ggplot(data = patents_fig_df, aes(x = year, y = patent_count, group = state, color = state)) +
geom_smooth(se = FALSE) +
xlab('Year') +
ylab('University patent count') +
ggtitle('University patent counts in cities in sample')
ggsave(file.path(root, 'docs', 'patents_fig.png'),
plot = patents_fig, width = 7, height = 5)
kbi_comp_df <- univ_kbi %>%
group_by(state, year) %>%
summarise(kbi_comp_mil)
kbi_comp_fig <- ggplot(data = kbi_comp_df, aes(x = year, y = kbi_comp_mil, group = state, color = state)) +
geom_smooth(se = FALSE) +
xlab('Year') +
ylab('Knowledge-based industry compensation in millions') +
ggtitle('Knowledge-based industry compensation in cities in sample')
ggsave(file.path(root, 'docs', 'kbi_comp_fig.png'),
plot = kbi_comp_fig, width = 7, height = 5)
|
e684dc17f3a4dc73e8fc4a41d1f620357dac89c9 | 568509efbe527fe3bc862f717014ab3e8bc6d18a | /R/R_scripts/energy.R | ec187d872fc77b8309ca450673acd9cd1d6edefb | [] | no_license | Silveryu/gng.r | 9ab43426d86af21ec52534b0277608c9362a95d6 | 8876c7578fd1584c0e2d6a6ae7f1b1bd45b1c16c | refs/heads/master | 2023-02-02T07:48:15.999468 | 2020-12-16T19:41:42 | 2020-12-16T19:41:42 | 192,547,448 | 3 | 0 | null | null | null | null | UTF-8 | R | false | false | 2,455 | r | energy.R | standard_entropy <- function(cluster_points)
{
  # Differential entropy of a full-covariance multivariate Gaussian fitted to
  # the cluster: d/2 * log(2*pi*e) + log(det(Sigma)) / 2, where Sigma is the
  # maximum-likelihood covariance returned by cov_mat().
  d <- ncol(cluster_points)
  sigma <- cov_mat(cluster_points)
  det_sigma <- det(sigma)
  # floor a singular determinant so log() never sees zero
  if (det_sigma == 0) {
    det_sigma <- 1.0e-32
  }
  d / 2 * log(2 * pi * exp(1)) + log(det_sigma) / 2
}
sphere_entropy <- function(cluster_points)
{
  # Entropy of a spherical (isotropic) Gaussian model of the cluster:
  # d/2 * log(2*pi*e/d) + d/2 * log(trace(Sigma)).
  d <- ncol(cluster_points)
  trace_sigma <- cov_mat_trace(cluster_points)
  # floor the trace so log() never sees zero
  if (trace_sigma == 0) {
    trace_sigma <- 1.0e-32
  }
  d / 2 * log(2 * pi * exp(1) / d) + d / 2 * log(trace_sigma)
}
diagonal_entropy <- function(cluster_points)
{
  # Entropy of a Gaussian with diagonal covariance: the determinant reduces
  # to the product of the diagonal entries of the ML covariance matrix.
  d <- ncol(cluster_points)
  diag_det <- prod(diag(cov_mat(cluster_points)))
  # floor so log() never sees zero
  if (diag_det == 0) {
    diag_det <- 1.0e-32
  }
  d / 2 * log(2 * pi * exp(1)) + log(diag_det) / 2
}
cluster_energy <- function(cluster_entropy, cluster_npoints, npoints)
{
  # Weighted contribution of one cluster to the total CEC cost:
  # p * (H(cluster) - log(p)), where p is the cluster's share of all points.
  share <- cluster_npoints / npoints
  share * (cluster_entropy - log(share))
}
cov_mat <- function(cluster_points)
{
  # Maximum-likelihood covariance matrix of the cluster, i.e. normalized by
  # n rather than n - 1 (unlike stats::cov).
  #
  # cluster_points: numeric matrix (one observation per row); a data frame
  # is now coerced with as.matrix() — the original per-row loop produced a
  # 1 x d row for data-frame input and failed on the outer product.
  #
  # Vectorized replacement of the interpreted loop: center the columns once,
  # then Sigma = t(X) %*% X / n via crossprod().
  x <- as.matrix(cluster_points)
  n <- nrow(x)
  centered <- sweep(x, 2, colMeans(x), "-")
  # unname() so the result carries no dimnames, matching the original output
  unname(crossprod(centered) / n)
}
cov_mat_trace <- function(cluster_points)
{
  # Trace of the ML covariance matrix, i.e. the mean squared Euclidean
  # distance of the observations from the cluster centroid.
  #
  # Returns a plain scalar. (The original accumulated `p %*% p`, a 1 x 1
  # matrix, and therefore returned a 1 x 1 matrix; callers only use the
  # value in arithmetic and comparisons, so a scalar is a drop-in
  # replacement.) Vectorized: no interpreted per-row loop.
  x <- as.matrix(cluster_points)
  centered <- sweep(x, 2, colMeans(x), "-")
  sum(centered^2) / nrow(x)
}
cec_energy <- function(dataset, clustering, entropy_func)
{
  # Total cross-entropy clustering (CEC) energy of a partition:
  # sum over clusters of p_i * (H_i - log(p_i)).
  #
  # dataset:      matrix (or data frame), one observation per row
  # clustering:   vector of cluster labels, one per row of `dataset`
  # entropy_func: e.g. standard_entropy / sphere_entropy / diagonal_entropy
  #
  # Fixes vs. the original:
  # * removed the unused `dimension` local;
  # * subset with drop = FALSE so a single-row cluster stays a matrix
  #   (previously it collapsed to a vector and dim() returned NULL).
  npoints <- nrow(dataset)
  energy <- 0
  for (label in unique(clustering)) {
    cluster_points <- dataset[clustering == label, , drop = FALSE]
    cluster_entropy <- entropy_func(cluster_points)
    energy <- energy + cluster_energy(cluster_entropy, nrow(cluster_points), npoints)
  }
  return(as.numeric(energy))
}
|
abcb49e2f74ed858025a901a95533ce44fbf125e | 854f3f6ae97c85ca054c3e1c801ff1ef8badcdf2 | /R/plotCoef.R | b56809434394318c6a8b50866a77531d80125591 | [] | no_license | cran/glmnet | 5d24e2d1ec9e4ca4ae17aeb28781cbe163c3523c | 625f6ca6e3132ce730563ce34233c37f347754c8 | refs/heads/master | 2023-09-01T20:24:37.049612 | 2023-08-22T03:10:09 | 2023-08-22T05:30:40 | 17,696,436 | 64 | 99 | null | 2017-11-09T01:18:04 | 2014-03-13T04:53:09 | Fortran | UTF-8 | R | false | false | 1,566 | r | plotCoef.R | plotCoef=function(beta,norm,lambda,df,dev,label=FALSE,xvar=c("norm","lambda","dev"),xlab=iname,ylab="Coefficients",...){
  ##beta should be in "dgCMatrix" format
  # Coefficient-profile plot for a glmnet path: one line per nonzero
  # coefficient across the regularization path, with a top axis showing the
  # number of nonzero coefficients (df).
  # Keep only rows (variables) that are nonzero somewhere along the path.
  which=nonzeroCoef(beta)
  nwhich=length(which)
  switch(nwhich+1,#we add one to make switch work
         "0"={
           warning("No plot produced since all coefficients zero")
           return()
         },
         "1"=warning("1 or less nonzero coefficients; glmnet plot is not meaningful")
         )
  beta=as.matrix(beta[which,,drop=FALSE])
  xvar=match.arg(xvar)
  # Choose the x-axis variable; `iname` feeds the lazily evaluated default
  # xlab=iname in the signature, and `approx.f` controls the step-function
  # interpolation of df below (f=1: next smaller lambda; f=0 for log-lambda).
  switch(xvar,
         "norm"={
           index=if(missing(norm))apply(abs(beta),2,sum)else norm
           iname="L1 Norm"
           approx.f=1
         },
         "lambda"={
           index=log(lambda)
           iname="Log Lambda"
           approx.f=0
         },
         "dev"= {
           index=dev
           iname="Fraction Deviance Explained"
           approx.f=1
         }
         )
  # Only supply type="l" when the caller did not pass `type` via `...`,
  # otherwise matplot() would receive the argument twice.
  dotlist=list(...)
  type=dotlist$type
  if(is.null(type))
    matplot(index,t(beta),lty=1,xlab=xlab,ylab=ylab,type="l",...)
  else matplot(index,t(beta),lty=1,xlab=xlab,ylab=ylab,...)
  atdf=pretty(index)
  ### compute df by interpolating to df at next smaller lambda
  ### thanks to Yunyang Qian
  prettydf=approx(x=index,y=df,xout=atdf,rule=2,method="constant",f=approx.f)$y
  #  prettydf=ceiling(approx(x=index,y=df,xout=atdf,rule=2)$y)
  # Top axis: df (number of nonzero coefficients) at pretty x positions.
  axis(3,at=atdf,labels=prettydf,tcl=NA)
  # Optionally label each curve with its variable index at the end of the
  # path (left end for the lambda axis, right end otherwise).
  if(label){
    nnz=length(which)
    xpos=max(index)
    pos=4
    if(xvar=="lambda"){
      xpos=min(index)
      pos=2
    }
    xpos=rep(xpos,nnz)
    ypos=beta[,ncol(beta)]
    text(xpos,ypos,paste(which),cex=.5,pos=pos)
  }
}
|
d3f6f252736bef08d50443812d774bf91589f88e | d582b1d42f2c548abf6b032f04b5aeefb48d848c | /man/bic.plot.alignment.distribution.Rd | 3a9ebf273eb1030fd05ebf617cba1fc46288612a | [] | no_license | caitlinjones/bicrnaseq | 82dc6dacb7ad44af1637c449525d6e13cbb3a0ba | 3a74d39c329ab19aa73d2f26eb2d678d111f20fd | refs/heads/master | 2021-05-23T05:46:24.178527 | 2018-04-28T00:00:27 | 2018-04-28T00:00:27 | 94,919,835 | 0 | 1 | null | 2017-06-20T17:54:02 | 2017-06-20T17:54:02 | null | UTF-8 | R | false | true | 1,395 | rd | bic.plot.alignment.distribution.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/bic_plots.R, R/tmp_bic_plots.R
\name{bic.plot.alignment.distribution}
\alias{bic.plot.alignment.distribution}
\alias{bic.plot.alignment.distribution}
\title{Plot distibution of alignment across different genomic locations}
\usage{
bic.plot.alignment.distribution(dat, pct = FALSE, col.pal = "Set3",
file = NULL)
bic.plot.alignment.distribution(dat, pct = FALSE, col.pal = "Set3",
file = NULL)
}
\arguments{
\item{dat}{data frame consisting of data from CollectRNASeqMetrics
output}
\item{pct}{plot percentages}
\item{col.pal}{name of color palette; must be from list of RColorBrewer palettes
Default: "Set3"}
\item{file}{PDF file to which plot should be saved (optional)}
\item{horizontal}{logical indicating that bars should be horizontal; Default: TRUE}
\item{dat}{data frame consisting of data from CollectRNASeqMetrics
output}
\item{col.pal}{name of color palette; must be from list of RColorBrewer palettes
Default: "Set3"}
\item{file}{PDF file to which plot should be saved (optional)}
\item{pct}{plot percentages}
}
\description{
Bar chart showing for each sample the distribution of read
alignments across ribosomal, coding, UTR, intronic and
intergenic bases
Bar chart showing for each sample the distribution of read
alignments across ribosomal, coding, UTR, intronic and
intergenic bases
}
|
73bc30048250ee30a47af2619de7e60d2321e13f | c5a5548c445ebc98b2c5b7185d1fe9461de49464 | /R/norm.R | fa97ebdf236f70b3a21d78ee3301053924a4e9f5 | [
"MIT"
] | permissive | linnykos/utilities | e54f6b7bfad2a9d341384e73001b315939badec5 | 1a0d85a10fb885804d3313d154ec877d7e55cfc9 | refs/heads/master | 2023-08-18T04:50:02.840262 | 2021-10-05T16:45:54 | 2021-10-05T16:45:54 | 330,321,029 | 1 | 0 | null | null | null | null | UTF-8 | R | false | false | 101 | r | norm.R | .l2norm <- function(vec){sqrt(sum(vec^2))}
# Operator (spectral) norm of `mat`: its largest singular value, taken from a
# rank-1 truncated SVD (`.svd_truncated` is defined elsewhere in this package).
.opnorm <- function(mat){.svd_truncated(mat, K = 1)$d[1]}
db5c784e410e8cfe0f55c4c4caac04da3e1add28 | 2f8eadc3086c263afdf2784e229f7e80a7dcf49e | /RMark/man/release.gof.Rd | 5cf9e27a02c0af8e048d22fcecc31a8c5f362713 | [] | no_license | wchallenger/RMark | 3edb66cbc924fbc37feaa53d22d99e8ac83ee47a | 636a3a7a6c5ab5292c3a249a1b41dab580dda8ba | refs/heads/master | 2021-01-17T22:35:11.482847 | 2012-01-27T17:16:51 | 2012-01-27T17:16:51 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 1,089 | rd | release.gof.Rd | \name{release.gof}
\alias{release.gof}
\title{Runs RELEASE for goodness of fit test}
\usage{
release.gof(data, invisible = TRUE,
title = "Release-gof", view = FALSE)
}
\arguments{
\item{data}{processed RMark data}
\item{invisible}{if TRUE, RELEASE run window is hidden
from view}
\item{title}{title for output}
\item{view}{if TRUE, shows release output in a viewer
window}
}
\value{
results: a dataframe giving chi-square, degrees of
freedom and P value for TEST2, TEST3 and total of tests
}
\description{
Creates input file for RELEASE with the specified data,
runs RELEASE and extracts the summary results for TEST2
and TEST3. Output file is named Releasennn.tmp where nnn
is an increasing numeric value to create a unique
filename.
}
\examples{
data(dipper)
dipper.processed=process.data(dipper,groups=("sex"))
# The following is commented out because it does not run
# under check for some reason; very weird error lurking somewhere
#release.gof(dipper.processed)
}
\author{
Jeff Laake
}
\keyword{utility}
|
a664e76b04a4f3f025fdff70b47c7b8204b8d42b | c7b2e2c32b78821e76194c13d02a0c52d8ea8433 | /man/ff.Rd | 495f6808af3962e562d9cf12e2669a15c5a9f15e | [] | no_license | XixuHu/SC19090 | bfb4c2984a97a36e74021e0c98a352c00fc0f0cd | bae290717feb215f81c462dd679937430184b58a | refs/heads/master | 2020-12-02T02:36:53.317559 | 2019-12-31T04:47:04 | 2019-12-31T04:47:04 | 230,695,837 | 0 | 0 | null | null | null | null | UTF-8 | R | false | true | 378 | rd | ff.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/ex.R
\name{ff}
\alias{ff}
\title{Compute 2*Gamma((k+1)/2) / (sqrt(pi*k)*Gamma(k/2)) * integral of (1+u^2/k)^(-(k+1)/2) du from 0 to c[k]}
\usage{
ff(k, a)
}
\arguments{
\item{k}{integer}
\item{a}{real number}
}
\value{
the numerical integration
}
\description{
c[k]=sqrt(frac(a^2*k,k+1-a^2))
}
|
10f9732dcd26faef7124489d07f79902b85a252f | 8377b2ce26626da41d9f69cb2319ce46de6cebfb | /man/eur.Rd | 393365eb236db40c519c6e58473d3f5d35ce26c7 | [] | no_license | koattila95/ultimateexchanger | d0882e55f9236c454cd06ecde0bd0d62f960fa9a | f42743d7118fc8a0bc2f09ec75d01a88ac2bc26e | refs/heads/master | 2022-07-10T02:32:12.918639 | 2020-05-12T20:57:56 | 2020-05-12T20:57:56 | 263,442,289 | 0 | 0 | null | null | null | null | UTF-8 | R | false | true | 270 | rd | eur.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/eur.R
\name{eur}
\alias{eur}
\title{Converts numeric values into euro format}
\usage{
eur(value)
}
\arguments{
\item{value}{numeric}
}
\description{
Converts numeric values into euro format
}
|
24c588d1643dca5c241ce331ca1874a6a8889172 | aa54763f4c423db0376d0c676113910e0614c398 | /R/figure01.R | 424e9988af30558cd74fb892b8a51fc844e49ca1 | [] | no_license | nemochina2008/tvx | 31376a8394d326b1746f357bef4be4e997f93261 | b81f389711190e19f23ec8e0e5a548aa8e6c5229 | refs/heads/master | 2021-06-19T12:46:54.578792 | 2016-08-18T12:14:43 | 2016-08-18T12:14:43 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 9,126 | r | figure01.R | ### environmental stuff -----
## clear workspace
rm(list = ls(all = TRUE))
## load packages
lib <- c("foreach", "MODIS", "Rsenal", "latticeExtra", "reshape2", "grid",
"gridBase", "mapview", "dplyr")
Orcs::loadPkgs(lib)
## load functions
source("R/movingModel.R")
source("R/funs.R")
source("R/visKili.R")
### lst -----
## import files (daytime)
fls_lst <- list.files("data/MOD11A2.005/qc",
pattern = "^MOD11A2.*_Day_1km.tif$", full.names = TRUE)
fls_lst <- fls_lst[grep("2011001", fls_lst)[1]:grep("2016121", fls_lst)]
rst_lst <- stack(fls_lst) - 273.15
mat_lst <- as.matrix(rst_lst)
dts_lst <- MODIS::extractDate(fls_lst)$inputLayerDates
### ndvi -----
# ## import files
# fls_ndvi <- list.files("data/MCD09Q1.006/ndvi",
# pattern = "^MCD09Q1.*.tif$", full.names = TRUE)
#
# fls_ndvi <- fls_ndvi[grep("2011001", fls_ndvi)[1]:grep("2016121", fls_ndvi)]
#
# rst_ndvi <- stack(fls_ndvi)
# dts_ndvi <- MODIS::extractDate(fls_ndvi)$inputLayerDates
#
# ## remove unavailable lst files
# rst_ndvi <- rst_ndvi[[-which(!dts_ndvi %in% dts_lst)]]
#
# ## resample ndvi
# rst_ndvi_res <- resample(rst_ndvi, rst_lst)
# mat_ndvi_res <- raster::as.matrix(rst_ndvi_res)
## import resampled files
fls_ndvi_res <- list.files("data/MCD09Q1.006/ndvi/res",
pattern = "^MCD09Q1.*.tif$", full.names = TRUE)
fls_ndvi_res <- fls_ndvi_res[grep("2011001", fls_ndvi_res)[1]:
grep("2016121", fls_ndvi_res)]
rst_ndvi_res <- stack(fls_ndvi_res)
mat_ndvi_res <- as.matrix(rst_ndvi_res)
### study area -----
# rst_kili <- kiliAerial(projection = "+init=epsg:4326", type = "google")
# rst_kili <- writeRaster(rst_kili, "data/kili_ll", format = "GTiff",
# overwrite = TRUE)
rst_kili <- stack("data/kili_ll.tif")
## plots
shp_plots <- readOGR("data/station_data", p4s = "+init=epsg:21037",
layer = "PlotPoles_ARC1960_mod_20140807_final")
shp_plots <- subset(shp_plots, PoleType == "AMP")
dat_select <- readRDS("data/results/ndvimax_rmse_7by7_area_terra.rds")
dat_select$habitat <- substr(dat_select$PlotID, 1, 3)
dat_select$habitat[dat_select$PlotID == "mch0"] <- "fpo"
dat_select$habitat[dat_select$PlotID == "mwh0"] <- "fpd"
dat_select %>%
filter(n >= 10) %>%
mutate(ratio = rmse / n * (-1)) %>%
group_by(habitat) %>%
top_n(n = 2, wt = ratio) %>%
data.frame() -> dat_sel
shp_plots <- shp_plots[shp_plots$PlotID %in% dat_sel$PlotID, ]
shp_plots <- spTransform(shp_plots, CRS = CRS("+init=epsg:4326"))
### correlation -----
# loop over matrix rows (i.e., pixels)
mat_cor <- foreach(k = 1:nrow(mat_lst), .combine = "rbind") %do% {
# if enough valid values are available, compute linear regression metrics
dat <- data.frame(y = mat_lst[k, ], x = mat_ndvi_res[k, ])
if (sum(complete.cases(dat)) >= 23) {
dat <- dat[complete.cases(dat), ]
r <- cor(dat$x, dat$y)
mod <- lm(y ~ x, data = dat)
# return metrics only if highly significant (p < 0.001)
data.frame(n = sum(complete.cases(dat)),
r = r, p = Orcs::pvalue(mod))
} else {
return(NA)
}
}
# write r values to raster
rst_r <- writeRaster(setValues(rst_lst[[1]], mat_cor[, 2]),
filename = "data/results/ndvi_lst_r.tif",
format = "GTiff", overwrite = TRUE)
rst_r <- trim(projectRaster(rst_r, crs = "+init=epsg:4326"))
rst_p <- writeRaster(setValues(rst_lst[[1]], mat_cor[, 3]),
filename = "data/results/ndvi_lst_p.tif",
format = "GTiff", overwrite = TRUE)
rst_p <- trim(projectRaster(rst_p, crs = "+init=epsg:4326"))
rst_ndvi_mu <- trim(projectRaster(calc(rst_ndvi_res, fun = function(x) {
mean(x, na.rm = TRUE)
}), crs = "+init=epsg:4326"))
rst_robust <- overlay(rst_r, rst_ndvi_mu, fun = function(x, y) {
x[y[] < 0.15] <- NA
return(x)
})
rst_r_sig <- overlay(rst_robust, rst_p, fun = function(x, y) {
x[y[] >= .001] <- NA
return(x)
})
clr <- Rsenal::envinmrPalette(200)
cnt_r_sig <- rst_r_sig
# ## smooth significant areas, discard majority of island pixels
# cnt_r_sig <- focal(cnt_r_sig, fun = function(x) {
# if (sum(is.na(x)) > 3) NA else mean(x, na.rm = TRUE)
# }, w = matrix(1/9, nc = 3, nr = 3), pad = FALSE)
# cnt_r_sig[1, ] <- rst_r_sig[1, ]
# cnt_r_sig[, 1] <- rst_r_sig[, 1]
# cnt_r_sig[nrow(cnt_r_sig), ] <- rst_r_sig[nrow(rst_r_sig), ]
# cnt_r_sig[, ncol(cnt_r_sig)] <- rst_r_sig[, ncol(rst_r_sig)]
cnt_r_sig[is.na(cnt_r_sig[])] <- -999
cnt_r_sig[cnt_r_sig[] > -999] <- NA
cnt_r_sig <- overlay(cnt_r_sig, rst_ndvi_mu, fun = function(x, y) {
x[y[] < 0.15 | is.na(y[])] <- NA
return(x)
})
spy_r_sig <- rasterToPolygons(cnt_r_sig)
spy_r_dis <- rasterToPolygons(cnt_r_sig, dissolve = TRUE)
### visualize -----
## study area
scale <- list("SpatialPolygonsRescale", layout.scale.bar(), scale = 0.08998623,
offset = c(37.05, -3.38), fill = c("transparent", "black"))
text1 = list("sp.text", c(37.05, -3.36), "0", cex = .5, font = 2)
text2 = list("sp.text", c(37.16, -3.36), "10 km", cex = .5, font = 2)
arrow <- list("SpatialPolygonsRescale", layout.north.arrow(type = 1),
offset = c(37, -3.41), scale = .075)
p_kili <- spplot(rst_robust, col.regions = "transparent",
scales = list(draw = TRUE, cex = .6,
y = list(rot = 90)), colorkey = FALSE,
sp.layout = list(rgb2spLayout(rst_kili, c(.01, .998)),
scale, text1, text2, arrow,
list("sp.text", loc = c(37.02, -2.86),
txt = "a)", font = 2, cex = .6,
adj = c(.1, 1), col = "black")))
clr_pts <- viridisLite::plasma(13)
names(clr_pts) <- sortElevation(df = FALSE)
shp_plots$habitat <- substr(shp_plots$PlotID, 1, 3)
shp_plots$habitat <- factor(shp_plots$habitat, levels = sortElevation(FALSE))
p_pts <- spplot(shp_plots, "habitat", col.regions = clr_pts, pch = 21) +
latticeExtra::layer(sp.points(shp_plots, pch = 21, cex = 1.1, col = "black"))
p_kili <- p_kili +
latticeExtra::as.layer(p_pts)
## topographic map
p_topo <- visKili(cex = .6, lwd = .1, ext = rst_kili)
## standalone .tiff version
tiff("vis/figure01.tiff", width = 19, height = 9, units = "cm", res = 300,
compression = "lzw")
plot.new()
# add image of study area
vp0 <- viewport(-.025, -.075, .55, 1, just = c("left", "bottom"))
pushViewport(vp0)
print(p_kili, newpage = FALSE)
# add topographic map
downViewport(trellis.vpname("figure"))
vp_topo <- viewport(x = .65, y = .6, just = c("left", "bottom"),
width = .425, height = .475)
pushViewport(vp_topo)
print(p_topo, newpage = FALSE)
# add points legend
upViewport()
vp_key <- viewport(x = 0, y = 1.075, width = 1, height = .1,
just = c("left", "bottom"))
pushViewport(vp_key)
draw.key(key = list(points = list(pch = 21, col = "black",
fill = rev(clr_pts)[c(1, 8, 2, 9, 3, 10, 4, 11, 5, 12, 6, 13, 7)]),
text = list(c("mai", "fod", "sav", "fpo", "cof", "fpd", "hom", "fer", "gra", "fed", "flm",
"hel", "foc"), cex = .6), columns = 7, between = .6,
between.columns = 1), draw = TRUE)
grid.text("Habitat type", .5, 1.6, gp = gpar(fontface = "bold", cex = .7))
# add raster with r values
upViewport(0)
vp1 <- viewport(.435, -.075, .55, 1, just = c("left", "bottom"))
pushViewport(vp1)
print(spplot(rst_robust,
at = seq(-.85, .85, .01), col.regions = clr, colorkey = FALSE,
scales = list(draw = TRUE, cex = .6,
y = list(col = "transparent", rot = 90)),
par.settings = list(panel.background=list(col="grey80")),
sp.layout = list("sp.text", loc = c(37.02, -2.86),
txt = "b)", font = 2, cex = .6,
adj = c(.1, 1), col = "black")),
newpage = FALSE)
# add colorkey
downViewport(trellis.vpname("figure"))
vp2 <- viewport(0, 1.075, 1, .1, just = c("left", "bottom"))
pushViewport(vp2)
draw.colorkey(key = list(labels = list(cex = .6), col = clr,
at = seq(-.85, .85, .01), width = .6, height = .75,
space = "top"), draw = TRUE)
grid.text("Pearson's r", .5, 1.6, gp = gpar(fontface = "bold", cex = .7))
# add hatches
upViewport()
par(new = TRUE, fig = gridFIG(), mai = c(0, 0, 0, 0))
plot(spy_r_sig, border = "transparent", bg = "transparent", density = 10,
col = "grey50", xlim = c(xmin(rst_r_sig), xmax(rst_r_sig)), xaxs = "i",
yaxs = "i", ylim = c(ymin(rst_r_sig), ymax(rst_r_sig)))
# add contour lines
par(new = TRUE, fig = gridFIG(), mai = c(0, 0, 0, 0))
plot(spy_r_dis, border = "grey30", bg = "transparent", lwd = 1.2,
xlim = c(xmin(rst_r_sig), xmax(rst_r_sig)), xaxs = "i",
yaxs = "i", ylim = c(ymin(rst_r_sig), ymax(rst_r_sig)))
# add black margin
grid.rect(gp = gpar(fill = "transparent"))
dev.off()
|
aa9affc84977349fa95bfa739e406697f503ff4f | c27c86243549be08eb01144a12f709baa188eb86 | /models.R | 5afb9498b262b1c48a299233cef7e4324bbf31f5 | [] | no_license | Beaudelair/Golf | 57585598d96e3578363ae9e6368b2318a8347b9b | c1e6df2a2eda42b5e5d41890559e321b3fe62ebb | refs/heads/master | 2020-05-21T07:20:04.944659 | 2019-05-29T18:53:47 | 2019-05-29T18:53:47 | 185,222,080 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 15,651 | r | models.R |
# Leo's models.
# In this script we do the data exploration and visualization.
#
# Package attach order is kept as-is on purpose: MASS masks dplyr::select(),
# and the later library(dplyr) restores the dplyr version, which the rest of
# the script relies on.
library(boot)
library(tidyverse)
library(caret)
library(neuralnet)
library(MASS)
library(plotly)
library(dplyr)
library(randomForest)
library(hablar)
library(class)
library(FactoInvestigate)
library(factoextra)
library(rnaturalearth)
library(xml2)
library(rvest)
library(stringr)
library(XML)
library(twitteR)
library(purrr)
library(tidytext)
library(dplyr)
library(tidyr)
library(lubridate)
library(scales)
library(broom)
library(ggplot2)
library(tidyverse)
library(knitr)
# BUGFIX: the package is called "FactoMineR", not "FactorMineR" (the old
# spelling made install.packages() fail). Also only install when missing
# instead of re-installing on every run of the script.
if (!requireNamespace("FactoMineR", quietly = TRUE)) {
  install.packages("FactoMineR")
}
#Create dummy for victory
# Load the prepared golf data set (dbfinal1) produced by the data-prep script.
load("~/Golf/Golf/dbfinal1.RData")
#stepwise selection logistic
# Split the data into training and test set
dbfinal2 <- na.omit(dbfinal1)
set.seed(123)
# Stratified 75/25 split on the response lag_vic (caret).
training.samples <- dbfinal2$lag_vic %>%
createDataPartition(p = 0.75, list = FALSE)
train.data <- dbfinal2[training.samples, ]
test.data <- dbfinal2[-training.samples, ]
# Full logistic model, then backward/forward stepwise selection by AIC.
full.model <- glm(lag_vic ~ money + putts + distance + score+victories+ cuts+rounds+approach+size+age, data = train.data, family = binomial) %>%
stepAIC(trace = FALSE)
# Summarize the final selected model
summary(full.model)
# Make predictions
# NOTE(review): predictions are made on the TRAINING data, so the accuracy
# below is in-sample (optimistic), not a test-set estimate.
probabilities <- full.model %>% predict(train.data, type = "response")
predicted.classes <- ifelse(probabilities > 0.5, "1", "0")
# Model accuracy
mean(as.numeric(predicted.classes)==train.data$lag_vic)
cm<-confusionMatrix(data=as.factor(predicted.classes),
reference=train.data$lag_vic)
cm$table
# Random forest on the same predictors.
set.seed(123)
fit <- randomForest(as.factor(lag_vic) ~ money + putts + distance + score+victories+ cuts+rounds+approach+size+age,
mtry = 2,
data = train.data, na.action = na.roughfix)
varImpPlot(fit)
# Make predictions
probabilities <- fit %>% predict(test.data)
# Model accuracy
# NOTE(review): bug - this line still uses predicted.classes from the
# LOGISTIC model above, so it is not the random-forest accuracy. The RF
# test-set predictions are in `probabilities`.
mean(as.numeric(predicted.classes)==train.data$lag_vic)
cm<-confusionMatrix(data=probabilities,
reference=test.data$lag_vic)
library(class)
# NOTE(review): `train.data['lag_vic', drop = TRUE]` is single-index
# data-frame subsetting with a drop argument - verify it actually extracts
# the lag_vic column as a vector; train.data[["lag_vic"]] would be explicit.
cl = train.data['lag_vic', drop = TRUE]
# NOTE(review): class::knn() has the signature knn(train, test, cl, k) and
# does NOT accept a formula - this call as written errors. The caret-based
# kNN below is the working version.
pr <- knn(lag_vic ~ money,train = train.data, test = test.data, cl, k=3)
# caret kNN with 10-fold CV repeated 3 times, centering/scaling predictors,
# tuning k over 10 candidate values.
trctrl <- trainControl(method = "repeatedcv", number = 10, repeats = 3)
set.seed(3333)
knn_fit <- train(lag_vic ~ money + score + victories + cuts + rounds + size + age, data = train.data, method = "knn",
trControl=trctrl,
preProcess = c("center", "scale"),
tuneLength = 10)
knn_fit
test_pred <- predict(knn_fit, newdata = test.data)
cm<-confusionMatrix(data=test_pred,
reference=test.data$lag_vic)
cm$table
# Recode the response: numeric copy for the PCA input, factor for modeling.
dbfinal2$lag_vic1<-as.numeric(as.character(dbfinal2$lag_vic))
dbfinal2$lag_vic <-as.factor(as.character(dbfinal2$lag_vic))
# NOTE(review): `res` is not defined in the visible code, and
# FactoInvestigate::Investigate() normally expects a FactoMineR result object
# rather than a raw data frame - confirm this call against the intended API.
# Style nit: graph=T should be graph=TRUE.
mtcars.pca <- Investigate(dbfinal1[res,c('score','money', 'victories', 'cuts','rounds', 'size', 'age', 'distance', 'lag_vic1', 'accuracy')],scale.unit=TRUE, ncp=5, graph=T)
typeof(dbfinal1[,20])
plot.PCA(mtcars.pca, choix="var")
##create the confusion matrix
# NOTE(review): test_target is not defined in the visible code, and `pr`
# comes from the broken knn() call above - this table cannot be computed
# as written.
tb <- table(pr,test_target)
nrow(train.data)
# Bootstrap statistic for boot::boot(): refits the binomial GLM on the
# resampled rows and returns its coefficient vector.
# (Despite the historical name "rsq", the value returned is the set of
# fitted coefficients, not an R-squared.)
#   formula: model formula passed straight to glm()
#   data:    full data frame
#   indices: row indices chosen by boot() for this resample
rsq <- function(formula, data, indices) {
  resample <- data[indices, ]   # boot() supplies the resampled row indices
  boot_fit <- glm(formula, data = resample, family = "binomial")
  coef(boot_fit)
}
# Bootstrap (10,000 resamples) of the logistic-regression coefficients.
# NOTE(review): `right` is not defined in the visible code - presumably a
# subset of dbfinal1 created elsewhere; confirm.
results <- boot(data=right, statistic=rsq,
R=10000, formula = lag_vic~ money + putts + distance + score)
# With this formula the coefficient order is (Intercept), money, putts,
# distance, score - so index = 5 is the SCORE coefficient, not the intercept.
boot.ci(results, type="norm", index=5) # 5th coefficient (score)
plot(results) # default index = 1: the intercept
right$`TOP 10`
# NOTE(review): this neuralnet() call passes `hidden` twice, which is a
# runtime error ("matched by multiple actual arguments"); also `softplus`
# is not defined in the visible code. Keep one hidden= and define/import
# the activation before running.
neuralnet((lag_vic == "1") ~ money + putts + distance, data = right, hidden = 1, hidden = c(3, 2), act.fct = softplus)
# Binarize victories into a two-level factor: 0 vs >=1 wins.
a <- dbfinal1 %>% mutate(dummy = Hmisc::cut2(dbfinal1$victories, c(0,1)))
levels(a$dummy)[levels(a$dummy)=="0"] <- "No victory"
levels(a$dummy)[levels(a$dummy)=="1"] <- "victory"
levels(a$dummy)[levels(a$dummy)=="[1,6]"] <- "victory"
levels(a$dummy)
# lag_vic = previous season's victory status per player (response used above).
dbfinal1 <-a %>% group_by(player) %>% mutate(lag_vic = lag(dummy, order_by = year))
arrange(right, year)
# Simple univariate logistic model: lagged victory ~ prize money.
a<-glm(lag_vic~ money, data = right,family="binomial")
b <-summary(a)
b$coefficients
# Plots for data visualization.
# NOTE(review): in every scatter plot below, theme_classic() is added AFTER
# theme(plot.title = element_text(hjust = 0.5)). Complete themes replace all
# previously set theme elements, so the title centering is likely discarded -
# swap the order if the centered title is wanted.
# Boxplot: overall distribution of driving distance.
dbfinal1 %>%
ggplot(aes(x = NULL,y=distance))+
ylab("Driving Distance")+
geom_boxplot(fill="springgreen3")+
theme_classic()+
scale_x_continuous(labels = NULL)+
scale_y_continuous()+
ggtitle("Driving Distance Overview")+
theme(plot.title = element_text(hjust = 0.5))
# Boxplot: average score per round (the c(as.numeric(...)) wrapper is
# redundant but harmless).
dbfinal1 %>%
ggplot(aes(x = NULL,y= c(as.numeric(score))))+
ylab("Average Scoring")+
geom_boxplot(fill="springgreen3")+
theme_classic()+
scale_x_continuous(labels = NULL)+
scale_y_continuous()+
ggtitle("Average scoring per round")+
theme(plot.title = element_text(hjust = 0.5))
# Boxplot: average number of putts per round.
dbfinal1 %>%
ggplot(aes(x = NULL,y= c(as.numeric(putts))))+
ylab("Average number of Putts")+
geom_boxplot(fill="springgreen3")+
theme_classic()+
scale_x_continuous(labels = NULL)+
scale_y_continuous()+
ggtitle("Average number of putts per round")+
theme(plot.title = element_text(hjust = 0.5))
# Scatter + smoother: driving distance vs player size (height).
dbfinal1 %>%
ggplot(aes(size,distance)) +
geom_point(size=2, shape=18, color = "red") +
geom_smooth(method="auto", se=TRUE, fullrange=FALSE, level=0.95)+
ylab("Average Driving Distance")+ xlab("Size")+
ggtitle("Average driving distance compared to size")+
theme(plot.title = element_text(hjust = 0.5))+
theme_classic()
# Scatter + loess: driving distance vs weight.
dbfinal1 %>%
ggplot(aes(weight, distance)) +
geom_point(size=1, shape=1, color = "darkgreen") +
geom_smooth(method="loess",size=1.5,se=TRUE, fullrange=TRUE, level=0.95, color = "cornsilk3")+
ylab("Average Driving Distance")+ xlab("Weight")+
ggtitle("Average driving distance compared to weight")+
theme(plot.title = element_text(hjust = 0.5))+
theme_classic()
# Scatter + loess: driving distance vs age.
dbfinal1 %>%
ggplot(aes(age, distance))+
geom_point(size=1, shape=1, color = "darkgreen") +
geom_smooth(method="loess", se=TRUE, fullrange=FALSE, level=0.95,color = "cornsilk3")+
ylab("Average Driving Distance")+ xlab("Age")+
ggtitle("Average driving distance compared to Age")+
theme(plot.title = element_text(hjust = 0.5))+
theme_classic()
# Scatter + smoother: driving accuracy vs size.
dbfinal1 %>%
ggplot(aes(size, accuracy)) +
geom_point(size=2, shape=18, color = "red") +
geom_smooth(method="auto", se=TRUE, fullrange=FALSE, level=0.95)+
ylab("Driving Accuracy")+ xlab("Size")+
ggtitle("Average driving accuracy compared to size")+
theme(plot.title = element_text(hjust = 0.5))+
theme_classic()
# Scatter + smoother: driving accuracy vs weight.
dbfinal1 %>%
ggplot(aes(weight, accuracy)) +
geom_point(size=2, shape=18, color = "red") +
geom_smooth(method="auto", se=TRUE, fullrange=FALSE, level=0.95)+
ylab("Driving Accuracy")+ xlab("Weight")+
ggtitle("Average driving accuracy compared to weight")+
theme(plot.title = element_text(hjust = 0.5))+
theme_classic()
# Scatter + smoother: driving accuracy vs age.
dbfinal1 %>%
ggplot(aes(`age`, accuracy)) +
geom_point(size=2, shape=18, color = "red") +
geom_smooth(method="auto", se=TRUE, fullrange=FALSE, level=0.95)+
ylab("Driving Accuracy")+ xlab("Age")+
ggtitle("Average driving accuracy compared to age")+
theme(plot.title = element_text(hjust = 0.5))+
theme_classic()
# TODO: build a table with only the winners and rename variables, plus a
# table with the top-10 players by total victories.
# (dollar-format issue - done)
# (extract each player's country of origin - done)
# Winners only: players with at least one victory.
dbfinal2 <- dbfinal1 %>% rowwise() %>%filter(any(c(victories) %in% c(1:20)))
#group by player sum summarized win
# Keep the hand-picked top-10 players and normalise year to a Date (Jan 1st).
# NOTE(review): "Rory Mcllroy" is likely a misspelling of "Rory McIlroy" -
# if the data uses the correct spelling, this filter silently drops him.
dbfinal3 <- dbfinal1 %>% filter(player == "Dustin Johnson" | player == "Tiger Woods" | player == "Bubba Watson"| player == "Jordan Spieth"| player == "Jason Day"| player == "Phil Mickelson"|
player == "Rory Mcllroy"| player == "Justin Rose"| player == "Justin Thomas"| player == "Zach Johnson" ) %>%
mutate(year = as.Date(paste(as.character(substr(year, 1, 4)), 01, 01, sep = "-")))
# Full player x year grid so missing seasons appear as explicit rows.
# NOTE(review): rep(..., each = 11) for the players vs rep(2008:2018, 9) for
# the years assumes exactly 9 distinct players; with 10 players the lengths
# (110 vs 99) would not match and data.frame() would error. Verify the
# player count (the misspelling above may be why 9 currently works).
all_year<- data.frame("player"=(rep(unique(dbfinal3$player),each=11)), "year"= (rep(2008:2018, 9))) %>% mutate(year = as.Date(paste(year, 01, 01, sep = "-")))
all_year<-full_join(dbfinal3,all_year, by = c("year" = "year", "player" = "player")) %>% mutate(player = as.factor(player))
# Missing seasons get 0 victories (also zeroes every other NA column).
all_year[is.na(all_year)] <- 0
# Per-year victories per player.
all_year %>% ggplot(aes(year,victories, col= player ))+
geom_line(size=0.6, alpha = 0.4)+
theme_bw()+
geom_point(size=2.5)+
scale_colour_manual(values=c("#000000", "#E69F00", "#56B4E9", "#009E73", "#F0E442", "#0072B2", "#D55E00", "#CC79A7", "red", "blue","yellow"))
# Cumulative victories per player over time.
all_year %>% group_by(player) %>% arrange(year) %>% mutate(Total_victory = cumsum(victories)) %>% unique() %>%
ggplot(aes(year,Total_victory, col= player ))+
geom_line(size=0.6, alpha = 0.4)+
theme_bw()+
geom_point(size=2.5)+
scale_colour_manual(values=c("#000000", "#E69F00", "#56B4E9", "#009E73", "#F0E442", "#0072B2", "#D55E00", "#CC79A7", "red", "blue","yellow"))
# Same cumulative data as a stacked area chart.
all_year %>% group_by(player) %>%
arrange(year) %>% mutate(Total_victory = cumsum(victories)) %>%
unique() %>%
ggplot(aes(x=year, y=Total_victory, fill=player)) +
geom_area(colour="black", size=.3, alpha=.7) +
scale_fill_brewer(palette="Greens")+
theme_bw()
#2x Zach Johnson !!!!!!!!!!!!!!!!!!!
#compare sum victories top10 and other
# NOTE(review): the summary column of top_10 is misleadingly named
# "Victories_others" (it holds the TOP-10 total).
top_10 <- dbfinal1 %>% filter(player == "Dustin Johnson" | player == "Tiger Woods" | player == "Bubba Watson"| player == "Jordan Spieth"| player == "Jason Day"| player == "Phil Mickelson"|
player == "Rory Mcllroy"| player == "Justin Rose"| player == "Justin Thomas"| player == "Zach Johnson" ) %>%
mutate(year = as.Date(paste(as.character(substr(year, 1, 4)), 01, 01, sep = "-"))) %>% group_by(year) %>% summarise(Victories_others= sum(victories, na.rm = TRUE))
# NOTE(review): bug - a chain of `player != X | player != Y | ...` is TRUE
# for every row (no player equals all ten names), so "others" includes the
# top-10 players too. Use `!(player %in% c(...))` instead.
others<- dbfinal1 %>% filter(player != "Dustin Johnson" | player != "Tiger Woods" | player != "Bubba Watson"| player != "Jordan Spieth"| player != "Jason Day"| player != "Phil Mickelson"|
player != "Rory Mcllroy"| player != "Justin Rose"| player != "Justin Thomas"| player != "Zach Johnson" ) %>%
mutate(year = as.Date(paste(as.character(substr(year, 1, 4)), 01, 01, sep = "-"))) %>% group_by(year) %>% summarise(Victories_others= sum(victories, na.rm = TRUE))
# Players per country, with three-letter codes normalised to ISO3
# (RSA->ZAF, ENG/SCO/WAL->GBR, etc.) so they join onto the world map.
Freq_country <- dbfinal1 %>% select(country,player) %>%
unique() %>% group_by(country) %>%
summarize(`Number of player` = length(country)) %>% na.omit() %>%
mutate("freq" = (`Number of player`/ sum(`Number of player`))) %>%
mutate(country=replace(country, country=="RSA", "ZAF")) %>%
mutate(country=replace(country, country=="ZIM", "ZWE")) %>%
mutate(country=replace(country, country=="ENG", "GBR")) %>%
mutate(country=replace(country, country=="SCO", "GBR")) %>%
mutate(country=replace(country, country=="WAL", "GBR")) %>%
mutate(country=replace(country, country=="GER", "DEU")) %>%
mutate(country=replace(country, country=="DEN", "DNK")) %>%
mutate(country=replace(country, country=="FIJ", "FJI"))
library(countrycode)
library(sf)
library("rnaturalearth")
library("rnaturalearthdata")
# World polygons, with country names converted to ISO3 for the join.
WorldData <- map_data('world')
WorldData$region<- countrycode(WorldData$region,'country.name', 'iso3c' )
joint<-full_join(Freq_country,WorldData, by = c("country"="region"))
# Choropleth of player counts per country (log2 colour scale).
# NOTE(review): the title "World map temperature of march 2010" is a
# copy-paste leftover from another project - this plot shows golfer counts.
ggplot(joint, aes(x = long, y = lat, group = group)) +
geom_polygon(aes(fill = `Number of player`))+
scale_fill_viridis_c(option = "cividis", trans = "log2",name = waiver())+
ggtitle("World map temperature of march 2010")+
theme(plot.title = element_text(hjust = 0.5))+ theme_bw()
#model uniweight: binarize all the variables before building the hand-weighted
# score. Each predictor is density-plotted by victory status, then split at a
# visually chosen cut point via Hmisc::cut2 and relabelled to "0"/"1"
# (1 = the side of the cut associated with winning).
# NOTE(review): the "[a,b)" level labels below are hard-coded to this data
# set's observed ranges; if the data changes, cut2 produces different labels
# and the relabelling silently does nothing.
#dbfinal1$dummy<- as.numeric(dbfinal2$dummy)
dbfinal1%>%
ggplot(aes(distance, fill=dummy)) +
geom_density(alpha=0.4)+
theme_bw()
# Driving distance: cut at 267 yards.
dbfinal2 <- dbfinal1 %>%
ungroup() %>%
mutate(distance_bin = Hmisc::cut2(dbfinal1$distance, 267))
levels(dbfinal2$distance_bin)
levels(dbfinal2$distance_bin)[levels(dbfinal2$distance_bin)=="[237,267)"] <- "0"
levels(dbfinal2$distance_bin)[levels(dbfinal2$distance_bin)=="[267,292]"] <- "1"
levels(dbfinal2$distance_bin)
#compare accuracy
dbfinal2 %>%
ggplot(aes(accuracy, fill=dummy)) +
geom_density(alpha=0.4)+
theme_bw()
#not enough difference between the groups, so accuracy is not binarized
#for the putts now
dbfinal2%>%
ggplot(aes(putts, fill=dummy)) +
geom_density(alpha=0.4)+
theme_bw()
# Putts per round: cut at 28.8 (fewer putts = "1").
dbfinal2 <- dbfinal2 %>%
ungroup() %>%
mutate(putts_bin = Hmisc::cut2(dbfinal2$putts, 28.8))
levels(dbfinal2$putts_bin)[levels(dbfinal2$putts_bin)=="[28.8,31.0]"] <- "0"
levels(dbfinal2$putts_bin)[levels(dbfinal2$putts_bin)=="[27.5,28.8)"] <- "1"
levels(dbfinal2$putts_bin)
#score: cut at 70.7 (lower score = "1")
dbfinal2 %>%
ggplot(aes(score, fill=dummy)) +
geom_density(alpha=0.4)+
theme_bw()
dbfinal2 <- dbfinal2 %>%
ungroup() %>%
mutate(score_bin = Hmisc::cut2(dbfinal2$score, 70.7))
levels(dbfinal2$score_bin)[levels(dbfinal2$score_bin)=="[70.7,74.4]"] <- "0"
levels(dbfinal2$score_bin)[levels(dbfinal2$score_bin)=="[68.1,70.7)"] <- "1"
levels(dbfinal2$score_bin)
#lscore: same treatment for the low-score variable
dbfinal2 %>%
ggplot(aes(lscore, fill=dummy)) +
geom_density(alpha=0.4)+
theme_bw()
dbfinal2 <- dbfinal2 %>%
ungroup() %>%
mutate(lscore_bin = Hmisc::cut2(dbfinal2$lscore, 70.7))
levels(dbfinal2$lscore_bin)[levels(dbfinal2$lscore_bin)=="[70.7,76.5]"] <- "0"
levels(dbfinal2$lscore_bin)[levels(dbfinal2$lscore_bin)=="[67.7,70.7)"] <- "1"
levels(dbfinal2$lscore_bin)
#money: cut at $1.75M in season earnings
dbfinal2 %>%
ggplot(aes(money, fill=dummy)) +
geom_density(alpha=0.4)+
theme_bw()
dbfinal2 <- dbfinal2 %>%
ungroup() %>%
mutate(money_bin = Hmisc::cut2(dbfinal2$money, 1750000))
levels(dbfinal2$money_bin)
levels(dbfinal2$money_bin)[levels(dbfinal2$money_bin)=="[ 24650, 1750000)"] <- "0"
levels(dbfinal2$money_bin)[levels(dbfinal2$money_bin)=="[ 1750000,12030465]"] <- "1"
levels(dbfinal2$money_bin)
#
# Cuts made: density by victory status, then binarize at 8 cuts per season.
dbfinal2 %>%
ggplot(aes(cuts, fill=dummy)) +
geom_density(alpha=0.4)+
theme_bw()
dbfinal2 <- dbfinal2 %>%
ungroup() %>%
mutate(cuts_bin = Hmisc::cut2(dbfinal2$cuts, 8))
levels(dbfinal2$cuts_bin)[levels(dbfinal2$cuts_bin)=="[ 2, 8)"] <- "0"
levels(dbfinal2$cuts_bin)[levels(dbfinal2$cuts_bin)=="[ 8,43]"] <- "1"
levels(dbfinal2$cuts_bin)
# Victories in the current season: 0 vs >=1.
dbfinal2 %>%
ggplot(aes(victories, fill=dummy)) +
geom_density(alpha=0.4)+
theme_bw()
dbfinal2 <- dbfinal2 %>%
ungroup() %>%
mutate(victories_bin = Hmisc::cut2(dbfinal2$victories, 1))
levels(dbfinal2$victories_bin)[levels(dbfinal2$victories_bin)=="0"] <- "0"
levels(dbfinal2$victories_bin)[levels(dbfinal2$victories_bin)=="[1,6]"] <- "1"
levels(dbfinal2$victories_bin)
#fitted value: convert the "0"/"1" factors to numeric 0/1 before weighting
# (as.numeric on a factor directly would return level indices, hence the
# as.character round-trip).
dbfinal2$distance_bin <- as.numeric(as.character(dbfinal2$distance_bin))
dbfinal2$cuts_bin <-as.numeric(as.character(dbfinal2$cuts_bin))
dbfinal2$putts_bin <-as.numeric(as.character(dbfinal2$putts_bin))
dbfinal2$score_bin <-as.numeric(as.character(dbfinal2$score_bin))
dbfinal2$lscore_bin <-as.numeric(as.character(dbfinal2$lscore_bin))
dbfinal2$money_bin <-as.numeric(as.character(dbfinal2$money_bin))
dbfinal2$victories_bin <-as.numeric(as.character(dbfinal2$victories_bin))
# Drop rows with a missing response.
# NOTE(review): footgun - if lag_vic had NO missing values, which() would
# return integer(0) and `df[-integer(0), ]` selects ZERO rows, wiping the
# data. Safer: dbfinal2[!is.na(dbfinal2$lag_vic), ].
dbfinal3<-dbfinal2[-which(is.na(dbfinal2$lag_vic)),]
dbfinal3[ "cuts_bin"][is.na(dbfinal3["cuts_bin"])] <- 0
dbfinal3["lscore_bin"][is.na(dbfinal3["lscore_bin"])] <- 0
# Hand-weighted "uniweight" score (max attainable = 16):
# 2*distance + 1*cuts + 1*putts + 4*score + 2*lscore + 4*money + 2*victories.
dbfinal2 <- dbfinal3 %>% mutate("fitted" = 2*distance_bin+cuts_bin+putts_bin+4*score_bin+2*lscore_bin+4*money_bin+2*victories_bin)
dbfinal2 %>%
ggplot(aes(fitted, fill=dummy)) +
geom_density(alpha=0.4)+
theme_bw()
# Predict a next-season victory when the score reaches the maximum of 16,
# then evaluate against the lagged victory label.
predicted.classes <- ifelse(dbfinal2$fitted >= 16, "1", "0")
library(e1071)
library(caret)
cm<-confusionMatrix(data=as.factor(predicted.classes),
reference=as.factor(dbfinal2$lag_vic))
cm
|
f15eab1e61be2708579973aa507bc510e1229438 | 0b4dc08a3659f35d6fe492dd9d7e454b2f515a6a | /core_microbiome.R | 0290aa04c7046b8fdd850b733ca980a17d4707b8 | [] | no_license | cedwardson4/phyloseq-extras | 16beffc840dbca8ea18071e02bb20d890ab4a3ea | 9b31c70dd372ac759b4dae2755ae8cab8a379312 | refs/heads/master | 2020-03-21T08:26:33.654295 | 2019-12-06T20:55:04 | 2019-12-06T20:55:04 | 138,343,736 | 5 | 1 | null | null | null | null | UTF-8 | R | false | false | 4,855 | r | core_microbiome.R | #https://github.com/davidelliott/core-microbiome/blob/master/core-microbiome.Rmd
# Core microbiome finder.
#
#   physeq:    the phyloseq object to query
#   threshold: the abundance below which OTUs are not regarded as "core taxa"
#   method:    "max" or "mean" - whether the threshold criterion is applied
#              to the row-wise max or mean of the OTU table
#   ranks:     vector of taxonomic ranks at which to search for core taxa
#   group:     sample variable on which to merge samples before applying the
#              threshold criteria ("" = no merging)
#   verbose:   0 = silent, 1 = report total OTUs found, >1 = per-rank report
#
# Note: max and mean are calculated across the whole OTU table per row,
# therefore "mean" yields the overall abundant OTUs, whereas "max" includes
# any OTU which is abundant in any single sample (a sample-wise search).
#
# Returns a phyloseq object pruned to the OTUs matching the criteria.
core_microbiome <- function(physeq, threshold = 2, method = "mean",
                            ranks = c("Species"), group = "", verbose = 0) {
  core_otus_glom <- c()
  physeq.original <- physeq
  if (group != "") {
    # merge_samples() sums the counts, but the threshold must be applied to
    # the per-group MEAN, so divide each sample's counts by its group size
    # before merging.
    sample_data(physeq)$group_frequency <- NA
    for (n in seq_len(nrow(sample_data(physeq)))) {
      # BUGFIX: assign the frequency to sample n only. The previous code
      # assigned the scalar to the whole column on every iteration, leaving
      # every sample with the group size of the LAST sample - wrong whenever
      # group sizes differ (the original comment admitted this path was
      # untested for unequal group frequencies).
      sample_data(physeq)$group_frequency[n] <-
        sum(get_variable(physeq, group) == get_variable(physeq, group)[n])
    }
    # NOTE(review): this division relies on R recycling the per-sample
    # vector along the sample dimension of the OTU table - confirm it for
    # both taxa_are_rows() orientations.
    otu_table(physeq) <- otu_table(physeq) / sample_data(physeq)$group_frequency
    physeq <- merge_samples(x = physeq, group = group)
    # merge_samples() returns the OTU table transposed; restore orientation.
    otu_table(physeq) <- t(otu_table(physeq))
  }
  for (rank in ranks) {
    # Agglomerate to the rank being searched, unless already at the lowest
    # (OTU-level) rank of the object.
    if (rank != rank_names(physeq)[length(rank_names(physeq))]) {
      physeq.glom <- tax_glom(physeq, rank)
    } else {
      physeq.glom <- physeq
    }
    # Row-wise test statistic ("mean" or "max" abundance) for each taxon.
    glom_otus <- as.data.frame(otu_table(physeq.glom))
    glom_otus$stat <- apply(glom_otus, 1, method)
    # Taxa meeting the abundance criterion at this rank.
    glom_rank_candidates <- row.names(glom_otus[glom_otus$stat >= threshold, ])
    if (length(glom_rank_candidates) > 0) {
      glom_rank_taxonomy <- tax_table(physeq.glom)[glom_rank_candidates, rank]
      # tax_glom() does not necessarily keep the most abundant OTU as the
      # representative of a clade, so explicitly pick the most abundant OTU
      # within each identified taxonomic group. Skip this at the lowest
      # rank, where no agglomeration happened.
      if (rank != rank_names(physeq)[length(rank_names(physeq))]) {
        for (candidate in glom_rank_taxonomy) {
          OTUs_in_taxonomic_group <- tax_table(physeq)[, rank] == as.character(candidate)
          most_abundant_OTU <- names(which.max(taxa_sums(physeq)[as.vector(OTUs_in_taxonomic_group)]))
          core_otus_glom <- c(core_otus_glom, most_abundant_OTU)
        }
      } else {
        # At OTU level every candidate is kept as-is.
        core_otus_glom <- c(core_otus_glom, glom_rank_candidates)
      }
    }
    if (verbose > 1) {
      print(paste(rank, "level search found", length(glom_rank_candidates),
                  "taxa above", method, "abundance of", threshold))
    }
  }
  # Deduplicate BEFORE reporting: the old code printed the non-unique count
  # while claiming "unique OTUs".
  core_otus <- unique(core_otus_glom)
  if (verbose > 0) {
    print(paste("Search found", length(core_otus), "unique OTUs"))
  }
  prune_taxa(core_otus, physeq.original)
}
# core compare function
# Input 2 phyloseq objects which have been pruned to contain a 'core microbiome'
# returns a list containing:
# extra OTUs in the first object, not present in the second
# extra taxonomic groups represented by those OTUs, which are not present in the second object
core_compare <- function(physeq1,physeq2) {
out <- list()
# Find out which OTUs were added by the multi-rank approach
extras <- tax_table(physeq1)[!rownames(otu_table(physeq1))%in%rownames(otu_table(physeq2))]
out[["extra_otus"]] <- rownames(extras)
# Find out which taxa are additionally represented by those OTUs
for(rank in rank_names(physeq1)) {
index <- !extras[,rank]%in%unique(tax_table(physeq2)[,rank])
if(sum(index)) {
out[[rank]] <- unique(as.vector(extras[index,rank]))
}
else {
out[rank] <- "none"
}
}
return(out)
}
|
dbbcdd08e92e43e9a8ca17b259254a34fdd6429b | 1b440c732c9f60cbd3130f40ecac2613cadf1940 | /plot1.R | 2cac6a9bb1991357ce0e67c0c582d43213e90ffb | [] | no_license | Mathematician90/ExData_Plotting1 | 221a5ab4ac9b457598d5d85b5e5060f1f82fc68b | a87a59ec1c37b0c6f448688647ee3ab34e9c30f3 | refs/heads/master | 2021-01-21T23:38:50.003562 | 2016-01-08T19:00:53 | 2016-01-08T19:00:53 | 49,279,052 | 0 | 0 | null | 2016-01-08T15:17:45 | 2016-01-08T15:17:44 | null | UTF-8 | R | false | false | 612 | r | plot1.R | library(graphics)
library(grDevices)
household_power_consumption <- read.csv('household_power_consumption.txt', header=TRUE, sep=";",na.strings="?")
household_power_consumption$Date<-as.Date(household_power_consumption$Date,"%d/%m/%Y")
household_power_consumption<-household_power_consumption[which(household_power_consumption$Date=="2007-02-01"|household_power_consumption$Date=="2007-02-02"),]
hist(household_power_consumption$Global_active_power, main="Global Active Power", xlab="Global Active Power (kilowatts)", ylab="Frequency", col="Red")
dev.copy(png, file="plot1.png", height=480, width=480)
dev.off()
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.