blob_id
stringlengths 40
40
| directory_id
stringlengths 40
40
| path
stringlengths 2
327
| content_id
stringlengths 40
40
| detected_licenses
listlengths 0
91
| license_type
stringclasses 2
values | repo_name
stringlengths 5
134
| snapshot_id
stringlengths 40
40
| revision_id
stringlengths 40
40
| branch_name
stringclasses 46
values | visit_date
timestamp[us]date 2016-08-02 22:44:29
2023-09-06 08:39:28
| revision_date
timestamp[us]date 1977-08-08 00:00:00
2023-09-05 12:13:49
| committer_date
timestamp[us]date 1977-08-08 00:00:00
2023-09-05 12:13:49
| github_id
int64 19.4k
671M
⌀ | star_events_count
int64 0
40k
| fork_events_count
int64 0
32.4k
| gha_license_id
stringclasses 14
values | gha_event_created_at
timestamp[us]date 2012-06-21 16:39:19
2023-09-14 21:52:42
⌀ | gha_created_at
timestamp[us]date 2008-05-25 01:21:32
2023-06-28 13:19:12
⌀ | gha_language
stringclasses 60
values | src_encoding
stringclasses 24
values | language
stringclasses 1
value | is_vendor
bool 2
classes | is_generated
bool 2
classes | length_bytes
int64 7
9.18M
| extension
stringclasses 20
values | filename
stringlengths 1
141
| content
stringlengths 7
9.18M
|
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
cacf69231b570f16dcb0ec2518a31b555052bb3b
|
becf66d452c52a4ebafbfb061c11f3c95347ddcf
|
/stats/time-metrics.R
|
9371d478b28ae64469cbfbcd82da2163b4d1c9a1
|
[] |
no_license
|
caiusb/MergeConflictAnalysis
|
55b40734c71e9618eae14e921782b69a79ce841e
|
e41863670505bff853c85572ccf6414b23a0a0af
|
refs/heads/master
| 2021-09-05T17:52:18.254826
| 2018-01-30T02:14:44
| 2018-01-30T02:14:44
| 29,747,203
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,536
|
r
|
time-metrics.R
|
# Load the shared helpers and the merge-conflict result set.
source('common.R')
data <- loadData(resultsFolder)

# Time-unit constants, defined once and locked so later code cannot
# accidentally reassign them (lockBinding errors on any later write).
if (!exists("SECONDS_PER_DAY")) {
  SECONDS_PER_DAY <- 86400  # 24 * 60 * 60; fixed — original had typo 88400
  lockBinding("SECONDS_PER_DAY", globalenv())
}
if (!exists("SECONDS_PER_HOUR")) {
  SECONDS_PER_HOUR <- 3600
  lockBinding("SECONDS_PER_HOUR", globalenv())
}
if (!exists("SECONDS_PER_MINUTE")) {
  SECONDS_PER_MINUTE <- 60
  lockBinding("SECONDS_PER_MINUTE", globalenv())
}
# Derive all time-based metrics for a conflict data frame: the resolution
# time for each conflict and the signed gap between the two branch tips.
# (Original used `<<-`; at top level plain `<-` assigns to the global
# environment identically, without the super-assignment anti-pattern.)
calculateTimeDifferences <- function(data) {
  data <- calculateResolutionTime(data)
  data <- calculateTimeBetweenTips(data)
  return(data)
}
# Add a TIME_TIPS column: the signed difference between the two branch
# tips' timestamps (TIME_A - TIME_B).
calculateTimeBetweenTips <- function(data) {
  within(data, TIME_TIPS <- TIME_A - TIME_B)
}
# Add a RESOLUTION_TIME column: absolute time between the first tip
# commit (TIME_A) and the resolution commit (TIME_SOLVED).
calculateResolutionTime <- function(data) {
  within(data, RESOLUTION_TIME <- abs(TIME_SOLVED - TIME_A))
}
# Effort proxy for a conflict resolution. Several candidate metrics were
# computed in the original but never used; the proxy actually returned is
# the number of methods touched (NO_METHODS). The dead computations are
# removed and kept below as reference.
calculateEffort <- function(data) {
  # Alternative candidates explored earlier, for reference:
  #   weighted size:      LOC_A_TO_SOLVED/LOC_SIZE_A + LOC_B_TO_SOLVED/LOC_SIZE_B
  #   average size:       (LOC_A_TO_SOLVED + LOC_B_TO_SOLVED)/2
  #   diagonal deviation: (LOC_B_TO_SOLVED - LOC_A_TO_SOLVED)/sqrt(2)
  #   percent unmerged:   LOC_A_TO_B/LOC_DIFF_BEFORE (and FILE * that)
  return(data$NO_METHODS)
}
# Keep only rows whose resolution took at most `days` days.
trimSolveTimeGreaterThanDays <- function(data, days) {
  cutoff <- days * SECONDS_PER_DAY
  data[data$RESOLUTION_TIME <= cutoff, ]
}
# Keep only rows whose resolution took at most `hours` hours.
trimSolveTimeGreaterThanHours <- function(data, hours) {
  cutoff <- hours * SECONDS_PER_HOUR
  data[data$RESOLUTION_TIME <= cutoff, ]
}
# Keep only rows whose resolution took at most `minutes` minutes.
trimSolveTimeGreaterThanMinutes <- function(data, minutes) {
  cutoff <- minutes * SECONDS_PER_MINUTE
  data[data$RESOLUTION_TIME <= cutoff, ]
}
# Exploratory comparison of AST diff sizes between conflicting and
# cleanly-merged commits. conflictData, successfulData, createCommitData
# and plotWithLinearRegression presumably come from common.R — confirm.
hist(conflictData$AST_DIFF_BEFORE, breaks=500, xlim=c(0,2000))
hist(successfulData$AST_DIFF_BEFORE, breaks=500, xlim=c(0,2000))
# NOTE(review): this boxplot mixes AST_DIFF_BEFORE with LOC_DIFF_BEFORE;
# it looks like both should be the same column — confirm intent.
boxplot(successfulData$AST_DIFF_BEFORE, conflictData$LOC_DIFF_BEFORE)
print(wilcox.test(conflictData$AST_DIFF_BEFORE, successfulData$AST_DIFF_BEFORE))
# Derive time metrics, aggregate per commit, then drop resolutions that
# took longer than 10 minutes before estimating effort.
timedData <- calculateTimeDifferences(conflictData)
timedCommitData <- calculateTimeDifferences(createCommitData(timedData))
trimmedCommitData <- trimSolveTimeGreaterThanMinutes(timedCommitData, 10)
trimmedCommitData$EFFORT <- calculateEffort(trimmedCommitData)
# Summarise and plot the trimmed resolution times.
print(summary(trimmedCommitData$RESOLUTION_TIME))
cat("Standard deviation: ", sd(trimmedCommitData$RESOLUTION_TIME))
hist(trimmedCommitData$RESOLUTION_TIME, main="Resolution time", xlab="Time (s)", breaks=50)
plotWithLinearRegression(trimmedCommitData, "RESOLUTION_TIME", "EFFORT")
|
391c86607771f86aa26e09a0d1331d2062042edc
|
a20921ccfe5a96a2362c732ccc9b09be0918dd67
|
/r/helperFunctions.R
|
77359f54bf481ed9ec029b521d90fa428846abed
|
[] |
no_license
|
HF-Research/ht2
|
897e309a6cd66c9bd13759781fc8067cd2fcde34
|
2b4ee1d6191ed3d28e7ab0bb59e31a79ee980bbb
|
refs/heads/master
| 2022-04-28T06:47:16.202058
| 2022-03-24T13:56:19
| 2022-03-24T13:56:19
| 152,376,713
| 1
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 566
|
r
|
helperFunctions.R
|
# Capitalize the first letter of every space-separated word in each
# element of `y`. Returns a character vector the same length as `y`.
capWord <- function(y) {
  words_per_elem <- strsplit(y, " ")
  sapply(words_per_elem, function(w) {
    capped <- paste0(toupper(substring(w, 1, 1)), substring(w, 2))
    paste(capped, collapse = " ")
  })
}
# Return the HjerteTal shiny-app URL matching the requested language
# ("en" -> English deployment, anything else -> the Danish default).
ht_link <- function(lang) {
  if (lang == "en") {
    return("https://hjerteforeningen.shinyapps.io/HjerteTal-en/")
  }
  "https://hjerteforeningen.shinyapps.io/HjerteTal/"
}
# Wrap `x` to at most `width` characters per line and join the wrapped
# lines with HTML line breaks.
html_wrap <- function(x, width) {
  wrapped_lines <- strwrap(x, width = width)
  paste(wrapped_lines, collapse = "<br>")
}
# Vectorised wrap via stringr::str_wrap, replacing each newline that
# str_wrap inserts with an HTML line break.
html_wrap_v <- function(x, width) {
  wrapped <- stringr::str_wrap(x, width)
  gsub("\n", "<br>", wrapped)
}
|
ddf57e645b9c62115e3fe8f0ee484088940b3622
|
f387d8bac0ca340364a8f2f3767233c20797e2e4
|
/Wordclouds.R
|
393362f57c8f66a463c73cefeb726fd2b9cc9805
|
[] |
no_license
|
WOLFurriell/twitterR
|
24ba737808bac151de8f24340a86ba006eb0a40b
|
f2317e282e62f9391b5c4e731381961a15886928
|
refs/heads/master
| 2020-04-15T14:10:29.591089
| 2019-01-08T22:40:32
| 2019-01-08T22:40:32
| 164,744,504
| 0
| 0
| null | null | null | null |
ISO-8859-1
|
R
| false
| false
| 3,829
|
r
|
Wordclouds.R
|
# Build one word cloud per 2014 Brazilian presidential candidate (plus a
# combined panel) from pre-collected tweets, writing all eight panels to
# a single PDF.
library(tm)
library(wordcloud)
library(RColorBrewer)
library(dplyr)
library(data.table)
# NOTE(review): rm(list = ls()) and an absolute setwd() inside a script
# are discouraged — they wipe the session and tie the run to one machine.
rm(list = ls())
BD<-'D:/TWITTER/Bancos_tweets'
setwd(BD)
candidatos<-read.table(file = "Candidatos.csv",header = T,sep = ",")
levels(candidatos$nome)
# one list element per candidate name
all_tw<-split(candidatos,candidatos$nome)
# Assign the full (combined) data set as the last element of the list
all_tw[[8]]<-candidatos
names(all_tw)[8]<-"candidatos"
plotword<-list()
# panel plotting order, and the display name for each list index
ordem<-c(2,1,7,6,4,3,5,8)
nomescand<-c("Aécio Neves","Dilma","Eduardo Jorge","Pastor Everaldo",
"Levy Fidelix","Luciana Genro","Marina Silva",
"Geral")
pdf("D:/TWITTER/graphs/wordcloud2.pdf",height = 6, width = 14)
par(mfrow=c(2,4))
for(i in ordem){
# create a corpus
adapt <- Corpus(VectorSource(all_tw[[i]]))
# strip whitespace, lower-case the text and remove numbers/punctuation
adapt <- tm_map(adapt, stripWhitespace)
adapt <- tm_map(adapt, content_transformer(tolower))
adapt <- tm_map(adapt, removeNumbers)
adapt <- tm_map(adapt, removePunctuation)
adapt <- tm_map(adapt, stripWhitespace)
adapt <- tm_map(adapt, removeWords, stopwords("portuguese"))
# extra stop words: a shared list from disk plus per-candidate terms
aux<-read.table("D:/TWITTER/STOPWORDS.txt",
encoding = "Latin-1")
words1<-as.vector(aux$V1)
if (i == 1){
words2<-c("aécio","aecioneves","aecio","equipean","neves","https",
"à","p","já","q","u","aécio45","é","rt","t.co")
}else if (i == 2){
words2<-c("dilma","dilmabr","rousseff","https","é","rt","t.co",
"querodilmatreze")
}else if (i == 3){
words2<-c("eduardo","jorge","eduardojorge","eduardojorge43","https",
"à","p","já","q","u","é","rt","t.co","httptcovyevurkvew")
}else if (i == 4){
words2<-c("everaldo","https","à","p","já","q","u","é","rt","t.co")
}else if (i == 5){
words2<-c("levy","fidelix","levyfidelix","https","à","p","já","q","u",
"é","rt","t.co")
}else if (i == 6){
words2<-c("luciana","genro","equipe","https","à","p","já","q","u",
"é","rt","t.co","soumarina")
}else if (i == 7){
words2<-c("silva_marina","silva_marina","marina","silva","silvamarina",
"soumarina40","à","p","já","q","u","é","rt","t.co")
}else
# combined panel: union of all candidate-specific stop words
words2<-c("aécio","aecioneves","aecio","equipean","neves",
"dilma","dilmabr","rousseff","eduardo","jorge","eduardojorge",
"everaldo","levy","fidelix","levyfidelix","luciana",
"genro","equipe","silva_marina","silva_marina","marina",
"silva","silvamarina","aécio","aecioneves","aecio","equipean",
"neves", "dilma","dilmabr","rousseff",'http',
"eduardo","jorge","eduardojorge", "everaldo",
"levy","fidelix","levyfidelix","luciana","genro","equipe",
"silva_marina","silva_marina","marina","silva","silvamarina",
"aécio45","é","rt","t.co","já","você","equipe50","https","à","p","já","q","u",
"eduardojorge43","equipe40","soumarina40",'c','está','pra',"são",'sobre','everaldo_20',
"httptcovyevurkvew","http...")
words<-c(words1,words2)
words<-sort(words)
adapt <- tm_map(adapt, removeWords,words)
# term-document matrix -> per-term frequencies, sorted descending
dtm <- TermDocumentMatrix(adapt)
m <- as.matrix(dtm)
v <- sort(rowSums(m),decreasing=TRUE)
d <- data.frame(word = names(v),freq=v)
# fixed seed so the cloud layout is reproducible
set.seed(1234)
wordcloud(words = d$word, freq = d$freq, min.freq = 2,scale=c(5, .2),
max.words=400, random.order=F,rot.per=0.30,
colors=c("darkslategray3","lightslateblue","dodgerblue",
"mediumpurple4","darkmagenta",
"mediumpurple3","midnightblue"))
# label the panel with the candidate's display name
mtext(nomescand[[i]], side = 2,line = 2)
}
dev.off()
|
1ad3dc3ce29d205f804cb20cd524f765fed353bf
|
15161b8bd7b05e70fb234b6470ea96aa02bcb863
|
/q5.R
|
243095c010d133afd2c216dc155361a6e748ae68
|
[] |
no_license
|
fangrobloke/r-exploratory-peer2
|
27306ec2c3efaa22f773af63ac4f121e21060a84
|
84d3ff7c94b4a8348c1bef4f635de63e43590647
|
refs/heads/master
| 2020-05-25T15:42:13.699376
| 2016-10-04T09:44:18
| 2016-10-04T09:44:18
| 69,945,633
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 689
|
r
|
q5.R
|
#### read file ####
# PM2.5 National Emissions Inventory data and the source classification
# code table, as supplied with the course assignment.
NEI <- readRDS("summarySCC_PM25.rds")
SCC <- readRDS("Source_Classification_Code.rds")

#### Q5 ####
## work with a data-frame tbl
library(dplyr)
nei <- tbl_df(NEI)

## restrict to Baltimore City (fips 24510) motor-vehicle (ON-ROAD) rows,
## then total the emissions per year
baltimore_vehicles <- nei[(nei$fips == "24510") & (nei$type == "ON-ROAD"), ]
emissions_by_year <- summarise(group_by(baltimore_vehicles, year),
                               Emissions = sum(Emissions))

## plot yearly totals as labelled bars
library(ggplot2)
ggplot(emissions_by_year,
       aes(x = factor(year), y = Emissions, fill = year,
           label = round(Emissions, 2))) +
  geom_bar(stat = "identity") +
  xlab("year") +
  ylab(expression("total PM"[2.5]*" emissions in tons")) +
  ggtitle("Emissions from motor vehicle sources in Baltimore City") +
  geom_label(aes(fill = year), colour = "white", fontface = "bold")
|
7d21fdeaa4afe203484043c7c7d57ef900fb290b
|
bf8e0dea3cb0cc007c3d5e676d89373a23aa8bce
|
/trimmomatic_bowtie2_featureCounts_DESeq2_topGO_clusterProfiler.r
|
b572547cb6098f8c0b8232033d5dc8128631db02
|
[] |
no_license
|
huizhen2014/R_scripts
|
5b7ec15af05125cd76de1875f6eba7584b07a17d
|
964c7965818e40f4f983c5dc0dd6e3f0f520ab74
|
refs/heads/master
| 2020-03-12T00:00:48.880132
| 2020-01-22T10:25:18
| 2020-01-22T10:25:18
| 130,338,420
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 30,304
|
r
|
trimmomatic_bowtie2_featureCounts_DESeq2_topGO_clusterProfiler.r
|
###part 1
##trimmomatic
#trimmomatic PE -summary kp_21_trimmed.log kp_21_R1.fastq.gz kp_21_R2.fastq.gz -baseout kp_21_trimmed.fastq.gz \
#ILLUMINACLIP:Sabgon_adapter.fa:2:30:10 LEADING:20 TRAILING:20 SLIDINGWINDOW:5:20 MINLEN:35
##sangon_adapter.fa
##>PrefixPE/1
##AGATCGGAAGAGCACACGTCTGAAC
##>PrefixPE/2
##AGATCGGAAGAGCGTCGTGTAGGGA
###align bowtie2 with shell scripts
#bowtie2-build GCF_000240185.1_ASM24018v2_genomic.fna hs11286_index
#bowtie2 -x hs11286_index -1 kp_21_sangon_1P.fastq.gz -2 kp_21_sangon_2P.fastq.gz -S kp_21_sangon_mapped.sam
#bowtie2 -x hs11286_index -1 kp_28_sangon_1P.fastq.gz -2 kp_28_sangon_2P.fastq.gz -S kp_28_sangon_mapped.sam
#samtools view -bS kp_21_sangon_mapped.sam > kp_21_sangon_mapped.bam
#samtools view -bS kp_28_sangon_mapped.sam > kp_28_sangon_mapped.bam
#samtools sort -n kp_21_sangon_mapped.bam -o kp_21_sangon_mapped_sorted.bam
#samtools sort -n kp_28_sangon_mapped.bam -o kp_28_sangon_mapped_sorted.bam
##part 2
## Gene rearrangement is possible, so counting only primary alignments
## is considered sufficient here.
####featureCounts ignoreDup=F,primaryOnly=TRUE
##featureCounts
library(Rsubread)
# Sample names; each must have a matching <sample>_sangon_mapped_sorted.bam
samples <- c("ab_1_c507_0440","ab_2_c507_0440","ab_3_c507_0440",
"ab_1", "ab_2")
gtf_file <- "ab030_gff.gtf"
featuretype <- "transcript"
attrtype <- "locus_tag"
anno_file <- "ab030_annotation_extraction.txt"
go_file <- "ab030_Sangon_go.txt"
output <- "ab_c507_0440_vs_ab_c"
# Count reads per transcript for every sample (paired-end, reverse-stranded,
# primary alignments only) and keep the full featureCounts result objects.
Results <- list()
for(sample in samples){
tmp <- c()
tmp <- paste0(sample,"_sangon_mapped_sorted.bam")
Results[[sample]] <- featureCounts(tmp,annot.ext = gtf_file,isGTFAnnotationFile = TRUE,
GTF.featureType = featuretype,
GTF.attrType = attrtype,isPairedEnd = TRUE,
allowMultiOverlap=TRUE,primaryOnly=TRUE,
strandSpecific=2,nthreads=4)
}
###collect counts
# Per sample: gene ID, position string, length, raw count and TPM.
fc_counts <- list()
for(sample in samples){
tmp <- data.frame()
fc <- list()
fc <- Results[[sample]]
tmp <- data.frame(Gene_ID=rownames(fc$counts),Pos=paste0(
fc$annotation$Chr,"[",fc$annotation$Strand,"]",
fc$annotation$Start,"-",fc$annotation$End),
Length=fc$annotation$Length,Count=fc$counts[,1],
TPM=((fc$counts[,1]/fc$annotation$Length)*1000000)/
(sum(fc$counts[,1]/fc$annotation$Length))
)
fc_counts[[sample]] <- tmp
}
## Build the DESeq2 countData/colData inputs
countData <- sapply(fc_counts,function(x)x$Count)
rownames(countData) <- fc_counts[[1]]$Gene_ID
#idx <- apply(countData,1,function(x)length(x[x>0])>1)
#countData <- countData[idx,]
colData <- matrix(666,nrow=length(fc_counts))
rownames(colData) <- colnames(countData)
colData[,1] <- c(rep("ab_c507_0440",3),rep("ab_c",2)) ## adjust to the actual sample grouping
colnames(colData) <- "condition"
## DESeq2 differential expression analysis
library(DESeq2)
dds <- DESeqDataSetFromMatrix(countData = countData,colData = colData,
design = ~ condition)
## Set the factor levels explicitly
dds$condition <- factor(dds$condition,levels=c("ab_c507_0440","ab_c"))
##dds$condition <- relevel(dds$condition,ref="MDR")
##dds$condition <- droplevels(dds$level)
##DE analysis
##1, estimate of size factors: estimateSizeFactors
##2, estimate of dispersion: esitmatedispersions
##3, Negative Binomial GLM fitting and Wald statistics: nbinomWaldTest
##4, the results() function yields the log2 fold changes and p-values
dds <- DESeq(dds)
## Default contrast is last level vs. ref level;
## see resultsNames(dds) for the coefficient names.
## contrast= picks the comparison explicitly; alpha would set the
## adjusted p-value (FDR) cutoff, e.g. 0.05.
res <- results(dds,contrast=c("condition","ab_c507_0440","ab_c"))
#res <- results(dds,contrast = c("condition","28s","21s"),alpha=0.05)
## removeResults() would return a plain DESeqDataSet object
##collection and print to file
## Duplicate column names get suffixes: ".1" marks the size-factor
## normalized counts and ".2" the TPM values; sub() renames them below.
library(xlsx)
res_data_frame <- NULL
Total_counts <- NULL
res_data_frame <- as.data.frame(res)
Anno <- read.delim(anno_file,sep="\t",header=FALSE,
row.names = 3)
# One wide table: annotation, raw counts, normalized counts, TPM, DE stats.
Total_counts <- data.frame(row.names=rownames(fc_counts[[1]]),
Gene_ID=fc_counts[[1]]$Gene_ID,
Pos=fc_counts[[1]]$Pos,
Length=fc_counts[[1]]$Length,
sapply(fc_counts,function(x)x$Count),
counts(dds,normalized=TRUE),
sapply(fc_counts,function(x)x$TPM)
)
colnames(Total_counts) <- sub("(*)\\.1","\\1_Nor_count",colnames(Total_counts))
colnames(Total_counts) <- sub("(*)\\.2","\\1_TPM",colnames(Total_counts))
Total_counts <- cbind(Total_counts,res_data_frame[rownames(Total_counts),])
Total_counts$Anno_location <- Anno[rownames(Total_counts),]$V2
Total_counts$Anno_geneid <- Anno[rownames(Total_counts),]$V4
Total_counts$Anno_protein <- Anno[rownames(Total_counts),]$V5
Total_counts$Anno_product <- Anno[rownames(Total_counts),]$V6
write.table(Total_counts,file=paste0("ab_c507_0440_vs_ab_c","_DE_total_3vs2.txt"),sep="\t",quote=FALSE)
write.xlsx(Total_counts,file =paste0("ab_c507_0440_vs_ab_c","_DE_total_3vs2.xlsx"),row.names=FALSE)
## Order res by adjusted p-value
resOrdered <- res[order(res$padj),]
##summary(resOrdered)
## lfcShrink is for plotting/ranking only; the final DE calls are the
## same as those from results()
res_shrunken_normal <- lfcShrink(dds,contrast = c("condition","ab_c507_0440","ab_c"),
res=res,type="normal",alpha=0.05)
## Draw the MA-plot
#plotMA(res,main="DESeq2",ylim=c(-2,2))
plotMA(res_shrunken_normal,main="DESeq2",ylim=c(-2,2))
##idx <- identity(res$baseMean,res$log2FoldChange)
##rownames(res)[idex] would interactively fetch the chosen points
## Count plot: inspect the read counts of a single gene
plotCounts(dds,gene=which.min(res$padj),intgroup = "condition")
## See the description of what results() returns
##p-values == NA
##1, all counts in the row are 0: baseMean is 0, everything else is NA
##2, a row holding an extreme count value has both p and adjusted p NA;
##   outlier counts are detected via Cook's distance
##3, a row dropped by automatic independent filtering for a low mean
##   normalized count only has its adjusted p set to NA
##mcols(resOrdered)$description
## see metadata(res) for details
##sizeFacots(dds)
##coef(dds)
##
## DE testing uses discrete distributions on raw counts, but for other
## downstream uses (visualisation, clustering) transformed counts are
## more useful. rlog/varianceStabilizingTransformation both take blind=
## (default TRUE). When many/most genes are expected to differ by
## design, blind dispersion estimation inflates the dispersions, so
## blind=FALSE reuses the already-fitted dispersions (re-estimating
## from the design formula only if absent).
rld <- rlog(dds,blind=FALSE)
## Plot the transformation/variance relation (row standard deviations
## vs row means, both on the transformed scale)
library(vsn)
notAllZero <- (rowSums(counts(dds))>0)
meanSdPlot(assay(rld[notAllZero,]))
dev.off()
## Heatmap: colors encode the individual values of the transformed
## count matrix
#library(pheatmap)
#select <- order(rowMeans(counts(dds,normalized=TRUE)),decreasing = TRUE)[1:20]
#nt <- normTransform(dds) ## default log2(x+1) on size-factor scaled counts
#log2.norm.counts <- assay(nt)[select,] ## keep the top 20 genes
#df <- as.data.frame(colData(dds)[,c("condition")])
#pheatmap(log2.norm.counts,cluster_rows = FALSE,show_rownames = FALSE,
# cluster_cols = FALSE,annotation_col = df)
#pheatmap(assay(rld)[select,],cluster_rows = FALSE,show_rownames = FALSE,
# cluster_cols = FALSE, annotation_col = df)
#dev.off()
## Sample-to-sample distance heatmap
## dist(x, method="euclidean",diag=FALSE,upper=FALSE,p=2) are the defaults
sampleDists <- dist(t(assay(rld)))
## Plot the clustering dendrogram of the distances directly
pdf(file=paste0(output,"_hclust_dist.pdf"))
plot(hclust(sampleDists))
dev.off()
library(RColorBrewer)
library(pheatmap)
sampleDistMatrix <- as.matrix(sampleDists)
colnames(sampleDistMatrix) <- NULL
colors <- colorRampPalette(rev(brewer.pal(9,"Blues")))(255)
pheatmap(sampleDistMatrix,clustering_distance_cols = sampleDists,
clustering_distance_rows = sampleDists,col=colors)
dev.off()
## PCA of the samples (same information as the distance matrix)
##plotPCA(rld,intgroup="condition")
## drawn with ggplot2 instead
library(ggplot2)
library(ggrepel)
data <- plotPCA(rld,intgroup="condition",returnData=TRUE)
percentVar <- round(100 * attr(data,"percentVar"),digits = 3)
## keep aes() as local as possible; setting it in ggplot() affects all layers
ggplot(data,aes(PC1,PC2))+
geom_point(aes(color=condition,shape=condition),size=3)+
geom_text_repel(aes(label=name))+labs(x=paste0("PC1(",percentVar[1],"%)"),
y=paste0("PC2(",percentVar[2],"%)"),
color="Type",shape="Type")
ggsave(paste0(output,"_PCA.pdf"),width=9.5,height=7)
dev.off()
## Select the DE genes for GO/KEGG analysis (|log2FC| > 1, padj < 0.05)
DE_up <- Total_counts[Total_counts$log2FoldChange > 1 &
(! is.na(Total_counts$padj)) & Total_counts$padj < 0.05,]
DE_down <- Total_counts[Total_counts$log2FoldChange < -1 &
(! is.na(Total_counts$padj)) & Total_counts$padj < 0.05,]
write.table(DE_up,file=paste0("ab_c507_0440_vs_ab_c","_DE_up_3vs2.txt"),sep="\t",quote=FALSE)
write.xlsx(DE_up,file =paste0("ab_c507_0440_vs_ab_c","_DE_up_3vs2.xlsx"),row.names=FALSE)
write.table(DE_down,file=paste0("ab_c507_0440_vs_ab_c","_DE_down_3vs2.txt"),sep="\t",quote=FALSE)
write.xlsx(DE_down,file =paste0("ab_c507_0440_vs_ab_c","_DE_down_3vs2.xlsx"),row.names=FALSE)
###Part 3
##topGO enrichment
library(topGO)
## get the GO annotation file and filter the duplicated GO terms in Genes
geneID2GO <- readMappings(go_file)
geneID2GO <- sapply(geneID2GO,function(var)unique(var))
# 0/1 factor over all genes, marking membership in the up-regulated set
geneList_up <- as.factor(as.integer(Total_counts$Gene_ID %in%
DE_up$Gene_ID))
names(geneList_up) <- Total_counts$Gene_ID
# likewise for the down-regulated set
geneList_down <- as.factor(as.integer(Total_counts$Gene_ID %in%
DE_down$Gene_ID))
names(geneList_down) <- Total_counts$Gene_ID
# The three GO ontologies analysed.
go_type <- c("MF","BP","CC")
## DE up: build the topGOdata objects for enrichment
up_go <- list()
for(i in 1:length(go_type)){
type=go_type[i]
godata <- new("topGOdata",ontology=type,allGenes=geneList_up,
description=paste("GOdata_up",type,sep="\t"),annot=annFUN.gene2GO,
gene2GO=geneID2GO,nodeSize=1)
##renew the GOdata
.geneList_up <- as.factor(as.integer(genes(godata) %in% sigGenes(godata)))
names(.geneList_up) <- genes(godata)
godata <- new("topGOdata", ontology=type,allGenes=.geneList_up,
description=paste("GOdata_up",type,sep="\t"),annot=annFUN.gene2GO,
gene2GO=geneID2GO,nodeSize=1)
up_go[[i]] <- godata
}
#statistic <- c("classic","weight01","elim"), available enrichment tests
up_go_results <- list()
up_go_results_table <- list()
up_go_results_gentable <- list()
for(i in 1:length(up_go)){
godata <- up_go[[i]]
result <- runTest(godata,algorithm = "classic",statistic = "fisher")
up_go_results_gentable[[i]] <- GenTable(godata,classic=result,orderBy="classic",ranksOf="classic",
topNodes=length(usedGO(godata)),numChar=50)
up_go_results_gentable[[i]]$qvalue <- p.adjust(sort(score(result)),"BH")
up_go_results_gentable[[i]] <- up_go_results_gentable[[i]][order(up_go_results_gentable[[i]]$qvalue),]
up_go_results_table[[i]] <- up_go_results_gentable[[i]][1:30,]
# NOTE(review): the FALSE branch uses up_go_results_table (30 rows) while
# the condition uses the full gentable — lengths differ; confirm this is
# intentional and not a typo for up_go_results_gentable.
up_go_results_gentable[[i]]$Term <- ifelse(is.na(Definition(up_go_results_gentable[[i]]$GO.ID)),
up_go_results_gentable[[i]]$Term,Definition(up_go_results_table[[i]]$GO.ID))
up_go_results_gentable[[i]]$Sig_Genes <- sapply(
sapply(genesInTerm(godata,up_go_results_gentable[[i]]$GO.ID),function(x){
sigGenes(godata)[sigGenes(godata) %in% x]
}),function(y){paste(y,collapse = ",")})
up_go_results_table[[i]]$Sig_Genes <- up_go_results_gentable[[i]][1:30,]$Sig_Genes
up_go_results_gentable[[i]]$All_Genes <- sapply(
genesInTerm(godata,up_go_results_gentable[[i]]$GO.ID),
function(x){paste(x,collapse = ",")})
up_go_results[[i]] <- result
}
## DE down: build the topGOdata objects
down_go <- list()
for(i in 1:length(go_type)){
type=go_type[i]
godata <- new("topGOdata",ontology=type,allGenes=geneList_down,
description=paste("GOdata_down",type,sep="\t"),annot=annFUN.gene2GO,
gene2GO=geneID2GO,nodeSize=1)
##renew the genelist
.geneList_down <- as.factor(as.integer(genes(godata) %in% sigGenes(godata)))
names(.geneList_down) <- genes(godata)
godata <- new("topGOdata",ontology=type,allGenes=.geneList_down,
description=paste("GOdata_down",type,sep="\t"),annot=annFUN.gene2GO,
gene2GO=geneID2GO,nodeSize=1)
down_go[[i]] <- godata
}
down_go_results <- list()
down_go_results_table <- list()
down_go_results_gentable <- list()
for(i in 1:length(down_go)){
godata <- down_go[[i]]
result <- runTest(godata,algorithm = "classic",statistic = "fisher")
down_go_results_gentable[[i]] <- GenTable(godata,classic=result,orderBy="classic",ranksOf="classic",
topNodes=length(usedGO(godata)),numChar=50)
down_go_results_gentable[[i]]$qvalue <- p.adjust(sort(score(result)),"BH")
down_go_results_gentable[[i]] <- down_go_results_gentable[[i]][order(down_go_results_gentable[[i]]$qvalue),]
down_go_results_table[[i]] <- down_go_results_gentable[[i]][1:30,]
# NOTE(review): same gentable/table length mismatch as in the up loop.
down_go_results_gentable[[i]]$Term <- ifelse(is.na(Definition(down_go_results_gentable[[i]]$GO.ID)),
down_go_results_gentable[[i]]$Term,Definition(down_go_results_table[[i]]$GO.ID))
down_go_results_gentable[[i]]$Sig_Genes <- sapply(
sapply(genesInTerm(godata,down_go_results_gentable[[i]]$GO.ID),function(x){
sigGenes(godata)[sigGenes(godata) %in% x]
}),function(y){paste(y,collapse = ",")})
down_go_results_table[[i]]$Sig_Genes <- down_go_results_gentable[[i]][1:30,]$Sig_Genes
down_go_results_gentable[[i]]$All_Genes <- sapply(
genesInTerm(godata,down_go_results_gentable[[i]]$GO.ID),
function(x){paste(x,collapse = ",")})
down_go_results[[i]] <- result
}
## Draw GO enrichment dot plots; ggsave picks the device from the extension
library(ggplot2)
## Create the output directory for the enrichment results
dir.create("./GO_enrichment_results")
for(i in 1:3){
tmp=up_go_results_table[[i]]
## compose the image file name
name=paste0(output,"_Up_",go_type[i],"_","Enrichment_Map")
tmp$Annot_comb <- paste(tmp$GO.ID,tmp$Term,sep=" : ")
tmp$qvalue <- as.numeric(tmp$qvalue)
tmp$Significant <- as.numeric(tmp$Significant)
tmp$Annot_comb <- factor(tmp$Annot_comb,levels = rev(tmp$Annot_comb))
p<- ggplot(tmp,aes(qvalue,Annot_comb))+geom_point(aes(size=Significant,color=qvalue))+
scale_color_gradient(low="red",high="green",limits=c(0,1))+
labs(color="P.adjust",size="Significant Count",x="P.adjust",
y="GO Terms")+theme(plot.title=element_text(hjust = 0.5))+theme_bw()
ggsave(paste0("./GO_enrichment_results/",name,".pdf"),plot=p,width=25,height=15,units = "cm")
# also print the GO DAG for the top 10 significant nodes
printGraph(up_go[[i]],up_go_results[[i]],
firstSigNodes = 10,useInfo = "all",pdfSW=F,
fn.prefix=paste0("./GO_enrichment_results/",name,"DAG"))
write.table(up_go_results_gentable[[i]],file=paste0(
"./GO_enrichment_results/",name,".xls"),sep="\t",quote=F,row.names = F)
}
# Same plots for the down-regulated gene set.
for(i in 1:3){
tmp=down_go_results_table[[i]]
name=paste0(output,"_Down_",go_type[i],"_","Enrichment_Map")
tmp$Annot_comb <- paste(tmp$GO.ID,tmp$Term,sep=" : ")
tmp$qvalue <- as.numeric(tmp$qvalue)
tmp$Significant <- as.numeric(tmp$Significant)
tmp$Annot_comb <- factor(tmp$Annot_comb,levels = rev(tmp$Annot_comb))
p<- ggplot(tmp,aes(qvalue,Annot_comb))+geom_point(aes(size=Significant,color=qvalue))+
scale_color_gradient(low="red",high="green",limits=c(0,1))+
labs(color="P.adjust",size="Significant Count",x="P.adjust",
y="GO Terms")+theme(plot.title=element_text(hjust = 0.5))+theme_bw()
ggsave(paste0("./GO_enrichment_results/",name,".pdf"),plot=p,width=25,height=15,units = "cm")
printGraph(down_go[[i]],down_go_results[[i]],
firstSigNodes = 10,useInfo = "all",pdfSW=F,
fn.prefix=paste0("./GO_enrichment_results/",name,"DAG"))
write.table(down_go_results_gentable[[i]],file=paste0(
"./GO_enrichment_results/",name,".xls"),sep="\t",quote=F,row.names = F)
}
## Draw network and heatmap figures for the enriched GO terms
library(igraph)
library(fields)
library(reshape2)
library(ggplot2)
library(scales)
up_igraph_results_table <- list()
down_igraph_results_table <- list()
## DE up: build vertex/edge tables from the top 15 enriched GO terms;
## an edge weight is the number of significant genes two terms share.
for(i in 1:3){
num <- 0
links <- data.frame()
vertices <- data.frame()
vertices <- up_go_results_table[[i]][1:15,c(1,2,4,7)] ## top 15
colnames(vertices) <- c("GO.ID","Term","Significant","qvalue")
for(j in 1:(nrow(up_go_results_table[[i]][1:15,])-1)){ ## top 15
from <- up_go_results_table[[i]][j,]$GO.ID
from_genes <- unlist( strsplit( up_go_results_table[[i]][j,]$Sig_Genes,","))
for(k in (j+1):nrow(up_go_results_table[[i]][1:15,])){ ## top 15
end <- up_go_results_table[[i]][k,]$GO.ID
end_genes <- unlist( strsplit( up_go_results_table[[i]][k,]$Sig_Genes,","))
from_end_num <- sum(from_genes %in% end_genes)
num <- num+1
links[num,"from"] <- from
links[num,"end"] <- end
links[num,"count"] <- from_end_num
}
}
# keep only term pairs that actually share genes
links <- links[links$count>0,]
up_igraph_results_table[[i]] <- list(vertices,links)
}
##draw the up networks
for(n in 1:3){
tmp <- list()
vertices <- data.frame()
d <- data.frame()
tmp <- up_igraph_results_table[[n]]
vertices <- tmp[[1]]
d <- tmp[[2]]
net <- graph_from_data_frame(
d=d,vertices=vertices,directed = FALSE)
# vertex color encodes the adjusted p-value on a red-to-yellow ramp
rbPal <- colorRampPalette(c("red","yellow"))
V(net)$color <- rbPal(1000)[cut(seq(0,1,0.001),breaks = 1000)][
round(vertices$qvalue,digits = 3)*1000+1] ## +1 guards against qvalue = 0
V(net)$label <- vertices$Term
V(net)$label.family <- "Times"
V(net)$label.cex <- 0.6
V(net)$label.dist <- 0.5
E(net)$width <- d$count*0.15
pdf(paste0("./GO_enrichment_results/",output,"_Up_",go_type[n],"_network.pdf"))
plot(net,layout=layout_nicely)
# add a small horizontal color-bar legend for the adjusted p-value
image.plot(zlim=seq(0,1),legend.only = TRUE,
col=rbPal(1000)[cut(seq(0,1,by=0.001),breaks=1000)],
horizontal = TRUE,legend.shrink=0.2,legend.width = 1,
legend.args=list(text="P.adjust",cex=0.7,line=0.1),
axis.args=list(at=c(0,0.5,1),labels=c(0,0.5,1),cex.axis=0.7,tck=-0.15,lwd=0,line=-0.95),
smallplot = c(0.85,0.95,0.9,0.95))
dev.off()
}
## DE down: likewise build tables from the top 15 GO terms
for(i in 1:3){
num <- 0
vertices <- data.frame()
links <- data.frame()
vertices <- down_go_results_table[[i]][1:15,c(1,2,4,7)] ## top 15
colnames(vertices) <- c("GO.ID","Term","Significant","qvalue")
for(j in 1:(nrow(down_go_results_table[[i]][1:15,])-1)){ ## top 15
from <- down_go_results_table[[i]][j,]$GO.ID
from_genes <- unlist( strsplit( down_go_results_table[[i]][j,]$Sig_Genes,","))
for(k in (j+1):nrow(down_go_results_table[[i]][1:15,])){ ## top 15
end <- down_go_results_table[[i]][k,]$GO.ID
end_genes <- unlist( strsplit( down_go_results_table[[i]][k,]$Sig_Genes,","))
from_end_num <- sum(from_genes %in% end_genes)
num <- num+1
links[num,"from"] <- from
links[num,"end"] <- end
links[num,"count"] <- from_end_num
}
}
links <- links[links$count>0,]
down_igraph_results_table[[i]] <- list(vertices,links)
}
##draw the down networks
for(n in 1:3){
tmp <- list()
vertices <- data.frame()
d <- data.frame()
tmp <- down_igraph_results_table[[n]]
vertices <- tmp[[1]]
d <- tmp[[2]]
net <- graph_from_data_frame(
d=d,vertices=vertices,directed = F)
rbPal <- colorRampPalette(c("red","yellow"))
V(net)$color <- rbPal(1000)[cut(seq(0,1,by=0.001),breaks = 1000)][
round(vertices$qvalue,digits=3)*1000+1] ## +1 guards against qvalue = 0
V(net)$label <- vertices$Term
V(net)$label.family <- "Times"
V(net)$label.cex <- 0.6
V(net)$label.dist <- 0.5
E(net)$width <- d$count*0.15
pdf(paste0("./GO_enrichment_results/",output,"_Down_",go_type[n],"_network.pdf"))
plot(net,layout=layout_nicely)
image.plot(zlim=seq(0,1),legend.only = TRUE,
col=rbPal(1000)[cut(seq(0,1,by=0.001),breaks=1000)],
horizontal = TRUE,legend.shrink=0.2,legend.width = 1,
legend.args=list(text="P.adjust",cex=0.7,line=0.1),
axis.args=list(at=c(0,0.5,1),labels=c(0,0.5,1),cex.axis=0.7,tck=-0.15,lwd=0,line=-0.95),
smallplot = c(0.85,0.95,0.9,0.95))
dev.off()
}
## Up-regulated: heatmap of GO terms vs genes, genes ordered by name
for(m in 1:3){
tmp <- data.frame()
genes_up <- vector()
Data <- data.frame()
tmp=up_go_results_table[[m]]
# gather the significant genes of all plotted terms
for(i in 1:nrow(tmp)){
genes_up <- append(genes_up, unlist(strsplit(tmp[i,]$Sig_Genes,",")))
}
genes_up <- sort(unique(genes_up))
# 0/1 membership matrix: one row per GO term, one column per gene
Data <- data.frame(matrix(1:length(genes_up),nrow=1))
for(j in 1:nrow(tmp)){
Data[j,] <- as.integer(genes_up %in% unlist(strsplit(tmp[j,]$Sig_Genes,",")))
}
colnames(Data) <- factor(genes_up,levels=genes_up)
rownames(Data) <- factor(tmp$GO.ID,levels=rev(tmp$GO.ID))
x1 <- vector()
x2 <- vector()
y1 <- vector()
y2 <- vector()
q <- vector()
d <- data.frame()
p <- NULL
# one rectangle (cell) per term/gene pair; absent pairs collapse to 0
for(k in 1:nrow(Data)){
for(n in 1:ncol(Data)){
x1 <- append(x1,Data[k,n]*(n-0.45))
x2 <- append(x2,Data[k,n]*(n+0.45))
y1 <- append(y1, Data[k,n]*(k-0.45))
y2 <- append(y2, Data[k,n]*(k+0.45))
}
q <- append(q,rep(tmp[k,]$qvalue,length(genes_up)))
}
d <- data.frame(x1=x1,x2=x2,y1=y1,y2=y2,q=q)
p <- ggplot() + theme_bw()+ geom_rect(
data=d,mapping=aes(xmin=x1,xmax=x2,ymin=y1,ymax=y2,fill=q))+
scale_fill_gradient(low="red",high="blue",limits=c(0,1))+
scale_y_continuous(breaks=seq(1,length(tmp$GO.ID)),labels=tmp$GO.ID,expand = c(0,0))+
scale_x_continuous(breaks=seq(1,length(genes_up)),labels=genes_up,expand = c(0,0))+
theme(axis.text.x=element_text(angle=60,vjust=1,hjust=1,size=6.5),
plot.title=element_text(hjust = 0.5))+
labs(y="GO Terms",x="DE Genes",fill="P.adjust")
ggsave(paste0("./GO_enrichment_results/",output,"_Up_",go_type[m],"_by_Name_heapmap.pdf"),
plot=p,width = 28,height=18,units = "cm")
}
## Up-regulated: same heatmap, genes sorted by decreasing fold change
for(m in 1:3){
tmp <- data.frame()
genes_up <- vector()
Data <- data.frame()
tmp=up_go_results_table[[m]]
for(i in 1:nrow(tmp)){
genes_up <- append(genes_up, unlist(strsplit(tmp[i,]$Sig_Genes,",")))
}
genes_up <- sort(unique(genes_up))
genes_up <- genes_up[order(DE_up[genes_up,"log2FoldChange"],
decreasing = TRUE)]
Data <- data.frame(matrix(1:length(genes_up),nrow=1))
for(j in 1:nrow(tmp)){
Data[j,] <- as.integer(genes_up %in% unlist(strsplit(tmp[j,]$Sig_Genes,",")))
}
colnames(Data) <- factor(genes_up,levels=genes_up)
rownames(Data) <- factor(tmp$GO.ID,levels=rev(tmp$GO.ID))
x1 <- vector()
x2 <- vector()
y1 <- vector()
y2 <- vector()
q <- vector()
d <- data.frame()
p <- NULL
for(k in 1:nrow(Data)){
for(n in 1:ncol(Data)){
x1 <- append(x1,Data[k,n]*(n-0.45))
x2 <- append(x2,Data[k,n]*(n+0.45))
y1 <- append(y1, Data[k,n]*(k-0.45))
y2 <- append(y2, Data[k,n]*(k+0.45))
}
q <- append(q,rep(tmp[k,]$qvalue,length(genes_up)))
}
d <- data.frame(x1=x1,x2=x2,y1=y1,y2=y2,q=q)
p <- ggplot() + theme_bw()+ geom_rect(
data=d,mapping=aes(xmin=x1,xmax=x2,ymin=y1,ymax=y2,fill=q))+
scale_fill_gradient(low="red",high="blue",limits=c(0,1))+
scale_y_continuous(breaks=seq(1,length(tmp$GO.ID)),labels=tmp$GO.ID,expand = c(0,0))+
scale_x_continuous(breaks=seq(1,length(genes_up)),labels=genes_up,expand = c(0,0))+
theme(axis.text.x=element_text(angle=60,vjust=1,hjust=1,size=6.5),
plot.title=element_text(hjust = 0.5))+
labs(y="GO Terms",x="DE Genes",fill="P.adjust")
ggsave(paste0("./GO_enrichment_results/",output,"_Up_",go_type[m],"_by_Fold_heapmap.pdf"),
plot=p,width = 28,height=18,units = "cm")
}
## Down-regulated: heatmap of GO terms vs genes, genes ordered by name
for(m in 1:3){
tmp <- data.frame()
genes_down <- vector()
Data <- data.frame()
tmp=down_go_results_table[[m]]
for(i in 1:nrow(tmp)){
genes_down <- append(genes_down, unlist(strsplit(tmp[i,]$Sig_Genes,",")))
}
genes_down <- sort(unique(genes_down))
Data <- data.frame(matrix(1:length(genes_down),nrow=1))
for(j in 1:nrow(tmp)){
Data[j,] <- as.integer(genes_down %in% unlist(strsplit(tmp[j,]$Sig_Genes,",")))
}
colnames(Data) <- factor(genes_down,levels=genes_down)
rownames(Data) <- factor(tmp$GO.ID,levels=rev(tmp$GO.ID))
x1 <- vector()
x2 <- vector()
y1 <- vector()
y2 <- vector()
q <- vector()
p <- NULL
for(k in 1:nrow(Data)){
for(n in 1:ncol(Data)){
x1 <- append(x1,Data[k,n]*(n-0.45))
x2 <- append(x2,Data[k,n]*(n+0.45))
y1 <- append(y1, Data[k,n]*(k-0.45))
y2 <- append(y2, Data[k,n]*(k+0.45))
}
q <- append(q,rep(tmp[k,]$qvalue,length(genes_down)))
}
d <- data.frame(x1=x1,x2=x2,y1=y1,y2=y2,q=q)
p <- ggplot() + theme_bw()+ geom_rect(
data=d,mapping=aes(xmin=x1,xmax=x2,ymin=y1,ymax=y2,fill=q))+
scale_fill_gradient(low="red",high="blue",limits=c(0,1))+
scale_y_continuous(breaks=seq(1,length(tmp$GO.ID)),labels=tmp$GO.ID,expand = c(0,0))+
scale_x_continuous(breaks=seq(1,length(genes_down)),labels=genes_down,expand = c(0,0))+
theme(axis.text.x=element_text(angle=60,vjust=1,hjust=1,size=6.5),
plot.title=element_text(hjust = 0.5))+
labs(y="GO Terms",x="DE Genes",fill="P.adjust")
ggsave(paste0("./GO_enrichment_results/",output,"_Down_",go_type[m],"_by_Name_heapmap.pdf"),
plot=p,width = 28,height=18,units = "cm")
}
##down heatmap of GO terms with genes by fold
## Same as the previous loop, but with the gene axis ordered by increasing
## log2 fold change (most strongly down-regulated first).
for(m in 1:3){
  tmp <- data.frame()
  genes_down <- vector()
  Data <- data.frame()
  tmp=down_go_results_table[[m]]
  for(i in 1:nrow(tmp)){
    genes_down <- append(genes_down, unlist(strsplit(tmp[i,]$Sig_Genes,",")))
  }
  genes_down <- sort(unique(genes_down))
  # order by log2FoldChange ascending (most negative = strongest down first)
  genes_down <- genes_down[order(DE_down[genes_down,"log2FoldChange"])]
  Data <- data.frame(matrix(1:length(genes_down),nrow=1))
  for(j in 1:nrow(tmp)){
    Data[j,] <- as.integer(genes_down %in% unlist(strsplit(tmp[j,]$Sig_Genes,",")))
  }
  colnames(Data) <- factor(genes_down,levels=genes_down)
  rownames(Data) <- factor(tmp$GO.ID,levels=rev(tmp$GO.ID))
  x1 <- vector()
  x2 <- vector()
  y1 <- vector()
  y2 <- vector()
  q <- vector()
  p <- NULL
  for(k in 1:nrow(Data)){
    for(n in 1:ncol(Data)){
      x1 <- append(x1,Data[k,n]*(n-0.45))
      x2 <- append(x2,Data[k,n]*(n+0.45))
      y1 <- append(y1, Data[k,n]*(k-0.45))
      y2 <- append(y2, Data[k,n]*(k+0.45))
    }
    q <- append(q,rep(tmp[k,]$qvalue,length(genes_down)))
  }
  d <- data.frame(x1=x1,x2=x2,y1=y1,y2=y2,q=q)
  p <- ggplot() + theme_bw()+ geom_rect(
    data=d,mapping=aes(xmin=x1,xmax=x2,ymin=y1,ymax=y2,fill=q))+
    scale_fill_gradient(low="red",high="blue",limits=c(0,1))+
    scale_y_continuous(breaks=seq(1,length(tmp$GO.ID)),labels=tmp$GO.ID,expand = c(0,0))+
    scale_x_continuous(breaks=seq(1,length(genes_down)),labels=genes_down,expand = c(0,0))+
    theme(axis.text.x=element_text(angle=60,vjust=1,hjust=1,size=6.5),
          plot.title=element_text(hjust = 0.5))+
    labs(y="GO Terms",x="DE Genes",fill="P.adjust")
  ggsave(paste0("./GO_enrichment_results/",output,"_Down_",go_type[m],"_by_Fold_heapmap.pdf"),
         plot=p,width = 28,height=18,units = "cm")
}
##part 4
##clusterProfiler KEGG Module enrichment analysis of the up/down DE gene sets
library(AnnotationDbi)
library(AnnotationForge)
library(AnnotationHub)
library(clusterProfiler)
library(enrichplot)
##abau for Acinetobacter baumannii AB0303
##KEGG Modules are manually curated functional units; in some cases a KEGG
##Module has a clear and direct biological interpretation.
##https://guangchuangyu.github.io/2016/04/kegg-module-enrichment-analysis/
##https://www.genome.jp/kegg/catalog/org_list.html
dir.create("./KEGG_enrichment_results")
# Map the current locus tags to the old tags used by the KEGG "abau" annotation.
# NOTE(review): header=F relies on the reassignable alias F; FALSE is safer.
locus_exchange <- read.delim("ab030_locustag_exchange.txt",header=F,sep="\t",quote="",row.names = 1)
DE_up$old_Gene_ID <- locus_exchange[DE_up$Gene_ID,]
DE_down$old_Gene_ID <- locus_exchange[DE_down$Gene_ID,]
# pvalueCutoff/qvalueCutoff = 2 effectively disable filtering so that ALL
# tested modules are returned, not only significant ones.
kegg_DE_up <- enrichMKEGG(gene=DE_up$old_Gene_ID,organism = "abau",keyType ="kegg",minGSSize = 1,
                          pAdjustMethod="BH",pvalueCutoff =2,qvalueCutoff = 2)
kegg_DE_down <- enrichMKEGG(gene=DE_down$old_Gene_ID,organism = "abau",keyType="kegg",minGSSize= 1,
                            pAdjustMethod="BH",pvalueCutoff = 2,qvalueCutoff = 2)
kegg_DE_up_dataframe <- data.frame(kegg_DE_up)
kegg_DE_down_dataframe <- data.frame(kegg_DE_down)
# Export the module enrichment tables as tab-delimited text and as xlsx.
write.table(kegg_DE_up_dataframe,file=paste0("./KEGG_enrichment_results/",output,"_KEGG_up_3vs2.txt"),
            sep="\t",quote=FALSE)
write.xlsx(kegg_DE_up_dataframe,file =paste0("./KEGG_enrichment_results/",output,"_KEGG_up_3vs2.xlsx"),
           row.names=FALSE)
write.table(kegg_DE_down_dataframe,file=paste0("./KEGG_enrichment_results/",output,"_KEGG_down_3vs2.txt"),
            sep="\t",quote=FALSE)
write.xlsx(kegg_DE_down_dataframe,file =paste0("./KEGG_enrichment_results/",output,"_KEGG_down_3vs2.xlsx"),
           row.names=FALSE)
# Pathway-level (rather than module-level) enrichment, kept for reference:
#kegg_DE_up_simple <- enrichKEGG(gene=DE_up$old_Gene_ID,organism = "abau",keyType ="kegg",minGSSize = 1,
#                                pAdjustMethod="BH",pvalueCutoff =2,qvalueCutoff = 2)
#kegg_DE_down_simple <- enrichKEGG(gene=DE_down$old_Gene_ID,organism = "abau",keyType="kegg",minGSSize= 1,
#                                  pAdjustMethod="BH",pvalueCutoff = 2,qvalueCutoff = 2)
##browseKEGG
##browseKEGG(kk, 'hsa04110')
##library("pathview")
#hsa04110 <- pathview(gene.data = geneList,
#                     pathway.id = "hsa04110",
#                     species = "hsa",
#                     limit = list(gene=max(abs(geneList)), cpd=1))
|
a58156e8c4beeeb48093f715a7e768ab8ba32b5f
|
1f8a1f92ce5098a89bc9a2558e03030d67a35eba
|
/data/ggd318_permafrost_and_ice_map_circumarctic/generate_pf.r
|
29998784c454d625c4139e4c844752a94cae4ea0
|
[] |
no_license
|
goose888/SBGC
|
6df866d87412181fc0ad092a7097386b86b79570
|
d2d6fc76848139096edb55ee18a4c1480327e830
|
refs/heads/master
| 2020-03-20T23:31:04.728924
| 2020-01-27T21:16:55
| 2020-01-27T21:16:55
| 137,850,115
| 1
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 3,494
|
r
|
generate_pf.r
|
# ===================================================
# Script for reproject permafrost extent data
# to WGS84 half by half degree geographical coord
# So called climate modeling grid (CMG)
# Author: Shijie Shu
# ===================================================
library(RNetCDF)
library(sp)
library(raster)
library(rgdal)
## Functions
# EPSG:3410, NSIDC EASE-Grid Global
# EPSG:3410, NSIDC EASE-Grid Global
# Read a flat ASCII dump of a 1441 x 1441 EASE-Grid field and wrap it in a
# raster with the hard-coded EASE-Grid (EPSG:3408) geometry.
#   gridfile  path to the whitespace-separated ASCII grid values
#   header    path to the .HDR file (currently unused; geometry is hard-coded)
#   name      variable name (currently unused)
#   inCRS     input CRS string (currently unused); kept for compatibility
convertGrid <- function(gridfile, header, name, inCRS="+init=epsg:3410"){
  d <- as.matrix(read.table(gridfile))
  # The original code filled pf element-by-element with
  #   pf[j, i] <- d[(i - 1) * 1441 + j]
  # which is exactly R's default column-major matrix() fill -- use the
  # vectorized reshape instead of an O(n^2) interpreted double loop.
  pf <- matrix(as.vector(d), nrow = 1441, ncol = 1441)
  # EASE-Grid global geometry in metres (previously parsed from `header`,
  # now hard-coded; x_res/y_res retained for documentation).
  x_westing <- -9024309
  y_northing <- 9024309
  x_res <- 12533.7625
  y_res <- 12533.7625
  b <- extent(c(x_westing, -x_westing, -y_northing, y_northing))
  smr <- raster(b, ncol=dim(pf)[1], nrow=dim(pf)[2], crs="+init=epsg:3408")
  # transpose so values are laid out row-wise as raster() expects
  smr[] <- t(pf)
  return(smr)
}
# Reproject a raster onto the global 0.5-degree WGS84 (EPSG:4326) grid using
# nearest-neighbour resampling (appropriate for categorical permafrost codes).
transformTo <- function(r1){
  target <- raster(xmn = -180, xmx = 180, ymn = -90, ymx = 90,
                   nrows = 360, ncols = 720, crs = "+init=epsg:4326")
  projectRaster(r1, target, method = 'ngb')
}
## Main Program
## Reads the permafrost-extent ASCII grid, reprojects it to a global
## half-degree WGS84 grid, and writes the result as NetCDF and/or GeoTIFF.
# User defined variable
header <- 'FW_ML_DOMAIN.HDR' # Share the same header
varname <- 'pfcode'
output_nc <- TRUE
output_gtiff <- FALSE
# Do not edit below unless you have confidence
# Read in latitude and longitude
fname <- 'pf.ascii'
smr <- convertGrid(fname,header,varname)
# plot(smr)
## anything below zero is NA (-1 is missing data, soil moisture is +ve)
smr[smr < -0.1] <- NA
smrp = transformTo(smr) # takes a short while
if(output_nc) {
  # bin centers of the 0.5-degree target grid (lat decreasing, north first)
  longvector <- seq(-179.75, 179.75, 0.5)
  latvector <- seq(89.75, -89.75, -0.5)
  # transpose so the matrix is indexed [lon, lat], matching the NetCDF
  # dimension order declared below
  avgdata <- t(as.matrix(smrp))
  nc <- create.nc(paste(fname, '_proj.nc',sep=''))
  # Define the dimensions
  dim.def.nc(nc, "Lon", length(longvector))
  dim.def.nc(nc, "Lat", length(latvector))
  # Define the coordinate variables
  var.def.nc(nc, "Lon", "NC_FLOAT", "Lon")
  var.def.nc(nc, "Lat", "NC_FLOAT", "Lat")
  var.def.nc(nc, "PFCODE", "NC_FLOAT", c("Lon", "Lat"))
  # Put attributes
  att.put.nc(nc, "PFCODE", "missing_value", "NC_FLOAT", -9999.)
  att.put.nc(nc, "PFCODE", "long_name", "NC_CHAR", "Permafrost extent and ground ice conditions.")
  # Put the data (na.mode=0 writes NA as the declared missing_value)
  var.put.nc(nc, "Lon", longvector, na.mode=0)
  var.put.nc(nc, "Lat", latvector, na.mode=0)
  var.put.nc(nc, "PFCODE", avgdata, na.mode=0)
  close.nc(nc)
}
if(output_gtiff) {
  rf <- writeRaster(smrp, filename=paste(fname,'_proj.tif',sep=''), format="GTiff", overwrite=TRUE)
}
|
6cb5260c69a54b048c431d7ec86711e8828ee587
|
20e0a3e02bc19b77817a9acfb644364e5cf63984
|
/plot3.R
|
a73afebb64e6b290b77b0b49a0ffd54ad1732b94
|
[] |
no_license
|
welkingliu/Course-Project-2
|
2e4b55800688d12afed3233852c8ab0746c394f7
|
1b1d1d53cf2097628276fbbd807afab7dd2634b1
|
refs/heads/master
| 2021-01-11T14:01:43.436015
| 2017-06-20T20:43:26
| 2017-06-20T20:43:26
| 94,933,158
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 789
|
r
|
plot3.R
|
# plot3.R -- plot total PM2.5 emissions in Baltimore City by source type/year.
# NOTE(review): assumes `balt.city` (a subset of the NEI data) and ggplot2
# are already loaded by an earlier script -- confirm before running standalone.
type_bc <- unique(balt.city$type)
# col 1 = year, cols 2-5 = per-type yearly totals (4 measurement years)
type_df <- matrix(NA,4,5)
type_df[,1] <- unique(balt.city$year)
n=2
for (i in type_bc)
{
  # subset to one source type, then sum emissions within each year
  data <- balt.city[balt.city$type==i,]
  type_df[,n] <- with(data, tapply(Emissions, year,sum))
  n = n+1
}
type_df <- data.frame(type_df)
# NOTE(review): these names assume type_bc is ordered Point, Nonpoint,
# Onroad, Nonroad -- verify against unique(balt.city$type).
colnames(type_df) <- c("Year", "Point","Nonpoint","Onroad","Nonroad")
# type_df
png("plot3.png", width = 480, height = 480)
# NOTE(review): a bare ggplot expression is auto-printed at the console /
# under Rscript, but not when source()'d -- wrap in print() in that case.
ggplot(data=type_df, aes(x=Year))+geom_line(aes(y=Point,color=type_bc[1]))+
  geom_line(aes(y=Nonpoint,color=type_bc[2]))+
  geom_line(aes(y=Onroad,color=type_bc[3]))+
  geom_line(aes(y=Nonroad,color=type_bc[4]))+
  scale_colour_manual("Type", breaks=type_bc, values=c(1,2,3,4))+
  ylab("Total Emissions")+labs(title="Types of Sources in the Baltimore City, Maryland")
dev.off()
|
f21cad6bcfcbe6c6bd40cd06aa8909fd43ec5572
|
13820464274573242439e68242de2b8dde270805
|
/tests/testthat/test_fit_configs.R
|
82379966e0cf9d7b993766759a23b19777a66e8c
|
[
"MIT"
] |
permissive
|
stephensrmmartin/MIRES
|
dc19f8dee6206f17d7608fb9d34139df6f9d4450
|
9e13c0ffe1613b1cb1b0d0962760e1faf5516607
|
refs/heads/master
| 2023-03-07T09:31:25.438014
| 2021-02-24T02:30:46
| 2021-02-24T02:30:46
| 326,791,248
| 0
| 1
|
NOASSERTION
| 2023-09-10T14:08:46
| 2021-01-04T19:46:15
|
R
|
UTF-8
|
R
| false
| false
| 1,610
|
r
|
test_fit_configs.R
|
test_that("Model configurations change as expected.", {
  # Fit a small MIRES model under every combination of inclusion model,
  # identification scheme and score-saving, then check that each requested
  # option is faithfully recorded in the fitted object's metadata.
  data(sim_loadings)
  ds <- sim_loadings
  expect_s3_class(sim_loadings, "data.frame")
  form <- myFactor ~ x_1 + x_2 + x_3 + x_4 + x_5 + x_6 + x_7 + x_8
  # Deliberately tiny sampler settings: this test exercises configuration
  # plumbing, not MCMC convergence.
  iter <- 10
  cores <- 1
  chains <- 1
  config_matrix <- expand.grid(inclusion_model = c("dependent", "independent"),
                               identification = c("sum_to_zero", "hierarchical"),
                               save_scores = c(TRUE, FALSE),
                               stringsAsFactors = FALSE)
  fits <- lapply(1:nrow(config_matrix), function(x) {
    args <- list(formula = form,
                 group = quote(group),
                 data = ds,
                 iter = iter,
                 cores = cores,
                 chains = chains,
                 control = list(adapt_delta = .8))
    # append this row's configuration as extra named arguments to mires()
    args <- c(args, config_matrix[x,])
    # short chains inevitably warn; suppress so the test output stays clean
    suppressWarnings((do.call(mires, args)))
  })
  for(i in 1:length(fits)) {
    # Class
    expect_s3_class(fits[[i]], "mires")
    # Save scores
    expect_equal(fits[[i]]$meta$save_scores, config_matrix[i, "save_scores"])
    # Identification method
    expect_equal(fits[[i]]$meta$sum_coding, config_matrix[i, "identification"] == "sum_to_zero")
    # Inclusion model independent or dependent
    expect_equal(fits[[i]]$meta$hmre, config_matrix[i, "inclusion_model"] == "dependent")
    # All should be multi = FALSE for now.
    expect_false(fits[[i]]$meta$multi)
    expect_false(fits[[i]]$meta$eta_cor_nonmi)
  }
})
|
aa8740673f1153dec19a7afe50c9fcbfc0e36ce9
|
41762d55e0600512241940c77b33239970c41557
|
/Liz.Brooks_SPR_MER_vs_MSY_for_Matias_july18_2013.r
|
aa29cb0c5ecc83f0bf10f93c131b68ee2abb607f
|
[] |
no_license
|
JuanMatiasBraccini/Git_reference.points
|
cf8365b16294307e56ae5d754c2aba965e474cd4
|
d295633004b5492ba1cc9e31fde2be0f6c9dcda8
|
refs/heads/master
| 2021-06-16T13:51:03.284644
| 2021-04-28T05:37:41
| 2021-04-28T05:37:41
| 191,690,517
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 9,104
|
r
|
Liz.Brooks_SPR_MER_vs_MSY_for_Matias_july18_2013.r
|
# Derive Reference points from life history parameters and selectivity
# (for Matias Braccini)
#
# Created on: 14 June 2013
# Last updated: 18 July 2013
#
# Liz Brooks (Liz.Brooks@noaa.gov)
#
# Example assumes 16 age classes with selectivity on first age only (fake data)
# the age 16 category is assumed to be a plus group
# Beverton-Holt functional form is assumed for the stock recruit relationship
#
# ***NOTE: Units for SSB are number of pups per female (not in biomass)
#
# This code creates a .csv file with estimates for MER and MSY
rm(list=ls(all=TRUE)) # Remove all variables, etc. from the session memory
# NOTE(review): rm(list=ls()) and setwd() inside scripts are discouraged
# practice; kept to preserve the original workflow.
graphics.off() # close any open graphics windows
#------------------USER MUST DEFINE THESE---------------------------#
if(!exists('handl_OneDrive')) source('C:/Users/myb/OneDrive - Department of Primary Industries and Regional Development/Matias/Analyses/SOURCE_SCRIPTS/Git_other/handl_OneDrive.R')
#working directory (copy from Explorer window and double all "\")
# setwd() returns the PREVIOUS directory, which is captured in `wd`
wd <- setwd(handl_OneDrive("Analyses/Reference Points/From Liz Brooks"))
# specify the biological parameters at age
nages <- 16 # specify the number of age classes
ages <- seq(1,nages) # this will create a vector from 1 to the number of ages
# natural mortality for ages 1-16
M.age <- c(0.1, 0.1, 0.1, 0.1, 0.1, 0.1, 0.1, 0.1, 0.1, 0.1, 0.1, 0.1, 0.1, 0.1, 0.1, 0.1)
# mortality of pups (survival from age 0 to age 1)
# note: this should be the value that gives maximal pup survival when stock size is really low
M.pup <- 0.2
# maturity at age
mat.age <- c(0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0.5, 0.75, 1, 1)
# pup production at age (number of pups born per female per year)
pup.prod.age <- c(0, 0, 0, 0, 0, 0, 0, 0,0, 0, 0, 0, 4, 6, 8, 8)
# sex ratio (if 50:50 male female, enter 0.5)
sex.ratio <- 0.5
# specify time of the year when spawning (or pupping) occurs as a fraction between 0 and 1
spawn.time = 0
# weight at age
wgt.age <- c(10.8, 60.9, 147.0, 253.0, 363.0, 469.0, 564.0, 646.0, 716.0, 773.0,
             820.0, 858.0, 888.0, 912.0, 931.0, 946.0)
# fishery selectivity at age (knife-edge on the first age class only)
sel.age <- c(1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0)
##-----------------------That's all----------------------------------
##-----------------Do not change anything below this line------------
##-------------------------------------------------------------------
# NOTE(review): this immediately restores the PREVIOUS working directory,
# undoing the setwd() above before any output is written -- confirm intent.
setwd(wd)
#### FUNCTIONS ###########################################################
#-------Spawners per recruit -----------------------------
# Spawning output per recruit at fishing mortality F.mult, with the last age
# class treated as a plus group.
#   nages      number of age classes (last = plus group)
#   fec.age    fecundity at age (callers pre-scale by sex ratio)
#   mat.age    maturity at age
#   M.age      natural mortality at age
#   F.mult     fully-selected fishing mortality multiplier
#   sel.age    fishery selectivity at age
#   spawn.time fraction of the year elapsed before spawning
s.per.recr <- function(nages, fec.age, mat.age, M.age, F.mult, sel.age, spawn.time) {
  z <- M.age + F.mult * sel.age
  # survival from recruitment to the start of each age class
  survive.to <- c(1, cumprod(exp(-z[-nages])))
  # per-age spawning contribution, discounted to spawning time within the year
  contrib <- survive.to * fec.age * mat.age * exp(-z * spawn.time)
  # plus group: geometric accumulation of survivors in the terminal age
  contrib[nages] <- contrib[nages] / (1 - exp(-z[nages]))
  sum(contrib)
}
#-------Yield per recruit -----------------------------
# Equilibrium yield per recruit (Baranov catch equation summed over ages),
# with the last age class treated as a plus group.
ypr <- function(nages, wgt.age, M.age, F.mult, sel.age) {
  f.age <- F.mult * sel.age
  z <- M.age + f.age
  # survival from recruitment to the start of each age class
  survive.to <- c(1, cumprod(exp(-z[-nages])))
  per.age <- wgt.age * f.age * (1 - exp(-z)) / z * survive.to
  # plus group: all remaining fish eventually become available, so the
  # within-year (1 - e^-z) factor is dropped
  per.age[nages] <- wgt.age[nages] * f.age[nages] * survive.to[nages] / z[nages]
  sum(per.age)
}
#---------------------------------------------------------
#-------Equilibrium SSB (includes Ricker option) ------------------------
# Equilibrium spawning stock biomass for a given spawners-per-recruit level.
#   recr.par     steepness h (is.steepness = TRUE) or the maximum
#                reproductive rate alpha (is.steepness = FALSE)
#   R0           unexploited recruitment
#   spr, spr0    fished and unfished spawners per recruit
#   SRtype       1 = Beverton-Holt (the only implemented form)
# Fixes: default was the reassignable alias `T` -> TRUE; an unsupported
# SRtype previously failed with the obscure "object 'ssb' not found" --
# it now fails fast with a clear message.
ssb.eq <- function(recr.par, R0, spr, spr0, is.steepness=TRUE, SRtype) {
  sprF <- spr / spr0
  if (SRtype == 1) {
    if (is.steepness) {
      alpha.hat <- 4 * recr.par / (1 - recr.par)
    } else {
      alpha.hat <- recr.par
    }
    ssb <- spr0 * R0 * (sprF * alpha.hat - 1.0) / (alpha.hat - 1.0)
  } else {
    stop("ssb.eq: only SRtype = 1 (Beverton-Holt) is implemented", call. = FALSE)
  }
  return(ssb)
}
#------------------------------------
# Beverton-Holt recruitment at spawner abundance S, parameterized by either
# steepness h (is.steepness = TRUE) or max reproductive rate alpha (FALSE).
# Fixes: default `T` -> TRUE (T is a reassignable alias); removed the dead
# preallocation `y = rep(0, length(S))`, which was immediately overwritten.
bev.holt.alpha <- function(S, R0, recr.par, spr0, is.steepness=TRUE){
  if (is.steepness) {
    alpha.BH <- 4 * recr.par / (1 - recr.par)
  } else {
    alpha.BH <- recr.par
  }
  R0 * S * alpha.BH / (R0 * spr0 + (alpha.BH - 1.0) * S)
}
#------------------------------------
##---- Calculate Fmsy, MSY given selectivity and SR relationship ------------------------------
# Solve for Fmsy (or Fmer when wgt.age = 1) by maximizing equilibrium yield
# with nlminb, given the life history, selectivity and Beverton-Holt alpha
# (maximum reproductive rate). Returns a named vector of 8 quantities.
# NOTE(review): depends on s.per.recr(), ypr(), ssb.eq() defined above, and
# reads the GLOBAL variable `steep` for element 8 (it is NOT derived from the
# alpha.hat argument) -- confirm this is intentional. `R.vals` is unused.
get.MSY.vector <- function(nages, pup.prod.age, sex.ratio, mat.age, M.age,
                           sel.age, spawn.time, wgt.age, alpha.hat, R.vals=1, srtype=1) {
  R0 <- 1
  sr0 <- 1
  # unexploited spawners per recruit for this life history / selectivity
  sr0.calc <- s.per.recr(nages=nages, fec.age=(pup.prod.age*sex.ratio), mat.age=mat.age, M.age= M.age, F.mult=0, sel.age=sel.age, spawn.time=spawn.time)
  ahat.val <- alpha.hat
  MSY.soln <- rep(NA, 8)
  names(MSY.soln) <- c("Fmsy", "MSY", "SPRmsy", "Rel.SSBmsy", "Rel.Rmsy", "YPRmsy",
                       "Convergence.code", "steepness")
  F.start=0.12
  # Objective: negative equilibrium yield at F. The inner `F.start` argument
  # shadows the starting value above -- it is the optimizer's decision variable.
  get.yield.f.min <- function(F.start) {
    temp.spr = s.per.recr(nages=nages, fec.age=(pup.prod.age*sex.ratio), mat.age=mat.age, M.age= M.age, F.mult=F.start, sel.age=sel.age, spawn.time=spawn.time)/sr0.calc
    temp.ypr = ypr(nages=nages, wgt.age=wgt.age, M.age=M.age, F.mult=F.start, sel.age=sel.age )
    temp.SSB = ssb.eq( recr.par=ahat.val, R0=R0, spr=temp.spr, spr0=sr0, is.steepness=F, SRtype=srtype )
    # equilibrium yield = YPR * recruitment = YPR * SSB / SPR
    yield = temp.ypr*temp.SSB/temp.spr
    yy=-1*yield
    return(yy)
  } # end get.yield.f.min function
  F.nlmin <- nlminb( start=F.start , objective=get.yield.f.min, lower=0.0001, upper=5.0,
                     control=list(eval.max=500, iter.max=500 ) )
  MSY.soln[1] <- F.nlmin$par #Fmsy
  MSY.soln[2] <- -1*F.nlmin$objective #MSY
  # calculate other MSY quantities
  spr.nlmin <- s.per.recr(nages=nages, fec.age=pup.prod.age*sex.ratio , mat.age=mat.age , M.age= M.age , F.mult=F.nlmin$par, sel.age=sel.age , spawn.time=spawn.time)/sr0.calc
  MSY.soln[3] <- spr.nlmin/sr0 #SPRmsy
  ssb.nlmin <- ssb.eq( recr.par=ahat.val, R0=R0 , spr=spr.nlmin, spr0=sr0 , is.steepness=F, SRtype=srtype )
  MSY.soln[4] <- ssb.nlmin #relative SSBmsy
  if (srtype==1) {
    Rmsy.nlmin <- bev.holt.alpha(S=ssb.nlmin,R0=R0 ,recr.par=ahat.val ,spr0=sr0, is.steepness=F )
    MSY.soln[5 ] <- Rmsy.nlmin #relative Rmsy
  }
  ypr.nlmin <- ypr(nages=nages, wgt.age=wgt.age , M.age=M.age , F.mult=F.nlmin$par, sel.age=sel.age )
  MSY.soln[6 ] <- ypr.nlmin #YPRmsy
  MSY.soln[7] <- F.nlmin$convergence #code=0 indicates convergence
  MSY.soln[8] <- steep #steepness (GLOBAL -- see note in header)
  return(MSY.soln)
} # end function
###-------------------------------------------------------------------------------------
#===============================================================================
#===============================================================================
## End of Functions
#===============================================================================
#===============================================================================
#unexploited spawners per recruit (F = 0): pups-per-female per recruit
s.per.r0 <- s.per.recr(nages=nages, fec.age=pup.prod.age*sex.ratio, mat.age=mat.age, M.age=M.age,
                       F.mult=0, sel.age=sel.age, spawn.time=spawn.time )
#maximum reproductive rate (sensu Myers): pup survival times unexploited SPR
alpha.hat <- exp(-M.pup)*s.per.r0
#steepness (also read as a global by get.MSY.vector)
steep <- alpha.hat/(alpha.hat+4)
# NOTE(review): the bare expression below is printed when run interactively
# and otherwise discarded; its purpose is undocumented -- confirm before
# removing.
1/sqrt(alpha.hat)
# solve for MER (assuming selectivity=1 for all ages); wgt.age = 1 so yield
# is in recruits, giving maximum excess recruitment rather than biomass
MER_full_sel <- get.MSY.vector(nages=nages, pup.prod.age=pup.prod.age, sex.ratio=sex.ratio,
                               mat.age=mat.age, M.age=M.age, sel.age=rep(1,nages),spawn.time=spawn.time,
                               wgt.age=rep(1,nages), alpha.hat=alpha.hat, R.vals=1, srtype=1)
# solve for MER (using input selectivity vector)
MER_user_sel <- get.MSY.vector(nages=nages, pup.prod.age=pup.prod.age, sex.ratio=sex.ratio,
                               mat.age=mat.age, M.age=M.age, sel.age=sel.age, spawn.time=spawn.time,
                               wgt.age=rep(1,nages), alpha.hat=alpha.hat, R.vals=1, srtype=1)
# solve for MSY (assuming selectivity=1 for all ages); weight-at-age in grams
MSY_full_sel <- get.MSY.vector(nages=nages, pup.prod.age=pup.prod.age, sex.ratio=sex.ratio,
                               mat.age=mat.age, M.age=M.age, sel.age=rep(1,nages), spawn.time=spawn.time,
                               wgt.age=wgt.age, alpha.hat=alpha.hat, R.vals=1, srtype=1)
# solve for MSY (using input selectivity vector)
MSY_user_sel <- get.MSY.vector(nages=nages, pup.prod.age=pup.prod.age, sex.ratio=sex.ratio,
                               mat.age=mat.age, M.age=M.age, sel.age=sel.age, spawn.time=spawn.time,
                               wgt.age=wgt.age, alpha.hat=alpha.hat, R.vals=1, srtype=1)
MSY_Table <- cbind(MER_full_sel, MER_user_sel, MSY_full_sel, MSY_user_sel)
# write out MSY_Table as a .csv file
write.csv(MSY_Table, file="MSY_Table.csv", row.names=T)
|
f744306e060f8129308743993506a226bc0edea8
|
588b34931c242745e0ab1968370dacf7183f6fc3
|
/scripts/lib/Generalize_bases_funcs.R
|
41a4557480c3b485739bc11c248431bab1077ae4
|
[] |
no_license
|
Blosberg/nanopiper
|
b3b4bc5a7b87172d5be82a615118d23977a687df
|
a8b53faf0fa185b116d954f023dd52dfc9181b13
|
refs/heads/master
| 2023-02-23T16:07:46.483564
| 2020-09-15T10:45:16
| 2020-09-15T10:45:16
| 107,108,751
| 0
| 0
| null | 2020-09-15T10:45:17
| 2017-10-16T09:47:41
|
R
|
UTF-8
|
R
| false
| false
| 4,255
|
r
|
Generalize_bases_funcs.R
|
# These are functions for handling the conversion of bases to more generalized constructs:
# e.g., if you want to input ACGNW, and you want the "N" to trace over all of ACGT, and the "W" to trace over the "Weak" bases: AT, etc. See IUPAC naming convention for a comprehensive list.
# ==================================================================
# --- produce list of sequences that trace over unknown bases (default mapping: IUPAC)
# Expand a sequence containing ambiguity codes into every concrete sequence it
# represents (default mapping: IUPAC nucleotide codes). E.g. "AW" -> AA, AT.
# Errors if the sequence contains characters absent from the map.
sequence_trace <- function( sequence_in = stop("sequence must be provided"),
                            base_tracemap_in = list(
                              # --- Standard IUPAC notation: ----
                              "A" = "A",
                              "C" = "C",
                              "G" = "G",
                              "T" = "T",
                              "W" = c("A","T"), # Weak (A/T)
                              "S" = c("C","G"), # Strong (C/G)
                              "M" = c("A","C"), # aMino
                              "K" = c("G","T"), # Keto
                              "R" = c("A","G"), # puRine
                              "Y" = c("C","T"), # pYrimidine
                              "B" = c("C","G","T"), # not-A
                              "D" = c("A","G","T"), # not-C
                              "H" = c("A","C","T"), # not-G
                              "V" = c("A","C","G"), # not-(T or U)
                              "N" = c("A","C","G","T")
                            )
                          )
{
  # split the query into single characters and validate against the map
  chars <- unlist(strsplit(sequence_in, split = ""))
  if (anyNA(match(chars, names(base_tracemap_in)))) {
    stop("sequence contains unrecognized bases")
  }
  # per position: the vector of concrete bases that position can take
  choices.per.pos <- lapply(base_tracemap_in[chars],
                            function(s) unlist(strsplit(s, split = NULL)))
  # Fold left-to-right, crossing every accumulated prefix with the next
  # position's choices. expand.grid varies the prefix fastest, which
  # reproduces the original enumeration order.
  combine <- function(prefixes, choices) {
    grid <- expand.grid(prefixes, choices)
    apply(grid, 1, paste0, collapse = "")
  }
  Reduce(combine, choices.per.pos[-1], init = choices.per.pos[[1]])
}
# ==================================================================
# --- take average of bases in a given position
# Average the histograms of all concrete sequences represented by a (possibly
# ambiguous) sequence. `histlist_in` must map each concrete sequence to a
# hist()-like list sharing one common x-binning.
trace_histogram <- function( histlist_in = stop("reads must be provided"),
                             sequence_in = stop("sequence must be provided"),
                             return_as_histlist_element = FALSE
                           )
{
  traced.seqs <- sequence_trace( sequence_in = sequence_in )
  n.seqs <- length(traced.seqs)
  # TODO: sanity-check that all histograms share the same x-binning
  # (assumed here, as in the original implementation).
  bin.widths <- diff( histlist_in[[ 1 ]]$breaks )
  # per-bin probability mass for each concrete sequence, summed in order
  mass.per.seq <- lapply(traced.seqs,
                         function(s) histlist_in[[s]]$density * bin.widths)
  cumulative_hist <- matrix(Reduce(`+`, mass.per.seq), length(bin.widths), 1)
  NORM <- sum(cumulative_hist)
  # each per-sequence histogram integrates to 1, so the summed mass must be
  # numerically equal to the number of traced sequences
  if (abs((NORM - n.seqs) / n.seqs) > 0.0001) {
    stop("ERROR: normalization violation in sequence trace function.")
  }
  if (return_as_histlist_element) {
    # package the average as a hist()-like list on the shared binning
    list( "breaks"   = histlist_in[[ 1 ]]$breaks,
          "density"  = (cumulative_hist / n.seqs) / bin.widths,
          "mids"     = histlist_in[[ 1 ]]$mids,
          "xname"    = paste0(histlist_in$xname, "-->seq: ", sequence_in),
          "equidist" = histlist_in[[ 1 ]]$equidist
        )
  } else {
    (1 / NORM) * cumulative_hist
  }
}
|
8b84b0f852d48683d99b49452a865d768ba817d3
|
ffdea92d4315e4363dd4ae673a1a6adf82a761b5
|
/data/genthat_extracted_code/Strategy/examples/getStratName.Rd.R
|
4200d85b2ddf4c28ba44a98127b4a54764ae79ab
|
[] |
no_license
|
surayaaramli/typeRrh
|
d257ac8905c49123f4ccd4e377ee3dfc84d1636c
|
66e6996f31961bc8b9aafe1a6a6098327b66bf71
|
refs/heads/master
| 2023-05-05T04:05:31.617869
| 2019-04-25T22:10:06
| 2019-04-25T22:10:06
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 478
|
r
|
getStratName.Rd.R
|
# Extracted example code from the Strategy package docs for getStratName().
# NOTE(review): `assets` is not defined in this snippet; it must already
# exist in the workspace (see the Strategy package examples) for this to run.
library(Strategy)
### Name: getStratName
### Title: Get strategy function name from 'Strategy'-object
### Aliases: getStratName getStratName,Strategy-method
### ** Examples
##Not run:
# MA(200)-Strategy
params <- list(k=200)
myStrat.MA <- Strategy(assets=assets, strat="MA", strat.params=params)
# Get strategy function name from MA(200)-Strategy
getStratName(myStrat.MA) # returns "MA"
getStratName(myStrat.MA, include.params=TRUE) # returns "MA(200)"
##End(Not run)
|
9c088831520dda023ddc6ba6a2efc1a4fe094cfc
|
120de1ae49850f8212efc39ab9fa266f175dc4c6
|
/man/bbk.holidays.Rd
|
e12dd9865b8165a20ecee9fe648e178361772505
|
[] |
no_license
|
vsrimurthy/EPFR
|
168aed47aa2c48c98be82e3d8c833d89e1d11e04
|
544471a8d0cf75c7d65a195b9f6e95d6b1d6800f
|
refs/heads/master
| 2023-08-02T14:50:25.754990
| 2023-07-29T13:56:39
| 2023-07-29T13:56:39
| 118,918,801
| 0
| 1
| null | null | null | null |
UTF-8
|
R
| false
| true
| 797
|
rd
|
bbk.holidays.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/EPFR.r
\name{bbk.holidays}
\alias{bbk.holidays}
\title{bbk.holidays}
\usage{
bbk.holidays(x, y)
}
\arguments{
\item{x}{= a matrix/df of predictors, the rows of which are indexed by time}
\item{y}{= a matrix/df of the same dimensions as <x> containing associated forward returns}
}
\description{
Sets <x> to NA whenever <y> is NA
}
\seealso{
Other bbk: \code{\link{bbk.bin.rets.prd.summ}},
\code{\link{bbk.bin.rets.summ}},
\code{\link{bbk.bin.xRet}}, \code{\link{bbk.data}},
\code{\link{bbk.drawdown}}, \code{\link{bbk.fanChart}},
\code{\link{bbk.fwdRet}}, \code{\link{bbk.histogram}},
\code{\link{bbk.matrix}}, \code{\link{bbk.summ}},
\code{\link{bbk.turnover}}, \code{\link{bbk}}
}
\keyword{bbk.holidays}
|
9b79f23a14c86b58ad0803273dfa50f8bc410036
|
9681bd52218e64da9a5e705872dc22dc1f6b6b37
|
/man/openSkiesRoute.Rd
|
795b5bfb90a6d30afa2c9816ed62bfb907c4ac38
|
[] |
no_license
|
pebbleshx/openSkies
|
98cd8f38deafa9fd224c50d197752284d0b3e7c2
|
e9d48c0c28c555c277ae4c8c48509781e4973391
|
refs/heads/main
| 2023-03-13T15:08:41.051172
| 2021-03-06T17:20:27
| 2021-03-06T17:20:27
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,443
|
rd
|
openSkiesRoute.Rd
|
\name{openSkiesRoute}
\alias{openSkiesRoute}
\title{An \code{\link{R6Class}} object representing a flight route}
\description{
\code{\link{R6Class}} object representing a flight route, usually operated by
a commercial airline. Contains information about the callsign under which the
route is operated, the operator itself and the airports of origin and
destination. New instances can be manually created by providing values for at
least the fields \code{call_sign}, \code{origin_airport} and \code{destination_airport}.
Alternatively, \code{\link{getRouteMetadata}} will return an \code{openSkiesRoute}
object corresponding to the route with the provided callsign.
}
\usage{
openSkiesRoute
}
\section{Fields}{
\describe{
\item{\code{call_sign}}{String with callsign of the route
}
\item{\code{origin_airport}}{String with the ICAO 4-letter code of the airport
of origin
}
\item{\code{destination_airport}}{String with the ICAO 4-letter code of the
destination airport
}
\item{\code{operator_IATA}}{String with the IATA code for the operator of the
route
}
\item{\code{flight_number}}{String with the flight number for the route. The
callsign is usually composed of an airline identifier and the flight number
}
}}
\examples{
# Create an openSkiesRoute object corresponding to the American Airlines route
# with callsign AAL683
if(interactive()){
test_route <- getRouteMetadata("AAL683")
test_route
}
}
|
e35bd50154d08083a1637a08932d822b093df3c4
|
bdcf4ac07f23a9df633772ee252f0cc3c219e6ce
|
/R/genericMethods.R
|
4242a74a2147e77d3ac8ac220298d58b52d36447
|
[] |
no_license
|
brune001/FLFleet
|
f5cf2f48a2c640642a4b60bb8e81afb6674de008
|
0eb2c3b73ad2651e3b2500de2cab2f1e848572a7
|
refs/heads/master
| 2021-01-17T21:16:29.020668
| 2013-04-02T13:56:10
| 2013-04-02T13:56:10
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,772
|
r
|
genericMethods.R
|
# genericMethods - «Short one line description»
# genericMethods
# Copyright 2009 Iago Mosqueira, JRC. Distributed under the GPL 2 or later
# $Id: $
# Reference:
# Notes:
## S4 generic declarations (constructors, accessors and replacement methods)
## for the FLFleet class hierarchy: FLMetier(s), FLCatch(es), FLFleet(s).
## Methods are implemented elsewhere in the package.
# FLMetier
setGeneric('FLMetier', function(catches, ...)
  standardGeneric('FLMetier'))
# gear
setGeneric('gear', function(object, ...) standardGeneric('gear'))
setGeneric('gear<-', function(object, ..., value) standardGeneric('gear<-'))
# effshare (proportion of fleet effort allocated to a metier)
setGeneric('effshare', function(object, ...) standardGeneric('effshare'))
setGeneric('effshare<-', function(object, ..., value) standardGeneric('effshare<-'))
# vcost (variable costs)
setGeneric('vcost', function(object, ...) standardGeneric('vcost'))
setGeneric('vcost<-', function(object, ..., value) standardGeneric('vcost<-'))
# catches
setGeneric('catches', function(object, ...) standardGeneric('catches'))
# setGeneric('catches<-', function(object, ..., value) standardGeneric('catches<-'))
# effort
setGeneric("effort", function(object, metier, ...) standardGeneric("effort"))
setGeneric("effort<-", function(object, ..., value) standardGeneric("effort<-"))
# fcost (fixed costs)
setGeneric('fcost', function(object, ...) standardGeneric('fcost'))
setGeneric('fcost<-', function(object, ..., value) standardGeneric('fcost<-'))
# capacity
setGeneric('capacity', function(object, ...) standardGeneric('capacity'))
setGeneric('capacity<-', function(object, ..., value) standardGeneric('capacity<-'))
# crewshare
setGeneric('crewshare', function(object, ...) standardGeneric('crewshare'))
setGeneric('crewshare<-', function(object, ..., value) standardGeneric('crewshare<-'))
# metiers
setGeneric('metiers', function(object, ...) standardGeneric('metiers'))
setGeneric('metiers<-', function(object, ..., value) standardGeneric('metiers<-'))
# FLFleet
setGeneric('FLFleet', function(object, ...)
  standardGeneric('FLFleet'))
# metier (single-metier accessor, selected by the `metier` argument)
setGeneric('metier', function(object, metier, ...)
  standardGeneric('metier'))
setGeneric('metier<-', function(object, metier, ..., value)
  standardGeneric('metier<-'))
# FLCatch
setGeneric('FLCatch', function(object, ...)
  standardGeneric('FLCatch'))
# addFLCatch (combine two catch objects)
setGeneric('addFLCatch', function(e1, e2, ...)
  standardGeneric('addFLCatch'))
# catchNames
setGeneric('catchNames', function(object, ...)
  standardGeneric('catchNames'))
setGeneric('catchNames<-', function(object, ..., value)
  standardGeneric('catchNames<-'))
# catch.sel (catch selectivity)
setGeneric('catch.sel', function(object, ...)
  standardGeneric('catch.sel'))
# FLCatches
setGeneric("FLCatches", function(object, ...)
  standardGeneric("FLCatches"))
# FLMetiers
setGeneric("FLMetiers", function(object, ...)
  standardGeneric("FLMetiers"))
# FLFleets
setGeneric("FLFleets", function(object, ...)
  standardGeneric("FLFleets"))
|
830930a64c09863699fc51843e059722c1092cbe
|
9653102c68d42bb1bae1284fb0840210cd28063f
|
/R/findIt.R
|
1f8cb355220825549519390c1251a7d4457bbeab
|
[] |
no_license
|
RonGuymon/ronsFunctions
|
caf4fd6180a6e169ce8326bf4df5f45a3026b324
|
a16af4c8441f1f926068e91ce8a8237971158456
|
refs/heads/master
| 2021-01-23T05:45:31.438373
| 2018-11-30T16:49:45
| 2018-11-30T16:49:45
| 92,987,270
| 0
| 1
| null | null | null | null |
UTF-8
|
R
| false
| false
| 958
|
r
|
findIt.R
|
#' @title Finds combinations of numbers that add up to a target value.
#' @description Created to find a combination of check deposits that add up to
#'   a total deposit amount. For each combination size, the first matching
#'   combination is assigned into the global environment under a name of the
#'   form "<values>_<sum>_<size>", and all matches are also returned invisibly.
#' @param nums A vector of two or more candidate numbers.
#'   NOTE(review): a single number n is expanded by combn() to 1:n -- supply
#'   at least two values.
#' @param target The target number. Matching uses exact equality via match(),
#'   so round monetary amounts to cents first to avoid floating-point misses.
#' @return (Invisibly) a named list of one-row data frames, one per matching
#'   combination size; empty if no combination sums to the target.
#' @export
findIt <- function(nums, target){
  matches <- list()
  for (i in seq_along(nums)) {
    combos <- combn(nums, i)          # all size-i combinations, one per column
    a <- data.frame(nums = t(combos)) # one combination per row
    a$sums <- rowSums(a)              # sum of each combination
    idx <- match(target, a$sums)      # first size-i combination hitting target
    if (!is.na(idx)) {
      b <- a[idx, ]
      # Bug fix: the original paste(b, i, sep = "_") produced a character
      # VECTOR (one element per column of b), which is not a valid single
      # variable name for assign(). Collapse to one string instead.
      nm <- paste(paste(unlist(b), collapse = "_"), i, sep = "_")
      matches[[nm]] <- b
      assign(nm, b, envir = .GlobalEnv)
    }
  }
  invisible(matches)
}
|
f5f5862d0acf22a91e0f3d6fbf7b6a5ed6700967
|
2516c2e99df08ebe655cdc8c73f4928bc5801a3d
|
/practica_qdalda.R
|
15c88438f644f85dd15453008fa346c5b4d0cec4
|
[] |
no_license
|
martihomssoler/Keras_APA
|
1a4f53ff7a349377df335d3091e8ce5e638e633b
|
c547130b60bad8862d9c6fb57c9b61e5166abc81
|
refs/heads/master
| 2021-09-04T09:22:37.374555
| 2018-01-17T17:33:24
| 2018-01-17T17:33:24
| 115,360,055
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 4,959
|
r
|
practica_qdalda.R
|
#####################################
# APA Laboratori 5 ##
## LDA/QDA/NBayes/kNN/RegLog ##
## version of November, 2017 ##
#####################################
library(MASS)
####################################################################
## Example 1: Visualizing and classifying adultss with LDA and QDA
####################################################################
## We have the results of an analysis on adultss grown in a region in Italy but derived from three different cultivars.
## The analysis determined the quantities of 13 chemical constituents found in each of the three types of adultss.
## The goal is to separate the three types of adultss:
adults <- read.table("groupedall.txt", sep=",", dec=".", header=FALSE)
dim(adults)
colnames(adults) <- c('age','workclass','fnlwgt','education','educationnum','maritalstatus','occupation',
'relationship','race','sex','capitalgain','capitalloss','hoursperweek','nativecountry','morefifty')
# Clean up column names
colnames(adults) <- make.names(colnames(adults))
adults$morefifty<- as.factor(adults$morefifty)
adults$educationnum<-NULL
adults[["capitalgain"]] <- ordered(cut(adults$capitalgain,c(-Inf, 0,
median(adults[["capitalgain"]][adults[["capitalgain"]] >0]),
Inf)),labels = c(0,1,2))
adults[["capitalloss"]] <- ordered(cut(adults$capitalloss,c(-Inf, 0,
median(adults[["capitalloss"]][adults[["capitalloss"]] >0]),
Inf)), labels = c(0,1,2))
summary(adults)
## Optional: pairwise scatterplots colored by class (expensive on this data).
#plot(subset(adults,select=-morefifty),col=unclass(adults$morefifty))
## Fit LDA with the formula interface, which is the most convenient call mode
## when the data lives in a data frame:
lda.model <- lda (morefifty ~ ., data = adults)
lda.model
## NOTE(review): the original comment here discussed wine constituents
## (Magnesium, Proline, Flavanoids); inspect the printed scalings above to see
## which of the census predictors dominate each linear discriminant.
## Plot the data projected on the discriminant axes.
plot(lda.model)
# Alternatively, build the projection plot ourselves, with control over
# color and plotting symbol per class.
adults.pred <- predict(lda.model)
plot(adults.pred$x,type="n")
#as.character(rownames(adults.pred$x))
text(adults.pred$x,labels="o",col=as.integer(adults$morefifty))
legend('bottomright', c("<=50k",">50k"), lty=1, col=c('black', 'red'), bty='n', cex=.75)
# Training (apparent) error: confusion matrix and overall misclassification rate.
tab<-table(adults$morefifty, adults.pred$class)
tab
1 - sum(tab[row(tab)==col(tab)])/sum(tab)
# Let us switch to leave-one-out cross-validation
adults.predcv <- update(lda.model,CV=TRUE)
tab<-table(adults$morefifty,adults.predcv$class)
tab
1 - sum(tab[row(tab)==col(tab)])/sum(tab)
# NOTE(review): the "2 mistakes on 178 observations" remark in the original
# referred to the wine data, not this dataset; read the rate printed above.
## Quadratic Discriminant Analysis is the same, replacing 'lda' by 'qda'.
## Problems may arise if some class has fewer (or equal) observations than
## dimensions (not the case for the adults data).
qda.model <- qda (morefifty ~ ., data = adults)
qda.model
## No projection/plot this time: projection is a linear operator, while the
## QDA decision boundaries are quadratic.
# Apparent (training) error:
adults.pred <- predict(qda.model)
tab<-table(adults$morefifty, adults.pred$class)
tab
1 - sum(tab[row(tab)==col(tab)])/sum(tab)
# Let us switch to leave-one-out cross-validation
adults.predcv <- update(qda.model,CV=TRUE)
tab<-table(adults$morefifty,adults.predcv$class)
tab
1 - sum(tab[row(tab)==col(tab)])/sum(tab)
####################################################################
# Example 2: The Naïve Bayes classifier
####################################################################
library (e1071)
# Random 2/3 train / 1/3 test split (no seed set, so results vary per run).
N <- nrow(adults)
learn <- sample(1:N, round(2*N/3))
nlearn <- length(learn)
ntest <- N - nlearn
# First we build a model using the learn data
model <- naiveBayes(morefifty ~ ., data = adults[learn,])
model
# Compute the apparent (training) error.
# NOTE(review): '-1' drops the first column ('age') from the predictors at
# prediction time only, while the model was fit on all columns — this looks
# like a leftover from a dataset whose first column was the class; confirm.
pred <- predict(model, adults[learn,-1])
# form and display confusion matrix & overall error
tab <- table(pred, adults[learn,]$morefifty)
tab
1 - sum(tab[row(tab)==col(tab)])/sum(tab)
# compute the test (prediction) error
pred <- predict(model, newdata=adults[-learn,-1])
# form and display confusion matrix & overall error
tab <- table(Pred=pred, True=adults[-learn,]$morefifty)
tab
1 - sum(tab[row(tab)==col(tab)])/sum(tab)
# NOTE(review): the original "democrats/republicans" remark came from the
# HouseVotes84 example and does not apply to this census data.
## In the event of empty empirical probabilities, this is how we would set up
## the Laplace correction (aka smoothing):
model <- naiveBayes(morefifty ~ ., data = adults[learn,], laplace = 1)
model
|
47c035436cbc27f8025ec63ebd173b6ba94ecce8
|
23117dc97389d6a98e3ddb19b866111519298208
|
/R/ProgrammingAssignment2.R
|
7b5b3bd08805b09118e02f582718fd22e312cd4b
|
[] |
no_license
|
Gammon-C/datasciencecoursera
|
6005734891f6569b48c5be3725a5dcf379f4896c
|
0d271b74fd858dbbdd3b950b114f0fd12cebcff1
|
refs/heads/master
| 2021-01-10T17:25:20.027023
| 2015-07-26T21:26:24
| 2015-07-26T21:26:24
| 36,087,777
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 892
|
r
|
ProgrammingAssignment2.R
|
##ProgrammingAssignment2
## Create a "cache matrix": a list of four accessor closures around a matrix
## and a lazily computed, cached inverse. Setting a new matrix invalidates
## the cached inverse.
makeCacheMatrix <- function(x = matrix()) {
  # Cached inverse; NULL means "not computed yet" (or invalidated by set()).
  inv <- NULL
  list(
    # Replace the stored matrix and drop any stale cached inverse.
    set = function(y) {
      x <<- y
      inv <<- NULL
    },
    # Return the stored matrix.
    get = function() x,
    # Store a computed inverse in the cache.
    setinverse = function(inverse) inv <<- inverse,
    # Return the cached inverse (NULL if none).
    getinverse = function() inv
  )
}
## Compute the inverse of the special "matrix" returned by makeCacheMatrix.
## If the inverse has already been calculated (and the matrix has not
## changed), retrieve it from the cache instead of recomputing it.
## x   : cache-matrix list exposing get/getinverse/setinverse closures
## ... : further arguments forwarded to solve()
## Returns the inverse matrix.
cacheSolve <- function(x, ...) {
  inv <- x$getinverse()
  if (!is.null(inv)) {
    message("... getting cached inverse matrix")
    return(inv)
  }
  # Fix: forward '...' to solve(); the original declared '...' but silently
  # dropped any extra solver arguments supplied by the caller.
  inv <- solve(x$get(), ...)
  x$setinverse(inv)
  inv
}
|
bfa2610bae4ec1c51c44af50f133dd45ffd89d1b
|
d0afb5023cb9bb3c1dfb6613328d24a50a233c59
|
/man/fitted.FitARMA.Rd
|
cd35c4a8a4600d77fa24e7fcb092270f83d70c4e
|
[] |
no_license
|
cran/FitARMA
|
6c4c68b7c51873ea3595cee776e0a9316fb2053d
|
9f7d7e25ad1c79e121a9b1078ad1ce56bed31f1d
|
refs/heads/master
| 2020-05-18T17:55:16.120647
| 2019-01-04T15:33:55
| 2019-01-04T15:33:55
| 17,679,266
| 1
| 1
| null | null | null | null |
UTF-8
|
R
| false
| false
| 610
|
rd
|
fitted.FitARMA.Rd
|
\name{fitted.FitARMA}
\alias{fitted.FitARMA}
\title{ fitted method for class FitARMA }
\description{
The fitted values are the observed values minus the residuals.
If there is differencing, the observed values are those corresponding to the
differenced time series.
}
\usage{
\method{fitted}{FitARMA}(object, ...)
}
\arguments{
\item{object}{ class FitARMA object }
\item{\dots}{ auxiliary parameters}
}
\value{vector or ts object}
\author{ A.I. McLeod }
\seealso{ \code{\link{FitARMA}} }
\examples{
data(SeriesA)
out<-FitARMA(SeriesA, c(1,0,1))
fitted(out)
}
\keyword{ ts }
|
882bc1dbb6f6201e0be2c66aba8da513467bf259
|
db2eb208c8eb32d618742bf08f4a34d8b4c73a93
|
/R/u_meters_to_miles.R
|
49bf8a43c80ecc861ebfe573207e11066c746cb6
|
[
"MIT"
] |
permissive
|
nelsonroque/contextR
|
eee1fa5855dd437be7685ace94b9a373a71d0a59
|
e765937536ca82f72fe102d9162087e8e2059364
|
refs/heads/master
| 2021-07-21T05:50:12.519433
| 2020-04-25T08:46:50
| 2020-04-25T08:46:50
| 151,361,807
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 212
|
r
|
u_meters_to_miles.R
|
#' contextR
#'
#' Convert a distance in meters to miles.
#'
#' @name u_meters_to_miles
#' @export
#' @param meters class: numeric
#' @return Numeric vector of the same length as `meters`, in miles.
#' @examples
#' u_meters_to_miles(meters)
u_meters_to_miles <- function(meters) {
  # Same conversion factor as the original: 1 meter = 0.00062137 miles.
  meters * 0.00062137
}
|
15106e69c9f1db7ba48a2253ba64c27dfcf39400
|
0b90c7859a04d8590787af7b9f83135bde3f4ce0
|
/scidb command 2.R
|
886e6aa82baa10e8cdcd109d1067da03656211eb
|
[] |
no_license
|
mengluchu/bfast
|
e559497a57c2f5f8a90547cdb71eff3808c1939d
|
87c0b781101c2f7233f32fe222c987bcb9a7dd7c
|
refs/heads/master
| 2016-08-04T16:48:01.332449
| 2015-08-24T17:03:00
| 2015-08-24T17:03:00
| 23,347,493
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 23,029
|
r
|
scidb command 2.R
|
library(scidb)
install.packages("bfast", repos="http://R-Forge.R-project.org")
iquery("save(MOD09Q1_JUARA, '/home/menglu/shared/JUARA1.bin', -2, '(int16, int16, uint16)')")
iquery("
store(
project(
apply(
repart(
apply(
subarray(
MOD09Q1_JUARA,58828,48103,6,59679,49050,643),
evi2,2.5*((nir*0.0001-red*0.0001)/(nir*0.0001+2.4*red*0.0001+1.0))),
<red:int16,nir:int16,quality:uint16,evi2:double>[col_id=0:851,3,2,row_id=0:947,3,2,time_id=0:637,638,0]),
the_i,double(col_id),the_j,double(row_id),the_t,double(time_id)),
the_i, the_j,the_t, evi2),
allarray1)")
iquery("store(apply(try221b221,evi2,2.5*((nir*0.0001-red*0.0001)/(nir*0.0001+2.4*red*0.0001+1.0))),evi2t221221) ")
iquery("store(repart(evi2t2121, <red:int16,nir:int16,quality:uint16,evi2:double>
[col_id=0:20,3,0,row_id=0:20,3,0,time_id=0:637,638,0]),evi2t2121nooverlap3)");
iquery("store(apply(evi2t2121nooverlap3,the_i,double(col_id),the_j,double(row_id),the_t,double(time_id)),tryijt2) ")
iquery("store(project(tryijt2, the_i, the_j,the_t, evi2), evi210b210ijt3)")
iquery("show(allarray1)",return=TRUE)
library("bfast", lib.loc="C:/Users/m_lu0002/Documents/R/win-library/3.1")
scidbconnect("gis-bigdata.uni-muenster.de", 49974, "meng", "bigdata2014")
library(scidb)
scidbconnect("gis-bigdata.uni-muenster.de", 49972, "menglu", "ms3A5Bfn9PCycMr")
scidblist()
iquery('show(all150150o)',return=TRUE)
"try221b221"
iquery("store(repart(evi21b21ijt3, <red:int16,nir:int16,quality:uint16,evi2:double>
[col_id=0:20,3,2,row_id=0:20,3,2,time_id=0:637,638,0]),evi21b21o2)");
iquery("store(subarray(allarray1,0,0,0,848,944,635),all848944o)")
a<-scidb('all150150o')
which(is.na(a[,,][]))
length(
a[1:3,1:3,633][]
a<-scidb("MOD09Q1_JUARA")
which(is.na(
dim(
length(a[58930,48210,6:643][]$red)
)
)
)
#all848944o
#evi21b21o
#all150150
build(<v:int64>[k=0:9999,1000,0],random())
iquery("store(build(<val:double> [time=0:300,100,20],time),ttrc)")
iquery("show(ewmann)",return=TRUE)
a<-scidb("ewmann")
a[1,0][]
iquery("store(r_exec(apply(ttrc,t,double(time)),
'output_attrs=1',
'expr=
chunk<-as.double(length(t))
list(chunk)'),
ewmann)
")
iquery("subarray(try221b221,58828,48103,46,58835,48110,56)", `return` = TRUE, afl = TRUE, iterative = FALSE, n = 10000)
time1<-proc.time()
iquery("store(r_exec(all150150o1,
'output_attrs=2',
'expr=
require(bfast)
require(strucchange)
require(spdep)
load(\"/home/menglu/listcn636.Rdata\")
sarsarbfastt1<-array(, 636)
sarsarbfasts1<-array(, 636)
eviarray<-array(evi2,c(636,3,3)) # scidb array to r array
eviarray2<-aperm(eviarray,c(2,3,1))
bfa <-try(bfastsar(eviarray2, season= \"harmonic\",max.iter=1 ))
if(class(bfa) == \"try-error\")
{
sarsarbfastt1[1:636]<- 1
sarsarbfasts1[1:636]<- 1
} else if (bfa$nobp$Vt == FALSE)
{
bkpttr <- as.integer(bfa$output[[1]]$Vt.bp) # of 16 days e.g. year=16*50/365
sarsarbfastt1[ bkpttr]<-100
} else if (bfa$nobp$Wt == FALSE)
{
bkptse <- as.integer(bfa$output[[1]]$Wt.bp)
sarsarbfasts1[ bkptse]<-200
} else {
sarsarbfastt1[1:636]<-2
sarsarbfastt1[1:636]<-2
}
sarsarbfastt1[which(is.na(sarsarbfastt1))]<-0
sarsarbfasts1[which(is.na(sarsarbfasts1))]<-0
sarsarbfastt1<-as.data.frame.table (sarsarbfastt1)$Freq
sarsarbfasts1<-as.data.frame.table (sarsarbfasts1)$Freq
list( sarsarbfastt1,sarsarbfasts1)'),rexe4)",return=TRUE)
time3<-proc.time()-time1
time3
user system elapsed
4.93 3.72 164359.90
a<-scidb("rexe3")
a[1,1][]
iquery("store(r_exec(all2121o,
'output_attrs=2',
'expr=
require(bfast)
require(strucchange)
require(spdep)
load(\"/home/menglu/listcn636.Rdata\")
sarsarbfastt1<-array(,c(3,3,636))
sarsarbfasts1<-array(,c(3,3,636))
eviarray<-array(evi2,c(636,3,3)) # scidb array to r array
eviarray2<-aperm(eviarray,c(2,3,1))
bfa <-bfastsar(eviarray2, season= \"harmonic\",max.iter=1 )
if (bfa$nobp$Vt == FALSE)
{
bkpttr <- as.integer(bfa$output[[1]]$Vt.bp) # of 16 days e.g. year=16*50/365
sarsarbfastt1[2,2,bkpttr]<-100
}
if (bfa$nobp$Wt == FALSE)
{
bkptse <- as.integer(bfa$output[[1]]$Wt.bp)
sarsarbfasts1[2,2,bkptse]<-200
} else {
bkpttr<-0
bkptse<-0
}
sarsarbfastt1[which(is.na(sarsarbfastt1))]<-0
sarsarbfasts1[which(is.na(sarsarbfasts1))]<-0
sarsarbfastt1<-as.data.frame.table (sarsarbfastt1)$Freq
sarsarbfasts1<-as.data.frame.table (sarsarbfasts1)$Freq
sarsarbfastt1,sarsarbfasts1)'),rexe3)",return=TRUE)
load("/home/menglu/listcn636.Rdata")
sarsarbfastt1<-array(,c(148,148,636))
sarsarbfasts1<-array(,c(148,148,636))
636*9
eviarray<-array(rnorm(5724),c(636,3,3)) # scidb array to r array
eviarray2<-aperm(eviarray,c(2,3,1))
bfas <-bfastsar(eviarray2, season= "harmonic",max.iter=1 )
if (bfa$nobp$Vt == FALSE)
{
bkpttr <- as.integer(bfa$output[[1]]$Vt.bp) # of 16 days e.g. year=16*50/365
sarsarbfastt1[the_i,the_j,bkpttr]<-100
}
if (bfa$nobp$Wt == FALSE)
{
bkptse <- as.integer(bfa$output[[1]]$Wt.bp)
sarsarbfasts1[the_i,the_j,bkptse]<-200
} else {
bkpttr<-0
bkptse<-0
}
sarsarbfastt1[which(is.na(sarsarbfastt1))]<-0
sarsarbfasts1[which(is.na(sarsarbfasts1))]<-0
iquery("store(r_exec(t7,
'output_attrs=1',
'expr=
require(bfast)
require(strucchange)
require(spdep)
load(\"/home/menglu/listcn636.Rdata\")
output2 <-bfastsar(array(rnorm(5764),c(3,3,636)), season= \"harmonic\",max.iter=1 )
# output2<-bfast(ts(1:636,start=2001,frequency=46) , season= \"harmonic\",max.iter=1 )
a<-as.double(output2[1]$Yt)
list(a )'),rexe2)",return=TRUE)
scidbconnect("gis-bigdata.uni-muenster.de", 49962, "menglu", "m7KLKHYKdrQ8fQM") #enterprise
x <- as.scidb(matrix(rnorm(5000*20),nrow=5000))
y <- as.scidb(rnorm(5000))
M <- glm.fit(x, y)
iquery("list('aggregates')",return=TRUE)
coef(M)[]
scidblist()
iquery("load('t1.txt')")
iquery("create array JUARA <red:int16,nir:int16,quality:uint16> [col_id=58828:59679,502,5,row_id=48103:49050,502,5,time_id=0:9200,1,0]")
iquery("store(build <exposure:double> [i=1:8,1,0,j=1:8,1,0]),trya)")
iquery("show(MOD09Q1_JUARA )",return=TRUE)
iquery("load(JUARA,'/home/scidb/shared/JUARA.bin','(int16, int16, uint16)')")
wiquery("show(t1)",return=TRUE)
iquery("scan(t1)",return=TRUE)
iquery("window(TEST_ARRAY,1,1,1,1,median(num))",return=TRUE)
iquery("list('aggregates')",return=TRUE)
# Using glm (similar to glm)
# First, let's run a standard glm example from Dobson (1990) Page 93:
counts <- c(18,17,15,20,10,20,25,13,12)
outcome <- gl(3,1,9)
treatment <- gl(3,3)
d.AD <- data.frame(treatment, outcome, counts)
glm.D93 <- glm(counts ~ outcome + treatment, family = poisson(),data=d.AD)
summary(glm.D93)
sl<-
scidblist()
sl[-2]
x <- as.scidb(d.AD) # couldnt omit name
scidbremove( sl[-2],force=TRUE)
?rm
iquery("scan(x)",return=TRUE)
glm.D93_sci = glm(counts ~ outcome + treatment, family = poisson(), data=a2)
summary(glm.D93_sci)
persist(glm.D93_sci)
save(glm.D93_sci, file="my_model.rdata")
#ms3A5Bfn9PCycMr
#VxSxrk5cnXed74N
#MOD09Q1_JUARA
scidblist()
scidbversion()
iquery("load_library('dense_linear_algebra')", release=1,resp=FALSE)
iquery("load_library('linear_algebra')", release=1,resp=FALSE)
#iquery("load_library('collate')", release=1,resp=FALSE)
iquery("build(<z:double>[i=1:10 ,1,0],i*0.1+0.9)",return=TRUE)
x = iquery(" r_exec(build(<z:double>[i=1:100,100,0],0.1*i+0.9),
'expr=x<-c(0.01:0.08)
require(raster)
r1 <- raster(nrows=108, ncols=21, xmn=0, xmx=10)
r1<-setValues(r1,c(1:ncell(r1)))
xxx<-\\\'raster1\\\'
try<-values(r1)*0.1
list(try)')",return=TRUE);
##
#######
iquery(" r_exec(build(<z:double>[i=1:100,100 ,0],0.1*i+0.9),
'expr=
require(strucchange)
tsz<-ts(z,frequency=12)
btsz<-efp(tsz~1 )
wt<-coef(btsz)[1]
list( wt)')",return=TRUE)
##
iquery(" r_exec(build(<z:double>[i=1:100,100 ,0],0.1*i+0.9),
'expr= ts(seq())
require(zoo)
library(bfast)
NDVIb <- as.ts(zoo(som$NDVI.b, som$Time))
monb <- bfastmonitor(NDVIb, history=\\\'ROC\\\',start = c(2010, 13))
bre<-monb$model$residuals
list(as.double(bre))')
",return=TRUE)
plot(aa$expr_value_0,typ='l',main='bfastmonitor residuals')
############################################################
iquery('dimensions(MOD09Q1_MENG_20140416)',return=TRUE)
iquery('store(repart(smalltest1, <red:int16,nir:int16,quality:uint16> [col_id=0:3,1,0,row_id=0:3,1,0,time_id=0:636,637,0]),target2)')
iquery("store(apply(target2,evi2,2.5*((nir*0.0001-red*0.0001)/(nir*0.0001+2.4*red*0.0001+1.0))),smallevi2) ")
iquery("store(build(< z:int64> [i=1:8,2,0,j=1:8,2,0], i ),t4)")
iquery("store(apply(t4,z1,z+0.5),t5)")
iquery(" store(r_exec(t6,
'output_attrs=2',
'expr=
output1=z1-0.001;
location=c(rep(2.2',64));
list(output1,location)'),t8)
")
iquery(" store(r_exec(t6,
'output_attrs=2',
'expr=
output1=z1-0.001; #length(z1)=4
output2=c(1:4) ;
list(output1,output2*1.0)'),t9)
")
iquery("scan(t9)",return=TRUE)
iquery("store(project(t5,z1),t6)",return=TRUE)
iquery("store(build(< z:double> [i=1:800,2,0], i ),t7)")
#aggregate time series
#iquery("regrid(t7,100,avg(z)) ",return=TRUE)
iquery("dimensions(MOD09Q1_MENG_20140416)",return=TRUE)
require(bfast)
ts1= ts( evi2 ,frequency=46)
monb <- bfastmonitor(ts1, start = 3)
bre<-monb$model$residuals
as.double(bre
s1<-scidb("smallevi1")
ts1<-as.double(s1[][]$evi2[1:493])
ts2<-ts(ts1,frequency=46)
monb <- bfastmonitor(ts2, start = 8)
bre<-monb$model$residuals
# if you dont want to deal with chunk, set chunck size same as dimension
# test the instance and chunks
# it doesnt process sequentially, it sometimes put several chunks in one instance.
# In the output you will have no idea which result conrrespond to which chunk.
# if theres overlap, then looks more complex
iquery(" r_exec(build(<z:double>[i=1:8,2,0,j=1:8,2,0], i+j*0.1 ),
'expr=
x1<-z+0.0001
list(as.double(x1))')
",return=TRUE)
iquery(" r_exec(build(<z:double>[i=1:8,2,0,j=1:8,2,0], i+j*0.1 ),
'expr=
x1<-z+0.0001
list(as.double(x1))')
")
iquery("show(z)",return=TRUE)
####################################################################################
iquery("store( redimension(apply(project(sort(apply(build(<v:int64>[k=0:9999,1000,0],random()),p,k)),p),m,n),
<p:int64> [m=0:*,1000,0]),B)",return=TRUE)
iquery("show(B)",return=TRUE)
btsz<-bfast(tsz,h=0.15, max.iter=1)
plot(btsz)
z <- zoo(cbind(foo = rnorm(5), bar = rnorm(5)))
z$foo
z$xyz <- zoo(rnorm(3), 2:4)
z4 <- zoo(11:15, complex(real = c(1, 3, 4, 5, 6), imag = c(0, 1, 0, 0, 1)))
merge(z4, lag(z4))
merge(z5, lag(z5))
# index values relative to 2001Q1
z5 <- zoo(11:15, letters[1:5])
merge(z5, lag(z5))
str(btsz)
plot(simts)
as.double(simts$time.series)
library(zoo)
lm(z~1)$residuals)
r1 <- raster(nrows=108, ncols=21, xmn=0, xmx=10)
r1<-setValues(r1,c(1:ncell(r1)))
ncell(r1)
plot(r1)
values(r1)
x<-c(0.01:0.08)
y<-runif(10)
z<-seq(1.0,19,0.1)
z
ts(z,frequency=3)
sum(x^2+y^2)/250
lm(z~1)$coefficient
x <- as.scidb(matrix(rnorm(5000*20),nrow=5000))
y <- as.scidb(rnorm(5000))
counts <- c(18,17,15,20,10,20,25,13,12)
outcome <- gl(3,1,9)
treatment <- gl(3,3)
d.AD <- data.frame(treatment, outcome, counts)
glm.D93 <- glm(counts ~ outcome + treatment, family = poisson(),data=d.AD)
summary(glm.D93)
ff <- log(Volume) ~ log(Height) + log(Girth)
utils::str(m <- model.frame(ff, trees))
mat <- model.matrix(ff, m)
dd <- data.frame(a = gl(3,4), b = gl(4,1,12)) # balanced 2-way
options("contrasts")
model.matrix(~ a+b , dd)
model.matrix(~ a + b, dd, contrasts = list(a = "contr.sum"))
model.matrix(~ a + b, dd, contrasts = list(a = "contr.sum", b = "contr.poly"))
m.orth <- model.matrix(~a+b, dd, contrasts = list(a = "contr.helmert"))
crossprod(m.orth) # m.orth is ALMOST orthogonal
# Compare with:
d.AD_sci = as.scidb(d.AD)
glm.D93_sci = glm_scidb(counts ~ outcome + treatment, family = poisson(), data=d.AD_sci)
summary(glm.D93_sci)
M <- glm.fit(x, y)
coef(M)[]
M<-glm_scidb(x,y)
M <- glm.fit(x, y,weights=NULL,family=gaussian())
library('devtools')
install_github("SciDBR","paradigm4")
iquery('dimensions(MOD09Q1_MENG_20140416)',return=TRUE)
scidblist()
#ndviallr<-scidb('ndviinuseall')
#eviallr<-scidb('evi2all1')
iquery('store(subarray(MOD09Q1_MENG_20140416,58930,48210,6,59129,48409,643),inuse6)')
iquery("store(apply(inuse6,evi2,2.5*((nir*0.0001-red*0.0001)/(nir*0.0001+2.4*red*0.0001+1.0))),evi2inuse6) ")
iquery("store(apply(inuse6,ndvi,1.0*(nir-red)/(nir+red)),ndviinuse6) ")
iquery('load_library("linear_algebra")')
iquery("store(apply(inuse6,evi2,2.5*((nir*0.0001-red*0.0001)/(nir*0.0001+2.4*red*0.0001+1.0))),evi2inuse6) ")
iquery('dimensions(MOD09Q1_JUARA)',return=TRUE)
#150by150
quality<-scidb('inuse6')
length(quality[,,][]$quality==4096)
quality150<-quality[1:150,1:150,][]$quality
save(quality150, file='quality150')
load('quality150')
length(which(quality150==4096))/length(quality150)
malquality<-quality150[quality150!=4096]
length(malquality)
table(malquality)
length(quality150)/22500
arrquality150<-array(quality150,c(150,150,637))
qin1<-which(arrquality150!=4096,arr.ind=TRUE)
x<-qin1[,1]+58930-1
y<-qin1[,2]+48210-1
z<-qin1[,3]
coor.quality<-coordinates(getxyMatrix(cbind(x,y),231.6564))
length(coordinates(cbind(x,y)))
plot(coor.quality)
plot(x,y)
length(y)
22772/2
36964/2
plot(modis.mt52,col='purple',add=TRUE)
spplot(coor.quality)
plot(1)
dev.new()
hexbinplot(y~x)
qualitysp<-coordinates(cbind(x,y))
##3rd array: 41 by 43 around 520 for validation
iquery('store(subarray(MOD09Q1_MENG_20140416,59138,48712,6,59180,48752,643),around6)')
iquery("store(apply(around6,evi2,2.5*((nir*0.0001-red*0.0001)/(nir*0.0001+2.4*red*0.0001+1.0))),aevi5) ")
iquery('dimensions(MOD09Q1_MENG_20140416)',return=TRUE)
iquery('store(subarray(around6,1,1,1,4,4,638),smalltest1)')
# create evi around 52
evi52re<-scidb('aevi5')
id<-c()
id<-evi52re[1,1,][]$evi2
itry<-array(,c(43,41,637))
#i0<-1:41;i01<-rep(i0,43);j0<-1:43;j01<-rep(j0,each=41)
#tr1<-lapply(c(i01,j01), function(j01,i01) evi52re[j01,i01,][]$evi2)
for(i in 0:42)
{
for (j in 0:40)
{
itry[i,j,]<-evi52re[i,j,][]$evi2
}
}
itry[9,7,6]-evi52re[9,7,5][]$evi2
#str(evi_a52)
length(ic)/43/41
#evi52re[1,1,2][]$evi2
eviar510<-array(ic,c(41,43,638))
ib[1:10]
head(eviar510)
evi52re[1:10,1,1][]$evi2
head(ic)
evi_a520<-aperm (evi510,c(2,1,3)) #rotate # new second array
save(itry,file='evi510.Rdata')
save(itry,file='itry')
str(itry2)
setwd("C:/Users/m_lu0002/Desktop/Climate/minnesota")
load('evi510.Rdata')
evi52re[,1,1][]$evi2-as.numeric(eviar510[,1,2])
#############evi all
iquery("store(apply(MOD09Q1_MENG_20140416,evi2,2.5*((nir*0.0001-red*0.0001)/(nir*0.0001+2.4*red*0.0001+1.0))),evi2all1) ") #around 25 minutes
#2.5*((NIR-Red)/(NIR+2.4*Red+1)
###
##the 1st 100 by 100 array
# iquery('dimensions(inuse)',return=TRUE) 100*100*638
#No name start length chunk_interval chunk_overlap low high type
#1 0 col_id 58828 852 502 5 58832 58929 int64
#2 1 row_id 48103 948 502 5 48110 48209 int64
#3 2 time_id 0 9201 1 0 6 643 int64
###41 43
c(59138:59180)
c(48712:48753)
if(!file.exists("EQY_US_NYSE_TRADE_20130404.zip"))
{
download.file("ftp://ftp.nyxdata.com/Historical%20Data%20Samples/TAQ%20NYSE%20Trades/EQY_US_NYSE_TRADE_20130404.zip",
"EQY_US_NYSE_TRADE_20130404.zip")
}
# Load just the variable names first
u = unz("EQY_US_NYSE_TRADE_20130404.zip",filename="nysetrades20130404")
N = names(read.table(u,sep="|",stringsAsFactors=FALSE,header=TRUE,nrows=1))
# Now load a small part of the data (later in the day)...
u = unz("EQY_US_NYSE_TRADE_20130404.zip",filename="nysetrades20130404")
x = read.table(u,sep="|",stringsAsFactors=FALSE,header=FALSE,nrows=1e4,skip=1e6)
names(x) = N
# Let's limit our tiny example to three bank stocks
x = x[x$Symbol %in% c("JPM","MS","C"),]
# Assemble some of this into an xts object (requires the xts package)
library("xts")
options(digits.secs=5)
x$t = as.POSIXct(x$SourceTime/1000,origin="2013-4-4",tz="EDT")
x$price = x$PriceNumerator/10^x$PriceScaleCode
idx = x$Symbol=="C"
p = cbind(C=as.xts(x[idx,"price"],order.by=x[idx,"t"]))
idx = x$Symbol=="MS"
p = cbind(p,MS=as.xts(x[idx,"price"],order.by=x[idx,"t"]))
idx = x$Symbol=="JPM"
p = cbind(p,JPM=as.xts(x[idx,"price"],order.by=x[idx,"t"]))
# Let's cut this down to a really small example:
p = p[!duplicated(index(p)),]
p = p[1:20,]
# These data are both sparse (in time) and contain missing values.
print(p)
x$time = as.numeric(x$t)*1000
x$symbol = as.integer(as.integer(factor(x$Symbol,levels=c("C","MS","JPM"))) - 1)
# R doesn't have 64-bit integers, so we need to cast the big time values from
# doubles...
raw = project(bind(as.scidb(x[1:20,c("time","price","symbol")],types=c("double","double","int64")),"t","int64(time)"), c("t","symbol","price"))
# Redimension the raw data into a (sparse) time x symbol array:
prices = redimension(raw, dim=c("t","symbol"))
# Although the prices array is very sparse because its time coordinate axis is
# in milliseconds, it contains the same data as our R xts array p.
# Let's view the schema of that:
print(schema(prices))
load_library('r_exec');
############################ r exec############################
scidbremove("evi2t2121",force=TRUE)
iquery('store(subarray(MOD09Q1_MENG_20140416,58930,48210,6,58950,48230,643),try21b21)')
iquery('store(subarray(MOD09Q1_MENG_20140416,58930,48210,6,58950,48230,643),try21b21)')
iquery("store(apply(try21b21,evi2,2.5*((nir*0.0001-red*0.0001)/(nir*0.0001+2.4*red*0.0001+1.0))),evi2t2121) ")
iquery("store(repart(evi2t2121, <red:int16,nir:int16,quality:uint16,evi2:double>
[col_id=0:20,3,0,row_id=0:20,3,0,time_id=0:637,638,0]),evi2t2121nooverlap3)");
iquery("store(apply(evi2t2121nooverlap3,the_i,double(col_id),the_j,double(row_id),the_t,double(time_id)),tryijt2) ")
iquery("store(project(tryijt2, the_i, the_j,the_t, evi2), evi21b21ijt3)")
iquery("store(subarray(evi21b21ijt,0,0,0,2,2,635),test3by3)" )
iquery("store(subarray(evi21b21ijt,0,0,0,1,1,635),test2by2)" )
###########################################################################
#evi21b21ijt : array with attribute i j t for further retrieving array###
###########################################################################
iquery("scan(test2by2)",return=TRUE)
iquery("scan(evi21b21ijt)",n= 800, return=TRUE)
iquery("r_exec(MENG_test,
'output_attrs=3',
'expr=list(the_i, the_j, value + 100)')");
iquery("show(evi21b21ijt)",return=TRUE)
#evi21b21ijt 21 by 21
#test3by3 3 by 3
ptm <- proc.time()
iquery("store(r_exec(evi21b21ijt3,
'output_attrs=4',
'expr= eviarray<-array(evi2,c(638,3,3))
eviarray2<-aperm(eviarray,c(2,3,1))
list()
require(bfast)
eviarray<-array(evi2,c(638,3,3))
eviarray2<-aperm(eviarray,c(2,3,1))
eday <- as.Date(\"2000-01-30\")
e8day <- seq(eday, length.out=635, by=\"8 days\")
output2 <-bfaarray2(eviarray2,dates=e8day,aggre= \"month\",season= \"harmonic\",max.iter=1,level=0.05)
allbreakpoints<-output2[1:6,,]
allmagnitudes<-output2[7:12,,]
mtime<-allbreakpoints
dimx<-dim(output2)[2]
dimy<-dim(output2)[3]
dimt<-591
newarray<-array(,c(dimx,dimy,dimt))
mag<- allmagnitudes
t3darrbfamul<-tasd.bfa.mul(newarray,mtime,mag)
t3darrbfamul[which(is.na(t3darrbfamul))]<-0
t3df<-as.data.frame.table (t3darrbfamul)
t3dff<-as.data.frame.table (t3darrbfamul)$Freq
list( the_j,the_i,the_t,t3dff )'),rexec21by21)",return=TRUE)
proc.time() - ptm
#list( the_j,the_i,the_t)')",return=TRUE)
#start time 11:39
# iquery("show(rexec21by21)",return=TRUE)
#levels(t3df$Var1)<-c(1:length(levels(t3df$Var1)))
#levels(t3df$Var2) <-c(1:length(levels(t3df$Var2)))
#levels(t3df$Var3)<-c(1:length(levels(t3df$Var3)))
#as.double(t3df$Var1),as.double(t3df$Var2),as.double(t3df$Var3)
iquery("store(r_exec(MENGtest2, 'output_attrs=3', 'expr=list(thei, thej, value + 100)'), MENGtest3)")
iquery("store(attribute_rename(project(unpack(rexec21by21, tmpDim),
expr_value_0,expr_value_1,expr_value_2,expr_value_3),
expr_value_0, thei, expr_value_1, thej, expr_value_2, thet, expr_value_3,value), re2121)");
#Restore the dimensions to int64
iquery("store(project(apply(re2121, i, int64(thei),
j, int64(thej), t, int64(thet)), i, j, t, value), rexec2121)");
#Restore to 2D array
iquery("store(redimension(rexec2121, <value:double> [i=0:20,3,0,j=0:20,3,0,t=0:637,638,0]), rexec2121re)");
iquery("scan(rexec2121)",n=700,return=TRUE)
iquery("scan(try21b21)",n=700,return=TRUE)
test<-scidb("MOD09Q1_MENG_20140416")
test[58930,48210,570:589][]
########################################################################################
NDVIb <- as.ts(zoo(som$NDVI.b, som$Time))
monb <- bfastmonitor(NDVIb, history=\\\'ROC\\\',start = c(2010, 13))
bre<-monb$model$residuals
scidblist()
scidbremove("MENG_test_res",force=TRUE);
remove(MENG_test3);
remove(MENG_test4);
--Reduce to single dimension;
get rid of inst and n, rename attributes
store(attribute_rename(project(unpack(MENG_test1, tmpDim), expr_value_0,expr_value_1,expr_value_2), expr_value_0, the_i, expr_value_1, the_j, expr_value_2, value), MENG_test2);
--Restore the dimensions to int64
store(project(apply(MENG_test2, i, int64(the_i), j, int64(the_j)), i, j, value), MENG_test3);
--Restore to 2D array
store(redimension(MENG_test3, <value:double> [i=0:3,4,0,j=0:3,4,0]), MENG_test4);
iquery(" r_exec(evi21b21ijt3, 'output_attrs=4',
'expr=list(as.double(length(evi2)),as.double(length(the_i)),as.double(length(the_j)),as.double(length(the_t)))')", n=10, return=TRUE)
591*3*3
iquery(" r_exec(evi21b21ijt3, 'output_attrs=1',
'expr=list(as.double(the_t))')", n=700, return=TRUE)
iquery("dimensions(MOD09Q1_MENG_20140416)",n=10,return=TRUE)
iquery("show(evi21b21ijt3)",n=10,return=TRUE)
length(c(1,NA,1))'
iquery("store(build(<val:double NULL DEFAULT null>[i=0:9,9,0,j=0:9,9,0],random()%2), COMWAY)")
iquery("insert(project(apply(apply(apply(join(COMWAY, window(COMWAY, 1, 1, 1, 1, sum(val))), sum, val_sum - val),factor,iif((val = 0 and sum != 3), 0,iif((val = 0 or sum = 3), 1,iif(sum < 2, -1,iif(sum > 3,-1,0)))) ),newval, double(val + factor)), newval), COMWAY)")
iquery("scan(COMWAY)",return=TRUE)
|
efb434f8035e211e61460680da9cb651711061db
|
d6e719e3fbdff43ab3ee4f31c64988cb8c09b25c
|
/stats222/ex1.R
|
e53e308c911ce3095612bb94ac1080824a725dc3
|
[] |
no_license
|
whynotyet/r-code
|
61576920f192f964e22cd929a2747eeeb4fb720d
|
0f196077b6b3eee44a04dc323e9f429a9dba1796
|
refs/heads/master
| 2021-01-19T05:20:47.405177
| 2013-04-20T18:46:11
| 2013-04-20T18:46:11
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 5,965
|
r
|
ex1.R
|
###################################
# STATS 222 Exercise Set (Week 1) #
###################################
### 1. Time1-time2 regressions; Class example 4/2 ###
# a. Push hard on the regression fits shown in the handout (also linked) with standard regression diagnostic tools (residuals etc) to see if you can find anything askew with the time2 on W and time1 regressions (student question). Point is that it is the standard interpretation of the coefficient of W rather than the regression fit that is the issue here.
week1Xi = read.table(file="myth_data.txt", header = T)
colnames(week1Xi)<-c("Xi.1","Xi.3","Xi.5","W")
head(week1Xi)
week1Xi$theta = (week1Xi$Xi.5 - week1Xi$Xi.1)/4
attach(week1Xi)
cor(W,theta)
cor(week1Xi)
pairs(week1Xi)
pairs(week1Xi, pch = 20)
truereg1<-lm(Xi.5 ~ W + Xi.1)
truereg2<-lm(Xi.5 ~ W + Xi.3)
truereg3<-lm(theta ~ W)
summary(truereg1)
summary(truereg2)
summary(truereg3)
confint(truereg1)
confint(truereg2)
confint(truereg3)
truereg1a = lm(theta ~ W + Xi.1)
truereg2a = lm(theta ~ W + Xi.3)
summary(truereg1a)
summary(truereg2a)
confint(truereg1a)
confint(truereg2a)
#additional regression diagnostics (see http://www.statmethods.net/stats/rdiagnostics.html)
install.packages("car")
library(car)
#Test outliers --- no outliers
outlierTest(truereg1)
qqPlot(truereg1, main="QQ Plot")
leveragePlots(truereg1)
#Test non-normality --- reasonably normal in distribution
qqPlot(truereg1, main="QQ Plot")
library(MASS)
sresid <- studres(truereg1)
# -- Residual diagnostics (continued): overlay a standard-normal density on the
# -- histogram of studentized residuals from the preceding model fit.
# NOTE(review): sresid is defined earlier (outside this chunk); outlierTest /
# qqPlot / leveragePlots come from car, studres from MASS -- confirm loaded.
hist(sresid, freq=FALSE, main="Distribution of Studentized Residuals")
xfit<-seq(min(sresid),max(sresid),length=40)
yfit<-dnorm(xfit)
lines(xfit, yfit)
# b. Repeat the handout demonstration regressions using the fallible measures (the X's) from the bottom half of the linked data page. The X's are simply error-in-variable versions of the Xi's: X = Xi + error, with error having mean 0 and variance 10.
week1Xi = read.table(file="myth_data_bottom.txt", header = T)
colnames(week1Xi)<-c("Xi.1","Xi.3","Xi.5","W")
head(week1Xi)
# Individual rate of change estimated from the time-1 and time-5 measurements.
week1Xi$theta = (week1Xi$Xi.5 - week1Xi$Xi.1)/4
# attach() so W/theta/Xi.* resolve by name below.
# NOTE(review): there is no matching detach(); the frame stays on the search path.
attach(week1Xi)
cor(W,theta)
cor(week1Xi)
pairs(week1Xi)
pairs(week1Xi, pch = 20)
# Regressions mirroring the class handout, now on the noisy measures.
truereg1<-lm(Xi.5 ~ W + Xi.1)
truereg2<-lm(Xi.5 ~ W + Xi.3)
truereg3<-lm(theta ~ W)
summary(truereg1)
summary(truereg2)
summary(truereg3)
confint(truereg1)
confint(truereg2)
confint(truereg3)
# Same covariates, but with the rate of change as the response.
truereg1a = lm(theta ~ W + Xi.1)
truereg2a = lm(theta ~ W + Xi.3)
summary(truereg1a)
summary(truereg2a)
confint(truereg1a)
confint(truereg2a)
#Test outliers --- no outliers
outlierTest(truereg1)
qqPlot(truereg1, main="QQ Plot")
leveragePlots(truereg1)
#Test non-normality --- reasonably normal in distribution
qqPlot(truereg1, main="QQ Plot")
sresid <- studres(truereg1)
hist(sresid, freq=FALSE, main="Distribution of Studentized Residuals")
xfit<-seq(min(sresid),max(sresid),length=40)
yfit<-dnorm(xfit)
lines(xfit, yfit)
# Compare 5-number summaries for the amount of change from the earliest time "1" to the final observation "5" using the "Xi" measurements (upper frame) and the fallible "X" observations (lower frame).
week1Xi = read.table(file="myth_data.txt", header = T)
colnames(week1Xi)<-c("Xi.1","Xi.3","Xi.5","W")
X_true <- week1Xi$Xi.5-week1Xi$Xi.1
week1Xi = read.table(file="myth_data_bottom.txt", header = T)
colnames(week1Xi)<-c("Xi.1","Xi.3","Xi.5","W")
X_diff_below <- week1Xi$Xi.5-week1Xi$Xi.1
fivenum(X_true)
fivenum(X_diff_below)
# Side-by-side boxplots of true vs. noisy change scores on a common y-scale.
par(mfrow=c(1,2))
boxplot(X_true, ylim = c(0, 45), main="True Xs")
boxplot(X_diff_below, ylim = c(0, 45), main="Noisy Xs")
#saved as exc1_q1b_boxplots.png
### 2. Captopril and Blood pressure ###
# The file captopril.dat contains the data shown in Section 2.2 of Verbeke, Introduction to Longitudinal Data Analysis, slides. Captopril is an angiotensin-converting enzyme inhibitor (ACE inhibitor) used for the treatment of hypertension. Use the before and after Spb measurements to examine the improvement (i.e. decrease) in blood pressure.
capt = read.table(file="captopril.dat", header = T)
head(capt)
library(reshape)
# Reshape the before/after systolic readings from wide (BSbp, ASbp) to long
# form (one row per subject-by-time), then round-trip through a text file.
capt.Sbp = subset(capt, select = c(ID, BSbp, ASbp))
names(capt.Sbp) = c("ID", "t0", "t1")
capt.molten = melt(capt.Sbp, id.vars = c("ID"))
capt.long = cast(capt.molten, ID + variable ~ .)
head(capt.long)
names(capt.long) = c("ID", "time", "blood_pressure")
attach(capt.long)
write.table(capt.long, file="captopril_long.dat", quote=F)
detach(capt.long)
capt.long<-read.table(file="captopril_long.dat", header=T)
# Obtain a five-number summary for observed improvement.
capt.improv <- capt$ASbp-capt$BSbp
fivenum(capt.improv)
boxplot(fivenum(capt.improv), main="Blood pressure improvement")
# What is the correlation between change and initial blood pressure measurement? Obtain a confidence interval for the correlation and show the corresponding scatterplot.
cor.test(capt.improv,capt$BSbp)
# 95 percent confidence interval:
# -0.6933538 0.2703115
# Per-subject trajectories with fitted lines.
# NOTE(review): xyplot is from lattice, which is not loaded in this chunk --
# confirm it is attached earlier in the file.
xyplot(blood_pressure ~ time | ID, type=c("p", "r"), index.cond=function(x,y)
{coef(lm(y ~ x))[1]}, data=capt.long)
xyplot(blood_pressure ~ time , groups =ID, type=c("r"), index.cond=function(x,y)
{coef(lm(y ~ x))[1]}, data=capt.long, col = c("black"))
### 3. (more challenging). Use mvrnorm to construct a second artificial data example (n=100) mirroring the 4/2 class handout BUT with the correlation between true individual rate of change and W set to .7 instead of 0. Carry out the corresponding regression demonstration.
# Simulate (Xt0, theta, W) from a trivariate normal with cor(theta, W) = .7,
# then build the time-1/3/5 observations as linear functions of the starting
# value and the individual rate of change theta.
# NOTE(review): mvrnorm is from MASS -- confirm it is loaded earlier.
n =100
mu = c(50,5,10)
cov = matrix(c(48, 0,.6*sqrt(48*4) ,0,5.3333333, .7*sqrt(4*5.33333), .6*sqrt(48*4) , .7*sqrt(4*5.33333), 4 ),3,3)
dataset<-mvrnorm(n,mu,cov,empirical=T)
dataframe<-as.data.frame(dataset)
head(dataframe)
names(dataframe)<-c("Xt0","theta","W")
dataframe$Xt1<-dataframe$Xt0 - dataframe$theta
dataframe$Xt3<-dataframe$Xt0 + dataframe$theta
dataframe$Xt5<-dataframe$Xt0 + 3*dataframe$theta
attach(dataframe)
# Difference regression vs. covariate-adjusted regressions, as in the handout.
truediffreg<-lm(I(Xt5-Xt3)~W)
truereg1<-lm(Xt5~W+Xt1)
truereg2<-lm(Xt5~W+Xt3)
summary(truediffreg)
summary(truereg1)
summary(truereg2)
|
5f1f4d8ef55aab23ae03a96da176acc60efbd0dc
|
209af0d72fc59bf161725e157ec71706c7d13a7c
|
/Code/2020_1105_FashionMnist_VerifyMissclassificationReconstructedIMG.R
|
c710070a58f84a0783fcadf3c03117003ff14b71
|
[] |
no_license
|
QiuBingCheng/XAI-Practice
|
71bd2294e7531c75a4e00dfcd98a9eca81556bca
|
36d8e7cf1537240f1f63f33266dcf1f64ba4069f
|
refs/heads/main
| 2023-08-22T01:45:21.997671
| 2021-10-15T07:15:35
| 2021-10-15T07:15:35
| 417,400,140
| 0
| 0
| null | null | null | null |
ISO-8859-3
|
R
| false
| false
| 8,353
|
r
|
2020_1105_FashionMnist_VerifyMissclassificationReconstructedIMG.R
|
#################### check model classify #############################
# Evaluate the classification head of a trained convolutional autoencoder
# (CAE) on Fashion-MNIST and visualise its confusion matrix.
library(tensorflow)
library(keras)
library(caret)
library(data.table)
library(dplyr)
library(ggplot2)
# Load the Fashion-MNIST data set shipped with keras.
fashion_mnist <- dataset_fashion_mnist()
fashion_mnist$train$x
# Scale pixel intensities from [0, 255] to [0, 1].
x_train <- fashion_mnist$train$x/255 #60000 28 28
x_test <- fashion_mnist$test$x/255 #10000 28 28
y_train <- fashion_mnist$train$y
y_test <- fashion_mnist$test$y
x_train[1,,]
round(x_train[1,,],digits = 3)
# Add a trailing channel dimension for the convolutional layers.
xtrain <- array_reshape(x_train, dim=c(dim(x_train)[1],dim(x_train)[2],dim(x_train)[3],1)) #60000 28 28 1
xtest <- array_reshape(x_test, dim=c(dim(x_test)[1],dim(x_test)[2],dim(x_test)[3],1))#10000 28 28 1
## input data (xtrain, xtest, testY, trainY)
# NOTE(review): these readRDS calls overwrite the arrays built above with
# machine-specific saved copies; confirm both sources are preprocessed identically.
xtrain <- readRDS("C:/Users/cindy chu/Desktop/109-1/research/Fashion_mnist/xtrain")
xtest <- readRDS("C:/Users/cindy chu/Desktop/109-1/research/Fashion_mnist/xtest")
testY <- readRDS("C:/Users/cindy chu/Desktop/109-1/research/Fashion_mnist/testY")
trainY <- readRDS("C:/Users/cindy chu/Desktop/109-1/research/Fashion_mnist/trainY")
cae_model <- load_model_hdf5("C:/Users/cindy chu/Desktop/109-1/research/Fashion_mnist/visualization/classification CAE/nolossweight/cae4.h5")
summary(cae_model)
## research model-------------------------------------
# Build a sub-model that stops at the CAE's "classification" layer.
layer_name<-"classification"
classify <- keras_model(inputs=cae_model$input,outputs=get_layer(cae_model,layer_name)$output)
summary(classify)
classify_pred = classify %>% predict(xtest)# 10000 10
library(ramify)
# Predicted class index per test image.
# NOTE(review): `c` shadows base::c from here on.
c<-argmax(classify_pred, rows = TRUE)
data.table(c)
categories <- c("T-shirt", "Trouser", "Pullover", "Dress", "Coat", "Sandal", "Shirt",
"Sneaker", "Bag", "Boot")
pred <- factor(c, labels=categories)
actual <- factor (y_test, labels = categories)
confusion<- caret::confusionMatrix(as.factor(pred), as.factor(actual ))
# NOTE(review): `table` shadows base::table from here on.
table <- data.frame(confusionMatrix(pred, actual)$table)
# Heatmap
# percentage (row-normalised within each true class)
plotTable <- table %>%
group_by(Reference)%>%
mutate(prop = Freq/sum(Freq))
ggplot(plotTable, aes(x=Prediction, y=Reference, fill=prop),width=200,height=200) +
geom_tile() + theme_bw() + coord_equal() +
scale_fill_distiller(palette="Blues", direction=1) +
guides(fill=F, title.position = "top", title.hjust = 0.5) +
labs(title = "Confusion matrix")+
geom_text(aes(label=prop), color="black",face="bold")+
theme(plot.title = element_text(size=17,hjust = 0.5,face="bold"),
legend.text = element_text(size=10,face="bold"),
legend.title= element_text(size=10,face="bold"))
# quantity (raw counts)
ggplot(table, aes(x=Prediction, y=Reference, fill=Freq),width=200,height=200) +
geom_tile() + theme_bw() + coord_equal() +
scale_fill_distiller(palette="Blues", direction=1) +
guides(fill=F, title.position = "top", title.hjust = 0.5) +
labs(title = "Confusion matrix")+
geom_text(aes(label=Freq), color="black",face="bold")+
theme(plot.title = element_text(size=17,hjust = 0.5,face="bold"),
legend.text = element_text(size=10,face="bold"),
legend.title= element_text(size=10,face="bold"))
# view predict True and False
pred <- factor(c, labels=categories)
actual <- factor (y_test, labels = categories)
tmp <- cbind(actual,pred)
tmp <- as.data.frame(tmp)
tmp$actual
########################## Shirt ##########################################################
#### prediction
# For every test image whose true label is "Shirt", record its row index and
# predicted label, then split the indices by the (wrong) predicted class.
label_predict <- data.frame(pred)
label_actual <- data.frame(actual )
col_seq <- seq(from=1,to=10000,by=1)  # row index into the 10000 test images
label_matrix <- cbind(col_seq,label_actual,label_predict)
shirt <- label_matrix %>% filter(label_matrix$actual=="Shirt")
# One index table per misclassified-into category.
falsepredict_tshirt <- shirt %>% filter(shirt$pred=="T-shirt") %>% select(col_seq) #141 1
falsepredict_trouser <- shirt %>% filter(shirt$pred=="Trouser") %>% select(col_seq)
falsepredict_pullover <- shirt %>% filter(shirt$pred=="Pullover") %>% select(col_seq)
falsepredict_dress <- shirt %>% filter(shirt$pred=="Dress") %>% select(col_seq)
falsepredict_coat <- shirt %>% filter(shirt$pred=="Coat") %>% select(col_seq)
falsepredict_bag<- shirt %>% filter(shirt$pred=="Bag") %>% select(col_seq)
## Plot, as a 3x5 raster grid, every true "Shirt" test image that the
## classifier assigned to a given wrong category, titled "actual(predicted)".
## Generalizes the six copy-pasted loops of the original:
##  - the loop bound now follows the number of misclassified rows instead of
##    being hard-coded (the t-shirt loop used 1L:141L, the trouser loop 1L:1L);
##  - the loop variables no longer clobber the global `predict`/`actual`
##    objects (later code relies on `actual` being the full label factor);
##  - graphics parameters are restored on exit instead of leaking;
##  - `plot(...) + title(...)` is replaced by two statements (`+` is
##    meaningless for base graphics and only "worked" because both return NULL).
## Assumes x_test is the 10000 x 28 x 28 array built above -- TODO confirm.
plot_misclassified <- function(pred_subset, idx_df) {
  op <- par(mfrow=c(3,5), mar=c(1,0,0,0))
  on.exit(par(op), add = TRUE)
  for (i in seq_len(nrow(idx_df))) {
    j <- idx_df[i, ]                         # row index into x_test
    pred_lab <- as.matrix(pred_subset$pred[i])
    actual_lab <- as.matrix(pred_subset$actual[i])
    plot(as.raster(x_test[j, , ]))
    title(paste0(actual_lab, "(", pred_lab, ")"), cex.main=1, line=-1, col="red")
  }
}
## t_shirt
tshirt <- shirt %>% filter(shirt$pred=="T-shirt")
plot_misclassified(tshirt, falsepredict_tshirt)
## trouser
trouser <- shirt %>% filter(shirt$pred=="Trouser")
plot_misclassified(trouser, falsepredict_trouser)
## Pullover
pull <- shirt %>% filter(shirt$pred=="Pullover")
plot_misclassified(pull, falsepredict_pullover)
## Dress
dress <- shirt %>% filter(shirt$pred=="Dress")
plot_misclassified(dress, falsepredict_dress)
## Coat
coat <- shirt %>% filter(shirt$pred=="Coat")
plot_misclassified(coat, falsepredict_coat)
## Bag
bag <- shirt %>% filter(shirt$pred=="Bag")
plot_misclassified(bag, falsepredict_bag)
### non normalize -----------------------------------------------------------
# Reload the raw (unscaled, 0-255) Fashion-MNIST data and plot the first ten
# test images with their labels via ggplot.
fashion_mnist <- dataset_fashion_mnist()
fashion_mnist$train$x
x_train <- fashion_mnist$train$x #60000 28 28
x_test <- fashion_mnist$test$x #10000 28 28
y_train <- fashion_mnist$train$y
y_test <- fashion_mnist$test$y
library(reshape2)
library(plyr)
# One data frame per image, melted to long (y, column, intensity) form.
subarray <- apply(x_test[1:10, , ], 1, as.data.frame)
subarray <- lapply(subarray, function(df){
colnames(df) <- seq_len(ncol(df))
df['y'] <- seq_len(nrow(df))
df <- melt(df, id = 'y')
return(df)
})
cloth_cats = data.frame(category = c('Top', 'Trouser', 'Pullover', 'Dress', 'Coat',
'Sandal', 'Shirt', 'Sneaker', 'Bag', 'Boot'),
label = seq(0, 9))
plotdf <- rbind.fill(subarray)
# Human-readable facet labels: "Image i: <category>".
first_ten_labels <- cloth_cats$category[match(y_test[1:10], cloth_cats$label)]
first_ten_categories <- paste0('Image ', 1:10, ': ', first_ten_labels)
plotdf['Image'] <- factor(rep(first_ten_categories, unlist(lapply(subarray, nrow))),
levels = unique(first_ten_categories))
library(ggplot2)
ggplot() +
geom_raster(data = plotdf, aes(x = variable, y = y, fill = value)) +
facet_wrap(~ Image, nrow = 5, ncol = 2) +
scale_fill_gradient(low = "white", high = "black", na.value = NA) +
theme(aspect.ratio = 1, legend.position = "none") +
labs(x = NULL, y = NULL) +
scale_x_discrete(breaks = seq(0, 28, 7), expand = c(0, 0)) +
scale_y_reverse(breaks = seq(0, 28, 7), expand = c(0, 0))
#####################################################################################
# Look up the row indices of true "Shirt" images and display one of them.
# NOTE(review): assumes `actual` is still the full 10000-element factor of true
# labels built earlier -- verify it was not overwritten in between.
label_actual <- data.frame(actual )
col_seq <- seq(from=1,to=10000,by=1)
label_matrix <- cbind(col_seq,label_actual)
value <- label_matrix %>% filter(label_matrix$actual=="Shirt")
head(value)
# Reshape the saved test array back to 10000 x 28 x 28 for raster plotting.
dim(xtest) <- c(nrow(xtest), 28, 28)
plot(as.raster(xtest[8,,]))
categories <- c("T-shirt", "Trouser", "Pullover", "Dress", "Coat", "Sandal", "Shirt",
"Sneaker", "Bag", "Boot")
|
0d73d0270de74994a89f20af373b10e1e7265a99
|
811739d3b536c0aea041b7e8e09c8443c84dc2df
|
/test_models.R
|
b0796c4877d87ee93b35eda9c7401d05e3e28339
|
[
"MIT"
] |
permissive
|
mseewaters/abduction
|
6479da721dd08dc7cf23e8dd41aefc77e8c88e16
|
ebcf70f638cbd2387df01bc49eb8bf1f27c33841
|
refs/heads/master
| 2021-03-12T23:14:40.322035
| 2014-09-27T17:20:37
| 2014-09-27T17:20:37
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 10,805
|
r
|
test_models.R
|
library(DMwR)
library(ggplot2)
library(e1071)
library(performanceEstimation)
library(randomForest)
library(rpart)
library(plyr)
library(corrplot)
library(ggmap)
library(gridExtra)
# Data Understanding/Preparation
# Using the existing data set, we will
# • Load the data into R
# • Evaluate the missing fields and determine the best methods to handle (remove or fill with similar)
# • Create the target variable (currently free-form text)
# • Investigate the data to look for correlations, outliers, etc that may yield insights
# • Plot the data geographically. (This is not to identify states as risk factor, rather to understand the nature of the data.)
# • Plot relevant distributions (i.e. age of victim, age of offender)
# loadData ----------------------------------------------------------------
## Load the data into R
# "U" and blank-like strings are treated as missing on read.
data.raw <- read.csv("Sample_data.csv", stringsAsFactors=TRUE, na.strings = c("NA","U",""," "))
# Assign stable, descriptive column names.
names.data <- c("Date","Victim.Age","Victim.Race","Victim.Gender","Harm","Publicized","Location","Region",
"Relationship","RSO","Offender.Age","Offender.Race","Offender.Gender","Rural.City","Missing.Location",
"Recovery.Location", "Recovery", "Number.Victims")
colnames(data.raw) <- names.data
summary(data.raw)
# Remove all carriage returns embedded in factor levels.
# BUG FIX: the original iterated over `data`, which is not defined until the
# complete-cases filter below; at this point `data` resolved to the base
# `data()` function, so `data[, i]` failed at runtime ("object of type
# 'closure' is not subsettable"). Clean `data.raw` instead -- the `data`
# frame is derived from it a few lines later, so the fix propagates.
for (i in seq_len(ncol(data.raw)))
{
  if (inherits(data.raw[[i]], "factor")) levels(data.raw[[i]]) <- gsub("\n", "", levels(data.raw[[i]]))
}
# missingData -------------------------------------------------------------
## Evaluate missing data and determine best method to handle
# Remove lines where victim info and relationship is not provided
data <- data.raw[complete.cases(data.raw[,c(2,3,4,5,9)]),]
# Count NA by variable and isolate fields with at least 1 NA
data.empty = NULL
for (i in 1:length(data))
{
data.empty[i] <- length(which(is.na(data[i])))
}
x <- as.data.frame(cbind(names.data, data.empty)[order(-data.empty),])
x <- x[which(x$data.empty != 0),]
x
# Remove Recovery.Location and Missing.Location
# Almost half of observations missing data
data <- data[,-which(names(data) %in% c("Recovery.Location","Missing.Location"))]
# Fill RSO, Offender.Race, Offender.Age, Rural.City using nearest neighbors
# (k-NN imputation from the DMwR package).
data <- knnImputation(data, k=3)
check <- nrow(data[!complete.cases(data),])  # expected to be 0 after imputation
summary(data)
# additionalVariables -----------------------------------------------------
## Create additional variable groups as needed
# Create victim age group
data$Victim.AgeGroup <- cut(data$Victim.Age,
breaks=c(-0.5,5.5,10.5,Inf),
labels=c('0-5','6-10','11-15'))
table(data$Victim.AgeGroup)
data$Offender.AgeGroup <- cut(data$Offender.Age,
breaks=c(20,30,40,50,Inf),
labels=c('<30','30-40','40-50','>50'))
table(data$Offender.AgeGroup)
# Collapse the free-form relationship values into three coarse groups.
# NOTE(review): group labels are assigned by position in unique(), so they
# depend on the order relationships first appear in the data -- verify.
relate.list <- as.data.frame(unique(unlist(data$Relationship), use.names=FALSE))
colnames(relate.list) <- c("Relationship")
relate.list$Relate.Group <- as.factor(c("Family Friend", "Relative", "Parent","Parent","Family Friend","Family Friend","Family Friend","Family Friend","Relative","Parent"))
data <- merge(data, relate.list, by = "Relationship")
table(data$Relate.Group)
# targetVariable ----------------------------------------------------------
# Create a list of harm responses and identify them as 1/0
# NOTE(review): the 0/1 coding below is positional against unique() output,
# same fragility as the relationship grouping above.
harm.list <- as.data.frame(unique(unlist(data$Harm), use.names=FALSE))
colnames(harm.list) <- c("Harm")
harm.list$target <- c(0,1,1,1,0,1)
data <- merge(data, harm.list, by = "Harm")
table(data$target)
# investigateData ---------------------------------------------------------
# Correlations - factors converted to numeric
cor.data <- data[,c(4:7,11:14,9,19,20)]
for (i in 1:length(cor.data))
{
cor.data[,i] <- as.numeric(cor.data[,i])
}
# NOTE(review): `c` shadows base::c from here on.
c <- cor(cor.data)
corrplot(c)
# detailed bar charts of interesting correlations
p1 <- ggplot(data, aes(x=Rural.City, fill=Victim.Gender)) + geom_bar(stat='bin') + ggtitle("Victim Gender and Rural/City")
p2 <- ggplot(data, aes(x=Offender.Race, fill=Victim.Race)) + geom_bar(stat='bin') + ggtitle("Victim and Offender Race")
p3 <- ggplot(data, aes(x=Offender.Age, fill=Publicized)) + geom_bar(stat='bin') + ggtitle("Offender Age and Publicized")
p4 <- ggplot(data, aes(x=Relate.Group, fill=Offender.Gender)) + geom_bar(stat='bin') + ggtitle("Relationship and Offender Gender")
grid.arrange(p1,p2,p3,p4,ncol=2)
# Factors with insufficient representative data
p1 <- ggplot(data, aes(x=Recovery, fill=Recovery)) + geom_bar(stat="bin")
p2 <- ggplot(data, aes(x=RSO, fill=RSO)) + geom_bar(stat="bin")
grid.arrange(p1,p2,ncol=2)
# geographicPlot ----------------------------------------------------------
# Geocode each distinct "City, US" location and plot counts on a US map.
# (geocode() hits a remote service; requires network access.)
library(ggmap)
loc <- data.frame()
all_locs <- paste0(as.character(data$Location),", US")
unique_locs <- unique(all_locs)
for (i in 1:length(unique_locs))
{
geo.res <- geocode(unique_locs[i], messaging = FALSE)
loc[i,1] <- geo.res[1]
loc[i,2] <- geo.res[2]
loc[i,3] <- length(all_locs[all_locs==unique_locs[i]])
}
map <- get_map("united states", zoom = 4)
ggmap(map, extent="device") + geom_point(aes(x=lon, y=lat),
data=loc, color="darkred", size=3+3*loc$V3)
# distributionsPlot -------------------------------------------------------
# The father appears to have the highest abduction rate, however statistically it is the mother
ggplot(data, aes(x=Victim.Age, y=Relationship)) + geom_boxplot() +
stat_summary(fun.y=mean, geom="point", shape=5, size=4)
# Higher counts in the Southeast and South Central and again the father appears to be the abductor
ggplot(data, aes(x=Relationship)) + geom_histogram(binwidth=.5, colour="black", fill="blue") +
facet_grid(Region ~ .)
# Density plots of victim and offender age, each with its own mean line.
ggplot(data, aes(x=Victim.Age)) + geom_histogram(binwidth=.5, colour="black", fill="white") +
geom_vline(aes(xintercept=mean(Victim.Age, na.rm=TRUE)), # ignore NA values for the mean
color="red", linetype="dashed", size=1)
# BUG FIX: the offender-age plot drew its mean line at mean(Victim.Age)
# (copy-paste slip); it now uses mean(Offender.Age).
ggplot(data, aes(x=Offender.Age)) + geom_histogram(binwidth=.5, colour="black", fill="white") +
geom_vline(aes(xintercept=mean(Offender.Age, na.rm=TRUE)), # ignore NA values for the mean
color="red", linetype="dashed", size=1)
# Dodged histograms exploring outcome and demographic distributions.
# Our data show generally no harm is done to the child
ggplot(data, aes(x=Harm, fill=Victim.Gender)) + geom_histogram(binwidth=.5, position="dodge")
# Interesting: for the most part children are recovered; girls are the only missing in our data
ggplot(data, aes(x=target, fill=Victim.Gender)) + geom_histogram(binwidth=.5, position="dodge")
# The majority of cases are publicized, with the highest being in the South Central Region
ggplot(data, aes(x=Publicized, fill=Region)) + geom_histogram(binwidth=.5, position="dodge")
# Southeast and West appear to have the highest rates of abduction
ggplot(data, aes(x=Victim.Age, fill=Region)) + geom_histogram(binwidth=.5, position="dodge")
# Switching the axis: South Central appears to have the highest overall counts
ggplot(data, aes(x=Region, fill=Victim.Age)) + geom_histogram(binwidth=.5, position="dodge")
# Whites and Blacks dominate abduction cases, with whites dominating in Southeast, Central and
# blacks dominating in South Central and West; hispanic abduction appears rare in our data -- from
# my additional research with DCFS this is due to non-reporting and immigration status
ggplot(data, aes(x=Region, fill=Victim.Race)) + geom_histogram(binwidth=.5, position="dodge")
# We will use the performanceEstimation package and evaluate several
# common classification models
# • SVM
# • Decision trees (rpart, randomForest)
# • NaïveBayes
# We will evaluate methods for up sampling.
## Recommendation to use bootstrapping for sampling
# The models will be evaluated based on Precision and Recall,
# with Recall having a higher weight
# modelBuildAndEvaluate --------------------------------------------------------------
# Remove extra attributes and those not known at the time of abduction
data.m <- data[,-which(names(data) %in% c("Publicized", "RSO", "Harm","Date","Victim.Age","Offender.Age", "Relationship", "Location", "Recovery","Number.Victims"))]
# In SMOTE, target must be factor and last item in dataframe
data.m$target <- as.factor(data.m$target)
# Two SMOTE variants: up-sampling only, and up- plus down-sampling.
data.smote <- SMOTE(target ~ ., data.m, perc.over = 500)
data.smote2 <- SMOTE(target ~ ., data.m, perc.over = 500, perc.under = 150)
smote.output <- as.data.frame(rbind(table(data.m$target),table(data.smote$target),table(data.smote2$target)))
rownames(smote.output) <- c("original data","upsampling","upsampling/downsampling")
# rpart requires non factored target
data.m.nf <- data.m
data.m.nf$target <- as.numeric(data.m.nf$target)
data.smote.nf <- data.smote
data.smote.nf$target <- as.numeric(data.smote.nf$target)
data.smote2.nf <- data.smote2
data.smote2.nf$target <- as.numeric(data.smote2.nf$target)
# Build and evaluation for SVM, NaiveBayes, RandomForest across all three
# data sets using .632 bootstrap estimation (200 repetitions).
res <- performanceEstimation(
c(PredTask(target ~ ., data.m),PredTask(target ~ ., data.smote),
PredTask(target ~ ., data.smote2)),
c(workflowVariants("standardWF", learner = "svm",
learner.pars=list(cost=c(1,10,100), gamma=c(0.1,0.01))),
workflowVariants("standardWF", learner = "randomForest",
learner.pars=list(ntree = c(5,50,200), nodesize = c(2,5))),
workflowVariants("standardWF", learner = "naiveBayes")),
BootSettings(type=".632", nReps=200))
# Build and evaluation for rpart (requires non factored target)
res.nf <- performanceEstimation(
c(PredTask(target ~ ., data.m.nf),PredTask(target ~ ., data.smote.nf),
PredTask(target ~ ., data.smote2.nf)),
workflowVariants("standardWF", learner = "rpartXse", learner.pars=list(se=c(0,0.5,1))),
BootSettings(type=".632", nReps=200))
#Show results
plot(res)
plot(res.nf)
# Show best model
topPerformers(res)
topPerformers(res.nf)
# Generally svm, randomForest, or naiveBayes has lowest error
getWorkflow("svm.v6", res)
# modelFinalBuild -----------------------------------------------------------
# Best model seems to be randomForest
# although NaiveBayes sometimes rises to the top
# Will evaluate all models on additional data
# Commentary on need to understand factors
# Evaluation of information gain using naiveBayes
library("RWeka")
NB <- make_Weka_classifier("weka/classifiers/bayes/NaiveBayes")
model.weka <- NB(target ~ ., data.smote2)
infogain <- as.data.frame(InfoGainAttributeEval(target ~ ., data.smote2))
ig <- cbind(rownames(infogain),infogain)
colnames(ig) <- c("attribute","gain")
summary(model.weka)
# Order attributes by decreasing information gain for the bar chart.
ig$attribute <- factor(ig$attribute, levels = ig[order(-ig$gain),]$attribute)
ggplot(data=ig, aes(x=attribute, y=gain, fill=gain)) + geom_bar(stat="identity")
# Include comments from Ashli-Jade and our preliminary findings
|
0c9f7452ee6e6f6b739d34b4873d3e82d02a4918
|
76e1ad0b4fea46946939c33e795b73d86ba2124b
|
/task4/evaluate2.R
|
18b975dd2c2fec1437b84957b45272336a493d8a
|
[] |
no_license
|
guidogallopyn/TextPrediction
|
a559940bd1e05ea9ff1e07a71c87dbc4b51ccba5
|
dfeb4d109c686ea004cd1736a60cf418b7f37970
|
refs/heads/master
| 2021-01-02T09:15:11.169480
| 2015-04-27T18:21:36
| 2015-04-27T18:21:36
| 34,554,810
| 1
| 1
| null | null | null | null |
UTF-8
|
R
| false
| false
| 3,770
|
r
|
evaluate2.R
|
# Script to evaluate language-model (LM) performance.
# KPIs to evaluate
# coverage
# ==> in this script perplexity
# prediction accuracy
# prediction top-10 accuracy (if the word appears in the 10-best it is counted correct)
setwd("~/Documents/Courses/DataScience/CapStone")  # NOTE(review): machine-specific path
# Presumably defines LMparams(), CorpusPerplexity(), subsample.TextDocument --
# TODO confirm against NGramLM.R.
source("NGramLM.R")
library(tm)
library(dplyr)
library(tidyr)
library(ggplot2)
# what model files are available
(df <- LMparams(list.files("models")))
# Load a saved language model from an .RData file under `path`.
# The file is expected to define an object named `mod`; that object is
# returned (an error is raised if the file does not contain `mod`).
loadLM <- function(name, path = "models") {
  env <- new.env(parent = emptyenv())
  load(file.path(path, name), envir = env)
  get("mod", envir = env)
}
# load subset of models to evaluate
# Each matching model file is loaded into the global environment under its
# own file name via assign().
for( name in filter(df, (data=="small" & N==4 & K %in% c(1,2)) |
(data=="medium" & N==4 & K %in% c(2,3,5)) |
(data=="large" & N==3 & K %in% c(7,10,20)), debug==FALSE)$name)
assign(name,loadLM(name))
(models <- grep("^LM[.]",ls(),value=TRUE))
# load evaluation corpus, and clean it and tokenize
corpus <- VCorpus(DirSource(file.path(file.path("data","eval"),"en_US")))
# subsample the eval corpus to much smaller eval set
set.seed(123)
microeval <- tm_map(corpus, subsample.TextDocument, 1/1000) #subsample
# perplexity calculations
#df %>% filter(name %in% models) %>%
# mutate(Voc = length(get(as.character(name))[["logp1"]])) %>% # BUG
# bind_rows(mutate(.,N=1),mutate(.,N=2))
# mutate(PP = CorpusPerplexity(get(as.character(name)), microeval, N)
moddf <- filter(df, name %in% models)
# Vocabulary size: number of entries in each model's unigram log-probability table.
moddf$Voc <- sapply(moddf$name, function(name) length(get(as.character(name))[["logp1"]]))
#moddf<- rbind(moddf,mutate(moddf,N=3),mutate(moddf,N=2),mutate(moddf,N=1))
# Triplicate the rows so each model is evaluated at N = 3, 2 and 1.
moddf<- bind_rows(mutate(moddf,N=3), mutate(moddf,N=2), mutate(moddf,N=1)) # only for bigrams and unigrams now
moddf$PP <- unlist(Map(function(name,N) CorpusPerplexity(get(as.character(name)), microeval, N),
moddf$name,
moddf$N))
print(moddf)
save(moddf,file="perplexity.RData")
# display evaluation corpus perplexity in function of N and K ==> basically benefit from bigrams is huge, benefit trigrams is small
moddf %>% ggplot(aes(Voc,PP,group=data,colour=data)) +
geom_line() +
geom_point(size=9, shape=21) +
geom_text(aes(label=K), size=6) +
facet_grid(N ~ . )
moddf %>% ggplot(aes(Voc,PP,group=N,colour=as.factor(N))) +
geom_line() +
geom_point(size=9, shape=21) +
geom_text(aes(label=K), size=6) +
facet_grid(data ~ . )
# eval on large models only, with bigger eval set
for( name in filter(df,data=="large" & N==3, debug==FALSE)$name)
assign(name,loadLM(name))
(models <- grep("^LM[.]",ls(),value=TRUE))
set.seed(123)
# NOTE(review): `eval` shadows base::eval from here on.
eval <- tm_map(corpus, subsample.TextDocument, 1/100) #subsample
moddf <- filter(df, name %in% models)
moddf$Voc <- sapply(moddf$name, function(name) length(get(as.character(name))[["logp1"]]))
moddf<- bind_rows(mutate(moddf,N=3), mutate(moddf,N=2), mutate(moddf,N=1))
moddf$PP <- unlist(Map(function(name,N) CorpusPerplexity(get(as.character(name)), eval, N),
moddf$name,
moddf$N))
print(moddf)
save(moddf,file="perplexity.large.RData")
moddf %>% ggplot(aes(Voc,PP,group=N,colour=as.factor(N))) +
geom_line() +
geom_point(size=3, shape=21)
# summary table
moddf %>% spread(N,PP) %>% select(-data,-debug,-name)
#measurement per subset (Blogs, News,Tweets), take best model
pptab <- data.frame(Data=c("All","Blogs","News","Tweets"))
pptab$PP <- c(CorpusPerplexity(LM.en_US.large.Katz.N3.K3.RData, eval, 3),
CorpusPerplexity(LM.en_US.large.Katz.N3.K3.RData, eval[1], 3),
CorpusPerplexity(LM.en_US.large.Katz.N3.K3.RData, eval[2], 3),
CorpusPerplexity(LM.en_US.large.Katz.N3.K3.RData, eval[3], 3))
print(pptab)
save(moddf,pptab,file="perplexity.large.RData")
|
cd26e8a785340155c6f4c18c160a058eaf2471c1
|
2123fdc63d783c74cef59ad980cbbff8de642363
|
/Script R SBT 13.R
|
8eae961799a0d318cce29d5adbbbc698508fced8
|
[] |
no_license
|
Anis-Bensaid/Projet-SBT13-Addictologie
|
6ccfbb7df8568436976f5c1b0d35b8612279747f
|
10113be6ba593451c55c0ae416c28bc9a3f32256
|
refs/heads/master
| 2021-01-24T23:52:00.000041
| 2017-05-10T15:07:40
| 2017-05-10T15:07:40
| 82,041,139
| 0
| 0
| null | 2017-04-14T15:36:03
| 2017-02-15T09:08:05
|
R
|
UTF-8
|
R
| false
| false
| 33,103
|
r
|
Script R SBT 13.R
|
## Clean the workspace
# NOTE(review): rm(list=ls()) wipes the caller's global environment on source.
rm(list=ls())
# install.packages("readxl")
# install.packages("plot3D")
# install.packages("FactoMineR")
# install.packages("factoextra")
## Packages for the k-nearest-neighbours imputation method
# install.packages("VIM")
# library(VIM)
# source("http://bioconductor.org/biocLite.R") # try with http:// if not supported
# biocLite("impute") # Bioconductor equivalent of install.packages
library(plot3D)
library(FactoMineR)
library(readxl)
library(impute)
library(factoextra)
#####################################
### Reading the database          ###
#####################################
# One machine-specific path per team member; only Haim's is active.
## Anis
# bd <- read_excel("D:/Users/enysb/Google Drive/Etudes/Git/Projet-SBT13-Addictologie/bdmieRpp2.xls")
# setwd("D:/Users/enysb/Google Drive/Etudes/Git/Projet-SBT13-Addictologie")
## Arthur
# bd <- read_excel("~/Documents/Projet Enjeux/Projet-SBT13-Addictologie/bdmieRpp2.xls")
# setwd("~/Documents/Projet Enjeux/Projet-SBT13-Addictologie")
## Benjamin
# bd<- read_excel("~/GitHub/Projet-SBT13-Addictologie/bdmieRpp2.xls")
# setwd("~/GitHub/Projet-SBT13-Addictologie")
## Emilio
# bd <- read_excel("C:/Users/Emilio/Desktop/intercambio/clases/enjeux/sbt/Projet-SBT13-Addictologie/bdmieRpp2.xls")
# setwd("C:/Users/Emilio/Desktop/intercambio/clases/enjeux/sbt/Projet-SBT13-Addictologie")
## Haim
bd <- read_excel("~/Desktop/Projet_SBT13/Projet-SBT13-Addictologie-Github/bdmieRpp2.xls")
setwd("~/Desktop/Projet_SBT13/Projet-SBT13-Addictologie-Github")
#############################################
### Restructuring the database            ###
#############################################
# Keep only respondents younger than 31.
bd1 <-bd[bd$age<31,]
Nl=dim(bd1)[1] # number of rows
bdscore=data.frame(matrix(data=NA,nrow=Nl,ncol=1))
# ID of the interviewed individual and of the collector.
# The IDs are not needed because all the data are gathered in a single table.
bdscore$ID_indiv <-bd1[1]
# bdscore$collecteur <- bd1[2]
# Drop the placeholder column used to create the data frame:
bdscore<-bdscore[,-1]
# Convert the AQoLS answers into numeric scores (0-3) and insert them into the
# data frame. Each item maps the levels returned by unique() onto 0..3; the
# index order differs per item because unique() returns levels in order of
# first appearance in the data.
# NOTE(review): this mapping is data-dependent -- if the input file changes,
# the unique() orderings (and therefore the scores) change with it. Verify
# each item's order against the questionnaire before trusting the scores.
a1unique <- unique(bd1$A1)
bdscore$a1 <- ifelse(bd1$A1== a1unique[1], 0, ifelse(bd1$A1== a1unique[2], 1,ifelse(bd1$A1==a1unique[4], 2,ifelse(bd1$A1==a1unique[3], 3,NA))))
a2unique <- unique(bd1$A2)
bdscore$a2 <- ifelse(bd1$A2== a2unique[1], 0, ifelse(bd1$A2== a2unique[3], 1,ifelse(bd1$A2==a2unique[2], 2,ifelse(bd1$A2==a2unique[4], 3,NA))))
a3unique <- unique(bd1$A3)
bdscore$a3 <- ifelse(bd1$A3== a3unique[1], 0, ifelse(bd1$A3== a3unique[2], 1,ifelse(bd1$A3==a3unique[3], 2,ifelse(bd1$A3==a3unique[4], 3,NA))))
a4unique <- unique(bd1$A4)
bdscore$a4 <- ifelse(bd1$A4== a4unique[1], 0, ifelse(bd1$A4== a4unique[2], 1,ifelse(bd1$A4==a4unique[3], 2,ifelse(bd1$A4==a4unique[4], 3,NA))))
a5unique <- unique(bd1$A5)
bdscore$a5 <- ifelse(bd1$A5== a5unique[1], 0, ifelse(bd1$A5== a5unique[2], 1,ifelse(bd1$A5==a5unique[3], 2,ifelse(bd1$A5==a5unique[4], 3,NA))))
a6unique <- unique(bd1$A6)
bdscore$a6 <- ifelse(bd1$A6== a6unique[1], 0, ifelse(bd1$A6== a6unique[2], 1,ifelse(bd1$A6==a6unique[3], 2,ifelse(bd1$A6==a6unique[4], 3,NA))))
a7unique <- unique(bd1$A7)
bdscore$a7 <- ifelse(bd1$A7== a7unique[1], 0, ifelse(bd1$A7== a7unique[2], 1,ifelse(bd1$A7==a7unique[3], 2,ifelse(bd1$A7==a7unique[4], 3,NA))))
a8unique <- unique(bd1$A8)
bdscore$a8 <- ifelse(bd1$A8== a8unique[1], 0, ifelse(bd1$A8== a8unique[2], 1,ifelse(bd1$A8==a8unique[3], 2,ifelse(bd1$A8==a8unique[4], 3,NA))))
a9unique <- unique(bd1$A9)
bdscore$a9 <- ifelse(bd1$A9== a9unique[1], 0, ifelse(bd1$A9== a9unique[2], 1,ifelse(bd1$A9==a9unique[3], 2,ifelse(bd1$A9==a9unique[4], 3,NA))))
a10unique <- unique(bd1$A10)
bdscore$a10 <- ifelse(bd1$A10== a10unique[1], 0, ifelse(bd1$A10== a10unique[2], 1,ifelse(bd1$A10==a10unique[3], 2,ifelse(bd1$A10==a10unique[4], 3,NA))))
a11unique <- unique(bd1$A11)
bdscore$a11 <- ifelse(bd1$A11== a11unique[1], 0, ifelse(bd1$A11== a11unique[2], 1,ifelse(bd1$A11==a11unique[3], 2,ifelse(bd1$A11==a11unique[4], 3,NA))))
a12unique <- unique(bd1$A12)
bdscore$a12 <- ifelse(bd1$A12== a12unique[1], 0, ifelse(bd1$A12== a12unique[3], 1,ifelse(bd1$A12==a12unique[2], 2,ifelse(bd1$A12==a12unique[4], 3,NA))))
a13unique <- unique(bd1$A13)
bdscore$a13 <- ifelse(bd1$A13== a13unique[1], 0, ifelse(bd1$A13== a13unique[2], 1,ifelse(bd1$A13==a13unique[3], 2,ifelse(bd1$A13==a13unique[4], 3,NA))))
a14unique <- unique(bd1$A14)
bdscore$a14 <- ifelse(bd1$A14== a14unique[1], 0, ifelse(bd1$A14== a14unique[2], 1,ifelse(bd1$A14==a14unique[3], 2,ifelse(bd1$A14==a14unique[4], 3,NA))))
a15unique <- unique(bd1$A15)
bdscore$a15 <- ifelse(bd1$A15== a15unique[1], 0, ifelse(bd1$A15== a15unique[2], 1,ifelse(bd1$A15==a15unique[3], 2,ifelse(bd1$A15==a15unique[4], 3,NA))))
a16unique <- unique(bd1$A16)
bdscore$a16 <- ifelse(bd1$A16== a16unique[1], 0, ifelse(bd1$A16== a16unique[2], 1,ifelse(bd1$A16==a16unique[3], 2,ifelse(bd1$A16==a16unique[4], 3,NA))))
a17unique <- unique(bd1$A17)
bdscore$a17 <- ifelse(bd1$A17== a17unique[2], 0, ifelse(bd1$A17== a17unique[1], 1,ifelse(bd1$A17==a17unique[3], 2,ifelse(bd1$A17==a17unique[4], 3,NA))))
a18unique <- unique(bd1$A18)
bdscore$a18 <- ifelse(bd1$A18== a18unique[1], 0, ifelse(bd1$A18== a18unique[2], 1,ifelse(bd1$A18==a18unique[3], 2,ifelse(bd1$A18==a18unique[4], 3,NA))))
a19unique <- unique(bd1$A19)
bdscore$a19 <- ifelse(bd1$A19== a19unique[1], 0, ifelse(bd1$A19== a19unique[2], 1,ifelse(bd1$A19==a19unique[3], 2,ifelse(bd1$A19==a19unique[4], 3,NA))))
a20unique <- unique(bd1$A20)
bdscore$a20 <- ifelse(bd1$A20== a20unique[1], 0, ifelse(bd1$A20== a20unique[3], 1,ifelse(bd1$A20==a20unique[2], 2,ifelse(bd1$A20==a20unique[4], 3,NA))))
a21unique <- unique(bd1$A21)
bdscore$a21 <- ifelse(bd1$A21== a21unique[1], 0, ifelse(bd1$A21== a21unique[2], 1,ifelse(bd1$A21==a21unique[3], 2,ifelse(bd1$A21==a21unique[4], 3,NA))))
a22unique <- unique(bd1$A22)
bdscore$a22 <- ifelse(bd1$A22== a22unique[2], 0, ifelse(bd1$A22== a22unique[1], 1,ifelse(bd1$A22==a22unique[3], 2,ifelse(bd1$A22==a22unique[4], 3,NA))))
a23unique <- unique(bd1$A23)
bdscore$a23 <- ifelse(bd1$A23== a23unique[1], 0, ifelse(bd1$A23== a23unique[2], 1,ifelse(bd1$A23==a23unique[3], 2,ifelse(bd1$A23==a23unique[4], 3,NA))))
a24unique <- unique(bd1$A24)
bdscore$a24 <- ifelse(bd1$A24== a24unique[2], 0, ifelse(bd1$A24== a24unique[1], 1,ifelse(bd1$A24==a24unique[4], 2,ifelse(bd1$A24==a24unique[3], 3,NA))))
a25unique <- unique(bd1$A25)
bdscore$a25 <- ifelse(bd1$A25== a25unique[1], 0, ifelse(bd1$A25== a25unique[2], 1,ifelse(bd1$A25==a25unique[4], 2,ifelse(bd1$A25==a25unique[3], 3,NA))))
a26unique <- unique(bd1$A26)
bdscore$a26 <- ifelse(bd1$A26== a26unique[2], 0, ifelse(bd1$A26== a26unique[1], 1,ifelse(bd1$A26==a26unique[4], 2,ifelse(bd1$A26==a26unique[3], 3,NA))))
a27unique <- unique(bd1$A27)
bdscore$a27 <- ifelse(bd1$A27== a27unique[1], 0, ifelse(bd1$A27== a27unique[2], 1,ifelse(bd1$A27==a27unique[3], 2,ifelse(bd1$A27==a27unique[4], 3,NA))))
a28unique <- unique(bd1$A28)
bdscore$a28 <- ifelse(bd1$A28== a28unique[1], 0, ifelse(bd1$A28== a28unique[2], 1,ifelse(bd1$A28==a28unique[3], 2,ifelse(bd1$A28==a28unique[4], 3,NA))))
a29unique <- unique(bd1$A29)
bdscore$a29 <- ifelse(bd1$A29== a29unique[1], 0, ifelse(bd1$A29== a29unique[2], 1,ifelse(bd1$A29==a29unique[4], 2,ifelse(bd1$A29==a29unique[3], 3,NA))))
a30unique <- unique(bd1$A30)
bdscore$a30 <- ifelse(bd1$A30== a30unique[1], 0, ifelse(bd1$A30== a30unique[2], 1,ifelse(bd1$A30==a30unique[3], 2,ifelse(bd1$A30==a30unique[4], 3,NA))))
a31unique <- unique(bd1$A31)
bdscore$a31 <- ifelse(bd1$A31== a31unique[1], 0, ifelse(bd1$A31== a31unique[2], 1,ifelse(bd1$A31==a31unique[3], 2,ifelse(bd1$A31==a31unique[4], 3,NA))))
a32unique <- unique(bd1$A32)
bdscore$a32 <- ifelse(bd1$A32== a32unique[1], 0, ifelse(bd1$A32== a32unique[2], 1,ifelse(bd1$A32==a32unique[3], 2,ifelse(bd1$A32==a32unique[4], 3,NA))))
a33unique <- unique(bd1$A33)
bdscore$a33 <- ifelse(bd1$A33== a33unique[1], 0, ifelse(bd1$A33== a33unique[2], 1,ifelse(bd1$A33==a33unique[3], 2,ifelse(bd1$A33==a33unique[4], 3,NA))))
a34unique <- unique(bd1$A34)
bdscore$a34 <- ifelse(bd1$A34== a34unique[2], 0, ifelse(bd1$A34== a34unique[1], 1,ifelse(bd1$A34==a34unique[3], 2,ifelse(bd1$A34==a34unique[4], 3,NA))))
#Argent
finunique <- unique(bd1$fin)
bdscore$Argent <- ifelse(bd1$fin==finunique[2], 0, ifelse(bd1$fin == finunique[1], 1, ifelse(bd1$fin==finunique[5], 2, ifelse(bd1$fin==finunique[3], 3, ifelse(bd1$fin==finunique[6], 4, NA)))))
# Age
bdscore$Age<-bd1$age
# Genre
genreunique <- unique(bd1$sex)
bdscore$Genre <- ifelse(bd1$sex== genreunique[2], 1, ifelse(bd1$sex==genreunique[1], 2,ifelse(bd1$sex== genreunique[5], NA ,ifelse(bd1$sex == genreunique[3], NA,NA))))
# Niveau d'étude après le Bac
niveauunique <- unique(bd1$niv)
bdscore$Niveau <- ifelse(bd1$niv==niveauunique[3], 1, ifelse(bd1$niv==niveauunique[1], 2, ifelse(bd1$niv==niveauunique[4],3, ifelse(bd1$niv==niveauunique[5], 4, ifelse(bd1$niv==niveauunique[6],5, ifelse(bd1$niv==niveauunique[2], 6, NA))))))
## Nivautr (dans le tableau bd1)
# c'est une colonne vide, elle n'a pas été remplie par les personnes interrogées
# Test :
## A <- bd1$nivautre
## N = 16930
## B=matrix(bdscore = NA, nrow=N, ncol = 1)
## for (i in (1:N)){
## if (is.na(A[i])){B[i]=0}
## else {B[i]=1}
## }
## S=0
## for (i in (1:N)){
## if (is.null(B[i])){S=S+1}
## }
## on obtient S = 0
# Discipline
# bdscore$Disc<-bd1$disc --> colonne qualitative, qu'on a préféré scinder en plusieurs
# colonnes avec un résultat qualitatif
study <- unique(bd1$disc)
bdscore$StudyHuma <- ifelse(bd1$disc==study[3],1,NA)
bdscore$StudyHuma[is.na(bdscore$StudyHuma)]<-0
bdscore$StudyProf <- ifelse(bd1$disc==study[1],1,NA)
bdscore$StudyProf[is.na(bdscore$StudyProf)]<-0
bdscore$StudyLawEco <- ifelse(bd1$disc==study[4],1,NA)
bdscore$StudyLawEco[is.na(bdscore$StudyLawEco)]<-0
bdscore$StudyScience <- ifelse(bd1$disc==study[2],1,NA)
bdscore$StudyScience[is.na(bdscore$StudyScience)]<-0
bdscore$StudyMed <- ifelse(bd1$disc==study[5],1,NA)
bdscore$StudyMed[is.na(bdscore$StudyMed)]<-0
bdscore$StudyAutre <- ifelse(bd1$disc==study[6],1,NA)
bdscore$StudyAutre[is.na(bdscore$StudyAutre)]<-0
# Autre cursus, c'est une donnée qualitative qui nous semble inutilisable
# bdscore$AutreCursus <- bd1[8]
# Fréquence binge-drinking
bdscore$FreqBinge <- ifelse(bd1$frqoh==unique(bd1$frqoh)[6], 0, ifelse(bd1$binge==unique(bd1$binge)[2], 0, ifelse(is.na(bd1$frqb1),ifelse(is.na(bd1$frqb2),ifelse(is.na(bd1$frqb3), ifelse(is.na(bd1$frqb6),ifelse(is.na(bd1$frqb10),NA,5),4),3),2),1)))
# Audit-C et consommation d'alcool
# Fréquence de consommation d'alcool
frqohunique <- unique(bd1$frqoh)
bdscore$FreqConso <- ifelse(bd1$frqoh==frqohunique[6], 0, ifelse(bd1$frqoh==frqohunique[3], 1, ifelse(bd1$frqoh== frqohunique[2], 2, ifelse(bd1$frqoh == frqohunique[1], 3, ifelse(bd1$frqoh==frqohunique[4], 4, NA)))))
# Nombre de verres consommés en moyenne à une occasion
nbverreunique <- unique(bd1$nbvrtyp)
bdscore$NbVerreMoy <- ifelse(bd1$nbvrtyp==nbverreunique[4], 0, ifelse(bd1$nbvrtyp ==nbverreunique[3], 1, ifelse(bd1$nbvrtyp == nbverreunique[2], 2, ifelse(bd1$nbvrtyp == nbverreunique[5], 3, ifelse(bd1$nbvrtyp ==nbverreunique[1], 4, NA)))))
#Fréquence de consommation de plus de six verres en une occasion
bdscore$FreqSupSixVerre <-bd1$sixvr
# Autres substances
tabacunique <- unique(bd1$tbc)
bdscore$Tabac <- ifelse(bd1$tbc== tabacunique[3], 0, ifelse(bd1$tbc== tabacunique[4], 1, ifelse(bd1$tbc==tabacunique[2], 1, ifelse(bd1$tbc==tabacunique[5], 2, ifelse(bd1$tbc==tabacunique[1], 3, NA)))))
cannabisunique <- unique(bd1$thc)
bdscore$Cannabis <- ifelse(bd1$thc== cannabisunique[4], 0, ifelse(bd1$thc== cannabisunique[5], 1, ifelse(bd1$thc==cannabisunique[3], 1, ifelse(bd1$thc==cannabisunique[1], 2, ifelse(bd1$thc==cannabisunique[2], 3, NA)))))
cocaunique <- unique(bd1$coc)
bdscore$Cocaine <- ifelse(bd1$coc== cocaunique[2], 0, ifelse(bd1$coc == cocaunique[4], 1, ifelse(bd1$coc== cocaunique[1], 1, ifelse(bd1$coc==cocaunique[5], 2, ifelse(bd1$coc==cocaunique[6], 3, NA)))))
herounique<- unique(bd1$hero)
bdscore$Heroine <- ifelse(bd1$hero== herounique[2], 0, ifelse(bd1$hero== herounique[1], 1, ifelse(bd1$hero==herounique[6], 1, ifelse(bd1$hero==herounique[7], 2, ifelse(bd1$hero==herounique[4], 3, NA)))))
MDunique<-unique(bd1$md)
bdscore$MD <- ifelse(bd1$md== MDunique[2], 0, ifelse(bd1$md== MDunique[4], 1, ifelse(bd1$md==MDunique[1], 1, ifelse(bd1$md==MDunique[6], 2, ifelse(bd1$md==MDunique[5], 3, NA)))))
popunique<-unique(bd1$pop)
bdscore$Poppers <- ifelse(bd1$pop== popunique[1], 0, ifelse(bd1$pop== popunique[2], 1, ifelse(bd1$pop==popunique[4], 1, ifelse(bd1$pop==popunique[6], 2, ifelse(bd1$pop==popunique[5], 3, NA)))))
jeuunique<-unique(bd1$jeu)
bdscore$Jeu <- ifelse(bd1$jeu== jeuunique[1], 0, ifelse(bd1$jeu== jeuunique[2], 1, ifelse(bd1$jeu==jeuunique[6], 1, ifelse(bd1$jeu==jeuunique[4], 2, ifelse(bd1$jeu==jeuunique[5], 3, NA)))))
# Image
# Faire la fête fait partie de l'image que j'ai de moi
bdscore$FeteImagePerso <- bd1$idt1
# Faire la fête fait partie de "qui je suis"
bdscore$FeteEtre <- bd1$idt2
# Faire la fête fait partie de ma personnalité
bdscore$FetePerso <-bd1$idt3
# Faire la fête fait partie de mon quotidien
bdscore$FeteQuotidien <- bd1$idt4
# Les autres considérent que faire la fête fait partie de ma personnalité
bdscore$FeteImageAutre <- bd1$idt5
# Mobilité
bdscore$Mobilite <- bd1$eqmob
# Autonomie
bdscore$Autonomie <- bd1$eqaut
# Habitudes
bdscore$Habitudes <- bd1$eqhab
# Douleurs/Malaise
bdscore$Douleur <- bd1$eqdoul
# Dépression
bdscore$Depression <- bd1$eqdep
# Lieu de résidence : Famille/tuteur, logement indépendant, résidence collective, ailleurs
log <- unique(bd1$logou)
bdscore$LogFamille <- ifelse(bd1$logou==log[1],1,NA)
bdscore$LogFamille[is.na(bdscore$LogFamille)]<-0
bdscore$LogInd <-ifelse(bd1$logou==log[3],1,NA)
bdscore$LogInd [is.na(bdscore$LogInd)]<-0
bdscore$LogRes <-ifelse(bd1$logou==log[2],1,NA)
bdscore$LogRes[is.na(bdscore$LogRes)]<- 0
bdscore$LogAutre <-ifelse(bd1$logou==log[5],1,NA)
bdscore$LogAutre[is.na(bdscore$LogAutre)]<-0
# Cohabitation 0/1 indicator columns (1 = answered "yes" to living with ...).
# NOTE(review): like the rest of the scoring, level [2] of unique() is assumed
# to be the "yes" answer — confirm against the raw questionnaire levels.
# The fill-in assignments below used "<--0" (parsed as `<- -0`, i.e. assigning
# negative zero); normalized to a plain `<- 0` for consistency with the other
# indicator columns. Numerically identical.
# Lives alone
bdscore$Seul <- ifelse(bd1$logwho1==unique(bd1$logwho1)[2],1,NA)
bdscore$Seul[is.na(bdscore$Seul)] <- 0
# Lives with a partner
bdscore$Couple <- ifelse(bd1$logwho2==unique(bd1$logwho2)[2],1,NA)
bdscore$Couple[is.na(bdscore$Couple)] <- 0
# Lives with children
bdscore$Enfants <- ifelse(bd1$logwho3==unique(bd1$logwho3)[2],1,NA)
bdscore$Enfants[is.na(bdscore$Enfants)] <- 0
# Flat-share with friends
bdscore$ColocFriend <- ifelse(bd1$logwho4==unique(bd1$logwho4)[2],1,NA)
bdscore$ColocFriend[is.na(bdscore$ColocFriend)] <- 0
# Flat-share with other people
bdscore$ColocAutres <- ifelse(bd1$logwho5==unique(bd1$logwho5)[2],1,NA)
bdscore$ColocAutres[is.na(bdscore$ColocAutres)] <- 0
# Chronic illness as a boolean (1 = "Oui", 0 = "Non", otherwise NA)
bdscore$MaladieChroniqueBool <- ifelse(bd1$ald=="Oui",1,ifelse(bd1$ald=="Non",0,NA))
# Scholarship holder (1 = "Oui", 0 = "Non", otherwise NA)
bdscore$Bourse <- ifelse(bd1$bours=="Oui",1, ifelse(bd1$bours =="Non",0,NA))
# The "aldquoi" column is free text, so it is deliberately not analysed.
## Export the bdscore database (semicolon-separated, French locale)
write.csv2(bdscore,file="bdscore.csv",row.names = FALSE)
# Data description: one summary column per item of bdscore.
# Rows of `info`: mean, median, max, min, NA count, standard deviation, NA share.
# (Row labels are kept in French because they are reused as row names below.)
Nom_stats = c("Moyenne","Mediane","Maximum","Minimum","Nb de NA","Ecart-type","Part de NA")
N_stats = length(Nom_stats)
Nc=dim(bdscore)[2] # number of columns of bdscore (id column + items)
info=data.frame(matrix(data=NA,nrow=N_stats,ncol=Nc-1))
rownames(info) <- Nom_stats
colnames(info) <- colnames(bdscore)[2:Nc]
# NOTE(review): Nl (the number of rows) is assumed to be defined earlier in
# the file — confirm it equals nrow(bdscore).
for (i in (2:Nc)) {
y=bdscore[i]
info[1,i-1]<-apply(na.omit(y),2,mean) # mean
info[2,i-1] <-apply(na.omit(y),2,median) # median
info[3,i-1] <- max(na.omit(y)) # maximum
info[4,i-1] <- min(na.omit(y)) # minimum
info[5,i-1] <- sum(1*is.na(bdscore[i])) # number of NAs
info[6,i-1] <- apply(na.omit(y), 2, sd) # standard deviation
info[7,i-1] <- sum(1*is.na(bdscore[i]))/Nl # share of NAs
}
# Missing data
# Per-respondent response rate, and respondents with too few answers
reponses=1*cbind(bdscore[1],is.na(bdscore[2:Nc])) # the 1* turns FALSE/TRUE into 0/1
reponses$Total <- rowSums(reponses[,3:Nc]) # number of items the respondent left blank
# NOTE(review): `reponses` has Nc columns (id + Nc-1 NA indicators), so
# rowSums over 3:Nc skips the indicator sitting in column 2, and Pourcent
# divides by Nc although there are only Nc-1 items — confirm whether
# 2:Nc / (Nc-1) was intended.
reponses$Pourcent <- 100*reponses$Total/Nc # "non-response" rate in percent
faible_taux=reponses[reponses$Pourcent>60,] # respondents missing more than 60% of items
fort_taux=reponses[reponses$Pourcent<=0,] # respondents with complete answers
taux_global=100*sum(reponses$Total)/(Nc*Nl) # overall share of missing answers
###########################################
### k-nearest-neighbours imputation ###
###########################################
#aggr(bdscore, col=c('navyblue','red'), numbers=TRUE, combined = FALSE, sortVars=TRUE, labels=names(bdscore), cex.axis=.7, gap=3, ylab=c("Histogram of missing bdscore","Pattern"))
# (the left-hand aggr plot illustrates the share of missing data)
# Imputation: impute.knn comes from the Bioconductor "impute" package.
# rowmax/colmax are set to the observed worst-case NA rates so no row or
# column is rejected.
NA_max_col=max(info[7,])
NA_max_row= max(reponses$Pourcent)/100
mat = impute.knn(as.matrix(bdscore),k=100,rowmax=NA_max_row,colmax=NA_max_col)
full_data = as.data.frame(mat$data)
# Total AQoL score (kept commented out)
#full_data$atot <- full_data$a1 + full_data$a2 + full_data$a3 + full_data$a4 + full_data$a5 + full_data$a6 + full_data$a7 + full_data$a8 + full_data$a9 + full_data$a10 + full_data$a11 + full_data$a12 + full_data$a13 + full_data$a14 + full_data$a15 + full_data$a16 + full_data$a17 + full_data$a18 + full_data$a19 + full_data$a20 + full_data$a21 + full_data$a22 + full_data$a23 + full_data$a24 + full_data$a25+full_data$a26+ full_data$a27+full_data$a28+full_data$a29+ full_data$a30+full_data$a31+full_data$a32+ full_data$a33+ full_data$a34
# AUDIT-C score (kept commented out)
#full_data$Audit <- full_data$FreqConso + full_data$NbVerreMoy+ full_data$FreqSupSixVerre
## Export the imputed database
write.csv2(full_data,file="full_data.csv",row.names = FALSE)
# Same summary statistics, recomputed on the imputed matrix (full_data).
Ncf=dim(full_data)[2]
info_full=data.frame(matrix(data=NA,nrow=N_stats,ncol=Ncf-1))
rownames(info_full) <- Nom_stats
colnames(info_full) <- colnames(full_data)[2:Ncf]
Ncf=dim(full_data)[2]
for (i in (2:Ncf)) {
y=full_data[,i]
info_full[1,i-1]<- mean(y) # mean
info_full[2,i-1] <-median(y) # median
info_full[3,i-1] <- max(y) # maximum
info_full[4,i-1] <- min(y) # minimum
info_full[5,i-1] <- sum(1*is.na(full_data[i])) # number of NAs (should be 0 after imputation)
info_full[6,i-1] <- sd(y) # standard deviation
info_full[7,i-1] <- sum(1*is.na(full_data[i]))/Nl # share of NAs
}
# Quantify how far imputation shifted each item's summary statistics:
# absolute gap between the raw-data stats (info) and the imputed-data stats
# (info_full). Row 5 of the output takes row 6 of info (standard deviation),
# because info's row 5 is the NA count.
erreur_impute=data.frame(matrix(data=NA,nrow=5,ncol=Nc-1))
rownames(erreur_impute) <- c("Moyenne","Mediane","Maximum","Minimum","Ecart-type")
colnames(erreur_impute) <- colnames(bdscore)[2:Nc]
for (i in (2:Nc)) {
# (a dead assignment `y=full_data[,i]` was removed here: the loop body
# only reads info / info_full)
erreur_impute[1,i-1] <- abs(info[1,i-1]-info_full[1,i-1]) # mean
erreur_impute[2,i-1] <- abs(info[2,i-1]-info_full[2,i-1]) # median
erreur_impute[3,i-1] <- abs(info[3,i-1]-info_full[3,i-1]) # maximum
erreur_impute[4,i-1] <- abs(info[4,i-1]-info_full[4,i-1]) # minimum
erreur_impute[5,i-1] <- abs(info[6,i-1]-info_full[6,i-1]) # standard deviation
}
#aggr(full_data, col=c('navyblue','red'), numbers=TRUE, sortVars=TRUE, labels=names(bdscore), cex.axis=.7, gap=3, ylab=c("Histogram of missing bdscore","Pattern"))
# ce dernier affichage est une petite vérification graphique pour s'assurer
# qu'il n'y a plus de données manquantes
###############################################################################################################################################################################################################
###############################################################################################################################################################################################################
###############################################################################################################################################################################################################
###############################################################################################################################################################################################################
# Pour cette partie il faut avoir la base full_data. Il faut donc soit exécuter la première partie du code,
# soit en cas de bug utiliser setwd pour définir le chemin vers la BDD et importer la base full_data.
# setwd("")
# full_data=read.csv2("full_data.csv")
###############################
### Correlation de Spearman ###
###############################
# Première étapes de l'étude de la base. Aucune corrélation forte ne semble être présente.
# CorrelationP=matrix(data=NA,nrow=35,ncol=43)
# CorrelationR=matrix(data=NA,nrow=35,ncol=43)
# nomlignes=c()
# nomcolonnes=c()
# for (i in (1:35)){nomlignes=c(nomlignes,names(bdscore[i]))}
# for (i in (36:78)){nomcolonnes=c(nomcolonnes,names(bdscore[i]))}
# rownames(CorrelationP)=nomlignes
# colnames(CorrelationP)=nomcolonnes
# rownames(CorrelationR)=nomlignes
# colnames(CorrelationR)=nomcolonnes
#
# for (i in (1:35))
# {for (j in (36:78))
# {Testspm=cor.test(as.numeric(unlist(bdscore[i])), as.numeric(unlist(bdscore[j])), method="spearman")
# CorrelationP[i,j-35]=as.numeric(Testspm[3])
# CorrelationR[i,j-35]=as.numeric(Testspm[4])}}
#
# View(CorrelationP)
# View(CorrelationR)
#
# persp3D(z = CorrelationP, theta=30,phi=15,xlab='AQoLS',ylab='Consommations',zlab='p-value',expand=0.5,shade=0.8,ticktype="detailed")
# persp3D(z = CorrelationR, theta=30,phi=15,xlab='AQoLS',ylab='Consommations',zlab='R',expand=0.5,shade=0.8,ticktype="detailed")
#
###############
### K-means ###
###############
# Kmeans partitions the rows of `bdscore` into `nbclus` clusters with k-means.
# Arguments:
#   bdscore - data.frame of numeric variables (rows containing NAs are ignored)
#   nbclus  - number of clusters requested
# Returns: a list of nbclus data.frames, one per cluster.
# Note: kmeans() uses a random initialisation (nstart = 1), so results vary
# between runs unless the RNG seed is fixed by the caller.
Kmeans=function(bdscore,nbclus){
  # Cluster only the complete rows, and subset those SAME rows below.
  # FIX: the previous version clustered na.omit(bdscore) but indexed the
  # original frame with clus$cluster, which misaligns rows whenever
  # na.omit() dropped any (clus$cluster is shorter than nrow(bdscore)).
  complete <- na.omit(bdscore)
  # kmeans() applies match.arg() to the algorithm vector, i.e. "Hartigan-Wong".
  clus <- kmeans(complete, nbclus, iter.max = 10, nstart = 1,
                 algorithm = c("Hartigan-Wong", "Lloyd", "Forgy","MacQueen"),
                 trace=FALSE)
  Repartition <- clus$cluster
  # Collect the clusters in a list of data.frames
  Clusters <- vector("list", nbclus)
  for (i in seq_len(nbclus)) {
    Clusters[[i]] <- complete[Repartition == i, ]
  }
  return(Clusters)
}
# One k-means run on the full imputed data, 10 clusters
KClusters=Kmeans(full_data,10)
########################################################################
### PCA: dimensionality reduction and variable selection ###
########################################################################
## PCA on the whole dataset (id column dropped), keeping up to 78 components:
ACP <- PCA(full_data[-1],ncp=78)
# plot.PCA displays the variables and the individuals (Individuals factor
# map) in the plane of the two first principal factors.
## Variables plot, coloured by contribution
fviz_pca_var(ACP, col.var="contrib",
gradient.cols = c("#00AFBB", "#E7B800", "#FC4E07"),
repel = TRUE # Avoid text overlapping
)
# Contributions of variables to PC1
fviz_contrib(ACP, choice = "var", axes = 1, top = 78)
# Contributions of variables to PC2
fviz_contrib(ACP, choice = "var", axes = 2, top = 78)
## Number of dimensions to keep is chosen with the elbow criterion
## Scree plot (histogram of eigenvalues)
barplot(ACP$eig[1:dim(full_data)[2],2], main="Histogramme des valeurs propres",
names.arg=1:dim(full_data)[2], xlab="Axes", ylab="Pourcentage d'inertie",
cex.axis=0.8, font.lab=3, ylim=c(0, 12), col="green")
## Individuals plot, coloured by squared cosine
fviz_pca_ind(ACP, col.ind="cos2", geom = "point")
## Variables close to the centre of the variable factor map are dropped.
## Retained variables: AQoL items a1-a34, gender, the five party-identity
## items, tobacco, and the alcohol-consumption variables.
NlDBACP=dim(full_data)[1]
# Select the retained columns directly (same columns, same order as the
# former one-assignment-per-column version).
acp_vars <- c(paste0("a", 1:34),
              "Genre",
              "FeteImagePerso", "FeteEtre", "FetePerso",
              "FeteQuotidien", "FeteImageAutre",
              "Tabac", "NbVerreMoy", "FreqBinge",
              "FreqConso", "FreqSupSixVerre")
DBACP <- full_data[, acp_vars]
## Export the DBACP database
write.csv2(DBACP,file="DBACP.csv",row.names = FALSE)
## PCA on the reduced variable set, keeping 7 components:
ACPred <- PCA(DBACP,ncp=7)
## Variables plot, coloured by contribution
fviz_pca_var(ACPred, col.var="contrib",
gradient.cols = c("#00AFBB", "#E7B800", "#FC4E07"),
repel = TRUE # Avoid text overlapping
)
# Contributions of variables to PC1
fviz_contrib(ACPred, choice = "var", axes = 1, top = 78)
# Contributions of variables to PC2
fviz_contrib(ACPred, choice = "var", axes = 2, top = 78)
## Individuals plot
# NOTE(review): this plots ACP (the full-data PCA), not ACPred — probably a
# copy-paste leftover; confirm whether ACPred was intended here.
fviz_pca_ind(ACP, col.ind="cos2", geom = "point")
# plot.PCA displays the variables (Variable factor map) and the individuals
# (Individuals factor map) in the plane of the two first principal factors.
plot.PCA(ACPred,col.quali="blue", label="quali")
## Number of dimensions to keep is chosen with the elbow criterion
## Scree plot (histogram of eigenvalues)
# NOTE(review): the index 1:dim(full_data)[2] can exceed the number of rows
# of ACPred$eig (DBACP has fewer variables than full_data), producing NAs in
# the barplot — confirm 1:nrow(ACPred$eig) was intended.
barplot(ACPred$eig[1:dim(full_data)[2],2], main="Histogramme des valeurs propres",
names.arg=1:dim(full_data)[2], xlab="Axes", ylab="Pourcentage d'inertie",
cex.axis=0.8, font.lab=3, ylim=c(0, 12), col="orange")
## 8 dimensions look reasonable according to the elbow criterion
print(ACPred$eig)
## With 8 dimensions, 54.4% of the variability is explained
########################################################
### First ascending hierarchical classification ###
########################################################
## Building clusters:
# Distance matrix of the individuals' PCA coordinates
Distance=dist(ACPred$ind$coord)
# hclust builds the clusters with Ward's method
CHA=hclust(Distance,method="ward.D2")
# plot draws the dendrogram: WARNING, heavy on a large dataset!
plot(CHA)
# NOTE(review): the rectangles highlight k=5 clusters but cutree below cuts
# into 7 — confirm which cluster count is intended.
rect.hclust(CHA,k=5,border=2:6)
# cutree(tree,k) cuts the dendrogram into k clusters:
Repartition=cutree(CHA,7)
# Gather the rows of the first cluster into one table
Cluster1=full_data[Repartition==1,]
##################################################
### Iterating the method with helper functions ###
##################################################
# ClusterCHA takes the number of PCA dimensions (dimacp), the number of
# clusters to build (nbclus), bdscoreACP (the variables fed to the PCA) and
# full_data (the complete base whose rows are partitioned).
# Returns: a list of nbclus data.frames, each a subset of full_data's rows.
ClusterCHA=function(dimacp,nbclus,bdscoreACP,full_data){
# Principal component analysis (FactoMineR::PCA)
print("ACP encours ... ")
ACP=PCA(bdscoreACP,ncp=dimacp)
# FIX: ACP$eig must be indexed with [row, column]. The previous
# ACP$eig$`cumulative percentage of variance`[dimacp] fails with
# "$ operator is invalid for atomic vectors" when eig is a matrix
# (as in current FactoMineR); matrix-style indexing works in either case.
print(c("Pourcentage de variabilité expliqué :",ACP$eig[dimacp,"cumulative percentage of variance"]))
plot.PCA(ACP,col.quali="blue", label="quali")
print("Traçage de l'histogramme des valeurs propres ...")
# Scree plot of the PCA eigenvalues
barplot(ACP$eig[1:dim(bdscoreACP)[2],2], main="Histogramme des valeurs propres",
names.arg=1:dim(bdscoreACP)[2], xlab="Axes", ylab="Pourcentage d'inertie",
cex.axis=0.8, font.lab=3, ylim=c(0, 15), col="orange")
# plot.PCA shows the variables and the individuals in the plane of the two
# first principal factors
#plot.PCA(ACP,col.quali="blue", label="quali")
# dist() returns the euclidean distance matrix of the PCA coordinates
print("Calcul de la matrice de distance ...")
Distance=dist(ACP$ind$coord)
# hclust builds the ascending hierarchical classification (Ward linkage)
print('Formation des clusters ...')
CHA=hclust(Distance,method="ward.D2")
# Dendrogram drawing is kept commented out (heavy on large data):
# print("Traçage du dendogramme")
# plot(CHA)
# rect.hclust draws the dendrogram with boxes around the selected clusters
# rect.hclust(CHA,nbclus)
# cutree cuts the dendrogram into nbclus clusters
Repartition=cutree(CHA,nbclus)
# Collect the clusters in a list of data.frames
Clusters=list()
for (i in 1:nbclus){
Clusters[[i]]=full_data[Repartition==i,]
}
return(Clusters)
}
###################################
### Ranking the clusters ###
###################################
# ClassificationClustersAtot takes a list of clusters and returns their
# indices sorted by decreasing mean of the `atot` column (total AQoL score):
# the highest-scoring cluster comes first.
ClassificationClustersAtot=function(Clusters){
atot_means <- vapply(Clusters, function(clus) mean(clus$atot), numeric(1))
order(atot_means, decreasing = TRUE)
}
# Same ranking, but based on the AUDIT-C score: cluster indices sorted by
# decreasing mean of the `Audit` column, highest first.
ClassificationClustersAudit=function(Clusters){
audit_means <- vapply(Clusters, function(clus) mean(clus$Audit), numeric(1))
order(audit_means, decreasing = TRUE)
}
##########################################
### Cluster comparison tables ###
##########################################
## Mean:
# One row per cluster, one column per item; the first column of each cluster
# table (the identifier) is skipped, as in the other Compare* helpers.
CompareMean=function(Clusters){
item_names <- colnames(Clusters[[1]])[-1]
cluster_means <- lapply(Clusters, function(clus) vapply(clus[-1], mean, numeric(1)))
summary_tab <- data.frame(do.call(rbind, cluster_means))
colnames(summary_tab) <- item_names
summary_tab
}
## Standard deviation:
# One row per cluster, one column per item (id column skipped); each cell is
# the standard deviation of that item within that cluster.
CompareSD=function(Clusters){
item_names <- colnames(Clusters[[1]])[-1]
cluster_sds <- lapply(Clusters, function(clus) vapply(clus[-1], sd, numeric(1)))
summary_tab <- data.frame(do.call(rbind, cluster_sds))
colnames(summary_tab) <- item_names
summary_tab
}
## Minimum
# Column-wise minimum of every item, cluster by cluster (id column skipped).
CompareMin=function(Clusters){
item_names <- colnames(Clusters[[1]])[-1]
summary_tab <- data.frame(matrix(data = NA, nrow = length(Clusters), ncol = length(item_names)))
colnames(summary_tab) <- item_names
for (clus_idx in seq_along(Clusters)) {
for (item_idx in seq_along(item_names)) {
summary_tab[clus_idx, item_idx] <- min(Clusters[[clus_idx]][[item_idx + 1]])
}
}
summary_tab
}
## Maximum
# Column-wise maximum of every item, cluster by cluster (id column skipped).
CompareMax=function(Clusters){
item_names <- colnames(Clusters[[1]])[-1]
summary_tab <- data.frame(matrix(data = NA, nrow = length(Clusters), ncol = length(item_names)))
colnames(summary_tab) <- item_names
for (clus_idx in seq_along(Clusters)) {
for (item_idx in seq_along(item_names)) {
summary_tab[clus_idx, item_idx] <- max(Clusters[[clus_idx]][[item_idx + 1]])
}
}
summary_tab
}
## Quantile (median by default)
# Column-wise `percent` quantile of every item, cluster by cluster
# (id column skipped). percent = 0.5 gives the median.
CompareQuantile=function(Clusters,percent=0.5){
item_names <- colnames(Clusters[[1]])[-1]
summary_tab <- data.frame(matrix(data = NA, nrow = length(Clusters), ncol = length(item_names)))
colnames(summary_tab) <- item_names
for (clus_idx in seq_along(Clusters)) {
for (item_idx in seq_along(item_names)) {
summary_tab[clus_idx, item_idx] <- quantile(Clusters[[clus_idx]][[item_idx + 1]], percent)
}
}
summary_tab
}
##########################
### Studying the clusters ###
##########################
# Run the PCA + hierarchical classification pipeline with 7 PCA dimensions
# and 5 clusters, then compare the clusters item by item.
nbcl=5
dimacp=7
Clusters=ClusterCHA(dimacp,nbcl,DBACP,full_data)
# Print the size (rows x columns) of each cluster
for (i in (1:length(Clusters))) {
print(dim(Clusters[[i]]))
}
CompMoyenne=CompareMean(Clusters)
CompEcart=CompareSD(Clusters)
CompMin=CompareMin(Clusters)
CompMax=CompareMax(Clusters)
CompMedian=CompareQuantile(Clusters,0.5)
# Interactive inspection of the comparison tables (RStudio viewer)
View(CompMoyenne)
View(CompEcart)
View(CompMin)
View(CompMax)
View(CompMedian)
# NOTE(review): install.packages() inside an analysis script re-installs the
# package on every run and needs network access — consider moving the
# installation out of the script.
install.packages("NbClust")
library("NbClust")
# Calinski-Harabasz index to choose the number of clusters (2 to 10)
Nbanalyse=NbClust(data=full_data,diss=NULL,distance="euclidean",min.nc=2, max.nc=10,method="ward.D2",index=c('ch'))
|
3eec6dd17c69c3ec4e942db6ef35eac56dffaece
|
3a4729d21d7b9467787d8b668983109a8b45e9bc
|
/Chapter14/tmp/runit.f2c.R
|
5f61eb2ed62cc86c93f55de30c3bcb3100181f02
|
[] |
no_license
|
IshidaMotohiro/R_Handbook_Sjis
|
ee32d5926720f6a857282ca2d8c0de345644095e
|
9cd4e190dd04ee2202ab9c8ef94b3a6b8fac428b
|
refs/heads/master
| 2020-12-29T02:24:02.857676
| 2016-12-14T02:21:11
| 2016-12-14T02:21:11
| 50,570,885
| 2
| 2
| null | null | null | null |
SHIFT_JIS
|
R
| false
| false
| 350
|
r
|
runit.f2c.R
|
# k華氏を摂氏に変換
f2c <- function(f) return (5/9 * f - 32) # 正しくは # 5/9 * (f - 32)
## テスト関数
## ---------------------
test.f2c <- function() {
checkEquals(f2c(32), 0)
## 文字列を与えてみる
checkException (f2c ("xx"))
}
test.errordemo <- function() {
stop("不正な引数です")
}
|
fb5df7873c5be4af974a5c649b1b40ad5f6a44f4
|
e54af34bc7f6952e0f8fe536827182b07d38327f
|
/man/get_heart_rate_intraday_time_series.Rd
|
545e30cf00b1ca469c43fcbb2b457445ae7e47ef
|
[
"MIT"
] |
permissive
|
tracker-neRds/wearable
|
a375543b432e8a757ac18b744625bd1791e6a34f
|
7d6ba0999cacac6a72ec819797790228eadfdc94
|
refs/heads/master
| 2020-11-24T01:40:05.693741
| 2019-12-13T22:07:55
| 2019-12-13T22:07:55
| 227,908,256
| 0
| 2
|
MIT
| 2019-12-19T06:16:11
| 2019-12-13T19:27:52
|
R
|
UTF-8
|
R
| false
| true
| 1,518
|
rd
|
get_heart_rate_intraday_time_series.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/heart-rate.R
\name{get_heart_rate_intraday_time_series}
\alias{get_heart_rate_intraday_time_series}
\title{Get Heart Rate Intraday Time Series}
\usage{
get_heart_rate_intraday_time_series(token, date = "", detail_level = "1min",
start_time = NULL, end_time = NULL, simplify = TRUE)
}
\arguments{
\item{token}{An OAuth 2.0 token generated by oauth_token()}
\item{date}{The end date of the period specified in the format "yyyy-MM-dd" or "today" as a string or Date object.}
\item{detail_level}{Number of data points to include. Either "1sec" or "1min".}
\item{start_time}{The start of the period, in the format "HH:mm".}
\item{end_time}{The end of the period, in the format "HH:mm"}
\item{simplify}{logical; should the result be simplified from a list to a data.frame if possible}
}
\description{
\code{get_heart_rate_intraday_time_series()} returns the intraday time series.
If your application has the appropriate access, your calls to a time series endpoint for a specific day (by using start and end dates on the same day or a period of 1d),
the response will include extended intraday values with a one-minute detail level for that day.
Access to the Intraday Time Series for personal use (accessing your own data) is available through the "Personal" App Type.
}
\details{
See \url{'https://dev.fitbit.com/reference/web-api/heart-rate/#get-heart-rate-intraday-time-series} for more details.
}
|
042e4cbf6370ba2809a9cef853b47269516fa781
|
725cce758902d6d9db049e87dc211c40ff10921e
|
/R/Pr.S.up.R
|
84446dd0e2e9703149162bc1875a9b4f26a276d7
|
[] |
no_license
|
yaomin/dptmethods
|
a589e21dbff91075cea72fbbed40626fa643acee
|
846a42d01c05e1a27fec290498e011b0f05d6882
|
refs/heads/master
| 2021-01-17T10:33:38.755292
| 2013-07-11T23:54:16
| 2013-07-11T23:54:16
| 7,569,298
| 1
| 0
| null | 2014-10-13T15:59:10
| 2013-01-11T23:58:27
|
R
|
UTF-8
|
R
| false
| false
| 81
|
r
|
Pr.S.up.R
|
Pr.S.up <-
function(r,k,n,p) {
lambda <- (n-k+1)*f.f(r,k,p)
exp(-1*lambda)
}
|
a1ce36f5e1fe7bf7176062a7c5ab92f6746a3d20
|
e3fb5b8a7a00fde19e9101c4199c143081cf06c0
|
/tests/testthat.R
|
eb6ac41a1784d2a9cb9d2e6bbb3c8c9456f1322b
|
[
"MIT"
] |
permissive
|
lindemann09/singcar
|
8093ac944c0aaf24463fa881a7c70c9ae83fd66d
|
a2cbf841f1430fc6f0afba3da5e9b4ba0c850b38
|
refs/heads/master
| 2023-08-12T13:28:04.152637
| 2021-10-11T14:07:36
| 2021-10-11T14:07:36
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 58
|
r
|
testthat.R
|
library(testthat)
library(singcar)
test_check("singcar")
|
9c1889ad59728cdf50c1c17b8c3b2ada95039217
|
13b21646937de87022c30c8512c0151d5bb3f03f
|
/openintro/man/gov.poll.Rd
|
f2f399c122048182cd9f25ee54ca137f430214a5
|
[] |
no_license
|
mine-cetinkaya-rundel/openintro-r-package
|
374a104b4241eec05fb4688a5231ba8979e59ad0
|
099da5b776784462f4ff384434cd7a517dfe37d6
|
refs/heads/master
| 2020-03-28T09:40:40.626621
| 2018-09-01T03:20:39
| 2018-09-01T03:20:39
| 148,051,227
| 2
| 0
| null | 2018-09-09T17:54:34
| 2018-09-09T17:54:34
| null |
UTF-8
|
R
| false
| false
| 748
|
rd
|
gov.poll.Rd
|
\name{gov.poll}
\alias{gov.poll}
\docType{data}
\title{Pew Research poll on goverment approval ratings}
\description{The poll's focus is on Obama and then Democrats and Republicans in Congress.}
\usage{data("gov.poll")}
\format{
A data frame with 4223 observations on the following 2 variables.
\describe{
\item{\code{poll}}{a factor with levels \code{approve} \code{disapprove}}
\item{\code{eval}}{a factor with levels \code{Democrats} \code{Obama} \code{Republicans}}
}
}
\source{See the Pew Research website: www.people-press.org/2012/03/14/romney-leads-gop-contest-trails-in- matchup-with-obama. The counts in Table 6.19 are approximate.}
\examples{
data(gov.poll)
## maybe str(gov.poll) ; plot(gov.poll) ...
}
\keyword{datasets}
|
5e07ebc9cc62b9211a96c2c300a4a501dec7f6bd
|
0a906cf8b1b7da2aea87de958e3662870df49727
|
/grattan/inst/testfiles/IncomeTax/libFuzzer_IncomeTax/IncomeTax_valgrind_files/1610051241-test.R
|
424c10ba37e0d9430faa169aa363ba966e894868
|
[] |
no_license
|
akhikolla/updated-only-Issues
|
a85c887f0e1aae8a8dc358717d55b21678d04660
|
7d74489dfc7ddfec3955ae7891f15e920cad2e0c
|
refs/heads/master
| 2023-04-13T08:22:15.699449
| 2021-04-21T16:25:35
| 2021-04-21T16:25:35
| 360,232,775
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 420
|
r
|
1610051241-test.R
|
testlist <- list(rates = numeric(0), thresholds = numeric(0), x = c(7.57771153258705e-304, -6.56725888219402e-287, NaN, NaN, NaN, NaN, -1.05658906227133e+270, 2.12199579047121e-314, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0))
result <- do.call(grattan::IncomeTax,testlist)
str(result)
|
2610e9e3f023857d3177b05ad588ffe0f7628910
|
e28329986404a8281fda97efa340ffacc001dc02
|
/R/constrr.R
|
94e46e19a3eef35b5ae4745a4c01a1d61020819f
|
[] |
no_license
|
JoelShodams/s3oopJoe
|
3ea47e7c4d0b2f1236f0c5281e6436cd34445f10
|
40091ab1d9847c56cd8cf4c2ed494205dc1565a1
|
refs/heads/master
| 2023-04-11T21:54:48.288412
| 2021-05-07T23:14:50
| 2021-05-07T23:14:50
| 362,676,564
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,786
|
r
|
constrr.R
|
#' @title Two-Sample Mean Comparison Test Constructor
#'
#' @description Constructs an object of class \code{Rttest} that tests the
#' null hypothesis that the underlying means of two populations are equal.
#' For unpaired data an F-test on the variances decides between a
#' pooled-variance t-test and a Welch test; for paired data a paired t-test
#' is performed.
#'
#' @param x vector sample from the first population
#' @param y vector sample from the second population
#' @param alpha significance level, a value within the range of 0 and 1
#' @param paired a logical value, TRUE if the two samples are paired
#'
#' @return A list of class \code{Rttest} containing the test type, the
#' null-hypothesis decision ("Y"/"N"), the underlying \code{htest} object,
#' the stacked data, the \code{paired} flag and the confidence interval.
#'
#' @export
#'
#' @examples
#'
#' x=rnorm(20,mean=10,sd=15);y=rnorm(10,mean=8,sd=15);myttest(x,y,0.05,FALSE)
myttest <- function(x, y, alpha = double(), paired = logical()) {
  stopifnot(is.double(alpha))   # checks the data structure for alpha
  stopifnot(is.logical(paired)) # checks the data structure for paired
  if (!paired) {
    # Unpaired: use an F-test on the variances to pick pooled vs Welch.
    # (The original compared `paired == "FALSE"`, a fragile logical-vs-string
    # comparison that only worked through implicit coercion.)
    var_test <- var.test(x, y)
    if (var_test$p.value > alpha) {
      ttest <- t.test(x, y, paired = FALSE, var.equal = TRUE)
      testresult <- "T-test"
    } else {
      ttest <- t.test(x, y, paired = FALSE, var.equal = FALSE)
      testresult <- "Welch"
    }
  } else {
    # Paired samples must have equal length.
    stopifnot(length(x) == length(y))
    ttest <- t.test(x, y, paired = TRUE)
    testresult <- "Paired"
  }
  # Reject the null hypothesis when the p-value falls below alpha.
  NH <- if (ttest$p.value < alpha) "Y" else "N"
  columns_ <- rep(c("x", "y"), c(length(x), length(y)))
  values <- c(x, y)
  Table_ <- data.frame("values" = values, "categories" = columns_)
  # List to be returned, tagged with the Rttest class.
  llist <- list(Test_type = testresult,
                "Reject NUll?" = NH,
                t_test = ttest, data = Table_, paired = paired, CI = ttest$conf.int)
  class(llist) <- "Rttest"
  llist
}
|
1111611ddd69dee0a7441cf13ec23d45bf76e9ad
|
2da2406aff1f6318cba7453db555c7ed4d2ea0d3
|
/inst/snippet/pvals02.R
|
4a3238367277c1f5f82543740bf2dda50bf8097c
|
[] |
no_license
|
rpruim/fastR2
|
4efe9742f56fe7fcee0ede1c1ec1203abb312f34
|
d0fe0464ea6a6258b2414e4fcd59166eaf3103f8
|
refs/heads/main
| 2022-05-05T23:24:55.024994
| 2022-03-15T23:06:08
| 2022-03-15T23:06:08
| 3,821,177
| 11
| 8
| null | null | null | null |
UTF-8
|
R
| false
| false
| 215
|
r
|
pvals02.R
|
# Simulate the distribution of p-values under an ALTERNATIVE hypothesis:
# 10,000 one-sample t-tests of n = 25 draws from N(1/2, 1), keeping each
# p-value (mosaic's do() * { } idiom; pval() extracts the p-value).
Pvals.alt <- do(10000) * { t.test(rnorm(25, 1/2, 1)) %>% pval() }
# Histogram of the simulated p-values (ggformula); skewed toward 0 under H1.
gf_dhistogram(~ p.value, data = Pvals.alt, binwidth = 0.01, center = 0.005)
# QQ plot against the uniform distribution (p-values are Unif(0,1) only under H0).
gf_qq(~ p.value, data = Pvals.alt, distribution = qunif, geom = "line")
|
85357d9e173cd729f537efabace96a5f98594534
|
aef08336a284e153cc381382904e509d5bb3b078
|
/plot4.R
|
7e3e6ef2f64ea05c4adadb7dd57601925101ce5c
|
[] |
no_license
|
sovello/XPDAnalysis
|
761e9bf2ee8003d4254c06cea1bb0caf9399fb57
|
6961a5ff5656bf86501bea4852478a12f9265e87
|
refs/heads/master
| 2021-01-01T06:32:30.683319
| 2014-07-26T13:32:20
| 2014-07-26T13:32:20
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,196
|
r
|
plot4.R
|
# Read in the NEI emissions data and the source classification table.
emissions <- readRDS("summarySCC_PM25.rds")
codes <- readRDS("Source_Classification_Code.rds")
# Merging the two datasets would produce a very large object, so instead
# identify the coal-related source classification codes first and use them
# to subset the emissions records.
is_coal <- grepl("coal", codes$EI.Sector, perl = TRUE, ignore.case = TRUE)
coalRelated <- codes[is_coal, ]
# Convert SCC to character so the membership comparison is straightforward.
coalRelated$SCC <- as.character(coalRelated$SCC)
# Keep only emissions records whose SCC appears among the coal-related codes.
coalRelatedRecords <- emissions[emissions$SCC %in% coalRelated$SCC, ]
# Total coal-related emissions per year.
coalEmissions <- aggregate(Emissions ~ year, data = coalRelatedRecords, FUN = sum)
# Render the plot to a PNG device using the base graphics system.
png(filename="plot4.png", width=480, height=480, units="px")
plot(coalEmissions[, 1], coalEmissions[, 2],
     main="Emissions from Coal Combustion Related Sources",
     xlab="Years", ylab="Total Emissions (in tons)",
     col="blue", type="s")
# Close the png device.
dev.off()
|
95a728731bf31ee7786825e87fede70a1e9c27f1
|
0ea92a0e1eace26312972c5d2be22ae49b59c99c
|
/R/wsp/wsp2020/FoundationDataset/geo_summaries/fips_in_minorbasin.R
|
8512c3b511f1060251a0c209b1b92ebc6d662eb0
|
[] |
no_license
|
HARPgroup/vahydro
|
852425ccf9271ebe2ff95fb6b48d8b7eb23e473f
|
73ea02207eee96f574f24db95fad6a2f6a496e69
|
refs/heads/master
| 2023-09-04T00:11:29.732366
| 2023-09-01T18:13:46
| 2023-09-01T18:13:46
| 147,414,383
| 0
| 0
| null | 2023-09-08T13:52:26
| 2018-09-04T20:37:13
|
R
|
UTF-8
|
R
| false
| false
| 12,183
|
r
|
fips_in_minorbasin.R
|
# FIPS IN MINOR BASINS
# Setup: load packages, site configuration, the minor-basin layer, the FIPS
# layer exported from VAHydro, and Virginia population projections.
library(ggplot2)
library(rgeos)
library(ggsn)
library(rgdal) # needed for readOGR()
library(dplyr) # needed for case_when()
library(sf) # needed for st_read()
library(sqldf)
library(kableExtra)
library(viridis) #magma
library(wicket) #wkt_centroid()
library(cowplot) #plot static legend
library(magick) #plot static legend
library(ggrepel) #needed for geom_text_repel()
library(ggmap) #used for get_stamenmap, get_map
######################################################################################################
# LOAD FILES
######################################################################################################
#site <- "https://deq1.bse.vt.edu/d.dh/"
site <- "http://deq2.bse.vt.edu/d.dh/"
basepath <- "/var/www/R/"
# NOTE(review): config.local.private presumably defines vahydro_location
# (used below) and other private settings -- confirm before running.
source(paste(basepath,"config.local.private",sep = '/'))
localpath <- tempdir()
#DOWNLOAD MINOR BASIN LAYER DIRECT FROM GITHUB
MinorBasins.csv <- read.table(file = 'https://raw.githubusercontent.com/HARPgroup/hydro-tools/master/GIS_LAYERS/MinorBasins.csv', sep = ',', header = TRUE)
#DOWNLOAD FIPS LAYER DIRECT FROM VAHYDRO
fips_filename <- paste("vahydro_usafips_export.csv",sep="")
# NOTE(review): "\\" as a path separator is Windows-specific.
fips_destfile <- paste(localpath,fips_filename,sep="\\")
download.file(paste(site,"usafips_geom_export",sep=""), destfile = fips_destfile, method = "libcurl")
fips.csv <- read.csv(file=paste(localpath , fips_filename,sep="\\"), header=TRUE, sep=",")
#LOAD MINOR BASIN EXTENT FUNCTION
source(paste(vahydro_location,"R/wsp/wsp2020/FoundationDataset/geo_summaries/mb.extent.R",sep = '/'))
#LOAD VIRGINIA POPULATION PROJECTION FILE
vapop_folder <- "U:/OWS/foundation_datasets/wsp/Population Data/"
vapop <- read.csv(paste0(vapop_folder, "VAPopProjections_Total_2020-2040_final.csv"))
# Keep FIPS, name, rounded 2020/2030/2040 projections and the 20-year
# percent change (SQL identifiers are case-insensitive, so x2020 == X2020).
vapop <- sqldf('SELECT FIPS, Geography_Name, round(x2020,0), round(x2030,0), round(x2040,0), round(((X2040 - X2020) / X2020)*100, 2) AS pct_change
                FROM vapop')
###############################################################################################
#START FUNCTION ###############################################################################
###############################################################################################
# Summarize the 20-year population trend by locality (FIPS) for one minor
# basin and emit a LaTeX wraptable to <folder>/tables_maps/Xtables/.
# NOTE(review): depends on objects created elsewhere in this script/session:
# mp_all, MinorBasins.csv, fips.csv, vapop, mb.extent(), and `folder` (the
# output directory) -- all must exist before this function is called.
FIPS_in_basins <- function(minorbasin){
  # # SELECT MINOR BASIN NAME
  # mb_name <-sqldf(paste('SELECT name
  #                     FROM "MinorBasins.csv"
  #                     WHERE code == "',minorbasin,'"',sep=""))
  #select minor basin code to know folder to save in
  mb_code <- minorbasin
  # change all EL minorbasin_code values to ES
  # NOTE(review): because of R's copy-on-modify semantics this recode only
  # affects a function-local copy of mp_all, not the global object.
  if (minorbasin == 'ES') {
    mp_all$MinorBasin_Code <- recode(mp_all$MinorBasin_Code, EL = "ES")
    mb_name <- "Eastern Shore"
  } else {
    mb_name <- sqldf(paste('SELECT distinct MinorBasin_Name
                  From mp_all
                  WHERE MinorBasin_Code = ','\"',minorbasin,'\"','
                  ',sep=""))
    mb_name <- as.character(levels(mb_name$MinorBasin_Name)[mb_name$MinorBasin_Name])
  }
  #switch around the minor basin names to more human readable labels
  mb_name <- case_when(
    mb_name == "James Bay" ~ "Lower James",
    mb_name == "James Lower" ~ "Middle James",
    mb_name == "James Upper" ~ "Upper James",
    mb_name == "Potomac Lower" ~ "Lower Potomac",
    mb_name == "Potomac Middle" ~ "Middle Potomac",
    mb_name == "Potomac Upper" ~ "Upper Potomac",
    mb_name == "Rappahannock Lower" ~ "Lower Rappahannock",
    mb_name == "Rappahannock Upper" ~ "Upper Rappahannock",
    mb_name == "Tennessee Upper" ~ "Upper Tennessee",
    mb_name == "York Lower" ~ "Lower York",
    mb_name == "Eastern Shore Atlantic" ~ "Eastern Shore",
    mb_name == mb_name ~ mb_name) #this last line is the else clause to keep all other names supplied that don't need to be changed
  print(paste("PROCESSING: ",mb_name,sep=""))
  ######################################################################################################
  # DETERMINE MAP EXTENT FROM MINOR BASIN CENTROID
  extent <- mb.extent(minorbasin,MinorBasins.csv)
  ######################################################################################################
  # BOUNDING BOX
  # Build a rectangular WKT polygon from the extent corners, then fortify it
  # into a ggplot-ready data frame.
  bb=readWKT(paste0("POLYGON((",extent$x[1]," ",extent$y[1],",",extent$x[2]," ",extent$y[1],",",extent$x[2]," ",extent$y[2],",",extent$x[1]," ",extent$y[2],",",extent$x[1]," ",extent$y[1],"))",sep=""))
  bbProjected <- SpatialPolygonsDataFrame(bb,data.frame("id"), match.ID = FALSE)
  bbProjected@data$id <- rownames(bbProjected@data)
  bbPoints <- fortify(bbProjected, region = "id")
  bbDF <- merge(bbPoints, bbProjected@data, by = "id")
  ######################################################################################################
  ### PROCESS Minor Basin LAYER #######################################################################
  ######################################################################################################
  mb_data <- MinorBasins.csv
  MB_df_sql <- paste('SELECT *
                   FROM mb_data
                   WHERE code = "',minorbasin,'"'
                   ,sep="")
  mb_data <- sqldf(MB_df_sql)
  mb_data$id <- as.character(row_number(mb_data$code))
  # Clip each basin geometry to the bounding box and collect the pieces.
  MB.list <- list()
  for (z in 1:length(mb_data$code)) {
    MB_geom <- readWKT(mb_data$geom[z])
    MB_geom_clip <- gIntersection(bb, MB_geom)
    MBProjected <- SpatialPolygonsDataFrame(MB_geom_clip, data.frame('id'), match.ID = TRUE)
    MBProjected@data$id <- as.character(z)
    MB.list[[z]] <- MBProjected
  }
  MB <- do.call('rbind', MB.list)
  MB@data <- merge(MB@data, mb_data, by = 'id')
  MB@data <- MB@data[,-c(2:3)]
  MB.df <- fortify(MB, region = 'id')
  MB.df <- merge(MB.df, MB@data, by = 'id')
  ######################################################################################################
  ### PROCESS FIPS CENTROID WITHIN MINOR BASIN #########################################################
  ######################################################################################################
  # #PADDING TO ENSURE FIPS NAMES DONT GO BEYOND PLOT WINDOW
  # fips_extent <- data.frame(x = c(extent$x[1]+0.25, extent$x[2]-0.25),
  #                           y = c(extent$y[1]+0.25, extent$y[2]-0.25))
  # fips_bb=readWKT(paste0("POLYGON((",fips_extent$x[1]," ",fips_extent$y[1],",",fips_extent$x[2]," ",fips_extent$y[1],",",fips_extent$x[2]," ",fips_extent$y[2],",",fips_extent$x[1]," ",fips_extent$y[2],",",fips_extent$x[1]," ",fips_extent$y[1],"))",sep=""))
  # NOTE(review): MB_geom below is whatever the LAST loop iteration left
  # behind; for multi-geometry basins only the final geometry is used here.
  fips_layer <- fips.csv
  fips_layer$id <- fips_layer$fips_hydroid
  fips.list <- list()
  for (f in 1:length(fips_layer$fips_hydroid)) {
    fips_geom <- readWKT(fips_layer$fips_centroid[f])
    fips_geom_clip <- gIntersection(MB_geom, fips_geom) #SHOW ONLY FIPS NAMES WITHIN MINOR BASIN
    if (is.null(fips_geom_clip) == TRUE) {
      # print("FIPS OUT OF MINOR BASIN EXTENT - SKIPPING")
      next
    }
    fipsProjected <- SpatialPointsDataFrame(fips_geom_clip, data.frame('id'), match.ID = TRUE)
    fipsProjected@data$id <- as.character(fips_layer[f,]$id)
    fips.list[[f]] <- fipsProjected
  }
  length(fips.list)
  #REMOVE THOSE FIPS THAT WERE SKIPPED ABOVE (OUT OF MINOR BASIN EXTENT)
  fips.list <- fips.list[which(!sapply(fips.list, is.null))]
  length(fips.list)
  if (length(fips.list) != 0) {
    # print("NO FIPS GEOMS WITHIN MINOR BASIN EXTENT - SKIPPING")
    fips <- do.call('rbind', fips.list)
    fips@data <- merge(fips@data, fips_layer, by = 'id')
    fips@data <- fips@data[,-c(2:3)]
    fips_centroid.df <- data.frame(fips)
  } else {
    # Fall back to a two-row placeholder frame so downstream code can run.
    print("NO FIPS GEOMS WITHIN MINOR BASIN EXTENT")
    fips_centroid.df <- data.frame(id=c(1,2),
                                   fips_latitude =c(1,2),
                                   fips_longitude =c(1,2),
                                   fips_name = c(1,2),
                                   stringsAsFactors=FALSE)
  }
  #append minor basin name to fips_centroid.df
  fips_centroid.df$mb_name <- mb_name
  fips_centroid.df$mb_code <- minorbasin
  #print(fips_centroid.df)
  ######################################################################################################
  ### PROCESS FIPS BOUNDARY GEOMETRY WITHIN MINOR BASIN ################################################
  ######################################################################################################
  # Same pattern as above, but clipping the full FIPS boundary geometries.
  fips_layer <- fips.csv
  fips_layer$id <- fips_layer$fips_hydroid
  fips.list <- list()
  for (f in 1:length(fips_layer$fips_hydroid)) {
    fips_geom <- readWKT(fips_layer$fips_geom[f])
    fips_geom_clip <- gIntersection(MB_geom, fips_geom) #SHOW ONLY FIPS NAMES WITHIN MINOR BASIN
    if (is.null(fips_geom_clip) == TRUE) {
      # print("FIPS OUT OF MINOR BASIN EXTENT - SKIPPING")
      next
    }
    fipsProjected <- SpatialPointsDataFrame(fips_geom_clip, data.frame('id'), match.ID = TRUE)
    fipsProjected@data$id <- as.character(fips_layer[f,]$id)
    fips.list[[f]] <- fipsProjected
  }
  length(fips.list)
  #REMOVE THOSE FIPS THAT WERE SKIPPED ABOVE (OUT OF MINOR BASIN EXTENT)
  fips.list <- fips.list[which(!sapply(fips.list, is.null))]
  length(fips.list)
  if (length(fips.list) != 0) {
    # print("NO FIPS GEOMS WITHIN MINOR BASIN EXTENT - SKIPPING")
    fips <- do.call('rbind', fips.list)
    fips@data <- merge(fips@data, fips_layer, by = 'id')
    fips@data <- fips@data[,-c(2:3)]
    fips.df <- data.frame(fips)
  } else {
    print("NO FIPS GEOMS WITHIN MINOR BASIN EXTENT")
    fips.df <- data.frame(id=c(1,2),
                          fips_latitude =c(1,2),
                          fips_longitude =c(1,2),
                          fips_name = c(1,2),
                          stringsAsFactors=FALSE)
  }
  #append minor basin name to fips.df
  fips.df$mb_name <- mb_name
  fips.df$mb_code <- minorbasin
  # Join the population projections (Virginia localities only: FIPS "51%")
  # and keep the two display columns for the table.
  fips.df <- sqldf('SELECT a.fips_name, b.pct_change, a.fips_code, a.fips_centroid, a.mb_name, a.mb_code
                  FROM "fips.df" as a
                  LEFT OUTER JOIN vapop as b
                  ON a.fips_code = b.FIPS
                  WHERE a.fips_code LIKE "51%"')
  fips.df <- sqldf('SELECT fips_name as "Localities", pct_change as "20 Year % Change"
                  FROM "fips.df"
                  WHERE fips_name != "Bedford City"')
  #return(fips.df),
  # OUTPUT TABLE IN KABLE FORMAT
  localities_tex <- kable(fips.df, booktabs = T,format = "latex", align = c("l","c"),
                          caption = paste0("Population Trend by Locality in ", mb_name, " Basin"),
                          label = paste0(minorbasin,"_localities")) %>%
    kable_styling(latex_options = "striped")
  # Pad the wraptable line count so the text wrap clears short tables.
  end_wraptext <- if (nrow(fips.df) < 15) {
    nrow(fips.df) + 10
  } else if (nrow(fips.df) < 20){
    nrow(fips.df) + 4
  } else {nrow(fips.df)}
  #print(end_wraptext)
  #CUSTOM LATEX CHANGES
  #change to wraptable environment
  localities_tex <- gsub(pattern = "\\begin{table}[t]",
                         repl = paste0("\\begin{wraptable}[",end_wraptext,"]{r}{5cm}"),
                         x = localities_tex, fixed = T )
  localities_tex <- gsub(pattern = "\\end{table}",
                         repl = "\\end{wraptable}",
                         x = localities_tex, fixed = T )
  localities_tex <- gsub(pattern = "\\addlinespace",
                         repl = "",
                         x = localities_tex, fixed = T )
  # Write the LaTeX fragment and also return it (last expression).
  localities_tex %>%
    cat(., file = paste(folder,"tables_maps/Xtables/",minorbasin,"_localities_table.tex",sep=""))
  localities_tex
}
################ RUN FIPS IN BASINS FUNCTION ##################################################
# SINGLE BASIN
PU_fips <- FIPS_in_basins(minorbasin = "PU")
# ALL BASINS
basins <- c('PS', 'NR', 'YP', 'TU', 'RL', 'OR', 'EL', 'ES', 'PU', 'RU', 'YM', 'JA', 'MN', 'PM', 'YL', 'BS', 'PL', 'OD', 'JU', 'JB', 'JL')
all_basins <- list()
for (b in basins) {
  basin_b <- FIPS_in_basins(minorbasin = b)
  # NOTE(review): growing all_basins with rbind() inside the loop is O(n^2);
  # do.call(rbind, lapply(basins, FIPS_in_basins)) would avoid that -- but
  # verify first, since FIPS_in_basins returns a kable/LaTeX string, not a
  # data frame, so the accumulated object's shape should be confirmed.
  all_basins <- rbind(all_basins,basin_b)
}
write.csv(all_basins, file = "U:\\OWS\\foundation_datasets\\wsp\\wsp2020\\tables_maps\\Xtables\\VA_fips_in_minorbasins.csv" , row.names = F)
|
fa8946b6270179c76bc837584604d0363ad3fcbd
|
286259106ed5e6498ffe20b68203cc1da77dee33
|
/ui.R
|
3f63b80c4fde9810ce45caa5f1c7d0c29812d775
|
[] |
no_license
|
jaguidini/terrorismo
|
cc4bf81c940e2268991a758df5d40f39a13a2524
|
92a15bbf72641e241c1a7f911b379fcc33f30565
|
refs/heads/master
| 2020-04-08T20:38:36.164495
| 2018-11-30T08:29:34
| 2018-11-30T08:29:34
| 159,708,239
| 1
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 6,261
|
r
|
ui.R
|
# shinydashboard UI for the Global Terrorism explorer.
# NOTE(review): `dataset` must already exist in the app environment
# (presumably loaded in global.R) -- confirm.
# Build the list of years.
anos <- unique(dataset$ano)
ano_min <- min(anos)
ano_max <- max(anos)
# Build the list of countries, keeping only those with more than 50 attacks.
paises <- dataset %>% group_by(pais) %>% summarise(atentados = n()) %>% filter(atentados > 50)
paises <- paises[order(paises$pais), ] %>% select(pais)
dashboardPage(
  dashboardHeader(title = 'Global Terrorism'),
  dashboardSidebar(
    sidebarMenu(
      # Home
      menuItem('Home', tabName = 'tabHome', icon = icon('home')),
      # Introduction
      menuItem('Introdução', tabName = 'tabIntroducao', icon = icon('comments-o')),
      # Database
      menuItem('Base de dados', tabName = 'tabBasedados', icon = icon('database')),
      # Attacks
      menuItem(text = 'Eventos',
               tabName = 'atentados',
               icon = icon('fire'),
               # Over the years
               menuSubItem(text= 'Ao longo dos anos',
                           tabName = 'atentados_anos',
                           icon = icon('line-chart')),
               # Attack type
               menuSubItem(text = 'Tipo de ataque',
                           tabName = 'tipo_ataque',
                           icon = icon('bar-chart'))),
      # Terrorist groups
      menuItem(text = 'Grupos Terroristas',
               tabName = 'grupos',
               icon = icon('fire'),
               # Most active
               menuSubItem(text = 'Mais atuantes',
                           tabName = 'grupos_mais_atuantes',
                           icon = icon('bar-chart'))),
      # Map
      menuItem(text = 'Mapa',
               tabName = 'mapa',
               icon = icon('map-marker'))
    )
  ),
  dashboardBody(
    tabItems(
      # Home
      tabItem(
        tabName = 'tabHome',
        fluidRow(
          column(12, includeHTML("html/home.html"))
        )
      ),
      # Introduction
      tabItem(
        tabName = 'tabIntroducao',
        fluidRow(
          box(
            width = 12,
            column(12, includeHTML("html/introducao.html")),
            plotOutput('correlacao', height = 500)
          )
        )
      ),
      # Database
      tabItem(
        tabName = 'tabBasedados',
        fluidRow(
          column(12, includeHTML("html/base.html"))
        )
      ),
      # Attacks over the years
      tabItem(
        tabName = 'atentados_anos',
        fluidRow(
          column(
            width = 4,
            box(
              width = 12,
              h2('Filtros'),
              # NOTE(review): `c(dataset)` as the slider's initial value looks
              # wrong -- presumably c(ano_min, ano_max) was intended; the same
              # pattern recurs in the other sliders below.  Verify.
              sliderInput('interval', 'Intervalo:', ano_min, ano_max, c(dataset), step = 1, dragRange = TRUE),
              selectInput('countries', 'País', paises, selected = 'United States', multiple = TRUE, selectize = TRUE)
            )
          ),
          column(
            width = 8,
            fluidRow(
              box(
                title = 'Eventos',
                width = 12,
                plotlyOutput('atentados_por_ano')
              ),
              box(
                title = 'Eventos',
                width = 12,
                plotlyOutput('atentados_por_pais')
              )
            ),
            fluidRow(
              box(
                title = 'Vítimas',
                width = 12,
                plotlyOutput('mortos_feridos_por_ano')
              ),
              box(
                title = 'Efetividade',
                width = 12,
                plotlyOutput('atentados_sucesso_falha')
              )
            )
          )
        )
      ),
      # Attack type
      tabItem(
        tabName = 'tipo_ataque',
        fluidRow(
          column(
            width = 4,
            box(
              width = 12,
              h2('Filtros'),
              sliderInput('interval2', 'Intervalo:', ano_min, ano_max, c(dataset), step = 1, dragRange = TRUE),
              selectInput('countries2', 'País', paises, selected = 'United States', multiple = TRUE, selectize = TRUE)
            )
          ),
          column(
            width = 8,
            fluidRow(
              box(
                title = 'Eventos por tipo',
                width = 12,
                plotlyOutput('atentados_por_tipo_ataque')
              )
            )
          )
        )
      ),
      # Most active groups
      tabItem(
        tabName = 'grupos_mais_atuantes',
        fluidRow(
          column(
            width = 4,
            box(
              width = 12,
              h2('Filtros'),
              sliderInput('interval3', 'Intervalo:', ano_min, ano_max, c(dataset), step = 1, dragRange = TRUE),
              selectInput('countries3', 'País', paises, selected = 'United States', multiple = TRUE, selectize = TRUE)
            )
          ),
          column(
            width = 8,
            fluidRow(
              box(
                title = 'Grupos mais atuantes',
                width = 12,
                plotlyOutput('grupos_mais_atuantes')
              ),
              box(
                title = 'Atividades por grupo ao longo dos anos',
                width = 12,
                plotlyOutput('atividades_grupos_mais_atuantes')
              )
            )
          )
        )
      ),
      # Map tab
      tabItem(
        tabName = 'mapa',
        column(
          width = 4,
          box(
            width = 12,
            h2('Filtros'),
            sliderInput('interval4', 'Intervalo:', ano_min, ano_max, c(dataset), step = 1, dragRange = TRUE),
            selectInput('countries4', 'País', paises, selected = 'United States', multiple = TRUE, selectize = TRUE)
          )
        ),
        column(
          width = 8,
          fluidRow(
            box(
              title = 'Mapa',
              width = 12,
              leafletOutput("mapa")
            )
          )
        )
      )
    )
  )
)
|
89beee15baefd05d8bcd83a8a67e0e3b905ce888
|
263b85c11e28dbb0c40f618993b2da0f1d36aebe
|
/Covid.R
|
aeb9d1f55e8bcec9c34aba2eb712b9bbe4afa3b9
|
[] |
no_license
|
lucamlouzada/COVID_Brazil_Heatmap
|
a05ff7b3dcf45e39b85c3368a523037f42d06a1e
|
a4b162ba86f3c867a655fee4861107df3adc40d0
|
refs/heads/main
| 2023-01-19T07:35:35.744124
| 2020-11-27T12:59:27
| 2020-11-27T12:59:27
| 316,483,837
| 0
| 0
| null | null | null | null |
ISO-8859-1
|
R
| false
| false
| 2,523
|
r
|
Covid.R
|
# Weekly heatmap of confirmed COVID-19 cases in Brazil by age group, built
# from the SVS SRAG OpenDATASUS export.
library(RColorBrewer)
library(stringr)
library(tidyr)
library(lubridate)
library(dplyr)
library(ggplot2)
# So that month names are rendered in Portuguese.
Sys.setlocale("LC_TIME", "Portuguese")
# Prepare the environment and import the dataset.
# Download the data from: https://opendatasus.saude.gov.br/dataset/bd-srag-2020
# NOTE(review): rm(list=ls()) and a hard-coded setwd() are fine for a personal
# script but should be removed before reuse.
rm(list=ls())
setwd("C:/Users/lucam/Downloads")
data=read.csv("INFLUD-23-11-2020.csv",sep=";")
# To filter by state, include the following line:
#data=data[data$SG_UF_NOT=="PR",]
# Prepare the data frame: notification date, age and final classification.
df=data[,c("DT_NOTIFIC","NU_IDADE_N","CLASSI_FIN")]
df=na.omit(df)
df=rename(df,Date=1,Age=2,Class=3)
# Filter for classification = 5, where 5 is confirmed COVID-19.
# 4 is unidentified SARI; to include it as well, replace 5 below with 4|5.
df=df[df$Class==5,]
df=df[,c(1,2)]
# Remove records that appear to have age input errors.
df=df[df$Age>0,]
df=df[df$Age<98,]
# Group the ages into ten-year bins and count cases per (bin, date).
df=df %>% mutate(cuts = cut(Age, c(0,10,20,30,40,50,60,70,80, Inf))) %>%
  group_by(cuts,Date) %>%
  summarize(n=n()) %>%
  rename(Age=1,Date=2,Count=3)
levels(df$Age)= c("Menos de 10 anos", "Entre 10 e 19 anos","Entre 20 e 29 anos","Entre 30 e 39 anos","Entre 40 e 49 anos","Entre 50 e 59 anos","Entre 60 e 69 anos","Entre 70 e 79 anos","80 anos ou mais")
# Format the dates (input is day/month/year).
dates=str_split_fixed(df$Date,"/",n=3) %>%
  data.frame() %>%
  rename(day=1,mon=2,year=3)
df = paste(dates$year, dates$mon, dates$day, sep="-") %>% ymd() %>% as.Date() %>%
  data.frame(df$Age,df$Count) %>%
  rename(Date=1,Age=2,Count=3)
df=df[df$Date>as.Date("2020-04-01",format="%Y-%m-%d"),]
# Group the dates into weekly intervals.
df=df %>% mutate(cuts = cut(Date,"7 days")) %>%
  group_by(cuts,Age,Count) %>%
  summarize(n=n()) %>% select(c(1,2,3)) %>%
  rename(Date=1,Age=2,Count=3)
df$Date = format(strptime(as.character(df$Date),"%Y-%m-%d"),"%Y/%B/%d")
df$Date = as.Date(df$Date,format="%Y/%B/%d")
# Plot the heatmap.
ggplot(df, aes(Date, Age, fill= Count)) +
  geom_tile(color="white")+ scale_fill_distiller(palette = "RdYlGn",direction=-1,name="Casos")+theme(plot.title=element_text(size=16,face="bold"),legend.title = element_text(size=10),plot.caption = element_text(hjust = 0.25,size=7))+ylab(NULL)+xlab("Mês")+
  labs(title="Casos confirmados de COVID-19 no Brasil por idade*",caption = "Fonte: SVS SRAG OpenDATASUS. * Inclui apenas os casos registrados no sistema, cerca de 10% do total")+coord_fixed(ratio=10)
|
1d6d91c4057740cb31739589839c3f9199119308
|
0abba1e635adf5e9e38694321593c64e27c0f0f3
|
/R/write.pedfile.R
|
6c36629f791c2b0ac0a6a24ba2aef82ece7b93d6
|
[] |
no_license
|
cran/rJPSGCS
|
0a7e1a5a7b5ad903455fc81cca8a3496a61628ef
|
cd02315593d2ca2efa50ba2ff8b49823e2efff28
|
refs/heads/master
| 2021-01-23T19:46:19.227254
| 2018-05-13T15:12:41
| 2018-05-13T15:12:41
| 17,698,943
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,797
|
r
|
write.pedfile.R
|
# Write a pedigree file (pedfile) from pedigree information and a snp.matrix
# of genotypes, via the package's C back end.
#
#   pedinf    matrix/data frame of pedigree columns, one row per row of
#             snp.data (columns 5 onward coded 0/1/2), or the string
#             "unrelated" to auto-generate rows for unrelated individuals.
#   snp.data  a snp.matrix object.
#   file      output file name.
#   transpose write SNPs as rows instead of individuals.
#   sep/eol/na field separator, line terminator and missing-value code.
#
# Returns c(number of individuals, number of SNPs) on success; stops with an
# error if the output file cannot be opened.
write.pedfile <-
function(pedinf, snp.data, file, transpose=FALSE, sep=" ", eol="\n", na="0") {
  # Error checking
  if (!is(snp.data, "snp.matrix")) {
    stop("argument snp.data must be a snp.matrix object")
  }
  if(length(pedinf)==1 && pedinf=="unrelated") {
    pedinf<-make.unrel.pedinf(nrow(snp.data))
  }
  if(is.data.frame(pedinf)) {
    pedinf <- as.matrix(pedinf)
  }
  if(nrow(pedinf) != nrow(snp.data)) {
    stop("pedinf and snp.data must have the same number of rows")
  }
  # BUG FIX: the original `5:dim(pedinf)[2]` counts DOWN (5, 4) when pedinf
  # has fewer than 5 columns and then indexes out of bounds; only validate
  # the genotype-coded columns when they exist.
  if (ncol(pedinf) >= 5) {
    for(i in 5:ncol(pedinf)){
      if (!(all(pedinf[,i] %in% c(0,1,2)))) {
        stop("pedinf columns 5 onward must be numeric with values 0, 1 or 2")
      }
    }
  }
  # Done error checking, now write appropriate file.
  # NOTE(review): the transposed path coerces pedinf with as.integer() while
  # the direct path uses as.character(); this mirrors the original code --
  # confirm against the C entry points before changing.
  if(transpose) {
    res <- .C(write_as_transposed_pedfile, as.character(file),
       as.integer(pedinf), snp.data@.Data, as.integer(nrow(snp.data)), as.integer(ncol(snp.data)),
       as.integer(ncol(pedinf)), as.character(sep), as.character(eol),
       as.character(na), logical(1))
  } else {
    res <- .C(write_as_pedfile, as.character(file),
       as.character(pedinf), snp.data@.Data, as.integer(nrow(snp.data)), as.integer(ncol(snp.data)),
       as.integer(ncol(pedinf)), as.character(sep), as.character(eol),
       as.character(na), logical(1))
  }
  # BUG FIX: .C() returns the list of its arguments; the logical(1) status
  # flag is the 10th argument.  The original read res[[9]], which is the
  # `na` string, so the error branch could never fire.
  error <- res[[10]]
  if (error==1)
    stop("Couldn't open output file")
  else
    c(nrow(snp.data), ncol(snp.data))
}
# Build post-MAKEPED pedigree information for n unrelated individuals
# (adapted from the pedfunc function in CrypticIBDcheck).  Each row holds
# the individual's id twice, five zero columns, and two trailing columns
# of ones; the result is an n x 9 numeric matrix.
make.unrel.pedinf = function(n){
  # 1:n (rather than seq_len) is kept deliberately to match the original
  # behaviour exactly, including its degenerate n = 0 case.
  ids <- (1:n)
  zeros <- rep(0, n)
  ones <- rep(1, n)
  cbind(ids, ids, zeros, zeros, zeros, zeros, zeros, ones, ones)
}
|
306e1df9e01fe923bda9f13271fa0f82916355c2
|
545a979b8e667b1725657ba2a00c898472a670f9
|
/R/Session.R
|
6b8921e782ea5c37798ec2837fe6d8173603ed85
|
[] |
no_license
|
Promorphics/appstax-rsdk
|
c95e121472ef999ab8d71c9128130fbed60c6c26
|
eef10e854af82ae119879a5efcce02f4c90c4e48
|
refs/heads/master
| 2016-09-05T09:06:40.011601
| 2015-02-24T21:50:04
| 2015-02-24T21:50:04
| 25,992,318
| 0
| 1
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,822
|
r
|
Session.R
|
# Extract the HTTP status code from a gathered header text.  The header may
# contain several "HTTP/1.1 <code> <reason>" status lines (e.g. after
# redirects); the LAST one wins.  Defaults to "200" when no status line is
# present.
#
#   h  an object whose $value() returns the raw header text (CRLF-separated),
#      e.g. an RCurl basicTextGatherer.
.getCode <- function(h) {
  lines <- strsplit(h$value(), "\r\n")[[1]]
  res <- "200"
  # BUG FIX: the original looped from index 2, skipping the first header
  # line -- so a single-response header like "HTTP/1.1 404 ..." was never
  # inspected and the default "200" was returned.  (It also kept a dead
  # `check` variable, removed here.)  Scan every line instead.
  for (line in lines) {
    tokens <- strsplit(line, " ")[[1]]
    if ("HTTP/1.1" %in% tokens[1]) {
      res <- tokens[2]
    }
  }
  res
}
#' The base request object for making calls to TSAP's API
#'
#' Reference class holding connection and credential state, plus a doGET()
#' method that builds a URL, performs the request via RCurl and parses the
#' JSON response with fromJSON().
.Session <- setRefClass("Session",
  fields=list(
    protocol = "character",
    host = "character",
    port = "character",
    context = "character",
    curl_session = "character",
    username = "character",
    password = "character"
  ),
  methods = list(
    # Perform a GET against `endpoint` with optional query `params`.
    # Returns the parsed JSON on success; otherwise an InvalidQueryError
    # (presumably defined elsewhere in the package -- confirm).
    doGET = function(endpoint, params=list()) {
      url <- .buildUrl(.self, endpoint, params)
      h <- basicTextGatherer()
      # NOTE(review): this prints the full URL INCLUDING the basic-auth
      # credentials embedded by .buildUrl -- consider removing or masking.
      cat(url)
      res <- tryCatch(
        {
          getURL(url, httpHeader = c(Accept = "application/json"), headerfunction = h$update) #, curl=.self$curl_session
        },
        error = function(e) {
          # NOTE(review): cat() cannot print a condition object and will
          # itself error; conditionMessage(e) is likely what was intended.
          # Also, the handler's return value leaves `res` non-character,
          # so the nchar(res) test below may fail after an error.
          cat(e)
        })
      if (nchar(res) > 1) {
        response <- fromJSON(res)
      }
      else {
        response = InvalidQueryError(code = .getCode(h), message = res)
      }
      response
    }
  )
)
#' Build a URL from an existing request object and an endpoint
#'
#' Assembles protocol://user:pass@host:port[context]endpoint and, when
#' `params` is non-empty, appends its named entries as a percent-encoded
#' query string (via RCurl's curlEscape).
.buildUrl <- function(req, endpoint, params = list()) {
  base <- paste(req$protocol, "://", req$username, ":", req$password, "@",
                req$host, ":", req$port, sep = "")
  if (!is.null(req$context)) {
    base <- paste(base, req$context, sep = "")
  }
  result <- paste(base, endpoint, sep = "")
  if (length(params) > 0) {
    keys <- attributes(params)$names
    encoded <- character(length(keys))
    for (k in seq_along(keys)) {
      encoded[k] <- paste(curlEscape(keys[k]), "=",
                          curlEscape(params[[keys[k]]]), sep = "")
    }
    result <- paste(result, "?", paste(encoded, collapse = "&"), sep = "")
  }
  result
}
#' Constructor for a new SDK object
#'
#' Builds a Session reference object pointing at the TSAP REST API, choosing
#' protocol and default port from the `secure` flag and filling in the
#' default host when none is supplied.
#'
#' @export
SDK <- function(username, password, host=character(), port=integer(), secure=T) {
  protocol <- if (secure) "https" else "http"
  default_port <- if (secure) "443" else "80"
  # Fall back to the protocol's default port / the production host when the
  # caller left them empty.
  if (length(port) == 0) {
    port <- default_port
  }
  if (length(host) == 0) {
    host <- "surfcity.timeseriesgroup.com"
  }
  .Session(protocol = protocol, host = host, port = paste(port, "", sep = ""),
           username = username, password = password, curl_session = "TSAP",
           context = "/rest")
}
|
931d5209f4744dc01f113666a9c87ba14bd039c7
|
bee32e384df3fa7dc62cc65b21f3ea14b6772c53
|
/man/dat.sc.Rd
|
94b799c228bc185682280915e3eca8402915c43b
|
[] |
no_license
|
cran/fragility
|
29e47294483c1dfe79d4838097cdd778020747e5
|
b623c1726d397388d9bb175d29be9bbbab429e2a
|
refs/heads/master
| 2022-09-09T04:23:55.418462
| 2022-08-29T20:10:06
| 2022-08-29T20:10:06
| 254,030,469
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,411
|
rd
|
dat.sc.Rd
|
\name{dat.sc}
\alias{dat.sc}
\docType{data}
\title{
Network Meta-Analysis of Smoking Cessation
}
\description{
An illustrative example of network meta-analysis with a binary outcome.
}
\usage{data("dat.sc")}
\format{
A data frame containing 24 studies and 4 treatments, with the following 4 variables.
\describe{
\item{\code{sid}}{a numeric vector indicating study IDs.}
\item{\code{tid}}{a numeric vector indicating treatment IDs.}
\item{\code{e}}{a numeric vector indicating event counts.}
\item{\code{n}}{a numeric vector indicating sample sizes.}
}
}
\details{
The dataset was originally reported by Hasselblad (1998) and reanalyzed by Lu and Ades (2006) using formal network meta-analysis methods. It investigates the effects of four treatments on smoking cessation, including 1) no contact; 2) self-help; 3) individual counseling; and 4) group counseling. The outcome is binary, which is the smoking cessation status (yes or no) of a participant after treatment.
}
\source{
Hasselblad V (1998). "Meta-analysis of multitreatment studies." \emph{Medical Decision Making}, \bold{18}(1), 37--43. <\doi{10.1177/0272989X9801800110}>
Lu G, Ades AE (2006). "Assessing evidence inconsistency in mixed treatment comparisons." \emph{Journal of the American Statistical Association}, \bold{101}(474), 447--59. <\doi{10.1198/016214505000001302}>
}
\keyword{datasets}
|
8cf9239217719d037a998c369eb8c64d168dfc19
|
d9d00a88f700653b2c4af2b1845f82dd970d0915
|
/get_dem.R
|
9f78ec225c0e2d99c8941f0a2056c9b237b7ddc2
|
[] |
no_license
|
Mayeda-Research-Group/CancerAD-diagnosissims
|
f2c631255261634aa25cec11e09b9cde0e5ad475
|
cd6c5e5b14d096f96cadd1f3d88c87b47af79dd8
|
refs/heads/main
| 2023-03-17T04:04:05.758211
| 2023-03-09T21:14:41
| 2023-03-09T21:14:41
| 326,770,344
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,454
|
r
|
get_dem.R
|
#This script contains all of the functions for dementia incidence rates
# test<-rationalfit(AD_in_formod$Age[2:7], AD_in_formod$Rate_all[2:7], d1=4,d2=5)
# p1 <- test$p1; p2 <- test$p2
# ys <- polyval(p1,seq(0,40,1/12)) / polyval(p2,seq(0,40,1/12))
# plot(AD_in_formod$Age[2:7], AD_in_formod$Rate_all[2:7], type="l", col = "blue", xlim=c(0,40), ylim=c(0,0.3))
# points(seq(0,40,1/12), Re(ys), col="red")
# points(AD_in_formod$Age, AD_in_formod$Rate_all, col="blue", pch=15)
#Define reference dementia incidence rate as a function of time using ACT data
# Reference dementia incidence rate as a function of time (ACT data).
# The rational-function fit stored in Dem_inc (p1/p2 polynomial
# coefficients, evaluated with pracma::polyval) gives the base rate, which
# is then scaled by a calibration correction looked up in 5-unit time bins.
#
#   time               time/age at which to evaluate the rate
#   S1.dem.rateratio   protective IRR for the survival characteristic
#   Sprev              prevalence of the survival characteristic
#   S1.mort.rateratio  mortality rate ratio for the characteristic
#   calibration_data   data frame of correction columns named
#                      "demrate.adj.S_IRR<..>_prev<..>_mortRR<..>"
#   Dem_inc            list with p1/p2 rational-fit coefficients
get.dem <- function(time, S1.dem.rateratio, Sprev, S1.mort.rateratio,
                    calibration_data, Dem_inc){
  # No correction needed when the characteristic is neutral or absent.
  demratecorrection <-
    if (S1.dem.rateratio == 1 | Sprev == 0) {as.matrix(rep(1, 6))
    } else{
      calibration_data[, paste0("demrate.adj.S_IRR", S1.dem.rateratio,
                                "_prev", Sprev, "_mortRR", S1.mort.rateratio)]
    }
  #I had to fill in NAs to put this in a dataframe so now get rid of them
  demratecorrection <- demratecorrection[!is.na(demratecorrection)]
  max_index = length(demratecorrection)
  # Map time to a 5-unit bin index (exact multiples land in the next bin),
  # clamped to the last available correction.
  if(time %% 5 == 0){
    index = time/5 + 1
  } else{
    index = ceiling(time/5)
  }
  if(index > max_index){index = max_index}
  # Rational-function fit: p1(time) / p2(time); Re() drops any spurious
  # imaginary component from the fit coefficients.
  AD_rate<- polyval(Dem_inc$p1,time) / polyval(Dem_inc$p2,time)
  return(Re(AD_rate)*demratecorrection[index])
}
#------------------------------------------------------------------
# Defining dementia incidence rates based on time (i.e. age)
#------------------------------------------------------------------
#For those with survival characteristic, reference rate * protective IRR
# Dementia incidence for individuals WITH the survival characteristic:
# the reference rate scaled by the protective incidence-rate ratio.
get.dem.S1 <- function(time, S1.dem.rateratio, Sprev, S1.mort.rateratio,
                       calibration_data, Dem_inc){
  reference <- get.dem(time, S1.dem.rateratio, Sprev, S1.mort.rateratio,
                       calibration_data, Dem_inc)
  reference * S1.dem.rateratio
}
#For those without survival characteristic, dementia incidence rate is reference rate
# Dementia incidence for individuals WITHOUT the survival characteristic:
# simply the reference rate.
get.dem.S0 <- function(time, S1.dem.rateratio, Sprev, S1.mort.rateratio,
                       calibration_data, Dem_inc){
  get.dem(time, S1.dem.rateratio, Sprev, S1.mort.rateratio,
          calibration_data, Dem_inc)
}
# #Unit test
# get.dem(40, 1,0,1,calibration_data,Dem_inc)
# get.dem.S1(6.5, 0.7,0,1,calibration_data,Dem_inc)
# get.dem.S0(6.5, 0.7,0,1,calibration_data,Dem_inc)
|
27695ab273c4747271144a8557199c00af940f03
|
5c511a121d460a6f2130b2cb269ab060b24b257f
|
/Part2/Session1/Script01.R
|
d6a34958bf118c1b704238f9028bd240bcc4b4d6
|
[] |
no_license
|
ricardgardella/BSG_Code
|
0cdd8471d6e66de725f94ce518edd00909e720fd
|
dfb194676bcff01ed471c70f7d808f105ebec562
|
refs/heads/master
| 2020-06-26T04:19:21.734945
| 2019-07-29T21:19:55
| 2019-07-29T21:19:55
| 199,527,419
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,752
|
r
|
Script01.R
|
#
# Exercise descriptive analysis of genetic markers.
#
#
# First part: SNP Data
#
# Load the CHB chromosome-1 SNP subset; this creates the object Ysub, a
# matrix of two-letter genotype strings (individuals x SNPs).
filename <- url("http://www-eio.upc.es/~jan/Data/bsg/Chromosome1_CHBPopSubset.rda")
load(filename)
#install.packages("genetics")
library(genetics)
dim(Ysub)
class(Ysub)
Ysub[1:5,1:5]
n <- nrow(Ysub)
p <- ncol(Ysub)
n
p
# Recode the "NN" genotype as missing and compute the overall percentage
# of missing genotype calls.
Ysub[Ysub=="NN"] <- NA
perc.mis <- 100*sum(is.na(Ysub))/(n*p)
perc.mis
# Genotype summaries for the first three SNPs (genetics package).
SNP1 <- Ysub[,1]
SNP1.g <- genotype(SNP1,sep="")
summary(SNP1.g)
SNP2 <- Ysub[,2]
SNP2.g <- genotype(SNP2,sep="")
summary(SNP2.g)
SNP3 <- Ysub[,3]
SNP3.g <- genotype(SNP3,sep="")
summary(SNP3.g)
# Count the missing (NA) entries of a vector.
nmis <- function(x) {
  sum(is.na(x))
}
# Missingness per individual (rows), as a percentage of the p SNPs.
nmis.per.ind <- apply(Ysub,1,nmis)
pmis.per.ind <- 100*nmis.per.ind/p
plot(1:n,pmis.per.ind,xlab="Individual",ylab="Perc. Missing")
# Missingness per SNP (columns), as a percentage of the n individuals.
nmis.per.snp <- apply(Ysub,2,nmis)
pmis.per.snp <- 100*nmis.per.snp/n
plot(1:p,pmis.per.snp,xlab="SNP",ylab="Perc. Missing")
# Number of SNPs with no observed genotypes at all.
sum(nmis.per.snp==n)
# Allele frequencies of SNP3 computed by hand from its genotype counts.
x <- table(SNP3)
x
sum(x)
sum(x,na.rm=TRUE)
n
# Standard allele count: two copies per homozygote, one per heterozygote.
# Assumes table(SNP3) yields the genotypes in order (e.g. CC, CT, TT) —
# verify against the summary(SNP3.g) output below.
pC <- (2*x[1]+x[2])/(2*sum(x,na.rm=TRUE))
pT <- (2*x[3]+x[2])/(2*sum(x,na.rm=TRUE))
pC
pT
pC+pT
summary(SNP3.g)
# Keep only SNPs with at least one observed genotype.
Y2 <- Ysub[,nmis.per.snp < n]
# Relative frequency of the first allele of a SNP
# (genotype strings with no separator, e.g. "AT").
affirst <- function(x) {
  geno.summary <- summary(genotype(x, sep = ""))
  geno.summary$allele.freq[1, 2]
}
# First-allele frequency for one SNP, then for every retained SNP in Y2.
affirst(Ysub[,1])
af.first.allele <- apply(Y2,2,affirst)
hist(af.first.allele)
# Minor allele frequency of a SNP; monomorphic markers (frequency 1)
# are reported as 0.
maf <- function(x) {
  geno.summary <- summary(genotype(x, sep = ""))
  freq <- min(geno.summary$allele.freq[, 2], na.rm = TRUE)
  freq[freq == 1] <- 0
  freq
}
# Minor allele frequency per retained SNP.
maf.per.snp <- apply(Y2,2,maf)
hist(maf.per.snp)
#
# Second part: STR data
#
# NOTE(review): rm(list=ls()) wipes the whole workspace, including the
# helper functions defined above; acceptable only in this interactive
# exercise context.
rm(list=ls())
filename <- url("http://www-eio.upc.es/~jan/Data/bsg/JapanaseSTRs.rda")
load(filename)
ls()
Japanese[1:5,1:10]
# STR marker columns start at column 6; each individual occupies two rows
# (one allele per row), hence n = nrow(X)/2 individuals.
X <- Japanese[,6:ncol(Japanese)]
n <- nrow(X)/2
p <- ncol(X)
n
p
# -9 encodes a missing allele; recode it as NA.
sum(X==-9)
X[X==-9] <- NA
sum(is.na(X))
class(X)
STR1 <- X[,1]
table(STR1,useNA="always")
length(unique(STR1))
# Number of distinct alleles observed at a marker, ignoring missing values.
n.alleles <- function(x) {
  observed <- x[!is.na(x)]
  length(unique(observed))
}
## STR1: allele counts, genotype reconstruction and genotype-space size.
n.alleles(STR1) # number of alleles
table(STR1) # allele counts
# Drop missing alleles before pairing.
# NOTE(review): if only one allele of an individual is missing, removing it
# shifts the pairing of every subsequent row; verify against the raw data.
STR1 <- STR1[!is.na(STR1)]
na <- length(STR1)
na
# Consecutive entries (odd, even) are the two alleles of one individual.
index.1 <- seq(1,na,2)
index.2 <- seq(2,na,2)
allele.1 <- STR1[index.1]
allele.2 <- STR1[index.2]
# Order each allele pair (smaller first). BUG FIX: the original overwrote
# allele.1 with pmin() before computing pmax(), so allele.2 always ended up
# equal to the raw second allele and unordered pairs produced wrong
# genotypes (e.g. alleles 5,3 became "3/3"). Use a temporary instead.
allele.min <- pmin(allele.1, allele.2)
allele.2 <- pmax(allele.1, allele.2)
allele.1 <- allele.min
allele.1
allele.2
# Genotype labels "a/b" and their observed counts.
individuals <- paste(allele.1,allele.2,sep="/")
g.counts <- table(individuals) # genotype counts
g.counts
unique(names(g.counts))
names(g.counts)
sum(g.counts)
length(g.counts) # number of genotypes
K <- n.alleles(STR1) # number of alleles
K
# With K alleles there are K(K+1)/2 possible genotypes.
n.genotypes <- 0.5*K*(K+1)
n.genotypes
# Allele richness across all STR markers.
n.alleles.per.STR <- apply(X,2,n.alleles)
barplot(table(n.alleles.per.STR))
|
f8793c2b34f1307d0e9caf4f1a78b2324b1db9ca
|
defbef9b615b0adc30878fe31581e2c620ef1360
|
/run_analysis.R
|
1639e24dbea60df77d2269bf6f931c1ebef777fd
|
[] |
no_license
|
n-za/GetAndCleanData
|
0e4bcf145eadb9baa1d31bfb32f2979d727993d5
|
d8e4278b3ebaeccf7ad6c956515fca2b8be44f22
|
refs/heads/master
| 2021-01-20T09:01:57.801964
| 2014-04-26T08:37:31
| 2014-04-26T08:37:31
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 5,642
|
r
|
run_analysis.R
|
## Emits a warning when the file does not exist
## return 1 in the error case and 0 otherwise
## Warn when `filename` (file or directory) is absent.
## Returns 1 if missing, 0 otherwise.
CheckFile <- function(filename) {
    if (file.exists(filename)) {
        return(0)
    }
    warning(sprintf("Directory or File %s does not exist.", filename))
    1
}
## Checks wether the current directory contains all the subdirectories and file neeeded.
## returns the number of missing objects (file or dir)
## at the exit the current directory is the same at the entry
## Check that the current directory contains all files and subdirectories
## required by the analysis.
##
## Returns the number of missing objects (files or directories).
## BUG FIX: the original walked into "test"/"train" with setwd(), which
## errors outright when either directory is missing (defeating the graceful
## missing-file count) and temporarily mutates the working directory.
## Building paths with file.path() checks the same 11 entries without ever
## changing directory.
SanityCheck <- function() {
    top.level <- c("features_info.txt", "features.txt", "activity_labels.txt",
                   "test", "train")
    test.files <- file.path("test",
                            c("X_test.txt", "subject_test.txt", "y_test.txt"))
    train.files <- file.path("train",
                             c("X_train.txt", "subject_train.txt", "y_train.txt"))
    sum(vapply(c(top.level, test.files, train.files), CheckFile, numeric(1)))
}
## ReadOnePart load the contents of the directory defined by suffix (either test or train)
## actlabel is a data.frame containing the labels for the activities
## actlabel[,1] is the level of the factor
## acrlabel[,2] is the label of the factor
## returns a data.frame whose 1st column is the dir name, the 2nd the activity,
## the sequel contains all the remaining 561 variables
ReadOnePart <- function(suffix, actlabel) {
    # Data files live in a subdirectory named after the split ("test"/"train");
    # the working directory is restored with setwd("..") below.
    setwd(suffix)
    # Numeric measurement columns.
    df <- read.table(sprintf("X_%s.txt", suffix), colClasses = c("numeric"))
    # One subject id per row.
    subject <- read.table(sprintf("subject_%s.txt", suffix), colClasses = c("factor"))
    # Activity codes, converted to a labelled factor using actlabel
    # (column 1 = level, column 2 = label).
    y <- read.table(sprintf("y_%s.txt", suffix), colClasses = c("factor"))
    y <- factor(y[,1], levels = sapply(actlabel[,1], as.character), labels = actlabel[,2])
    setwd("..")
    # Result: split name, activity, subject, then all measurement variables.
    cbind(suffix, y, subject, df)
}
## ExtractOnePart select among the 561 variables those with a variable name containing the regexp pattern
## the labels of the variables are defined in features (1st column feature index, 2nd column feature label)
## common contains the data from test and train
## Subset from `common` the measurement columns whose feature label matches
## `pattern`, naming them after the matching labels.
## `features`: column 1 = feature index, column 2 = feature label.
## The first three columns of `common` are dirname, activity and subject,
## hence the offset of 3 when mapping feature indices to columns.
ExtractOnePart <- function(features, pattern, common) {
    matching <- grep(pattern, features[, 2])
    selected <- common[, features[matching, 1] + 3]
    names(selected) <- features[matching, 2]
    selected
}
## MergeTrainTest read the data in the train and test directory
## actlabel is the data.frame with the activity labels
## Load the test and train splits and stack them into one data set
## (test rows first, matching the original ordering).
## `actlabel`: data.frame mapping activity levels (col 1) to labels (col 2).
MergeTrainTest <- function(actlabel) {
    rbind(ReadOnePart("test", actlabel),
          ReadOnePart("train", actlabel))
}
## GetFactor retrieves the pos-th component of an interaction with sep="."
## Extract the pos-th component of a "."-separated interaction label.
GetFactor <- function(inter, pos) {
    parts <- unlist(strsplit(inter, ".", fixed = TRUE))
    parts[pos]
}
## Main procedure
## datadir is the directory containing the unzipped data
## tidyfile is the tidy data set of question 5 as a csv file with header
run_analysis <- function(datadir = ".", tidyfile = "final.csv") {
saveddir <- getwd() # save the current directory
# switch to the data directory
# NOTE(review): if any step below errors, the working directory is not
# restored; on.exit(setwd(saveddir), add = TRUE) would be safer.
setwd(datadir)
# check wether the data needed are there
stopifnot(SanityCheck() == 0)
# read the activity labels
actlabel <- read.table("activity_labels.txt")
print("1. Merging the training and the test sets to create one data set.")
common <- MergeTrainTest(actlabel)
print("Done.")
print("2. Extracting only the measurements on the mean and standard deviation for each measurement.")
# read the feature labels
features <- read.table("features.txt")
# read the variables containing mean() in their names
m <- ExtractOnePart(features, "mean\\(\\)", common)
# read the variables containing std() in their names
s <- ExtractOnePart(features, "std\\(\\)", common)
# concatenate columnwise the two data.frames
Extract <- cbind(m,s)
print("Done.")
print("3. Using descriptive activity names to name the activities in the data set")
print("solution: use the labels in activities_label.txt")
print("Done along with the first step.")
print("4. Appropriately labeling the data set with descriptive activity names.")
print("solution: define the activities as labels of a factor")
print("labels for activities in the data set are:")
print(unique(common[,2]))
print("Done.")
print("5. Creating a second, independent tidy data set with the average of each variable for each activity and each subject.")
# retrieve the grouping columns as a data.frame of factors
Groups <- common[,c(2,3)]
# split the Extract data.frame into a list of chunks,
# each list item corresponds to a factor combinaison
sp <- split(Extract, Groups)
# compute the means for each factor combinaison columnwise.
tmp <- sapply(sp, function(x) colMeans(x, na.rm = T))
# reatrieve the labels of the factor combinaison
fac <- dimnames(tmp)[2][[1]]
# Extract the Activity component of the factor combinaison (interaction)
Activities <- sapply(fac, GetFactor, 1)
# Extract the Subject component of the factor combinaison
Subjects <- sapply(fac, GetFactor, 2)
# Add the activities and the Subjects as the first two column of the tidy data.frame
# we want Activities Subjects and the mean/std as rows and not as columns: hence the transpose t
Averages <- as.data.frame(t(rbind(Activities, Subjects, tmp)))
# output the data.frame as csv file with header into the file named tidyfile
write.csv(Averages, tidyfile, row.names = FALSE)
print(sprintf("*** The result is written in %s.", tidyfile))
print("Done.")
setwd(saveddir)
}
run_analysis()
|
d6d5d164890044b1811ff60d738ceda313d66a90
|
bfffb82378fa9814e656f87df8532a30b25f68d1
|
/operatorDecider.R
|
4e90dd495f93063b278686f5ba855674cded1251
|
[] |
no_license
|
burgus7/operator-decider
|
9723c7c4698ef4303406cf41dc0d7e5c98284d16
|
4da2b8aa622a08af783881492e18a460b930ee73
|
refs/heads/master
| 2020-04-14T08:37:31.899375
| 2019-01-03T00:37:30
| 2019-01-03T00:37:30
| 163,739,509
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,564
|
r
|
operatorDecider.R
|
# Brute-force "operator decider": tries every assignment of the four basic
# operators to the four gaps between the numbers in `num`, evaluates each
# candidate expression left to right, and remembers an assignment whose
# value equals `ans`.
#
# Encoding: each gap owns a block of 4 slots (add-term, subtract-term,
# multiply-factor, divide-divisor). Unused slots hold the neutral element
# (0 for +/-, 1 for */ /), so one evaluation formula covers every choice.
# NOTE(review): rm(list = ls()) wipes the caller's workspace; fine for a
# stand-alone script, hostile when this file is sourced.
rm(list = ls())
#variables
num <- c(14, 2, 7, 3, 9)
ans <- 10
temp <- vector(mode = "numeric", length = (length(num)-1)*4+1)
base <- temp
result <- ans + 1
yay <- temp
## initialize temp
# Set the multiply/divide slots of every block to the neutral value 1.
m <- 4
while (m < length(base)) {
base[m] <- 1
base[m + 1] <- 1
m <- m + 4
}
temp <- base
temp[1] <- num[1]
## iterate through num
# a, b, c, d pick which of the 4 slots in each gap receives the next number.
for (a in 1:4) {
temp[a+1] <- num[2]
for (b in 1:4) {
temp[b+5] <- num[3]
for (c in 1:4) {
temp[c+9] <- num[4]
for (d in 1:4) {
temp[d+13] <- num[5]
# print(temp)
## calculate result
# Evaluate ((result + add - sub) * mul / div) for each 4-slot block.
k <- 2
result <- num[1]
while (k < (length(temp) - 2)) {
result <- (result + temp[k] - temp[k + 1]) * temp[k + 2] / temp[k + 3]
k <- k + 4
}
# print(result)
if (result == ans) {
# print ("success!")
yay <- temp
}
# Restore each slot to its neutral value before the next candidate.
temp[d+13] <- base[d + 13]
}
temp[c + 9] <- base[c + 9]
}
temp[b+5] <- base[b + 5]
}
temp[a+1] <- base[a + 1]
}
#print yay
# Reconstruct a printable expression from the winning slot assignment.
# NOTE(review): matching slot values back to `num` emits duplicate terms if
# the same number occurs in several positions; confirm num is distinct.
final <- c("Answer:", num[1])
print (yay)
for (i in 2:length(yay)){
for (j in 2:length(num)) {
if(yay[i]==num[j]) {
if(i %% 4 == 2){
final <- c(final, "+", num[j])
} else if (i %% 4 == 3) {
final <- c(final, "-", num[j])
} else if (i %% 4 == 0) {
final <- c(final, "*", num[j])
} else if (i %%4 == 1) {
final <- c(final, "/", num[j])
}
}
}
}
# NOTE(review): paste(x, sep = " ") on a single vector returns it unchanged,
# element by element; paste(final, collapse = " ") was likely intended.
paste(final, sep = " ")
|
0eb0b2ae654c382cc9d40f8258377c608b879c12
|
73129c7bf5a84f9a841f11de695f5fabea3d8073
|
/man/dist2motif2.Rd
|
dfcc0a6ac0df3d6aa2c06ccb1c34f79a842949b3
|
[
"MIT"
] |
permissive
|
nriddiford/svBreaks
|
34af47eca66f686be858480791a3160c5053ef4d
|
5e128468a41e4c206a225e0d81f2752bb6225f35
|
refs/heads/master
| 2021-12-29T11:22:06.796312
| 2021-12-19T14:51:55
| 2021-12-19T14:51:55
| 102,706,907
| 3
| 1
| null | 2019-03-18T11:11:17
| 2017-09-07T07:41:14
|
R
|
UTF-8
|
R
| false
| true
| 597
|
rd
|
dist2motif2.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/dist2motif2.R
\name{dist2motif2}
\alias{dist2motif2}
\title{dist2Motif2
Calculate the distance from each breakpoint to closest motif in a directory of files}
\usage{
dist2motif2(
...,
df,
breakpoints,
feature_file,
featureDir = system.file("extdata", "features", package = "svBreaks"),
chroms = c("2L", "2R", "3L", "3R", "4", "X", "Y"),
sim = FALSE,
position = "centre"
)
}
\description{
dist2Motif2
Calculate the distance from each breakpoint to closest motif in a directory of files
}
\keyword{motif}
|
3982b37b016aaa41a67bad316dd6e9528e4b9bdf
|
120ae24d48e4a702eadd34ea2d0700b469368f27
|
/man/gtable_spacer.Rd
|
0896a4d5fb2e26145b5367fa0cc2099b4c9d484e
|
[] |
no_license
|
baptiste/gtable
|
3341dbe506e32f5b10f5ef2d492d4dfb73dc8c09
|
bcbcccfef7c910eb332efa4d80ad62821c61ba91
|
refs/heads/master
| 2020-06-12T08:28:18.264098
| 2019-06-26T07:57:33
| 2019-06-26T07:57:33
| 20,383,206
| 9
| 3
| null | null | null | null |
UTF-8
|
R
| false
| true
| 689
|
rd
|
gtable_spacer.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/gtable-layouts.r
\name{gtable_spacer}
\alias{gtable_spacer}
\alias{gtable_row_spacer}
\alias{gtable_col_spacer}
\title{Create a row/col spacer gtable.}
\usage{
gtable_row_spacer(widths)
gtable_col_spacer(heights)
}
\arguments{
\item{widths}{unit vector of widths}
\item{heights}{unit vector of heights}
}
\value{
A gtable object
}
\description{
Create a zero-column or zero-row gtable with the given heights or widths
respectively.
}
\seealso{
Other gtable construction: \code{\link{gtable_col}},
\code{\link{gtable_matrix}}, \code{\link{gtable_row}},
\code{\link{gtable}}
}
\concept{gtable construction}
|
9e14d7c908ea5905a6578b343a7b4f3bd7ad3a99
|
f5428950323f84deb2339fff37cf3671b32ac8cd
|
/R/pride.R
|
259867312d4033a390dae65c61c02d9a801ffa9b
|
[] |
no_license
|
Chebuu/foa
|
a867d13e3f15f731bb87a7f53f3dd8a8adf6294f
|
2d4ff80cdab88d4fda4514d21828ec177ee63fbe
|
refs/heads/master
| 2023-03-03T10:05:25.349753
| 2021-02-17T15:12:19
| 2021-02-17T15:12:19
| 339,762,324
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 97
|
r
|
pride.R
|
# http://www.ebi.ac.uk/pride/archive/projects/PXD000764
# http://iomics.ugent.be/tabloidproteome
|
a2c53b76fb1a65d7b3fd03c001fcd64478711a05
|
6f48069b3735ad82603c3d3b00683a99ea1406fb
|
/Data_Modeling_Chap1-2.R
|
608c416ec6a41cc84c3669e7f1232f641590c19e
|
[] |
no_license
|
christianpark6/Data-Modeling-Statistic-Book-Assignments
|
7dbfa568b8e129d4fd801df96f7252d4845fc45f
|
5b08b56e5287d825e886fce682321499b36089f6
|
refs/heads/master
| 2022-12-31T13:30:16.330606
| 2020-10-18T19:10:18
| 2020-10-18T19:10:18
| 289,818,649
| 1
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,918
|
r
|
Data_Modeling_Chap1-2.R
|
# Interactive console transcript: regressions for the Chapter 1-2 exercises
# (glow-worms, rails-to-trails, voltage decay, baseball game times, county
# health, pines, horse prices). Statements are kept verbatim; review notes
# flag lines that do not run as written.
setwd('~/Chapter_1-2')
glowworms <- read.csv('~/Chapter_1-2/GlowWorms.csv')
glowworm_reg = lm(Eggs~Lantern, data = glowworms)
summary(glowworm_reg)
with(glowworms,plot(Lantern,Eggs))
plot(glowworm_reg)
predict(glowworm_reg,data.frame(Lantern=14),interval='confidence')
RailsTrails <- read.csv('~/Chapter_1-2/RailsTrails.csv')
with(RailsTrails, plot(squarefeet,adj2007))
railstrails_reg <-lm(adj2007~squarefeet, data = RailsTrails)
summary(railstrails_reg)
volts <- read.csv('~/Chapter_1-2/Volts.csv')
volts_reg = lm(Voltage~Time, data = volts)
# NOTE(review): the next line is missing a closing parenthesis and does not
# parse as written.
with(volts, plot(Time,log(Voltage))
par(mfrow=c(2,2))
plot(volts_reg)
# NOTE(review): volts$logvoltage is used here but only created a few lines
# below; these statements fail when executed in this order.
with(volts, plot(Time, logvoltage))
logvolts_reg = lm(logvoltage~Time, data = volts)
# NOTE(review): `Voltage` is not defined in the global environment, so this
# assignment errors (the working version is the volts$logvoltage line below).
logvoltage= log(Voltage)
log(5)
with(volts,log(Voltage))
volts$logvoltage <- with(volts,log(Voltage))
summary(logvolts_reg)
par(mfrow =c(2,2))
plot(logvolts_reg)
baseballtimes <- read.csv('~/Chapter_1-2/BaseballTimes2017.csv')
times <- with(baseballtimes,plot(Time))
timesvpitches = lm(Pitchers~Time, data = baseballtimes)
summary(timesvpitches)
timesvruns = lm(Runs~Time, data = baseballtimes)
summary(timesvruns)
confint(timesvruns)
confint(timesvpitches)
plot(timesvruns)
with(baseballtimes, plot(Time,Runs))
with(baseballtimes, plot(Time,Pitchers))
with(baseballtimes, cor(Time,Pitchers))
with(baseballtimes, cor(Time,Runs))
summary(timesvpitches)
summary(timesvruns)
countyhealth <- read.csv('~/Chapter_1-2/CountyHealth.csv')
countyhealth$logmd <- with(countyhealth, log(MDs))
countyhealth$loghospital <-with(countyhealth, log(Hospitals))
with(countyhealth, plot(Hospitals,MDs))
with(countyhealth, plot(loghospital,logmd))
pines <- read.csv('~/Chapter_1-2/Pines.csv')
pinesheight_reg = lm(Hgt97~Hgt90, data = pines)
summary(pinesheight_reg)
anova(pinesheight_reg)
plot(pinesheight_reg)
with(pines, plot(Hgt90,Hgt97))
pines$sqrt90 <- with(pines, sqrt(Hgt90))
pines$sqrt97 <- with(pines, sqrt(Hgt97))
with(pines,plot(sqrt90,sqrt97))
confint(pinesheight_reg)
pinesheight_reg2 = lm(Hgt97~Hgt96, data = pines)
summary(pinesheight_reg2)
anova(pinesheight_reg2)
confint(pinesheight_reg2)
# NOTE(review): `horsem` is referenced here before it is defined below, and
# several of the following lines are abandoned attempts kept from the
# console session (filter() without dplyr loaded, `data` passed as a bare
# undefined name, horseprices.Sex instead of horseprices$Sex).
horsemdf <-data.frame(horsem)
horseprices<- read.csv('~/Chapter_1-2/HorsePrices.csv')
horsepricevage = lm(Price~Age, data =horseprices)
horsepricevheight = lm(Price~Height, data)
horsem <- filter(horseprices, Sex == 'm')
horsem <- data.frame(horseprices, Sex == 'm')
my_data <- filter(horseprices.Sex == 'm')
horseprices[1:4, 1:50]
df1 <- data.frame(horseprices[1,2])
horsem <- horseprices[horseprices$Sex == "m" ,]
horsef<- horseprices[horseprices$Sex == "f",]
horsempricevage = lm(Price~Age, data = horsem)
horsempricevheight = lm(Price~Height, data = horsem)
summary(horsempricevage)
summary(horsempricevheight)
horsefpricevage = lm(Price~Age, data = horsef)
horsefpricevheight = lm(Price~Height, data = horsef)
summary(horsefpricevage)
summary(horsefpricevheight)
confint(horsefpricevage)
confint(horsempricevheight)
|
f8010a33d3c50d99e059cae1b54ad315c3555eef
|
12c20cd032553e7fd8e23238ee28e8d3e8d32be1
|
/misc/hematopoiesis/prep-jan2021/buildTable.R
|
a8baa1ab6606b8f048e9e238ece6c5e12e1484ec
|
[
"MIT"
] |
permissive
|
PriceLab/TrenaMultiScore
|
5b33bdc4c8a00870c8a76523ed4542f76e338717
|
c6d91402d83534fee27d157a933a0c2687a7b3d3
|
refs/heads/master
| 2022-09-03T03:52:27.013503
| 2022-08-19T21:00:22
| 2022-08-19T21:00:22
| 233,648,550
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 12,511
|
r
|
buildTable.R
|
#------------------------------------------------------------------------------------------------------------------------
library(httr)
library(jsonlite)
library(raster) # for cv
library(RUnit)
library(igvR)
library(TrenaMultiScore)
library(TrenaProjectErythropoiesis)
library(org.Hs.eg.db)
#------------------------------------------------------------------------------------------------------------------------
if(!exists("tpe"))
tpe <- TrenaProjectErythropoiesis(quiet=FALSE)
#------------------------------------------------------------------------------------------------------------------------
# Run every unit test defined in this script, in the original order.
runTests <- function() {
  test_getRegion()
  test_CHD4()
  test_build.one()
  test_goi()
}
#------------------------------------------------------------------------------------------------------------------------
## Query the local geneLoc web service (port 8000) for the hg38 coordinates
## of gene `goi`, then widen the interval by `shoulder` bases on each side.
## Returns list(chrom, start, end).
## NOTE(review): `shoulder` is both sent to the service and re-applied
## locally; confirm the service ignores its copy, otherwise the shoulder is
## applied twice.
getRegion <- function(goi, shoulder)
{
uri <- sprintf("http://localhost:8000/geneLoc")
body.jsonString <- sprintf('%s', toJSON(list(gene=goi, genome="hg38", shoulder=shoulder)))
r <- POST(uri, body=body.jsonString)
tbl.geneLoc <- fromJSON(httr::content(r)[[1]])
list(chrom=tbl.geneLoc$chrom, start=tbl.geneLoc$start-shoulder, end=tbl.geneLoc$end+shoulder)
} # getRegion
#------------------------------------------------------------------------------------------------------------------------
# Check getRegion() against known CHD4 coordinates (hg38, no shoulder).
test_getRegion <- function()
{
message(sprintf("--- test_getRegion"))
x <- getRegion("CHD4", 0)
checkEquals(x, list(chrom="chr12", start=6556886, end=6608430))
# entrez: chr12:6,570,082-6,614,524(GRCh38/hg38), Size:44,443 basesOrientation:Minus strand
# igv: chr12:6,569,082-6,608,379
} # test_getRegion
#------------------------------------------------------------------------------------------------------------------------
# End-to-end smoke test: build the full multi-score table for CHD4 (open
# chromatin, FIMO hits, conservation, GeneHancer, TSS distance, expression
# correlations, genic annotations, ChIP) and check that an expected set of
# TFs appears at least 3 times among high-correlation, low-p-value hits.
test_CHD4 <- function()
{
tms.chd4 <- TrenaMultiScore(tpe, "CHD4");
getGeneHancerRegion(tms.chd4)
findOpenChromatin(tms.chd4)
findFimoTFBS(tms.chd4, fimo.threshold=1e-3)
scoreMotifHitsForConservation(tms.chd4)
scoreMotifHitsForGeneHancer(tms.chd4)
addDistanceToTSS(tms.chd4)
mtx <- getExpressionMatrix(tpe, "brandLabDifferentiationTimeCourse-27171x28-namesCorrected")
addGeneExpressionCorrelations(tms.chd4, mtx)
# Early timepoints only (days 0-4, both replicates).
timepoints <- c("day0.r1", "day0.r2", "day2.r1", "day2.r2", "day4.r1", "day4.r2")
addGeneExpressionCorrelations(tms.chd4, mtx, "corEarly", timepoints)
addGenicAnnotations(tms.chd4)
addChIP(tms.chd4)
tbl <- getMultiScoreTable(tms.chd4)
tbl$targetGene <- "CHD4"
tbl.strongHits <- as.data.frame(sort(table(subset(tbl, abs(cor) > 0.75 & p.value < 1e-6)$tf)))
tbl.strongHits <- subset(tbl.strongHits, Freq >= 3)
checkTrue(all(c("E2F6", "ZNF410", "KLF16", "PATZ1", "ZNF263", "MAZ") %in% tbl.strongHits$Var1))
} # test_CHD4
#------------------------------------------------------------------------------------------------------------------------
# Assemble the genes of interest: pathway groups defined inline, combined,
# de-duplicated and upper-cased, with a handful of outdated symbols replaced
# by their current names (the active entries of name.fixes). Returns the
# sorted unique gene symbols.
get.goi <- function()
{
oxidative.phosphorylation <- c("ATP5PB","ATP5PF", "ATP5PO", "COX4I1", "COX5B", "COX7A2", "COX7C", "COX15", "CYC1",
"DLD", "MECP2", "NDUFA2", "NDUFA3", "NDUFA4", "NDUFA5", "NDUFA7", "NDUFA8", "NDUFA9",
"NDUFAB1", "NDUFB1", "NDUFB2", "NDUFB3", "NDUFB4", "NDUFB5", "NDUFB6", "NDUFB7",
"NDUFB8", "NDUFB9", "NDUFB10", "NDUFC2", "NDUFS1", "NDUFS2", "NDUFS3", "NDUFV1",
"NDUFS4", "NDUFS5", "NDUFS6", "NDUFS8", "NDUFV2", "NDUFV3", "UQCRB", "UQCRC1",
"UQCRC2", "UQCRH", "COX5A", "ATP5PD", "ATP5MG", "UQCRQ", "UQCR10", "STOML2",
"NDUFA12", "NDUFS7", "COX6C", "COX8A", "ECSIT", "NDUFB11", "SLC25A6", "VDAC1",
"SLC25A13", "NRF1", "TFAM", "ATP2A3", "CEBPA", "ATF2", "FXN", "SSBP1", "SUPV3L1",
"TOMM20", "TOMM40", "MTX2", "IMMT", "TIMM10", "TIMM9", "COA3", "TIMM21", "PAM16",
"GDAP1", "TOMM7", "CHCHD3", "ATAD3A", "RHOT1", "DNAJC11", "AGK", "TOMM22", "APOO",
"DNAJC19", "APOOL", "ROMO1", "TOMM5", "CISD1", "NME6", "MYC", "HBZ", "PRDX1", "PNKD",
"PSAT1", "KDM3A", "PM20D2")
translation <- c("MRPL49", "EEF1A2", "RPL10A", "RPL5", "RPL6", "RPL11", "RPL17", "RPL23A", "RPL37A", "RPL38", "RPLP0",
"RPLP1", "RPLP2", "RPS3A", "RPS5", "RPS10", "RPS12", "RPS13", "RPS14", "RPS18", "RPS19", "RPS21",
"RPS27", "RPS28", "SRP9", "SRP14", "SSR4", "EIF5B", "MRPS30", "SEC61B", "RPL35", "RPL36", "MRPS18B",
"MRPL13", "MRPL15", "MRPS16", "MRPS23", "MRPS33", "SARS2", "MRPL16", "PTCD3", "MRPS18A", "MRPL47",
"MRPS25", "MRPS9", "MRPS6", "MRPL41", "MRPL40", "MRPL38", "MRPL11", "MRPS34", "MRPL43", "MRPS36",
"MRPL54", "CHCHD1", "MRPL52", "ABCF1", "DDX3X", "GLE1", "ILF3", "NDUFA7", "TARBP2", "TCOF1", "PUM1",
"METAP1", "PUM2", "GEMIN5", "COA3", "HSPA14", "TRMT10C", "SRBD1", "UPF3B", "EIF2A", "SUPV3L1",
"TFAM", "MAP1A", "CLASP2", "TMOD2", "MYC", "CHMP1B", "VDAC1", "TOMM7")
ribosome <- c("RPL10A", "RPL5", "RPL6", "RPL11", "RPL17", "RPL23A", "RPL37A", "RPL38", "RPLP0", "RPLP1", "RPLP2",
"RPS3A", "RPS5", "RPS10", "RPS12", "RPS13", "RPS14", "RPS18", "RPS19", "RPS21", "RPS27", "RPS28",
"RPL35", "RPL36", "MRPL13", "MRPL15", "MRPS16", "MRPL16", "MRPS18A", "MRPS9", "MRPS6", "MRPL11",
"SRP9", "SRP14", "SSR4", "SEC61B", "GPAA1", "SEC16A", "UBAC2", "EEF1A2", "SLC25A6", "UQCRC2",
"TOMM20", "TOMM40", "MTX2", "CHP1", "TIMM10", "TIMM9", "TIMM21", "PAM16", "GDAP1", "TOMM7", "AGK",
"TOMM22", "DNAJC19", "ROMO1", "TOMM5", "HIST1H1D", "HNRNPU", "ILF3", "HNRNPM", "YBX1", "TCOF1",
"H1FX", "EIF5B", "DKC1", "HIST1H1B", "DNAJA1", "H2AFY", "UPF3B", "DDX6", "SUPV3L1", "PUM1", "PUM2",
"EDC4", "MYEF2", "ZC3H14", "DDX3X", "GLE1", "EIF2A", "TRMT10C", "MRM3", "NOP10", "DHX30", "CLASP2",
"SNRNP35", "PUF60", "GEMIN5", "RBMX", "TRMT6", "FYTTD1", "PM20D2", "CRIPT", "TM9SF3", "B2M", "TAF15",
"ELMO1", "DLD", "NDUFAB1", "PSAT1", "TARBP2", "HSPA14", "YES1", "PRKRA", "SARS2")
cholesterol <- c("EBP", "MVD", "SQLE", "HMGCR", "CYP51A1", "DHCR7", "HMGCS1", "ACLY", "MVK", "HSD17B7")
small.molecule.metabolism <- c("NDUFAF4", "B3GALT6", "PNMT", "HMGCR", "CSPG4", "ACOT1", "ACOT4", "CMBL",
"HMOX2", "MCCC2", "ELOVL2", "CHST14", "SPR", "NQO1", "DDAH1", "DCTPP1",
"DHCR24", "LPCAT2", "TCN1", "SQLE", "PANK1", "NME1", "ALDH1B1", "PUDP",
"MVK", "PLA2G3", "SLC27A2", "XYLB", "HSD3B2", "CYP1B1", "MVD", "CYP51A1",
"LUM", "GNG13", "HMGCS1", "PLPP1", "SLC19A1", "AGMAT", "PDSS1", "PFAS",
"GPD1L", "UGT1A6", "CBR1", "ALB", "DHCR7", "ADRA2A", "FASN", "GCSH",
"GALE", "HS6ST1", "BDH1", "HSD17B7", "HPGDS", "EBP", "SCD", "MAOB", "ACLY",
"CYP4F11", "KHK", "LIPG", "SLC5A6", "AACS", "HPGD", "FABP5", "MGST1", "MGST2")
others <- c("TAL1", "LYL1", "GATA2", "GFI1B", "CIAPIN1", "ADNP", "PFKP", "ATP2A3", "ATP2B4", "VDAC1", "GNA14", "MYC")
original.set <- c("AFP", "AKAP4", "ATP6V1B1-AS1", "C10orf107", "C1orf194", "C8orf48", "CALR3", "CAMK2N2", "CCL1", "CLCA2",
"CYP4F11", "DHRS2", "DRD2", "ENTHD1", "EPB41L4A-AS2", "HMMR-AS1", "KRT16", "LINC01602", "LUM", "MIP",
"MIR5581", "MMP12", "NLRP11", "SLC25A48", "SVEP1", "TEX19", "TMEM26", "ZNF560")
new.genes <- c("CHD4")
# Bare length() calls: interactive bookkeeping of the group sizes.
length(oxidative.phosphorylation) # 98
length(translation) # 82
length(ribosome) # 105
length(cholesterol) # 10
length(small.molecule.metabolism) # 66
length(others) # 12
length(original.set) # 28
length(new.genes) # 1
goi <- c(oxidative.phosphorylation, translation, ribosome, cholesterol,
small.molecule.metabolism, others, original.set, new.genes)
length(goi) # 402
length(unique(goi)) # 310
x <- toupper(unique(goi))
# Replace symbols whose official names have changed so they resolve in
# org.Hs.eg.db and match the expression-matrix rownames (commented entries
# were considered but left as-is).
name.fixes <- list("C10ORF107"="CABCOCO1",
"EPB41L4A-AS2"="EPB41L4A-DT",
#"HIST1H1B"="H1-5",
# "HIST1H1D"="H1-3",
#"H1FX"="H1-10",
# "H2AFY"="MACROH2A1",
"C1ORF194"="C1orf194",
"C8ORF48"="C8orf48")
matched.indices <- match(names(name.fixes), x)
x[matched.indices] <- as.character(name.fixes)
sort(x)
} # get.goi
#------------------------------------------------------------------------------------------------------------------------
# make sure that all of our genes have standardized gene symbol names, which can be used
# to look up hg38 location, and in genehancer, to get promoter and enhancer regions,
# and which match the expression matrix use to calculate tf/targetGene correlations
# Confirm every gene of interest has a resolvable standard symbol in
# org.Hs.eg.db and is present in the expression matrix.
# NOTE(review): the final `all(goi %in% rownames(mtx))` is returned but not
# wrapped in checkTrue(), so a FALSE would not fail loudly.
test_goi <- function()
{
message(sprintf("--- test_goi"))
goi <- get.goi()
tbl.syms <- select(org.Hs.eg.db, keytype="SYMBOL", keys=goi, columns=c("ENTREZID", "SYMBOL")) # "ENSEMBL",
checkEquals(nrow(tbl.syms), length(goi))
mtx <- getExpressionMatrix(tpe, "brandLabDifferentiationTimeCourse-27171x28-namesCorrected")
all(goi %in% rownames(mtx))
# Historical record of the one-time matrix rowname repair, kept commented.
# missing.genes <- setdiff(goi, rownames(mtx))
# missing.genes.original.names.in.matrix <- intersect(names(name.fixes), rownames(mtx))
#
#   # mtx genehancer & current hg38 annotations
# matrix.fixes <- list("ATP5L"="ATP5MG",
#                      "ATP5F1"="ATP5PB",
#                      "ATP5H"="ATP5PD",
#                      "ATP5J"="ATP5PF",
#                      "ATP5O"="ATP5PO",
#                      "C10orf107"="CABCOCO1",
#                      "EPB41L4A-AS2"="EPB41L4A-DT")
# mtx.new <- mtx
# rownames(mtx.new)[match(names(matrix.fixes), rownames(mtx.new))] <- as.character(matrix.fixes)
# all(goi %in% rownames(mtx.new))
# mtx <- mtx.new
# save(mtx, file="~/github/TrenaProjectErythropoiesis/inst/extdata/expression/brandLabDifferentiationTimeCourse-27171x28-namesCorrected.RData")
} # test_goi
#------------------------------------------------------------------------------------------------------------------------
# Build the full multi-score TF table for one target gene: open chromatin,
# FIMO motif hits, conservation, GeneHancer scores, TSS distance, expression
# correlations (all timepoints plus an early-only "corEarly" column), genic
# annotations and ChIP. Returns an empty data.frame when no open chromatin
# is found; otherwise the scored table (invisibly) with a targetGene column.
build.one <- function(gene)
{
tms.gene <- TrenaMultiScore(tpe, gene);
x <- getGeneHancerRegion(tms.gene)
count <- findOpenChromatin(tms.gene)
if(count == 0)
return(data.frame())
findFimoTFBS(tms.gene, fimo.threshold=1e-3)
scoreMotifHitsForConservation(tms.gene)
scoreMotifHitsForGeneHancer(tms.gene)
addDistanceToTSS(tms.gene)
mtx <- getExpressionMatrix(tpe, "brandLabDifferentiationTimeCourse-27171x28-namesCorrected")
addGeneExpressionCorrelations(tms.gene, mtx)
# Early differentiation timepoints only (days 0-4, both replicates).
timepoints <- c("day0.r1", "day0.r2", "day2.r1", "day2.r2", "day4.r1", "day4.r2")
addGeneExpressionCorrelations(tms.gene, mtx, "corEarly", timepoints)
addGenicAnnotations(tms.gene)
addChIP(tms.gene)
tbl <- getMultiScoreTable(tms.gene)
tbl$targetGene <- gene
invisible(tbl)
} # build.one
#------------------------------------------------------------------------------------------------------------------------
# Spot-check build.one() on CABCOCO1: expected TFs must appear among the
# high-GeneHancer, high-correlation hits.
test_build.one <- function()
{
tbl.cab <- build.one("CABCOCO1")
tbl.sub <- subset(tbl.cab, gh > 10 & abs(cor) > 0.7)
tbl.freq <- as.data.frame(table(tbl.sub$tf))
checkTrue(all(c("ETV4", "GATA2", "MEF2C", "TEAD4") %in% tbl.freq$Var1))
} # test_build.one
#------------------------------------------------------------------------------------------------------------------------
# Build one multi-score table per gene of interest and save each as an
# .RData file in output.dir.
build.goi <- function()
{
goi <- get.goi()
output.dir <- "out.27jan21"
if(!file.exists(output.dir))
dir.create(output.dir)
# NOTE(review): the index vector c(1:10, 122:length(goi)) skips genes
# 11-121 — presumably a manual resume after a partial earlier run; confirm
# before reusing this script from scratch.
for(gene in goi[c(1:10, 122:length(goi))]){
tbl.gene <- build.one(gene)
save(tbl.gene, file=file.path(output.dir, sprintf("%s.RData", gene)))
} # for gene
} # build.goi
#------------------------------------------------------------------------------------------------------------------------
if(!interactive())
build.goi()
|
0c959892fda7f08570b8ef35e35551b2074a9700
|
eae701f6f0eb4c18d9053b5ad9cf941a7cdc5ae2
|
/cachematrix.R
|
9d501b8047177b4d519786659e22fc4b8e1e960d
|
[] |
no_license
|
bernreyes/ProgrammingAssignment2
|
28f4a73158f4ea7b47eefe2c468c8676017babed
|
7fe8e3474c608cfdd3920b670371105ba44a751c
|
refs/heads/master
| 2020-12-11T09:24:24.106341
| 2016-04-23T11:10:45
| 2016-04-23T11:10:45
| 56,902,842
| 0
| 0
| null | 2016-04-23T05:50:16
| 2016-04-23T05:50:16
| null |
UTF-8
|
R
| false
| false
| 770
|
r
|
cachematrix.R
|
## cache the inverse of a matrix
## creates a special "matrix" object that can cache its inverse
# Wrap a matrix so its inverse can be memoised.
# Returns a list of accessors: set/get for the matrix itself,
# setinverse/getinverse for the cached inverse. Replacing the matrix via
# set() invalidates any cached inverse.
makeCacheMatrix <- function(x = matrix()) {
  cached.inverse <- NULL
  list(
    set = function(y) {
      x <<- y
      cached.inverse <<- NULL
    },
    get = function() x,
    setinverse = function(inverse) cached.inverse <<- inverse,
    getinverse = function() cached.inverse
  )
}
## It computes the inverse of the special "matrix".
## If it's already calculated,it will retrieve the inverse from the cache.
## Return the inverse of the special "matrix" created by makeCacheMatrix().
## A previously computed inverse is served from the cache (with a message);
## otherwise the inverse is computed with solve(), stored, and returned.
cacheSolve <- function(x, ...) {
  cached <- x$getinverse()
  if (is.null(cached)) {
    inv <- solve(x$get(), ...)
    x$setinverse(inv)
    return(inv)
  }
  message("getting cached data")
  cached
}
|
89a248da9857910c97d1e4d7e319bbf71dd66bd2
|
b9df86ea2b7700091a71527bb7fbe1383de166d0
|
/man/theme_nr.Rd
|
557224c6d452c7e1776e3c71568229b722ce6c47
|
[] |
no_license
|
d4gg4d/noamtools
|
c3bedc8e3e34801b2e6620ce1ff91a0a3a5b805c
|
051cb8dc4bb040f851bdc5e61ca77517d03b7bc9
|
refs/heads/master
| 2020-12-26T04:55:22.645208
| 2014-01-18T14:51:12
| 2014-01-18T14:51:12
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 148
|
rd
|
theme_nr.Rd
|
\name{theme_nr}
\alias{theme_nr}
\title{Noam's default ggplot2 styling}
\usage{
theme_nr(...)
}
\description{
Noam's default ggplot2 styling
}
|
bf062cc5931c6e9a40c8e5f21c9e3f782a7d42c4
|
4f2743db548d08f57ec5c441011d94c28aa0ccac
|
/man/sub-sub-.model.Rd
|
6adeddd4ff67288bbec819b6c09ee0d8a3a23b4f
|
[] |
no_license
|
bergsmat/nonmemica
|
85cdf26fa83c0fcccc89112c5843958669373a2a
|
8eddf25fdd603a5aca719a665c5b9475013c55b3
|
refs/heads/master
| 2023-09-04T06:10:48.651153
| 2023-08-28T13:23:18
| 2023-08-28T13:23:18
| 78,268,029
| 4
| 1
| null | null | null | null |
UTF-8
|
R
| false
| true
| 598
|
rd
|
sub-sub-.model.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/model.R
\name{[[.model}
\alias{[[.model}
\title{Select model Element}
\usage{
\method{[[}{model}(x, ..., drop = TRUE)
}
\arguments{
\item{x}{model}
\item{...}{passed arguments}
\item{drop}{passed to element select}
}
\value{
element
}
\description{
Selects model element.
}
\seealso{
Other as.model:
\code{\link{[.model}()},
\code{\link{as.model.character}()},
\code{\link{as.model.numeric}()},
\code{\link{as.model}()},
\code{\link{read.model}()},
\code{\link{write.model}()}
}
\concept{as.model}
\keyword{internal}
|
ea43984c3611f9900f43327440915693eb6b395c
|
822bec59df866c799c049b9138e99eac606965d3
|
/man/pairing_status.Rd
|
177c6d211a0b7393ece39ecc076fae1e49cf89ab
|
[] |
no_license
|
mpio-be/beR
|
2424428371e0676dc728a1e1e223382c524b1600
|
c3cbb7eca1ca6375c0c98409f0ad32d1fbfc3cfe
|
refs/heads/master
| 2022-05-28T05:31:58.164345
| 2022-05-14T20:24:59
| 2022-05-14T20:24:59
| 129,069,637
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 463
|
rd
|
pairing_status.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/divorce.R
\name{pairing_status}
\alias{pairing_status}
\title{Divorced pairs}
\usage{
pairing_status(d)
}
\arguments{
\item{d}{data.frame containing maleID, femaleID and season}
}
\value{
a data.table similar to d but with two additional columns: male_status and female_status
}
\description{
Divorced pairs
}
\examples{
data(divorcees)
d = divorcees
x = pairing_status(divorcees)
x
}
|
f4ed05e389ea515af21af376f615bd1a61ee0ea2
|
da7f013d52e65313d25798d075b29733b218a64c
|
/3_Function_defineandcheck.R
|
375dec23b17d8108a4bf6d1fadb9dae3780891a4
|
[] |
no_license
|
KrishnaKuyate/Bollywood-Movies-Data-Analysis
|
6b1a0af39853f6fea213a66fac90ab736be79644
|
3d2239ca7b4b79ea86f4a5bc1e44b2423ccf558f
|
refs/heads/master
| 2020-04-04T16:22:55.919791
| 2018-11-22T19:30:08
| 2018-11-22T19:30:08
| 156,075,256
| 1
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,412
|
r
|
3_Function_defineandcheck.R
|
# Bar chart of the five actors with the highest summed collections.
# Collection_type: numeric collection values; agg_by: actor name per entry;
# title / yaxix: plot title and y-axis label. Returns a ggplot object.
Top_act_for_collection_g <- function(Collection_type, agg_by, title, yaxix) {
  totals <- as.data.frame(aggregate(Collection_type, by = list(agg_by), FUN = sum))
  ranked <- totals[order(totals$x, decreasing = TRUE), ]
  top5 <- as.data.frame(ranked[1:5, ])
  colnames(top5) <- c("Actor", "Total Collection 2015")
  ggplot(top5, aes(x = Actor, y = `Total Collection 2015`, fill = Actor)) +
    geom_bar(stat = "identity", position = "dodge") +
    ggtitle(title) +
    theme(plot.title = element_text(face = "bold.italic")) +
    xlab("Actor Name") +
    ylab(yaxix) +
    theme(axis.text.x = element_text(angle = 45, hjust = 1))
}
Top_act_for_collection_g(acto_coll_df$O_Collection_a,acto_coll_df$Actor_cl,"Actor Name Vs O Collection","O Collection")
# Tabulate the total collection for the top 5 actors.
#
# Args:
#   Collection_type: numeric vector of collection figures to sum.
#   agg_by: vector of actor names used as the grouping key.
# Returns: a data.frame with columns "Actor" and "Total Collection 2015",
#   containing the 5 actors with the largest summed collections.
Top_act_for_collection_t <- function(Collection_type, agg_by) {
  # Sum the collection per actor, rank descending, keep the top 5 rows.
  per_actor <- as.data.frame(aggregate(Collection_type, by = list(agg_by), FUN = sum))
  ranked <- per_actor[order(per_actor$x, decreasing = TRUE), ]
  top5 <- as.data.frame(ranked[1:5, ])
  colnames(top5) <- c("Actor", "Total Collection 2015")
  # Table of the top 5 actors:
  top5
}
Top_act_for_collection_t(acto_coll_df$O_Collection_a,acto_coll_df$Actor_cl)
|
8b3311d697b0b0a505ea7c169ab2d8055acbac76
|
c5b1732008f228618ff73e704dcf2e7b4c56de7e
|
/series_temporales/series_temporales.R
|
9b8d6b625ddfa98cc71de6b198cd8c64f4bf332d
|
[] |
no_license
|
joseramoncajide/customer_analytics
|
63698de266eeb0fa303b6df8d0027ab336ab577a
|
c4c4cacd57f68c25d487dc8e2de924064b624571
|
refs/heads/master
| 2021-06-19T06:13:51.773996
| 2019-10-23T07:35:59
| 2019-10-23T07:35:59
| 105,311,729
| 0
| 1
| null | null | null | null |
UTF-8
|
R
| false
| false
| 4,421
|
r
|
series_temporales.R
|
##########################################################################
# Jose Cajide - @jrcajide
# Customer Analytics: Time series manipulation, analysis and forecasting
##########################################################################
# Walk-through script: builds a monthly ts of customer counts and demos
# base-R manipulation, differencing, simple forecasting methods, seasonal
# decomposition, Holt-Winters forecasting and structural-break detection.
rm(list=ls())
cat("\014")
# Install any missing dependencies, then attach them.
list.of.packages <- c("tidyverse", "forecast")
new.packages <- list.of.packages[!(list.of.packages %in% installed.packages()[,"Package"])]
if(length(new.packages)) install.packages(new.packages)
library(tidyverse)
library(forecast)
#----------------------------------------------------------------------------
# Load data
#----------------------------------------------------------------------------
# client_id is used below as the per-period customer count — presumably a
# count column despite the name; confirm against the CSV header.
customers.df <- read_csv('series_temporales/customers_by_cohort.csv')
# customers.df <- tail(customers.df, nrow(customers.df)-1)
#
# # customers.df <- head(customers.df, nrow(customers.df)-1)
# #
# # customers.df %>% write_csv('./series_temporales/customers_by_cohort.csv')
(customers <- customers.df$client_id)
#----------------------------------------------------------------------------
# Time series with base R
#----------------------------------------------------------------------------
# Monthly series (frequency = 12) starting September 2010.
(customers.ts <- ts(data = customers,frequency = 12, start = c(2010, 09)))
start(customers.ts)
end(customers.ts)
frequency(customers.ts)
summary(customers.ts)
plot(customers.ts, main = "customers")
autoplot(customers.ts) + labs(title = "customers")
# Differentiation
# First (lag-1) difference removes the trend ...
(customers_diff.ts <- diff(customers.ts,1))
plot(customers_diff.ts, main="customers")
# ... lag-12 difference removes the yearly seasonal component.
(customers_diff.ts <- diff(customers.ts,12))
plot(customers_diff.ts, main="customers con una diferencia estacional")
# Average method
# Forecast = historical mean, 12 periods ahead.
customers.mean <- meanf(customers.ts, h = 12)
summary(customers.mean)
plot(customers.mean)
autoplot(customers.mean)
# Naive
# Forecast = last observed value, 12 periods ahead.
customers.naive <- naive(customers.ts, 12)
summary(customers.naive)
plot(customers.naive)
autoplot(customers.naive)
# Simple Moving Average
library(smooth)
# Predict customers for the next 12 periods (original comment said "weeks",
# but the series is monthly), holding out the last observations.
# Order 2
fit2<-sma(customers.ts, order = 2, h = 12, holdout = T, level = .95)
round(fit2$forecast,0)
plot(forecast(fit2))
# Automatic
# Let sma() pick the moving-average order by AIC.
fitX<-sma(customers.ts, h = 12, holdout = T, level = .95, ic = 'AIC')
round(fitX$forecast,0)
plot(forecast(fitX))
# ts components -----------------------------------------------------------
# the trend is the long-term increase or decrease in the data
# the seasonal pattern occurs when a time series is affected by seasonal factors such as the time of the year or the day of the week
# the cycle occurs when the data exhibit rises and falls that are not of a fixed period
#----------------------------------------------------------------------------
customers.decomp <- decompose(customers.ts)
plot(customers.decomp)
ggseasonplot(customers.ts)
ggseasonplot(customers.ts , polar = TRUE)
ggsubseriesplot(customers.ts)
# Seasonally adjusted or deseasonalized data ------------------------------
# Obtaining a seasonal stationary ts
# Subtract the additive seasonal component estimated by decompose().
customers_des.ts <- customers.ts-customers.decomp$seasonal
plot(customers.ts,main="customers. Serie normal Vs desestacionalizada")
lines(customers_des.ts, col='red')
# Forecasting -------------------------------------------------------------
# Holt-Winters Forecasting or Triple Exponential Smoothing ----------------
# https://en.wikipedia.org/wiki/Exponential_smoothing#Triple_exponential_smoothing
fit_hw <- HoltWinters(customers.ts)
plot(fit_hw)
forecast_out_sample <- forecast(fit_hw, h=12)
round(print(forecast_out_sample),2)
plot(forecast_out_sample)
# More on TS --------------------------------------------------------------
monthplot(customers.ts, main="customers Monthly Patterns",
          xlab="Month", ylab="Count", col="darkblue", lwd=2, lty.base=2, lwd.base=1,
          col.base="gray40")
seasonplot(customers.ts, main="customers Seasonal Trends",
           col=rainbow(10), year.labels=TRUE, year.labels.left=TRUE, cex=0.7,
           cex.axis=0.8)
# Identifying possible breakpoints in a time series -----------------------
require(strucchange)
breakpoints(customers.ts ~ 1)
plot(customers.ts, main="customers breakpoints", ylab="customers", col="darkblue", lwd=1.5)
# Plot the line at the optimal breakpoint
lines(breakpoints(customers.ts ~ 1), col="darkgreen")
# Plot a 90% confidence interval
lines(confint(breakpoints(customers.ts ~ 1), level=0.90), col="darkgreen")
|
9cbed600bf976588e4ac65bcb87022c57075c4b1
|
117c8dc94e72fd2647e4712d3472a26874df204b
|
/drafts/20201203_chess_f.R
|
2143445dff9242a8f0eb6617a056153c248df476
|
[] |
no_license
|
Tai-Rocha/lives
|
ce1e6b2961dc9f717c433bee1f20c1ee4032d436
|
106b35ef116fa15e76843a607e3097b5e2993d92
|
refs/heads/master
| 2023-08-05T00:42:10.286918
| 2021-09-16T01:19:41
| 2021-09-16T01:19:41
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 870
|
r
|
20201203_chess_f.R
|
#' Author: Fernando
#' Subject: chess experiments (original title: "XADREZ PORRA")
# devtools::install_github("curso-r/chess")
# library(tidyverse)
# install.packages("rsvg")
# chess::install_chess()
library(chess)
# NOTE(review): can_claim_draw() requires a game object; called bare it will
# error at runtime — presumably leftover experimentation. TODO confirm/remove.
chess::can_claim_draw()
# Import -----------------------------------------------------------------------
# Read first game from My 60 Memorable Games
file <- system.file("m60mg.pgn", package = "chess")
fischer_sherwin <- read_game(file, n_max = 1)
plot(fischer_sherwin)
# Step through the first 20 half-moves of the game, plotting the board after
# each one. BUG FIX: the original discarded the result of forward() and then
# called plot() with no arguments (a runtime error); pipe the advanced game
# into plot() instead. Assumes forward(i) returns the position i half-moves
# in (the original used forward(63) on every iteration) — TODO confirm
# against chess::forward() docs.
for (i in 1:20) {
  fischer_sherwin %>%
    forward(i) %>%
    plot()
}
# Tidy -------------------------------------------------------------------------
# Visualize --------------------------------------------------------------------
# Model ------------------------------------------------------------------------
# Export -----------------------------------------------------------------------
# readr::write_rds(d, "")
|
fde4261679eece89f71618bf07e4751c77ce2b31
|
77b9c26131fb30f90adedeb0615702b9c81bf7dd
|
/time_series_SSAR3.R
|
297e820197c416e68a69e2fba20dfe24f3985ec1
|
[] |
no_license
|
mteguchi/Indonesia_nesting
|
a4c940bcdec06b6d1b803e6e8e239d4ca7b87bc6
|
3c074e88c264c811857a67a566a3180e2846b85f
|
refs/heads/master
| 2023-07-20T10:38:36.792593
| 2023-07-13T22:50:25
| 2023-07-13T22:50:25
| 126,876,068
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 3,402
|
r
|
time_series_SSAR3.R
|
#time series analysis
# Fits a Bayesian state-space AR(3) model (models/model_SSAR3.txt) to monthly
# leatherback nest counts at Jamursba-Medi (JM), Indonesia, via JAGS, checks
# convergence, then plots posterior medians of the latent state (X) and the
# predicted observations (y) against the raw counts.
rm(list=ls())
source('Dc_Indonesia_nesting_fcns.R')
library(rjags)
library(bayesplot)
# MCMC settings.
n.chains <- 3
n.iter <- 10000
n.update <- 10000
# get JM data first:
data.0.JM <- read.csv('data/NestCounts_JM_09Feb2018.csv')
# create time-duration field (in yrs)
# define dates with begin and end dates:
# Reshape wide (YEAR x month columns) to long: one row per year/month count.
data.0.JM %>% reshape2::melt(id.vars = "YEAR",
                             variable.name = "month",
                             value.name = "count") -> data.1.JM
# mmm2month() (from Dc_Indonesia_nesting_fcns.R) maps month abbreviations
# to 1..12 — assumed from usage; confirm in that file.
data.1.JM$MONTH <- unlist(lapply(data.1.JM$month, FUN = mmm2month))
# Frac.Year places each count at the middle of its month.
data.1.JM <- mutate(data.1.JM, f.month = as.factor(MONTH),
                    f.year = as.factor(YEAR))%>%
  mutate(Frac.Year = YEAR + (MONTH-0.5)/12)
# Keep 2005 onward; T = 156 months (presumably 13 years of monthly data).
data.1.JM.2005 <- filter(data.1.JM, YEAR > 2004)
bugs.data <- list(y = data.1.JM.2005$count,
                  T = 156)
# Random initial values per chain (sigma inits left to JAGS defaults).
inits.function <- function(){
  mu <- rnorm(1, 0, 10)
  theta1 <- rnorm(1, 0, 1)
  theta2 <- rnorm(1, 0, 1)
  theta3 <- rnorm(1, 0, 1)
  #sigma.pro <- runif(1, 0, 50)
  #sigma.obs <- runif(1, 0, 50)
  A <- list(mu = mu, theta1 = theta1, theta2=theta2, theta3=theta3)
  #          sigma.pro = sigma.pro, sigma.obs = sigma.obs)
  return(A)
}
params <- c('mu', 'theta1', 'theta2','theta3','sigma.pro', 'sigma.obs')
jm <- jags.model(file = 'models/model_SSAR3.txt',
                 data = bugs.data,
                 inits = inits.function,
                 n.chains = n.chains,
                 n.adapt = n.iter)
# check for convergence first.
zm <- coda.samples(jm,
                   variable.names = params,
                   n.iter = n.iter)
gelman.diag(zm)
# plot posterior densities using bayesplot functions:
mcmc_dens(zm, 'theta1')
#mcmc_trace(zm, 'theta1')
mcmc_dens(zm, 'theta2')
#mcmc_trace(zm, 'theta2')
mcmc_dens(zm, 'theta3')
#mcmc_trace(zm, 'theta3')
mcmc_dens(zm, 'sigma.pro')
#mcmc_trace(zm, 'sigma')
mcmc_dens(zm, 'sigma.obs')
# then sample y
# Re-sample, now also monitoring the latent states X and fitted y values.
params <- c('theta1', 'theta2','theta3',
            'sigma.pro', 'sigma.obs', 'y', 'X', 'mu')
zm <- coda.samples(jm,
                   variable.names = params,
                   n.iter = n.iter)
summary.zm <- summary(zm)
# extract ys
# Pull the 2.5/50/97.5% posterior quantiles for every y[t] node.
ys.stats <- data.frame(summary.zm$quantiles[grep(pattern = 'y[/[]',
                                                 row.names(summary.zm$quantiles)),
                                            c('2.5%', '50%', '97.5%')])
colnames(ys.stats) <- c('low_y', 'mode_y', 'high_y')
ys.stats$time <- data.1.JM.2005$Frac.Year
ys.stats$obsY <- data.1.JM.2005$count
# extract Xs - the state model
Xs.stats <- data.frame(summary.zm$quantiles[grep(pattern = 'X[/[]',
                                                 row.names(summary.zm$quantiles)),
                                            c('2.5%', '50%', '97.5%')])
colnames(Xs.stats) <- c('low_X', 'mode_X', 'high_X')
Xs.stats$time <- data.1.JM.2005$Frac.Year
Xs.stats$obsY <- data.1.JM.2005$count
# Blue: posterior median state; red: posterior median prediction;
# green: observed counts.
ggplot() +
  geom_point(data = Xs.stats,
             aes(x = time, y = mode_X), color = "blue") +
  geom_line(data = Xs.stats,
            aes(x = time, y = mode_X), color = 'blue') +
  geom_point(data = ys.stats,
             aes(x = time, y = mode_y), color = "red",
             alpha = 0.5) +
  geom_line(data = ys.stats,
            aes(x = time, y = mode_y), color = "red",
            alpha = 0.5) +
  geom_point(data = ys.stats,
             aes(x = time, y = obsY), color = "green",
             alpha = 0.5)
|
edb0d1f0784cb0a2858029bc86dfa835904246b8
|
836a779c4e3405bb6ffe79117964b6b516cb3bc2
|
/man/errAS.Rd
|
e057a5e46863353a1a519db1c2702b7cb625a942
|
[] |
no_license
|
RajeswaranV/proportion
|
657fe69c4ad62c00ee95077d40683d4452d42c66
|
c0e0a60d43113004c0366c71a2b2dac36bf1fc86
|
refs/heads/master
| 2022-06-21T13:51:47.915249
| 2022-06-11T06:31:18
| 2022-06-11T06:31:18
| 46,557,957
| 1
| 1
| null | null | null | null |
UTF-8
|
R
| false
| true
| 1,685
|
rd
|
errAS.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/501.Error-Failure_LimitBased_BASE_All.R
\name{errAS}
\alias{errAS}
\title{Calculates error, long term power and pass/fail criteria for ArcSine method}
\usage{
errAS(n, alp, phi, f)
}
\arguments{
\item{n}{- Number of trials}
\item{alp}{- Alpha value (significance level required)}
\item{phi}{- Null hypothesis value}
\item{f}{- Failure criterion}
}
\value{
A dataframe with
\item{delalp}{ Delta-alpha is the increase of the nominal error with respect to real error}
\item{theta}{ Long term power of the test}
\item{Fail_Pass}{Fail/pass based on the input f criterion}
}
\description{
Calculates error, long term power and pass/fail criteria for ArcSine method
}
\details{
Evaluation of Wald-type interval for the arcsine transformation of the parameter
\code{p} error due to the difference of achieved and nominal level of
significance for the \eqn{n + 1} intervals
}
\examples{
n=20; alp=0.05; phi=0.05; f=-2
errAS(n,alp,phi,f)
}
\references{
[1] 2014 Martin Andres, A. and Alvarez Hernandez, M.
Two-tailed asymptotic inferences for a proportion.
Journal of Applied Statistics, 41, 7, 1516-1529
}
\seealso{
Other Error for base methods:
\code{\link{PloterrAS}()},
\code{\link{PloterrAll}()},
\code{\link{PloterrBA}()},
\code{\link{PloterrEX}()},
\code{\link{PloterrLR}()},
\code{\link{PloterrLT}()},
\code{\link{PloterrSC}()},
\code{\link{PloterrTW}()},
\code{\link{PloterrWD}()},
\code{\link{errAll}()},
\code{\link{errBA}()},
\code{\link{errEX}()},
\code{\link{errLR}()},
\code{\link{errLT}()},
\code{\link{errSC}()},
\code{\link{errTW}()},
\code{\link{errWD}()}
}
\concept{Error for base methods}
|
7e24808da047fb2af0a1e0525ef0e7b9ed53119f
|
a4bfefac7d398d460bc05863b0f528ad147ba126
|
/R/chisq_test.R
|
26e4be2ae94bb6b89989e9abb3107856cf285cc5
|
[] |
no_license
|
zjsichen/StatComp20095
|
bb9577477953b3da8f5b34db96d507bc8586cdfe
|
18cd035fb9469d2d3c08ad6abad0f5b763f869ee
|
refs/heads/master
| 2023-02-01T12:07:08.337300
| 2020-12-20T04:19:51
| 2020-12-20T04:19:51
| 322,737,776
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,300
|
r
|
chisq_test.R
|
#' @title Make a faster version of chisq.test()
#' @name chisq_test
#' @description The chi-square statistic of the cross-contingency table is calculated to verify the independence between two variables.
#' @param x A contingency table matrix.
#' @return A list with the chi-square statistic (\code{chi_sqr}), the degrees
#'   of freedom (\code{df}) and the p-value (\code{p_value}) of the test.
#' @examples
#' \dontrun{
#' M <- as.table(rbind(c(762, 327, 468), c(484, 239, 477)))
#' dimnames(M) <- list(gender = c("F", "M"),party = c("Democrat","Independent", "Republican"))
#' (M)
#' (Xsq <- chisq.test(M))
#' chisq_test(M)
#' print(microbenchmark::microbenchmark(chisq_test(M),chisq.test(M)))
#' }
#' @import microbenchmark
#' @importFrom stats pchisq
#' @export
chisq_test <- function(x){
  m <- nrow(x); n <- ncol(x); N <- sum(x)
  # Expected counts under independence: E[i, j] = rowsum_i * colsum_j / N.
  # outer() builds the whole matrix at once instead of a double loop.
  E <- outer(rowSums(x), colSums(x)) / N
  # degree of freedom
  df <- (m - 1) * (n - 1)
  # Pearson's chi-square statistic.
  chi_sqr <- sum((x - E)^2 / E)
  # BUG FIX: the p-value of the test is the upper-tail probability of the
  # chi-square distribution, not the density at the statistic (the original
  # used dchisq(), which returns the density, not a p-value).
  p_value <- pchisq(chi_sqr, df = df, lower.tail = FALSE)
  (test <- list(chi_sqr = chi_sqr, df = df, p_value = p_value))
}
|
3c76c22e80f3a7abe7561e3442d1b0fbae52675f
|
8bef64009c1256ace384188118ec8611429256ed
|
/man/comp_scat2d.Rd
|
066a9577c69ffd213ec05fea8b2d5dd10a758e20
|
[] |
no_license
|
NathanWycoff/iPLSV
|
3f78cde543aff868dca4303dc9bf19c5055906b3
|
cfc4ffe6884f005fc076312b45cf0f3d4cc211a5
|
refs/heads/master
| 2020-03-13T00:03:06.642527
| 2018-06-28T22:04:05
| 2018-06-28T22:04:05
| 130,879,295
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 1,156
|
rd
|
comp_scat2d.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/lib.R
\name{comp_scat2d}
\alias{comp_scat2d}
\title{Compare two sets of 2D points by plotting a scatterplot, optionally rotating the data such that they are as similar as possible (useful when visualizing projections which are invariant to unitary transformations (rotations and reflections)). A will be in black, B will be in red.}
\usage{
comp_scat2d(A, B, rot = FALSE, scale_ax = FALSE, obnoxious = FALSE)
}
\arguments{
\item{A}{A first set of points, should be of the same dimensions as B}
\item{B}{A second set of points, should be of the same dimensions as A}
\item{rot}{If TRUE, solves the orthogonal procrustes problem to rotate B to match A.}
\item{scale_ax}{If TRUE, scales each axis so as to minimize error (performed after procrustes analysis if rot is also TRUE).}
}
\description{
Compare two sets of 2D points by plotting a scatterplot, optionally rotating the data such that they are as similar as possible (useful when visualizing projections which are invariant to unitary transformations (rotations and reflections)). A will be in black, B will be in red.
}
|
e6e28ae95a7a5eb7437651d04a6c39c30bf7b285
|
87502e5d65358d4069dfab82a6c8aafea627f936
|
/Code/KNN/01/baye_apperanceCount.R
|
1db2ef55445b41cebe0edf9221901c1b6d330dc8
|
[] |
no_license
|
niive12/SML
|
4c5dc361480d24652dff4602db4b84e96d7ef306
|
a21570b8b2e6995dbcee083901160c718424ff8c
|
refs/heads/master
| 2020-06-05T08:55:59.875092
| 2015-06-25T09:38:43
| 2015-06-25T09:38:43
| 30,865,578
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,450
|
r
|
baye_apperanceCount.R
|
source("load_people_data.R")
source("normalize.R")
source("Baye.R")
library("gplots") # contour plot colors
divs = 1:10
bis = 2:10
laplace = 0
contour_data <- matrix(,length(bis),length(divs))
if(FALSE){
data <- prepareOneAlone(3,2, 400, 400)
startTime <- proc.time() # used for timings
# reduce data (binning and grouping)
for(bin_i in 1:length(bis)){
data_b <- normalizeData(data, "bin", bins = bis[bin_i])
for(div_i in 1:length(divs)){
data_bin <- countBinning(data_b, 20, 0:(bis[bin_i]-1), pictureDevisions = divs[div_i])
succ <- (baye_predict(data_bin, laplace = laplace))$success
contour_data[bin_i,div_i] = succ
print(paste(c((bin_i-1)*length(divs)+div_i, " / ", (length(bis)*length(divs)), " Time taken till now: ",(proc.time()-startTime)[3], " seconds."),collapse=""))
}
}
save(contour_data, file = "contour_baye_bin-vs-divs.Rdata")
}else {
load("contour_baye_bin-vs-divs.Rdata")
}
filled.contour(y = divs, x = bis, contour_data, col=colorpanel(20, "black", "white"), levels=seq(min(contour_data), max(contour_data), length.out= 21))
title(main = NULL, xlab = "No. of bins", ylab = "No. of divisions")
setEPS()
postscript("contour_bins_vs_divs.eps",height = 6, width = 8)
filled.contour(y = divs, x = bis, contour_data, col=colorpanel(20, "black", "white"), levels=seq(min(contour_data), max(contour_data), length.out= 21))
title(main = NULL, xlab = "No. of bins", ylab = "No. of divisions")
dev.off()
|
5aea670426a729ab7beaec1975e6829c137c09df
|
0a906cf8b1b7da2aea87de958e3662870df49727
|
/diceR/inst/testfiles/indicator_matrix/libFuzzer_indicator_matrix/indicator_matrix_valgrind_files/1609960151-test.R
|
ad57d4c663a4597d73828a9ab3356eadf161f5d7
|
[] |
no_license
|
akhikolla/updated-only-Issues
|
a85c887f0e1aae8a8dc358717d55b21678d04660
|
7d74489dfc7ddfec3955ae7891f15e920cad2e0c
|
refs/heads/master
| 2023-04-13T08:22:15.699449
| 2021-04-21T16:25:35
| 2021-04-21T16:25:35
| 360,232,775
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 546
|
r
|
1609960151-test.R
|
# Auto-generated fuzzer/valgrind regression input for diceR's unexported
# indicator_matrix() routine: replays a vector of extreme doubles
# (NA/NaN/Inf, denormals, huge magnitudes) that previously exercised a
# crash or memory error.
testlist <- list(x = c(2.52017807400935e-300, -4.31064365405349e-282, NaN, 6.14293298947794e-183, NA, 6.14293298947794e-183, 6.14293298947794e-183, 6.14293298947805e-183, 6.14293298947794e-183, 6.64754152697478e-148, 3.62992441230979e-308, -9.77725895036027e-292, 6.14293298947792e-183, NaN, NaN, NaN, Inf, NaN, 5.43230922486616e-312, 0, 0, 0, 0, 0, 0, 0, 0, NaN, NaN, NaN, 2.78555659330022e-312, NaN, -5.82900682309329e+303, 2.40653430831987e-309, 3.82993010819242e-306, 0))
# Invoke the internal function with the recorded argument list and print the
# structure of whatever comes back.
result <- do.call(diceR:::indicator_matrix,testlist)
str(result)
|
74c550fb857f4e9c5c2d0073a2ca604df26c592c
|
84af2a5d4cc82c218c6ea63ef6a3f8fdc299c1ac
|
/man/ESTAR_ECM.Rd
|
34aaaaf093d7f3858be68a9a1bb07f5ef60f9db7
|
[] |
no_license
|
cran/NonlinearTSA
|
c173ef17fad7165d3f13ad3034ae3b6de4520f4b
|
8fa55067e97fa0d4ceafe62999b03169c6dca109
|
refs/heads/master
| 2023-02-25T02:38:07.707991
| 2021-01-23T15:30:02
| 2021-01-23T15:30:02
| 270,956,992
| 1
| 1
| null | null | null | null |
UTF-8
|
R
| false
| true
| 1,074
|
rd
|
ESTAR_ECM.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/ESTAR_ECM.R
\name{ESTAR_ECM}
\alias{ESTAR_ECM}
\title{ESTAR Vector Error Correction Model}
\usage{
ESTAR_ECM(y, x, lags)
}
\arguments{
\item{y}{series name,}
\item{x}{series name}
\item{lags}{lag length}
}
\value{
"Model" Estimated model
"AIC" Akaike information criteria
"BIC" Schwarz information criteria
}
\description{
This function allows you to estimate ESTAR Vector Error Correction Model
}
\details{
Estimates an exponential smooth transition (ESTAR) error correction model.
}
\examples{
x <- cumsum(rnorm(1000))
y <- cumsum(rnorm(1000))
ESTAR_ECM(x, y, lags = 6)
data(MarketPrices)
ESTAR_ECM(MarketPrices[,1],MarketPrices[,2],lags = 2)
}
\references{
Kapetanios, G., Shin, Y., & Snell, A. (2006). Testing for cointegration in nonlinear smooth transition error correction models. Econometric Theory, 22(2), 279-303.
Burak Guris, R Uygulamalı Dogrusal Olmayan Zaman Serileri Analizi, DER Yayinevi, 2020.
}
\keyword{STAR}
\keyword{correction}
\keyword{error}
\keyword{model}
\keyword{vector}
|
9702918c3bde42796051ac35e1e22916f8cee2be
|
28be8c22b0bd4d4e950f01118993de17d28c7e69
|
/survival_analysis.R
|
3881e83e3b74932bcd46bdb1721f0d76d8f19cfa
|
[] |
no_license
|
tyleredouglas/DouglasETAL2020_CurrentBio
|
1d757668c0c5f5175cd4b76c4ab3cb0507654666
|
0128a2ed0100889458e53dad75df7d02b7d38cab
|
refs/heads/master
| 2023-02-12T03:50:47.430080
| 2021-01-05T22:14:45
| 2021-01-05T22:14:45
| 277,947,959
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,459
|
r
|
survival_analysis.R
|
# Survival-to-adulthood analysis for flies reared on nicotine food:
# computes per-vial percent survival by treatment, tests for treatment
# differences non-parametrically, and draws a boxplot.
library(ggplot2)
library(tidyverse)
library(dplyr)
library(cowplot)
library(ggpubr)
library(nlme)
setwd('C:/Users/Tyler/Desktop/Toxic Flies Paper/data/Nicotine Survival/run2')
data <- read.csv('nicotine_survival2.csv', stringsAsFactors = FALSE)
str(data)
# Relabel vials 1..10 as V1..V10 and convert dose labels from ul/ml to mM.
data$vial <- recode(data$vial, '1'='V1','2'='V2','3'='V3','4'='V4','5'='V5','6'='V6','7'='V7','8'='V8','9'='V9','10'='V10')
data$treatment <- recode(data$treatment, "0.0 ul/ml" = "0.0 mM", "0.2 ul/ml" = "1.25 mM", "0.4 ul/ml" = "2.50 mM")
#-------------------------------- percent survival analysis -------------------------------
# Total emerged adults per treatment x vial.
survival_data <- data %>%
  group_by(treatment, vial) %>%
  summarise(fly_total = sum(adults)) %>%
  select(treatment, vial, fly_total)
survival_data["percent_survival"] <- 0
# Percent survival = adults / starting count. The denominators (30, 24, 30)
# are hard-coded per-treatment starting counts — presumably from the
# experimental setup; confirm if the data change.
survival_data$percent_survival[survival_data$treatment == '1.25 mM'] <- (survival_data$fly_total[survival_data$treatment == '1.25 mM']/30)*100
survival_data$percent_survival[survival_data$treatment == '2.50 mM'] <- (survival_data$fly_total[survival_data$treatment == '2.50 mM']/24)*100
survival_data$percent_survival[survival_data$treatment == '0.0 mM'] <- (survival_data$fly_total[survival_data$treatment == '0.0 mM']/30)*100
# Mean, SD and SE of percent survival per treatment.
survival_summary <- survival_data %>%
  group_by(treatment) %>%
  summarise(avg_survival = mean(percent_survival), n = n(), stdev = sd(percent_survival), SE = stdev/(sqrt(n)))
#analysis
#data not normally distributed
shapiro.test(survival_data$percent_survival)
# Non-parametric tests: overall Kruskal-Wallis, then pairwise Wilcoxon.
compare_means(percent_survival ~ treatment, survival_data, method = "kruskal.test")
compare_means(percent_survival ~ treatment, survival_data, method = "wilcox.test", paired = FALSE)
wilcox.test(survival_data$percent_survival[survival_data$treatment == "0.0 mM"], survival_data$percent_survival[survival_data$treatment == "1.25 mM"])
# boxplot of percent survival across treatments
ggplot(data = survival_data, mapping = aes(x = treatment, y = percent_survival, fill = treatment)) +
  geom_boxplot(color = "black", alpha = .8) +
  geom_jitter(aes(fill = treatment), color = 'black', shape = 21, size = 3.5, width = 0.2, alpha = .6) +
  ylab("Percent Survival") +
  xlab("Nicotine Concentration (mM)") +
  ggtitle("A4 Survival to Adulthood") +
  theme_pubr()+
  theme(plot.title = element_text(hjust = 0.5, size = 12, face = "bold")) +
  theme(legend.position = "none") +
  scale_fill_manual(values = c("darkorchid2", "green2", "deepskyblue"))
|
067a9d45e2f1f289822894cdbe2dd20866140080
|
56efee66de74609ccff457427236520ac661ea5c
|
/inst/examples/ex-winequality/main_wine.r
|
4e4c857549c79493c58e0a0024588f87b2954d41
|
[] |
no_license
|
cran/TDMR
|
cc3e8b256624cd53eb350ce34e32ef34c92fe1ab
|
0554aaaa8c3f828a3e2e9c898deddf7cc9f4d609
|
refs/heads/master
| 2021-01-18T23:27:21.160115
| 2020-03-02T16:20:02
| 2020-03-02T16:20:02
| 17,693,854
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,986
|
r
|
main_wine.r
|
#
# Template for a data mining process (both with or w/o CV)
# (dataset: winequality, method: Random Forest)
#
# Example usage:
# opts = list(path=".", dir.data="data", filename="winequality-white-train.csv",READ.TrnFn=readTrnWine)
# opts <- setParams(opts,defaultOpts(),keepNotMatching=T)
# result <- main_wine(opts)
#
# see also: start_wine.r
#
# main_wine: one full data-mining run (classification of wine quality via
# the TDMR framework), with or without cross-validation.
#
# Args:
#   opts: TDMR options list (required); entries not yet set are filled with
#         defaults by tdmOptsDefaultsSet().
#   dset: optional training data.frame; read via tdmReadDataset(opts) if NULL.
#   tset: optional test set, passed through to tdmClassifyLoop().
# Returns: the result list from tdmClassifyLoop(), augmented by
#   tdmClassifySummary().
main_wine <- function(opts=NULL,dset=NULL,tset=NULL) {
  if (is.null(opts)) stop("Argument opts missing");
  opts <- tdmOptsDefaultsSet(opts); # fill in all opts params which are not yet set (see tdmOptsDefaults.r)
  gdObj <- tdmGraAndLogInitialize(opts); # init graphics and log file
  #===============================================
  # PART 1: READ DATA
  #===============================================
  if (is.null(dset)) {
    cat1(opts,opts$filename,": Read data ...\n")
    dset <- tdmReadDataset(opts);
  }
  # which variable is response variable:
  response.variable <- "quality"
  # TST.COL marks the train/test split column and must not be used as input.
  ID.variable <- "TST.COL"
  # which variables are input variables (in this case all others):
  input.variables <- setdiff(names(dset), c(response.variable,ID.variable))
  #===============================================
  # PART 2 - 6
  #===============================================
  result <- tdmClassifyLoop(dset,response.variable,input.variables,opts,tset);
  # print summary output and attach certain columns (here: y,sd.y,dset) to list result:
  result <- tdmClassifySummary(result,opts,dset);
  tdmGraAndLogFinalize(opts,gdObj); # close graphics and log file
  result;
}
# Read the wine-quality *training* CSV named in opts$filename (semicolon-
# separated, '.' as decimal mark) from opts$path/opts$dir.data.
#
# Args:
#   opts: list with $path, $dir.data, $filename and $READ.NROW (maximum
#         number of rows to read; -1 reads everything).
# Returns: a data.frame whose first ncol-1 columns are coerced to numeric
#   and whose 12th column (the "quality" response) is a factor.
readTrnWine <- function(opts) {
  ds <- read.csv2(file=paste(opts$path,opts$dir.data, opts$filename, sep="/"), dec=".", sep=";", nrow=opts$READ.NROW,header=T);
  # seq_len() instead of 1:(ncol(ds)-1): the latter yields c(1, 0) when the
  # frame has a single column and would index out of range.
  for(i in seq_len(ncol(ds) - 1)){
    ds[,i] <- as.numeric(ds[,i])
  }
  ds[,12] <- as.factor(ds[,12])
  return(ds)
}
# Read the wine-quality *test* CSV named in opts$filetest (semicolon-
# separated, '.' as decimal mark) from opts$path/opts$dir.data.
#
# Args:
#   opts: list with $path, $dir.data, $filetest and $READ.NROW (maximum
#         number of rows to read; -1 reads everything).
# Returns: a data.frame whose first ncol-1 columns are coerced to numeric
#   and whose 12th column (the "quality" response) is a factor.
readTstWine <- function(opts) {
  ds <- read.csv2(file=paste(opts$path,opts$dir.data, opts$filetest, sep="/"), dec=".", sep=";", nrow=opts$READ.NROW,header=T);
  # seq_len() instead of 1:(ncol(ds)-1): the latter yields c(1, 0) when the
  # frame has a single column and would index out of range.
  for(i in seq_len(ncol(ds) - 1)){
    ds[,i] <- as.numeric(ds[,i])
  }
  ds[,12] <- as.factor(ds[,12])
  return(ds)
}
# splitWine: just a helper function for startWine and finalWine which distributes the original
# wine quality data in 80% winequality-white-train.csv and 20% winequality-white-test.csv.
# Change the argument of set.seed, if you want another random distribution.
#
# Args:
#   opts: list with $path, $dir.data, $filename (train output file) and
#         $filetest (test output file).
# Side effects: writes the two split CSV files under opts$path/opts$dir.data.
splitWine <- function(opts) {
  # BUG FIX: the original built the input path as
  # paste(opts$dir.data, "winequality-white-orig.csv", sep="") — without
  # opts$path and without a "/" separator — while every other path in this
  # file (including the two write.table() calls below) uses
  # paste(path, dir.data, file, sep="/"). Build the read path the same way.
  ds <- read.csv2(file=paste(opts$path, opts$dir.data, "winequality-white-orig.csv", sep="/"), dec=".", sep=";", header=T);
  set.seed(4)
  # 20% of the rows go to the test file, the rest to the training file.
  smp <- sample(1:nrow(ds),0.2*nrow(ds))
  write.table(ds[ smp,],file=paste(opts$path,opts$dir.data, opts$filetest, sep="/"), dec=".", sep=";", col.names=T, row.names=F, quote=F)
  write.table(ds[-smp,],file=paste(opts$path,opts$dir.data, opts$filename, sep="/"), dec=".", sep=";", col.names=T, row.names=F, quote=F)
}
|
bb4d8ca763026df68626af65d1cd246af6eb9945
|
2415130ab3879d50deac0397e6e5859af723912d
|
/archive/oldSIRS.R
|
7159a43021fc3b741fd8d8caa1abd8024cc91db2
|
[] |
no_license
|
NutchaW/datacode
|
64c04e621c0c4eb4ab46ca3de560912551505c99
|
a93e47be86380b147cea0a231553fad0157ab83d
|
refs/heads/master
| 2020-05-07T20:11:50.919700
| 2019-04-22T22:21:46
| 2019-04-22T22:21:46
| 180,848,384
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 6,540
|
r
|
oldSIRS.R
|
library(matrixStats)
library(abind)
# SIRS: integrate an absolute-humidity-forced SIRS epidemic model one
# observation interval forward for an ensemble of state/parameter vectors.
#
# Args:
#   x:        7 x num_ens matrix; rows are [S, I, total incidence, R0max,
#             R0min, L (immunity duration), D (infectious period)] for each
#             ensemble member (see comment below).
#   ts:       start index into the AH series.
#   dt:       integration sub-step.
#   tmstep:   number of time units to integrate over.
#   N:        population size.
#   AH:       absolute-humidity time series (duplicated internally so
#             indexing at ts+tmstep cannot run off the end).
#   seed:     per-sub-step travel seeding of infections (mean of a Poisson
#             draw in the stochastic variant).
#   discrete: 0 = deterministic RK4 integration; 1 = RK4 scaffold with
#             Poisson-distributed transition counts.
# Returns: x with rows 1-3 updated (S, I, incidence per 100,000 over the
#   interval); rows 4-7 (parameters) are passed through unchanged.
SIRS <- function(x,ts,dt,tmstep,N,AH,seed,discrete) {
  # x=[S,I,total incidence,R0max,R0min,L,D]
  if (discrete==0) {
    AH=c(AH,AH)
    num_ens=dim(x)[2]
    BT1=matrix(0,length(AH),num_ens)
    BETA=matrix(0,length(AH),num_ens)
    # Per-member transmission rate over time: R0(t) = exp(a*AH + b) + R0min
    # with b = log(R0max - R0min); BETA = R0/D. The constant a = -180 is
    # presumably the fitted humidity coefficient of the AH-forced SIRS
    # model — confirm against the source publication.
    for (i in 1:num_ens) {
      # b=log(x[4,i]-x[5,i])
      if ((x[4,i]-x[5,i])>=0) {
        b=log(x[4,i]-x[5,i])
      }
      else {
        # R0max < R0min: take the complex log so the member still integrates
        # instead of producing NaN (hence the Mod() calls below).
        b=complex(real=log(-(x[4,i]-x[5,i])),imaginary = pi)
      }
      a=-180
      BT1[,i]=exp(a*AH+b)+x[5,i]
      BETA[,i]=BT1[,i]/x[7,i]
    }
    # Sr=array(0,c(3,num_ens,tmstep+1))
    # Incidence=matrix(0,(tmstep+1),num_ens)
    # Trajectory storage per sub-step: Sr[1,,]=S, Sr[2,,]=I; Incidence holds
    # new infections per sub-step.
    Sr=array(0,c(3,num_ens,abs(tmstep)+1))
    Incidence=matrix(0,(abs(tmstep)+1),num_ens)
    Sr[,,1]=x[1:3,]
    L=x[6,]
    D=x[7,]
    # start integration
    tcnt=0
    for (t in seq(ts+dt,ts+tmstep,by = dt)) {
      tcnt=tcnt+1
      # --- RK4 stage 1: expected transitions at the current state ---
      # immunity loss (R->S), infection (S->I, capped at S), recovery
      # (I->R, capped at I).
      # NOTE(review): Mod() is never negative, so the "< 0" clamps below can
      # never fire — presumably Re() was intended; confirm.
      Eimmloss=dt*((N*rep(1,num_ens)-Sr[1,,tcnt]-Sr[2,,tcnt])/L)
      tempind=Mod(dt*(BETA[t,]*Sr[1,,tcnt]*Sr[2,,tcnt]/N))>Mod(Sr[1,,tcnt])
      Einf=dt*(BETA[t,]*Sr[1,,tcnt]*Sr[2,,tcnt]/N)
      Einf[tempind]=Sr[1,,tcnt][tempind]
      tempind2=Mod(dt*(Sr[2,,tcnt]/D))>Mod(Sr[2,,tcnt])
      Erecov=dt*(Sr[2,,tcnt]/D)
      Erecov[tempind2]=Sr[2,,tcnt][tempind2]
      Eimmloss[Mod(Eimmloss)<0]=0
      Einf[Mod(Einf)<0]=0
      Erecov[Mod(Erecov)<0]=0
      sk1=Eimmloss-Einf
      ik1=Einf-Erecov
      ik1i=Einf
      # Half-step state for stage 2.
      Ts1=Sr[1,,tcnt]+round(sk1/2)
      Ti1=Sr[2,,tcnt]+round(ik1/2)
      # --- RK4 stage 2: rates at the stage-1 midpoint ---
      Eimmloss=dt*((N*rep(1,num_ens)-Ts1-Ti1)/L)
      tempind=Mod(dt*(BETA[t,]*Ts1*Ti1/N))>Mod(Ts1)
      Einf=dt*(BETA[t,]*Ts1*Ti1/N)
      Einf[tempind]=Ts1[tempind]
      tempind2=Mod(dt*(Ti1/D))>Mod(Ti1)
      Erecov=dt*(Ti1/D)
      Erecov[tempind2]=Ti1[tempind2]
      Eimmloss[Mod(Eimmloss)<0]=0
      Einf[Mod(Einf)<0]=0
      Erecov[Mod(Erecov)<0]=0
      sk2=Eimmloss-Einf
      ik2=Einf-Erecov
      ik2i=Einf
      Ts2=Sr[1,,tcnt]+round(sk2/2)
      Ti2=Sr[2,,tcnt]+round(ik2/2)
      # --- RK4 stage 3: rates at the stage-2 midpoint ---
      Eimmloss=dt*((N*rep(1,num_ens)-Ts2-Ti2)/L)
      tempind=Mod(dt*(BETA[t,]*Ts2*Ti2/N))>Mod(Ts2)
      Einf=dt*(BETA[t,]*Ts2*Ti2/N)
      Einf[tempind]=Ts2[tempind]
      tempind2=Mod(dt*(Ti2/D))>Mod(Ti2)
      Erecov=dt*(Ti2/D)
      Erecov[tempind2]=Ti2[tempind2]
      Eimmloss[Mod(Eimmloss)<0]=0
      Einf[Mod(Einf)<0]=0
      Erecov[Mod(Erecov)<0]=0
      sk3=Eimmloss-Einf
      ik3=Einf-Erecov
      ik3i=Einf
      # Full-step state for stage 4.
      Ts3=Sr[1,,tcnt]+round(sk3)
      Ti3=Sr[2,,tcnt]+round(ik3)
      # --- RK4 stage 4: rates at the stage-3 endpoint ---
      Eimmloss=dt*((N*rep(1,num_ens)-Ts3-Ti3)/L)
      tempind=Mod(dt*(BETA[t,]*Ts3*Ti3/N))>Mod(Ts3)
      Einf=dt*(BETA[t,]*Ts3*Ti3/N)
      Einf[tempind]=Ts3[tempind]
      tempind2=Mod(dt*(Ti3/D))>Mod(Ti3)
      Erecov=dt*(Ti3/D)
      Erecov[tempind2]=Ti3[tempind2]
      Eimmloss[Mod(Eimmloss)<0]=0
      Einf[Mod(Einf)<0]=0
      Erecov[Mod(Erecov)<0]=0
      sk4=Eimmloss-Einf
      ik4=Einf-Erecov
      ik4i=Einf
      # RK4 weighted combination; 'seed' infections move S -> I each sub-step.
      Sr[1,,tcnt+1]=Sr[1,,tcnt]+round(sk1/6+sk2/3+sk3/3+sk4/6)-seed
      Sr[2,,tcnt+1]=Sr[2,,tcnt]+round(ik1/6+ik2/3+ik3/3+ik4/6)+seed
      Incidence[tcnt+1,]=round(ik1i/6+ik2i/3+ik3i/3+ik4i/6)+seed
    }
    x[1,]=Sr[1,,tcnt+1]
    x[2,]=Sr[2,,tcnt+1]
    # Row 3: cumulative incidence over the interval per 100,000 population.
    x[3,]=colSums(Incidence)/(N/100000)
  }
  if (discrete==1) {
    # Stochastic variant: identical RK4 scaffold, but each stage's expected
    # transition counts are replaced by Poisson draws with those means.
    AH=c(AH,AH)
    num_ens=dim(x)[2]
    BT1=matrix(0, length(AH),num_ens)
    BETA=matrix(0,length(AH),num_ens)
    for (i in 1:num_ens) {
      # b=log(x[4,i]-x[5,i])
      if ((x[4,i]-x[5,i])>=0) {
        b=log(x[4,i]-x[5,i])
      }
      else {
        b=complex(real=log(-(x[4,i]-x[5,i])),imaginary = pi)
      }
      a=-180
      BT1[,i]=exp(a*AH+b)+x[5,i]
      BETA[,i]=BT1[,i]/x[7,i]
    }
    Sr=array(0,c(3,num_ens,abs(tmstep)+1))
    Incidence=matrix(0,abs(tmstep)+1,num_ens)
    Sr[,,1]=x[1:3,]
    L=x[6,]
    D=x[7,]
    # start integration
    tcnt=0
    for (t in seq(ts+dt,ts+tmstep,by = dt)) {
      tcnt=tcnt+1
      # --- stage 1 ---
      Eimmloss=dt*((N*rep(1,num_ens)-Sr[1,,tcnt]-Sr[2,,tcnt])/L)
      tempind=Mod(dt*(BETA[t,]*Sr[1,,tcnt]*Sr[2,,tcnt]/N))>Mod(Sr[1,,tcnt])
      Einf=dt*(BETA[t,]*Sr[1,,tcnt]*Sr[2,,tcnt]/N)
      Einf[tempind]=Sr[1,,tcnt][tempind]
      tempind2=Mod(dt*(Sr[2,,tcnt]/D))>Mod(Sr[2,,tcnt])
      Erecov=dt*(Sr[2,,tcnt]/D)
      Erecov[tempind2]=Sr[2,,tcnt][tempind2]
      Eimmloss[Mod(Eimmloss)<0]=0
      Einf[Mod(Einf)<0]=0
      Erecov[Mod(Erecov)<0]=0
      # Poisson draws around the expected counts; rows of l1/l are
      # (immunity loss, infection, recovery).
      l1=rbind(Eimmloss,Einf,Erecov)
      l <- rpois(length(l1), l1)
      dim(l) <- dim(l1)
      sk1=l[1,]-l[2,]
      ik1=l[2,]-l[3,]
      ik1i=l[2,]
      Ts1=Sr[1,,tcnt]+round(sk1/2)
      Ti1=Sr[2,,tcnt]+round(ik1/2)
      # --- stage 2 ---
      Eimmloss=dt*((N*rep(1,num_ens)-Ts1-Ti1)/L)
      tempind=Mod(dt*(BETA[t,]*Ts1*Ti1/N))>Mod(Ts1)
      Einf=dt*(BETA[t,]*Ts1*Ti1/N)
      Einf[tempind]=Ts1[tempind]
      tempind2=Mod(dt*(Ti1/D))>Mod(Ti1)
      Erecov=dt*(Ti1/D)
      Erecov[tempind2]=Ti1[tempind2]
      Eimmloss[Mod(Eimmloss)<0]=0
      Einf[Mod(Einf)<0]=0
      Erecov[Mod(Erecov)<0]=0
      l1=rbind(Eimmloss,Einf,Erecov)
      l <- rpois(length(l1), l1)
      dim(l) <- dim(l1)
      sk2=l[1,]-l[2,]
      ik2=l[2,]-l[3,]
      ik2i=l[2,]
      Ts2=Sr[1,,tcnt]+round(sk2/2)
      Ti2=Sr[2,,tcnt]+round(ik2/2)
      # --- stage 3 ---
      Eimmloss=dt*((N*rep(1,num_ens)-Ts2-Ti2)/L)
      tempind=Mod(dt*(BETA[t,]*Ts2*Ti2/N))>Mod(Ts2)
      Einf=dt*(BETA[t,]*Ts2*Ti2/N)
      Einf[tempind]=Ts2[tempind]
      tempind2=Mod(dt*(Ti2/D))>Mod(Ti2)
      Erecov=dt*(Ti2/D)
      Erecov[tempind2]=Ti2[tempind2]
      Eimmloss[Mod(Eimmloss)<0]=0
      Einf[Mod(Einf)<0]=0
      Erecov[Mod(Erecov)<0]=0
      l1=rbind(Eimmloss,Einf,Erecov)
      l <- rpois(length(l1), l1)
      dim(l) <- dim(l1)
      sk3=l[1,]-l[2,]
      ik3=l[2,]-l[3,]
      ik3i=l[2,]
      Ts3=Sr[1,,tcnt]+round(sk3)
      Ti3=Sr[2,,tcnt]+round(ik3)
      # --- stage 4 ---
      Eimmloss=dt*((N*rep(1,num_ens)-Ts3-Ti3)/L)
      tempind=Mod(dt*(BETA[t,]*Ts3*Ti3/N))>Mod(Ts3)
      Einf=dt*(BETA[t,]*Ts3*Ti3/N)
      Einf[tempind]=Ts3[tempind]
      tempind2=Mod(dt*(Ti3/D))>Mod(Ti3)
      Erecov=dt*(Ti3/D)
      Erecov[tempind2]=Ti3[tempind2]
      Eimmloss[Mod(Eimmloss)<0]=0
      Einf[Mod(Einf)<0]=0
      Erecov[Mod(Erecov)<0]=0
      l1=rbind(Eimmloss,Einf,Erecov)
      l <- rpois(length(l1), l1)
      dim(l) <- dim(l1)
      sk4=l[1,]-l[2,]
      ik4=l[2,]-l[3,]
      ik4i=l[2,]
      # Stochastic travel seeding for this sub-step.
      travel=rpois(1,seed)
      Sr[1,,tcnt+1]=Sr[1,,tcnt]+round(sk1/6+sk2/3+sk3/3+sk4/6)-travel
      Sr[2,,tcnt+1]=Sr[2,,tcnt]+round(ik1/6+ik2/3+ik3/3+ik4/6)+travel
      Incidence[tcnt+1,]=round(ik1i/6+ik2i/3+ik3i/3+ik4i/6)+travel
    }
    x[1,]=Sr[1,,tcnt+1]
    x[2,]=Sr[2,,tcnt+1]
    x[3,]=colSums(Incidence)/(N/100000)
  }
  return (x)
}
|
b4b94575a27e203a21b6415dabe20a5f7afca4a4
|
8bab16b727707fef6c8cef6418d16ffe293338e7
|
/server.R
|
b8c66c1e29b06a9e884af961aec4dea4104da3af
|
[] |
no_license
|
priyanka20-columbia/SQL_projects
|
3d6b04cb09e38502ce8fafe88d2eda7edf48f8eb
|
4667b5ff1b08c31b6b60227a09e41e8987ea7228
|
refs/heads/main
| 2023-03-12T05:37:23.740472
| 2021-03-05T01:00:14
| 2021-03-05T01:00:14
| 344,652,175
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 4,254
|
r
|
server.R
|
# Shiny server for the artist-lookup app.
#
# Queries a DBI connection `con` (created outside this function, e.g. in
# global.R) for the artist typed into `input$arts` and renders:
#   * a page background image and a small artwork image,
#   * dynamic section titles,
#   * summary metrics (number of songs, average rating, number of plays),
#   * a plays-by-month chart, and
#   * a table of the artist's songs with star ratings.
#
# NOTE(review): `input$arts` is pasted directly into every SQL statement, so
# the app is open to SQL injection. Prefer parameterized queries
# (e.g. dbGetQuery(con, sql, params = list(...))) if the driver supports them.
server <- function(input, output, session) {
  # background image----
  output$image <- renderUI(
    if (input$arts != '')
    {
      # Image file name = artist name lower-cased with the first space
      # removed (sub() replaces only the first match).
      imageName <- sub(" ", "", tolower(input$arts))
      setBackgroundImage(src = paste(imageName, ".png", sep =""))
    }
  )
  # small image----
  output$artImg <- renderUI(
    if (input$arts != '') {
      imageName <- sub(" ", "", tolower(input$arts))
      div(img(src = paste(imageName, ".png", sep =""), width = "100%"))
    }
  )
  # plays title----
  output$PlaysTitle <- renderText(
    if (input$arts != '') {
      # Fix: the original passed `sep = " "` to paste0(), which has no `sep`
      # argument -- the " " was silently concatenated onto the title.
      title <- paste0("<h4>Plays of ", input$arts, " Songs by Month</h4>")
    }
  )
  # songs title----
  output$SongsTitle <- renderText(
    if (input$arts != '') {
      title <- paste("<h4>Songs by", input$arts , "</h4>", sep = " ")
    }
  )
  # reactive variables----
  # sel = songs table, nS = number of songs, aR = average rating, nP = plays
  a <- reactiveValues(sel = NULL, nS = 0, aR = 0, nP = 0)
  # render the summary metrics----
  output$nSongs <- renderText(paste0('<h4>', a$nS, '</h4>'))
  output$avgRat <- renderText(paste0('<h4>', a$aR, '</h4>'))
  output$nPlays <- renderText(paste0('<h4>', a$nP, '</h4>'))
  # chart of plays by month----
  output$playChrt <- renderPlot(
    {
      if (input$arts != '') {
        # Monthly play counts for the selected artist.
        # Fix: `statement <-` was an accidental assignment inside the call;
        # use the named argument instead.
        d <- dbGetQuery(
          conn = con,
          statement = paste0('select Count(play_id)::numeric "Plays", Extract(month from dop)::numeric "Month" ',
                             'from plays ',
                             'where artist ilike \'%', input$arts, '%\' ',
                             'group by Extract(month from dop) ;')
        )
        ggplot(d, aes(x = Month, y = Plays )) +
          theme_minimal() +
          scale_x_discrete(limits=c( 3, 6, 9, 12)) +
          theme(
            axis.title.x = element_text(size=16),
            axis.title.y = element_text(size=16),
            axis.text.x = element_text(size = 14),
            axis.text.y = element_text(size = 14)
          ) +
          geom_point(size = 2.5, color="red") +
          geom_smooth()
      }
    }
  )
  # populate the reactive variable a when an artist is selected----
  # (removed the no-op statements `session = session` / `inputId = 'arts'`
  # left over from an earlier version)
  observeEvent(
    eventExpr = input$arts,
    {
      # _data for the songs table----
      a$sel <- dbGetQuery(
        conn = con,
        statement = paste0('Select dor "Date of Release", song_name "Song Name", ',
                           'case rating ',
                           'when 1 then \'*\' ',
                           'when 2 then \'* *\' ',
                           'when 3 then \'* * *\' ',
                           'when 4 then \'* * * *\' ',
                           'when 5 then \'* * * * *\' ',
                           'end as "Rating" ',
                           'from songs ',
                           'where artist ilike \'%', input$arts, '%\'; ')
      )
      # _data for number of songs metric----
      if (input$arts == '') {
        a$nS <- 0
      }else{
        a$nS <- dbGetQuery(
          conn = con,
          statement = paste0('Select count(*)::numeric ',
                             'from songs ',
                             'where artist ilike \'%', input$arts, '%\'; ')
        )
      }
      # _data for average rating metric----
      if (input$arts == '') {
        a$aR <- 'NA'
      }else{
        a$aR <- dbGetQuery(
          conn = con,
          statement = paste0('Select ROUND(avg(rating),2) ',
                             'from songs ',
                             'where artist ilike \'%', input$arts, '%\'; ')
        )
      }
      # _data for number of plays metric----
      if (input$arts == '') {
        a$nP <- 0
      }else{
        a$nP <- dbGetQuery(
          conn = con,
          statement = paste0('Select count(*)::numeric ',
                             'from plays ',
                             'where artist ilike \'%', input$arts, '%\'; ')
        )
      }
    }
  )
  # table of songs by selected artist----
  output$songsA <- renderDataTable(
    if (input$arts != '') {
      data <- a$sel
    }, options = list(
      pageLength = 10
    )
  )
}
|
346c9551383bc8032aabb0ab829020598531fb13
|
ffdea92d4315e4363dd4ae673a1a6adf82a761b5
|
/data/genthat_extracted_code/simone/examples/plot.simone.Rd.R
|
041b7dc947b1574a9af91b297745c26d37dc82e2
|
[] |
no_license
|
surayaaramli/typeRrh
|
d257ac8905c49123f4ccd4e377ee3dfc84d1636c
|
66e6996f31961bc8b9aafe1a6a6098327b66bf71
|
refs/heads/master
| 2023-05-05T04:05:31.617869
| 2019-04-25T22:10:06
| 2019-04-25T22:10:06
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 637
|
r
|
plot.simone.Rd.R
|
library(simone)
### Name: plot.simone
### Title: Graphical representation of SIMoNe outputs
### Aliases: plot.simone
### Keywords: hplot

### ** Examples

## data set and network generation
g <- rNetwork(p=50, pi=50)
data <- rTranscriptData(300,g)
## running simone -- access data$X directly instead of attach()/detach(),
## which modify the search path and can silently mask other objects
res <- simone(data$X, type="steady-state")
## plotting the results: just the ROC curve
plot(res, output=c("ROC"), ref.graph=g$A)
## plotting the results: just the path as a function of the penalty
plot(res, output=c("path.penalty"), main="I want to put my own title")
## plotting the results: everything possible (the default)
plot(res)
|
5936cdbebcbd4d4e7893ed22453561d0b8fee0ea
|
d8978ecd115f95d9e4f6d987d54c2cb6541a6bf4
|
/code/4_analyzeData/wrds/discountingBehaviorModules.R
|
2db7bedcf5e79171d48652aea25e6d5fe3e4e984
|
[] |
no_license
|
emallickhossain/WarehouseClubs
|
f0eaab1b645e13654de655c2f13e47aa72b02a42
|
7867171cdb3ca3fe32ec778dd8043d538ab1f6ef
|
refs/heads/master
| 2021-06-28T05:37:21.813087
| 2020-09-16T21:49:48
| 2020-09-16T21:49:48
| 149,994,881
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 13,608
|
r
|
discountingBehaviorModules.R
|
# Computes household propensity for different discounting behaviors
# Fact 1: About 40% of purchases are made without any discounts and another 45%
# are made with only 1 type of discount. Less than 15% of purchases combine 2 or
# more discount methods.
# Fact 2: These patterns are correlated with income with high income households
# more likely to use any kind of discounting as well as using multiple kinds of
# discounts. Separating out single discounts versus multiple discounts
# does not qualitatively change the results.
# Fact 3: Buying in bulk (defined as a purchase in the top 2 quintiles of the
# product module's size distribution) is the most common form of discounting
# and it is more common for storable products.
# Fact 4: Rich households are more likely to buy in bulk and this differences is
# primarily concentrated among storable products.
# Fact 5: Buying generic brands is decreasing in income.
# Fact 6: There are some changes in behavior over time (especially during the
# Great Recession), but even allowing for these trends by including a income * year
# regressor does not change the core relationship between bulk purchasing and income.
# Fact 7: After adjusting for household demographics, as income increases, both
# the median propensity to make bulk purchases increases as well as the overall
# tail of the distribution.
library(data.table)
library(ggplot2)
library(ggthemes)
library(ggridges)
library(lfe)
library(purrr)
library(stringr)
library(stargazer)
threads <- 8  # data.table thread count for fread/fwrite
# Getting trips and panel data for demographics and to deflate expenditures
trips <- fread("/scratch/upenn/hossaine/fullTrips.csv", nThread = threads,
               select = c("trip_code_uc", "household_code", "panel_year", "purchase_date"))
# Keep only the year-month of each trip; drop the full purchase date.
trips[, c("yearMonth", "purchase_date") := .(substr(purchase_date, 1, 7), NULL)]
# Getting CPI to deflate values to Jan-2010 base
cpi <- fread("/scratch/upenn/hossaine/cpi.csv")
# Re-index the CPI so that 2010-01 = 100.
cpi[, "base" := cpi[date == "2010-01-01"]$value]
cpi[, "newIndex" := value / base * 100]
cpi[, c("base", "value") := NULL]
cpi[, "yearMonth" := .(substr(date, 1, 7))]
cpi[, "date" := NULL]
# Combining with CPI for price deflation
setkey(trips, yearMonth)
setkey(cpi, yearMonth)
# One row per trip with its month's CPI index (newIndex) attached.
fullData <- merge(trips, cpi, by = "yearMonth")[, "yearMonth" := NULL]
# Housekeeping
rm(trips, cpi)
setkey(fullData, trip_code_uc)
# Getting all purchases and coding them by discounting behavior.
# Each annual purchase file is processed separately; per-year results are
# collected in a pre-allocated list and bound once at the end (the original
# rbindlist-ed inside the loop, re-copying the accumulated table each year).
years <- 2004:2017
yearlyAvg <- vector("list", length(years))
for (ix in seq_along(years)) {
  yr <- years[ix]
  print(yr)
  purch <- fread(paste0("/scratch/upenn/hossaine/fullPurch", yr, ".csv"),
                 nThread = threads,
                 select = c("trip_code_uc", "coupon_value",
                            "brand_code_uc", "product_module_code", "food",
                            "packagePrice", "quintile", "quantity"),
                 key = "trip_code_uc")
  # Discount indicators: coupon used, generic brand (brand code 536746),
  # bulk = package in the top 2 size quintiles of its product module.
  purch[, ':=' (coupon = as.integer(coupon_value > 0),
                generic = as.integer(brand_code_uc == 536746),
                bulk = as.integer(quintile >= 4),
                totalExpenditure = packagePrice * quantity)]
  purch[, c("coupon_value", "packagePrice", "quantity", "brand_code_uc") := NULL]
  # Deflating expenditures to Jan-2010 dollars using the trip month's CPI.
  setkey(purch, trip_code_uc)
  purch <- merge(purch, fullData, by = "trip_code_uc")[, "trip_code_uc" := NULL]
  purch[, "realExp" := totalExpenditure / newIndex * 100]
  purch[, c("totalExpenditure", "newIndex") := NULL]
  # Coding combination discounts (0, 1, 2 or all 3 discount types at once).
  purch[, "savingsTypes" := coupon + generic + bulk]
  purch[, ':=' (none = (savingsTypes == 0),
                one = (savingsTypes == 1),
                two = (savingsTypes == 2),
                three = (savingsTypes == 3))]
  # Household propensities by product module: expenditure-weighted shares.
  cols <- c("coupon", "generic", "bulk", "none", "one", "two", "three")
  yearlyAvg[[ix]] <- purch[, lapply(.SD, weighted.mean, w = realExp),
                           .SDcols = cols,
                           by = .(household_code, panel_year,
                                  product_module_code, food)]
}
discBehaviorModule <- rbindlist(yearlyAvg, use.names = TRUE)
# Saving
fwrite(discBehaviorModule, "/scratch/upenn/hossaine/discBehaviorModule.csv",
       nThread = threads)
# Adding demographics
discBehaviorModule <- fread("/scratch/upenn/hossaine/discBehaviorModule.csv",
                            nThread = threads)
panel <- fread("/scratch/upenn/hossaine/fullPanel.csv", nThread = threads,
               select = c("panel_year", "household_code", "projection_factor",
                          "household_income", "age", "men", "women", "nChildren",
                          "dma_cd", "household_income_coarse", "married",
                          "college", "type_of_residence", "carShare"),
               key = c("household_code", "panel_year"))
# Income enters the regressions as a categorical variable (one dummy per bin).
panel[, "household_income" := as.factor(household_income)]
# Household size: adults = men + women.
panel[, "adults" := men + women]
discBehaviorModule <- merge(discBehaviorModule, panel,
                            by = c("household_code", "panel_year"))
# Running regressions
# If you add an interaction with panel year, you can test if there have been
# different trends over time, but nothing seemed significant or big enough
# to change the overall picture of increasing bulk with income.
# Regress one discount-propensity measure `y` on income and demographics for a
# single product module, with market (dma_cd) and year fixed effects.
# Returns a data.table of coefficients merged with their 95% confidence
# intervals, tagged with the discount type and module code.
runRegAll <- function(y, module) {
  rhs <- paste("household_income + age + adults + nChildren + married +",
               "type_of_residence + carShare + college | dma_cd + panel_year")
  fml <- as.formula(paste(y, "~", rhs))
  # All products
  dat <- discBehaviorModule[product_module_code == module]
  fit <- felm(formula = fml, data = dat, weights = dat$projection_factor)
  # Point estimates and confidence intervals, keyed by coefficient name.
  est <- as.data.table(summary(fit)$coefficients, keep.rownames = TRUE)
  ci <- as.data.table(confint(fit), keep.rownames = TRUE)
  out <- merge(est, ci, by = "rn")
  out[, c("discount", "module") := .(y, module)]
  out
}
discounts <- c("bulk")
# Product modules to analyze.
# NOTE(review): a label for module 7270 (Tampons) is assigned below, but 7270
# is not in this vector, so that label never matches any row -- confirm
# whether 7270 should be included here.
modules <- c(7260, 7734, 8444, 3625, 4100)
toRun <- expand.grid(y = discounts, module = modules)
# One regression per (discount type, module) pair, stacked into one table.
finalCoefs <- rbindlist(map2(toRun$y, toRun$module, runRegAll), use.names = TRUE)
# Adding in module names
finalCoefs[module == 7260, "Product" := "Toilet Paper"]
finalCoefs[module == 7734, "Product" := "Paper Towels"]
finalCoefs[module == 8444, "Product" := "Diapers"]
finalCoefs[module == 7270, "Product" := "Tampons"]
finalCoefs[module == 3625, "Product" := "Milk"]
finalCoefs[module == 4100, "Product" := "Eggs"]
# Organizing graph data
# Keep only the income dummies and map each income bin code to a numeric
# value (presumably the bin midpoint in $000s -- confirm against the Nielsen
# income coding) so income can be plotted on a continuous axis.
graphData <- finalCoefs[grepl("household_income", rn)]
graphData[, "rn" := gsub("household_income", "", rn)]
graphData[, "rn" := factor(rn, levels = c(4, 6, 8, 10, 11, 13, 15, 16, 17, 18, 19, 21, 23, 26, 27),
                           labels = c(6.5, 9, 11, 13.5, 17.5, 22.5, 27.5, 32.5, 37.5, 42.5, 47.5, 55, 65, 85, 100),
                           ordered = TRUE)]
graphData[, "rn" := as.numeric(as.character(rn))]
setnames(graphData, c("rn", "beta", "se", "t", "p", "LCL", "UCL", "disc",
                      "module", "Product"))
# Housekeeping
graphData[, "food" := ifelse(Product %in% c("Milk", "Eggs"), "Food", "Non-Food")]
# Graphing
# Income (x axis) vs. the estimated difference in bulk purchasing relative to
# the omitted income group, in percentage points, with 95% CIs, faceted by
# food vs. non-food products.
ggplot(data = graphData,
       aes(x = rn, y = beta * 100, color = Product)) +
  geom_errorbar(aes(ymin = 100 * LCL,
                    ymax = 100 * UCL), width = 0.2) +
  geom_point(aes(shape = Product), size = 3) +
  geom_vline(xintercept = 0) +
  geom_hline(yintercept = 0) +
  facet_wrap(vars(food), scales = "fixed") +
  labs(x = "Household Income",
       y = "Difference in Bulk Purchasing (Percentage Points)") +
  theme_tufte() +
  theme(axis.title = element_text(),
        plot.caption = element_text(hjust = 0),
        legend.position = "bottom") +
  scale_shape_manual(values = c(7, 15:19)) +
  scale_color_grey()
# scale_color_colorblind()
ggsave("./figures/savingsBehaviorModules.pdf", height = 4, width = 6)
# ggsave("./figures/savingsBehaviorModulesColor.pdf", height = 4, width = 6)
################################################################################
########### ROBUSTNESS #########################################################
################################################################################
# Re-estimate the bulk-buying regression for one product module, adding
# covariates one block at a time, and write the full comparison table.
# Specifications: (1) income only; (2)-(7) cumulatively add married, age,
# household size, residence type, car access and college; (8) adds market
# fixed effects; (9) adds market and year fixed effects.
# Returns the preferred specification (9).
getRobust <- function(modCode) {
  # Subset once and reuse across all specifications.
  d <- discBehaviorModule[product_module_code == modCode]
  w <- d$projection_factor

  # Cumulative covariate blocks for specifications (1)-(7).
  covars <- c("household_income", "married", "age", "adults + nChildren",
              "type_of_residence", "carShare", "college")
  rhs <- vapply(seq_along(covars),
                function(i) paste(covars[seq_len(i)], collapse = " + "),
                character(1))
  # (8) and (9): full covariate set plus market / market + year FEs.
  fullRhs <- rhs[length(rhs)]
  rhs <- c(rhs, paste(fullRhs, "| dma_cd"), paste(fullRhs, "| dma_cd + panel_year"))

  regs <- lapply(rhs, function(r) {
    felm(formula = as.formula(paste("bulk ~", r)), data = d, weights = w)
  })

  # FE indicator rows sized to the number of specifications (the original
  # hard-coded vectors had one entry too many for a 9-column table).
  nSpec <- length(regs)
  feRows <- list(c("Market FE's", rep("N", nSpec - 2), "Y", "Y"),
                 c("Year FE's", rep("N", nSpec - 1), "Y"))

  do.call(stargazer,
          c(regs,
            list(type = "text",
                 add.lines = feRows,
                 single.row = FALSE, no.space = TRUE, omit.stat = c("ser", "rsq"),
                 out.header = FALSE,
                 dep.var.caption = "", dep.var.labels.include = FALSE,
                 omit = c("Constant"),
                 covariate.labels = c("8-10k", "10-12k", "12-15k", "15-20k", "20-25k",
                                      "25-30k", "30-35k", "35-40k", "40-45k", "45-50k",
                                      "50-60k", "60-70k", "70-100k", ">100k", "Married",
                                      "Age", "Adults", "Children", "Single-Family Home",
                                      "% Car Access", "College"),
                 notes.align = "l",
                 notes.append = TRUE,
                 digits = 3,
                 # Fixed column references (FE columns are (8) and (9)) and
                 # the "calulations" typo in the original note.
                 notes = c("Source: Author calculations from Nielsen Consumer Panel. Columns (8) and (9) ",
                           "cluster standard errors at the market level"),
                 label = paste0("tab:discountingBehavior", modCode),
                 # Title now reflects the module being estimated instead of
                 # always saying "(Toilet Paper)".
                 title = paste0("Correlation of Bulk Buying and Demographics (module ",
                                modCode, ")"),
                 out = paste0("tables/AppendixDiscountingBehavior", modCode, ".tex"))))
  regs[[nSpec]]
}
# Run the robustness table for each product module.
regtp <- getRobust(7260)          # toilet paper
regPaperTowel <- getRobust(7734)  # paper towels
regDiaper <- getRobust(8444)      # diapers
regTamp <- getRobust(7270)        # tampons (not shown in the combined table)
regMilk <- getRobust(3625)        # milk
regEggs <- getRobust(4100)        # eggs
# Combining only the last regression (full covariates + market and year FEs)
# for each product; regression order matches column.labels below.
stargazer(regtp, regPaperTowel, regDiaper, regEggs, regMilk, type = "text",
          add.lines = list(c("Market FE's", "Y", "Y", "Y", "Y", "Y"),
                           c("Year FE's", "Y", "Y", "Y", "Y", "Y")),
          single.row = FALSE, no.space = TRUE, omit.stat = c("ser", "rsq"),
          out.header = FALSE,
          dep.var.caption = "", dep.var.labels.include = FALSE,
          omit = c("Constant"),
          column.labels = c("Toilet Paper", "Paper Towels", "Diapers", "Eggs", "Milk"),
          covariate.labels = c("8-10k", "10-12k", "12-15k", "15-20k", "20-25k",
                               "25-30k", "30-35k", "35-40k", "40-45k", "45-50k",
                               "50-60k", "60-70k", "70-100k", ">100k", "Married",
                               "Age", "Adults", "Children", "Single-Family Home",
                               "% Car Access", "College"),
          notes.align = "l",
          notes.append = TRUE,
          digits = 3,
          out = "tables/AppendixDiscountingBehaviorMods.tex")
|
7d0d0f94fbc37ebc6568051ed7363f85a356f213
|
265a5fef2d4ddafce8e9cc3417b114a26faee9d2
|
/man/lines.Rd
|
9696e173ac43b3e1c0b0f2945496942bfd9b096e
|
[] |
no_license
|
cran/tutoR
|
8802d45239f6e4175417b280864be91a9e496b2c
|
fc31e8cb4d25a03d0f9a43ab0780d9f764115976
|
refs/heads/master
| 2021-01-01T15:50:42.410396
| 2006-11-24T00:00:00
| 2006-11-24T00:00:00
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,179
|
rd
|
lines.Rd
|
\name{lines}
\alias{lines}
\title{Add Connected Line Segments to a Plot - tutoR mask}
\description{
A generic function taking coordinates given in various ways and
joining the corresponding points with line segments.
}
\usage{
lines(x, y, \dots)
}
\arguments{
\item{x, y}{coordinate vectors of points to join.}
\item{\dots}{Further parameters are as follows.
\code{type}: character indicating the type of plotting; actually any of
the \code{type}s as in \code{\link{plot}}.
\code{col}: color to use. This can be vector of length greater than
one, but only the first value will be used for lines (but all will
be used for symbols).
\code{lty}: line type to use.
\code{\dots}: Further graphical parameters (see \code{\link{par}}) may
also be supplied as arguments, particularly, line width, \code{lwd}
and, for \code{type = "b"}, \code{pch}.
}
}
\details{
The tutoR mask for \code{lines} validates inputs received,
as with the tutoR plot() mask.
The coordinates can be passed to \code{lines} in a plotting structure
(a list with \code{x} and \code{y} components), a time series,
etc. See \code{\link{xy.coords}}.
The coordinates can contain \code{NA} values. If a point contains
\code{NA} in either its \code{x} or \code{y} value, it is omitted from
the plot, and lines are not drawn to or from such points. Thus
missing values can be used to achieve breaks in lines.
For \code{type = "h"}, \code{col} can be a vector and will be recycled
as needed.
\code{lwd} can be a vector: its first element will apply to lines but
the whole vector to symbols (recycled as necessary).
}
\references{
Becker, R. A., Chambers, J. M. and Wilks, A. R. (1988)
\emph{The New S Language}.
Wadsworth \& Brooks/Cole.
}
\seealso{
\code{\link{lines.default}},
\code{\link{points}}, particularly for \code{type \%in\% c("p","b","o")},
\code{\link{plot}},
and the underlying \dQuote{primitive} \code{\link{plot.xy}}.
\code{\link{par}} for how to specify colors.
}
\examples{
# draw a smooth line through a scatter plot
plot(cars, main="Stopping Distance versus Speed")
lines(lowess(cars))
}
\keyword{aplot}
|
a250d68584dc3c5373a853698bc16427806b6049
|
462e85f721cccf24af2f05cc4ed3ed5d4047487a
|
/R/teomporal.R
|
dc897394dd0600bf1245ba9babd9422f5f92e399
|
[] |
no_license
|
reggenlab/UniPath
|
e62941c60040feee4c0d70144ce4729874b5e0e5
|
93758460d680a5be49dc74321cdab0bb71f5095d
|
refs/heads/master
| 2021-07-04T08:03:14.700142
| 2020-11-24T17:04:24
| 2020-11-24T17:04:24
| 201,252,990
| 2
| 4
| null | null | null | null |
UTF-8
|
R
| false
| false
| 989
|
r
|
teomporal.R
|
#' Differential Pathways
#'
#' For every pathway (row of \code{data}) and every class in \code{group},
#' compares the transformed p-value scores of cells in the class against all
#' remaining cells using a Wilcoxon rank-sum test. Adjusted p-values are
#' first converted to scores via \code{-log2(p + 1e-20)}, with \code{NA}
#' entries treated as p = 1.
#'
#' @param data Matrix of adjusted raw p-values (pathways x cells).
#' @param group Vector of class labels, one per column of \code{data}, among
#'   which differential pathway analysis is performed.
#'
#' @return A list with \code{wilk} (Wilcoxon rank-sum p-values), \code{FC}
#'   (difference of means of the transformed scores) and \code{FC1}
#'   (difference of medians), each a pathways x classes matrix.
#' @export
#'
#' @examples
#' \dontrun{
#' temporaldif(pval_matrix, cell_groups)
#' }
temporaldif <- function(data, group) {
  clases <- unique(group)
  # Treat missing p-values as completely non-significant (p = 1).
  data[is.na(data)] <- 1
  # Convert p-values to positive scores; the 1E-20 floor avoids -log2(0).
  data <- -log2(data + 1E-20)
  wilk <- matrix(1, nrow(data), length(clases))
  FC <- wilk
  FC1 <- wilk
  rownames(wilk) <- rownames(data)
  for (i in seq_along(clases)) {
    pos <- which(group == clases[i])   # cells in this class
    pos1 <- which(group != clases[i])  # all other cells
    for (j in seq_len(nrow(data))) {
      tri <- wilcox.test(data[j, pos], data[j, pos1])
      wilk[j, i] <- tri$p.value
      FC[j, i] <- mean(data[j, pos]) - mean(data[j, pos1])
      FC1[j, i] <- median(data[j, pos]) - median(data[j, pos1])
    }
  }
  list(wilk = wilk, FC = FC, FC1 = FC1)
}
|
d7a216394dbe3ce122369361bc53c91215017830
|
b0d0a890851d4f3f658e3c7d9fd8d14d0bb33f3e
|
/Supplementary Material/Replication Code/US-SCF/code/step1e_scf_iop_age_adj_imp5.R
|
266acd65125b968af424ad185fb1322dc8e8b4d0
|
[] |
no_license
|
Juannadie/oep-wealth-inequality-inh-fam-background
|
7eff1879c5c7c67f98296804c1f3253a8c43b60c
|
9e45ff2002e18c180a94d123448aec071f47aa5f
|
refs/heads/master
| 2023-06-30T14:59:29.829201
| 2021-08-06T22:58:55
| 2021-08-06T22:58:55
| 393,522,711
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 3,343
|
r
|
step1e_scf_iop_age_adj_imp5.R
|
# NOW WE PROCEED TO WORK WITH THE AGGREGATE DATABASE
# Step 1e: strip the age/gender profile out of equivalized wealth
# (imputation 5) by regressing log wealth on an age polynomial interacted
# with gender, then keeping intercept + residual.
# NOTE(review): the second setwd() silently overrides the first, and both are
# machine-specific absolute paths -- consider relative paths or here::here().
setwd("/Users/mac/Google Drive/A-UK-Research/IO-Wealth-All-Countries/SCF-IOp/code") #Set Working Directory
setwd("/Users/Juan/Google Drive/A-UK-Research/IO-Wealth-All-Countries/SCF-IOp/code") #Set Working Directory for the LAPTOP
options ("scipen"=100, "digits"=10)
library(Hmisc)
library(reldist)
library(tidyverse)
library(sjPlot)
library(sjmisc)
library(sjlabelled)
library(snakecase)
library(survey)
library(quantreg)
# LET US LOAD THE DATA
dataush <- readRDS(file = "datasets/SCF-2016-all-after-1d-imp5.rds")
# THEN WE CONVERT THE WEALTH DATA INTO NET TERMS OF AGE AND GENDER
# Age polynomial centered at 65 (quartic).
dataush$agedif <- dataush$age - 65
dataush$agedif2 <- (dataush$agedif)^2
dataush$agedif3 <- (dataush$agedif)^3
dataush$agedif4 <- (dataush$agedif)^4
# Female dummy: sex code "2" = female.
dataush$femaledummy <- 0
dataush$femaledummy[dataush$sex == "2"] <- 1
# We convert sex to factor for the graph
dataush$sexfactor <- as.factor (dataush$sex)
levels(dataush$sexfactor) <- c("Male","Female")
# Female-specific age polynomial (interaction terms).
dataush$femaleagedif <- dataush$agedif * dataush$femaledummy
dataush$femaleagedif2 <- (dataush$femaleagedif)^2
dataush$femaleagedif3 <- (dataush$femaleagedif)^3
dataush$femaleagedif4 <- (dataush$femaleagedif)^4
dataush$wealth <- dataush$eqwealth
# Count of non-positive wealth values; log(wealth) below is undefined for
# these observations -- confirm how lm() should treat them.
NROW(dataush$wealth[dataush$wealth <= 0])
# Weighted regression of log wealth on the age/gender profile.
modelwealth <- lm(log(wealth) ~ agedif + agedif2 + agedif3 + agedif4 + femaledummy + femaleagedif + femaleagedif2 + femaleagedif3 + femaleagedif4, data = dataush, weights = weight)
# Alternative specifications kept for reference (levels OLS, median
# regression, survey-weighted Poisson):
#modelwealth <- lm((wealth) ~ agedif + agedif2 + agedif3 + agedif4 + femaledummy + femaleagedif + femaleagedif2 + femaleagedif3 + femaleagedif4, data = dataush, weights = weight)
#modelwealth <- rq((wealth) ~ agedif + agedif2 + agedif3 + agedif4 + femaledummy + femaleagedif + femaleagedif2 + femaleagedif3 + femaleagedif4,tau = .5, data = dataush, weights = weight)
#The model
#modelwealthpoisson <- svyglm(formula = (wealth) ~ agedif + agedif2 + agedif3 + agedif4 + femaledummy + femaleagedif + femaleagedif2 + femaleagedif3 + femaleagedif4, design = svydesign(ids = ~1, weights = dataush$weight, data = dataush), rescale = T,family = poisson)
#summodelwealthpoisson<- summary (svyinhpoisson3)
#modelwealthus <- tab_model(modelwealthpoisson, digits = 3, digits.p = 3, show.fstat = T, show.se = T, show.aic = T)
#modelwealthus
#dataush$wealthpredict <- modelwealth$coefficients[1] + modelwealth$resid
#modelwealth <- lm((wealth) ~ log(agedif^2) + femaledummy + log(femaleagedif^2), data = dataush, weights = weight)
summary(modelwealth)
modelwealthus <- tab_model(modelwealth, digits = 3, digits.p = 3, show.fstat = T, show.se = T, show.aic = T)
modelwealthus
#dataush$wealthpredict <- predict(modelwealth)
# Age/gender-adjusted log wealth: intercept plus residual, i.e. observed log
# wealth with the fitted age/gender profile removed and re-centered at the
# reference person (65-year-old male).
dataush$wealthpredict <- modelwealth$coefficients[1] + modelwealth$resid
# NOTE(review): `wealth` is in levels while `wealthpredict` is on the log
# scale, so this difference mixes scales -- verify this is intentional.
dataush$wealthstandard <- dataush$wealth - dataush$wealthpredict
summary(dataush$wealthstandard)
summary(dataush$wealth)
summary(dataush$wealthpredict)
summary(modelwealth$residuals)
summary(modelwealth$resid)  # $resid partial-matches $residuals
summary(modelwealth$coefficients[1])
#dataush$wealthpredictexp <- (dataush$wealthpredict)
# Back-transform the adjusted log wealth to levels.
dataush$wealthpredictexp <- exp(dataush$wealthpredict)
summary(dataush$wealth)
summary(dataush$wealthpredictexp)
NROW(dataush$wealthpredictexp[dataush$wealthpredictexp <= 0])
saveRDS(dataush, file = "datasets/SCF-2016-all-after-1e-imp5.rds")
#####
|
0f61a53221c983e4ec8d2a72dbd77cc406134ba6
|
0099e89834cdf23e08db0883664776b28cc61881
|
/man/fround.Rd
|
572ad116fec25d08ff7f35fe32864d9a9dc0635d
|
[] |
no_license
|
suyusung/arm
|
97ede84539a6d9078b5529202bb2ee6c2f13404e
|
24bd6b3e1ebce5005b92c34a78f486908a436d37
|
refs/heads/master
| 2022-09-17T12:20:31.685153
| 2022-08-28T19:33:10
| 2022-08-28T19:33:10
| 66,146,872
| 21
| 5
| null | 2021-09-30T14:12:13
| 2016-08-20T12:58:19
|
R
|
UTF-8
|
R
| false
| false
| 850
|
rd
|
fround.Rd
|
\name{fround}
\alias{fround}
\alias{pfround}
\title{Formatting the Rounding of Numbers}
\description{
\code{fround} rounds the values in its first argument to the specified
number of decimal places with surrounding quotes.
\code{pfround} rounds the values in its first argument to the specified
number of decimal places without surrounding quotes.
}
\usage{
fround(x, digits)
pfround(x, digits)
}
\arguments{
\item{x}{a numeric vector.}
\item{digits}{integer indicating the precision to be used.}
}
\author{Andrew Gelman \email{gelman@stat.columbia.edu};
Yu-Sung Su \email{suyusung@tsinghua.edu.cn}
}
\seealso{
\code{\link{round}}
}
\examples{
x <- rnorm(1)
fround(x, digits=2)
pfround(x, digits=2)
}
\keyword{manip}
\keyword{print}
|
8758c93cc437790d2ef11f7b790624eb69c58384
|
3ce712c44b6d1494fbc151eae4980f163cfa4a26
|
/stochastic_grad_desc.R
|
12225662d81560aca9c98dd69715d3a010a81e32
|
[] |
no_license
|
robin93/CDS
|
549d174bcd8a619161374a7804ece3c777e39211
|
43b1456705203f14f9749b2936d870ebdf80f10b
|
refs/heads/master
| 2021-01-01T15:18:50.307668
| 2015-10-09T10:12:55
| 2015-10-09T10:12:55
| 40,328,185
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 974
|
r
|
stochastic_grad_desc.R
|
# Stochastic gradient descent for a simple linear regression of medv on rm
# using the Boston housing data (expects `Boston`, e.g. MASS::Boston, to be
# in scope).
#
# Parameterization: the coefficient row vector is (theta0, theta1, -1), so
# para_vect_op %*% t(coeff_matrix) = theta0*Xo + theta1*rm - medv, i.e. the
# residual for each observation; the third entry stays fixed at -1 and is
# never updated.
training_data <- Boston
design <- as.matrix(cbind(Xo = 1, training_data))  # add intercept column Xo
para_vect_op <- design[, c("Xo", "rm", "medv")]
alpha <- 0.0001              # learning rate
k <- 1 / nrow(para_vect_op)  # 1/n, averages the squared residuals in the cost
coeff_matrix <- matrix(c(0, 0, -1), nrow = 1)

# Convergence: stop once the cost changes by < 1e-6 between passes.
# cost_1 starts at Inf so the loop always runs at least once; the original
# pre-computed the cost here, but that value was dead code -- it was
# overwritten by `cost_1 <- cost_2` before ever being used.
cost_1 <- Inf
cost_2 <- 0
while (abs(cost_2 - cost_1) > 0.000001) {
  cost_1 <- cost_2
  print(cost_2)
  # One full pass over the observations, updating after each row (SGD).
  for (i in seq_len(nrow(para_vect_op))) {
    # Residual for row i, computed once (the original evaluated the same
    # product twice, once per coefficient).
    resid_i <- drop(para_vect_op[i, ] %*% t(coeff_matrix))
    coeff_matrix[1, 1] <- coeff_matrix[1, 1] - alpha * resid_i * para_vect_op[i, 1]
    coeff_matrix[1, 2] <- coeff_matrix[1, 2] - alpha * resid_i * para_vect_op[i, 2]
  }
  # Mean squared residual / 2 after the pass.
  cost_2 <- k * sum((para_vect_op %*% t(coeff_matrix))^2) / 2
}
coeff_matrix
|
716002b07f6a91ee00638b099c8e6824ee981bef
|
4d962148f95fc27c4eee71794d0d14d04eb3ee2b
|
/man/funchir-table.Rd
|
e6487be011f6363718a81848269b7368d87e8082
|
[] |
no_license
|
SarenT/funchir
|
e19c242d4dc9c57b50b221c31f917edc0cc98cb8
|
c56edb0fa36f8ec87a906b3d4610013f04be5d62
|
refs/heads/master
| 2022-04-22T23:16:50.515157
| 2020-04-11T08:21:29
| 2020-04-11T08:21:29
| 254,820,330
| 0
| 0
| null | 2020-04-11T08:00:31
| 2020-04-11T08:00:31
| null |
UTF-8
|
R
| false
| false
| 2,080
|
rd
|
funchir-table.Rd
|
\name{funchir-table}
\alias{table2}
\alias{sanitize2}
\title{ Convenient Wrappers for creating and printing tables }
\description{
Here are wrappers for common table creation/manipulation/printing operations.
}
\usage{
table2(..., dig = if (prop) 2L else NULL, prop = FALSE, ord = FALSE, pct = FALSE)
sanitize2(str)
}
\arguments{
\item{...}{ For both functions, the code to be passed to the wrapped function. For \code{table2}, can also include arguments to \code{prop.table}. }
\item{dig}{ Number of digits to be printed in the output. Defaults to 2 for proportion tables because output tends to be ugly if unrestricted. }
\item{prop}{ Print a proportional table? This will activate a call to \code{prop.table}. }
\item{ord}{ Should the table be ordered? Three inputs are understood: 1) \code{FALSE}; returns table in standard order, 2) \code{TRUE}; returns table in \emph{increasing} order, 3) \code{"dec"}; returns table in \emph{decreasing} order. }
\item{pct}{ Should table values be converted to a percentage (i.e., multiplied by 100)? Typically used with \code{prop = TRUE}; note that \code{round} is applied \emph{after} percentage conversion, so keep this in mind if using \code{dig} simultaneously. }
\item{str}{ \code{character} vector. }
}
\details{
\code{table2} is especially useful for two common usages of \code{table}--producing ordered frequency tables of a variable (to find most/least common realizations) and producing proportional two-way tables of two variables.
\code{sanitize2} is a replacement to the internal \code{sanitize} function used by default in \code{xtable}. Adds items for fixing left and right square brackets, which are (in the current--2017/03/03--version of \code{print.xtable} ) by default left alone, which can cause errors.
}
\seealso{
\code{\link{table}}, \code{\link{prop.table}}
}
\examples{
x <- sample(10, size = 100, replace = TRUE)
y <- sample(3, size = 100, replace = TRUE)
tbl <- table2(x, y, prop = TRUE, margin = 1)
table2(x, ord = "dec")
table2(y, ord = TRUE)
table2(y, dig = -1L)
}
|
78965ebaa55bd38d12be5417605f058a941d8348
|
1ec30f29c06012b25594aee83163c2eda82cd797
|
/R/importGamry.R
|
44b84612e4ac4fcdf08dfa635b4d5d7174fe9c43
|
[
"MIT"
] |
permissive
|
mikegerhardt/echem
|
c8afb85bba89d53369a4efb864a523e1b85e211c
|
0410ce141b0683acaeef18157ee98867b8777656
|
refs/heads/master
| 2021-01-10T06:39:45.454627
| 2016-01-29T20:13:34
| 2016-01-29T20:13:34
| 44,460,203
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,161
|
r
|
importGamry.R
|
# Electrochemistry package
# Written by Mike Gerhardt
# 10/17/2015
#' Imports a Gamry data file.
#'
#' \code{importGamry} imports a Gamry .dta file and returns a data frame with
#' each column corresponding to a column in the data file.
#'
#' This function needs to know which lines in the .dta file to import. The
#' default behavior is to count each line in the file and assume that the
#' number that appears the most times is where the data is stored in the file.
#' Sometimes this fails because Gamry adds extensive header documentation to
#' their files. The ncols parameter can be used to manually specify which lines
#' to import based on how many columns each line has.
#'
#' @param inputfile File to be imported.
#' @param ncols Number of expected data columns. The default of 0 has the
#' function make an educated guess about how many columns there are.
#'
#' @return A data frame with all the data in the .dta file.
#' @author Mike Gerhardt
#' @export
importGamry <- function(inputfile = file.choose(), ncols = 0){
alldata <- readLines(inputfile)
fields <- sapply(strsplit(alldata, split = "\t"), length)
if(ncols == 0){
fields.to.remove <- grep(which.max(tabulate(fields)),
fields, invert = TRUE)
} else {
fields.to.remove <- grep(ncols, fields, invert = TRUE)
}
usable.data <- alldata[-fields.to.remove] # this line removes all the random header fields etc
# name.repeats finds all the times Gamry repeats the names of the columns
# This happens when doing multiple CV cycles, and it screws up read.table
# because read.table then imports everything as factors (bad)
if(usable.data[1] %in% usable.data[-1]){
# if the first line of usable.data is repeated elsewhere in usable.data
# then it must be removed prior to read.table
name.repeats <- grep(usable.data[1], usable.data)[-1]
importable.data <- usable.data[-name.repeats]
} else {
# if the first line of usable.data doesn't appear again, don't do anything
importable.data <- usable.data
}
outdf <- read.table(textConnection(importable.data), header = TRUE)
return(outdf)
}
|
9cca31f989e08bb2ab5288a810f5584babf84d84
|
d683efb2c91b6e03840f273c1c885e4df08282ee
|
/R/rq_ker.R
|
0c9a076c231a17aefab77229bf5c5f4f71ad7345
|
[] |
no_license
|
chenghanyustats/DGP-MCEM
|
065cdc0ca9e0d21e69533e2083e695befb493e1d
|
e48a12be66f0633c2564a075e5e1c718d89205d4
|
refs/heads/main
| 2023-07-18T13:04:03.777061
| 2021-09-04T19:00:34
| 2021-09-04T19:00:34
| 357,803,379
| 1
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 149
|
r
|
rq_ker.R
|
# Rational quadratic kernel
rq_ker <- function(x, y, alpha = 1, tau = 1, h = 1) {
tau ^ 2 * (1 + (x - y) ^ 2 / (2 * alpha * h ^ 2)) ^ (-alpha)
}
|
55c7c1eddbfce75dd6b81c5db868ba0d19ccda67
|
0f50fb0ed2839c3b8476e4e2831fa9abcfd4d15d
|
/3.7.4 effect_of_continued_practice_FU3.R
|
a1877ece93e002a8f1c16e4b78ca30a8a5148659
|
[] |
no_license
|
sjunker01/MPhil-Thesis-R-Scripts
|
46b00dba97c96b40a04e1c14ca987b26adbadcde
|
84470b99c36522fb055eafb412eab602a6fe2f2c
|
refs/heads/master
| 2020-06-24T05:32:58.091460
| 2019-08-06T14:42:51
| 2019-08-06T14:42:51
| 198,863,825
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 5,256
|
r
|
3.7.4 effect_of_continued_practice_FU3.R
|
library(MASS)
library(tidyverse)
library(lme4)
library(lmerTest)
library(olsrr)
library(fmsb)
library(yhat)
# Create function to round numeric values in a heterogenic data frame to x digits
round_df <- function(x, digits) {
# round all numeric variables
# x: data frame
# digits: number of digits to round
numeric_columns <- sapply(x, mode) == 'numeric'
x[numeric_columns] <- round(x[numeric_columns], digits)
x
}
### Create color palette
cols <- c("#e47e32", "#ff9e1f", "#ae9764", "#719f8d", "#509094", "#d2c078")
col1 <- cols[1]
col2 <- cols[2]
col3 <- cols[3]
col4 <- cols[4]
col5 <- cols[5]
col6 <- cols[6]
# Read in file
wide <- read_csv("data_input(4)/mindful_clean.csv")
# Create data frame which contains only people who have data for all the tested variables
wide.reduced.c <- wide %>% filter(!is.na(core_fu3) & !is.na(formal_h_after_fu3) &
!is.na(informal_after_fu3)) %>%
mutate(row = row_number()) # Has 32 observations...
## Model
lm.core.1 <- lm(core_fu3 ~ formal_h_after_fu3 + informal_after_fu3 +
sessions_attended, data = wide.reduced.c)
# Check this model for normality
par(mfrow = c(2,2))
plot(lm.core.1)
par(mfrow = c(1,1))
hist(lm.core.1$residuals)
ols_test_normality(lm.core.1) # Ok
# Compute the cook's distance for each point
cooksd <- cooks.distance(lm.core.1)
# Plot that.
plot(cooksd, pch="*", cex=2, main="Influential Obs by Cooks distance") # plot cook's distance
abline(h = 4*mean(cooksd, na.rm=T), col="red") # add cutoff line
text(x=1:length(cooksd)+2, y=cooksd, labels=ifelse(cooksd>4*mean(cooksd, na.rm=T),names(cooksd),""), col="red") # add labels
# Let's remove row 11
wide.out2 <- wide.reduced.c %>% filter(row != 11) %>% mutate(row = row_number())
# Next model
lm.core.2 <- lm(core_fu3 ~ formal_h_after_fu3 + informal_after_fu3 +
sessions_attended, data = wide.out2)
# Cooks distance
cooksd <- cooks.distance(lm.core.2)
plot(cooksd, pch="*", cex=2, main="Influential Obs by Cooks distance") # plot cook's distance
abline(h = 4*mean(cooksd, na.rm=T), col="red") # add cutoff line
text(x=1:length(cooksd)+2, y=cooksd, labels=ifelse(cooksd>4*mean(cooksd, na.rm=T),names(cooksd),""), col="red") # add labels
# Let's leave it
summary(lm.core.1)
summary(lm.core.2)
# Similar
##### WEMWBS
# Create dataset
wide.reduced.w <- wide %>% filter(!is.na(wb_fu3) & !is.na(formal_h_after_fu3) &
!is.na(informal_after_fu3)) %>%
mutate(row = row_number()) # Has 32 observations...
## Model with square root transformation of dependent variable.
lm.wemwbs.1 <- lm(wb_fu3 ~ formal_h_after_fu3 + informal_after_fu3 +
sessions_attended, data = wide.reduced.w)
# Check this model for normality
par(mfrow = c(2,2))
plot(lm.wemwbs.1)
par(mfrow = c(1,1))
hist(lm.wemwbs.1$residuals)
ols_test_normality(lm.wemwbs.1) # Ok
# Compute the cook's distance for each point
cooksd <- cooks.distance(lm.wemwbs.1)
# Plot that.
plot(cooksd, pch="*", cex=2, main="Influential Obs by Cooks distance") # plot cook's distance
abline(h = 4*mean(cooksd, na.rm=T), col="red") # add cutoff line
text(x=1:length(cooksd)+2, y=cooksd, labels=ifelse(cooksd>4*mean(cooksd, na.rm=T),names(cooksd),""), col="red") # add labels
# Let's remove row 27
wide.out2 <- wide.reduced.w %>% filter(row != 27) %>% mutate(row = row_number())
# Next model
lm.wemwbs.2 <- lm(wb_fu3 ~ formal_h_after_fu3 + informal_after_fu3 +
sessions_attended, data = wide.out2)
# Cooks distance
cooksd <- cooks.distance(lm.wemwbs.2)
plot(cooksd, pch="*", cex=2, main="Influential Obs by Cooks distance") # plot cook's distance
abline(h = 4*mean(cooksd, na.rm=T), col="red") # add cutoff line
text(x=1:length(cooksd)+2, y=cooksd, labels=ifelse(cooksd>4*mean(cooksd, na.rm=T),names(cooksd),""), col="red") # add labels
# Remove 28
wide.out3 <- wide.out2 %>% filter(row != 28) %>% mutate(row = row_number())
# Next model
lm.wemwbs.3 <- lm(wb_fu3 ~ formal_h_after_fu3 + informal_after_fu3 +
sessions_attended, data = wide.out3)
# Cooks distance
cooksd <- cooks.distance(lm.wemwbs.3)
plot(cooksd, pch="*", cex=2, main="Influential Obs by Cooks distance") # plot cook's distance
abline(h = 4*mean(cooksd, na.rm=T), col="red") # add cutoff line
text(x=1:length(cooksd)+2, y=cooksd, labels=ifelse(cooksd>4*mean(cooksd, na.rm=T),names(cooksd),""), col="red") # add labels
# Ok
summary(lm.wemwbs.1)
summary(lm.wemwbs.2)
summary(lm.wemwbs.3)
# Similar
### Extract values
## CORE
stargazer(cbind(Estimate = coef(lm.core.1), Std.Error = coef(summary(lm.core.1))[,2],
z.value = coef(summary(lm.core.1))[,3], confint(lm.core.1),
p_value = coef(summary(lm.core.1))[,4]), type = "text", style = "qje", digits = 3)
## WEMWBS
stargazer(cbind(Estimate = coef(lm.wemwbs.1), Std.Error = coef(summary(lm.wemwbs.1))[,2],
z.value = coef(summary(lm.wemwbs.1))[,3], confint(lm.wemwbs.1),
p_value = coef(summary(lm.wemwbs.1))[,4]), type = "text", style = "qje", digits = 3)
|
ae1ca851ba3fa484761ce716c8ee487abb38f540
|
b3c3936e23eec89b0f3a9c5b6f7b3219f5fa7916
|
/workflow/scripts/expansion.R
|
e7c3c8a68d87e6c12d45d593a279dee8529b961c
|
[
"MIT"
] |
permissive
|
tgstoecker/A2TEA.Workflow
|
8a788d7aa5649f1c95306e8250451cffc0cdcb12
|
d3a1d6447e8631ddae4c339988dc8fc2d34895e8
|
refs/heads/master
| 2023-04-29T04:24:48.008321
| 2023-04-18T11:50:33
| 2023-04-18T11:50:33
| 526,283,448
| 1
| 1
|
MIT
| 2023-03-12T17:23:37
| 2022-08-18T16:10:19
|
Python
|
UTF-8
|
R
| false
| false
| 30,567
|
r
|
expansion.R
|
#logging
log <- file(snakemake@log[[1]], open="wt")
sink(log, append=TRUE)
sink(log, append=TRUE, type="message")
# Restore output to console
#sink()
#sink(type="message")
#backup R-based installation if conda didn't work or wasn't used
#we check if packages are installed first
#fix for: libicui18n.so.68: cannot open shared object file
#reinstall of stringi connects libraries correctly
#install.packages("stringi", repos="http://cran.us.r-project.org")
# list of cran packages
#cran_packages = c("readr", "plyr", "dplyr", "stringr", "tidyr", "tibble", "reshape2", "foreach", "doParallel")
# load or install&load all
#package.check <- lapply(
# cran_packages,
# FUN = function(x) {
# if (!require(x, character.only = TRUE)) {
# install.packages(x, dependencies = TRUE, repos = "http://cran.us.r-project.org")
# library(x, character.only = TRUE)
# }
# }
#)
#load libraries
library(readr)
library(plyr)
library(dplyr)
library(stringr)
library(tidyr)
library(tibble)
library(reshape2)
library(foreach)
library(doParallel)
message("Acquiring hypothesis variables:")
num = snakemake@params[["num"]]
name = snakemake@params[["name"]]
#threads info for parallel FORK cluster
threads = as.numeric(snakemake@threads)
# define the number of additional best blast hits to include in the follow-up analyses
add_OGs = snakemake@params[["add_OGs"]]
# define the minimum expansion factor & expansion difference to call an expansion (part of hypothesis.tsv)
# + minimum number of genes of expanded species to even consider an OG
expansion_factor = as.numeric(snakemake@params[["expansion_factor"]])
expansion_difference = as.numeric(snakemake@params[["expansion_difference"]])
expanded_genes_min = as.numeric(snakemake@params[["expanded_genes_min"]])
# get user choice whether to perform ploidy normalization or not
ploidy_normalization = as.character(snakemake@params[["ploidy_normalization"]])
# get ploidy information of each species (supplied by user; part of species.tsv)
species_table <- read.delim("config/species.tsv", header = TRUE, sep = "\t", row.names = "species")
# using snakemake propagation + python strsplit() is really cool since the input is just a vector
## even if we have multiple species in expanded, compared or both ;D
expanded_in = snakemake@params[["expansion"]]
compared_to = snakemake@params[["comparison"]]
c_t_species <- compared_to
all_species <- unique(c(expanded_in, compared_to))
# defining:
# at least Nmin_expanded_in expanded species that are expanded in at least Nmin_compared_to compared_to species
Nmin_expanded_in <- as.numeric(snakemake@params[["Nmin_expanded_in"]])
Nmin_compared_to <- as.numeric(snakemake@params[["Nmin_compared_to"]])
# define whether or not only HOGs should be considered that have at least 1 gene from each expanded_in/compared_to species
expanded_in_all_found <- as.character(snakemake@params[["expanded_in_all_found"]])
compared_to_all_found <- as.character(snakemake@params[["compared_to_all_found"]])
#read-in Orthogroup-GeneCounts-table
message("Reading in Orthogroup-GeneCounts-table:")
OG.GC <- readr::read_tsv("orthofinder/final-results/Orthogroups/Orthogroups.GeneCount.tsv")
#concatenate all BLAST seaches to one file and create parseable tibble with read_delim
#could add that only BLAST searches of our species in this hypothesis are used - toDO for later
#easiest and efficient way is to bind outside of the loop
#do.call is a nice way rbind(bind_rows[[1]], bind_rows[[2]], ...)
message("Concatenating all BLAST seaches to one file and creating parseable tibble:")
datalist = list()
for (i in Sys.glob("orthofinder/search/*.txt")) {
datalist[[i]] <- readr::read_delim(file = i,
delim = "\t",
col_names = c(
"qseqid",
"sseqid",
"pident",
"length",
"mismatch",
"gapopen",
"qstart",
"qend",
"sstart",
"send",
"evalue",
"bitscore"),
col_types = c(
qseqid = col_character(),
sseqid = col_character(),
pident = col_double(),
length = col_double(),
mismatch = col_double(),
gapopen = col_double(),
qstart = col_double(),
qend = col_double(),
sstart = col_double(),
send = col_double(),
evalue = col_double(),
bitscore = col_double()
)
)
}
all_BLAST <- base::do.call(bind_rows, datalist)
#read-in the conversion table: orthofinder sequence IDs and corresponding actual gene/protein names
#with "str_remove_all" I also easily get rid of the ":"
message("Reading in conversion table based on SequenceIDs.txt:")
conversion <- readr::read_delim(file = "orthofinder/SequenceIDs.txt",
delim = " ",
col_names = c("seqid", "name"),
col_types = c(
seqid = col_character(),
name = col_character())
)
conversion <- conversion %>% dplyr::mutate(seqid = str_remove_all(seqid, ":"))
message("Creating reformatted output with actual gene/protein names:")
#create new columns with the actual gene/protein names
all_BLAST_reformatted <- all_BLAST %>% dplyr::left_join(conversion, by = c("qseqid" = "seqid"))
all_BLAST_reformatted <- all_BLAST_reformatted %>% dplyr::left_join(conversion, by = c("sseqid" = "seqid"))
#position after qseqid and sseqid respectively
all_BLAST_reformatted <- all_BLAST_reformatted %>% dplyr::relocate(name.x, .after = qseqid)
all_BLAST_reformatted <- all_BLAST_reformatted %>% dplyr::relocate(name.y, .after = sseqid)
#rename to qseqid/sseqid_name
all_BLAST_reformatted <- dplyr::rename(all_BLAST_reformatted, qseqid_name = name.x)
all_BLAST_reformatted <- dplyr::rename(all_BLAST_reformatted, sseqid_name = name.y)
message("Data read-in and reformat:")
ph_orthogroups <- readr::read_delim(file = "orthofinder/final-results/Phylogenetic_Hierarchical_Orthogroups/N0.tsv",
delim = "\t")
#create dataframe with numbers of genes per PHOG
#we combine expanded_in and compared_to vectors to easily compute for everything we need
HOG_df <- setNames(base::data.frame(matrix(ncol = 1 + length(all_species), nrow = 0)),
c("HOG", all_species))
for (i in 1:nrow(ph_orthogroups)) {
row = 0
#print(ph_orthogroups[i,]$HOG)
row <- c(ph_orthogroups[i,]$HOG)
for (j in c(all_species)) {
if (is.na(ph_orthogroups[i,][[j]]))
{
test = 0
row <- c(row, test)
}
else
{
test = length(unlist(strsplit(ph_orthogroups[i,][[j]], ",")))
row <- c(row, test)
}
}
HOG_df[i,] <- row
}
HOG_tibble <- as_tibble(HOG_df)
for (k in 1:length(all_species)) {
o = all_species[k]
HOG_tibble[[o]] <- as.numeric(HOG_tibble[[o]])
}
#create "copy" as HOG_tibble is currently modified in the upcoming step
HOG_tibble_complete <- HOG_tibble
#here, we apply the expansion rule/s - could be also be parsed from config? (toDO!)
#get() function solves my problems of using the character functions inside dplyr
message("Applying expansion rule/s per hypothesis:")
# function has to reduce input dataset; this way each iteration (compared species) is included
# for all compared species first check for each HOG if expanded species is >= #*
# then keep rows in which expanded species has at least # (default: 2)
for (e in expanded_in) {
# create expansion counter column for each expanded species
exp_species_name = paste0("exp_counter_", e)
HOG_tibble <- HOG_tibble %>% tibble::add_column(!!(exp_species_name) := 0)
if (ploidy_normalization == "YES") {
for (c in c_t_species) {
HOG_tibble <- HOG_tibble %>%
mutate(!!(exp_species_name) :=
dplyr::case_when(
# although cases in which the ct species has no genes are ignored via
# the multiplication in the 2nd step, this underlines that we do so
# now with the second expansion distance criterium we should have it anyway
get(c) > 0 &
get(e)/species_table[e, "ploidy"]*2 >= expansion_factor*(get(c))/species_table[c, "ploidy"]*2 ~ get(exp_species_name) + 1,
get(c) > 0 &
get(e)/species_table[e, "ploidy"]*2 >= expansion_difference + (get(c))/species_table[c, "ploidy"]*2 ~ get(exp_species_name) + 1,
TRUE ~ 0,
)
)
}
}
#if the user did not set ploidy_normalization to "YES"
#here, no ploidy information is used to divide the number of genes per OG
else {
for (c in c_t_species) {
HOG_tibble <- HOG_tibble %>%
mutate(!!(exp_species_name) :=
dplyr::case_when(
get(c) > 0 &
get(e) >= expansion_factor*(get(c)) ~ get(exp_species_name) + 1,
get(c) > 0 &
get(e) >= expansion_difference + get(c) ~ get(exp_species_name) + 1,
TRUE ~ 0,
)
)
}
}
}
# then we perform summing over all exp_counter_ columns based on user choices
# at least Nmin_expanded_in expanded species that are expanded in at least Nmin_compared_to compared_to species
# create new column
# for each exp_counter_* species check if value >= y
# all passing exp_counter_* species are counted; if sum >= x retain HOG
HOG_tibble <- HOG_tibble %>%
mutate(pass =
# selecting columns that we want to use use for subsequent function
select(., contains("exp_counter_")) %>%
# use pmap on this subset to get a vector of min from each row
# dataframe is a list so pmap works on each element of the list; here, each row
# we sum the occurences (per row!) of exp_counter_* cols that greater/equal to user cutoff
purrr::pmap_dbl(., ~ sum(c(...) >= Nmin_compared_to))
)
# lastly, retain rows/HOGs that pass x cutoff = number of expanded species
# & drop unnecessary columns
HOG_tibble <- HOG_tibble %>%
filter(pass >= Nmin_expanded_in) %>%
select(-contains("exp_counter_"), -pass)
# additional optional filter1 - expanded_in gene family complete criterium
if (expanded_in_all_found == "YES") {
HOG_tibble <- HOG_tibble %>%
# create column with per-row sum of expanded_in species that have at least 1 gene in the HOG
mutate(expanded_in_pass =
select(., contains(expanded_in)) %>%
purrr::pmap_dbl(., ~ sum(c(...) >= 1))
) %>%
# only keep rows/HOGs in which at least 1 gene of each expanded_in species occurs
filter(expanded_in_pass >= length(expanded_in)) %>%
# remove expanded_in_pass column
select(-expanded_in_pass)
}
# additional optional filter2 - compared_to gene family complete criterium
if (compared_to_all_found == "YES") {
HOG_tibble <- HOG_tibble %>%
# create column with per-row sum of compared_to species that have at least 1 gene in the HOG
mutate(compared_to_pass =
select(., contains(compared_to)) %>%
purrr::pmap_dbl(., ~ sum(c(...) >= 1))
) %>%
# only keep rows/HOGs in which at least 1 gene of each compared_to species occurs
filter(compared_to_pass >= length(compared_to)) %>%
# remove compared_to_pass column
select(-compared_to_pass)
}
# optional hard filter for at least # genes in all expanded species
# this is useful in difficult ploidy cases and solves downstream issues in small OGs
HOG_tibble <- HOG_tibble %>%
filter(if_all(contains(expanded_in), ~ . >= expanded_genes_min))
# new object: expanded_HOGs
expanded_HOGs <- HOG_tibble
# based on filtering criteria create per hypothesis table with:
# all HOGs and gene counts + additional column expansion (yes/no)
# this is later used in the creation of final tea outputs to create a HOG level table per hypothesis
#need to check if there is at least one expanded (H)OG
#if not replace_na woud throw error since we are changing types
#all of the downstream stuff in this script only makes sense if we in fact do have expanded groups
if (nrow(expanded_HOGs) > 0) {
#create expansion vector with length of expandsion HOGs
expansion <- replicate(nrow(expanded_HOGs), "yes")
#print(expansion)
expansion_tibble <- full_join(HOG_tibble_complete, tibble::add_column(expanded_HOGs, expansion, .after = "HOG"),
by = c("HOG"), suffix = c("", ".remove")) %>%
tidyr::replace_na(list(expansion = "no")) %>%
select(-c(ends_with(".remove")))
dir.create(paste("tea/", num, "/expansion_tibble/", sep = ""))
saveRDS(expansion_tibble, paste("tea/", num, "/expansion_tibble/expansion_tibble.rds", sep = ""))
# create genes column in ph_orthogroups file
# row merge? - no unite function, really handy ;D
ref_ph_orthogroups <- ph_orthogroups %>% tidyr::unite("genes", all_of(all_species), sep =", ", na.rm = TRUE, remove = TRUE)
message("Creating .txt files for all expanded OGs with reciprocal best BLAST hits of species in respective hypothesis:")
#for each gene/protein name in an interesting OG do (is there a file just with names per OG?):
#check "all_BLAST_reformatted" for all entries including these names and create new dataframe/tibble
# then, perform filtering and retain certain set of genes/proteins per OG analysis
# then, create .txt file per OG with these gene names
## come-up with filter criteria to have better trees?
## I could of course just keep everything and save the evalues, etc.; well, problem for later.. ;D
####> output for snakemake? what about inidividual OG txt files, because starting here parallelisation can really impact
dir.create(paste("tea/", num, "/expansion_cp_target_OGs/", sep = ""))
## define custom class for extended blast hits
# need a list object to hold all data of this class
extended_BLAST_hits <- list()
# class for extended BLAST hits info
setClass("extended_BLAST_hits",
slots=list(blast_table="tbl_df")
)
for (i in expanded_HOGs$HOG) {
exp_og_genes <- unlist(strsplit(ref_ph_orthogroups[ref_ph_orthogroups$HOG == i,]$genes, split = ", "))
BLAST_hits_exp_og_genes <- dplyr::filter(all_BLAST_reformatted,
qseqid_name %in% exp_og_genes | sseqid_name %in% exp_og_genes)
sorted_BLAST_hits_exp_og_genes <- arrange(BLAST_hits_exp_og_genes, evalue, -bitscore, -pident)
# get gene name of last gene to be added based on number of add_blast_hits
all_blast_genes <- na.omit(
unique(
c(
rbind(
sorted_BLAST_hits_exp_og_genes$qseqid_name,
sorted_BLAST_hits_exp_og_genes$sseqid_name
)
)
)
)
# set of all extended blast hits (based on threshold) - vector of gene names (ordered!)
# also nice: don't need a conditional since `%>% head(n = add_blast_hits)` will work,
# even if add_blast_hits param is > setdiff(all_blast_genes, exp_og_genes)
extended_blast_hits_genes <- setdiff(all_blast_genes, exp_og_genes) %>% head(n = add_OGs)
# non redundant set of gene names of HOG + n additional blast hits as defined in the user threshold
HOG_and_ext_blast_hits_genes <- c(exp_og_genes, extended_blast_hits_genes)
#create subset of sorted_BLAST_hits_exp_og_genes table in which only:
# exp_og_genes & extended_blast_hits_genes are allowed to appear
# this way we have cutoff for the nth best blast hit/gene but also keep all secondary hits
HOG_and_ext_blast_hits_table <- sorted_BLAST_hits_exp_og_genes %>%
filter(qseqid_name %in% HOG_and_ext_blast_hits_genes) %>%
filter(sseqid_name %in% HOG_and_ext_blast_hits_genes)
#tt write_lines("",
#tt paste("tea/", num, "/expansion_cp_target_OGs/", i, ".txt", sep = "")
#tt )
# for each exp. HOG create an extended_BLAST_hits S4 object and collect as part of list
ext_B_hits <- new("extended_BLAST_hits",
blast_table=HOG_and_ext_blast_hits_table
)
# assign name based on name of the underlying expanded HOG
ext_B_hits <- list(ext_B_hits)
names(ext_B_hits) <- paste0(i)
# append to list object
extended_BLAST_hits <- c(extended_BLAST_hits, ext_B_hits)
}
# save extended BLAST hits to hypothesis specific ("num") RDS file
#-> to be read and used in final_tea_computation.R script
saveRDS(extended_BLAST_hits, paste("tea/", num, "/extended_BLAST_hits/extended_BLAST_hits.RDS", sep = ""))
### Adding OGs instead of BLAST hits ###
message("Adding OGs instead of BLAST hits")
#transforming ph_orthogroups to long format - nice & neat lookup table ;D
long_ph_orthogroups <- ph_orthogroups %>%
select(-OG, -`Gene Tree Parent Clade`) %>%
melt(., id.vars = c("HOG")) %>%
rename(species=variable, id=value) %>%
mutate(species = as.character(species)) %>%
separate_rows(id, sep = ", ") %>%
drop_na()
#create final summary list - per OG (name) the cumulative steps of additional OGs
summary_add_OG_analysis_list <- vector(mode = "list", length = length(expanded_HOGs$HOG))
length(summary_add_OG_analysis_list)
#create classes for nested S3-list structure holding all additional OG sets per OG analysis
setClass("add_OG_analysis",
slots=list(add_OG_analysis="list")
)
setClass("add_OG_set",
slots=list(genes="tbl_df")
)
dir.create(paste("tea/", num, "/add_OGs_sets/id_lists/", sep = ""))
#removing all big files to minimize mem impact of FORK cluster
rm(datalist)
rm(all_BLAST)
rm(all_blast_genes)
rm(HOG_and_ext_blast_hits_table)
rm(HOG_and_ext_blast_hits_genes)
rm(sorted_BLAST_hits_exp_og_genes)
rm(extended_BLAST_hits)
rm(ext_B_hits)
#### FORK cluster since I expect a Linux machine
#### autostop=TRUE since I don't want to handle this manually
#with my.cluster & stopCluster(my.cluster) I could check the status
setup_cluster <- function(){
#define cluster
parallel::detectCores()
n.cores <- threads
n.cores
#create the cluster - FORK because this way libraries, variables etc. are copied to the clusters!
my.cluster <- parallel::makeForkCluster(
n.cores,
type = "FORK",
autostop=TRUE
)
#check cluster definition (optional)
print(my.cluster)
#register it to be used by %dopar%
doParallel::registerDoParallel(cl = my.cluster)
#check if it is registered (optional)
print(
foreach::getDoParRegistered()
)
#how many workers are available? (optional)
print(
foreach::getDoParWorkers()
)
}
#function to completely remove a fork cluster
burn_socks <- function(x){
close.connection(getConnection(x))
}
#function to truly get rid of old Cluster/sockets
rm_cluster <- function(){
stopImplicitCluster()
connections <- showConnections(all = FALSE)
socket_connections <- as.data.frame(connections) %>%
filter(class == "sockconn") %>%
rownames()
message("Removing all unwanted FORK connections - purging closed cluster sockets")
message("This will kill zombie proesses and free up RAM")
lapply(X = socket_connections,
FUN = burn_socks)
}
#setup & start FORK cluster
setup_cluster()
#we iterate over the expanded OGs
pre_summary_add_OG_analysis_list <- foreach(i = expanded_HOGs$HOG) %dopar% {
exp_og_genes <- unlist(strsplit(ref_ph_orthogroups[ref_ph_orthogroups$HOG == i,]$genes, split = ", "))
BLAST_hits_exp_og_genes <- dplyr::filter(all_BLAST_reformatted,
qseqid_name %in% exp_og_genes | sseqid_name %in% exp_og_genes)
sorted_BLAST_hits_exp_og_genes <- arrange(BLAST_hits_exp_og_genes, evalue, -bitscore, -pident)
#after the sorting of BLAST hits we move on to merging the OG information into the current exp. OG BLAST hits results
#we can create a merged dataframe from the blast table (HOG specific) and the long format HOG table (general)
#difficulty is that in case of singletons we have NA under HOG
#easiest way I can think of is a named list
self_and_closest_ogs <- left_join(sorted_BLAST_hits_exp_og_genes, long_ph_orthogroups, by = c("sseqid_name" = "id")) %>%
group_by(sseqid_name) %>%
arrange(evalue, -bitscore, -pident) %>%
slice(1) %>%
ungroup() %>%
arrange(evalue, -bitscore, -pident) %>%
mutate(HOG = as.character(HOG),
HOG = ifelse(is.na(HOG), paste("singleton", sep = "-", cumsum(is.na(HOG))),
as.character(HOG))) %>%
group_by(HOG) %>%
arrange(evalue, -bitscore, -pident) %>%
slice(1) %>%
ungroup() %>%
arrange(evalue, -bitscore, -pident)
#first row will should correspond to self match - the OG itself
#self_and_closest_ogs
#here the information provided by the user regarding max. additional OGs is used
#we copy the value to an "internal" object since in the following commands we modify in each iteration depending on the underlying OG/s
#the initial add_OGs value chosen by the user is used later and must not be modified
add_OGs_internal = add_OGs
#need to add check for number of additonal OGs/singletons
#e.g. user sets max. add. OGs/singletons to 5 but we only can provide 4
#+ have to consider the expanded OG itself, so:
available_add_OGs <- nrow(self_and_closest_ogs) - 1
#available_add_OGs
#in this case set add_OGs to max available
if (available_add_OGs < add_OGs_internal) {
add_OGs_internal <- available_add_OGs
}
#empty list of precomputed size - add OGs plus the expanded OG itself
add_OG_analysis_list <- vector(mode = "list", length = add_OGs_internal + 1)
for (j in 1:(add_OGs_internal + 1)) {
og_name <- self_and_closest_ogs[j,] %>%
pull(HOG)
#differnetiate between OG case and a singleton case - different handling:
#for HOG get all associated genes/proteins from large long format table
#for singletons get singelton gene/protein from the table itself
if (!str_detect(og_name, "singleton") == TRUE) {
name_curr_close_HOG <- self_and_closest_ogs[j,] %>%
pull(HOG)
ids_curr_close_HOG <- long_ph_orthogroups %>%
filter(HOG %in% name_curr_close_HOG) %>%
pull(id)
add_OG_analysis_list[[j]] <- ids_curr_close_HOG
names(add_OG_analysis_list)[j] <- name_curr_close_HOG
} else {
name_curr_close_singleton <- self_and_closest_ogs[j,] %>%
pull(HOG)
id_curr_close_HOG <- self_and_closest_ogs[j,] %>%
pull(sseqid_name)
add_OG_analysis_list[[j]] <- id_curr_close_HOG
names(add_OG_analysis_list)[j] <- name_curr_close_singleton
}
}
#create copy of the list and remove the names
#add all previous gene/proteins to the next OG
# -> e.g. the former seventh OG list element will contain all genes/proteins of all lower numbered OGs
#the final "cum_add_OG_analysis_list" will be a list with each next element having the cumulative set of all previous and own gene/protein ids
cum_add_OG_analysis_list <- add_OG_analysis_list
names(cum_add_OG_analysis_list) <- NULL
#copy_add_OG_analysis_list
for (k in 0:(length(cum_add_OG_analysis_list)-1)) {
cum_add_OG_analysis_list[[k+1]] <- unlist(c(cum_add_OG_analysis_list[k], cum_add_OG_analysis_list[k+1]))
}
#create directory/ies - for each HOG under "add_OGs_sets/id_lists/"
dir.create(paste("tea/", num, "/add_OGs_sets/id_lists/", i, sep = ""))
#iterate over cum. list and write cumulative sets into seperate files
for (l in 1:length(cum_add_OG_analysis_list)) {
write_lines(cum_add_OG_analysis_list[[l]],
paste("tea/", num, "/add_OGs_sets/id_lists/", i, "/add_OGs_set_num-", l, ".txt", sep = "")
#paste("../tea/", num, "/add_OGs_sets/", i, "/add_OGs_set_num-", l, ".txt", sep = "")
)
}
#append current cum. list to summary list & name element after OG
#actually we need to do this again and this is redundant;toDo
names(cum_add_OG_analysis_list) <- i
return(cum_add_OG_analysis_list)
}
#after cluster is done - remove it
rm_cluster()
#some final formatting
for (i in 1:length(pre_summary_add_OG_analysis_list)) {
names(pre_summary_add_OG_analysis_list[[i]]) <- NULL
}
names(pre_summary_add_OG_analysis_list) <- expanded_HOGs$HOG
summary_add_OG_analysis_list <- pre_summary_add_OG_analysis_list
## in this final step we use the summary list to create S3-list nest structure which contains all info
## of the summary but in the format we will embed in the final results object as part of the final_tea computation
#length of add OGs analysis summary list
summary_length <- length(summary_add_OG_analysis_list)
#create empty list of length of the summary list
add_og_complete_object <- vector(mode = "list", length = summary_length)
#iterate over all OGs in summary list
for (og in 1:summary_length) {
#amount of sets for current OG - (necessary since less addtional OGs than max wished by user is possible)
curr_og_n_sets <- length(summary_add_OG_analysis_list[[og]])
#empty list with n elements = n sets for currently analyzed OG
og_all_sets <- vector(mode = "list", length = curr_og_n_sets)
for (set in 1:curr_og_n_sets) {
curr_set <- new("add_OG_set",
genes=as_tibble(
unlist(
summary_add_OG_analysis_list[[og]][set],
)
)
)
curr_set <- list(curr_set)
og_all_sets[set] <- curr_set
names(og_all_sets)[set] <- paste0("set_", set)
}
curr_add_og <- new("add_OG_analysis",
add_OG_analysis=og_all_sets
)
curr_add_og <- list(curr_add_og)
add_og_complete_object[og] <- curr_add_og
names(add_og_complete_object)[og] <- names(summary_add_OG_analysis_list[og])
}
# save summary table for aditional OG analysis to hypothesis specific ("num") RDS file
saveRDS(add_og_complete_object, paste("tea/", num, "/add_OGs_object/add_OG_analysis_object.RDS", sep = ""))
# nested snakemake checkpoints are annoying at the moment
# quick fix - create empty addtional set_num files which we can ignore but nonetheless exist
message("adding empty missing files - current workaround to avoid nested snakemake checkpoints")
#max_n_sets + 1 since user choice is the number of ADDITIONAL sets but we have ti remember the expanded OG itself (always set_num = 1)
max_n_sets = add_OGs + 1
for (n in names(add_og_complete_object)) {
og_n_sets <- length(
list.files(path = paste0("tea/", num, "/add_OGs_sets/id_lists/", n))
)
og_sets <- list.files(path = paste0("tea/", num, "/add_OGs_sets/id_lists/", n))
if (og_n_sets < max_n_sets) {
n_missing_sets <- max_n_sets - og_n_sets
for (m in 1:n_missing_sets) {
value <- max_n_sets - m + 1
missing_file <- paste0("tea/", num, "/add_OGs_sets/id_lists/", n, "/", "add_OGs_set_num-", value, ".txt")
file.create(missing_file)
}
}
}
#all of the previous only runs in case we actually find an expanded group in this particular hypothesis
#if this not the case we have to perform considerably less work (although of course the hypothesis is rather uninformative)
} else {
#just add expansion column with "no" for all rows
expansion_tibble <- HOG_tibble_complete %>%
mutate(expansion = "no")
dir.create(paste("tea/", num, "/expansion_tibble/", sep = ""))
saveRDS(expansion_tibble, paste("tea/", num, "/expansion_tibble/expansion_tibble.rds", sep = ""))
message("No expanded OGs for this hypothesis - creating empty outputs")
extended_BLAST_hits <- "empty"
# save extended BLAST hits to hypothesis specific ("num") RDS file
#-> to be read and used in final_tea_computation.R script
saveRDS(extended_BLAST_hits, paste("tea/", num, "/extended_BLAST_hits/extended_BLAST_hits.RDS", sep = ""))
add_og_complete_object <- "empty"
# save summary table for aditional OG analysis to hypothesis specific ("num") RDS file
saveRDS(add_og_complete_object, paste("tea/", num, "/add_OGs_object/add_OG_analysis_object.RDS", sep = ""))
dir.create(paste("tea/", num, "/expansion_cp_target_OGs/", sep = ""))
#create empty file under add_OG id_lists
dir.create(paste0("tea/", num, "/add_OGs_sets/id_lists/"))
file.create(paste0("tea/", num, "/add_OGs_sets/id_lists/", "empty.txt"))
}
#####
#### Lastly, create .check to know everything is done
#message("Creating .check - Expansions successfully computed for hypothesis ", num)
#expansion_cp_target_OGs.check <- "check"
#dir.create(paste("checks/tea/", num, "/", sep=""))
#write_file(expansion_cp_target_OGs.check, paste("checks/tea/", num, "/expansion_cp_target_OGs.check", sep = ""))
|
2588750b1a4e1c7b7107aac80c9f10bf63feab11
|
d96c0d0c39d1e96fa98154e1206c136ccef7686f
|
/code/04-sim_data-scDD.R
|
c8c65f49ca55039d8cf04ba269f61b74f7a75ee0
|
[] |
no_license
|
HelenaLC/simulation-comparison
|
d94df6f9a68e46f974d6342a81c75066aad7e1d7
|
f89e6650f8acae1c32b8fe27d644ec7c7be63051
|
refs/heads/master
| 2023-04-17T22:06:50.698560
| 2022-08-10T09:32:28
| 2022-08-10T09:32:28
| 344,418,980
| 7
| 3
| null | null | null | null |
UTF-8
|
R
| false
| false
| 254
|
r
|
04-sim_data-scDD.R
|
suppressPackageStartupMessages({
library(BiocParallel)
library(scDD)
library(SingleCellExperiment)
})
# Run scDD::simulateSet() with its console chatter suppressed.
#
# @param x A named list of arguments passed on to simulateSet() via do.call().
# @return The simulated object with colData/rowData stripped (assays only).
fun <- function(x) {
  # Divert console output to a throwaway file so simulateSet() runs quietly.
  sink(tempfile())
  # BUG FIX: restore the sink even if simulateSet() errors. The original
  # called sink() only on the success path, so an error left the diversion
  # active and silently swallowed all subsequent console output.
  on.exit(sink(), add = TRUE)
  y <- do.call(simulateSet, x)
  # Drop per-cell and per-gene metadata; callers only need the assay data.
  colData(y) <- rowData(y) <- NULL
  y
}
|
833f63bd1463e54aa4e5e963180b28143d6dbc12
|
de1078420a67ca6f6c950c717bfb2b8bb9896e32
|
/R scripts/Survival Analysis/generate_cascades.R
|
d16ed8e990bd701cbc4812c64de664a0bca89f78
|
[] |
no_license
|
gmanco/SurvivalFactorization
|
26e9a555510fe73189d35fccb55b7185b415e69a
|
3417d4b2852505f9be96dd4f970be436ff864dc7
|
refs/heads/master
| 2021-01-17T10:07:23.230861
| 2019-01-14T19:57:15
| 2019-01-14T19:57:15
| 38,918,364
| 3
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 3,718
|
r
|
generate_cascades.R
|
require('igraph')
require('Matrix')
# The algorithm for generating cascades works like follows:
# 1. For each node n and each factor k we generate S[n,k] and A[n,k]
# 2. We randomly choose the initial nodes for each cascade
# 3. for each cascade to generate, we proceed like follows:
# 3.1 randomly pick a node n among those active nodes and remove it from the list of active
# 3.2 try to activate all his neighbors
# 3.3 for each neighbor m
# 3.3.1 generate a delay D using S[m,k]*A[n,k]. Add the delay to the activation time of n to obtain the activation time of m
# 3.3.2 if the activation time is greater than Tmax, then we skip it
# 3.3.3 otherwise we add it to the list of active nodes
# 4 the algorithm terminates when the list of active nodes is empty
# Simulate numCascades information cascades over graph g using the
# susceptibility (S) and authoritativeness (A) node-by-factor matrices.
# Returns a sparse numCascades x numNodes matrix of activation timestamps,
# where 0 means the node was never activated in that cascade.
generate_cascades <- function(g,numNodes,numCascades,numFactors,Tmax,Tmin,S, A){
# Normalised in/out degree distributions. Currently unused (see the
# commented-out initial_nodes sampling below); kept as written.
in.d.g = degree(g, mode = "in")
in.d.g = in.d.g / sum(in.d.g)
out.d.g = degree(g, mode = "out")
out.d.g = out.d.g / sum(out.d.g)
#FIXME: users with few outgoing edges are more susceptibles, users with more incoming edges are more authoritative
cascades = Matrix(0,
nrow = numCascades,
ncol = numNodes,
sparse = TRUE)
# initial_nodes = sample(numNodes,numCascades, replace = TRUE,prob = in.d.g)
totnodes = 1:numNodes
for (c in 1:numCascades) {
# triggers[i] == 1 once node i has already tried to activate its neighbours.
triggers = rep(0, numNodes)
# Each cascade is driven by a single randomly chosen latent factor K.
K = sample(numFactors, 1)
# currnode = initial_nodes[c]
# Seed node drawn proportionally to authoritativeness under factor K.
currnode = sample(numNodes,1,prob = A[,K]/sum(A[,K]))
timestamp = runif(1, 0, Tmin)
cascades[c, currnode] = timestamp
canExpand = TRUE
while (canExpand) {
# Active frontier: nodes already activated that have not yet triggered.
cond = cascades[c,] > 0 & triggers == 0
currnodes = totnodes[cond]
if (length(currnodes) > 0) {
if (length(currnodes) > 1) {
# Pick the next spreader proportionally to A[,K]. The length-1 branch
# below avoids sample()'s special-casing of a single integer argument
# (sample(n, ...) would sample from 1:n instead of the node itself).
p = A[currnodes, K] / sum(A[currnodes, K])
currnode = sample(currnodes, size = 1, prob = p)
} else {
currnode = currnodes[1]
}
triggers[currnode] = 1
nb = as.numeric(neighbors(g, currnode, mode = "in"))
for (nextnode in nb) {
if (cascades[c, nextnode] == 0) {
# Activation delay is exponential with rate S[target] * A[source];
# activations falling beyond the observation horizon Tmax are dropped.
rate = S[nextnode, K] * A[currnode, K]
timestamp = cascades[c, currnode] + rexp(1, rate)
if (timestamp < Tmax) {
cascades[c, nextnode] <- timestamp
}
}
}
} else {
canExpand = FALSE
}
}
}
return(cascades)
}
# Build the node x factor susceptibility (S) and authoritativeness (A)
# matrices consumed by generate_cascades().
#
# Every entry starts at a low background level; for each factor a random
# batch of nodes gets boosted authoritativeness, and the in-neighbours of
# those nodes get boosted susceptibility.
#
# @param g          An igraph graph.
# @param numNodes   Number of nodes in g.
# @param numFactors Number of latent factors.
# @return list(S, A) of numNodes x numFactors matrices.
generateInfluenceMatrices <- function(g, numNodes, numFactors) {
  # Background (low) influence values for every node/factor pair.
  S <- matrix(runif(numNodes * numFactors, min = 0.001, max = 0.01), ncol = numFactors)
  A <- matrix(runif(numNodes * numFactors, min = 0.001, max = 0.01), ncol = numFactors)
  for (k in 1:numFactors) {
    # For each factor, boost the influence of one random batch of nodes.
    numNodesToChoose <- ceiling(numNodes / (2 * numFactors))
    nodesToChoose <- sample(numNodes, numNodesToChoose)
    # Explicit length(): the original passed the index vector itself as the
    # first argument of runif(), silently relying on "length(n) is taken if
    # n has length > 1". Same number of draws, so the RNG stream is unchanged.
    A[nodesToChoose, k] <- runif(length(nodesToChoose), min = 0.1, max = 1)
    # Collect all in-neighbours of the boosted nodes (replaces the original
    # O(n^2) incremental c() growth inside a loop; no RNG is consumed here).
    neighborsOfNodesToChoose <- unique(unlist(
      lapply(nodesToChoose, function(v) as.numeric(neighbors(g, v, mode = "in")))
    ))
    # ... and boost their susceptibility as well.
    if (length(neighborsOfNodesToChoose) > 0) {
      S[neighborsOfNodesToChoose, k] <- runif(length(neighborsOfNodesToChoose), min = 0.1, max = 1)
    }
  }
  list(S, A)
}
|
b997e42f973104c1b854fd6bde8f13765d14256b
|
db256e49b1fd5ce3876db65341a570d22dd5f092
|
/Master_model.R
|
b357441513fe991bfb06bbdecdbc6c91df2d8205
|
[] |
no_license
|
BassEngD/COVID_UHB
|
0b1f4c740991222525e866d92c1da7b206d4ccda
|
72fad0e30abbbb677462c614fd629e4c090487d0
|
refs/heads/master
| 2021-04-16T14:40:50.592547
| 2020-03-31T22:03:25
| 2020-03-31T22:03:25
| 249,363,239
| 0
| 0
| null | 2020-03-24T10:02:37
| 2020-03-23T07:29:42
|
HTML
|
UTF-8
|
R
| false
| false
| 18,880
|
r
|
Master_model.R
|
library(dplyr)
library(purrr)
# Progression of illness in simple model
# Incubating -> (incubation period)
#-> Infectious -> (contagious period)
#-> recovering -> (recovery period - contagious period)
#-> recovered or died
# Age-stratified clinical parameters, one row per 10-year age band.
# All *_percentage columns are proportions in [0, 1]:
#   hospitalisation_percentage   - share of cases admitted to hospital
#   require_ICU_percentage       - share of ADMISSIONS that need ICU
#                                  (applied after hospitalisation, see ICU_patients)
#   baseline_fatality_percentage - per-case fatality proportion
#   age_demographic              - share of the population in each band
# NOTE(review): the source of these figures is not shown in this file -
# confirm against the reference the model was parameterised from.
demographics <- data.frame(
age_group_id = 1:9
, age_group = c("0-9"
, "10-19"
, "20-29"
, "30-39"
, "40-49"
, "50-59"
, "60-69"
, "70-79"
, "80+"
)
, hospitalisation_percentage = c(0.001
, 0.003
, 0.002
, 0.032
, 0.049
, 0.102
, 0.166
, 0.243
, 0.273
)
, require_ICU_percentage = c(0.050
, 0.050
, 0.050
, 0.050
, 0.063
, 0.122
, 0.274
, 0.432
, 0.709
)
, baseline_fatality_percentage = c(0.00002
, 0.00006
, 0.0003
, 0.0008
, 0.0015
, 0.0060
, 0.0220
, 0.0510
, 0.0930
)
, age_demographic = c(0.12
, 0.13
, 0.14
, 0.13
, 0.12
, 0.13
, 0.11
, 0.07
, 0.04
)
)
# Baseline reproduction number used to fill the effective_R0 schedule below.
default_R0 <- 2.4
# Population served by the trust's catchment area.
trust_population <- 1200000
# Daily schedule of the effective R0. NOTE: times = 92 must equal the number
# of days in the seq.Date range (2020-03-01 .. 2020-05-31 inclusive = 92);
# keep the two in sync if the date range is changed.
effective_R0 <- data.frame(date = seq.Date(from = as.Date.character("2020-03-01", format = "%Y-%m-%d")
, to = as.Date.character("2020-05-31", format = "%Y-%m-%d")
, by = "day"
)
, R0 = rep(default_R0, times = 92)
)
UHB_share <- 0.9 #percentage of patients in area being served by UHB
# Hospital capacity assumptions.
total_beds <- 1000
total_ICUs <- 50
occupied_beds_percentage <- 0.6 # beds that are being used by non-covid patients
occupied_ICU_percentage <- 0.6 # ICUs that are being used by non-covid patients
# Disease-course durations (days).
recovery_period_days <- 14
incubation_period_days <- 5
contagious_period_days <- 4
hospitalisation_period_days <- 7 # how long after hospitalisation is an admission released
ICU_period_days <- 5 # how long after hospitalisation is an admission needing ICU released (from ICU, not hospital)
# Fatality multipliers applied when capacity is exceeded (see patient_deaths).
overloaded_beds_fatality_multiplier <- 1.05 # increase the fatality rate if beds are overloaded
overloaded_ICU_fatality_multiplier <- 1.25 # increase the fatality rate if ICUs are overloaded
# Expected number of new infections caused per infectious person per day.
#
# R0 is the total number of secondary infections one infectious person
# causes over their whole contagious period, so the per-day rate is
# R0 / contagious_period_days.
#
# BUG FIX: the original returned contagious_period_days / R0 (the inverse),
# which with the defaults (R0 = 2.4, period = 4) implied ~6.7 secondary
# infections per case instead of 2.4.
#
# @param contagious_period_days Length of the contagious period in days.
# @param R0 Effective reproduction number.
# @return Daily infection rate per infectious individual.
infections_per_day_per_infectious <- function(contagious_period_days, R0){
  R0 / contagious_period_days
}
# Number of patients entering the "recovering" stage on date_to_model:
# the cohort that became incubating (recovery_period_days -
# contagious_period_days) days earlier. Returns 0 before the model has
# accumulated enough history.
recovering_patients <- function(results_df, date_to_model, recovery_period_days, contagious_period_days){
  today_row <- which(results_df[["Date"]] == date_to_model)
  source_row <- today_row - recovery_period_days + contagious_period_days
  if (source_row < 1) {
    return(0)
  }
  results_df$newly_incubating[source_row]
}
# Patients whose illness ends on date_to_model (they will either recover or
# die): the cohort that became incubating recovery_period_days earlier.
# Returns 0 before the model has accumulated enough history.
recover_patients <- function(results_df, date_to_model, recovery_period_days){
  today_row <- which(results_df[["Date"]] == date_to_model)
  source_row <- today_row - recovery_period_days
  if (source_row < 1) {
    return(0)
  }
  results_df$newly_incubating[source_row]
}
# Hospitalised patients discharged on date_to_model: the cohort admitted
# hospitalisation_period_days earlier. Returns 0 before the model has
# accumulated enough history.
recover_hospitalised_patients <- function(results_df, date_to_model, hospitalisation_period_days){
  today_row <- which(results_df[["Date"]] == date_to_model)
  admit_row <- today_row - hospitalisation_period_days
  if (admit_row < 1) {
    return(0)
  }
  results_df$newly_hospitalised[admit_row]
}
# ICU patients released (from ICU, not from hospital) on date_to_model:
# the cohort that started needing ICU ICU_period_days earlier. Returns 0
# before the model has accumulated enough history.
recover_ICU_patients <- function(results_df, date_to_model, ICU_period_days){
  today_row <- which(results_df[["Date"]] == date_to_model)
  admit_row <- today_row - ICU_period_days
  if (admit_row < 1) {
    return(0)
  }
  results_df$newly_require_ICU[admit_row]
}
# Deaths occurring on date_to_model among the recovered_patients whose
# illness ends today. Each of the nine age bands contributes
# round(recovered_patients * baseline_fatality_percentage * multipliers);
# the multipliers apply when yesterday's row flags beds/ICU as overloaded.
# NOTE(review): each band's deaths are computed from the FULL
# recovered_patients count, not weighted by demographics$age_demographic,
# so the per-band fatality rates are effectively summed - confirm this is
# the intended model behaviour.
#
# @param results_df Model state; yesterday's *_overloaded flags are read.
# @param recovered_patients Scalar count of patients ending illness today.
# @return Total deaths (numeric scalar).
patient_deaths <- function(results_df
, date_to_model
, recovered_patients # number
, demographics
, overloaded_beds_fatality_multiplier
, overloaded_ICU_fatality_multiplier
){
# A % of 'recovering' patients will die based on age groups and overloading
new_date_row_number <- which(results_df[["Date"]] == date_to_model)
# Overload status is taken from the PREVIOUS day's row.
ICU_overloaded <- results_df$ICU_overloaded[new_date_row_number - 1]
beds_overloaded <- results_df$beds_overloaded[new_date_row_number - 1]
if(recovered_patients < 1){
# Nobody finishing their illness today -> no deaths in any age band.
deaths <- tibble(age_group_id = 1:9
, deaths = rep(0, times = 9)
)
} else {
deaths <- tibble(age_group_id = 1:9
, deaths = purrr::map_dbl(demographics$baseline_fatality_percentage
, function(baseline_fatality_percentage
, recovering_patients
, beds_overloaded
, ICU_overloaded
, overloaded_beds_fatality_multiplier
, overloaded_ICU_fatality_multiplier){
# Fatality rate is inflated by each capacity multiplier that applies,
# then rounded to whole people per age band.
round(recovering_patients * (baseline_fatality_percentage *
ifelse(beds_overloaded
, overloaded_beds_fatality_multiplier
, 1
) *
ifelse(ICU_overloaded
, overloaded_ICU_fatality_multiplier
, 1
)
)
, digits = 0
)
}
, recovering_patients = recovered_patients
, beds_overloaded = beds_overloaded
, ICU_overloaded = ICU_overloaded
, overloaded_beds_fatality_multiplier = overloaded_beds_fatality_multiplier
, overloaded_ICU_fatality_multiplier = overloaded_ICU_fatality_multiplier
))
}
total_deaths <- sum(deaths$deaths)
return(total_deaths)
}
# Patients finishing their incubation period on date_to_model and becoming
# infectious: the cohort that started incubating incubation_period_days
# earlier. This is also the point at which admissions are counted (see
# hospitalise_patients / ICU_patients). Returns 0 before the model has
# accumulated enough history.
become_infectious <- function(results_df, date_to_model, incubation_period_days){
  today_row <- which(results_df[["Date"]] == date_to_model)
  exposed_row <- today_row - incubation_period_days
  if (exposed_row < 1) {
    return(0)
  }
  results_df$newly_incubating[exposed_row]
}
# Count of newly infectious cases admitted to hospital, summed over the
# nine age bands. Each band's count is rounded to whole people before
# summing (matching the original per-band rounding), and the total is
# rounded once more.
hospitalise_patients <- function(new_cases_infectious, demographics){
  admissions_per_band <- round(
    new_cases_infectious * demographics$hospitalisation_percentage,
    digits = 0
  )
  round(sum(admissions_per_band), 0)
}
# Count of newly infectious cases that will need an ICU bed. Each age
# band's hospitalisation count is rounded first, then the band's ICU
# fraction is applied and rounded again (matching the original's nested
# per-band rounding), and the total is rounded once more.
ICU_patients <- function(new_cases_infectious, demographics){
  admissions_per_band <- round(
    new_cases_infectious * demographics$hospitalisation_percentage,
    digits = 0
  )
  icu_per_band <- round(
    admissions_per_band * demographics$require_ICU_percentage,
    digits = 0
  )
  round(sum(icu_per_band), 0)
}
# Number of people still susceptible on date_to_model, computed from the
# previous day's totals: the trust population minus the survivors of past
# infection (total_recovered minus total_died, so deaths are not counted
# twice - they are already removed from trust_population by the caller)
# minus everyone currently in the incubating/infectious/recovering
# pipeline. Floored at zero.
susceptible_population <- function(results_df
, date_to_model
, trust_population
){
  prev <- results_df[which(results_df[["Date"]] == date_to_model) - 1, ]
  immune_survivors <- prev$total_recovered - prev$total_died
  in_progress <- prev$total_incubating + prev$total_infectious + prev$total_recovering
  susceptible <- trust_population - immune_survivors - in_progress
  if (susceptible < 0) 0 else susceptible
}
# New infections seeded on date_to_model: yesterday's infectious pool times
# the per-person daily infection rate implied by yesterday's effective R0,
# scaled by the susceptible share of the population and rounded to whole
# people.
new_infections <- function(results_df
, date_to_model
, contagious_period_days
){
  prev <- results_df[which(results_df[["Date"]] == date_to_model) - 1, ]
  daily_rate <- infections_per_day_per_infectious(contagious_period_days, prev$effective_R0)
  round(prev$total_infectious * daily_rate * prev$susceptible_percentage, 0)
}
# Advance the simulation by one day. Reads historical rows of results_df
# (all cohort flows are derived from earlier "newly_*" columns plus
# yesterday's totals), builds the complete row for date_to_model, writes it
# into results_df in place and returns the updated data frame.
# NOTE(review): the trust_population PARAMETER is immediately shadowed by
# the recomputation below, so its passed-in value is unused - confirm this
# is intentional.
model_new_day <- function(results_df
, effective_R0_df
, demographics
, date_to_model
, trust_population
, UHB_share
, total_beds
, total_ICUs
, occupied_beds_percentage
, occupied_ICU_percentage
, recovery_period_days
, incubation_period_days
, contagious_period_days
, hospitalisation_period_days
, ICU_period_days
, overloaded_beds_fatality_multiplier
, overloaded_ICU_fatality_multiplier
) {
# Row being filled in; effective_R0_df is assumed to be row-aligned with
# results_df (same dates in the same order).
new_date_row_number <- which(results_df[["Date"]] == date_to_model)
effective_R0 <- effective_R0_df[[new_date_row_number, "R0"]]
# Living population: initial population minus cumulative deaths to date.
trust_population <- results_df$trust_population[1] - results_df$total_died[new_date_row_number - 1]
# recovered patients (includes deaths, despite name)
newly_recovered <- recover_patients(results_df, date_to_model, recovery_period_days)
total_recovered <- newly_recovered + results_df$total_recovered[new_date_row_number - 1]
newly_released_hosp <- recover_hospitalised_patients(results_df, date_to_model, hospitalisation_period_days)
newly_released_ICU <- recover_ICU_patients(results_df, date_to_model, ICU_period_days)
#recovered patients that died
newly_died <- patient_deaths(results_df
, date_to_model
, newly_recovered
, demographics
, overloaded_beds_fatality_multiplier
, overloaded_ICU_fatality_multiplier)
total_died <- newly_died + results_df$total_died[new_date_row_number - 1]
# recovering patients (after infectious period, before recovered)
newly_recovering <- recovering_patients(results_df, date_to_model, recovery_period_days, contagious_period_days)
total_recovering <- newly_recovering + results_df$total_recovering[new_date_row_number - 1] - newly_recovered
# cases at end of incubation period
newly_infectious <- become_infectious(results_df, date_to_model, incubation_period_days)
total_infectious <- (results_df$total_infectious[new_date_row_number - 1] - newly_recovering) + newly_infectious
# this is when they become hospitalised
newly_hospitalised <- hospitalise_patients(new_cases_infectious = newly_infectious, demographics)
newly_require_ICU <- ICU_patients(new_cases_infectious = newly_infectious, demographics)
susceptible <- susceptible_population(results_df, date_to_model, trust_population)
susceptible_percentage <- susceptible / trust_population
# Running occupancy: yesterday's census plus admissions minus releases.
total_hospitalised <- results_df$total_hospitalised[new_date_row_number - 1] - newly_released_hosp + newly_hospitalised
total_require_ICU <- results_df$total_require_ICU[new_date_row_number - 1] + newly_require_ICU - newly_released_ICU
newly_incubating <- new_infections(results_df, date_to_model, contagious_period_days)
total_incubating <- results_df$total_incubating[new_date_row_number - 1] + newly_incubating - newly_infectious
# Assemble the complete one-row record for today; column set must match
# the `results` template defined below this function.
new_day_model <- tibble(Date = as.Date(date_to_model)
, effective_R0 = effective_R0
, trust_population = trust_population
, UHB_share = UHB_share
, total_beds = total_beds
, total_ICUs = total_ICUs
, occupied_beds_percentage = occupied_beds_percentage
, occupied_ICU_percentage = occupied_ICU_percentage
, virus_characteristics = list(tibble(
recovery_period_days = recovery_period_days
, incubation_period_days = incubation_period_days
, contagious_period_days = contagious_period_days)
)
, newly_incubating = newly_incubating
, total_incubating = total_incubating
, newly_infectious = newly_infectious
, total_infectious = total_infectious
, newly_recovering = newly_recovering
, total_recovering = total_recovering
, newly_recovered = newly_recovered
, total_recovered = total_recovered
, newly_died = newly_died
, total_died = total_died
, susceptible_population = susceptible
, susceptible_percentage = susceptible_percentage
, newly_hospitalised = newly_hospitalised
, hospitalised_released = newly_released_hosp
, total_hospitalised = total_hospitalised
, beds_overloaded = total_hospitalised > total_beds
, newly_require_ICU = newly_require_ICU
, ICU_released = newly_released_ICU
, total_require_ICU = total_require_ICU
, ICU_overloaded = total_require_ICU > total_ICUs
)
# Overwrite today's row in place and hand back the whole frame.
results_df[new_date_row_number,] <- new_day_model[1,]
return(results_df)
}
# Zero-length Date vector: tibble() needs a typed empty column, and
# as.Date(integer()) via class assignment achieves that.
empty_date <- integer()
class(empty_date) <- "Date"
# Zero-row template for the simulation results. Column names and order
# must match the tibble built inside model_new_day(), which writes one row
# per simulated day into this frame.
results <- tibble(Date = empty_date
, effective_R0 = numeric()
, trust_population = integer()
, UHB_share = numeric()
, total_beds = numeric()
, total_ICUs = numeric()
, occupied_beds_percentage = numeric()
, occupied_ICU_percentage = numeric()
, virus_characteristics = list()
, newly_incubating = numeric()
, total_incubating = numeric()
, newly_infectious = numeric()
, total_infectious = numeric()
, newly_recovering = numeric()
, total_recovering = numeric()
, newly_recovered = numeric()
, total_recovered = numeric()
, newly_died = numeric()
, total_died = numeric()
, susceptible_population = numeric()
, susceptible_percentage = numeric()
, newly_hospitalised = numeric()
, hospitalised_released = numeric()
, total_hospitalised = numeric()
, beds_overloaded = logical()
, newly_require_ICU = numeric()
, ICU_released = numeric()
, total_require_ICU = numeric()
, ICU_overloaded = logical()
)
|
cdb145749d3ea682671d22477612dcd7d8d391c2
|
ffdea92d4315e4363dd4ae673a1a6adf82a761b5
|
/data/genthat_extracted_code/tricolore/examples/PowerScale.Rd.R
|
2dc1ba3f6d73c2c901f20375d3eb78f4c9ec0136
|
[] |
no_license
|
surayaaramli/typeRrh
|
d257ac8905c49123f4ccd4e377ee3dfc84d1636c
|
66e6996f31961bc8b9aafe1a6a6098327b66bf71
|
refs/heads/master
| 2023-05-05T04:05:31.617869
| 2019-04-25T22:10:06
| 2019-04-25T22:10:06
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 224
|
r
|
PowerScale.Rd.R
|
library(tricolore)
### Name: PowerScale
### Title: Compositional Powering
### Aliases: PowerScale
### Keywords: internal
### ** Examples
# Four random 3-part compositions: prop.table(..., margin = 1) makes each
# row sum to 1.
P <- prop.table(matrix(runif(12), 4), margin = 1)
# Apply compositional powering with scaling factor 2 (internal function,
# hence the ::: access).
tricolore:::PowerScale(P, 2)
|
4ecf02afbd0981b624c36cdedf6d57811589f440
|
5ae17fd85ab9a7df754eb21aaca4ad742c7767bd
|
/R/Data.R
|
58c836327c5ad816bb6050500a3294ebd0f881ba
|
[
"MIT"
] |
permissive
|
fdzul/rhealthdiag
|
e014c1e8dcd1bf724891b2a2900950c98ef29ca0
|
8d2e251916ffbb83c9f2771c231b8224722f8da9
|
refs/heads/main
| 2023-08-19T04:34:18.121288
| 2021-09-18T20:25:11
| 2021-09-18T20:25:11
| 405,239,307
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,235
|
r
|
Data.R
|
#' Mortality data of Puebla state.
#'
#' A dataset containing the general mortality data (2012-2019)
#' from state of Puebla.
#'
#' @format A dataframe object with 284000 rows and 16 variables:
#' \describe{
#' \item{ent_ocurr}{state id. Two digits, Puebla state is "21".}
#' \item{mun_ocurr}{municipality id. Three digits.}
#' \item{loc_ocurr}{locality id. Four digits.}
#' \item{causa_def_cve}{cause of death identifier.}
#' \item{lista_mex_cve}{mexican list id.}
#' \item{sexo}{sexo of death.}
#' \item{edad_cve}{age id.}
#' \item{anio_ocur}{year of occurrence.}
#' \item{anio_nacim}{year of birth.}
#' \item{ocupacion_cve}{occupation identifier.}
#' \item{escolaridad_cve}{scholarship identifier.}
#' \item{causa_def_des}{description of cause of death.}
#' \item{escolaridad_des}{description of schooling.}
#' \item{ocupacion_des}{description of the occupation.}
#' \item{edad_des}{description of the age.}
#' \item{lista_mex_des}{description of mexican list.}
#' ...
#' }
#' @source \url{https://www.inegi.org.mx/programas/mortalidad/#Datos_abiertos}
"mortalidad_general_21"
#' Mortality data of Guerrero state.
#'
#' A dataset containing the general mortality data (2012-2019)
#' from state of Guerrero.
#'
#' @format A dataframe object with 149051 rows and 16 variables:
#' \describe{
#' \item{ent_ocurr}{state id. Two digits, Guerrero state is "12".}
#' \item{mun_ocurr}{municipality id. Three digits.}
#' \item{loc_ocurr}{locality id. Four digits.}
#' \item{causa_def_cve}{cause of death identifier.}
#' \item{lista_mex_cve}{mexican list id.}
#' \item{sexo}{sexo of death.}
#' \item{edad_cve}{age id.}
#' \item{anio_ocur}{year of occurrence.}
#' \item{anio_nacim}{year of birth.}
#' \item{ocupacion_cve}{occupation identifier.}
#' \item{escolaridad_cve}{scholarship identifier.}
#' \item{causa_def_des}{description of cause of death.}
#' \item{escolaridad_des}{description of schooling.}
#' \item{ocupacion_des}{description of the occupation.}
#' \item{edad_des}{description of the age.}
#' \item{lista_mex_des}{description of mexican list.}
#' ...
#' }
#' @source \url{https://www.inegi.org.mx/programas/mortalidad/#Datos_abiertos}
"mortalidad_general_12"
|
a5b54ba64f0620d137bcd88aba506a8b15c75fb2
|
ffdea92d4315e4363dd4ae673a1a6adf82a761b5
|
/data/genthat_extracted_code/HiveR/examples/HEC.Rd.R
|
afa610c465cca4d519e07725b1f83a6609e70688
|
[] |
no_license
|
surayaaramli/typeRrh
|
d257ac8905c49123f4ccd4e377ee3dfc84d1636c
|
66e6996f31961bc8b9aafe1a6a6098327b66bf71
|
refs/heads/master
| 2023-05-05T04:05:31.617869
| 2019-04-25T22:10:06
| 2019-04-25T22:10:06
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 3,007
|
r
|
HEC.Rd.R
|
library(HiveR)
### Name: HEC
### Title: A HivePlotData Object of the Hair Eye Color Data Set
### Aliases: HEC
### Keywords: datasets
### ** Examples
# An example of building an HPD from scratch
### Step 0. Get to know your data.
data(HairEyeColor) # see ?HairEyeColor for background
df <- data.frame(HairEyeColor) # str(df) is useful
# Frequencies of the colors can be found with:
eyeF <- aggregate(Freq ~ Eye, data = df, FUN = 'sum')
hairF <- aggregate(Freq ~ Hair, data = df, FUN = 'sum')
# Sizes are normalised against one chosen reference level of each factor.
es <- eyeF$Freq/eyeF$Freq[4] # node sizes for eye
hs <- hairF$Freq/hairF$Freq[3] # node sizes for hair
### Step 1. Assemble a data frame of the nodes.
# There are 32 rows in the data frame, but we are going to
# separate the hair color from the eye color and thus
# double the number of rows in the node data frame
nodes <- data.frame(
id = 1:64,
lab = paste(rep(c("hair", "eye"), each = 32), 1:64, sep = "_"),
axis = rep(1:2, each = 32),
radius = rep(NA, 64))
# (nodes$size does not exist yet; the first assignment inside the loop
# creates the column, NA-padded, via R's recursive replacement rules.)
for (n in 1:32) {
# assign node radius based most common colors
if (df$Hair[n] == "Black") nodes$radius[n] <- 2
if (df$Hair[n] == "Brown") nodes$radius[n] <- 4
if (df$Hair[n] == "Red") nodes$radius[n] <- 1
if (df$Hair[n] == "Blond") nodes$radius[n] <- 3
if (df$Eye[n] == "Brown") nodes$radius[n + 32] <- 1
if (df$Eye[n] == "Blue") nodes$radius[n + 32] <- 2
if (df$Eye[n] == "Hazel") nodes$radius[n + 32] <- 3
if (df$Eye[n] == "Green") nodes$radius[n + 32] <- 4
# now do node sizes
if (df$Hair[n] == "Black") nodes$size[n] <- hs[1]
if (df$Hair[n] == "Brown") nodes$size[n] <- hs[2]
if (df$Hair[n] == "Red") nodes$size[n] <- hs[3]
if (df$Hair[n] == "Blond") nodes$size[n] <- hs[4]
if (df$Eye[n] == "Brown") nodes$size[n + 32] <- es[4]
if (df$Eye[n] == "Blue") nodes$size[n + 32] <- es[3]
if (df$Eye[n] == "Hazel") nodes$size[n + 32] <- es[2]
if (df$Eye[n] == "Green") nodes$size[n + 32] <- es[1]
}
nodes$color <- rep("black", 64)
nodes$lab <- as.character(nodes$lab) # clean up some data types
nodes$radius <- as.numeric(nodes$radius)
### Step 2. Assemble a data frame of the edges.
edges <- data.frame( # There will be 32 edges, corresponding to the original 32 rows
id1 = c(1:16,49:64), # This will set up edges between each eye/hair pair
id2 = c(33:48,17:32), # & put the males above and the females below
weight = df$Freq,
color = rep(c("lightblue", "pink"), each = 16))
edges$color <- as.character(edges$color)
# Scale the edge weight (det'd by trial & error to emphasize differences)
edges$weight <- 0.25*log(edges$weight)^2.25
### Step 3. Now assemble the HivePlotData (HPD) object.
# An HPD is a classed list with nodes, edges and a handful of metadata
# fields; see ?HivePlotData in HiveR for the expected structure.
HEC <- list()
HEC$nodes <- nodes
HEC$edges <- edges
HEC$type <- "2D"
HEC$desc <- "HairEyeColor data set"
HEC$axis.cols <- c("grey", "grey")
class(HEC) <- "HivePlotData"
### Step 4. Check it & summarize
chkHPD(HEC) # answer of FALSE means there are no problems
sumHPD(HEC)
### Step 5. Plot it.
# A minimal plot
plotHive(HEC, ch = 0.1, bkgnd = "white")
# See ?plotHive for fancier options
|
fcc4b2fc790ee2055d7d90bffaf991eed7211d73
|
d541ac8c98d952b3c7ef0593469a468645f904c2
|
/man/maf_tab.Rd
|
b0fe787d18de1193672912d987cfc9ff186a57f3
|
[] |
no_license
|
pdiakumis/varpr
|
a65a5c29580a5a2902dd31eb9645914c72c10f93
|
2d84597a45765cf3a8a0a5754ab29d703ddfb350
|
refs/heads/master
| 2021-04-26T22:18:22.954030
| 2016-05-11T08:05:28
| 2016-05-11T08:05:28
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 689
|
rd
|
maf_tab.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/maf_tab.R
\name{maf_tab}
\alias{maf_tab}
\title{Returns a summary of a MAF variable (i.e. 1KG or ESP6500)}
\usage{
maf_tab(maf_vec)
}
\arguments{
\item{maf_vec}{The vector containing the MAF values.}
}
\value{
An integer vector with the counts of each condition
}
\description{
\code{maf_tab} reads in a MAF vector (i.e. 1KG or ESP6500) and outputs the
number of variants satisfying certain conditions.
}
\examples{
\dontrun{
maf_tab(vars$aaf.1KG) # assumes you have a vars data frame
maf_tab(vars$esp6500_all) # assumes you have a vars data frame
}
}
\seealso{
\code{\link{sum}} and \code{\link{setNames}}.
}
|
b4f645870ff6c7d0dc590d6cab3651dacd6888d0
|
bd103e2de3420542c0fb1aba8866799c4a322518
|
/R/03-script-manipulacion-datos-02.R
|
f168397e0b70c6ac25185beec00ae23e20dbda21
|
[] |
no_license
|
jbkunst/puc-introduccion-a-R
|
8670c0fab9fec40a718e991588325de43079bd05
|
46d54b5d9ae400ccb7b188aa87f9ffdb2daa0656
|
refs/heads/master
| 2021-07-05T17:50:33.980721
| 2019-11-05T14:29:56
| 2019-11-05T14:29:56
| 168,742,737
| 2
| 0
| null | 2020-09-04T08:50:02
| 2019-02-01T18:33:08
|
R
|
UTF-8
|
R
| false
| false
| 1,473
|
r
|
03-script-manipulacion-datos-02.R
|
# packages ----------------------------------------------------------------
library(tidyverse)
# data --------------------------------------------------------------------
songs <- read_csv("https://raw.githubusercontent.com/rstudio/EDAWR/master/data-raw/songs.csv")
songs
artists <- read_csv("https://raw.githubusercontent.com/rstudio/EDAWR/master/data-raw/artists.csv")
artists
# joins -------------------------------------------------------------------
left_join(songs, artists, by = "name")
# Without `by`, dplyr joins on all shared column names and reports them.
left_join(songs, artists)
semi_join(songs, artists, by = "name")
anti_join(songs, artists, by = "name")
# spread & gather ---------------------------------------------------------
# NOTE(review): gather()/spread() are superseded in tidyr by
# pivot_longer()/pivot_wider(); kept here as the course was taught with them.
data("table4a")
table4a
data("table2")
table2
# exercise 1
# compute the mean values per country from table4a
table4a
table4a %>%
mutate(promedio = (`1999` + `2000`)/2)
# Long format: one row per country/year with the case count.
gather(table4a, `1999`, `2000`, key = "year", value = "cases")
table4ag <- gather(table4a, `1999`, `2000`, key = "year", value = "cases")
table4ag
table4ag %>%
group_by(country) %>%
summarise(promedio = mean(cases))
# exercise 2
# compute the mean number of cases (the `cases` column) per country from table2
table2
spread(table2, type, count)
table2s <- spread(table2, type, count)
table2s
table4ag %>%
group_by(country) %>%
summarise(promedio = mean(cases))
# there are many ways to do it
table2 %>%
filter(type == "cases") %>%
group_by(country) %>%
summarise(promedio = mean(count))
|
1f44d49969c5af722f90b125fc46a87b48054c95
|
12d744259fcc219696bc0ba844b7c0f5afb540e6
|
/rawdata/Supplement to CTDA/Ch10_BioeqCT.R
|
ef414a7133df3206a50cc43054e04cb372cbd112
|
[] |
no_license
|
Secondmoon-Kiom/CNUH-R-Lecture-2017
|
03d2984f725643fa7f9d05bd476c58557b2d2815
|
634bc9a67a8ee63e5b56800bdb2a2058831b56c7
|
refs/heads/master
| 2021-03-27T19:46:43.727988
| 2018-01-30T17:11:15
| 2018-01-30T17:11:15
| 111,091,579
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 27,062
|
r
|
Ch10_BioeqCT.R
|
###################################################
# Title: Chapter 10: Bioequivalence CT
# Author: Dr. DG. Chen
###################################################
###################################################
### chunk number 4: BE.dat2
###################################################
#line 102 "R4Bioeq.rnw"
require(RODBC)
datfile = "c:/R4CTDA/dataset/datR4CTDA.xlsx"
getxlsbook = odbcConnectExcel2007(datfile)
dat = sqlFetch(getxlsbook,"Cimetidine")
odbcCloseAll()
###################################################
### chunk number 5:
###################################################
#line 109 "R4Bioeq.rnw"
library(xtable)
print(xtable(dat, digits= c(0,0,0,0,0,2,2,1),
caption="AUC, CMAX and TMAX data from Cimetidine" ,
label = "BE.Peacedat"),table.placement = "thbp",
caption.placement="top",include.rownames=FALSE)
###################################################
### chunk number 6: BE.dat
###################################################
#line 390 "R4Bioeq.rnw"
# get the library
require(RODBC)
# point to the excel file
datfile = "c:/R4CTDA/dataset/datR4CTDA.xlsx"
# connect to the excel book
getxlsbook = odbcConnectExcel2007(datfile)
# read in the data in that sheet
dat = sqlFetch(getxlsbook,"ChowLiuTab361")
odbcCloseAll()
###################################################
### chunk number 7: BE.meantab
###################################################
#line 406 "R4Bioeq.rnw"
# use ``aggregate" to the sample size
tab.n = aggregate(dat$AUC,list(seq=dat$Sequence,
prd=dat$Period),length)
n1 = tab.n[tab.n$seq==1 & tab.n$prd==1,]$x
n2 = tab.n[tab.n$seq==2 & tab.n$prd==1,]$x
n = n1+n2
# use ``aggregate" to get the mean
tab.mean = aggregate(dat$AUC,list(seq=dat$Sequence,
prd=dat$Period),mean)
# use ``aggregate" to get the variance
tab.var = aggregate(dat$AUC,list(seq=dat$Sequence,
prd=dat$Period),var)
# make a dataframe for the summary data
summaryTab = data.frame(Sequence=tab.mean$seq,
Period=tab.mean$prd, numSample = tab.n$x,
Mean = tab.mean$x, Var=tab.var$x)
# print the summary table
round(summaryTab,2)
###################################################
### chunk number 8:
###################################################
#line 446 "R4Bioeq.rnw"
tapply(dat$AUC, dat[,c("Sequence","Period")], mean)
###################################################
### chunk number 9: BE.Uik
###################################################
#line 466 "R4Bioeq.rnw"
Uik = aggregate(dat$AUC,
list(seq = dat$Sequence,sub=dat$Subject), sum)
colnames(Uik) = c("seq", "sub","Uik")
###################################################
### chunk number 10: BE.mUk
###################################################
#line 478 "R4Bioeq.rnw"
mUk = aggregate(Uik$Uik, list(seq=Uik$seq), mean)
colnames(mUk) = c("seq", "mUk")
print(mUk)
###################################################
### chunk number 11: BE.C
###################################################
#line 490 "R4Bioeq.rnw"
hatC = mUk[2,2]-mUk[1,2]
hatC
###################################################
### chunk number 12: BE.var
###################################################
#line 506 "R4Bioeq.rnw"
dU = merge(Uik, mUk)
sigu2 = sum((dU$Uik-dU$mUk)^2)/(n1+n2-2)
sigu2
###################################################
### chunk number 13:
###################################################
#line 517 "R4Bioeq.rnw"
se.sigu = sqrt(sigu2*(1/n1+1/n2))
TC = hatC/se.sigu
TC
###################################################
### chunk number 14: BE.pC
###################################################
#line 527 "R4Bioeq.rnw"
pC = 2*(1-pt(abs(TC), n1+n2-2))
pC
###################################################
### chunk number 15: BE.dd
###################################################
#line 539 "R4Bioeq.rnw"
dik = aggregate(dat$AUC,
list(sub=dat$Subject,seq=dat$Sequence),diff)
dik$x = dik$x/2
colnames(dik)= c("sub", "seq","dik")
dik
###################################################
### chunk number 16:
###################################################
#line 553 "R4Bioeq.rnw"
mdk = aggregate(dik$dik, list(seq=dik$seq), mean)
colnames(mdk) = c("seq", "mdk")
hatF = mdk[1,2]-mdk[2,2]
hatF
###################################################
### chunk number 17: BE.vard
###################################################
#line 573 "R4Bioeq.rnw"
dF = merge(dik, mdk)
sigd2 = sum((dF$dik-dF$mdk)^2)/(n1+n2-2)
sigd2
###################################################
### chunk number 18:
###################################################
#line 584 "R4Bioeq.rnw"
se.sigd = sqrt(sigd2*(1/n1+1/n2))
TF = hatF/se.sigd
TF
###################################################
### chunk number 19:
###################################################
#line 594 "R4Bioeq.rnw"
pF = 2*(1-pt(abs(TF), n1+n2-2))
pF
###################################################
### chunk number 20: BE.ANOVA
###################################################
#line 609 "R4Bioeq.rnw"
# cat("We first re-format the data into R dataframe","\n")
Data = data.frame(subj = as.factor(dat$Subject),
drug = as.factor(dat$Formulation),
seq = as.factor(dat$Sequence),
prd = as.factor(dat$Period),
AUC = dat$AUC)
# cat("Then call R function aov for ANOVA Table", "\n")
summary(aov(AUC ~ seq*drug + Error(subj), data = Data))
###################################################
### chunk number 21: BE.decisionCI
###################################################
#line 636 "R4Bioeq.rnw"
# get the mean for AUC by Formulation
mdrug = tapply(dat$AUC, list(drug=dat$Formulation), mean)
# extract the means
ybarT = mdrug["T"]
ybarR = mdrug["R"]
# make the decision CI
dec2.low = theta.L = -0.2*ybarR
dec2.up = theta.U = 0.25*ybarR
cat("DecisionCI.mean=(",dec2.low,",",dec2.up,")",sep="","\n")
###################################################
### chunk number 22: BE.CI12
###################################################
#line 659 "R4Bioeq.rnw"
# the confidence coefficient: alpha
alphaCI = .1
# the t-value
qt.alpha = qt(1-alphaCI, n1+n2-2)
qt.alpha
# the lower and upper limits for CI1
low1 = (ybarT-ybarR)-qt.alpha*sqrt(sigd2)*sqrt(1/n1+1/n2)
up1 = (ybarT-ybarR)+qt.alpha*sqrt(sigd2)*sqrt(1/n1+1/n2)
cat("The classical CI1=(", round(low1,3),",",
round(up1,3),")", sep=" ","\n\n")
# the lower and upper limits for CI2
low2 = (low1/ybarR+1)*100
up2 = (up1/ybarR+1)*100
cat("The Ratio CI2=(", round(low2,3),",",
round(up2,3),")", sep=" ","\n\n")
###################################################
### chunk number 23:
###################################################
#line 686 "R4Bioeq.rnw"
k12 = 2*(ybarR-ybarT)/sqrt( sigd2*(1/n1+1/n2))
###################################################
### chunk number 24:
###################################################
#line 692 "R4Bioeq.rnw"
k2 = uniroot(function(k2) pt(k12-k2,n1+n2-2)- pt(k2,n1+n2-2)
-(1-alphaCI),lower = -10, upper = 10, tol = 0.0001)$root
k1 =k12-k2
cat("The Westlake k1=",k1," and k2=",k2,sep=" ", "\n\n")
###################################################
### chunk number 25:
###################################################
#line 700 "R4Bioeq.rnw"
low.west = k2*sqrt(sigd2*(1/n1+1/n2))-(ybarR-ybarT)
up.west = k1*sqrt(sigd2*(1/n1+1/n2))-(ybarR-ybarT)
cat("The Westlake CI for mu_T-mu_A is
(",low.west,",",up.west,")",sep=" ", "\n\n")
###################################################
### chunk number 26:
###################################################
#line 712 "R4Bioeq.rnw"
TL = (ybarT-ybarR-theta.L)/sqrt(sigd2*(1/n1+1/n2))
TU = (ybarT-ybarR-theta.U)/sqrt(sigd2*(1/n1+1/n2))
###################################################
### chunk number 27:
###################################################
#line 723 "R4Bioeq.rnw"
pL = 1-pt(abs(TL), n1+n2-2)
pU = pt(TU,n1+n2-2)
p1side = max(pL, pU)
###################################################
### chunk number 28: BE.bayes
###################################################
#line 736 "R4Bioeq.rnw"
tL = (theta.L -(ybarT-ybarR))/sqrt(sigd2*(1/n1+1/n2))
tU = (theta.U -(ybarT-ybarR))/sqrt(sigd2*(1/n1+1/n2))
pRD = pt(tU, n1+n2-2) - pt(tL, n1+n2-2)
pRD
###################################################
### chunk number 29:
###################################################
#line 753 "R4Bioeq.rnw"
dR = dat[dat$Formulation=="R",c("Subject","AUC")]
dT = dat[dat$Formulation=="T",c("Subject","AUC")]
colnames(dR) = c("Subject","AUC4R")
colnames(dT) = c("Subject","AUC4T")
dRT = merge(dR,dT)
rT2R = dRT$AUC4T/dRT$AUC4R
rT2R
###################################################
### chunk number 30:
###################################################
#line 764 "R4Bioeq.rnw"
k = 1/sqrt(1-.9)
rbar = mean(rT2R)
sigrbar = sqrt(var(rT2R)/n)
rbar
sigrbar
###################################################
### chunk number 31: BE.BTCI
###################################################
#line 773 "R4Bioeq.rnw"
low.BT = rbar-k*sigrbar
up.BT = rbar+k*sigrbar
cat("The Tchebycheff CI for mu_T/mu_A is
(",low.BT,",",up.BT,")",sep="", "\n\n")
###################################################
### chunk number 32: BE.boot
###################################################
#line 791 "R4Bioeq.rnw"
# Nonparametric bootstrap of the T/R AUC ratio, computed two ways:
#   boota - resample the individual ratios rT2R and take their mean
#   bootb - resample subjects (rows of dRT) and take the ratio of means
# B=number of bootstrap
B = 2000
# preallocate the result vectors: the original grew them from NULL one
# element at a time (boota = bootb = NULL), which copies on every append
boota = bootb = numeric(B)
for(b in seq_len(B)){
# bootstrap the observed individual ratios
boota[b] = mean(sample(rT2R, replace=TRUE))
# bootstrap the individuals and calculate the means
tmp = dRT[sample(1:n, replace=TRUE),]
bootb[b] = mean(tmp$AUC4T)/mean(tmp$AUC4R)
}
###################################################
### chunk number 33:
###################################################
#line 805 "R4Bioeq.rnw"
qxa = quantile(boota, c(0.05, 0.95))
qxa
qxb = quantile(bootb, c(0.05, 0.95))
qxb
###################################################
### chunk number 34: BE.boot1plot
###################################################
#line 818 "R4Bioeq.rnw"
hist(boota,nclass=30, freq=F,las=1,
xlab="Mean of the Ratios", ylab="Density", main="")
box()
den = density(boota)
lines(den, lwd=2)
qya = approx(den$x, den$y, c(qxa,rbar))$y
segments(qxa[1],0,qxa[1],qya[1], lwd=5)
segments(qxa[2],0,qxa[2],qya[2], lwd=5)
segments(rbar,0,rbar,qya[3],lty=4, lwd=5)
###################################################
### chunk number 35:
###################################################
#line 832 "R4Bioeq.rnw"
#line 818 "R4Bioeq.rnw#from line#832#"
hist(boota,nclass=30, freq=F,las=1,
xlab="Mean of the Ratios", ylab="Density", main="")
box()
den = density(boota)
lines(den, lwd=2)
qya = approx(den$x, den$y, c(qxa,rbar))$y
segments(qxa[1],0,qxa[1],qya[1], lwd=5)
segments(qxa[2],0,qxa[2],qya[2], lwd=5)
segments(rbar,0,rbar,qya[3],lty=4, lwd=5)
#line 833 "R4Bioeq.rnw"
###################################################
### chunk number 36: BE.boot2plot
###################################################
#line 840 "R4Bioeq.rnw"
hist(bootb,nclass=30, freq=F,las=1,
xlab="Ratio of the Means", ylab="Density", main="")
box()
den = density(bootb)
lines(den, lwd=2)
rmean = mean(dRT$AUC4T)/mean(dRT$AUC4R)
qyb = approx(den$x, den$y, c(qxb,rmean))$y
segments(qxb[1],0,qxb[1],qyb[1], lwd=5)
segments(qxb[2],0,qxb[2],qyb[2], lwd=5)
segments(rmean,0,rmean,qyb[3],lty=4, lwd=5)
###################################################
### chunk number 37:
###################################################
#line 855 "R4Bioeq.rnw"
#line 840 "R4Bioeq.rnw#from line#855#"
hist(bootb,nclass=30, freq=F,las=1,
xlab="Ratio of the Means", ylab="Density", main="")
box()
den = density(bootb)
lines(den, lwd=2)
rmean = mean(dRT$AUC4T)/mean(dRT$AUC4R)
qyb = approx(den$x, den$y, c(qxb,rmean))$y
segments(qxb[1],0,qxb[1],qyb[1], lwd=5)
segments(qxb[2],0,qxb[2],qyb[2], lwd=5)
segments(rmean,0,rmean,qyb[3],lty=4, lwd=5)
#line 856 "R4Bioeq.rnw"
###################################################
### chunk number 38:
###################################################
#line 865 "R4Bioeq.rnw"
shapiro.test(bootb)
###################################################
### chunk number 39: BE.qqnorm
###################################################
#line 869 "R4Bioeq.rnw"
qqnorm(bootb, main="")
qqline(bootb)
###################################################
### chunk number 40:
###################################################
#line 876 "R4Bioeq.rnw"
#line 869 "R4Bioeq.rnw#from line#876#"
qqnorm(bootb, main="")
qqline(bootb)
#line 877 "R4Bioeq.rnw"
###################################################
### chunk number 41: BE.pdat
###################################################
#line 889 "R4Bioeq.rnw"
require(RODBC)
datfile = "c:/R4CTDA/dataset/datR4CTDA.xlsx"
getxlsbook = odbcConnectExcel2007(datfile)
datRaw0 = sqlFetch(getxlsbook,"CimetidineRaw", colnames=F)
odbcCloseAll()
###################################################
### chunk number 42: BE.pdat
###################################################
#line 909 "R4Bioeq.rnw"
require(RODBC)
datfile = "c:/R4CTDA/dataset/datR4CTDA.xlsx"
getxlsbook = odbcConnectExcel2007(datfile)
datRaw0 = sqlFetch(getxlsbook,"CimetidineRaw", colnames=F)
odbcCloseAll()
print(datRaw0)
###################################################
### chunk number 43:
###################################################
#line 922 "R4Bioeq.rnw"
datRaw = reshape(datRaw0, direction="long",
varying=-1,idvar = "Subject",sep="")
datRaw$time = datRaw$time/10
colnames(datRaw) = c("subj","time","conc")
head(datRaw)
###################################################
### chunk number 44: BE.fig.pdat
###################################################
#line 932 "R4Bioeq.rnw"
library(lattice)
print(xyplot(conc~time,group=subj,datRaw,xlab="Time(HR)",
xlim=c(0,13),auto.key = list(corner=c(1,1),lines = TRUE) ,
ylab="Concentration(mCG/ML)",type=c("p","a")))
###################################################
### chunk number 45:
###################################################
#line 941 "R4Bioeq.rnw"
#line 932 "R4Bioeq.rnw#from line#941#"
library(lattice)
print(xyplot(conc~time,group=subj,datRaw,xlab="Time(HR)",
xlim=c(0,13),auto.key = list(corner=c(1,1),lines = TRUE) ,
ylab="Concentration(mCG/ML)",type=c("p","a")))
#line 942 "R4Bioeq.rnw"
###################################################
### chunk number 46: BE.fig.mpdat
###################################################
#line 952 "R4Bioeq.rnw"
# make the mean concentration
dat.mean= aggregate(datRaw$conc, list(time=datRaw$time), mean)
# plot it with a line
plot(conc~time,las=1,type="n",datRaw,xlab="Time",xlim=c(0,13),
ylim=c(0, 4), ylab="Mean Concentration")
lines(x~time,dat.mean, lty=1, lwd=3)
###################################################
### chunk number 47:
###################################################
#line 963 "R4Bioeq.rnw"
#line 952 "R4Bioeq.rnw#from line#963#"
# make the mean concentration
dat.mean= aggregate(datRaw$conc, list(time=datRaw$time), mean)
# plot it with a line
plot(conc~time,las=1,type="n",datRaw,xlab="Time",xlim=c(0,13),
ylim=c(0, 4), ylab="Mean Concentration")
lines(x~time,dat.mean, lty=1, lwd=3)
#line 964 "R4Bioeq.rnw"
###################################################
### chunk number 48: BE.fn.beta
###################################################
#line 973 "R4Bioeq.rnw"
# make.beta(dt): estimate the terminal-elimination slope (beta) from one
# subject's time-concentration profile. `dt` must be a data frame with
# columns `time` and `conc`. Strategy: starting from the last 3 (and 4)
# points and adding one earlier point per iteration — never going past
# tmax — fit log(conc) ~ time and stop as soon as adding a point no
# longer improves R-squared.
make.beta = function(dt){
  # terminal elimination beta to find the slope for R2(k+1) <R2(k)
n = length(dt$conc) # get the length of data
# end the loop at tmax
tmax = which.max(dt$conc)
# loop over starting from the last time-conc point to tmax
# (k counts down: n, n-1, ..., tmax, widening the fit window each step)
for(k in n:tmax){
dt1 = dt[((k-2):n),] # start with last 3 pts and move on
dt2 = dt[((k-3):n),] # start with last 4 pts and move on
# some data have 0s at the end of t-c curve and make the lm crash
# so require this dataframe to hold at least 3 positive points
if( dim(dt1[dt1$conc>0,])[1]>= 3 ){
# fit log(conc) to time and track the r-square
m1 = lm(log(conc)~time, dt1[(dt1$conc>0),])
m2 = lm(log(conc)~time, dt2[(dt2$conc>0),])
betat = m1$coef[[2]]
#cat("Check=",summary(m1)$r.squared > summary(m2)$r.squared,"
#and Stopped at", k, "with beta=",betat,sep=" ","\n\n")
if(summary(m1)$r.squared > summary(m2)$r.squared) break
} # end of if-loop
} # end of k-for-loop
#cat("final beta=",betat,"\n\n")
# NOTE(review): if no window ever contains >= 3 positive concentrations,
# `betat` is never assigned and this errors with "object not found" —
# confirm the input profiles always satisfy that.
# return
betat
} # end of make-beta function
###################################################
### chunk number 49: BE.fn.make
###################################################
#line 1005 "R4Bioeq.rnw"
# make(dt): compute the pharmacokinetic endpoints for one subject's
# time-concentration profile (`dt` with columns `time`, `conc`).
# Returns c(AUC, Cmax, Tmax, betat, t-half, AUCinf).
make = function(dt){
time = dt$time; conc = dt$conc
# calculate AUC by the linear trapezoidal rule
t.dif = diff(time) # the t(i)-t(i-1)
c.mean = (conc[-1]+conc[-length(conc)])/2
auc = sum(t.dif*c.mean)
# Cmax = highest observed concentration
cmax = max(conc)
# tmax = time of the highest observed concentration
tmax = dt[which.max(dt$conc),]$time
# terminal elimination beta to find the slope for R2(k+1) <R2(k)
betat = make.beta(dt)
# terminal halflife
# NOTE(review): make.beta fits log(conc) with R's natural log, so the
# half-life is already -log(2)/betat; the extra *2.303 factor looks like
# a ln/log10 mix-up — confirm against the source text before changing.
t5 = round(-log(2)/betat*2.303,1)
# AUC extrapolated to infinity: observed AUC plus the terminal tail
aucinf = auc+ conc[length(conc)]/betat
# return the results.
c(auc,cmax,tmax, betat, t5, aucinf)
}
###################################################
### chunk number 50:
###################################################
#line 1028 "R4Bioeq.rnw"
# Compute the PK endpoints (AUC, CMAX, TMAX, beta, half-life, AUCinf) for
# every subject in `datRaw` by applying make() to each subject's profile.
name.subj = sort(unique(datRaw$subj))
num.subj = length(name.subj)
# one row per subject: subject id plus the six endpoints from make()
endpts = matrix(0, nrow=num.subj, ncol=7)
colnames(endpts) = c("subj","AUC","CMAX","TMAX",
		"betat","t5","AUCinf")
# seq_len() instead of the original 1:num.subj, so an empty subject list
# yields zero iterations rather than the bogus sequence c(1, 0)
for(id in seq_len(num.subj)){
tmp = datRaw[(datRaw$subj == name.subj[id]),
	c("time","conc")]
endpts[id,] = c(name.subj[id],make(tmp))
}
endpts
###################################################
### chunk number 51:
###################################################
#line 1051 "R4Bioeq.rnw"
require(RODBC)
datfile = "c:/R4CTDA/dataset/datR4CTDA.xlsx"
getxlsbook = odbcConnectExcel2007(datfile)
dat = sqlFetch(getxlsbook,"Cimetidine", colnames=F)
odbcCloseAll()
###################################################
### chunk number 52: BE.pANOVA
###################################################
#line 1062 "R4Bioeq.rnw"
Data = data.frame(subj = as.factor(dat$Subject),
drug = as.factor(dat$Formulation),
seq = as.factor(dat$Sequence),
prd = as.factor(dat$Period),
AUC = dat$AUC, lAUC= log(dat$AUC),
CMAX = dat$CMAX, lCMAX= log(dat$CMAX))
###################################################
### chunk number 53:
###################################################
#line 1072 "R4Bioeq.rnw"
nsubj = tapply(dat$Subject, list(dat$Sequence), length)/2
n1 = nsubj[1]
n2 = nsubj[2]
n = n1+n2
###################################################
### chunk number 54:
###################################################
#line 1089 "R4Bioeq.rnw"
# the fixed model using lm for "formulation" and "period"
mdAUC = lm(AUC ~ seq + subj:seq + prd + drug,data = Data)
print(anova(mdAUC))
# the random effect model using aov for carryover and other effects
mdAUC.R = aov(AUC ~ prd * drug + Error(subj), data = Data)
print(summary(mdAUC.R))
###################################################
### chunk number 55:
###################################################
#line 1104 "R4Bioeq.rnw"
# ANOVA for the log-transformed AUC (lAUC), mirroring chunk 54's AUC analysis.
# the fixed model using lm for "formulation" and "period"
mdlAUC = lm(lAUC ~ seq + subj:seq + prd + drug,data = Data)
print(anova(mdlAUC))
# the random effect model using aov for carryover and other effects
# Fix: in the tangled script these two statements had been collapsed onto
# the comment line above, so the random-effects model for lAUC was never
# fit or printed (compare chunks 54, 56 and 57, which all run it).
mdlAUC.R = aov(lAUC ~ prd * drug + Error(subj), data = Data)
print(summary(mdlAUC.R))
###################################################
### chunk number 56:
###################################################
#line 1112 "R4Bioeq.rnw"
# the fixed model using lm for "formulation" and "period"
mdCMAX = lm(CMAX ~ seq + subj:seq + prd + drug,data = Data)
print(anova(mdCMAX))
# the random effect model using aov for carryover and other effects
mdCMAX.R = aov(CMAX ~ prd * drug + Error(subj), data = Data)
print(summary(mdCMAX.R))
###################################################
### chunk number 57:
###################################################
#line 1122 "R4Bioeq.rnw"
# the fixed model using lm for "formulation" and "period"
mdlCMAX = lm(lCMAX ~ seq + subj:seq + prd + drug,data = Data)
print(anova(mdlCMAX))
# the random effect model using aov for carryover and other effects
mdlCMAX.R = aov(lCMAX ~ prd * drug + Error(subj), data = Data)
print(summary(mdlCMAX.R))
###################################################
### chunk number 58: BE.decisionCI
###################################################
#line 1142 "R4Bioeq.rnw"
mdrug = tapply(dat$AUC, list(drug=dat$Formulation), mean)
ybarT = mdrug["T"]
ybarR = mdrug["R"]
dec2.low = theta.L = -0.2*ybarR
dec2.up = theta.U = 0.25*ybarR
cat("DecisionCI.mean=(",dec2.low,",",dec2.up,")",sep="","\n")
###################################################
### chunk number 59: BE.CI12
###################################################
#line 1160 "R4Bioeq.rnw"
# the confidence coefficient: alpha
alphaCI = .1
# the t-value
qt.alpha = qt(1-alphaCI, n1+n2-2)
qt.alpha
# the sigma using the ANOVA model instead
sigd2 = anova(mdAUC)[5,3]/2
# the lower and upper limits for CI1
low1 = (ybarT-ybarR)-qt.alpha*sqrt(sigd2)*sqrt(1/n1+1/n2)
up1 = (ybarT-ybarR)+qt.alpha*sqrt(sigd2)*sqrt(1/n1+1/n2)
cat("The classical CI1=(", round(low1,3),",",
round(up1,3),")", sep=" ","\n\n")
# the lower and upper limits for CI2
low2 = (low1/ybarR+1)*100
up2 = (up1/ybarR+1)*100
cat("The Ratio CI2=(", round(low2,3),",",
round(up2,3),")", sep=" ","\n\n")
###################################################
### chunk number 60:
###################################################
#line 1189 "R4Bioeq.rnw"
k12 = 2*(ybarR-ybarT)/sqrt( sigd2*(1/n1+1/n2))
###################################################
### chunk number 61:
###################################################
#line 1195 "R4Bioeq.rnw"
k2 = uniroot(function(k2) pt(k12-k2,n1+n2-2)- pt(k2,n1+n2-2)
-(1-alphaCI),lower = -10, upper = 10, tol = 0.0001)$root
k1 =k12-k2
cat("The Westlake k1=",k1," and k2=",k2,sep=" ", "\n\n")
###################################################
### chunk number 62:
###################################################
#line 1203 "R4Bioeq.rnw"
low.west = k2*sqrt(sigd2*(1/n1+1/n2))-(ybarR-ybarT)
up.west = k1*sqrt(sigd2*(1/n1+1/n2))-(ybarR-ybarT)
cat("The Westlake CI for mu_T-mu_A is
(",low.west,",",up.west,")",sep=" ", "\n\n")
###################################################
### chunk number 63:
###################################################
#line 1215 "R4Bioeq.rnw"
TL = (ybarT-ybarR-theta.L)/sqrt(sigd2*(1/n1+1/n2))
TU = (ybarT-ybarR-theta.U)/sqrt(sigd2*(1/n1+1/n2))
###################################################
### chunk number 64:
###################################################
#line 1226 "R4Bioeq.rnw"
pL = 1-pt(abs(TL), n1+n2-2)
pU = pt(TU,n1+n2-2)
p1side = max(pL, pU)
###################################################
### chunk number 65: BE.bayes
###################################################
#line 1239 "R4Bioeq.rnw"
tL = (theta.L -(ybarT-ybarR))/sqrt(sigd2*(1/n1+1/n2))
tU = (theta.U -(ybarT-ybarR))/sqrt(sigd2*(1/n1+1/n2))
pRD = pt(tU, n1+n2-2) - pt(tL, n1+n2-2)
pRD
###################################################
### chunk number 66:
###################################################
#line 1256 "R4Bioeq.rnw"
dR = dat[dat$Formulation=="R",c("Subject","AUC")]
dT = dat[dat$Formulation=="T",c("Subject","AUC")]
colnames(dR) = c("Subject","AUC4R")
colnames(dT) = c("Subject","AUC4T")
dRT = merge(dR,dT)
rT2R = dRT$AUC4T/dRT$AUC4R
rT2R
###################################################
### chunk number 67:
###################################################
#line 1267 "R4Bioeq.rnw"
k = 1/sqrt(1-.9)
rbar = mean(rT2R)
sigrbar = sqrt(var(rT2R)/n)
rbar
sigrbar
###################################################
### chunk number 68: BE.BTCI
###################################################
#line 1276 "R4Bioeq.rnw"
low.BT = rbar-k*sigrbar
up.BT = rbar+k*sigrbar
cat("The Tchebycheff CI for mu_T/mu_A is
(",low.BT,",",up.BT,")",sep="", "\n\n")
###################################################
### chunk number 69: BE.boot
###################################################
#line 1293 "R4Bioeq.rnw"
# B=number of bootstrap
B = 2000
# boota and bootb to keep track the bootstrap results
boota = bootb = NULL
for(b in 1:B){
# Boottrap the observed individual ratios
boota[b] = mean(sample(rT2R, replace=T))
# boottrap the individuals and calculate the means
tmp = dRT[sample(1:n, replace=T),]
bootb[b] = mean(tmp$AUC4T)/mean(tmp$AUC4R)
}
###################################################
### chunk number 70:
###################################################
#line 1308 "R4Bioeq.rnw"
qxa = quantile(boota, c(0.05, 0.95))
qxa
qxb = quantile(bootb, c(0.05, 0.95))
qxb
###################################################
### chunk number 71: BE.boot1plot
###################################################
#line 1321 "R4Bioeq.rnw"
hist(boota,nclass=30, freq=F,las=1,
xlab="Mean of the Ratios", ylab="Density", main="")
box()
den = density(boota)
lines(den, lwd=2)
qya = approx(den$x, den$y, c(qxa,rbar))$y
segments(qxa[1],0,qxa[1],qya[1], lwd=5)
segments(qxa[2],0,qxa[2],qya[2], lwd=5)
segments(rbar,0,rbar,qya[3],lty=4, lwd=5)
###################################################
### chunk number 72:
###################################################
#line 1335 "R4Bioeq.rnw"
#line 1321 "R4Bioeq.rnw#from line#1335#"
hist(boota,nclass=30, freq=F,las=1,
xlab="Mean of the Ratios", ylab="Density", main="")
box()
den = density(boota)
lines(den, lwd=2)
qya = approx(den$x, den$y, c(qxa,rbar))$y
segments(qxa[1],0,qxa[1],qya[1], lwd=5)
segments(qxa[2],0,qxa[2],qya[2], lwd=5)
segments(rbar,0,rbar,qya[3],lty=4, lwd=5)
#line 1336 "R4Bioeq.rnw"
###################################################
### chunk number 73: BE.boot2plot
###################################################
#line 1343 "R4Bioeq.rnw"
hist(bootb,nclass=30, freq=F,las=1,
xlab="Ratio of the Means", ylab="Density", main="")
box()
den = density(bootb)
lines(den, lwd=2)
rmean = mean(dRT$AUC4T)/mean(dRT$AUC4R)
qyb = approx(den$x, den$y, c(qxb,rmean))$y
segments(qxb[1],0,qxb[1],qyb[1], lwd=5)
segments(qxb[2],0,qxb[2],qyb[2], lwd=5)
segments(rmean,0,rmean,qyb[3],lty=4, lwd=5)
###################################################
### chunk number 74:
###################################################
#line 1358 "R4Bioeq.rnw"
#line 1343 "R4Bioeq.rnw#from line#1358#"
hist(bootb,nclass=30, freq=F,las=1,
xlab="Ratio of the Means", ylab="Density", main="")
box()
den = density(bootb)
lines(den, lwd=2)
rmean = mean(dRT$AUC4T)/mean(dRT$AUC4R)
qyb = approx(den$x, den$y, c(qxb,rmean))$y
segments(qxb[1],0,qxb[1],qyb[1], lwd=5)
segments(qxb[2],0,qxb[2],qyb[2], lwd=5)
segments(rmean,0,rmean,qyb[3],lty=4, lwd=5)
#line 1359 "R4Bioeq.rnw"
|
0156c005c81d81599216751d8c1cab49e6de0a69
|
b227b78f9c6eb20f8324ac4c7da425adb6bbf493
|
/plot1.R
|
e3fb540229aab2d5a28823031d829534f485ce92
|
[] |
no_license
|
coolnumber9/ExData_Plotting1
|
ab86d9ebfc27a362c5b6b08542b01a3155709e87
|
486cde73c2c899a0903eb258cbffaf7c99613f83
|
refs/heads/master
| 2021-04-05T23:35:31.985206
| 2018-03-12T08:29:21
| 2018-03-12T08:29:21
| 124,826,006
| 0
| 0
| null | 2018-03-12T03:00:27
| 2018-03-12T03:00:27
| null |
UTF-8
|
R
| false
| false
| 1,786
|
r
|
plot1.R
|
# ------------------ PLOT 1 ASSIGNMENT ---------------------------
# Reads the UCI household power-consumption data, keeps 2007-02-01/02,
# and writes a histogram of Global_active_power to plot1.png.
# Cleaning up the environment
# NOTE(review): rm(list = ls()) wipes the caller's workspace and the
# hard-coded setwd() ties the script to one machine — consider relative
# paths instead; kept as-is to preserve the submitted behavior.
rm(list = ls())
# Working directory setup
getwd()
setwd('D:\\dev\\repo\\ExData_Plotting1')
getwd()
# Check if library is available, then use it...
find.package('data.table')
library(data.table)
# Reading of data; '?' marks missing values in this dataset
# (Prereq: Data in txt file already downloaded in the working directory)
powerConsumption <- data.table::fread(input = 'household_power_consumption.txt'
, na.strings='?')
# Peek at the head of the data
head(powerConsumption)
# Convert the Date variables to Date Class/Format
# NOTE(review): lapply over a single-column .SD returns a one-element
# list that data.table unwraps into the column — works, but a direct
# as.Date(Date, '%d/%m/%Y') would be simpler; confirm before changing.
powerConsumption[, Date := lapply(.SD, as.Date, '%d/%m/%Y'),
.SDcols = c('Date')]
# NOTE(review): parsing only %H:%M:%S attaches the current date to every
# Time value — presumably acceptable here since Time is not plotted.
powerConsumption[, Time := lapply(.SD,
function(x) as.POSIXct(strptime(x, '%H:%M:%S'))),
.SDcols = c('Time')]
# Warning message:
# In strptime(x, "%H:%M:%S") :
# POSIXlt column type detected and converted to POSIXct. We do not recommend use of POSIXlt at all because it uses 40 bytes to store one date.
# Check if the date format has changed
head(powerConsumption)
# Filter the dates from 2007-02-01 to 2007-02-02 only
powerConsumption <- powerConsumption[(Date >= '2007-02-01') &
(Date <= '2007-02-02')]
# Check if the dimensions have changed after filtering
dim(powerConsumption)
# Check the structure
str(powerConsumption)
# Open the PNG graphics device (480x480, per assignment spec)
png("plot1.png", width = 480, height = 480)
# Draw the histogram (goes to the open PNG device)
hist(powerConsumption$Global_active_power,
main = 'Global Active Power',
xlab = 'Global Active Power (kilowatts)',
ylab = 'Frequency',
col = 'red')
# Just wanted to add Rug
rug(powerConsumption$Global_active_power)
dev.off()
|
5c2a713371233b6aef811f556a1678f8c014b99b
|
3fccc509e03079752e37ecdcf0c0c177237a29eb
|
/functions/Check.R
|
bc8f3ba4409559ae8ca4f531dcf4965beb7fb7c8
|
[] |
no_license
|
sasregret/vsurvey-check
|
50ffc4ec042743a10401145bc4994fff0bb2a91c
|
ffe874d5f214ab72b736baf881068c90e5bcca8f
|
refs/heads/master
| 2020-12-25T10:41:48.523460
| 2016-06-09T03:21:27
| 2016-06-09T03:21:27
| 60,746,444
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 902
|
r
|
Check.R
|
check <- function(sample, wm.zero, zero, wm.non.na, non.na) {
  # Flag a survey variable for inclusion by comparing its observed
  # zero-share and non-missing-share against weighted-mean benchmarks.
  # All arguments may be vectors (element-wise, recycled as usual).
  #
  #   sample    - sample size, used to pick a tolerance band
  #   wm.zero   - benchmark (weighted-mean) proportion of zero values
  #   zero      - observed proportion of zero values
  #   wm.non.na - benchmark proportion of non-missing values
  #   non.na    - observed proportion of non-missing values
  #
  # Returns "non.na", "missing", "zero", or "include" per element.

  # Smaller samples get a wider tolerance. Both checks use the same band,
  # so compute it in one place (the original duplicated this expression
  # verbatim for non.na.cutoff and zero.cutoff).
  cutoff <- function(n) {
    ifelse(n %in% 0:20, .32,
    ifelse(n %in% 21:50, .20,
    ifelse(n %in% 51:100, .16,
    ifelse(n %in% 101:200, .12,
    ifelse(n %in% 201:400, .08, .06)))))
  }
  non.na.cutoff <- cutoff(sample)
  zero.cutoff <- cutoff(sample)

  # Decision order matters: a non-missing-share mismatch dominates, then a
  # zero benchmark means the variable is effectively missing, then an
  # all-missing observation, then the zero-share check.
  flag <-
    ifelse(abs(non.na - wm.non.na) > non.na.cutoff, "non.na",
    ifelse(wm.non.na == 0, "missing",
    ifelse(non.na == 0, "non.na",
    ifelse(abs(zero - wm.zero) > zero.cutoff, "zero", "include"))))
  return(flag)
}
|
54eecb9315b9b4c12e615a69c59f376fd41f5d3b
|
c0340c511cff5b40b4681c4d3238d807624c0323
|
/models/revision/ud_relations/analyze_acl.R
|
5c02fa78e1bc649fba007221a78a252b63767a84
|
[] |
no_license
|
m-hahn/grammar-optim
|
5fa7ade47d2ad91f517c887ee2c65af24059069d
|
07a1a80692a504bcafc8120a21c4dc9066b495ee
|
refs/heads/master
| 2022-08-30T06:54:42.749264
| 2022-08-05T12:09:28
| 2022-08-05T12:09:28
| 156,456,167
| 13
| 2
| null | null | null | null |
UTF-8
|
R
| false
| false
| 499
|
r
|
analyze_acl.R
|
# Per-language head-direction statistics for UD dependency relations,
# one row per (coarse relation, fine relation, language).
data <- read.csv("relations.tsv", header = FALSE, sep = "\t")
colnames(data) <- c("Coarse", "Dep", "Language", "Count", "DH_Proportion")
library(tidyr)
library(dplyr)
# Plain adnominal clauses ("acl"): keep count and head-direction
# proportion under relation-specific column names, then drop Dep.
data_acl <- data %>%
  filter(Dep == "acl") %>%
  rename(Count_acl = Count, DH_Proportion_acl = DH_Proportion) %>%
  select(-Dep)
# Relative clauses ("acl:relcl"), renamed the same way.
data_relcl <- data %>%
  filter(Dep == "acl:relcl") %>%
  rename(Count_relcl = Count, DH_Proportion_relcl = DH_Proportion) %>%
  select(-Dep)
# One row per (Language, Coarse) carrying both relations' statistics.
data2 <- merge(data_acl, data_relcl, by = c("Language", "Coarse"))
|
df71a13b25464af8ac3d6647ffb2f52aa624e3af
|
dc355038ba2b48505da2901c0cc58f1c44ddceea
|
/Code/Final Scripts/process.R
|
552032b46a8f5e70a4b74cc63009f8bcc6bf4559
|
[] |
no_license
|
john-james-ai/RepData_PeerAssessment1
|
26eabd359db2aac6fbd01024ec6619fab48b0862
|
e9e6eb5315819fcd5249cf0f4fc4ffc0e344a880
|
refs/heads/master
| 2021-12-14T18:46:19.981078
| 2016-03-07T02:27:26
| 2016-03-07T02:27:26
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 13,175
|
r
|
process.R
|
# Coursera Data Science Specialization
# Reproducible Research
# Peer Assessment Project 1
# Author: John James
# Date: March 5, 2016
# process.R
###############################################################################################################################
# INTRODUCTION
#
# It is now possible to collect a large amount of data about personal movement using activity monitoring
# devices such as a Fitbit, Nike Fuelband, or Jawbone Up. This script performs an exploratory analysis of personal activity
# data to ascertain patterns of behavior.
#
#
# QUESTION
#
# This analysis processes personal activity data obtained from a group of anonymous individuals, collected on 5 minute intervals over the
# two month period from October thru November 2012. This analysis seeks to address the following questions:
#
# 1. What is mean total number of steps taken per day?
# 2. What is the average daily activity pattern?
# 3. How do these observations change with imputed data?
# 4. Are there differences in activity patterns between weekdays and weekends?
#
#
# DATA
#
# This project makes use of data from a personal activity monitoring device. This device collects data at
# 5 minute intervals through out the day. The data consists of two months of data from an anonymous individual
# collected during the months of October and November, 2012 and include the number of steps taken in 5 minute
# intervals each day
#
# The data for this assignment was downloaded from the course web site: https://d396qusza40orc.cloudfront.net/repdata%2Fdata%2Factivity.zip
#
# The variables included in this dataset are:
# steps: Number of steps taking in a 5-minute interval (missing values are coded as NA)
# date: The date on which the measurement was taken in YYYY-MM-DD format
# interval: Identifier for the 5-minute interval in which measurement was taken
#
# The dataset is stored in a comma-separated-value (CSV) file and there are a total of 17,568 observations in this dataset.
#
#
# ORGANIZATION
#
# The files for the analysis are organized as follows:
# Data
# - Raw Data - Data obtained from the course website. This data has been read only and has not been manipulated
# - Processed Data - Data with missing values imputed
# Figure
# - Exploratory Figures - Figures used for exploratory analysis
# - Final Figures - Figures included in the final analysis
# Code
# - Raw Scripts - scripts used for experimentation purposes that are not considered final
# - Final Scripts - final versions of scripts used for this analysis. The scripts used are as follows
# - load.R - functions used to load the raw and processed data into the environment
# - func.R - functions used to analyze the data
# - process.R - master script that runs the analysis from beginning to end
# - R Markdown - Markdown files used to report the analysis of the project
# Text
# - Text of the analysis / report
#
#
# EXPLORATORY ANALYSIS
#
# The script has been organized as follows.
# 0. User Instructions - Information required by the script from the user
# 1. Housekeeping - Initializes working directories, sources files, and indicates which parts of the analysis is to be performed.
# 2. Load Data - Load the raw data into the environment
# 3. Mean Total - Calculates mean total steps per day and produces a histogram
# 4. Activity Pattern - Prepares a time series of activity, in terms of steps per day for each 5 minute interval
# 5. Impute Values - Missing values are replaced by the average number of steps per day across all days for the missing time interval
# 6. Comparison - Compared density, histogram and box plots for the raw and processed data are prepared
# 7. Weekend/Weekday - Time series line charts are prepared for both weekday and weekends.
#
# Here we go!
##################################################################################################################################
#############################################################################
## USER INSTRUCTIONS ##
#############################################################################
# ---- initialize
# Step 1: Working Directory
# Note: This script assumes that your current working directory is your project home directory. If you haven't
# already done so, set your working directory accordingly. All subordinate data and figure directories will be created
# for you. If using R Studio, you may set your working directory as follows:
# Go to menu bar
# Click "Sessions"
# Select "Set Working Directory"
#
# sTEP 2: Select Process Steps
# Please select the process steps you wish to execute. The default is that all processing steps will be executed. To
# skip a process, set the flag to FALSE. Note, that each process depends upon the prior process having been executed at
# least once.
#
# Process-step switches: set a flag to FALSE to skip that stage. Each stage
# depends on earlier stages having been executed at least once.
# (Assignments use `<-` rather than `=` per the tidyverse style guide.)
loadData <- TRUE
meanAnalysis <- TRUE
activityPattern <- TRUE
imputeData <- TRUE
compare <- TRUE
wdAnalysis <- TRUE
#
# Step 3: Set data directory, URL for the data source, and file names
#
dataUrl <- "https://d396qusza40orc.cloudfront.net/repdata%2Fdata%2Factivity.zip" # Link to data source
rawDataDir <- "/Data/Raw Data" # Directory where raw data is to be stored
dataZipFile <- "activity.zip" # Name of zip file (get this from the data source code book)
dataCsvFile <- "activity.csv" # Name of csv file (get this from the data source code book)
procDataFrame <- "procActivities" # Name of data frame in which the processed data is stored
#
#
# Step 4: Is this exploratory or final analysis
# This determines the directories from which, scripts will be sourced and figures will be placed.
exploratory <- FALSE
# ---- end
#############################################################################
## HOUSEKEEPING ##
#############################################################################
# ---- housekeeping
# Store project top level working directory in the global environment
homeDir <- getwd()
Sys.setenv("PROJ_HOME"= homeDir)
# Set directory from which scripts will be sourced, then source them
if (exploratory) {scriptDir <- "/Code/Raw Scripts" } else {scriptDir <- "/Code/Final Scripts" }
#setwd(file.path(homeDir, scriptDir))
# Load custom functions files.
# NOTE(review): because the setwd() above is commented out, func.R and load.R
# are sourced from the *current* working directory, not from scriptDir --
# confirm this is intentional.
source("func.R")
source("load.R")
# Set figure directory to which figures will be placed.
if (exploratory) {figDir <- "/Figure/Exploratory Figures" } else {figDir <- "/Figure/Final Figures" }
# Include requisite libraries.
# plyr is attached before dplyr so that dplyr's versions of summarise/mutate
# win on the search path (loading plyr second would mask them).
library(ggplot2)
library(plyr)
library(dplyr)
library(gridExtra)
library(grid)
library(downloader)
# Reset working directory
setwd(file.path(homeDir))
# ---- end
#############################################################################
##                            LOAD DATA                                    ##
#############################################################################
# Stage 1: download/unzip the activity data and take a first look at it.
# loadZipData, calcLoadStats, makeLoadHist and missingDataHist come from the
# sourced func.R / load.R files.
if (loadData) {
# ---- loadDataCall
rawActivities <- loadZipData(dataUrl, rawDataDir, dataZipFile, dataCsvFile)
# ---- end
# ---- calcLoadStatsCall
# Summary statistics of the raw data frame, printed for the report.
loadStats <- calcLoadStats(rawActivities)
print(loadStats)
# ---- end
# ---- loadSummary
print(summary(rawActivities$steps))
# ---- end
# ---- makeLoadHistCall
loadHist <- makeLoadHist(rawActivities)
# ---- end
# ---- loadMissingData
# Rows where the step count is missing (steps is NA).
loadMissingData <- rawActivities[is.na(rawActivities$steps),]
# ---- end
# ---- loadMissingDataHistCall
loadMissingDataHist <- missingDataHist(loadMissingData)
# ---- end
}
#############################################################################
##              MEAN TOTAL NUMBER OF STEPS PER DAY                         ##
#############################################################################
# Stage 2: total steps per day over complete cases only, plus a histogram
# annotated with summary statistics.
if (meanAnalysis){
# ---- completeCases
# Extract complete cases (rows with no NA in any column) from activities
completeCases <- rawActivities[complete.cases(rawActivities),]
# ---- end
# ---- stepsByDay01Call
# Summarize activities into total steps by date
stepsByDay01 <- summate(x = completeCases, v = "date", f = "sum")
# ---- end
# ---- computeStats01Call
# Compute and store summary statistics of total steps per day
stats01 <- computeStats(stepsByDay01)
# ---- end
# ---- makeHist01Call
# Prepare, render and return histogram and summary statistics of total number of steps taken each day
stepsHist01 <- makeHist(stepsByDay01, stats01)
# ---- end
# Store histogram in appropriate figures folder.
plotFile <- "stepsHist01.png"
plotSave(stepsHist01, plotFile)
}
#############################################################################
##                  AVERAGE DAILY ACTIVITY PATTERN                         ##
#############################################################################
# Stage 3: mean steps per 5-minute interval, rendered as a time series.
# NOTE(review): reuses completeCases from the meanAnalysis stage, so that
# stage must have run at least once in this session.
if (activityPattern) {
# ---- stepsByInterval01
# Summarize activities into average steps taken per 5 minute interval
stepsByInterval01 <- summate(completeCases, "interval", "mean")
# ---- end
# ---- makeTimeSeriesCall
# Prepare, render and return time series plot of steps taken per interval
stepTimeSeries01 <- makeTimeSeries(stepsByInterval01)
# ---- end
# Store line chart in appropriate figures folder
plotFile <- "stepTimeSeries01.png"
plotSave(stepTimeSeries01, plotFile)
}
#############################################################################
##                   IMPUTING MISSING VALUES                               ##
#############################################################################
# Stage 4: fill missing step counts (using the per-interval means computed in
# stage 3) and recompute the daily totals / histogram on the imputed data.
if (imputeData) {
# ---- missingValues
# Calculate and report the total number of missing values in the dataset
missingValues <- sum(is.na(rawActivities$steps))
print(paste("There are",missingValues,"rows with NAs in the steps variable"))
# ---- end
# ---- imputeCall
# Create a new activities data set with the missing values filled in.
procActivities <- impute(rawActivities, stepsByInterval01, "procActivities.csv")
# ---- end
# ---- stepsByDay02
# Summarize imputed activities into total steps by date
stepsByDay02 <- summate(x = procActivities, v = "date", f = "sum")
# ---- end
# ---- computeStatsCall
# Compute and store summary statistics of total steps per day
stats02 <- computeStats(stepsByDay02)
# ---- end
# ---- stepsHist02
# Prepare, render and return histogram of total number of steps taken each day
stepsHist02 <- makeHist(stepsByDay02, stats02)
# ---- end
# Store histogram in appropriate figures folder
plotFile <- "stepsHist02.png"
plotSave(stepsHist02, plotFile)
}
#############################################################################
##                       DATA SET COMPARISON                               ##
#############################################################################
# Stage 5: compare the raw (complete-case) and imputed daily totals with a
# density plot, an overlaid histogram, and a box plot annotated with the
# change in summary statistics.
if (compare) {
# ---- conjoinCall
# Combine the raw and processed data frame into a single data frame with a new categorical variable for each row
# that identifies it as a raw or processed data. All NA rows are removed.
combined <- conjoin(stepsByDay01, stepsByDay02)
# ---- end
# ---- makeDensityPlotCall
# Prepare, render and return a density plot comparing distributions of the raw and processed data
density <- makeDensityPlot(combined)
# ---- end
# Store the density plot in appropriate figures folder
plotFile <- "density.png"
plotSave(density, plotFile)
# ---- makeOverlaidHistCall
# Prepare, render and return a histogram plot comparing distributions of the raw and processed data
stepsHist03 <- makeOverlaidHist(combined)
# ---- end
# Store the histogram in appropriate figures folder
plotFile <- "stepHist03.png"
plotSave(stepsHist03, plotFile)
# ---- compareStatsCall
# Compute the comparative difference in summary statistics of total steps per day between the raw and processed data
difference <- compareStats(stats01, stats02)
# ---- end
# ---- makeBoxPlotCall
# Prepare, render and return a boxplot comparing distributions of the raw and processed data
stepsBox <- makeBoxPlot(combined, difference)
# ---- end
# Store the boxplot in appropriate figures folder
plotFile <- "stepBox.png"
plotSave(stepsBox, plotFile)
}
#############################################################################
##                  WEEKEND/WEEKDAY PATTERN ANALYSIS                       ##
#############################################################################
# Stage 6: label each observation weekday/weekend and compare the per-interval
# activity pattern between the two groups.
# (The "dayLabelCAll" chunk label below is kept verbatim -- the companion Rmd
# references these knitr labels by name.)
if (wdAnalysis) {
# ---- dayLabelCAll
# Create a new Weekday / Weekend factor variable
procActivities <- dayLabel(procActivities)
# ---- end
# ---- summateWdCall
# Summarize the data by day label, and 5-Minute Interval Time
stepsByIntervalWd <- summateWd(procActivities)
# ---- end
# ---- makeTimeSeriesWdCall
# Create comparative time-series
stepTimeSeriesWd <- makeTimeSeriesWd(stepsByIntervalWd)
# ---- end
# Store the time series in appropriate figures folder
plotFile <- "stepTimeSeriesWd.png"
plotSave(stepTimeSeriesWd, plotFile)
}
|
98a9f66d88d9cf1cb1b096531ca756fce602bdb6
|
70de73f045cb585abe889ec4d097184aed634680
|
/man/prep.step_anova.Rd
|
1fe9bef353fab1a71a4145461250405aea97eb91
|
[
"MIT"
] |
permissive
|
yzhizai/supprecipe
|
28cf82810ce8afabd45fd17e87355c20b9b17869
|
f307ea1dbf860dbed580555f41f69e42a5ec2e8f
|
refs/heads/main
| 2023-04-09T17:04:32.539920
| 2021-04-05T00:43:11
| 2021-04-05T00:43:11
| 353,232,871
| 4
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 427
|
rd
|
prep.step_anova.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/step_anova.R
\name{prep.step_anova}
\alias{prep.step_anova}
\title{the general prep function used for step_anova}
\usage{
\method{prep}{step_anova}(x, training, info = NULL)
}
\arguments{
\item{x}{a recipe object}
\item{training}{the data used in recipe}
\item{info}{...}
}
\value{
The trained \code{step_anova} object, updated with the estimates computed
from \code{training} (the standard contract for recipes \code{prep} methods).
}
\description{
the general prep function used for step_anova
}
|
d6953c8985fa13869f81282c50c3602f0c4e91f1
|
8f76a223e7a8c0713643c4f73fd6244d3432e536
|
/etl/repeat_purchases.R
|
84f591aa0c35433dae37c55b61844395ce6151e6
|
[] |
no_license
|
fameandpartners/analytics
|
c3864b8f1de7773171420ce92177ec9c4aa0c43d
|
36447659a0e84937ac7fdeb18f787065203d1af8
|
refs/heads/master
| 2023-02-08T09:05:20.012601
| 2018-04-04T22:51:44
| 2018-04-04T22:51:44
| 81,490,348
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 394
|
r
|
repeat_purchases.R
|
# Load the shared ETL environment (provides `products_sold` and the
# tidyverse verbs used below).
source("~/code/analytics/etl/full_global.R")
setwd("~/data")

# One row per customer: the date of their first completed order
# (shipped, not canceled, and with no return requested).
first_purchase_dates <- products_sold %>%
    filter(is_shipped,
           order_state != "canceled",
           !return_requested) %>%
    group_by(email) %>%
    summarise(date = min(order_date))

# Persist for the ecommerce-performance dashboard.
write_csv(
    first_purchase_dates,
    "~/code/analytics/ecommerce-performance/static-data/customer_acquisitions.csv"
)
|
bdcb53ce389398a4ceafc639cd3f33fcff59a48e
|
ffdea92d4315e4363dd4ae673a1a6adf82a761b5
|
/data/genthat_extracted_code/Luminescence/examples/GitHub-API.Rd.R
|
eec90b54f2abe8a0d09a50e7b6c130cd24cfcab8
|
[] |
no_license
|
surayaaramli/typeRrh
|
d257ac8905c49123f4ccd4e377ee3dfc84d1636c
|
66e6996f31961bc8b9aafe1a6a6098327b66bf71
|
refs/heads/master
| 2023-05-05T04:05:31.617869
| 2019-04-25T22:10:06
| 2019-04-25T22:10:06
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 389
|
r
|
GitHub-API.Rd.R
|
# Auto-extracted examples for the Luminescence 'GitHub-API' help page.
# The example calls are inside a '## Not run' guard because they query the
# live GitHub API; only the library() call executes when this file is run.
library(Luminescence)
### Name: GitHub-API
### Title: GitHub API
### Aliases: GitHub-API github_commits github_branches github_issues
### ** Examples
## Not run: 
##D github_branches(user = "r-lum", repo = "luminescence")
##D github_issues(user = "r-lum", repo = "luminescence")
##D github_commits(user = "r-lum", repo = "luminescence", branch = "master", n = 10)
## End(Not run)
|
5950876b32e75c4e242501ed2f3dc80fb3dfa077
|
2b91589e6b49b6402711f2741d761be396e14a89
|
/deseq2_multi/3.0/descriptionPlots.r
|
67e94dd5ae59eee2c4db0a285cc65debdd3edde0
|
[] |
no_license
|
cyverse/docker-builds
|
3421e170f69320d137e1c2776bd723b13cf0721b
|
48ce4aeb1a2567e9959bba161669f8014fd07c0f
|
refs/heads/master
| 2023-06-22T13:32:37.512486
| 2023-06-16T21:45:40
| 2023-06-16T21:45:40
| 14,870,298
| 8
| 6
| null | 2023-06-16T21:45:41
| 2013-12-02T18:02:26
|
Perl
|
UTF-8
|
R
| false
| false
| 1,361
|
r
|
descriptionPlots.r
|
#' Description plots of the counts
#'
#' Description plots of the counts according to the biological condition
#'
#' @param counts \code{matrix} of counts
#' @param n number of most-expressed features forwarded to \code{majSequences}
#' @param group factor vector of the condition from which each sample belongs
#'   (defaults to \code{target[,varInt]} looked up in the calling environment)
#' @param output.file unused; retained for backward compatibility with
#'   existing callers (each plot writes its own hard-coded PNG file name)
#' @param col colors for the plots (one per biological condition)
#' @return PNG files in the "figures" directory and the matrix of the most expressed sequences
#' @author Hugo Varet
descriptionPlots <- function(counts, n=3, group=target[,varInt], output.file=NULL, col){
  # Fixes vs. the previous revision:
  #  - the `group` and `n` arguments are now actually used (the body used to
  #    re-read the globals target[,varInt] and hard-code n=3, silently
  #    ignoring whatever the caller passed);
  #  - the default output.file=output.file was a self-referential promise
  #    that would error if ever evaluated; replaced with NULL;
  #  - the function now returns the result of majSequences() rather than the
  #    majSequences *function object*, matching the documented @return.
  # total number of reads per sample
  source("/barplotTotal.R")
  barplotTotal(counts, group=group, output.file="barplotTotal.png", col)
  # percentage of null counts per sample
  source("/barplotNull.R")
  barplotNull(counts, group=group, output.file="barplotNull.png", col)
  # distribution of counts per sample
  source("/densityPlot.R")
  densityPlot(counts, group=group, output.file="densplot.png", col)
  # features which catch the most important number of reads
  source("/majSequences.R")
  majSeq <- majSequences(counts, n=n, group=group, output.file="majSeq.png", col)
  # SERE and pairwise scatter plots
  source("/pairwiseScatterPlots.R")
  cat("Matrix of SERE statistics:\n")
  print(tabSERE(counts))
  #pairwiseScatterPlots(counts, group=group, output.file="pairwiseScatter.png")
  return(majSeq)
}
|
dfb18b2b2c2dc7bc880f341a9f6ed8950e3cb04a
|
590c4a059f51a81e5283a26b6837c3560e179422
|
/0_Prepare_data/0_GatherData.R
|
dda942111022884d43c50f6b2a69e85ab81ef5f4
|
[] |
no_license
|
lipc20052006/connectivity_eqtl_networks
|
5ac7fc974073484580f830a7b517c4467f3dd51c
|
6d0cf6722edf88de7512b78172986da277c5fcbc
|
refs/heads/main
| 2023-07-04T20:49:07.612600
| 2021-08-03T23:10:33
| 2021-08-03T23:10:33
| 411,984,035
| 1
| 0
| null | 2021-09-30T08:29:09
| 2021-09-30T08:29:08
| null |
UTF-8
|
R
| false
| false
| 476
|
r
|
0_GatherData.R
|
# NOTE(review): despite the .R extension, these are shell commands (wget/tar)
# meant to be run in a terminal -- sourcing this file into R would fail.
#eQTL data files from GTEx
# Normalized expression matrices per tissue; archive removed after extraction.
wget https://storage.googleapis.com/gtex_analysis_v8/single_tissue_qtl_data/GTEx_Analysis_v8_eQTL_expression_matrices.tar && \
tar xf GTEx_Analysis_v8_eQTL_expression_matrices.tar && rm GTEx_Analysis_v8_eQTL_expression_matrices.tar
# Covariates used in the GTEx v8 eQTL models; archive removed after extraction.
wget https://storage.googleapis.com/gtex_analysis_v8/single_tissue_qtl_data/GTEx_Analysis_v8_eQTL_covariates.tar.gz && \
tar xf GTEx_Analysis_v8_eQTL_covariates.tar.gz && rm GTEx_Analysis_v8_eQTL_covariates.tar.gz
|
89402cb765a4f52901bf8588efc3330e42f9abe8
|
a71b7fe35d652d86f136823cd1801eb51d902839
|
/datibw.R
|
ce46fb032a9fd7d026a49bf7b8735ec817fea8d9
|
[] |
no_license
|
StaThin/data
|
9efd602022db768b927c3338e5ce7483f57e3469
|
d7f6c6b5d4df140527c269b032bb3b0be45ceeeb
|
refs/heads/master
| 2023-03-29T18:40:09.694794
| 2023-03-15T09:32:42
| 2023-03-15T09:32:42
| 29,299,462
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,761
|
r
|
datibw.R
|
# Hard-coded dataset: 107 observations of three numeric variables.
#   bw  -- birth weight
#   bpd -- presumably biparietal diameter (ultrasound); confirm with source
#   ad  -- presumably abdominal diameter (ultrasound); confirm with source
# NOTE(review): column meanings beyond 'bw' are inferred from the names only.
datibw <-
structure(list(bw = c(2350, 2450, 3300, 1800, 2900, 3500, 2700, 
2950, 4000, 2650, 1450, 3000, 2200, 3680, 3200, 2900, 2000, 2680, 
2450, 3300, 3400, 3400, 2350, 3300, 3650, 3500, 3550, 3000, 2700, 
2650, 4850, 2350, 3100, 1950, 3600, 3100, 3000, 1700, 3100, 2750, 
1700, 1250, 2950, 2050, 3200, 3500, 2600, 2750, 3250, 1750, 1700, 
2600, 3600, 2800, 3000, 3050, 4100, 2900, 2800, 3000, 2650, 2950, 
2050, 2500, 2050, 3100, 2900, 4200, 2200, 2600, 3200, 3100, 2950, 
3200, 2450, 2500, 2450, 2600, 2550, 2050, 2500, 2750, 2500, 2950, 
2850, 2700, 1700, 3300, 2250, 2800, 2750, 1300, 2550, 3800, 2800, 
3150, 1650, 2900, 2100, 1150, 1250, 2500, 2000, 4000, 3550, 1173, 
2900), bpd = c(88, 91, 94, 84, 89, 100, 90, 92, 95, 92, 80, 92, 
86, 96, 95, 91, 87, 88, 92, 92, 94, 88, 89, 95, 94, 96, 93, 91, 
89, 89, 95, 92, 92, 83, 84, 91, 94, 84, 92, 94, 80, 74, 93, 90, 
94, 95, 92, 93, 96, 88, 82, 92, 97, 91, 91, 90, 97, 84, 91, 92, 
92, 94, 81, 84, 80, 94, 93, 97, 82, 93, 93, 92, 92, 94, 92, 90, 
89, 85, 86, 87, 87, 88, 91, 91, 91, 92, 83, 93, 90, 90, 89, 74, 
94, 93, 89, 91, 85, 92, 82, 68, 64, 90, 90, 96, 92, 72, 92), 
    ad = c(92, 98, 110, 89, 97, 110, 110, 100, 98, 98, 78, 100, 
    90, 120, 105, 101, 93, 105, 100, 114, 110, 113, 94, 112, 
    107, 111, 108, 108, 105, 111, 118, 100, 112, 86, 116, 102, 
    101, 85, 109, 108, 90, 94, 108, 92, 106, 102, 105, 105, 102, 
    89, 79, 100, 112, 103, 105, 100, 129, 104, 106, 105, 103, 
    108, 95, 97, 96, 110, 110, 133, 90, 102, 114, 119, 107, 110, 
    100, 95, 96, 98, 106, 90, 105, 99, 98, 100, 105, 102, 90, 
    103, 96, 108, 110, 71, 102, 118, 103, 107, 82, 108, 90, 80, 
    71, 96, 93, 115, 116, 73, 104)), .Names = c("bw", "bpd", 
"ad"), class = "data.frame", row.names = c(NA, -107L))
|
99538b245570d87e77c1177519e54d0fe5a0bc15
|
8cbc16b08945e437eab544222356c451aaf80d8f
|
/analysis/SuppFig8d-stress-tf-activity.R
|
347754d20f54aa0590d082367c61cf9dd6422150
|
[
"MIT"
] |
permissive
|
gladelephant/singlecellglioma-verhaaklab
|
f18b6b77596ba9adc5696f0d3224c470d6d0ddbe
|
b785e86484e826d94bd9c9ac1e88cead624f1762
|
refs/heads/master
| 2023-08-19T08:27:28.521716
| 2021-10-01T18:03:46
| 2021-10-01T18:03:46
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 34,172
|
r
|
SuppFig8d-stress-tf-activity.R
|
##################################
# TFs with DNAme disorder + TF activity
# Updated: 2021.05.20
# Author: Kevin J.
###################################
# Working directory for this analysis.
# NOTE(review): hard-coded absolute path; anyone else running this script must
# point mybasedir at their own checkout.
mybasedir = "/Users/johnsk/github/"
setwd(mybasedir)
###################################
# Necessary packages:
library(tidyverse)
library(Seurat)
library(SCENIC)
library(AUCell)
library(ggpubr)
###################################
## Plotting theme:
# Shared ggplot theme: white background, no grid/border, explicit black x/y
# axis lines (axis.line is blanked first, then x/y lines are re-added).
plot_theme <- theme_bw(base_size = 12) + theme(axis.title = element_text(size = 12),
                                               axis.text = element_text(size = 12),
                                               panel.background = element_rect(fill = "transparent"),
                                               axis.line = element_blank(),
                                               strip.background = element_blank(),
                                               panel.grid.major = element_blank(),
                                               panel.grid.minor = element_blank(),
                                               panel.border = element_blank(),
                                               axis.line.x = element_line(size = 0.5, linetype = "solid", colour = "black"),
                                               axis.line.y = element_line(size = 0.5, linetype = "solid", colour = "black"))
############################
#### TFBS DNAme disorder ###
############################
## Per-cell TFBS-motif DNAme-disorder table: one "<TF>_num_unique_CpGs" and
## one "<TF>_PDR" column per transcription factor motif.
tfbs <- read.table(file="data/analysis_RRBS_individual_TFBS_motif_DNAme_disorder.csv", sep = ",", header = TRUE)
## Split cells between the two experiments. Note treatment != "RT" keeps both
## Hypoxia AND Normoxia cells.
hypoxia_tfbs <- tfbs %>% 
  filter(treatment!="RT")
rt_tfbs <- tfbs %>% 
  filter(treatment=="RT")
## Reshape the wide per-TF unique-CpG-count columns into long format (one row
## per cell x TF). Hypoxia-arm cells are restricted to the 9-day timepoint;
## the RT arm is taken as-is.
epimut_cpg_tf_hypoxia = hypoxia_tfbs %>% 
  filter(timepoint=="9 days") %>% 
  dplyr::select(sample, cell_line, timepoint, treatment, ends_with("_num_unique_CpGs")) %>% 
  pivot_longer(
    cols = ends_with("_num_unique_CpGs"),
    names_to = "tf",
    values_to = "num_unique") %>% 
  mutate(tf = gsub("_num_unique_CpGs", "", tf))
epimut_cpg_tf_rt = rt_tfbs %>% 
  dplyr::select(sample, cell_line, timepoint, treatment, ends_with("_num_unique_CpGs")) %>% 
  pivot_longer(
    cols = ends_with("_num_unique_CpGs"),
    names_to = "tf",
    values_to = "num_unique") %>% 
  mutate(tf = gsub("_num_unique_CpGs", "", tf))
## Combine two data.frames (sanity check: printed, not asserted).
all(colnames(epimut_cpg_tf_hypoxia)==colnames(epimut_cpg_tf_rt))
epimut_cpg_tf <- bind_rows(epimut_cpg_tf_hypoxia, epimut_cpg_tf_rt)
# Same long-format reshape for the per-TF PDR (epimutation) columns.
epimut_pdr_tf_hypoxia = hypoxia_tfbs %>% 
  filter(timepoint=="9 days") %>% 
  dplyr::select(sample, cell_line, timepoint, treatment, ends_with("_PDR")) %>% 
  pivot_longer(
    cols = ends_with("_PDR"),
    names_to = "tf",
    values_to = "pdr") %>% 
  mutate(tf = gsub("_PDR", "", tf))
epimut_pdr_tf_rt = rt_tfbs %>% 
  dplyr::select(sample, cell_line, timepoint, treatment, ends_with("_PDR")) %>% 
  pivot_longer(
    cols = ends_with("_PDR"),
    names_to = "tf",
    values_to = "pdr") %>% 
  mutate(tf = gsub("_PDR", "", tf))
## Combine two data.frames (sanity check: printed, not asserted).
all(colnames(epimut_pdr_tf_hypoxia)==colnames(epimut_pdr_tf_rt))
epimut_pdr_tf <- bind_rows(epimut_pdr_tf_hypoxia, epimut_pdr_tf_rt)
## Join CpG counts and PDR into a single cell x TF table.
all(epimut_cpg_tf$sample==epimut_pdr_tf$sample)
all(epimut_cpg_tf$tf==epimut_pdr_tf$tf)
epimut_comb_tf = epimut_cpg_tf %>% 
  inner_join(epimut_pdr_tf, by=c("sample", "cell_line", "timepoint", "treatment", "tf"))
## Tabulate the median number of CpGs per TF across cell lines.
epimut_comb_tf_tab = epimut_comb_tf %>% 
  group_by(tf) %>% 
  summarise(median_cpgs = median(num_unique))
## Keep only TFs with a median of more than 30 unique CpGs (coverage filter).
epimut_comb_tf_tab_filt = epimut_comb_tf_tab %>% 
  filter(median_cpgs > 30)
## Identify the TFs to keep based on minimum 30 unique CpGs.
tf_keep <- unique(epimut_comb_tf_tab_filt$tf)
## Filter the TFs to keep.
epimut_comb_tf_filt = epimut_comb_tf %>% 
  filter(tf%in%tf_keep)
## Median PDR per TF/cell line/treatment, spread wide, with the treatment -
## normoxia deltas for both stress arms.
epimut_group_med_compare = epimut_comb_tf_filt %>% 
  group_by(tf, cell_line, treatment) %>% 
  summarise(median_pdr = median(pdr)) %>% 
  pivot_wider(names_from = treatment, values_from = median_pdr) %>% 
  mutate(rt_delta_median_pdr = RT-Normoxia,
         hypoxia_delta_median_pdr = Hypoxia-Normoxia)
## Perform analyses using the relative epimutation burden (Hypoxia/Normoxia).
## NOTE(review): the normalization divides every pdr by the median Normoxia
## pdr of the WHOLE filtered table (not per cell line / TF); that behavior is
## preserved here exactly as in the original mutate_each() call.
epimut_group_med = epimut_comb_tf_filt %>% 
  # mutate_each()/funs() are defunct in dplyr >= 1.0; this plain mutate() is
  # the exact equivalent of mutate_each(funs(./median(.[treatment == "Normoxia"])), pdr)
  mutate(pdr = pdr / median(pdr[treatment == "Normoxia"])) %>% 
  group_by(cell_line, tf, treatment) %>% 
  summarise(median_relative_epimutation = median(pdr)) %>% 
  pivot_wider(names_from = treatment, values_from = median_relative_epimutation) %>% 
  select(cell_line, tf, Normoxia_rel_epimut = Normoxia, Hypoxia_rel_epimut = Hypoxia, RT_rel_epimut = RT)
## Calculate a wilcoxon rank sum p-value for each RT/hypoxia vs. normoxia comparison.
## (per cell line x TF; unpaired; p-values are NOT multiple-testing adjusted --
## the 0.1 cutoff below is applied to raw p-values).
## NOTE(review): the group_by() %>% do() %>% summarise() pattern is superseded
## in modern dplyr but works as written here.
epimut_group_wilcox_hypoxia = epimut_comb_tf_filt %>% 
  filter(treatment!="RT") %>% 
  group_by(cell_line, tf) %>% 
  do(w = wilcox.test(pdr~treatment, data=., paired=FALSE)) %>% 
  summarise(cell_line, tf, Wilcox = w$p.value) %>% 
  select(cell_line, tf, hypoxia_wilcox = Wilcox)
epimut_group_wilcox_rt = epimut_comb_tf_filt %>% 
  filter(treatment!="Hypoxia") %>% 
  group_by(cell_line, tf) %>% 
  do(w = wilcox.test(pdr~treatment, data=., paired=FALSE)) %>% 
  summarise(cell_line, tf, Wilcox = w$p.value) %>% 
  select(cell_line, tf, rt_wilcox = Wilcox)
## Combine two analyses together.
epimut_group_wilcox = epimut_group_wilcox_hypoxia %>% 
  inner_join(epimut_group_wilcox_rt, by=c("cell_line", "tf"))
## Combine data to identify common modes of shifts in epimutation burden.
epimut_group_med_wilcox <- epimut_group_med %>% 
  inner_join(epimut_group_wilcox, by=c("cell_line", "tf")) %>% 
  ungroup()
## Which TFBS motifs are p < 0.1 in BOTH stress conditions and BOTH cell lines?
tfbs_pvalues <- epimut_group_med_wilcox %>% 
  dplyr::select(cell_line, tf, hypoxia_wilcox, rt_wilcox) %>% 
  pivot_wider(names_from = cell_line, values_from = c(hypoxia_wilcox, rt_wilcox)) %>% 
  filter(hypoxia_wilcox_HF2354 < 0.1, hypoxia_wilcox_HF3016 < 0.1, rt_wilcox_HF2354 < 0.1, rt_wilcox_HF3016 < 0.1)
#############################
#####   TF activity    ######
#############################
## Load the per-cell metadata for HF2354.
hf2354_umap <- read.table(file="data/analysis_scRNAseq_stress_hf2354_metadata.csv", sep = ",", header = TRUE)
## Load filtered count matrix from an AnnData (.h5ad) file.
## NOTE(review): ReadH5AD is from an older Seurat API -- newer releases moved
## this to SeuratDisk; pin the Seurat version used originally.
hf2354_obj <- ReadH5AD("data/analysis_scRNAseq_stress_hf2354_expression.h5ad")
## Make a Seurat object with the standard pipeline.
## Feature counts for each cell are divided by the total counts for that cell and multiplied by the scale.factor.
## This is then natural-log transformed using log1p.
hf2354_obj <- hf2354_obj %>% 
  Seurat::NormalizeData() 
## Record the cell barcode as an explicit metadata column.
hf2354_obj@meta.data$cell_name = rownames(hf2354_obj@meta.data)
## Downsample to 5,000 cells for input into SCENIC (seeded for reproducibility).
cells_to_sample <- 5000
set.seed(34)
cells_to_subset <- sample(x = hf2354_obj@meta.data$cell_name, size = cells_to_sample, replace = F)
# Subset Seurat object (SubsetData is the legacy Seurat v2/v3 API).
hf2354_obj_sub <- SubsetData(object = hf2354_obj, cells = cells_to_subset)
## Restrict the metadata data to that which was included in the SCENIC run.
hf2354_umap_filt <- hf2354_umap %>% 
  filter(cell_barcode%in%hf2354_obj_sub@meta.data$cell_name)
##########################
### Begin SCENIC approach
##########################
## Building the **gene regulatory network (GRN)**:
## 1. Identify potential targets for each TF based on co-expression.
# - Filtering the expression matrix and running GENIE3/GRNBoost.
# - Formatting the targets from GENIE3/GRNBoost into co-expression modules.
## Running SCENIC on a subsample of the data for run time considerations.
## Initialize SCENIC settings:
## NOTE(review): dbDir is a cluster-specific absolute path; the cisTarget
## databases must be present there for initializeScenic to succeed.
org="hgnc"
dbDir="/projects/verhaak-lab/scgp/reference/cisTarget_databases"
myDatasetTitle="SCENIC HF2354"
data(defaultDbNames)
dbs <- defaultDbNames[[org]]
scenicOptions <- initializeScenic(org=org, dbDir=dbDir, dbs=dbs, datasetTitle=myDatasetTitle, nCores=10) 
# Save to use at a later time (SCENIC expects its int/ working directory).
saveRDS(scenicOptions, file="int/scenicOptions.Rds") 
## Extract the (normalized) expression matrix for the subsampled cells.
exprMat <- data.matrix(GetAssayData(hf2354_obj_sub))
cellInfo <- data.frame(hf2354_obj_sub@meta.data$library)
rownames(cellInfo) <- rownames(hf2354_obj_sub@meta.data)
colnames(cellInfo) <- "CellType"
# Color to assign to the variables (same format as for NMF::heatmap)
colVars <- list(CellType=c("RV20018" = "#fcbba1", 
                           "RV20033" = "#00FF00"))
colVars$CellType <- colVars$CellType[intersect(names(colVars$CellType), cellInfo$CellType)]
## Save outputs for cell annotation and color code.
saveRDS(cellInfo, file="int/cellInfo.Rds")
saveRDS(colVars, file="int/colVars.Rds")
## Number of detected genes (count > 0) per cell.
cellInfo$nGene <- colSums(exprMat>0)
head(cellInfo)
## Filter genes by minimum counts and minimum number of expressing cells.
genesKept <- geneFiltering(exprMat, scenicOptions=scenicOptions,
                           minCountsPerGene=3*.01*ncol(exprMat),
                           minSamples=ncol(exprMat)*.01)
## Filter the expression matrix only to keep these genes.
## The data is already logged / normalized.
exprMat_filtered <- exprMat[genesKept, ]
dim(exprMat_filtered)
## Split the targets into positive- and negative-correlated targets
## (i.e. Spearman correlation between the TF and the potential target).
runCorrelation(exprMat_filtered, scenicOptions)
### Run GENIE3 (this is computationally intensive).
runGenie3(exprMat_filtered, scenicOptions)
## 2. Select potential direct-binding targets (regulons) based on DNA-motif analysis (*RcisTarget*: TF motif analysis)
## Build and score the GRN.
scenicOptions@settings$verbose <- TRUE
scenicOptions@settings$nCores <- 10
scenicOptions@settings$seed <- 123
## 1. Get co-expression modules.
runSCENIC_1_coexNetwork2modules(scenicOptions)
## 2. Get regulons (with RcisTarget): TF motif analysis).
runSCENIC_2_createRegulons(scenicOptions)
## 3. Score GRN (regulons) in the cells (with AUCell).
runSCENIC_3_scoreCells(scenicOptions, exprMat_filtered)
## 4. Determine the binarized activities.
runSCENIC_4_aucell_binarize(scenicOptions, skipBoxplot = FALSE, skipHeatmaps = FALSE,
                            skipTsne = FALSE, exprMat = exprMat_filtered)
##### The outputs from the SCENIC runs.
## HF2354 single cells exposed to different stressors.
auc_rankings_hf2354 <- readRDS("data/SCENIC/HF2354/3.3_aucellRankings.Rds")
regulonAUC_hf2354 <- readRDS("data/SCENIC/HF2354/3.4_regulonAUC.Rds")
## Regulon-by-cell AUCell activity matrix as a data.frame.
regulonAUC_hf2354_df = as.data.frame(getAUC(regulonAUC_hf2354))
## Z-score each regulon across cells: apply(..., 1, scale) standardizes each
## row (scale() defaults: center = TRUE, scale = TRUE); t() restores the
## regulon-by-cell orientation.
regulonAUC_hf2354_scaled = t(apply(as.matrix(regulonAUC_hf2354_df), 1, scale))
## Extract the TFs of interest.
## NOTE(review): grep on regulon names may also match regulons whose names
## merely contain these strings -- confirm only the four intended rows match.
regulonAUC_hf2354_scaled_filt <- as.data.frame(t(regulonAUC_hf2354_scaled[grep("RELA|TFDP1|ELK4|KLF16", rownames(regulonAUC_hf2354_scaled)), ]))
regulonAUC_hf2354_scaled_filt$cell_name <- colnames(regulonAUC_hf2354_df)
## Join activities onto cell metadata, pivot the four regulons long, keep the
## 9-day timepoint, and strip the "(Ng)"/"_extended" decorations from TF names.
regulon_filt_9d_annot_hf2354 <- hf2354_umap_filt %>% 
  inner_join(regulonAUC_hf2354_scaled_filt, by=c("cell_barcode" ="cell_name")) %>% 
  pivot_longer(c(`TFDP1 (3124g)`, `RELA (406g)`, `ELK4_extended (913g)`, `KLF16_extended (242g)`),
               names_to = "tf",
               values_to = "activity") %>% 
  filter(timepoint=="9days") %>% 
  mutate(tf = gsub("_extended", "", tf),
         tf = sapply(strsplit(tf, " "), "[[", 1))
## Relabel conditions for plotting and fix the display order.
regulon_filt_9d_annot_hf2354$condition_revalue[regulon_filt_9d_annot_hf2354$condition_revalue=="Irradiation-9d"] <- "Irradiation"
regulon_filt_9d_annot_hf2354$condition_revalue[regulon_filt_9d_annot_hf2354$condition_revalue=="hypoxia-9d"] <- "Hypoxia"
regulon_filt_9d_annot_hf2354$condition_revalue[regulon_filt_9d_annot_hf2354$condition_revalue=="normoxia-9d"] <- "Normoxia"
regulon_filt_9d_annot_hf2354$condition_revalue <- factor(x = regulon_filt_9d_annot_hf2354$condition_revalue, levels = c("Normoxia", "Hypoxia", "Irradiation"))
my_comparisons <- list( c("Normoxia", "Irradiation"), c("Normoxia", "Hypoxia"))
## Boxplots of TF activity per condition, faceted by TF, with pairwise
## wilcoxon p-values (ggpubr::stat_compare_means default for two groups).
ggplot(regulon_filt_9d_annot_hf2354, aes(x=condition_revalue, y=activity, fill=condition_revalue)) + 
  geom_boxplot(outlier.shape = NA) +
  #ylim(-3, 4) +
  facet_wrap(~cell_line, scales = "free_y") +
  labs(y = "TF activity (Z-score)", x = "Oxygen\nconc.", fill = "Oxygen\nconc.") +
  scale_fill_manual(values=c("Normoxia" = "#abd9e9", 
                             "Hypoxia" = "#d7191c",
                             "Irradiation" = "#0dba86")) +
  plot_theme +
  facet_grid( ~ tf) +
  theme(axis.text.x = element_text(angle = 90)) +
  stat_compare_means(comparisons = my_comparisons, label = "p.format")
###################################
##### HF3016 TF activity #####
###################################
## Load the per-cell metadata (barcodes, conditions, embedding) for HF3016.
hf3016_umap <- read.table(file="data/analysis_scRNAseq_stress_hf3016_metadata.csv", sep = ",", header = TRUE)
## Load filtered, processed, unnormalized count matrix.
hf3016_obj <- ReadH5AD("data/analysis_scRNAseq_stress_hf3016_expression.h5ad")
# Normalize with the standard Seurat pipeline:
## Feature counts for each cell are divided by the total counts for that cell and multiplied by the scale.factor.
## This is then natural-log transformed using log1p.
hf3016_obj <- hf3016_obj %>%
  Seurat::NormalizeData()
## Keep the cell barcodes as an explicit metadata column.
hf3016_obj@meta.data$cell_name = rownames(hf3016_obj@meta.data)
## Downsample to 5,000 cells for input into SCENIC (seeded for reproducibility).
cells_to_sample <- 5000
set.seed(34)
## NOTE(review): `replace = F` replaced with `replace = FALSE` — T/F are
## ordinary, reassignable bindings in R; only TRUE/FALSE are reserved.
cells_to_subset <- sample(x = hf3016_obj@meta.data$cell_name, size = cells_to_sample, replace = FALSE)
# Subset Seurat object to the sampled cells.
hf3016_obj_sub <- SubsetData(object = hf3016_obj, cells = cells_to_subset)
## Restrict the metadata to the cells that were included in the SCENIC run.
hf3016_umap_filt <- hf3016_umap %>%
  filter(cell_barcode%in%hf3016_obj_sub@meta.data$cell_name)
##########################
### Begin SCENIC approach
##########################
## Building the **gene regulatory network (GRN)**:
## 1. Identify potential targets for each TF based on co-expression.
# - Filtering the expression matrix and running GENIE3/GRNBoost.
# - Formatting the targets from GENIE3/GRNBoost into co-expression modules.
## Initialize SCENIC settings (human gene symbols, local cisTarget databases):
org="hgnc"
dbDir="/projects/verhaak-lab/scgp/reference/cisTarget_databases"
myDatasetTitle="SCENIC HF3016"
data(defaultDbNames)
dbs <- defaultDbNames[[org]]
scenicOptions <- initializeScenic(org=org, dbDir=dbDir, dbs=dbs, datasetTitle=myDatasetTitle, nCores=10)
# Save to use at a later time (SCENIC steps below re-read this file).
saveRDS(scenicOptions, file="int/scenicOptions.Rds")
## Load expression matrix (genes x cells) from the downsampled Seurat object.
exprMat <- data.matrix(GetAssayData(hf3016_obj_sub))
## Cell annotation: one column ("CellType") holding the library of origin.
cellInfo <- data.frame(hf3016_obj_sub@meta.data$library)
rownames(cellInfo) <- rownames(hf3016_obj_sub@meta.data)
colnames(cellInfo) <- "CellType"
# Color to assign to the variables (same format as for NMF::heatmap)
colVars <- list(CellType=c("RV20020" = "#fb6a4a",
                           "RV20022" = "#fcbba1",
                           "RV20033" = "#00FF00"))
## Drop colors for libraries not present in this dataset.
colVars$CellType <- colVars$CellType[intersect(names(colVars$CellType), cellInfo$CellType)]
## Save outputs for cell annotation and color code (used by SCENIC plotting).
saveRDS(cellInfo, file="int/cellInfo.Rds")
saveRDS(colVars, file="int/colVars.Rds")
## Examine how many genes are detected (count > 0) per cell.
cellInfo$nGene <- colSums(exprMat>0)
head(cellInfo)
## Gene filter: keep genes with >= 3 counts in 1% of cells and detected in
## at least 1% of cells (SCENIC's recommended defaults).
genesKept <- geneFiltering(exprMat, scenicOptions=scenicOptions,
                           minCountsPerGene=3*.01*ncol(exprMat),
                           minSamples=ncol(exprMat)*.01)
## Filter the expression matrix only to keep these genes.
## The data is already logged / normalized.
exprMat_filtered <- exprMat[genesKept, ]
dim(exprMat_filtered)
## Split the targets into positive- and negative-correlated targets
## (i.e. Spearman correlation between the TF and the potential target).
runCorrelation(exprMat_filtered, scenicOptions)
### Run GENIE3 to infer TF->target weights (this is computationally intensive).
runGenie3(exprMat_filtered, scenicOptions)
## 2. Select potential direct-binding targets (regulons) based on DNA-motif analysis (*RcisTarget*: TF motif analysis)
## Build and score the GRN; results are written under int/ by each step.
scenicOptions@settings$verbose <- TRUE
scenicOptions@settings$nCores <- 10
scenicOptions@settings$seed <- 123
## 1. Get co-expression modules.
runSCENIC_1_coexNetwork2modules(scenicOptions)
## 2. Get regulons (with RcisTarget): TF motif analysis).
runSCENIC_2_createRegulons(scenicOptions)
## 3. Score GRN (regulons) in the cells (with AUCell).
runSCENIC_3_scoreCells(scenicOptions, exprMat_filtered)
## 4. Determine the binarized (on/off) regulon activities per cell.
runSCENIC_4_aucell_binarize(scenicOptions, skipBoxplot = FALSE, skipHeatmaps = FALSE,
                            skipTsne = FALSE, exprMat = exprMat_filtered)
#########################
### Load SCENIC results
#########################
## Precomputed AUCell rankings and regulon activity (AUC) matrices for HF3016.
auc_rankings_hf3016 <- readRDS("data/SCENIC/HF3016/3.3_aucellRankings.Rds")
regulonAUC_hf3016 <- readRDS("data/SCENIC/HF3016/3.4_regulonAUC.Rds")
## Create a data.frame with the gene sets/TFs (rows) and cells (columns).
regulonAUC_hf3016_df = as.data.frame(getAUC(regulonAUC_hf3016))
## Generate per-regulon z-scores across cells using the scale() function;
## scale(A, center = TRUE, scale = TRUE) are the defaults.
regulonAUC_hf3016_scaled = t(apply(as.matrix(regulonAUC_hf3016_df), 1, scale))
## Extract the TFs of interest, transposed so that rows are cells.
regulonAUC_hf3016_scaled_filt <- as.data.frame(t(regulonAUC_hf3016_scaled[grep("RELA|TFDP1|ELK4|KLF16", rownames(regulonAUC_hf3016_scaled)), ]))
regulonAUC_hf3016_scaled_filt$cell_name <- colnames(regulonAUC_hf3016_df)
## Join activities onto the metadata, reshape to long (cell x regulon), keep
## the 9-day timepoint, and reduce regulon names to the bare TF symbol.
regulon_filt_9d_annot_hf3016 <- hf3016_umap_filt %>%
  inner_join(regulonAUC_hf3016_scaled_filt, by=c("cell_barcode" ="cell_name")) %>%
  pivot_longer(c(`TFDP1 (2910g)`, `RELA (650g)`, `ELK4_extended (1222g)`, `KLF16_extended (40g)`),
               names_to = "tf",
               values_to = "activity") %>%
  filter(timepoint=="9days") %>%
  mutate(tf = gsub("_extended", "", tf),
         tf = sapply(strsplit(tf, " "), "[[", 1))
## Harmonize the condition labels and fix their display order.
regulon_filt_9d_annot_hf3016$condition_revalue[regulon_filt_9d_annot_hf3016$condition_revalue=="Irradiation-9d"] <- "Irradiation"
regulon_filt_9d_annot_hf3016$condition_revalue[regulon_filt_9d_annot_hf3016$condition_revalue=="hypoxia-9d"] <- "Hypoxia"
regulon_filt_9d_annot_hf3016$condition_revalue[regulon_filt_9d_annot_hf3016$condition_revalue=="normoxia-9d"] <- "Normoxia"
regulon_filt_9d_annot_hf3016$condition_revalue <- factor(x = regulon_filt_9d_annot_hf3016$condition_revalue, levels = c("Normoxia", "Hypoxia", "Irradiation"))
## Pairwise condition contrasts annotated by ggpubr::stat_compare_means().
my_comparisons <- list( c("Normoxia", "Irradiation"), c("Normoxia", "Hypoxia"))
## Boxplots of per-cell TF activity (z-score) by stress condition, one panel
## per TF, with Wilcoxon p-values for each contrast.
## NOTE(review): the original chain added facet_wrap(~cell_line) and then
## facet_grid(~ tf); a ggplot holds a single facet specification, so the
## later facet_grid() replaced facet_wrap(). The dead facet_wrap() layer has
## been removed — the rendered plot is unchanged.
ggplot(regulon_filt_9d_annot_hf3016, aes(x=condition_revalue, y=activity, fill=condition_revalue)) +
  geom_boxplot(outlier.shape = NA) +
  #ylim(-3, 4) +
  labs(y = "TF activity (Z-score)", x = "Oxygen\nconc.", fill = "Oxygen\nconc.") +
  scale_fill_manual(values=c("Normoxia" = "#abd9e9",
                             "Hypoxia" = "#d7191c",
                             "Irradiation" = "#0dba86")) +
  plot_theme +
  facet_grid( ~ tf) +
  theme(axis.text.x = element_text(angle = 90)) +
  stat_compare_means(comparisons = my_comparisons, label = "p.format")
##############################
#### Combine all data #####
##############################
## Print the TFs with consistently stress-altered TFBS motif DNAme disorder
## (tfbs_pvalues is presumably computed earlier in the file — not visible here).
tfbs_pvalues$tf
## How many of those TFs are covered by a SCENIC regulon in each cell line?
## ("REL_" matches the REL_extended regulon without also matching RELA.)
rownames(regulonAUC_hf2354_scaled[grep("ELK4|FOSL2|KLF10|KLF16|MEF2D|NFYB|NR2C1|REL_|RELA|RFX3|TFDP1|VEZF1|ZNF263|ZNF384", rownames(regulonAUC_hf2354_scaled)), ])
rownames(regulonAUC_hf3016_scaled[grep("ELK4|FOSL2|KLF10|KLF16|MEF2D|NFYB|NR2C1|REL_|RELA|RFX3|TFDP1|VEZF1|ZNF263|ZNF384", rownames(regulonAUC_hf3016_scaled)), ])
##### HF2354 ############
## Re-extract the scaled activities for the full list of candidate TFs.
regulonAUC_hf2354_scaled_filt <- as.data.frame(t(regulonAUC_hf2354_scaled[grep("ELK4|FOSL2|KLF10|KLF16|MEF2D|NFYB|NR2C1|REL_|RELA|RFX3|TFDP1|VEZF1|ZNF263|ZNF384", rownames(regulonAUC_hf2354_scaled)), ]))
regulonAUC_hf2354_scaled_filt$cell_name <- colnames(regulonAUC_hf2354_df)
## Hypoxia vs normoxia at 9 days: unpaired two-sample Wilcoxon test of
## activity per TF and cell line (irradiated cells excluded so the formula
## contrasts exactly two conditions).
tf_activity_wilcox_hypoxia_hf2354 <- hf2354_umap_filt %>%
  inner_join(regulonAUC_hf2354_scaled_filt, by=c("cell_barcode" ="cell_name")) %>%
  pivot_longer(c(`NFYB_extended (1712g)`:`ELK4_extended (913g)`),
               names_to = "tf",
               values_to = "activity") %>%
  filter(timepoint=="9days", condition_revalue!="Irradiation-9d") %>%
  group_by(tf, cell_line) %>%
  do(w = wilcox.test(activity~condition_revalue, data=., paired=FALSE)) %>%
  summarise(tf, cell_line, Wilcox = w$p.value) %>%
  dplyr::select(cell_line, tf, hypoxia_wilcox = Wilcox)
## Irradiation vs normoxia at 9 days: same test with hypoxic cells excluded.
tf_activity_wilcox_rt_hf2354 <- hf2354_umap_filt %>%
  inner_join(regulonAUC_hf2354_scaled_filt, by=c("cell_barcode" ="cell_name")) %>%
  pivot_longer(c(`NFYB_extended (1712g)`:`ELK4_extended (913g)`),
               names_to = "tf",
               values_to = "activity") %>%
  filter(timepoint=="9days", condition_revalue!="hypoxia-9d") %>%
  group_by(tf, cell_line) %>%
  do(w = wilcox.test(activity~condition_revalue, data=., paired=FALSE)) %>%
  summarise(tf, cell_line, Wilcox = w$p.value) %>%
  dplyr::select(cell_line, tf, rt_wilcox = Wilcox)
## Median activity change of each selected TF relative to normoxia at 9 days,
## combined with the Wilcoxon p-values computed above, for HF2354.
## NOTE(review): the original script assigned tf_activity_median_wilcox_hf2354
## twice in a row; the first pipeline (which binned TFs into
## up/down/"no change" using a p < 0.05 cutoff) was immediately overwritten
## by this one and its result was never used, so it has been removed.
tf_activity_median_wilcox_hf2354 <- hf2354_umap_filt %>%
  inner_join(regulonAUC_hf2354_scaled_filt, by=c("cell_barcode" ="cell_name")) %>%
  ## Long format: one row per cell x regulon for the selected TFs.
  pivot_longer(c(`NFYB (1192g)`,`TFDP1 (3124g)`, `FOSL2 (62g)`, `KLF16_extended (242g)`,
                 `ZNF384 (116g)`, `REL_extended (56g)`, `RELA (406g)`,
                 `RFX3 (16g)`, `VEZF1_extended (38g)`, `ELK4_extended (913g)`),
               names_to = "tf",
               values_to = "activity") %>%
  filter(timepoint=="9days") %>%
  ## Median activity per TF / cell line / condition, then one column per
  ## condition so the deltas below can be computed row-wise.
  group_by(tf, cell_line, condition_revalue) %>%
  summarise(avg_activity = median(activity)) %>%
  pivot_wider(names_from = condition_revalue, values_from = avg_activity) %>%
  mutate(rt_delta_median_activity = `Irradiation-9d`-`normoxia-9d`,
         hypoxia_delta_median_activity = `hypoxia-9d`-`normoxia-9d`) %>%
  inner_join(tf_activity_wilcox_hypoxia_hf2354, by=c("tf", "cell_line")) %>%
  inner_join(tf_activity_wilcox_rt_hf2354, by=c("tf", "cell_line")) %>%
  ## Direction of change only; significance is carried in the *_wilcox columns.
  mutate(rt_group = ifelse(rt_delta_median_activity>0, "upregulation", "downregulation"),
         hypoxia_group = ifelse(hypoxia_delta_median_activity>0, "upregulation", "downregulation")) %>%
  dplyr::select(cell_line, tf, hypoxia_group, hypoxia_wilcox, rt_group, rt_wilcox) %>%
  ## Clean the regulon labels down to the bare TF symbol.
  mutate(tf = gsub("_extended", "", tf),
         tf = sapply(strsplit(tf, " "), "[[", 1)) %>%
  distinct()
###### HF3016 ########
## Re-extract the scaled activities for the full list of candidate TFs.
regulonAUC_hf3016_scaled_filt <- as.data.frame(t(regulonAUC_hf3016_scaled[grep("ELK4|FOSL2|KLF10|KLF16|MEF2D|NFYB|NR2C1|REL_|RELA|RFX3|TFDP1|VEZF1|ZNF263|ZNF384", rownames(regulonAUC_hf3016_scaled)), ]))
regulonAUC_hf3016_scaled_filt$cell_name <- colnames(regulonAUC_hf3016_df)
## Hypoxia vs normoxia at 9 days: unpaired two-sample Wilcoxon test of
## activity per TF and cell line (irradiated cells excluded so the formula
## contrasts exactly two conditions).
tf_activity_wilcox_hypoxia_hf3016 <- hf3016_umap_filt %>%
  inner_join(regulonAUC_hf3016_scaled_filt, by=c("cell_barcode" ="cell_name")) %>%
  pivot_longer(c(`FOSL2_extended (649g)`:`RELA (650g)`),
               names_to = "tf",
               values_to = "activity") %>%
  filter(timepoint=="9days", condition_revalue!="Irradiation-9d") %>%
  group_by(tf, cell_line) %>%
  do(w = wilcox.test(activity~condition_revalue, data=., paired=FALSE)) %>%
  summarise(tf, cell_line, Wilcox = w$p.value) %>%
  dplyr::select(cell_line, tf, hypoxia_wilcox = Wilcox)
## Irradiation vs normoxia at 9 days: same test with hypoxic cells excluded.
tf_activity_wilcox_rt_hf3016 <- hf3016_umap_filt %>%
  inner_join(regulonAUC_hf3016_scaled_filt, by=c("cell_barcode" ="cell_name")) %>%
  pivot_longer(c(`FOSL2_extended (649g)`:`RELA (650g)`),
               names_to = "tf",
               values_to = "activity") %>%
  filter(timepoint=="9days", condition_revalue!="hypoxia-9d") %>%
  group_by(tf, cell_line) %>%
  do(w = wilcox.test(activity~condition_revalue, data=., paired=FALSE)) %>%
  summarise(tf, cell_line, Wilcox = w$p.value) %>%
  dplyr::select(cell_line, tf, rt_wilcox = Wilcox)
## Median activity change of each selected TF relative to normoxia at 9 days,
## combined with the Wilcoxon p-values computed above, for HF3016.
## NOTE(review): the original script assigned tf_activity_median_wilcox_hf3016
## twice in a row; the first pipeline (which binned TFs into
## up/down/"no change" using a p < 0.05 cutoff) was immediately overwritten
## by this one and its result was never used, so it has been removed.
tf_activity_median_wilcox_hf3016 <- hf3016_umap_filt %>%
  inner_join(regulonAUC_hf3016_scaled_filt, by=c("cell_barcode" ="cell_name")) %>%
  ## Long format: one row per cell x regulon for the selected TFs.
  pivot_longer(c(`FOSL2 (490g)`,`TFDP1 (2910g)`, `KLF16_extended (40g)`, `RFX3_extended (16g)`,
                 `ELK4_extended (1222g)`, `NFYB (13g)`, `MEF2D (11g)`, `ZNF263_extended (166g)`,
                 `RELA (650g)`),
               names_to = "tf",
               values_to = "activity") %>%
  filter(timepoint=="9days") %>%
  ## Median activity per TF / cell line / condition, then one column per
  ## condition so the deltas below can be computed row-wise.
  group_by(tf, cell_line, condition_revalue) %>%
  summarise(avg_activity = median(activity)) %>%
  pivot_wider(names_from = condition_revalue, values_from = avg_activity) %>%
  mutate(rt_delta_median_activity = `Irradiation-9d`-`normoxia-9d`,
         hypoxia_delta_median_activity = `hypoxia-9d`-`normoxia-9d`) %>%
  inner_join(tf_activity_wilcox_hypoxia_hf3016, by=c("tf", "cell_line")) %>%
  inner_join(tf_activity_wilcox_rt_hf3016, by=c("tf", "cell_line")) %>%
  ## Direction of change only; significance is carried in the *_wilcox columns.
  mutate(rt_group = ifelse(rt_delta_median_activity>0, "upregulation", "downregulation"),
         hypoxia_group = ifelse(hypoxia_delta_median_activity>0, "upregulation", "downregulation")) %>%
  dplyr::select(cell_line, tf, hypoxia_group, hypoxia_wilcox, rt_group, rt_wilcox) %>%
  ## Clean the regulon labels down to the bare TF symbol.
  mutate(tf = gsub("_extended", "", tf),
         tf = sapply(strsplit(tf, " "), "[[", 1)) %>%
  distinct()
## Combine both cell lines: one long table of up/down calls per TF x stress...
comb_tf_activity <- tf_activity_median_wilcox_hf3016 %>%
  bind_rows(tf_activity_median_wilcox_hf2354) %>%
  dplyr::select(cell_line, tf, hypoxia_group, rt_group) %>%
  pivot_longer(
    cols = c(hypoxia_group, rt_group),
    names_to = "stress",
    values_to = "activity") %>%
  mutate(stress = recode(stress, "hypoxia_group" = "Hypoxia", "rt_group" = "Irradiation")) %>%
  filter(tf%in%c("TFDP1", "RFX3", "RELA", "NFYB", "KLF16", "FOSL2", "ELK4"))
## ...and a matching long table of Wilcoxon p-values per TF x stress.
comb_tf_pvalue <- tf_activity_median_wilcox_hf3016 %>%
  bind_rows(tf_activity_median_wilcox_hf2354) %>%
  dplyr::select(cell_line, tf, hypoxia_wilcox, rt_wilcox) %>%
  pivot_longer(
    cols = c(hypoxia_wilcox, rt_wilcox),
    names_to = "stress",
    values_to = "pvalue") %>%
  mutate(stress = recode(stress, "hypoxia_wilcox" = "Hypoxia", "rt_wilcox" = "Irradiation")) %>%
  filter(tf%in%c("TFDP1", "RFX3", "RELA", "NFYB", "KLF16", "FOSL2", "ELK4"))
## Merge calls with p-values; floor p-values at 2.2e-16 (R's reporting floor)
## so -log10(pvalue) stays finite for the alpha scale below.
comb_tf_all <- comb_tf_pvalue %>%
  inner_join(comb_tf_activity, by=c("cell_line", "tf", "stress")) %>%
  mutate(pvalue = ifelse(pvalue <2.2e-16, 2.2e-16, pvalue))
comb_tf_all$activity <- factor(x = comb_tf_all$activity, levels = c("upregulation","downregulation"))
## Tile heatmap: direction of change as fill, significance as opacity.
pdf(file = "results/Fig4/SuppFig8d-tf-activity.pdf", height = 6, width = 4, bg = "transparent", useDingbats = FALSE)
ggplot() +
  geom_tile(data = comb_tf_all, aes(x = stress, y = tf, fill = activity, alpha = -log10(pvalue)), color = "black") +
  labs(y = "", fill = "9-day TF activity \nchange from normoxia") +
  scale_fill_manual(values = c("upregulation" = "#CD4F39", "downregulation" = "#27408B", "no change" = "#d3d3d3",
                               "NA" = "#FFFFFF")) +
  plot_theme +
  theme(axis.text.x = element_text(angle=45, hjust=1)) +
  facet_grid(. ~ cell_line, scales="free") +
  ggtitle("Selected TFs with significant TFBS motif\nDNAme disorder changes")
dev.off()
####################################################################
regulon_filt_9d_annot_hf3016_sub <- regulon_filt_9d_annot_hf3016 %>%
dplyr::select(cell_line, stress = condition_revalue, tf, value = activity) %>%
mutate(type = "tf_activity")
regulon_filt_9d_annot_hf2354_sub <- regulon_filt_9d_annot_hf2354 %>%
dplyr::select(cell_line, stress = condition_revalue, tf, value = activity) %>%
mutate(type = "tf_activity")
tf_activity <- regulon_filt_9d_annot_hf2354_sub %>%
bind_rows(regulon_filt_9d_annot_hf3016_sub)
tfbs_disorder <- epimut_comb_tf_filt %>%
group_by(cell_line, tf) %>%
mutate_each(funs(./median(.[treatment == "Normoxia"])), pdr) %>%
filter(tf%in%c("ELK4", "TFDP1")) %>%
mutate(type = "tf_disorder",
treatment = recode(treatment, `RT` = "Irradiation")) %>%
select(cell_line, stress = treatment, tf, value = pdr, type)
tfbs_disorder$stress <- factor(x = tfbs_disorder$stress, levels = c("Normoxia", "Hypoxia", "Irradiation"))
my_comparisons <- list( c("Normoxia", "Irradiation"), c("Normoxia", "Hypoxia"))
pdf(file = "results/Fig4/SuppFig8e-f-tf-disorder.pdf", height = 3, width = 6, bg = "transparent", useDingbats = FALSE)
ggplot(tfbs_disorder, aes(x=tf, y=value, fill= stress)) +
geom_violin() +
geom_boxplot(width=0.9, color="black", alpha=0.1, outlier.shape = NA) +
labs(y = "Relative TFBS motif\nDNAme disorder", x = "", fill = "9-day\nstress exposure") +
scale_fill_manual(values=c("Normoxia" = "#abd9e9",
"Hypoxia" = "#d7191c",
"Irradiation" = "#0dba86")) +
plot_theme +
theme(panel.spacing.x = unit(1.5, "lines")) +
facet_grid(. ~ cell_line, scales ="free")
dev.off()
## Extract p-values: same data as the saved figure, but with stress on the
## x-axis and one panel per TF so stat_compare_means() can annotate the
## Wilcoxon p-value for each contrast (displayed, not written to the PDF).
ggplot(tfbs_disorder, aes(x=stress, y=value, fill= stress)) +
  geom_violin() +
  geom_boxplot(width=0.9, color="black", alpha=0.1, outlier.shape = NA) +
  labs(y = "Relative TFBS motif\nDNAme disorder", x = "", fill = "9-day\nstress exposure") +
  scale_fill_manual(values=c("Normoxia" = "#abd9e9",
                             "Hypoxia" = "#d7191c",
                             "Irradiation" = "#0dba86")) +
  plot_theme +
  theme(panel.spacing.x = unit(1.5, "lines")) +
  facet_grid(tf ~ cell_line, scales ="free") +
  stat_compare_means(comparisons = my_comparisons, label = "p.format")
## Combined per-cell TF activity for ELK4 and TFDP1 only (overwrites the
## unfiltered tf_activity defined earlier).
tf_activity <- regulon_filt_9d_annot_hf2354_sub %>%
  bind_rows(regulon_filt_9d_annot_hf3016_sub) %>%
  filter(tf%in%c("ELK4", "TFDP1"))
## Violin + boxplot of SCENIC TF activity per stress, saved to PDF.
pdf(file = "results/Fig4/SuppFig8e-tf-activity.pdf", height = 3, width = 6, bg = "transparent", useDingbats = FALSE)
ggplot(tf_activity, aes(x=tf, y=value, fill=stress)) +
  geom_violin() +
  geom_boxplot(width=0.9, color="black", alpha=0.1, outlier.shape = NA) +
  labs(y = "SCENIC TF activity\n(Z-score)", x = "", fill = "9-day\nstress exposure") +
  scale_fill_manual(values=c("Normoxia" = "#abd9e9",
                             "Hypoxia" = "#d7191c",
                             "Irradiation" = "#0dba86")) +
  plot_theme +
  theme(panel.spacing.x = unit(1.5, "lines")) +
  facet_grid(. ~ cell_line, scales="free")
dev.off()
## Extract p-values: same data as the saved figure, but with stress on the
## x-axis and one panel per TF so stat_compare_means() can annotate the
## Wilcoxon p-value for each contrast (displayed, not written to the PDF).
ggplot(tf_activity, aes(x=stress, y=value, fill=stress)) +
  geom_violin() +
  geom_boxplot(width=0.9, color="black", alpha=0.1, outlier.shape = NA) +
  labs(y = "SCENIC TF activity (Z-score)", x = "", fill = "9-day\nstress exposure") +
  scale_fill_manual(values=c("Normoxia" = "#abd9e9",
                             "Hypoxia" = "#d7191c",
                             "Irradiation" = "#0dba86")) +
  plot_theme +
  theme(panel.spacing.x = unit(1.5, "lines")) +
  facet_grid(tf ~ cell_line, scales="free") +
  stat_compare_means(comparisons = my_comparisons, label = "p.format")
#### END #####
|
0050e0bd6a46c45c8b3f5994f716341e650c93d8
|
12e3a88676d632a0a1ab01883aebe2ad8b89cb14
|
/book/nishiyama_econometrics/5-8_非線形モデル_結合仮説.R
|
07d8184248c33723cdf8e57ec7dd246e11b3bfee
|
[] |
no_license
|
delta0726/r-econometrics
|
0245c543109abb91367dd196d287f9b0346da338
|
62e55e1f1d1c0bf64886ca38ba6d17e9d2e7d13d
|
refs/heads/master
| 2023-03-05T05:52:23.002221
| 2023-01-25T22:57:56
| 2023-01-25T22:57:56
| 341,499,752
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,290
|
r
|
5-8_非線形モデル_結合仮説.R
|
# ***********************************************************************************************
# Title   : Econometrics
# Chapter : 5 Estimation and testing of the multiple regression model
# Theme   : Empirical example 5.8 - testing a joint hypothesis in a nonlinear model
# Date    : 2022/12/12
# Page    : P178
# URL     : https://ritsu1997.github.io/r-for-nlas-econometrics/index.html
# ***********************************************************************************************
# <Contents>
# 0 Setup
# 1 Model building
# 0 Setup ---------------------------------------------------------------
# Libraries
library(tidyverse)
library(estimatr)
library(modelsummary)
library(gridExtra)
# Load data (prefecture-level panel; y80/y90/y99 are presumably income
# levels per year — confirm against the textbook's dataset description).
df <- read_csv("csv/youdou.csv")
# Prepare data: natural logs of y80/y90/y99 and growth rates over
# 1980-1999 (19 years, scaled by 100) and 1980-1990 (10 years).
df <-
  df %>%
  mutate(lny80 = log(y80),
         lny99 = log(y99),
         lny90 = log(y90),
         growthrate8099 = (lny99 - lny80) / 19 * 100,
         growthrate8090 = (lny90 - lny80) / 10)
# Inspect data
df %>% print()
df %>% glimpse()
# 1 Model building -------------------------------------------------------
# <Point>
# Quadratic-in-y80 regression of the 1980-1999 growth rate, estimated with
# Stata-style heteroskedasticity-robust standard errors via estimatr.
# Build model
model <- lm_robust(growthrate8099 ~ y80 + I(y80^2), se_type = "stata", data = df)
# Inspect results
model %>% summary()
model %>% glance()
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.