blob_id stringlengths 40 40 | directory_id stringlengths 40 40 | path stringlengths 2 327 | content_id stringlengths 40 40 | detected_licenses listlengths 0 91 | license_type stringclasses 2 values | repo_name stringlengths 5 134 | snapshot_id stringlengths 40 40 | revision_id stringlengths 40 40 | branch_name stringclasses 46 values | visit_date timestamp[us]date 2016-08-02 22:44:29 2023-09-06 08:39:28 | revision_date timestamp[us]date 1977-08-08 00:00:00 2023-09-05 12:13:49 | committer_date timestamp[us]date 1977-08-08 00:00:00 2023-09-05 12:13:49 | github_id int64 19.4k 671M ⌀ | star_events_count int64 0 40k | fork_events_count int64 0 32.4k | gha_license_id stringclasses 14 values | gha_event_created_at timestamp[us]date 2012-06-21 16:39:19 2023-09-14 21:52:42 ⌀ | gha_created_at timestamp[us]date 2008-05-25 01:21:32 2023-06-28 13:19:12 ⌀ | gha_language stringclasses 60 values | src_encoding stringclasses 24 values | language stringclasses 1 value | is_vendor bool 2 classes | is_generated bool 2 classes | length_bytes int64 7 9.18M | extension stringclasses 20 values | filename stringlengths 1 141 | content stringlengths 7 9.18M |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
2a02e756c459ecfabfe0c42509c1324f6d914c36 | 700184e0580ebe891897fc23b7c4f960eb61a9db | /labs/16/lab16.R | 3396d71b59903f97257987bbb03a5681730aa3e9 | [] | no_license | gjhunt/688spring2021 | f729905297236e992580f2d15acdb00f4ed94b65 | c187b650ccd9f7152db6311db5fa62bd694b91e1 | refs/heads/main | 2023-04-16T06:59:21.213700 | 2021-04-26T02:46:41 | 2021-04-26T02:46:41 | 331,686,631 | 2 | 1 | null | null | null | null | UTF-8 | R | false | false | 1,207 | r | lab16.R | # ---
# jupyter:
# jupytext:
# formats: ipynb,Rmd,R
# text_representation:
# extension: .R
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.9.1
# kernelspec:
# display_name: R
# language: R
# name: ir
# ---
# # Lab 16
# ## Lecture 16
# --- Load and clean the TCGA expression matrix ---
# Rows are samples; column 5 is used as the response below.
d = read.csv('data/tcga/smaller.csv',row.names=1)
dim(d)
head(d)
d = as.matrix(d)
# Drop zero-variance columns (they cannot be scaled and carry no signal).
w0 = which(apply(d,2,sd)==0)
d = d[,-w0]
dim(d)
# Response = column 5; predictors = all remaining columns.
y = d[,5]
X = d[,-5]
# Save per-column SDs/means, then standardize the predictors.
sigma = apply(X,2,sd)
mus = colMeans(X)
X = scale(X,scale=TRUE,center=TRUE)
head(X)
# Sanity check: should be 0 non-finite entries after scaling.
sum(!is.finite(X))
# Ordinary least squares for comparison (when p >= n some coefficients
# come back NA).
lm(y~X)
# --- Principal component regression via the pls package ---
library('pls')
?pcr
pcrmod = pcr(y~X,ncomp=10)
summary(pcrmod)
pcr_preds = predict(pcrmod,ncomp=10)
# --- Reproduce PCR "by hand": regress y on the projections of X onto
# its first 10 right singular vectors. ---
V = svd(X,nv=10)$v
XV = X%*%V
dim(XV)
hm = lm(y~XV)
summary(hm)
head(coef(hm))
hm_preds = predict(hm)
head(hm_preds)
# Hand-rolled predictions should match pcr() predictions (points on a line).
plot(pcr_preds,hm_preds)
# Map the 10 component coefficients back to the original predictor space.
beta_pcr = V%*%array(coef(hm)[-1],c(10,1))
head(beta_pcr)
plot(predict(pcrmod,ncomp=10),mean(y)+X%*%beta_pcr)
# --- Predict for a new (random) observation ---
# NOTE(review): newx is drawn on the standardized scale; real new data
# would first need centering/scaling with mus and sigma -- confirm.
newx = rnorm(ncol(X))
newx = data.frame(t(newx))
colnames(newx) = colnames(X)
head(newx)
predict(pcrmod,as.matrix(newx),ncomp=10)
as.numeric(mean(y)+as.matrix(newx)%*%beta_pcr)
|
50dea27b94c090e79a3bbbbcd2159505a819c25f | 0919df75fe691228ecdb8369f03c224072f4b3a9 | /test.R | 8c26869f360b5729e28ecce57798362d9c606239 | [] | no_license | docxia/r-base-pathology | 6bdb322eebe517182bcdbcb3533c8077b1ddb86f | e8c02a843a879336be4a256f9f7ae6319b6e4a8f | refs/heads/master | 2023-08-28T02:57:50.539104 | 2021-10-18T08:51:31 | 2021-10-18T08:51:31 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 1,888 | r | test.R | library(matrixStats)
library(MatrixGenerics)
library(BiocGenerics)
library(stats4)
library(BiocGenerics)
library(parallel)
library(SummarizedExperiment)
library(TCGAbiolinks)
library(SingleCellExperiment)
library(scRNAseq)
# Working directory (original comment was mojibake-encoded Chinese)
work_dir <- "C:/Users/Administrator/Desktop/bkb"
# GDC/TCGA query settings for the tumour project of interest
project <- "TCGA-KIRC"
data_category <- "Transcriptome Profiling"
data_type <- "Gene Expression Quantification"
workflow_type <- "HTSeq - Counts"
legacy <- FALSE
# Set as the current working directory
setwd(work_dir)
getwd()
# Paths for the downloaded data and the cached .rda file
DataDirectory <- paste0(work_dir,"/GDC/",gsub("-","_",project))
FileNameData <- paste0(DataDirectory, "_","RNAseq_HTSeq_Counts",".rda")
# Build the GDC download query
query <- GDCquery(project = project,
                  data.category = data_category,
                  data.type = data_type,
                  workflow.type = workflow_type,
                  legacy = legacy)
# Total number of samples matched by the query
samplesDown <- getResults(query,cols=c("cases"))
cat("Total sample to download:", length(samplesDown))
# Primary tumour ("TP") samples
dataSmTP <- TCGAquery_SampleTypes(barcode = samplesDown, typesample = "TP")
cat("Total TP samples to down:", length(dataSmTP))
# Solid-tissue normal ("NT") samples
dataSmNT <- TCGAquery_SampleTypes(barcode = samplesDown,typesample = "NT")
cat("Total NT samples to down:", length(dataSmNT))
# Download the queried data (6 files per chunk, via the GDC client)
GDCdownload(query = query,
            directory = DataDirectory,files.per.chunk=6, method='client')
# Assemble the downloaded files into a SummarizedExperiment and cache it
data <- GDCprepare(query = query,
                   save = TRUE,
                   directory = DataDirectory,
                   save.filename = FileNameData)
# Extract the raw count matrix and write it to a tab-separated text file
data_expr <- assay(data)
dim(data_expr)
expr_file <- paste0(DataDirectory, "_","All_HTSeq_Counts",".txt")
write.table(data_expr, file = expr_file, sep="\t", row.names =T, quote = F) |
cabfe792fdecc9a4883a4b88169ffbeed2402139 | b025dfdc791d5222bf6c5844689f939d8dbf3e00 | /DQ/DQ/dq.r | 70241b6e0ff7d245eda36fd39204db830077b9b0 | [] | no_license | balajisubudhi/projects | ed2bd65099d59fe995dc07dde1a815dbf71d01ca | 76ece9de664914f10de7a9d349e144d38197f52f | refs/heads/master | 2021-01-16T22:02:17.727239 | 2016-05-30T15:11:36 | 2016-05-30T15:11:36 | 60,018,827 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 61 | r | dq.r | library(shiny)
runApp(appDir = "D:\\github\\DQ",port = 1009)
|
42c8b0945b5e675590bdc1378df6dd1434dc5c6f | 09450ca02618728164fcd8cb9c7a6d745f3e505b | /questions/factors.R | 45205f40e2fb0273ffb3ab2041d6ed0c952ebee0 | [] | no_license | xAdvitya/INT-232-Rprogramming | d7f23b56a530b65b11b0c3f0e3f9235d7e6cc798 | eba20c4d5c9493051e00ff4f1398c9c434ef4410 | refs/heads/master | 2022-12-05T12:31:36.261937 | 2020-09-03T11:08:59 | 2020-09-03T11:31:52 | 284,464,572 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 70 | r | factors.R | num = 8
for(i in 1:num){
if(num%%i ==0){
print(i)
}
} |
3b0a4544c1f1c5c337b451413e7dc50c4dc40000 | 90c68203e3bf2e91dd963afd3f90e5fe180e3be6 | /snapshot_kmeans_DRB.R | cf42096f32e77479199e75ff634d57f792064977 | [] | no_license | guanjue/ctcf_degradation_analysis | 57675e3dd60856610d88e9d47283c10043483674 | f866937bafe6aa824915e55e9f532d0559de08a2 | refs/heads/master | 2020-03-26T06:38:02.653034 | 2018-12-14T19:09:11 | 2018-12-14T19:09:11 | 144,613,674 | 1 | 0 | null | null | null | null | UTF-8 | R | false | false | 4,251 | r | snapshot_kmeans_DRB.R | library(pheatmap)
# --- Load normalised signal matrices ---
# NOTE(review): the TSnorm matrices below are loaded and then immediately
# overwritten by the s3norm versions, so every plot in this script
# (including the outputs named "TS") is drawn from the s3norm data.
rep1 = as.matrix(read.table('rep1_sig_TSnorm_mat.txt', header=TRUE))
rep2 = as.matrix(read.table('rep2_sig_TSnorm_mat.txt', header=TRUE))
ave = as.matrix(read.table('average_sig_TSnorm_mat.txt', header=TRUE))
rep1_nopk = as.matrix(read.table('rep1_sig_TSnorm_mat_nopk.txt', header=TRUE))
rep2_nopk = as.matrix(read.table('rep2_sig_TSnorm_mat_nopk.txt', header=TRUE))
ave_nopk = as.matrix(read.table('average_sig_TSnorm_mat_nopk.txt', header=TRUE))
rep1 = as.matrix(read.table('rep1_sig_s3norm_mat.txt', header=TRUE))
rep2 = as.matrix(read.table('rep2_sig_s3norm_mat.txt', header=TRUE))
ave = as.matrix(read.table('average_sig_s3norm_mat.txt', header=TRUE))
rep1_nopk = as.matrix(read.table('rep1_sig_s3norm_mat_nopk.txt', header=TRUE))
rep2_nopk = as.matrix(read.table('rep2_sig_s3norm_mat_nopk.txt', header=TRUE))
ave_nopk = as.matrix(read.table('average_sig_s3norm_mat_nopk.txt', header=TRUE))
# Fixed seed so the background (no-peak) subsample is reproducible.
set.seed(2018)
used_id_nopk = sample(dim(ave_nopk)[1], 5000)
# --- Signal histograms (raw and log2 scale) ---
pdf('signal_hist.pdf')
hist(ave, breaks=50)
dev.off()
pdf('signal_hist.log2.pdf')
hist(log2(ave), breaks=50)
dev.off()
tp_name = c('0hr', '4hr', '6hr', '12hr', '18hr', '24hr')
pdf('signal_hist.log2.all.pdf', height=14, width=7)
par(mfrow=c(4,1))
for (i in c(1:4)){
sig = log2(ave[,i])
# NOTE(review): breaks_vec is computed but never used below.
breaks_vec = seq(min(sig)-1,max(sig)+1,length.out=10)
hist(log2(ave[,i]), breaks=100, xlim=c(-5, 8), ylim=c(0,0.5), main=tp_name[i], freq=FALSE)
box()
}
dev.off()
# Per-row log2 fold change relative to the row's 4th column.
scale_fc = function(x){
xs = (log2(x))
return((xs)-(xs[4]))
}
ave_fc = t(apply(ave, 1, scale_fc))
# --- Fold-change histograms ---
pdf('fc_hist.pdf')
hist((ave_fc[,-1]), breaks=50)
dev.off()
# NOTE(review): ave_fc is already on a log2 scale; log2() here double-logs
# and silently drops non-positive fold changes -- confirm intent.
pdf('fc_hist.log2.pdf')
hist(log2(ave_fc[,-1]), breaks=100, xlim=c(-5.5, 15.5))
dev.off()
pdf('fc_hist.log2.all.pdf', height=14, width=7)
par(mfrow=c(3,1))
for (i in c(1:3)){
hist((ave_fc[,i]), breaks=100, xlim=c(-4, 10), ylim=c(0,2.0), main=tp_name[i], freq=FALSE)
abline(v=0, col='red',lwd=1.5)
box()
}
dev.off()
# --- K-means clustering of the fold-change profiles into nr clusters ---
nr = 10
set.seed(2018)
dr = as.matrix(ave_fc)
fit = kmeans(dr, nr)
# Interleave replicate 1 and replicate 2 columns per timepoint.
sig_reps = c()
sig_reps_nopk = c()
for (i in c(1:dim(rep1)[2])){
sig_reps = cbind(sig_reps, rep1[,i], rep2[,i])
sig_reps_nopk = cbind(sig_reps_nopk, rep1_nopk[,i], rep2_nopk[,i])
}
# Heatmaps: rows ordered by cluster id, background (no-peak) rows appended.
dr_kmeans = log2(sig_reps[order(fit$cluster),]+1)
my_colorbar=colorRampPalette(c('white', 'red'))(n = 128)
pdf(paste('kmean.s3.', toString(nr), '.pdf', sep=''))
pheatmap(rbind(dr_kmeans, sig_reps_nopk[used_id_nopk,]), color=my_colorbar, cluster_rows = FALSE, cluster_cols = FALSE,annotation_names_row=FALSE,annotation_names_col=FALSE,show_rownames=FALSE,show_colnames=FALSE)
dev.off()
dr_kmeans_ave = log2(ave[order(fit$cluster),]+1)
my_colorbar=colorRampPalette(c('white', 'red'))(n = 128)
pdf(paste('kmean.s3.ave.', toString(nr), '.pdf', sep=''))
pheatmap(rbind(dr_kmeans_ave, ave_nopk[used_id_nopk,]), color=my_colorbar, cluster_rows = FALSE, cluster_cols = FALSE,annotation_names_row=FALSE,annotation_names_col=FALSE,show_rownames=FALSE,show_colnames=FALSE)
dev.off()
png(paste('kmean.s3.ave.', toString(nr), '.png', sep=''))
pheatmap(rbind(dr_kmeans_ave, ave_nopk[used_id_nopk,]), color=my_colorbar, cluster_rows = FALSE, cluster_cols = FALSE,annotation_names_row=FALSE,annotation_names_col=FALSE,show_rownames=FALSE,show_colnames=FALSE)
dev.off()
# NOTE(review): the "TS"-named outputs below reuse the same matrices as
# the "s3" plots (the TSnorm data was overwritten at the top of the file).
my_colorbar=colorRampPalette(c('white', 'red'))(n = 128)
pdf(paste('kmean.TS.', toString(nr), '.pdf', sep=''))
pheatmap(rbind(dr_kmeans, sig_reps_nopk[used_id_nopk,]), color=my_colorbar, cluster_rows = FALSE, cluster_cols = FALSE,annotation_names_row=FALSE,annotation_names_col=FALSE,show_rownames=FALSE,show_colnames=FALSE)
dev.off()
dr_kmeans_ave = log2(ave[order(fit$cluster),]+1)
my_colorbar=colorRampPalette(c('white', 'red'))(n = 128)
pdf(paste('kmean.TS.ave.', toString(nr), '.pdf', sep=''))
pheatmap(rbind(dr_kmeans_ave, ave_nopk[used_id_nopk,]), color=my_colorbar, cluster_rows = FALSE, cluster_cols = FALSE,annotation_names_row=FALSE,annotation_names_col=FALSE,show_rownames=FALSE,show_colnames=FALSE)
dev.off()
png(paste('kmean.TS.ave.', toString(nr), '.png', sep=''))
pheatmap(rbind(dr_kmeans_ave, ave_nopk[used_id_nopk,]), color=my_colorbar, cluster_rows = FALSE, cluster_cols = FALSE,annotation_names_row=FALSE,annotation_names_col=FALSE,show_rownames=FALSE,show_colnames=FALSE)
dev.off()
|
f0ee1e4afd098aa3a88ba5af80ba20e72d1982b0 | a4ff06aff93c2e6978fb08f89dca1f739eed6db5 | /src/exposed/corset_exposed_asw_analysis.R | 97a8f9a56b5ac88925a1918fcf1518acc2ced9a8 | [] | no_license | sarahinwood/asw-para-rnaseq | 4e36e901a6bfc2db45d89e7b6f0aa67a82b54ab6 | 7561f39ef53fe7f66d933cb37860bf3910b9d3ae | refs/heads/master | 2021-11-26T15:24:15.764144 | 2021-11-05T01:11:15 | 2021-11-05T01:11:15 | 148,420,494 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 3,137 | r | corset_exposed_asw_analysis.R | library(data.table)
## Differential-expression analysis of corset cluster counts (DESeq2):
## exposed vs control heads, then annotation and overlap with the
## earlier non-clustered analysis.
library(DESeq2)
library(EnhancedVolcano)
library(plyr)
library(VennDiagram)
##read in corset output (cluster x sample count matrix)
count_data<-read.delim("output/corset/counts.txt",row.names=1, check.names = FALSE)
##Import table describing samples
sample_data <- fread("data/full_sample_key.csv")
setkey(sample_data, Sample_name)
##make dds object; sample metadata reordered to match count-matrix columns
dds <- DESeqDataSetFromMatrix(count_data, colData = sample_data[colnames(count_data)], design = ~1)
##save dds object
saveRDS(dds, file = "output/exposed_corset/deseq2/dds.rds")
##create dds object for group analysis
dds_group <- copy(dds)
##create groupings of tissue+treatment (e.g. "Head_Exposed")
dds_group$group <- factor(paste(dds$Tissue,dds$Treatment,sep="_"))
##add group to design
design(dds_group) <- ~group
##run deseq2 and generate results
dds_group <- DESeq(dds_group)
saveRDS(dds_group, file = "output/exposed_corset/deseq2/dds_group.rds")
resultsNames(dds_group)
##Make table of results for exposed vs control heads (|LFC| > 1, alpha 0.1)
res_group <- results(dds_group, contrast = c("group", "Head_Exposed", "Head_Control"), lfcThreshold = 1, alpha = 0.1)
##Order by padj
ordered_res_group <- res_group[order(res_group$padj),]
##Make data table and write to output
ordered_res_group_table <- data.table(data.frame(ordered_res_group), keep.rownames = TRUE)
fwrite(ordered_res_group_table, "output/exposed_corset/deseq2/res_group.csv")
##Keep significant DEGs at padj < 0.05
ordered_sig_res_group_table <- subset(ordered_res_group_table, padj < 0.05)
fwrite(ordered_sig_res_group_table, "output/exposed_corset/deseq2/exposed_analysis_sig_degs.csv", col.names = TRUE, row.names = FALSE)
##Sub in any gene of interest to plot counts
plotCounts(dds_group, "Cluster-2682.0", intgroup = c("group"), main="...")
##volcano plot
EnhancedVolcano(ordered_res_group_table, x="log2FoldChange", y="padj", lab="", transcriptPointSize = 3)
##read in annotations
trinotate_report <- fread("data/trinotate_annotation_report.txt")
##read in corset clusters (transcript-to-cluster mapping)
cluster_data<-read.delim("output/corset/clusters.txt", header = FALSE)
##Generate counts of transcripts in each cluster
cluster_counts <- count(cluster_data, vars="V2")
##generate table of transcript, annot + cluster allocation
trinotate_clusters<-merge(cluster_data, trinotate_report, by.x="V1", by.y="transcript_id", all.x=TRUE)
##merge annot+clusters with list of DEGs
degs_annots <- merge(trinotate_clusters, ordered_res_group_table, by.x="V2", by.y="rn")
fwrite(degs_annots, "output/exposed_corset/deseq2/sig_clusters_annots.csv")
##look at overlap with original analysis w/out clustering
##Strip the Trinity isoform suffix ("_i...") to get gene-level ids.
deg_ids <- data.frame(tstrsplit(degs_annots$V1, "_i", keep=1))
## NOTE(review): this auto-generated column name is data-dependent and
## brittle; renaming by position would be safer.
setnames(deg_ids, old=c("c..TRINITY_DN25575_c0_g1....TRINITY_DN39667_c0_g1....TRINITY_DN1916_c0_g1..."), new=c("gene_id"))
deg_iso_ids <- deg_ids$gene_id
##read in longest iso/gene results
no_corset_anal <- fread("output/exposed/deseq2/exposed_analysis_sig_degs.csv")
old_deg_ids <- no_corset_anal$rn
##Venn diagram of DEG overlap between the two analyses
Set1 <- RColorBrewer::brewer.pal(3, "Set1")
vd <- venn.diagram(x = list("Corset DEGs"=deg_iso_ids, "Longest Isoform/Gene DEGs"=old_deg_ids), filename=NULL, alpha=0.5, cex = 1, cat.cex=1, lwd=1, label=TRUE)
grid.newpage()
grid.draw(vd) |
d3308baf767b0e7cbf303a17fa0bcd459464714a | d92043c1e880559f479d25a7b50cfedcee3f48df | /man/f_model_importance_pl_add_plots_regression.Rd | 217e235685304aab6c6fb94e43f3cc3a1973ca25 | [] | no_license | erblast/oetteR | f759d69121361007136499706687fae6cd9274ef | 02f59f2c0562ae224b798a2115c2061608e8e6f7 | refs/heads/master | 2021-06-21T13:36:58.536874 | 2019-05-07T08:39:10 | 2019-05-07T08:39:10 | 104,708,489 | 7 | 1 | null | null | null | null | UTF-8 | R | false | true | 3,241 | rd | f_model_importance_pl_add_plots_regression.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/f_model_imp.R
\name{f_model_importance_pl_add_plots_regression}
\alias{f_model_importance_pl_add_plots_regression}
\title{add plots based on variable importance to pipelearner dataframe}
\usage{
f_model_importance_pl_add_plots_regression(pl, data, m, ranked_variables,
response_var, title,
variable_color_code = f_plot_color_code_variables(data_ls), formula,
data_ls, var_dep_limit = 10, var_dep_log_y = F, tabplot_limit = 12,
formula_in_pl = F)
}
\arguments{
\item{pl}{a dataframe containing the columns for data, m, ranked_variables, response_var and title}
\item{data}{symbol (unquoted name) of data column in pl}
\item{m}{symbol (unquoted name) of data column in pl}
\item{ranked_variables}{symbol (unquoted name) of data column in pl}
\item{response_var}{symbol (unquoted name) of data column in pl}
\item{title}{symbol (unquoted name) of data column in pl}
\item{variable_color_code}{dataframe created by f_plot_color_code_variables()}
\item{formula}{formula that was used to construct the model}
\item{data_ls}{data_ls list object containing the whole of the original data}
\item{var_dep_limit}{number of variables to be plotted on dependency plot}
\item{var_dep_log_y}{should y axis of dependency plot be logarithmic}
\item{tabplot_limit}{number of variables to be plotted on tabplot}
\item{formula_in_pl}{boolean if formula is a column in pl?}
}
\value{
dataframe
}
\description{
adds a bar plot of the ranked variables, a tabplot sorted by the
target variable and a dependency plot (response variable vs the sequential
range of one of the predictor variables while all other predictors are kept
constant at mean values).
}
\examples{
data_ls = f_clean_data(mtcars)
form = disp~cyl+mpg+hp
variable_color_code = f_plot_color_code_variables(data_ls)
pl = pipelearner::pipelearner(data_ls$data) \%>\%
pipelearner::learn_models( rpart::rpart, form ) \%>\%
pipelearner::learn_models( randomForest::randomForest, form ) \%>\%
pipelearner::learn_models( e1071::svm, form ) \%>\%
pipelearner::learn() \%>\%
mutate( imp = map2(fit, train, f_model_importance)
, title = paste(model, models.id, train_p) ) \%>\%
f_model_importance_pl_add_plots_regression( data = train
, m = fit
, ranked_variables = imp
, title = title
, response_var = target
, variable_color_code = variable_color_code
, formula = form
, data_ls = data_ls
, var_dep_limit = 10
, var_dep_log_y = T
, tabplot_limit = 12 )
}
\seealso{
\code{\link{f_model_importance_plot}}
\code{\link{f_model_importance_plot_tableplot}}
\code{\link{f_model_plot_variable_dependency_regression}}
}
|
f16f8c266c9689b5007b9814f8ddeb142beeaaab | 9e4df408b72687493cc23144408868a975971f68 | /SMS_r_prog/r_prog_less_frequently_used/extractonespeciesforsingle.r | fa2704eccf9382d8f0feb844c16f10c8dbd532a4 | [
"MIT"
] | permissive | ices-eg/wg_WGSAM | 7402ed21ae3e4a5437da2a6edf98125d0d0e47a9 | 54181317b0aa2cae2b4815c6d520ece6b3a9f177 | refs/heads/master | 2023-05-12T01:38:30.580056 | 2023-05-04T15:42:28 | 2023-05-04T15:42:28 | 111,518,540 | 7 | 0 | null | null | null | null | UTF-8 | R | false | false | 7,285 | r | extractonespeciesforsingle.r | exOneSp<-function(my.sp.name='Mackerel',my.sp=20,read.fleet=T,doRun=F) {
SMS.dat<-read.FLSMS.control()
############
my.sp.VPA<-my.sp-first.VPA+1
# runs are made in a separate dirictory
my.sp.dir<-paste('SS',my.sp.name,sep='_')
scenario.dir<-file.path(root,my.sp.dir)
if (file.exists(scenario.dir)) unlink(scenario.dir,recursive = T)
dir.create(scenario.dir,showWarnings = FALSE)
old<-SMS.dat
ny<-new("FLSMS.control")
ny@first.year<-old@first.year
ny@first.year.model<-old@first.year.model
ny@last.year<-old@last.year
ny@last.year.model<-old@last.year.model
ny@last.season<-old@last.season
ny@last.season.last.year<-old@last.season.last.year
ny@species.names<-old@species.names[my.sp]
ny@first.age<-old@first.age
ny@rec.season<-old@rec.season
ny@species.info[]<- old@species.info[my.sp,]
ny@max.age.all<-ny@species.info[,'last-age']
nyAges<-ny@max.age.all-ny@first.age+1
#cat(nyAges,'\n')
ny@beta.cor[]<-old@beta.cor[my.sp.VPA]
ny@SSB.R.year.first[]<-old@SSB.R.year.first[my.sp.VPA]
ny@SSB.R.year.last[]<-old@SSB.R.year.last[my.sp.VPA]
ny@obj.func.weight[]<-old@obj.func.weight[my.sp,]
ny@discard<-old@discard[[my.sp.VPA]]
ny@combined.catches<-old@combined.catches[my.sp.VPA]
ny@seasonal.catch.s2<-old@seasonal.catch.s2[my.sp.VPA]
ny@catch.s2.group<-list(old@catch.s2.group[[my.sp.VPA]])
ny@catch.season.age<-list(old@catch.season.age[[my.sp.VPA]])
ny@avg.F.ages[]<-old@avg.F.ages[my.sp.VPA,]
ny@min.catch[]<-old@min.catch[my.sp.VPA]
ny@catch.sep.year<-list(old@catch.sep.year[[my.sp.VPA]])
ny@catch.spline.year<- list(old@catch.spline.year[[my.sp.VPA]])
ny@zero.catch.year.season<-old@zero.catch.year.season
ny@zero.catch.season.age<-old@zero.catch.season.age
ny@fix.F.factor<-old@fix.F.factor[my.sp.VPA]
ny@est.calc.sigma<-old@est.calc.sigma
ny@read.HCR<-old@read.HCR
write.FLSMS.control(ny,file=file.path(scenario.dir,'SMS.dat'),path=scenario.dir, writeSpNames=T)
if (read.fleet) {
SMS.indices<<-SMS2FLIndices(SMS.dat)
summary(SMS.indices)
}
f2 <- function(x) {
#print(x@name)
if (substr(x@name,1,3)=='Whg') return(x)
}
used<-FLIndices();fl<-0
for (i in (1:length(SMS.indices))) {
a<-SMS.indices[[i]]
if (substr(a@name,1,3)== my.sp.name) {
fl<-fl+1
used[[fl]]<-a
}
}
FLIndices2SMS(out.path=scenario.dir,indices=used,control=ny)
SMS.control<-read.FLSMS.control()
la<-SMS.control@max.age.all
fa<-SMS.control@first.age
years<-c(1,1)
years[1]<-SMS.control@first.year
years[2]<-SMS.control@last.year
ny<-years[2]-years[1]+1
npr<-sum(SMS.control@species.info[,'predator']>=1)
nsp<-SMS.control@no.species
nq<-SMS.control@last.season
noAreas<-SMS.control@no.areas
############# catch data
# Read one age-structured SMS input file (area x species x year x quarter
# x age), keep only species `my.sp`, sum over areas, and rewrite the
# per-year quarter-by-age tables (plus the -999 check line) under `path`.
# Relies on closure variables from exOneSp(): data.path, noAreas,
# first.VPA, nsp, years, nq, fa, la, ny, my.sp, nyAges.
tr_sp<-function(inp.file='canum.in',path=NULL) {
vari<-scan(file.path(data.path,inp.file),comment.char='#')
# Drop the trailing -999 check value.
vari<-head(vari,-1)
# west.in has extra leading records (species before first.VPA); skip them.
if (inp.file=='west.in')vari<-vari[((first.VPA-1)*noAreas*ny*(la-fa+1)*nq+1):length(vari)]
# Rebuild the full index grid in the file's storage order.
b<-expand.grid(sub_area=1:noAreas,species.n=first.VPA:nsp,year=years[1]:years[2],quarter=1:nq,age=fa:la)
b<-b[order(b$sub_area,b$species.n,b$year,b$quarter,b$age),]
b$vari<-vari
b<-droplevels(subset(b,species.n==my.sp))
# Sum over areas into a year x quarter x age array.
b<-tapply(b$vari,list(b$year,b$quarter,b$age),sum)
# NOTE(review): the result of this ftable() is discarded (dead code).
round(ftable(b),0)
cat('#\n',file=file.path(path,inp.file),append=F)
y<-0
for (year in (years[1]:years[2])) {
y<-y+1
write.table(b[y,,1:nyAges],row.names = F,col.names = F,file=file.path(path,inp.file),append=T)
}
cat(' -999 # check\n',file=file.path(path,inp.file),append=T)
}
tr_sp(inp.file='canum.in',path=file.path(root,my.sp.dir))
tr_sp(inp.file='weca.in',path=file.path(root,my.sp.dir))
tr_sp(inp.file='natmor.in',path=file.path(root,my.sp.dir))
tr_sp(inp.file='natmor1.in',path=file.path(root,my.sp.dir))
tr_sp(inp.file='propmat.in',path=file.path(root,my.sp.dir))
tr_sp(inp.file='proportion_landed.in',path=file.path(root,my.sp.dir))
tr_sp(inp.file='west.in',path=file.path(root,my.sp.dir))
# Variant of tr_sp for a season x age file (no year dimension): keep
# species `my.sp`, sum over areas, and rewrite the quarter-by-age matrix
# plus the -999 check line. Uses closure variables from exOneSp():
# data.path, noAreas, first.VPA, nsp, nq, fa, la, my.sp.
tr_sp2<-function(inp.file='zero_catch_season_ages.in',path=NULL) {
vari<-scan(file.path(data.path,inp.file),comment.char='#')
# Drop the trailing -999 check value.
vari<-head(vari,-1)
b<-expand.grid(sub_area=1:noAreas,species.n=first.VPA:nsp,quarter=1:nq,age=fa:la)
b<-b[order(b$sub_area,b$species.n,b$quarter,b$age),]
b$vari<-vari
b<-droplevels(subset(b,species.n==my.sp))
b<-tapply(b$vari,list(b$quarter,b$age),sum)
write.table(b,row.names = F,col.names = F,file=file.path(path,inp.file),append=F)
cat(' -999 # check\n',file=file.path(path,inp.file),append=T)
}
tr_sp2(inp.file='zero_catch_season_ages.in',path=file.path(root,my.sp.dir))
# Variant of tr_sp for a year x season file (no age dimension): keep
# species `my.sp`, sum over areas, and rewrite the year-by-quarter matrix
# plus the -999 check line. Uses closure variables from exOneSp():
# data.path, noAreas, first.VPA, nsp, years, nq, my.sp.
tr_sp3<-function(inp.file='zero_catch_year_season.in',path=NULL) {
vari<-scan(file.path(data.path,inp.file),comment.char='#')
# Drop the trailing -999 check value.
vari<-head(vari,-1)
b<-expand.grid(sub_area=1:noAreas,species.n=first.VPA:nsp,year=years[1]:years[2],quarter=1:nq)
b<-b[order(b$sub_area,b$species.n,b$year,b$quarter),]
b$vari<-vari
b<-droplevels(subset(b,species.n==my.sp))
b<-tapply(b$vari,list(b$year,b$quarter),sum)
write.table(b,row.names = F,col.names = F,file=file.path(path,inp.file),append=F)
cat(' -999 # check\n',file=file.path(path,inp.file),append=T)
}
tr_sp3(inp.file='zero_catch_year_season.in',path=file.path(root,my.sp.dir))
# Variant of tr_sp for a year-only file: keep species `my.sp`, sum over
# areas, and rewrite the per-year vector plus the -999 check line.
# Uses closure variables from exOneSp(): data.path, noAreas, first.VPA,
# nsp, years, my.sp.
tr_sp4<-function(inp.file='recruitment_years.in',path=NULL) {
vari<-scan(file.path(data.path,inp.file),comment.char='#')
# Drop the trailing -999 check value.
vari<-head(vari,-1)
b<-expand.grid(sub_area=1:noAreas,species.n=first.VPA:nsp,year=years[1]:years[2])
b<-b[order(b$sub_area,b$species.n,b$year),]
b$vari<-vari
b<-droplevels(subset(b,species.n==my.sp))
b<-tapply(b$vari,list(b$year),sum)
write.table(b,row.names = F,col.names = F,file=file.path(path,inp.file),append=F)
cat(' -999 # check\n',file=file.path(path,inp.file),append=T)
}
tr_sp4(inp.file='recruitment_years.in',path=file.path(root,my.sp.dir))
a<-readLines(con = file.path(data.path,'F_q_ini.in'))
a<-a[grep( toupper(my.sp.name),a)]
writeLines(a,con= file.path(root,my.sp.dir,'F_q_ini.in'))
cat(' -999 # check\n',file=file.path(root,my.sp.dir,'F_q_ini.in'),append=T)
SMS.files.single<-c("area_names.in","just_one.in","reference_points.in","cp.bat",
"proportion_M_and_F_before_spawning.in",'sms.exe')
for (from.file in SMS.files.single) {
file.copy(file.path(data.path,from.file), file.path(scenario.dir,from.file), overwrite = TRUE)
}
sms.do<-file.path(scenario.dir,'do_run.bat')
if (doRun) {
cat(paste('cd ', scenario.dir,'\n'),file=sms.do)
cat(paste(file.path(scenario.dir,"sms.exe")," -nox \n",sep=""),file=sms.do,append=TRUE)
command<-paste('"',sms.do,'"',sep='')
system(command,show.output.on.console =T)
}
}
if (FALSE) {
exOneSp(my.sp.name='Cod',my.sp=18,read.fleet=T)
exOneSp(my.sp.name='Whg',my.sp=19,read.fleet=F)
exOneSp(my.sp.name='Had',my.sp=20,read.fleet=F)
exOneSp(my.sp.name='Pok',my.sp=21,read.fleet=F)
exOneSp(my.sp.name='Her',my.sp=22,read.fleet=F)
exOneSp(my.sp.name='Nsa',my.sp=23,read.fleet=F)
exOneSp(my.sp.name='Ssa',my.sp=24,read.fleet=F)
exOneSp(my.sp.name='Nop',my.sp=25,read.fleet=F)
exOneSp(my.sp.name='Spr',my.sp=26,read.fleet=F)
exOneSp(my.sp.name='Ple',my.sp=27,read.fleet=F)
exOneSp(my.sp.name='Sol',my.sp=28,read.fleet=F)
}
exOneSp(my.sp.name='Nop',my.sp=24,read.fleet=T)
# scenario.dir<-file.path(root,'SS_COD')
|
51b744e06ba5d27aae8b372bc184c8d6d1ad53ba | 1b30a734bae643efea3d2025637c2e1619572c84 | /cachematrix.R | ed192b0764a275df1517e24d35d95722fd4306fa | [] | no_license | eddidaz/ProgrammingAssignment2 | ccff0b65d27fbc31ba29552428b526e651496b49 | 99c7d3a381846622571ba991b720bc56a6189af0 | refs/heads/master | 2021-01-17T07:56:45.189563 | 2016-05-21T15:29:30 | 2016-05-21T15:29:30 | 59,357,463 | 0 | 0 | null | 2016-05-21T11:58:30 | 2016-05-21T11:58:30 | null | UTF-8 | R | false | false | 1,892 | r | cachematrix.R | ## Put comments here that give an overall description of what your
## functions do
## Creates a special "matrix" object that can cache its inverse
## Build a matrix wrapper that memoises its inverse. Returns a list of
## four closures sharing one environment:
##   set(y)          -- replace the stored matrix, dropping any cached inverse
##   get()           -- return the stored matrix
##   setinv(inverse) -- store a computed inverse in the cache
##   getinv()        -- return the cached inverse, or NULL if not yet set
makeCacheMatrix <- function(x = matrix()) {
        cached <- NULL
        set <- function(y) {
                # A new matrix invalidates whatever inverse was cached.
                x <<- y
                cached <<- NULL
        }
        list(set = set,
             get = function() x,
             setinv = function(inverse) cached <<- inverse,
             getinv = function() cached)
}
## Computes the inverse of the special "matrix" returned by makeCacheMatrix
#above. If the inverse has already been calculated (and the matrix has not
#changed), then cacheSolve should retrieve the inverse from the cache
## Return the inverse of the special "matrix" created by makeCacheMatrix.
## If the inverse is already cached it is returned directly (after a
## "getting cached data" message); otherwise it is computed with solve(),
## stored via setinv(), and returned. Extra arguments in ... are passed
## through to solve().
cacheSolve <- function(x, ...) {
        cached <- x$getinv()
        if (is.null(cached)) {
                # Cache miss: compute the inverse, then remember it.
                cached <- solve(x$get(), ...)
                x$setinv(cached)
        } else {
                message("getting cached data")
        }
        cached
}
# Function testing
# https://www.coursera.org/learn/r-programming/discussions/all/threads/hdpNLxwBEeaxVRJ-Fv2Eqw/replies/-xz_oxyiEeaNSw6v6KnGpw
# approach 1: create a matrix object, then use it as input to cacheSolve()
a <- makeCacheMatrix(matrix(c(-1, -2, 1, 1), 2,2))
cacheSolve(a)
# approach 2: use makeCacheMatrix() as the input argument to cacheSolve()
# note that the argument to cacheSolve() is a different object
# than the argument to the first call of cacheSolve()
cacheSolve(makeCacheMatrix(matrix(c(-1, -2, 1, 1), 2,2)))
# call cacheSolve(a) a second time to trigger the "getting cached inverse" message
cacheSolve(a)
# try a non-invertible matrix
b <- makeCacheMatrix(matrix(c(0,0,0,0),2,2))
cacheSolve(b)
# illustrate getting the memory locations
a <- makeCacheMatrix(matrix(c(-1, -2, 1, 1), 2,2))
tracemem(a)
tracemem(matrix(c(-1, -2, 1, 1), 2,2))
|
17b4cdd6ef1227470bce36626d67a22a25b7c609 | 284c83a4a4c783bdc7eb53283c1a4b835f0c5ac5 | /code/thirdCode.R | 3150c9ae8e79a5cffd00ecca3bd63b8bf6c692f0 | [] | no_license | kfeng123/regressionPaper | bf19337cc04e8294dd77de175306a106de9d9c4f | 8abd7058b8593acf30a9c9c5584c721be339b839 | refs/heads/master | 2021-01-18T22:15:02.390825 | 2019-10-11T12:41:40 | 2019-10-11T12:41:40 | 72,349,101 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 4,375 | r | thirdCode.R |
# Simulation study comparing two high-dimensional tests of a zero
# regression coefficient vector: a projection-based statistic ("T") and a
# Chen-style pairwise cross-product statistic ("ChenT"), both calibrated
# by permuting y; "ChenTT" is the corresponding asymptotic power formula.
# Arguments:
#   n, p        sample size and number of predictors
#   betaMod     squared norm of the true coefficient vector (0 => null)
#   varEpsilon  error variance; alpha: intercept
#   r           number of spiked factors in the predictor covariance
#               (0 => identity covariance); TheLambda: spike size
#   rho, muX    accepted but never used in the body (note: rho's default
#               uses T as TRUE -- kept for backward compatibility)
# Returns list(myPower, chenPower, ChenTT): empirical rejection rates over
# 100 replications (level 0.05) plus the mean theoretical power.
myExp=function(
n=15,
p=100,
betaMod=0.00,
varEpsilon=4,
rho=runif(T,0,1),
muX=runif(p,2,3),
alpha=0,
r=1,
TheLambda=p){
# True coefficient vector: alternating +/-1, normalised to unit length,
# then scaled so that sum(beta^2) == betaMod.
beta=rep(c(1,-1),p/2)
#beta=c(rep(1,p/2),rep(0,p/2))
#beta=c(rep(1,5),rep(0,p-5))
#beta=rnorm(p)
#beta=beta[sample.int(p)]
beta=beta/sqrt(sum(beta^2))
beta=sqrt(betaMod)*beta
# Predictor covariance: identity when r==0, otherwise the spiked model
# Sigma = V D^2 V' + I with r random orthonormal directions V.
if(r==0){
theSigma=diag(rep(1,p))
}else{
if(r!=1){
D=diag(rep(sqrt(TheLambda),r))
}else{
D=TheLambda
dim(D)=c(1,1)
}
V=rnorm(p*r,0,1)
dim(V)=c(p,r)
V=svd(V)$u
theSigma=rep(0,p*p)
dim(theSigma)=c(p,p)
theSigma=V%*%D%*%D%*%t(V)+diag(rep(1,p))
}
# beta=(diag(p)-V%*%t(V))%*%rnorm(p)
# beta=beta/sqrt(sum(beta^2))
# beta=sqrt(betaMod)*beta
# Draw one data set: X follows the factor model U D V' + Z (n x p),
# y is the linear response with Gaussian noise.
generateData=function(beta){
X=rep(0,n*p)
dim(X)=c(n,p)
if(r==0){
Z=rnorm(n*p,0,1)
X=Z
}else{
U=rnorm(n*r,0,1)
dim(U)=c(n,r)
Z=rnorm(n*p,0,1)
dim(Z)=c(n,p)
X=U%*%D%*%t(V)+Z
}
y=X%*%beta+rep(alpha,n)+rnorm(n,0,sqrt(varEpsilon))
list(X=X,y=y)
}
# Q: centering projection matrix; W: an orthonormal basis of its range.
Q=diag(n)-rep(1,n)%*%t(rep(1,n))/n
W=eigen(Q)$vectors[,1:(n-1)]
# Asymptotic power of the Chen-type test at level 0.05.
ChenTT=pnorm(qnorm(0.05)+n*sum((theSigma%*%beta)^2)/sqrt(2*sum(theSigma^2))/4)
# One Monte-Carlo replication: returns 0/1 rejection indicators for both
# tests, calibrated by 100 permutations of y at level 0.05.
simul=function(){
data=generateData(beta)
X=t(data$X)
y=data$y
# Projection-based statistic: centred sum of squares of y divided by a
# quadratic form of y in the projected design.
myTCal=function(X,y){
temp=(
sum(y^2)-
n*(mean(y))^2
)/
(
t(y)%*%W%*%solve(t(W)%*%t(X)%*%X%*%W)%*%t(W)%*%y
)
# SigmaEst=var(t(X))
# myE=eigen(SigmaEst)$values
# jun=sum(myE)
# fang=sum(myE^2)-jun^2/(n-1)
# (temp-jun)/sqrt(fang)
}
T=as.numeric(myTCal(X,y))
# Chen-style statistic: sum over distinct pairs of x_i'x_j y_i y_j,
# normalised by the centred sum of squares of y.
ChenTCal=function(X,y){
# thePhi=function(i1,i2,i3,i4){
# 1/4*t(X[,i1]-X[,i2])%*%(X[,i3]-X[,i4])*(y[i1]-y[i2])*(y[i3]-y[i4])
# }
# theTemp=0
# for(i1 in 1:n)for(i2 in 1:n)for(i3 in 1:n)for(i4 in 1:n){
# if(i1!=i2&i1!=i3&i1!=i4&i2!=i3&i2!=i4&i3!=i4){
# theTemp=theTemp+thePhi(i1,i2,i3,i4)
# }
# }
# theTemp=theTemp*n*(n-1)*(n-2)*(n-3)/4/3/2/1
# ChenT=n*theTemp/sqrt(2*sum(theSigma^2))/4
theTemp=0
for(i1 in 1:n)for(i2 in 1:n){
if(i1!=i2){
theTemp=theTemp+sum(X[,i1]*X[,i2])*y[i1]*y[i2]
}
}
theTemp/
(
t(y)%*%Q%*%y
)
}
ChenT=as.numeric(ChenTCal(X,y))
beta=0*beta
# Build permutation null distributions for both statistics.
Oh=NULL
ChenOh=NULL
for(ti in 1:100){
# data=generateData(beta)
# X=t(data$X)
# y=data$y
# TT=as.numeric(myTCal(X,y))
# TT2=as.numeric(ChenTCal(X,y))
myOrd=sample.int(n)
TT=as.numeric(myTCal(X,y[myOrd]))
TT2=as.numeric(ChenTCal(X,y[myOrd]))
Oh=c(Oh,TT)
ChenOh=c(ChenOh,TT2)
}
# Reject when the observed statistic exceeds the permutation 95% point.
T=0+(mean(Oh>T)<=0.05)
ChenT=0+(mean(ChenOh>ChenT)<=0.05)
# theTS=NULL
# for(th in 1:100) theTS[th]=myTCal(X,y[sample(n)])
# T=0+(mean(theTS>T)<=0.05)
# T=0+(T>qnorm(0.95))
list(T=T,ChenT=ChenT,ChenTT=ChenTT)
}
# 100 Monte-Carlo replications; mean rejection rate = empirical power.
REL=NULL
ChenREL=NULL
ChenREL2=NULL
for(i in 1:100){
temp=simul()
REL[i]=temp$T
ChenREL[i]=temp$ChenT
ChenREL2[i]=temp$ChenTT
}
#xxx=NULL
#for(j in 1:length(REL)){
#xxx[j]=qchisq((j-0.5)/length(REL),df=1)
#}
#plot(xxx,sort(REL))
#abline(0,1)
#hist(pchisq(REL,df=1))
list(myPower=mean(REL),
chenPower=mean(ChenREL),ChenTT=mean(ChenREL2))
}
myExp(n=10,p=100,r=1,TheLambda = 100,varEpsilon = 4,betaMod =0.01)
library(xtable)
myTable=xtable(resul)
digits(myTable)=c(0,0,0,2,2,2)
print(myTable,include.rownames = FALSE) |
658b7954977b5ebfd82e0de9e65ff376f39d53f6 | f26c87ff8e54310480c7822678a2848b40756770 | /scripts/example_code_seasonal_question.R | 80e8065156f8107f32c49b7ea3aa6a67c41990bf | [] | no_license | aranryan/lodfor | 7164cc96fa3cdde72fa7c2df16e8f41f7002c089 | 2ae85a637fafd6b42fc01167ff8a01ea83233a34 | refs/heads/master | 2020-05-21T23:24:58.364367 | 2016-12-01T13:57:33 | 2016-12-01T13:57:33 | 24,097,614 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 1,073 | r | example_code_seasonal_question.R | # example code I used in asking question posted on
#https://github.com/christophsax/seasonal/issues/157
library(seasonal)
library(xts)
# code from Example 7.14 at
# http://www.seasonal.website/examples.html#seats
# Baseline X-13/SEATS model: trading-day regressor (AIC-tested),
# AO/LS/TC outlier detection, 36-month forecast horizon.
mmod1 <- seas(AirPassengers,
              regression.aictest = "td",
              outlier.types = c("ao", "ls", "tc"),
              forecast.maxlead = 36
)
# accessing s16 as combined seasonal factors
tail(as.xts(series(mmod1, "s16")), 48)
# same code, but with line added to append forecast
# (seats.appendfcst="yes" appends forecast periods to the s16 output)
mmod2 <- seas(AirPassengers,
              regression.aictest = "td",
              outlier.types = c("ao", "ls", "tc"),
              forecast.maxlead = 36,
              seats.appendfcst="yes"
)
tail(as.xts(series(mmod2, "s16")), 48)
# same code, but with forecast appended and a longer 48-month horizon
mmod3 <- seas(AirPassengers,
              regression.aictest = "td",
              outlier.types = c("ao", "ls", "tc"),
              forecast.maxlead = 48,
              seats.appendfcst="yes"
)
tail(as.xts(series(mmod3, "s16")), 48)
# NOTE(review): installing a package inside an analysis script is a side
# effect; normally run once interactively instead.
install.packages("x13binary")
|
aa3cd2c5172a1c301a11fb77f5979432e30254fa | 4aa2524aecc11314133b495c2ce8d386cf6acba7 | /source/init.R | b5dc9063dd2caf13d9afd7899f6191e8d9228a98 | [] | no_license | lebihanbastien/RFTDA | 7bfc109a925be538939e8d760c980bc37860ac62 | 6c7f7cc787f6c32e458f2c7e0b664942039d0e0a | refs/heads/master | 2021-01-19T13:07:21.390443 | 2017-08-03T12:42:34 | 2017-08-03T12:42:34 | 88,065,848 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 1,567 | r | init.R | ################################################################################
# Initialization script for an R session: sets numeric display precision,
# attaches every package the project depends on, then sources the project
# helper files.
#
# BLB 2017
#
# Note: to install a package locally:
# install.packages(pkgs ='/home/b.le-bihan/R/tikzDevice_0.9.tar.gz', repos = NULL)
#
################################################################################

#-------------------------------------------------------------------------------
# R options
#-------------------------------------------------------------------------------
options(digits = 15)

#-------------------------------------------------------------------------------
# Attach libraries (same order as before, so the search path is unchanged)
#-------------------------------------------------------------------------------
project_packages <- c(
  "plyr",
  "ggplot2",
  "reshape2",
  "scales",
  "grid",
  "tikzDevice",
  "latex2exp",
  "RColorBrewer",
  "Rgnuplot",
  # "plot3D",  # intentionally disabled
  "rgl",
  "gtable"
)
for (pkg in project_packages) {
  library(pkg, character.only = TRUE)
}

#-------------------------------------------------------------------------------
# Source project helper files (load order preserved: later files may rely on
# definitions from earlier ones)
#-------------------------------------------------------------------------------
helper_files <- c(
  "rgl_init.R",
  "folder.R",
  "plot.R",
  "env.R",
  "userFriendly.R",
  "multiplot.R",
  "dffbinary.R",
  "parameters.R",
  "RGnuplot.R",
  "addgrids3d.R",
  "scattex3D.R",
  "ggplot2tikz.R",
  "rbind_cc.R",
  "get_cont.R",
  "fpp_path_traj.R",
  "fpp_path_traj_phd.R"
)
for (helper in helper_files) {
  source(file.path("source", helper))
}
979fa0d903c90f82074833f394247b48675303e1 | 6ece01e793b19bde7fa69c0ee25eb3ce240dd3f7 | /oldExampleRFiles/parametricPostHoc.R | 4719d7aea0a92d985ef178f93612c2ea44e46a5a | [] | no_license | neilhatfield/STAT461 | af4d8a7f56a9794133e16e237d6664b4f428c103 | 5ca5c4b8ce4d5559b0e0e5ce0dc59b945156d3e8 | refs/heads/main | 2023-09-03T22:28:26.594878 | 2023-09-03T18:12:58 | 2023-09-03T18:12:58 | 233,878,778 | 4 | 0 | null | null | null | null | UTF-8 | R | false | false | 3,524 | r | parametricPostHoc.R | ratLiver <- read.table(
  # (continuation of the read.table() call opened above) -- the rat-liver
  # data set is fetched from GitHub as comma-separated text.
  file = "https://raw.github.com/neilhatfield/STAT461/master/dataFiles/ratLiver.dat",
  header = TRUE, sep = ",")

# Treat diet as a categorical factor before fitting the one-way ANOVA.
ratLiver$diet <- as.factor(ratLiver$diet)

# Sum-to-zero (effect) coding for unordered factors, polynomial for ordered.
options("contrasts" = c("contr.sum","contr.poly"))
ratModelA <- aov(liverWeight ~ diet, data = ratLiver)

# NOTE(review): this script uses %>% throughout but never attaches
# magrittr/dplyr -- presumably the pipe is attached elsewhere in the session;
# confirm before running standalone.
# NOTE(review): "boardered" (used in every kable_styling call below) is not a
# valid bootstrap option ("bordered" is); the strings are left untouched here.

# Pairwise Tests
## Tukey HSD at a 90% family-wise confidence level
post.hoc <- TukeyHSD(ratModelA,conf.level=0.9)

## Kable Code for Tukey HSD
knitr::kable(
  post.hoc$diet,
  digits = 3,
  caption = "Post Hoc Tukey HSD Comparisons",
  col.names = c("Difference", "Lower Bound",
                "Upper Bound", "Adj. p-Value"),
  align = 'lcccc'
) %>%
  kableExtra::kable_styling(
    bootstrap_options = c("condensed", "boardered"),
    font_size = 12, latex_options = "HOLD_position")

## Pairwise Method -- Bonferroni-adjusted pairwise t tests
pairwiseList <- pairwise.t.test(ratLiver$liverWeight, ratLiver$diet,
                                p.adjust.method = "bonferroni")

## Kable Code for Pairwise.t.Test
# The gsub() below title-cases the adjustment-method name ("bonferroni" ->
# "Bonferroni") using a Perl \U upper-case backreference.
knitr::kable(
  pairwiseList$p.value,
  digits = 3,
  caption = paste("Post Hoc",
                  gsub("(^|[[:space:]])([[:alpha:]])", "\\1\\U\\2",
                       pairwiseList$p.adjust.method,
                       perl = TRUE),
                  "Comparisons"),
  align = rep('c', nrow(pairwiseList$p.value))
) %>%
  kableExtra::kable_styling(
    bootstrap_options = c("condensed", "boardered"),
    font_size = 12, latex_options = "HOLD_position") %>%
  kableExtra::footnote(general = "Rows and Columns are Treatment Levels.")

## DescTools Pairwise Method (Bonferroni, 90% confidence)
dtPHT <- DescTools::PostHocTest(aov(liverWeight~diet, data=ratLiver),
                                method = "bonf", conf.level = 0.9)

## Kable Code for DescTools
knitr::kable(
  dtPHT$diet,
  digits = 3,
  caption = paste("Post Hoc",
                  attr(dtPHT, "method"),
                  "Comparisons"),
  col.names = c("Difference", "Lower Bound",
                "Upper Bound", "Adj. p-Value"),
  align = 'lcccc'
) %>%
  kableExtra::kable_styling(
    bootstrap_options = c("condensed", "boardered"),
    font_size = 12, latex_options = "HOLD_position")

# Connecting Letters Report (groups sharing a letter are not significantly
# different at the given threshold)
multcompView::multcompLetters4(ratModelA, post.hoc,
                               threshold = 0.1)

## Boxplot with connecting letters--Does not allow you to set
## the threshold.
multcompView::multcompBoxplot(liverWeight ~ diet,
                              data = ratLiver,
                              compFn = "TukeyHSD",
                              plotList = list(
                                boxplot = list(fig = c(0, 0.85, 0, 1)),
                                multcompLetters = list(fig = c(0.6, 1, 0.17, 0.87),
                                                       fontsize = 12, fontface = NULL))
)

# Special Comparisons-Dunnett's Test: every diet vs. control level "1"
dunnett <- DescTools::DunnettTest(
  liverWeight ~ diet,
  data = ratLiver,
  control = "1",
  conf.level = 0.9)

## Kable Code for Dunnett's Test
knitr::kable(
  dunnett$`1`,
  digits = 3,
  caption = paste("Post Hoc Comparisons--Dunnett's Test"),
  col.names = c("Difference", "Lower Bound",
                "Upper Bound", "Adj. p-Value"),
  align = 'lcccc'
) %>%
  kableExtra::kable_styling(
    bootstrap_options = c("condensed", "boardered"),
    font_size = 12, latex_options = "HOLD_position")

# Effect Sizes -- anova.PostHoc() comes from the course's ANOVATools.R,
# sourced over the network here.
source("https://raw.github.com/neilhatfield/STAT461/master/ANOVATools.R")
knitr::kable(
  anova.PostHoc(ratModelA),
  digits = 3,
  caption = "Post Hoc Comparison Effect Sizes",
  col.names = c("Pairwise Comparison","Cohen's d", "Hedge's g",
                "Prob. Superiority"),
  align = 'lccc'
) %>%
  kableExtra::kable_styling(
    bootstrap_options = c("condensed", "boardered"),
    font_size = 12, latex_options = "HOLD_position")
f8d35b11ec496ec21475a09313af35970b68035d | 755c9d8af86f33c64bb0532f46a6d875e9728e13 | /scripts/STEP05_candidate.R | ed24a7f24d65fe44c45b402496ef61e09c979c51 | [] | no_license | jmzhang1911/CRC_lncRNA_backup5.0 | f2a08eb81b9806115a1053c690433706ef066845 | 56a48cac477461b5f236afe35bf5a8f79f68c6f6 | refs/heads/main | 2023-04-02T01:53:14.521092 | 2020-12-18T01:07:44 | 2020-12-18T01:07:44 | 320,638,764 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 10,312 | r | STEP05_candidate.R | rm(list = ls())
# STEP05, part 1: intersect the dynamic genes (maSigPro) with the DE genes
# (DESeq-style results), split them into lncRNA vs coding, run pathway
# enrichment, and save the intermediate objects under outcomes/candidate/.
# Helper functions (mkdir, myddnor, mycoranalysis2, mygetenrichedgenefromkk2,
# mygetallpathwaydf, mypathwayplot, ...) come from Utils.R / STEP09_endplot.R.
source('Utils.R')
#===> our dynamic-DE-expression genes = dynamic + DE
#==> 4875 = 807lnc + 4068coding
load('outcomes/inputdata/input.RData')
load('outcomes/DEanalysis/DE_results_inte.RData') # DE genes
load('outcomes/masigpro/dynamicdf.RData') # dynamic genes
mkdir('outcomes/candidate')

# DE genes with |log2FC| >= 2 (log2 scale >= 1) in either direction.
dedf <- dplyr::filter(DE_results_inte$DE_each_inte,
                      direction %in% c('up','down'),
                      abs(log2FoldChange) >= 1)
table(unique(dynamicdf$gene_id) %in% unique(dedf$gene_id))
#4875 dynamic and de genes
dynamic_de_df <- dynamicdf %>% dplyr::filter(gene_id %in% dedf$gene_id)
#dynamic and DE coding:4068;noncoding:807
table(dynamic_de_df$gene_type)
#get the dy genes
dy_lncdf <- dynamic_de_df[dynamic_de_df$gene_type == 'lnc',];dim(dy_lncdf) #807
dy_codingdf <- dynamic_de_df[dynamic_de_df$gene_type == 'coding',];dim(dy_codingdf)

# dynamic or not dynamic lnclist
# The if(T){...} wrappers throughout this script are manual run/skip toggles.
if(T){
  lncRNA_expr_class_df <- input_matrix_count$lncRNA_count %>% dplyr::select(gene_id) %>%
    mutate(expr_class = if_else(gene_id %in% dy_lncdf$gene_id, 'dynamic','not_dynamic'))
  dynamic_lnc <- dy_lncdf$gene_id
  # (trailing comma in pull() below is harmless: the `name` argument is empty)
  not_dynamic_lnc <- lncRNA_expr_class_df %>%
    dplyr::filter(expr_class == 'not_dynamic') %>%
    pull(gene_id, )
  lncRNA_expr_class_list <- list(lncRNA_expr_class_df = lncRNA_expr_class_df,
                                 dynamic_lnc =dynamic_lnc,
                                 not_dynamic_lnc = not_dynamic_lnc)
  write.table(dynamic_lnc, file = 'outcomes/candidate/dy_lnclist.txt',
              quote = F, row.names = F, col.names = F)
  save(lncRNA_expr_class_list, file = 'outcomes/candidate/lncRNA_expr_class_list.RData')
}
# dynamic genes enrichment analysis
source('scripts/STEP09_endplot.R')
df <- mygetallpathwaydf(genelist = dy_codingdf$gene_id,
                        prob = CRC_related_pathway_prob)
mypathwayplot(df, 'Enrchment of dynamic coding genes ')
# Coding genes nearest to each dynamic lncRNA (produced by an external job).
lnc_closed_genes <- read.table('outcomes/candidate/dynamic_lnc_closest_coding_gene.txt')$V1
df2 <- mygetallpathwaydf(genelist = lnc_closed_genes,
                         prob = CRC_related_pathway_prob)
mypathwayplot(df2, 'enrichment of dynamic-lncRNA')
table(dynamic_de_df$gene_id %in% lnc_closed_genes)

# Expression correlation across all dynamic+DE genes: normalise, transpose to
# samples x genes, then run the pairwise correlation helper.
if(T){
  cor_results <- input_matrix_count$mRNA_lncRNA_count %>%
    dplyr::filter(gene_id %in% dynamic_de_df$gene_id) %>%
    column_to_rownames(var = 'gene_id') %>% myddnor() %>%
    as.matrix() %>% t() %>%
    mycoranalysis2()
  #save(cor_results, file = 'outcomes/candidate/cor_results.RData')
}
# Keep coding->lncRNA pairs with |r| >= 0.7 and p < 0.05.
cor_dy_lnc_gene <- dplyr::filter(cor_results, source %in% dy_codingdf$gene_id,
                                 target %in% dy_lncdf$gene_id,
                                 abs(r_value) >=0.7, p_value < 0.05)
length(unique(cor_dy_lnc_gene$source)) #3999
length(unique(cor_dy_lnc_gene$target)) #804
save(cor_dy_lnc_gene, file = 'outcomes/candidate/cor_dy_lnc_gene.RData')
# CRC dynamic-coding genes list
if(T){
  dy_crcgene <- mygetenrichedgenefromkk2(dynamic_de_df$gene_id) %>%
    distinct(ENSEMBL) %>% pull(ENSEMBL);length(dy_crcgene) #105
  save(dynamic_de_df, dy_crcgene, file = 'outcomes/candidate/dy_results.RData')
}
#===> calculate the correlation between dy-coding and dy-noncoding
# Reload the saved intermediates so this section can be run independently.
load('outcomes/inputdata/input.RData')
load('outcomes/candidate/dy_results.RData')
load('outcomes/candidate/lncRNA_expr_class_list.RData')
library(Hmisc)
dim(dynamic_de_df)
# Same normalise/transpose/correlate pipeline as above, but this time the
# result IS saved to disk.
if(T){
  cor_results <- input_matrix_count$mRNA_lncRNA_count %>%
    dplyr::filter(gene_id %in% dynamic_de_df$gene_id) %>%
    column_to_rownames(var = 'gene_id') %>% myddnor() %>%
    as.matrix() %>% t() %>%
    mycoranalysis2()
  save(cor_results, file = 'outcomes/candidate/cor_results.RData')
}
#===> filter dy-crc-coding relatived dy-lnc
# Dynamic lncRNAs strongly correlated (|r| >= 0.7, p < 0.05) with the
# CRC-pathway dynamic coding genes.
if(T){
  cor_lnc_crcgene <- dplyr::filter(cor_results, source %in% dy_crcgene,
                                   target %in% lncRNA_expr_class_list$dynamic_lnc,
                                   abs(r_value) >=0.7, p_value < 0.05)
  cor_crcgene <- unique(cor_lnc_crcgene$source);length(cor_crcgene) #104
  cor_lnc <- unique(cor_lnc_crcgene$target);length(cor_lnc) #737
  crc_corgenelist <- list(cor_lnc_crcgene = cor_lnc_crcgene,
                          cor_lnc = cor_lnc,
                          cor_crcgene = cor_crcgene)
  save(crc_corgenelist, file = 'outcomes/candidate/crc_corgenelist.RData')
}
#===> Find the max trans of dy-lncRNA
#===> Find the TSS of dy-lncRNA
###########=====> do it in jobs >>>>>>###########
#===> saved as FindTSS_maxlen.sh in jobs
###########=====> do it in jobs >>>>>>###########
load('outcomes/candidate/dy_results.RData')
# maxtrans.txt is produced by the external FindTSS_maxlen.sh job.
maxtrans_df <- read.table('outcomes/candidate/maxtrans.txt', sep = '\t',
                          comment.char = '#',header = T)
save(maxtrans_df, file = 'outcomes/candidate/maxtrans_df.RData')
write.table(maxtrans_df$trans_id, file = 'outcomes/candidate/maxlentransofdylnc.txt',
            quote = F, row.names = F, col.names = F)
#===> find candidate lnc genes
load('outcomes/coranalysis/cor_analysis.RData')
de_lnc <- dplyr::filter(DE_results_inte$DE_each_inte,
                        direction %in% c('up','down'),
                        abs(log2FoldChange) >= 1,
                        gene_id %in% input_matrix_count$lnc_genelist)
# NOTE(review): dy_de_crc_coding / dy_de_lnc are not defined anywhere in this
# file -- presumably loaded from cor_analysis.RData or created by Utils.R;
# confirm before running this section standalone.
candidate_lnc_df <- cor_results %>%
  dplyr::filter(source %in% dy_de_crc_coding, target %in% dy_de_lnc,
                abs(r_value) >= 0.7, p_value < 0.05,
                target %in% de_lnc$gene_id)
length(unique(candidate_lnc_df$source)) # 54 dy and de genes
length(unique(candidate_lnc_df$target)) # 659 dy and de lnc FC>=2
# co-expressied with crc-dy-gene lncRNA
# 659 dy_de_FC>=2 lnc co-expression with 54 dy_de_FC>=2 gene
# candidate_lnc = candidate_lnctrans_help.txt + candidate_lnctrans_help2.txt
# The chained group_by/top_n calls reduce each gene to one representative
# transcript: longest, then most exons, then lexicographically last trans_id.
co_expr_candidate_lnc <- dplyr::filter(input_matrix_count$exon_trans,
                                       gene_id %in% candidate_lnc_df$target) %>%
  group_by(gene_id) %>% top_n(1, translen) %>%
  group_by(gene_id) %>% top_n(1, exon_num) %>%
  group_by(gene_id) %>% top_n(1, trans_id) %>%
  pull(trans_id)
write.table(co_expr_candidate_lnc, file = 'outcomes/candidate_lnc.txt',
            col.names = F, row.names = F, quote = F)
# all dy-lnc analysis by annolnc2
# old file = candidate_lnctrans_help3.txt
# Remaining dynamic+DE lncRNAs (not already in the co-expressed set);
# %!in% is a "not in" helper, presumably defined in Utils.R.
all_candidate_lnc <- dplyr::filter(input_matrix_count$exon_trans,
                                   gene_id %in% dy_de_lnc,
                                   gene_id %!in% candidate_lnc_df$target) %>%
  group_by(gene_id) %>% top_n(1, translen) %>%
  group_by(gene_id) %>% top_n(1, exon_num) %>%
  group_by(gene_id) %>% top_n(1, trans_id) %>%
  pull(trans_id)
write.table(all_candidate_lnc, file = 'outcomes/all_candidate_lnc.txt',
            col.names = F, row.names = F, quote = F)
#===> lncRNA classes
lncRNA_classes <- read_table2("data/lncRNA_classes_pbs.txt")
# NOTE(review): this mutate() is unfinished -- the second `class = if_else()`
# has no arguments and will error at runtime; 'overlapping_sence' also looks
# like a typo for 'overlapping_sense'.  Left untouched pending intent.
lncRNA_classed_df <- lncRNA_classes %>%
  dplyr::filter(isBest == 1) %>%
  mutate(class = if_else(direction == 'sense' & subtype == 'overlapping',
                         'overlapping_sence', 'NA'),
         class = if_else())
# Quick frequency checks of the classification columns.
table(lncRNA_classes$direction)
table(lncRNA_classes$type)
table(lncRNA_classes$subtype)
table(lncRNA_classes$location)
# Antisense/genic pairs, keyed by "partner=lncRNA" for joining later.
table4 <- lncRNA_classes %>% dplyr::filter(isBest == 1, direction == 'antisense',
                                           type == 'genic') %>%
  mutate(lnk = str_c(partnerRNA_gene, lncRNA_gene, sep = '='))
cor_results %>% head()
# Strongly differentially expressed genes (|log2FC| >= 4), either direction.
# Fixed: the original used `direction == c('up','down')`, which recycles the
# comparison vector (row 1 vs 'up', row 2 vs 'down', alternating) and silently
# drops valid rows; `%in%` matches the pattern used everywhere else in this
# script (e.g. the dedf and de_lnc filters above).
de_lnc <- DE_results_inte$DE_each_inte %>%
  dplyr::filter(direction %in% c('up','down'),
                abs(log2FoldChange) >= 4) %>%
  distinct(gene_id) %>%
  pull(gene_id)
colnames(input_matrix_count$mRNA_lncRNA_count)
# Long-format expression for the strongly-DE genes: normalise, pivot the
# sample columns (control_1 .. week10_3), strip replicate suffixes, and order
# the time points for plotting.
df1 <- input_matrix_count$mRNA_lncRNA_count %>% dplyr::filter(gene_id %in% de_lnc) %>%
  column_to_rownames(var = 'gene_id') %>% myddnor() %>% as.data.frame() %>%
  rownames_to_column(var = 'gene_id') %>%
  pivot_longer(cols = control_1:week10_3, names_to = 'time', values_to = 'levels') %>%
  mutate(time = str_replace_all(time, '_[0-9]', ''),
         time = factor(time, levels = c('control','week2','week4','week7','week10')))
p1 <- ggplot(df1, aes(x = time, y = log10(levels+1))) +
  geom_point() +
  geom_line(aes(group = gene_id), size = 1.25)
# Same plot for the dynamic+DE lncRNA set.
df2 <- input_matrix_count$mRNA_lncRNA_count %>% dplyr::filter(gene_id %in% dy_de_lnc) %>%
  column_to_rownames(var = 'gene_id') %>% myddnor() %>% as.data.frame() %>%
  rownames_to_column(var = 'gene_id') %>%
  pivot_longer(cols = control_1:week10_3, names_to = 'time', values_to = 'levels') %>%
  mutate(time = str_replace_all(time, '_[0-9]', ''),
         time = factor(time, levels = c('control','week2','week4','week7','week10')))
p2 <- ggplot(df2, aes(x = time, y = log10(levels+1))) +
  geom_point() +
  geom_line(aes(group = gene_id), size = 1.25)
# Side-by-side layout; `+` on two ggplots needs patchwork attached.
p1 + p2

table(de_lnc %in% dy_de_lnc)
table(dy_de_lnc %in% de_lnc)
# Correlated pairs restricted to the antisense/genic partner pairs (table4).
table3 <- cor_results %>%
  dplyr::filter(source %in% dy_de_crc_coding, target %in% de_lnc,
                abs(r_value) >=0.7, p_value < 0.05) %>%
  mutate(lnk = str_c(source, target, sep = '=')) %>%
  dplyr::filter(lnk %in% table4$lnk)
table5 <- cor_results %>%
  dplyr::filter(source %in% dy_de_crc_coding, target %in% dy_de_lnc,
                abs(r_value) >= 0.7, p_value < 0.05,
                target %in% de_lnc)
length(unique(table5$target))
length(unique(table5$source))
load('outcomes/inputdata/input.RData')
candidata <- unique(table5$target)
count_len <- input_matrix_count$exon_trans
head(count_len)
dim(count_len)
# One representative transcript per candidate gene (longest / most exons /
# lexicographically last id), same selection rule as above.
key_lnc <- dplyr::filter(count_len,
                         gene_id %in% candidata) %>%
  group_by(gene_id) %>% top_n(1, translen) %>%
  group_by(gene_id) %>% top_n(1, exon_num) %>%
  group_by(gene_id) %>% top_n(1, trans_id) %>%
  pull(trans_id)
write.table(key_lnc, file = 'outcomes/candidate_lnctrans.txt',
            col.names = F, row.names = F,
            quote = F)
tmp_lnc <- table5 %>% distinct(target) %>% pull(target)
tmp_coding <- lncRNA_classes %>%
  dplyr::filter(isBest == 1, lncRNA_gene %in% tmp_lnc) %>%
  distinct(partnerRNA_gene) %>% pull(partnerRNA_gene)
tmp_enrich <- mygetenrichedgenefromkk2(tmp_coding)
# NOTE(review): `table` in the second join below resolves to the base
# function table(), not a data frame -- this will error; the intended object
# is probably `tmp_enrich` (which has an ENSEMBL column).  Confirm.
df <- left_join(table3, table4, by = 'lnk') %>%
  left_join(table, by = c('source' = 'ENSEMBL'))
|
a0aeef6284f1331c7f2e10d7834712a46248c7bf | 309d8ffea9188c9cd62826d0f310185f1a7c44c0 | /Results/purity_ploidy/R/purity_ploidy_import.R | 48a00dfd0cb3b1435b1c4592414738b780049f93 | [] | no_license | shbrief/PureCN_manuscript | 7e0d3846d094dcb2943cda5bb9068ecc22630256 | 77ba1baaa6ac85cbd58a04b35ed8de1f79eed17a | refs/heads/master | 2023-02-16T04:45:03.774942 | 2021-01-08T04:08:01 | 2021-01-08T04:08:01 | 167,900,850 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 786 | r | purity_ploidy_import.R | # Import purity and ploidy calls from PureCN outputs
# Build a per-sample purity/ploidy table from PureCN "optima" CSV outputs.
# Expects `kit`, `purecn_mode`, and the row-parser `puri_ploi()` to be defined
# by the calling script before this file is sourced.
output.dir = file.path("/data/16tb/CNVworkflow", kit, "purecn_output",
                       paste0(kit, "_PureCN"), purecn_mode)
samples = list.files(output.dir)
# One <sample>/<sample>.csv per sample directory; keep only files that exist.
optima_csv = file.path(output.dir, samples, paste0(samples, ".csv"))
optima_csv = optima_csv[file.exists(optima_csv)]

# Parse every CSV and stack the per-sample results row-wise into a data frame.
# (optima_csv[seq_along(optima_csv)] is just optima_csv; kept as-is.)
res = sapply(optima_csv[seq_along(optima_csv)], puri_ploi, USE.NAMES = FALSE) %>%
  cbind %>% t %>% as.data.frame
res$capture_kit = kit

res_all = res # save the additional columns
res = res[, c("submitter_id", "Purity", "Ploidy")]
# Normalise column types after the sapply/cbind round-trip.
res$submitter_id = as.character(res$submitter_id)
res$Purity = as.numeric(res$Purity)
res$Ploidy = as.numeric(res$Ploidy)
# Suffix Purity/Ploidy with the PureCN mode so tables from different modes
# can be merged side by side later.
names(res) = c("SampleId", paste0("Purity_", purecn_mode), paste0("Ploidy_", purecn_mode))
e4c6ae3c39cf7a5f05d27b12d2f92fa7e8e6b0b3 | 47362106110f59c302835ad07dac75a2c069422a | /cachematrix.R | 46413c97e8fb28c657a31431557f827770032680 | [] | no_license | patrickcnkm/ProgrammingAssignment2 | 47b206d503a64dbc789dc5df56ca8c39dc199645 | 2def147415826f3d4cf0bee7255204e03e3c94c5 | refs/heads/master | 2021-01-22T13:08:09.757704 | 2014-09-21T09:38:42 | 2014-09-21T09:38:42 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 1,462 | r | cachematrix.R | ## Put comments here that give an overall description of what your
## functions do.
## makeCacheMatrix: build a cache-aware matrix wrapper.  `x` is the matrix to
## invert and `z` is an optional second argument for solve() (as in
## solve(x, z)); the default matrix() is a 1x1 NA that cacheSolve() treats as
## "no second argument supplied".  Returns a list of closures sharing one
## environment:
##   set(y)                 -- replace both x and z with y, drop cached state
##   get() / getp()         -- read the matrix / the solve() parameter
##   setsolve(s)/getsolve() -- store / read the cached solve() result
##   setparam(p)            -- record the parameter the cache was built with
##   s.param()              -- TRUE iff the recorded parameter still equals z
makeCacheMatrix <- function(x=matrix(),z=matrix()) {
  s1 <- NULL  # cached solve() result
  s2 <- NULL  # the parameter matrix s1 was computed against
  set <- function(y) {
    x <<- y
    z <<- y
    s1 <<- NULL
    s2 <<- NULL
  }
  get <- function() x
  getp <- function() z
  setsolve <- function(solve) s1 <<- solve
  setparam <- function(param) s2 <<- param
  getsolve <- function() s1
  # Cache is valid only while the recorded parameter matches the current z.
  s.param <- function() return(identical(s2,z))
  list(set = set, get = get,getp =getp,
       setsolve = setsolve,setparam = setparam,
       getsolve = getsolve,s.param = s.param)
}
## Compute (and cache) solve(x$get(), x$getp(), ...) for a cache-matrix
## object created by makeCacheMatrix().  The cached answer is reused only
## when it exists AND was computed against the same "param" matrix.
cacheSolve <- function(x, ...) {
  ## Previously cached result, if any.
  s1 <- x$getsolve()
  ## Fixed: `&&` (not `&`) -- an `if` condition must be a single logical, and
  ## short-circuiting skips the s.param() check when there is no cache yet.
  if (!is.null(s1) && x$s.param()) {
    message("getting cached data")
    return(s1)
  }
  data <- x$get()
  spara <- x$getp()
  ## makeCacheMatrix()'s default `z` is matrix() -- a 1x1 NA meaning "no
  ## second solve() argument".  Fixed: `all(is.na(spara))` keeps this a single
  ## logical; the previous bare `is.na(spara)` produced a logical matrix and
  ## broke the `if` whenever a real multi-cell param matrix was supplied.
  if (all(is.na(spara))) {
    s1 <- solve(data, ...)
  } else {
    s1 <- solve(data, spara, ...)
  }
  ## Cache both the result and the param it was computed against.
  x$setsolve(s1)
  x$setparam(spara)
  s1
}
eb209cb57fcdd358841f7873189f3cfbab06d698 | 9076daabcdb89dcf92fb7e6bcb2c534b6202bb9d | /tests/testthat/test-LonLat2XY.R | 75aac14c463143e0d5f54f545a6b819ddcaba15d | [
"CC-BY-3.0"
] | permissive | dkahle/ggmap | dc9a498ae73a41bbc189dbb29af1ad42e7331eb3 | 2cc32a47cf2730f592680f7d698942aaca32382c | refs/heads/master | 2023-08-29T08:23:27.045905 | 2023-03-14T13:59:22 | 2023-03-14T13:59:22 | 2,876,618 | 745 | 245 | null | 2023-06-21T06:15:47 | 2011-11-29T17:14:54 | R | UTF-8 | R | false | false | 421 | r | test-LonLat2XY.R | context("LonLat2XY")
test_that("LonLat2XY example works", {
  # Hard-coded geocode of 'baylor university' so the test stays offline.
  # gc <- geocode('baylor university')
  baylor <- list(lon = -97.11431, lat = 31.54984)
  tile_coords <- LonLat2XY(baylor$lon, baylor$lat, 10)

  # Integer tile indices at zoom level 10 are exact.
  expect_equal(tile_coords$X, 235)
  expect_equal(tile_coords$Y, 417)

  # Within-tile offsets are floating point, so compare with a tolerance.
  expect_true(abs(tile_coords$x - 195.5142) < 0.01)  # float math is hard
  expect_true(abs(tile_coords$y - 88.52267) < 0.01)  # float math is hard
})
b1bbe8b597f7a08007137cdc1967748c7f0cc27f | d104eaae49776e9f1bf929b2a4bc8d54f305e212 | /tests/testthat/test-suffix_edge_tag.R | 0b7f1356e17d80f57888098c32b47ec58d73f793 | [] | no_license | forestgeo/fgeo.misc | 96d1a65360511bba7ef147bca74e507218c5c654 | 3a9386cebc7512df0d4923c364585f9c7a113c2b | refs/heads/master | 2020-04-10T19:14:48.565370 | 2019-06-21T22:41:11 | 2019-06-21T22:41:11 | 161,228,174 | 2 | 2 | null | 2019-02-04T22:50:11 | 2018-12-10T19:48:01 | R | UTF-8 | R | false | false | 1,480 | r | test-suffix_edge_tag.R | library(tibble)
context("suffix_tags_beyond_edge")

test_that("errs with wrong input", {
  # Points at or beyond the plot edge trigger an informative message
  # (not an error) -- first beyond (QX/QY = 21 with x_q = 20), then exactly
  # on the edge (QX/QY = 20).
  expect_message(
    suffix_tags_beyond_edge(
      x = tibble(QX = 21, QY = 21, tag = "01", status = "dead"),
      .match = "dead",
      suffix = "suffix",
      x_q = 20
    )
  )
  expect_message(
    suffix_tags_beyond_edge(
      x = tibble(QX = 20, QY = 20, tag = "01", status = "dead"),
      .match = "dead",
      suffix = "suffix",
      x_q = 20
    )
  )
  # Non-data-frame input must error.
  expect_error(
    suffix_tags_beyond_edge(x = "not dfm",
      .match = "dead",
      suffix = "suffix",
      x_q = 20
    )
  )
  # Data frames with the wrong columns must error -- presumably because the
  # required QX/QY position columns are missing (confirm against the
  # implementation).
  expect_error(
    suffix_tags_beyond_edge(
      x = tibble(x = 21),
      .match = "dead",
      suffix = "suffix",
      x_q = 20
    )
  )
  expect_error(
    suffix_tags_beyond_edge(
      x = tibble(y = 21),
      .match = "dead",
      suffix = "suffix",
      x_q = 20
    )
  )
  expect_error(
    suffix_tags_beyond_edge(
      x = tibble(a = 21),
      .match = "dead",
      suffix = "suffix",
      x_q = 20
    )
  )
})
context("detect_spillover")

test_that("asserts correctly", {
  # In-bounds point (qx = qy = 20 with x_q = y_q = 20): a message is emitted
  # and the return value is FALSE (no spillover).
  expect_false(
    expect_message(
      detect_spillover(x = tibble(qx = 20, qy = 20), x_q = 20, y_q = 20)
    )
  )
  # qx beyond x_q: spillover detected (TRUE).
  expect_true(
    expect_message(
      detect_spillover(x = tibble(qx = 21, qy = 20), x_q = 20, y_q = 20)
    )
  )
  # qy beyond y_q: spillover detected (TRUE).
  expect_true(
    expect_message(
      detect_spillover(x = tibble(qx = 20, qy = 21), x_q = 20, y_q = 20)
    )
  )
})
|
b23451ee756c52580e6a16b51d2ac4069727f2ca | 7e7bb7bfdf62c24b7fecf78f5247d28839728710 | /MAP Analysis/2013-2014/src/middle_schools_waterfalls_F13S14.R | 2772dd3a65b75422c33182b01a36b79bc3009f3a | [] | no_license | kippchicago/Data_Analysis | 1ad042d24c7a1e11e364f39c694692f5829363a4 | 8854db83e5c60bc7941654d22cbe6b9c63613a7f | refs/heads/master | 2022-04-09T10:10:43.355762 | 2020-02-20T18:03:40 | 2020-02-20T18:03:40 | 5,903,341 | 2 | 0 | null | null | null | null | UTF-8 | R | false | false | 1,398 | r | middle_schools_waterfalls_F13S14.R | # Script to get Winter Data
# ProjectTemplate bootstraps the project: load.project() runs the config,
# data-loading and munge scripts, which is where map.F13S14, PrepMAP(),
# pdf_waterfall(), info() and logger come from.
require(ProjectTemplate)
load.project()

info(logger, "Prepping F13-S14 data")
# Pair each student's Fall 2013 score with their Spring 2014 score.
FW.dt<-PrepMAP(map.F13S14, season1="Fall13", season2="Spring14")

info(logger, "Print middle Waterfall PDFs by grade.")
#Middle Schools first (since they need students by grade)
schools=list("KAMS", "KCCP", "KBCP")
# One waterfall PDF per school; extra arguments are forwarded to
# pdf_waterfall() by lapply().
lapply(schools,
       pdf_waterfall,
       .data=FW.dt,
       .by="grade",
       season1="Fall13",
       season2="Spring14",
       alpha=.6)
# Tabular summary for Winter 14 (should be abstracted and moved to liv)
# Summarise MAP growth attainment for one (or more) schools: counts and
# percentages of students meeting typical and college-ready growth targets,
# grouped by school, grade and subject.
# NOTE(review): the function reads Winter14_* columns, but FW.dt above was
# prepped with season2 = "Spring14" -- confirm the column names PrepMAP()
# produces before relying on this summary.  ("Collge Ready" is a typo in a
# column-label string; left untouched as it is output text.)
tabSummaryMAP <- function(.data, school="KAMS"){
  # copy() so the data.table modifications below never touch the caller's
  # object (data.tables are modified by reference).
  dt<-copy(.data)
  dt.sum<-dt[SchoolInitials %in% school,
             list("Total Tested"= .N,
                  "# >= Typical" = sum(Winter14_RIT>=ProjectedGrowth),
                  "% >= Typical" = round(sum(Winter14_RIT>=ProjectedGrowth)/.N,2),
                  "# >= College Ready" = sum(Winter14_RIT>=CollegeReadyGrowth),
                  "% >= Collge Ready" = round(sum(Winter14_RIT>=CollegeReadyGrowth)/.N,2)),
             by=list(SchoolInitials,
                     Winter14_Grade,
                     Subject)]
  setnames(dt.sum, c("SchoolInitials", "Winter14_Grade"),
           c("School", "Grade"))
  # Return sorted for readable printing.
  dt.sum[order(School, Subject, Grade)]
}

# Print a summary per school, then export the KAMS table.
lapply(schools, tabSummaryMAP, .data=FW.dt)
write.csv(tabSummaryMAP(FW.dt, "KAMS"), "reports/MAP_Winter_14_KAMS_.csv")
|
a89732fdc190625abebf82d12ceb18a472bdbd04 | 34160365a65aca8257cc940abc0d3081a6112aef | /2019_08_28/00_복습.R | 03f373b14c1af3a6d978053d42eceddf17a745f6 | [] | no_license | sungminhong90/BigDataClassReview_R | 63b947e2b5a2e83d29e66a221e7b90fbbdfbfbf4 | c1ff1bbbaa062c411562067d4d2d41bfdd46b132 | refs/heads/master | 2020-07-08T03:23:32.802388 | 2019-09-25T13:42:14 | 2019-09-25T13:42:14 | 203,550,634 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 838 | r | 00_복습.R | #Summary
#현재 디렉터리 getwd()/ setwd()
#seq (0,100,2) <- 1부터 100까지 짝수만
# search() <- from package installation
# str()
# is() / class() <- data type 확인 (R은 대소문자 구분: Is()는 없음)
#names()/colnames()/summary()
## 데이터 읽어오기
# read.table('파일명', sep ='/')
# read_excel
# read.csv()
## 가장 많이 사용되는 자료형
## => data frame (단, 행과열이 같아야 한다?????)
##통계기본
# mean()/ #max()/#min()/#median()/ #1-4분위 수? => #IQR
# quantile() (25%/50%/75%100% 값 구해줌)
##데이터 쓰기
#write.csv
## R data file (.rda)
# save()
# load()
## ggplot2/ dplyr /
# 행추출 -> filter()
# 열 추출 -> select()
# 정렬 -> arrange()
# 변수추가 -> mutate()
# 통계치 산출 -> 먼저 group_by로 묶는다 -> summarise()
# 데이터 합치기 -> left_join/ bind_rows/merge
|
1fd34b636c6275e66c2784c780d53aaaf1e9fc85 | 526b00ed35c5efd13e51de652ac96f34456770bd | /cachematrix.R | 1d3656ec0310e5ceb4baf3c5b305f3ccfead80ad | [] | no_license | hartjm/ProgrammingAssignment2 | cd679417e38557d11b906bd393b8c39d28de7caa | 5d777bd47067e90c7e6cdf03ee6395cf68a0773e | refs/heads/master | 2020-12-24T11:17:22.013611 | 2014-07-24T16:15:08 | 2014-07-24T16:15:08 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 1,854 | r | cachematrix.R | ## Solution to the CacheMatrix problem whereby we want to calculate the inverse
## of a matrix and also cache the result, because inversion can be expensive.
## The wrapper is a closure-based "object": a list of four accessor functions
## that all share one environment holding the matrix and its cached inverse
## (a facade, in OO terms).  cacheSolve() below understands this signature.

## Build the cache-aware matrix wrapper.
makeCacheMatrix <- function(x = matrix()) {
  # Cached inverse; NULL until cacheSolve() stores a value.
  cached_inverse <- NULL

  # Replace the wrapped matrix; any previously cached inverse is now stale.
  set <- function(new_matrix) {
    x <<- new_matrix
    cached_inverse <<- NULL
  }

  # Expose the four accessors as a named list.
  list(
    set = set,
    get = function() x,
    setinverse = function(inverse) cached_inverse <<- inverse,
    getinverse = function() cached_inverse
  )
}
## Return the inverse of the special matrix `x` built by makeCacheMatrix().
## On the first call the inverse is computed via solve() (extra arguments are
## forwarded) and stored in the wrapper; later calls announce the cache hit
## with a message and return the stored value.
cacheSolve <- function(x, ...) {
  inv <- x$getinverse()
  if (is.null(inv)) {
    # Cache miss: compute, then remember for subsequent callers.
    inv <- solve(x$get(), ...)
    x$setinverse(inv)
  } else {
    message("getting cached data")
  }
  inv
}
|
158988b3f7f146f4b3fcdaa669b9e70ce1154644 | dff7bca022a3d511703fae341e1c9961d4fd1648 | /R/query-select.R | e13aa34fe4b714bdbac86a7f164d92d25c0572e2 | [
"MIT"
] | permissive | mkearney/dbplyr | a7aeda6187cfdc7675535f329837e349599779dc | 6873d6482a1cfa076b4d49d0787485ab996cc3cd | refs/heads/master | 2020-04-17T12:15:20.735504 | 2019-01-19T23:35:43 | 2019-01-19T23:35:43 | 166,572,806 | 1 | 0 | NOASSERTION | 2019-01-19T17:17:18 | 2019-01-19T17:17:17 | null | UTF-8 | R | false | false | 4,112 | r | query-select.R | #' @export
#' @rdname sql_build
select_query <- function(from,
                         select = sql("*"),
                         where = character(),
                         group_by = character(),
                         having = character(),
                         order_by = character(),
                         limit = NULL,
                         distinct = FALSE) {
  # A select_query is a validated list of SQL clause components tagged with
  # classes "select_query"/"query".  Validate everything up front so a
  # malformed query fails at construction rather than at render time: every
  # clause is a character vector, `limit` is a single number (or NULL), and
  # `distinct` is a single flag.
  stopifnot(is.character(select))
  stopifnot(is.character(where))
  stopifnot(is.character(group_by))
  stopifnot(is.character(having))
  stopifnot(is.character(order_by))
  stopifnot(is.null(limit) || (is.numeric(limit) && length(limit) == 1L))
  stopifnot(is.logical(distinct), length(distinct) == 1L)

  components <- list(
    from = from,
    select = select,
    where = where,
    group_by = group_by,
    having = having,
    order_by = order_by,
    distinct = distinct,
    limit = limit
  )
  structure(components, class = c("select_query", "query"))
}
#' @export
print.select_query <- function(x, ...) {
  # Header: "<SQL SELECT>" or "<SQL SELECT DISTINCT>".
  cat(
    "<SQL SELECT",
    if (x$distinct) " DISTINCT", ">\n",
    sep = ""
  )
  # FROM is rendered as SQL with newlines flattened so the (possibly nested)
  # subquery fits on one display line.
  cat("From: ", gsub("\n", " ", sql_render(x$from, root = FALSE)), "\n", sep = "")
  # Remaining clauses are shown only when non-empty; select is compared
  # implicitly via length (the default sql("*") has length 1, so it prints).
  if (length(x$select)) cat("Select: ", named_commas(x$select), "\n", sep = "")
  if (length(x$where)) cat("Where: ", named_commas(x$where), "\n", sep = "")
  if (length(x$group_by)) cat("Group by: ", named_commas(x$group_by), "\n", sep = "")
  if (length(x$order_by)) cat("Order by: ", named_commas(x$order_by), "\n", sep = "")
  if (length(x$having)) cat("Having: ", named_commas(x$having), "\n", sep = "")
  if (length(x$limit)) cat("Limit: ", x$limit, "\n", sep = "")
}
#' @export
sql_optimise.select_query <- function(x, con = NULL, ...) {
  # Only a select-of-select can be collapsed.
  if (!inherits(x$from, "select_query")) {
    return(x)
  }
  # Optimise the subquery first (recursively flattens deeper nesting).
  from <- sql_optimise(x$from)

  # If all outer clauses are executed after the inner clauses, we
  # can drop them down a level
  outer <- select_query_clauses(x)
  inner <- select_query_clauses(from)

  if (length(outer) == 0 || length(inner) == 0)
    return(x)

  # Clause factors are ordered by SQL execution order, so this asks: does the
  # earliest outer clause run strictly after the latest inner clause?
  if (min(outer) > max(inner)) {
    # Copy the outer clauses onto the optimised inner query and drop the
    # now-redundant outer layer entirely.
    from[as.character(outer)] <- x[as.character(outer)]
    from
  } else {
    x
  }
}
# List the clauses used by a query, as an ordered factor whose level order is
# SQL execution order:
# https://sqlbolt.com/lesson/select_queries_order_of_execution
select_query_clauses <- function(x) {
  # TRUE for each clause that is actually present in this query; `select` only
  # counts when it differs from the default "*".
  present <- c(
    where = length(x$where) > 0,
    group_by = length(x$group_by) > 0,
    having = length(x$having) > 0,
    select = !identical(x$select, sql("*")),
    distinct = x$distinct,
    order_by = length(x$order_by) > 0,
    limit = !is.null(x$limit)
  )

  # Ordered so callers can compare clauses with min()/max() (see
  # sql_optimise.select_query).
  ordered(names(present)[present], levels = names(present))
}
#' @export
sql_render.select_query <- function(query, con, ..., root = FALSE) {
  # Render the FROM clause first (recursively, since it may itself be a
  # query), wrapping it as an unnamed subquery where the backend requires.
  from <- sql_subquery(con, sql_render(query$from, con, ..., root = root), name = NULL)

  # Delegate assembly of the final SELECT statement to the backend-specific
  # sql_select() method.
  sql_select(
    con, query$select, from, where = query$where, group_by = query$group_by,
    having = query$having, order_by = query$order_by, limit = query$limit,
    distinct = query$distinct,
    ...
  )
}
# SQL generation ----------------------------------------------------------

#' @export
sql_select.DBIConnection <- function(con, select, from, where = NULL,
                                     group_by = NULL, having = NULL,
                                     order_by = NULL,
                                     limit = NULL,
                                     distinct = FALSE,
                                     ...) {
  # Build each clause into a named slot so the final statement keeps standard
  # SQL clause order regardless of which clauses are present.
  out <- vector("list", 7)
  names(out) <- c("select", "from", "where", "group_by", "having", "order_by",
    "limit")

  out$select <- sql_clause_select(select, con, distinct)
  out$from <- sql_clause_from(from, con)
  out$where <- sql_clause_where(where, con)
  out$group_by <- sql_clause_group_by(group_by, con)
  out$having <- sql_clause_having(having, con)
  out$order_by <- sql_clause_order_by(order_by, con)
  out$limit <- sql_clause_limit(limit, con)

  # Drop absent clauses (NULL slots) and join the rest with newlines, letting
  # the connection handle any escaping.
  escape(unname(purrr::compact(out)), collapse = "\n", parens = FALSE, con = con)
}
|
fba164bbf929c870258f4dec914db8702122bc24 | 862d2dd3989cb231353b3062829172c9d067b4be | /run_analysis.R | 5fdf24444dfdddb0b1796fa9e0b288a75cee70dc | [] | no_license | mmcdonnell1/datasciencecourseragetting-cleaningdata | 58d47ca75dafbdb8aca8ef6eec291e65c9e728ed | 7d8fae65b16d51f06bde2ea0492917467b198257 | refs/heads/master | 2016-09-10T02:06:55.587905 | 2014-08-24T17:58:54 | 2014-08-24T17:58:54 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 4,413 | r | run_analysis.R | ## This code merges data sets related to accelerometers from the Samsung Galaxy S smartphone,
## extracts select measurements, applies descriptive activity names, labels the data, and
## creates a tidy data set.
## (Raw data is the UCI HAR Dataset, expected unzipped in the working dir.)

## 1. Merge the training and the test sets to create one data set.
## Load training and test datasets
X_train <- read.table("./UCI HAR Dataset/train/X_train.txt")
y_train <- read.table("./UCI HAR Dataset/train/y_train.txt")
subject_train <- read.table("./UCI HAR Dataset/train/subject_train.txt")
X_test <- read.table("./UCI HAR Dataset/test/X_test.txt")
y_test <- read.table("./UCI HAR Dataset/test/y_test.txt")
subject_test <- read.table("./UCI HAR Dataset/test/subject_test.txt")

## Load feature labels and activity labels
features <- read.table("./UCI HAR Dataset/features.txt")
activity_labels <- read.table("./UCI HAR Dataset/activity_labels.txt")

## Combine training and test datasets (training rows first)
X <- rbind(X_train, X_test)
y <- rbind(y_train, y_test)
subject <- rbind(subject_train, subject_test)

## Add column names to the subject ids, activity ids, activity labels,
## and the features data
colnames(subject) <- c("subject_id")
colnames(y) <- c("activity_id")
colnames(activity_labels) <- c("activity_id", "activity_labels")
colnames(X) <- features[, 2]

## 2. Extracts only the measurements on the mean and standard deviation for each measurement.
## Extract the columns related to mean and std features
## The pattern "mean[()]|std[()]" requires a parenthesis right after
## mean/std, so e.g. meanFreq() features are deliberately excluded.
XInclude <- X[, grep("mean[()]|std[()]",features[,2])]

## 3. Uses descriptive activity names to name the activities in the data set
## Append the subject ids to the features data
XInclude.subject <- cbind(XInclude, subject)
## Append the activity ids to the subject ids and features data
XInclude.subject.y <- cbind(XInclude.subject, y)
## Match each activity id with the appropriate activity label
XInclude.subject.y.ActNames <- merge(XInclude.subject.y, activity_labels,
                                     by.x = "activity_id", by.y= "activity_id")

## 4. Appropriately labels the data set with descriptive variable names.
## Extract the feature rows related to mean and std features
featuresInclude <- features[grep("mean[()]|std[()]",features[,2]), ]
## Create space between label descriptors using underscores
## (V3 starts from the raw names in column 2; every later step rewrites V3)
featuresInclude$V3 <- gsub("Body", "_Body_", featuresInclude[,2])
featuresInclude$V3 <- gsub("Gravity", "_Gravity_", featuresInclude[,3])
featuresInclude$V3 <- gsub("Jerk", "_Jerk", featuresInclude[,3])
featuresInclude$V3 <- gsub("Mag", "_Magnitude", featuresInclude[,3])
## Convert -X, -Y, and -Z to include "Axis" reference
featuresInclude$V3 <- gsub("-X", "_X_Axis", featuresInclude[,3])
featuresInclude$V3 <- gsub("-Y", "_Y_Axis", featuresInclude[,3])
featuresInclude$V3 <- gsub("-Z", "_Z_Axis", featuresInclude[,3])
## Convert "time" and "frequency" indicators to complete values
featuresInclude$V3 <- gsub("t_", "Time_", featuresInclude[,3])
featuresInclude$V3 <- gsub("f_", "Frequency_", featuresInclude[,3])
## Repalce R-unfriendly characters (i.e. "-" and "()") and clean-up incorrect names
featuresInclude$V3 <- gsub("-", "_", featuresInclude[,3])
featuresInclude$V3 <- gsub("\\()", "", featuresInclude[,3])
featuresInclude$V3 <- gsub("__Body", "", featuresInclude[,3])
## Convert variable names to lower case
featuresInclude$V3 <- tolower(featuresInclude[,3])
## Apply new tidy variable names
colnames(XInclude.subject.y.ActNames) <- c("activity_id", featuresInclude[,3], "subject_id", "activity_labels")

## 5. Creates a second, independent tidy data set with the average of each
## variable for each activity and each subject.
## Install the plyr package to leverage ddply
install.packages("plyr")
library(plyr)
## Calculate column averages for each subject and activity
XInclude.colMeans.by.Subject.Activity <- ddply(XInclude.subject.y.ActNames,
.(subject_id, activity_id, activity_labels),
colwise(mean))
## Create tidy data set
TidyData <- cbind(XInclude.colMeans.by.Subject.Activity[,1:3],
format(round(XInclude.colMeans.by.Subject.Activity[,4:69], 4),
nsmall = 2))
write.table(TidyData, file = "./TidyData.txt", row.names = FALSE) |
3862d89ef47e2824f1f76cde41aced3b7bc1a795 | a3b22019c25f279739d04d99f3e716e88aaa1a73 | /plot1.R | 1843702ec84c0bea797c3a5d66d1d5d89f55f4c5 | [] | no_license | joeswaminathan/ExData_Plotting1 | 95f3daa860c05b3be1ec2f329bb7d1e2fba576f9 | b5c7b036d5dc19baf0b16c50d5e06d8101af7133 | refs/heads/master | 2020-05-29T11:44:35.973156 | 2015-11-08T15:25:53 | 2015-11-08T15:25:53 | 45,773,875 | 0 | 0 | null | 2015-11-08T09:12:54 | 2015-11-08T09:12:53 | null | UTF-8 | R | false | false | 925 | r | plot1.R | plot1 <- function()
{
    ## Build figure/plot1.png: a histogram of Global Active Power for
    ## 2007-02-01 and 2007-02-02 from the UCI household power data set.
    zipfile <- "household_power_consumption.zip"
    txtfile <- "household_power_consumption.txt"
    ## Download/extract only when needed -- the original re-downloaded the
    ## large archive on every single call.
    if (!file.exists(txtfile)) {
        if (!file.exists(zipfile)) {
            fileurl <- "https://d396qusza40orc.cloudfront.net/exdata%2Fdata%2Fhousehold_power_consumption.zip"
            download.file(fileurl, destfile = zipfile)
        }
        unzip(zipfile)
    }
    DT <- read.csv2(txtfile, sep = ";")
    ## Keep only the two target days; Date is stored as "d/m/Y" strings.
    dates <- as.Date(DT$Date, format = "%d/%m/%Y")
    DT <- DT[dates >= as.Date("2007-02-01") & dates <= as.Date("2007-02-02"), ]
    ## Make sure the output directory exists before opening the device
    ## (png() errors out if "figure/" is missing).
    if (!dir.exists("figure")) {
        dir.create("figure")
    }
    png("figure/plot1.png")
    ## The power column is read as text ("?" marks missing values); coerce
    ## via character so factor codes are never used by accident.
    DT <- within(DT, Global_active_power <- as.numeric(as.character(Global_active_power)))
    hist(DT$Global_active_power, main="Global Active Power", xlab = "Global Active Power (kilowatts)", col="red")
    dev.off()
}
|
d49d8de4dff22e0c7139b3c2cac5d91b6e2f1aab | 4be11f5a0acd02236cf6b07a86f5913e03c677c7 | /man/setDesign.Rd | 79058b7d9bd228325db15210acec4cb1eac3d87e | [
"MIT"
] | permissive | retaj/qpcrvis | 9f5f010963d5014a7042741e9ac9fdda64ff668a | 2bd996e5dbf27bc9cae23c65e07838b5c75c9f83 | refs/heads/master | 2022-03-27T07:30:19.379477 | 2020-01-07T10:54:57 | 2020-01-07T10:54:57 | 75,178,390 | 0 | 0 | null | null | null | null | UTF-8 | R | false | true | 339 | rd | setDesign.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/utils.R
\name{setDesign}
\alias{setDesign}
\title{setDesign}
\usage{
setDesign(pcr, groups)
}
\arguments{
\item{pcr}{qPCR object to work on}
\item{groups}{character vector of sample groups}
}
\description{
Set the design (assignment of samples to groups) of a qPCR experiment.
}
\details{
details
}
|
d5aa621d1ea0604f18a4b2c112d2751fd420c9e7 | afbb0e9fe12437b9d8c657a4ab83e2c8a4321e51 | /genShape.R | f073622fd1b6e6f20472cdfade81ba0174053ca8 | [] | no_license | yosoykit/BaseCaseII | d66d48ea2bbb35162336c7928492e51f5e122496 | 11f3141d840ddeea3c40326b752410d4c660948c | refs/heads/master | 2021-01-21T12:58:06.083641 | 2016-05-06T21:20:58 | 2016-05-06T21:20:58 | 48,261,098 | 1 | 0 | null | null | null | null | UTF-8 | R | false | false | 6,255 | r | genShape.R | genShape <- function(xy = xy, nbr = nbr, size = 10, gamma = 1, col = 'blue') {
# Grow a random connected clone ("shape") of `size` cells on a hexagonal
# grid, plot the occupied cells, and return their coordinates.
# xy: generated by genHexGrid (crypt coordinates x and y and dims nx and ny)
# nbr: generated by neighborList
# gamma: diffuseness of shape: > 0 (compact) or < 0 (diffuse)
# size: target number of occupied cells; col: plotting color
# Returns list(xpremalig, ypremalig, state): coordinates of occupied cells
# plus the full 0/1 occupancy vector.
# sanity check: the quasi-periodic boundary handling requires an even ny
if(xy$ny %% 2 != 0) {
warning("grid not set up correctly: ny required to be even")
return()
}
# neighbor list ... with quasi-periodic bc
N = length(xy$x); nx = xy$nx; ny = xy$ny
nbr.state = as.list(N)
# state vector with zeros (initialized to empty); prob holds the per-cell
# sampling weight used to choose the next growing cell
state = prob = numeric(N)
## everyone assumed to have 75 mm esophagus circumference
# simulate shape starting near center point
#istart = floor((N-nx)/2)
# simulate shape in a uniformly random spot in the biopsy quadrant
istart = sample(1:N,1,prob=rep(1/N,N))
state[istart] = 1; prob[istart]=0.5
nbr.state[[istart]] = state[nbr[[istart]]]
# only divide cells that have state 1 with prob
prob = prob * state
# initialize shape generation: occupy one random empty neighbor of the seed
n = 2
set.1 = which(state==1) #### do not need all state 1 (occupied), only surface is needed
prob.1 = prob[set.1]
j = sample(nbr[[istart]],size=1, prob=1-nbr.state[[istart]]) # only occupy empty slots
# update grid
state[j] = 1
nbr.state[[istart]] = state[nbr[[istart]]]
nbr.state[[j]] = state[nbr[[j]]]
# NOTE(review): unlike every other weight update in this function, the next
# line is NOT raised to ^gamma -- confirm whether the omission is intended
prob[istart] = (max(c(0.1666667,sum(nbr.state[[istart]])))/6)
prob[j] = (max(c(0.1666667,sum(nbr.state[[j]])))/6)^gamma
set.1 = which(state==1)
prob.1 = prob[set.1]
#points(xy$x[j],xy$y[j],pch=19,cex=0.6,col='blue')
# growth loop: repeatedly pick an occupied surface cell (weighted by prob)
# and occupy one of its empty neighbors until `size` cells are occupied
while (n < size) {
if (length(set.1)==1){
i=set.1
}
else{
i = sample(set.1, size=1, prob=prob.1)
}
if (length(nbr[[i]])==1){
j=nbr[[i]]
}
else{
j = sample(nbr[[i]],size=1, prob=1-nbr.state[[i]]) # only occupy empty slots
}
# update affected grid points
state[j] = 1
nbr.state[[i]] = state[nbr[[i]]]
prob[i] = (max(c(0.1666667,sum(nbr.state[[i]])))/6)^gamma
#print(prob[i])
nbr.state[[j]] = state[nbr[[j]]]
prob[j] = (max(c(0.1666667,sum(nbr.state[[j]])))/6)^gamma ### fix problem with quasi-bc
#print(prob[j])
# refresh cached neighbor states and weights around both touched cells
for(k in 1:6) {
ik = nbr[[i]][k]
if(state[ik] == 1) {
nbr.state[[ik]] = state[nbr[[ik]]]
prob[ik] = (max(c(0.1666667,sum(nbr.state[[ik]])))/6)^gamma
#print(prob[ik])
}
jk = nbr[[j]][k]
if(state[jk] == 1) {
nbr.state[[jk]] = state[nbr[[jk]]]
prob[jk] = (max(c(0.1666667,sum(nbr.state[[jk]])))/6)^gamma
#print(prob[jk])
}
}
prob[prob==1] = 0 # once cells are fully surrounded - exclude from further sampling
if (length(which(prob>0))>0){
set.1 = which(prob>0) # only select cells that have available neighboring slots
}
else{n=size}
prob.1 = prob[set.1]
# points(xy$x[j],xy$y[j],pch=19,cex=0.6,col='blue')
n = n + 1
}
#points(xy$x[set.1],xy$y[set.1],pch=19,cex=0.55,col=col) # plots active cells
points(xy$x[which(state==1)],xy$y[which(state==1)],pch=19,cex=0.5,col=col) # plots active cells
#points(xy$x[istart],xy$y[istart],pch=19,cex=0.6,col=1)
#rect(-3/2,-5/2,3/2,5/2, lwd = 1.5,lty=2)
return(list(xpremalig =xy$x[which(state==1)], ypremalig=xy$y[which(state==1)],state=state))
}
### PLOTS ALL CLONES
genShape_all <- function(xy = xy, nbr = nbr, size = 10, gamma = 1, col = 'blue') {
# This function's body was a byte-for-byte duplicate of genShape() above.
# Delegating to genShape() removes the duplication while preserving
# identical behaviour: same RNG draws, same plotting side effects, and
# the same list(xpremalig, ypremalig, state) return value.
#
# xy: generated by genHexGrid (crypt coordinates x and y and dims nx and ny)
# nbr: generated by neighborList
# size: target number of occupied cells
# gamma: diffuseness of shape: > 0 (compact) or < 0 (diffuse)
# col: plotting color for the occupied cells
genShape(xy = xy, nbr = nbr, size = size, gamma = gamma, col = col)
}
|
50f61100e819c715a634c02ca57a15fb6b2ea9a8 | 0c144322ecbf8d0f4e9386c67c055cd81dbc4c68 | /analysis.R | 4b6b7b9a3a9808f59908728be52244a60c18c9d6 | [
"MIT"
] | permissive | info201b-au20/a3-jwc225 | 3097ccec39ca0e1693caf3f221e976b63bf09b37 | bab3d20b075324a084937275f87e77dd50104793 | refs/heads/main | 2023-08-27T09:19:20.635249 | 2021-10-24T07:28:44 | 2021-10-24T07:28:44 | 313,210,753 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 5,551 | r | analysis.R | # Assignment 3 -- Analyzing Incarceration data
# Set up
library(tidyverse)
library(scales)
data <- read.csv("https://raw.githubusercontent.com/vera-institute/incarceration-trends/master/incarceration_trends.csv")
# ----------------Summary Info section----------------

# Location ("county, state") with the largest Native American jail
# population in 2016.
place_top_na_jail_pop_2016 <- data %>%
  mutate(location = paste(county_name, state, sep = ", ")) %>%
  filter(year == 2016) %>%
  filter(native_jail_pop == max(native_jail_pop, na.rm = TRUE)) %>%
  pull(location)

# Size of the Native American jail population at that location in 2016.
top_native_jail_pop <- data %>%
  filter(year == 2016) %>%
  mutate(location = paste(county_name, state, sep = ", ")) %>%
  filter(location == place_top_na_jail_pop_2016) %>%
  pull(native_jail_pop)

# Year with the most total prison admissions (summed over all counties).
# Fixed: the original passed `nam.rm = T` (a typo for `na.rm = TRUE`) to
# max(), which silently added TRUE to the values being maximized instead
# of enabling NA removal.
year_most_prison_adm <- data %>%
  group_by(year) %>%
  summarize(num_prison_adm = sum(total_prison_adm, na.rm = TRUE)) %>%
  filter(num_prison_adm == max(num_prison_adm, na.rm = TRUE)) %>%
  pull(year)

# Number of prison admissions in that peak year.
num_prison_adm_largest_year <- data %>%
  group_by(year) %>%
  summarize(num_prison_adm = sum(total_prison_adm, na.rm = TRUE)) %>%
  filter(year == year_most_prison_adm) %>%
  pull(num_prison_adm)

# Average Black prison population (across all US counties) in 2016.
avg_black_prison_pop_2016 <- data %>%
  group_by(year) %>%
  summarize(avg_black_prison_pop = mean(black_prison_pop, na.rm = TRUE)) %>%
  filter(year == 2016) %>%
  pull(avg_black_prison_pop)

# Total U.S. prison population for the given year.
#
# @param yr Year to look up.
# @return Sum of `total_prison_pop` over all counties for that year.
num_prison_pop_in_year <- function(yr) {
  result <- data %>%
    group_by(year) %>%
    summarize(num_prison_pop = sum(total_prison_pop, na.rm = TRUE)) %>%
    filter(year == yr) %>%
    pull(num_prison_pop)
  result
}

# Change in the total U.S. prison population between 2000 and 2010.
prison_pop_change_2000_to_2010 <- round(num_prison_pop_in_year(2010) -
  num_prison_pop_in_year(2000), 0)
# ----------------Trend Plot section----------------

# Rank Louisiana counties by mean jail population over the full collection
# period (1970-2018) and keep the five largest.
la_county_means <- summarize(
  group_by(filter(data, state == "LA"), county_name),
  avg_jail_pop = mean(total_jail_pop, na.rm = TRUE)
)
top_5_counties_la <- pull(
  slice_max(la_county_means, avg_jail_pop, n = 5),
  county_name
)

# Restrict the full data set to those five counties.
filtered_counties <- filter(data, county_name %in% top_5_counties_la)

# Jail incarceration rate over time, one colored series per county.
trend_plot <- ggplot(
  filtered_counties,
  mapping = aes(x = year, y = total_jail_pop_rate, color = county_name)
) +
  geom_point() +
  geom_smooth() +
  labs(
    title = "Jail Incarceration Rate per Year of Top 5 Louisiana Counties",
    x = "Year",
    y = "Jail Incarceration Rate (per 100,000 residents age 15-64)",
    color = "County"
  )
# ----------------Variable Comparison Plot Section----------------

# Jail observations from 2018 with a known urbanicity category and
# non-missing population/capacity values.
# Fixed: `is.na(x) == F` replaced by the idiomatic `!is.na(x)`, and the
# stray trailing comma in `ggplot(filtered_data, )` removed.
filtered_data <- data %>%
  filter(year == 2018) %>%            # just get 2018 rows
  filter(urbanicity != "") %>%        # remove rows with blank urbanicity
  filter(!is.na(total_jail_pop), !is.na(jail_rated_capacity))

# Scatter plot of rated jail capacity against actual jail population,
# colored by urbanicity.
comparison_plot <- ggplot(filtered_data) +
  geom_point(
    mapping = aes(
      x = jail_rated_capacity, y = total_jail_pop,
      color = urbanicity
    ),
    alpha = 0.3 # opacity of points
  ) +
  xlim(0, 12500) + # manual limits -- very few data points beyond
  ylim(0, 8000) +
  labs(
    title = "Jail Capacity versus Total Jail Population (2018)",
    x = "Jail Capacity",
    y = "Total Jail Population",
    color = "Urbanity"
  )
# ----------------Map Plot Section----------------
# Code borrowed from Ch.16 of textbook
# Define a minimalist theme for maps: strips axes, grid, and borders so
# only the filled state polygons remain
blank_theme <- theme_bw() +
  theme(
    axis.line = element_blank(), # remove axis lines
    axis.text = element_blank(), # remove axis labels
    axis.ticks = element_blank(), # remove axis ticks
    axis.title = element_blank(), # remove axis titles
    plot.background = element_blank(), # remove gray background
    panel.grid.major = element_blank(), # remove major grid lines
    panel.grid.minor = element_blank(), # remove minor grid lines
    panel.border = element_blank() # remove border around plot
  )
# load incarceration data: per-state mean of each county's 2018 proportion
# of Black inmates in the jail population
# (the `location` column built here is dropped by summarize and unused)
data_2018 <- data %>% # keep only 2018 data
  rename(county = county_name) %>%
  mutate(location = paste(county, state, sep = ", ")) %>% # mutate for join
  filter(year == 2018) %>%
  group_by(state) %>%
  summarize(
    black_jail_prop = mean(black_jail_pop / total_jail_pop, na.rm = T)
  )
# Load shapefile of U.S States and attach the 2018 proportions by state.
# NOTE(review): matching against R's 50-entry state.name table yields NA
# for any region not in it (e.g. the District of Columbia) -- confirm
# that dropping such regions from the fill is intended.
state_shape <- map_data("state") %>%
  rename(state = region) %>%
  mutate(state = str_to_title(state)) %>%
  mutate(state = state.abb[match(state, state.name)]) %>% # mutate for join
  left_join(data_2018, by = "state")
# Choropleth of U.S. states shaded by the Black jail-population proportion
map_plot <- ggplot(state_shape) +
  geom_polygon(
    mapping = aes(
      x = long, y = lat, group = group,
      fill = black_jail_prop
    ),
    color = "white", # show state outlines
    size = .1 # thinly stroked
  ) +
  coord_map() + # use a map-based coordinate system
  scale_fill_continuous(low = "#132B43", high = "Red", labels = percent) +
  labs(
    title =
      "Proportion of Blacks in Jail Populations by State (2018)",
    fill = "% Black Jail Population"
  ) +
  blank_theme
a779d82067bc94d0f8f563eeda559887dcd87465 | 6fc8dd03e54041194ac1b88ee8cb017bcc2515e6 | /0_RPackages/R-4/R-4.1.1/configurationFile.R | 0393ed6311062fe1e4372d1b3466b2e0e24edcc2 | [] | no_license | alessandriLuca/CREDOengine | d54f112b31bd2cfa13dc3fd4b680936ce0a6259e | 6e1a7dbea30a5b2169060fa39af61769707ccbe3 | refs/heads/main | 2023-07-07T14:53:30.660211 | 2023-06-27T15:25:19 | 2023-06-27T15:25:19 | 418,818,461 | 3 | 0 | null | null | null | null | UTF-8 | R | false | false | 74 | r | configurationFile.R | cran("Rtsne")
#bioconductor("GenomicRanges")
#github("kendomaniac/rCASC")
|
402f014f8b16a495cc5cca113082e0758a5c0e6f | b493982741e3b91a4e9c3c63c892479e3a56ba9b | /tests/testthat/test_kql_build.R | 3b82d2bea00af0846716e4d829f9a1178df05c7b | [
"MIT",
"LicenseRef-scancode-generic-cla"
] | permissive | cloudyr/AzureKusto | a1f382329c00a818196f193cc9cf6c25f8954186 | 9d147fd05a8a468fbaab74ac59a93cc89ecbdcc1 | refs/heads/master | 2023-04-04T22:58:10.290251 | 2023-03-17T17:42:37 | 2023-03-17T17:42:37 | 161,263,733 | 20 | 3 | NOASSERTION | 2019-05-23T00:14:27 | 2018-12-11T02:12:05 | R | UTF-8 | R | false | false | 631 | r | test_kql_build.R | context("KQL Build")
# filter ------------------------------------------------------------------

test_that("filter generates simple expressions", {
    tbl <- tbl_kusto_abstract(data.frame(x = 1), "foo")
    built <- kql_build(filter(tbl, x > 1L))
    expect_equal(built$ops[[2]][[1]], kql("where ['x'] > 1"))
})

# mutate ------------------------------------------------------------------

test_that("mutate generates simple expressions", {
    tbl <- tbl_kusto_abstract(data.frame(x = 1), "foo")
    built <- kql_build(mutate(tbl, y = x + 1L))
    expect_equal(built$ops[[2]], kql("extend ['y'] = ['x'] + 1"))
})
|
75d92d95ce084fd8428da89f2f346be478a0d385 | 18e8822e6cce16631058ecfd14906bbb1580aa66 | /man/BITFAM.Rd | ee0c2c4841a0d57e72891cabb96db899d6d69586 | [] | no_license | jaleesr/BITFAM | fe443d9b0bc23016526e483e918cfe38bd069913 | b604014c40329b3f737d4a152d44114a60c518b1 | refs/heads/master | 2023-02-21T11:10:50.972711 | 2023-02-13T13:50:50 | 2023-02-13T13:50:50 | 310,433,311 | 27 | 18 | null | null | null | null | UTF-8 | R | false | true | 995 | rd | BITFAM.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/BITFAM.R
\name{BITFAM}
\alias{BITFAM}
\title{BITFAM main function. BITFAM will infer the transcription factor activities from single cell RNA-seq data based on the ChIP-seq data}
\usage{
BITFAM(data, species, interseted_TF = NA, scATAC_obj = NA, ncores, iter = 8000, tol_rel_obj = 0.005)
}
\arguments{
\item{data}{A matrix or dataframe, normalized single cell RNA-seq data}
\item{species}{mouse or human}
\item{interseted_TF}{Transcription factors of interest}
\item{scATAC_obj}{A preprocessed Seurat object of scATAC-seq data}
\item{ncores}{Number of CPU cores}
\item{iter}{Number of max iteration}
\item{tol_rel_obj}{The convergence tolerance on the relative norm of the objective}
}
\value{
sampling results of TF inferred activities and TF-gene weights
}
\description{
BITFAM main function. BITFAM will infer the transcription factor activities from single cell RNA-seq data based on the ChIP-seq data
}
|
8271b626284796e8016b0d2fb8b10b51a991836d | a3e56dccec4c41f256583f45959ee64d6d269f57 | /man/puffin.Rd | 07835b65bf95ed9e2aa09ed55343810ca1106e87 | [] | no_license | cran/MBCbook | 76b189b1b24303fc49ae748f34807c5253507229 | 71cd7f2313a55239d5b1c6c707308c783481b200 | refs/heads/master | 2020-12-22T01:03:39.836908 | 2019-07-02T06:00:03 | 2019-07-02T06:00:03 | 236,623,831 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 731 | rd | puffin.Rd | \name{puffin}
\alias{puffin}
\docType{data}
\title{
The puffin data set
}
\description{
The puffin data set contains 69 individuals (birds) described by 5 categorical variables, in addition to class labels.
}
\usage{data("puffin")}
\format{
A data frame with 69 observations and 6 variables.
\describe{
\item{\code{class}}{the class of the observations}
\item{\code{gender}}{gender of the bird}
    \item{\code{eyebrow}}{eyebrow stripe characteristic of the bird}
    \item{\code{collar}}{collar characteristic of the bird}
    \item{\code{sub.caudal}}{sub-caudal plumage characteristic of the bird}
    \item{\code{border}}{border plumage characteristic of the bird}
}
}
\source{
The data were provided by Bretagnolle, V., Museum d'Histoire Naturelle, Paris.
}
\examples{
data(puffin)
}
\keyword{datasets}
|
f6e9b905696aab7284e4bba3809e2dd8ad219d44 | 2da2406aff1f6318cba7453db555c7ed4d2ea0d3 | /inst/snippet/taste-anova01.R | f2d2bbbbbbaff9cd45c0584610e503ade25388a8 | [] | no_license | rpruim/fastR2 | 4efe9742f56fe7fcee0ede1c1ec1203abb312f34 | d0fe0464ea6a6258b2414e4fcd59166eaf3103f8 | refs/heads/main | 2022-05-05T23:24:55.024994 | 2022-03-15T23:06:08 | 2022-03-15T23:06:08 | 3,821,177 | 11 | 8 | null | null | null | null | UTF-8 | R | false | false | 234 | r | taste-anova01.R | favstats(score ~ type, data = TasteTest)
# Plot the raw scores by cracker type.
gf_point(score ~ type, data = TasteTest)
# One-way ANOVA of score on type.
taste.lm <- lm(score ~ type, data = TasteTest)
anova(taste.lm)
# Tukey all-pairs confidence intervals for the type means (glht/mcp),
# printed and then plotted.
taste.cint <- confint(glht(taste.lm, mcp(type = "Tukey"))); taste.cint
plot(taste.cint)
|
34af2e52797d686bb85e3257703b05ccee6470a3 | ffdea92d4315e4363dd4ae673a1a6adf82a761b5 | /data/genthat_extracted_code/rriskDistributions/examples/get.pert.par.Rd.R | b6b3fecd7c812ed2fe717f46a85a460f7255a8f0 | [] | no_license | surayaaramli/typeRrh | d257ac8905c49123f4ccd4e377ee3dfc84d1636c | 66e6996f31961bc8b9aafe1a6a6098327b66bf71 | refs/heads/master | 2023-05-05T04:05:31.617869 | 2019-04-25T22:10:06 | 2019-04-25T22:10:06 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 1,784 | r | get.pert.par.Rd.R | library(rriskDistributions)
### Name: get.pert.par
### Title: Fitting parameters of a pert distribution from four or more
### quantiles
### Aliases: get.pert.par
### Keywords: fitpercentiles
### ** Examples
# Example 1: quantiles of a PERT on [0, 10] with mode 3 and shape 5;
# refit with several weightings of the outer vs. inner quantiles.
q <- mc2d::qpert(p = c(0.025, 0.5, 0.6, 0.975), min = 0, mode = 3, max = 10, shape = 5)
old.par <- graphics::par(mfrow = c(2, 3))
get.pert.par(q = q)
get.pert.par(q = q, fit.weights = c(100, 1, 1, 100))
get.pert.par(q = q, fit.weights = c(10, 1, 1, 10))
get.pert.par(q = q, fit.weights = c(1, 100, 1, 1))
get.pert.par(q = q, fit.weights = c(1, 10, 1, 1))
graphics::par(old.par) # restore the plotting layout
# Example 2: support [1, 10]; also demonstrates a custom x-axis scaling.
q <- mc2d::qpert(p = c(0.025, 0.5, 0.6, 0.975), min = 1, mode = 5, max = 10, shape = 4)
old.par <- graphics::par(mfrow = c(2, 3))
get.pert.par(q = q)
get.pert.par(q = q, scaleX = c(0.0001, 0.999999))
get.pert.par(q = q, fit.weights = c(100, 1, 1, 100))
get.pert.par(q = q, fit.weights = c(10, 1, 1, 10))
get.pert.par(q = q, fit.weights = c(1, 100, 1, 1))
get.pert.par(q = q, fit.weights = c(1, 10, 1, 1))
graphics::par(old.par)
# Example 3: support extending to negative values, min = -10.
q <- mc2d::qpert(p = c(0.025, 0.5, 0.6, 0.975), min=-10, mode = 5, max = 10, shape = 4)
old.par <- graphics::par(mfrow = c(2, 3))
get.pert.par(q = q)
get.pert.par(q = q, fit.weights = c(100, 1, 1, 100))
get.pert.par(q = q, fit.weights = c(10, 1, 1, 10))
get.pert.par(q = q, fit.weights = c(1, 100, 1, 1))
get.pert.par(q = q, fit.weights = c(1, 10, 1, 1))
graphics::par(old.par)
# Example 4: same support but a small shape parameter (0.4).
q <- mc2d::qpert(p = c(0.025, 0.5, 0.6, 0.975), min=-10, mode = 5, max = 10, shape = 0.4)
old.par <- graphics::par(mfrow = c(2, 3))
get.pert.par(q = q)
get.pert.par(q = q, fit.weights = c(100, 1, 1, 100))
get.pert.par(q = q, fit.weights = c(10, 1, 1, 10))
get.pert.par(q = q, fit.weights = c(1, 100, 1, 1))
get.pert.par(q = q, fit.weights = c(1, 10, 1, 1))
graphics::par(old.par)
|
ba61fd5c8b8a1e136145feb78c79cee364de3b3e | b3809f859d05388c0d206b827eea5dec375588fd | /Data Scripts/step3/make_plot.R | c7ef5080dbad340fc4df025387fb1bb85b0f132d | [] | no_license | rexshihaoren/MSPrediction-R | cfd2fa8e81dc9c35fa89c403c2bb20821707b350 | 7d2c6a41b94b310c4e8b75cb3eb729bf7d2e2179 | refs/heads/master | 2020-12-07T02:25:24.752857 | 2014-12-03T00:49:24 | 2014-12-03T00:49:24 | 17,223,400 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 4,856 | r | make_plot.R | all_data_pca = read.csv("./data_all.csv")
## Exploratory plotting for the PCA-grouped survey data.
## Keep only the five variables of interest.
all_data_pca = all_data_pca[,c('group1', 'group2', 'group3', 'RelativePain','EnjoyLife')]
## Same data with human-readable column names.
all_data_pca2 = with(all_data_pca[,c('group1', 'group2', 'group3', 'RelativePain','EnjoyLife')],
                     data.frame(LackFocus=group1, LackEnergy=group2, Psycho_Down=group3, RelativePain, EnjoyLife))
# sample how the plot looks like: density of each variable
plot(density(all_data_pca$group1, adjust = 3))
plot(density(all_data_pca$group2, adjust = 3))
plot(density(all_data_pca$group3, adjust = 3))
plot(density(all_data_pca$RelativePain, adjust = 3))
plot(density(all_data_pca$EnjoyLife))
# histograms of the same variables on a fixed 0-4 scale
hist(all_data_pca$group1, freq = FALSE, breaks = 0:4)
hist(all_data_pca$group2, freq = FALSE, breaks = 0:4)
hist(all_data_pca$group3, freq = FALSE, breaks = 0:4)
hist(all_data_pca$EnjoyLife, freq = FALSE, breaks = 0:4)
hist(all_data_pca$RelativePain, freq = FALSE, breaks = 0:4)
# Candidate layout 1: population boxplots with the user's values overlaid.
# NOTE(review): `user_data_pca` is used throughout but defined elsewhere --
# presumably a single user's values for the same five variables; confirm.
par(las=1)
boxplot(all_data_pca, add=F, horizontal=T, outline=F, at=c(2.5+(0:4)*3), xlim = c(1,16), boxwex = 2, whiskcol='white', staplecol = 'white')
barplot(user_data_pca , horiz = T, add = T, space = 2, ylim = c(1,16), xpd = T, col = adjustcolor('grey', alpha = 0.4),xlim = c(0,4))
# axis(side=2, at = 0:16)
# Population summary per variable: 20th percentile, mean, 80th percentile.
pop_agV <- sapply(all_data_pca, function(x) {q <- quantile(x,prob=c(0.20,0.80), type=7)
m <- mean(x)
return(c(q[1], mean = m, q[2]))})
pop_agV
# Overlay the mean (black) and 20th/80th percentile (red) tick marks.
mapply( function(x, y) lines(rep(x, 2), y=c(-1,1)+y, lwd=3), pop_agV[2,],c(2.5+(0:4)*3))
mapply( function(x, y) lines(rep(x, 2), y=c(-1,1)*0.7+y, lwd=2.5, col = 'red'), pop_agV[1,],c(2.5+(0:4)*3))
mapply( function(x, y) lines(rep(x, 2), y=c(-1,1)*0.7+y, lwd=2.5, col = 'red'), pop_agV[3,],c(2.5+(0:4)*3))
axis(side=2, at = 0:16)
# Candidate layout 2: grouped bars for the summary plus user bars.
barplot( pop_agV, horiz = T, add=F, ylim = c(1,16), col = adjustcolor(c("yellow", "orange", "red"), alpha = 0.8), xlim = c(0,4), width = 0.6, space = c(0,2), beside=T )
barplot(user_data_pca , horiz = T, add = T, space = 2, ylim = c(1,16), xpd = T, col = adjustcolor('grey', alpha = 0.4),xlim = c(0,4), names.arg=NA)
barplot( pop_agV, horiz = T, add=F, ylim = c(1,20), col = adjustcolor(c("yellow", "orange", "red"), alpha = 0.5), xlim = c(0,4), width = 1, space = c(0,1), beside=T )
barplot(user_data_pca , horiz = T, add = T, width = 3, space = 1/3, ylim = c(1,20), xpd = T, col = adjustcolor('grey', alpha = 0.6),xlim = c(0,4), names.arg=NA)
barplot( rbind(pop_agV,user_data_pca ), horiz = T, add=F, col = adjustcolor(c("yellow", "orange", "red", "black"), alpha = 0.5), xlim = c(0,4), width = 1, space = c(0,1), beside=T )
# Candidate layout 3: user value drawn as a thick line over the bars.
barplot( pop_agV, horiz = T, add=F, ylim = c(1,20), col = adjustcolor(c("yellow", "orange", "red"), alpha = 0.5), xlim = c(0,4), width = 1, space = c(0,1), beside=T )
mapply( function(x, y) lines(c(0,x), y=rep(y,2), lwd=8, col = adjustcolor("black", alpha = 0.6)), user_data_pca , c(2.5+0:4*4))
barplot( pop_agV, horiz = T, add=F, ylim = c(1,20), col = adjustcolor(c("yellow", "orange", "red"), alpha = 0.5), xlim = c(0,4), width = 1, space = c(0,1), beside=T, border = NA )
mapply( function(x, y) lines(c(0,x), y=rep(y,2), lwd=60, col = adjustcolor("black", alpha = 0.2)), user_data_pca , c(2.5+0:4*4))
# Candidate layout 4: stacked segments (to 20th pct, to mean, to 80th pct).
barplot( rbind(pop_agV[1,], apply(pop_agV, 2, diff)), horiz = T, add=F, col = adjustcolor(c("blue", "magenta", "red"), alpha = 0.7), xlim = c(0,4), width = 1, space = 0.4, beside=F, border = NA )
barplot( rbind(pop_agV[1,], apply(pop_agV, 2, diff)), horiz = T, add=F, col = adjustcolor(c("red", "orange", "yellow"), alpha = 0.7), xlim = c(0,4), width = 1, space = 0.4, beside=F, border = NA )
barplot( rbind(pop_agV[1,], apply(pop_agV, 2, diff)), horiz = T, add=F, col = adjustcolor(c("black", "grey", "grey90"), alpha = 0.5), xlim = c(0,4), width = 1, space = 0.4, beside=F, border = NA )
mapply( function(x, y) lines(c(0,x), y=rep(y,2), lwd=15, col = adjustcolor("black", alpha = 1)), user_data_pca , c(0.9+0:4*1.4))
# Same stacked layout with a small negative offset so a user value of 0 is
# still visible as a line starting left of the bars.
offsetF <- -0.04
barplot( rbind(pop_agV[1,]-offsetF, apply(pop_agV, 2, diff)), horiz = T, add=F, col = adjustcolor(c("black", "grey", "grey90"), alpha = 0.5), xlim = c(-0.2,4), width = 1, space = 0.4, beside=F, border = NA, offset = offsetF )
mapply( function(x, y) lines(c(offsetF,x), y=rep(y,2), lwd=15, lend = 1, col = adjustcolor("black", alpha = 1), ), user_data_pca , c(0.9+0:4*1.4))
# Candidate layout 5: mirrored curve of the fraction of responses above
# each group1 value, with the user's value as a grey band.
# NOTE(review): 3312 looks like the hard-coded row count of the full data
# set -- confirm it matches nrow(all_data_pca).
group1.ordered = unique(sort(all_data_pca$group1))
n = (3312 - cumsum(table(all_data_pca$group1)))/length(all_data_pca$group1)
plot(group1.ordered, n, type = 'l', ylim = c(-1, 1), xaxt = 'n')
points(group1.ordered, -n, type = 'l')
rect(0, -0.1, user_data_pca[1], 0.1, col = 'grey', border = NA)
# Smoothed (lowess) version of the same mirrored curve.
plot(lowess(group1.ordered, n, iter=5, f=0.3), type = 'l', ylim = c(-1, 1), xaxt = 'n')
points(lowess(group1.ordered, -n, iter=5, f=0.3), type = 'l', ylim = c(-1, 1), xaxt = 'n')
rect(0, -0.1, user_data_pca[1], 0.1, col = 'grey', border = NA)
|
504f363eaf217e1336c1226a7a4fc9dc707551e1 | 04bf444bf40498ba6672d8558b6aac2e7b2c8031 | /web-scraping-with-rvest.R | 6386f57109c10da440f97a0d900f9b0abae3d7b4 | [] | no_license | triadicaxis/quickr | dea5bb836dce9ece41c614db002bf7477f0a70e2 | 784723c3ac9a43304257788abcd7d0a2dc2e066a | refs/heads/master | 2020-03-11T11:10:19.392031 | 2019-10-21T14:54:12 | 2019-10-21T14:54:12 | 129,503,198 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 772 | r | web-scraping-with-rvest.R | browseURL("https://www.youtube.com/watch?v=TPLMQnGw0Vk")
## Reference material for this scraping script:
browseURL("http://blog.rstudio.org/2014/11/24/rvest-easy-web-scraping-with-r/")
vignette("selectorgadget")
library("rvest")
## Fetch the ABS key-indicators page and the RBA cash-rate page.
## NOTE(review): html() was deprecated in later rvest releases in favor of
## read_html() -- consider updating when the installed version allows.
absweb <- html("http://www.abs.gov.au/AUSSTATS/abs@.nsf/mf/1345.0?")
rbaweb <- html("http://www.rba.gov.au/statistics/cash-rate/")
## 1. extract indicator names (column 1)
indicator <- html_nodes(absweb, "table:nth-child(2) td:nth-child(1) font")
indicator <- html_text(indicator)
## 2. extract source cat. no. (column 2)
catno <- html_nodes(absweb, "u font, tr:nth-child(33) font, tr:nth-child(35) font")
catno <- html_text(catno)
## Cash-rate table cells from the RBA page (CSS selectors built with
## SelectorGadget; see the vignette above).
var1 <- html_nodes(rbaweb, "td , th, #table_1 #table_1")
var1 <- html_text(var1)
cashrate.change <- html_nodes(rbaweb, "td:nth-child(2)")
cashrate.change
|
9366591dbe9852822ff95fde2ebf4c218b0396c6 | 67de61805dd839979d8226e17d1316c821f9b1b4 | /R/MxFitFunction.R | c592dfe95b02b492e2390fe1e32a39e72be76a61 | [
"Apache-2.0"
] | permissive | falkcarl/OpenMx | f22ac3e387f6e024eae77b73341e222d532d0794 | ee2940012403fd94258de3ec8bfc8718d3312c20 | refs/heads/master | 2021-01-14T13:39:31.630260 | 2016-01-17T03:08:46 | 2016-01-17T03:08:46 | 49,652,924 | 1 | 0 | null | 2016-01-14T14:41:06 | 2016-01-14T14:41:05 | null | UTF-8 | R | false | false | 6,216 | r | MxFitFunction.R | #
# Copyright 2007-2016 The OpenMx Project
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# The virtual base class for all fit functions
#
##' MxBaseFitFunction
##'
##' The virtual base class for all fit functions. This is an internal class and should not be used directly.
##'
##' @aliases
##' $,MxBaseFitFunction-method
##' $<-,MxBaseFitFunction-method
##' @seealso
##' \link{mxFitFunctionAlgebra}, \link{mxFitFunctionML}, \link{mxFitFunctionMultigroup},
##' \link{mxFitFunctionR}, \link{mxFitFunctionWLS}, \link{mxFitFunctionRow},
##' \link{mxFitFunctionGREML}
##' @rdname MxBaseFitFunction-class
# S4 definition of the abstract fit-function base class.  Slots:
#   info           - list of auxiliary information
#   dependencies   - integer dependency indices
#   expectation    - integer index of the associated expectation
#   vector         - logical flag
#   rowDiagnostics - logical flag
#   result         - matrix holding the computed fit result
# "VIRTUAL" marks the class as abstract: it cannot be instantiated directly.
setClass(Class = "MxBaseFitFunction",
	representation = representation(
		info = "list",
		dependencies = "integer",
		expectation = "integer",
		vector = "logical",
		rowDiagnostics = "logical",
		result = "matrix", "VIRTUAL"),
	contains = "MxBaseNamed")
##' @title MxFitFunction
##' @name MxFitFunction-class
##'
##' @description
##' This is an internal class and should not be used directly.
##'
##' @aliases
##' MxFitFunction
##' MxFitFunction-class
##' @rdname MxFitFunction-class
# Class union so a model's fit function may be either NULL (absent) or a
# concrete MxBaseFitFunction subclass.
setClassUnion("MxFitFunction", c("NULL", "MxBaseFitFunction"))
# Generic: accumulate the dependency relationships contributed by a
# fit function (see the MxBaseFitFunction method below).
setGeneric("genericFitDependencies",
	function(.Object, flatModel, dependencies) {
		return(standardGeneric("genericFitDependencies"))
	})
# Generic: update references held by a fit function when a model entity
# is renamed from oldname to newname.
setGeneric("genericFitRename",
	function(.Object, oldname, newname) {
		return(standardGeneric("genericFitRename"))
	})
# Generic: initial result matrix of a fit function before any computation.
setGeneric("genericFitInitialMatrix",
	function(.Object, flatModel) {
		return(standardGeneric("genericFitInitialMatrix"))
	})
# Generic: report any new named entities introduced by a fit function.
setGeneric("genericFitNewEntities",
	function(.Object) {
		return(standardGeneric("genericFitNewEntities"))
	})
# Generic: convert a fit function into its backend representation.
setGeneric("genericFitFunConvert",
	function(.Object, flatModel, model, labelsData, dependencies) {
		return(standardGeneric("genericFitFunConvert"))
	})
# Generic: construct reference models used for comparison statistics.
setGeneric("generateReferenceModels",
	function(.Object, model) {
		return(standardGeneric("generateReferenceModels"))
	})
setMethod("generateReferenceModels", "MxBaseFitFunction",
function(.Object, model) {
msg <- paste("Don't know how to make reference models for a model with a ",
class(.Object), " fit function.", sep="")
stop(msg)
})
setMethod("genericFitInitialMatrix", "MxBaseFitFunction",
function(.Object, flatModel) {
return(matrix(as.double(NA), 1, 1))
})
setMethod("genericFitInitialMatrix", "NULL",
function(.Object, flatModel) {
return(NULL)
})
setMethod("$", "MxBaseFitFunction", imxExtractSlot)
setReplaceMethod("$", "MxBaseFitFunction",
function(x, name, value) {
if(name == "result") {
stop("You cannot set the result of an fit function. Use mxRun() to populate the result, or mxEval() to compute it.")
}
return(imxReplaceSlot(x, name, value, check=TRUE))
}
)
setMethod("names", "MxBaseFitFunction", slotNames)
##' Add dependencies
##'
##' If there is an expectation, then the fitfunction should always
##' depend on it. Hence, subclasses that implement this method must
##' ignore the passed-in dependencies and use "dependencies <-
##' callNextMethod()" instead.
##'
##' @param .Object fit function object
##' @param flatModel flat model that lives with .Object
##' @param dependencies accumulated dependency relationships
setMethod("genericFitDependencies", "MxBaseFitFunction",
function(.Object, flatModel, dependencies) {
# Resolve the model that owns this fit function, then look up that
# model's expectation under its canonical "<model>.expectation" name.
# (An unused local 'name <- .Object@name' was removed here.)
modelname <- imxReverseIdentifier(flatModel, .Object@name)[[1]]
expectName <- paste(modelname, "expectation", sep=".")
if (!is.null(flatModel[[expectName]])) {
# Record that this fit function depends on its expectation.
dependencies <- imxAddDependency(expectName, .Object@name, dependencies)
}
return(dependencies)
})
setMethod("genericFitDependencies", "NULL",
function(.Object, flatModel, dependencies) {
return(dependencies)
})
setMethod("genericFitRename", "MxBaseFitFunction",
function(.Object, oldname, newname) {
return(.Object)
})
setMethod("genericFitRename", "NULL",
function(.Object, oldname, newname) {
return(NULL)
})
setMethod("genericFitNewEntities", "MxBaseFitFunction",
function(.Object) {
return(NULL)
})
setGeneric("genericFitConvertEntities",
function(.Object, flatModel, namespace, labelsData) {
return(standardGeneric("genericFitConvertEntities"))
})
setGeneric("genericFitAddEntities",
function(.Object, job, flatJob, labelsData) {
return(standardGeneric("genericFitAddEntities"))
})
setMethod("genericFitConvertEntities", "MxBaseFitFunction",
function(.Object, flatModel, namespace, labelsData) {
return(flatModel)
})
setMethod("genericFitConvertEntities", "NULL",
function(.Object, flatModel, namespace, labelsData) {
return(flatModel)
})
setMethod("genericFitAddEntities", "MxBaseFitFunction",
function(.Object, job, flatJob, labelsData) {
return(job)
})
setMethod("genericFitAddEntities", "NULL",
function(.Object, job, flatJob, labelsData) {
return(job)
})
# Give each fit function in the flattened model the chance to add entities
# to the model (dispatches genericFitAddEntities per fit function).
#
# @param model the model being augmented (threaded through each call)
# @param flatModel the flattened model whose fit functions are visited
# @param labelsData label bookkeeping forwarded to the generic
# @return the (possibly modified) model
fitFunctionAddEntities <- function(model, flatModel, labelsData) {
  # Iterating over the list directly handles the empty case, removing the
  # original length()==0 early return and the 1:length() index loop.
  for (fitfunction in flatModel@fitfunctions) {
    model <- genericFitAddEntities(fitfunction, model, flatModel, labelsData)
  }
  return(model)
}
# Give each fit function in the flattened model the chance to rewrite
# entities (dispatches genericFitConvertEntities per fit function).
#
# @param flatModel the flattened model, threaded through each call
# @param namespace namespace information forwarded to the generic
# @param labelsData label bookkeeping forwarded to the generic
# @return the (possibly modified) flattened model
fitFunctionModifyEntities <- function(flatModel, namespace, labelsData) {
  # Direct iteration replaces the length()==0 guard plus 1:length() loop.
  for (fitfunction in flatModel@fitfunctions) {
    flatModel <- genericFitConvertEntities(fitfunction, flatModel, namespace, labelsData)
  }
  return(flatModel)
}
# Apply genericFitFunConvert to every fit function in the flattened model
# and return the resulting list of converted fit functions.
convertFitFunctions <- function(flatModel, model, labelsData, dependencies) {
  lapply(flatModel@fitfunctions,
         function(fitfun) {
           genericFitFunConvert(fitfun, flatModel, model, labelsData, dependencies)
         })
}
|
96d667512e84933b5868a3872617afedd29ae1f1 | 611610808ab0b23196156a408dd58def1d88fb0f | /code/analysis.R | 9cc8f7975de5a0f05db9665a31e56671a37a16df | [] | no_license | cambrone/SVM_imbalance | 796b95d83b05888ee5b4f0fedaba22d244217779 | 9ad69f75525979a4afc4a757601578974f4e2e01 | refs/heads/master | 2020-03-24T19:40:03.638723 | 2018-08-28T22:38:31 | 2018-08-28T22:38:31 | 142,939,276 | 2 | 0 | null | null | null | null | UTF-8 | R | false | false | 19,295 | r | analysis.R | ###########################################################################################
# TASK: Compare performance of SVM trained using balancing algorithms
# Author: Andres Cambronero
# Project: Comparison of Oversampling Algorithms to Classify Imbalanced Data
# Date Started: July 2, 2018
# Latest Update: July 29, 2018
###########################################################################################
# Setup: load the imbalanced training set, two pre-balanced training sets
# (random over-/under-sampling) and the shared test set, then coerce every
# data set into the same shape (numeric predictors, factor outcome).
#clear environment
rm(list=ls(all=TRUE))
#set working directory
setwd("~/Desktop/summer_projects/Imbalanced Data/data")
#load libraries
library(e1071)
library(smotefamily)
library(cvAUC)
library(ROCR)
library(DMwR)
#load data (everything read as character, then coerced column-by-column below)
imb<-read.csv("imbalanced_train.csv", colClasses = "character")
bal_over<-read.csv("balanced_over_train.csv", colClasses = "character")
bal_under<-read.csv("balanced_under_train.csv", colClasses = "character")
test<-read.csv("test.csv", colClasses = "character")
#change columns class from character to numeric in training and test set
#training sets
imb[,6:30]<-sapply(imb[,6:30], as.numeric)
bal_over[,6:30]<-sapply(bal_over[,6:30], as.numeric)
bal_under[,6:30]<-sapply(bal_under[,6:30], as.numeric)
#test set
test[,6:30]<-sapply(test[,6:30], as.numeric)
#change class of CHARTER from character to class factor in training and test set
#training sets
imb$CHARTER<-as.factor(imb$CHARTER)
bal_over$CHARTER<-as.factor(bal_over$CHARTER)
bal_under$CHARTER<-as.factor(bal_under$CHARTER)
#test set
test$CHARTER<-as.factor(test$CHARTER)
#change class of PROF_LEVEL (the outcome) from character to class factor
#training sets
imb$PROF_LEVEL<-as.factor(imb$PROF_LEVEL)
bal_over$PROF_LEVEL<-as.factor(bal_over$PROF_LEVEL)
bal_under$PROF_LEVEL<-as.factor(bal_under$PROF_LEVEL)
#test set
test$PROF_LEVEL<-as.factor(test$PROF_LEVEL)
#Drop variables that will not be used in training and test set
#training sets
imb$ENTITY_CD<-NULL
imb$ENTITY_NAME <-NULL
imb$DISTRICT_NAME<-NULL
imb$COUNTY_NAME<-NULL
bal_over$ENTITY_CD<-NULL
bal_over$ENTITY_NAME <-NULL
bal_over$DISTRICT_NAME<-NULL
bal_over$COUNTY_NAME<-NULL
bal_under$ENTITY_CD<-NULL
bal_under$ENTITY_NAME <-NULL
bal_under$DISTRICT_NAME<-NULL
bal_under$COUNTY_NAME<-NULL
#test sets
test$ENTITY_CD<-NULL
test$ENTITY_NAME <-NULL
test$DISTRICT_NAME<-NULL
test$COUNTY_NAME<-NULL
#Normalize variables in training and test sets
# NOTE(review): the test set is scaled with its own mean/sd rather than the
# training-set statistics -- confirm this is intentional.
#training sets
imb[,2:22]<-scale(imb[,2:22])
imb[,24:26]<-scale(imb[,24:26])
bal_over[,2:22]<-scale(bal_over[,2:22])
bal_over[,24:26]<-scale(bal_over[,24:26])
bal_under[,2:22]<-scale(bal_under[,2:22])
bal_under[,24:26]<-scale(bal_under[,24:26])
#test set
test[,2:22]<-scale(test[,2:22])
test[,24:26]<-scale(test[,24:26])
############################################
# ANALYSIS OF PERFORMANCE: IMBALANCED DATA #
############################################
# Baseline: SVM trained on the raw (imbalanced) training set.
#set seed
set.seed(1)
#train SVM on imbalanced
svm_imb <- svm(PROF_LEVEL ~ ., data = imb, kernel="polynomial", degree=2, cost=5, probability=TRUE, cross=10)
#prediction of observations in test data
imb_pred<-predict(svm_imb, test[,-1], probability=TRUE)
#confusion matrix (rows = true class, columns = predicted class)
imb_confmat <- table(true = test[,1],pred = imb_pred)
write.csv(imb_confmat, "imb_confmat.csv", row.names = T)
#GMEAN
#gmean function
# Geometric mean of the class-wise accuracies (specificity and sensitivity)
# for a 2x2 confusion matrix laid out as table(true, pred), with the
# negative class in the first row/column.
#
# @param confmat 2x2 confusion matrix; rows = true class, cols = predicted.
# @return scalar G-mean, sqrt(acc_neg * acc_pos).
gmean <- function(confmat) {
  # Row sums replace the original sum(confmat[1,1]+confmat[1,2]) form,
  # which wrapped an already-scalar addition in sum().
  acc_neg <- confmat[1, 1] / sum(confmat[1, ])  # true-negative rate
  acc_pos <- confmat[2, 2] / sum(confmat[2, ])  # true-positive rate (recall)
  sqrt(acc_neg * acc_pos)
}
#calculate gmean on test data for SVM trained on imbalanced data
imb_gmean<-gmean(imb_confmat)
#Precision
#precision function
# Precision (positive predictive value): TP / (TP + FP), taken from a 2x2
# confusion matrix laid out as table(true, pred) with the positive class
# in the second row/column.
#
# @param confmat 2x2 confusion matrix; rows = true class, cols = predicted.
# @return scalar precision.
precision <- function(confmat) {
  # Column 2 holds everything predicted positive; the column sum replaces
  # the redundant sum(confmat[2,2]+confmat[1,2]) of the original.
  confmat[2, 2] / sum(confmat[, 2])
}
#calculate precision on test data for SVM trained on imbalanced data
imb_precision<-precision(imb_confmat)
#Recall
#recall function
# Recall (sensitivity): the fraction of truly positive cases that the
# classifier labelled positive, i.e. TP / (TP + FN), from a 2x2 confusion
# matrix laid out as table(true, pred).
recall <- function(confmat) {
  true_pos <- confmat[2, 2]
  false_neg <- confmat[2, 1]
  true_pos / (true_pos + false_neg)
}
#calculate recall on test data for SVM trained on imbalanced data
imb_recall<-recall(imb_confmat)
#F-MEASURE
#F-measure function
# F1 measure (harmonic mean of precision and recall) for a 2x2 confusion
# matrix laid out as table(true, pred). Computed directly from counts as
# 2*TP / (2*TP + FP + FN), which is algebraically identical to
# 2*P*R / (P + R) but stays well-defined (returns the conventional 0)
# when there are no true positives, where the original ratio form
# produced NaN from 0/0.
#
# @param confmat 2x2 confusion matrix; rows = true class, cols = predicted.
# @return scalar F1 score in [0, 1].
fmeasure <- function(confmat) {
  tp <- confmat[2, 2]  # true positives
  fp <- confmat[1, 2]  # false positives
  fn <- confmat[2, 1]  # false negatives
  denom <- 2 * tp + fp + fn
  if (denom == 0) {
    return(0)  # degenerate matrix: no positive predictions or positive cases
  }
  2 * tp / denom
}
#calculate fmeasure on test data for SVM trained on imbalanced data
imb_f_measure<-fmeasure(imb_confmat)
#ROC Curve
#plot ROC curve for SVM trained on imbalanced data
write.csv(attr(imb_pred,"probabilities"), "imb_pred.csv", row.names = F)
# NOTE(review): column 2 of the probability matrix is taken as "Proficient"
# here, while later sections use column 1 -- the column order depends on
# the factor levels seen when each model was fit; verify for each model.
svm_rocr<-prediction(attr(imb_pred,"probabilities")[,2], test[,1] == "Proficient")
svm_perf<-performance(svm_rocr, measure = "tpr", x.measure = "fpr")
plot(svm_perf,col="RED")
##calculate AUC on test data for SVM trained on imbalanced data
imb_auc<-as.numeric(performance(svm_rocr, measure = "auc", x.measure = "cutoff")@ y.values)
write.csv(imb_auc, "imb_auc.csv", row.names = F)
#################################################
# ANALYSIS OF PERFORMANCE: RANDOM OVER-Sampling #
#################################################
# Same pipeline as the baseline, using the randomly oversampled training set.
#set seed
set.seed(1)
#train SVM on randomly oversampled data
svm_bal_over <- svm(PROF_LEVEL ~ ., data = bal_over, kernel="polynomial", degree=2, cost=5, probability=TRUE, cross=10)
#prediction observations on test data
bal_over_pred<-predict(svm_bal_over, test[,-1], probability=TRUE)
#confusion matrix
bal_over_confmat <- table(true = test[,1],pred = bal_over_pred)
write.csv(bal_over_confmat, "bal_over_confmat.csv", row.names = T)
##calculate gmean on test data for SVM trained on randomly oversampled data
bal_over_gmean<-gmean(bal_over_confmat)
#calculate precision on test data for SVM trained on randomly oversampled data
bal_over_precision<-precision(bal_over_confmat)
##calculate recall on test data for SVM trained on randomly oversampled data
bal_over_recall<-recall(bal_over_confmat)
##calculate fmeasure on test data for SVM trained on randomly oversampled data
bal_over_F<-fmeasure(bal_over_confmat)
##plot ROC on test data for SVM trained on randomly oversampled data (added to the existing plot)
write.csv(attr(bal_over_pred,"probabilities"), "bal_over_pred.csv", row.names = F)
svm_rocr<-prediction(attr(bal_over_pred,"probabilities")[,1], test[,1] == "Proficient")
svm_perf<-performance(svm_rocr, measure = "tpr", x.measure = "fpr")
plot(svm_perf,add=TRUE, col="BLUE")
##calculate AUC on test data for SVM trained on randomly oversampled data
bal_over_auc<-as.numeric(performance(svm_rocr, measure = "auc", x.measure = "cutoff")@ y.values)
write.csv(bal_over_auc, "bal_over_auc.csv", row.names = F)
#################################################
# ANALYSIS OF PERFORMANCE: RANDOM UNDER-Sampling #
#################################################
# Same pipeline, using the randomly undersampled training set.
#set seed
set.seed(1)
#train SVM on randomly under sampling
svm_bal_under <- svm(PROF_LEVEL ~ ., data = bal_under, kernel="polynomial", degree=2, cost=5, probability=TRUE, cross=10)
#prediction observations on test data
bal_under_pred<-predict(svm_bal_under, test[,-1], probability=TRUE)
#confusion matrix
bal_under_confmat <- table(true = test[,1],pred = bal_under_pred)
write.csv(bal_under_confmat, "bal_under_confmat.csv", row.names = T)
##calculate gmean on test data for SVM trained on randomly undersampled data
bal_under_gmean<-gmean(bal_under_confmat)
###calculate precision on test data for SVM trained on randomly undersampled data
bal_under_precision<-precision(bal_under_confmat)
##calculate recall on test data for SVM trained on randomly undersampled data
bal_under_recall<-recall(bal_under_confmat)
##calculate fmeasure on test data for SVM trained on randomly undersampled data
bal_under_F<-fmeasure(bal_under_confmat)
##plot ROC on test data for SVM trained on randomly undersampled data
write.csv(attr(bal_under_pred,"probabilities"), "bal_under_pred.csv", row.names = F)
svm_rocr<-prediction(attr(bal_under_pred,"probabilities")[,1], test[,1] == "Proficient")
svm_perf<-performance(svm_rocr, measure = "tpr", x.measure = "fpr")
plot(svm_perf, add=TRUE, col="GREEN")
##calculate AUC on test data for SVM trained on randomly undersampled data
bal_under_auc<-as.numeric(performance(svm_rocr, measure = "auc", x.measure = "cutoff")@ y.values)
write.csv(bal_under_auc, "bal_under_auc.csv", row.names = F)
#####################################
#ANALYSIS OF PERFORMANCE: SMOTE
#####################################
#create balanced dataset with synthetic data points
smote_data<-DMwR::SMOTE(PROF_LEVEL ~., imb, perc.over = 1000, perc.under = 110 , k=5)
#set seed
set.seed(1)
#train SVM using smote data
svm_smote <- svm(PROF_LEVEL ~ ., data = smote_data, kernel="polynomial", degree=2, cost=5, probability=TRUE, cross=10)
#predict test observations
smote_pred<-predict(svm_smote, test[,-1], probability=TRUE)
#confusion matrix
smote_confmat <- table(true = test[,1],pred = smote_pred)
write.csv(smote_confmat, "smote_confmat.csv", row.names = T)
##calculate gmean on test data for SVM trained on SMOTE
smote_gmean<-gmean(smote_confmat)
##calculate precision on test data for SVM trained on SMOTE
smote_precision<-precision(smote_confmat)
##calculate recall on test data for SVM trained on SMOTE
smote_recall<-recall(smote_confmat)
##calculate fmeasure on test data for SVM trained on SMOTE
smote_F<-fmeasure(smote_confmat)
##plot ROC for SVM trained on SMOTE
write.csv(attr(smote_pred,"probabilities"), "smote_pred.csv", row.names = F)
svm_rocr<-prediction(attr(smote_pred,"probabilities")[,2], test[,1] == "Proficient")
svm_perf<-performance(svm_rocr, measure = "tpr", x.measure = "fpr")
plot(svm_perf,add=TRUE,col="PURPLE")
##calculate AUC on test data for SVM trained on SMOTE
smote_auc<-as.numeric(performance(svm_rocr, measure = "auc", x.measure = "cutoff")@ y.values)
write.csv(smote_auc, "smote_auc.csv", row.names = F)
################################################
#ANALYSIS OF PERFORMANCE: BORDERLINE SMOTE
################################################
# NOTE(review): this section mutates 'imb' in place (PROF_LEVEL and CHARTER
# become numeric); the ADASYN and safe-level sections below rely on that.
#smotefamily needs all data to be numeric
#change PROF_LEVEL to numeric
imb$PROF_LEVEL<-as.character(imb$PROF_LEVEL)
imb$PROF_LEVEL<-ifelse(imb$PROF_LEVEL=="Not Proficient",0,1)
#change CHARTER to numeric
imb$CHARTER<-as.character(imb$CHARTER)
imb$CHARTER<-as.numeric(imb$CHARTER)
#create balanced borderline smote data
border_smote_data<-BLSMOTE(imb,imb$PROF_LEVEL,K=5,C=4,dupSize=0,method =c("type1"))
#extract data with synthetic data points and drop extra column
border_smote_data<-border_smote_data$data
border_smote_data$class<-NULL
#change PROF_LEVEL to factor to train model
border_smote_data$PROF_LEVEL<-ifelse(border_smote_data$PROF_LEVEL==0,"Not Proficient","Proficient")
border_smote_data$PROF_LEVEL<-as.factor(border_smote_data$PROF_LEVEL)
#synthetic data gives CHARTER values between 0 and 1
#changing to factor would create incorrect levels.
# treat charter as numeric
#border_smote_data$CHARTER<-as.character(border_smote_data$CHATER)
#border_smote_data$CHARTER<-as.factor(border_smote_data$CHATER)
#train SVM on borderline smote data
svm_border <- svm(as.factor(PROF_LEVEL) ~ ., data = border_smote_data, kernel="polynomial", degree=2, cost=5, probability=TRUE, cross=10)
#change CHARTER to numeric in test data
test$CHARTER<-as.character(test$CHARTER)
test$CHARTER<-as.numeric(test$CHARTER)
#predict test observations
border_pred<-predict(svm_border, test[,-1], probability = T)
#confusion matrix
border_confmat <- table(true = test[,1],pred = border_pred)
write.csv(border_confmat, "border_confmat.csv", row.names = T)
##calculate gmean on test data for SVM trained on borderline SMOTE
border_gmean<-gmean(border_confmat)
##calculate precision on test data for SVM trained on borderline SMOTE
border_precision<-precision(border_confmat)
##calculate recall on test data for SVM trained on borderline SMOTE
border_recall<-recall(border_confmat)
##calculate fmeasure on test data for SVM trained on borderline SMOTE
border_F<-fmeasure(border_confmat)
# plot ROC for SVM trained on borderline SMOTE
write.csv(attr(border_pred,"probabilities"), "border_pred.csv", row.names = F)
svm_rocr<-prediction(attr(border_pred,"probabilities")[,1], test[,1] == "Proficient")
svm_perf<-performance(svm_rocr, measure = "tpr", x.measure = "fpr")
plot(svm_perf,add=TRUE,col="BROWN")
##calculate AUC on test data for SVM trained on borderline SMOTE
border_auc<-as.numeric(performance(svm_rocr, measure = "auc", x.measure = "cutoff")@ y.values)
write.csv(border_auc, "border_auc.csv", row.names = F)
###################################
#ANALYSIS OF PERFORMANCE: ADASYN
###################################
#create balanced ADASYN data ('imb' still carries the numeric PROF_LEVEL and
#CHARTER columns produced by the borderline-SMOTE section above)
adasyn_data<-ADAS(imb,imb$PROF_LEVEL,K=5)
#extract data with synthetic observations
adasyn_data<-adasyn_data$data
#drop extra column
adasyn_data$class<-NULL
#change PROF_LEVEL to factor
adasyn_data$PROF_LEVEL<-ifelse(adasyn_data$PROF_LEVEL==0,"Not Proficient","Proficient")
adasyn_data$PROF_LEVEL<-as.factor(adasyn_data$PROF_LEVEL)
#train model on ADASYN data
svm_adasyn <- svm(as.factor(PROF_LEVEL) ~ ., data = adasyn_data, kernel="polynomial", degree=2, cost=5, probability=TRUE, cross=10)
#predict test observations
adasyn_pred<-predict(svm_adasyn, test[,-1], probability = T)
#confusion matrix
adasyn_confmat <- table(true = test[,1],pred = adasyn_pred)
write.csv(adasyn_confmat, "adasyn_confmat.csv", row.names = T)
##calculate gmean on test data for SVM trained on ADASYN
adasyn_gmean<-gmean(adasyn_confmat)
##calculate precision on test data for SVM trained on ADASYN
adasyn_precision<-precision(adasyn_confmat)
##calculate recall on test data for SVM trained on ADASYN
adasyn_recall<-recall(adasyn_confmat)
##calculate fmeasure on test data for SVM trained on ADASYN
adasyn_F<-fmeasure(adasyn_confmat)
##plot ROC for SVM trained on ADASYN
write.csv(attr(adasyn_pred,"probabilities"), "adasyn_pred.csv", row.names = F)
svm_rocr<-prediction(attr(adasyn_pred,"probabilities")[,1], test[,1] == "Proficient")
svm_perf<-performance(svm_rocr, measure = "tpr", x.measure = "fpr")
plot(svm_perf,add=TRUE,col="black")
##calculate AUC on test data for SVM trained on ADASYN
adasyn_auc<-as.numeric(performance(svm_rocr, measure = "auc", x.measure = "cutoff")@ y.values)
write.csv(adasyn_auc, "adasyn_auc.csv", row.names = F)
#########################################
# ANALYSIS OF PERFORMANCE: SAFE-LEVEL
#########################################
#Safe Level SMOTE
sl_data<-SLS(imb,imb$PROF_LEVEL,K=5, C=4, dupSize = 0)
#extract data with synthetic data points
sl_data<-sl_data$data
sl_data$class<-NULL
#change PROF_LEVEL to factor
sl_data$PROF_LEVEL<-ifelse(sl_data$PROF_LEVEL==0,"Not Proficient","Proficient")
sl_data$PROF_LEVEL<-as.factor(sl_data$PROF_LEVEL)
#train model on safe-level data
svm_sl <- svm(as.factor(PROF_LEVEL) ~ ., data = sl_data, kernel="polynomial", degree=2, cost=5, probability=TRUE, cross=10)
#predict observations from test data
sl_pred<-predict(svm_sl, test[,-1], probability = T)
#confusion matrix
sl_confmat <- table(true = test[,1],pred = sl_pred)
write.csv(sl_confmat, "sl_confmat.csv", row.names = T)
##calculate gmean on test data for SVM trained on safelevel
sl_gmean<-gmean(sl_confmat)
##calculate precision on test data for SVM trained on safelevel
sl_precision<-precision(sl_confmat)
##calculate recall on test data for SVM trained on safelevel
sl_recall<-recall(sl_confmat)
##calculate fmeasure on test data for SVM trained on safelevel
sl_F<-fmeasure(sl_confmat)
##Plot ROC for SVM trained on safelevel
write.csv(attr(sl_pred,"probabilities"), "sl_pred.csv", row.names = F)
svm_rocr<-prediction(attr(sl_pred,"probabilities")[,1], test[,1] == "Proficient")
svm_perf<-performance(svm_rocr, measure = "tpr", x.measure = "fpr")
plot(svm_perf,add=TRUE,col="yellow")
##calculate AUC on test data for SVM trained on safelevel
sl_auc<-as.numeric(performance(svm_rocr, measure = "auc", x.measure = "cutoff")@ y.values)
write.csv(sl_auc, "sl_auc.csv", row.names = F)
####################################
# COMPARISON OF METRICS
####################################
#ADD LEGEND TO PLOT (the ROC curves accumulated by the sections above)
legend("bottomright", legend=c("Original", "Rand. Oversamp.",
"Rand. Under", "SMOTE", "Borderline",
"ADASYN", "Safe-Level"),
col=c("RED", "BLUE", "GREEN", "PURPLE",
"BROWN", "BLACK", "YELLOW"),cex=0.8, lty=1)
#vector of gmeans
# NOTE(review): the 'red' entry in each col vector below presumably
# highlights a method of interest for that metric -- confirm which.
gmeans<-c(imb_gmean, bal_over_gmean,
bal_under_gmean, smote_gmean,
border_gmean, adasyn_gmean, sl_gmean)
#labels
methods<-c("Original", "Rand. Oversamp.",
"Rand. Under", "SMOTE", "Borderline",
"ADASYN", "Safe-Level")
#plot gmeans for all methods
gmean_plot<-barplot(gmeans, ylim = c(0,1), ylab = "G-Mean")
axis(1, at=gmean_plot, labels=methods, tick=FALSE,
las=2, line=-0.5, cex.axis=0.7)
text(x = gmean_plot, y = gmeans, label = round(gmeans,3), pos = 3,
cex = 0.7, col = c("black","red","black",
"black","black","black",
"black"))
#vector of precisions
precisions<-c(imb_precision, bal_over_precision,
bal_under_precision, smote_precision,
border_precision, adasyn_precision, sl_precision)
#plot precision for all methods
precision_plot<-barplot(precisions, ylim = c(0,1), ylab = "Precision")
axis(1, at=precision_plot, labels=methods,
tick=FALSE, las=2, line=-0.5, cex.axis=0.7)
text(x = precision_plot, y = precisions, label = round(precisions,3),
pos = 3,cex = 0.7, col = c("red", "black","black",
"black","black", "black",
"black"))
#vector of recall values
recalls<-c(imb_recall, bal_over_recall,
bal_under_recall, smote_recall,
border_recall, adasyn_recall, sl_recall)
#plot of recall
recall_plot<-barplot(recalls, ylim = c(0,1.1), ylab = "Recall")
axis(1, at=recall_plot, labels=methods,
tick=FALSE, las=2, line=-0.5, cex.axis=0.7)
text(x = recall_plot, y = recalls, label = round(recalls,3),
pos = 3,cex = 0.7, col = c("black","red","black",
"black","black", "black",
"black"))
#Vector of fmeasures
Fs<-c(imb_f_measure, bal_over_F,
bal_under_F, smote_F,
border_F, adasyn_F, sl_F)
#plot of fmeasures
F_plot<-barplot(Fs, ylim = c(0,0.8), ylab = "F-Measure")
axis(1, at=F_plot, labels=methods,
tick=FALSE, las=2, line=-0.5, cex.axis=0.7)
text(x = F_plot, y = Fs, label = round(Fs,3),
pos = 3,cex = 0.7, col = c("black","black",
"black","red","black", "black",
"black"))
#vector of AUCs
AUCs<-c(imb_auc, bal_over_auc, bal_under_auc,
smote_auc, border_auc, adasyn_auc,sl_auc)
#plot of AUCs
AUC_plot<-barplot(AUCs, ylim = c(0,1.2), ylab = "AUC")
axis(1, at=AUC_plot, labels=methods,
tick=FALSE, las=2, line=-0.5, cex.axis=0.7)
text(x = AUC_plot, y = AUCs, label = round(AUCs,3),
pos = 3,cex = 0.7, col = c("black","red","black",
"black","black", "black",
"black"))
|
5e9ecf01dda29a1b32e7b3c23da91fd386c6208a | 2fc6f80d7c41350c189d468354b7016d582b606b | /Association Rule/movies_rule.R | de56461050481777ff01fa37ac840658b559cab8 | [] | no_license | raouday79/Data-Analysis | 0098e5a41d154036004332de8f740c7cb0634290 | e25506c4fbe3f27815286463cfe1d3e45a79ceb5 | refs/heads/master | 2020-11-27T13:59:06.596620 | 2020-06-24T19:32:59 | 2020-06-24T19:32:59 | 229,473,016 | 2 | 0 | null | null | null | null | UTF-8 | R | false | false | 1,150 | r | movies_rule.R | #Dataset=groceries.csv
# Association-rule mining on a movie-watching data set using arules/apriori.
# NOTE(review): the original header said "Dataset=groceries.csv", but the
# commented-out name below suggests a movies data set is expected.
data = read.csv(file.choose())
data=data[-1,]
View(data)
data = data[,6:15]
View(data)
str(data)
#Converting into factor
data[c(1:10)] = lapply(data[c(1:10)], factor)
#name <- "movies_data.csv"
#write.csv(data,file=name)
#We need data in sparse matrix form
#Read the data set as transactions.
#Using the arules library
#install.packages("arules")
library(arules)
library(arulesViz)
trans = as(data,"transactions")
#dataset = read.transactions(name,cols =2 , sep=",",rm.duplicates = TRUE)
View(trans)
summary(trans)
#Plot showing the most frequent items
itemFrequencyPlot(x=trans,topN=10)
#Building Rule
#Support of 2% (0.02)
rule = apriori(data=trans,parameter = list(support=0.02,confidence=0.3))
#Inspecting the rules
inspect(sort(rule,by="lift")[1:4])
plot(rule)
#Support of 50% (0.5)
rule2 = apriori(data=trans,parameter = list(support=0.5,confidence=0.4))
#23550 rules
#Inspecting the rules
inspect(sort(rule2,by="lift")[1:4])
plot(rule2)
#graph plot of rule
plot(rule2,method="graph",max=10)
#people who watch harry potter 1 also watch harry potter 2; vice versa is also true from the above rule.
|
123dcb4a65a5b914e38678d9dbea1cbda80b7f36 | ffdea92d4315e4363dd4ae673a1a6adf82a761b5 | /data/genthat_extracted_code/knitr/examples/kable.Rd.R | e93a4fc9ed4156a36c55d8e7b0f8be1e497123b3 | [] | no_license | surayaaramli/typeRrh | d257ac8905c49123f4ccd4e377ee3dfc84d1636c | 66e6996f31961bc8b9aafe1a6a6098327b66bf71 | refs/heads/master | 2023-05-05T04:05:31.617869 | 2019-04-25T22:10:06 | 2019-04-25T22:10:06 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 1,524 | r | kable.Rd.R | library(knitr)
# Appears to be auto-extracted example code from the kable() help page
# (knitr); the "### Name/Title/Aliases" header is the standard R example
# extraction format.
### Name: kable
### Title: Create tables in LaTeX, HTML, Markdown and reStructuredText
### Aliases: kable

### ** Examples

kable(head(iris), format = "latex")
kable(head(iris), format = "html")
kable(head(iris), format = "latex", caption = "Title of the table")
kable(head(iris), format = "html", caption = "Title of the table")
# use the booktabs package
kable(mtcars, format = "latex", booktabs = TRUE)
# use the longtable package
kable(matrix(1000, ncol = 5), format = "latex", digits = 2, longtable = TRUE)
# change LaTeX default table environment
kable(head(iris), format = "latex", caption = "My table", table.envir = "table*")
# add some table attributes
kable(head(iris), format = "html", table.attr = "id=\"mytable\"")
# reST output
kable(head(mtcars), format = "rst")
# no row names
kable(head(mtcars), format = "rst", row.names = FALSE)
# R Markdown/Github Markdown tables
kable(head(mtcars[, 1:5]), format = "markdown")
# no inner padding
kable(head(mtcars), format = "markdown", padding = 0)
# more padding
kable(head(mtcars), format = "markdown", padding = 2)
# Pandoc tables
kable(head(mtcars), format = "pandoc", caption = "Title of the table")
# format numbers using , as decimal point, and ' as thousands separator
x = as.data.frame(matrix(rnorm(60, 1e+06, 10000), 10))
kable(x, format.args = list(decimal.mark = ",", big.mark = "'"))
# save the value
x = kable(mtcars, format = "html")
cat(x, sep = "\n")
# can also set options(knitr.table.format = 'html') so that the output is HTML
|
30ad501bf349dd7cee82529ae974bf44c820c460 | ae9970a483fa3023732ab726f12354f4d2aa9911 | /pH descision tree model.R | c2aaa289d45fdee89f928ab8a1ffdd584f8b1ec2 | [] | no_license | aj-sykes92/pH-optimisation-arable | 1460fb00c89de3c6e0c96724edfb28a4ea0788e0 | 09ba5d0355b1b20aa0dae855efeceabe433dad2b | refs/heads/master | 2023-01-21T12:40:16.379472 | 2020-12-05T15:51:30 | 2020-12-05T15:51:30 | 211,346,121 | 0 | 1 | null | null | null | null | UTF-8 | R | false | false | 2,083 | r | pH descision tree model.R | # script to be run following main model script [GIS pH analysis (all crops) v4.R]
library(fastDummies)
library(rpart)
# NOTE(review): this script also uses %>%, filter/mutate/select, tibble and
# str_* functions, so it assumes the tidyverse is already attached by the
# main model script it follows.
# load workspace
load("Full model output df.RData")
# drop out that one row where N2O model misfired
Dat_main <- Dat_main %>% filter(!is.na(Abatement))
# pull out primary data and classifier for simplified decision tree model
Dat_model <- Dat_main %>%
mutate(has_abatement = as.numeric(GHG_balance <= -0.1),
is_cost_effective = as.numeric(MAC <= 66.1),
#is_cost_effective = as.numeric(MAC <= 0),
has_ce_abatement = as.numeric(has_abatement + is_cost_effective == 2)) %>%
select(Sand:pH, Crop, Yield_tha, has_ce_abatement)
# one-hot encode crops
Dat_model <- Dat_model %>%
mutate(Crop = Crop %>% str_replace_all("\\W", "") %>% str_to_lower()) %>%
dummy_cols() %>%
select(Sand, Clay, BD, OC, pH, Yield_tha, Crop_barley:Crop_wheat, has_ce_abatement) # dropping raw Crop column; NOTE(review): original comment claimed Clay is dropped too, but Clay is still selected
# switch crops to logical
# NOTE(review): funs() is deprecated in dplyr; modern form would be across()/~as.logical(.)
Dat_model <- Dat_model %>%
mutate_at(vars(Crop_barley:Crop_wheat), funs(as.logical(.)))
# encode y as factor
Dat_model <- Dat_model %>%
mutate(has_ce_abatement = as.factor(has_ce_abatement))
# split datasets to train and test (70/30 split via setdiff on rows)
set.seed(260592)
Dat_train <- Dat_model %>%
sample_frac(0.7, replace = F)
Dat_test <- setdiff(Dat_model, Dat_train)
# create classifier
control <- rpart.control(minsplit = 300, minbucket = 100, maxdepth = 10)
classifier <- rpart(has_ce_abatement ~ ., data = Dat_train, control = control)
# predictions (class probabilities; column "1" is the positive class)
ypred <- predict(classifier, newdata = Dat_test[-ncol(Dat_test)])
preds <- tibble(actual = Dat_test$has_ce_abatement, predict_prob = ypred) %>%
mutate(predict_class = as.numeric(predict_prob[, "1"] >= 0.5))
# confusion matrix
confmat <- table(preds$actual, preds$predict_class) # preds across top, actual down side
print(confmat)
(confmat[1, 1] + confmat [2, 2]) / sum(confmat) # prediction accuracy
confmat[2, 1] / sum(confmat) # false negative
confmat[1, 2] / sum(confmat) # false positive
# plot decision tree
par(mar = c(0, 0, 0, 0))
plot(classifier)
text(classifier)
|
a30392b3edc1e703fa550f22db207e62c2f46ef3 | 0c9e07287c0e5eb18c3fc1bed6b4900a6e1c0350 | /RCpp LF-RVgen/Main.R | 7d99ffc08f85e31b88c6ec9b2a217596a06126b9 | [] | no_license | zhenghannan/Rhee-Glynn-2013 | c7d5b24327eff2785b7960aca69a170aa63e3779 | 5cbfdbf4d53a69ef9b7dce71116ba6172d0fdf69 | refs/heads/main | 2023-02-27T07:10:35.740641 | 2021-02-05T03:39:44 | 2021-02-05T03:39:44 | 336,152,485 | 1 | 0 | null | null | null | null | UTF-8 | R | false | false | 1,703 | r | Main.R | # This code compute the likelihood function
setwd("~/Desktop/RCpp LF-RVgen")
source("DeltaRcpp.R")
source("X_generation.R")
rho=c(0,0)
ell=40
Delta=1/12
T=4
theta=c(0.05,-0.5,0.1,0.2,-0.5,0.2,25,-0.01,0.02,0.02)
#theta=c(0.05,-0.5,0.1,0.2,-0.5,0.2,25,0,0,-0.01,0.02,0,1,0.02)
# First example with 3 points
# X = matrix(0,2,3)
# X[,1] = c(0,0.1)
# X[,2] = c(0,0.11)
# X[,3] = c(-0.3,0.17)
# Second example with more points
#X = matrix(0,2,61)
#X[2,] = seq(from=0.1, to=0.4, by = 0.005)
X=gen_X(theta,T,1/12,c(4.5,0.1))
N=5 #This is the max N
M = 20000 #MC simulations
N2 = 5 # discretization steps after last jump time
d = nrow(X)
Nrv = gen_N(N,M)
Npois = gen_P(Delta,M,ell)
Marks = gen_Marks(theta,M,Npois)
W1 = gen_Norm1(N,M,Npois,Nrv,d)
W3 = gen_Norm3(N2,M,d)
#Monte-Carlo part
#start=Sys.time()
# Compute the log-likelihood function using the R-C++ function
#LF = LF(M,Delta,N,theta,rho,ell,X,N2,Nrv,Npois,Marks,W1,W3)
# Compute the log-likelihood function using the C++ function
LF = LFcpp(ubounds,lbounds,M,Delta,N,theta,rho,ell,X,N2,Nrv,Npois,Marks,W1,W3)
end=Sys.time()
time1=end-start
lbounds = c(-0.2,-1,0 ,0 ,-1, 0,0,-0.05,0 ,0)
ubounds = c(0.2, 1,0.5,0.5,1 ,0.5,50,0.05,0.1,0.1 )
# Objective for the bounded optimiser: negative simulated log-likelihood at
# theta_hat (fminbnd minimises, so the log-likelihood is negated). Any
# non-finite value from a failed evaluation is replaced by a large penalty
# so the optimiser steps away from that region.
#
# @param theta_hat candidate parameter vector (same layout as 'theta')
# @return scalar objective value (negative log-likelihood, or 1e7 penalty)
fr <- function(theta_hat) {
  logllf = -1 * LFcpp(ubounds, lbounds, M, Delta, N, theta_hat, rho, ell, X, N2, Nrv, Npois, Marks, W1, W3)
  # is.finite() also catches NaN and +/-Inf, which the original is.na()
  # check missed and which would break the optimiser.
  if (!is.finite(logllf)) {
    logllf = 10000000
  }
  logllf
}
# Start the optimiser from the true parameters perturbed by 20%.
theta_false=theta*1.2
# mll=optim(par = theta_false, fn = fr, method="Nelder-Mead", control = list(fnscale=-1,maxit=1000,REPORT=1))
#
# print(c(mll$value, mll$counts, mll$par))
# NOTE(review): no library(pracma) call is visible here, and pracma::fminbnd
# is a one-dimensional (Brent) optimiser -- confirm which fminbnd this is
# (possibly defined in one of the sourced files) before trusting results.
mll=fminbnd(fun = fr,x0 = theta_false,xmin = lbounds,xmax = ubounds,options = list(MaxIter=100,MaxFunEvals=100000000))
|
b450fc837838f962d705cd0536a579e43b56e2c9 | 8d29c9f8faa03eb55764ac9d2c60499c6b16b48c | /man/plotFinalClasses.Rd | 61c48b16e891b8aff5a1016d5aaa53e37fbee5fd | [] | no_license | jsemple19/EMclassifieR | fd2671a5be8ac97cc67e20c94afe97223f44b0ba | a3626d02d5c73046073f04dbdba4b246a7278bcc | refs/heads/master | 2022-08-10T06:27:20.721490 | 2022-08-08T11:34:43 | 2022-08-08T11:34:43 | 198,430,374 | 0 | 0 | null | null | null | null | UTF-8 | R | false | true | 1,669 | rd | plotFinalClasses.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/EMbasic.R
\name{plotFinalClasses}
\alias{plotFinalClasses}
\title{Plot reads and class means with final classification}
\usage{
plotFinalClasses(
dataOrderedByClass,
numClasses,
allClassMeans,
outFileBase,
outPath,
xRange,
myXlab,
featureLabel,
baseFontSize,
figFormat = "png",
classesToPlot = NULL
)
}
\arguments{
\item{dataOrderedByClass}{A matrix of methylation or bincount values (reads x position) that have been ordered by class. The assigned class (e.g. "__class1") has been appended to the read names.}
\item{numClasses}{An integer indicating the number of classes to learn}
\item{allClassMeans}{A long data frame of methylation or bincount values with columns for position, methylation frequency (methFreq), class and replicate.}
\item{outFileBase}{A string that will be used in the filenames and titles of the plots produced}
\item{outPath}{path to directory where plots will be saved}
\item{xRange}{A vector of the first and last coordinates of the region to plot (default is c(-250,250))}
\item{myXlab}{A label for the x axis (default is "CpG/GpC position")}
\item{featureLabel}{A label for a feature you want to plot, such as the position of the TSS (default="TSS")}
\item{baseFontSize}{The base font for the plotting theme (default=12 works well for 4x plots per A4 page)}
\item{figFormat}{format of output figures. Should be one of "png" or "pdf"}
\item{classesToPlot}{A numerical vector indicating which classes to plot (default NULL will plot all classes)}
}
\value{
None
}
\description{
Plot reads and class means with final classification
}
|
20ab2c4aa2d0d24781cb386b3a08f01b3eab5bd2 | d45e3fff54f9c53ddae1d3ab5056d9157f611edd | /plot2.R | 3d5879d1f79f0f0b2f472c762484ec99846b6b31 | [] | no_license | Benkahila/ExData_Plotting1 | 72f2d32476c7452fada4046b1c577304083084b7 | 33e0eeb91b61c75340a05ebb1d313689499642fb | refs/heads/master | 2020-04-14T18:02:07.640086 | 2019-01-03T18:36:30 | 2019-01-03T18:36:30 | 164,003,747 | 0 | 0 | null | 2019-01-03T17:35:25 | 2019-01-03T17:35:25 | null | UTF-8 | R | false | false | 665 | r | plot2.R | X <- read.csv("C:/Users/benkahila/Downloads/exdata_data_household_power_consumption/household_power_consumption.txt", header=TRUE , sep=";")
# Keep only the two target days (1-2 Feb 2007) and plot global active power.
xs <- subset(X, X$Date == "1/2/2007" | X$Date == "2/2/2007")
class(xs$Date)  # interactive inspection leftover
class(xs$Time)  # interactive inspection leftover
xs$Date <- as.Date(xs$Date, format="%d/%m/%Y")
# NOTE(review): strptime() yields POSIXlt, and the two assignments below
# write formatted *character* strings back into that column (rows 1:1440 =
# day one, 1441:2880 = day two); the plot then relies on how plot() coerces
# that column -- verify the intended axis behaviour.
xs$Time <- strptime(xs$Time, format="%H:%M:%S")
xs[1:1440,"Time"] <- format(xs[1:1440,"Time"],"2007-02-01 %H:%M:%S")
xs[1441:2880,"Time"] <- format(xs[1441:2880,"Time"],"2007-02-02 %H:%M:%S")
xs$Global_active_power <- as.numeric(as.character(xs$Global_active_power))
plot(xs$Time, xs$Global_active_power ,type="l" , xlab="" ,
ylab="Global Active Power (kilowatts)")
|
ce81baaf6712eeabe87c10f84899096a34a55977 | f8559eef1b66e2307113046dd9b584fe3a432dc8 | /rasc/with joint proposal/check CalcInvasionPressure function.R | db940b651d1334e1185596889b38e63dc0dda65b | [] | no_license | drvalle1/git_leish_PE | 51a76c25c87eaea842cd1c6b1c5fd08d761ca580 | 936529f320bc3beb783b5ca7c8b6d4ce83419ad3 | refs/heads/master | 2022-02-18T05:50:53.888426 | 2022-02-01T13:29:41 | 2022-02-01T13:29:41 | 166,860,641 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 906 | r | check CalcInvasionPressure function.R | rm(list=ls(all=TRUE))
# Cross-check the C++ CalcInvasionPressure() against a pure-R reimplementation.
library('Rcpp')
#read relevant functions
setwd('U:\\GIT_models\\git_leish_PE')
sourceCpp('aux_PE_leish.cpp')
#get incidence (locations x years matrix)
setwd('U:\\anaia\\simulated data')
dat=data.matrix(read.csv('simulated data.csv',as.is=T))
nloc=nrow(dat)
nanos=ncol(dat)
#get distance matrix (drop the row-name column 'X')
setwd('U:\\anaia\\derived data')
dist=read.csv('matriz distancia.csv',as.is=T)
rownames(dist)=dist$X
ind=which(colnames(dist)=='X')
dist=dist[,-ind]
# Inverse distances; zero the diagonal so a location does not contribute
# pressure to itself.
OneOverDist=1/data.matrix(dist)
diag(OneOverDist)=0
SomaOneOverDist=rowSums(OneOverDist)
# C++ result
IP=CalcInvasionPressure(z=dat, OneOverDist=OneOverDist,
nanos=nanos, nlocs=nloc,
SomaOneOverDist=SomaOneOverDist)
# R reimplementation: inverse-distance-weighted mean of each year's status
IP1=matrix(NA,nloc,nanos)
for (i in 1:nanos){
z1=matrix(dat[,i],nloc,nloc,byrow=T)
tmp=rowSums(OneOverDist*z1)
tmp1=tmp/SomaOneOverDist
IP1[,i]=tmp1
}
# If the implementations agree, all points fall on the red 1:1 line.
plot(IP,IP1)
rango=c(-10,10)
lines(rango,rango,col='red')
99fd5f6860ec20962478a88f4af3f5519540347a | 87b966e5e6a2d1fb7afca8c7623ae617ebeb35ce | /hab_sel_spatial_join.R | db6999ef9923a8a1e83407263d8b07c8dbd40e85 | [] | no_license | appelc/cara_thesis | f7376659bd2015d6651a8b17968a9b8b868cc8c8 | 12fcb0d2041e7ebb96e609f5dddeb8a8ed0094aa | refs/heads/master | 2022-05-31T04:23:55.400474 | 2022-05-18T23:21:26 | 2022-05-18T23:21:26 | 55,104,238 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 3,098 | r | hab_sel_spatial_join.R | ##########################
## Porcupine habitat selection
## Try spatial join in R to get used/available PER ANIMAL
##########################
library(rgdal)
library(sp)
library(googlesheets)
library(maps)
library(httr)
library(readr)
library(devtools)
devtools::install_github("jennybc/googlesheets")
## load location points
## pull out only collared animals, keep only visual/patch/LOAS, and format correctly
my_sheets <- gs_ls()
locs <- gs_title("Porc relocation data")
my_sheets
porc.locs.all <- data.frame(gs_read(ss=locs, ws="Relocations", is.na(TRUE)))
## can add "range=cell_cols(1:12)" if I don't want everything
colnames(porc.locs.all)
colnames(porc.locs.all) <- c("date", "id", "sess", "type", "time", "az", "utm_e", "utm_n",
"obs", "loc", "pos", "notes", "xvar", "yvar", "cov", "error")
## c <- grepl('^1', porc.locs.all$id) ## ask which ones start with "1" (TRUE/FALSE)
## then: porc.locs <- porc.loc.all[c,] ## so only keep the rows where it's true
## OR: change "type" for new porc to something like "N" ... go with this for now
porc.locs <- subset(porc.locs.all, type %in% c("V","V*","P","P*","L"))
unique(porc.locs$id)
## gs_read_csv correctly reads numeric and characters, but still need to format date
porc.locs$date <- as.Date(porc.locs$date, "%m/%d/%Y")
## now make it spatial points data frame
locs <- SpatialPointsDataFrame(data.frame(porc.locs$utm_e, porc.locs$utm_n),
data=data.frame(porc.locs),
proj4string=CRS("+proj=utm +zone=10 +datum=NAD83"))
locs@data ## can see the attributes
locs@coords ## here are the coordinates
## load veg polygons shapefile
?readOGR
veg <- readOGR(dsn="D:/GIS DATA/Veg map", layer="Veg categories CA", verbose=TRUE)
proj4string(veg) <- proj4string(locs) ## it's almost the same but not exactly; needs to be for "over"
is(veg) # it's a spatial polygons data frame
plot(veg) # cool!
## do spatial join using package "sp"
?over
locs@data$class <- over(locs, veg)$Class
locs@data$class2 <- over(locs, veg)$Class_2
locs@data$area <- over(locs, veg)$Area_1
head(locs)
View(locs)
plot(coordinates(locs))
map("world", region="usa", add=TRUE)
plot(veg, border="green", add=TRUE)
legend("topright", cex=0.85,
c("Bear in park", "Bear not in park", "Park boundary"),
pch=c(16, 1, NA), lty=c(NA, NA, 1),
col=c("red", "grey", "green"), bty="n")
title(expression(paste(italic("Ursus arctos"),
" sightings with respect to national parks")))
# now plot bear points with separate colors inside and outside of parks
points(bears[!inside.park, ], pch=1, col="gray")
points(bears[inside.park, ], pch=16, col="red")
## or, to keep everything in addition to Class (ID, Area_1, Class_2)
overlay <- cbind(locs, over(locs,veg)) # this didn't work...
head(overlay)
## great! need to figure out what to do with NAs
## (points not in a veg polygon, like Puck, Henrietta's capture, ?love, etc.)
## now can do a table to sum locations in each veg type per animal
## for design ii and iii analysis in adehabitatHS
|
63defc7ed5a48dd1567acfdf4f9a1351c72ee38b | c8fbb22479ded143cc2780c7faa7866f5f2fd609 | /Metagenomics/GenerateMatrices/HeatMap.R | 12d2656a5f53f1aff96851e8f68f62fadaa17c50 | [] | no_license | pranavvarmaraja/cmu-precollege | 56f5c77328c0872d4f82f5c2fc6bf5618fb75a84 | 460526f217b4e75a2d52df515c7ef74ab9f547b6 | refs/heads/master | 2023-01-04T10:16:59.785719 | 2020-11-03T19:07:15 | 2020-11-03T19:07:15 | 309,783,160 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 1,593 | r | HeatMap.R | library(ggcorrplot)
library(reshape)
library(stringr)
library(ggplot2)
path <- "C:/Users/megha/go/src/GenerateMatrices"
files <-list.files(normalizePath(path), pattern="txt")
for (file in files) {
if (grepl("Jaccard", file, fixed = TRUE) | grepl("Bray-Curtis", file, fixed = TRUE)) {
table <- read.table(file.path(path, file), sep="\t", header=TRUE)
cols <- colnames(table)
matrix <- as.matrix(table)
rownames(matrix) <- cols
co=melt(matrix)
head(co)
ggplot(co, aes(X1, X2)) + # x and y axes => Var1 and Var2
geom_tile(aes(fill = value)) + # background colours are mapped according to the value column
scale_fill_gradient2(low = "#6D9EC1",
mid = "white",
high = "#E46726",
midpoint = 0.5, limit= c(0,1.0)) +
theme(panel.grid.major.x=element_blank(), #no gridlines
panel.grid.minor.x=element_blank(),
panel.grid.major.y=element_blank(),
panel.grid.minor.y=element_blank(),
panel.background=element_rect(fill="white"), # background=white
axis.text.x = element_text(angle=90, hjust = 1,vjust=1,size = 12,face = "bold"),
plot.title = element_text(size=20,face="bold"),
axis.text.y = element_text(size = 12,face = "bold")) +
ggtitle("Distance Heat Map") +
theme(legend.title=element_text(face="bold", size=14)) +
scale_x_discrete(name="") +
scale_y_discrete(name="")
imageFilename <- str_replace(file, "txt", "png")
ggsave(imageFilename)
}
} |
8ad349ec1659da7019d5960b993f2a51f297d01e | 3d1c924a053862e6836374fb066b5685e5308370 | /BinaryClass_neuralnet.R | 35b4476844f40ecd441ac75653cc4dbfcb4949cb | [] | no_license | Murali423/Hand-Written-Digit-Recognition | cac46bf828191e9d5215158766366e9fb42b746d | 54852c5e9ff14a9b9acd6b61ff8d650e2786f707 | refs/heads/master | 2023-04-29T12:49:18.933653 | 2021-05-19T13:49:03 | 2021-05-19T13:49:03 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 4,012 | r | BinaryClass_neuralnet.R | # Build Neural Network for classification using neuralnet library.
rm(list=ls(all=TRUE))
# Set the working directory
setwd("C:/Users/gmanish/Dropbox/latest/openminds/slides/MachineLearning/7.ANNs/")
# Importing "data.csv" files's data into R dataframe using read.csv function.
data = read.csv(file="data.csv", header=TRUE, sep=",")
# Understand the structure the summary of the data using str and summary R commands
str(data)
summary(data)
# Using subset remove 'ID' and 'ZIP.Code' columns from the data
data = subset(data, select = -c(ID,ZIP.Code))
# Convert all the variables to appropriate type
# To numeric using as.numeric()
# To categoical using as.factor()
data$Education = as.factor(data$Education)
# R NN library takes only numeric attribues as input
# Convert all categorical attributes to numeric using appropriate technique. Hint: dummies
# Convert "Education" categorical attribute to numeric using dummy function in dummies R library
# Drop actual Education attribute from orginal data set
# Add created dummy Education variables to orginal data set
library(dummies)
education = dummy(data$Education)
data = subset(data, select=-c(Education))
data = cbind(data, education)
rm(education)
# Separate Target Variable and Independent Variables.
# In this case "Personal.Loan" is a target variable and all others are independent variable.
target_Variable = data$Personal.Loan
independent_Variables = subset(data, select = -c(Personal.Loan))
# Standardization the independent variables using decostand funcion in vegan R library
library(vegan)
# Note: To standardize the data using 'Range' method
independent_Variables = decostand(independent_Variables,"range")
data = data.frame(independent_Variables, Personal.Loan = target_Variable)
rm(independent_Variables, target_Variable)
# Use set.seed to get same test and train data
set.seed(123)
# Prepare train and test data in 70:30 ratio
num_Records = nrow(data)
# to take a random sample of 70% of the records for train data
train_Index = sample(1:num_Records, round(num_Records * 0.7, digits = 0))
train_Data = data[train_Index,]
test_Data = data[-train_Index,]
rm(train_Index, num_Records, data)
# See data distribution in response variable in both Train and Test data:
table(train_Data$Personal.Loan)
table(test_Data$Personal.Loan)
# Load neuralnet R library
library(neuralnet)
# Build a Neural Network having 1 hidden layer with 2 nodes
set.seed(1234)
nn = neuralnet(Personal.Loan ~ Age+Experience+Income+Family+CCAvg+Mortgage+
Securities.Account+CD.Account+Online+CreditCard+
Education1+Education2+Education3,
data=train_Data, hidden=2,linear.output = F)
# See covariate and result varaibls of neuralnet model - covariate mens the variables extracted from the data argument
out <- cbind(nn$covariate, nn$net.result[[1]])
head(out)
# Remove rownames and set column names
dimnames(out) = list(NULL,c
("Age","Experience","Income","Family","CCAvg","Mortgage",
"Securities.Account","CD.Account","Online","CreditCard",
"Education1","Education2", "Education3","nn_Output"))
# To view top records in the data set
head(out)
rm(out)
# Plot the neural network
plot(nn)
# Compute confusion matrix for train data.
#predicted = factor(ifelse(nn$net.result[[1]] > 0.5, 1, 0))
#conf_Matrix = table(train_Data$Personal.Loan, predicted)
# Remove target attribute from Test Data
test_Data_No_Target = subset(test_Data, select=-c(Personal.Loan))
# Predict
nn_predict <- compute(nn, covariate= test_Data_No_Target)
rm(test_Data_No_Target)
# View the predicted values
nn_predict$net.result
# Compute confusion matrix and accuracy
predicted = factor(ifelse(nn_predict$net.result > 0.5, 1, 0))
conf_Matrix<-table(test_Data$Personal.Loan, predicted)
sum(diag(conf_Matrix))/sum(conf_Matrix)*100
|
9c700ea7bdf6a3729f45e02b4c779b2044b230f7 | 3943ae90f392d4bf51667ba9939e6fd7f98e1bef | /imputation/knn_imputation.R | 2cabaa5160981aaf4f653a16d918e85bbb72e580 | [] | no_license | nmferraro5/correlation_outliers | 05503bfd5811a08c8c5588d7bf0b96b2e8cd8e02 | f9dd53966fc01e49037966dc694be7fb02513741 | refs/heads/master | 2020-07-31T15:14:06.059506 | 2020-06-05T18:28:00 | 2020-06-05T18:28:00 | 210,649,448 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 609 | r | knn_imputation.R | #!/usr/bin/env Rscript
# Load required packages
require(impute)
# Function to impute missing data using KNN and then estimate precision matrix
# Input: data matrix (rows = observations), k (number nearest neighbors),
# row max and col max missingness rates
# Output: estimated precision matrix after imputation
knn.impute <- function(data, impute_args, rmax = 0.999, cmax = 0.999){
k = impute_args['KNN.K']
new_data = impute.knn(data, k = k, rowmax = rmax, colmax = cmax)
covariance = cov(new_data$data)
precision = solve(covariance)
return(list(x = new_data$data, C = covariance, S = precision))
}
|
2538458dbc0d892fd9ce4496ec8ba13b3426f3fb | 87760ba06690cf90166a879a88a09cd2e64f3417 | /man/parse_index.Rd | a9cd237aa5a2d9ef5d5b4d5bb8fa5f66489d0fde | [
"MIT"
] | permissive | topepo/modeltime | 1189e5fe6c86ee3a70aec0f100387a495f8add5f | bff0b3784d1d8596aa80943b221eb621481534e1 | refs/heads/master | 2022-12-27T07:11:58.979836 | 2020-10-08T16:07:27 | 2020-10-08T16:07:27 | 289,933,114 | 1 | 0 | NOASSERTION | 2020-08-24T13:17:10 | 2020-08-24T13:17:10 | null | UTF-8 | R | false | true | 1,218 | rd | parse_index.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/dev-parse_index.R
\name{parse_index}
\alias{parse_index}
\alias{parse_index_from_data}
\alias{parse_period_from_index}
\title{Developer Tools for parsing date and date-time information}
\usage{
parse_index_from_data(data)
parse_period_from_index(data, period)
}
\arguments{
\item{data}{A data frame}
\item{period}{A period to calculate from the time index. Numeric values are returned as-is.
"auto" guesses a numeric value from the index. A time-based phrase (e.g. "7 days") calculates
the number of timestamps that typically occur within the time-based phrase.}
}
\value{
\itemize{
\item parse_index_from_data(): Returns a tibble containing the date or date-time column.
\item parse_period_from_index(): Returns the numeric period from a tibble containing the index.
}
}
\description{
These functions are designed to assist developers in extending the \code{modeltime}
package.
}
\examples{
library(dplyr)
library(timetk)
predictors <- m4_monthly \%>\%
filter(id == "M750") \%>\%
select(-value)
index_tbl <- parse_index_from_data(predictors)
index_tbl
period <- parse_period_from_index(index_tbl, period = "1 year")
period
}
|
174e9d73822fe76c3b3f74f4e371a8fbdb3ba196 | a68242075d7e6ab5f767e59b797354f3cd062f14 | /gen_data/gen_data_infos.R | c7ff97222b2b023c2ca705a4a9a2dec5e6ca2950 | [] | no_license | norival/internship_2017 | 5c464e71bbfb332ef9e07c4dd635d89679b8e611 | d6cabbdc65e4d8b780330ecd5eaaf7c49f7c285c | refs/heads/master | 2020-05-25T23:41:43.559454 | 2017-06-12T12:22:33 | 2017-06-12T12:23:06 | 82,567,635 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 2,698 | r | gen_data_infos.R | # ------------------------------------------------------------------------------
# generate various files to easily access informations from databases
# ------------------------------------------------------------------------------
source("functions/gen_data_infos.R")
# ------------------------------------------------------------------------------
# get cultures from carre.parc and year
files <- list.files("data/generated", "weeds",
full.names = TRUE)
tab <- matrix("", 0, 3)
all.sp <- character()
for (file in files) {
a <- read.csv(file, sep = ";", stringsAsFactors = FALSE)
colnames(a) <- tolower(gsub("é|è", "e", colnames(a)))
tab <- rbind(tab, cbind(a$year, a$carre.parc, a$crop.analyses))
all.sp <- c(all.sp, a$sp)
}
tab <- as.data.frame(tab, stringsAsFactors = FALSE)
colnames(tab) <- c("year", "carre.parc", "crop.analyses")
tab <- tab[!duplicated(paste0(tab$year, tab$carre.parc)),]
tab$crop.analyses <- tolower(tab$crop.analyses)
tab$crop.analyses[tab$crop.analyses == "maïs"] <- "maize"
tab$crop.analyses[tab$crop.analyses == "tournesol"] <- "sunflower"
tab$crop.analyses[tab$crop.analyses == "colza"] <- "osr"
tab$crop.analyses[tab$crop.analyses == "trèfle"] <- "trefle"
tab$crop.analyses[tab$crop.analyses == "luzerne"] <- "lucerne"
tab$crop.analyses[tab$crop.analyses == "prairie"] <- "grassl"
write.csv(tab, "data/generated/corres_parc_crop.csv", row.names = FALSE)
# ------------------------------------------------------------------------------
# get traits informations
# get infos on SLA, Plant Height and Seed Mass from table compiled by Bérenger
# Bourgeois.
# When dealing with non present species or genus only identified species, we
# take the average of sepcies of the same genus, if possible.
# read table
ref <- read.csv("data/raw/jauz_20170403_sp_with_traits.csv", sep = " ",
stringsAsFactors = FALSE)
colnames(ref)[1] <- "sp"
sp <- unique(all.sp)
# get species names to the same format as in the reference table
sp <- gsub("-", " ", sp)
sp <- gsub(" ", " ", sp)
sp <- toupper(sp)
# compute traits values using the 'get_trait_value' function
sla.val <- sapply(sp, get_trait_value, ref = ref, trait = "SLA")
sm.val <- sapply(sp, get_trait_value, ref = ref, trait = "SM")
ph.val <- sapply(sp, get_trait_value, ref = ref, trait = "PH")
# combine them in a single data frame
all.trait.val <- cbind.data.frame(sp = names(sla.val),
SLA = as.numeric(sla.val),
PH = as.numeric(ph.val),
SM = as.numeric(sm.val))
write.csv(all.trait.val, "data/generated/traits_val.csv", row.names = FALSE)
|
7f50c7c225ad07436604b794d55ca75da8e417f2 | 3a465af00567cb9c76210db2a7d1c451cd0d4ab1 | /greml_heritability_sex_adj_3.R | eaa53dfe46dd1372cfcff3424d6053a85a112113 | [] | no_license | edmondodriscoll/AAD_t1d | 39685563f07a6d3421d466060f30ad0fb2a6e0a8 | 54e1e2fd847e23ced1d15557b312a6f6ea04b2ee | refs/heads/master | 2020-12-14T00:31:53.031475 | 2019-07-01T10:27:53 | 2019-07-01T10:27:53 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 10,120 | r | greml_heritability_sex_adj_3.R | #greml_heritability_sex_adj_3.R
#estimating heritability of <7s, 7-13s and >13s:
library(snpStats)
library(annotSnpStats)
library(snpStatsWriter)
library(humarray)
library(gridExtra)
library(multinomRob)
library(ggplot2)
d<-"/well/todd/users/jinshaw/t1d_risk/immunochip/"
#read SNP and phenotype data:
load(file=paste0(d,"all_inds_unrel_postqc_3.RData"))
all<-all[rownames(all) %in% rownames(all@samples[!is.na(all@samples$sex) & all@samples$sex!=0,]),]
all@samples$onset<-as.numeric(all@samples$onset)
all@samples$group<-ifelse(all@samples$affected==1,0,
ifelse(all@samples$onset<7 & !is.na(all@samples$onset),1,
ifelse(all@samples$onset>=7 & all@samples$onset<13 & !is.na(all@samples$onset),2,
ifelse(all@samples$onset>=13 & !is.na(all@samples$onset),3,NA))))
rownames(all)<-paste0(all@samples$pedigree,".",all@samples$member)
u<-all[rownames(all) %in% rownames(all@samples[all@samples$group %in% c(0,1),]),]
m<-all[rownames(all) %in% rownames(all@samples[all@samples$group %in% c(0,2),]),]
o<-all[rownames(all) %in% rownames(all@samples[all@samples$group %in% c(0,3),]),]
writeit<-function(df,name){
samples<-df@samples
snps<-df@snps
write.plink(file.base=paste0("/well/todd/users/jinshaw/aad/under_7/",name,"_all_sexadj"),
snps=as(df,"SnpMatrix"),
pedigree=samples$pedigree,
id=samples$member,
father=samples$father,
mother=samples$mother,
sex=samples$sex,
phenotype=samples$affected,
chromosome=snps$chromosome,
genetic.distance=snps$cM,
position=snps$position,
allele.1=snps$allele.1,
allele.2=snps$allele.2)
samples$t1d<-ifelse(samples$affected==2,1,ifelse(samples$affected==1,0,NA))
write.table(samples[,c("pedigree","member","t1d")],file=paste0("/well/todd/users/jinshaw/aad/under_7/pheno_",name,"_sexadj"),
col.names=F, row.names=F, sep="\t",quote=F)
write.table(samples[,c("pedigree","member","PC1","PC2","PC3","PC4","PC5","PC6","PC7","PC8","PC9","PC10","sex")],
file=paste0("/well/todd/users/jinshaw/aad/under_7/covars_",name,"_sexadj"),col.names=F, row.names=F, sep="\t", quote=F)
system(paste0("plink --bfile /well/todd/users/jinshaw/aad/under_7/",name,
"_all_sexadj --indep-pairwise 1000 50 0.2 --out /well/todd/users/jinshaw/aad/under_7/",name,"_all_sexadj_pruned"))
system(paste0("plink --bfile /well/todd/users/jinshaw/aad/under_7/",name,
"_all_sexadj --exclude /well/todd/users/jinshaw/aad/under_7/",name,"_all_sexadj_pruned.prune.out --make-bed --out /well/todd/users/jinshaw/aad/under_7/",
name,"_all_sexadj_pruned"))
}
writeit(u,"under_7")
writeit(m,"mid_range")
writeit(o,"over_13")
dogreml<-function(name){
sink(file=paste0("/users/todd/jinshaw/programs/aad/under_7/greml/",name,"_sexadj.sh"))
cat(paste0("/apps/well/gcta/1.91.5beta/gcta_1.91.5beta/gcta64 --bfile /well/todd/users/jinshaw/aad/under_7/",name,
"_all_sexadj --autosome --maf 0.01 --make-grm --out /well/todd/users/jinshaw/aad/under_7/grm_",name,"_sexadj --thread-num 10\n"))
cat(paste0("/apps/well/gcta/1.91.5beta/gcta_1.91.5beta/gcta64 --grm /well/todd/users/jinshaw/aad/under_7/grm_",name,"_sexadj",
" --pheno /well/todd/users/jinshaw/aad/under_7/pheno_",name,"_sexadj",
" --reml --qcovar /well/todd/users/jinshaw/aad/under_7/covars_",name,"_sexadj",
" --prevalence 0.004 --out /well/todd/users/jinshaw/aad/under_7/outreml_",name,"_sexadj --thread-num 10\n"))
sink()
system(paste0("bash /users/todd/jinshaw/programs/aad/under_7/greml/",name,"_sexadj.sh"))
}
dogreml("under_7")
dogreml("mid_range")
dogreml("over_13")
dogremlprev<-function(name,prev){
sink(file=paste0("/users/todd/jinshaw/programs/aad/under_7/greml/",name,"_",prev,"_sexadj.sh"))
cat(paste0("/apps/well/gcta/1.91.5beta/gcta_1.91.5beta/gcta64 --grm /well/todd/users/jinshaw/aad/under_7/grm_",name,"_sexadj",
" --pheno /well/todd/users/jinshaw/aad/under_7/pheno_",name,"_sexadj",
" --reml --qcovar /well/todd/users/jinshaw/aad/under_7/covars_",name,"_sexadj",
" --prevalence ",prev," --out /well/todd/users/jinshaw/aad/under_7/outreml_",name,"_",prev,"_sexadj --thread-num 10\n"))
sink()
system(paste0("bash /users/todd/jinshaw/programs/aad/under_7/greml/",name,"_",prev,"_sexadj.sh"))
}
dogremlprev("under_7",0.005)
dogremlprev("mid_range", 0.002)
dogremlprev("mid_range", 0.003)
dogremlprev("over_13",0.002)
dogremlprev("over_13",0.003)
#and now excluding the MHC:
s<-all@snps
s<-s[!(s$chromosome==6 & s$position>25000000 & s$position<35000000),]
all<-all[,colnames(all) %in% rownames(s)]
u<-all[rownames(all) %in% rownames(all@samples[all@samples$group %in% c(0,1),]),]
m<-all[rownames(all) %in% rownames(all@samples[all@samples$group %in% c(0,2),]),]
o<-all[rownames(all) %in% rownames(all@samples[all@samples$group %in% c(0,3),]),]
writeit(u,"under_7nomhc")
writeit(m,"mid_rangenomhc")
writeit(o,"over_13nomhc")
dogreml("under_7nomhc")
dogreml("mid_rangenomhc")
dogreml("over_13nomhc")
dogremlprev("under_7nomhc",0.005)
dogremlprev("mid_rangenomhc", 0.002)
dogremlprev("mid_rangenomhc", 0.003)
dogremlprev("over_13nomhc",0.002)
dogremlprev("over_13nomhc",0.003)
#now generate table for supplementary in results:
sink(file="/well/todd/users/jinshaw/output/aad/under_7/greml/heritibilities_sexadj.txt")
cat(paste0("Disease prevalence (%) (<7,7-13,>13);<7 including HLA;7-13 including HLA;>13 including HLA;<7 excluding HLA;7-13 excluding HLA;>13 excluding HLA\n"))
addline<-function(prevs,und,mid,old, youngnomhc, midnomhc,oldnomhc){
l<-system(paste0("awk -F \'\t\' \'NR==8\' /well/todd/users/jinshaw/aad/under_7/outreml_",und,"_sexadj.hsq"),intern=T)
l<-strsplit(l,split="\t")
h<-data.frame(h=as.numeric(l[[1]][2]), se=as.numeric(l[[1]][3]))
h$lb<-h$h-(qnorm(0.975)*h$se)
h$ub<-h$h+(qnorm(0.975)*h$se)
l1<-system(paste0("awk -F \'\t\' \'NR==8\' /well/todd/users/jinshaw/aad/under_7/outreml_",mid,"_sexadj.hsq"),intern=T)
l1<-strsplit(l1,split="\t")
h1<-data.frame(h=as.numeric(l1[[1]][2]), se=as.numeric(l1[[1]][3]))
h1$lb<-h1$h-(qnorm(0.975)*h1$se)
h1$ub<-h1$h+(qnorm(0.975)*h1$se)
l2<-system(paste0("awk -F \'\t\' \'NR==8\' /well/todd/users/jinshaw/aad/under_7/outreml_",old,"_sexadj.hsq"),intern=T)
l2<-strsplit(l2,split="\t")
h2<-data.frame(h=as.numeric(l2[[1]][2]), se=as.numeric(l2[[1]][3]))
h2$lb<-h2$h-(qnorm(0.975)*h2$se)
h2$ub<-h2$h+(qnorm(0.975)*h2$se)
l3<-system(paste0("awk -F \'\t\' \'NR==8\' /well/todd/users/jinshaw/aad/under_7/outreml_",youngnomhc,"_sexadj.hsq"),intern=T)
l3<-strsplit(l3,split="\t")
h3<-data.frame(h=as.numeric(l3[[1]][2]), se=as.numeric(l3[[1]][3]))
h3$lb<-h3$h-(qnorm(0.975)*h3$se)
h3$ub<-h3$h+(qnorm(0.975)*h3$se)
l4<-system(paste0("awk -F \'\t\' \'NR==8\' /well/todd/users/jinshaw/aad/under_7/outreml_",midnomhc,"_sexadj.hsq"),intern=T)
l4<-strsplit(l4,split="\t")
h4<-data.frame(h=as.numeric(l4[[1]][2]), se=as.numeric(l4[[1]][3]))
h4$lb<-h4$h-(qnorm(0.975)*h4$se)
h4$ub<-h4$h+(qnorm(0.975)*h4$se)
l5<-system(paste0("awk -F \'\t\' \'NR==8\' /well/todd/users/jinshaw/aad/under_7/outreml_",oldnomhc,"_sexadj.hsq"),intern=T)
l5<-strsplit(l5,split="\t")
h5<-data.frame(h=as.numeric(l5[[1]][2]), se=as.numeric(l5[[1]][3]))
h5$lb<-h5$h-(qnorm(0.975)*h5$se)
h5$ub<-h5$h+(qnorm(0.975)*h5$se)
cat(paste0(prevs,";",round(h$h,digits=3), " (",round(h$lb,digits=3),", ",round(h$ub,digits=3),");",
round(h1$h,digits=3), " (",round(h1$lb,digits=3),", ",round(h1$ub,digits=3),");",
round(h2$h,digits=3), " (",round(h2$lb,digits=3),", ",round(h2$ub,digits=3),");",
round(h3$h,digits=3), " (",round(h3$lb,digits=3),", ",round(h3$ub,digits=3),");",
round(h4$h,digits=3), " (",round(h4$lb,digits=3),", ",round(h4$ub,digits=3),");",
round(h5$h,digits=3), " (",round(h5$lb,digits=3),", ",round(h5$ub,digits=3),")\n"))
}
addline("0.4,0.4,0.4","under_7","mid_range","over_13","under_7nomhc","mid_rangenomhc","over_13nomhc")
addline("0.5,0.3,0.3","under_7_0.005","mid_range_0.003","over_13_0.003","under_7nomhc_0.005","mid_rangenomhc_0.003","over_13nomhc_0.003")
addline("0.5,0.2,0.2","under_7_0.005","mid_range_0.002","over_13_0.002","under_7nomhc_0.005","mid_rangenomhc_0.002","over_13nomhc_0.002")
sink()
#same thing but for a latex table (for thesis):
addlatex<-function(prevs,und,mid,old){
l<-system(paste0("awk -F \'\t\' \'NR==8\' /well/todd/users/jinshaw/aad/under_7/outreml_",und,"_sexadj.hsq"),intern=T)
l<-strsplit(l,split="\t")
h<-data.frame(h=as.numeric(l[[1]][2]), se=as.numeric(l[[1]][3]))
h$lb<-h$h-(qnorm(0.975)*h$se)
h$ub<-h$h+(qnorm(0.975)*h$se)
l1<-system(paste0("awk -F \'\t\' \'NR==8\' /well/todd/users/jinshaw/aad/under_7/outreml_",mid,"_sexadj.hsq"),intern=T)
l1<-strsplit(l1,split="\t")
h1<-data.frame(h=as.numeric(l1[[1]][2]), se=as.numeric(l1[[1]][3]))
h1$lb<-h1$h-(qnorm(0.975)*h1$se)
h1$ub<-h1$h+(qnorm(0.975)*h1$se)
l2<-system(paste0("awk -F \'\t\' \'NR==8\' /well/todd/users/jinshaw/aad/under_7/outreml_",old,"_sexadj.hsq"),intern=T)
l2<-strsplit(l2,split="\t")
h2<-data.frame(h=as.numeric(l2[[1]][2]), se=as.numeric(l2[[1]][3]))
h2$lb<-h2$h-(qnorm(0.975)*h2$se)
h2$ub<-h2$h+(qnorm(0.975)*h2$se)
cat(paste0(prevs,"&",round(h$h,digits=3), " (",round(h$lb,digits=3),", ",round(h$ub,digits=3),")&",
round(h1$h,digits=3), " (",round(h1$lb,digits=3),", ",round(h1$ub,digits=3),")&",
round(h2$h,digits=3), " (",round(h2$lb,digits=3),", ",round(h2$ub,digits=3),") \\\\\n"))
}
sink(file="/well/todd/users/jinshaw/output/aad/under_7/greml/heritibilities_sexadj_latex_hla.txt")
cat(paste0("\\begin{tabular}{c c c c c}
\\hline
Disease prevalence (\\%) (<7,7-13,>13)&<7&7-13&>13 including HLA \\\\ [0.5ex]
\\hline
"))
addlatex("0.4,0.4,0.4","under_7","mid_range","over_13")
addlatex("0.5,0.3,0.3","under_7_0.005","mid_range_0.003","over_13_0.003")
addlatex("0.5,0.2,0.2","under_7_0.005","mid_range_0.002","over_13_0.002")
cat(paste0("\\hline
\\end{tabular}"))
sink()
sink(file="/well/todd/users/jinshaw/output/aad/under_7/greml/heritibilities_sexadj_latex_nohla.txt")
cat(paste0("\\begin{tabular}{c c c c c}
\\hline
Disease prevalence (\\%) (<7,7-13,>13)&<7&7-13&>13 \\\\ [0.5ex]
\\hline
"))
addlatex("0.4,0.4,0.4","under_7nomhc","mid_rangenomhc","over_13nomhc")
addlatex("0.5,0.3,0.3","under_7nomhc_0.005","mid_rangenomhc_0.003","over_13nomhc_0.003")
addlatex("0.5,0.2,0.2","under_7nomhc_0.005","mid_rangenomhc_0.002","over_13nomhc_0.002")
cat(paste0("\\hline
\\end{tabular}"))
sink()
|
1e84c23d191ded99a6bd795a1e1457a25d393fda | 2e24e6f3d5159c3c932c4be4a71be70e113a9e86 | /R Code/AUC_Statistics.r | 1dc0cf363d7a7621a1c8d8821b19b239857163ce | [] | no_license | qahathaway/DIO_and_UCMS | 36ae3e6b0ccba9e0c9523e626304039bec1e2176 | 8c3aa6f90b5d127bbb91f3d5c56f605892f3ada5 | refs/heads/main | 2023-06-15T12:47:30.538392 | 2021-07-12T23:45:02 | 2021-07-12T23:45:02 | 384,835,887 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 3,718 | r | AUC_Statistics.r | ###pROC###
library(pROC)
AUC1 <- read.csv("Path_to_File.csv", header=TRUE)
AUC2 <- read.csv("Path_to_File.csv", header=TRUE)
AUC3 <- read.csv("Path_to_File.csv", header=TRUE)
AUC4 <- read.csv("Path_to_File.csv", header=TRUE)
AUC5 <- read.csv("Path_to_File.csv", header=TRUE)
AUC1pROC <- roc(AUC1$Pred, AUC1$Prob)
AUC2pROC <- roc(AUC2$Pred, AUC2$Prob)
AUC3pROC <- roc(AUC3$Pred, AUC3$Prob)
AUC4pROC <- roc(AUC4$Pred, AUC4$Prob)
AUC5pROC <- roc(AUC5$Pred, AUC5$Prob)
plot(AUC1pROC, col = "lightcoral", lwd = 5, legacy.axes = TRUE, xlim=c(1, 0), print.auc = TRUE, print.auc.x = 0.3, print.auc.y = 0.7)
plot(AUC2pROC, col = "green3", add = TRUE, lwd = 5, print.auc = TRUE, print.auc.x = 0.3, print.auc.y = 0.6)
plot(AUC3pROC, col = "turquoise3", add = TRUE, lwd = 5, print.auc = TRUE, print.auc.x = 0.3, print.auc.y = 0.5)
plot(AUC4pROC, col = "plum2", add = TRUE, lwd = 5, print.auc = TRUE, print.auc.x = 0.3, print.auc.y = 0.4)
title(main = "PostDiet", line=2.5)
# Add legend
##legend("bottomright",
# legend=c("Compiled", "PostDiet M-Mode", "PostDiet PW", "PostDiet Stress-Strain"),
# col=c("lightcoral", "green3", "turquoise3", "plum2"),
#lwd=3, cex =0.6,xpd = TRUE, horiz = FALSE)
plot(AUC5pROC, col = "gold1", lwd = 5, legacy.axes = TRUE, xlim=c(1, 0), print.auc = TRUE, print.auc.x = 0.3, print.auc.y = 0.5)
title(main = "PostStress", line=2.5)
plot(roc.s100b, print.auc=TRUE, auc.polygon=TRUE, grid=c(0.1, 0.2),
grid.col=c("green", "red"), max.auc.polygon=TRUE,
auc.polygon.col="lightblue", print.thres=TRUE)
# Thresholds
ci.thresolds.obj <- ci.thresholds(roc.s100b)
plot(ci.thresolds.obj)
# Specificities
plot(roc.s100b) # restart a new plot
ci.sp.obj <- ci.sp(roc.s100b, boot.n=500)
plot(ci.sp.obj)
# Sensitivities
plot(roc.s100b) # restart a new plot
ci.se.obj <- ci(roc.s100b, of="se", boot.n=500)
plot(ci.se.obj)
####Compare Multiple Classes ROC-AUC####
library(multiROC)
Multi <- data.frame(read.csv(
file = 'Path_to_File.csv'))
res <- multi_roc(Multi, force_diag=T)
unlist(res$AUC)
multi_roc_auc <- function(true_pred_data, idx) {
results <- multi_roc(true_pred_data[idx, ])$AUC
results <- unlist(results)
return(results)
}
roc_auc_with_ci_res <- roc_auc_with_ci(Multi, conf= 0.95, type='basic', R = 1000)
roc_auc_with_ci_res
#roc_test <- multi_roc(Multi)
#roc_test$Sensitivity
#roc_test$Specificity
n_method <- length(unique(res$Methods))
n_group <- length(unique(res$Groups))
res_df <- data.frame(Specificity= numeric(0), Sensitivity= numeric(0), Group = character(0), AUC = numeric(0), Method = character(0))
for (i in 1:n_method) {
for (j in 1:n_group) {
temp_data_1 <- data.frame(Specificity=res$Specificity[[i]][j],
Sensitivity=res$Sensitivity[[i]][j],
Group=unique(res$Groups)[j],
AUC=res$AUC[[i]][j],
Method = unique(res$Methods)[i])
colnames(temp_data_1) <- c("Specificity", "Sensitivity", "Group", "AUC", "Method")
res_df <- rbind(res_df, temp_data_1)
}
}
ggplot2::ggplot(res_df, ggplot2::aes(x = 1-Specificity, y=Sensitivity)) + ggplot2::geom_path(ggplot2::aes(color = Group), size=2) + ggplot2::geom_segment(ggplot2::aes(x = 0, y = 0, xend = 1, yend = 1),
colour='grey', linetype = 'dotdash') + ggplot2::theme_bw() + ggplot2::theme(plot.title = ggplot2::element_text(hjust = 0.5), legend.justification=c(1, 0),
legend.position=c(0.99, .01), legend.title=ggplot2::element_blank(),
legend.background = ggplot2::element_rect(fill=NULL, size=0.5,
linetype="solid", colour ="black"))
|
c8cd2a12ba4b3f74ac984a5198514dc2a8a2adc7 | 0500ba15e741ce1c84bfd397f0f3b43af8cb5ffb | /cran/paws.security.identity/man/acmpca_create_certificate_authority_audit_report.Rd | 39d0e6461a622f64de45725533a49532db7b9e28 | [
"Apache-2.0"
] | permissive | paws-r/paws | 196d42a2b9aca0e551a51ea5e6f34daca739591b | a689da2aee079391e100060524f6b973130f4e40 | refs/heads/main | 2023-08-18T00:33:48.538539 | 2023-08-09T09:31:24 | 2023-08-09T09:31:24 | 154,419,943 | 293 | 45 | NOASSERTION | 2023-09-14T15:31:32 | 2018-10-24T01:28:47 | R | UTF-8 | R | false | true | 1,358 | rd | acmpca_create_certificate_authority_audit_report.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/acmpca_operations.R
\name{acmpca_create_certificate_authority_audit_report}
\alias{acmpca_create_certificate_authority_audit_report}
\title{Creates an audit report that lists every time that your CA private key
is used}
\usage{
acmpca_create_certificate_authority_audit_report(
CertificateAuthorityArn,
S3BucketName,
AuditReportResponseFormat
)
}
\arguments{
\item{CertificateAuthorityArn}{[required] The Amazon Resource Name (ARN) of the CA to be audited. This is of the
form:
\code{arn:aws:acm-pca:region:account:certificate-authority/12345678-1234-1234-1234-123456789012 }.}
\item{S3BucketName}{[required] The name of the S3 bucket that will contain the audit report.}
\item{AuditReportResponseFormat}{[required] The format in which to create the report. This can be either \strong{JSON} or
\strong{CSV}.}
}
\description{
Creates an audit report that lists every time that your CA private key is used. The report is saved in the Amazon S3 bucket that you specify on input. The \code{\link[=acmpca_issue_certificate]{issue_certificate}} and \code{\link[=acmpca_revoke_certificate]{revoke_certificate}} actions use the private key.
See \url{https://www.paws-r-sdk.com/docs/acmpca_create_certificate_authority_audit_report/} for full documentation.
}
\keyword{internal}
|
d64c0d32bf8366b901d52bc9337ede2bb0b112a8 | 99144fe0beb697c124e5271a1d395ab6477d405a | /man/io_yamlet.Rd | 7501e991156ea167ce4a5c88a003528bb6b49694 | [] | no_license | cran/yamlet | 233e29fc38d75205d4cc04db5a81af49dc05a5d5 | 3f494a19ab2e1cdb426606af40304309c78603ca | refs/heads/master | 2023-09-04T00:52:18.417901 | 2023-08-24T05:00:02 | 2023-08-24T06:31:30 | 236,960,454 | 0 | 0 | null | null | null | null | UTF-8 | R | false | true | 1,250 | rd | io_yamlet.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/io_yamlet.R
\name{io_yamlet}
\alias{io_yamlet}
\title{Import and Export Yamlet}
\usage{
io_yamlet(x, ...)
}
\arguments{
\item{x}{object}
\item{...}{passed arguments}
}
\value{
see methods
}
\description{
Imports and exports yamlet.
Generic, with a read method \code{\link{io_yamlet.character}}
for character and a write method \code{\link{io_yamlet.data.frame}}
for data.frame. See also \code{\link{io_yamlet.yamlet}}.
}
\examples{
file <- system.file(package = 'yamlet', 'extdata','quinidine.yaml')
x <- io_yamlet(file)
tmp <- tempdir()
out <- file.path(tmp, 'tmp.yaml')
# we can losslessly 'round-trip' x using to generic calls
identical(x, io_yamlet(io_yamlet(x, out)))
}
\seealso{
Other io:
\code{\link{io_csv.character}()},
\code{\link{io_csv.data.frame}()},
\code{\link{io_csv}()},
\code{\link{io_res.character}()},
\code{\link{io_res.decorated}()},
\code{\link{io_res}()},
\code{\link{io_table.character}()},
\code{\link{io_table.data.frame}()},
\code{\link{io_table}()},
\code{\link{io_yamlet.character}()},
\code{\link{io_yamlet.data.frame}()},
\code{\link{io_yamlet.yamlet}()}
}
\concept{io}
\keyword{internal}
|
dd7b3a36b5c2544efd5df013b349123617db9730 | cd4673aea4e41a212ff5c57e2b1d99a7ade1fbf1 | /R/checkdense.R | fc66756979bbff8331c7c206576e2d1c65461471 | [] | no_license | cran/sdpt3r | 62765e36a08557f846fe50312a2ced20debdf153 | 6668a2744cfbe777717e3175d0a79ac542823e2c | refs/heads/master | 2021-07-15T04:50:22.141609 | 2019-02-11T07:50:03 | 2019-02-11T07:50:03 | 102,360,434 | 1 | 0 | null | null | null | null | UTF-8 | R | false | false | 495 | r | checkdense.R | checkdense <- function(A){
checkdense <- function(A){
  # Identify "dense" columns of A: columns whose nonzero count exceeds a
  # threshold fraction of the rows. The threshold tightens as the matrix
  # grows taller; for small matrices (<= 1000 rows) nothing is flagged.
  # Returns an integer vector of column indices, or NULL when none qualify.
  n_rows <- nrow(A)
  n_cols <- ncol(A)
  dense_cols <- c()
  # Piecewise nonzero-ratio threshold, chosen by matrix height.
  threshold <- 1
  if (n_rows > 1000) {
    threshold <- 0.2
  }
  if (n_rows > 2000) {
    threshold <- 0.1
  }
  if (n_rows > 5000) {
    threshold <- 0.05
  }
  if (threshold < 1) {
    # Count nonzeros per column directly; unname so the result matches the
    # unnamed indicator-matrix computation this replaces.
    nnz_per_col <- unname(colSums(A != 0))
    dense_cols <- which(nnz_per_col > threshold * n_rows)
    # If "too many" columns look dense, treat none as dense.
    if (length(dense_cols) > max(200, 0.1 * n_cols)) {
      dense_cols <- c()
    }
  }
  return(dense_cols)
}
b3cddabc5eba8537567b679f46f2f72e8bef903e | 45fdc72f7292ea885f48875091afc59eaf7ded7d | /R/pseudo_rank.R | dc2c0936243e21249b7b9cf508d58af00edb0952 | [
"MIT"
] | permissive | AgrDataSci/gosset | 1a47afb30223e3e9a43957986dcadbb31079bc10 | a371113bc453f8467ee6b4cdae9f86e286e89af2 | refs/heads/master | 2023-07-06T05:05:26.159662 | 2023-07-03T21:16:24 | 2023-07-03T21:16:24 | 173,807,877 | 4 | 4 | NOASSERTION | 2022-06-20T15:23:01 | 2019-03-04T19:28:39 | R | UTF-8 | R | false | false | 1,572 | r | pseudo_rank.R | #' Add pseudo-rank to missing values
#'
#' @param object a matrix or PlackettLuce rank
#' @param ... additional arguments passed to methods
#' @return a matrix or PlackettLuce rank
#' @examples
#' library("PlackettLuce")
#' R = matrix(c(1, 2, 0, 0,
#' 4, 1, 0, 3,
#' 2, 1, 0, 3,
#' 1, 2, 0, 0,
#' 2, 1, 0, 0,
#' 1, 0, 0, 2), nrow = 6, byrow = TRUE)
#' colnames(R) = c("apple", "banana", "orange", "pear")
#'
#' # summary(PlackettLuce(R))
#'
#' R = pseudo_rank(R)
#'
#' summary(PlackettLuce(R))
#' @importFrom PlackettLuce as.rankings
#' @export
pseudo_rank = function(object, ...) {
  # Remember whether the input was a PlackettLuce "rankings" object so the
  # same class can be restored before returning.
  keepclass = class(object)[1]
  object = as.matrix(object)
  do = dim(object)
  sumR = colSums(object)
  # Find items that were never ranked (all-zero columns).
  missR = as.vector(which(sumR == 0))
  if (length(missR) == 0) {
    # Nothing missing: return the input unchanged (re-coercing if needed).
    if (keepclass == "rankings") {
      object = PlackettLuce::as.rankings(object)
    }
    return(object)
  }
  # Average number of times the observed items were tested; the pseudo-ranks
  # are added to this many rows to keep variances comparable.
  tested = apply(object, 2, function(x){sum(x != 0)})
  tested = floor(mean(tested[-missR]))
  # Insert pseudo-rankings so each missing item always loses, i.e. is ranked
  # one place below the currently worst item in the sampled rows.
  # NOTE(review): set.seed() here clobbers the caller's global RNG state —
  # consider saving/restoring .Random.seed; left as-is to preserve behavior.
  set.seed(21014422)
  s = sort(sample(1:do[1], tested))
  for (i in seq_along(missR)) {
    # For each sampled row, assign the missing item a rank of max(x) + 1.
    object[s, ] = t(apply(object[s, ], 1, function(x){
      x[missR[i]] = max(x) + 1
      x
    }))
  }
  if (keepclass == "rankings") {
    object = PlackettLuce::as.rankings(object)
  }
  return(object)
}
|
6854cc773b9b8c147669b020fe70527144a56730 | 233d05c5432d785de7a4b2cf2d0e540754f951e2 | /11.R | 36c1e677f5fc05608fe317c9fbdd2c3381ffeb2c | [] | no_license | bill2692/R-Lecture-2021 | efabfdd45217d84b5ce48e2591acb82db138f27d | 449d2e1d51df6df0376c313952e273d47838e74a | refs/heads/main | 2023-04-21T03:59:42.423902 | 2021-05-06T08:48:32 | 2021-05-06T08:48:32 | 359,694,747 | 0 | 1 | null | null | null | null | UTF-8 | R | false | false | 458 | r | 11.R | # 정규 표현식 (Regular Expressions)
# Demo of regular-expression matching and substitution on a small vector.
# NOTE(review): str_match/str_match_all come from the stringr package, which
# is not loaded in this script — presumably library(stringr) is expected to
# have been run beforehand; confirm.
fruits <- c("1 apple", "2 pears", "3 bananas")
str_match(fruits, '[aeiou]')       # first vowel in each element
str_match_all(fruits, '[aeiou]')   # all vowels in each element
str_match(fruits, '\\d') # \\d = digit
str_match(fruits, '[[:digit:]]') # [[:digit:]] = digit (POSIX class)
str_match(fruits, '[[:punct:]]')     # first punctuation character (none here)
str_match_all(fruits, '[[:punct:]]') # all punctuation characters
gsub('[[:digit:]]', 'x', fruits)  # replace digits with 'x'
gsub('[[:punct:]]', '_', fruits)  # replace punctuation with '_'
gsub('\\d', '', fruits)  # strip digits
gsub('\\s', '', fruits)  # strip whitespace
gsub('\\w', '', fruits)  # strip word characters (letters/digits/underscore)
|
a55b4a7584f558888dd8e9847899c03ef3b4ad12 | e66f3a09dfbb511911b48aaf8461af04add78c6c | /R/clase4.R | f330763d1c0ba071573ca6cc2a2362ecc0948c1b | [] | no_license | mmc00/curso_r | b831ac5d10cd1b9434a30d75e43cc93e21949cc3 | 430d02938533fed4b6fa9307db369f7735e2c72e | refs/heads/master | 2023-08-30T00:25:14.043798 | 2021-11-17T16:46:10 | 2021-11-17T16:46:10 | 290,911,356 | 0 | 0 | null | null | null | null | WINDOWS-1250 | R | false | false | 1,679 | r | clase4.R | # Bases de R
library(tidyverse)
library(readxl)
library(readr)
library(vroom)
library(rgtap)
# Introduccion funciones
# paramentros
readxl::read_excel(path = "data/pib_encadenado.xlsx")
# libreria - funcion - paramentros
# funcion nativas
# R
mean(c(5,6,7))
min(c(6,6,7))
max()
sd()
colnames()
# funciones de usuario
cuadrado_x <- function(x = NULL){
a <- x*x
return(a)
}
library(readxl)
library(tidyverse)
# Ejemplo
base_1 <- "data/pib_pmercados.xlsx"
base_2 <- "data/pib_encadenado.xlsx"
# Load a GDP ("PIB") Excel workbook and reshape it to long format.
# path_base: path to the .xlsx file (required).
# pib_name:  label used to rename the aggregate GDP row (required).
# Returns a long tibble with columns: variable, year, values_mercado.
# Stops with a (Spanish-language) error message if either argument is missing.
carga_pib <- function(path_base = NULL, pib_name = NULL){
  if(is.null(path_base)){
    stop("Error: no hay base")
  }
  if(is.null(pib_name)){
    stop("Error: no hay nombre para PIB")
  }
  # Skip the 3-row header, drop the first data row, keep the 18 series rows,
  # rename the unnamed first column, trim whitespace, then pivot the year
  # columns into long format and relabel the GDP-at-market-prices row.
  data <- read_xlsx(path_base, skip = 3) %>%
    slice(-1) %>%
    slice(1:18) %>%
    rename("variable" = "...1") %>%
    mutate(variable = str_trim(variable)) %>%
    pivot_longer(cols = -variable, names_to = "year",
                 values_to = "values_mercado") %>%
    mutate(variable = ifelse(variable == "Producto Interno Bruto a precios de mercado",
                             pib_name, variable))
  return(data)
}
base_0 <- carga_pib()
base_0_0 <- carga_pib(path_base = base_1)
base_1_fix <- carga_pib(path_base = base_1)
base_1_fix_2 <- carga_pib(path_base = base_1, pib_name = "PIB_CR")
base_2_fix <- carga_pib(path_base = "data/pib_encadenado.xlsx", pib_name = "PIB_CR")
# GITHUB
# Prueba 1
# GITHUB
"mmc00/curso_r"
# creacion de script dentro de folder (tareas)
# buscar los datos del pib (carpeta datos)
# consumo
# inversion
# exportaciones
# importaciones
# commits de los dos
# funciones (corto el código puedan)
# Prueba 2
#papote malote
#sebas
# Marlon baborsh
|
90863b74b6e76e6a960a9cf945334b6d575d3be9 | ffdea92d4315e4363dd4ae673a1a6adf82a761b5 | /data/genthat_extracted_code/hit/examples/fast.anova.Rd.R | 082f5738b939090d7fcbd88529f9e9b221dbfdcd | [] | no_license | surayaaramli/typeRrh | d257ac8905c49123f4ccd4e377ee3dfc84d1636c | 66e6996f31961bc8b9aafe1a6a6098327b66bf71 | refs/heads/master | 2023-05-05T04:05:31.617869 | 2019-04-25T22:10:06 | 2019-04-25T22:10:06 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 215 | r | fast.anova.Rd.R | library(hit)
### Name: fast.anova
### Title: Fast ANOVA
### Aliases: fast.anova
### ** Examples
y <- rnorm(n = 100)
x <- matrix(data = rnorm(1000), nrow = 100)
a <- 1:10
fast.anova(x = x, y = y, assign = a)
|
fdfe2db1f86df281d88c68691080e3756f26e499 | 9d522ef194f863740f3cf3a9b7113a583f9edd6b | /transp.R | ab26f2a3fa3948c2ee592958c70a4b86894746b8 | [] | no_license | sirxqyang/three_positioning | 768a98dfc908cfa2b183ae04cac4acba4b583123 | 15ab073a632e21b0306107fc43aa306032c63ba4 | refs/heads/master | 2021-01-20T09:12:52.337646 | 2013-01-02T06:54:54 | 2013-01-02T06:54:54 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 610 | r | transp.R | require(graphics)
# Command-line script: read a two-column table (position, density) and plot a
# smoothed nucleosome-density curve around the TSS.
# NOTE(review): args[6] assumes the file name is the first trailing argument
# after R's own five invocation arguments — presumably invoked as
# `Rscript transp.R <file>`; confirm (commandArgs(trailingOnly = TRUE) would
# be more robust but changes the index).
args <- commandArgs()
filename <- args[6]
Data <- read.table(filename, header=FALSE)
x <- Data[,1]  # distance to TSS (bp)
y <- Data[,2]  # average nucleosome density
## Write the plot out in PDF format under results/translational_positioning/.
dir.create('results')
dir.create('results/translational_positioning')
#Path <- 'results/translational_positioning/'
#Name <- sub("_tss", "", filename)
#Suffix <- ".pdf"
#transp=paste (Path, Name, Suffix, sep = "", collapse = NULL)
pdf('results/translational_positioning/translational_positioning.pdf', width=10, height=4)
# Invisible points establish the axes; the spline draws the smoothed curve.
plot(x, y, col='white', xlab='Distance to TSS (bp)', ylab='Average nucleosome density')
lines(spline(x, y), col=1)
dev.off()
4d42147e2707748a899324008252e860f2b482ca | 9a6694150b35e8746d0017f875a8c7e5cb35a273 | /genpred_library.R | 9b7840bc2a63029d1fc9ab17753956af7780440c | [] | no_license | anyelacamargo/wheatspike | 729bb758ff9534dbfd5a0f2608d201b3f2e9493c | 239c8c6c1f9401a70636a1e268e862c1230290d4 | refs/heads/master | 2020-05-21T00:23:38.070904 | 2019-05-09T15:49:13 | 2019-05-09T15:49:13 | 185,825,611 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 3,357 | r | genpred_library.R | #' Anyela Camargo
#'
#'
#'
#install.packages("qtl2", repos="http://rqtl.org/qtl2cran")
library('qtl2')
library('devtools')
library('data.table')
library('dplyr')
library('doParallel')
# Select RILS's marker profile
#' @param filename
#' @param markername
# Select a RIL's marker profile: return the first (ID) column of `sdat`
# together with the column(s) named in `markername`, as a data.table.
searchMarker <- function(sdat, markername){
  marker_idx <- match(markername, colnames(sdat))
  keep_cols <- c(1, marker_idx)
  sdat[, keep_cols, with = FALSE]
}
#' @param RILs profile for a given marker
# Recode raw SNP calls into ranking codes via a lookup table:
#   "0" (AA)  -> "1",  "2" (BB) -> "3",  "1" (het) -> "2".
# Any other value (including NA) is left as NA, and numeric input is
# coerced through as.character(), matching the original equality tests.
convert_snp <- function(x) {
  codes <- c("0" = "1", "2" = "3", "1" = "2")
  recoded <- rep(NA, length(x))
  key <- as.character(x)
  known <- key %in% names(codes)
  recoded[known] <- codes[key[known]]
  return(recoded)
}
#' process genodata
#' @param dataril, genetic map
#' @param datamap, map
get_rils <- function(sdat)
{
clr = ncol(sdat)
rr = nrow(sdat)# Cols dataframe
# Convert snps to 1/0
R <- apply(sdat[1:rr,13:clr],1, convert_snp)
markername = as.vector(sdat[1:rr, 'Name'])
genotypename = colnames(sdat)[13:clr]
ril_data <- data.table(genotypename, R)
colnames(ril_data) <- c('id', t(markername))
fwrite(ril_data, file = 'MAGIC/magic_geno.csv', col.names=T,row.names=F,
quote = F,sep=",")
return(ril_data)
}
#' @param sdat raw data
get_parents <- function(sdat){
colname = c("Name", "ALCHEMY", "BROMPTON", "CLAIRE", "HEREWARD", "RIALTO", "ROBIGUS",
"SOISSONS", "XI-19")
data_parent <- sdat[, colname, with=FALSE]
clr = ncol(data_parent)
rr = nrow(data_parent)# Cols dataframe
markername = as.vector(data_parent[['Name']])
R <- apply(data_parent[1:rr,2:clr],2, convert_snp)
parent_data <- data.frame(markername, R)
colnames(parent_data) <- colnames(data_parent)
setDT(parent_data)
fwrite(parent_data, file = 'MAGIC/magic_foundergeno.csv', col.names=T,row.names=F,
quote = F,sep=",")
}
#' @param sdat raw data
get_map <- function(sdat){
cname <- c('Name', 'Chr', 'Pos')
map_data <- sdat[, cname, with= FALSE]
setDT(map_data)
fwrite(map_data, file = 'MAGIC/magic_gmap.csv', col.names=T,row.names=F,
quote = F,sep=",")
}
#' @param sdat raw data
get_pheno <- function(sdat){
pheno_data = sdat[, lapply(.SD, mean, na.rm=TRUE),
by=geno, .SDcols=c(colnames(sdat)[3:20]) ]
colnames(pheno_data)[1] = 'ind'
fwrite(pheno_data, file = 'MAGIC/magic_pheno.csv', col.names=T, row.names=F,
quote = F,sep=",")
}
#' Get all the data ready for mapping
#' Get all the data ready for mapping
#'
#' Reads the raw genotype and phenotype CSVs and writes the derived
#' founder-genotype, RIL-genotype, genetic-map and phenotype files via the
#' sibling helpers. Called for its side effects (files under MAGIC/).
get_data <- function(){
  raw_geno <- fread('NIAB_MAGIC_ELITE_genotypes_mapped_markers.csv')
  raw_pheno <- fread('magic_gt.csv', header = TRUE)
  # BUG FIX: these calls previously passed `raw_data`, a name never defined
  # in this function (it would only resolve to a stale global, if any).
  # The genotype table read above is `raw_geno`.
  get_parents(raw_geno)
  get_rils(raw_geno)
  get_map(raw_geno)
  get_pheno(raw_pheno)
}
marker_presence_abscence <- function(){
##process rils and produce regression plots
fname <- 'MAGIC/magic_geno.csv'
rils <- fread(fname, sep =',', header=TRUE)
fname <- 'MAGIC/magic_pheno.csv'
pheno <- fread(fname, sep=',', header=TRUE)
markername <- c('RAC875_rep_c105718_585', 'RHT2')
m <- searchMarker(rils, markername)
m <- merge(m, pheno, by.x= 'id', by.y='ind')
cov_file <- read.table('MAGIC/magic_covar.csv', header=TRUE, sep=',')
cov_file <- merge(cov_file, m[,1:3], by.x= 'ind', by.y='id')
fwrite(cov_file, file='magic_covar1.csv', sep=',')
}
|
0d30f6b11461c93e2124fc7bbcb4bf132ef2cd74 | 42a009929b6b4f552d310f340eecc27bdda2ba4b | /plot4.R | 69c23f84459e2b15cad020ba2d976c224782c436 | [] | no_license | wporterjr/ExData_Plotting1 | b1fcfdca6fdda7e8e087a7664f550c301cc35f2f | d4c28cc2594afb8b1eaacf30b070bc2e326781bc | refs/heads/master | 2021-01-15T11:44:28.215455 | 2014-05-09T02:33:26 | 2014-05-09T02:33:26 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 1,900 | r | plot4.R | ## Coursera Exploratory Data Analysis Week 1 Project
## May 7, 2014
## Plot 4
setwd("~/Documents/Coursera/ExpDataAnalysis")
## Load in data frame
Df <- read.table("household_power_consumption.txt", header=TRUE, sep = ";", na.strings = "?")
## Add a formatdate column in date format
Df$formatdate <- as.Date(Df$Date, "%d/%m/%Y")
head(Df$formatdate)
# [1] "2006-12-16" "2006-12-16" "2006-12-16" "2006-12-16" "2006-12-16" "2006-12-16"
## Define the dates of interest
targetDates <- as.Date(c("2/1/2007", "2/2/2007"), "%m/%d/%Y")
targetDates
# [1] "2007-02-01" "2007-02-02"
## Subset the dataframe with the dates of interest
targetDf <- subset(Df, formatdate %in% targetDates)
unique(targetDf$formatdate)
# [1] "2007-02-01" "2007-02-02"
## Add a datetime column for the plot
targetDf$DateTime <- strptime(paste(targetDf$Date, targetDf$Time),
"%d/%m/%Y %H:%M:%S")
## Save as PNG with dim 480 x 480
## recycling previous plots where needed
png("output/plot4.png", height = 480, width = 480)
par(mfrow = c(2,2))
## upper left
plot(targetDf$DateTime, targetDf$Global_active_power,
type = "l",
xlab = "",
ylab = "Global Active Power (kilowats)")
## upper right
plot(targetDf$DateTime, targetDf$Voltage,
type = "l",
xlab = "datetime",
ylab = "Global Active Power (kilowats)")
## lower left
plot(targetDf$DateTime, targetDf$Sub_metering_1,
type = "l",
xlab = "",
ylab = "Energy sub metering")
legend("topright", c("Sub_metering_1", "Sub_metering_2", "Sub_metering_3"),
lty = 1,
col = c("black", "red", "blue"), bty = "n")
lines(targetDf$DateTime, targetDf$Sub_metering_2, col = "red")
lines(targetDf$DateTime, targetDf$Sub_metering_3, col = "blue")
## lower right
with(targetDf, plot(DateTime, Global_reactive_power, xlab='datetime', pch=NA))
with(targetDf, lines(DateTime, Global_reactive_power))
dev.off()
|
428629439f517fd607761ea98214dfa7c8f158fb | d81fcba2253dd3fdd3ce1ebfb71a48d907da8648 | /Kmeans_pam.R | 73cdb0604b1e332c1ffbb0e055f0d44a523033f9 | [] | no_license | honghh2018/R_scripts_collected | 80a11f948a8a1c5e892729890131a53b871c12db | 4fac551c407926f63ebd8ff1a25439a50578604a | refs/heads/master | 2020-03-27T04:24:00.520992 | 2019-10-07T04:21:44 | 2019-10-07T04:21:44 | 145,935,850 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 4,654 | r | Kmeans_pam.R | rm(list=ls())
library(RColorBrewer)
library(cluster)
library(reshape2)
library(dplyr)
library(ggplot2)
setwd('C:\\Users\\R')
### read expression data
exprSet<-read.delim('All_gene_fpkm.list',header = T,
#row.names = 1,
sep='\t',comment.char = '',
quote = '',check.names = F)[1:2000,]
### read duplicated information
group_frame<-read.delim('group_list.txt',sep='\t',header = F,stringsAsFactors = F)
'''
V1 V2
ApEpC A
ApHeC A
ApEpY B
ApMgY B
ApHeY C
ApMgC C
'''
sample_names<-colnames(exprSet)
newdataframe<-exprSet[match(colnames(exprSet),as.character(group_frame[,1])),]
match_samples<-colnames(exprSet)[2:length(colnames(exprSet))][match(group_frame[,1],sample_names[2:length(sample_names)])] #return index, last subjected to first arrange
### sort column with previously vector match_samples
new_exprSet<-exprSet[
,with(exprSet, match_samples)
]
library(tibble) # including has_rownames function
has_rownames(new_exprSet)
new_exprSet<-remove_rownames(new_exprSet)
new_exprSet$ID<-exprSet[,1]
temp1 <- new_exprSet[, c(length(new_exprSet), 1:length(new_exprSet)-1)]
temp_group<-table(group_frame$V2)
# Average replicate columns per group.
#
# data_frame: first column is an ID column, followed by one column per
#             replicate, ordered by group.
# ingroup:    data frame whose V2 column gives the group label of each
#             replicate (in column order).
# tab_group:  table() of ingroup$V2, i.e. replicate count per group.
# Returns a data frame with the ID column plus one mean column per group.
#
# BUG FIX: the original selected columns with `j:j+1`, which R parses as
# `(j:j) + 1` — a single (wrong) column — instead of the intended range.
# The index bookkeeping is now explicit and works for any replicate count,
# not just pairs, and uses base indexing instead of dplyr::select().
getmeancol <- function(data_frame, ingroup, tab_group){
  result <- data.frame(ID = data_frame[, 1])
  col_start <- 2  # column 1 is the ID column
  for (grp in unique(ingroup$V2)) {
    n_reps <- as.numeric(tab_group[[grp]])
    block <- data_frame[, col_start:(col_start + n_reps - 1), drop = FALSE]
    result[[grp]] <- rowMeans(block)
    col_start <- col_start + n_reps
  }
  return(result)
}
new_means_datafram<-getmeancol(temp1,group_frame,temp_group)
new_means_datafram<-exprSet
#colnames(exprSet)<-sub('(#ID|ID)','Gene',names(exprSet))
#exprSet_tibble<-as.tibble(exprSet) #转换为dplyr的tibble数据框
names(exprSet)[1]<-'Gene'
samplename<-1:(length(colnames(exprSet))-1)
names(samplename)<-colnames(exprSet)[2:length(colnames(exprSet))]
str(samplename)
#names(samplename)<-colnames(exprSet)[which(colnames(exprSet)!='ID')]
for(i in samplename){
colnames(exprSet)[i+1]=i
}
rownames(exprSet)<-exprSet[,1]
exprSet1<-exprSet %>%select(2:7) #select 2-7heatmap.2(transposed_alpha_mtx,
exprSet_t<-as.data.frame(t(exprSet1))
exprSet_cor<-exprSet_t %>%
cor(use='pairwise.complete.obs',method='pearson') #Obtain cor list,pairwise.complete.obs was parameters needed Miss values(NA)
### draw cor heatmap throught heatmap.2 function within gplots
if(!requireNamespace('gplots')) install.packages('gplots')
library(gplots) #including heatmap.2 function
color_scheme <- rev(brewer.pal(8,"RdBu"))
pdf(file='heatmap.2.pdf',height = 10,width=8)
heatmap.2(exprSet_cor[1:500,1:500],na.rm=TRUE,
cexRow=0.5, cexCol=0.5,
Rowv = NULL, # use the dendrogram previously calculated
Colv = NULL, # don't mess with my columns! (keep current ordering )
dendrogram = NULL, # only draw row dendrograms
breaks = seq(-3, 3, length.out = 9), # OPTIONAL: set break points for colors
col = color_scheme, # use previously defined colors
trace = "none", density.info = "none" # remove distracting elements of plot
)
dev.off()
exprSet_dist <- as.dist(1 - exprSet_cor) #deleting positive correlation from cor matrix by as.dist
fpkm_kmedoids <- pam(exprSet_dist, 8)
k_pam_clusters <- fpkm_kmedoids$cluster
sum(table(k_pam_clusters))
clusters_df <- data.frame(Gene = names(k_pam_clusters),
cluster = as.factor(k_pam_clusters))
exprSet_t$ID<-rownames(exprSet_t)
exprSet_m<-melt(exprSet_t,id.vars="ID",variable.name="Gene",value.name="expression")
exprSet_m$ID<-as.numeric(exprSet_m$ID)
exprSet_m<-exprSet_m %>%
left_join(clusters_df,by=c('Gene')) #left join the cluster_df into exprSet_m
cluster_means<-exprSet_m %>% group_by(cluster,ID) %>%
summarize(mean.exp=mean(expression,na.rm=TRUE))
options(warn=-1) #suppressing warning
p<-exprSet_m %>%
ggplot(aes(ID, expression, group=Gene)) +
geom_line(alpha=0.25) +
geom_line(aes(ID, mean.exp, group=NULL,color=cluster),
data = cluster_means,
size=1.1) +
ylim(0, 2) + #y tick arrange
facet_wrap(~cluster, ncol=4)+
theme_bw()+
scale_x_discrete(limits=c(names(samplename)))+
xlab('SampleName')+
theme(axis.text.x = element_text(face="bold", color="black",
size=8, angle=45,margin=margin(8,0,0,0)), #margin funs adjustting space between x axis and x text
axis.text.y=element_text(face="bold", color="black",size=8))
ggsave(p,filename = 'K-medoids_Cluster.pdf',path='./',width=22,height=15,units='cm',colormodel='srgb')
?ggsave
### Done
|
c6c85ff5316930bc014b46cd542585e8b47f5349 | d8b8e501280b3254d561fa2f6f41aeb24e041a97 | /dataInit.R | c33b9f373661f5b83d483da2b4472e59c606feff | [] | no_license | MCBrightcape/ProcessMiningPQP | db1aedc0b93c1ba831d59ce1c8f360f7288b5ece | 47a268ff1728f8b9ee558317bcc978f9108cbe9d | refs/heads/master | 2020-04-05T21:40:31.336850 | 2018-12-11T13:44:55 | 2018-12-11T13:44:55 | 157,228,757 | 2 | 0 | null | null | null | null | UTF-8 | R | false | false | 4,345 | r | dataInit.R | #Load data from server
startLocalDatabase <-function(){
print("Loading Local Data")
data <- readr::read_csv('ExampleLog.csv',
locale = locale(date_names = 'en',
encoding = 'ISO-8859-1'))
# change timestamp to date var
data$starttimestampFormatted = as.POSIXct(data$Start_Date,
format = "%d.%m.%y %H:%M")
data$endtimestampFormatted = as.POSIXct(data$End_Date,
format = "%d.%m.%y %H:%M")
# remove blanks from var names
names(data) <- str_replace_all(names(data), c(" " = "_" , "," = "" ))
events <- activities_to_eventlog(
data,
case_id = 'Case_ID',
activity_id = 'Activity',
resource_id = 'Resource',
timestamps = c('starttimestampFormatted', 'endtimestampFormatted')
)
print("Finished Loading Local Data...")
return(events)
}
setDatabase <- function(data, headers){
#Get date seperator given in the input
sep <- substring(gsub('[[:digit:]]+', '', data[headers$timestamps[1]][1,]),1,1)
format <- paste("%d","%m","%Y %H:%M",sep=sep)
#Weird fix for listing
for (item in data[headers$timestamps[1]]){
fixedStarttime <- item
}
data$starttimestampFormatted = as.POSIXct(fixedStarttime, format = format)
if(length(headers$timestamps) > 1){
#Weird fix for listing
for (item in data[headers$timestamps[2]]){
fixedEndtime <- item
}
data$endtimestampFormatted = as.POSIXct(fixedEndtime, format = format)
}
# remove blanks from var names
names(data) <- str_replace_all(names(data), c(" " = "_" , "," = "" ))
events <<- activities_to_eventlog(
data,
case_id = headers$caseID,
activity_id = headers$activityID,
resource_id = headers$resourceID,
timestamps = c('starttimestampFormatted', 'endtimestampFormatted')
)
}
# Build a bupaR process map from an event log.
# events:            eventlog object.
# setGraphActFreq:   activity-frequency cutoff (fraction) to display.
# setGraphTraceFreq: trace-frequency cutoff (fraction) to display.
# visType:           "Frequency" for a frequency map; anything else renders a
#                    performance map.
# measureType:       name of an aggregation function (e.g. "mean"), resolved
#                    via get() for the performance profile.
# durationType:      time unit passed to performance() (e.g. "days").
createGraph <- function(events, setGraphActFreq, setGraphTraceFreq, visType, measureType, durationType){
  return (
    if(visType == "Frequency"){
      events %>%
        filter_activity_frequency(percentage = setGraphActFreq) %>% # show only most frequent activities
        filter_trace_frequency(percentage = setGraphTraceFreq) %>% # show only the most frequent traces
        process_map(render = T)
    }else{
      events %>%
        filter_activity_frequency(percentage = setGraphActFreq) %>% # show only most frequent activities
        filter_trace_frequency(percentage = setGraphTraceFreq) %>% # show only the most frequent traces
        process_map(performance(get(measureType), durationType), render = T)
    })
}
createVariantsGraph2 <- function(input, output, session, events){
return(
events %>% filter_case(cases = unlist(strsplit(allVariants[input$caseSelect,'Cases'],split=", "))) %>%
filter_activity_frequency(percentage = 1.0) %>% # show only most frequent activities
filter_trace_frequency(percentage = input$setGraphTraceFreq2) %>% # show only the most frequent traces
process_map(render = T)
)
}
preproccesEventData <- function(events, session){
print("Processing Data")
allCases <- cases(events)
allVariants <<- data.frame(integer(nrow(events %>% traces())),
character(nrow(events %>% traces())),
integer(nrow(events %>% traces())),
double(nrow(events %>% traces())),
stringsAsFactors = FALSE)
colnames(allVariants)[1] <<- "Index"
colnames(allVariants)[2] <<- "Cases"
colnames(allVariants)[3] <<- "Frequency"
colnames(allVariants)[4] <<- "Total_Days"
for(i in 1:nrow(traces(events))){
caseVector <- c()
allVariants[i,'Index'] <<- i
for (j in which(allCases['trace_id'] == i)){
allVariants[i,'Frequency'] <<- allVariants[i,'Frequency'] + 1
allVariants[i,'Total_Days'] <<- allVariants[i,'Total_Days'] + allCases[j,'duration_in_days']
caseVector <- append(caseVector, unlist(allCases[j,'Case_ID'], use.names = FALSE))
}
allVariants[i,"Cases"] <<- toString(caseVector)
}
allVariants <<- allVariants[order(-allVariants$Frequency),]
choices <<- setNames(allVariants$Index, allVariants$Frequency)
updateSelectInput(session, "caseSelect", choices = choices)
print("Finished Processing Data")
} |
db67332b637a2ccde05cb158c1d305d648fe1f5d | 15204ebc34e9977ab5a3001e1b05cc2f9b15939e | /R/conformalIte_naive.R | 641059db039e5649992faaf142c11efc2c943f1d | [
"MIT"
] | permissive | valeman/cfcausal | e1a84e121300732afdadb157ffda7234ce63b2fd | 4eca02cd509404773ddf29e5f74ef72079923bd4 | refs/heads/master | 2023-03-30T05:22:54.538463 | 2021-04-02T05:20:47 | 2021-04-02T05:20:47 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 2,936 | r | conformalIte_naive.R | ## Naive methods of Conformal inference for individual treatment effects for subjects with both
## missing potential outcome. See ?conformalIte
conformalIteNaive <- function(X, Y, T,
type, side,
quantiles,
outfun, outparams,
psfun, psparams,
useCV,
trainprop,
nfolds){
n <- length(Y)
Y1 <- Y0 <- Y
Y1[T == 0] <- NA
Y0[T == 1] <- NA
inds <- which(T == 1)
Xtrain <- X
estimand1 <- "missing"
side1 <- switch(side,
two = "two",
above = "above",
below = "below")
if (side == "two"){
quantiles1 <- quantiles
} else {
quantiles1 <- quantiles[2]
}
obj1 <- conformalCf(Xtrain, Y1,
estimand1,
type, side1,
quantiles1,
outfun, outparams,
psfun, psparams,
useCV,
trainprop,
nfolds)
Y1_CIfun <- function(X, alpha, wthigh, wtlow){
predict(obj1, X, alpha = alpha / 2,
wthigh = wthigh, wtlow = wtlow)
}
estimand0 <- "missing"
side0 <- switch(side,
two = "two",
above = "below",
below = "above")
if (side == "two"){
quantiles0 <- quantiles
} else {
quantiles0 <- quantiles[1]
}
obj0 <- conformalCf(Xtrain, Y0,
estimand0,
type, side0,
quantiles0,
outfun, outparams,
psfun, psparams,
useCV,
trainprop,
nfolds)
Y0_CIfun <- function(X, alpha, wthigh, wtlow){
predict(obj0, X, alpha = alpha / 2,
wthigh = wthigh, wtlow = wtlow)
}
Ite_CIfun <- function(X, alpha, wthigh, wtlow){
Y1_CI <- Y1_CIfun(X, alpha, wthigh, wtlow)
Y0_CI <- Y0_CIfun(X, alpha, wthigh, wtlow)
CI <- data.frame(lower = Y1_CI[, 1] - Y0_CI[, 2],
upper = Y1_CI[, 2] - Y0_CI[, 1])
}
res <- list(Ite_CIfun = Ite_CIfun,
Y1_CIfun = Y1_CIfun,
Y0_CIfun = Y0_CIfun)
class(res) <- "conformalIteNaive"
return(res)
}
# Predict method for conformalIteNaive objects: evaluate the three stored
# interval constructors (ITE, Y1, Y0) on the test covariates and return the
# resulting confidence intervals as a named list.
predict.conformalIteNaive <- function(object, Xtest,
                                      alpha = 0.1,
                                      wthigh = 20, wtlow = 0.05){
  # Each stored constructor shares the same signature; apply them uniformly.
  run_ci <- function(ci_fun) ci_fun(Xtest, alpha, wthigh, wtlow)
  list(Ite = run_ci(object$Ite_CIfun),
       Y1 = run_ci(object$Y1_CIfun),
       Y0 = run_ci(object$Y0_CIfun))
}
|
bf2de07d7cbccb1ed6c08ff55133cd708da4f6b6 | bb099fedce1b86d0883d376e6690d6820e3fb2e0 | /cachematrix.R | 987cb423f10379e12b8918b6c9d66958787631c3 | [] | no_license | valentinaboterov/ProgrammingAssignment2 | e2db7a7d129516b0475357f844914751d0c75a4d | cbc827f6703a0e6abb26fe5afc7d0bd7d6344218 | refs/heads/master | 2022-12-02T22:50:32.263154 | 2020-08-15T21:54:12 | 2020-08-15T21:54:12 | 287,828,636 | 0 | 0 | null | 2020-08-15T21:37:57 | 2020-08-15T21:37:56 | null | ISO-8859-1 | R | false | false | 1,036 | r | cachematrix.R | ## Put comments here that give an overall description of what your
## functions do
## Write a short comment describing this function
#Crea una matrix "especial" y guarda su inversa
# Wrap a matrix together with a cache for its inverse.
# Returns a list of closures:
#   set(y)           - replace the stored matrix and invalidate the cache
#   get()            - return the stored matrix
#   setInverse(inv)  - store a computed inverse in the cache
#   getInverse()     - return the cached inverse, or NULL if not yet set
makeCacheMatrix <- function(x = matrix()) {
  v <- NULL
  set <- function(y){ # replace the matrix; clear any stale cached inverse
    x <<- y
    v <<- NULL
  }
  get <- function() x # return the stored matrix
  # BUG FIX: both closures below used an undefined variable `j` — setInverse
  # wrote to a global `j` and getInverse never saw the cached value. They now
  # use the cache variable `v` declared in this closure's environment.
  setInverse <- function(inverse) v <<- inverse # cache the inverse
  getInverse <- function() v # retrieve the cached inverse (NULL if unset)
  list(set = set, get = get, setInverse = setInverse, getInverse = getInverse)
}
## Write a short comment describing this function
## Return the inverse of the matrix wrapped by `x` (a makeCacheMatrix object).
## The inverse is computed with solve() only on the first call; subsequent
## calls return the cached value stored inside `x`.
cacheSolve <- function(x, ...) {
  # Reuse the cached inverse when one has already been stored.
  cached <- x$getInverse()
  if (!is.null(cached)) {
    return(cached)
  }
  # Cache miss: compute the inverse, store it for next time, and return it.
  inverse <- solve(x$get(), ...)
  x$setInverse(inverse)
  inverse
}
|
007a10e11e47d0d6a964e611338f43595f048509 | 5904e9464761f3e68a17f19c9cdda9c307389727 | /Scripts/20161005-1436-HiSSE_0.75-0.36_SimulMk_AllModels.R | b2bb549649f49ce6de08c220a687338164b5c480 | [] | no_license | austinhpatton/Hybridization_Diversification_HiSSE | 969f07c5e3182cde69dab8240fc61cc50b35701c | 38f742d60facf9c9036741bf033b0077724514c8 | refs/heads/master | 2021-01-19T11:48:53.565932 | 2017-02-17T23:45:08 | 2017-02-17T23:45:08 | 82,266,821 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 14,038 | r | 20161005-1436-HiSSE_0.75-0.36_SimulMk_AllModels.R | #!/usr/bin/env Rscript
####HiSSE - Samp Freq = 0.75_0.36####
# Fits a full set of (Hi)SSE diversification models to a tree with a neutral
# trait simulated under the empirically estimated 'hybridizability' transition
# rates, using state-specific sampling fractions f = (0.75, 0.36).
# Usage: Rscript <script> --tree <nexus> --states <file> --char-states <csv> --output <prefix>
library(hisse)
library(diversitree)
library(geiger)
library(optparse)
# Command-line options: input tree/state files and the output file prefix
opts <- list(make_option('--tree'),
make_option('--states'),
make_option('--char-states'),
make_option('--output'))
args <- parse_args(OptionParser(option_list=opts),
positional_arguments=TRUE)
tree_file <- args$options[['tree']]
state_file <- args$options[['states']]
hisse_dat_file <- args$options[['char-states']]
out_prefix <- args$options[['output']]
#Read in tree - Brendan substituted tree file variable here
# instead of hard-coded file name
tree <- read.nexus(tree_file)
#Read in states - again, state_file instead of hard-coded
# states
#states <- scan(state_file)
# Make dataframe with spp names and char states
# Variable instead of hard-coded file name
dat <- read.csv(hisse_dat_file)
#Transition rates used here are those estimated for the 'hybridizability' trait along our phylogeny.
#We then simulate a neutral trait along the phylogeny.
# q is the 2-state Markov rate matrix (rows sum to zero) used by sim.char()
q <- list(rbind(c(-0.002692059,0.002692059), c(0.037110456,-0.037110456)))
sim.traits <- sim.char(tree, q, model='discrete', nsim = 1, root = 1)
sim.traits <- as.data.frame(sim.traits)
# sim.char() codes states as 1/2; recode to 0/1 as expected by hisse()
dat$State <- sim.traits[,1]-1
####################################
# Null 2 All Transitions All Equal #
####################################
# Character-independent (CID-2) null: diversification varies only with the
# hidden state (A vs. B), and every allowed transition shares a single rate.
trans.rate <- TransMatMaker(hidden.states=TRUE)
# Collapse all free (non-NA, non-zero) transition parameters to one shared rate
trans.rate[!is.na(trans.rate) & !trans.rate == 0] = 1
# Fixed turnover rates between 0A & 1A, and between 0B & 1B
# This runs the analysis
Null2_AllTrans_Eq <- hisse(phy = tree, data = dat, f = c(0.75, 0.36), hidden.states = T, turnover.anc = c(1,1,2,2),
eps.anc = c(1,1,2,2), trans.rate = trans.rate, output.type = "raw", bounded.search = TRUE)
# Store likelihood, AIC, and AICc
Null2_AllTrans_Eq.logL <- Null2_AllTrans_Eq$loglik
Null2_AllTrans_Eq.AIC <- Null2_AllTrans_Eq$AIC
Null2_AllTrans_Eq.AICc <- Null2_AllTrans_Eq$AICc
# Write output to file
capture.output(Null2_AllTrans_Eq, file=paste0(out_prefix, '_Null2_AllTrans_EqRate.txt'))
##########################################
# Null 2 No Double Transitions All Equal #
##########################################
# Same CID-2 null, but dual transitions (simultaneous change of observed and
# hidden state; parameters 3, 5, 8, 10) are disallowed.
trans.rate <- TransMatMaker(hidden.states=TRUE)
trans.rate <- ParDrop(trans.rate, c(3,5,8,10))
trans.rate[!is.na(trans.rate) & !trans.rate == 0] = 1
# Fixed turnover rates between 0A & 1A, and between 0B & 1B
# This runs the analysis
Null2_NoDub_Eq <- hisse(phy = tree, data = dat, f = c(0.75, 0.36), hidden.states = T, turnover.anc = c(1,1,2,2),
eps.anc = c(1,1,2,2), trans.rate = trans.rate, output.type = "raw", bounded.search = TRUE)
# Store likelihood, AIC, and AICc
Null2_NoDub_Eq.logL <- Null2_NoDub_Eq$loglik
Null2_NoDub_Eq.AIC <- Null2_NoDub_Eq$AIC
Null2_NoDub_Eq.AICc <- Null2_NoDub_Eq$AICc
# Write output to file
capture.output(Null2_NoDub_Eq, file=paste0(out_prefix, '_Null2_NoDub_EqRate.txt'))
#######################################
# Null 2 Three Trans Rates, No Double #
#######################################
# Define transition matrix
trans.rate <- TransMatMaker(hidden.states=TRUE)
trans.rate <- ParDrop(trans.rate, c(3,5,8,10))
# Set all transitions from 0->1 to be governed by a single rate:
to.change <- cbind(c(1,3), c(2,4))
trans.rate[to.change] = 1
# Now set all transitions from 1->0 to be governed by a single rate:
to.change <- cbind(c(2,4), c(1,3))
trans.rate[to.change] = 2
# Finally, set all transitions between the hidden state to be a single rate (essentially giving
# you an estimate of the rate by which shifts in diversification occur:
to.change <- cbind(c(1,3,2,4), c(3,1,4,2))
trans.rate[to.change] = 3
trans.rate
# Fixed turnover rates between 0A & 1A, and between 0B & 1B
# This runs the analysis
Null2_ThreeRate_NoDub <- hisse(phy = tree, data = dat, f = c(0.75, 0.36), hidden.states = T, turnover.anc = c(1,1,2,2),
eps.anc = c(1,1,2,2), trans.rate = trans.rate, output.type = "raw", bounded.search = TRUE)
# Store likelihood, AIC, and AICc
Null2_ThreeRate_NoDub.logL <- Null2_ThreeRate_NoDub$loglik
Null2_ThreeRate_NoDub.AIC <- Null2_ThreeRate_NoDub$AIC
Null2_ThreeRate_NoDub.AICc <- Null2_ThreeRate_NoDub$AICc
# Write output to file
capture.output(Null2_ThreeRate_NoDub, file=paste0(out_prefix, '_Null2_NoDub_ThreeRate.txt'))
###############
# BiSSE Model #
###############
# Now make the bisse model where diversification changes with hybridizability without
# the presence of hidden states. (This is the standard BiSSE model; the
# trait-independent "BiSSE Null" version follows below.)
trans.rates.bisse <- TransMatMaker(hidden.states=FALSE)
trans.rates.bisse
# The transition matrix thus looks like the following
# (0) (1)
# (0) NA 2
# (1) 1 NA
# Given that the order of arguments in the turnover and extinction .anc commands that
# control the number of rate classes follows this order... (0A, 1A, 0B, 1B), the
# following will set the model to only transition between states that do not include
# hidden states.
bisse <- hisse(phy = tree, data = dat, f = c(0.75, 0.36), hidden.states = F, turnover.anc = c(1,2,0,0),
eps.anc = c(1,2,0,0), trans.rate = trans.rates.bisse, output.type = "raw", bounded.search = TRUE)
# Store likelihood, AIC, and AICc
bisse.logL <- bisse$loglik
bisse.AIC <- bisse$AIC
bisse.AICc <- bisse$AICc
# Write model summary to a text file
capture.output(bisse, file=paste0(out_prefix, '_BiSSE.txt'))
####################
# BiSSE Null Model #
####################
# Make a constrained bisse model where diversification rates are trait independent
# (both observed states share one turnover class and one extinction-fraction class)
bisse.null <- hisse(phy = tree, data = dat, f = c(0.75, 0.36), hidden.states = F, turnover.anc = c(1,1,0,0),
eps.anc = c(1,1,0,0), trans.rate = trans.rates.bisse, output.type = "raw", bounded.search = TRUE)
# Store likelihood, AIC, and AICc
bisse.null.logL <- bisse.null$loglik
bisse.null.AIC <- bisse.null$AIC
bisse.null.AICc <- bisse.null$AICc
# Write model summary to a text file
capture.output(bisse.null, file=paste0(out_prefix, '_BiSSE_Null.txt'))
###################################
# HiSSE Null 4 Model, Equal Rates #
###################################
# Conduct the HiSSE null-4 model that contains the same complexity as a full HiSSE
# model
# (four diversification classes paired across observed states, so diversification
# is character-independent; all transition rates constrained equal)
hisse.null4.equal <- hisse.null4(phy = tree, data = dat, f = c(0.75, 0.36), turnover.anc = rep(c(1,2,3,4),2),
eps.anc = rep(c(1,2,3,4),2), trans.type = "equal", output.type = "raw", bounded.search = TRUE)
#Store likelihood, AIC, and AICc
hisse.null4.equal.logL <- hisse.null4.equal$loglik
hisse.null4.equal.AIC <- hisse.null4.equal$AIC
hisse.null4.equal.AICc <- hisse.null4.equal$AICc
# Write model summary to a text file
capture.output(hisse.null4.equal, file=paste0(out_prefix, '_Null4_Equal.txt'))
###################################
# HiSSE Null 4 Model, Three Rates #
###################################
# Conduct the HiSSE null-4 model that contains the same complexity as a full HiSSE
# model
# (as above, but with the three-rate transition parameterization)
hisse.null4.three <- hisse.null4(phy = tree, data = dat, f = c(0.75, 0.36), turnover.anc = rep(c(1,2,3,4),2),
eps.anc = rep(c(1,2,3,4),2), trans.type = "three.rate", output.type = "raw", bounded.search = TRUE)
#Store likelihood, AIC, and AICc
hisse.null4.three.logL <- hisse.null4.three$loglik
hisse.null4.three.AIC <- hisse.null4.three$AIC
hisse.null4.three.AICc <- hisse.null4.three$AICc
# Write model summary to a text file
capture.output(hisse.null4.three, file=paste0(out_prefix, '_Null4_Three.txt'))
##############################
# HiSSE No0B All Transitions #
##############################
# Make a model that has hidden states for only state 1 - we thus cannot have transitions to 0B
# (transition parameters involving 0B are dropped; the 0B diversification
# class is set to 0 in turnover.anc/eps.anc)
trans.rates.hisse <- TransMatMaker(hidden.states=TRUE)
trans.rates.hisse <- ParDrop(trans.rates.hisse, drop.par=c(2,5,12,7,8,9))
no0B_AllTrans <- hisse(phy = tree, data = dat, f = c(0.75, 0.36), hidden.states = T, turnover.anc = c(1,2,0,3),
eps.anc = c(1,2,0,3), trans.rate = trans.rates.hisse, output.type = "raw", bounded.search = TRUE)
#Store likelihood, AIC, and AICc
no0B_AllTrans.logL <- no0B_AllTrans$loglik
no0B_AllTrans.AIC <- no0B_AllTrans$AIC
no0B_AllTrans.AICc <- no0B_AllTrans$AICc
# Write model summary to a text file
capture.output(no0B_AllTrans, file=paste0(out_prefix, '_No0B_AllTrans.txt'))
####################################
# HiSSE No0B No Double Transitions #
####################################
# Make a model that has hidden states for only state 1 - we thus cannot have transitions to 0B
# (as above, additionally dropping dual transitions)
trans.rates.hisse <- TransMatMaker(hidden.states=TRUE)
trans.rates.hisse <- ParDrop(trans.rates.hisse, drop.par=c(2,3,5,7,8,9,10,12))
no0B_NoDub <- hisse(phy = tree, data = dat, f = c(0.75, 0.36), hidden.states = T, turnover.anc = c(1,2,0,3),
eps.anc = c(1,2,0,3), trans.rate = trans.rates.hisse, output.type = "raw", bounded.search = TRUE)
#Store likelihood, AIC, and AICc
no0B_NoDub.logL <- no0B_NoDub$loglik
no0B_NoDub.AIC <- no0B_NoDub$AIC
no0B_NoDub.AICc <- no0B_NoDub$AICc
# Write model summary to a text file
capture.output(no0B_NoDub, file=paste0(out_prefix, '_No0B_NoDub.txt'))
##############################
# HiSSE No1B All Transitions #
##############################
# Make a model that has hidden states for only state 0 - we thus cannot have transitions to 1B
# (transition parameters involving 1B are dropped; the 1B diversification
# class is set to 0 in turnover.anc/eps.anc)
trans.rates.hisse <- TransMatMaker(hidden.states=TRUE)
trans.rates.hisse <- ParDrop(trans.rates.hisse, drop.par=c(3,6,9,10,11,12))
no1B_AllTrans <- hisse(phy = tree, data = dat, f = c(0.75, 0.36), hidden.states = T, turnover.anc = c(1,2,3,0),
eps.anc = c(1,2,3,0), trans.rate = trans.rates.hisse, output.type = "raw", bounded.search = TRUE)
#Store likelihood, AIC, and AICc
no1B_AllTrans.logL <- no1B_AllTrans$loglik
no1B_AllTrans.AIC <- no1B_AllTrans$AIC
no1B_AllTrans.AICc <- no1B_AllTrans$AICc
# Write model summary to a text file
capture.output(no1B_AllTrans, file=paste0(out_prefix, '_No1B_AllTrans.txt'))
####################################
# HiSSE No1B No Double Transitions #
####################################
# Make a model that has hidden states for only state 0 - we thus cannot have transitions to 1B
# (as above, additionally dropping dual transitions)
trans.rates.hisse <- TransMatMaker(hidden.states=TRUE)
trans.rates.hisse <- ParDrop(trans.rates.hisse, drop.par=c(3,5,6,8,9,10,11,12))
no1B_NoDub <- hisse(phy = tree, data = dat, f = c(0.75, 0.36), hidden.states = T, turnover.anc = c(1,2,3,0),
eps.anc = c(1,2,3,0), trans.rate = trans.rates.hisse, output.type = "raw", bounded.search = TRUE)
#Store likelihood, AIC, and AICc
no1B_NoDub.logL <- no1B_NoDub$loglik
no1B_NoDub.AIC <- no1B_NoDub$AIC
no1B_NoDub.AICc <- no1B_NoDub$AICc
# Write model summary to a text file
capture.output(no1B_NoDub, file=paste0(out_prefix, '_No1B_NoDub.txt'))
##############################
# Full HiSSE All Transitions #
##############################
# Full HiSSE: hidden states for both observed states, a separate
# diversification class for each of 0A, 1A, 0B, 1B, and all 12 transitions
# retained (no parameters dropped)
trans.rates.hisse <- TransMatMaker(hidden.states=TRUE)
hisse.full_AllTrans <- hisse(phy = tree, data = dat, f = c(0.75, 0.36), hidden.states = T, turnover.anc = c(1,2,3,4),
eps.anc = c(1,2,3,4), trans.rate = trans.rates.hisse, output.type = "raw", bounded.search = TRUE)
#Store likelihood, AIC, and AICc
hisse.full_AllTrans.logL <- hisse.full_AllTrans$loglik
hisse.full_AllTrans.AIC <- hisse.full_AllTrans$AIC
hisse.full_AllTrans.AICc <- hisse.full_AllTrans$AICc
# Write model summary to a text file
# Added by Brendan
# Tried to make this work as a variable too
capture.output(hisse.full_AllTrans, file=paste0(out_prefix, '_hisse_full_AllTrans.txt'))
####################################
# Full HiSSE No Double Transitions #
####################################
# Make a model that has hidden states for all states but does not allow for a transition between
# state 0A and 1B
# (dual transitions, parameters 3, 5, 8, 10, are dropped)
trans.rates.hisse <- TransMatMaker(hidden.states=TRUE)
trans.rates.hisse <- ParDrop(trans.rates.hisse, c(3,5,8,10))
hisse.full_NoDub <- hisse(phy = tree, data = dat, f = c(0.75, 0.36), hidden.states = T, turnover.anc = c(1,2,3,4),
eps.anc = c(1,2,3,4), trans.rate = trans.rates.hisse, output.type = "raw", bounded.search = TRUE)
#Store likelihood, AIC, and AICc
hisse.full_NoDub.logL <- hisse.full_NoDub$loglik
hisse.full_NoDub.AIC <- hisse.full_NoDub$AIC
hisse.full_NoDub.AICc <- hisse.full_NoDub$AICc
# Write model summary to a text file
# Added by Brendan
# Tried to make this work as a variable too
capture.output(hisse.full_NoDub, file=paste0(out_prefix, '_hisse_full_NoDub.txt'))
######################
# Compile Model Fits #
######################
# Gather the fit statistics stored after each model run into a single table
# (one row per model) for model comparison, then write it to CSV.
model.names <- c('Null2 AllTrans EqRates', 'Null2 NoDub EqRates', 'Null2 ThreeRate NoDub',
'BiSSE', 'BiSSE Null', 'Null4 EqRates', 'Null4 ThreeRate', 'No0B AllTrans',
'No0B NoDub', 'No1B AllTrans', 'No1B NoDub', 'HiSSE Full AllTrans',
'HiSSE Full NoDub')
logL <- c(Null2_AllTrans_Eq.logL, Null2_NoDub_Eq.logL, Null2_ThreeRate_NoDub.logL,
bisse.logL, bisse.null.logL, hisse.null4.equal.logL, hisse.null4.three.logL,
no0B_AllTrans.logL, no0B_NoDub.logL, no1B_AllTrans.logL, no1B_NoDub.logL,
hisse.full_AllTrans.logL, hisse.full_NoDub.logL)
AIC <- c(Null2_AllTrans_Eq.AIC, Null2_NoDub_Eq.AIC, Null2_ThreeRate_NoDub.AIC,
bisse.AIC, bisse.null.AIC, hisse.null4.equal.AIC, hisse.null4.three.AIC,
no0B_AllTrans.AIC, no0B_NoDub.AIC, no1B_AllTrans.AIC, no1B_NoDub.AIC,
hisse.full_AllTrans.AIC, hisse.full_NoDub.AIC)
AICc <- c(Null2_AllTrans_Eq.AICc, Null2_NoDub_Eq.AICc, Null2_ThreeRate_NoDub.AICc,
bisse.AICc, bisse.null.AICc, hisse.null4.equal.AICc, hisse.null4.three.AICc,
no0B_AllTrans.AICc, no0B_NoDub.AICc, no1B_AllTrans.AICc, no1B_NoDub.AICc,
hisse.full_AllTrans.AICc, hisse.full_NoDub.AICc)
# Assemble the comparison table in one step (equivalent to building it from
# logL and then appending the AIC/AICc columns).
results <- data.frame(logL = logL, AIC = AIC, AICc = AICc, row.names = model.names)
# Variable outfile name instead of hard-coded:
write.csv(results, file=paste0(out_prefix, 'ModelSupport.csv'))
|
29974055ca5fbf0ac6368ec337f362ef3103b6a9 | 430829d07f84835632bb87452927bf701efdec05 | /preproc-AERmon/aero_tegen/tegen_gen_from_NorESM2-MM.R | 9f69f0a679f2ef9fa9186d4a2bd39584de447e99 | [] | no_license | doblerone/CMIPtoHCLIM | 1de32871855ef9030451fec09ef3af18eb4a6cac | 880dae26dcaca912455f23d6821b9644adf887ff | refs/heads/master | 2022-07-02T10:09:25.024536 | 2022-05-30T15:20:39 | 2022-05-30T15:20:39 | 212,314,556 | 1 | 1 | null | null | null | null | UTF-8 | R | false | false | 4,121 | r | tegen_gen_from_NorESM2-MM.R | # Available from NorESM2-MM:
# od550aer AOD from the ambient aerosols (i.e., includes aerosol water). Does not include AOD from stratospheric aerosols if these are prescribed but includes other possible background aerosol types.
# od550aerh2o atmosphere_optical_thickness_due_to_water_in_ambient_aerosol_particles
# od550bc atmosphere_optical_thickness_due_to_black_carbon_ambient_aerosol
# od550csaer AOD from the ambient aerosols in clear skies if od550aer is for all-sky (i.e., includes aerosol water). Does not include AOD from stratospheric aerosols if these are prescribed but includes other possible background aerosol types.
# od550dust atmosphere_optical_thickness_due_to_dust_ambient_aerosol_particles
# od550lt1aer od550 due to particles with wet diameter less than 1 um (ambient here means wetted). When models do not include explicit size information, it can be assumed that all anthropogenic aerosols and natural secondary aerosols have diameter less than 1 um.
# od550oa atmosphere_optical_thickness_due_to_particulate_organic_matter_ambient_aerosol_particles
# od550so4 atmosphere_optical_thickness_due_to_sulfate_ambient_aerosol_particles
# od550ss atmosphere_optical_thickness_due_to_sea_salt_ambient_aerosol_particles
# First attempt: use
# od550ss for the first class "SEA"
# od550oa for the second class "LAND"
# od550bc for the third class "SOOT"
# od550dust for the fourth class "DESERT"
# The following are then not used:
# od550aer
# od550aerh20
# od550csaer
# od550lt1aer (Should we consider adding this to the SOOT class?)
# od550so4
# Download data from Nird:
# cd ~/HCLIM/aerosols/aero_tegen/NorESM2-MM
# scp "nird:/projects/NS9034K/CMIP6/CMIP/NCC/NorESM2-MM/historical/r1i1p1f1/AERmon/od550*/gn/latest/od550*198001-198912.nc" .
library(ncdf4)
library(fields)
# Read the grid definition (lat/lon) from one of the AOD files; all four
# NorESM2-MM variables used below live on the same grid.
set_revlat <- FALSE
nc <- nc_open("NorESM2-MM/od550ss_AERmon_NorESM2-MM_historical_r1i1p1f1_gn_198001-198912.nc")
lat <- nc$dim$lat$vals
lon <- nc$dim$lon$vals
nc_close(nc)
# Only regular grids (1-D coordinate vectors) are supported.
if (length(dim(lat)) > 1) { stop(" The lat variable has more than one dimension. Only regular grids supported so far.") }
dlat <- diff(lat)
# Latitudes must be monotone. Note `&&` (scalar) is the correct operator in an
# if() condition; any() already reduces the comparison to a single logical.
if (any(dlat > 0) && any(dlat < 0)) { stop("Error while reading lat. Values are both increasing and decreasing.") }
if (all(dlat > 0)) { lat <- rev(lat); set_revlat <- TRUE } # go from north to south
nlat <- length(lat)
if (length(dim(lon)) > 1) { stop(" The lon variable has more than one dimension. Only regular grids supported so far.") }
dlon <- diff(lon)
if (any(dlon > 0) && any(dlon < 0)) { stop("Error while reading lon. Values are both increasing and decreasing.") }
nlon <- length(lon)
# Generate the first rows of the Tegen file (lat then lon values, 5 per row).
if ((nlat + nlon) %% 5 != 0) { stop("nlon + nlat not divisible by 5 (the number of columns). Implement a workaround.") }
header <- matrix(c(lat, lon), ncol = 5, byrow = TRUE)
# `header` already has 5 columns, so it can be written directly.
write.table(header, file = "header.txt", sep = " ", row.names = FALSE, col.names = FALSE)
# Generalisation of the four copy-pasted read blocks (resolves the original
# TODO): read one AOD variable for a given month from a NorESM2-MM file,
# flipping the latitude axis when required.
# Relies on globals defined earlier in this script: set_revlat, nlat.
read_aod <- function(path, varname, month = 1) {
  nc <- nc_open(path)
  on.exit(nc_close(nc), add = TRUE)  # close the file even if ncvar_get() fails
  dat <- ncvar_get(nc, varname)
  if (set_revlat) { dat <- dat[, nlat:1, ] }
  # Flatten the (lon, lat) field for the requested month into a vector
  c(dat[, , month])
}
# First-attempt mapping of NorESM2-MM AOD variables onto the 4 Tegen classes:
# SEA = od550ss, LAND = od550oa, SOOT = od550bc, DESERT = od550dust.
data1 <- read_aod("NorESM2-MM/od550ss_AERmon_NorESM2-MM_historical_r1i1p1f1_gn_198001-198912.nc", "od550ss")
data2 <- read_aod("NorESM2-MM/od550oa_AERmon_NorESM2-MM_historical_r1i1p1f1_gn_198001-198912.nc", "od550oa")
data3 <- read_aod("NorESM2-MM/od550bc_AERmon_NorESM2-MM_historical_r1i1p1f1_gn_198001-198912.nc", "od550bc")
data4 <- read_aod("NorESM2-MM/od550dust_AERmon_NorESM2-MM_historical_r1i1p1f1_gn_198001-198912.nc", "od550dust")
# Write the four classes side by side (12 columns) and prepend the lat/lon header.
write.table(file = "tmp.NorESM2-MM.m01.txt", x = matrix(c(data1, data2, data3, data4), ncol = 12, byrow = TRUE), row.names = FALSE, col.names = FALSE)
system(command = "cat header.txt tmp.NorESM2-MM.m01.txt > NorESM2-MM.m01.txt; rm tmp.NorESM2-MM.m01.txt")
5b6deb6e17af2200414c1c937fdbb1698e393763 | 69bc712bce655618928721a4e57e85b0500ec526 | /man/inudge.plot.qq.Rd | ac47b2047bb2eafdb56b1b6b77c45f4f350d2b3f | [] | no_license | cran/DIME | 5cea66eee25305ed777f7ff9ce364ad7b53987d8 | 4c34813ded0a2333af020e6b9784f82edc648296 | refs/heads/master | 2022-05-17T09:27:54.289423 | 2022-05-09T13:50:13 | 2022-05-09T13:50:13 | 17,678,659 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 2,226 | rd | inudge.plot.qq.Rd | \name{inudge.plot.qq}
\alias{inudge.plot.qq}
%- Also NEED an '\alias' for EACH other topic documented here.
\title{
QQ-plot of GNG model vs. observed data
}
\description{
Produces a QQ-plot for visual inspection of the quality of fit with regard to
the uniform Gaussian (iNUDGE) mixture model estimated using the function
\code{\link{inudge.fit}}
}
\usage{
inudge.plot.qq(data, obj, resolution = 10, xlab = NULL, ylab = NULL,
main = NULL, pch = NULL, ...)
}
\arguments{
\item{data}{
an \strong{R list} of vector of normalized intensities (counts). Each element can
correspond to particular chromosome. User can construct their own list
containing only the chromosome(s) they want to analyze.
}
\item{obj}{
a list object returned by the \code{\link{inudge.fit}} function.
}
\item{resolution}{
optional number of points used to sample the estimated density function.
}
\item{xlab}{
optional x-axis label (see \code{\link{par}}).
}
\item{ylab}{
optional y-axis label (see \code{\link{par}}).
}
\item{main}{
optional plot title (see \code{\link{par}}).
}
\item{pch}{
optional plotting symbol to use (see \code{\link{par}}).
}
\item{\dots}{
additional graphical arguments to be passed to methods (see \code{\link{par}}).
}
}
\seealso{
\code{\link{inudge.fit}}, \code{\link{qqplot}}
}
\examples{
library(DIME);
# generate simulated datasets with underlying uniform and 2-normal distributions
set.seed(1234);
N1 <- 1500; N2 <- 500; rmu <- c(-2.25,1.5); rsigma <- c(1,1);
rpi <- c(.10,.45,.45); a <- (-6); b <- 6;
chr4 <- list(c(-runif(ceiling(rpi[1]*N1),min = a,max =b),
rnorm(ceiling(rpi[2]*N1),rmu[1],rsigma[1]),
rnorm(ceiling(rpi[3]*N1),rmu[2],rsigma[2])));
chr9 <- list(c(-runif(ceiling(rpi[1]*N2),min = a,max =b),
rnorm(ceiling(rpi[2]*N2),rmu[1],rsigma[1]),
rnorm(ceiling(rpi[3]*N2),rmu[2],rsigma[2])));
# analyzing chromosome 4 and 9
data <- list(chr4,chr9);
# fit iNUDGE model with 2-normal components and maximum iteration =20
set.seed(1234);
bestInudge <- inudge.fit(data, K=2, max.iter=20)
# QQ-plot
inudge.plot.qq(data,bestInudge);
}
% Add one or more standard keywords, see file 'KEYWORDS' in the
% R documentation directory.
\keyword{ dplot }
\keyword{ aplot }% __ONLY ONE__ keyword per line
|
1cdad5260192faf5658095526bbb2598a146a6d2 | 441dff109559e3cbd93438ad36454eeb6190bd15 | /R/custom_fields.R | dcf0d86609aef9c4efc8039e2e3b5d9802ba7986 | [
"MIT"
] | permissive | aamangold/wriker | b9adb8facece6e1b3941fd670f4897aa708ce5c1 | 8a64611861c72098b2a4f26cd5ac58c3765b4fd9 | refs/heads/master | 2020-04-22T22:44:43.503729 | 2019-06-07T18:33:33 | 2019-06-07T18:33:33 | 170,718,909 | 3 | 0 | null | null | null | null | UTF-8 | R | false | false | 4,139 | r | custom_fields.R | #' @title Wrike Custom Field Use by Folder
#'
#' @description This function pulls a list of your task ids within a folder that have a specified custom field filled out OR that have the specified value.
#' @param folder_id Wrike folder id. Use \code{\link{wrike_folders}} function to determine if needed.
#' @param custom_field_id Wrike custom field id or search for a value in the custom fields (ex: a product code). Use \code{\link{wrike_custom_field_url}} function to determine if needed.
#' @return A list of all Wrike task ids in a folder that have the specified custom field filled out
#'
#' @export
#' @examples
#' wrike_custom_field_exists(folder_id = "IEAAAOH5I4AB7JFG", custom_field_id = "IEAAAOH5JUAAABQ5")
#' wrike_custom_field_exists(folder_id = "IEAAAOH5I4AB7JFG", custom_field_id = "Solar Inspection")
wrike_custom_field_exists <- function(folder_id, custom_field_id) {
wriker::authenticate()
url <- paste0('https://www.wrike.com/api/v4/folders/', folder_id, '/tasks?fields=["customFields"]')
GETdata <- httr::GET(url, httr::add_headers(Authorization = paste("Bearer", v4_key, sep = " ")))
dat <- httr::content(GETdata)
dat2 <- dat[[2]]
field_list <- dplyr::data_frame()
for(i in seq_along(dat2)){
tmp <- dplyr::bind_cols(fields = sum(stringr::str_detect(unlist(dat$data[[i]]$`customFields`),
custom_field_id)),
id = purrr::map_df(dat2[i], magrittr::extract, c("id")))
field_list <- dplyr::bind_rows(field_list, tmp)
}
print(field_list %>% dplyr::filter(fields > 0 & nchar(id) > 4))
}
#' @title Wrike Custom Field Use by Task Id
#'
#' @description Returns a one-row data frame with the task id and the number of
#' custom-field entries on that task matching \code{custom_field_id}.
#' @param task_id Wrike task id
#' @param custom_field_id Use \code{\link{wrike_custom_field_url}} function to find id if needed.
#'
#' @import httr
#' @import purrr
#' @import magrittr
#' @import stringr
#'
#' @export
#' @examples
#' wrike_custom_field_on_task(task_id = "IEABOGRQKQAN3QOA", custom_field_id = "IEAAAOH5JUAAABQ5")
#'
wrike_custom_field_on_task <- function(task_id, custom_field_id){
  wriker::authenticate()
  # Fetch the task record from the Wrike v4 API
  task_url <- paste0("https://www.wrike.com/api/v4/tasks/", task_id)
  response <- httr::GET(task_url, httr::add_headers(Authorization = paste("Bearer", v4_key, sep = " ")))
  payload <- httr::content(response)
  records <- payload[["data"]]
  # Count how many custom-field entries on the task match the requested id/value
  flat_fields <- unlist(payload$data[[1]]$`customFields`)
  match_count <- sum(stringr::str_detect(flat_fields, custom_field_id))
  data.frame(
    id = purrr::map_dfr(records, magrittr::extract, c("id")),
    custom_field_count = match_count
  )
}
#' @title Wrike Custom Field URL
#'
#' @description Prints the URL at which your account's custom fields can be accessed.
#'
#' @export
#' @examples
#' wrike_custom_field_url()
#' DEPRECATED
wrike_custom_field_url <- function() {
  wriker::authenticate()
  # account_id is made available by wriker::authenticate()
  endpoint <- paste0("https://www.wrike.com/api/v3/accounts/", account_id, "/customfields")
  print(endpoint)
}
#' @title Wrike Custom Field Update
#'
#' @description This function populates a custom field from specified task id
#' @param task_id Wrike task id
#' @param custom_field_id Use \code{\link{wrike_custom_field_url}} function to find id if needed.
#' @param custom_field_value What you want populated
#'
#' @import httr
#' @import purrr
#' @import magrittr
#' @import stringr
#'
#' @export
#' @examples
#' wrike_custom_field_update(task_id = "IEABOGRQKQAN3QOA", custom_field_id = "IEAAAOH5JUAAABQ5", custom_field_value = "myvalue")
#'
wrike_custom_field_update <- function(task_id, custom_field_id, custom_field_value) {
  wriker::authenticate()
  # Wrike expects customFields as a JSON-encoded array of {id, value} objects,
  # hence the explicit toJSON() before httr JSON-encodes the request body.
  tmp <- jsonlite::toJSON(data.frame(id = custom_field_id, value = custom_field_value))
  tmp2 <- list(customFields = tmp)
  url <- paste0("https://www.wrike.com/api/v4/tasks/", task_id)
  body <- tmp2
  # add_headers() is namespace-qualified for consistency with the rest of the
  # file (and so the call also works when httr is not attached).
  httr::PUT(url, body = body, encode = "json",
            httr::add_headers(Authorization = paste("Bearer", v4_key, sep = " ")))
}
|
afada822ae60f555aa19a88fb72a2c3e179df930 | 9b704ad104d5f78c90d596ca58f1623fcebeeb97 | /Code/IC.R | 5b81607057e7fe22930c7c6018c3e3c7e4f4a4d9 | [] | no_license | EffetsCumulatifsNavigation/Methode | 32d55cc59cc1ea360e3906c1c01e4576304d3aa8 | c7099f4cd0c279570b0f03f7a7a1d23c3676086e | refs/heads/master | 2022-11-06T23:53:57.741930 | 2020-07-07T12:05:36 | 2020-07-07T12:05:36 | 256,743,491 | 0 | 1 | null | null | null | null | UTF-8 | R | false | false | 4,192 | r | IC.R | # source('./Code/IC.R')
library(raster)
library(graphicsutils)
# Generate a random habitat raster on a 100 x 100 mask.
# cont = TRUE (default) returns a continuous surface: the binary habitat is
# perturbed with multiplicative Gaussian noise and rescaled so its maximum
# value is 1. cont = FALSE returns the raw binary habitat raster.
# (The original `if (cont) ... / if (!cont) ...` double-return is collapsed
# into a single conditional with one exit point.)
random <- function(cont = TRUE) {
    m <- secr::make.mask(nx = 100, ny = 100, spacing = 20)
    h <- secr::randomHabitat(m, p = 0.5, A = 0.3)
    r <- secr::raster(h)
    if (cont) {
        r <- r * rnorm(length(r), 1, .2)
        r <- r / maxValue(r)
    }
    r
}
# Three simulated stressor layers (continuous) and three valued-component
# layers (binary habitat)
s <- stack(list(s1 = random(), s2 = random(), s3 = random()))
h <- stack(list(h1 = random(F), h2 = random(F), h3 = random(F)))
# Colour palettes: `pal` for intensity maps, `pal2` for the integrated habitats
cols <- c("#ffffff","#C7CBCE", "#96A3A3", "#687677", "#222D3D", "#25364A", "#C77F20", "#E69831", "#E3AF16", "#E4BE29", "#F2EA8B")
pal <- colorRampPalette(cols)
pal2 <- colorRampPalette(c('#ffffff','#306919'))
# Dashed border drawn on the requested side(s) of the current figure region
box3 <- function(side) box2(side = side, which = 'figure', lty = 2)
# White rectangle + bold caption letter drawn in the top-left corner of a
# panel (coordinates are in the rasters' coordinate units)
xBox <- c(-75,175,175,-75)
yBox <- c(2075,2075,1825,1825)
cBox <- '#ffffff'
cap <- function(caption) {
  polygon(x = xBox, y = yBox, col = cBox, border = cBox)
  text(x = 50, y = 1950, labels = caption, cex = 1.5, font = 2, adj = c(.5,.5))
}
# 7 x 7 layout matrix: cell values are panel indices (0 = empty); the outer
# rows/columns are narrow spacers, interior cells hold titles and maps
mat <- matrix(nrow = 7, ncol = 7)
mat[1, ] <- c(0,1,2,2,2,3,0)
mat[2, ] <- c(0,17,9,10,11,12,0)
mat[3, ] <- c(4,13,18,19,20,27,8)
mat[4, ] <- c(4,14,21,22,23,28,8)
mat[5, ] <- c(4,15,24,25,26,29,8)
mat[6, ] <- c(0,16,30,31,32,33,0)
mat[7, ] <- c(0,5,6,6,6,7,0)
png('./Figures/IC.png', res = 900, width = 200, height = 200, units = "mm")
layout(mat, heights = c(.25,1,1,1,1,1,.25), widths = c(.25,1,1,1,1,1,.25))
# layout.show(max(mat))
# Titles (panel labels are in French: study area, stressors, exposure, etc.)
par(mar = c(.5,.5,.5,.5))
plot0(); text(0,0,"Aire\nd'étude", adj = c(.5,.5), font = 2)
plot0(); text(0,0,"Facteurs de stress", adj = c(.5,.5), font = 2); box3('24')
plot0(); text(0,0,"Exposition\ncumulée", adj = c(.5,.5), font = 2)
plot0(); text(0,0,"Composantes valorisées", adj = c(.5,.5), font = 2, srt = 90); box3('13')
plot0(); text(0,0,"Composantes\nvalorisées intégrées", adj = c(.5,.5), font = 2)
plot0(); text(0,0,"Impacts intégrés stresseurs", adj = c(.5,.5), font = 2); box3('24')
plot0(); text(0,0,"Impacts cumulés", adj = c(.5,.5), font = 2)
plot0(); text(0,0,"Impacts intégrés\ncomposantes valorisées", adj = c(.5,.5), font = 2, srt = 90); box3('13')
# Stressors: one panel per simulated stressor layer
par(mar = c(.5,.5,.5,.5))
image(s[[1]], col = pal(100), axes = F, xlab = '', ylab = ''); box(); box3('12')
cap('B')
image(s[[2]], col = pal(100), axes = F, xlab = '', ylab = ''); box(); box3('1')
image(s[[3]], col = pal(100), axes = F, xlab = '', ylab = ''); box(); box3('14')
# Cumulative exposure (sum of the stressor layers)
image(sum(s, na.rm = T), col = pal(100), axes = F, xlab = '', ylab = ''); box(); box3('1')
cap('D')
# Valued components (binary habitat layers)
image(h[[1]], col = '#BACDB2', axes = F, xlab = '', ylab = ''); box(); box3('34')
cap('C')
image(h[[2]], col = '#BACDB2', axes = F, xlab = '', ylab = ''); box(); box3('4')
image(h[[3]], col = '#BACDB2', axes = F, xlab = '', ylab = ''); box(); box3('14')
# Integrated valued components (number of habitats per cell)
image(sum(h, na.rm = T), col = pal2(4), axes = F, xlab = '', ylab = ''); box(); box3('4')
cap('E')
# Study area (empty reference panel)
plot0(); box(); box3('1')
text(x = -.925, y = .925, labels = 'A', cex = 1.5, font = 2, adj = c(.5,.5))
# Individual impacts: each stressor x component combination weighted by a
# random sensitivity u[i, j] drawn from [1, 2]
u <- matrix(nrow = 3, ncol = 3, data = runif(9, 1,2))
l <- list()
for(i in 1:3) {
  for(j in 1:3) {
    l <- c(l, s[[j]] * h[[i]] * u[i,j])
  }
}
l <- stack(l)
for(i in 1:9) {
  image(l[[i]], col = pal(100), axes = F, xlab = '', ylab = '')
  box()
  if (i == 1) cap('F')
}
# Impacts integrated per valued component (rows of the 3 x 3 panel grid)
image(sum(l[[c(1,2,3)]], na.rm = T), axes = F, col = pal(100), xlab = '', ylab = ''); box(); box3('2')
cap('G')
image(sum(l[[c(4,5,6)]], na.rm = T), axes = F, col = pal(100), xlab = '', ylab = ''); box(); box3('2')
image(sum(l[[c(7,8,9)]], na.rm = T), axes = F, col = pal(100), xlab = '', ylab = ''); box(); box3('2')
# Impacts integrated per stressor (columns of the 3 x 3 panel grid)
image(sum(l[[c(1,4,7)]], na.rm = T), axes = F, col = pal(100), xlab = '', ylab = ''); box(); box3('3')
cap('H')
image(sum(l[[c(2,5,8)]], na.rm = T), axes = F, col = pal(100), xlab = '', ylab = ''); box(); box3('3')
image(sum(l[[c(3,6,9)]], na.rm = T), axes = F, col = pal(100), xlab = '', ylab = ''); box(); box3('3')
# Cumulative impacts (sum over all 9 individual impact layers)
image(sum(l, na.rm = T), axes = F, col = pal(100), xlab = '', ylab = ''); box(); box3('23')
cap('I')
dev.off()
|
254aabcff40e71321eaa2190b992bfc8ee43935d | 5ca02e7bf31c24d040f4a74f0d832893f952de70 | /man/lm2v.Rd | 234932d003e0e9b7f463101a1603827b770f3af9 | [] | no_license | cran/str2str | 76fdf31fa0c442fa05820525dd9ea3f525a02232 | c83ba67337ffc63aff404f8a920dd336992c31f9 | refs/heads/master | 2023-07-27T10:30:59.582506 | 2021-09-05T03:30:02 | 2021-09-05T03:30:02 | 335,617,877 | 0 | 0 | null | null | null | null | UTF-8 | R | false | true | 3,552 | rd | lm2v.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/str2str_functions.R
\name{lm2v}
\alias{lm2v}
\title{List of Matrices to (Atomic) Vector}
\usage{
lm2v(
lm,
along = 2,
use.listnames = TRUE,
use.dimnames = TRUE,
sep = "_",
check = TRUE
)
}
\arguments{
\item{lm}{list of matrices. They do NOT have to be the same typeof or have the
same dimensions.}
\item{along}{numeric vector of length one that is equal to either 1 or 2.
1 means that each matrix in \code{lm} is split along rows (i.e., dimension 1)
and then concatenated. 2 means that each matrix in \code{lm} is split along columns
(i.e., dimension 2) and then concatenated.}
\item{use.listnames}{logical vector of length 1 specifying whether the returned
vector should have names based on the list the element came from. If \code{lm}
does not have names, \code{use.listnames} = TRUE will have the list positions
serve as the list names (e.g., "1", "2", "3", etc.)}
\item{use.dimnames}{logical vector of length 1 specifying whether the returned
vector should have names based on the dimnames of the matrix the element came from.
If a matrix within \code{lm} does not have dimnames, \code{use.dimnames} = TRUE
will have the dimension positions serve as the dimnames (e.g., "1", "2", "3", etc.)}
\item{sep}{character vector of length 1 specifying the string used to separate
the listnames and dimnames from each other when creating the names of the returned
vector.}
\item{check}{logical vector of length 1 specifying whether to check the structure
of the input arguments. For example, check whether \code{lm} is a list of matrices.
This argument is available to allow flexibility in whether the user values
informative error messages (TRUE) vs. computational efficiency (FALSE).}
}
\value{
(atomic) vector with an element for each element from `lm`.
}
\description{
\code{lm2v} converts a list of matrices to a (atomic) vector. This function is
a combination of \code{m2v} and \code{lv2v}. This function can be useful in
conjunction with the \code{boot::boot} function when wanting to generate a
\code{statistic} function that returns an atomic vector.
}
\details{
When \code{list.names} and \code{use.dimnames} are both TRUE (default), the returned
vector elements the following naming scheme: "[listname][sep][rowname][sep][colname]".
If the matrices in \code{lm} are not all the same typeof, then the return object
is coerced to the most complex type of any matrix (e.g., character > double >
integer > logical). See \code{unlist} for details about the hierarchy of object types.
}
\examples{
lm <- list("numeric" = data.matrix(npk), "character" = as.matrix(npk))
# use.listnames = TRUE & use.dimnames = TRUE
lm2v(lm) # the first part of the name is the list names followed by the dimnames
# use.listnames = FALSE & use.dimnames = TRUE
lm2v(lm, use.listnames = FALSE) # only dimnames used,
# which can result in repeat names
# use.listnames = TRUE & use.dimnames = FALSE
lm2v(lm, use.dimnames = FALSE) # listnames and vector position without any
# reference to matrix dimensions
# use.listnames = FALSE & use.dimnames = FALSE
lm2v(lm, use.listnames = FALSE, use.dimnames = FALSE) # no names at all
# when list does not have names
lm <- replicate(n = 3, expr = as.matrix(attitude, rownames.force = TRUE), simplify = FALSE)
lm2v(lm) # the first digit of the names is the list position and
# the subsequent digits are the matrix dimnames
lm2v(lm, use.listnames = FALSE) # no listnames; only dimnames used,
# which can result in repeat names
}
|
d4620b712f9f41403d208646226ce56ffb7ba06c | 98be7e01a090acde3d7bc9493b692cb76578f0c6 | /Create 1930 Address.R | 2fe221fc9df18ef65cbfc7967f78f3a087aea9e1 | [] | no_license | mattdemography/S4-Code | 912ecb9960eef0ce288d43df99922a81beb19d9b | 569fd7ac875bb2026fdba96571ec9e8e3792e552 | refs/heads/master | 2021-09-12T14:39:58.834372 | 2018-04-17T17:30:54 | 2018-04-17T17:30:54 | 104,392,910 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 724 | r | Create 1930 Address.R | library(Hmisc)
# Builds a geocodable 1930 address file for San Antonio from the auto-cleaned
# 1930 census block data, and writes it back out as Add_30.csv.
# Requires read access to the Z:/Projects/1940Census network share.
library(DataCombine)
library(readstata13)
library(foreign)
library(car)
library(plyr)
library(seg)
library(spdep)
library(reshape)
library(reshape2)
library(rJava)
library(xlsx)
library(maptools)
library(rgdal)
library(haven)
# Read the auto-cleaned 1930 records and normalise the column names.
sa<-read.csv("Z:/Projects/1940Census/Block Creation/San Antonio/SA_AutoClean30.csv")
names(sa)<-tolower(names(sa))
# Keep only the columns needed to assemble an address:
# matched street name, enumeration district, record type, block, house number.
vars<-c("overall_match", "ed", "type", "block","hn")
sa30<-sa[vars]
# Rename to the schema expected downstream (Mblk = matched block,
# fullname = matched street name).
sa30<-plyr::rename(sa30, c(block="Mblk", overall_match="fullname"))
# Constant city/state columns for the geocoder.
sa30$state<-"TX"
sa30$city<-"San Antonio"
# Street address = "<house number> <street name>".
sa30$address<-paste(sa30$hn, sa30$fullname, sep=" ")
# Quick interactive sanity checks before writing.
names(sa30)
View(sa30)
write.csv(sa30, "Z:/Projects/1940Census/Block Creation/San Antonio/Add_30.csv")
|
0be66c70e5b4fbedb3e736efa7b49c09b22dff9a | 4eb66a194563cb6a6c9147f4de120e5cb13611be | /R/summary_ICABinCont.R | c8c516b5aa8d8155fe4a2579f404632db7fcad83 | [] | no_license | cran/Surrogate | 8971061189573d24cb402e553f99b24fb7ba8834 | 463b16b365810c637073a7b6e9f3948913007354 | refs/heads/master | 2023-07-13T20:56:33.106817 | 2023-06-22T05:20:02 | 2023-06-22T05:20:02 | 17,919,925 | 1 | 1 | null | 2023-08-22T08:14:40 | 2014-03-19T20:50:31 | R | UTF-8 | R | false | false | 1,084 | r | summary_ICABinCont.R | summary.ICA.BinCont <- function(object, ..., Object){
  # Print a text summary of the R2_H distribution stored in an ICA.BinCont fit.
  # NOTE(review): changes the session-wide 'digits' option without restoring it.
  options(digits = 4)
  # Allow the object to be passed either positionally (object) or by the
  # legacy named argument (Object).
  if (missing(Object)){Object <- object}
  # Drop missing R2_H values before computing any summaries.
  Object$R2_H <- na.exclude(Object$R2_H)
  # Kernel-density mode estimate: the x value at the peak of density(x).
  # (Shadows base::mode() locally; the list assignment is the last expression,
  # so the list is returned invisibly.)
  mode <- function(data) {
    x <- data
    z <- density(x)
    mode_val <- z$x[which.max(z$y)]
    fit <- list(mode_val= mode_val)
  }
  cat("\nFunction call:\n\n")
  print(Object$Call)
  cat("\n# Total number of valid R2_H values")
  cat("\n#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~\n\n")
  cat(length(Object$R2_H))
  cat("\n\n\n# R2_H results summary")
  cat("\n#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~\n\n")
  # Mean (SD) plus the observed range, all rounded to four decimals.
  cat("Mean (SD) R2_H: ", format(round(mean(Object$R2_H), 4), nsmall = 4), " (", format(round(sd(Object$R2_H), 4), nsmall = 4), ")",
      " [min: ", format(round(min(Object$R2_H), 4), nsmall = 4), "; max: ", format(round(max(Object$R2_H), 4), nsmall = 4), "]", sep="")
  cat("\nMode R2_H: ", format(round(mode(Object$R2_H)$mode_val, 4), nsmall = 4))
  cat("\n\nQuantiles of the R2_H distribution: \n\n")
  # Selected quantiles of the R2_H distribution.
  quant <- quantile(Object$R2_H, probs = c(.05, .10, .20, .50, .80, .90, .95))
  print(quant)
}
|
dd970f0808fb763573387c16a595e5c0ece24908 | ec0b68cec1561ae345f70e5a9ad5cf3257169b90 | /R programming/day06/r2.R | 8b23abcedfcbeaff8e1d7099368d54dbc2d24c75 | [] | no_license | baeksangwoo/Connected_Project | 1c60769493b59312259869979fd2f319a9be5b33 | 50baf113e604b63c2acff96ddf46c91574f2ae5d | refs/heads/master | 2020-03-07T09:05:32.680596 | 2018-06-11T08:43:34 | 2018-06-11T08:43:34 | 127,380,826 | 0 | 0 | null | 2018-03-30T07:39:39 | 2018-03-30T04:18:32 | Java | UTF-8 | R | false | false | 2,152 | r | r2.R | class(welfare$income)
# Korea Welfare Panel exploratory analysis of monthly income by sex, age,
# and age group.  Assumes the 'welfare' data frame is already loaded and
# that dplyr and ggplot2 are attached.
##[1] "numeric"
summary(welfare$income)
qplot(welfare$income)
# Histogram of monthly income, zoomed in on the 0-1000 range.
qplot(welfare$income) + xlim(0, 1000)
# Income values of 0 or 9999 are survey missing codes; recode them to NA.
# (Bug fix: the original fused summary() and ifelse() onto one line, spelled
# c() as C(), and never assigned the recoded vector back to the column.)
summary(welfare$income)
welfare$income <- ifelse(welfare$income %in% c(0, 9999), NA, welfare$income)
table(is.na(welfare$income))
# Mean income by sex.
sex_income <- welfare %>%
  filter(!is.na(income)) %>%
  group_by(sex) %>%
  summarise(mean_income = mean(income))
sex_income
# The plot shows men's mean income is roughly double women's.
ggplot(data = sex_income, aes(x = sex, y = mean_income)) + geom_col()
# Birth year: inspect and clean before deriving age.
class(welfare$birth)
summary(welfare$birth)
qplot(welfare$birth)
# Preprocessing so age bins can be built; 9999 is the missing code.
table(is.na(welfare$birth))
welfare$birth <- ifelse(welfare$birth == 9999, NA, welfare$birth)
table(is.na(welfare$birth))
# Korean-style age as of the 2015 survey wave.
welfare$age <- 2015 - welfare$birth + 1
summary(welfare$age)
qplot(welfare$age)
# Mean income by single year of age.
age_income <- welfare %>%
  filter(!is.na(income)) %>%
  group_by(age) %>%
  summarise(mean_income = mean(income))
head(age_income)
ggplot(data = age_income, aes(x = age, y = mean_income)) + geom_line()
# Age groups: young (< 30), middle (30-59), old (60+).
welfare <- welfare %>%
  mutate(ageg = ifelse(age < 30, "young", ifelse(age <= 59, "middle", "old")))
table(welfare$ageg)
ageg_income <- welfare %>%
  filter(!is.na(income)) %>%
  group_by(ageg) %>%
  summarise(mean_income = mean(income))
ageg_income
ggplot(data = ageg_income, aes(x = ageg, y = mean_income)) +
  geom_col() +
  scale_x_discrete(limits = c("young", "middle", "old"))
# Mean income by age group and sex.
sex_income <- welfare %>%
  filter(!is.na(income)) %>%
  group_by(ageg, sex) %>%
  summarise(mean_income = mean(income))
sex_income
# Stacked bars combining men and women within each age group.
ggplot(data = sex_income, aes(x = ageg, y = mean_income, fill = sex)) +
  geom_col() +
  scale_x_discrete(limits = c("young", "middle", "old"))
# Side-by-side bars; middle age runs to the late fifties, young is under 30.
ggplot(data = sex_income, aes(x = ageg, y = mean_income, fill = sex)) +
  geom_col(position = "dodge") +
  scale_x_discrete(limits = c("young", "middle", "old"))
# Mean income by single year of age and sex.
sex_age <- welfare %>%
  filter(!is.na(income)) %>%
  group_by(age, sex) %>%
  summarise(mean_income = mean(income))
sex_age
ggplot(data = sex_age, aes(x = age, y = mean_income, col = sex)) + geom_line()
|
000dce47db0adc8aaa0b1b2a75f30d085df7b1dd | 3463b423f7b67bc8308e97bdd38288906a3190bc | /man/export.summary.mlms.Rd | 6c7ab225c0c0188b53519437f43c97973024f8f7 | [
"MIT"
] | permissive | zhuchcn/mlms | 7ad4e42fb3402da51c5461fa073874d3adb246ef | 01a54764bca02da7a1d6d71ea2698c93bc7cf05c | refs/heads/master | 2020-11-25T03:35:42.609813 | 2019-12-17T19:23:37 | 2019-12-17T19:23:37 | 228,483,521 | 0 | 1 | null | null | null | null | UTF-8 | R | false | true | 610 | rd | export.summary.mlms.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/mlms.R
\name{export.summary.mlms}
\alias{export.summary.mlms}
\title{export summary mlms to excel}
\usage{
\method{export}{summary.mlms}(x, file)
}
\arguments{
\item{x}{summary.mlms returned by \code{\link{summary.mlms}}}
\item{file}{character. The file path to export}
}
\description{
Export a \code{summary.mlms} object to an Excel file.
}
\examples{
data(growth)
X = growth[,1:3]
design = model.matrix(~treatment, data = growth)
Z = growth[,5:10]
fit = fit_mlms(X, design, Z, coef = "treatmentLNS")
res = summary(fit)
export(res, "growth.xlsx")
}
|
71cd5ad509000e4ca443f90c1ab60b743fc7908b | d51558632f781f7e9726f2ae13f8c6a2f26ebf92 | /man/svdJacobi.Rd | 385a41ade4ca564e86446e9146bdf031caad80eb | [
"MIT"
] | permissive | isglobal-brge/svdParallel | 6368183e88886d1dc57d0af392d5a4b7727f965d | 402b01d6e1560ab2ff1934b2d27c65f1f02330e1 | refs/heads/master | 2020-04-24T02:10:16.869316 | 2019-06-25T10:55:22 | 2019-06-25T10:55:22 | 171,627,134 | 2 | 1 | null | null | null | null | UTF-8 | R | false | true | 671 | rd | svdJacobi.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/svdJacobi.R
\name{svdJacobi}
\alias{svdJacobi}
\title{SVD using Jacobi algorithm}
\usage{
svdJacobi(x, tol = .Machine$double.eps)
}
\arguments{
\item{x}{a real nxp matrix}
\item{tol}{a small positive error tolerance. Default is machine tolerance}
}
\value{
a list of three components as for \code{base::svd}
}
\description{
SVD using Jacobi algorithm
}
\details{
Singular values, right singular vectors and left singular vectors of a real nxp matrix
using two-sided Jacobi algorithm
}
\examples{
(V <- (matrix(1:30, nrow=5, ncol=6)))
svdJacobi(V)
all.equal(svdJacobi(V)$v, base::svd(V)$v)
}
|
e0c244a2727d41b01b7b0123a246bb587dbff745 | ffdea92d4315e4363dd4ae673a1a6adf82a761b5 | /data/genthat_extracted_code/sdcTable/examples/setInfo.Rd.R | c340dd3f81e47132c04e1c14f50837574b9b53b1 | [] | no_license | surayaaramli/typeRrh | d257ac8905c49123f4ccd4e377ee3dfc84d1636c | 66e6996f31961bc8b9aafe1a6a6098327b66bf71 | refs/heads/master | 2023-05-05T04:05:31.617869 | 2019-04-25T22:10:06 | 2019-04-25T22:10:06 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 1,078 | r | setInfo.Rd.R | library(sdcTable)
### Name: setInfo
### Title: set information of 'sdcProblem-class'- or
###   'problemInstance-class' objects
### Aliases: setInfo

### ** Examples

# load primary suppressed data (created in the example of \code{primarySuppression})
# Locate the installed sdcTable package directory and load its bundled
# problem instance from the .RData file.
sp <- searchpaths()
fn <- paste(sp[grep("sdcTable", sp)], "/data/problemWithSupps.RData", sep="")
problem <- get(load(fn))
# which is the overall total? (the cell with the largest frequency)
index.tot <- which.max(getInfo(problem, 'freq'))
index.tot
# we see that the cell with index.tot==1 is the overall total and its
# anonymization state of the total can be extracted as follows:
print(getInfo(problem, type='sdcStatus')[index.tot])
# we want this cell to never be suppressed: status 'z' forces publication
problem <- setInfo(problem, type='sdcStatus', index=index.tot, input='z')
# we can verify this:
print(getInfo(problem, type='sdcStatus')[index.tot])
# changing slot 'UPL' (upper protection level) for all cells:
# build a frame of old values, add 1, and write the new values back.
inp <- data.frame(strID=getInfo(problem,'strID'), UPL_old=getInfo(problem,'UPL'))
inp$UPL_new <- inp$UPL_old+1
problem <- setInfo(problem, type='UPL', index=1:nrow(inp), input=inp$UPL_new)
|
dc4a7a36cebab9ecb9e702b4bee8f1e9b645838b | 1ac2a49a60a821bd2efa4534b48041b9a279ccbe | /529121685.r | de90af07dc2e6071814f9d8684d4127de96d9977 | [] | no_license | erex/MT4113-2016-asmt1-old | 301ba43d616d529469cdfee16130234174b99827 | 42ceed426fb9980b53d01b724e8855df785ebcb4 | refs/heads/master | 2020-05-23T08:11:20.797156 | 2016-09-27T09:38:38 | 2016-09-27T09:38:38 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 10,413 | r | 529121685.r | #I confirm that the attached is my own work, except where clearly indicated in the text
#Marsaglia and Bray polar-method generator for normal deviates.
# Input:
#   n       - number of values to return (required)
#   mean    - mean of the target normal distribution, 0 by default
#   sd      - standard deviation of the target distribution, 1 by default
#   threads - cap on rejection-loop iterations, guarding against an
#             infinite loop in the rejection stage
# Output: n pseudo-random numbers distributed N(mean, sd).
my.rnorm <- function ( n = NULL, mean = 0, sd = 1, threads = 1000){
  # Validate the arguments before doing any work.
  handlingDefault( n, mean, sd)
  # The polar method produces deviates in pairs, so draw enough pairs to
  # cover n; when n is odd one surplus value is trimmed afterwards.
  pairs.needed <- ceiling( n/2 )
  draws <- runWithRejection( pairs.needed, threads)
  if ( length( draws ) != n ){
    draws <- draws[-1]
  }
  # Shift and scale the standard-normal draws to the requested mean and sd.
  return( standartise( draws, sd, mean))
}
#Validates the shared arguments of my.rnorm() and general.rnorm().
# Stops with "invalid arguments" when n is missing or unusable, or when sd
# is negative; emits an informational message when mean or sd were left at
# their default values (original behaviour, preserved).
# n must be a single, non-missing, positive number; previously a
# non-numeric or NA n slipped through here and failed later with an
# opaque error.
handlingDefault <- function( n, mean, sd){
  errorMsg <- "invalid arguments"
  #Handling the lack of n error
  if ( is.null(n)) stop(errorMsg)
  #Reject n that is not a single finite number (e.g. NA, "a", c(1, 2))
  if ( !is.numeric(n) || length(n) != 1 || is.na(n)) stop(errorMsg)
  #Handling negative (or zero) n values
  if ( n <= 0) stop(errorMsg)
  #Throws a message if the user did not specify the mean value
  # NOTE(review): also fires when mean = 0 is passed explicitly.
  if ( mean == 0) message(errorMsg)
  #Throws a message if the user did not specify the sd value
  if( sd == 1) message(errorMsg)
  #Throws an error if given sd is negative
  if ( sd < 0) stop(errorMsg)
}
#Polar (Marsaglia and Bray) rejection sampler.
# Draws 'iterations' pairs of standard-normal deviates: a candidate point is
# sampled uniformly on the square [-1, 1]^2 and rejected while it falls
# outside the unit circle; an accepted point (u1, u2) with w = u1^2 + u2^2
# is mapped to u * sqrt(-2 * log(w) / w), yielding two N(0, 1) values.
# 'threads' caps the rejection attempts per pair so a pathological run of
# rejections raises an error instead of looping forever.
# Returns a numeric vector of length 2 * iterations.
runWithRejection <- function( iterations, threads){
  errorMsg <- "invalid arguments"
  # Preallocate the result instead of growing it with append() every pass.
  answer <- numeric(2 * iterations)
  for (m in seq_len(iterations)){
    # Counter so a stuck rejection loop errors out after 'threads' tries.
    attempts <- 0
    # Candidate uniform on [-1, 1]^2.  runif(2, -1, 1) computes the same
    # values as rescaling runif(2, 0, 1) by 2*u - 1, so the behaviour of
    # the original mixed draw styles is preserved exactly.
    unif <- runif( 2, -1, 1)
    w <- sum( unif^2)
    while ( w > 1){
      unif <- runif( 2, -1, 1)
      w <- sum( unif^2)
      attempts <- attempts + 1
      if (attempts > threads) stop(errorMsg)
    }
    # Polar transform: both coordinates scaled by sqrt(-2*log(w)/w).
    answer[c(2 * m - 1, 2 * m)] <- unif * sqrt(-2 * log(w)) / sqrt(w)
  }
  return( answer)
}
#Draw a pair of independent Uniform(0, 1) deviates.
# Returns a numeric vector of length 2.
generateUnif <- function() {
  runif(2, min = 0, max = 1)
}
#Linearly rescale Uniform(0, 1) draws onto [-1, 1].
# u: numeric vector of values (typically in [0, 1]).
# Returns 2 * u - 1, elementwise.
toUnitSquare <- function(u){
  return(2 * u - 1)
}
#_________________Testing_______________________________________
#The code below is just a collection of unit tests with a brief explanation in the comments
#Self-test suite for my.rnorm() and its helpers: one Shapiro-Wilk normality
#check plus unit tests of the error handling.  Returns a named collection of
#"test passed"/"failed" strings.
test.my.rnorm <- function() {
  # Container for the test outcomes; append() coerces the empty data.frame
  # to a list as results accumulate.
  results <- data.frame()
  # Testing normality using the Shapiro-Wilk test ($p partial-matches the
  # htest component $p.value).  Because the sample is pseudo-random, the
  # observed p-value may occasionally fall below the 0.1 threshold even
  # though the generator is working correctly.
  a <- my.rnorm( n = 1000, mean = 10, sd = 5)
  plot(a)
  if (shapiro.test(a)$p > 0.1 ) {
    results <- append( results, "test passed")
  } else {
    results <- append( results, "test failed")
  }
  # Unit tests for the helper functions and their error handling.
  # handlingDefault() must reject a negative sd.
  # NOTE(review): inherits(res1, "try-error") would be more robust than
  # comparing class() with ==.
  res1 <- try(handlingDefault(n = 3, mean = 0, sd = -1),silent = TRUE)
  if (class(res1) == "try-error"){
    results <- append(results, "test passed")
  } else {
    results <- append(results, "failed")
  }
  # Checking functionality: toUnitSquare() maps 2 to 2*2 - 1 = 3.
  if (toUnitSquare(2) == 3){
    results <- append(results, "test passed")
  } else {
    results <- append(results, "failed")
  }
  # runWithRejection() yields two values per requested pair.
  if (length(runWithRejection( 3, 100)) == 6){
    results <- append(results, "test passed")
  }else {
    results <- append(results, "failed")
  }
  # my.rnorm() must throw an error when n is not supplied.
  res2 <- try(my.rnorm() ,silent = TRUE)
  if (class(res2) == "try-error"){
    results <- append(results, "test passed")
  } else {
    results <- append(results, "failed")
  }
  # my.rnorm() returns exactly the n values requested.
  if (length(my.rnorm(3)) == 3){
    results <- append(results, "test passed")
  } else {
    results <- append(results, "failed")
  }
  # my.rnorm() must throw an error for a negative sd.
  res3 <- try( my.rnorm( n = 5, sd = -4) ,silent = TRUE)
  if (class(res3) == "try-error"){
    results <- append(results, "test passed")
  } else {
    results <- append(results, "failed")
  }
  names(results) <- c("Shapiro-Wilk test", "Negative sd for handler", "UnitSquare", "Specified n in runWithRejection()", "No n in my.rnorm", "Specified n in my.rnorm", "Negative sd in my.rnorm")
  return(results)
}
#_________________________________________________________________________
#Dispatching generator for normal deviates.
# Input:
#   n       - number of values to return
#   mean    - mean of the target normal distribution, 0 by default
#   sd      - standard deviation, 1 by default
#   threads - rejection-loop cap forwarded to my.rnorm() (method 1 only)
#   method  - 1 = Marsaglia and Bray polar algorithm (default),
#             2 = Box-Mueller algorithm,
#             3 = Central Limit theorem approximation
# Output: n pseudo-random N(mean, sd) values generated with the chosen
# method; stops with "invalid arguments" on an unknown method.
general.rnorm <- function( n, mean = 0, sd = 1, threads = 1000, method = 1){
  #Handling the default argument errors
  handlingDefault( n, mean, sd)
  # Fixed typo: the message previously read "invalis arguments",
  # inconsistent with every other error in this file.
  errorMsg <- "invalid arguments"
  if ( method == 1){
    message("Using Marsaglia and Bray algorithm")
    # my.rnorm() applies the mean/sd rescaling itself.
    return(my.rnorm( n, mean, sd, threads))
  }else if( method == 2){
    message("Using Box-Mueller algorithm")
    return( standartise( box.mueller(n), sd, mean))
  }else if( method == 3){
    message("Using Central Limit theorem")
    return( standartise( central(n), sd, mean))
  } else {
    stop(errorMsg)
  }
}
#Apply a location/scale shift to standard-normal draws.
# a: numeric vector; sd: scale factor; mean: offset.
# Returns a mapped to the N(mean, sd) scale, i.e. mean + sd * a.
standartise <- function( a, sd, mean){
  return(mean + sd * a)
}
#_______Central limit theorem method__________________________
#Central Limit Theorem generator for standard-normal deviates.
# Each deviate is built from 16 Uniform(0, 1) draws: their sum has mean 8
# and variance 16/12, so (sum - 8) * sqrt(3) / sqrt(4) has mean 0 and
# variance 1 and is approximately normal.
# n: number of deviates to return.  Returns a numeric vector of length n.
central <- function(n){
  # Draw all 16 * n uniforms at once; matrix() fills column-wise, so each
  # column holds the 16 draws of one deviate in the original draw order.
  # This replaces the previous append()-in-a-loop, which grew the result
  # vector on every iteration.
  u <- matrix(runif(16 * n), nrow = 16)
  return((colSums(u) - 8) * sqrt(3) / sqrt(4))
}
#______Box-Mueller algorithm________________________________
#Box-Mueller transform generator for standard-normal deviates.
# Each pair of Uniform(0, 1) draws (u1, u2) maps to the independent pair
#   sin(2*pi*u1) * sqrt(-2*log(u2)),  cos(2*pi*u1) * sqrt(-2*log(u2)).
# n: number of deviates to return; for odd n one surplus value is dropped.
# Returns a numeric vector of length n.
box.mueller <- function(n){
  #Deviates come in pairs, so draw enough pairs to cover an odd n
  pairs.needed <- ceiling(n/2)
  # Preallocate instead of growing the result with append() on every pass.
  answer <- numeric(2 * pairs.needed)
  for (i in seq_len(pairs.needed)){
    u <- runif(2, 0, 1)
    r <- sqrt(-2 * log(u[2]))
    answer[c(2 * i - 1, 2 * i)] <- c(sin(2 * pi * u[1]), cos(2 * pi * u[1])) * r
  }
  # Trim the surplus value when n is odd.
  if (length(answer) != n){
    answer <- answer[-1]
  }
  return(answer)
}
#__________Testing_____________________________
#The code below is just a collection of unit tests with a brief explanation in the comments
#Self-test suite for general.rnorm() and its helpers: Shapiro-Wilk normality
#checks for methods 2 and 3, plus unit tests of the error handling.
#Returns a named collection of "test passed"/"failed" strings.
test.general.rnorm <- function() {
  # Container for the test outcomes; append() coerces the empty data.frame
  # to a list as results accumulate.
  results <- data.frame()
  # Testing normality using the Shapiro-Wilk test ($p partial-matches the
  # htest component $p.value).  Because the samples are pseudo-random, the
  # observed p-value may occasionally fall below the 0.1 threshold even
  # though the generator is working correctly.
  a <- general.rnorm( n = 4000, sd = 4, mean = 50, method = 2)
  b <- general.rnorm( n = 4000, sd = 4, mean = 50, method = 3)
  plot(a)
  if (shapiro.test(a)$p > 0.1 ) {
    results <- append( results, "test passed")
  } else {
    results <- append( results, "test failed")
  }
  plot(b)
  if (shapiro.test(b)$p > 0.1 ) {
    results <- append( results, "test passed")
  } else {
    results <- append( results, "test failed")
  }
  # Unit tests for the helper functions and their error handling.
  # box.mueller() must throw an error when n is missing.
  # NOTE(review): inherits(res1, "try-error") would be more robust than
  # comparing class() with ==.
  res1 <- try(box.mueller(),silent = TRUE)
  if (class(res1) == "try-error"){
    results <- append(results, "test passed")
  } else {
    results <- append(results, "failed")
  }
  # box.mueller() returns exactly n values (here odd n = 5).
  if (length(box.mueller(5)) == 5){
    results <- append(results, "test passed")
  } else {
    results <- append(results, "failed")
  }
  # central() must throw an error when n is missing.
  res1 <- try(central(),silent = TRUE)
  if (class(res1) == "try-error"){
    results <- append(results, "test passed")
  } else {
    results <- append(results, "failed")
  }
  # central() returns exactly n values.
  if (length(central(5)) == 5){
    results <- append(results, "test passed")
  } else {
    results <- append(results, "failed")
  }
  # general.rnorm() must throw an error when n is not supplied.
  res2 <- try(general.rnorm() ,silent = TRUE)
  if (class(res2) == "try-error"){
    results <- append(results, "test passed")
  } else {
    results <- append(results, "failed")
  }
  # general.rnorm() must throw an error for a negative sd.
  res3 <- try( general.rnorm( n = 3, sd = -1) ,silent = TRUE)
  if (class(res3) == "try-error"){
    results <- append(results, "test passed")
  } else {
    results <- append(results, "failed")
  }
  # general.rnorm() must throw an error for an unknown method.
  res3 <- try( general.rnorm( n = 3, sd = -1, method = 4) ,silent = TRUE)
  if (class(res3) == "try-error"){
    results <- append(results, "test passed")
  } else {
    results <- append(results, "failed")
  }
  # general.rnorm() returns exactly the n values requested.
  if (length(general.rnorm( n = 3, sd = 4)) == 3){
    results <- append(results, "test passed")
  } else {
    results <- append(results, "failed")
  }
  names(results) <- c("Shapiro-Wilk test for Box-Mueller algorithm", "Shapiro-Wilk test for Central Limit Theorem", "No n in box.mueller()","Specified n in box.mueller()",
                      "No n in central()", "Specified n in central()", "No n in general.rnorm()",
                      "Negative sd for general.rnorm()", "Method input test", "Specified n in general.rnorm()")
  return(results)
}
|
b1de5cde650a929367f7f0f7bf2ed061554e03cb | 826cc17cd51ccbceeb0b33ee23cab81ccee3932f | /tests/testthat/test-plot_original_vs_reconstructed.R | 7336ae5c89ac34621b36dd913ddd867a875b806e | [
"MIT"
] | permissive | UMCUGenetics/MutationalPatterns | 9b9d38a7ab69d7e29d8900f11fa9fb7ef328cfb9 | ca9caf0d0ba3cd1e13cb909009dc5b3b27b84631 | refs/heads/master | 2023-04-14T23:28:50.852559 | 2022-11-22T11:37:17 | 2022-11-22T11:37:17 | 53,409,261 | 86 | 37 | MIT | 2022-11-22T11:37:18 | 2016-03-08T12:10:11 | R | UTF-8 | R | false | false | 994 | r | test-plot_original_vs_reconstructed.R | context("test-plot_original_vs_reconstructed")
# Fixtures: pre-computed objects shipped inside the MutationalPatterns
# package are loaded via system.file().
# Load mutation matrix
mut_mat <- readRDS(system.file("states/mut_mat_data.rds",
  package = "MutationalPatterns"
))
# Load the NMF result (provides a $reconstructed matrix)
nmf_res <- readRDS(system.file("states/nmf_res_data.rds",
  package = "MutationalPatterns"
))
# Load the signature refit result (also provides $reconstructed)
fit_res <- readRDS(system.file("states/snv_refit.rds",
  package = "MutationalPatterns"
))
# Exercise the function with defaults and with the optional
# y_intercept and ylims arguments.
output <- plot_original_vs_reconstructed(mut_mat, nmf_res$reconstructed)
output_fit <- plot_original_vs_reconstructed(mut_mat, fit_res$reconstructed)
output_intercept <- plot_original_vs_reconstructed(mut_mat, fit_res$reconstructed, y_intercept = 0.90)
output_lims <- plot_original_vs_reconstructed(mut_mat, fit_res$reconstructed, ylims = c(0, 1))
# Each call should return a ggplot ("gg") object.
test_that("Output has correct class", {
  expect_true(inherits(output, c("gg")))
  expect_true(inherits(output_fit, c("gg")))
  expect_true(inherits(output_intercept, c("gg")))
  expect_true(inherits(output_lims, c("gg")))
})
|
e1a65a4b0c5b0c171146667ccada3af53d464073 | 5e63182ee47f35527bcfd28b9e53a280e1f245e8 | /Predictions.r | 9b5371dd7aaa726eef945e24ebb341a4745e4d79 | [] | no_license | MrNaz1/Midterm | 079300a70e793064b106cba770ec8110d68671cf | e9a7c787428fa35a014bb22e8f215cf10a042c4c | refs/heads/main | 2023-08-21T16:24:49.520947 | 2021-10-14T06:59:04 | 2021-10-14T06:59:04 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 5,045 | r | Predictions.r | library(tidyverse)
library(tidycensus)
library(sf)
library(kableExtra)
library(dplyr)
library(ggcorrplot)
library(caret)
#######################################################
# Getting data from the ACS tracts
#######################################################
# Browse the 2019 5-year ACS variable catalogue (cached locally).
varlist_2019 <- load_variables(2019, "acs5", cache = TRUE)
# ACS variables pulled below, in order:
#Total population
#Total employed population
#Median household income
#Population with income below poverty level
#White population
#Vacant occupancy
#Owner-occupied housing units: bachelor's degree or higher
#Aggregate travel time to work
#Total number of bachelor's degrees in science and engineering related fields
# Tract-level pull with geometry; state 08 / county 013 are FIPS codes
# (presumably Colorado / Boulder County — TODO confirm).  Estimate columns
# carry an "E" suffix in wide output; they are kept and renamed below.
tracts19 <- get_acs(geography = "tract",
                    variables = c("B01001_001","B23025_004","B06011_001",
                                  "B06012_002","B02001_002","B25002_003",
                                  "B25013_006","B08013_001","B15012_009"),
                    year=2019,
                    state=08,
                    county=013,
                    output = "wide",
                    geometry=TRUE) %>%
  st_transform('ESRI:102254') %>%
  select( c("B01001_001E","B23025_004E","B06011_001E",
            "B06012_002E","B02001_002E","B25002_003E",
            "B25013_006E","B08013_001E","B15012_009E") ) %>%
  rename(tot_pop = "B01001_001E",
         empl_pop = "B23025_004E",
         med_inc = "B06011_001E",
         pvty_pop = "B06012_002E",
         white_pop = "B02001_002E",
         vac_occ = "B25002_003E",
         own_occ_bach = "B25013_006E",
         tt_work = "B08013_001E",
         sci_bach = "B15012_009E")
#######################################################
# Loading data, finding correlation within studentData
#######################################################
# This loads all the housing data into "studentData".
studentData <- st_read("studentData.geojson", crs = 'ESRI:102254')
# Attach ACS tract data via a spatial (point-in-polygon) join.
studentData <- st_join(studentData, tracts19, join = st_within)
# Green-space distance feature (currently disabled):
# GreenSpacePolygon <- st_union(st_read("County_Open_Space.geojson")) %>%
#  st_transform('ESRI:102254')
# attach distance to green space data
# studentData %>% mutate(green_dis = st_distance(studentData, GreenSpacePolygon))
# Load natural-landmarks data (merged into a single geometry).
landmarksPolygon <- st_union(st_read("Natural_Landmarks.geojson")) %>%
  st_transform('ESRI:102254')
# Attach each property's distance to the nearest landmark.
studentData <- mutate(studentData, landmark_dist = st_distance(studentData, landmarksPolygon))
# This selects all numeric variables (geometry dropped) as preparation for
# the correlation analysis that follows; rows with NAs are removed.
cleanData <-
  select_if(st_drop_geometry(studentData), is.numeric) %>%
  select(!c(ExtWallSec, IntWall, Roof_Cover, Stories, UnitCount, MUSA_ID)) %>%
  na.omit()
# The test data only includes rows comprising the test set (toPredict == 0).
testData <- filter(cleanData, toPredict == 0)
# Scatter plot with a fitted linear trend: price vs. main-floor square feet.
ggplot(data = testData, aes(mainfloorSF, price)) +
  geom_point(size = .5) +
  geom_smooth(method = "lm")
# Correlation analysis: Pearson correlation for each variable pair, with
# insignificant cells blanked out.
ggcorrplot(
  round(cor(testData), 1),
  p.mat = cor_pmat(testData),
  colors = c("#25CB10", "white", "#FA7800"),
  type="lower",
  insig = "blank") +
  labs(title = "Correlation across numeric variables")
# Baseline OLS fit; swap variables in select() to explore other predictors.
testSignificance <- lm(price ~ ., data = cleanData %>%
                         dplyr::select(price,
                                       qualityCode,
                                       TotalFinishedSF,
                                       mainfloorSF))
# This gives us our r-squared value, which measures fit to the training data.
summary(testSignificance)
# We'll need to try cross-validation to see how well the model predicts for
# data it has never seen before, which will be more useful than r squared.
# This sets up k-fold cross-validation (seeded for reproducibility).
k = 10
fitControl <- trainControl(method = "cv", number = k)
set.seed(324)
# Variables in the "select(...)" call are the predictors considered here.
regression.10foldcv <-
  train(price ~ ., data = cleanData %>%
          select(price,
                 qualityCode,
                 TotalFinishedSF,
                 mainfloorSF,
                 builtYear,
                 year,
                 Heating,
                 med_inc,
                 tot_pop,
                 tt_work,
                 white_pop,
                 vac_occ,
                 landmark_dist),
        method = "lm", trControl = fitControl, na.action = na.pass)
# The resulting Mean Absolute Error (MAE) of running this line tells us how
# successful our model is at predicting unknown data.
regression.10foldcv
|
bffaba018855c24b77ec3f9aa848d5dd859a1e3c | 9c56533a13bb27b74697c105d5ac79a534d0dca6 | /Real_data/Import_code/Handler.R | 9d30e64b3911375641bb296e459053da0aeb0f24 | [] | no_license | EricaZ/FACD-model | e6bc7f210a176f7aa2c193cfbaa617574ca8ff17 | 2a2536bff2ffaab3be7f441383346df73e0f3d8a | refs/heads/master | 2016-09-06T13:49:41.138266 | 2014-03-21T07:27:14 | 2014-03-21T07:27:14 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 2,941 | r | Handler.R | Handler <- function(a) {
  # Filters one week of trade records (a: data frame with V1 = timestamp,
  # V2/V3 = price/size factors) down to large intraday trades and computes
  # inter-trade durations, returning the cleaned, row-bound result.
  # NOTE(review): library() calls inside a function attach packages
  # session-wide as a side effect.
  library(lubridate)
  # Keep only trades whose V2 * V3 value exceeds this threshold.
  threshold <- 500000
  amount <- a$V2 * a$V3
  a1 <- a[which(amount > threshold),]
  # Parse timestamps ("month/day/year hour:min:sec") and sort chronologically.
  a1$V1 <- mdy_hms(a1$V1)
  a2 <- a1[order(a1$V1),]
  # compute time of the day (in seconds since midnight)
  TofD <- as.numeric(difftime(a2$V1, floor_date(a2$V1, "day"), unit="secs"))
  # Retain trades that occurred during 9:30 - 12:00 & 13:00 - 16:00
  # (34200 = 9:30, 43200 = 12:00, 46800 = 13:00, 57600 = 16:00)
  a3 <- cbind(a2, TofD)
  a3 <- subset(a3, (a3$TofD >= 34200) & (a3$TofD < 57600) & ((a3$TofD < 43200)|(a3$TofD >= 46800)))
  # Obtain indicator of the first trade of a day (nonzero if true, zero if false)
  ## a vector of the current date
  getdate1 <- floor_date(a3$V1[-nrow(a3)], "day")
  ## a vector of the next date
  getdate2 <- floor_date(a3$V1[-1], "day")
  ## a vector of the difference between the previous date and the current date
  ## (nonzero iff it is the first trade of a day)
  datediff <- c(1, as.numeric(difftime(getdate2, getdate1, unit="days")))
  # Obtain indicator of morning(1) or afternoon trade(0)
  ismorning <- rep(0, nrow(a3))
  ismorning[a3$TofD < 43200] <- 1
  # morningdiff == -1 marks a morning-to-afternoon transition.
  morningdiff <- c(0, ismorning[-1]-ismorning[-nrow(a3)])
  ## indicator of the first afternoon trade(1) of a day, or else(0)
  isfirst <- rep(0, nrow(a3))
  isfirst[(datediff==0) & (morningdiff== -1)] <- 1
  # compute duration (seconds between consecutive trades) and add it as a column
  dur <- c(0,int_length(int_diff(a3$V1)))
  a3 <- cbind(a3, dur)
  # remove the first trade of each day and the first trade of each afternoon
  # (their durations span an overnight gap or the lunch break)
  a4 <- a3[-which((datediff != 0)|(isfirst == 1)), ]
  # remove obs with zero duration
  a4 <- a4[-which(a4$dur == 0), ]
  # Retain trades that occurred during 9:50 - 12:00 & 13:00 - 16:00
  a5 <- subset(a4, a4$TofD >= 35400)
  # add a vector of the current date to the data set
  currdate <- floor_date(a5$V1, "day")
  a5 <- cbind(a5, currdate)
  # Split into the 5 trading days following the first observed date,
  # creating variables date1..date5 via assign().
  # NOTE(review): assumes 'a' spans exactly 5 consecutive trading days.
  firstdate <- floor_date(a5$V1[1], "day")
  for (i in 1:5) {
    tempdate <- firstdate + days(i-1)
    assign(paste("date",i,sep=""), a5[a5$currdate == tempdate,])
  }
  # Retain trades that occurred during 9:50 - 10:00 (warm-up window)
  a6 <- subset(a5, a5$TofD < 36000)
  for (i in 1:5) {
    tempdate <- firstdate + days(i-1)
    assign(paste("date",i,"init",sep=""), a6[a6$currdate == tempdate,])
  }
  # Retain trades that occurred during 10:00 - 12:00 & 13:00 - 16:00
  a7 <- subset(a5, a5$TofD >= 36000)
  for (i in 1:5) {
    tempdate <- firstdate + days(i-1)
    assign(paste("date",i,"valid",sep=""), a7[a7$currdate == tempdate,])
  }
  # Assign the average duration of the warm-up window to the first duration
  # starting after 10:00 on each day (replaces an unreliable opening value).
  for (i in 1:5) {
    tempdat1 <- get(paste("date",i,"init",sep=""))
    tempdat2 <- get(paste("date",i,"valid",sep=""))
    tempdat2$dur[1] <- mean(tempdat1$dur)
    assign(paste("date",i,"final", sep=""), tempdat2)
  }
  # Stack the five cleaned days back into a single data frame.
  mydat <- NULL
  for (i in 1:5) {
    mydat <- rbind(mydat, get(paste("date",i,"final", sep="")))
  }
  return(mydat)
}
|
a7e841dccd4b86c8ce09ad6fbc2640b605d75bb6 | 3fe1517654896fb0e0e821380c907660195b2e0f | /man/calcMetrics.Rd | 8959316a862013924269f07b8cee892d5e531c22 | [] | no_license | eliotmiller/metricTester | 9462035d8342e49d766ec37463cd27c2090c85c1 | 976d9b43773f1a06bc0254d3f355d2ee9f4be659 | refs/heads/master | 2020-04-06T06:30:42.528226 | 2019-12-12T20:38:53 | 2019-12-12T20:38:53 | 11,936,920 | 8 | 5 | null | 2017-03-21T15:00:28 | 2013-08-06T23:25:26 | R | UTF-8 | R | false | true | 2,837 | rd | calcMetrics.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/calcMetrics.R
\name{calcMetrics}
\alias{calcMetrics}
\title{Calculate phylogenetic community structure metrics}
\usage{
calcMetrics(metrics.input, metrics, new_ = FALSE)
}
\arguments{
\item{metrics.input}{Prepped metrics.input object}
\item{metrics}{Optional. If not provided, defines the metrics as all of those in
defineMetrics. If only a subset of those metrics is desired, then metrics should take
the form of a character vector corresponding to named functions from defineMetrics.
The available metrics can be determined by running names(defineMetrics()). Otherwise,
if the user would like to define a new metric on the fly, the argument metrics can take
the form of a named list of new functions (metrics). If the
latter, new_ must be set to TRUE.}
\item{new_}{Whether or not new metrics are being defined on the fly. Default is FALSE.
Set to TRUE if a new metric is being used.}
}
\value{
A data frame with the calculated metrics of all input "communities".
}
\description{
Given a prepped metrics.input object, calculate all phylogenetic community structure
metrics of interest.
}
\details{
Determine which metrics will be calculated by running names(defineMetrics()).
If only a subset of these is desired, supply metrics with a character vector of the
named, available metrics. IMPORTANTLY, note that some downstream functions expect the
first column returned from this function to be the species richness of each plot. It is
best practice therefore to always pass "richness" along as the first metric, even when
only a subset of metrics is being calculated. It is possible to provide this function
with both predefined metrics and metrics that are defined on the fly, but the call is
rather convoluted. See examples.
}
\examples{
#simulate tree with birth-death process
tree <- geiger::sim.bdtree(b=0.1, d=0, stop="taxa", n=50)
sim.abundances <- round(rlnorm(5000, meanlog=2, sdlog=1))
cdm <- simulateComm(tree, richness.vector=10:25, abundances=sim.abundances)
prepped <- prepData(tree, cdm)
results <- calcMetrics(prepped)
#an example of how to only calculate a subset of pre-defined metrics
results2 <- calcMetrics(prepped, metrics=c("richness","NAW_MPD"))
#an example of how to define ones own metrics for use in the metricTester framework
#this "metric" simply calculates the richness of each plot in the CDM
exampleMetric <- function(metrics.input)
{
output <- apply(metrics.input$picante.cdm, 1, lengthNonZeros)
output
}
calcMetrics(prepped, metrics=list("richness"=metricTester:::my_richness,
"example"=exampleMetric), new_=TRUE)
}
\references{
Miller, E. T., D. R. Farine, and C. H. Trisos. 2016. Phylogenetic community
structure metrics and null models: a review with new methods and software.
Ecography DOI: 10.1111/ecog.02070
}
|
5912246cd77aeace10597c3b4b8e736d041b97e1 | 9f4f31183546e098a869b44a58b78bfaff7059b4 | /campbell_file_process.r | 9017ff45c991978a543ecf521b63705474d7d37d | [] | no_license | kroppheather/VIPER_sensor_data | 5f53028db7c099476134cc83ee8c716ea3b6ad09 | 081957f3d687d4cf582abb0ea846a4d6ae478b96 | refs/heads/master | 2021-01-19T19:40:52.158093 | 2018-08-16T15:26:58 | 2018-08-16T15:26:58 | 101,202,191 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 8,853 | r | campbell_file_process.r | #######################################################################################
#######################################################################################
########### Script started in October 2017 by Heather Kropp ###########
########### Script for processing data files from Campbell sensors. ###########
########### This script accounts for issues with timestamps due to incorrect###########
########### computer times, different time zones on devices, and tracks use ###########
########### of sensors over time. Currently this script assumes that the ###########
########### incoming data from the logger is appended to each file with ###########
########### consistent time documentation. ###########
#######################################################################################
#######################################################################################
#load lubridate
library(lubridate)
#load plyr
library(plyr)
#setwd to the folder with compiled files saved as csv
#make sure there are only the compiled files in this folder
setwd("c:\\Users\\hkropp\\Google Drive\\viper_energy\\combined_files\\campbell\\csv_to_process")
#setwd("c:\\Users\\hkropp\\Google Drive\\viper_energy\\combined_files\\ls_toprocess")
#specify an output path
output.path<-"z:\\data_repo\\field_data\\viperData\\sensor\\campbell"
#indicate the date format of the data
# used by as.Date() below to parse the logger TIMESTAMP column
dateFormat<-"%m/%d/%Y %H:%M"
#read in data tables with sensor and logger information
# datMI: measurement info (one row per sensor measurement; has sensorName)
datMI<-read.csv("c:\\Users\\hkropp\\Google Drive\\viper_energy\\combined_files\\campbell\\sensor_info\\measurement_info.csv")
# datDI: datatable description, incl. filename and install end day/hour/year
datDI<-read.csv("c:\\Users\\hkropp\\Google Drive\\viper_energy\\combined_files\\campbell\\sensor_info\\datatable_desc.csv")
# datSI: sensor info; maps each logger file to a measurement type (measType)
datSI <- read.csv("c:\\Users\\hkropp\\Google Drive\\viper_energy\\combined_files\\campbell\\sensor_info\\sensor_info.csv")
#get unique filenames
# datLI: lookup from logger file name to a sequential logger ID (loggID);
# loggID indexes the fixmet/fixmetD/Fixout lists built below
datLI <- data.frame(loggerFile = unique(as.character(datDI$filename)))
datLI$loggID <- seq(1, dim(datLI)[1])
##get file names
tofix<-paste0(getwd(), "/", datLI$loggerFile, ".csv")
#read in files
# fixmet[[i]] holds the raw logger table for datLI$loggerFile[i];
# Campbell loggers write "NAN" for missing values, so treat it as NA
fixmet<-list()
for(i in 1:length(tofix)){
fixmet[[i]]<-read.csv(tofix[i], na.strings=c("NAN", "NA"))
}
#create a fixed time stamp for each file
#first pull out the day of year and year
#info for each timestamp
# fixmetD[[i]] mirrors fixmet[[i]] row-for-row with parsed calendar fields:
# DateI (Date), doyUN (day of year) and yearUN (year); "UN" = uncorrected,
# i.e. still in whatever time zone the logging computer used
fixmetD<-list()
for(i in 1:length(tofix)){
fixmetD[[i]]<-data.frame(DateI=as.Date(fixmet[[i]]$TIMESTAMP, dateFormat))
fixmetD[[i]]$doyUN<-yday(fixmetD[[i]]$DateI)
fixmetD[[i]]$yearUN<-year(fixmetD[[i]]$DateI)
}
# Convert a decimal hour recorded in UTC-4 (EDT) to Cherskiy local time,
# i.e. shift forward 15 hours and wrap past midnight. Vectorized.
CherskiyThour<-function(hour){
shifted <- hour + 15
ifelse(shifted >= 24, shifted - 24, shifted)
}
# Day-of-year companion to CherskiyThour: timestamps at or after hour 9
# (UTC-4) roll over to the next day in Cherskiy time; day 366 wraps to 1.
# Note: the code does not wrap day 365 in non-leap years. Vectorized.
CherskiyTdoy<-function(hour,doy){
next_day <- ifelse(doy == 366, 1, doy + 1)
ifelse(hour < 9, doy, next_day)
}
# Year companion to CherskiyTdoy: the year only increments when day 366
# rolls over to day 1 (hour >= 9 in UTC-4); all other rows keep their year.
CherskiyTyear<-function(doy,year, hour){
rolls_over <- doy == 366 & hour >= 9
ifelse(rolls_over, year + 1, year)
}
#convert to decimal hour
# timeUN = hour + fraction of hour from minutes (e.g. 9.5 = 09:30)
for(i in 1:length(tofix)){
fixmetD[[i]]$timeUN<-fixmet[[i]]$hour+(fixmet[[i]]$minute/60)
}
#fix the data
# Rows logged in UTC-4 (fixmet$UTC == -4) get shifted to Cherskiy time
# via the three Cherskiy* helpers above; all other rows pass through.
for(i in 1:length(tofix)){
fixmetD[[i]]$hourF<-ifelse(fixmet[[i]]$UTC==-4,
CherskiyThour(fixmetD[[i]]$timeUN),
fixmetD[[i]]$timeUN)
fixmetD[[i]]$doyF1<-ifelse(fixmet[[i]]$UTC==-4,
CherskiyTdoy(fixmetD[[i]]$timeUN,fixmetD[[i]]$doyUN),
fixmetD[[i]]$doyUN)
fixmetD[[i]]$yearF<-ifelse(fixmet[[i]]$UTC==-4,
CherskiyTyear(fixmetD[[i]]$doyUN,fixmetD[[i]]$yearUN,fixmetD[[i]]$timeUN),
fixmetD[[i]]$yearUN)
# apply any per-row day offset correcting a wrong logger clock date
fixmetD[[i]]$doyF2<- fixmetD[[i]]$doyF1+ fixmet[[i]]$Day.offset
}
Fixout<-list()
#write to file
# Fixout[[i]]: corrected time columns (doy/year/hour/minute) followed by
# the original measurement columns (columns 6 onward of the raw table)
for(i in 1:length(tofix)){
#make a data frame with only the correct info
Fixout[[i]]<-data.frame(doy=fixmetD[[i]]$doyF2,year=fixmetD[[i]]$yearF,
hour=fixmetD[[i]]$hourF, minute=fixmet[[i]]$minute,
fixmet[[i]][,6:dim(fixmet[[i]])[2]])
}
########################################################################################
#### match sensor info to data
########################################################################################
#each datatable has the same initialization period because all of the sensors are set up together
#for the data table. Just need to subset Fixout based on the datatable
#just get the unique date for each datatable
dateStartD <- unique(data.frame(loggerFile=datDI$filename, timeoutEnd = datDI$timeoutEnd,
dayEnd=datDI$dayEnd, yearEnd=datDI$yearEnd))
#now merge with datatable ID
fileStart <- join(datLI, dateStartD, by="loggerFile",type="left")
#exclude any data in the warm up period before sensor install
# Fixout2[[i]] = Fixout[[i]] starting at the row matching the recorded
# install-end timestamp (dayEnd/timeoutEnd/yearEnd); if no row matches,
# keep the whole table (fixStart = 1).
Fixout2<- list()
fixStart <- numeric (0)
for(i in 1:length(tofix)){
#get the starting point for the data
if(length(which(Fixout[[i]]$doy==fileStart$dayEnd[i]&
Fixout[[i]]$hour==fileStart$timeoutEnd[i]&
Fixout[[i]]$year==fileStart$yearEnd[i]))!= 0){
fixStart[i] <- which(Fixout[[i]]$doy==fileStart$dayEnd[i]&
Fixout[[i]]$hour==fileStart$timeoutEnd[i]&
Fixout[[i]]$year==fileStart$yearEnd[i])
}else{fixStart[i] <- 1}
#subset to include starting point
Fixout2[[i]] <- Fixout[[i]][fixStart[i]:dim(Fixout[[i]])[1],]
}
#start by subsetting the info for each data table
# measList[[i]]: datatable rows for logger file i;
# measList2[[i]]: same rows joined to measurement info by sensorName
measList <- list()
measList2 <- list()
for(i in 1:length(tofix)){
#pull out info
measList[[i]] <- datDI[datDI$filename==datLI$loggerFile[i],]
#now match up the possible types of measurements in each datatable
measList2[[i]] <- join(measList[[i]], datMI, by="sensorName", type="left")
}
#now need to pull out all by measurement type
#join measurement type
colnames(datSI)[1] <- "loggerFile"
fileStart <- join(fileStart, datSI, by="loggerFile", type="left" )
#pull out each type into a list
sapflowF <- fileStart[fileStart$measType=="sapflow",]
heatfluxF <- fileStart[fileStart$measType=="heatflux",]
radiationF <- fileStart[fileStart$measType=="radiation",]
#for sapflux variables are best left relatively untouched
#so just pull out and leave untouched
#directories to save to
dir1 <-c(#"c:\\Users\\hkropp\\Google Drive\\viper_energy\\combined_files\\campbell\\csv_out\\",
#"c:\\Users\\hkropp\\Google Drive\\Loranty_Lab_Sensor\\campbell\\",
#"c:\\Users\\hkropp\\Google Drive\\viperSensor\\",
#"z:\\student_research\\tobio\\viperSensor\\campbell\\"
"z:\\data_repo\\field_data\\viperData\\sensor\\campbell")
# Write each sapflow logger table, unmodified except for stripping
# punctuation from column names (e.g. dots introduced by read.csv).
# NOTE(review): unlike the radiation/heatflux writes later in this script,
# this path has no "\\" between dir1[k] and "sapflow", so with the current
# dir1 value it resolves to "...campbellsapflow\\<file>.csv" — confirm the
# intended output directory.
sapflowListTemp<-list()
for(k in 1:length(dir1)){
for(i in 1:dim(sapflowF )[1]){
sapflowListTemp[[i]] <- Fixout2[[sapflowF$loggID[i]]]
#clean up column names
colnames(sapflowListTemp[[i]]) <- gsub("[[:punct:]]", "", colnames(sapflowListTemp[[i]]))
write.table(sapflowListTemp[[i]],
paste0(dir1[k],"sapflow\\",sapflowF$loggerFile[i], ".csv" ),
sep=",", row.names=FALSE)
}
}
#now compile radiation
#just grab the info for the entire radiometer
datDIsub <- data.frame(loggerFile= datDI$filename, site= datDI$site, sensorZ= datDI$sensorZ, loc=datDI$sensorLoc)
radiationF <- join(radiationF, datDIsub, by="loggerFile", type="left")
# tag every radiation table with its site, location and sensor height,
# so all loggers can be stacked into one long table below
radiationListTemp<-list()
for(i in 1:dim(radiationF )[1]){
radiationListTemp[[i]] <- Fixout2[[radiationF$loggID[i]]]
#add the site info, location, and height
radiationListTemp[[i]]$site <- rep(radiationF$site[i], dim(radiationListTemp[[i]])[1])
radiationListTemp[[i]]$loc <- rep(radiationF$loc[i], dim(radiationListTemp[[i]])[1])
radiationListTemp[[i]]$sensorZ <- rep(radiationF$sensorZ[i], dim(radiationListTemp[[i]])[1])
}
#now add all together
radiationAll <- ldply(radiationListTemp, data.frame)
# compile heatflux
#first grab relevant heatflux data
heatfluxListTemp<-list()
heatMeas <- list()
heatfluxListTemp2<-list()
for(i in 1:dim(heatfluxF )[1]){
heatfluxListTemp[[i]] <- Fixout2[[heatfluxF$loggID[i]]]
heatMeas[[i]] <- measList2[[heatfluxF$loggID[i]]]
#restructure to combine all into a dataframe
# wide -> long: one sensor column per row of heatMeas[[i]]; time columns
# are recycled across sensors ("times="), sensor metadata across rows
# ("each="), and the sensor value columns (6 .. 5+n sensors) are
# flattened column-wise into the single shf vector
heatfluxListTemp2[[i]] <- data.frame(doy=rep(heatfluxListTemp[[i]][,1], times=dim(heatMeas[[i]])[1]),
year=rep(heatfluxListTemp[[i]][,2], times=dim(heatMeas[[i]])[1]),
hour=rep(heatfluxListTemp[[i]][,3], times=dim(heatMeas[[i]])[1]),
shf=as.vector(data.matrix(heatfluxListTemp[[i]][,6:(5+dim(heatMeas[[i]])[1])])),
site=rep(heatMeas[[i]]$site, each=dim(heatfluxListTemp[[i]])[1]),
loc=rep(heatMeas[[i]]$sensorLoc, each=dim(heatfluxListTemp[[i]])[1]),
sensorZ=rep(heatMeas[[i]]$sensorZ, each=dim(heatfluxListTemp[[i]])[1]),
sensorID=rep(heatMeas[[i]]$sensorID, each=dim(heatfluxListTemp[[i]])[1]))
}
#combine all together
heatfluxAll <- ldply(heatfluxListTemp2, data.frame)
#write output
for(k in 1:length(dir1)){
write.table(radiationAll, paste0(dir1[k],"\\radiation\\netR.csv"),
sep=",", row.names=FALSE)
write.table(heatfluxAll,paste0(dir1[k],"\\heatflux\\heatflux.csv"),
sep=",", row.names=FALSE)
}
2c859e71b828246db675d3eb555342299aab2a85 | 4344aa4529953e5261e834af33fdf17d229cc844 | /input/gcamdata/R/zaglu_L120.LC_GIS_R_LTgis_Yh_GLU.R | 0c380080d3b906d513b0d47e6592ceb803c8b83e | [
"ECL-2.0",
"LicenseRef-scancode-unknown-license-reference"
] | permissive | JGCRI/gcam-core | a20c01106fd40847ed0a803969633861795c00b7 | 912f1b00086be6c18224e2777f1b4bf1c8a1dc5d | refs/heads/master | 2023-08-07T18:28:19.251044 | 2023-06-05T20:22:04 | 2023-06-05T20:22:04 | 50,672,978 | 238 | 145 | NOASSERTION | 2023-07-31T16:39:21 | 2016-01-29T15:57:28 | R | UTF-8 | R | false | false | 24,790 | r | zaglu_L120.LC_GIS_R_LTgis_Yh_GLU.R | # Copyright 2019 Battelle Memorial Institute; see the LICENSE file.
#' module_aglu_L120.LC_GIS_R_LTgis_Yh_GLU
#'
#' Land cover by GCAM region / aggregate land type / historical year / GLU.
#'
#' @param command API command to execute
#' @param ... other optional parameters, depending on command
#' @return Depends on \code{command}: either a vector of required inputs,
#' a vector of output names, or (if \code{command} is "MAKE") all
#' the generated outputs: \code{L120.LC_bm2_R_LT_Yh_GLU}, \code{L120.LC_bm2_R_UrbanLand_Yh_GLU}, \code{L120.LC_bm2_R_Tundra_Yh_GLU}, \code{L120.LC_bm2_R_RckIceDsrt_Yh_GLU}, \code{L120.LC_bm2_ctry_LTsage_GLU}, \code{L120.LC_bm2_ctry_LTpast_GLU}. The corresponding file in the
#' original data system was \code{LB120.LC_GIS_R_LTgis_Yh_GLU.R} (aglu level1).
#' @details Aggregate the \code{L100.Land_type_area_ha} dataset, interpolate land use historical
#' years, and split into various sub-categories. Missing values are set to zero because the GLU files don't include
#' zero values (i.e. they only report nonzero land use combinations).
#' @importFrom assertthat assert_that
#' @importFrom dplyr arrange distinct filter group_by left_join mutate select summarise
#' @importFrom tidyr complete nesting spread
#' @importFrom stats quantile
#' @author BBL April 2017
module_aglu_L120.LC_GIS_R_LTgis_Yh_GLU <- function(command, ...) {
if(command == driver.DECLARE_INPUTS) {
return(c(FILE = "common/iso_GCAM_regID",
FILE = "aglu/LDS/LDS_land_types",
FILE = "aglu/SAGE_LT",
FILE = "aglu/Various_CarbonData_LTsage",
"L100.Ref_veg_carbon_Mg_per_ha",
# 09-24-2022 XZ
# The following two LDS files need updates for Base Year Update later!
"L100.Land_type_area_ha",
FILE = "aglu/LDS/L123.LC_bm2_R_MgdFor_Yh_GLU_beforeadjust"))
} else if(command == driver.DECLARE_OUTPUTS) {
return(c("L120.LC_bm2_R_LT_Yh_GLU",
"L120.LC_bm2_R_UrbanLand_Yh_GLU",
"L120.LC_bm2_R_Tundra_Yh_GLU",
"L120.LC_bm2_R_RckIceDsrt_Yh_GLU",
"L120.LC_bm2_ctry_LTsage_GLU",
"L120.LC_bm2_ctry_LTpast_GLU",
"L120.LC_prot_land_frac_GLU",
"L120.LC_soil_veg_carbon_GLU"))
} else if(command == driver.MAKE) {
# Declare column names used in NSE pipelines so R CMD check does not
# flag them as undefined globals.
# NOTE(review): the list declares `soil_c (0-100 cms)` but the code below
# renames `soil_c (0-30 cms)`; the 0-100 entry appears stale — confirm.
iso <- GCAM_region_ID <- Land_Type <- year <- GLU <- Area_bm2 <- LT_HYDE <-
land_code <- LT_SAGE <- variable <- value <- Forest <- MgdFor <- Grassland <-
Shrubland <- Pasture <- nonForScaler <- ForScaler <- `mature age` <- Status <- prot_status <- prot_frac <-
non_prot_frac <- c_type <- Category <- `soil_c (0-100 cms)` <- `veg_c (above ground biomass)` <- `veg_c (below ground biomass)` <-
soil_c <- vegc_ag <- vegc_bg <- land_area <- veg_c <- Tot_land <- NULL # silence package check.
all_data <- list(...)[[1]]
# Load required inputs
get_data(all_data, "common/iso_GCAM_regID") %>%
select(iso, GCAM_region_ID) ->
iso_GCAM_regID
LDS_land_types <- get_data(all_data, "aglu/LDS/LDS_land_types")
SAGE_LT <- get_data(all_data, "aglu/SAGE_LT")
L123.LC_bm2_R_MgdFor_Yh_GLU_beforeadjust <- get_data(all_data, "aglu/LDS/L123.LC_bm2_R_MgdFor_Yh_GLU_beforeadjust")
L100.Land_type_area_ha <- get_data(all_data, "L100.Land_type_area_ha")
L100.Ref_veg_carbon_Mg_per_ha <- get_data(all_data, "L100.Ref_veg_carbon_Mg_per_ha")
# SAGE carbon lookup reshaped to one row per land type with mature age and
# Houghton soil/veg carbon densities (used as fallback values below)
Various_CarbonData_LTsage <- get_data(all_data,"aglu/Various_CarbonData_LTsage") %>%
filter(variable %in% c("mature age","soil_c","veg_c")) %>%
select(LT_SAGE,variable,value) %>%
distinct() %>%
spread(variable,value) %>%
select(LT_SAGE,`mature age`,soil_c_houghton=soil_c,veg_c_houghton=veg_c)
# Perform computations
# land.type: area data joined to GCAM region and SAGE/HYDE land type
# lookups; rows without a SAGE mapping (inland water) are dropped
land.type <-
L100.Land_type_area_ha %>%
## Add data for GCAM region ID and GLU
left_join_error_no_match(distinct(iso_GCAM_regID, iso, .keep_all = TRUE), by = "iso") %>%
## Add vectors for land type (SAGE, HYDE, and WDPA)
left_join_error_no_match(LDS_land_types, by = c("land_code" = "Category")) %>%
left_join(SAGE_LT, by = "LT_SAGE") %>% # includes NAs
rename(LT_SAGE_5 = Land_Type) %>%
## Drop all rows with missing values (inland bodies of water)
na.omit
##calculate protection_shares
# Protected fraction per region/GLU/land type = protected area / total
# area, computed over Unmanaged and Pasture land use only
land.type %>%
mutate(prot_status = if_else( Status %in% aglu.NONPROTECT_LAND_STATUS, "Non-protected" ,"Protected")) %>%
filter(LT_HYDE %in% c("Unmanaged","Pasture")) %>%
left_join(SAGE_LT, by = "LT_SAGE") %>% # includes NAs
## Drop all rows with missing values (inland bodies of water)
na.omit() %>%
# Note that Pasture is a land use type in moirai as opposed to a land cover type whereas in GCAM, it is treated as a separate land type.
# Therefore, we set the land type to Pasture based on the land use type so that we can map the same to the appropriate land types in GCAM.
mutate(Land_Type= if_else(LT_HYDE=="Pasture","Pasture",Land_Type)) %>%
group_by(GCAM_region_ID, year, GLU, Land_Type) %>%
mutate (Tot_land = sum(value)) %>%
ungroup() %>%
filter(prot_status == "Protected" ) %>%
group_by(GCAM_region_ID, year, GLU, Land_Type) %>%
mutate(value= sum(value)) %>%
ungroup() %>%
select(GCAM_region_ID, year, GLU, Tot_land, value, Land_Type) %>%
distinct() %>%
mutate(prot_frac = value/Tot_land, non_prot_frac = 1 -(value/Tot_land)) %>%
select(GCAM_region_ID, year, GLU,prot_frac, non_prot_frac,Land_Type) -> L120.LC_prot_land_frac_GLU
# Optionally override the moirai-derived protected fractions with a
# single global default constant
if(aglu.PROTECTION_DATA_SOURCE_DEFAULT == TRUE){
L120.LC_prot_land_frac_GLU %>%
mutate(prot_frac = aglu.PROTECT_DEFAULT,
non_prot_frac = 1-aglu.PROTECT_DEFAULT) -> L120.LC_prot_land_frac_GLU
}
##calculate soil and veg carbon
# Reference carbon densities (Mg C/ha) widened to one row per country/
# GLU/land code with separate soil and above/below-ground veg columns
L100.Ref_veg_carbon_Mg_per_ha %>%
select(iso, GLU, land_code, c_type, !!(as.name(aglu.CARBON_STATE))) %>%
left_join_error_no_match(distinct(iso_GCAM_regID, iso, .keep_all = TRUE), by = "iso") %>%
left_join(LDS_land_types %>% rename(land_code = Category), by = c("land_code")) %>%
left_join(SAGE_LT, by = "LT_SAGE") %>% # includes NAs
## Drop all rows with missing values (inland bodies of water)
na.omit() %>%
spread(c_type, !!(as.name(aglu.CARBON_STATE))) %>%
rename(soil_c = `soil_c (0-30 cms)`, vegc_ag = `veg_c (above ground biomass)`, vegc_bg = `veg_c (below ground biomass)`) %>%
select(iso,GCAM_region_ID, GLU, Land_Type, soil_c, vegc_ag, vegc_bg, land_code) %>%
distinct() -> L120.LC_soil_veg_carbon_GLU_agg
# Unmanaged land areas in the carbon reference year, with Houghton
# fallback carbon/mature-age values attached per SAGE land type
L100.Land_type_area_ha %>%
## Add data for GCAM region ID and GLU
left_join_error_no_match(distinct(iso_GCAM_regID, iso, .keep_all = TRUE), by = "iso") %>%
## Add vectors for land type (SAGE, HYDE, and WDPA)
left_join_error_no_match(LDS_land_types, by = c("land_code" = "Category")) %>%
filter(LT_HYDE== "Unmanaged") %>%
left_join(SAGE_LT, by = "LT_SAGE") %>%
## Drop all rows with missing values (inland bodies of water)
na.omit() %>%
# moirai only outputs carbon values from unmanaged land. Therefore, we remove pastures, urbanland and cropland from the below. We continue to calculate the carbon values for these land types using the Houghton structure.
left_join_error_no_match(Various_CarbonData_LTsage %>%
filter(!LT_SAGE %in% c("Pasture","UrbanLand","Cropland")) %>%
mutate(LT_SAGE = gsub(" ","",LT_SAGE)), by= c("LT_SAGE")) %>%
rename(iso = iso) %>% # NOTE(review): no-op rename; retained as-is
mutate(`mature age` = if_else(is.na(`mature age`),1,`mature age`)) %>%
complete(nesting(GCAM_region_ID, Land_Type, GLU,iso,land_code), year, fill = list(value = 0)) %>%
complete(nesting(GCAM_region_ID, Land_Type, GLU,iso,land_code), year = unique(c(year, aglu.LAND_COVER_YEARS))) %>%
filter(year == MODEL_CARBON_YEAR) %>%
select(-year) %>%
select(iso, GCAM_region_ID,GLU, Land_Type, value, land_code, `mature age`,soil_c_houghton, veg_c_houghton) %>%
rename(land_area= value) %>%
distinct() %>%
mutate(`mature age` = if_else(is.na(`mature age`),aglu.DEFAULT_MATURITY_AGE_ALL_LAND,`mature age`))->Land_for_carbon
# Area-weighted mean carbon densities per region/land type/GLU, falling
# back to Houghton values where moirai carbon is missing or zero;
# Mg C/ha converted to kg C/m2 via CONV_THA_KGM2
Land_for_carbon %>%
left_join(L120.LC_soil_veg_carbon_GLU_agg, by=c("iso", "GCAM_region_ID", "Land_Type", "GLU", "land_code")) %>%
mutate(soil_c = if_else(is.na(soil_c),soil_c_houghton,if_else(soil_c==0,soil_c_houghton,soil_c)),
vegc_ag = if_else(is.na(vegc_ag),veg_c_houghton,if_else(vegc_ag ==0,veg_c_houghton,vegc_ag)),
vegc_bg = if_else(is.na(vegc_bg),0,vegc_bg),
soil_c = if_else(is.na(soil_c),0,soil_c),
vegc_ag = if_else(is.na(vegc_ag),0,vegc_ag)) %>%
group_by(GCAM_region_ID, Land_Type, GLU) %>%
#Note that soil and vegetation carbon units are in Mgc/ha. These are therefore converted to kg/m2 using CONV_THA_KGM2.
#We compute a weighted average using land area as a weight.
mutate( soil_c = (sum(land_area * soil_c)/sum(land_area))*CONV_THA_KGM2,
veg_c = (sum(land_area * (vegc_ag+ vegc_bg))/sum(land_area))*CONV_THA_KGM2,
`mature age` = sum(`mature age` * land_area )/sum(land_area)) %>%
ungroup() %>%
mutate(soil_c = if_else(is.na(soil_c),0,soil_c),
veg_c = if_else(is.na(veg_c),0,veg_c),
`mature age` = if_else(is.na(`mature age`),aglu.DEFAULT_MATURITY_AGE_ALL_LAND,`mature age`)) %>%
select(GCAM_region_ID, Land_Type, GLU,soil_c,veg_c,`mature age`) %>%
distinct() %>%
#Add adjustment for Tundra. Our Tundra values are unreliable. Use Houghton for those,
mutate(`mature age` = if_else(Land_Type == "Tundra", aglu.DEFAULT_TUNDRA_AGE, `mature age`))->L120.LC_soil_veg_carbon_GLU_all_cat
#Compute Cropland carbon
# Same area assembly as above, but for the HYDE "Cropland" land use
L100.Land_type_area_ha %>%
## Add data for GCAM region ID and GLU
left_join_error_no_match(distinct(iso_GCAM_regID, iso, .keep_all = TRUE), by = "iso") %>%
## Add vectors for land type (SAGE, HYDE, and WDPA)
left_join_error_no_match(LDS_land_types, by = c("land_code" = "Category")) %>%
filter(LT_HYDE== "Cropland") %>%
left_join(SAGE_LT, by = "LT_SAGE") %>%
## Drop all rows with missing values (inland bodies of water)
na.omit() %>%
# moirai only outputs carbon values from unmanaged land. Therefore, we remove pastures, urbanland and cropland from the below. We continue to calculate the carbon values for these land types using the Houghton structure.
left_join_error_no_match(Various_CarbonData_LTsage %>% filter(!LT_SAGE %in% c("Pasture","UrbanLand","Unmanaged")) %>% mutate(LT_SAGE = gsub(" ","",LT_SAGE)), by= c("LT_SAGE")) %>%
rename(iso = iso) %>% # NOTE(review): no-op rename; retained as-is
mutate(`mature age` = if_else(is.na(`mature age`),1,`mature age`)) %>%
complete(nesting(GCAM_region_ID, Land_Type, GLU,iso,land_code), year, fill = list(value = 0)) %>%
complete(nesting(GCAM_region_ID, Land_Type, GLU,iso,land_code), year = unique(c(year, aglu.LAND_COVER_YEARS))) %>%
filter(year == MODEL_CARBON_YEAR) %>%
select(-year) %>%
select(iso, GCAM_region_ID,GLU, Land_Type, value, land_code, `mature age`) %>%
rename(land_area= value) %>%
distinct() %>%
mutate(`mature age` = if_else(is.na(`mature age`),aglu.DEFAULT_MATURITY_AGE_ALL_LAND,`mature age`))->Land_for_Crop_carbon
# Mean carbon density per region/land type/GLU, used to seed Cropland
L120.LC_soil_veg_carbon_GLU_all_cat %>%
group_by(GCAM_region_ID,Land_Type,GLU) %>%
mutate(soil_c= mean(soil_c),
veg_c= mean(veg_c)) %>%
ungroup() %>%
select(GCAM_region_ID,Land_Type,GLU,soil_c,veg_c) %>%
distinct()->L120.LC_soil_veg_carbon_mean_LT_GLU_reg
Land_for_Crop_carbon %>%
left_join_keep_first_only(L120.LC_soil_veg_carbon_mean_LT_GLU_reg, by=c("GLU", "GCAM_region_ID", "Land_Type")) %>%
mutate(soil_c = if_else(is.na(soil_c),aglu.DEFAULT_SOIL_CARBON_CROPLAND,soil_c),
veg_c = if_else(is.na(veg_c),aglu.DEFAULT_VEG_CARBON_CROPLAND,veg_c),
Land_Type = "Cropland") %>%
group_by(GCAM_region_ID, Land_Type, GLU) %>%
#Note that soil and vegetation carbon units are in Mgc/ha. These are therefore converted to kg/m2 using CONV_THA_KGM2.
#We compute a weighted average using land area as a weight.
mutate( soil_c = (sum(land_area * soil_c)/sum(land_area))*0.7,
veg_c = aglu.DEFAULT_VEG_CARBON_CROPLAND,
`mature age` = 1) %>%
ungroup() %>%
mutate(soil_c = if_else(is.na(soil_c),aglu.DEFAULT_SOIL_CARBON_CROPLAND,soil_c),
veg_c = if_else(is.na(veg_c),aglu.DEFAULT_VEG_CARBON_CROPLAND,veg_c),
`mature age` = if_else(is.na(`mature age`),1,`mature age`)) %>%
select(GCAM_region_ID, Land_Type, GLU,soil_c,veg_c,`mature age`) %>%
distinct() ->L120.LC_soil_veg_carbon_GLU_crop
# Pasture carbon is the same as grassland carbon values. But since the grassland values are subject to uncertainty, we make sure the values are below the mean of
# all Grassland values for soil and vegetation.
L120.LC_soil_veg_carbon_GLU_all_cat %>%
select(-soil_c,-veg_c,-`mature age`,-Land_Type) %>%
distinct() %>%
left_join(L120.LC_soil_veg_carbon_GLU_all_cat %>% filter(Land_Type == aglu.GRASSLAND_NODE_NAMES), by =c("GCAM_region_ID","GLU")) %>%
#Reducing soil carbon on pastures by a factor. This is because these pastures have been grazed in the past, so will not have same carbon as undisturbed grasslands.
mutate(Land_Type = aglu.PASTURE_NODE_NAMES,
soil_c = if_else(is.na(soil_c), aglu.DEFAULT_SOIL_CARBON_PASTURE*aglu.CSOIL_MULT_UNMGDPAST_MGDPAST,if_else(soil_c==0,aglu.DEFAULT_SOIL_CARBON_PASTURE*aglu.CSOIL_MULT_UNMGDPAST_MGDPAST,
soil_c*aglu.CSOIL_MULT_UNMGDPAST_MGDPAST)),
veg_c = if_else(is.na(veg_c), aglu.DEFAULT_VEG_CARBON_PASTURE,if_else(veg_c==0,aglu.DEFAULT_VEG_CARBON_PASTURE,
veg_c)),
`mature age` = if_else(is.na(`mature age`),aglu.DEFAULT_MATURITY_AGE_PASTURE,if_else(
`mature age` ==1 , aglu.DEFAULT_MATURITY_AGE_PASTURE , `mature age`)))->L120.LC_soil_veg_carbon_GLU_pasture
# Note that we set the default maturity age for Urban Land to 1 based on Houghton values.
L120.LC_soil_veg_carbon_GLU_all_cat %>%
select(-soil_c,-veg_c,-`mature age`,-Land_Type) %>%
distinct() %>%
mutate(Land_Type = paste0("UrbanLand"),soil_c = aglu.DEFAULT_SOIL_CARBON_URBANLAND, veg_c = aglu.DEFAULT_VEG_CARBON_URBANLAND, `mature age`= 1)->L120.LC_soil_veg_carbon_GLU_urban
# Stack the four land-category carbon tables into the final output
L120.LC_soil_veg_carbon_GLU <- bind_rows(L120.LC_soil_veg_carbon_GLU_all_cat,
L120.LC_soil_veg_carbon_GLU_pasture,
L120.LC_soil_veg_carbon_GLU_crop,
L120.LC_soil_veg_carbon_GLU_urban)
## Reset WDPA classification to "Non-protected" where HYDE classification
## is cropland, pasture, or urban land
# Managed HYDE uses (Cropland/Pasture/UrbanLand) override the SAGE land
# cover class; convert area from ha to billion m2
hyde <- land.type$LT_HYDE
ltype <- land.type$LT_SAGE_5
#land.type$LT_WDPA <- replace(hyde, hyde != "Unmanaged", "Non-protected")
land.type$Land_Type <-
ltype %>%
replace(hyde=='Cropland', 'Cropland') %>%
replace(hyde=='Pasture', 'Pasture') %>%
replace(hyde=='UrbanLand', 'UrbanLand')
land.type$Area_bm2 <- land.type$value * CONV_HA_BM2
L100.Land_type_area_ha <- land.type # Rename to the convention used in the
# rest of the module
# LAND COVER FOR LAND ALLOCATION
# Aggregate into GCAM regions and land types
# Part 1: Land cover by GCAM land category in all model history/base years
# Collapse land cover into GCAM regions and aggregate land types
L100.Land_type_area_ha %>%
group_by(GCAM_region_ID, Land_Type, year, GLU) %>%
summarise(Area_bm2 = sum(Area_bm2)) %>%
ungroup %>%
# Missing values should be set to 0 before interpolation, so that in-between years are interpolated correctly
# We do his because Alan Di Vittorio (see sources above) isn't writing out all possible combinations of
# country, GLU, year (of which there are 30), and land use category (of which there are also about 30).
# If something isn't written out by the LDS, that is because it is a zero; this step back-fills the zeroes.
complete(nesting(GCAM_region_ID, Land_Type, GLU), year, fill = list(Area_bm2 = 0)) %>%
# Expand to all combinations with land cover years
complete(nesting(GCAM_region_ID, Land_Type, GLU), year = unique(c(year, aglu.LAND_COVER_YEARS))) %>%
group_by(GCAM_region_ID, Land_Type, GLU) %>%
# Interpolate
mutate(Area_bm2 = approx_fun(year, Area_bm2)) %>%
ungroup %>%
filter(year %in% aglu.LAND_COVER_YEARS) %>%
arrange(GCAM_region_ID, Land_Type, GLU, year) %>%
rename(value = Area_bm2) %>%
mutate(year = as.integer(year)) ->
L120.LC_bm2_R_LT_Yh_GLU
# scale forest to avoid negative unmanaged forest area which caused issue for yield in Pakistan and African regions
# L123.LC_bm2_R_MgdFor_Yh_GLU_beforeadjust, pulled from L123.LC_bm2_R_MgdFor_Yh_GLU before managed forest scaling, was used here.
# Where managed forest exceeds total forest, shrink grass/shrub/pasture
# (nonForScaler < 1) and grow forest up to managed forest (ForScaler)
L120.LC_bm2_R_LT_Yh_GLU %>%
left_join(L120.LC_bm2_R_LT_Yh_GLU %>%
spread(Land_Type, value, fill = 0) %>%
left_join(L123.LC_bm2_R_MgdFor_Yh_GLU_beforeadjust %>% select(-Land_Type),
by = c("GCAM_region_ID", "GLU", "year")) %>%
mutate(nonForScaler =
if_else((Forest - MgdFor) < 0 & Forest > 0,
1 + (Forest - MgdFor)/(Grassland + Shrubland + Pasture), 1),
ForScaler = if_else((Forest - MgdFor) < 0 & Forest > 0, MgdFor/Forest ,1)) %>%
select(GCAM_region_ID, GLU, year, nonForScaler, ForScaler),
by = c("GCAM_region_ID", "GLU", "year") ) %>%
mutate(value = if_else(Land_Type %in% c("Grassland", "Shrubland" , "Pasture"),
value * nonForScaler,
if_else(Land_Type == "Forest", value * ForScaler, value) )) %>%
select(-nonForScaler, -ForScaler) ->
L120.LC_bm2_R_LT_Yh_GLU
# Subset the land types that are not further modified
L120.LC_bm2_R_UrbanLand_Yh_GLU <- filter(L120.LC_bm2_R_LT_Yh_GLU, Land_Type == "UrbanLand")
L120.LC_bm2_R_Tundra_Yh_GLU <- filter(L120.LC_bm2_R_LT_Yh_GLU, Land_Type == "Tundra")
L120.LC_bm2_R_RckIceDsrt_Yh_GLU <- filter(L120.LC_bm2_R_LT_Yh_GLU, Land_Type == "RockIceDesert")
# LAND COVER FOR CARBON CONTENT CALCULATION
# Compile data for land carbon content calculation on unmanaged lands
# Note: not just using the final year, as some land use types may have gone to zero over the historical period.
# Instead, use the mean of the available years within our "historical" years
# The HYDE data are provided in increments of 10 years, so any GCAM model time period
# or carbon cycle year that ends in a 5 (e.g., 1975) is computed as an average of
# surrounding time periods. For most of the years that we want, we aren't doing any real
# averaging or interpolation.
L100.Land_type_area_ha %>%
filter(LT_HYDE == "Unmanaged") %>%
group_by(iso, GCAM_region_ID, GLU, land_code, LT_SAGE, Land_Type) %>%
summarise(Area_bm2 = mean(Area_bm2)) %>%
ungroup ->
L120.LC_bm2_ctry_LTsage_GLU
# Compile data for land carbon content calculation on pasture lands
L100.Land_type_area_ha %>%
filter(LT_HYDE == "Pasture") %>%
group_by(iso, GCAM_region_ID, GLU, land_code, LT_SAGE, Land_Type) %>%
summarise(Area_bm2 = mean(Area_bm2)) %>%
ungroup ->
L120.LC_bm2_ctry_LTpast_GLU
# Produce outputs
# Attach gcamdata metadata (title/units/comments/precursors) to each output
L120.LC_bm2_R_LT_Yh_GLU %>%
add_title("Land cover by GCAM region / aggregate land type / historical year / GLU") %>%
add_units("bm2") %>%
add_comments("Land types from SAGE, HYDE, WDPA merged and reconciled; missing zeroes backfilled; interpolated to AGLU land cover years") %>%
add_legacy_name("L120.LC_bm2_R_LT_Yh_GLU") %>%
add_precursors("common/iso_GCAM_regID", "aglu/LDS/LDS_land_types", "aglu/SAGE_LT", "L100.Land_type_area_ha",
"aglu/LDS/L123.LC_bm2_R_MgdFor_Yh_GLU_beforeadjust") ->
L120.LC_bm2_R_LT_Yh_GLU
L120.LC_bm2_R_UrbanLand_Yh_GLU %>%
add_title("Urban land cover by GCAM region / historical year / GLU") %>%
add_units("bm2") %>%
add_comments("Land types from SAGE, HYDE, WDPA merged and reconciled; missing zeroes backfilled; interpolated to AGLU land cover years") %>%
add_legacy_name("L120.LC_bm2_R_UrbanLand_Yh_GLU") %>%
add_precursors("common/iso_GCAM_regID", "aglu/LDS/LDS_land_types", "aglu/SAGE_LT", "L100.Land_type_area_ha") ->
L120.LC_bm2_R_UrbanLand_Yh_GLU
L120.LC_bm2_R_Tundra_Yh_GLU %>%
add_title("Tundra land cover by GCAM region / historical year / GLU") %>%
add_units("bm2") %>%
add_comments("Land types from SAGE, HYDE, WDPA merged and reconciled; missing zeroes backfilled; interpolated to AGLU land cover years") %>%
add_legacy_name("L120.LC_bm2_R_Tundra_Yh_GLU") %>%
add_precursors("common/iso_GCAM_regID", "aglu/LDS/LDS_land_types", "aglu/SAGE_LT", "L100.Land_type_area_ha") ->
L120.LC_bm2_R_Tundra_Yh_GLU
L120.LC_bm2_R_RckIceDsrt_Yh_GLU %>%
add_title("Rock/ice/desert land cover by GCAM region / historical year / GLU") %>%
add_units("bm2") %>%
add_comments("Land types from SAGE, HYDE, WDPA merged and reconciled; missing zeroes backfilled; interpolated to AGLU land cover years") %>%
add_legacy_name("L120.LC_bm2_R_RckIceDsrt_Yh_GLU") %>%
add_precursors("common/iso_GCAM_regID", "aglu/LDS/LDS_land_types", "aglu/SAGE_LT", "L100.Land_type_area_ha") ->
L120.LC_bm2_R_RckIceDsrt_Yh_GLU
L120.LC_bm2_ctry_LTsage_GLU %>%
add_title("Unmanaged land cover by country / SAGE15 land type / GLU") %>%
add_units("bm2") %>%
add_comments("Land types from SAGE, HYDE, WDPA merged and reconciled; missing zeroes backfilled; interpolated to AGLU land cover years") %>%
add_comments("Mean computed for HYDE 'Unmanaged' over available historical years") %>%
add_legacy_name("L120.LC_bm2_ctry_LTsage_GLU") %>%
add_precursors("common/iso_GCAM_regID", "aglu/LDS/LDS_land_types", "aglu/SAGE_LT", "L100.Land_type_area_ha") ->
L120.LC_bm2_ctry_LTsage_GLU
L120.LC_bm2_ctry_LTpast_GLU %>%
add_title("Pasture land cover by country / SAGE15 land type / GLU") %>%
add_units("bm2") %>%
add_comments("Land types from SAGE, HYDE, WDPA merged and reconciled; missing zeroes backfilled; interpolated to AGLU land cover years") %>%
add_comments("Mean computed for HYDE 'Pasture' over available historical years") %>%
add_legacy_name("L120.LC_bm2_ctry_LTpast_GLU") %>%
add_precursors("common/iso_GCAM_regID", "aglu/LDS/LDS_land_types", "aglu/SAGE_LT", "L100.Land_type_area_ha") ->
L120.LC_bm2_ctry_LTpast_GLU
L120.LC_prot_land_frac_GLU %>%
add_title("protected and unprotected fractions by year,GLU, land type.") %>%
add_units("fraction") %>%
add_comments("Land types from SAGE, HYDE, WDPA merged and reconciled; missing zeroes backfilled; interpolated to AGLU land cover years") %>%
add_legacy_name("L120.LC_prot_land_frac_GLU") %>%
add_precursors("common/iso_GCAM_regID", "aglu/LDS/LDS_land_types", "aglu/SAGE_LT", "L100.Land_type_area_ha") ->
L120.LC_prot_land_frac_GLU
L120.LC_soil_veg_carbon_GLU %>%
add_title("Spatially distinct soil and vegetation carbon by GLU") %>%
add_units("kg/m2") %>%
add_comments("Land types from SAGE, HYDE, WDPA merged and reconciled; missing zeroes backfilled; interpolated to AGLU land cover years. Soil carbon is at a depth of 0-30 cms and vegetation carbon is a combination of above and below ground biomass.") %>%
add_legacy_name("L120.LC_soil_veg_carbon_GLU") %>%
add_precursors("common/iso_GCAM_regID", "aglu/LDS/LDS_land_types", "aglu/SAGE_LT", "L100.Land_type_area_ha","L100.Ref_veg_carbon_Mg_per_ha","aglu/Various_CarbonData_LTsage")->L120.LC_soil_veg_carbon_GLU
return_data(L120.LC_bm2_R_LT_Yh_GLU, L120.LC_bm2_R_UrbanLand_Yh_GLU, L120.LC_bm2_R_Tundra_Yh_GLU, L120.LC_bm2_R_RckIceDsrt_Yh_GLU, L120.LC_bm2_ctry_LTsage_GLU, L120.LC_bm2_ctry_LTpast_GLU, L120.LC_prot_land_frac_GLU, L120.LC_soil_veg_carbon_GLU)
} else {
stop("Unknown command")
}
}
|
ffef987afd48fa08f759302967ae78258827e76c | 8a2ef8564dac1551f9a389c3cc04837453c933d6 | /17_FactoresTranscripcion/4_EncontrarFactoresTranscripcion_BaseDeDatos.R | 0f3b726a5ad643979f1f4097f423ebd6be2ce85f | [] | no_license | anemartinezlarrinaga2898/TFM_ANE_MARTINEZ | 0a311e96ccad34f331d798b1f3c459539614e1c7 | 8e41157613fff0cfa4f5eb27e6984d02d000b9ef | refs/heads/main | 2023-07-27T21:41:44.218050 | 2021-08-27T10:15:35 | 2021-08-27T10:15:35 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 5,142 | r | 4_EncontrarFactoresTranscripcion_BaseDeDatos.R | # TFM, Master en Metodos Computacionales UNAV
# Author: Ane Martinez Larrinaga
# Tutors: Silvestre Vicent Cambra and Mikel Hernaez
# Date: 07-07-2021
# OBJECTIVE: build the data frames from the DESeq analysis and then check
# which transcription factors (TF) are present in each group.
###########################################################################################
# NOTE(review): "direcotry" is a typo for "directory"; setwd() also returns
# the *previous* working directory, so the variable holds the old path.
direcotry <- setwd("/Users/anemartinezlarrinaga/OneDrive/2_MASTER_Computacional/5_TFM/CODIGO/SCRIPTS_Y_DATOS/19_AnalisisMatricesOriginales/")
library(tidyverse)
###############################################################################################
# ---- LUAD: load the three DESeq result groups and attach ensembl IDs ----
# NOTE(review): mutate() with no extra arguments is a no-op here; the
# row.names argument belongs to the outer as.data.frame() call.
G1 <- readRDS("ResultadosAnalasisExpresion_1_Luad.rds")
G1 <- as.data.frame(dplyr::mutate(as.data.frame(G1)),
row.names=rownames(G1))
G1$ensembl <- rownames(G1)
G2 <- readRDS("ResultadosAnalasisExpresion_2_Luad.rds")
G2 <- as.data.frame(dplyr::mutate(as.data.frame(G2)),
row.names=rownames(G2))
G2$ensembl <- rownames(G2)
G3 <- readRDS("ResultadosAnalasisExpresion_3_Luad.rds")
G3 <- as.data.frame(dplyr::mutate(as.data.frame(G3)),
row.names=rownames(G3))
G3$ensembl <- rownames(G3)
# Transcription-factor reference table: drop its first column and name the
# third column "ensembl" so it can be joined against the DESeq tables.
FT <- readRDS("FactoresTranscripcion_Symbol_Entrez_Ensembel.rds")
FT <- FT[,-1]
colnames(FT)[3] <- "ensembl"
# Keep only the differentially expressed genes that are known TFs, tagging
# each row with its group of origin.
FT_G1 <- inner_join(G1,FT,by="ensembl")
FT_G1$Grupo <- "Grupo1"
FT_G2 <- inner_join(G2,FT,by="ensembl")
FT_G2$Grupo <- "Grupo2"
FT_G3 <- inner_join(G3,FT,by="ensembl")
FT_G3$Grupo <- "Grupo3"
FT_Totales <- rbind(FT_G1,FT_G2)
FT_Totales <- rbind(FT_Totales,FT_G3)
FT_Totales <- FT_Totales[order(FT_Totales$SYMBOL),]
# Top/bottom 5 TFs by fold change.
# NOTE(review): these lines order FT_G1's rows by FT_G2$log2FoldChange (a
# *different* table), and the output file is named "FT_G2_UP_DOWN.csv" even
# though the object is FT_G1_UP_DOWN -- confirm whether G1 or G2 was meant.
FT_UP_G1 <- FT_G1[order(FT_G2$log2FoldChange,decreasing = TRUE),]
FT_UP_G1_5 <- FT_UP_G1[1:5,]
FT_down_G1 <- FT_G1[order(FT_G2$log2FoldChange,decreasing = FALSE),]
FT_down_G1 <- FT_down_G1[1:5,]
FT_G1_UP_DOWN <- rbind(FT_UP_G1_5,FT_down_G1)
write.table(FT_G1_UP_DOWN,"FT_G2_UP_DOWN.csv",sep ="\t",row.names = FALSE)
saveRDS(FT_Totales,"FT_TOTALES_LUAD.rds")
#·····························································································································
# ---- LUSC: same pipeline as the LUAD section above, for the LUSC cohort ----
G1 <- readRDS("ResultadosAnalasisExpresion_1_Lusc.rds")
G1 <- as.data.frame(dplyr::mutate(as.data.frame(G1)),
row.names=rownames(G1))
G1$ensembl <- rownames(G1)
G2 <- readRDS("ResultadosAnalasisExpresion_2_Lusc.rds")
G2 <- as.data.frame(dplyr::mutate(as.data.frame(G2)),
row.names=rownames(G2))
G2$ensembl <- rownames(G2)
G3 <- readRDS("ResultadosAnalasisExpresion_3_Lusc.rds")
G3 <- as.data.frame(dplyr::mutate(as.data.frame(G3)),
row.names=rownames(G3))
G3$ensembl <- rownames(G3)
# TF reference table, reshaped exactly as in the LUAD section.
FT <- readRDS("FactoresTranscripcion_Symbol_Entrez_Ensembel.rds")
FT <- FT[,-1]
colnames(FT)[3] <- "ensembl"
# Intersect each group's DE genes with the TF list and tag the group.
FT_G1 <- inner_join(G1,FT,by="ensembl")
FT_G1$Grupo <- "Grupo1"
FT_G2 <- inner_join(G2,FT,by="ensembl")
FT_G2$Grupo <- "Grupo2"
FT_G3 <- inner_join(G3,FT,by="ensembl")
FT_G3$Grupo <- "Grupo3"
FT_Totales <- rbind(FT_G1,FT_G2)
FT_Totales <- rbind(FT_Totales,FT_G3)
FT_Totales <- FT_Totales[order(FT_Totales$SYMBOL),]
saveRDS(FT_Totales,"FT_TOTALES_LUSC.rds")
##############################################################################################################################
# Now look up, within the LUSC TF table, the TFs belonging to each group
# across the whole signature.
FT <- readRDS("FT_TOTALES_LUSC.rds")
FT_Paper <- readRDS("GenesPaper_Lusc.rds")
# Manual fix of one gene symbol in the paper's gene list.
FT_Paper[21,2] <-"NKX2-1"
colnames(FT_Paper)[2] <- "Symbol"
# NOTE(review): FT (built above) carries a "SYMBOL" column, while FT_Paper
# is renamed to "Symbol" -- this join key looks case-mismatched; verify.
Paper <- inner_join(FT,FT_Paper, by="Symbol")
##############################################################################################################################
# Look for the paper's genes among *all* differentially expressed genes
# (not just the TFs).
GP <- readRDS("GenesPaper_Lusc.rds")
GP[21,2] <-"NKX2-1"
colnames(GP)[2] <- "SYMBOL"
H1 <- readRDS("ResultadosComparacion1_Lusc_DF.rds")
H1$ensembl <- rownames(H1)
H1$Grupo <- "Grupo1"
H2 <- readRDS("ResultadosComparacion2_Lusc_DF.rds")
H2$ensembl <- rownames(H2)
H2$Grupo <- "Grupo2"
H3 <- readRDS("ResultadosComparacion3_Lusc_DF.rds")
H3$ensembl <- rownames(H3)
H3$Grupo <- "Grupo3"
H <- rbind(H1,H2)
H <- rbind(H,H3)
PAPER <- inner_join(H,GP,by="SYMBOL")
PAPER <- PAPER[order(PAPER$SYMBOL),]
# Split the matched genes back out by group of origin.
HP1 <- PAPER %>% filter(Grupo=="Grupo1")
HP2 <- PAPER %>% filter(Grupo=="Grupo2")
HP3 <- PAPER %>% filter(Grupo=="Grupo3")
###############################################################################################
# Same lookup as the previous section, but against the *extended* paper
# gene list (GenesPaper_Ampliados).
H1 <- readRDS("ResultadosComparacion1_Lusc_DF.rds")
H1$ensembl <- rownames(H1)
H1$Grupo <- "Grupo1"
H2 <- readRDS("ResultadosComparacion2_Lusc_DF.rds")
H2$ensembl <- rownames(H2)
H2$Grupo <- "Grupo2"
H3 <- readRDS("ResultadosComparacion3_Lusc_DF.rds")
H3$ensembl <- rownames(H3)
H3$Grupo <- "Grupo3"
H <- rbind(H1,H2)
H <- rbind(H,H3)
GP <- readRDS("GenesPaper_Ampliados.rds")
PAPER <- inner_join(H,GP,by="SYMBOL")
PAPER <- PAPER[order(PAPER$SYMBOL),]
# Split the matched genes by group of origin.
HP1 <- PAPER %>% filter(Grupo=="Grupo1")
HP2 <- PAPER %>% filter(Grupo=="Grupo2")
HP3 <- PAPER %>% filter(Grupo=="Grupo3")
|
ef8cfd01f0bc22b1b542d4e97ae9158e5b1fc33c | 47b7376893f2ee4cdd29ef36b07d4a44ca4e2071 | /man/to_gadget_formula.Rd | e93d663b49d6df70eddaf8a72ad117ffc2a21177 | [] | no_license | inspktrgadget/gadgetSim | 24eb767afc04376656b3827df762ac1bc054f17f | 1a29daa89e6a171176dd73b2d8763927751f7978 | refs/heads/master | 2020-03-16T02:25:37.840117 | 2019-05-02T14:23:35 | 2019-05-02T14:23:35 | 132,464,768 | 0 | 0 | null | null | null | null | UTF-8 | R | false | true | 1,075 | rd | to_gadget_formula.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/convert_formulas.R
\name{to_gadget_formula}
\alias{to_gadget_formula}
\title{Turn R expression into Gadget formula string}
\usage{
to_gadget_formula(ex, stocknames = NULL)
}
\arguments{
\item{ex}{An unevaluated R expression (i.e. enclosed in quotes)}
\item{stocknames}{Optional. Character vector of stocknames to add to any formula variable names}
}
\value{
A character vector that is readable as a Gadget formula
}
\description{
This function is stolen directly from Rgadget::to.gadget.formulae. It takes an unevaluated R
expression (e.g. quote(2 + log(moo - 1))) and converts it into a character string that is readable
by Gadget
}
\details{
Gadget uses reverse Polish notation to read formulas (i.e. the operator comes first,
followed by the items to be operated on; 2 + 2 is read as (+ 2 2)). This function will take
an expression recognizable by R and convert it to one that is recognizable by Gadget
}
\examples{
to_gadget_formula(quote(2 + 2))
to_gadget_formula(quote(2 + log(moo - 1)))
}
|
8d010cf7ceddeb847906775c41a01c7c1d073bf5 | 4548bcd0862edd7b1997402206a24ff71f190d37 | /R/create.pgrid.R | 981f7dd33d3a390c751be8f96d0af60eaf9ca350 | [] | no_license | jfrench/ExceedanceTools | d7d533d4ad5b95a26b1f11b2ea697e2a133a57dd | 186cd08cb15a77e626bcc45d31d6818872da0354 | refs/heads/master | 2023-09-01T11:12:18.594832 | 2023-08-21T16:16:39 | 2023-08-21T16:16:39 | 3,113,489 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 4,468 | r | create.pgrid.R | #' Create grid of locations.
#'
#' \code{create.pgrid} creates a grid of locations from the boundaries of domain and other information.
#'
#' The key argument in the function is midpoints. If this is \code{TRUE}, it is assumed that the boundaries of the spatial domain correspond to the midpoints of the cell/pixel in the grid. Otherwise, it is assumed that the boundaries correspond to the actual borders of the region of interest. If \code{poly.coords} is supplied, the grid returned is the grid of midpoints contained in the convex hull of \code{poly.coords}.
#'
#' @param xmin The minimum value of the boundary of the x coordinates of the spatial domain.
#' @param xmax The maximum value of the boundary of the x coordinates of the spatial domain.
#' @param ymin The minimum value of the boundary of the y coordinates of the spatial domain.
#' @param ymax The maximum value of the boundary of the y coordinates of the spatial domain.
#' @param nx The number of gridpoints/cells/pixels in the x direction.
#' @param ny The number of gridpoints/cells/pixels in the y direction.
#' @param midpoints A logical value (\code{TRUE} or \code{FALSE}) indicating whether the boundary values are for the midpoint of a pixel (\code{midpoints = TRUE}) or for the boundary of the spatial domain in general (\code{midpoints = FALSE}), in which case the midpoints are calculated internally). Default is \code{FALSE}.
#' @param poly.coords An \eqn{n \times 2} matrix with the coordinates specifying the polygon vertices of the true spatial domain of interest within the rectangular boundaries provided by \code{xmin}, \code{xmax}, \code{ymin}, and \code{ymax}. If this is provided, the \code{pgrid} returned will be within the convex hull of \code{poly.coords}.
#'
#' @return Returns an object of class \code{pgrid} with the following components:
#' \item{pgrid}{An \eqn{n \times 2} matrix of locations (the midpoints of the pixelized grid).}
#' \item{np}{The number of rows in pgrid.}
#' \item{p.in.grid}{A logical vector indicating whether the midpoint of each pixel is in the convex hull of \code{poly.coords}. If \code{poly.coords} is not provided, this is all \code{TRUE}.}
#' \item{ubx}{The pixel boundaries in the x direction.}
#' \item{uby}{The pixel boundaries in the y direction.}
#' \item{upx}{The pixel midpoints in the x direction.}
#' \item{upy}{The pixel midpoints in the y direction.}
#'
#' @author Joshua French
#' @importFrom splancs inout
#' @export
#' @examples
#' pgrida <- create.pgrid(0, 1, 0, 1, nx = 50, ny = 50, midpoints = FALSE)
#' pgridb <- create.pgrid(.01, .99, .01, .99, nx = 50, ny = 50, midpoints = TRUE)
create.pgrid <- function(xmin, xmax, ymin, ymax, nx, ny, midpoints = FALSE,
                         poly.coords = NULL)
{
  if (midpoints)
  {
    # The supplied boundaries ARE pixel midpoints: the nx midpoints span
    # [xmin, xmax], so the pixel width equals the midpoint spacing.
    xstep <- (xmax - xmin)/(nx - 1) # pixel width
    ystep <- (ymax - ymin)/(ny - 1) # pixel height
    # x and y midpoints of all of the pixels
    upx <- xmin + 0:(nx - 1) * xstep
    upy <- ymin + 0:(ny - 1) * ystep
    # Pixel edges straddle the midpoints by half a pixel on each side.
    ubx <- xmin + 0:nx * xstep - xstep/2
    uby <- ymin + 0:ny * ystep - ystep/2
  }
  else
  {
    # The supplied boundaries are the borders of the region: divide the
    # region into nx-by-ny pixels and take each pixel's center.
    xstep <- (xmax - xmin)/nx # pixel width
    ystep <- (ymax - ymin)/ny # pixel height
    upx <- xmin + xstep/2 + 0:(nx - 1) * xstep
    upy <- ymin + ystep/2 + 0:(ny - 1) * ystep
    ubx <- xmin + 0:nx * xstep
    uby <- ymin + 0:ny * ystep
  }
  # If polygon coordinates are supplied, keep only the midpoints that fall
  # inside (or on the boundary of) the polygon.
  if (!is.null(poly.coords))
  {
    all.grid <- as.matrix(expand.grid(upx, upy))
    # Point-in-polygon test (splancs::inout), including boundary points.
    pip <- inout(all.grid, poly.coords, bound = TRUE)
    # drop = FALSE keeps pgrid a matrix even when only one midpoint falls
    # inside the polygon (the original code collapsed it to a vector).
    pgrid <- all.grid[pip == 1, , drop = FALSE]
    np <- nrow(pgrid)
    # Logical flag for each midpoint of the full rectangular grid.
    p.in.grid <- (pip == 1)
  } else
  {
    pgrid <- as.matrix(expand.grid(upx, upy))
    np <- length(upx) * length(upy)
    p.in.grid <- rep(TRUE, np)
  }
  out <- list(pgrid = pgrid, np = np, p.in.grid = p.in.grid,
              ubx = ubx, uby = uby, upx = upx, upy = upy)
  class(out) <- "pgrid"
  return(out)
}
|
8e3e88bc652be801f311eebd1a0dfcc9778a4e50 | 8faa81854ac570de08164584910d757fd5cb5360 | /r-for-data-science-lessons/chapter-15-16-17.R | 8137e3f74003da37a1e4089e5987dce985054e44 | [] | no_license | shardsofblue/tutorials-and-study | a0e39f7bdad7032ff965f488eeccd0d29731fe56 | 3247ee9eb89859925c0f2fd9a9cc9e4caf66dc58 | refs/heads/master | 2020-09-09T16:29:02.065533 | 2019-11-22T18:16:42 | 2019-11-22T18:16:42 | 221,496,800 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 9,993 | r | chapter-15-16-17.R | # Chapters 15 (Functions), 16 (Vectors), 17 (Itteration with purr)
# Started Jan. 8, 2019
# By Roxanne Ready
# Load packages
#install.packages(c("magrittr"))
#library(magrittr)
library(tidyverse)
# SHORTCUTS
# cmd-shft-m: %>%
# cmd-shft-c: comment line/block
# cmd-opt-up/dn: duplicate line
# cmd-rtrn: run code line/block
# cmd-opt-e: run code until EoF
# cmd-shft-r: insert section break
# \
# Test section 0 ##########################################
?'if'
x <- 1L # The L specifies that this is an integer, not a double
y <- 0
z <- 1 # Numbers are doubles by default
# || or
# && and
identical(x, z) # integers, doubles, and floating points will not be coerced
near(x, z)
all(c(x == y, x == x)) # true for all in a list of comparisons
any(c(x == y, x == x)) # true for any " "
# Test section 1 ------------------------------------------------------------
if (this) {
# do thing 1
} else if (that) {
# do thing 2
} else {
# do thing 3
}
# Apply a binary arithmetic operation selected by name.
#
# Args:
#   x, y: numeric operands.
#   operator: one of "plus", "minus", "times", "multiply", "divide".
# Returns the result of the chosen operation; signals an error for any
# other operator name.
maths <- function(x, y, operator) {
  switch(operator,
    plus = x + y,
    minus = x - y,
    # An empty right-hand side falls through to the next alternative, so
    # "times" and "multiply" share one implementation (this answers the
    # original "how to collapse into one?" question).
    times = ,
    multiply = x * y,
    divide = x / y,
    # Last unnamed argument is switch()'s default branch.
    stop("Unknown operator")
  )
}
maths(1, 5, "times")
maths(1, 5, "remainder")
?'if'
?ifelse
# Test section 2 ------------------------------------------------------------
?stop # Stop the function with an error message
# Weighted mean of x with weights w.
# Both vectors must have the same length; stop() with call. = FALSE keeps
# the error message free of the (uninformative) call text.
wt_mean <- function(x, w) {
if (length(x) != length(w)) {
stop("`x` and `w` must be the same length", call. = FALSE)
}
sum(w * x) / sum(w)
}
wt_mean(1:6, 1:3)
?stopifnot # Stop the function if the value is not true. Faster, but the error message is not as detailed.
# Weighted mean of x with weights w.
#
# x and w must be equal-length vectors; na.rm must be a single logical
# flag. When na.rm is TRUE, positions where either x or w is NA are
# dropped before averaging.
wt_mean <- function(x, w, na.rm = FALSE) {
  stopifnot(is.logical(na.rm), length(na.rm) == 1)
  stopifnot(length(x) == length(w))
  if (na.rm) {
    keep <- !(is.na(x) | is.na(w))
    x <- x[keep]
    w <- w[keep]
  }
  sum(x * w) / sum(w)
}
wt_mean(1:6, 6:1, na.rm = "foo")
# ...
commas <- function(...) {stringr::str_c(..., collapse = ", ")}
commas(letters[1:10])
# Bad
f <- function() {
if (x) {
# Do
# something
# that
# takes
# many
# lines
# to
# express
} else {
# return something short
}
}
# Good
f <- function() {
if (!x) {
return(something_short)
}
# Do
# something
# that
# takes
# many
# lines
# to
# express
}
# Vectors -----------------------------------------------------------------
typeof(letters)
typeof(1:10)
length(letters)
# Checks for doubles
# is.finite()
# is.infinite()
# is.na()
# is.nan()
# Checks for types
# Use purr (is_*) not baser (is.*)
# is_logical
# is_integer
# is_double
# is_numeric
# is_character
# is_atomic
# is_list
# is_vector
x <- list("a", "b", "c")
x
str(x)
x_named <- list(cola = "a", colb = "b", colc = "c")
x_named
str(x_named)
x <- set_names(x, c("ColA", "ColB", "ColC"))
str(x)
listception <- list("firstList" = x, "secondList" = x_named)
str(listception)
# Itteration --------------------------------------------------------------
# Test df
df <- tibble(
a = rnorm(10), # Random normal generation
b = rnorm(10),
c = rnorm(10),
d = rnorm(10)
)
output <- vector("double", ncol(df)) # Set the output size
for (i in seq_along(df)) { # Run a for loop
output[[i]] <- median(df[[i]]) # Replace the output at each position with the median of the corresponding df col
}
output # View the output
# Exercises
#1a. Compute the mean of every column in mtcars.
mtcars
output <- vector("double", ncol(mtcars))
for (i in seq_along(mtcars)) {
output[[i]] <- mean(mtcars[[i]])
}
output
#1b. Determine the type of each column in nycflights13::flights.
nycflights13::flights # Look at flights
sapply(nycflights13::flights[1], class) # Check how to find the col type
output <- vector("character", ncol(nycflights13::flights)) # Set the output length
for (i in seq_along(nycflights13::flights)) {
output[[i]] <- sapply(nycflights13::flights[i], class)
}
output
#1c. Compute the number of unique values in each column of iris.
iris # Look at iris
summary(iris)
# Check how to find and count unique values in a col
?unique
count(unique(iris[1]))
output <- vector("integer", ncol(iris)) # set the output length
for (i in seq_along(iris)) {
output[[i]] <- dplyr::pull( # Un-nest, or un-tibble the tibble that count() creates
count( # Count how many
unique(iris[i]))) # Find uniques
}
output
#1d. Generate 10 random normals for each of -10, 0, 10, 100
# Generate a single random normal
?rnorm
(rnorm(1, mean = -10))
(output <- vector("double", 4)) # Set output length
(mu <- -10)
for (i in seq_along(output)) {
if (i == 1) { # On first itteration...
# ...exit for loop
} else if (i == 4) { # On last itteration...
mu <- mu * 10 # ... multiply mu by 10
} else { # On all other itterations...
mu <- mu + 10 # ... add 10 to mu
}
output[i] <- rnorm(1, mu) # Store an RNG num with a mean of mu in output
}
output # View output
# For loop variations -----------------------------------------------------
# Test df
df <- tibble(
a = rnorm(10), # Random normal generation
b = rnorm(10),
c = rnorm(10),
d = rnorm(10)
)
# Function to rescale
# Linearly rescale a numeric vector onto [0, 1].
# NA entries are ignored when finding the extremes but are preserved
# (as NA) in the output.
rescale01 <- function(x) {
  lo <- min(x, na.rm = TRUE)
  hi <- max(x, na.rm = TRUE)
  (x - lo) / (hi - lo)
}
# Itteratively deploy function to the df
for (i in seq_along(df)) {
df[[i]] <- rescale01(df[[i]])
}
df
df[1]
df[[1]]
# Understanding lists in for loops
output <- list(vector("character", ncol(iris)), vector("character", ncol(iris)))
for (i in seq_along(iris)) {
name <- names(iris)[[i]] # Extract the name of a column
#value <- iris[[i]] # Extract the value of a variable
mean <- mean(iris[[i]][[i]]) # Extract the mean of a column
output[[1]][[i]] <- name
output[[2]][[i]] <- mean
}
output
output[[2]][[3]]
# Naming outputs
output <- vector("list", length(iris))
for (i in seq_along(iris)) {
names(output) <- stringr::str_c("Mean_", names(iris))
output[[i]] <- mean(iris[[i]])
}
output
output$Petal.Length
# Unknown output length
means <- c(0, 1, 2) # A vector of 3 numbers
out <- vector("list", length(means)) # A vector of lists of the same length as "means"
?sample
?rnorm
for (i in seq_along(means)) {
n <- sample(100, 1)
out[[i]] <- rnorm(n, means[[i]])
}
out # A vector of 3 lists, each holding a random number of numbers
str(out) # Shows the structure of "out"
out2 <- unlist(out) # Flatten those three lists into one vector
out2 # A vector of numbers
str(unlist(out)) # Shows the structure of out2
# Other useful functions to store iterations and optimize for loops
paste(output, collapse = "") # To combine character strings saved in a vector
dplyr::bind_rows(output) # To combine rows
# While loops -------------------------------------------------------------
## Find how many flips it takes to get three heads in a row
# Flip function
# Simulate one fair coin flip: returns "T" (tails) or "H" (heads) with
# equal probability. Result is random; depends on the RNG state.
flip <- function() {
sample(c("T", "H"), 1) # Pull one value as T or H
}
flips <- 0
nheads <- 0
while (nheads < 3) {
if (flip() == "H") {
nheads <- nheads + 1
} else {
nheads <- 0 # Reset heads to 0
}
flips <- flips + 1
}
flips
# Functionals -------------------------------------------------------------
# Test df
df <- tibble(
a = rnorm(10), # Random normal generation
b = rnorm(10),
c = rnorm(10),
d = rnorm(10)
)
# Use an argument of a function as a call to another
# Apply a summary function to every column of a data frame (or list of
# equal-status elements), returning an unnamed double vector with one
# entry per column. `fun` must return a single numeric value per column.
col_summary <- function(df, fun) {
  unname(vapply(df, fun, numeric(1)))
}
col_summary(df, median)
col_summary(df, mean)
col_summary(df, sum)
# purr functionals --------------------------------------------------------
# map() makes a list.
# map_lgl() makes a logical vector.
# map_int() makes an integer vector.
# map_dbl() makes a double vector.
# map_chr() makes a character vector.
# The main benefit to purr's map functions is clarity, not speed.
# For loops aren't any slower and haven't been for many years.
# Same as the homebrew function written above (line 335-344)
map_dbl(df, median)
map_dbl(df, mean)
map_dbl(df, sum)
df %>% map_dbl(median)
?map_dbl
# Split mtcars into values along cylinders
models <- mtcars %>%
split(.$cyl) %>% # Run the split; still has all info
#map(function(df) lm(mpg ~ wt, data = df)) # Use an anonymous function to do the below; verbose
map(~lm(mpg ~ wt, data = .)) # Replace info with a summary built from an anonymous function, using shortcuts
# Extract a summary statistic
models %>%
map(summary) %>%
#map_dbl(~.$r.squared) # Using expected syntax
map_dbl("r.squared") # Using a shortcut string
# Select elements within a nested list by position
(x <- list(list(1,2,3), list(4,5,6), list(7,8,9))) # List of lists to play with
x %>%
map_dbl(2)
# Handling Mapping Errors -------------------------------------------------
# Safely mapping functions so one error doesn't obfuscate all results
x <- list(1, 10, "a")
y <- x %>%
map(safely(log))
y
str(y)
# Transpose to put all results in one list and all errors in another
y <- y %>%
transpose()
str(y)
y
# Use the errors to pull out usable info
is_ok <- y$error %>%
map_lgl(is_null)
str(is_ok)
x[!is_ok] # View values of x where y is an error
y$result[is_ok] %>% # View the y values that are not errors
flatten_dbl()
# possibly() is a simpler safely(), outputting a default error return instead of error messages
x %>%
map_dbl(possibly(log, NA_real_))
# Mapping over multiple arguments -----------------------------------------
# Use map2() and pmap(), pp. 332-335
# Use walk() to handle printouts and file saves pp. 335-336
# reduce(dfs, full_join) will combine two dfs in a list into one, joined on a common element
# reduce(vs, inersect) will reduce two vectors in a list into their intersection
|
98649322f2e35536927c7f02510c0157e57c454d | ceb72ba7636eecc95d99f479be3b8f2cc9381af5 | /R/scribe_images.R | acabe10e9b12dab0c1cdc0377674f331390e3bf4 | [] | no_license | wipo-analytics/manual | 645f71d87219ea736537c43ec4b638ca101efb08 | 5ba9724c9f137c2022bca0d02bd353a304689764 | refs/heads/main | 2022-10-12T17:14:25.801869 | 2022-09-29T17:52:46 | 2022-09-29T17:52:46 | 356,185,658 | 1 | 1 | null | null | null | null | UTF-8 | R | false | false | 2,206 | r | scribe_images.R | library(tidyverse)
library(glue)
library(usethis)
# used to download Scribe files (as Markdown) save as text files. import as text fiule
# extract the URLS, download the images and change the numbering.
# Read the Scribe markdown export as plain text lines.
scribe <- read_lines("scribe/scribe.txt")
# extract the URLS
# https://github.com/mvkorpel/pickURL
source("~/Documents/manual/R/pick_urls.R")
# Keep only image URLs.
# NOTE(review): in str_detect the "." is a regex wildcard, so this also
# matches e.g. "xjpeg"; fixed(".jpeg") or "\\.jpeg" was probably intended.
urls <- pick_urls(scribe) %>%
tibble(url = .) %>%
mutate(image = str_detect(url, ".jpeg")) %>%
filter(image == TRUE)
## Create destination and project folders if they dont exist
# note that if jpegs or pngs already exist in these files then
# you should probably not proceed.
# something is returning NULL in this code
# Ensure the destination and project directories exist, and warn when the
# project folder already contains jpegs (which a later download would
# overwrite).
#
# Args:
#   dest: top-level image directory (created if missing).
#   project: sub-directory of `dest` for this project (created if missing).
# Side effects only (directory creation plus usethis UI messages).
create_folders <- function(dest = NULL, project = NULL) {
  if (!is.null(dest)) {
    if (!dir.exists(dest)) {
      dir.create(dest)
      ui_info("{ui_value(dest)} dir created")
    } else {
      ui_info("{ui_value(dest)} dir exists")
    }
  }
  if (!is.null(project)) {
    project_path <- file.path(dest, project)
    if (!dir.exists(project_path)) {
      dir.create(project_path)
      ui_info("{ui_value(project)} dir created")
    } else {
      ui_info("{ui_value(project)} dir exists")
    }
  }
  # Bug fixes vs the original: it built the path from the literal string
  # "project" (paste0(dest, "/", "project")), and tested
  # `".jpeg" %in% ftypes`, which compares whole file names and was
  # therefore always FALSE.
  path <- file.path(dest, project)
  jpegs <- list.files(path, pattern = "\\.jpeg$")
  if (length(jpegs) > 0) {
    usethis::ui_warn("jpegs present in project folder and will be overwritten")
  } else {
    usethis::ui_info("No jpegs present in project folder")
  }
}
create_folders(dest = "images", project = "test")
# download the images
# Download a single image URL into dest/project/, keeping its base name.
# `fname` is accepted for backward compatibility but unused here; renaming
# is handled separately by rename_file().
download_image <- function(url = NULL, dest = NULL, project = NULL, fname = NULL) {
  # Bug fix: the original downloaded the global `urls$url` (the whole
  # column) on every call instead of the `url` argument passed in.
  download.file(url, destfile = file.path(dest, project, basename(url)))
}
map(urls, download_image, dest = "images", project = "test")
# Rename every jpeg in `path` to <fname><i>_<project>.jpeg, numbering the
# files in the order list.files() returns them (alphabetical).
#
# Args:
#   path: directory containing the downloaded jpegs.
#   project: project label appended to each new name.
#   fname: prefix for each new name (e.g. "fig").
# Returns the logical vector from file.rename() (one entry per file);
# zero-length when the directory holds no jpegs.
rename_file <- function(path = NULL, project = NULL, fname = NULL) {
  # "\\.jpeg$" is a proper regex; the original "*.jpeg" is a glob pattern
  # misused as a regex.
  old_files <- list.files(path, pattern = "\\.jpeg$", full.names = TRUE)
  # seq_along() is safe for an empty directory, where 1:length(old_files)
  # would produce c(1, 0) and make file.rename() error on mismatched
  # lengths.
  new_files <- paste0(fname, seq_along(old_files), "_", project, ".jpeg")
  file.rename(old_files, file.path(path, new_files))
}
e1aa7fda4dad13fd3e2a343b12d8bbff35198be1 | b5a3a327434768a0433a40602e07e2362de54e9b | /plot2.R | 8b278795cefc4228a0ee1fc4d9ab8906f3a3ce15 | [] | no_license | hrafnkelle/ExData_Plotting1 | 51500da6bd8cf0dd555f2d5a0f89b4e0795a7e5d | ecee83d5bf7d60f034cdfab648e913890a5395b6 | refs/heads/master | 2021-01-16T20:49:42.294853 | 2015-02-03T18:18:00 | 2015-02-03T18:18:00 | 30,209,216 | 0 | 0 | null | 2015-02-02T21:19:09 | 2015-02-02T21:19:09 | null | UTF-8 | R | false | false | 973 | r | plot2.R | library(lubridate)
library(dplyr)
# set the locale for time to english so we can get x axis day names in english
Sys.setlocale("LC_TIME", 'English')
# Fetch and unzip the dataset only if it is not already present.
# NOTE(review): `dest=` partial-matches download.file's `destfile`
# argument; spelling it out would be clearer.
if(!file.exists("household_power_consumption.txt"))
{
download.file("https://d396qusza40orc.cloudfront.net/exdata%2Fdata%2Fhousehold_power_consumption.zip",dest="household_power_consumption.zip")
unzip("household_power_consumption.zip")
}
# we read only the column headings first and then only the lines corresponding to the wanted dates
# (skip/nrows are hard-coded offsets for the 2007-02-01..2007-02-02 window;
# "?" marks missing values in this dataset)
pow_head<-read.table('household_power_consumption.txt', stringsAsFactors=F, sep=";", na.strings=c("?"),header=TRUE,nrows=1)
pow<-read.table('household_power_consumption.txt', stringsAsFactors=F, sep=";", na.strings=c("?"),skip=66637,nrows=2880,col.names=colnames(pow_head))
# Combine the Date and Time columns into a single POSIX datetime.
pow<-mutate(pow,Date2=dmy_hms(paste(Date,Time)))
# Write the line plot of global active power over time to a 480x480 PNG.
png(filename="plot2.png",width=480, height=480)
plot(pow$Global_active_power ~ pow$Date2,type="l",ylab="Global Active Power (kilowatts)",xlab="")
dev.off()
aae21ee4619692902b7c99d99525ea9dbe5c0714 | 0d01742ed9712dfaee387edefbd494946a38192b | /Figure2A-E-TMB-Downsampling.R | ecc348d874435c3eb1c7c6e99182f8868fb04b2e | [] | no_license | ShwetaCh/wes-recaptures | 09c6d4d6d5b0a5c3356ea48726f6b71cf535a805 | f7d910875796b75cb23d6abe213afe09c6c892fc | refs/heads/master | 2022-12-14T02:09:08.010877 | 2020-08-25T02:11:22 | 2020-08-25T02:11:22 | 267,383,063 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 14,405 | r | Figure2A-E-TMB-Downsampling.R | library(RColorBrewer)
library(Rmisc)
library(purrr)
library(grid)
library(gridExtra)
library(gsheet)
library(ggpubr)
library(cowplot)
library(gridExtra)
source('/ifs/res/taylorlab/chavans/scripts-orphan/multiplot.R')
specify_decimal = function(x, k) format(round(x, k), nsmall=k)
"%ni%" = Negate("%in%")
curr_date = format(Sys.Date(),"%d-%b-%y")
ex_clin = fread('~/tempo-cohort-level/WES_metadata_040620.txt')
dim(ex_clin); head(ex_clin)
ex_clin2 = fread('~/tempo-cohort-level/WES_allTMBs_051020.txt') %>%
select(Tumor_Sample_Barcode, TMBWES_IMgenes) %>%
mutate(TMBWES_IMgenes = ifelse(is.na(TMBWES_IMgenes),0,TMBWES_IMgenes))
ex_clin3 = inner_join(ex_clin, ex_clin2, by = c(Tumor_Sample_Barcode = "Tumor_Sample_Barcode"))
im_clin = fread('~/tempo-cohort-level/IM_metadata_040620.txt') %>%
arrange(desc(TMBIMPACT)) %>%
select(CMO, TMBIMPACT, TMBIMPACT_Downsampled);
dim(im_clin); head(im_clin)
im_ex_clin = inner_join(ex_clin3, im_clin, by = c(Tumor_Sample_Barcode = "CMO"))
im_ex_clin0 = im_ex_clin %>%
select(DMP, TMBWES = TMB, TMBIMPACT, TMBWES_IMgenes, TMBWES_NonIMgenes, TMBIMPACT_Downsampled, Purity_Reviewed) %>%
filter(TMBIMPACT<10, Purity_Reviewed >=0.5) %>%
mutate(adj.depth = TMBIMPACT_Downsampled/TMBIMPACT,
adj.genecontent = TMBWES_NonIMgenes/TMBWES_IMgenes,
adj.TMBIMPACT = TMBIMPACT*adj.depth*adj.genecontent) %>%
select(-c("adj.depth","adj.genecontent", "Purity_Reviewed"))
dim(im_ex_clin0) #679 7
my_comparisons <- list( c("TMBIMPACT","TMBIMPACT_Downsampled"),
c("TMBWES_IMgenes","TMBWES_NonIMgenes"),
c("TMBWES","TMBWES_IMgenes"),
c("TMBIMPACT","TMBWES"),
c("TMBWES","TMBWES_NonIMgenes"),
c("TMBIMPACT","adj.TMBIMPACT"),
c("TMBWES","adj.TMBIMPACT"),
c("TMBIMPACT","TMBWES_IMgenes") )
pdatm = melt(im_ex_clin0, id = c("DMP"))
pdatm = pdatm %>% mutate(group = variable, TMB = value) %>% select(-c(variable,value))
head(pdatm)
mm_tmbs_ = pdatm
mm_tmbs_$group = factor(mm_tmbs_$group, levels = c("TMBIMPACT","TMBIMPACT_Downsampled","TMBWES_IMgenes","TMBWES_NonIMgenes","adj.TMBIMPACT","TMBWES"))
mm_tmbs_ = mm_tmbs_ %>% mutate(seq = ifelse(group %like% "WES", "WES", "IM"))
head(mm_tmbs_)
p14 = ggplot(mm_tmbs_, aes(x=group, y=TMB, fill = seq)) + theme_classic(base_size = 16) +
geom_boxplot() + ylab("TMB in Purity >50% \n(TMBIMPACT <10 Samples Only)") + xlab("") +
scale_fill_manual(values = c('gray35','gray65')) +
theme(axis.text.x = element_text(angle = 90, hjust = 1))
p14 + stat_compare_means(aes(label = ..p.signif..), method = "wilcox.test", comparisons = my_comparisons)
########### For Roslin data TMB boxplot to show adj.TMB
im_dnsmpl_tmb = fread('~/tempo-cohort-level/fixed.downsampledTMB_ForRoslin.txt') %>%
select(DMP, TMBIMPACT_Downsampled = TMBIMPACT.downsampled.fixed) %>%
filter(DMP %in% im_ex_clin$DMP)
dim(im_dnsmpl_tmb) #1413
url <- 'docs.google.com/spreadsheets/d/1ZQCZ-02b8VNNDL05w-FdUiTpIVJ0WAW0EwxCkgX3oM0'
ex_roslin <- read.csv(text=gsheet2text(url, format='csv'), stringsAsFactors=FALSE) %>%
select(DMP = DMPID, TMBWES = TMBExome, TMBIMPACT, TMBWES_IMgenes = TMBExomeIMgenes, TMBWES_NonIMgenes = TMBExome_NonIMgenes, Purity_Reviewed = PurityExome) %>%
filter(DMP %in% im_ex_clin$DMP)
dim(ex_roslin) #1636
im_ex_clin_roslin = inner_join(im_dnsmpl_tmb, ex_roslin, by = 'DMP')
dim(im_ex_clin_roslin) #1413 -- figure out why rest are missing?!
im_ex_clin0_roslin = im_ex_clin_roslin %>%
select(DMP, TMBWES, TMBIMPACT, TMBWES_IMgenes, TMBWES_NonIMgenes, TMBIMPACT_Downsampled, Purity_Reviewed) %>%
filter(TMBIMPACT<10, Purity_Reviewed >=0.5) %>%
mutate(adj.depth = TMBIMPACT_Downsampled/TMBIMPACT,
adj.genecontent = TMBWES_NonIMgenes/TMBWES_IMgenes,
adj.TMBIMPACT = TMBIMPACT*adj.depth*adj.genecontent) %>%
select(-c("adj.depth","adj.genecontent", "Purity_Reviewed"))
dim(im_ex_clin0_roslin) #601 7
pdatm = melt(im_ex_clin0_roslin, id = c("DMP"))
pdatm = pdatm %>% mutate(group = variable, TMB = value) %>% select(-c(variable,value))
head(pdatm)
mm_tmbs_ = pdatm
mm_tmbs_$group = factor(mm_tmbs_$group, levels = c("TMBIMPACT","TMBIMPACT_Downsampled","TMBWES_IMgenes","TMBWES_NonIMgenes","adj.TMBIMPACT","TMBWES"))
mm_tmbs_ = mm_tmbs_ %>% mutate(seq = ifelse(group %like% "WES", "WES", "IM"))
head(mm_tmbs_)
p15 = ggplot(mm_tmbs_, aes(x=group, y=TMB, fill = seq)) + theme_classic(base_size = 16) +
geom_boxplot() + ylab("TMB in Purity >50% \n(TMBIMPACT <10 Samples Only)") + xlab("") +
scale_fill_nejm() +
theme(axis.text.x = element_text(angle = 90, hjust = 1),
plot.margin = unit(c(1,1,1,1), 'lines'))
p15 + stat_compare_means(aes(label = ..p.signif..), method = "wilcox.test", comparisons = my_comparisons)
pdf("~/tempo-cohort-level/Figure2B_TMB_Roslin.pdf", paper = "a4r")
p15 + stat_compare_means(aes(label = ..p.signif..), method = "wilcox.test", comparisons = my_comparisons)
dev.off()
######### For Roslin showing TMB IMPACT vs. WES
im_clin = fread('~/tempo-cohort-level/IM_metadata_040620.txt') %>% select(DMP, TMBIMPACT)
im_ex_clin_roslin1 = inner_join(im_clin, ex_roslin, by = c('DMP')) %>%
select(DMP, TMBIMPACT=TMBIMPACT.x, TMBWES)
dim(im_ex_clin_roslin1) #1636
# Equation
label = lm_eqn(im_ex_clin_roslin,y=im_ex_clin_roslin$TMBIMPACT,x=im_ex_clin_roslin$TMBWES)
label
# R²
r2 = summary(lm(TMBWES~TMBIMPACT,data=im_ex_clin_roslin))$adj.r.squared
r2
#Spearman Correlation
corr = cor.test(im_ex_clin_roslin$TMBWES, im_ex_clin_roslin$TMBIMPACT,method = "spearman")
#print(str(corr))
rho = as.numeric(corr$estimate)
rho
### scatter plot LINEAR scale Original
# Plot
# Prepare the plotting table: NA panel TMB -> 0, then shift both axes by +1 so
# the log10 scales below are defined at zero; bin samples into High/Low TMB
# using a threshold of 20 on either assay.
im_ex_clin_roslin_ = im_ex_clin_roslin1 %>%
mutate(TMBIMPACT = ifelse(is.na(TMBIMPACT),0,TMBIMPACT),
TMBIMPACT = TMBIMPACT + 1, TMBWES = TMBWES +1 ) %>%
mutate(grouptmb = ifelse(TMBIMPACT>=20 | TMBWES>=20,'High-TMB','Low-TMB'))
im_ex_clin_roslin_$grouptmb = as.factor(im_ex_clin_roslin_$grouptmb)
# Overall concordance scatter (all samples, single regression line); the
# dashed line is the y = x identity reference.
overall = ggplot(im_ex_clin_roslin_, aes(x = TMBIMPACT, y = TMBWES)) +
geom_point(pch = 21, size = 2, fill = 'lightgray') +
#scale_fill_manual(values = c('lightgreen','lightgray')) +
#scale_color_manual(values = c('red','blue')) +
geom_abline(intercept = 0, slope = 1, lty = 2) +
geom_smooth(method=lm, se=FALSE, lty=1) +
scale_y_continuous(trans = 'log10', limits = c(1,500), breaks = c(0,1,2,5,10,20,50,100,200,500)) +
scale_x_continuous(trans='log10', limits = c(1,500), breaks = c(0,1,2,5,10,20,50,100,200,500)) +
coord_fixed() + xlab('TMBIMPACT + 1') + ylab('TMBWES + 1') +
labs(title = paste0("R^2 = ",specify_decimal(r2,3),"\n")) +
theme_classic(base_size = 14) +
theme(legend.title = element_blank(), legend.position = 'right', plot.margin = unit(c(1,1,1,1), 'lines'))
#overall
# R^2 restricted to the Low-TMB stratum only (used in the second panel title).
r2_ = summary(lm(TMBWES~TMBIMPACT,data=filter(im_ex_clin_roslin_, grouptmb == 'Low-TMB')))$adj.r.squared
r2_
# Second panel: same scatter, colored/regressed separately per TMB stratum.
overall_2 = ggplot(im_ex_clin_roslin_, aes(x = TMBIMPACT, y = TMBWES, fill = grouptmb)) +
geom_point(pch = 21, size = 2) +
scale_fill_manual(values = c('lightgreen','lightgray')) +
scale_color_manual(values = c('red','blue')) +
geom_abline(intercept = 0, slope = 1, lty = 2) +
geom_smooth(method=lm, se=FALSE, lty=1, aes(group=grouptmb, color=grouptmb)) +
scale_y_continuous(trans = 'log10', limits = c(1,500), breaks = c(0,1,2,5,10,20,50,100,200,500)) +
scale_x_continuous(trans='log10', limits = c(1,500), breaks = c(0,1,2,5,10,20,50,100,200,500)) +
coord_fixed() + xlab('TMBIMPACT + 1') + ylab('TMBExome + 1') +
labs(title = paste0("R^2 = ",specify_decimal(r2_,3),"\n")) +
theme_classic(base_size = 14) +
theme(legend.title = element_blank(),legend.position = 'bottom', plot.margin = unit(c(1,1,1,1), 'lines'))
#overall_2
# Preview both panels side by side on the active device.
grid.arrange(overall, overall_2, newpage = FALSE, ncol = 2)
# ---- Superseded single-panel version of the TMB comparison plot; kept for
# reference (commented out). The two-panel overall/overall_2 layout replaced it.
# tmb_plot = ggplot(im_ex_clin_roslin,
# aes(x = TMBIMPACT, y = TMBWES)) +
# #facet_wrap(~Algorithm) +
# geom_point(alpha = 0.5, size = 3, shape = 16, color = "#08519c") +
# geom_abline(col = "blue") +
# scale_x_continuous(limits = c(0,150)) +
# scale_y_continuous(limits = c(0,150)) +
# labs(x = "IMPACT TMB", y = "Exome TMB") +
# geom_point(alpha = 0.3, size = 3, shape = 16) +
# geom_abline(col = "black",slope=1, intercept =0) +
# geom_smooth(method=lm, se=FALSE, lty=2) +
# coord_fixed() +
# theme_classic(base_size = 16) +
# theme(axis.text.x = element_text(colour = "blue")) +
# theme(axis.text.y = element_text(colour = "blue")) +
# theme(plot.title = element_text(colour = "blue", size = 14)) +
# labs(title = paste0("Non-Synonymous Coding Mutations-based-TMB","\n",
# " R^2 = ",specify_decimal(r2,3),"\n",
# " Spearman Correlation = ",specify_decimal(rho,3),"\n",
# " IMPACT_TMB = 1.2*Exome_TMB + 1.9"))
# Write Figure 2A (both concordance panels side by side) to a landscape A4 PDF.
pdf("~/tempo-cohort-level/Figure2A_TMB_Compare_Roslin.pdf", paper = "a4r")
grid.arrange(overall, overall_2, newpage = FALSE, ncol = 2)
dev.off()
#########################################################################################################################################################################
# Load per-sample exome TMB with tumor-purity and sequencing-depth bins from a
# shared Google Sheet, restricted to samples already in the IMPACT/exome cohort.
url <- 'docs.google.com/spreadsheets/d/1ZQCZ-02b8VNNDL05w-FdUiTpIVJ0WAW0EwxCkgX3oM0'
ex_roslin_purity_depth <- read.csv(text=gsheet2text(url, format='csv'), stringsAsFactors=FALSE)
ex_roslin_purity_depth0 = ex_roslin_purity_depth %>%
select(DMP = DMPID, TMBWES = TMBExome, purity_bin, depth_bin, TMBWESClonal = TMBExomeClonal) %>%
filter(DMP %in% im_ex_clin$DMP)
dim(ex_roslin_purity_depth0) #1636
# > filter(ex_roslin_purity_depth0, is.na(purity_bin))
# DMP TMBWES purity_bin depth_bin TMBWESClonal
# 1 P-0020573-T01-IM6 0 <NA> (100,200] 0
# 2 P-0020769-T01-IM6 0 <NA> (100,200] 0
# 3 P-0021159-T01-IM6 0 <NA> (200,300] 0
# 4 P-0021256-T01-IM6 0 <NA> (100,200] 0
# 5 P-0028349-T01-IM6 0 <NA> (100,200] 0
##Panel B : Effect of purity, and clonality on TMB
#mm_se1 = ddply(mm_, c("purity_bin"), summarise, N = length(TMBExome),mean = mean(TMBExome),sd = sd(TMBExome),se = sd / sqrt(N))
#mm_se = summarySE(mm_, measurevar="TMBExome", groupvars=c("purity_bin"))
# Mean exome TMB (with CI) per purity bin, split into low (<10) and high (>=10)
# TMB strata so the two very different scales can be plotted separately.
mm_se_low = filter(ex_roslin_purity_depth0, !is.na(purity_bin), TMBWES<10) %>% summarySE(., measurevar="TMBWES", groupvars=c("purity_bin"))
mm_se_high = filter(ex_roslin_purity_depth0, !is.na(purity_bin), TMBWES>=10) %>% summarySE(., measurevar="TMBWES", groupvars=c("purity_bin"))
# Point-and-error-bar plots of mean TMB by purity bin (low / high strata).
p3 = ggplot(mm_se_low, aes(x=purity_bin, y=TMBWES)) + theme_classic(base_size = 16) +
geom_point(size=5, shape=21, fill="white") + ylab("TMB WES\n(<10)") +
scale_y_continuous(limits=c(0,5),breaks = c(1,2,3,4,5)) +
scale_x_discrete(labels = c(0.1,0.2,0.3,0.4,0.5,0.6,0.7,0.8,0.9,1)) +
geom_errorbar(aes(ymin=TMBWES-ci, ymax=TMBWES+ci), colour="black", width=.1)
p4 = ggplot(mm_se_high, aes(x=purity_bin, y=TMBWES)) + theme_classic(base_size = 16) +
geom_point(size=5, shape=21, fill="white") + ylab("TMB WES\n(>=10)") +
scale_y_continuous(limits=c(10,100),breaks = c(10,20,40,60,80,100)) +
scale_x_discrete(labels = c(0.1,0.2,0.3,0.4,0.5,0.6,0.7,0.8,0.9)) +
geom_errorbar(aes(ymin=TMBWES-ci, ymax=TMBWES+ci), colour="black", width=.1)
grid.arrange(p3,p4, ncol = 2, widths = c(0.5,0.5), newpage = F)
## Clonal TMB
# Same purity-bin summaries and plots as above, but using clonal exome TMB.
mm_se_low_cl = filter(ex_roslin_purity_depth0, !is.na(purity_bin), TMBWESClonal<10) %>% summarySE(., measurevar="TMBWESClonal", groupvars=c("purity_bin"))
mm_se_high_cl = filter(ex_roslin_purity_depth0, !is.na(purity_bin), TMBWESClonal>=10) %>% summarySE(., measurevar="TMBWESClonal", groupvars=c("purity_bin"))
p5 = ggplot(mm_se_low_cl, aes(x=purity_bin, y=TMBWESClonal)) + theme_classic(base_size = 16) +
geom_point(size=5, shape=21, fill="white") + ylab("Clonal TMB WES\n(<10)") +
scale_y_continuous(limits=c(0,5),breaks = c(1,2,3,4,5)) +
scale_x_discrete(labels = c(0.1,0.2,0.3,0.4,0.5,0.6,0.7,0.8,0.9,1)) +
geom_errorbar(aes(ymin=TMBWESClonal-ci, ymax=TMBWESClonal+ci), colour="black", width=.1)
p6 = ggplot(mm_se_high_cl, aes(x=purity_bin, y=TMBWESClonal)) + theme_classic(base_size = 16) +
geom_point(size=5, shape=21, fill="white") + ylab("Clonal TMB WES\n(>=10)") +
scale_y_continuous(limits=c(0,125),breaks = c(20,40,60,80,100)) +
scale_x_discrete(labels = c(0.1,0.2,0.3,0.4,0.5,0.6,0.7,0.8,0.9)) +
geom_errorbar(aes(ymin=TMBWESClonal-ci, ymax=TMBWESClonal+ci), colour="black", width=.1)
grid.arrange(p5,p6, ncol = 2, widths = c(0.5,0.5), newpage = F)
#########################################################################################################################################################################
#Panel C : Effect of depth on TMB
#Hist with a line
# TMB histogram with a vertical reference line at the TMB = 10 cut point.
p<-ggplot(ex_roslin_purity_depth0, aes(x=TMBWES)) + theme_classic(base_size = 16) +
geom_histogram(color="black", fill="white", bins = 100) + scale_x_continuous(breaks = c(10,50,100,150,200,250,300,350))
p7 = p + geom_vline(aes(xintercept=10),color="red", lty=2);
p7
# Mean TMB (with CI) per sequencing-depth bin, again split at TMB 10.
# (These reuse/overwrite the mm_se_low / mm_se_high names from the purity panel.)
mm_se_low = filter(ex_roslin_purity_depth0, TMBWES<10, !is.na(depth_bin)) %>% summarySE(., measurevar="TMBWES", groupvars=c("depth_bin"))
mm_se_high = filter(ex_roslin_purity_depth0, TMBWES>=10, !is.na(depth_bin)) %>% summarySE(., measurevar="TMBWES", groupvars=c("depth_bin"))
#TMB by Sequencing depth (update for Exome depth)
p9 = ggplot(mm_se_low, aes(x=depth_bin, y=TMBWES)) + theme_classic(base_size = 16) +
geom_bar(stat="identity") + ylab("TMB WES\n(<10)") +
#scale_y_continuous(limits=c(0,5),breaks = c(1,2,3,4,5)) +
#scale_x_discrete(labels = c(50,100,150,200,250,300,350)) +
geom_errorbar(aes(ymin=TMBWES-ci, ymax=TMBWES+ci), colour="black", width=.1)
p10 = ggplot(mm_se_high, aes(x=depth_bin, y=TMBWES)) + theme_classic(base_size = 16) +
geom_bar(stat="identity") + ylab("TMB WES\n(>=10)") +
#scale_y_continuous(limits=c(0,120),breaks = c(10,20,40,60,80,100,120)) +
#scale_x_discrete(labels = c(50,100,150,200,250,300,350)) +
geom_errorbar(aes(ymin=TMBWES-ci, ymax=TMBWES+ci), colour="black", width=.1)
# NOTE(review): pdf('') opens a graphics device with an empty file name and is
# never paired with its own dev.off() -- looks like leftover debug; confirm and remove.
pdf('')
grid.arrange(p9,p10, ncol = 2, widths = c(0.5,0.5), newpage = F)
# Write Figure 2C/D/E (purity, clonality, and depth panels) to a portrait A4 PDF.
pdf("~/tempo-cohort-level/Figure2CDE_Purity_Depth_TMB_Roslin.pdf", paper = "a4")
grid.arrange(p3, p4, p5, p6, p9,p10, ncol = 2, nrow =3, widths = c(0.5,0.5), newpage = F)
dev.off()
|
cef8c61c1cb49564d4277c9daa4aed73f20fd04f | 0311b82d031ad3d1584be27c1ebe0a9bf2e5c136 | /tests/testthat/mod/s3_b.r | 5038d68fc8da1f43054ccffc015b9f02eea2cbbb | [
"MIT"
] | permissive | klmr/box | 239618ad913f2793f344236db24b56ed462037b9 | 2cef22e63abeedefa82e45b12f3f584301988733 | refs/heads/main | 2023-08-31T17:42:41.355022 | 2023-08-27T14:00:28 | 2023-08-27T14:00:28 | 10,223,958 | 508 | 24 | MIT | 2023-08-07T14:36:22 | 2013-05-22T16:03:42 | R | UTF-8 | R | false | false | 42 | r | s3_b.r | box::use(./s3)
# Re-export `test` from the sibling module `s3`
# (presumably imported via `box::use(./s3)` just above -- confirm in context).
#' @export
test = s3$test
|
2e8054fd0b4860ff3a6834b84c6bfbdc2afac20c | 80c9ba35106e5314544610f42cbb54a26d0289b0 | /warton method for koala_final.R | 806206b84dde0e612ce3fef7ebc525538abb54b6 | [] | no_license | bandara10/research | 91c5d176d3aee771f61801d8ef14dcc735c16fac | e5589c6a2a78a0f6b9135e2bc15aba706f15f1c4 | refs/heads/master | 2021-01-22T18:43:47.087735 | 2021-01-16T14:58:34 | 2021-01-16T14:58:34 | 102,412,442 | 0 | 0 | null | 2017-10-31T01:06:02 | 2017-09-04T23:55:48 | R | UTF-8 | R | false | false | 2,300 | r | warton method for koala_final.R | library(ppmlasso)
library(raster)
library(sp)
library(rgdal)
library(spatial.tools)
# Pre-standardise observer bias variables
# Load all GeoTIFF covariate layers from the wartondata folder into one stack.
setwd("C:\\Users\\uqrdissa\\ownCloud\\Covariates_analysis\\Mark_S\\raster_stack")
myenv <- list.files( path="wartondata", pattern="\\.tif$", full.names = TRUE)
myenv.stack <- stack(myenv)
names
myenv
# Rescale the stack's extent from metres to kilometres.
extent(myenv.stack) <- extent(c(xmin(myenv.stack), xmax(myenv.stack), ymin(myenv.stack), ymax(myenv.stack))/1000)
# changed resolution to .5
# Resample each covariate layer onto a common 200 x 200 grid.
r <- raster(ncol = 200, nrow = 200)
extent(r) <- extent(myenv.stack[[3]])
distance_tertiaryandlink <- resample(myenv.stack[[1]],r)
elev <- resample(myenv.stack[[2]],r)
temp <- resample(myenv.stack[[3]],r)
plot(elev)
# Flatten covariates to a data frame (X, Y, covariate columns) for ppmlasso.
st <- stack(distance_tertiaryandlink,elev,temp )
stt <- as.data.frame(st, xy=TRUE, na.rm=T)
stt[] <- lapply(stt, as.integer)
colnames(stt)[1] <- "X"
colnames(stt)[2] <- "Y"
#stt[is.na(stt)] <- 0
xydatan <- stt[c(1,2)]
# test resolution on back transformation
# sbd <- rasterFromXYZ(as.data.frame(stt)[, c("X", "Y", "temp")])
# plot(sbd)
#quad.1 = sample.quad(env.grid =stt , sp.scale = 1, file = "Quad") # this is quadrature points to be use for the analysis.
#koala data
# Koala presence coordinates (km), clipped to the study-area bounding box.
kolaxy <- read.csv("wartondata\\koalaxy.csv", header = TRUE) # in km.XY| go to ppmFrom
kolaxy2 <- subset(kolaxy, X > 442 & X < 540)
kolaxyT <- subset(kolaxy2, Y > 6902 & Y < 7000) # xy within the area only.
#########
# Fit a lasso-penalised point process model on the three covariates.
ppmForm = ~ poly(temp,elev,distance_tertiaryandlink, degree = 1)
ppmFit = ppmlasso(ppmForm, sp.xy = kolaxyT, env.grid = stt, sp.scale = 1)
# To predict using model-based control of observer bias (at min value for D_MAIN_RDS):
# NOTE(review): `newEnv` is created but prediction below uses `stt` directly --
# `newEnv` appears to be unused; confirm against the intended bias-control workflow.
newEnv = stt
#newEnv$A.1 = min(stand.A.1)
pred.biasCorrect = predict(ppmFit, newdata=stt)
#newEnv$A.1 = min(stand.A.1)
#pred.biasCorrect.1 = predict(ppmFit, newdata=sss)
# Rasterise predicted intensities back onto the grid for mapping.
predictions <- cbind(xydatan, pred.biasCorrect)
xy.rr <- rasterFromXYZ(as.data.frame(predictions)[, c("X", "Y", "pred.biasCorrect")])
plot(xy.rr, las=0)
# To find the resolution (in the range from 0.5 to 16 km):
scales = c(0.5, 1, 2, 4, 8, 16)
findres(scales, sp.xy = kolaxyT, env.grid = stt, formula = ppmForm)
#which returns the log-likelihood at each scale, difference < 2 at 1km scale
# Diagnostic plots as in Fig 5:
kenv = envelope(ppmFit, fun = Kinhom)
resid.plot = diagnose(ppmFit, which = "smooth", type = "Pearson")
|
c61e02356917522b75f78b92ec9f5f399cdbd112 | d2eed7b611e7884ce9b002d42b3f8733c395366a | /man/ggqrcode.Rd | b19576a05c6ba209731fca750c2c179ee437760b | [] | no_license | GuangchuangYu/yyplot | 0660860aa9681d10dd240947c317e4c8efd1e4a0 | 99ae1de3d710bf622b955623645fa06b160340d8 | refs/heads/master | 2022-06-10T13:35:36.322705 | 2022-06-08T09:12:32 | 2022-06-08T09:12:32 | 94,998,109 | 46 | 16 | null | null | null | null | UTF-8 | R | false | true | 383 | rd | ggqrcode.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/ggqrcode.R
\name{ggqrcode}
\alias{ggqrcode}
\title{ggqrcode}
\usage{
ggqrcode(text, color = "black", alpha = 1)
}
\arguments{
\item{text}{text string}
\item{color}{color}
\item{alpha}{[0, 1] for transparency}
}
\value{
ggplot object
}
\description{
Generate a QR code plot with ggplot2.
}
\author{
guangchuang yu
}
|
0c81fcaacea26c77917944e67da4facf5889311c | 8909b40cd679e02e992ac761e0bf14227dcad1ad | /Script/wine.R | 2e53bc0c8f8894bba2d77b09d179f5a5926d9aa2 | [
"Apache-2.0"
] | permissive | stkrtka4/test2 | cc28b2dbf7a129a0d960a35c2db1a22a1dc550e0 | 31ba5ebce59b0b853c4b76a95058610f189fc5ef | refs/heads/main | 2023-06-17T15:04:42.566984 | 2021-07-06T17:14:57 | 2021-07-06T17:14:57 | 382,076,250 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 1,342 | r | wine.R | #-----------------------------------------------------------------------------
## LOADING PACKAGES
library("tidyverse")
library("skimr")
library("lubridate")
library("rpart")
library("recipes")
library("caret")
#-----------------------------------------------------------------------------
## LOADING FILES
### LOADING FILES - GENERAL DATA INFO
source("4.scripts/1general_data.R")
#-----------------------------------------------------------------------------
## LOADING FILES
### LOADING FILES - VARIABLE MODIFICATIONS
source("4.scripts/2variable_modifications.R")
#-----------------------------------------------------------------------------
## LOADING FILES
### LOADING FILES - DATA EXPLORATIONS FOR PREDICT AND DEPENDENT VARIABLES
#source("4.scripts/3exploration_1D.R")
#-----------------------------------------------------------------------------
## LOADING FILES
### LOADING FILES - OTHERS
source("4.scripts/4correlations.R")
#-----------------------------------------------------------------------------
## LINEAR REGRESSION FOR EXPLANATION THE BEST WINE WITH LOW PRICE AND HIGH POINTS
### CREATE THE VARIABLE WINE_SUPER
# NOTE(review): the first assignment is immediately overwritten -- drop_na() is
# applied to `wine` directly, so `wine_super <- wine` is dead code.
wine_super <- wine
wine_super <- drop_na(wine, price, points)
# Model rating points as a function of log price on the complete cases.
wine_super_lm <- lm(points ~ log(price), wine_super)
summary(wine_super_lm)
|
de10fb4bf0d7030ca6b26fd22202c589d855a765 | a0293ea53719298dc161789f086af870779758b1 | /R/parsnip-exp_smoothing_reg.R | cd71578acd645fcd036614102f8c5db26b9dfbe7 | [
"MIT"
] | permissive | wildbs/bayesmodels | 2e5ab1530e52eaff8034f45fb140c3fc9eb84a05 | 86575c2f516e3ab6ee349ba45db41765c50760e6 | refs/heads/master | 2023-06-03T23:54:13.754970 | 2021-06-28T21:01:50 | 2021-06-28T21:01:50 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 11,999 | r | parsnip-exp_smoothing_reg.R | #' General Interface for Exponential Smoothing Models
#'
#' `exponential_smoothing()` is a way to generate a _specification_ of an ETS model
#' before fitting and allows the model to be created using
#' different packages. Currently the only package is `Rlgt`.
#'
#' @param mode A single character string for the type of model.
#' The only possible value for this model is "regression".
#' @param seasonality This specification of seasonality will be overridden by frequency of y,
#' if y is of ts or msts class. 1 by default, i.e. no seasonality.
#' @param second_seasonality Second seasonality.
#' @param seasonality_type Either "multiplicative" (default) or "generalized".
#' The latter seasonality generalizes additive and multiplicative seasonality types.
#' @param method "HW", "seasAvg", "HW_sAvg". Here, "HW" follows Holt-Winters approach.
#' "seasAvg" calculates level as a smoothed average of the last seasonality number of points
#' (or seasonality2 of them for the dual seasonality model), and HW_sAvg is an weighted
#' average of HW and seasAvg methods.
#' @param error_method Function providing size of the error. Either "std" (monotonically, but slower than proportionally,
#' growing with the series values) or "innov" (proportional to a smoothed abs size of innovations, i.e. surprises)
#'
#'
#' @details
#' The data given to the function are not saved and are only used
#' to determine the _mode_ of the model. For `exponential_smoothing()`, the
#' mode will always be "regression".
#'
#' The model can be created using the `fit()` function using the
#' following _engines_:
#'
#' - "stan" (default) - Connects to [Rlgt::rlgt()]
#'
#' __Main Arguments__
#'
#' The main arguments (tuning parameters) for the model are:
#'
#' - `seasonality`: Seasonality.
#' - `second_seasonality`: Second seasonality.
#' - `seasonality_type`: Either "multiplicative" (default) or "generalized".
#' - `method`: "HW", "seasAvg", "HW_sAvg"
#' - `error_method`: Either "std" or "innov"
#'
#' These arguments are converted to their specific names at the
#' time that the model is fit.
#'
#' Other options and argument can be
#' set using `set_engine()`.
#'
#' If parameters need to be modified, `update()` can be used
#' in lieu of recreating the object from scratch.
#'
#' __stan (default engine)__
#'
#' The engine uses [Rlgt::rlgt()].
#'
#'
#' Parameter Notes:
#' - `xreg` - This is supplied via the parsnip / bayesmodels `fit()` interface
#' (so don't provide this manually). See Fit Details (below).
#'
#'
#' @section Fit Details:
#'
#' __Date and Date-Time Variable__
#'
#' It's a requirement to have a date or date-time variable as a predictor.
#' The `fit()` interface accepts date and date-time features and handles them internally.
#'
#' - `fit(y ~ date)`
#'
#' __Univariate (No xregs, Exogenous Regressors):__
#'
#' For univariate analysis, you must include a date or date-time feature. Simply use:
#'
#' - Formula Interface: `fit(y ~ date)` will ignore xreg's.
#'
#' __Multivariate (xregs, Exogenous Regressors)__
#'
#' The `xreg` parameter is populated using the `fit()` function:
#'
#' - Only `factor`, `ordered factor`, and `numeric` data will be used as xregs.
#' - Date and Date-time variables are not used as xregs
#' - `character` data should be converted to factor.
#'
#' _Xreg Example:_ Suppose you have 3 features:
#'
#' 1. `y` (target)
#' 2. `date` (time stamp),
#' 3. `month.lbl` (labeled month as a ordered factor).
#'
#' The `month.lbl` is an exogenous regressor that can be passed to the `expotential_smoothing()` using
#' `fit()`:
#'
#' - `fit(y ~ date + month.lbl)` will pass `month.lbl` on as an exogenous regressor.
#'
#' Note that date or date-time class values are excluded from `xreg`.
#'
#'
#'
#' @seealso [fit.model_spec()], [set_engine()]
#'
#' @return A model spec
#'
#' @examples
#' \dontrun{
#' library(dplyr)
#' library(parsnip)
#' library(rsample)
#' library(timetk)
#' library(modeltime)
#' library(bayesmodels)
#'
#' # Data
#' m750 <- m4_monthly %>% filter(id == "M750")
#' m750
#'
#' # Split Data 80/20
#' splits <- rsample::initial_time_split(m750, prop = 0.8)
#'
#' # ---- ARIMA ----
#'
#' # Model Spec
#' model_spec <- exponential_smoothing() %>%
#' set_engine("stan")
#'
#' # Fit Spec
#' model_fit <- model_spec %>%
#' fit(log(value) ~ date + month(date), data = training(splits))
#' model_fit
#'}
#' @export
exponential_smoothing <- function(mode = "regression", seasonality = NULL, second_seasonality = NULL, seasonality_type = NULL,
                                  method = NULL, error_method = NULL) {

  # Capture each tuning parameter unevaluated so parsnip can resolve it later
  # (possibly to a tune() placeholder).
  quoted_args <- list(
    seasonality        = rlang::enquo(seasonality),
    second_seasonality = rlang::enquo(second_seasonality),
    seasonality_type   = rlang::enquo(seasonality_type),
    method             = rlang::enquo(method),
    error_method       = rlang::enquo(error_method)
  )

  # Build the unfitted model specification; the engine is attached later
  # via set_engine().
  parsnip::new_model_spec(
    "exponential_smoothing",
    args     = quoted_args,
    eng_args = NULL,
    mode     = mode,
    method   = NULL,
    engine   = NULL
  )
}
#' @export
print.exponential_smoothing <- function(x, ...) {
  # Header announcing the specification and its mode, then the standard
  # parsnip argument printout.
  cat("Exponential Smoothing Model Specification (", x$mode, ")\n\n", sep = "")
  parsnip::model_printer(x, ...)

  # Once an engine has been set, also show the populated fit call template.
  if (!is.null(x$method$fit$args)) {
    cat("Model fit template:\n")
    print(parsnip::show_call(x))
  }

  invisible(x)
}
#' @export
#' @importFrom stats update
# Update an exponential_smoothing() spec in place. `fresh = TRUE` replaces all
# arguments; otherwise only non-NULL arguments overwrite the existing ones.
update.exponential_smoothing <- function(object, parameters = NULL,
seasonality = NULL, second_seasonality = NULL, seasonality_type = NULL,
method = NULL, error_method = NULL,
fresh = FALSE, ...) {
# Reject unknown arguments passed through `...`.
parsnip::update_dot_check(...)
if (!is.null(parameters)) {
parameters <- parsnip::check_final_param(parameters)
}
# Re-capture the tuning parameters unevaluated, mirroring the constructor.
args <- list(
seasonality = rlang::enquo(seasonality),
second_seasonality = rlang::enquo(second_seasonality),
seasonality_type = rlang::enquo(seasonality_type),
method = rlang::enquo(method),
error_method = rlang::enquo(error_method)
)
args <- parsnip::update_main_parameters(args, parameters)
if (fresh) {
# Replace the whole argument set.
object$args <- args
} else {
# Keep existing values for arguments the caller left as NULL.
null_args <- purrr::map_lgl(args, parsnip::null_value)
if (any(null_args))
args <- args[!null_args]
if (length(args) > 0)
object$args[names(args)] <- args
}
# Rebuild the spec, preserving the mode, engine args, and engine.
parsnip::new_model_spec(
"exponential_smoothing",
args = object$args,
eng_args = object$eng_args,
mode = object$mode,
method = NULL,
engine = object$engine
)
}
#' @export
#' @importFrom parsnip translate
translate.exponential_smoothing <- function(x, engine = x$engine, ...) {
  # Fall back to the Stan engine when none has been set yet.
  if (is.null(engine)) {
    message("Used `engine = 'stan'` for translation.")
    engine <- "stan"
  }

  # Delegate the actual translation to parsnip's default method.
  parsnip::translate.default(x, engine, ...)
}
# FIT - Exponential Smoothing (Rlgt) -----

#' Low-Level Exponential Smoothing function for translating modeltime to Rlgt
#'
#' @param x A dataframe of xreg (exogenous regressors)
#' @param y A numeric vector of values to fit
#' @param seasonality Seasonality
#' @param seasonality2 Second seasonality
#' @param seasonality.type Either "multiplicative" (default) or "generalized".
#' The latter seasonality generalizes additive and multiplicative seasonality types.
#' @param level.method "HW", "seasAvg", "HW_sAvg"
#' @param error.size.method Either "std" (monotonically, but slower than proportionally,
#' growing with the series values) or "innov" (proportional to a smoothed abs size of innovations, i.e. surprises)
#' @param ... Additional arguments passed to `Rlgt::rlgt()`
#'
#' @return A modeltime model
#'
#' @export
exp_smoothing_stan_fit_impl <- function(x, y, seasonality = 1, seasonality2 = 1, seasonality.type = "multiplicative",
                                        error.size.method = "std", level.method = "HW", ...) {

  # X & Y
  # Expect outcomes = vector, predictor = data.frame
  outcome   <- y
  predictor <- x

  # INDEX & PERIOD
  # Determine Period, Index Column, and Index
  index_tbl <- modeltime::parse_index_from_data(predictor)
  period    <- modeltime::parse_period_from_index(index_tbl, "auto")
  idx_col   <- names(index_tbl)
  idx       <- timetk::tk_index(index_tbl)

  # Drop the index column; anything left over is a candidate exogenous regressor.
  names_predictor <- names(predictor) %>% dplyr::setdiff(idx_col)
  predictor       <- predictor %>% dplyr::select(dplyr::all_of(names_predictor))

  # XREGS
  # Clean names, get xreg recipe, process predictors
  xreg_recipe <- modeltime::create_xreg_recipe(predictor, prepare = TRUE)
  xreg_matrix <- modeltime::juice_xreg_recipe(xreg_recipe, format = "matrix")

  # FIT
  outcome <- stats::ts(outcome, frequency = period)

  if (!is.null(xreg_matrix)) {
    fit_smooth <- Rlgt::rlgt(y = outcome,
                             seasonality       = seasonality,
                             seasonality2      = seasonality2,
                             seasonality.type  = seasonality.type,
                             error.size.method = error.size.method,
                             level.method      = level.method,
                             xreg              = xreg_matrix,
                             ...)
  } else {
    fit_smooth <- Rlgt::rlgt(y = outcome,
                             seasonality       = seasonality,
                             seasonality2      = seasonality2,
                             seasonality.type  = seasonality.type,
                             error.size.method = error.size.method,
                             level.method      = level.method,
                             ...)
  }

  # Posterior mean of the level component "l" at each time point, used as the
  # in-sample fitted values. (Replaces a vector-growing `for (i in 1:d[2])`
  # loop with the equivalent, safer `colMeans()` over the draws-by-time matrix.)
  rlgt_fit <- function(stan_samples) {
    level_draws <- rstan::extract(stan_samples, pars = "l")$l
    colMeans(level_draws)
  }

  # Residuals = actual - fitted
  rlgt_res <- function(actual, fitted) {
    actual - fitted
  }

  # Compute fitted values once and reuse them (previously extracted twice).
  fitted_vals <- rlgt_fit(fit_smooth$samples)

  # RETURN
  modeltime::new_modeltime_bridge(
    class = "exp_smoothing_stan_fit_impl",

    # Models
    models = list(
      model_1 = fit_smooth
    ),

    # Data - Date column (matches original), .actual, .fitted, and .residuals columns
    data = tibble::tibble(
      !! idx_col := idx ,
      .actual    = outcome,
      .fitted    = fitted_vals,
      .residuals = rlgt_res(outcome, fitted_vals)
    ),

    # Preprocessing Recipe (prepped) - Used in predict method
    extras = list(
      xreg_recipe = xreg_recipe
    ),

    # Description
    desc = "Exponential Smoothing Model"
  )
}
#' @export
print.exp_smoothing_stan_fit_impl <- function(x, ...) {
  # Delegate printing to the underlying Rlgt model stored in the bridge.
  print(x$models$model_1)
  invisible(x)
}
#' @export
predict.exp_smoothing_stan_fit_impl <- function(object, new_data, ...) {
  # S3 predict method: forward everything to the bridge prediction helper.
  exp_smoothing_stan_predict_impl(object = object, new_data = new_data, ...)
}
#' Bridge prediction function for Exponential Smoothing models
#'
#' @inheritParams parsnip::predict.model_fit
#' @param ... Additional arguments passed to `stats::predict()`
#'
#' @return A prediction
#'
#' @export
exp_smoothing_stan_predict_impl <- function(object, new_data, ...) {

  # PREPARE INPUTS
  model <- object$models$model_1

  # Forecast over the new data horizon.
  # BUG FIX: the result was previously assigned to `preds_foecast` (typo) but
  # read back below as `preds_forecast`, which errored with
  # "object 'preds_forecast' not found" at prediction time.
  preds_forecast <- stats::predict(model, new_data, ...)

  # Return predictions as a numeric vector (first column of the forecast).
  preds <- tibble::as_tibble(preds_forecast) %>% purrr::pluck(1)

  return(preds)
}
|
6858475e271e7fba7024942ead081e7856df36e5 | f1730e6b345331034fecb94eac65d95089bc19c2 | /RNA-Seq-UnequalLib.R | db5dea59acbed9aeca4a384e96f8cf52bc30bf72 | [] | no_license | cindymzhou/SCI-DEGs-analysis | 9c82fa535a6d82348ec290ec8e34b365ec68b244 | bcfe3348868ff6456744e3eea0ffe0050ba48399 | refs/heads/master | 2022-09-08T08:21:58.578309 | 2020-05-23T17:48:16 | 2020-05-23T17:48:16 | 265,879,726 | 1 | 0 | null | null | null | null | UTF-8 | R | false | false | 3,279 | r | RNA-Seq-UnequalLib.R | ###Load in libraries:
library(edgeR)
library(limma)
library(biomaRt)
library(tidyverse)
# ---- limma-voom differential expression pipeline for GSE93249 (unequal
# library sizes), from raw counts through linear model fitting. ----
#First, import the count file, and make sure to create clear row and column names
counts <- read.csv("GSE93249_raw_counts.csv")
#with working directory set as "Documents"
##To isolate the coding RNA samples:
counts <- counts[,-(2:3)]
counts <- data.frame(counts[,-1], row.names = counts[,1])
#This helps make the first column into the row name instead
counts <- counts[-(22076:30443),]
##GSE93249 has long non-coding RNA data, this line deletes that and only keeps the coding transcripts
##Now, we use edgeR's DGElist function
dge <- DGEList(counts=counts)
##Making the design matrix manually for time-course experiments:
# Groups: 3x control, 3x 1-month, 3x 3-month, 2x 6-month samples (no intercept).
design <- model.matrix(~ 0+factor(c(1,1,1,2,2,2,3,3,3,6,6)))
colnames(design) <- c("control", "m1", "m3", "m6")
##Filtering out counts that are close to 0
keep <- filterByExpr(y = dge$counts, design = design)
dge <- dge[keep,,keep.lib.sizes=FALSE]
#TMM Normalizing
dge <- calcNormFactors(dge)
#For samples with UNEQUAL sequencing depth (largest library size:smallest lib size = > than 3), we use voom:
v <- voom(dge, design, plot=TRUE)
fit <- lmFit(v, design)
##Then, we can run the limma pipeline as usual!
# Contrast matrix: each contrast compares control against one post-injury
# time point (1, 3, or 6 months).
cont.matrix <- makeContrasts(Controlvs1Month = control-m1,
                             Controlvs3Months= control-m3,
                             Controlvs6Months= control-m6,
                             levels=design)
##This contrast matrix asks "Compared to control, which genes are responding at either 1 month, 3 months, OR 6 months?"
fit2 <- contrasts.fit(fit, cont.matrix)
# BUG FIX: eBayes() was previously applied to `fit` (the plain lmFit object),
# which silently discarded the contrasts.fit() result above. Per the limma
# workflow, empirical-Bayes moderation must run on the contrast fit so that
# topTable() reports the contrast coefficients.
fit2 <- eBayes(fit2)
topTable(fit2, coef=1, adjust="BH")
##the coef specifies which contrast/comparison you want to look at. Coef=1 here refers to "Controlvs1Month"
##coef=ncol(design) makes it so that the coefficients (aka, # of RNA samples) is equivalent to the number of rows/columns in the design matrix
complete_list <- topTable(fit2, coef=1, number=Inf, adjust="BH")
##To get the gene IDs into the table:
# ---- Annotate Ensembl gene IDs with rat gene symbols via biomaRt. ----
complete_list <- rownames_to_column(complete_list, var="ID")
#This will make the ensembl rownames into a column! So that it can be name matched
mart = useMart('ensembl')
#This defines which database you want to pull IDs from
#To list all the ensembl database of organisms:
listDatasets(mart)
##This will list ALL of the ones available
searchDatasets(mart = mart, pattern = "norvegicus")
##This will help you search for a particular organism, using close words
#Next, we need to choose database of interest:
ensembl = useMart( "ensembl", dataset = "rnorvegicus_gene_ensembl" )
# choose attributes of your interest
listAttributes(ensembl)
##this will give all the attributes (i.e. gene ID, transcript ID)
gene <- getBM(attributes = c("ensembl_gene_id","external_gene_name"),values = complete_list$ID,mart = ensembl)
#Now we need to match the gene id with ensembl_gene_id
id <- match(complete_list$ID, gene$ensembl_gene_id)
#Add Gene symbol column into the data frame
# Note this overwrites the Ensembl IDs in `ID` with gene symbols (NA where unmatched).
complete_list$ID <- gene$external_gene_name[id]
head(complete_list)
##Converting RNA-Seq names to gene names is courtesy of: https://www.biostars.org/p/337072/
##To see a summary of the number of differentially expressed genes:
summary(decideTests(fit2))
|
ccc681f23d9c51c7dd996372e321a298ba0be890 | da3e71e423bdff81f4cb1b6862f2a3da1676bc85 | /housePriceAllInOne.R | 3496f39a369b569f7ac0d609c00a5aa19f5423a5 | [] | no_license | bikashg3/LearningR | 30a3d0f623b3296adef1bc9eb55f72b435d39d59 | a379924a7a135997c74b3ec02f59642d7bdff45a | refs/heads/master | 2020-12-24T08:08:56.276552 | 2016-05-16T20:07:16 | 2016-05-16T20:07:16 | 65,093,638 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 11,660 | r | housePriceAllInOne.R | #### Step 00 - Install Required Packages
# setwd( "C:/Users/jdlecy/Documents/GitHub/hedonic-prices" )
# https://docs.google.com/forms/d/13RWJ9LR3mFbQBNsX9bnLt5XRotfmZKxkwetYzh-h-oA/viewform
setwd("E:/R Training/hedonic-prices-master/hedonic-prices-master")
install.packages( "RCurl" )
install.packages( "ggmap" )
install.packages( "jsonlite" )
install.packages( "memisc" )
#### Step 01 - Load Housing Data
# setwd( "C:/Users/jdlecy/Documents/GitHub/hedonic-prices" )
# load package to read URL data
library( RCurl )
# address of google spreadsheet
# shared with Professor Lecy census survy on google doc with csv
my.url <- "https://docs.google.com/spreadsheets/d/1W0vM5sCRhZjkQh6A0WGV8j1rhQAjecCrQW7PguHft-E/pub?gid=1989082857&single=true&output=csv"
housing.raw <- getURL( my.url)#, ssl.verifypeer=FALSE )
# read as text, stringAsFactors=FALSE
dat <- read.csv( textConnection(housing.raw), stringsAsFactors=FALSE )
head( dat )
# RENAME VARIABLES
names( dat ) <- c("timestamp","price","X1","X2","sqft","your.name","lot.size","beds",
"bath","garage","year","elementary","middle","high","walk","tax","highway",
"restaurant","starbucks","park","mall","address","zip","tract" )
# remove commas from numbers
dat$price <- as.numeric( gsub( ",","", dat$price ) )
dat$tax <- as.numeric( gsub( ",","", dat$tax ) )
dat$lot.size <- as.numeric( gsub( ",","", dat$lot.size ) )
dat$sqft <- as.numeric( gsub( ",","", dat$sqft ) )
# replace if the value is missing
dat$lot.size[ is.na( dat$lot.size ) ] <- mean( dat$lot.size, na.rm=T )
# clean up
rm( housing.raw )
rm( my.url )
#### Step 01.01 - Graph Relationships
# setwd( "C:/Users/jdlecy/Documents/GitHub/hedonic-prices" )
source( "Step 01 - Load Housing Data.R" )

# plotFun: scatter plot of x2 (default: house price) against x1, drawn with
# large translucent gray points, no surrounding box, and a red lowess trend
# line layered on top.
plotFun <- function( x1, x2=price, lab1, lab2="House Price" )
{
  pt.col <- gray( 0.6, alpha = 0.2 )
  plot( x1, x2,
        xlab = lab1, ylab = lab2,
        pch = 19, cex = 3.5, cex.lab = 1.5,
        col = pt.col, bty = "n" )
  trend <- lowess( x2 ~ x1 )
  lines( trend, col = "red", lwd = 3 )
}
# CREATE GRAPHS AND SAVE AS PDF
dir.create( "Results" ) # set up a results directory
# All plots below go into one PDF; the device is closed by dev.off() at the end
pdf( "./Results/Predictors of Price.pdf" )
# HOUSE SIZE (SQFT)
plotFun( x1=dat$sqft, x2=dat$price, lab1="Size (Square Feet)", lab2="House Price" )
# LOT SIZE
plotFun( x1=dat$lot.size, x2=dat$price, lab1="Lot Size (Square Feet)", lab2="House Price" )
# AGE vs PRICE (data were collected in 2014)
plotFun( x1=(2014-dat$year), x2=dat$price, lab1="Age (Years)", lab2="House Price" )
# AGE vs SIZE
plotFun( x1=(2014-dat$year), x2=dat$sqft, lab1="Age (Years)", lab2="Size (Square Feet)" )
# WALK SCORE
plotFun( x1=dat$walk, x2=dat$price, lab1="Walk Score", lab2="House Price" )
# SCHOOL: composite quality score = elementary + middle + high ratings
school <- dat$elementary + dat$middle + dat$high
plotFun( x1=school, x2=dat$price, lab1="School Quality", lab2="House Price" )
# DIST TO RESTAURANT
plotFun( x1=dat$restaurant, x2=dat$price, lab1="Dist to Good Restaurant", lab2="House Price" )
# DIST TO STARBUCKS
plotFun( x1=dat$starbucks, x2=dat$price, lab1="Distance to Starbucks", lab2="House Price" )
# DIST TO PARK
plotFun( x1=dat$park, x2=dat$price, lab1="Dist to Park", lab2="House Price" )
# DIST TO MALL
# BUG FIX: this plot was drawn from dat$restaurant (copy/paste error) even
# though it is labeled "Dist to Mall"; use the mall-distance column instead.
plotFun( x1=dat$mall, x2=dat$price, lab1="Dist to Mall", lab2="House Price" )
# Box plots plus group means for the categorical predictors.
plot( as.factor(dat$garage), dat$price, ylab="House Price", xlab="Garage" )
tapply( dat$price, as.factor(dat$garage), mean )
plot( as.factor(dat$bath), dat$price, ylab="House Price", xlab="Number of Bathrooms", cex.lab=1.5 )
tapply( dat$price, as.factor(dat$bath), mean )
plot( as.factor(dat$beds), dat$price, ylab="House Price", xlab="Number of Bedrooms", cex.lab=1.5 )
tapply( dat$price, as.factor(dat$beds), mean )
plot( as.factor(dat$highway), dat$price, ylab="House Price", xlab="Near a Highway?", cex.lab=1.5 )
tapply( dat$price, as.factor(dat$highway), mean )
# NOTE(review): when this file is source()d the tapply() results are computed
# but not printed; wrap them in print() if the group means should be shown.
dev.off() # end of pdf call
#### Step 02 - Geocode House Addresses
# setwd( "C:/Users/jdlecy/Documents/GitHub/hedonic-prices" )
source( "Step 01 - Load Housing Data.R" )
# Keep only the fields needed for geocoding, and strip punctuation
# (commas and periods) that can confuse the geocoder.
houses <- dat[ , c("address","zip") ]
houses$address <- gsub( ",", "", houses$address )
houses$address <- gsub( "\\.", "", houses$address )
# Build full single-line addresses, e.g. "101 Main St, Syracuse, NY, 13210"
addresses <- paste( houses$address, "Syracuse, NY", houses$zip, sep=", " )
head( addresses )
library( ggmap )
# translate street address to latitude longitude coordinates
#
# lat.long <- geocode( addresses )
#
# takes about 5 min to run
# pre-geocoded version of dataset for demo
# NOTE(review): assumes the rows of lat.long.csv are in the same order as
# `dat` -- verify before trusting the cbind() at the bottom of this step.
lat.long <- read.csv( "Data/lat.long.csv" )
head( lat.long )
# Fetch a black-and-white basemap of Syracuse ...
syracuse <- get_map(
location='syracuse, ny',
zoom = 12,
color="bw"
)
syr.map <- ggmap(
syracuse,
extent = "device"
)
# ... and plot each geocoded house as a red point.
syr.map + geom_point(
data=lat.long,
aes(x=lon, y=lat),
size=2,
col="red",
alpha=1
)
# Attach the coordinates to the housing data (row order must match).
dat <- cbind( dat, lat.long )
# clean up temporary objects
rm( houses )
rm( addresses )
#### Step 03 - Match House Address to Census Tract
# setwd( "C:/Users/jdlecy/Documents/GitHub/hedonic-prices" )
source( "Step 02 - Geocode House Addresses.R" )
### MATCH GEOCODED ADDRESSES TO A CENSUS TRACT
# To add census data we need to associate each house with a census tract.
# FCC census block API:
#   https://transition.fcc.gov/form477/censustracts.html
library( RCurl )
# Preallocate the result instead of growing it inside the loop.
tract.id <- character( nrow(lat.long) )
for( i in seq_len( nrow(lat.long) ) )
{
  print( i )  # progress indicator: one API call per house
  aURL <- paste0( "http://data.fcc.gov/api/block/2010/find?latitude=",
                  lat.long$lat[i], "&longitude=", lat.long$lon[i] )
  x <- getURL( aURL )
  # The response contains a 15-digit block FIPS code, e.g. 360670040001007:
  #   36=state, 067=county, 004000=census tract, 1007=block group.
  # Locate the "Block FIPS" label and pull out the 15 digits after it ...
  start.here <- regexpr( "Block FIPS", x )
  this.one <- substr( x, (start.here+12), (start.here+26) )
  # ... then keep only the 6-digit tract portion.
  tract.id[i] <- substr( this.one, 6, 11 )
}
# http://rfunction.com/archives/1719  # about regexpr
# combine house data with census tract IDs
dat <- cbind( dat, tract.id )
rm( tract.id )
#### Step 04 - Download Census Data
# setwd( "C:/Users/jdlecy/Documents/GitHub/hedonic-prices" )
source( "Step 03 - Match House Address to Census Tract.R" )
### DOWNLOAD CENSUS DATA THROUGH API
# http://www.census.gov/developers/
library(RCurl)
library( jsonlite )
# SECURITY NOTE(review): the API key is hard-coded; prefer reading it from an
# environment variable, e.g. APIkey <- Sys.getenv("CENSUS_API_KEY").
APIkey <- "b431c35dad89e2863681311677d12581e8f24c24"
state <- "36"    # New York
county <- "067"  # Onondaga County
# Convert a JSON table (whose first row holds the column names) to a data frame.
json.to.data <- function( x )
{
  a.matrix <- fromJSON(x)       # converts json table to a matrix
  c.names <- a.matrix[ 1 , ]    # column names are the first row
  a.matrix <- a.matrix[ -1 , ]
  my.dat <- data.frame( a.matrix )
  names( my.dat ) <- c.names
  return( my.dat )
}
# Fetch one ACS 5-year profile field for every tract in the county and return
# it as a data frame (field value plus state/county/tract columns).
# This replaces three copies of identical URL-building / download code.
# Field codes come from the ACS data dictionary:
#   poverty rate: DP03_0119PE, total pop: DP05_0028E, pop black: DP05_0033E
get.acs.field <- function( fieldnm )
{
  resURL <- paste("http://api.census.gov/data/2013/acs5/profile/?get=",fieldnm,
                  "&for=tract:*&in=state:",state,"+county:",county,"&key=",
                  APIkey,sep="")
  json.to.data( getURL( resURL, ssl.verifypeer = FALSE ) )
}
### Fetch the data
# keep the full poverty table -- its state/county/tract columns identify rows
poverty <- get.acs.field( "DP03_0119PE" )
# for the other two fields only the value column (column 1) is needed
black   <- as.numeric( as.character( get.acs.field( "DP05_0033E" )[ , 1 ] ) )
tot.pop <- as.numeric( as.character( get.acs.field( "DP05_0028E" )[ , 1 ] ) )
prop.black <- black / tot.pop
# assemble the tract-level census data used downstream (merged in Step 05)
cen.dat <- cbind( poverty, prop.black )
names( cen.dat ) <- c( "poverty", "state", "county", "tract", "prop.black" )
# clean up temporary objects
rm( APIkey )
rm( black )
rm( county )
rm( get.acs.field )
rm( json.to.data )
rm( poverty )
rm( prop.black )
rm( state )
rm( tot.pop )
#### Step 05 - Count Nearby Crimes
# setwd( "C:/Users/jdlecy/Documents/GitHub/hedonic-prices" )
source( "Step 04 - Download Census Data.R" )
### HOW MANY NEARBY CRIMES
# 2014 data downloaded from: http://www.syracuse.com/crime/police-reports/
#
# It has been geocoded using block locations:
crime.dat <- read.csv( "Data/crime.lat.lon.csv" )
library( ggmap )
syracuse <- get_map( location='syracuse, ny', zoom = 11, color="bw" )
syr.map <- ggmap( syracuse, extent = "device" )
# NOTE(review): when this file is source()d the ggplot below is built but not
# rendered; wrap it in print() if the crime map should actually be drawn.
syr.map + geom_point(
  data=crime.dat,
  aes(x=lon, y=lat),
  size=3,
  col="steel blue",
  alpha=0.5
)
# Count crimes within 0.01 degrees of each house, using plain Euclidean
# distance on lat/lon coordinates (an approximation: one degree spans
# different ground distances north-south vs east-west).
# reference for distance formula: sqrt( (43.056353-43.062111)^2 + (-76.140454 - -76.128620)^2 )
crime.count <- integer( nrow(lat.long) )  # preallocate instead of growing
for( i in seq_len( nrow(lat.long) ) )
{
  dist.c <- sqrt( (lat.long$lat[i] - crime.dat$lat)^2 +
                  (lat.long$lon[i] - crime.dat$lon)^2 )
  # na.rm=TRUE: a crime record with missing coordinates should be skipped,
  # not turn the whole count into NA
  crime.count[i] <- sum( dist.c < 0.01, na.rm=TRUE )
}
dat <- cbind( dat, crime.count )
#### MERGE DATA
# Attach the tract-level census variables (from Step 04) to each house.
dat <- merge( dat, cen.dat, by.x="tract.id", by.y="tract" )
names( dat )
rm( lat.long )
#### Step 05.01 - Graph Demographic Predictors
# setwd( "C:/Users/jdlecy/Documents/GitHub/hedonic-prices" )
source( "Step 05 - Count Nearby Crimes.R" )
### PLOT DEMOGRAPHIC VARIABLES VS HOME PRICES
# Scatter plot of x2 (default: price) vs x1 with a red lowess trend line.
# (Redefined here so this step can run standalone; same helper as Step 01.01.)
plotFun <- function( x1, x2=price, lab1, lab2="House Price" )
{
  plot( x1, x2,
        pch=19,
        col=gray(0.6, alpha = 0.2),
        cex=3.5,
        bty = "n",
        xlab=lab1,
        ylab=lab2, cex.lab=1.5 )
  lines( lowess(x2~x1), col="red", lwd=3 )
}
pdf( "Results/Demographic Factors.pdf" )
# CRIME
plotFun( x1=dat$crime.count, x2=dat$price, lab1="Num of Nearby Crimes", lab2="House Price" )
# POVERTY
# BUG FIX: as.numeric(as.factor(x)) returns the factor *level codes* -- with
# levels sorted as strings, so "10.5" sorts before "2.3" -- not the poverty
# rates themselves.  Convert the values directly instead.
pov.vec <- as.numeric( as.character( dat$poverty ) )
plotFun( x1=pov.vec, x2=dat$price, lab1="Poverty Rate", lab2="House Price" )
# BLACK
plotFun( x1=dat$prop.black, x2=dat$price, lab1="Proportion of Population Black", lab2="House Price" )
dev.off()
#### Step 05.02 - Regressions
# setwd( "C:/Users/jdlecy/Documents/GitHub/hedonic-prices" )
source( "Step 05 - Count Nearby Crimes.R" )
# Composite school-quality score: sum of elementary, middle, and high ratings.
dat$school <- dat$elementary + dat$middle + dat$high
# Model 1: hedonic price model using structural house characteristics only.
m.01 <- lm( price ~ sqft + lot.size + bath + as.factor(garage) + year + school + as.factor(highway), data=dat )
# Discourage scientific notation when printing coefficients.
options( scipen=6 )
summary( m.01 )
# Model 2: same specification plus neighborhood demographics
# (nearby crime counts and tract racial composition).
m.02 <- lm( price ~ sqft + lot.size + bath + as.factor(garage) + year +
school + as.factor(highway) + crime.count + prop.black, data=dat )
options( scipen=6 )
summary( m.02 )
library( memisc )
# Side-by-side regression table comparing the two specifications.
mtab <- mtable(
"Model 1"=m.01,
"Model 2"=m.02,
summary.stats=c("R-squared","N", "p"),
digits=2
)
mtab
####
####
####
cff5bff78d687f1c7ef30eafff79c2efd01f9ff8 | 5f34476226443ef5cb5ca66db037d1fd47c41832 | /man/simCox.Rd | 5f48bc4ca905cbf30410174f7bde75b803ba698b | [] | no_license | cran/icmm | 172e4ce4079777c3b028fe084355823fa8e0c7ba | f9f25daed5d0ce12ff343dce7920e6a86bb317e5 | refs/heads/master | 2021-07-07T11:53:43.306143 | 2021-05-26T04:20:02 | 2021-05-26T04:20:02 | 98,514,042 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 738 | rd | simCox.Rd | \name{simCox}
\alias{simCox}
\docType{data}
\title{Simulated data from Cox's regression model}
\description{Simulated data from Cox's regression model. A data frame with 100 observations and 402 variables. The included variables are \cr
\code{V1} A numeric vector of responses for right censored data. \cr
\code{V2} A numeric vector of status indicator: \code{0}=right censored, \code{1}=event at time \code{V1}. \cr
\code{V3}-\code{V402} 400 vectors of covariates.
}
\usage{data(simCox)}
\format{
A data frame of simulated data from Cox's regression model with 100 observations and 402 variables.}
\examples{
data(simCox)
Y<-as.matrix(simCox[,1])
event<-as.matrix(simCox[,2])
X<-as.matrix(simCox[,-(1:2)])
}
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.