blob_id stringlengths 40 40 | directory_id stringlengths 40 40 | path stringlengths 2 327 | content_id stringlengths 40 40 | detected_licenses listlengths 0 91 | license_type stringclasses 2 values | repo_name stringlengths 5 134 | snapshot_id stringlengths 40 40 | revision_id stringlengths 40 40 | branch_name stringclasses 46 values | visit_date timestamp[us]date 2016-08-02 22:44:29 2023-09-06 08:39:28 | revision_date timestamp[us]date 1977-08-08 00:00:00 2023-09-05 12:13:49 | committer_date timestamp[us]date 1977-08-08 00:00:00 2023-09-05 12:13:49 | github_id int64 19.4k 671M ⌀ | star_events_count int64 0 40k | fork_events_count int64 0 32.4k | gha_license_id stringclasses 14 values | gha_event_created_at timestamp[us]date 2012-06-21 16:39:19 2023-09-14 21:52:42 ⌀ | gha_created_at timestamp[us]date 2008-05-25 01:21:32 2023-06-28 13:19:12 ⌀ | gha_language stringclasses 60 values | src_encoding stringclasses 24 values | language stringclasses 1 value | is_vendor bool 2 classes | is_generated bool 2 classes | length_bytes int64 7 9.18M | extension stringclasses 20 values | filename stringlengths 1 141 | content stringlengths 7 9.18M |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
b3d6be6acaeba488251dadd4ff6cb4a905763b87 | 074dc1f44a74e4caac244716c89a3d83da05b655 | /man/workaholic.Rd | a693e7331e52217e867a8112e9922b562c9fd7d7 | [] | no_license | paytonjjones/networktree | 9356bbfc6eb9547a99812a51cf4f5b39b4751f95 | 9479f95a1cbe43bd41aa3098bac372fcc337b64d | refs/heads/main | 2022-09-22T06:13:48.472298 | 2022-09-05T18:40:46 | 2022-09-05T18:40:46 | 155,432,789 | 13 | 1 | null | null | null | null | UTF-8 | R | false | true | 1,344 | rd | workaholic.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/datadocumentation.R
\docType{data}
\name{workaholic}
\alias{workaholic}
\title{Workaholism and Psychiatric Symptoms}
\format{
a dataframe. Columns represent symptoms and rows represent individuals
}
\usage{
workaholic
}
\description{
This dataset includes 16,426 workers who were assessed on symptoms
of psychiatric disorders (ADHD, OCD, anxiety, depression) and workaholism.
}
\details{
Scales: Adult ADHD Self-Report Scale, Obsession-Compulsive
Inventory-Revised, Hospital Anxiety and Depression Scale, and the Bergen
Work Addiction Scale.
Also includes demographics such as age, gender, work status, position, sector,
annual income.
The dataset is publicly available at https://doi.org/10.1371/journal.pone.0152978
and can be cited as:
Andreassen, C. S., Griffiths, M. D., Sinha, R., Hetland, J.,
& Pallesen, S. (2016). The relationships between workaholism
and symptoms of psychiatric disorders: a large-scale
cross-sectional study. PloS One, 11, e0152978.
}
\examples{
head(workaholic)
\donttest{
## Example networktree with OCI-R scale
data(workaholic)
nodeVars <- paste("OCIR",1:18,sep="")
splitVars <- c("Workaholism_diagnosis","Gender")
myTree<-networktree(workaholic[,nodeVars], workaholic[,splitVars])
myTree
plot(myTree)
}
}
\keyword{datasets}
|
92920b18c60e20ea9876b3ebb9d0c1c8d028a29d | 25b23c8b1ef55d90e59bea00ef10803650b52548 | /R/dekoduj.R | 71b87eaa959a872b6e71ba496c7ba9d3d3b2452d | [
"CC-BY-4.0",
"LicenseRef-scancode-unknown-license-reference"
] | permissive | mbojan/adr | e882338579a50ea333f3a902953df45c329c14b0 | 627a4e944b49e3c737686f44f6bf8792e353ed29 | refs/heads/master | 2022-03-11T15:29:20.380925 | 2022-02-19T17:38:46 | 2022-02-19T17:38:46 | 83,596,019 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 1,990 | r | dekoduj.R | #' Koduj lub dekoduj napisy z UTF-8 do Windows lub vice versa
#'
#' Code-page conversion UTF-8 <-> CP1250. Useful when interacting with
#' databases such as SQLite, where queries have to be sent in UTF-8 and
#' results come back in the same encoding.
#'
#' @param x data frame or character vector to (de)code, see Details
#' @param from_encoding,to_encoding source and target code page
#' @param ... other arguments passed to \code{\link{iconv}}
#'
#' The function \code{dekoduj} takes a data frame and returns it with all
#' character columns re-encoded from UTF-8 to the CP1250 encoding
#' (Polish Windows).
#'
#' The function \code{koduj} takes a character vector in CP1250 encoding and
#' returns it converted to UTF-8.
#'
#' @encoding UTF-8
#' @export
dekoduj <- function(x, ...) UseMethod("dekoduj")
#' @method dekoduj data.frame
#' @rdname dekoduj
#' @export
dekoduj.data.frame <- function(x, from_encoding = "UTF-8", to_encoding = "cp1250", ...)
{
  # Re-encode the column names; iconv() returns NA for any name that cannot
  # be represented in the target encoding.
  my_names <- iconv(names(x), from = from_encoding, to = to_encoding, ...)
  # Replace the names only when every name converted cleanly; otherwise keep
  # the original names untouched. (The original code had a dead bare
  # `names(x)` expression in the NA branch.)
  if (!any(is.na(my_names))) {
    names(x) <- my_names
  }
  # Identify character columns robustly: is.character() returns one logical
  # per column, whereas class() can return more than one value for some
  # columns (e.g. POSIXct), which would break a `== "character"` comparison.
  x_cols <- names(x)[vapply(x, is.character, logical(1))]
  # Re-encode every string value in the character columns.
  x[x_cols] <- lapply(x[x_cols], iconv, from = from_encoding, to = to_encoding, ...)
  x
}
#' @method dekoduj default
#' @rdname dekoduj
#' @export
dekoduj.default <- function(x, from_encoding="UTF-8", to_encoding="CP1250", ...) {
  # Plain vectors: delegate the conversion straight to iconv().
  converted <- iconv(x, from = from_encoding, to = to_encoding, ...)
  converted
}
#' @export
#' @rdname dekoduj
koduj <- function(x, from_encoding="", to_encoding="UTF-8", ...) {
  # from_encoding = "" means "the current locale encoding" (see ?iconv).
  result <- iconv(x, from = from_encoding, to = to_encoding, ...)
  result
}
|
be96837571fa87aa9c247873e8bd77d25f0f1623 | 39c5f927d33c04498622ee5d78696ab32e6d8f35 | /Sentimental Analysis Amazon Book Reviews/Code/Sentimental_Analysis.R | 5fac1aba21a0665bb7fc508715c53e9ec8eafa44 | [] | no_license | weichung96/Sentimental-Analysis-Amazon-Book-Reviews | 8fcdea457c37ed40749202b22ad576af048d3f0f | fed4c3444ef423f44ca8d9c1a018613b1232ecf7 | refs/heads/master | 2020-08-12T15:58:20.477969 | 2018-03-01T02:47:49 | 2018-03-01T02:47:49 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 3,704 | r | Sentimental_Analysis.R | library(tm)
library(NLP)
#install.packages("qdap")
library(qdap)
library(stringr)
# NOTE(review): hard-coded working directory makes this script non-portable;
# consider passing file paths explicitly instead of setwd().
setwd("F:/INFO7390-ADS/Final_Project")
# ---- Positive lexicon: read, stem and de-duplicate the opinion word list ----
#Positive Lexicons
positive_lexicons <- read.csv(file="positive-words.csv", header=TRUE, sep= ',')
positive_lexicons <- Corpus(VectorSource(positive_lexicons)) #Converting to corpus
positive_lexicons <- tm_map(positive_lexicons,stemDocument) #Stemming
positive_lexicons <- data.frame(text=sapply(positive_lexicons, `[[`, "content"), stringsAsFactors=FALSE) #Converting to dataframe
positive_lexicons <- unique(positive_lexicons) #Fetching only unique positive words
positive_lexicons<-data.frame(Words=unlist(positive_lexicons)) #Unlisting
# ---- Negative lexicon: same pipeline as for the positive one ----
#Negative Lexicons
negative_lexicons <- read.csv(file="negative-words.csv", header=TRUE, sep= ',')
negative_lexicons <- Corpus(VectorSource(negative_lexicons)) #Converting to corpus
negative_lexicons <- tm_map(negative_lexicons,stemDocument) #Stemming
negative_lexicons <- data.frame(text=sapply(negative_lexicons, `[[`, "content"), stringsAsFactors=FALSE) #Converting to dataframe
negative_lexicons <- unique(negative_lexicons) #Fetching only unique negative words
negative_lexicons<-data.frame(Words=unlist(negative_lexicons)) #Unlisting
# Result data frame: one row per review with counts, percentages and label.
#Result Dataframe to store review,sentimental score
df <- data.frame("Review"=character(),"Positive Word Count"=integer(),"Negative Word Count"=integer(),"Total Word Count"=integer(),"Positivity Percentage"=integer(),"Negativity Percentage"=integer(),"Result"=character(),stringsAsFactors = FALSE)
#Review Data
review <- read.csv(file="Reviews.csv", header=TRUE, sep= ',')
NROW(review) #Number of rows of dataframe 'review'
# Lexicon-based sentiment scoring: for every review, clean and stem the text,
# count matches against the positive/negative lexicons, and label the review
# by whichever percentage is higher (ties fall through to "Negative").
# seq_len() replaces 1:NROW() so both loops are safe on empty inputs
# (1:0 would iterate over c(1, 0)).
for(k in seq_len(NROW(review)))
{
  review_data=review[k,] #To extract data from each row and all columns
  review_original<-review_data
  # Store the raw review text; the remaining columns are filled at the end.
  # NOTE(review): growing df row-by-row is O(n^2); preallocate for large inputs.
  df[nrow(df)+1,1]<-c(toString(review_original))
  review_data = tolower(review_data) #Making it lower case
  review_data = gsub('[[:punct:]]', '', review_data) #Removing punctuation
  review_data = gsub("[[:digit:]]", "", review_data) #Removing numbers
  review_data <- Corpus(VectorSource(review_data)) #Converting into corpus
  review_data = tm_map(review_data, removeWords, stopwords('english')) #Removing stop words
  review_data=tm_map(review_data,stemDocument) #Stemming
  review_data <- data.frame(text=sapply(review_data, `[[`, "content"), stringsAsFactors=FALSE)#Converting corpus to dataframe
  review_data<-str_trim(clean(review_data)) #To remove extra white spaces
  review_data<- as.String(review_data)
  review_words <- strsplit(review_data, " ") #Splitting a sentence into words
  review_words<-data.frame(Words=unlist(review_words)) #Unlisting
  review_words<-as.matrix(review_words,nrow=NROW(review_words),ncol=NCOL(review_words)) #Matrix
  positive_count=0
  negative_count=0
  total_word_count=NROW(review_words)
  # Count lexicon hits; seq_len() skips the loop entirely when the cleaned
  # review contains no words.
  for(i in seq_len(NROW(review_words))) #Each word of that review
  {
    if(review_words[i] %in% positive_lexicons$Words)
      positive_count=positive_count+1
    else if (review_words[i] %in% negative_lexicons$Words)
      negative_count=negative_count+1
  }
  # Guard against division by zero when a review cleans down to nothing.
  if (total_word_count > 0) {
    positivity_percentage=(positive_count/total_word_count)*100
    negativity_percentage=(negative_count/total_word_count)*100
  } else {
    positivity_percentage=0
    negativity_percentage=0
  }
  result=""
  if(positivity_percentage>negativity_percentage)
  {result='Positive'
  }else
  {result='Negative'}
  # NOTE(review): c() mixes numbers with the character label, so the counts
  # end up stored as strings in df; keep that in mind when reading the CSV.
  df[nrow(df),2:7]<- c(positive_count,negative_count,total_word_count,positivity_percentage,negativity_percentage,result)
}
#Writing to csv
write.csv(df,"Sentimental_Analysis_Result.csv")
|
6477d15c25a2f854330b2aab0401681b0ea9fe3f | fbaac272c79f719d12d7af69682a54de2762608c | /Seurat_Integration/mye.R | fb7d197d3a9ede85ba6be9c441dc2040a26b3574 | [] | no_license | Genome-Institute-Of-Singapore-CTSO5/Onco-fetal-reprogramming-HCC | 820187498735facc025ef5a6b5b2ed3a4e9c0a4f | 876e7e1a83f9d1656175001a300a6bf3f23ec69e | refs/heads/master | 2022-12-16T23:55:58.882173 | 2020-09-29T08:36:06 | 2020-09-29T08:36:06 | 297,184,903 | 1 | 0 | null | null | null | null | UTF-8 | R | false | false | 12,752 | r | mye.R |
##########----------------READY? START!--------------################
# Myeloid-cell subset analysis (Seurat). Two alternative entry points are kept
# here: reload a previously saved subset, or re-derive it from the integrated
# reference object (the second assignment overwrites the first).
mye <- readRDS('/mnt/cellrangercount/HCC/HCC_fetal/data/mye.RData')
mye <- subset(reference.integrated, idents = c(37,18,23,13,30))
mye
DimPlot(mye, reduction = "umap", label = TRUE, pt.size = 2) + NoLegend()
##########----------------Usual Practice--------------################
# Standard Seurat workflow: PCA -> neighbour graph -> UMAP -> clustering.
mye <- RunPCA(mye, features = VariableFeatures(object = mye), npcs = 70)
ElbowPlot(mye, ndims = 50)
mye <- FindNeighbors(mye, dims= 1:15)
mye <- RunUMAP(mye, dims = 1:15, min.dist=0.01, spread=0.3)
mye <- FindClusters(mye, resolution = 0.6)
DimPlot(mye, reduction = "umap", pt.size = 0.8,
        label.size = 6, group.by = c('condition'))+ NoLegend()
saveRDS(mye, file = '/mnt/justinedata/HCC_fetal/data/mye.RData')
#####################################################################################
#############---------------------PLOTS-------------------################
cond = c('seurat_clusters', 'NTF', 'PatientID')
DimPlot(mye, reduction = "umap", pt.size=1, label=TRUE) + NoLegend()
DimPlot(mye, reduction = "umap", group.by = "NTF", pt.size=1, cols = c("#1089eb", "#00ccb1","#a62626"))
# Module scores: per-cell median expression of dendritic-cell marker panels.
mye@meta.data['DC1'] <- apply(FetchData(mye, c('CADM1', 'XCR1', 'CLEC9A', 'CD74')),1, median)
mye@meta.data['DC2'] <- apply(FetchData(mye, c('HLA-DRA', 'CD1C', 'FCER1A', 'HLA-DPB1', 'CLEC10A')),1, median)
mye@meta.data['pDC'] <- apply(FetchData(mye, c('IRF7', 'LILRA4', 'IL3RA', 'IGKC', 'BCL11A', 'GZMB')),1, median)
FeaturePlot(mye, min.cutoff = 0, pt.size = 0.1, blend.threshold = 0.01, cols = magma, ncol = 3,
            features = c('S100A8','FOLR2','SPP1','MT1G','DC1','DC2','pDC'))
FeaturePlot(mye, min.cutoff = 0, pt.size = 0., blend.threshold = 0.01, cols = magma, ncol = 3,
            features = c('FOLR2','HES1'))
# NOTE(review): the two calls below expect a gene name bound to `i`
# (e.g. i <- 'FOLR2'); they look copied out of the plotting loops further down.
FeaturePlot(mye, min.cutoff = 0, pt.size = 0.5, features = i,
            blend.threshold = 0.01, cols = magma)
# Fix: the original line ended with a stray extra ')' that made the file
# unparseable.
VlnPlot(mye, i, group.by = "seurat_clusters", pt.size = 0) + NoLegend()
#####################################################################################
#############---------------------Renaming columns-------------------################
# Build a single 'condition' label per cell by coalescing three metadata
# columns (fetal age, tumor/adjacent-normal status, original identity).
# NOTE(review): `df` is assumed to be a cell-metadata data frame prepared
# elsewhere -- confirm before re-running this section.
fetalage <-df[df$fetal.age %in% c("Fw14","Fw16","Fw18","Fw21"),'fetal.age',drop = FALSE]
NT <- df[df$NormalvsTumor %in% c("Tumor","Adj Normal"),'NormalvsTumor',drop = FALSE]
disease <- df[df$orig.ident %in% c("Normal","Cirrhotic"),'orig.ident',drop = FALSE]
df1 <- df %>% mutate(mycol = coalesce(fetal.age, NormalvsTumor, orig.ident)) %>%
  select( mycol)
rownames(df1) <- rownames(df)
unique(df1$mycol)
mye@meta.data['condition'] <-df1
# Collapse the individual fetal gestation weeks into one "Fetal" label.
mye@meta.data$condition[mye@meta.data$condition %in% c("Fw14","Fw16","Fw18","Fw21")] <- "Fetal"
# Drop cells from normal liver before plotting.
mye <- mye[,!mye@meta.data$condition == 'Normal']
DimPlot(mye, reduction = "umap", group.by = "condition", pt.size=1)
##################################################################################
#############---------------------DE Genes-------------------################
# Differential expression per cluster with Seurat.
# all against all
mye.markers <- FindAllMarkers(mye, only.pos = TRUE, min.pct = 0.25, logfc.threshold = 0.25)
# differential genes between cluster
cluster7.markers <- FindMarkers(mye, ident.1 = 7, ident.2 = 4, min.pct = 0.25)
head(cluster7.markers, n = 5)
#saving top100genes percluster
# Reshape the long cluster/gene table into one column of ranked genes per
# cluster, then keep the first 100 rows of each column.
df <- mye.markers[,c('cluster', 'gene')]
rankgenes <- as.data.frame( df %>%
                              group_by(cluster) %>% # group by everything other than the value column.
                              mutate(row_id=1:n()) %>% ungroup() %>% # build group index
                              spread(key=cluster, value=gene) %>% # spread
                              select(-row_id)) # drop the index
write.csv(rankgenes[c(1:100),],file='top100genes_myel.csv')
#heatmaps
# Top 5 markers per cluster by average log fold change.
top5 <- mye.markers %>% group_by(cluster) %>% top_n(n = 5, wt = avg_logFC)
DoHeatmap(mye, features = top5$gene) + NoLegend()
# `something` is a custom palette defined in the COLORS section below.
DoHeatmap(mye, features = top5$gene) + scale_fill_gradientn(colors = something)
#####################################################################################
###########----------------COLORS--------------#################
# Palette scratchpad. Note that `magma` is assigned four times below; only the
# final assignment is the palette actually used by later plots.
#--> Previous Colors
#condition color = c("#1089eb", "#00ccb1", "#b52ec9", "#FF7F00", "#14690a", "#a62626")
# Fix: the line below was missing its leading '#', which made the script
# unparseable ("NTF color" is not a valid assignment target):
#NTF color = c("#1089eb", "#00ccb1", "#a62626")
# c(alpha("#1089eb", 0.5), alpha("#00ccb1",0.5), alpha("#a62626",0.5)))
#condition color = c("#1089eb", "#00ccb1", "#b52ec9", "#FF7F00", "#14690a", "#a62626")
magma = colorRampPalette(c('#050238','#59139e','#fa7916', '#fffb82', '#fffdbd'), alpha = TRUE)(8)
viridis = colorRampPalette(c('#fde725','#35b778','#3e4a89','#430154'), alpha = TRUE)(8)
magma = c("#050238FF", "#280963FF", "#4D108FFF", "#863077FF", "#CC5B3CFF", "#FA8B2DFF", "#FCC475FF", "#FFFDBD1A")
magma = c("#0502381A", "#280963FF", "#4D108FFF", "#863077FF", "#CC5B3CFF", "#FA8B2DFF", "#FCC475FF", "#FFFDBDFF")
magma = c("#0502381A", "#340B72FF", "#6F218AFF", "#CB5B3CFF", "#FB9E34FF", "#FEE872FF", "#FFFB9BFF", "#FFFDBDFF")
something = colorRampPalette(c('#000000','#000000','#33313b','#333333','#640E27','#900c3f','#ff8300','#ffd369', '#feffdb'), alpha = TRUE)(8)
NTF_colors = c('#f5bb06','#81007f','#e6194b')
# Load the "scales" package
require(scales)
# Create vector with levels of object@ident
identities <- unique(mye$PatientID)
# Create vector of default ggplot2 colors
my_color_palette <- hue_pal()(5)
################################################################
##########-----------Proportion barplot---------################
# --> estimate PDF size
# --> save on export
# conditions --> c('seurat_clusters', 'NTF', 'PatientID')
library(dplyr)
library(tidyr)
# Cluster x patient composition: counts spread to a wide table.
meta <- mye@meta.data[,colnames(mye@meta.data) %in% c('seurat_clusters','PatientID')]
head(meta)
df <- meta %>% group_by(seurat_clusters,PatientID) %>% summarise(n = n()) %>% spread(PatientID, n, fill = 0)
df <- as.data.frame(df)
rownames(df) <- df$seurat_clusters
df <- df[, -which(names(df) %in% c("seurat_clusters"))]
# NOTE(review): dividing a data frame by colSums() recycles the column sums
# down each column (column-major), not per column -- confirm this is the
# intended normalisation before relying on the percentages.
df <- df/colSums(df)*100
df <- df/rowSums(df)*100
par(mfrow=c(1, 1), mar=c(5, 5, 4, 8))
barplot(as.matrix(t(df)), horiz=TRUE, col = my_color_palette, legend = colnames(df), args.legend = list(x = "topright", bty = "n", inset = c(-0.25, 0)), border = FALSE)
head(mye@meta.data)
# Same composition plot, this time split by NTF status.
meta <- mye@meta.data[,colnames(mye@meta.data) %in% c('seurat_clusters','NTF')]
head(meta)
df <- meta %>% group_by(seurat_clusters,NTF) %>% summarise(n = n()) %>% spread(NTF, n, fill = 0)
df <- as.data.frame(df)
rownames(df) <- df$seurat_clusters
df <- df[, -which(names(df) %in% c("seurat_clusters"))]
df <- df/colSums(df)*100
df <- df/rowSums(df)*100
par(mfrow=c(1, 1), mar=c(3, 5, 1, 7))
barplot(as.matrix(t(df)), horiz=TRUE, col = NTF_colors , legend = colnames(df),
        args.legend = list(x = "topright", bty = "n", inset = c(-0.23, 0.05)), border = FALSE)
################################################################
# Dangling line: selects cluster/tech metadata but is never used afterwards.
meta <- mye@meta.data[,colnames(mye@meta.data) %in% c('seurat_clusters','tech')]
##########----------------Saving plots--------------################
#setwd("/mnt/justinedata/HCC_fetal/figures/endo/")
# estimate pdf size on plots
# change the width and height accordingly
# Export the cluster UMAP, then per-gene expression and violin PDFs.
pdf(paste0("/mnt/justinedata/HCC_fetal/figures/mye/umapclsuters_11.pdf"), width=9, height=5.7)
DimPlot(mye, reduction = "umap", group.by = 'seurat_clusters', pt.size=0.5)
dev.off()
genes = c('S100A8','FOLR2','SPP1','MT1G','CD163','DC1','DC2','pDC','C1QB')
print(paste0("saving expression plot --> /mnt/justinedata/HCC_fetal/figures/mye/"))
for (i in genes){
  print(i)
  pdf(paste0("/mnt/justinedata/HCC_fetal/figures/mye/expr_", toString(i),".pdf"), width=7, height=5)
  print(FeaturePlot(mye, min.cutoff = 0, pt.size = 0.5, features = i,
                    blend.threshold = 0.01, cols = magma))
  dev.off()
}
print(paste0("saving violin plot --> /mnt/justinedata/HCC_fetal/figures/mye/"))
for (i in genes){
  print(i)
  pdf(paste0("/mnt/justinedata/HCC_fetal/figures/mye/vln_", toString(i),".pdf"), width=7, height=5)
  print(VlnPlot(mye, i, group.by = "seurat_clusters", pt.size = 0) + NoLegend())
  dev.off()
}
# NOTE(review): FindAllMarkers is run on `endo` here, not `mye` -- looks like
# a copy/paste from the endothelial script; confirm which object is intended.
mye.markers <- FindAllMarkers(endo, only.pos = TRUE, min.pct = 0.25, logfc.threshold = 0.25)
print(paste0("saving heatmap --> /mnt/justinedata/HCC_fetal/figures/mye/"))
top5 <- mye.markers %>% group_by(cluster) %>% top_n(n = 5, wt = avg_logFC)
pdf(paste0("/mnt/justinedata/HCC_fetal/figures/mye/heatmap.pdf"), width=11, height=8)
print(DoHeatmap(mye, features = top5$gene))
dev.off()
# Pull PatientID labels over from the whole-atlas object by cell barcode.
mye@meta.data["PatientID"] <- wholeatlas@meta.data[rownames(wholeatlas@meta.data) %in% rownames(mye@meta.data), "PatientID", drop = FALSE]
pdf(paste0("/mnt/justinedata/HCC_fetal/figures/mye/PatientID.pdf"), width=7, height=5)
DimPlot(mye, reduction = "umap", group.by = 'PatientID', pt.size=0.5, cols = my_color_palette)
dev.off()
pdf(paste0("/mnt/justinedata/HCC_fetal/figures/mye/NTF.pdf"), width=9, height=5.7)
DimPlot(mye, reduction = "umap", group.by = 'NTF',cols = c('#81007f', '#f5bb06', '#e6194b'), pt.size=0.5)
dev.off()
################################################################
##########----------------Saving plots--------------################
#setwd("/mnt/justinedata/HCC_fetal_norm_cirr/figures/endo/")
# estimate pdf size on plots
# change the width and height accordingly
# Same export routine as above, for the fetal/normal/cirrhotic comparison run.
pdf(paste0("/mnt/justinedata/HCC_fetal_norm_cirr/figures/mye/umapcluster.pdf"), width=9, height=5.7)
DimPlot(mye, reduction = "umap", group.by = 'seurat_clusters', pt.size=0.5)
dev.off()
pdf(paste0("/mnt/justinedata/HCC_fetal_norm_cirr/figures/mye/umapcondition.pdf"), width=9, height=5.7)
DimPlot(mye, reduction = "umap", group.by = "condition", pt.size=0.5, cols = c('#f5bb06', "#f45905", '#81007f', "#9aceff",'#e6194b'))
dev.off()
genes = c('S100A8','FOLR2','SPP1','MT1G','CD163','DC1','DC2','pDC','C1QB')
print(paste0("saving expression plot --> /mnt/justinedata/HCC_fetal_norm_cirr/figures/mye/"))
for (i in genes){
  print(i)
  pdf(paste0("/mnt/justinedata/HCC_fetal_norm_cirr/figures/mye/expr_", toString(i),".pdf"), width=7, height=5)
  print(FeaturePlot(mye, min.cutoff = 0, pt.size = 0.5, features = i,
                    blend.threshold = 0.01, cols = magma))
  dev.off()
}
print(paste0("saving violin plot --> /mnt/justinedata/HCC_fetal_norm_cirr/figures/mye/"))
for (i in genes){
  print(i)
  pdf(paste0("/mnt/justinedata/HCC_fetal_norm_cirr/figures/mye/vln_", toString(i),".pdf"), width=7, height=5)
  print(VlnPlot(mye, i, group.by = "seurat_clusters", pt.size = 0) + NoLegend())
  dev.off()
}
# NOTE(review): this violin-plot loop is an exact duplicate of the one above
# and overwrites the same files.
print(paste0("saving violin plot --> /mnt/justinedata/HCC_fetal_norm_cirr/figures/mye/"))
for (i in genes){
  print(i)
  pdf(paste0("/mnt/justinedata/HCC_fetal_norm_cirr/figures/mye/vln_", toString(i),".pdf"), width=7, height=5)
  print(VlnPlot(mye, i, group.by = "seurat_clusters", pt.size = 0) + NoLegend())
  dev.off()
}
# TAM clusters only (3,4,5,8,9), grouped by condition.
genes = c('FOLR2','SPP1')
print(paste0("saving violin plot --> /mnt/justinedata/HCC_fetal_norm_cirr/figures/mye/"))
for (i in genes){
  print(i)
  pdf(paste0("/mnt/justinedata/HCC_fetal_norm_cirr/figures/mye/vln_", toString(i),"_TAMScond.pdf"), width=7, height=5)
  print(VlnPlot(mye[,mye$seurat_clusters %in% c(3,4,5,8,9) ], i, group.by = "condition", pt.size = 0) + NoLegend())
  dev.off()
}
mye.markers <- FindAllMarkers(mye, only.pos = TRUE, min.pct = 0.25, logfc.threshold = 0.25)
print(paste0("saving heatmap --> /mnt/justinedata/HCC_fetal_norm_cirr/figures/mye/"))
top5 <- mye.markers %>% group_by(cluster) %>% top_n(n = 5, wt = avg_logFC)
pdf(paste0("/mnt/justinedata/HCC_fetal_norm_cirr/figures/mye/heatmap.pdf"), width=11, height=10)
print(DoHeatmap(mye, features = top5$gene))
dev.off()
##########-----------Proportion barplot---------################
# --> estimate PDF size
# --> save on export
# conditions --> c('seurat_clusters', 'NTF', 'PatientID')
# Cluster x condition composition barplot, then CD209 expression checks.
meta <- mye@meta.data[,colnames(mye@meta.data) %in% c('seurat_clusters','condition')]
head(meta)
df <- meta %>% group_by(seurat_clusters,condition) %>% summarise(n = n()) %>% spread(condition, n, fill = 0)
df <- as.data.frame(df)
rownames(df) <- df$seurat_clusters
df <- df[, -which(names(df) %in% c("seurat_clusters"))]
# NOTE(review): as in the earlier barplot section, df/colSums(df) recycles the
# column sums down the rows rather than normalising per column -- confirm.
df <- df/colSums(df)*100
df <- df/rowSums(df)*100
par(mfrow=c(1, 1), mar=c(5, 5, 4, 8))
# Fix: the original call passed both `col =` and a bogus duplicate `cols =`
# argument; barplot() has no `cols` parameter, so the duplicate was dropped.
barplot(as.matrix(t(df)), horiz=TRUE, col = my_color_palette, legend = colnames(df),
        args.legend = list(x = "topright", bty = "n", inset = c(-0.25, 0)), border = FALSE)
################################################################
VlnPlot(mye, c('CD209'), group.by = "condition", pt.size = 0.5) + NoLegend()
FeaturePlot(mye, min.cutoff = 0, pt.size = 0.5, features = 'CD209',
            blend.threshold = 0.01, cols = magma)
table(mye[,mye$seurat_clusters %in% c(5,4,3)]$condition)
|
8788e4e7fb23816959364755b52aa5dfc9c52a87 | 6d6c66f9e3dd89d229da1bf2cb188731fb4b8f57 | /classification5.R | 7591a551236cd931c1c550bf270c0205f1f338c6 | [] | no_license | srhumir/RF-classification | 3f14008730bfbf1d9dddebcffbe4e73247c2920c | aabb0433bb79f46c3bd50f4b14ac77f5a8e305ff | refs/heads/master | 2020-12-31T04:55:49.003386 | 2016-05-16T21:33:18 | 2016-05-16T21:33:18 | 57,036,602 | 2 | 3 | null | null | null | null | UTF-8 | R | false | false | 4,018 | r | classification5.R | library(caret)
library(randomForest)
library(e1071)
library(raster)
library(maptools)
library(RStoolbox)
library(rgeos)
library(snow)
library(glcm)
library(parallel)
#loading MNF Image (brick would load 0 as NA)
# Interactively pick the MNF raster stack, the mask layer and the training
# polygons.
MNF <- stack(file.choose())
#MNF <- dropLayer(MNF, 1)
# Name the bands MNF.1 ... MNF.n.
names(MNF) <- sapply(1:dim(MNF)[3], function(i) paste("MNF", i, sep="."))
mask <- raster(file.choose())
# (mask-1) zeroes out masked pixels -- presumably mask values are 1/2; confirm.
MNF <- MNF * (mask-1)
#load trainig shp
xx <- readShapePoly( file.choose())
projection(xx) <- projection(MNF)
#compute texture
## Calculate the number of cores (leave one free for the OS)
no_cores <- detectCores() - 1
##Create a list of first four MNF bands
rasterlist <- list(raster(MNF, layer = 1), raster(MNF, layer = 2),
                   raster(MNF, layer = 3), raster(MNF, layer = 4))
## Initiate cluster (Sys.time() calls are crude timing markers)
Sys.time()
cl <- makeCluster(no_cores)
##Send variable to cluster
## NOTE(review): "texture" is exported but not defined anywhere in this file.
clusterExport(cl,c("MNF", "texture", "rasterlist"))
##send packages to cluster
clusterEvalQ(cl, library(raster))
clusterEvalQ(cl, library(glcm))
Sys.time()
##compute texture: GLCM texture metrics, one band per worker
texlist <- parLapply(cl, rasterlist, glcm)
Sys.time()
stopCluster(cl)
#
#stack three MNF bands and their textures
toClass <- stack(c(rasterlist, texlist))
Sys.time()
#mask
toClass <- toClass * (mask-1)
#extract values in trainig shapefile
# Build the training table: pixel values under the training polygons, with a
# Class column joined in from the polygon attribute table.
values <- extract(toClass, xx, df = TRUE)
#take out the attributr table of the trainig and assigne ID to polygons
classes <- data.frame(ID = 1:length(xx@data$CLASS_NAME), xx@data)
#Assign class to extracted values
values <- data.frame(Class = classes$CLASS_NAME[values$ID], values)
##when load from disk
#values <- read.csv(file.choose())
# values <- values[,-1]
#drop rows with all zero MNF
values <- values[values[names(values)[3]] > 0 & !is.na(values[names(values)[3]]),]
##a backup
valuesBackup <- values
##No need for ID
values <- values[,-2]
## keep class seperate for speeding up the training and ...
Class <- values$Class
#check fot too many NA: count NA cells per band of the stack
NAs <- sapply(1:dim(toClass)[3],function(i) sum(is.na(getValues(subset(toClass,i)))))
#there are too many na's in bands 11, 19,27. So I omit them
tooNA <- which(NAs > 19000)
toClass2 <- dropLayer(toClass, tooNA)
values <- values[,-(tooNA+1)]
#convert inf to NA for the model to work
# Vectorised replacement of +/-Inf with NA; the original nested cell-by-cell
# loop was O(rows x cols). is.infinite() returns FALSE for non-numeric
# columns, so the factor Class column passes through unchanged, exactly as
# the per-cell check did.
values[] <- lapply(values, function(col) replace(col, is.infinite(col), NA))
#fill NAs
##model: fit a bagged-tree imputation model on the whole table
Nafill <- preProcess(values, method = "bagImpute")
##fill
valuesNAfilled <- predict(Nafill, values)
##check: should be 0 after imputation
sum(is.na(valuesNAfilled))
#ommiting too correlated data
##compute corrolation matrix (column 1 is the Class factor, hence [,-1])
corMatrix = cor(valuesNAfilled[,-1])
##get highly coorolated: per column, indices of predictors with |r| >= 0.95
highlyCorrelated_ <- apply(corMatrix,2, function(x) which(abs(x) >= .95))
##get values to keep. did not use the caret package
##since I like to keep first three bands any way
## (keepIndex is still empty inside the sapply, so this reduces to taking the
## first member of each correlated group and de-duplicating)
keepIndex <- integer()
keepIndex <- unique(sapply(highlyCorrelated_, function(x) c(keepIndex, x[1])))
##drop highly corrolated values (the +1 shifts past the Class column)
valuesNAfilled <- valuesNAfilled[, (keepIndex+1)]
##drop highly coorolated bands from raster as well
dropIndex <- c(1:dim(toClass2)[3])[-keepIndex]
toClass2 <- dropLayer(toClass2, dropIndex)
#choose trainig and testing
#set.seed(1235)
##index
#intrain <- createDataPartition(values$Class, p=.8, list=F)
##trainig
#training <- valuesNAfilled[intrain, ]
#ClassTrainig <- Class[intrain]
#names(training)
##testing
#testing <- valuesNAfilled[-intrain,]
#ClassTesting <- Class[-intrain]
#defing train control
# define training control: 10-fold cross-validation
train_control <- trainControl(method="cv", number=10)
# Train the random forest on the full table via caret (the commented-out
# hold-out split above is the train/test variant of this workflow).
system.time(
modelRF <- train(valuesNAfilled,droplevels(Class), trControl=train_control,
                 method="rf")
)
#predict on testing
#pred <- predict(modelRF, testing)
##check the accuracy on testing
#confusionMatrix(pred, ClassTesting)
#predict on raster: classify every pixel of the masked stack, write a GeoTIFF
system.time(
predraster <- predict(toClass2, modelRF,
                      filename = "C:\\Users\\Haniyeh\\Hoa_Binh\\NDBaI\\classification\\classification-soil-urbanRoof2.tif",
                      na.rm=T,inf.rm = TRUE)
)
|
6fc04e4a64e60e12d1832735d0d960e597745662 | 893a1e1ede5640080d6cc49b449c0863ff407642 | /Whats Cooking/second.R | 0f1b5404527c5254b599c0e60413daebd505e673 | [] | no_license | osoblanco/Whats-Cooking-Kaggle | 660a5903c5b22f45664bd79121b78fbf4cee92f3 | f55260157b26ea57d3571e7807efb4f8513cd62a | refs/heads/master | 2021-05-30T05:18:36.188930 | 2015-12-20T17:06:06 | 2015-12-20T17:06:06 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 9,465 | r | second.R | #resart the project once more
library(nnet)
library(class)
library(caret)
library(caTools)
library(jsonlite)
library(tm)
library(rpart)
library(rpart.plot)
library(data.table)
library(Matrix)
library(e1071)
library(stringdist)
library(kernlab)
library(nnet)
library(RCurl)
#library(RWeka)
library(koRpus)
library(randomForest)
library(SnowballC)
library(party)
library(neuralnet)
library(randomForestSRC)
#TRY
#preprocess to different columns of ingredients
# Load Kaggle "What's Cooking" data and normalise the ingredient strings:
# lower-case, hyphens -> underscores, strip everything outside [a-z0-9_ ].
train <- fromJSON('train.json', flatten = TRUE)
test <- fromJSON('test.json', flatten = TRUE)
table(train$cuisine)
train$ingredients <- lapply(train$ingredients, FUN=tolower)
test$ingredients <- lapply(test$ingredients, FUN=tolower)
train$ingredients <- lapply(train$ingredients, FUN=function(x) gsub("-", "_", x))
test$ingredients <- lapply(test$ingredients, FUN=function(x) gsub("-", "_", x))
train$ingredients <- lapply(train$ingredients, FUN=function(x) gsub("[^a-z0-9_ ]", "", x))
test$ingredients <- lapply(test$ingredients, FUN=function(x) gsub("[^a-z0-9_ ]", "", x))
#use perhaps
#control = list(weighting = function(x) weightTfIdf(x, normalize = FALSE),stopwords = TRUE)
# N-gram tokenizers (not used below).
# NOTE(review): BigramTokenizer needs RWeka (its library() call is commented
# out above) and tokenize_ngrams needs tau::textcnt; neither is loaded here.
BigramTokenizer <- function(x) NGramTokenizer(x, Weka_control(min = 2, max = 2))
tokenize_ngrams <- function(x, n=3) return(rownames(as.data.frame(unclass(textcnt(x,method="string",n=n)))))
# Combine train+test ingredient lists into one corpus so the document-term
# matrix has a consistent vocabulary across both sets.
comb_ingredients <- c(Corpus(VectorSource(train$ingredients)), Corpus(VectorSource(test$ingredients)))
comb_ingredients <- tm_map(comb_ingredients, stemDocument, language="english")
comb_ingredients <- tm_map(comb_ingredients, removeNumbers)
# remove punctuation
comb_ingredients <- tm_map(comb_ingredients, removePunctuation)
comb_ingredients <- tm_map(comb_ingredients, content_transformer(tolower),lazy=TRUE)
comb_ingredients <- tm_map(comb_ingredients,removeWords, stopwords(),lazy=TRUE)
comb_ingredients <- tm_map(comb_ingredients,stripWhitespace,lazy=TRUE)
comb_ingredients <- tm_map(comb_ingredients, removeWords, stopwords("english"),lazy=TRUE)
# Document-term matrix; terms appearing in fewer than ~3 recipes are dropped.
datasetMAIN <- DocumentTermMatrix(comb_ingredients)
datasetMAIN<- removeSparseTerms(datasetMAIN, 1-3/nrow(datasetMAIN))
datasetMAIN <- as.data.frame(as.matrix(datasetMAIN))
datasetMAIN$ingredients_count <- rowSums(datasetMAIN) # simple count of ingredients per receipe
#datasetMAIN$cuisine
print("Done creating dataframe for decision trees")
temporaryData <- datasetMAIN #just to be safe
datasetMAIN<-temporaryData
# Attach labels; test rows get a placeholder cuisine ("italian") because
# their true labels are unknown.
datasetMAIN$cuisine <- as.factor(c(train$cuisine, rep("italian", nrow(test))))
str(datasetMAIN$cuisine)
#Cleanup. (BAD IDEA)
# Drop generic descriptor tokens (colours, cooking verbs, size words) that
# carry little cuisine signal. Condensed from one `$<-NULL` statement per
# column -- the original list even deleted "bake" twice. Assigning NULL via
# [[<- is a no-op for columns that are absent, matching the old behaviour.
descriptor_cols <- c("allpurpos", "and", "bake", "bell", "black", "boneless",
                     "boil", "leaf", "brown", "cold", "cook", "crack", "dark",
                     "free", "fat", "hot", "fine", "green", "breast", "chile",
                     "extract", "ground", "golden", "flat", "frozen", "fresh",
                     "larg", "firm", "ice")
for (col in descriptor_cols) {
  datasetMAIN[[col]] <- NULL
}
trainDataset <- datasetMAIN[1:nrow(train), ]   # labelled rows
testDataset <- datasetMAIN[-(1:nrow(train)), ] # unlabelled submission rows
# CART decision tree with a very small complexity penalty (deep tree).
fit<-rpart(cuisine~., data=trainDataset, method="class",control = rpart.control(cp = 0.000002)) #method-> classification
prp(fit, type=1, extra=4)
prp(fit)
# NOTE(review): testDataset$cuisine holds the "italian" placeholder, so the
# tables/confusion matrices below do not measure real accuracy.
PredFit<-predict(fit, newdata=testDataset, type="class")
table(PredFit,testDataset$cuisine)
confusionMatrix(table(PredFit,testDataset$cuisine))
printcp(fit) # display the results
plotcp(fit) # visualize cross-validation results
summary(fit) # detailed summary of splits
# Kaggle submission file for the rpart model.
Prediction <- predict(fit, newdata = testDataset, type = "class")
FINALLYY <- cbind(as.data.frame(test$id), as.data.frame(Prediction))
colnames(FINALLYY) <-c("id","cuisine")
write.csv(FINALLYY, file = 'SubmitRpart.csv', row.names=F, quote=F)
#alternative random forest
# Tune mtry first, then fit; the trailing comment records accuracies observed
# for earlier hyper-parameter settings.
tuneER<-tuneRF(trainDataset,trainDataset$cuisine,ntreeTry = 100)
fit1 <- randomForest(cuisine~., data=trainDataset,ntree=1000,mtry=188) ##100tree mtry99 done 75%......1000tree mtry188 76%....mixup resamplin 79%
plot(fit1)
# Predict the test set and write a submission file for this model.
Prediction <- predict(fit1, newdata = testDataset, type = "class")
FINALLYY <- cbind(as.data.frame(test$id), as.data.frame(Prediction))
colnames(FINALLYY) <-c("id","cuisine")
write.csv(FINALLYY, file = 'newVersion.csv', row.names=F, quote=F)
varImpPlot(fit1)
varImp(fit1)
importance(fit1)
plot(Prediction)
print(fit1) # view results
importance(fit1) # importance of each predictor
#ctree try (bad stuff)
fit <- ctree(cuisine ~.,
             data=trainDataset)
plot(fit, main="Conditional Inference Tree for data")
#H2O Random Forest (didnt get it)
# NOTE(review): this H2O snippet is incomplete -- h2o is never initialised
# (no h2o.init()), `localH2O` is undefined, and install.packages() should not
# live inside an analysis script.
library(h2o)
install.packages("h2o")
trainH2O <- as.h2o(localH2O, trainDataset, key="trainDataset.hex")
prostate.hex <- as.h2o(localH2O, trainDataset, key="prostate.hex")
h2o.randomForest(x=trainDataset,y=cuisine,ntrees = 10)
help(h2o.randomForest)
#Cforest
library(party)
install.packages("party")
fit3 <- cforest(cuisine~., data=trainDataset,controls = cforest_unbiased( ntree = 10))
plot(fit3)
varimp(fit3)
Prediction <- predict(fit3, newdata = testDataset,type="prob")
Prediction
table(Prediction,testDataset$cuisine)
confusionMatrix(table(Prediction,testDataset$cuisine))
FINALLYY <- cbind(as.data.frame(test$id), as.data.frame(Prediction))
colnames(FINALLYY) <-c("id","cuisine")
write.csv(FINALLYY, file = 'SubmitAnhuisERO3.csv', row.names=F, quote=F)
# NOTE(review): fit1 is the randomForest model from the previous section,
# not the cforest fitted here.
varImpPlot(fit1)
# WHY NOT SVM
# Default-kernel SVM (e1071::svm) on the training partition.
Model<-svm(cuisine~.,data = trainDataset)
Prediction1 <- predict(Model, newdata = testDataset)
table(Prediction1,testDataset$cuisine)
confusionMatrix(table(Prediction1,testDataset$cuisine))
FINALLYY <- cbind(as.data.frame(test$id), as.data.frame(Prediction1))
plot(Prediction1)
colnames(FINALLYY) <-c("id","cuisine")
write.csv(FINALLYY, file = 'SubmitAnhuisERO_SVM_Linear.csv', row.names=F, quote=F)
# NOTE(review): varImpPlot() is a randomForest function; calling it on an
# svm object will error.
varImpPlot(Model)
#ksvm
# Linear-kernel SVM via kernlab::ksvm with cost C = 10.
svp <- ksvm(cuisine~.,data = trainDataset,type="C-svc",kernel="vanilladot",C=10)
Prediction2 <- predict(svp, newdata = testDataset)
plot(Prediction2)
confusionMatrix(table(Prediction2,testDataset$cuisine))
FINALLYY <- cbind(as.data.frame(test$id), as.data.frame(Prediction2))
colnames(FINALLYY) <-c("id","cuisine")
write.csv(FINALLYY, file = 'SubmitAnhuisERO_KSVM_Linear.csv', row.names=F, quote=F)
varImpPlot(Model)
#tuning SVM
# Polynomial-kernel SVM with hyperparameters taken from best.tune().
obj <-best.tune(svm, cuisine ~., data = trainDataset, kernel =
"polynomial")
obj
ModelTUNED<-svm(cuisine~.,data = trainDataset,kernel =
"polynomial",gamma=0.0004977601,cost=1,coef.0=0,degree=3)
Prediction3 <- predict(ModelTUNED, newdata = testDataset)
plot(Prediction3)
confusionMatrix(table(Prediction3,testDataset$cuisine))
FINALLYY <- cbind(as.data.frame(test$id), as.data.frame(Prediction3))
colnames(FINALLYY) <-c("id","cuisine")
# NOTE(review): this filename is identical to the ksvm submission above,
# so that file is overwritten -- confirm a distinct name was not intended.
write.csv(FINALLYY, file = 'SubmitAnhuisERO_KSVM_Linear.csv', row.names=F, quote=F)
#trying Neural Networks
# Single-hidden-layer network (nnet) with 6 units; MaxNWts raised to 25000
# to accommodate the wide ingredient feature space.
net<-nnet(cuisine~.,data = trainDataset, size=6, rang = 0.7, decay = 0, Hess = FALSE, trace = TRUE, MaxNWts = 25000,
abstol = 1.0e-4, reltol = 1.0e-8,maxit=2000)
par(mar=numeric(4),mfrow=c(1,2),family='serif')
newOne<-predict(net, testDataset,type="class")
table(newOne,testDataset$cuisine)
confusionMatrix(table(newOne,testDataset$cuisine))
FINALLYY <- cbind(as.data.frame(test$id), as.data.frame(newOne))
colnames(FINALLYY) <-c("id","cuisine")
str(FINALLYY)
write.csv(FINALLYY, file = 'Neural3.csv', row.names=F, quote=F)
#4 layer nnet with 1000 iteration gave 65% result (best)
#naive bayes
# Naive Bayes without Laplace smoothing (laplace = 0).
naive<-naiveBayes(cuisine~.,data = trainDataset,laplace = 0)
print(naive)
predictor1<-predict(naive, testDataset,type="class")
plot(predictor1)
predictor1
table(predictor1,testDataset$cuisine)
confusionMatrix(table(predictor1,testDataset$cuisine))
FINALLYY <- cbind(as.data.frame(test$id), as.data.frame(predictor1))
colnames(FINALLYY) <-c("id","cuisine")
str(FINALLYY)
write.csv(FINALLYY, file = 'Naive.csv', row.names=F, quote=F)
#survive This Forest
#200tree mtry188
# randomForestSRC::rfsrc forest with the mtry found for the earlier
# randomForest fit.
ModelOfSurvival<-rfsrc(cuisine~.,trainDataset,ntree=200,mtry=188)
plot(ModelOfSurvival)
# BUG FIX: `preder` was used below before it was assigned (its definition
# sat at the very end of this section); predict once up front and reuse.
# The unused duplicate `predictorSurvival` prediction was removed.
preder <- predict.rfsrc(ModelOfSurvival, testDataset, type = "class")
plot(preder)
preder
table(preder$class, testDataset$cuisine)
confusionMatrix(table(preder$class, testDataset$cuisine))
FINALLYY <- cbind(as.data.frame(test$id), as.data.frame(preder$class))
colnames(FINALLYY) <- c("id", "cuisine")
str(FINALLYY)
write.csv(FINALLYY, file = 'survival2.csv', row.names = FALSE, quote = FALSE)
#pure Random forest (top 10-25 items)
# Forest restricted to a hand-picked subset of high-importance ingredient
# features plus ingredients_count.
ModelOfSurvival<-randomForest(cuisine~tortilla+ingredients_count+soy+oliv+ginger+cumin+parmesan+cilantro+chees+lime+sauc+chili+fish+sesam+basil+pepper+curri+sugar+salsa+corn+oil+garlic+masala+butter+buttermilk+rice+seed+milk+feta+egg,trainDataset,ntree=500)
plot(ModelOfSurvival)
varImpPlot(ModelOfSurvival)
Prediction <- predict(ModelOfSurvival, newdata = testDataset, type = "class")
FINALLYY <- cbind(as.data.frame(test$id), as.data.frame(Prediction))
colnames(FINALLYY) <-c("id","cuisine")
write.csv(FINALLYY, file = 'newVersion.csv', row.names=F, quote=F)
# NOTE(review): the diagnostics below inspect `fit1` (the earlier full
# random forest), not `ModelOfSurvival` -- looks like a copy-paste
# leftover; confirm which model was meant.
varImpPlot(fit1)
varImp(fit1)
importance(fit1)
plot(Prediction)
print(fit1) # view results
importance(fit1) # importance of each predictor
|
8fc90b2e98f37bb4fb15f9cb8c8f0d30c0358c6b | 127f22757ddb994bf8bd0a105181e018a53e53ac | /images/L_Decomp_study02_118bus.R | bf21eeb6207001db676cd9bb2acb237524704569 | [] | no_license | NREL/tda-ps | 0394c8d767e4bccfbaf611e13ad0318bf06bb913 | f43900eeaa70daf0cec4cb065a6dfbe40ebb402f | refs/heads/master | 2021-07-12T16:53:33.759295 | 2020-07-19T22:58:46 | 2020-07-19T22:58:46 | 186,669,605 | 7 | 3 | null | 2019-07-25T12:54:20 | 2019-05-14T17:30:17 | MATLAB | UTF-8 | R | false | false | 11,910 | r | L_Decomp_study02_118bus.R |
# Cascading-failure load decomposition for the 118-bus study: reads 100
# simulation result files, decomposes the served load between consecutive
# node removals, and plots/summarises the components.
rm(list = ls())  # NOTE(review): clearing the workspace inside a script is
                 # discouraged; prefer running in a fresh session.
start.time = Sys.time()
###################
#=== LIBRARIES ===#
###################
library(rio)
# Decompose the node-level load trajectory of one cascade simulation.
#
# `trial` is a data frame (or matrix) of loads: one row per time step
# (node removal), one column per bus. The first and last rows are treated
# as boundary states and excluded.
#
# For each pair of consecutive interior steps the per-node change is
# classified as:
#   L_SERV -- load still served after the step,
#   L_DSHD -- load shed because the node dropped to zero,
#   L_CAS  -- partial reduction on a still-served node (cascading loss),
#   L_GAIN -- load picked up relative to the previous step.
# Negative loads are not covered by the case analysis and remain NA,
# matching the original behaviour.
#
# Returns a data frame of per-step totals plus the total served load LMAX
# and the identity check CHK_LMAX = L_SERV + L_DSHD + L_CAS - L_GAIN,
# which equals LMAX for non-negative inputs.
LMAX_parts <- function(trial) {
  n_steps <- nrow(trial) - 2
  n_nodes <- ncol(trial)
  # Total load at each interior time step (boundary rows dropped).
  total_load <- apply(trial, 1, sum)[-c(1, nrow(trial))]
  served <- matrix(NA, nrow = n_steps, ncol = n_nodes)
  shed <- matrix(NA, nrow = n_steps, ncol = n_nodes)
  cascaded <- matrix(NA, nrow = n_steps, ncol = n_nodes)
  gained <- matrix(NA, nrow = n_steps, ncol = n_nodes)
  for (step in seq_len(n_steps)) {
    row_now <- trial[step + 1, ]
    row_next <- trial[step + 2, ]
    for (node in seq_len(n_nodes)) {
      load_now <- as.numeric(row_now[node])
      load_next <- as.numeric(row_next[node])
      if (load_now == 0 && load_next == 0) {
        served[step, node] <- 0
        shed[step, node] <- 0
        cascaded[step, node] <- 0
        gained[step, node] <- 0
      } else if (load_now == 0 && load_next > 0) {
        served[step, node] <- load_next
        shed[step, node] <- 0
        cascaded[step, node] <- 0
        gained[step, node] <- load_next
      } else if (load_now > 0 && load_next == 0) {
        served[step, node] <- 0
        shed[step, node] <- load_now
        cascaded[step, node] <- 0
        gained[step, node] <- 0
      } else if (load_now > 0 && load_next > 0) {
        served[step, node] <- load_next
        shed[step, node] <- 0
        if (load_now < load_next) {
          cascaded[step, node] <- 0
          gained[step, node] <- load_next - load_now
        } else if (load_now > load_next) {
          cascaded[step, node] <- load_now - load_next
          gained[step, node] <- 0
        } else {
          cascaded[step, node] <- 0
          gained[step, node] <- 0
        }
      }
    }
  }
  serv_tot <- rowSums(served)
  shed_tot <- rowSums(shed)
  cas_tot <- rowSums(cascaded)
  gain_tot <- rowSums(gained)
  out <- data.frame(
    L_SERV = serv_tot,
    L_DSHD = shed_tot,
    L_CAS = cas_tot,
    L_GAIN = gain_tot,
    LMAX = unname(total_load),
    CHK_LMAX = serv_tot + shed_tot + cas_tot - gain_tot
  )
  rownames(out) <- as.character(seq_len(n_steps))
  out
}
##############################################################################################################################################
# Decomposition: derive the load components for each of the 100 cascade
# simulations and write one CSV of results per input file.
# FIX: paths are built with file.path() instead of calling setwd() twice
# per loop iteration (fragile and slow).
input_dir <- "C:/Users/doforib/Desktop/test_folder/dataset"
output_dir <- "C:/Users/doforib/Desktop/test_folder/results"
for (i in 1:100) {
  daata <- import(file.path(input_dir, paste0("result-", i, ".tsv")), format = "csv")
  # Keep only the per-bus load columns L_1 .. L_118.
  dataa_L <- daata[which(colnames(daata) == "L_1"):which(colnames(daata) == "L_118")]
  res2_pad <- LMAX_parts(dataa_L)
  write.csv(res2_pad, file.path(output_dir, paste0("L_Decomp-", i, ".csv")))
}
##############################################################################################################################################
############
# Plotting #
############
# Overlaying lines #
setwd("C:/Users/doforib/Desktop/test_folder/results")
#----------------------------#
#=== All plots one-by-one ===#
#----------------------------#
par(mfrow = c(1, 1))
################
# Single plots #
################
# Helper: plot one decomposition component for run 1, then overlay the
# same component for runs 2-100 (replaces five copy-pasted blocks).
plot_component <- function(column, colour, ylab, ylim) {
  first_run <- read.csv(paste0("L_Decomp-", 1, ".csv"))
  plot(first_run[[column]], type = "l", col = colour, ylab = ylab,
       ylim = ylim, xlab = "# of nodes removed")
  for (run in 2:100) {
    this_run <- read.csv(paste0("L_Decomp-", run, ".csv"))
    lines(this_run[[column]], type = "l", col = colour)
  }
}
plot_component("LMAX", "red", "TL", c(0, 45))
plot_component("L_SERV", "blue", "LS", c(0, 45))
plot_component("L_DSHD", "orange", "LDSHD", c(0, 5))
plot_component("L_CAS", "green", "L_CAS", c(0, 6.1))
plot_component("L_GAIN", "black", "L_G", c(0, 3.5))
###############
# Joint plots #
###############
# All five load components overlaid on a single set of axes; each of the
# 100 runs contributes one line per component.
first_run <- read.csv(paste0("L_Decomp-", 1, ".csv"))
plot(first_run$LMAX, type = "l", col = "red", ylim = c(0, 45),
     ylab = "Load value", xlab = "# of nodes removed")
for (i in 2:100) {
  res2_pad <- read.csv(paste0("L_Decomp-", i, ".csv"))
  lines(res2_pad$LMAX, type = "l", col = "red")
}
for (i in 1:100) {
  res2_pad <- read.csv(paste0("L_Decomp-", i, ".csv"))
  lines(res2_pad$L_SERV, type = "l", col = "blue")
}
# BUG FIX: the original redrew the L_SERV series (blue) inside this loop;
# every run's L_DSHD series is now drawn in orange as intended.
for (i in 1:100) {
  res2_pad <- read.csv(paste0("L_Decomp-", i, ".csv"))
  lines(res2_pad$L_DSHD, type = "l", col = "orange")
}
for (i in 1:100) {
  res2_pad <- read.csv(paste0("L_Decomp-", i, ".csv"))
  lines(res2_pad$L_CAS, type = "l", col = "green")
}
for (i in 1:100) {
  res2_pad <- read.csv(paste0("L_Decomp-", i, ".csv"))
  lines(res2_pad$L_GAIN, type = "l", col = "black")
}
# lty = 1 is required for legend() to render coloured line keys.
legend("topright", bty = "n", inset = 0.005,
       c("TL", "LS", "LDSHD", "L_CAS", "L_G"), lty = 1,
       cex = 0.95, col = c("red", "blue", "orange", "green", "black"))
#==============================================================================================================================#
#------------------------------------------------------------------------------------------------------------------------------#
#==============================================================================================================================#
#-----------------------------#
#=== All plots on one plot ===#
#-----------------------------#
op <- par(mfrow = c(2, 3))
################
# Single plots #
################
# Helper: draw one component for run 1 and overlay runs 2-100 in the
# current panel of the 2x3 layout (replaces five copy-pasted blocks).
draw_component <- function(column, colour, ylab, ylim) {
  first_run <- read.csv(paste0("L_Decomp-", 1, ".csv"))
  plot(first_run[[column]], type = "l", col = colour, ylab = ylab,
       ylim = ylim, xlab = "# of nodes removed")
  for (run in 2:100) {
    this_run <- read.csv(paste0("L_Decomp-", run, ".csv"))
    lines(this_run[[column]], type = "l", col = colour)
  }
}
draw_component("LMAX", "red", "TL", c(0, 45))
draw_component("L_SERV", "blue", "LS", c(0, 45))
draw_component("L_DSHD", "orange", "LDSHD", c(0, 5))
draw_component("L_CAS", "green", "L_CAS", c(0, 6.1))
draw_component("L_GAIN", "black", "L_G", c(0, 3.5))
###############
# Joint plots #
###############
# All five components overlaid in the final panel; one line per run per
# component.
first_run <- read.csv(paste0("L_Decomp-", 1, ".csv"))
plot(first_run$LMAX, type = "l", col = "red", ylim = c(0, 45),
     ylab = "Load value", xlab = "# of nodes removed")
for (i in 2:100) {
  res2_pad <- read.csv(paste0("L_Decomp-", i, ".csv"))
  lines(res2_pad$LMAX, type = "l", col = "red")
}
for (i in 1:100) {
  res2_pad <- read.csv(paste0("L_Decomp-", i, ".csv"))
  lines(res2_pad$L_SERV, type = "l", col = "blue")
}
# BUG FIX: the original redrew the L_SERV series (blue) inside this loop;
# every run's L_DSHD series is now drawn in orange as intended.
for (i in 1:100) {
  res2_pad <- read.csv(paste0("L_Decomp-", i, ".csv"))
  lines(res2_pad$L_DSHD, type = "l", col = "orange")
}
for (i in 1:100) {
  res2_pad <- read.csv(paste0("L_Decomp-", i, ".csv"))
  lines(res2_pad$L_CAS, type = "l", col = "green")
}
for (i in 1:100) {
  res2_pad <- read.csv(paste0("L_Decomp-", i, ".csv"))
  lines(res2_pad$L_GAIN, type = "l", col = "black")
}
# lty = 1 is required for legend() to render coloured line keys.
legend("topright", bty = "n", inset = 0.005,
       c("TL", "LS", "LDSHD", "L_CAS", "L_G"), lty = 1,
       cex = 0.95, col = c("red", "blue", "orange", "green", "black"))
par(op)
#==============================================================================================================================#
#------------------------------------------------------------------------------------------------------------------------------#
#==============================================================================================================================#
# (Functional) Box plots #
# Collect each component across all runs into one matrix per component
# (rows = node-removal steps, columns = runs).
# PERF FIX: each CSV is now read once per iteration instead of five times.
# NOTE(review): nrow(res2_pad) relies on `res2_pad` being left over from
# the plotting sections above.
data_TL = data_LS = data_DSHD = data_CAS = data_G = matrix(NA, ncol = 100, nrow = nrow(res2_pad))
for (i in 1:100) {
  run_i <- read.csv(paste0("L_Decomp-", i, ".csv"))
  data_TL[, i] <- run_i$LMAX
  data_LS[, i] <- run_i$L_SERV
  data_DSHD[, i] <- run_i$L_DSHD
  data_CAS[, i] <- run_i$L_CAS
  data_G[, i] <- run_i$L_GAIN
}
# Per-step distribution of each component across the 100 runs.
data_TL = as.data.frame(data_TL)
boxplot(data_TL, col = "red")
data_LS = as.data.frame(data_LS)
boxplot(data_LS, col = "green")
data_DSHD = as.data.frame(data_DSHD)
boxplot(data_DSHD,col = "orange")
data_CAS = as.data.frame(data_CAS)
boxplot(data_CAS,col = "blue")
data_G = as.data.frame(data_G)
boxplot(data_G,col = "brown")
op = par(mfrow = c(2,3))
boxplot(data_TL$V1,col="grey")
points(data_TL$V2)
par(op)
##############################################################################################################################################
end.time = Sys.time()
time.taken = end.time - start.time
|
82b31483176ff602d7562941996d1b52d6063e03 | 3122ac39f1ce0a882b48293a77195476299c2a3b | /clients/r/generated/R/InputStepImpl.r | 8da845bd6e1683fe367ed139226b4da1f6d71b14 | [
"MIT"
] | permissive | miao1007/swaggy-jenkins | 4e6fe28470eda2428cbc584dcd365a21caa606ef | af79438c120dd47702b50d51c42548b4db7fd109 | refs/heads/master | 2020-08-30T16:50:27.474383 | 2019-04-10T13:47:17 | 2019-04-10T13:47:17 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 5,054 | r | InputStepImpl.r | # Swaggy Jenkins
#
# Jenkins API clients generated from Swagger / Open API specification
#
# OpenAPI spec version: 1.1.1
# Contact: blah@cliffano.com
# Generated by: https://openapi-generator.tech
#' InputStepImpl Class
#'
#' Generated model of a pipeline "input" step with JSON (de)serialization
#' helpers. FIX: the generated code used the bare identifiers
#' `_linksObject` and `$_links`, which are parse errors in R (identifiers
#' cannot start with an underscore) -- backtick quoting / a legal local
#' name are used instead.
#'
#' @field _class Java class name reported for the step.
#' @field _links HAL links object (an \code{InputStepImpllinks}).
#' @field id Identifier of the input step.
#' @field message Prompt message of the input step.
#' @field ok Caption of the proceed button.
#' @field parameters List of \code{StringParameterDefinition} objects.
#' @field submitter User or group allowed to answer the input step.
#'
#' @importFrom R6 R6Class
#' @importFrom jsonlite fromJSON toJSON
#' @export
InputStepImpl <- R6::R6Class(
  'InputStepImpl',
  public = list(
    `_class` = NULL,
    `_links` = NULL,
    `id` = NULL,
    `message` = NULL,
    `ok` = NULL,
    `parameters` = NULL,
    `submitter` = NULL,
    # Construct an InputStepImpl; every field is optional and type-checked
    # when supplied.
    initialize = function(`_class`, `_links`, `id`, `message`, `ok`, `parameters`, `submitter`){
      if (!missing(`_class`)) {
        stopifnot(is.character(`_class`), length(`_class`) == 1)
        self$`_class` <- `_class`
      }
      if (!missing(`_links`)) {
        stopifnot(R6::is.R6(`_links`))
        self$`_links` <- `_links`
      }
      if (!missing(`id`)) {
        stopifnot(is.character(`id`), length(`id`) == 1)
        self$`id` <- `id`
      }
      if (!missing(`message`)) {
        stopifnot(is.character(`message`), length(`message`) == 1)
        self$`message` <- `message`
      }
      if (!missing(`ok`)) {
        stopifnot(is.character(`ok`), length(`ok`) == 1)
        self$`ok` <- `ok`
      }
      if (!missing(`parameters`)) {
        stopifnot(is.list(`parameters`), length(`parameters`) != 0)
        lapply(`parameters`, function(x) stopifnot(R6::is.R6(x)))
        self$`parameters` <- `parameters`
      }
      if (!missing(`submitter`)) {
        stopifnot(is.character(`submitter`), length(`submitter`) == 1)
        self$`submitter` <- `submitter`
      }
    },
    # Convert this object to a nested list suitable for jsonlite::toJSON().
    toJSON = function() {
      InputStepImplObject <- list()
      if (!is.null(self$`_class`)) {
        InputStepImplObject[['_class']] <- self$`_class`
      }
      if (!is.null(self$`_links`)) {
        InputStepImplObject[['_links']] <- self$`_links`$toJSON()
      }
      if (!is.null(self$`id`)) {
        InputStepImplObject[['id']] <- self$`id`
      }
      if (!is.null(self$`message`)) {
        InputStepImplObject[['message']] <- self$`message`
      }
      if (!is.null(self$`ok`)) {
        InputStepImplObject[['ok']] <- self$`ok`
      }
      if (!is.null(self$`parameters`)) {
        InputStepImplObject[['parameters']] <- lapply(self$`parameters`, function(x) x$toJSON())
      }
      if (!is.null(self$`submitter`)) {
        InputStepImplObject[['submitter']] <- self$`submitter`
      }
      InputStepImplObject
    },
    # Populate this object from a JSON string; absent fields are skipped.
    fromJSON = function(InputStepImplJson) {
      InputStepImplObject <- jsonlite::fromJSON(InputStepImplJson)
      if (!is.null(InputStepImplObject$`_class`)) {
        self$`_class` <- InputStepImplObject$`_class`
      }
      if (!is.null(InputStepImplObject$`_links`)) {
        # FIX: backtick-quoted `_links` access and a legal local name.
        linksObject <- InputStepImpllinks$new()
        linksObject$fromJSON(jsonlite::toJSON(InputStepImplObject$`_links`, auto_unbox = TRUE))
        self$`_links` <- linksObject
      }
      if (!is.null(InputStepImplObject$`id`)) {
        self$`id` <- InputStepImplObject$`id`
      }
      if (!is.null(InputStepImplObject$`message`)) {
        self$`message` <- InputStepImplObject$`message`
      }
      if (!is.null(InputStepImplObject$`ok`)) {
        self$`ok` <- InputStepImplObject$`ok`
      }
      if (!is.null(InputStepImplObject$`parameters`)) {
        self$`parameters` <- lapply(InputStepImplObject$`parameters`, function(x) {
          parametersObject <- StringParameterDefinition$new()
          parametersObject$fromJSON(jsonlite::toJSON(x, auto_unbox = TRUE))
          parametersObject
        })
      }
      if (!is.null(InputStepImplObject$`submitter`)) {
        self$`submitter` <- InputStepImplObject$`submitter`
      }
    },
    # Render this object as a JSON string via sprintf templating.
    # NOTE(review): string fields are interpolated without surrounding
    # quotes, matching the generator's template -- the output may not be
    # strictly valid JSON; verify against the generator's intent.
    toJSONString = function() {
      sprintf(
        '{
          "_class": %s,
          "_links": %s,
          "id": %s,
          "message": %s,
          "ok": %s,
          "parameters": [%s],
          "submitter": %s
        }',
        self$`_class`,
        self$`_links`$toJSON(),
        self$`id`,
        self$`message`,
        self$`ok`,
        lapply(self$`parameters`, function(x) paste(x$toJSON(), sep=",")),
        self$`submitter`
      )
    },
    # Populate this object from a JSON string (strict variant that assumes
    # all fields are present).
    fromJSONString = function(InputStepImplJson) {
      InputStepImplObject <- jsonlite::fromJSON(InputStepImplJson)
      self$`_class` <- InputStepImplObject$`_class`
      InputStepImpllinksObject <- InputStepImpllinks$new()
      # FIX: `$_links` must be backtick-quoted to parse.
      self$`_links` <- InputStepImpllinksObject$fromJSON(jsonlite::toJSON(InputStepImplObject$`_links`, auto_unbox = TRUE))
      self$`id` <- InputStepImplObject$`id`
      self$`message` <- InputStepImplObject$`message`
      self$`ok` <- InputStepImplObject$`ok`
      self$`parameters` <- lapply(InputStepImplObject$`parameters`, function(x) StringParameterDefinition$new()$fromJSON(jsonlite::toJSON(x, auto_unbox = TRUE)))
      self$`submitter` <- InputStepImplObject$`submitter`
    }
  )
)
|
88f06bd73c4b9be239b6f94c9838e0ebd14e9759 | 28301c658db610c9040e168cd6d76c20d2b39629 | /man/simulate_no_shock.Rd | c26bd4d724d257a3021b05e56241aa02c82796f9 | [] | no_license | nuritovbek/lrem | da336e43add7c3830059baa9e231e6c27f62dedf | 17de96031dc72e59768d22bf872bc2008e2ecaa4 | refs/heads/master | 2021-01-21T17:22:28.530320 | 2017-06-09T07:47:49 | 2017-06-09T07:47:49 | 91,946,037 | 0 | 3 | null | 2017-06-09T07:47:50 | 2017-05-21T09:27:07 | R | UTF-8 | R | false | true | 663 | rd | simulate_no_shock.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/simulation.R
\name{simulate_no_shock}
\alias{simulate_no_shock}
\title{Simulation of an LRE without exogenous shocks, for either the autonomous or the AR case, as specified by g and h}
\usage{
simulate_no_shock(g, h, x0, t)
}
\arguments{
\item{g}{Decision rule}
\item{h}{Motion rule}
\item{x0}{vector of predetermined variables}
\item{t}{length of the simulation}
}
\value{
A matrix showing the dynamic paths of the variables in the specification
}
\description{
Simulates a linear rational-expectations model without exogenous shocks. Handles either the autonomous case or the AR case, as specified by the decision rule g and the law of motion h.
}
|
62f3defef86142c63280c143912e5f3c38468263 | 469dcd84bb0202c7ee98c3ae00d7c5ba38b7281a | /analyse.R | bbd74e26d616ec3e106d5b0e66d361acb8608476 | [] | no_license | lchski/university-acts | 329004790d31c40a835e791a1a39c74a87c62087 | 2fe4d61e0e47af598cce2acbae2c5cda8762f63c | refs/heads/master | 2021-09-25T21:57:24.167167 | 2021-09-15T00:37:39 | 2021-09-15T00:37:39 | 214,310,669 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 1,230 | r | analyse.R | source("load.R")
source("lib/similarity.R")
library(lubridate)
library(tidytext)
library(exploratory)
library(SnowballC)
data("stop_words")
# Force the lazily-loaded dataset promise so `stop_words` is a real object
# in this session.
stop_words <- stop_words
# One row per statement, keyed by university-year-section-subsection.
purposes_to_compare <- purposes %>%
mutate(id = paste(university, year, section, subsection, sep = "-"))
# Pairwise similarity of purpose statements, keeping pairs above 0.75
# (analyse_statement_similarity() presumably comes from lib/similarity.R).
compared_purposes <- analyse_statement_similarity(
statements = purposes_to_compare,
similarity_threshold = 0.75
)
## Compare text of similar statements
# Attach the raw text of both members of each above-threshold pair.
compared_purposes$above_threshold %>%
left_join(purposes_to_compare %>%
select(id, text), by = c("id.x" = "id")
) %>%
rename(text.x = text) %>%
left_join(purposes_to_compare %>%
select(id, text), by = c("id.y" = "id")
) %>%
rename(text.y = text) %>%
View()
## Compare similar statements from the same institution
# Same join, additionally carrying each statement's university and
# restricting to pairs within a single institution.
compared_purposes$above_threshold %>%
left_join(purposes_to_compare %>%
select(id, text, university), by = c("id.x" = "id")
) %>%
rename(text.x = text, university.x = university) %>%
left_join(purposes_to_compare %>%
select(id, text, university), by = c("id.y" = "id")
) %>%
rename(text.y = text, university.y = university) %>%
filter(university.x == university.y) %>%
View()
|
c5eb21d5975887db545f11950b281e038518bbb7 | b652c1b557b258508d07971eeb4b7c162b2d9548 | /man/ci.GM.Rd | 822049be242354d03a35a77cd3b22d064bf220ad | [] | no_license | RajeswaranV/vcdPlus | 655d0d37f387e56e36be8af323410732ae4cc34d | 0bb68115d95a96fd3cd1755733d944fe2ac5090a | refs/heads/master | 2021-01-01T19:54:28.569205 | 2017-08-01T15:20:53 | 2017-08-01T15:20:53 | 89,842,430 | 0 | 0 | null | null | null | null | UTF-8 | R | false | true | 1,251 | rd | ci.GM.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/400.ConsolidatedEstimationMethods.R
\name{ci.GM}
\alias{ci.GM}
\title{The simultaneous confidence interval for multinomial proportions based on the method proposed in Goodman (1965)}
\usage{
ci.GM(inpmat, alpha)
}
\arguments{
\item{inpmat}{- The input matrix}
\item{alpha}{- Alpha value (significance level required)}
}
\value{
A list of dataframes
\item{GM.Volume}{ GM Volume}
\item{GM.UpLim}{ Dataframe of GM Upper Limits}
\item{GM.LowLim}{ Dataframe of GM Lower Limits}
\item{GM.Length}{ Dataframe of GM Lengths}
}
\description{
The simultaneous confidence interval for multinomial proportions based on the method proposed in Goodman (1965)
}
\examples{
x = c(56,72,73,59,62,87,68,99,98)
inpmat = cbind(x[1:3],x[4:6],x[7:9])
alpha=0.05
ci.GM(inpmat,alpha)
}
\references{
[1] Goodman, L.A. (1965).
On Simultaneous Confidence Intervals for Multinomial Proportions.
Technometrics 7: 247-254.
}
\seealso{
Other Confidence Interval for Multinomial Proportion: \code{\link{ci.BMDU}},
\code{\link{ci.FS}}, \code{\link{ci.QH}},
\code{\link{ci.SG}}, \code{\link{ci.WS}},
\code{\link{ci.WaldCC}}, \code{\link{ci.Wald}}
}
\author{
Subbiah and Balakrishna S Kesavan
}
|
# Local equivalent of rlang::set_names(): assign `nm` as the names of
# `object`; when only `nm` is supplied, a character vector is named after
# itself.
set_names <- function (object = nm, nm) {
  names(object) <- nm
  object
}

# Lookup from hex digit (0-9, A-F, a-f) to its 4-bit binary string; upper-
# and lower-case letters map to the same nibble value.
# FIX: replaced the right-assignment `-> hex_map` with a conventional
# left assignment.
hex_map <- set_names(
  c("0000", "0001", "0010", "0011", "0100", "0101", "0110", "0111", "1000",
    "1001", "1010", "1011", "1100", "1101", "1110", "1111", "1010", "1011",
    "1100", "1101", "1110", "1111"),
  c("0", "1", "2", "3", "4", "5", "6", "7", "8", "9", "A", "B", "C",
    "D", "E", "F", "a", "b", "c", "d", "e", "f")
)
# Package-private environment that holds the lazily-loaded lookup tables
# and the compiled search tries.
.pkgenv <- new.env(parent=emptyenv())
#' Rebuild in-memory search tries
#'
#' The search structures created by `triebeard` are full of external pointers
#' which point to the deep, dark void when bad things happen to R sessions.
#' When the package reloads everything _should_ return to normal, but you
#' can use this function to rebuild the in-memory search structures at any time.
#'
#' @md
#' @export
rebuild_search_tries <- function() {
  # Load the package datasets directly into the private environment.
  data("mac_age_db", envir = .pkgenv, package = "MACtools")
  data("mac_registry_data", envir = .pkgenv, package = "MACtools")
  # Trie mapping padded MAC prefix strings to age-database prefixes.
  triebeard::trie(
    .pkgenv$mac_age_db$to_match,
    .pkgenv$mac_age_db$prefix
  ) -> age_trie
  age_masks <- as.integer(unique(.pkgenv$mac_age_db$mask))
  assign("age_trie", age_trie, envir = .pkgenv)
  assign("age_masks", age_masks, envir = .pkgenv)
  # Trie mapping padded MAC prefix strings to registry assignments.
  triebeard::trie(
    .pkgenv$mac_registry_data$to_match,
    .pkgenv$mac_registry_data$assignment
  ) -> reg_trie
  # NOTE(review): registry masks are sorted descending while age masks are
  # not -- confirm downstream lookup code expects this asymmetry.
  reg_masks <- sort(unique(.pkgenv$mac_registry_data$mask), decreasing = TRUE)
  assign("reg_trie", reg_trie, envir = .pkgenv)
  assign("reg_masks", reg_masks, envir = .pkgenv)
}
|
1b838e589fc6197b696c2a1dd33d85a8ea8f1752 | ffdea92d4315e4363dd4ae673a1a6adf82a761b5 | /data/genthat_extracted_code/Peacock.test/examples/peacock3.Rd.R | d9557f0c7cb72c08eaee21ec4ac70b6217763c5a | [] | no_license | surayaaramli/typeRrh | d257ac8905c49123f4ccd4e377ee3dfc84d1636c | 66e6996f31961bc8b9aafe1a6a6098327b66bf71 | refs/heads/master | 2023-05-05T04:05:31.617869 | 2019-04-25T22:10:06 | 2019-04-25T22:10:06 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 408 | r | peacock3.Rd.R | library(Peacock.test)
### Name: peacock3
### Title: Three Dimensional Kolmogorov-Smirnov/Peacock Two-Sample test
### Aliases: peacock3
### Keywords: Three-dimensional Kolmogorov-Smirnov/Peacock Two-Sample test
###   Three-dimensional Fasano-Franceschini Two-Sample test

### ** Examples

# Two independent 3-D standard-normal samples (4 and 6 observations, one
# column per dimension).
x <- matrix(rnorm(12, 0, 1), ncol=3)
y <- matrix(rnorm(18, 0, 1), ncol=3)
# Peacock three-dimensional two-sample test statistic.
ks <- peacock3(x, y)
ks
|
# Fit and select a linear model for the gender-inequality dataset, then
# save the coefficient table of the chosen model.
# FIX: `=` assignment replaced with `<-`, `header = T` with TRUE.
df <- read.csv("Documents/Mine New/Harvard 2016/Stat 139/projectdata_1/data1.csv",
               header = TRUE)
df <- df[, 2:11]
library(plyr)
df <- rename(df, c("X" = "name", "X.1" = "Y",
                   "Sociopolieconomic.Factors" = "gender_index",
                   "X.2" = "primary", "X.3" = "govt",
                   "X.4" = "gdp", "X.5" = "develop",
                   "Biological.Factors" = "teen",
                   "X.6" = "obesity", "X.7" = "abortion"))
# Indicator for near-universal primary enrollment (> 0.95).
df$primary_ind <- cut(df$primary, c(0, 0.95, max(df$primary)), labels = c(0:1))
# Transformed predictors (square root / log / square chosen by the author).
gender_index <- df$gender_index
primary_ind <- df$primary_ind
govt <- sqrt(df$govt)
gdp <- log(df$gdp)
teen <- log(df$teen)
obesity <- sqrt(df$obesity)
abortion <- df$abortion
y <- (df$Y)^2
# NOTE(review): cbind() coerces the factor `primary_ind` to its integer
# codes here, so the stepwise search below treats it as numeric.
trans_df <- cbind(y, gender_index, primary_ind, govt, gdp, teen,
                  obesity, abortion)
# Baseline main-effects model (uses the transformed globals above).
baseline <- lm(y ~ gender_index + primary_ind + govt + gdp + teen + obesity + abortion)
summary(baseline)
# Stepwise selection between the intercept-only model and the model with
# all two-way interactions.
null <- lm(y ~ 1, data = as.data.frame(trans_df))
full <- lm(y ~ .^2, data = as.data.frame(trans_df))
model1 <- step(baseline, scope = list(lower = null, upper = full), direction = "both")
# Refit the selected model explicitly.
fit1 <- lm(y ~ primary_ind + govt + gdp + teen + obesity + abortion +
             primary_ind:abortion + gdp:abortion + govt:obesity +
             govt:gdp + govt:teen)
# Save the coefficient table of the best model.
c20 <- coef(summary(fit1))
59fa16a850b62cf40d8d92c290c4b9971ecca3a9 | 9a14746a73f911e671d3083b7a50e67609869011 | /man/normalize.matrix.Rd | d73f3a131c6c6a5ce2459ac9a6d46275116537d1 | [] | no_license | simecek/HPQTL | 441b3ae013158e9324cb12162f1c27026864afc2 | 0ce4902f910d97ba1d86d27007a5ece2c64f1853 | refs/heads/master | 2016-09-06T08:54:48.118812 | 2015-06-20T18:03:09 | 2015-06-20T18:03:09 | 16,997,988 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 875 | rd | normalize.matrix.Rd | % Generated by roxygen2 (4.1.0): do not edit by hand
% Please edit documentation in R/normalize.matrix.r
\name{normalize.matrix}
\alias{normalize.matrix}
\title{Normalize Matrix}
\usage{
normalize.matrix(W, method = c("sample-variance", "diagonal",
"diagonal-average", "none"))
}
\arguments{
\item{W}{symmetric, positive semidefinite matrix}
\item{method}{"sample-variance", "diagonal" or "diagonal-average"}
}
\value{
Normalized matrix
}
\description{
Normalize genetic similarity matrix.
}
\details{
To calculate heritability, particularly for different genetic similarity estimators,
the genetic similarity matrix needs to be properly normalized. The default way is to force sample
variance of animal effect to be unit.
}
\examples{
W <- matrix(c(1,1/2,1/2,1),2,2)
normalize.matrix(W, method="sample-variance")
normalize.matrix(W, method="diagonal")
}
\keyword{manip}
|
af367dfe3c008919145fc138a3017eac3d0d8851 | 7217693dc00b148a48c6503f6fe4ec1d478f52e8 | /fine_mapping/filtered_hits/3_join_annotation.R | a30e35d827bc546d0bea23522ca4efc18ce69cd3 | [] | no_license | Eugene77777777/biomarkers | 8ac6250e1726c9233b43b393f42076b573a2e854 | 9e8dc2876f8e6785b509e0fce30f6e215421f45b | refs/heads/master | 2023-07-25T10:52:30.343209 | 2021-09-08T18:12:45 | 2021-09-08T18:12:45 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 1,940 | r | 3_join_annotation.R | suppressPackageStartupMessages(library(tidyverse))
suppressPackageStartupMessages(library(data.table))
# Fine-mapped hits; leading-zero chromosome codes are normalised ("01" -> "1").
hits <- fread('filtered.hits.99.tsv', colClasses = c("chromosome" = "character")) %>%
mutate(chromosome = str_replace(chromosome, '^0', ''))
# Per-variant annotations keyed by chromosome/position.
anno <- fread('filtered.hits.99.anno.tsv', colClasses = c("#CHROM" = "character"))
# read the list of filtered regions (this file has the paths of the .z files from FINEMAP)
# NOTE(review): '@@@@@@' looks like a redacted filesystem prefix -- fill
# in before running.
regions_all <- fread(
cmd=paste(
'zcat',
'@@@@@@/users/ytanigaw/repos/rivas-lab/biomarkers/fine_mapping/filtration/filtered_regions.txt.gz'
)
) %>%
rename('CHROM' = '#CHROM') %>%
mutate(
zpath = file.path(
'@@@@@@/users/christian',
paste0('chr', CHROM),
TRAIT,
paste0('GLOBAL_', TRAIT, '_chr', CHROM, '_range', BEGIN, '-', END, '.z')
)
)
# read .z files and generate a mapping between variants and regions
# (one row per variant, tagged with its trait and region label).
map_df <-
regions_all %>% select(TRAIT) %>% unique() %>% pull() %>%
lapply(
function(trait){
regions_all %>% filter(TRAIT == trait) %>%
select(zpath) %>% pull() %>%
lapply(
function(zpath){
fread(
zpath,
colClasses = c("chromosome" = "character")
) %>%
mutate(
trait=trait,
region=str_replace_all(basename(zpath), 'GLOBAL_|.z', '')
)
}
) %>% bind_rows()
}
) %>% bind_rows()
# join the 3 data frames
# 1. map_df (variant to region)
# 2. hits
# 3. variant annotations
joined <- map_df %>% select(chromosome, position, trait, region) %>%
right_join(
hits, by=c('chromosome', 'position', 'trait')
) %>%
left_join(
anno %>%
rename(
'chromosome' = '#CHROM',
'position' = 'POS'
) %>%
select(-maf),
by=c('chromosome', 'position')
)
joined %>%
fwrite('filtered.hits.99.anno.joined.tsv', sep='\t')
|
cfa6c5005f423f7bafd20babbe3db62df1c0e8c8 | ffdea92d4315e4363dd4ae673a1a6adf82a761b5 | /data/genthat_extracted_code/NISTunits/examples/NISTukThUnITPerSqrFtSecTOwattPerSqrMeter.Rd.R | e716c1c3b537f08393dfa074225441bac531e44d | [] | no_license | surayaaramli/typeRrh | d257ac8905c49123f4ccd4e377ee3dfc84d1636c | 66e6996f31961bc8b9aafe1a6a6098327b66bf71 | refs/heads/master | 2023-05-05T04:05:31.617869 | 2019-04-25T22:10:06 | 2019-04-25T22:10:06 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 314 | r | NISTukThUnITPerSqrFtSecTOwattPerSqrMeter.Rd.R | library(NISTunits)
### Name: NISTukThUnITPerSqrFtSecTOwattPerSqrMeter
### Title: Convert British thermal unitIT per square foot second to watt
###   per square meter
### Aliases: NISTukThUnITPerSqrFtSecTOwattPerSqrMeter
### Keywords: programming

### ** Examples

# Convert 10 BtuIT/(ft^2 s) to W/m^2.
NISTukThUnITPerSqrFtSecTOwattPerSqrMeter(10)
|
3a5990cbb94143d30053a844fe6b5bee67ca3b13 | 7a6696bce6217cbb919bd53f2c23ce32154bad0a | /run_analysis.R | e68cfbf0fc3122bfc9992359206144381cf96870 | [] | no_license | magyro/Getting-Cleaning-Data-Project | a09fd3ac9acb18fb3817d409eb6e62ee0199be43 | 5bf813bea1bce17169775b35c855ea83f7c9271a | refs/heads/master | 2021-01-10T07:47:47.737192 | 2015-05-24T17:18:06 | 2015-05-24T17:18:06 | 36,179,639 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 6,277 | r | run_analysis.R | #
#Getting and Cleaning Data Course Project
#
#Task:
# Based on the data from
# https://d396qusza40orc.cloudfront.net/getdata%2Fprojectfiles%2FUCI%20HAR%20Dataset.zip
# Create one R script called run_analysis.R that does the following.
# 1.Merges the training and the test sets to create one data set.
# 2.Extracts only the measurements on the mean and standard deviation for each measurement.
# 3.Uses descriptive activity names to name the activities in the data set
# 4.Appropriately labels the data set with descriptive variable names.
# 5.From the data set in step 4, creates a second, independent tidy data set
# with the average of each variable for each activity and each subject
# NOTE(review): rm(list=ls()) and setwd() inside a shared script are
# discouraged -- they wipe the caller's workspace and assume one machine's
# directory layout.
rm(list=ls())
setwd("~/Coursera R/Readata/runAnalysis")
library(plyr)
library(dplyr)
#-------------------------------
#| Load global code tables     |
#-------------------------------
# 1. descriptive definitions of the activity codes (code -> activity name)
activityLabels <-
  read.table("UCI HAR Dataset/activity_labels.txt", header = FALSE, stringsAsFactors=FALSE)
# 2. descriptive definitions of the feature codes (column index -> feature name)
featureLabels<-
  read.table("UCI HAR Dataset/features.txt", header = FALSE)
# ++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
# |1. Merge the training and the test sets into one data set     |
# |   (Requirement no. 1)                                        |
# ++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
#--------------------------------------------------------------------------
#|1.1 Build the full train data set: subject, activity code and the       |
#|    measurement columns (563 columns in total).                         |
#--------------------------------------------------------------------------
# read train measurements
trainMeasurements <-
  read.table("UCI HAR Dataset/train/X_train.txt", header = FALSE)
# read the activity codes matching the train measurements
trainActivity<-read.table("UCI HAR Dataset/train/Y_train.txt", header = FALSE)
# read the subject ids for the train data
trainSubjects <- read.table("UCI HAR Dataset/train/subject_train.txt", header = FALSE)
# build the complete train data frame: [subject, activity, measurements]
trainMeasurements <-cbind(trainSubjects,trainActivity,trainMeasurements)
#--------------------------------------------------------------------------
#|1.2 Build the full test data set: subject, activity code and the        |
#|    measurement columns (563 columns in total).                         |
#--------------------------------------------------------------------------
# read test measurements
testMeasurements<-
  read.table("UCI HAR Dataset/test/X_test.txt", header = FALSE)
# read the activity codes matching the test measurements
testActivity<-read.table("UCI HAR Dataset/test/Y_test.txt", header = FALSE)
# read the subject ids for the test data
testSubjects <- read.table("UCI HAR Dataset/test/subject_test.txt", header = FALSE)
# build the complete test data frame: [subject, activity, measurements]
testMeasurements <-cbind(testSubjects,testActivity,testMeasurements)
#----------------------------------------------------------------
#|1.3 Stack the two data frames into one complete data frame    |
#----------------------------------------------------------------
allMeasurements <-rbind(trainMeasurements,testMeasurements)
# +++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
# |4. Appropriately label the data set with descriptive variable names  |
# |   (Requirement no. 4)                                               |
# +++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
# read feature labels
# NOTE(review): featureLabels was already read above; this re-read is
# redundant (but harmless).
featureLabels<-
  read.table("UCI HAR Dataset/features.txt", header = FALSE)
# labels for the two columns prepended to the data frame (subject, activity)
cols1 <- data.frame( c(0L,0L), c("Subject","Activity"))
names(cols1)<-names(featureLabels)
# full label data frame: Subject, Activity, feature1 ... feature561
dataLabels <-rbind(cols1,featureLabels)
#colnames(allMeasurements)<-featureLabels[,2]
colnames(allMeasurements)<-dataLabels[,2]
# ++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
# |2. Extract only the measurements on the mean and standard deviation |
# |   for each measurement (Requirement no. 2); meanFreq and the       |
# |   angle() mean variables are intentionally excluded.               |
# ++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
# build logical indices of the "mean()" and "std()" columns
meanIndices <- grepl("mean\\(\\)", names(allMeasurements),ignore.case=TRUE)
stdIndices <- grepl("std\\(\\)", names(allMeasurements),ignore.case=TRUE)
combindIndices <- meanIndices | stdIndices
# always keep the "Subject" and "Activity" columns
combindIndices[1] <- TRUE
combindIndices[2] <- TRUE
meanAndStdMeasurements <- allMeasurements[, combindIndices]
# ++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
# |3. Use descriptive activity names to name the activities (Req. no. 3)   |
# ++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
# map each activity code (column 2) to its descriptive name
namedActivities <-
  activityLabels[match(meanAndStdMeasurements[,2], activityLabels[,1]), 2]
# replace the activity codes by the activity names
meanAndStdMeasurements[,2]<-namedActivities
# +++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
# |5. From the data set in step 4, create a second, independent tidy data   |
# |   set with the average of each variable per activity and subject        |
# +++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
# average every measurement column per (Subject, Activity) pair
averageActivities <-
  ddply(meanAndStdMeasurements, .(Subject, Activity), colwise(mean))
# write the output file
# NOTE(review): "row.name" relies on R's partial matching of write.table's
# "row.names" argument -- spell it out when editing.
write.table(averageActivities, file="averageActivities.txt", row.name=FALSE)
# (removed stray GitHub placeholder text "Enter file contents here" -- it is not valid R and would break source())
|
2f4f269d387232b43d13d686f68aa18d344fd0a7 | d36160653d0352aae2d32bb4fe92d9c60a9a759a | /R/R Data Analysis/Supervised.R | 6f746f7453b3fd8435cd77e9bb97e14cbf2b32f7 | [] | no_license | SamW18/DataExploration | bcfc5a4fcafc46c6c2600561c6ad967509818cf6 | ea7b697927cf30a7bd57c9e94e52ae9f43d31e8e | refs/heads/master | 2020-08-01T09:35:24.034631 | 2019-12-18T18:45:10 | 2019-12-18T18:45:10 | 210,952,581 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 1,816 | r | Supervised.R | library(ggplot2)
library(MASS)
# Regression warm-up on the Boston housing data: predict median value (medv).
tmp= MASS::Boston
View(tmp)
# mean medv by the Charles-river indicator (chas)
working_data= aggregate(tmp$medv, by = list(tmp$chas), FUN=mean)
working_data
# NOTE(review): aggregate() names its output columns Group.1 / x, but the
# aes() below references Charles / Median_Value -- confirm the columns are
# renamed elsewhere, otherwise this ggplot call fails.
ggplot(working_data, aes(x=as.factor(Charles), y=Median_Value)) + geom_bar(stat="identity") +geom_text(aes(label=paste("$",floor(Median_Value))))
# 75/25 train/test split of the Boston data
split= .75
smp_size= floor(split *nrow(Boston))
set.seed(123)
train_ind= sample(seq_len(nrow(Boston)), size=smp_size)
train = Boston[train_ind,]
test= Boston[-train_ind, ]
# NOTE(review): attach() is discouraged; prefer lm(medv ~ chas, data = train).
attach(train)
sam.lm.fit= lm(medv~chas)
# NOTE(review): data.frame(chas~test$chas) wraps a formula in a data.frame,
# which is almost certainly not the intended newdata argument -- verify.
test$sam_test= data.frame(predict(sam.lm.fit,data.frame(chas~test$chas)))
plot(test$medv, test$sam_test)
### Classification Problem
data(BreastCancer, package= "mlbench")
# only keep complete rows
bc= BreastCancer[complete.cases(BreastCancer),]
# remove the ID column
bc= bc[,-1]
# inspect the structure of the data frame
str(bc)
# convert the nine predictor factors to numeric
for (i in 1:9)
{
  bc[,i]= as.numeric(as.character(bc[,i]))
}
# recode the outcome to 1 (malignant) / 0 (benign)
bc$Class= ifelse(bc$Class== "malignant", 1,0)
bc$Class= factor(bc$Class, levels=c(0,1))
library(caret)
# define a "not contained in" operator
'%ni%' = Negate('%in%')
options(scipen=999)
# stratified 70/30 train/test partition
set.seed(100)
trainDataIndex=createDataPartition(bc$Class, p=.7, list= F)
trainDataIndex
trainData= bc[trainDataIndex,]
testData= bc[-trainDataIndex,]
table(trainData$Class)
# down-sample the majority class to balance the training set
set.seed(100)
down_train= downSample(x=trainData[,colnames(trainData)[1:9]], y= trainData$Class)
View(down_train)
# logistic regression on three cell-morphology predictors
logitmod = glm(Class~ Cl.thickness+ Cell.size + Cell.shape, family= "binomial", data= down_train)
# predicted probabilities on the held-out test set
pred= predict(logitmod, newdata= testData, type= "response")
pred
# threshold at 0.5 to obtain class labels
y_pred_num = ifelse(pred > .5, 1,0)
y_pred= factor(y_pred_num, levels=c(0,1))
y_act= testData$Class
# accuracy and full confusion matrix
mean(y_pred==y_act)
confusionMatrix(y_pred,y_act)
|
1ffd43e4fe891b40051e878d4ce5cefa73e306ef | 753e3ba2b9c0cf41ed6fc6fb1c6d583af7b017ed | /service/paws.opsworks/man/describe_stack_summary.Rd | b675529a7118a36a6404299c8e809e62ac2d8bd7 | [
"Apache-2.0"
] | permissive | CR-Mercado/paws | 9b3902370f752fe84d818c1cda9f4344d9e06a48 | cabc7c3ab02a7a75fe1ac91f6fa256ce13d14983 | refs/heads/master | 2020-04-24T06:52:44.839393 | 2019-02-17T18:18:20 | 2019-02-17T18:18:20 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | true | 1,041 | rd | describe_stack_summary.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/paws.opsworks_operations.R
\name{describe_stack_summary}
\alias{describe_stack_summary}
\title{Describes the number of layers and apps in a specified stack, and the number of instances in each state, such as running_setup or online}
\usage{
describe_stack_summary(StackId)
}
\arguments{
\item{StackId}{[required] The stack ID.}
}
\description{
Describes the number of layers and apps in a specified stack, and the number of instances in each state, such as \code{running_setup} or \code{online}.
}
\details{
\strong{Required Permissions}: To use this action, an IAM user must have a Show, Deploy, or Manage permissions level for the stack, or an attached policy that explicitly grants permissions. For more information about user permissions, see \href{http://docs.aws.amazon.com/opsworks/latest/userguide/opsworks-security-users.html}{Managing User Permissions}.
}
\section{Accepted Parameters}{
\preformatted{describe_stack_summary(
StackId = "string"
)
}
}
|
05ff743604d95b6bd64d7e60b8c63cdf4f8a1a8f | e92ef6cb991b50f911b128ffac7befea8bf5416a | /Plot1.R | 36a8de0d4d9cbebb0a634dd5147ffb50fae4df88 | [] | no_license | HarmLammers/ExData_Plotting1 | 8c4930509df89fb5bd27e58b4c19d615722d128c | 1250643519d3a6c9ecc7f67b764c3b397de51466 | refs/heads/master | 2021-01-12T13:08:16.913860 | 2016-03-27T10:32:13 | 2016-03-27T10:32:13 | 54,798,084 | 0 | 0 | null | 2016-03-26T20:25:49 | 2016-03-26T20:25:49 | null | UTF-8 | R | false | false | 1,259 | r | Plot1.R | setwd("C:/AA Bestanden/MOOC Data Science - Johns Hopkins")
## Reading datafile
FileName <- "household_power_consumption.txt"
Data <- read.csv(FileName, header = TRUE, sep=";", na.strings="?", dec=".", stringsAsFactors = FALSE, comment.char="", quote ='\"')
# summary(Data)
# tail(Data)
## Subsetting data - limit to first two days of february 2007
SubSetData <- Data[Data$Date %in% c("1/2/2007","2/2/2007") ,]
# dim(SubSetData)
rm(Data)
## Create Date as Date-object
SubSetData$Date <- as.Date(SubSetData$Date, format="%d/%m/%Y")
## Converting dates and corresponding times to DateTime
timestamp <- paste(SubSetData$Date, SubSetData$Time)
SubSetData$Datetime <- as.POSIXct(timestamp)
## Make Histogram
windows()
hist(SubSetData$Global_active_power, main = "Global Active Power", xlab = "Global Active Power (kilowatts)", col="Red")
# dev.off()
## Saving screenplot to file
# dev.copy(png, file="plot1.png", height=480, width=480)
# dev.off()
## there's some trouble in the layout of the copy; so better make png directly.
## Make Histogram
png("Plot1.png", width = 480, height=480)
hist(SubSetData$Global_active_power, main = "Global Active Power", xlab = "Global Active Power (kilowatts)", col="Red")
dev.off()
|
ad59f12b02ecf7238a90adcd834272ce9462e1f1 | c91b227c26552d207765aee509d896d1daa424a3 | /R/MC_util.R | de689939b5914a88134f14697967adf973b1cf46 | [] | no_license | cran/amei | c3827194ce5c6fdd2f3ef4b13be5cc7a2ee9964e | 6ec4dc1abc827534b1920db6d4911e38cd16224c | refs/heads/master | 2016-09-06T07:55:28.224132 | 2013-12-13T00:00:00 | 2013-12-13T00:00:00 | 17,694,356 | 1 | 0 | null | null | null | null | UTF-8 | R | false | false | 8,214 | r | MC_util.R | SimulateEpidemic <- function(init,T,params,vacc,vaccstop,costs,starttime, ...)
{
if(!is.function(params)) {
soln = .C("RSimulateEpidemic",
S = as.integer(rep(init$S,T)),
I = as.integer(rep(init$I,T)),
R = as.integer(rep(init$R,T)),
D = as.integer(rep(init$D,T)),
V = as.integer(rep(0,T)),
C = as.double(rep(0,T)),
T = as.integer(T),
b = as.double(params$b),
k = as.double(params$k),
nu = as.double(params$nu),
mu = as.double(params$mu),
vacc = as.double(vacc),
vaccstop = as.integer(vaccstop),
cvacc = as.double(costs$vac),
cdeath = as.double(costs$death),
cinfected = as.double(costs$infect),
starttime = as.integer(starttime),
PACKAGE = "amei")
} else {
## params is an epistep
epistep <- params
## initialize the soln data frame
soln <- init.soln(init, T)
## initialize the prime data frame
prime <- data.frame(matrix(0, ncol=2, nrow=T))
names(prime) <- c("S", "I")
## get the initial last setting in epistep
last <- formals(epistep)$last
for( i in 2:T ){
## deal with starttime
if(i <= starttime) { VAC <- 0; STOP <- 0 }
else { VAC <- vacc; STOP <- vaccstop }
## treat params like an epistep function
out <- epimanage(soln=soln, epistep=epistep, i=i, VAC=VAC, STOP=STOP,
last=last, ...)
last <- out$last
out <- out$out
## update (prime) totals
prime[i,] <- epi.update.prime(soln[i-1,], out)
## update (soln) totals
soln[i,] <- epi.update.soln(soln[i-1,], out, costs)
}
}
list(S = soln$S, I=soln$I, R=soln$R, D=soln$D, V=soln$V, C=soln$C)
}
## ok, if midepidemic is true, then we're just going to accept all epidemics
## if midepidemic is false, then we're going to assume we're at the beginning
## of an epidemic, and that starttime is something >0. in this case, we'll
## throw away any epidemics for which the number of infecteds hasn't grown
## by startime.
## VarStopTimePolicy:
##
## Monte Carlo evaluation (MCvits simulated epidemics per grid point) of the
## cost of every static vaccination policy on the grid Vprobs x Vstops, via
## the compiled "RVarStopTimePolicy" routine in the amei shared library.
## Each epidemic starts from S0 susceptibles and I0 infecteds, runs for T
## steps under SIR rates (b, k, nu, mu), and accumulates the unit costs
## cvacc / cdeath / cinfected.  `midepidemic` and `starttime` control
## whether simulated epidemics are screened for take-off (see the comment
## block above this function).
##
## Returns the estimated cost surface as a length(Vprobs) x length(Vstops)
## matrix (rows indexed by Vprobs, columns by Vstops).
VarStopTimePolicy <- function(S0,I0,T,b,k,nu,mu,cvacc,cdeath,cinfected,
                              MCvits,Vprobs,Vstops,midepidemic,starttime)
  {
    cout = .C("RVarStopTimePolicy",
              S0 = as.integer(S0),
              I0 = as.integer(I0),
              T = as.integer(T),
              b = as.double(b),
              k = as.double(k),
              nu = as.double(nu),
              mu = as.double(mu),
              cvacc = as.double(cvacc),
              cdeath = as.double(cdeath),
              cinfected = as.double(cinfected),
              MCvits = as.integer(MCvits),
              Vprobs = as.double(Vprobs),
              nVprobs = as.integer(length(Vprobs)),
              Vstops = as.integer(Vstops),
              nVstops = as.integer(length(Vstops)),
              EC = double(length(Vprobs)*length(Vstops)),
              midepidemic = as.integer(midepidemic),
              starttime = as.integer(starttime),
              PACKAGE = "amei")
    ## EC is filled row-major by the C code, hence byrow=TRUE
    C <- matrix(cout$EC,length(Vprobs),length(Vstops),byrow=TRUE)
    return(C)
  }
## SimulateManagementQuantiles:
##
## Repeats the adaptive-management simulation manage() nreps times and
## summarizes, pointwise over the Time steps, the epidemic series
## (S, I, R, D, V, C) and the chosen policies (vaccination fraction and
## stop level) by their lowerq-quantile, mean, median and upperq-quantile.
## Returns a list with components Q1, Mean, Median and Q3; each is a data
## frame with columns S, I, R, D, V, C, frac and stop.
SimulateManagementQuantiles <- function(epistep,Time,init, pinit, hyper, vac0,
                                        costs, start, MCvits, MCMCpits, bkrate, vacsamps,
                                        vacgrid, nreps,lowerq, upperq, verb=FALSE, ...)
  {
    ## one Time x nreps matrix per tracked series, filled column by column
    series <- c("S", "I", "R", "D", "V", "C")
    sims <- lapply(series, function(s) matrix(0, nrow=Time, ncol=nreps))
    names(sims) <- series
    ## chosen policies: Time x (frac, stop) x replication
    pols <- array(0, c(Time, 2, nreps))
    for(r in 1:nreps)
      {
        if(verb) cat("*** Simulating epidemic",r,"***\n")
        run <- manage(epistep=epistep,pinit=pinit,T=Time,Tstop=Time,init=init,
                      hyper=hyper, vac0, costs=costs, MCMCpits=MCMCpits, bkrate=bkrate,
                      vacsamps=vacsamps, vacgrid=vacgrid, start=start, ...)
        for(s in series) sims[[s]][,r] <- run$soln[[s]]
        pols[,,r] <- as.matrix(run$pols)
      }
    ## pointwise (per time step) summary of all replications with statistic f
    summarize <- function(f, ...)
      {
        out <- lapply(sims, function(m) apply(m, 1, f, ...))
        pol <- apply(pols, c(1,2), f, ...)
        data.frame(S=out$S, I=out$I, R=out$R, D=out$D, V=out$V, C=out$C,
                   frac=pol[,1], stop=pol[,2])
      }
    list(Q1     = summarize(quantile, prob=lowerq),
         Mean   = summarize(mean),
         Median = summarize(median),
         Q3     = summarize(quantile, prob=upperq))
  }
## SimulateEpidemicQuantiles:
##
## Runs nreps independent epidemics via SimulateEpidemic() under a fixed
## vaccination policy (vacc, vaccstop) and summarizes the S/I/R/D series,
## vaccination counts and accumulated costs pointwise over time by their
## lowerq-quantile, mean, median and upperq-quantile.
##
## When midepidemic=FALSE the simulation is assumed to start before an
## outbreak, so each realization is re-drawn (up to 100 tries) until the
## number of infecteds has grown beyond init$I by time starttime-1; after
## 100 failed tries a warning is printed and the last realization is kept.
## When midepidemic=TRUE every realization is accepted as-is.
##
## BUG FIX: the previous implementation never exited its acceptance loop
## when midepidemic=TRUE (the flag terminating the while-loop was only
## updated inside an if(!midepidemic) branch), causing an infinite loop.
SimulateEpidemicQuantiles <- function(init,T,params,vacc,vaccstop,costs,
                                      nreps,lowerq,upperq,midepidemic,starttime)
  {
    ## storage: one T x nreps matrix per tracked series
    Sall <- matrix(0,nrow=T,ncol=nreps)
    Iall <- matrix(0,nrow=T,ncol=nreps)
    Rall <- matrix(0,nrow=T,ncol=nreps)
    Dall <- matrix(0,nrow=T,ncol=nreps)
    Vall <- matrix(0,nrow=T,ncol=nreps)
    Call <- matrix(0,T,nreps)
    for(n in 1:nreps)
      {
        attempts <- 0
        repeat
          {
            tmpsim <- SimulateEpidemic(init,T,params,vacc,vaccstop,costs,starttime)
            ## mid-epidemic: accept every realization
            if(midepidemic) break
            attempts <- attempts + 1
            ## beginning of an epidemic: only accept realizations where
            ## infection has grown by just before starttime
            if(tmpsim$I[starttime-1] > init$I) break
            if(attempts == 100)
              {
                cat("Warning: <1% chance of an epidemic\n")
                break
              }
          }
        Sall[,n] <- tmpsim$S
        Iall[,n] <- tmpsim$I
        Rall[,n] <- tmpsim$R
        Dall[,n] <- tmpsim$D
        Vall[,n] <- tmpsim$V
        Call[,n] <- tmpsim$C
      }
    ## pointwise (per time step) summary across the nreps realizations
    summarize <- function(f, ...)
      data.frame(S=apply(Sall,1,f,...), I=apply(Iall,1,f,...),
                 R=apply(Rall,1,f,...), D=apply(Dall,1,f,...),
                 V=apply(Vall,1,f,...), C=apply(Call,1,f,...))
    list(Q1     = summarize(quantile, prob=lowerq),
         Mean   = summarize(mean),
         Median = summarize(median),
         Q3     = summarize(quantile, prob=upperq))
  }
|
16e81920a19ac022e9fb7cd7c083dd090efab168 | 679e870b7678f4f3f54246125b89a9d7ae3cf0d5 | /R/AWRstudioAddins.R | 22bda163cb0e1c5e5be9ab0bea418744af1cab2d | [] | no_license | afwillia/AWRstudioAddins | c6c5308219920ad9968f5d80fde09934497323a9 | b0a4eaaf4c14b49763407c3a887465c10caa5ec2 | refs/heads/master | 2021-01-10T14:55:41.127103 | 2016-02-10T18:50:36 | 2016-02-10T18:50:36 | 51,463,522 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 633 | r | AWRstudioAddins.R | #' A Function for Generating Script Header Labels
#'
#' This function adds a header with title, date, and summary to cursor location
#' @param NULL no arguments
#' @keywords Header
#' @export
#' @examples
#' insertScriptHeader
insertScriptHeader <- function() {
  # Three-line comment header skeleton, inserted at the current cursor
  # position of the active RStudio document.
  header_text <- "# Filename:\n# Date:\n# Summary: "
  rstudioapi::insertText(header_text)
}
#' A Function for Generating R Markdown Header Labels
#'
#' This function adds a basic Rmd header to cursor location
#' @param NULL no arguments
#' @keywords Header
#' @export
#' @examples
#' insertRmdHeader
insertRmdHeader <- function() {
  # YAML front-matter skeleton for an R Markdown document, inserted at the
  # current cursor position of the active RStudio document.
  rmd_header <- "---\ntitle:\ndate:\noutput:\n---"
  rstudioapi::insertText(rmd_header)
}
|
b0c5f92d51e098c212bcc0dd5b67ef77723427db | 4dc0121f00fed79e6ef350f65639d90aaa9ce324 | /man/diff_graph.Rd | d1f765ab74a1631abf09dd74753e2786481dca36 | [] | no_license | hd2326/pageRank | c1abb0b86fa57c4963dc8b1d0cae98a85196a433 | fb96e50e8add16df5223a7f46eb252016dc4538e | refs/heads/master | 2023-01-31T22:07:55.614090 | 2020-12-16T01:36:18 | 2020-12-16T01:36:18 | 292,374,830 | 0 | 2 | null | null | null | null | UTF-8 | R | false | true | 1,159 | rd | diff_graph.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/diff_graph.R
\name{diff_graph}
\alias{diff_graph}
\title{Calculate Temporal PageRank from Two Graphs}
\usage{
diff_graph(graph1, graph2)
}
\arguments{
\item{graph1}{(igraph) The 1st graph.}
\item{graph2}{(igraph) The 2nd graph.}
}
\value{
(igraph) Network graph1-graph2 with "moi (mode of interaction)" and
"pagerank" as edge and vertex attributes.
}
\description{
Calculate temporal PageRank by changing edges between graph1 and graph2.
This is a simplified version of temporal PageRank described by Rozenshtein
and Gionis, by only analyzing temporally adjacent graph pairs.
}
\examples{
library(igraph)
set.seed(1)
graph1 <- igraph::erdos.renyi.game(100, 0.01, directed = TRUE)
igraph::V(graph1)$name <- 1:100
set.seed(2)
graph2 <- igraph::erdos.renyi.game(100, 0.01, directed = TRUE)
igraph::V(graph2)$name <- 1:100
diff_graph(graph1, graph2)
}
\references{
Rozenshtein, Polina, and Aristides Gionis. "Temporal pagerank."
Joint European Conference on Machine Learning and Knowledge Discovery in
Databases. Springer, Cham, 2016.
}
\author{
DING, HONGXU (hd2326@columbia.edu)
}
|
5f6fed88e80852768a9e41324b53eb6f4ce2fb7f | 74f5847952b0e6ccc81178407944a9adb2a186c2 | /0_Scripts/9h_AnalyzeAlphaDiversityCofactors.r | 2f6170e9016925725965449e277dc90936bdf647 | [] | no_license | wallacelab/paper-maize-phyllosphere-2014 | b439e0cfbf62e86a634c094196a95e6985ff799c | a0212ca728e2088f6255eb1c051f84e7fdf13d0b | refs/heads/master | 2021-09-20T05:19:06.076797 | 2018-08-03T17:27:26 | 2018-08-03T17:27:26 | 114,800,257 | 1 | 0 | null | null | null | null | UTF-8 | R | false | false | 5,158 | r | 9h_AnalyzeAlphaDiversityCofactors.r | #! /usr/bin/env Rscript
library(argparse)
library(car) # For Type II ANOVA
library(colorspace) # For plot of variance explained
options(stringsAsFactors=F)
parser=ArgumentParser()
parser$add_argument("-i", "--infile", help="Input table of alpha diversity stats")
parser$add_argument("-o", "--outprefix", help="Output file prefix")
parser$add_argument("-m", "--mapfile", help="QIIME mapping file")
parser$add_argument("-f", "--factors", nargs="*", default="Description", help="Which factors to test for differences")
parser$add_argument("--hidden-factors", default=FALSE, action="store_true", help="Flag to perform hidden factor analysis on the chosen cofactors (= fit their principal components)")
args=parser$parse_args()
# setwd('/home/jgwall/Projects/282MaizeLeaf16s_cleaned/9_PrettyGraphics/9f_AlphaDiversity/')
# args=parser$parse_args(c("-i","9f_alpha_diversity_normalized_data.txt","-o",'99_tmp','-m', '../../2_QiimeOtus/2f_otu_table.sample_filtered.no_mitochondria_chloroplast.key.tsv', '-f', 'subpop', 'date', '--hidden-factors'))
# Load data
cat("Analyzing alpha diversity for",args$infile,"\n")
alpha=read.delim(args$infile, row.names=1)
key=read.delim(args$mapfile, row.names=1)
# Match up to key
keymatch = match(rownames(alpha), rownames(key))
key = key[keymatch,]
data = data.frame(alpha, key)
# Helper function to run analysis and write output
## get_significance: fit a linear model for every alpha-diversity metric as
## a function of the given cofactors, run a Type II ANOVA (car::Anova) on
## each, write a table of per-cofactor p-values to <outprefix>.pvals.txt,
## plot p-values and variance fractions to <outprefix>.pvals.png, and
## return, per metric, the fraction of total variance explained by the
## cofactors (1 - residual SS fraction).
##
## NOTE(review): the list of metrics comes from the global data frame
## `alpha` (via names(alpha)) rather than from the `data` argument --
## confirm this is intended before reusing elsewhere.
get_significance=function(data, cofactors, outprefix){
    # Go through and test factors for differences
    cat("\tCalculating Type II SS significance for",cofactors,"\n")
    cofactors = paste(cofactors, collapse=" + ")
    # Fit <metric> ~ <cofactor1> + <cofactor2> + ... and take the Type II ANOVA
    anovas = lapply(names(alpha), function(metric){
        myformula = paste(metric,"~",cofactors)
        mymodel = lm(myformula, data=data)
        myanova = Anova(mymodel, type=2)
    })
    # Extract p-values and turn into a useful data frame
    pvals = lapply(anovas, function(x){
        x = as.data.frame(x)
        x = subset(x, select=names(x) == 'Pr(>F)')
    })
    pvals = do.call(cbind, pvals)
    # Extract sums of squares, normalized to fractions of the total
    ss = lapply(anovas, function(x){
        x = as.data.frame(x)
        x = subset(x, select=names(x) == 'Sum Sq')
        x = x / sum(x)
    })
    ss = do.call(cbind, ss)
    # Format for output (rows = metrics, columns = cofactors)
    names(pvals) = names(alpha)
    pvals=as.data.frame(t(pvals))
    pvals$Residuals=NULL
    outfile = paste(outprefix, ".pvals.txt", sep="")
    write.table(pvals, file=outfile, sep='\t', row.names=T, col.names=T, quote=F)
    # Fraction of total variance explained = 1 - residual fraction
    fractional = 1- ss["Residuals",]
    names(fractional) = names(alpha)
    # Plot for an easy visual check
    outpng = paste(outprefix, ".pvals.png", sep="")
    ss=as.data.frame(t(ss))
    ss$Residuals=NULL
    png(outpng, width=500, height=1000)
    par(mfrow=c(2,1))
    boxplot(-log10(pvals), xlab="cofactor", ylab="-log10 pvalues")
    # (typo fix: "Faction" -> "Fraction" in the axis label)
    boxplot(ss, xlab="cofactor", ylab="Fraction total variance explained")
    dev.off()
    return(fractional)
}
# Get directly on selected factors, and save the proportion total variance explained
cofactor_fractional = get_significance(data=data, cofactors=args$factors, outprefix=args$outprefix)
rownames(cofactor_fractional)[1] = "raw_cofactors"
# Perform hidden factor analysis if requested
fractionals = list() # List to save proportion variance explained
if(args$hidden_factors){
cat("Peforming hidden factor analysis (=principal components of the cofactors)\n")
cofactor_set=subset(data, select=names(data)%in%args$factors)
cofactor_set = as.data.frame(lapply(cofactor_set, as.factor))
cofactor_formula = formula(paste("dummy", paste(args$factors, collapse=" + "), sep=" ~ "))
dummy=rep(0, nrow(cofactor_set))
cofactor_matrix=model.matrix(cofactor_formula, data=cofactor_set)[,-1] # Remove 1st column because is the intercept
for(n in 1:ncol(cofactor_matrix)){
hiddens = tryCatch({factanal(cofactor_matrix[,-1], factors=n, scores="regression")$scores}, error=function(x){return(NA)})
if(class(hiddens)=="logical" && is.na(hiddens)){ # Check if failed
cat("\tUnable to do hidden factor analysis with",n,"hidden factors given specified covariates\n")
next
}
hiddens=data.frame(hiddens)
names(hiddens) = paste("HF",n,names(hiddens), sep="")
myfactors = names(hiddens)
newdata=data.frame(data, hiddens)
fractionals[[n]] = get_significance(data=newdata, cofactors=myfactors, outprefix=paste(args$outprefix, ".hf",n, sep=""))
rownames(fractionals[[n]]) = paste("hidden_factors",n, sep="")
}
}
# Collate proportion variance explained
variance_explained = do.call(rbind, fractionals)
if(is.null(variance_explained)){ variance_explained = cofactor_fractional # Conditional to handle if hidden factor analysis was skipped
} else {variance_explained = rbind(cofactor_fractional, variance_explained) }
# Plot proportion variance explained
for( i in 1:2){
if(i==1){png(paste(args$outprefix, ".variance_explained.png", sep=""), width=16, height=6, res=150, units='in')
}else{svg(paste(args$outprefix, ".variance_explained.svg", sep=""), width=16, height=6)}
par(mar=c(12,4,4,1), las=2, font.lab=2, font.axis=2)
colors = rainbow_hcl(nrow(variance_explained))
barplot(as.matrix(variance_explained), ylab="% variance explained", beside=T, legend=T, col=colors, args.legend=list(x='topleft', cex=0.6))
dev.off()
} |
a738f9e8c6cbe01cc6ffae8ab09ee7fdc9d9c641 | 5b5f5457db245c5bf7d3bc0c77c3c7dce6dccb6e | /Try.R | 72c68c82488aba8cdc367f87e9573a4de49b8c47 | [] | no_license | eibm00/Payment_Delay | 2fd2413346fe683d22a39ac9aae38fc9aafe5e96 | 1b28a7bb04920ed50e7ccfc44de119e1075d3582 | refs/heads/main | 2023-01-19T23:17:06.612603 | 2020-11-21T16:21:35 | 2020-11-21T16:21:35 | 314,752,578 | 0 | 0 | null | 2020-11-21T06:55:12 | 2020-11-21T06:55:11 | null | UTF-8 | R | false | false | 23,590 | r | Try.R |
### Dependencies ---------------------------------------------------------------
rm(list = ls()) # clear
if (!require(tidyverse)) install.packages("tidyverse")
if (!require(naniar)) install.packages("naniar")
if (!require(styler)) install.packages("styler")
if (!require(GGally)) install.packages("GGally")
if (!require(skimr)) install.packages("skimr")
library(tidyverse)
library(naniar)
library(styler)
library(GGally)
library(skimr)
### Load the initial data ------------------------------------------------------
# Put data files outside of the git folder in order to avoid pushing too large
# files to repository
# path_to_data <- 'D:/..../payment_dates_final.csv'
path_to_data <- "D:/01 Skola VSE Statistika/DataX/zaverecny projekt/payment_dates_final.csv"
#path_to_data <- "..\\payment_dates_final.csv"
data_collection <- read.csv(path_to_data)
### Data understanding ---------------------------------------------------------
# Data description -------------------------------------------------------------
# Data volume (number of rows and columns).
# NOTE(review): these bindings shadow base::nrow()/base::ncol() with plain
# numbers. Later calls such as nrow(df) still work because R skips
# non-function bindings when resolving a call, but the names are unfortunate.
nrow <- nrow(data_collection)
ncol <- ncol(data_collection)
# Convert columns to the correct data type. All conversions are gathered into
# a single mutate(); assignments inside one mutate() are evaluated
# sequentially, so the result is identical to the original
# one-mutate-per-column chain.
data_collection <- data_collection %>%
  mutate(
    due_date = as.Date(due_date, format = "%Y-%m-%d"),
    payment_date = as.Date(payment_date, format = "%Y-%m-%d"),
    product_type = as.factor(product_type),
    contract_status = as.factor(contract_status),
    business_discount = as.factor(business_discount),
    gender = as.factor(gender),
    marital_status = as.factor(marital_status),
    clients_phone = as.factor(clients_phone),
    client_mobile = as.factor(client_mobile),
    client_email = as.factor(client_email),
    # Earnings arrive as 11 ordered codes; attach explicit labels.
    total_earnings = factor(total_earnings, labels = c(
      "level1", "level2", "level3", "level4",
      "level5", "level6", "level7", "level8",
      "level9", "level10", "not_declared"
    )),
    living_area = as.factor(living_area),
    different_contact_area = as.factor(different_contact_area),
    kc_flag = as.factor(kc_flag),
    # Problem! cf_val appears to not be a factor, despite the description file !!!
    cf_val = as.numeric(cf_val),
    kzmz_flag = as.factor(kzmz_flag),
    due_amount = as.numeric(due_amount),
    # Correctly spelled copy of the misspelled source column "payed_ammount".
    payed_amount = as.numeric(payed_ammount)
  )
# Remove feature "payed_ammount" which was replaced by feature "payed_amount"
data_collection <- subset(data_collection, select = -payed_ammount)
# Create a feature for delay in days (payment_date - due_date).
# BUG FIX: the original call passed a stray, undefined `tz` symbol as the
# third positional argument of difftime(). Both inputs are Date objects, so
# no time-zone argument is needed; supplying the bare symbol either errors
# ("object 'tz' not found") or silently picks up an unrelated `tz` function.
data_collection$delay <- difftime(data_collection$payment_date,
  data_collection$due_date,
  units = "days"
)
data_collection <- data_collection %>%
  mutate(delay = as.numeric(delay))
# Display the internal structure of the data
str(data_collection)
# Analyze correlations among numeric features.
# The matrices are deliberately over-allocated to choose(ncol, 2) rows (the
# maximum number of column pairs); unused rows stay NA and are filtered out
# below. `iteration` indexes rows of those matrices.
# NOTE(review): the outer loop starts at column 9 — presumably the first 8
# columns are identifiers/dates not worth correlating; confirm against the
# data dictionary.
corr_pairs_name <- matrix(nrow = choose(ncol(data_collection), 2), ncol = 3)
numeric_rel <- matrix(nrow = choose(ncol(data_collection), 2), ncol = 2)
corr_vector <- vector("integer")
iteration <- 0
for (i in c(9:(ncol(data_collection) - 1))) {
  if (is.numeric(data_collection[, i])) {
    for (j in c((i + 1):ncol(data_collection))) {
      iteration <- iteration + 1
      if (is.numeric(data_collection[, j])) {
        # Pearson correlation test for the (i, j) column pair.
        correlation <- cor.test(
          x = data_collection[, i],
          y = data_collection[, j]
        )
        # Keep only pairs significant at the 5 % level.
        if (correlation$p.value <= 0.05) {
          numeric_rel[iteration, ] <- c(i, j)
          corr_pairs_name[iteration, 1] <- names(data_collection)[i]
          corr_pairs_name[iteration, 2] <- names(data_collection)[j]
          corr_pairs_name[iteration, 3] <- round(correlation$estimate, digits = 4)
          # Remember the column indices involved in any significant pair.
          corr_vector <- c(corr_vector, i, j)
        }
      }
    }
  }
}
# Save the pairs of numeric features that are correlated into a data frame;
# drop the all-NA rows left over from the over-allocation.
corr_pairs_name <- as.data.frame(corr_pairs_name)
corr_pairs_name <- corr_pairs_name %>%
  filter_all(any_vars(!is.na(.)))
numeric_rel <- as.data.frame(numeric_rel)
numeric_rel <- numeric_rel %>%
  filter_all(any_vars(!is.na(.)))
# Create correlation plots and export them into PNG files
corr_vector <- unique(corr_vector)
# NOTE(review): par(mfrow = ...) only affects base graphics; it has no effect
# on the ggplot objects saved below and could be removed.
par(mfrow = c(length(corr_vector), length(corr_vector)))
# One scatter plot per significant numeric pair; nrow() here still resolves
# to base::nrow() despite the numeric `nrow` variable defined earlier.
for (i in 1:nrow(numeric_rel)) {
  x <- data_collection[, numeric_rel[i, 1]]
  y <- data_collection[, numeric_rel[i, 2]]
  g <- ggplot(data_collection, aes(x, y)) +
    geom_point(size = 1) +
    xlab(names(data_collection)[numeric_rel[i, 1]]) +
    ylab(names(data_collection)[numeric_rel[i, 2]])
  # Written into the current working directory as correlation_<i>.png.
  ggsave(filename = paste0("correlation_", i, ".png"), g, width = 14, height = 10, units = "cm")
}
# Examine relationship between categorical features using chi-squared test with
# the significance level 0.05
# Overestimating the matrix size saves time compared to building the matrix one
# row at a time:
categorical_rel <- matrix(nrow = choose(ncol(data_collection), 2), ncol = 2)
cont_vector <- vector("integer")
iteration <- 0
# Walk every ordered pair of columns; only factor-factor pairs are tested.
for (i in c(1:(ncol(data_collection) - 1))) {
  if (is.factor(data_collection[, i])) {
    for (j in c((i + 1):ncol(data_collection))) {
      iteration <- iteration + 1
      if (is.factor(data_collection[, j])) {
        contingency_table <- table(data_collection[, i], data_collection[, j])
        # Pearson chi-squared test without Yates' continuity correction.
        chisq <- (chisq.test(contingency_table, correct = FALSE))
        if (chisq$p.value <= 0.05) {
          categorical_rel[iteration, ] <- c(i, j)
          cont_vector <- c(cont_vector, i, j)
        }
      }
    }
  }
}
# Save the pairs of categorical features that are correlated into a data frame;
# the all-NA rows from the over-allocation are dropped.
categorical_rel <- as.data.frame(categorical_rel)
categorical_rel <- categorical_rel %>%
  filter_all(any_vars(!is.na(.)))
cont_vector <- unique(cont_vector)
# Suggestions: quick per-product aggregates (autoprinted at top level).
data_collection %>%
  group_by(product_type) %>%
  summarize(
    n_records = n(), n_contracts = n_distinct(contract_id),
    mean_payed_amount = mean(payed_amount), min_birth_year = min(birth_year)
  )
# Distribution of the paid amount within each product type.
ggplot(data_collection, aes(x = product_type, y = payed_amount)) +
  geom_boxplot() +
  theme_minimal() +
  labs(
    title = "Boxplot of paid amount by product type",
    x = "Product type", y = "Paid Amount"
  )
# Overall distribution of the paid amount.
ggplot(data_collection, aes(x = payed_amount)) +
  geom_histogram(fill = "limegreen", alpha = 0.5) +
  theme_minimal() +
  labs(
    title = "Histogram of paid amount",
    x = "Paid amount", y = "Count"
  )
# Data exploration--------------------------------------------------------------
# Analyze properties of interesting attributes in detail include graphs and
# plots
# Summary statistics of the data
# Check attribute value ranges, coverage, NAs occurence.
# NOTE(review): this assignment shadows base::summary() with a summary table;
# later calls like summary(DTrain) still resolve to the function.
summary <- summary(data_collection)
print(summary)
detailed_statistics <- skim(data_collection)
print(detailed_statistics)
# Verify data quality ----------------------------------------------------------
# Are there missing values in the data? If so, how are they represented, where
# do they occur, and how common are they?
variables_miss <- miss_var_summary(data_collection)
print(variables_miss)
# different_contract_area missing 20%, cf_val living_area kc_flag missing 19,9%
# 1173 payment date missing, not yet paid
gg_miss_var(data_collection)
# Missingness per variable, faceted by earnings level: do several
# characteristics go missing at the same time?
data_collection %>%
  gg_miss_var(facet = total_earnings)
# TODO: what about NAs in payment_order? Investigate the payment order id!!
# Count rows where all four high-missingness variables are NA simultaneously.
sum(is.na(data_collection$different_contact_area) == T &
  is.na(data_collection$cf_val) == T &
  is.na(data_collection$living_area) == T &
  is.na(data_collection$kc_flag) == T)
sum(is.na(data_collection$kc_flag) == T)
# Conclusion (translated from Czech): of the four variables with the most NA
# records, most NA observations fall on the same rows, which may indicate
# "missing not at random".
# Check for plausibility of values: print the level proportions of every
# factor column. `ncol` is the scalar column count computed earlier.
for (i in c(1:ncol)) {
  if (is.factor(data_collection[, i])) {
    print(colnames(data_collection[i]))
    print(prop.table(table(data_collection[, i])))
    cat(sep = "\n\n")
  }
}
# Check interesting coverage: one bar chart + frequency table per categorical
# feature (all autoprinted at top level). The per-variable comments record
# observations about the distributions.
# most products are type 1
ggplot(data = data_collection, aes(x = product_type)) +
  geom_bar() +
  theme(axis.text.x = element_text(angle = 0, hjust = 1))
table(data_collection$product_type)
# most payment orders have discount
ggplot(data = data_collection, aes(x = business_discount)) +
  geom_bar() +
  theme(axis.text.x = element_text(angle = 0, hjust = 1))
table(data_collection$business_discount)
# contract status mostly 1, then 5,6,8,7 some 2,3,4...What does it mean??
ggplot(data = data_collection, aes(x = contract_status)) +
  geom_bar() +
  theme(axis.text.x = element_text(angle = 0, hjust = 1))
table(data_collection$contract_status)
# marital status mostly 3, some 4,2,6 5 and 1 mostly not...What does it mean?
ggplot(data = data_collection, aes(x = marital_status)) +
  geom_bar() +
  theme(axis.text.x = element_text(angle = 0, hjust = 1))
table(data_collection$marital_status)
# mostly 0 children, drops with number (proportions instead of counts)
prop.table(table(data_collection$number_of_children))
# mostly 1 other product, drops with number
prop.table(table(data_collection$number_other_product))
# almost no email contact
ggplot(data = data_collection, aes(x = client_email)) +
  geom_bar() +
  theme(axis.text.x = element_text(angle = 0, hjust = 1))
table(data_collection$client_email)
# total earning level mostly not declared
ggplot(data = data_collection, aes(x = total_earnings)) +
  geom_bar() +
  theme(axis.text.x = element_text(angle = 0, hjust = 1))
table(data_collection$total_earnings)
# mostly not different contact area
ggplot(data = data_collection, aes(x = different_contact_area)) +
  geom_bar() +
  theme(axis.text.x = element_text(angle = 0, hjust = 1))
table(data_collection$different_contact_area)
# mostly false KC flag - mostly owns local citizenship
ggplot(data = data_collection, aes(x = kc_flag)) +
  geom_bar() +
  theme(axis.text.x = element_text(angle = 0, hjust = 1))
table(data_collection$kc_flag)
# mostly false KZMZ flag mostly did not fill employer
ggplot(data = data_collection, aes(x = kzmz_flag)) +
  geom_bar() +
  theme(axis.text.x = element_text(angle = 0, hjust = 1))
table(data_collection$kzmz_flag)
####### Generate test and train data ########
# fix random generator so the split is reproducible
set.seed(2020)
# NOTE(review): only 8 % of rows go to the training set; a conventional split
# would use 0.8 -- confirm whether 0.08 is intentional.
# `nrow` is the scalar row count computed earlier in the script.
n_train <- round(0.08 * nrow)
index_train <- sample(seq_len(nrow), n_train)
DTrain <- data_collection[index_train, ]
DTest <- data_collection[-index_train, ]
# Summary to find if data have NAs
summary(DTrain)
summary(DTest)
# Detailed summary of data.
# FIX: removed the unconditional install.packages("skimr") + library(skimr)
# pair that used to sit here -- skimr is already installed (if needed) and
# attached at the top of the script, and installing mid-script is a slow
# network side effect.
Dtrainskim <- skim(DTrain)
Dtestskim <- skim(DTest) # renamed from the typo "Dtestskrim" (unused elsewhere)
## See all NAs for all dataset
skim(DTrain)
skim(DTest)
## Create a new data set without NAs
DTrain_new <- na.omit(DTrain)
DTest_new <- na.omit(DTest)
# Number of column and row and summary in data train w-o NAs
dim(DTrain_new) # number of columns and rows for clean data Train
summary(DTrain_new)
# Number of column and row and summary in data test w-o NAs
dim(DTest_new) # number of columns and rows for clean data Test
summary(DTest_new)
# delete NAs in whole data source
data_collection <- na.omit(data_collection)
## Summary for each attribute: build one row per numeric feature with its
## mean, variance, median, quartiles, min and max.
## NOTE(review): the eight parallel c() blocks below repeat the same column
## list; a vapply() over the column names would be shorter, but the exact
## names attached by quantile() feed into data.frame() below, so the literal
## form is kept as-is.
headofTable <- c(
  "Num. of Children", "Num. Other Product", "Year of Birth",
  "Due amount", "payed amount", "delay"
)
# Means of the six numeric columns (order matches headofTable).
EX <- c(
  mean(data_collection$number_of_children),
  mean(data_collection$number_other_product), mean(data_collection$birth_year),
  mean(data_collection$due_amount), mean(data_collection$payed_amount),
  mean(data_collection$delay)
)
# Variances.
VarX <- c(
  var(data_collection$number_of_children),
  var(data_collection$number_other_product), var(data_collection$birth_year),
  var(data_collection$due_amount), var(data_collection$payed_amount),
  var(data_collection$delay)
)
# Medians.
Median <- c(
  median(data_collection$number_of_children),
  median(data_collection$number_other_product),
  median(data_collection$birth_year), median(data_collection$due_amount),
  median(data_collection$payed_amount), median(data_collection$delay)
)
# First quartiles.
Q1 <- c(
  quantile(data_collection$number_of_children, probs = 1 / 4),
  quantile(data_collection$number_other_product, probs = 1 / 4),
  quantile(data_collection$birth_year, probs = 1 / 4),
  quantile(data_collection$due_amount, probs = 1 / 4),
  quantile(data_collection$payed_amount, probs = 1 / 4),
  quantile(data_collection$delay, probs = 1 / 4)
)
# Third quartiles.
Q3 <- c(
  quantile(data_collection$number_of_children, probs = c(3 / 4)),
  quantile(data_collection$number_other_product, probs = c(3 / 4)),
  quantile(data_collection$birth_year, probs = c(3 / 4)),
  quantile(data_collection$due_amount, probs = c(3 / 4)),
  quantile(data_collection$payed_amount, probs = c(3 / 4)),
  quantile(data_collection$delay, probs = 3 / 4)
)
# Minima.
Min <- c(
  min(data_collection$number_of_children),
  min(data_collection$number_other_product),
  min(data_collection$birth_year), min(data_collection$due_amount),
  min(data_collection$payed_amount), min(data_collection$delay)
)
# Maxima.
Max <- c(
  max(data_collection$number_of_children),
  max(data_collection$number_other_product), max(data_collection$birth_year),
  max(data_collection$due_amount), max(data_collection$payed_amount),
  max(data_collection$delay)
)
# Assemble the summary table; distinct() drops any duplicated rows.
summaryData <- distinct(data.frame(headofTable, EX, VarX, Median, Q1, Q3, Min,
  Max,
  check.rows = FALSE, check.names = FALSE
))
############## exploring Data ########################
#### Data statistic ####
# Per-gender descriptive statistics. Every object name, column name and
# statistic is the same as before; only the magrittr pipes were replaced by
# plain nested group_by()/summarise() calls.
# Payment delay (days) by gender ----------------------------------------------
meanG_D <- summarise(group_by(data_collection, gender), mean = mean(delay))
medG_D <- summarise(group_by(data_collection, gender), med = median(delay))
maxG_D <- summarise(group_by(data_collection, gender), max = max(delay))
minG_D <- summarise(group_by(data_collection, gender), min = min(delay))
Q1G_D <- summarise(group_by(data_collection, gender), Q1 = quantile(delay, probs = 1 / 4))
Q3G_D <- summarise(group_by(data_collection, gender), Q3 = quantile(delay, probs = 3 / 4))
# One row per gender; columns: gender, mean, med, min, max, Q1, Q3.
data_GD <- data.frame(
  meanG_D, medG_D[, 2], minG_D[, 2], maxG_D[, 2], Q1G_D[, 2], Q3G_D[, 2],
  check.names = FALSE
)
# Paid amount by gender --------------------------------------------------------
meanG_PA <- summarise(group_by(data_collection, gender), mean = mean(payed_amount))
medG_PA <- summarise(group_by(data_collection, gender), med = median(payed_amount))
maxG_PA <- summarise(group_by(data_collection, gender), max = max(payed_amount))
minG_PA <- summarise(group_by(data_collection, gender), min = min(payed_amount))
Q1G_PA <- summarise(group_by(data_collection, gender), Q1 = quantile(payed_amount, probs = 1 / 4))
Q3G_PA <- summarise(group_by(data_collection, gender), Q3 = quantile(payed_amount, probs = 3 / 4))
data_GPA <- data.frame(
  meanG_PA, medG_PA[, 2], minG_PA[, 2], maxG_PA[, 2], Q1G_PA[, 2], Q3G_PA[, 2],
  check.names = FALSE
)
# Due amount by gender ---------------------------------------------------------
meanG_DA <- summarise(group_by(data_collection, gender), mean = mean(due_amount))
medG_DA <- summarise(group_by(data_collection, gender), med = median(due_amount))
maxG_DA <- summarise(group_by(data_collection, gender), max = max(due_amount))
minG_DA <- summarise(group_by(data_collection, gender), min = min(due_amount))
Q1G_DA <- summarise(group_by(data_collection, gender), Q1 = quantile(due_amount, probs = 1 / 4))
Q3G_DA <- summarise(group_by(data_collection, gender), Q3 = quantile(due_amount, probs = 3 / 4))
data_GDA <- data.frame(
  meanG_DA, medG_DA[, 2], minG_DA[, 2], maxG_DA[, 2], Q1G_DA[, 2], Q3G_DA[, 2],
  check.names = FALSE
)
# Mean paid amount by gender x product type x business discount, with genders
# spread into columns (autoprinted at top level, as before).
spread(
  summarise(
    group_by(data_collection, gender, product_type, business_discount),
    payedAmount = mean(payed_amount)
  ),
  gender, payedAmount
)
# Mean delay by gender x number of children, genders as columns.
spread(
  summarise(
    group_by(data_collection, gender, number_of_children),
    delay = mean(delay)
  ),
  gender, delay
)
#### FEATURE ENGINEERING ------------------------------------------------------
# This function takes a vector and shifts its values by 1. This means that
# on the second position is the first value, on the third position is
# the second value etc. This is necessary for feature engineering.
# We want a cumulative sum/cumulative mean for previous payments, but
# functions cummean/cumsum applied to a row[i] make the calculations
# using also the number on this line. This is where this function comes in handy.
f_vec_shift <- function(x){
new_vec <- c(0, x[1:length(x)-1])
return(new_vec)
}
# Creating new features: was the delay larger than 21(140) days?
data_collection <- data_collection %>%
  mutate(delay_21_y = ifelse(delay > 21, 1, 0)) %>%
  mutate(delay_140_y = ifelse(delay > 140, 1, 0))
# Creating new features: mean delay for the whole client's history.
# Each contract's rows are nested into a per-contract data frame, the
# cumulative mean of `delay` is computed, and f_vec_shift() moves it one row
# forward so that row i only sees payments *before* i.
# NOTE(review): nest(-contract_id) is the superseded pre-tidyr-1.0 calling
# style; it still works but the modern spelling is nest(data = -contract_id).
data_collection <- data_collection %>%
  nest(-contract_id) %>%
  mutate(delay_indiv_help = map(.x = data, .f = ~cummean(.x$delay))) %>%
  mutate(delay_indiv = map(.x = delay_indiv_help, .f = ~f_vec_shift(.x))) %>%
  select(-delay_indiv_help) %>%
  unnest(c(data, delay_indiv))
# Creating new features: number of delays larger than 21(140) days
# for the whole client's history (again shifted to exclude the current row).
data_collection <- data_collection %>%
  nest(-contract_id) %>%
  mutate(delay_indiv_21_help = map(.x = data, .f = ~cumsum(.x$delay_21_y))) %>%
  mutate(delay_indiv_21 = map(.x = delay_indiv_21_help, .f = ~f_vec_shift(.x))) %>%
  mutate(delay_indiv_140_help = map(.x = data, .f = ~cumsum(.x$delay_140_y))) %>%
  mutate(delay_indiv_140 = map(.x = delay_indiv_140_help, .f = ~f_vec_shift(.x))) %>%
  select(-delay_indiv_21_help, -delay_indiv_140_help) %>%
  unnest(c(data, delay_indiv_21, delay_indiv_140))
# This part of the feature engineering process is the longest, but the syntax
# is not that complicated (compared to map/nest etc.).
# It works like this:
# First, we create new features (variables, columns) for storing
# the average payment delay for last 12/6/3/1 month.
# In this section, I am also creating a set of new features with suffix _help,
# they serve only as a temporary helper inside the loop further below.
# It was necessary to rep() NA values, since 0 or any other number
# would be misleading.
data_collection <- data_collection %>%
  mutate(mean_delay_12m = rep(NA, nrow(data_collection))) %>%
  mutate(mean_delay_12m_help = rep(NA, nrow(data_collection))) %>%
  mutate(mean_delay_6m = rep(NA, nrow(data_collection))) %>%
  mutate(mean_delay_6m_help = rep(NA, nrow(data_collection))) %>%
  mutate(mean_delay_3m = rep(NA, nrow(data_collection))) %>%
  mutate(mean_delay_3m_help = rep(NA, nrow(data_collection))) %>%
  mutate(mean_delay_1m = rep(NA, nrow(data_collection))) %>%
  mutate(mean_delay_1m_help = rep(NA, nrow(data_collection))) %>%
  filter(is.na(payment_order) == F) %>% # CAUTION, VERIFY!!!!!!!! (was: POZOR, OVERIT) -- drops rows with missing payment_order
  filter(is.na(delay) == F) # CAUTION, VERIFY!!!!!!!! -- drops rows with missing delay (presumably not-yet-paid; confirm)
# Second we nest data again, one list-column entry per contract.
data_collection <- data_collection %>%
  nest(-contract_id)
# Third, loop over the nested per-contract data frames and fill in the
# rolling-mean columns. The original loop contained four nearly identical
# 12/6/3/1-month branches (mark window rows in a *_help column, average,
# reset); they are factored into one helper plus a loop over window sizes.
# The *_help columns created earlier are left untouched (all NA) -- exactly
# the state they end up in after the original loop -- and are dropped later.
# NOTE: `months()` here must be lubridate's period constructor (attached via
# tidyverse >= 2.0 or an explicit library(lubridate)); base::months() would
# fail on this date arithmetic.

# Mean of `delays` over payments whose due_date lies in the half-open window
# [current_due - n_months, current_due). Returns NaN for an empty window,
# matching the original 0/0 division.
window_mean_delay <- function(due_dates, delays, current_due, n_months) {
  in_window <- due_dates >= current_due - months(n_months) &
    due_dates < current_due
  mean(delays[in_window])
}

for (i in seq_len(nrow(data_collection))) {
  df_actual <- data_collection$data[[i]]
  for (j in seq_len(nrow(df_actual))) {
    # A window mean is only computed once more payments exist than months in
    # the window (mirrors the original j > 12 / 6 / 3 / 1 guards).
    for (n_months in c(12, 6, 3, 1)) {
      if (j > n_months) {
        target_col <- paste0("mean_delay_", n_months, "m")
        df_actual[[target_col]][j] <- window_mean_delay(
          df_actual$due_date, df_actual$delay,
          df_actual$due_date[j], n_months
        )
      }
    }
  }
  data_collection$data[[i]] <- df_actual
  # Progress bar might be more elegant; with ~87k contracts, printing the
  # index gives a good idea of progress.
  print(i)
}
# Unnest the data back to one row per payment and remove the helper columns.
# NOTE(review): unnest(-data) asks tidyr to unnest every column *except*
# `data`; the intent here looks like unnest(data). Confirm this expands the
# nested column under the tidyr version in use (legacy tidyr semantics
# differ from tidyr >= 1.0).
data_collection <- data_collection %>%
  unnest(-data) %>%
  select(-mean_delay_12m_help, -mean_delay_6m_help, -mean_delay_3m_help, -mean_delay_1m_help)
# Write data to .txt for the model creation (semicolon-separated, written to
# the current working directory).
write.table(data_collection, file = "data_collection_prepared.txt", sep = ";")
# Data preparation -------------------------------------------------------------
# Clean the data - estimation of missing data
# Create derived attributes
|
f1834dc7dbaf5452f5618dd3b5216c92bbe9d6e5 | 43ecaba0b376d125f5b9c78a1a0f480003a744d1 | /man/set_overedge_options.Rd | 1e5523f24b65a9307db7c754b75cd861160d23ca | [
"MIT"
] | permissive | elipousson/overedge | fc26a11ebbf2bdda30d5a88a35c6bbbd67bc176e | 27dd49ed496601efdc25f1cb56c9d40e2f02155a | refs/heads/main | 2023-05-23T05:56:13.443240 | 2022-08-10T21:28:44 | 2022-08-10T21:28:44 | 449,945,031 | 13 | 0 | null | null | null | null | UTF-8 | R | false | true | 716 | rd | set_overedge_options.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/utils-pkg.R
\name{set_overedge_options}
\alias{set_overedge_options}
\title{Set overedge packge options}
\usage{
set_overedge_options(
dist = NULL,
diag_ratio = NULL,
unit = NULL,
asp = NULL,
data_package = NULL,
data_filetype = NULL,
from_crs = NULL,
crs = NULL,
overwrite = TRUE
)
}
\arguments{
\item{dist, diag_ratio, unit, asp, data_package, data_filetype, from_crs, crs}{options to set, e.g. "crs = 2804" with
\code{pkg = "overedge"} to set "overedge.crs" to 2804.}
\item{overwrite}{If \code{TRUE}, overwrite any existing option value.}
}
\description{
Can set options for package, diag_ratio, dist, asp, or crs
}
|
16df0779345bb90a6850d944ae5a54280cf261d1 | b7107673041509eda45697a7b52c2a69d129bef8 | /man/metaclipcc.Map.Rd | be4fb9ef3e524a5677b92b05138533f09593663f | [] | no_license | metaclip/metaclipcc | d595c5f15ffd98ad07451f699faaa2e05faa9cca | 11f6b7af627394d3d3bb06267fedf5fef2cba9df | refs/heads/master | 2022-09-27T17:56:46.632232 | 2021-09-23T15:20:14 | 2021-09-23T15:20:14 | 179,584,790 | 2 | 0 | null | null | null | null | UTF-8 | R | false | true | 5,367 | rd | metaclipcc.Map.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/metaclipcc.Map.R
\name{metaclipcc.Map}
\alias{metaclipcc.Map}
\title{Directed metadata graph construction for AR6 Atlas Map products}
\usage{
metaclipcc.Map(
project = "CMIP5",
variable = NULL,
climate.index = NULL,
delta = FALSE,
experiment,
baseline,
future.period = NULL,
season,
bias.adj.method = NULL,
ref.obs.dataset = NULL,
proj,
map.bbox = NULL,
uncertainty = NULL,
test.mode = FALSE
)
}
\arguments{
\item{project}{Project. Unused so far. Default to \code{"CMIP5"}.}
\item{variable}{Code of the input ECV (it can be omitted if \code{climate.index} is indicated).
Current accepted values are restricted to \code{"tas", "meanpr", "TX", "TN", "prsn", "wind"}
and \code{"siconc", "ph", "tos"} for oceanic variables (CMIP6 only).}
\item{climate.index}{If the map is for a climate index, name of the index. Otherwise NULL (the default). Currently accepted values are restricted to
the set of indices to be included in the Atlas, namely:
\code{"TXx", "TNn", "Rx1day", "Rx5day", "spi6", "CDD", "tx35", "tx40", "cd", "hdd", "fd"}, as well as the
bias adjusted versions of \code{"tx35isimip", "tx40isimip", "fdisimip"}.}
\item{delta}{Logical. Is it a delta map?. The type of delta displayed can be either \code{"absolute"} or \code{"relative"}.}
\item{experiment}{Experiment results displayed in the map. Accepted values are restricted to \code{"historical", "rcp26", "rcp45", "rcp85"},
for CORDEX and CMIP5 products, and \code{"historical", "ssp126", "ssp245", "ssp370", "ssp460" and "ssp585"} for CMIP6 products.}
\item{baseline}{Character string indicating the \code{"start-end"} years of the baseline (historical) period. Accepted values are:
\code{"1981-2010"} and \code{"1961-1990"} (WMO standard periods), \code{"1986-2005"} (AR5 period), \code{"1995-2014"} (AR6 period) and \code{"1850-1900"}
(Preindustrial). Internally, there is a tricky part here in some cases, see Details.}
\item{future.period}{future period. Default to \code{NULL}, for historical maps (i.e., period defined by the \code{baseline} argument). Otherwise, a character string
indicating either the \code{"start-end"} years of the future period or a (GCM-specific) warming level. Current options include the standard AR5 future time slices
for near term, \code{"2021-2040"}, medium term \code{"2041-2060"} and long term \code{"2081-2100"}, and the global
warming levels of +1.5 degC \code{"1.5"}, +2 \code{"2"}, +3 \code{"3"} and +4 \code{"4"}.}
\item{season}{season. Integer vector of correlative months, in ascending order, encompassing the target season.}
\item{bias.adj.method}{Default to \code{NULL} and unused. If the map displays a bias-corrected product, a character string idetifying the method. Current accepted values a re \code{"EQM"},
for the standard VALUE empirical quantile mapping method. NOTE: since metaclipcc v1.0.0 the parameter is automatically
set to "ISIMIP3" when needed as a function of the index name.}
\item{ref.obs.dataset}{Default to \code{NULL}, and unused unless a \code{bias.adj.method} has been specified.
This is the reference observational dataset to perform the correction. This is an individual that must be defined in the datasource vocabulary,
belonging to either classes \code{ds:ObservationalDataset} or \code{ds:Reanalysis}. Currently accepted values are \code{"W5E5"} and \code{"EWEMBI"}.
Note that the individual instances of the observational reference are assumed to be described in the datasource vocabulary.
NOTE: since metaclipcc v0.3.0 the parameter is
set to "W5E5" when needed as a function of the index name.}
\item{proj}{Map projection string. Accepted values are \code{"Robin"}, \code{"Arctic"}, \code{"Antarctic"} and \code{"Pacific"}
for Robinson and WGS84 Arctic/Antarctic Polar stereographic, and Robinson Pacific-centric projections respectively.}
\item{map.bbox}{Optional. numeric vector of length 4, containing, in this order the \code{c(xmin, ymin, xmax, ymax)} coordinates of the target area
zoomed in by the user. If missing, then the HorizontalExtent associated to the \code{project} is assumed.}
\item{uncertainty}{Uncertainty layer characteristics. Describes different hatched patterns
of the output graphical product, depending of the user's choice of visualization. Possible values are \code{NULL}
(default), indicating no hatching at all, or \code{"simple"} or \code{"advanced"}.}
\item{test.mode}{For internal use only. When the test mode is on, only the first two models are used.}
}
\description{
Build a directed metadata graph describing a Map product of the AR6 Interactive Atlas
}
\details{
\strong{Baseline period definition}
Two of the baseline periods considered (WMO, 1981-2010 and AR6, 1995-2014) go beyond the temporal extent of the historical experiment simulations in AR5, ending in 2005.
In this case, the strategy followed in the different IPCC reports is to fill the missing period between 2006 and the end of the baseline with the years of the future
simulation used in each case. For example, to compute a RCP 8.5 delta using the WMO baseline, the baseline data for the period 2006-2010 will be extracted from the RCP85
simulation, and then concatenated with the 1981-2005 period of the historical simulation in order to complete the baseline period.
}
\author{
J. Bedia
}
|
c8468a11ad716239950f2deb4c0c39f33d249c6d | 77d18679b43522728be6bc19f79e8940a94266b7 | /18_마라톤그래프분석2.R | a2b2fc273174e9b22f434c92e8321e891e1503db | [] | no_license | Junohera/R_Basic | 5e738aef03bc20c2c365927cb2c9359261dfd8a9 | a9e9941c53275f048739e92fda9f0dacefdd0f93 | refs/heads/master | 2023-03-27T16:49:51.453261 | 2021-04-02T08:55:15 | 2021-04-02T08:55:15 | 353,197,650 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 2,390 | r | 18_마라톤그래프분석2.R | # 1. marathon 2015_2017.csv 변수에 저장
# 2. 나이에서 십의자리만 남기고 일의자리를 절사하여 연령대 필드명 Age_10을 만들고 절사한 값을 저장
# 3. Age_10을 기준으로 한 bar차트를 그립니다
# 4. 차트 제목 : 연령대별 참석인원, 축제목은 y축만 인원으로
# 5. 성별로 그룹, 빨강, 파랑으로 차트색 지정
# 6. 가장 인원이 많은 연령대의 bar위에 "최대참여 연령"이라고 annotate를 이용해 텍스트를 표시
# 7. 나머지 설정은 이전 차트의 내용을 따르거나 임의 조정
# (x 눈금, y 눈금, 각제목들의 글자 크기, 범례 설정 등)
# 1
marathon_df = read.csv('./R_Script/resource/marathon_merge[2015~2017].csv')
# 2
floor(marathon_df$Age/10) * 10 -> marathon_df$Age_10
# 3
library('ggplot2')
ggplot(
data = marathon_df
, aes(
x = Age_10
, group = M.F
, fill = M.F
)
) + geom_bar(
position = 'dodge'
, alpha = 0.8
) +
labs(
y = '인원'
, title = '연령대별 참석인원'
) +
theme(
legend.title = element_text(size=15)
, legend.text = element_text(size=15)
, legend.position = 'bottom'
, plot.title = element_text(
size = 35
, hjust = 0.5
, face = 'bold'
)
, axis.title.x = element_blank()
, axis.title.y = element_text(
size = 20
, hjust = 0.5
, face = 'bold'
)
) +
scale_x_continuous(
limits = c(10, 80)
, breaks = seq(10, 80, 10)
, labels = seq(10, 80, 10)
) +
scale_y_continuous(
limits = c(0, 14000)
, breaks = seq(0, 14000, 2000)
, labels = seq(0, 14000, 2000)
) +
annotate("text", x=40, y=max(table(marathon_df$Age_10, marathon_df$M.F)), label="최대참여 연령", size=6, color = 'magenta') +
scale_fill_manual(
values = c("M"="blue", "F"="red")
)
table(marathon_df$Age_10)
ageGenderTable = table(marathon_df$Age_10, marathon_df$M.F)
max(ageGenderTable)
match(ageGenderTable, max(ageGenderTable))
|
3f2453f2027029f3588b976a397bf5bd04489e5d | a188192e32330117438c7bb2b4681ff515b4e775 | /R/cleanFFCMA.R | b2fcd992c603de16e7421bf9a9e6a6dee4ff7db1 | [] | no_license | jrs9/FFAQR | 6e5e80efced864c9f3f1ad0bbf0882878d8b1def | fd4f45a4a83899112ee8676113990c5e92c2c131 | refs/heads/master | 2016-09-05T20:20:01.386675 | 2015-01-28T15:17:18 | 2015-01-28T15:17:18 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 1,057 | r | cleanFFCMA.R | #' Reads in, cleans, and subdivides FFCMA data set located in data folder.
#' Reads in, cleans, and subdivides the FFCMA data set located in the data
#' folder, then hands each fixed row block to cleanSubFF().
#'
#' FIX: the original implementation setwd()-ed into "data" and back with
#' setwd(".."); if read.table() failed, the working directory was left
#' changed. Building the path with file.path() avoids mutating the working
#' directory at all while reading the same file.
cleanFFCMA <- function() {
  # Imports FFCMA from the data folder (path relative to the current working
  # directory, exactly as before).
  col_names <- c(
    "Date", "Lo30", "Med40", "Hi30", "Lo20", "Qnt2", "Qnt3", "Qnt4",
    "Hi20", "Lo10", "Dec2", "Dec3", "Dec4", "Dec5", "Dec6",
    "Dec7", "Dec8", "Dec9", "Hi10"
  )
  FFCMA <- read.table(file.path("data", "FFCMA.txt"),
    fill = TRUE, skip = 18,
    col.names = col_names, stringsAsFactors = FALSE
  )
  # Divides FFCMA into its fixed row blocks (layout of the source text file).
  FFCMAValWgtMthly <- FFCMA[1:618, ]
  FFCMAEqWgtMthly <- FFCMA[622:1239, ]
  FFCMAValWgtAnn <- FFCMA[1243:1293, ]
  FFCMAEqWgtAnn <- FFCMA[1297:1347, ]
  FFCMANumOfFirms <- FFCMA[1351:1968, ]
  FFCMAAvgFirmSize <- FFCMA[1972:2589, ]
  FFCMAValWgtAvgInvAnn <- FFCMA[2593:2644, ]
  # Cleans each block. NOTE(review): cleanSubFF()'s return values are
  # discarded here, so it presumably stores its results as a side effect --
  # confirm against cleanSubFF's definition.
  cleanSubFF(FFCMAValWgtMthly)
  cleanSubFF(FFCMAEqWgtMthly)
  cleanSubFF(FFCMAValWgtAnn)
  cleanSubFF(FFCMAEqWgtAnn)
  cleanSubFF(FFCMANumOfFirms, spr = FALSE)
  cleanSubFF(FFCMAAvgFirmSize, spr = FALSE)
  cleanSubFF(FFCMAValWgtAvgInvAnn)
}
|
c2efe96e442c335e058258adeb65d64883a191bc | 6e25221d44b46f1532487df6d2702673c9a0eeb8 | /man/spmle.Rd | 8eae52e6f9963f4e275cbc64be91e066857d868c | [
"MIT"
] | permissive | alexasher/caseControlGE | a18b6079575ed68f18ae3537e55abf04ae3617db | 47cae11175ef74e38eeebf2dee1b50dba0bad0b7 | refs/heads/master | 2020-03-17T22:55:05.623266 | 2018-05-29T19:25:08 | 2018-05-29T19:25:08 | 134,023,493 | 0 | 1 | null | null | null | null | UTF-8 | R | false | true | 8,903 | rd | spmle.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/estimation_functions.R
\name{spmle}
\alias{spmle}
\title{Semiparametric Maximum Pseudolikelihood Estimator for Case-Control Studies Under G-E Independence.}
\usage{
spmle(D, G, E, pi1, data, control = list(), swap = FALSE, startvals)
}
\arguments{
\item{D}{a binary vector of disease status (1=case, 0=control).}
\item{G}{a vector or matrix (if multivariate) containing genetic data. Can be continuous, discrete, or a combination.}
\item{E}{a vector or matrix (if multivariate) containing environmental data. Can be continuous, discrete, or a combination.}
\item{pi1}{the population disease rate, a scalar in [0, 1) or the string "rare".
Using \code{pi1=0} is the rare disease approximation.}
\item{data}{an optional list or environment containing the variables in the model.
If not found in \code{data}, the variables are taken from the environment
from which \code{spmle} is called.}
\item{control}{a list of control parameters that allow the user to control the optimization algorithm. See 'Details'.}
\item{swap}{a logical scalar rarely of interest to the end user. Dependence on the distributions of G and E
are removed using different methods; this switch swaps them to produce a symmetric estimator with identical
properties to the SPMLE. Default \code{FALSE}.}
\item{startvals}{an optional numeric vector of coefficient starting values for optimization. Usually left blank,
in which case logistic regression estimates are used as starting values.}
}
\value{
an object of class \code{"spmle"}. The function \code{summary} (i.e., \code{summary.spmle})
can be used to obtain or print a summary of the results.
The function \code{anova} (i.e., \code{anova.spmle}) will conduct likelihood-ratio
tests comparing one \code{spmle} object to another. These are valid tests
because the loglikelihood reported by \code{logLik.spmle} is accurate up to
an additive constant. However \code{anova} should not be used
to compare an \code{spmle} object to a model fit by a different method.
\code{\link{predict.spmle}}, the \code{predict} method for S3 class \code{"spmle"},
can predict the expected response (on logistic or probability scales), compute
confidence intervals for the expected response, and provide standard errors.
The generic accessor functions \code{coefficients}, \code{fitted.values}
and \code{residuals} can be used to extract various useful features of the
value returned by \code{spmle}.
An object of class \code{"spmle"} is a list containing at least the following components:
\describe{
\item{\code{coefficients}}{a named vector of coefficients}
\item{\code{pi1}}{the value of pi1 used during the analysis}
\item{\code{SE}}{standard error estimate of coefficients}
\item{\code{cov}}{estimated covariance matrix of coefficients}
\item{\code{glm_fit}}{a logistic regression model fit using the same model as \code{spmle}}
\item{\code{call}}{the matched call}
\item{\code{formula}}{the formula used}
\item{\code{data}}{the \code{data} argument}
\item{\code{model}}{the model frame}
\item{\code{terms}}{the \code{terms} object used}
\item{\code{linear.predictors}}{the linear fit on the logistic link scale}
\item{\code{fitted.values}}{the fitted values on the probability scale}
\item{\code{residuals}}{the Pearson residuals}
\item{\code{null.deviance}}{the deviance for the null model. Deviance = \code{-2*logLik}.}
\item{\code{df.residual}}{the residual degrees of freedom}
\item{\code{df.null}}{the residual degrees of freedom for the null model}
\item{\code{rank}}{the numeric rank of the fitted linear model
(i.e. the number of parameters estimated)}
\item{\code{nobs}}{number of observations}
\item{\code{ncase}}{number of cases}
\item{\code{ncontrol}}{number of controls}
}
\code{spmle} objects created by \code{spmle()} additionally have components \code{logLik}
(log pseudolikelihood), \code{deviance} (-2 * log pseudolikelihood), \code{aic}, \code{bic},
\code{ucminf} (optimization output), and matrices \code{H_inv}, \code{Sigma}, \code{zeta0},
and \code{zeta1}, which are used in calculating the asymptotic estimate of standard error.
}
\description{
\code{spmle} maximizes the retrospective pseudolikelihood of case-control data under the assumption
of G-E independence in the underlying population.
The marginal distributions of G and E are treated nonparametrically.
}
\details{
This function applies the method of Stalder et. al. (2017) to maximize the
retrospective pseudolikelihood of case-control data under the assumption of G-E independence.
It currently supports the model with G and E main effects and a multiplicative G*E interaction.
The \code{control} argument is a list that controls the behavior of the optimization algorithm
\code{\link[ucminf]{ucminf}} from the \pkg{ucminf} package. When \code{ucminf} works,
it works brilliantly (typically more than twice as fast as the next-fastest algorithm).
But it has a nasty habit of declaring convergence before actually converging.
To address this, \code{spmle} checks the maximum gradient at "convergence", and can rerun the optimization
using different starting values. The \code{control} argument can supply any of the following components:
\describe{
\item{\code{max_grad_tol}}{maximum allowable gradient at convergence. \code{spmle} does not
consider the optimization to have converged if the maximum gradient \code{> max_grad_tol}
when \code{ucminf} stops. Default \code{max_grad_tol} \code{= 0.001}.}
\item{\code{num_retries}}{number of times to retry optimization. An error is produced if
the optimization has not converged after \code{num_retries}. Different starting values
are used for each retry. Default \code{num_retries = 2}.}
\item{\code{use_hess}}{a logical value instructing \code{spmle} to use the analytic hessian
to precondition the optimization. This brings significant speed benefits, and is one reason
\code{ucminf} is so fast. For unknown reasons, preconditioning causes computers with
certain Intel CPUs to prematurely terminate iterating. By default, \code{use_hess = TRUE},
but if you notice that \code{ucminf} never converges during the first attempt, try setting \code{use_hess = FALSE}.}
\item{\code{trace}}{a scalar or logical value that is used by both \code{spmle} and \code{ucminf}
to control the printing of detailed tracing information.
If TRUE or > 0, details of each \code{ucminf} iteration are printed.
If FALSE or 0, \code{ucminf} iteration details are suppressed but \code{spmle} still
prints optimization retries. If \code{trace < 0} nothing is printed. Default \code{trace = 0}.}
\item{additional control parameters}{not used by \code{spmle}, but are passed to \code{\link[ucminf]{ucminf}}.
Note that the \code{ucminf} algorithm has four stopping criteria, and \code{ucminf} will
declare convergence if any one of them has been met. The \code{ucminf} control parameter
"\code{grtol}" controls \code{ucminf}'s gradient stopping criterion, which defaults to
\code{1e-6}. \code{grtol} should not be set larger than the \code{spmle} control parameter \code{max_grad_tol}.}
}
}
\section{References}{
Stalder, O., Asher, A., Liang, L., Carroll, R. J., Ma, Y., and Chatterjee, N. (2017).
\emph{Semi-parametric analysis of complex polygenic gene-environment interactions in case-control studies.}
Biometrika, 104, 801–812.
}
\examples{
# Simulation from Table 1 in Stalder et. al. (2017)
set.seed(2018)
dat = simulateCC(ncase=500, ncontrol=500, beta0=-4.165,
betaG_SNP=c(log(1.2), log(1.2), 0, log(1.2), 0),
betaE_bin=log(1.5),
betaGE_SNP_bin=c(log(1.3), 0, 0, log(1.3), 0),
MAF=c(0.1, 0.3, 0.3, 0.3, 0.1),
SNP_cor=0.7, E_bin_freq=0.5)
# SPMLE with known population disease rate of 0.03
spmle(D=D, G=G, E=E, pi1=0.03, data=dat)
# Simulation with a single SNP and a single binary environmental variable.
# True population disease rate in this simulation is 0.03.
# This simulation scenario was used in the Supplementary Material of Stalder et. al. (2017)
# to compare performance against the less flexible method of Chatterjee and Carroll (2005),
# which is available as the function as snp.logistic in the Bioconductor package CGEN.
dat2 = simulateCC(ncase=100, ncontrol=100, beta0=-3.77,
betaG_SNP=log(1.2), betaE_bin=log(1.5),
betaGE_SNP_bin=log(1.3), MAF=0.1,
E_bin_freq=0.5)
# SPMLE using the rare disease assumption, optimization tracing,
# and no hessian preconditioning.
spmle(D=D, G=G, E=E, pi1=0, data=dat2, control=list(trace=0, use_hess=FALSE))
}
\seealso{
\code{\link{spmleCombo}} for a slower but more precise estimator, \code{\link{simulateCC}} to simulate data
}
|
bf124746df49290746724a46f4bdbdab64ce5850 | 3b23dcf7210df9bf783504194ea38c79cf529679 | /leaflet_indic.R | a9700e57884218535fa9a1e3db5dad5eb49883de | [] | no_license | stienheremans/inbo-changedetection | f9117dda435f4ba5dec71418fd1b0b3f83a875fc | d4787194be822f9ee8fdee71dbebadd05152e7a6 | refs/heads/master | 2021-07-16T03:52:23.479769 | 2019-11-13T09:27:09 | 2019-11-13T09:27:09 | 179,023,397 | 0 | 0 | null | 2020-06-18T19:27:24 | 2019-04-02T07:32:20 | JavaScript | UTF-8 | R | false | false | 3,083 | r | leaflet_indic.R | # Make leaflet illustration of indicator rasters
library(raster)
library(leaflet)
library(htmlwidgets)
library(htmltools)
library(stringr)
library(mapview)
dirs <- list.dirs("Q:/Projects/PRJ_RemSen/Change detection 2018/change-detection files/data/Sen2_data/begin May")
dirs <- dirs[ grepl("BE", dirs)]
for (n in dirs){
# import all necessary files (4 miica indicators for each year, outliers for each indicator without Planet for each year)
files <- list.files(n, full.names = T)
files_indicators <- files[grepl("to.*tif$", files)]
# fixed map layout
tag.map.title <- tags$style(HTML("
.leaflet-control.map-title {
transform: translate(-50%,20%);
position: fixed !important;
left: 50%;
text-align: center;
padding-left: 10px;
padding-right: 10px;
background: rgba(255,255,255,0.75);
font-weight: bold;
font-size: 28px;
}
"))
map <- leaflet() %>%
addProviderTiles('Esri.WorldImagery', group = "basemap")
indics <- list()
for (o in 1:length(files_indicators)){
year_loc <- str_locate(files_indicators[o], ".tif")[1]
year <- str_sub(files_indicators[o],year_loc - 20 , year_loc - 1)
ind_loc_start <- str_locate_all(files_indicators[o], " ")[[1]][5,1]
ind_loc_end <- str_locate_all(files_indicators[o], " ")[[1]][6,1]
ind <- str_sub(files_indicators[o],ind_loc_start, ind_loc_end)
studysite_loc <- str_locate_all(files_indicators[o], "/BE")[[1]][2,1]
studysite <- str_sub(files_indicators[o],studysite_loc+1, ind_loc_start-1)
rast_ind <- raster(files_indicators[o])
proj_WGS84 <- CRS("+proj=longlat +datum=WGS84")
rast_WGS84 <- projectRaster(rast_ind, crs = proj_WGS84)
indics <- c(indics, paste0(ind, year))
pal <- colorNumeric(c("royalblue", "yellow", "red"), values(rast_WGS84),
na.color = "transparent")
map <- map %>%
addRasterImage(rast_WGS84, colors = pal, opacity = 1, group = paste0(ind, year)) %>%
addLegend("bottomleft", pal = pal, values = values(rast_WGS84),
title = paste0(ind, year),
opacity = 0.8, group = paste0(ind, year))
}
title_map <- tags$div(
tag.map.title, HTML(paste0(studysite)))
map <- map%>%
addLayersControl(
baseGroups = c("basemap"),
overlayGroups = indics,
options = layersControlOptions(collapsed = FALSE)) %>%
addControl(title_map, position = "topleft", className="map-title") %>%
hideGroup(indics)
map
file_name <- paste0("Q:/Projects/PRJ_RemSen/Change detection 2018/change-detection files/data/Sen2_data/leaflets/begin May/indicators_", studysite, ".html")
saveWidget(map, file = file_name)
}
|
0882c5854f662f14cfd87464654153541ff9f6c3 | cdb10ffcb9fa754366b1cf42c09e3185206fefef | /tests/testthat/test-downlit-html.R | 9578b0d16e7be041626fc9a18cd4c4649db71073 | [
"MIT"
] | permissive | JohnMount/downlit | 8ed71215975a0278200fcdc48d00fae0b580834b | a7cadfc8810bc9dfff565a2cb4cf362fe662a3d9 | refs/heads/master | 2022-12-10T00:32:58.254073 | 2020-09-06T20:14:04 | 2020-09-06T20:14:04 | 293,344,315 | 0 | 0 | NOASSERTION | 2020-09-06T19:05:57 | 2020-09-06T19:05:57 | null | UTF-8 | R | false | false | 304 | r | test-downlit-html.R | test_that("can highlight html file", {
# verify_output() seems to be generating the wrong line endings
skip_on_os("windows")
verify_output(test_path("test-downlit-html.txt"), {
out <- downlit_html_path(test_path("autolink.html"), tempfile())
cat(brio::read_lines(out), sep = "\n")
})
})
|
b3a220dead439d3b2983c26cbe39de495c10ce36 | 9226726d7e3ce23f5222e77493722910717b8427 | /man/annotatePairs.Rd | e7162a54b98bc11142d135898dfc1fa2ba1fac28 | [] | no_license | LTLA/diffHic | 7f2e1211e1a2816e1df92dd3606550792909d3a5 | 8d7f0095c95a020da09edb20b751536a65151aaa | refs/heads/master | 2021-08-08T15:45:25.018021 | 2021-07-15T08:46:22 | 2021-07-15T08:46:34 | 103,892,130 | 3 | 3 | null | 2018-07-28T13:33:17 | 2017-09-18T04:38:11 | R | UTF-8 | R | false | false | 3,578 | rd | annotatePairs.Rd | \name{annotatePairs}
\alias{annotatePairs}
\title{Annotate bin pairs}
\description{Annotate bin pairs based on features overlapping the anchor regions.}
\usage{
annotatePairs(data.list, regions, rnames=names(regions), indices, ...)
}
\arguments{
\item{data.list}{An InteractionSet or a list of InteractionSet objects containing bin pairs.}
\item{regions}{A GRanges object containing coordinates for the regions of interest.}
\item{rnames}{A character vector containing names to be used for annotation.}
\item{indices}{An integer vector or list of such vectors, indicating the cluster identity for each interaction in \code{data.list}.}
\item{...}{Additional arguments to pass to \code{\link{findOverlaps}}.}
}
\value{
A list of two character vectors \code{anchor1} and \code{anchor2} is returned, containing comma-separated strings of \code{names} for entries in \code{regions} overlapped by the first and second anchor regions respectively.
If \code{indices} is not specified, overlaps are identified to anchor regions of each interaction in \code{data.list}.
Otherwise, overlaps are identified to anchor regions for any interaction in each cluster.
}
\details{
Entries in \code{regions} are identified with any overlap to anchor regions for interactions in \code{data.list}.
The \code{names} for these entries are concatenated into a comma-separated string for easy reporting.
Typically, gene symbols are used in \code{names}, but other values can be supplied depending on the type of annotation.
This is done separately for the first and second anchor regions so that potential interactions between features of interest can be identified.
If \code{indices} is supplied, all interactions corresponding to each unique index are considered to be part of a single cluster.
Overlaps with all interactions in the cluster are subsequently concatenated into a single string.
Cluster indices should range from \code{[1, nclusters]} for any given number of clusters.
This means that the annotation for a cluster corresponding to a certain index can be obtained by subsetting the output vectors with that index.
Otherwise, if \code{indices} is not set, all interactions are assumed to be their own cluster, i.e., annotation is returned for each interaction separately.
Multiple InteractionSet objects can be supplied in \code{data.list}, e.g., if the cluster consists of bin pairs of different sizes.
This means that \code{indices} should also be a list of vectors where each vector indicates the cluster identity of the entries in the corresponding InteractionSet of \code{data.list}.
}
\author{
Aaron Lun
}
\seealso{
\code{\link{findOverlaps}},
\code{\link{clusterPairs}}
}
\examples{
# Setting up the objects.
a <- 10
b <- 20
cuts <- GRanges(rep(c("chrA", "chrB"), c(a, b)), IRanges(c(1:a, 1:b), c(1:a, 1:b)))
param <- pairParam(cuts)
all.combos <- combn(length(cuts), 2)
y <- InteractionSet(matrix(0, ncol(all.combos), 1),
GInteractions(anchor1=all.combos[2,], anchor2=all.combos[1,], regions=cuts, mode="reverse"),
colData=DataFrame(lib.size=1000), metadata=List(param=param, width=1))
regions <- GRanges(rep(c("chrA", "chrB"), c(3,2)), IRanges(c(1,5,8,3,3), c(1,5,8,3,4)))
names(regions) <- LETTERS[seq_along(regions)]
out <- annotatePairs(y, regions=regions)
# Again, with indices:
indices <- sample(20, length(y), replace=TRUE)
out <- annotatePairs(y, regions=regions, indices=indices)
# Again, with multiple InteractionSet objects:
out <- annotatePairs(list(y, y[1:10,]), regions=regions, indices=list(indices, indices[1:10]))
}
\keyword{annotation}
|
b5e16d46645db599004c578b02858197d4ad104c | 2017c56a3fefdb408096651c824f99720bf1482e | /man/folding.ratio.Rd | c7ec7d21241108d4ecafbd172b16f10736ed7f41 | [] | no_license | cran/Rfolding | 6a3dc0c6485529410f1bfb05959c3192bc9e3b71 | 7585a6de46b106e9b09a89394ab322b48dd4500e | refs/heads/master | 2020-03-30T07:49:11.411257 | 2018-09-30T12:10:12 | 2018-09-30T12:10:12 | 150,968,661 | 0 | 0 | null | null | null | null | UTF-8 | R | false | true | 463 | rd | folding.ratio.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/fold.R
\name{folding.ratio}
\alias{folding.ratio}
\title{Computes the folding ratio of the input data}
\usage{
folding.ratio(X)
}
\arguments{
\item{X}{nxd matrix (n observations, d dimensions)}
}
\value{
the folding ratio
}
\description{
Computes the folding ratio of the input data
}
\examples{
X = matrix(runif(n = 1000, min = 0., max = 1.), ncol = 1)
phi = folding.statistics(X)
}
|
bf2a80690203ca0c7a50ce18dd004a10ae5cb512 | 4408ba15bd88aa15dc3c1c9962fe39d280591ad7 | /Practical_machine_learning/quizzes/quizz3/4-glm.R | d8d58f7915cefd0ee8cc9861b0859918b9cb2d7a | [] | no_license | OliveLv/Coursera-Udacity | 57866bfac5bfa113dcca65a05ce4876f4d6e08d0 | ddc2e447e13e3c2104fed50de52ff096fa8ea4c0 | refs/heads/master | 2020-05-20T04:41:35.245573 | 2015-09-22T07:07:43 | 2015-09-22T07:07:43 | 41,250,795 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 486 | r | 4-glm.R | library(ElemStatLearn)
data(SAheart)
set.seed(8484)
train = sample(1:dim(SAheart)[1],size=dim(SAheart)[1]/2,replace=F)
trainSA = SAheart[train,]
testSA = SAheart[-train,]
missClass=function(values,prediction){
sum(((prediction > 0.5)*1) != values)/length(values)
}
set.seed(13234)
modelFit<-train(chd~age+alcohol+obesity+tobacco+typea+ldl,data=trainSA,method="glm")
pred<-predict(modelFit,testSA)
missClass(testSA$chd,pred)
pred<-predict(modelFit,trainSA)
missClass(trainSA$chd,pred) |
8ca62832b323e220185b40595f4365d8379a69f5 | 300f64c050b76a3db369c0099b9dd74fe40cca2b | /plot4.R | 1814919d7fc673861194e99c8d5a3a3f0a91599e | [] | no_license | pcds2015/ExData_Plotting1 | 28e16dd8c41a619a7c1e3629ef590401860393e1 | 330cbbddfd9d8c06a6661212e3ffba8dd1d5bf82 | refs/heads/master | 2021-01-15T21:07:46.567878 | 2015-08-09T22:35:49 | 2015-08-09T22:35:49 | 40,449,503 | 0 | 0 | null | 2015-08-09T20:29:19 | 2015-08-09T20:29:19 | null | UTF-8 | R | false | false | 100 | r | plot4.R | source("load_data.R")
DF <- load_data()
png("plot4.png", width=480, heigh=480)
plot4(DF)
dev.off()
|
13e64adfd6f8104965bfba51ddf3f54d1d0c8466 | f7b1efede242c3a66c5b652f8eb41273e454402c | /ui.R | 81e60e4e05273241d5684062fbb550228146087b | [] | no_license | NikSeldon/iris_shiny_app | aea7b09ab9c437758cceff3f5ac246bdebccec75 | 99cb7dbf92949cf91b32cb336ff9aff67a807aad | refs/heads/master | 2021-01-10T15:03:38.132908 | 2015-05-23T13:22:25 | 2015-05-23T13:22:25 | 36,123,983 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 2,444 | r | ui.R | library(shiny)
library(ggplot2)
library(lattice)
library(randomForest)
shinyUI(
fluidPage(
# Application title
titlePanel("Prediction of Iris Species"),
mainPanel(
fluidRow(
column(8,
"This application aims to predict the species of an Iris flower, based on your measurement of the dimensions of the flowers sepals and petals.",br(),
"The Iris flower data set or Fisher's Iris data set by Sir Ronald Fisher (1936) and a random forest machine learning algorithm is used to
classify and predict the three Iris species: Iris setosa, Iris virginica and Iris versicolor." ,br(),br(),br()
),
column(8,
h4('Input Panel'),
'Please fill in your measurements of the Iris Petal and Sepal sizes. The definitions are shown in the picture. After pressing the "Submit" bottom, your values and the predicted Iris species will be shown below.',br(),br()
)
),
fluidRow(
column(4,
numericInput('sl', 'Sepal Length (cm)', 4, min = 0.1, max = 10, step = 0.1),
numericInput('sw', 'Sepal Width (cm)', 3, min = 0.1, max = 10, step = 0.1),
numericInput('pl', 'Petal Length (cm)', 4, min = 0.1, max = 10, step = 0.1),
numericInput('pw', 'Petal Width (cm)', 2, min = 0.1, max = 10, step = 0.1),
submitButton('Submit'),
br()
),
column(4,
img(src="iris_petal_sepal.png")
)
),
fluidRow(
#mainPanel(
br(),
h4('Your entered values are depicted by the two circles in the figures:'),
#h5('Sepal Length of:'),
#verbatimTextOutput("inputValue1"),
#h5('Sepal Width of:'),
#verbatimTextOutput("inputValue2"),
#h5('Petal Length of:'),
#verbatimTextOutput("inputValue3"),
#h5('Petal Width of:'),
#verbatimTextOutput("inputValue4"),
column(6,
plotOutput(outputId = "main_plot1", height = "300px")
),
column(6,
plotOutput(outputId = "main_plot2", height = "300px")
)
),
fluidRow(
h3('Results of prediction'),
h4('Your Input data yielded the following prediction of Iris species: '),
verbatimTextOutput("prediction"),br(),
"Sources: http://en.wikipedia.org/wiki/Iris_flower_data_set",br(),
"http://archive.ics.uci.edu/ml/datasets/Iris"
)
,width=10)#mainPanel
) #fluidPage
)#shinyUI
|
83002e6de0bb828db8913c597b849ecd1f434739 | a33234fdbe753ff3dc55c8e243574e8269660c23 | /man/CVsplit.Rd | 4631a3d8cbee17a04d22c552eed497924029e0ec | [] | no_license | MGallow/statr | 10d3148dea856dfd59720ddc6b468781810e1ec7 | 05d79ed7e811ec6210c913a9322f75c89b64abae | refs/heads/master | 2021-01-19T14:28:00.618046 | 2018-07-26T14:27:24 | 2018-07-26T14:27:24 | 88,106,853 | 0 | 0 | null | null | null | null | UTF-8 | R | false | true | 722 | rd | CVsplit.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/regression.R
\name{CVsplit}
\alias{CVsplit}
\title{CV split}
\usage{
CVsplit(X, Y, split = 0.5, N = NULL)
}
\arguments{
\item{X}{nxp data matrix. Each row corresponds to a single observation and each column contains n observations of a single feature/variable.}
\item{Y}{nxr response matrix. Each row corresponds to a single response and each column contains n response of a single feature/response.}
\item{split}{fraction of objects devoted to training set}
\item{N}{option to provide number of objects devoted to training set}
}
\value{
X.train, Y.train, X.test, Y.test
}
\description{
splits data objects into training and testing sets
}
|
ccb558d132753074c3befc7c8410e56f5a6fbd44 | a1341b35ca3b2a4e2de75d8d3f47805644c88b82 | /R/create_to_do_vec.R | 9bbb87adbfdc08d50ae9d98caa2f3e8ddc193475 | [
"MIT"
] | permissive | aranryan/arlodr | 304f3bfed8a7878bd6277fc3be6dd17f73701e2a | 923eca51099f6d162f1ef35077ab01627b009d1b | refs/heads/master | 2021-01-17T06:49:08.966104 | 2020-10-06T21:39:48 | 2020-10-06T21:39:48 | 48,458,911 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 1,365 | r | create_to_do_vec.R | #' Create a vector of geographies to be run for various equations.
#'
#' @param in_area_sh
#' @param ex_area_sh
#' @param market_vec
#'
#' @return
#' @export
#'
#' @examples
create_to_do_vec <- function(in_area_sh, ex_area_sh, market_vec) {
# Break apart into a vector of elements rather than a single string.
# In other words, what comes in from Excel ends up being recognized
# as a single string, this converts it to a vector.
in_area_sh <- unlist(strsplit(in_area_sh, split=", "))
# Set up vector of area_sh to include
if(in_area_sh[1] == "all"){
in_area_sh_vec <- market_vec
} else {
in_area_sh_vec <- in_area_sh
}
# set up vector of area_sh to exclude
if(is.na(ex_area_sh)[[1]] ){
ex_area_sh_vec <- c("")
} else {
ex_area_sh <- unlist(strsplit(ex_area_sh, split=", "))
ex_area_sh_vec <- ex_area_sh
}
# drop those that should be excluded
vec <- in_area_sh_vec [! in_area_sh_vec %in% ex_area_sh_vec]
# Only include those that will have data. This helps to address the fact that
# we can filter the source data frame to the point where it only contains data
# for certain area_sh's, so we we only want to set up to run the analysis for
# situations that are going to work. Maybe I could have handled in a join step
# in the processing, but this also worked here.
vec <- vec[vec %in% market_vec]
}
|
b95607445ce6087309c8c43e87fada46035cb765 | d1a5939dde8c04c67b12940e8227c83b097a1267 | /man/gwasvcf_to_finemapr.Rd | 708b8303737a32e2f8482514fe111fbe4abf68f9 | [
"MIT"
] | permissive | MRCIEU/gwasglue | be15889efca130aa3fc3b0b9456ba2e4bebb1a86 | c2d5660eed389e1a9b3e04406b88731d642243f1 | refs/heads/master | 2022-06-19T02:41:34.998614 | 2022-02-10T12:36:47 | 2022-02-10T12:36:47 | 219,610,930 | 70 | 34 | NOASSERTION | 2022-05-23T11:03:10 | 2019-11-04T22:45:00 | R | UTF-8 | R | false | true | 816 | rd | gwasvcf_to_finemapr.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/finemapr.r
\name{gwasvcf_to_finemapr}
\alias{gwasvcf_to_finemapr}
\title{Generate data for fine mapping analysis}
\usage{
gwasvcf_to_finemapr(
region,
vcf,
bfile,
plink_bin = genetics.binaRies::get_plink_binary(),
threads = 1
)
}
\arguments{
\item{region}{Region of the genome to extract eg 1:109317192-110317192". Can be array}
\item{vcf}{Path to VCF file or VCF object}
\item{bfile}{LD reference panel}
\item{plink_bin}{Path to plink. Default = genetics.binaRies::get_plink_binary()}
\item{threads}{Number of threads to run in parallel. Default=1}
}
\value{
List of datasets for finemapping
}
\description{
For a given region and VCF file, extracts the variants in the region along with LD matrix from a reference panel
}
|
e4f7824712a5ccc9a10048b1aa265f77be88cc13 | afa3c9510c8e292d46127908dac141e3638f3be5 | /Žan Jarc - R/1. naloga/Obrestne mere.R | bd4c1e9cedf810fffd0d4de38c78272246544b78 | [] | no_license | ZanJarc/Financni-praktikum | fab9813a8567475b9c6029fa071af35da551cc21 | c27e2801596a2f1adad9cac73e426bd05517512e | refs/heads/master | 2020-08-13T11:03:12.363228 | 2019-12-19T20:14:06 | 2019-12-19T20:14:06 | 214,958,622 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 4,037 | r | Obrestne mere.R | data_2014 = read.csv("hist_EURIBOR_2014.csv")
data_2015 = read.csv("hist_EURIBOR_2015.csv")
data_2016 = read.csv("hist_EURIBOR_2016.csv")
data <- data.frame()
data <-rbind(data, c(data_2014$X2.01.2014))
names(data) = c('1w', '2w', '1m', '2m', '3m', '6m', '9m', '12m')
data <- rbind(data, c(data_2014$X3.02.2014))
data <- rbind(data, c(data_2014$X3.03.2014))
data <- rbind(data, c(data_2014$X1.04.2014))
data <- rbind(data, c(data_2014$X2.05.2014))
data <- rbind(data, c(data_2014$X2.06.2014))
data <- rbind(data, c(data_2014$X1.07.2014))
data <- rbind(data, c(data_2014$X1.08.2014))
data <- rbind(data, c(data_2014$X1.09.2014))
data <- rbind(data, c(data_2014$X1.10.2014))
data <- rbind(data, c(data_2014$X3.11.2014))
data <- rbind(data, c(data_2014$X1.12.2014))
data <- rbind(data, c(data_2015$X02.01.2015))
data <- rbind(data, c(data_2015$X02.02.2015))
data <- rbind(data, c(data_2015$X02.03.2015))
data <- rbind(data, c(data_2015$X01.04.2015))
data <- rbind(data, c(data_2015$X04.05.2015))
data <- rbind(data, c(data_2015$X01.06.2015))
data <- rbind(data, c(data_2015$X01.07.2015))
data <- rbind(data, c(data_2015$X03.08.2015))
data <- rbind(data, c(data_2015$X01.09.2015))
data <- rbind(data, c(data_2015$X01.10.2015))
data <- rbind(data, c(data_2015$X02.11.2015))
data <- rbind(data, c(data_2015$X01.12.2015))
data <- rbind(data, c(data_2016$X04.01.2016))
data <- rbind(data, c(data_2016$X01.02.2016))
data <- rbind(data, c(data_2016$X01.03.2016))
data <- rbind(data, c(data_2016$X01.04.2016))
data <- rbind(data, c(data_2016$X02.05.2016))
data <- rbind(data, c(data_2016$X01.06.2016))
data <- rbind(data, c(data_2016$X01.07.2016))
data <- rbind(data, c(data_2016$X01.08.2016))
data <- rbind(data, c(data_2016$X01.09.2016))
data <- rbind(data, c(data_2016$X03.10.2016))
data <- rbind(data, c(data_2016$X01.11.2016))
data <- rbind(data, c(data_2016$X01.12.2016))
row.names(data) <- c('X2.01.2014', 'X3.02.2014', 'X3.03.2014', 'X1.04.2014',
'X2.05.2014', 'X2.06.2014','X1.07.2014', 'X1.08.2014',
'X1.09.2014','X1.10.2014','X3.11.2014','X1.12.2014',
'X02.01.2015','X02.02.2015','X02.03.2015','X01.04.2015',
'X04.05.2015','X01.06.2015','X01.07.2015','X03.08.2015',
'X01.09.2015','X01.10.2015','X02.11.2015','X01.12.2015',
'X04.01.2016','X01.02.2016','X01.03.2016','X01.04.2016',
'X02.05.2016','X01.06.2016','X01.07.2016','X01.08.2016',
'X01.09.2016','X03.10.2016','X01.11.2016','X01.12.2016')
casi
t_data_6 <- ts(data[,6], start = 2014, frequency=12)
t_data_12 <- ts(data[,8], start = 2014, frequency=12)
ts.plot(t_data_6, t_data_12, main='Euribor', ylab='%', col=c('red', 'black'))
legend('topright', c('6 mesečne ', '12 mesečne'), col=c('red',' black'), lwd=1)
#zanimivi datumi: maj 2014, december 2015, september 2016
maj_2014<- data[5,]
december_2015 <- data[24,]
september_2016 <- data[33,]
cas = c(1/4, 1/2, 1, 2, 3, 6, 9, 12)
plot(cas,maj_2014,, ylim=c(-1/2, 1.5), type="o", pch = 10, col='red')
lines(cas, december_2015,type="o",pch = 10, text(10.5,0.2,"1.12.2015"), col='green')
lines(cas, september_2016, type="o",pch = 10, text(10,-0.2,"1.09.2016"), col='blue')
t <- 6
u <- 12
terminske <- (((1 + u*data[,8]) /(1+t*data[,6]))-1)/(u-t)
data_terminske <- data[,c(6, 8)]
colnames(data_terminske) <- c('Euribor 6m', 'Euribor 12m')
data_terminske$'Terminske 6x12'<- terminske
data_napovedi <- data [,c(6, 8)]
colnames(data_napovedi) <- c('Euribor 6m', 'Euribor 12m')
velikost <-nrow(data)
napovedi <- c(1:36)
for ( i in 1:t){
napovedi[i] <- NA
}
for (i in (t+1):velikost){
napovedi[i] = (((1 + u*data[i - t,8]) /(1+t*data[i - t,6]))-1)/(u-t)
}
napovedi_2014 <- napovedi [(t+1): u]
napovedi_2015 <- napovedi [(u+1): (2*u)]
napovedi_2016 <- napovedi [(2*u+1): (3*u)]
data_napovedi$'Napovedi 6m' <-napovedi
fit <-lm(napovedi[(t+1):(3*u)]~data$`6m`[7:36])
plot(data$`6m`[7:36], napovedi[(t+1):(3*u)],pch = 10, cex = 1.0, col = "blue")
|
9e8d5a5574ec9ccf320b97188519a3b8748f823f | ffdea92d4315e4363dd4ae673a1a6adf82a761b5 | /data/genthat_extracted_code/datamart/examples/SftpLocation-class.Rd.R | ed06461df7ec36e8b7ac4a9c2284aa04301731f1 | [] | no_license | surayaaramli/typeRrh | d257ac8905c49123f4ccd4e377ee3dfc84d1636c | 66e6996f31961bc8b9aafe1a6a6098327b66bf71 | refs/heads/master | 2023-05-05T04:05:31.617869 | 2019-04-25T22:10:06 | 2019-04-25T22:10:06 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 160 | r | SftpLocation-class.Rd.R | library(datamart)
### Name: SftpLocation-class
### Title: SFTP location
### Aliases: SftpLocation-class sftpdir
### ** Examples
getSlots("SftpLocation")
|
cab78db050376046c85ce298538a6d5da350a9bc | c1a1727e41a1befdbb2b4597c0ffa0bdb019f70d | /tests/test-all.R | fbf38ab2545d4e5926057dd6e428998957c493f9 | [] | no_license | briatte/psData | a89b5e2580fd60bf9b3e911b36eab05cb4c97b3c | 87c0ce2cae86cefae9ee4b8b2e390c4761569b23 | refs/heads/master | 2020-12-03T05:27:38.113499 | 2014-03-05T07:19:02 | 2014-03-05T07:19:02 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 62 | r | test-all.R | library("testthat")
library("psData")
test_package("psData")
|
90848a970fa48e99665d4f63edc19727adeb563d | cf6d10fd5882383c92306ce90b4df96a06c290c3 | /cachematrix.R | 6b3abb647441211a2422304d0c525d563be0c1db | [] | no_license | sheldon-white/ProgrammingAssignment2 | 0b23a577d445b72238f84cad0deb966b8e11ada7 | c73bbda2e1d35c62e036fa023c6e2c9c38219ca1 | refs/heads/master | 2020-04-08T22:03:12.387632 | 2014-06-22T00:56:31 | 2014-06-22T00:56:31 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 2,552 | r | cachematrix.R | ## These functions implement the programming assignment:
## (https://class.coursera.org/rprog-004/human_grading/view/courses/972139/assessments/3/submissions)
## Please refer to that page for a full description of the assignment.
# Implement a simple cache. This is creating a closure, since the functions like set() and get()
# operate on the environment variables ('x' and 'm') defined inside makeCacheMatrix()
#
# Args:
# x: The matrix to be cached
#
# Returns:
# A list of functions that can be called by name. It's similar to a virtual-method table in C++.
makeCacheMatrix <- function(x = matrix()) {
# initalize cached value
m <- NULL
# setter function, which also clears the cached result (if any)
set <- function(y) {
x <<- y
m <<- NULL
}
# getter function
get <- function() {
x
}
# cache the calculated result
setInverse <- function(inverse) {
m <<- inverse
}
# retrieve the calculated result
getInverse <- function() {
m
}
# return a handle to the function table (essentially)
list(set = set,
get = get,
setInverse = setInverse,
getInverse = getInverse)
}
# Calculate the inverse (if any) of a supplied matrix. This is a barebones implementation that lacks
# several things:
# 1) It should check that a square matrix is being passed in.
# 2) It should have nicer error handling in the matrix is not invertable
# (see http://en.wikipedia.org/wiki/Invertible_matrix)
# This uses a matrix cache as a data container and to allow the return of a cached value when possible.
# In real life I might use the 'memoise' package
# (http://cran.r-project.org/web/packages/memoise/index.html)
#
# Args:
# x: a function list returned from a makeCacheMatrix() call.
# verbose: If TRUE, prints messages describing the internal activity. Default is FALSE
#
# Returns:
# The inverse matrix if it exists. If the matrix isn't invertable an error will be thrown.
cacheSolve <- function(x, verbose = FALSE) {
m <- x$getInverse()
if (!is.null(m)) {
if (verbose) {
message("returning cached data:")
print(m)
message()
}
return(m)
}
data <- x$get()
if (verbose) {
message("calculating inverse of:")
print(data)
message()
}
m <- solve(data)
if (verbose) {
message("caching calculated inverse:")
print(m)
message()
}
x$setInverse(m)
m
}
|
22d90ae81b722340e2c5e4d63f8e8bfa4ac0a4ba | 57854e2a3731cb1216b2df25a0804a91f68cacf3 | /R/members.R | c8ddc9b3536b564b74cc8f8e625b3d64557bf4fd | [] | no_license | persephonet/rcrunch | 9f826d6217de343ba47cdfcfecbd76ee4b1ad696 | 1de10f8161767da1cf510eb8c866c2006fe36339 | refs/heads/master | 2020-04-05T08:17:00.968846 | 2017-03-21T23:25:06 | 2017-03-21T23:25:06 | 50,125,918 | 1 | 0 | null | 2017-02-10T23:23:34 | 2016-01-21T17:56:57 | R | UTF-8 | R | false | false | 2,812 | r | members.R | #' @rdname catalog-extract
#' @export
setMethod("[[", c("MemberCatalog", "character"), function (x, i, ...) {
    ## Extract a single member by a one-element character index, resolved
    ## against member names/emails (or URLs). Errors if not found.
    ## TODO: eliminate duplication with PermissionCatalog
    stopifnot(length(i) == 1L)
    w <- whichNameOrURL(x, i, emails(x))
    if (is.na(w)) {
        halt("Subscript out of bounds: ", i)
    }
    ## Wrap the matched index entry in a tuple pointing back at the catalog
    tup <- index(x)[[w]]
    return(ShojiTuple(index_url=self(x), entity_url=urls(x)[w],
        body=tup)) ## TODO: MemberTuple
})
#' @rdname catalog-extract
#' @export
setMethod("[", c("MemberCatalog", "character"), function (x, i, ...) {
    ## Subset the catalog by a character vector; every requested element
    ## must resolve, otherwise error with the unmatched names.
    w <- whichNameOrURL(x, i, emails(x))
    if (any(is.na(w))) {
        halt("Undefined elements selected: ", serialPaste(i[is.na(w)]))
    }
    ## Delegate to numeric subsetting with the resolved positions
    return(x[w])
})
#' @rdname catalog-extract
#' @export
setMethod("[[<-", c("MemberCatalog", "ANY", "missing", "ANY"), .backstopUpdate)
#' @rdname catalog-extract
#' @export
setMethod("[[<-", c("MemberCatalog", "character", "missing", "NULL"),
    function (x, i, j, value) {
        ## Remove the specified user from the catalog
        ## PATCHing a null per member tells the API to drop that member.
        payload <- sapply(i, null, simplify=FALSE)
        crPATCH(self(x), body=toJSON(payload))
        return(refresh(x))
    })
#' @rdname teams
#' @export
setMethod("members<-", c("CrunchTeam", "MemberCatalog"), function (x, value) {
    ## TODO: something
    ## For now, assume action already done in other methods, like NULL
    ## assignment above.
    return(x)
})
#' @rdname teams
#' @export
setMethod("members<-", c("CrunchTeam", "character"), function (x, value) {
    ## Add users (identified by character strings) to the team: PATCH an
    ## empty object keyed by each new member, then refresh to pick up the
    ## server-side state.
    payload <- sapply(value, emptyObject, simplify=FALSE)
    crPATCH(self(members(x)), body=toJSON(payload))
    return(refresh(x))
})
#' Read and set edit privileges
#'
#' @param x PermissionCatalog or MemberCatalog
#' @param value For the setter, logical: should the indicated users be allowed
#' to edit the associated object?
#' @return \code{is.editor} returns a logical vector corresponding to whether
#' the users in the catalog can edit or not. \code{is.editor<-} returns the
#' catalog, modified.
#' @name is.editor
#' @aliases is.editor is.editor<-
NULL
#' @rdname is.editor
#' @export
setMethod("is.editor", "MemberCatalog", function (x) {
    ## N.B.: this is for projects; teams don't work this way.
    ## A missing "permissions" or "edit" entry counts as not-editor,
    ## because isTRUE(NULL) is FALSE.
    vapply(index(x), function (a) {
        isTRUE(a[["permissions"]][["edit"]])
    }, logical(1), USE.NAMES=FALSE)
})
#' @rdname is.editor
#' @export
setMethod("is.editor<-", c("MemberCatalog", "logical"), function (x, value) {
    stopifnot(length(x) == length(value))
    ## Only PATCH the members whose edit permission actually changes.
    changed <- is.editor(x) != value
    if (any(changed)) {
        ## Build {member_url: {permissions: {edit: <bool>}}} per changed member
        payload <- structure(lapply(value[changed], {
            function (v) list(permissions=list(edit=v))
        }), .Names=urls(x)[changed])
        crPATCH(self(x), body=toJSON(payload))
        x <- refresh(x)
    }
    return(x)
})
|
f45a966b82cd2e2d57d84d3286b4d46f93da03e3 | f339641cefa9025ef94fe53c9d23856b4ac69933 | /man/geomAustralia.Rd | 9e527608dfabb28b162223755d9c7cf9e79319e6 | [
"MIT"
] | permissive | sjbeckett/localcovid19now | 8ba7f044d8e7458cb4c8c490c8389b596624c84f | af7979dd7e4b1f73751617bd2bae17bdbd285184 | refs/heads/main | 2023-04-17T21:06:30.905493 | 2023-01-23T18:31:06 | 2023-01-23T18:31:06 | 413,489,189 | 4 | 4 | MIT | 2022-12-23T18:10:25 | 2021-10-04T15:53:44 | R | UTF-8 | R | false | true | 339 | rd | geomAustralia.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/data.R
\docType{data}
\name{geomAustralia}
\alias{geomAustralia}
\title{geomAustralia}
\format{
An object of class \code{sf} (inherits from \code{data.frame}) with 8 rows and 9 columns.
}
\usage{
geomAustralia
}
\description{
geomAustralia
}
\keyword{datasets}
|
b91e3226be8b319238dca3cc550b08aeaa41d956 | 215f9bf96cd02f79201da8472a2ab9ce04b786a4 | /Donald Trump Analysis.R | cc450eebbab35d09e16378e30d8e3ae93406afc6 | [] | no_license | Anushi0701/Assignment-5 | ef84c09b77cf78d07ab8eea21d4b0b2bc86100bc | bc91aefbc3caf6512500b2a68860fc90bb8a404a | refs/heads/master | 2020-07-04T02:15:53.717775 | 2016-11-19T20:12:29 | 2016-11-19T20:12:29 | 74,217,916 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 8,825 | r | Donald Trump Analysis.R | # Assignment 5
# Part 2
# Donald Trump Speech Evaluations
library(quanteda)
library(stm)
library(tm)
library(NLP)
library(openNLP)
library(ggplot2)
library(ggdendro)
library(cluster)
library(fpc)
#load data from DonaldTrumpSpeech.csv
url<-"/Users/anushiarora/Desktop/Study Material/Semester 4/Business Analytics/Assignments/Assignment 5/Part 2/DonaldTrumpSpeech.csv"
precorpus<- read.csv(url,
header=TRUE, stringsAsFactors=FALSE)
dim(precorpus)
names(precorpus)
head(precorpus)
str(precorpus)
# Creating a corpus for speech
require(quanteda)
# NOTE(review): `transcriptcorpus` is never defined anywhere in this script,
# so the docnames argument below will fail at runtime -- presumably it was
# meant to reference a column of `precorpus`. TODO confirm and fix.
speechcorpus<- corpus(precorpus$Full.Text,
docnames=transcriptcorpus$Documents)
#explore the corpus of speech
names(speechcorpus)
summary(speechcorpus)
head(speechcorpus)
#Generate DFM
# NOTE(review): the assignments below shadow quanteda's corpus()/dfm()
# function names with data objects; later calls like dfm(...) still resolve
# to the function, but the naming is fragile.
corpus<- toLower(speechcorpus, keepAcronyms = FALSE)
cleancorpus <- tokenize(corpus,
removeNumbers=TRUE,
removePunct = TRUE,
removeSeparators=TRUE,
removeTwitter=FALSE,
verbose=TRUE)
stop_words <- c("re", "net", "six", "room", "g", "gut", "oliv", "tripi","physic", "craft", "fair", "second",
"may", "touch", "don", "voucher", "draw", "aren", "oh", "hello", "lo", "gotten", "glass","whose",
"__they'v", "__so", "__it", "__for", "per", "novemb", "averag", "chao", "materi", "tool", "seven",
"vet", "howev", "without", "lot", "wit", "line", "nov", "didn", "set", "abl", "would'v", "__we",
"one", "year", "s", "t", "know", "also", "just", "like", "can", "need", "number", "say", "includ",
"new", "go","now", "look", "back", "take", "thing", "even", "ask", "seen", "said", "put", "day",
"anoth", "come", "use", "total", "happen", "place", "thank", "ve", "get", "much")
stop_words <- tolower(stop_words)
dfm<- dfm(cleancorpus, toLower = TRUE,
ignoredFeatures = c(stop_words, stopwords("english")),
verbose=TRUE,
stem=TRUE)
# Reviewing top features
topfeatures(dfm, 100)
#dfm with trigrams
cleancorpus1 <- tokenize(corpus,
removeNumbers=TRUE,
removePunct = TRUE,
removeSeparators=TRUE,
removeTwitter=FALSE,
ngrams=3, verbose=TRUE)
dfm.trigram<- dfm(cleancorpus1, toLower = TRUE,
ignoredFeatures = c(stop_words, stopwords("english")),
verbose=TRUE,
stem=FALSE)
topfeatures.trigram<-topfeatures(dfm.trigram, n=50)
topfeatures.trigram
# Wordcloud for Speech
library(wordcloud)
set.seed(142) #keeps cloud' shape fixed
dark2 <- brewer.pal(8, "Set1")
freq<-topfeatures(dfm, n=100)
wordcloud(names(freq),
freq, max.words=200,
scale=c(3, .1),
colors=brewer.pal(8, "Set1"))
#Sentiment Analysis
# Hand-built positive/negative word lists applied as a quanteda dictionary.
mydict <- dictionary(list(positive = c("win", "love", "respect", "prestige", "power", "protect", "struggle",
"enrich", "good", "survival", "morally", "movement", "strongly",
"heaven", "highly-successful", "bright", "hope", "fix", "happy",
"thrilled", "safety", "prosperity", "peace", "rise", "glorious", "great", "stable"),
negative = c("failed", "corrupt", "lie", "threat", "disastrous", "illegal", "dry",
"robbed", "raided", "locked", "crooked", " illusion", "rigged", "deformed",
"attack", "destroy", "destruction", "slander", "concerted", "vicious",
"exposing", "corruption", "misrepresented", "horrible", "ISIS", "deport",
"incompetent", "worse", "sickness", "immorality", "guilty", "warned", "debt",
"terrorist", "crime", "crushed", "poverty", "war", "conflict", "destroy", "defeat")))
dfm.sentiment <- dfm(speechcorpus, dictionary = mydict)
topfeatures(dfm.sentiment)
View(dfm.sentiment)
#running topics
# Structural topic model (stm) with K = 3 topics, capped at 25 EM iterations.
temp<-textProcessor(documents=precorpus$Full.Text, metadata = precorpus)
names(temp) # produces: "documents", "vocab", "meta", "docs.removed"
meta<-temp$meta
vocab<-temp$vocab
docs<-temp$documents
out <- prepDocuments(docs, vocab, meta)
docs<-out$documents
vocab<-out$vocab
meta <-out$meta
prevfit <-stm(docs , vocab ,
K=3,
verbose=TRUE,
data=meta,
max.em.its=25)
topics <-labelTopics(prevfit , topics=c(1:3))
topics
plot.STM(prevfit, type="summary")
plot.STM(prevfit, type="perspectives", topics = c(1,3))
plot.STM(prevfit, type="perspectives", topics = c(1,2))
plot.STM(prevfit, type="perspectives", topics = c(2,3))
# to aid on assigment of labels & intepretation of topics
mod.out.corr <- topicCorr(prevfit) #Estimates a graph of topic correlations
plot.topicCorr(mod.out.corr)
### Advanced method for Topic Modeling
#######################################
library(dplyr)
require(magrittr)
library(tm)
library(ggplot2)
library(stringr)
library(NLP)
library(openNLP)
#load .csv file with news articles
url<-"/Users/anushiarora/Desktop/Study Material/Semester 4/Business Analytics/Assignments/Assignment 5/Part 2/DonaldTrumpSpeech.csv"
precorpus<- read.csv(url,
header=TRUE, stringsAsFactors=FALSE)
#passing Full Text to variable news_2015
speech<-precorpus$Full.Text
#Cleaning corpus
stop_words <- stopwords("SMART")
## additional junk words showing up in the data
stop_words <- c(stop_words, "said", "the", "also", "say", "just", "like","for",
"us", "re", "net", "six", "room", "g", "gut", "oliv",
"can", "may", "now", "year", "according", "mr")
stop_words <- tolower(stop_words)
speech <- gsub("'", "", speech) # remove apostrophes
speech <- gsub("[[:punct:]]", " ", speech) # replace punctuation with space
speech <- gsub("[[:cntrl:]]", " ", speech) # replace control characters with space
speech <- gsub("^[[:space:]]+", "", speech) # remove whitespace at beginning of documents
speech <- gsub("[[:space:]]+$", "", speech) # remove whitespace at end of documents
speech <- gsub("[^a-zA-Z -]", " ", speech) # allows only letters
speech <- tolower(speech) # force to lowercase
## get rid of blank docs
speech <- speech[speech != ""]
# tokenize on space and output as a list:
doc.list <- strsplit(speech, "[[:space:]]+")
# compute the table of terms:
term.table <- table(unlist(doc.list))
term.table <- sort(term.table, decreasing = TRUE)
# remove terms that are stop words or occur fewer than 5 times:
del <- names(term.table) %in% stop_words | term.table < 5
term.table <- term.table[!del]
term.table <- term.table[names(term.table) != ""]
vocab <- names(term.table)
# now put the documents into the format required by the lda package:
# (each document becomes a 2-row matrix: 0-based vocab indices, counts of 1)
get.terms <- function(x) {
index <- match(x, vocab)
index <- index[!is.na(index)]
rbind(as.integer(index - 1), as.integer(rep(1, length(index))))
}
documents <- lapply(doc.list, get.terms)
#############
# Compute some statistics related to the data set:
D <- length(documents) # number of documents (1)
W <- length(vocab) # number of terms in the vocab (1741)
doc.length <- sapply(documents, function(x) sum(x[2, ])) # number of tokens per document [312, 288, 170, 436, 291, ...]
N <- sum(doc.length) # total number of tokens in the data (56196)
term.frequency <- as.integer(term.table)
# MCMC and model tuning parameters:
K <- 10
G <- 3000
alpha <- 0.02
eta <- 0.02
# Fit the model:
library(lda)
set.seed(357)
t1 <- Sys.time()
fit <- lda.collapsed.gibbs.sampler(documents = documents, K = K, vocab = vocab,
num.iterations = G, alpha = alpha,
eta = eta, initial = NULL, burnin = 0,
compute.log.likelihood = TRUE)
t2 <- Sys.time()
## display runtime
t2 - t1
# Posterior estimates: theta = per-document topic weights,
# phi = per-topic term weights (both row-normalized with smoothing).
theta <- t(apply(fit$document_sums + alpha, 2, function(x) x/sum(x)))
phi <- t(apply(t(fit$topics) + eta, 2, function(x) x/sum(x)))
news_for_LDA <- list(phi = phi,
theta = theta,
doc.length = doc.length,
vocab = vocab,
term.frequency = term.frequency)
library(LDAvis)
library(servr)
# create the JSON object to feed the visualization:
json <- createJSON(phi = news_for_LDA$phi,
theta = news_for_LDA$theta,
doc.length = news_for_LDA$doc.length,
vocab = news_for_LDA$vocab,
term.frequency = news_for_LDA$term.frequency)
serVis(json, out.dir = 'vis', open.browser = TRUE)
5cdb8b4d141e26a7329974f200fbff0f2d40bfb4 | 5fb4e774d645b917694cb169a5e3f29f71b74206 | /afile.R | f61579529d23f3589e86ea3aedcf2deaf6b8cc01 | [] | no_license | Sages11/sasap_test2 | 1d46b0b16b19f05689bc528a1b5fb651454c9c71 | a243c52e84c6296e583309093f6626d77d93c675 | refs/heads/master | 2021-08-22T10:19:35.916688 | 2017-11-30T00:41:52 | 2017-11-30T00:41:52 | 112,393,748 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 21 | r | afile.R | #Here is some stuff
|
68b9bc5f822a58bcf3df01870c74e198ee0a31d5 | 29585dff702209dd446c0ab52ceea046c58e384e | /crossReg/R/nonLinearC.R | 23f7864755200f7148bdf4673539b3d4c5109b17 | [] | no_license | ingted/R-Examples | 825440ce468ce608c4d73e2af4c0a0213b81c0fe | d0917dbaf698cb8bc0789db0c3ab07453016eab9 | refs/heads/master | 2020-04-14T12:29:22.336088 | 2016-07-21T14:01:14 | 2016-07-21T14:01:14 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 834 | r | nonLinearC.R | nonLinearC <-
# Fits the nonlinear change-point model
#   y ~ A0 + B1 * (x1 - C) + B2 * ((x1 - C) * x2)
# via nls() and reports a 95% Wald confidence interval for the change
# point C. `Data` must contain columns y, x1 and x2; `startingValue`
# supplies starting values for A0, B1, B2 and C. Returns -1 when the fit
# fails; otherwise prints the coefficient table (side effect) and returns
# list(C_Hat, SE, LowCI, UpperCI).
function (Data, startingValue) {
  fit <- tryCatch(
    nls(y ~ A0 + B1*(x1-C)+B2*((x1-C)*x2), start = startingValue, data = Data),
    error = function(e) e
  )
  # Historical contract: a failed fit is signalled with -1, not an error.
  if (inherits(fit, "error")) {
    return(-1)
  }
  coefficient <- summary(fit)$coefficients
  # Row 4 of the coefficient table holds the change point C.
  C_Hat <- coefficient[4, 1]
  SE <- coefficient[4, 2]
  # Wald 95% interval under the assumption C_Hat ~ N(C, SE^2).
  LowCI <- C_Hat - 1.96 * SE
  UpperCI <- C_Hat + 1.96 * SE
  results <- list(C_Hat = C_Hat, SE = SE, LowCI = LowCI, UpperCI = UpperCI)
  print(coefficient)
  return(results)
}
|
0a7cfb7abb2642740ee00fa9517fe995e41caff9 | 3b01f1418b7b8ec99fcaf457be2ee42fa4a6524e | /R/day-08.R | c2fc3f17e33cd362d769534f334aa44e786efd35 | [] | no_license | MycraySH/geog176A-daily-exercises | 0eb121dc7e14546ba7c116dd6e3ea1e73f8f1506 | 1d96082e8dc581e86c7a50e3310e274601a6f6fc | refs/heads/master | 2022-12-08T06:50:32.408281 | 2020-08-17T23:25:28 | 2020-08-17T23:25:28 | 286,769,874 | 0 | 0 | null | 2020-08-11T14:44:36 | 2020-08-11T14:44:35 | null | UTF-8 | R | false | false | 1,013 | r | day-08.R | #Name: Stone SHI
#Date: 08/16/2020
#To visualize the COVID-19 cases data
library(zoo)
library(ggthemes)
library(tidyverse)
covid = read_csv("data/covid.csv")
state.of.interest = "Virginia"
# Daily new cases for the chosen state: sum to state level per day,
# difference the cumulative counts, then overlay a right-aligned 7-day
# rolling mean on top of the daily bars.
covid %>%
  filter(state == state.of.interest) %>%
  group_by(date) %>%
  summarise(cases = sum(cases,na.rm = TRUE)) %>%
  ungroup() %>%
  mutate(newcases = cases - lag(cases),
         roll7 = rollmean(newcases, 7, fill = NA, align="right")) %>%
  ggplot(aes(x = as.Date(date))) +
  geom_col(aes(y = newcases), col = NA, fill = "#AD1453") +
  geom_line(aes(y = roll7), col = "blue", size = 1) +
  theme_update()+
  ggthemes::theme_gdocs() +
  labs(title = paste("New Reported cases by day in", state.of.interest)) +
  theme(plot.background = element_rect(fill = "white"),
        panel.background = element_rect(fill = "white"),
        plot.title = element_text(size = 13, face = 'bold')) +
  theme(aspect.ratio = .5)->
  newplot
# Write the finished figure to disk, 8 inches wide.
ggsave(newplot, file = "img/newcasesstatelevel.png",
       width = 8,
       unit = "in")
ff6db8a992c0dabc3b720ca492fddddfc0f90915 | 9a872a9007c31dba091e79d69ce3c776d33e5028 | /Day_3.R | 81c3ef975e0129b3c1b24dc3ef0828e50bc7d058 | [] | no_license | KimScholtz/Basic_stats | cc0fa0b7b7909a6559030f119dd540fdc5864a25 | fa0dfb1ef0ed990f1db638412e336fd6ddde16a9 | refs/heads/master | 2020-03-10T11:29:32.601851 | 2018-04-26T14:29:02 | 2018-04-26T14:29:02 | 129,357,614 | 0 | 1 | null | null | null | null | UTF-8 | R | false | false | 5,418 | r | Day_3.R |
# Day 3
# Kim Scholtz
# 17 April 2018
# Distributions
# Generate a Cullen and Frey graph
# Load libraries ----------------------------------------------------------
library(fitdistrplus)
library(logspline)
# Generate log-normal data
# NOTE(review): no set.seed() call, so every run draws different data and
# the printed test results differ between runs.
r_norm <- rnorm(n = 1000, mean = 13, sd = 1)
hist(r_norm)
descdist(r_norm)
# running a fit distribution
hist(r_norm)
descdist(r_norm, discrete = FALSE, boot = 100)
# uniform data
y <- runif(100)
par(mfrow = c(1, 1))
plot(x = c(1:100), y = y)
hist(y)
descdist(y, discrete = FALSE)
# Chapter 6
# t-test: if you compare 2 things
# ANOVA: more than 2 things
# Load libraries ----------------------------------------------------------------
library(tidyverse)
library(plotly)
# Random normal data
r_dat <- data.frame(dat = c(rnorm(n = 1000, mean = 10, sd = 3),
                            rnorm(n = 1000, mean = 8, sd = 2)),
                    sample = c(rep("A", 1000), rep("B", 1000)))
# Check assumptions -------------------------------------------------------
# Normality
# For this we may use the Shapiro-Wilk test
shapiro.test(r_dat$dat)
shapiro.test(r_dat$dat)[1]
shapiro.test(r_dat$dat)[2]
# But that is testing all of the data together
# We must be abit more clever about how we make this test
r_dat %>%
  group_by(sample) %>%
  summarise(r_norm_dist = as.numeric(shapiro.test(dat)[2]))
# Remember the data are normal when p>= 0.05
# The data are non-normal when p <= 0.05
# Check homoscedasticity --------------------------------------------------
# There are many ways to check for homoscedasticity
# Which is the similarity of variance between sample sets
# for now we will simply say that this assumption is met
# the variance of the samples are not more than 2-4 times greater
# then one another
# check everything at once
# WRONG
# Check variance for entire dataset
var(r_dat$dat)
# or do it the tidy
r_dat %>%
  group_by(sample) %>%
  summarise(r_norm_dist = as.numeric(shapiro.test(dat)[2]),
            r_norm_var = var(dat))
# The observations in the groups being compared are independent of each other
# A one sample t-test -----------------------------------------------------
r_one <- data.frame(dat = rnorm(n = 20, mean = 20, sd = 5),
                    sample = "A")
# Visualisation showing the density plot
ggplot(data = r_one, aes(x = dat)) +
  geom_density(aes(fill = sample)) +
  labs(x = "Data", y = "Count")
# Run the test
t.test(r_one$dat, mu = 20)
# Run a test we know will produce a significant result --------------------
t.test(r_one$dat, mu = 30)
# Pick a side -------------------------------------------------------------
# Are these data SMALLER/LESS than the population mean
t.test(r_one$dat, mu = 20, alternative = "less")
# or Greater
t.test(r_one$dat, mu = 20, alternative = "greater")
# But what about for the larger population mean?
# Are the samples less than the population of 30?
t.test(r_one$dat, mu = 30, alternative = "less")
# What about greater than?
t.test(r_one$dat, mu = 30, alternative = "greater")
# Two sample t-tests ------------------------------------------------------
# Create a dataframe ------------------------------------------------------
r_two <- data.frame(dat = c(rnorm(n = 20, mean = 4, sd = 1),
                            rnorm(n = 20, mean = 5, sd = 1)),
                    sample = c(rep("A", 20), rep("B", 20)))
# Run a default/basic test ------------------------------------------------
t.test(dat ~ sample, data = r_two, var.equal = TRUE)
# Pick a side -------------------------------------------------------------
# Is A less than B?
t.test(dat ~ sample, data = r_two, var.equal = TRUE, alternative = "less")
# Is A greater than B?
t.test(dat ~ sample, data = r_two, var.equal = TRUE, alternative = "greater")
# Working on Ecklonia exercise
# Question
# H0:Epiphyte length at Batsata is not greater than at Boulders Beach.
# H1:Epiphyte length at Batsata is greater than at Boulders Beach.
# Load the kelp morphometrics and reshape to long format for plotting.
ecklonia <- read_csv("ecklonia.csv") %>%
  gather(key = "variable", value = "value", -species, -site, -ID)
ggplot(data = ecklonia, aes(x = variable, y = value, fill = site)) +
  geom_boxplot() +
  coord_flip()
# filter the data
ecklonia_sub <- ecklonia %>%
  filter(variable == "epiphyte_length")
# then create a new figure
ggplot(data = ecklonia_sub, aes(x = variable, y = value, fill = site)) +
  geom_boxplot() +
  coord_flip() +
  labs(y = "epiphyte_length (m)", x = "")
library(ggpubr)
t.test(value ~ site, data = ecklonia_sub, var.equal = TRUE, alternative = "greater")
compare_means(value ~ site, data = ecklonia_sub, method = "t.test", var.equal = TRUE, alternative = "greater")
# Exercise 1
pH_new <- read_csv("pH.new.csv")
graph1 <- ggplot(data = pH_new, aes(x = pH, fill = Site)) +
  geom_histogram(position = "dodge", binwidth = 1, alpha = 0.8) +
  geom_density(aes(y = 1*..count.., fill = Site), colour = NA, alpha = 0.4) +
  labs(x = "value")
graph1
t.test(pH ~ Site, data = pH_new, var.equal = TRUE)
#Two Sample t-test
#data: pH by Site
#t = 0.74708, df = 18, p-value = 0.4647
#alternative hypothesis: true difference in means is not equal to 0
#95 percent confidence interval:
# -0.4479361 0.9422951
#sample estimates:
# mean in group Lower mean in group Middle
#6.317179 6.070000
# From the t-test we see that there is a significant difference
# RWS: The p-value is 0.4647... That is not a significant difference.
8f5ef0a87790f4ce96eb9447034113ad414b2c70 | 2b74f55865ec9430f1dd729552547c24c0f8a554 | /app/modules/map.R | 452d4197b6b9445b9741a925af302a7b52d3077f | [
"MIT"
] | permissive | STAT691P-Project-Seminar/SIR-Models | 6124fb7c0b762da9dfa1f58200466a32e392b72b | 7f7fdf391b1a031396ae9ea6298abd5c48e9c69a | refs/heads/master | 2023-03-13T05:35:19.600446 | 2020-05-01T05:07:36 | 2020-05-01T05:07:36 | 255,493,284 | 0 | 0 | MIT | 2020-04-21T19:52:29 | 2020-04-14T02:43:54 | R | UTF-8 | R | false | false | 1,313 | r | map.R | getStaticMap <- function(switchPop, thematic_date){
  # Fetch the state outline polygons and the per-county case table.
  ma = getMapData()$ma
  ma.county.data = getMapData()$county
  # Keep only the requested snapshot date and derive cases per 1000 people.
  ma.county.data <- ma.county.data[ma.county.data$Date2==thematic_date, ]
  ma.county.data$Cases.Per.1000 <- round((ma.county.data$Cases/ma.county.data$Population * 1000), 0)
  # Base map: white county borders on a dark background; axis text is
  # "hidden" by drawing it in white on the dark fill.
  static <- ggplot(ma, aes(x=long, y=lat)) + geom_polygon(aes(group=group), colour='white') +
    theme(
      axis.text.x = element_text(colour = "white"),
      axis.text.y = element_text(colour = "white"),
      axis.title.y = element_text(colour = "white"),
      axis.title.x = element_text(colour = "white"),
      axis.title = element_text(colour = "white"),
      plot.background = element_rect(fill = "#282b29"),
      panel.background = element_rect(fill = "#282b29"),
      legend.position="none",
      panel.grid.major.x = element_blank(),
      panel.grid.minor.x = element_blank()
    ) + labs(x = 'Longitude', y = 'Latitude')
  # Bubble size: per-capita rate when switchPop is TRUE, raw counts otherwise.
  if(switchPop){
    static <- static + geom_point(aes(x=long, y=lat, colour = County, size=Cases.Per.1000), data=ma.county.data, alpha=.5)
  }else{
    static <- static + geom_point(aes(x=long, y=lat, colour = County, size=Cases), data=ma.county.data, alpha=.5)
  }
  # Convert to an interactive plotly widget with hover tooltips.
  static <- ggplotly(static, tooltip = c("County", "Cases", "Cases.Per.1000"))
  return(static)
}
|
7e7e7f28ad490155911927eb0b73981f4a3f16cd | 80d12950078df31cc0dcf8c745b490194cc600b8 | /code.r | adcd03c2eb32503c32308b4c074d193824e81a58 | [] | no_license | all4buckeyes/Homework-0 | c0750760dd3f97bdf6b25bc0d8c2a526cad1c506 | 3c09c2ce5438ed9c00f016566ecac8855c66e8b5 | refs/heads/master | 2022-09-17T20:33:43.985045 | 2020-06-03T18:57:13 | 2020-06-03T18:57:13 | 269,079,782 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 12 | r | code.r | r <- "Test"
|
b782fbf22d890934174b3117daeab60da726f4ea | 47a8dff9177da5f79cc602c6d7842c0ec0854484 | /man/SingleRasterMap.Rd | ded269968892edb67ed5bdeeaeee42413aa9c5ba | [
"MIT"
] | permissive | satijalab/seurat | 8949973cc7026d3115ebece016fca16b4f67b06c | 763259d05991d40721dee99c9919ec6d4491d15e | refs/heads/master | 2023-09-01T07:58:33.052836 | 2022-12-05T22:49:37 | 2022-12-05T22:49:37 | 35,927,665 | 2,057 | 1,049 | NOASSERTION | 2023-09-01T19:26:02 | 2015-05-20T05:23:02 | R | UTF-8 | R | false | true | 1,038 | rd | SingleRasterMap.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/visualization.R
\name{SingleRasterMap}
\alias{SingleRasterMap}
\title{A single heatmap from ggplot2 using geom_raster}
\usage{
SingleRasterMap(
data,
raster = TRUE,
cell.order = NULL,
feature.order = NULL,
colors = PurpleAndYellow(),
disp.min = -2.5,
disp.max = 2.5,
limits = NULL,
group.by = NULL
)
}
\arguments{
\item{data}{A matrix or data frame with data to plot}
\item{raster}{switch between geom_raster and geom_tile}
\item{cell.order}{Optional ordering of cells to use when arranging the heatmap; \code{NULL} keeps the order of \code{data}}
\item{feature.order}{Optional ordering of features to use when arranging the heatmap; \code{NULL} keeps the order of \code{data}}
\item{colors}{A vector of colors to use}
\item{disp.min}{Minimum display value (all values below are clipped)}
\item{disp.max}{Maximum display value (all values above are clipped)}
\item{limits}{A two-length numeric vector with the limits for colors on the plot}
\item{group.by}{A vector to group cells by, should be one grouping identity per cell}
}
\value{
A ggplot2 object
}
\description{
A single heatmap from ggplot2 using geom_raster
}
\keyword{internal}
|
4a8e9f6ea355981e88f1d77d8a4b46087c153bb4 | 415071fd271606d46fea7d2482c9f57e5cfeb26a | /man/dist_lnorm1.Rd | 93fb74de8137aa9c0bf9bb0fae15449383ea42bb | [
"MIT"
] | permissive | mrajeev08/treerabid | 9e97d81acb665cf2c222c227db62edb33e1c12f3 | 864f671cc94c28f272d5e828390d5b74204798e8 | refs/heads/master | 2023-07-02T06:00:23.610108 | 2023-06-07T20:21:03 | 2023-06-07T20:21:03 | 361,056,754 | 1 | 0 | NOASSERTION | 2023-06-07T20:21:04 | 2021-04-24T02:56:48 | R | UTF-8 | R | false | true | 268 | rd | dist_lnorm1.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/parameterize.R
\name{dist_lnorm1}
\alias{dist_lnorm1}
\title{Lognormal dispersal kernel}
\usage{
dist_lnorm1(ttree, params, cutoff = NULL)
}
\description{
See ?si_gamma1 for more details.
}
|
3b260a4688b17470579c8a35e6ce301f185bbf2e | 0f97039d2b7be6f7c7c6f2b94914e72401ef12fc | /R/friedman.plot.R | e5d619aa4757075113d667fcc481ecce35696965 | [
"MIT"
] | permissive | jhk0530/Rstat | aaa6692c2f4c8a768707e1e51992f5f2dbd817ad | 6b79d8047b6bdbb56e8ac7a6c6eed6c31694b079 | refs/heads/master | 2023-07-31T22:28:09.874352 | 2021-09-12T06:07:15 | 2021-09-12T06:07:15 | 263,038,018 | 4 | 3 | null | null | null | null | UTF-8 | R | false | false | 1,470 | r | friedman.plot.R | #' @title Friedman Test
#' @description Friedman Test with a Plot
#' @usage friedman.plot(x, a, b, dig = 4, plot = FALSE)
#' @param x Data vector
#' @param a Vector of factor levels
#' @param b Vector of block levels
#' @param dig Number of digits below the decimal point, Default: 4
#' @param plot Plot test results? Default: FALSE
#'
#' @return None.
#' @examples
#' x <- c(71, 77, 75, 79, 88, 70, 94, 74, 74, 83, 72, 95, 77, 80, 90, 94, 64, 76, 76, 89)
#' a <- rep(1:4, each = 5)
#' b <- rep(1:5, 4)
#' friedman.plot(x, a, b, plot = TRUE)
#' @export
friedman.plot <- function(x, a, b, dig = 4, plot = FALSE) {
nn <- length(x)
kk <- length(unique(a))
rr <- length(unique(b))
af <- as.factor(a)
bf <- as.factor(b)
rx <- tapply(x, b, rank)
urx <- unlist(rx)
rxm <- matrix(urx, kk, rr)
a2 <- rep(1:kk, rr)
ra <- tapply(urx, a2, sum)
rtab <- cbind(rxm, ra)
rownames(rtab) <- paste0("Group", 1:kk)
colnames(rtab) <- c(1:rr, "Sum")
cat("Rank Sum within each Group -----------\n")
print(rtab)
F <- 12 / kk / (kk + 1) / rr * sum(ra^2) - 3 * rr * (kk + 1)
pv <- pchisq(F, kk - 1, lower.tail = FALSE)
cat("Friedman Test ----------\n")
cat(paste0(
"F = (12 / ", kk, " / ", kk + 1, " / ",
rr, ") × ", sum(ra^2), " - 3 × ", rr, " × ",
kk + 1, " = ", round(F, dig)
), "\n")
cat(paste0("df=", kk - 1, "\t p-value=", round(
pv,
dig
)), "\n")
if (plot) {
chitest.plot2(stat = F, df = kk - 1, side = "up")
}
}
|
3a12d35394a85b08be514124bf7d6685e7ddb74f | e023591ca5c7d699bc728520bbe2602e95239360 | /R/3_kinetics.R | bd402db49588c83793fd2f6f74686f805e80d192 | [] | no_license | atuldeshpande/dyngen | e1e1dbe2f9b2812dc361b90028cd245129991180 | 7cf55816a3344b9d0dcbb7911113cbb1e0aab482 | refs/heads/master | 2020-05-29T10:35:42.957178 | 2018-10-16T10:04:15 | 2018-10-16T10:04:15 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 6,798 | r | 3_kinetics.R | # helper functions to get configuration ids (of TF binding)
# Decode a binding-configuration id into per-regulator bound flags.
# Bit i of `configuration_id` (least-significant first) reports whether
# regulator i is bound; the result is a logical vector of length
# `n_regulators`, in regulator order.
get_binding_configuration <- function(configuration_id, n_regulators) {
  # intToBits() already yields the 32 bits least-significant first, so the
  # first `n_regulators` entries are exactly the flags we need.
  bit_flags <- as.logical(intToBits(configuration_id))
  bit_flags[seq_len(n_regulators)]
}
# Enumerate every non-empty binding configuration for `n` regulators: the
# ids 1 .. 2^n - 1. Zero regulators admit no configurations, so an empty
# numeric vector is returned in that case.
get_configuration_ids <- function(n) {
  if (n == 0) {
    return(numeric())
  }
  seq(1, 2^n - 1)
}
# Default random samplers and rate calculators for gene kinetics. Each
# sample_* closure draws `n` values from a fixed uniform range; the
# calculate_* closures derive activation levels (a0, a) and the
# dissociation constant k from regulator effects and protein abundance.
get_default_kinetics_samplers <- function() {
  list(
    sample_r = function(n) runif(n, 10, 200),
    sample_d = function(n) runif(n, 2, 8),
    sample_p = function(n) runif(n, 2, 8),
    sample_q = function(n) runif(n, 1, 5),
    # Basal activation: nearly off (0.0001) only for genes with activators
    # and no repressors; intermediate (0.5) with both kinds present.
    calculate_a0 = function(effects) {
      if (length(effects) == 0) return(1)
      if (sum(effects == -1) == 0) return(0.0001)
      if (sum(effects == 1) == 0) return(1)
      0.5
    },
    # Activation of one binding configuration: 0 as soon as any bound
    # regulator is a repressor (effect == -1), otherwise 1.
    calculate_a = function(configuration_id, effects) {
      bound <- get_binding_configuration(configuration_id, length(effects))
      if (any(effects[bound] == -1)) 0 else 1
    },
    sample_strength = function(n) runif(n, 1, 20),
    # Dissociation constant: half-maximal protein scaled down by strength.
    calculate_k = function(max_protein, strength) max_protein / 2 / strength,
    sample_cooperativity = function(n) runif(n, 1, 4)
  )
}
#' Randomize kinetics of genes
#'
#' Draws per-gene kinetic rates (r, d, p, q), basal and per-configuration
#' activation levels (a0, a), and per-interaction strength, dissociation
#' constant k and cooperativity c, using the supplied samplers. Returns a
#' list with the updated \code{feature_info} and \code{net} tables.
#'
#' @rdname generate_system
#' @export
randomize_gene_kinetics <- function(
  feature_info,
  net,
  samplers = get_default_kinetics_samplers()) {
  # sample r, d, p, q and k ----------------------
  feature_info <- feature_info %>% mutate(
    r = samplers$sample_r(n()),
    d = samplers$sample_d(n()),
    p = samplers$sample_p(n()),
    q = samplers$sample_q(n())
  )
  # sample a0 and a ---------------------------------------
  # include a list of effects in the feature_info
  feature_info <- feature_info %>%
    select(-matches("effects"), -matches("regulator_ids")) %>% # remove previously added effects
    left_join(
      net %>% group_by(to) %>%
        summarise(effects = list(effect), regulator_ids = list(from))
      , by = c("gene_id" = "to")
    )
  # for a0 and a
  # calculate all a based on effects
  calculate_all_a <- function(effects) {
    # for every binding configuration
    configuration_ids <- get_configuration_ids(length(effects))
    map_dbl(configuration_ids, samplers$calculate_a, effects = effects) %>% set_names(configuration_ids)
  }
  feature_info <- feature_info %>%
    mutate(
      a0 = ifelse(!is.na(a0), a0, map_dbl(effects, samplers$calculate_a0)), # keep preset a0 values
      configuration_ids = map(effects, ~get_configuration_ids(length(.))),
      as = map(effects, calculate_all_a)
    )
  # calculate interaction cooperativity and binding strength
  # calculate k
  feature_info <- feature_info %>% mutate(max_protein = r/d * p/q) # calculate maximal protein
  net <- net %>%
    left_join(feature_info %>% select(max_protein, gene_id), by = c("from" = "gene_id")) %>% # add maximal protein of regulator
    mutate(
      strength = ifelse(is.na(strength), samplers$sample_strength(n()), strength),
      k = samplers$calculate_k(max_protein, strength)
    )
  # calculate c
  net <- net %>% mutate(
    c = ifelse(is.na(cooperativity), samplers$sample_cooperativity(n()), cooperativity)
  )
  lst(feature_info, net)
}
#' Randomize kinetics of cells
#'
#' Adds the per-cell kinetic scaling factors kg, rg, pg, qg and dg to the
#' \code{cells} data frame. Each factor is drawn by its own sampler; all of
#' the default samplers currently return the constant 1, i.e. no
#' cell-to-cell variation.
#'
#' @rdname generate_system
#' @export
randomize_cell_kinetics <- function(cells) {
  sample_kg <- function(n) 1
  sample_rg <- function(n) 1
  sample_pg <- function(n) 1
  sample_qg <- function(n) 1
  sample_dg <- function(n) 1
  cells %>%
    mutate(
      kg = sample_kg(n()),
      # Bug fix: each column now uses its own sampler. Previously every
      # column was drawn from sample_kg -- harmless while all samplers
      # return 1, but wrong as soon as any of them is customized.
      rg = sample_rg(n()),
      pg = sample_pg(n()),
      qg = sample_qg(n()),
      dg = sample_dg(n())
    )
}
# Flatten every kinetic parameter into one named numeric vector. The names
# encode the parameter type and owner -- e.g. "r_<gene>",
# "a_<gene>_<configuration>", "k_<target>_<regulator>",
# "c_<target>_<regulator>" and "kg_<cell>" -- so the simulation formulae
# can look parameters up by name.
extract_params <- function(feature_info, net, cells) {
  params <- c()
  # extract r, d, p, q, a0
  params <- feature_info %>% select(r, d, p, q, a0, gene_id) %>%
    gather("param_type", "param_value", -gene_id) %>%
    mutate(param_id = glue::glue("{param_type}_{gene_id}")) %>%
    {set_names(.$param_value, .$param_id)} %>%
    c(params)
  # extract a
  params <- feature_info %>%
    unnest(as, configuration_ids) %>%
    mutate(param_id = glue::glue("a_{.$gene_id}_{.$configuration_ids}"), param_value = as) %>%
    {set_names(.$param_value, .$param_id)} %>%
    c(params)
  # extract k and c
  params <- net %>% select(c, k, from, to) %>%
    gather("param_type", "param_value", -from, -to) %>%
    mutate(param_id = glue::glue("{param_type}_{to}_{from}")) %>%
    {set_names(.$param_value, .$param_id)} %>%
    c(params)
  # extract cell parameters
  params <- cells %>% select(kg, rg, pg, qg, dg, cell_id) %>%
    gather("param_type", "param_value", -cell_id) %>%
    mutate(param_id = glue::glue("{param_type}_{cell_id}")) %>%
    {set_names(.$param_value, .$param_id)} %>%
    c(params)
  params
}
#' Generate a system from a network using
#'
#' @param net Regulatory network dataframe
#' @param feature_info Geneinfo dataframe
#' @param cells Cells dataframe
#' @param samplers The samplers for the kinetics parameters
#' @return A list containing the reaction formulae, initial state,
#'   parameter vector, stoichiometry matrix (\code{nus}), burn-in
#'   variables, molecule ids, and the updated feature_info/net/cells.
#' @export
generate_system <- function(net, feature_info, cells, samplers) {
  # Randomize
  randomized_gene_kinetics <- randomize_gene_kinetics(feature_info, net, samplers)
  feature_info <- randomized_gene_kinetics$feature_info
  net <- randomized_gene_kinetics$net
  cells <- randomize_cell_kinetics(cells)
  # Create variables
  # Each gene gets an mRNA variable (x_<gene>) and a protein variable (y_<gene>).
  feature_info$x <- glue("x_{feature_info$gene_id}")
  feature_info$y <- glue("y_{feature_info$gene_id}")
  molecule_ids <- c(feature_info$x, feature_info$y)
  # Extract formulae & kinetics
  formulae_changes <- generate_formulae(net, feature_info, cells)
  initial_state <- rep(0, length(molecule_ids)) %>% setNames(molecule_ids)
  params <- extract_params(feature_info, net, cells)
  # Extract nus
  # Cast the per-formula molecule effects into a molecule x formula matrix.
  formulae <- formulae_changes$formula %>% set_names(formulae_changes$formula_id)
  formulae_changes$molecule <- factor(formulae_changes$molecule, levels = molecule_ids) # fix order
  formulae_changes$formula_id <- factor(formulae_changes$formula_id, levels = names(formulae)) # fix order
  nus <- reshape2::acast(formulae_changes, molecule~formula_id, value.var = "effect", fun.aggregate = first, fill = 0, drop = F)
  # Burn in
  burn_variables <- feature_info %>% filter(as.logical(burn)) %>% select(x, y) %>% unlist() %>% unname()
  lst(formulae, initial_state, params, nus, burn_variables, molecule_ids, feature_info, net, cells)
}
c8c422970d43c993a008aa870116f2e15289b365 | 9ad4b4acb8bd2b54fd7b82526df75c595bc614f7 | /MainIntegrateAll_RunPipline.R | 934b93115c8f251617799d27e5f17844696a092e | [] | no_license | sylvia-science/Ghobrial_EloRD | f27d2ff20bb5bbb90aa6c3a1d789c625540fbc42 | 041da78479433ab73335b09ed69bfdf6982e7acc | refs/heads/master | 2023-03-31T14:46:27.999296 | 2021-04-02T15:09:49 | 2021-04-02T15:09:49 | 301,811,184 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 69,956 | r | MainIntegrateAll_RunPipline.R | # source('C:/Users/Sylvia/Dropbox (Partners HealthCare)/Sylvia_Romanos/scRNASeq/Code/Integrate All/MainIntegrateAll_RunPipline.R')
rm(list = ls())
gc()
source('/home/sujwary/Desktop/scRNA/Code/Functions.R')
source('/home/sujwary/Desktop/scRNA/Code/Integration/FunctionsIntegrate.R')
source('/home/sujwary/Desktop/scRNA/Code/Integration/PlotAll.R')
source('/home/sujwary/Desktop/scRNA/Code/Plot_func.R')
source('/home/sujwary/Desktop/scRNA/Code/Integrate All/PipelineIntegrateAll.R')
source('/home/sujwary/Desktop/scRNA/Code/Integrate All/PlotFunctionIntegrateAll.R')
source('/home/sujwary/Desktop/scRNA/Code/Integrate All/IntegrateAll_ClusterUmap.R')
library(dplyr)
library(Seurat)
library(h5)
library(readxl)
library(ggplot2)
library(ggrepel)
library(stringr)
library(data.table)
require(gridExtra)
require(data.table)
# ---- Run configuration ----
# Several of the variables below are assigned repeatedly; only the LAST
# uncommented assignment takes effect. The earlier ones are kept as a menu
# of previously-used configurations.
integrate_merge = 'Integrate'
ConvertCategorical = 'ConvCatF'
# Cell type if subsetting ('' = run on all cells, no subset)
celltype = 'Mono'
#celltype = 'T Cell'
#celltype = 'NK'
celltype = ''
# Variables to regress out during scaling/integration.
# Effective value: c("kit") (last uncommented assignment).
regress_var = sort(c("Patient","dexa","kit","Response"))
regress_var = sort(c('dexa','kit',"nCount_RNA", "percent.mt"))
regress_var = sort(c('kit','SampleType'))
#regress_var = sort(c("PatientLast","dexa",'kit'))
#regress_var = sort(c("dexa","kit"))
regress_var = sort(c(""))
regress_var = sort(c("kit"))
#regress_var = sort(c("dexa","kit",'Patient'))
#regress_var = sort(c(""))
#regress_var = c('Patient')
#regress_var = ''
# NOTE(review): "str" shadows the base function utils::str() for the rest of
# this script. Used as a folder/file-name suffix encoding the regressed vars.
str = paste0('Reg',paste(regress_var, collapse = '_'))
# Set to Clean if cell types have been removed. Effective value: '/'.
clean = '/3TimePoints_'
clean = '/'
saveClean = FALSE
# Set to rpca if rpca was used to integrate. Effective value: ''.
rpca = '_rpca'
rpca = ''
if (integrate_merge == 'Merge'){
  rpca = ''
}
print(paste0('integrate_merge:', integrate_merge))
# Pick the parameter spreadsheet matching the run mode; the combined
# ("Integrate"/"Merge") modes also load the EloRD sample metadata sheet.
if (integrate_merge == 'Integrate' || integrate_merge == 'Merge'){
  filename_sampleParam <- paste0('/home/sujwary/Desktop/scRNA/Data/sample_','Combine','_parameters.xlsx')
  filename_metaData <- paste0('/home/sujwary/Desktop/scRNA/Data/EloRD Meta','','.xlsx')
  sampleParam <- read_excel(filename_sampleParam)
  metaData = read_excel(filename_metaData)
}else{
  filename_sampleParam <- '/home/sujwary/Desktop/scRNA/Data/Data/sample_parameters.xlsx'
  sampleParam <- read_excel(filename_sampleParam)
}
print(filename_sampleParam)
# Effective value: 'PrePostEOTNBM_MT15' (last assignment wins).
sample_type = 'NBMHCL_MT15'
sample_type = 'Tcell_MergeInt_Sub'
sample_type = 'PrePostEOTNBM'
sample_type = 'PrePostEOTNBM_MT15'
if (celltype == ''){
  sample_name = paste0(integrate_merge,'_',sample_type,'_',str)
}else{
  sample_name = paste0(integrate_merge,'_',sample_type,'_',str,'_',celltype)
}
folder_base = '/home/sujwary/Desktop/scRNA/'
folder_base_output = paste0('/home/sujwary/Desktop/scRNA/Output/',
                            integrate_merge ,' All/',sample_type,'',clean,'',str,'/')
# Look up per-sample tuning parameters from the spreadsheet row matching
# sample_name. NOTE(review): sampleParam['Sample'] == sample_name builds a
# logical matrix; the conventional form is sampleParam$Sample == sample_name,
# though both select the intended rows here.
PCA_dim = sampleParam$PCA_dim[sampleParam['Sample'] == sample_name]
resolution_val = sampleParam$resolution_val[sampleParam['Sample'] == sample_name]
cluster_IDs = sampleParam$Cluster_IDs[sampleParam['Sample'] == sample_name]
cell_features = getCellMarkers(folder_base)
print(PCA_dim)
############################################
# ---- Mode switch ----
# RunPipeline == TRUE: run the full integration pipeline from the raw
# integrated object and save the result. FALSE (else branch below): load a
# previously saved run and do downstream analysis/plotting.
# NOTE(review): prefer TRUE over T (T is reassignable).
RunPipeline = T
if (RunPipeline){
  print('Run')
  #celltype = 'T Cell'
  # Input .Robj path depends on whether we are running on a cell-type subset.
  if (celltype != ''){
    folder_base_output = paste0(folder_base_output, 'SubsetIntegrate/',celltype,'/')
    path = paste0(folder_base_output, 'data_',integrate_merge,'_',sample_type,'.Robj')
    regress_var = c('')
  }else{
    path = paste0(folder_base,'Output/',integrate_merge ,' All/',sample_type,'/data_',integrate_merge,'_',sample_type,'.Robj')
  }
  str_data_input = '_features2000'
  #path = paste0('/home/sujwary/Desktop/scRNA/Output/',integrate_merge ,' All/',sample_type
  #              ,'/data',str_data_input,'.Robj')
  # loadRData is a project helper (Functions.R) returning the saved object.
  data_integrate = loadRData(path)
  #browser()
  #folder_base_output = paste0(folder_base_output,str,'/')
  #folder_base_output = paste0(folder_base_output, ConvertCategorical,'/')
  pathName = paste0(folder_base_output,'PCA')
  dir.create( pathName, recursive = TRUE)
  pathName = paste0(folder_base_output,'Cluster')
  dir.create( pathName, recursive = TRUE)
  # Copy spreadsheet-style metadata columns to simpler names used downstream.
  data_integrate$Patient = data_integrate$'Patient Number'
  data_integrate$kit = data_integrate$'10X kit'
  #browser()
  ##################################
  #regress_var = sort(c(""))
  cell_features = getCellMarkers(folder_base)
  # Main pipeline call: scaling/regression, PCA, clustering, UMAP
  # (implemented in PipelineIntegrateAll.R).
  data_run = PipelineIntegrateAll(data_integrate,
                                  sample_name,
                                  folder_base_output,
                                  sampleParam,
                                  integrate_merge,regress_var =regress_var,
                                  markersTF = FALSE,
                                  cell_features,
                                  ConvertCategorical = F)
  # NOTE(review): browser() is a live debugger breakpoint -- the script stops
  # here when run non-interactively. Presumably intentional for interactive
  # checkpointing; remove for unattended runs.
  browser()
  # Renaming variables to be simpler
  data_run@meta.data$Treatment = gsub("baseline", "Baseline", data_run@meta.data$Treatment)
  data_run$split_var = data_run$Treatment
  # Persist the processed object so the else-branch below can reload it.
  path = paste0(folder_base_output,
                '/data_run_',integrate_merge,rpca,'_PCAdim',PCA_dim,'_',sample_type,
                '.Robj')
  save(data_run,file= path)
  browser()
  groupBy_list = c('dexa','sample_name','Treatment','kit','Patient')
  #groupBy_list = c('dexa','sample_name', 'Response','Treatment','kit','Patient','Cell_Label','SampleType')
  # Generate the standard plot set, grouped/split by each metadata variable.
  plotAll(data_run, folder = folder_base_output,
          sample_name,sampleParam,
          cell_features = cell_features,
          label_TF = F,integrate_TF = F, DE_perm_TF = F,
          clusterTF = F, markersTF =T, keepOldLabels = F, groupBy = groupBy_list,
          PCA_dim = NA,resolution_val = NA)
  filepath_cluster = paste0( folder_base_output, 'Cluster/', 'PCA',PCA_dim,'/res',resolution_val,'/' )
  cell_features = getCellMarkers(folder_base)
  # Known-marker plots per cluster, in three styles.
  PlotKnownMarkers(data_run, folder = paste0(filepath_cluster,'Cell Type/HeatMap/'), cell_features = cell_features,
                   plotType ='HeatMap' , str = '')
  PlotKnownMarkers(data_run, folder = paste0(filepath_cluster,'Cell Type/Violin/'), cell_features = cell_features,
                   plotType ='Violin' , str = '')
  PlotKnownMarkers(data_run, folder = paste0(filepath_cluster,'Cell Type/FeaturePlotFix/'), cell_features = cell_features,
                   plotType ='FeaturePlotFix' , str = '')
  #browser()
  ###################################
}else{
  # ---- Analysis branch: load a saved run and do downstream work ----
  # Load data and plot
  if (celltype != ''){
    folder_base_output = paste0(folder_base_output, 'SubsetIntegrate/',celltype,'/')
    path = paste0(folder_base_output,
                  '/data_run_',integrate_merge,rpca,'_PCAdim',PCA_dim,'_',sample_type,
                  '.Robj')
  }else{
    path = paste0(folder_base_output,
                  '/data_run_',integrate_merge,rpca,'_PCAdim',PCA_dim,'_',sample_type,
                  '.Robj')
  }
  #PCA_dim = 10
  #resolution_val =
  filepath_cluster = paste0( folder_base_output, 'Cluster/', 'PCA',PCA_dim,'/res',resolution_val,'/' )
  data_run = loadRData(path) # The main data we're working with
  # Re-cluster at the spreadsheet resolution (graph must already exist in
  # the saved object for FindClusters to work).
  data_run = FindClusters(data_run, resolution = resolution_val)
  #data_run@meta.data$split_var = paste(data_run@meta.data$orig.ident,data_run@meta.data$dexa)
  #data_run@meta.data$split_var = gsub("data_post Yes", "Post D", data_run@meta.data$split_var)
  #data_run@meta.data$split_var = gsub("data_pre Yes", "Pre D", data_run@meta.data$split_var)
  #data_run@meta.data$split_var = gsub("data_post No", "Post ND", data_run@meta.data$split_var)
  #data_run@meta.data$split_var = gsub("data_pre No", "Pre ND", data_run@meta.data$split_var)
  #data_run@meta.data$split_var = gsub("data_NBM NBM", "NBM", data_run@meta.data$split_var)
  # the split_var metadata will contain relevant info for our project
  data_run@meta.data$split_var = data_run@meta.data$orig.ident
  # Renaming variables to be simpler
  data_run@meta.data$split_var = gsub("data_baseline", "Baseline", data_run@meta.data$split_var)
  data_run@meta.data$split_var = gsub("data_EOT", "EOT", data_run@meta.data$split_var)
  data_run@meta.data$split_var = gsub("data_C9D1", "C9D1", data_run@meta.data$split_var)
  data_run@meta.data$split_var = gsub("data_NBM", "NBM", data_run@meta.data$split_var)
  # NOTE(review): the three Response edits below are discarded -- Response is
  # reset to '' a few lines down and then refilled from metaData in the loop.
  data_run@meta.data$Response = gsub("Minimal Response then Progression", "MRP", data_run@meta.data$Response)
  data_run@meta.data$Response = gsub("Minimal Response", "MR", data_run@meta.data$Response)
  data_run@meta.data$Response[is.na(data_run@meta.data$Response)] = 'NBM'
  data_run@meta.data$kit[(data_run@meta.data$Response) == 'NA'] = 'Microwell-seq'
  # Pull response/status fields per sample from the EloRD metadata sheet.
  data_run$Best_Overall_Response = ''
  data_run$Current_Status = ''
  data_run$Response = ''
  for (sample in unique(data_run$sample_name)){
    data_run$Response[data_run$sample_name == sample] = metaData[metaData$Sample == sample,]$General_Response
    data_run$Best_Overall_Response[data_run$sample_name == sample] = metaData[metaData$Sample == sample,]$Best_Overall_Response
    data_run$Current_Status[data_run$sample_name == sample] = metaData[metaData$Sample == sample,]$Current_Status
  }
  # Count cells per sample and write the table sorted ascending by cell count.
  sample_list = unique(data_run$sample_name)
  SampleNum = data.frame(matrix(ncol = 2, nrow = length(sample_list)))
  colnames(SampleNum) = c('sample','Cells')
  for (i in 1:length(sample_list)){
    sample = sample_list[i]
    print (sample)
    data_subset = data_run[,data_run$sample_name == sample]
    SampleNum$sample[i] = sample
    SampleNum$Cells[i] = ncol(data_subset)
  }
  #SampleNum = SampleNum[order(SampleNum$Cells),]
  SampleNum = SampleNum[order(SampleNum$Cells),]
  write.csv(SampleNum, file=paste0(filepath_cluster,'/Stats/','SampleNum.csv'), row.names = F)
  # Export UMAP embedding coordinates for external tools.
  umap = data_run@reductions[["umap"]]@cell.embeddings
  write.csv(umap, file=paste0(filepath_cluster,'Umap.csv'))
  # ---- Lineage subsets (by cluster ID) and NMF input export ----
  # Cluster-ID lists were chosen manually from the marker plots for this run;
  # they are specific to the current PCA_dim/resolution.
  cell_list = c('0', '2', '3', '4', '5', '6', '7', '9', '10', '11', '13', '22', '28', '37', '38')
  data_run_tcell = data_run[,Idents(data_run) %in% cell_list]
  data_run_NK = data_run[,Idents(data_run) %in% c('8','17')]
  cell_list = c('12', '14', '15', '16', '23', '25', '39', '41')
  data_run_mono = data_run[,Idents(data_run) %in% cell_list]
  # All cells, all features, raw counts (no normalization for this export).
  data_matrix = data_run@assays[["RNA"]]@counts
  #data_matrix = data_matrix[rownames(data_matrix) %in% data_run@assays[["integrated"]]@var.features,]
  #data_matrix = NormalizeData(data_matrix, normalization.method = "LogNormalize", scale.factor = 10000)
  write.table(data_matrix, file='/home/sujwary/Desktop/scRNA/Data/NMF/PrePostEOTNBM_MT15_All_AllFeatures.tsv', quote=FALSE, sep='\t')
  # Per-lineage exports: restrict to integration variable features, then
  # log-normalize before writing the NMF input matrix.
  data_matrix = data_run_tcell@assays[["RNA"]]@counts
  data_matrix = data_matrix[rownames(data_matrix) %in% data_run@assays[["integrated"]]@var.features,]
  data_matrix = NormalizeData(data_matrix, normalization.method = "LogNormalize", scale.factor = 10000)
  write.table(data_matrix, file='/home/sujwary/Desktop/scRNA/Data/NMF/PrePostEOTNBM_MT15_TCell.tsv', quote=FALSE, sep='\t')
  data_matrix = data_run_NK@assays[["RNA"]]@counts
  data_matrix = data_matrix[rownames(data_matrix) %in% data_run@assays[["integrated"]]@var.features,]
  data_matrix = NormalizeData(data_matrix, normalization.method = "LogNormalize", scale.factor = 10000)
  write.table(data_matrix, file='/home/sujwary/Desktop/scRNA/Data/NMF/PrePostEOTNBM_MT15_NK.tsv', quote=FALSE, sep='\t')
  data_matrix = data_run_mono@assays[["RNA"]]@counts
  data_matrix = data_matrix[rownames(data_matrix) %in% data_run@assays[["integrated"]]@var.features,]
  data_matrix = NormalizeData(data_matrix, normalization.method = "LogNormalize", scale.factor = 10000)
  write.table(data_matrix, file='/home/sujwary/Desktop/scRNA/Data/NMF/PrePostEOTNBM_MT15_Mono.tsv', quote=FALSE, sep='\t')
  # Label data -- label_cells (project helper) renames cluster idents using
  # the comma-separated cluster_IDs string from the parameter sheet.
  data_run_label = label_cells(data_run,cluster_IDs)
  # NOTE(review): data_run_label_clean is assigned twice; the second
  # (3-cell-type) version overwrites the first (12-cell-type) version.
  cell_list = c('NK','mNK','Tgd','CD8+ T Cell','Treg','TEM','TEMRA','TCM','TSCM','U1', 'U2','U3')
  data_run_label_clean = data_run_label[,Idents(data_run_label) %in% cell_list]
  Idents(data_run_label_clean) = factor(Idents(data_run_label_clean) , levels = cell_list)
  cell_list = c('NK','CD8+ T Cell','T Cell')
  data_run_label_clean = data_run_label[,Idents(data_run_label) %in% cell_list]
  Idents(data_run_label_clean) = factor(Idents(data_run_label_clean) , levels = cell_list)
  # Print the Cell_Label composition of every numeric cluster.
  idents_unique = sort(unique(Idents(data_run)))
  for (i in idents_unique){
    data_subset = subset(data_run, idents = i)
    print(i)
    print( summary(data.frame(data_subset$Cell_Label)))
  }
  # ---- Per-cluster composition summaries (excluding Microwell-seq cells) ----
  data_run_label_orig = data_run_label[,data_run_label$kit != 'Microwell-seq']
  for (i in (unique(Idents(data_run_label)))){
    print('')
    data_subset = subset(data_run_label, idents = i)
    print(i)
    data_summary = summary(data.frame(data_subset$Cell_Label))
    print(data_summary )
    # Restrict to cells from the original (non-Microwell-seq) kits for the
    # counts/percentages below.
    data_subset_orig = data_subset[,data_subset$kit != 'Microwell-seq']
    sample_names = unique(data_subset_orig$sample_name)
    print('Number of cells')
    print(length(data_subset_orig$sample_name))
    print('Percentage')
    print(ncol(data_subset_orig)/ncol(data_run_label_orig)*100)
    #print('SM')
    #sum(str_count(sample_names, "GL"))
    #print('NBM')
    #sum(str_count(sample_names, "NBM"))
    print('Num Samples')
    print(length(sample_names))
  }
  # Interactive checkpoint (debugger breakpoint).
  browser()
  # Begin plotting data
  cell_features = getCellMarkers(folder_base)
  groupBy_list = c('dexa','sample_name', 'Response','split_var','kit','Patient')
  # NOTE(review): this re-derives data_run_label_clean with a 9-type list,
  # overwriting the earlier versions.
  cell_list = c("CD8+ T Cell","mNK","NK","TCM","TEM","TEMRA","Tgd","Treg","TSCM" )
  data_run_label_clean = data_run_label[,Idents(data_run_label) %in% cell_list]
  # Standard plot set: unlabeled clusters, labeled clusters, and the cleaned
  # (labeled, subset) object.
  plotAll(data_run, folder = folder_base_output,
          sample_name,sampleParam,
          cell_features = cell_features,
          label_TF = F,integrate_TF = F, DE_perm_TF = F,
          clusterTF =F, markersTF = T, keepOldLabels = F, groupBy = groupBy_list,
          PCA_dim = NA,resolution_val = NA)
  plotAll(data_run, folder = folder_base_output,
          sample_name = sample_name,sampleParam = sampleParam,
          cell_features = cell_features,
          label_TF = T,integrate_TF = F, DE_perm_TF = F,
          clusterTF = F, markersTF = T, keepOldLabels = F, groupBy = groupBy_list)
  plotAll(data_run_label_clean, folder = folder_base_output,
          sample_name = sample_name,sampleParam = sampleParam,
          cell_features = cell_features,
          label_TF = F,integrate_TF = F, DE_perm_TF = F,
          clusterTF = F, markersTF = T, keepOldLabels = T, groupBy = groupBy_list, str = '_Clean')
  print('Done Plotting')
  browser()
  ############################
  # ---- Per-sample cell-type proportion table ----
  # NOTE(review): the next two lines look like a stale copy/paste --
  # `celltype` is clobbered with an empty data.frame and colnames() is set
  # on stats_summary_line before it exists (would error on a fresh run).
  # They are superseded by the proper initialization a few lines below.
  celltype = data.frame(matrix(ncol = 4, 0))
  colnames(stats_summary_line) = c("Sample",'Cluster','Num','Percent')
  sample_list = unique(data_run_label$sample_name)
  celltype_iterate = as.character(unique(Idents(data_run_label)))
  patient_list = unique(data_run_label$Patient)
  patient = patient_list[1]
  celltype = celltype_iterate[1]
  stats_summary_line = data.frame(matrix(ncol = 4, 0))
  colnames(stats_summary_line) = c("Sample",'Cluster','Num','Percent')
  # Accumulate per-cluster counts/percentages for each sample.
  # NOTE(review): rbind in a loop is O(n^2); fine at this scale but a list +
  # bind_rows would be the idiomatic form.
  for (sample in sample_list){
    data_run_input = data_run_label
    data_run_input = data_run_input[,data_run_input$sample_name == sample]
    cluster_num = clusterStats(data_run_input)
    cluster_num$Sample = sample
    stats_summary_line = rbind(stats_summary_line,cluster_num)
  }
  # Attach clinical metadata, then keep only baseline and NBM samples.
  stats_summary_line =merge(stats_summary_line, metaData, by = 'Sample')
  stats_summary_line_baseline = stats_summary_line[stats_summary_line$Treatment == 'baseline'| stats_summary_line$Treatment == 'NBM',]
  # NOTE(review): nrow = 46 is hard-coded to this dataset's baseline+NBM row
  # count -- TODO confirm / derive from nrow(stats_summary_line_baseline).
  stats_summary_celltype = data.frame(matrix(ncol = 4, nrow = 46 ))
  colnames(stats_summary_celltype) = c('Patient','NBM','Cluster','Proportion')
  stats_summary_celltype$Patient = stats_summary_line_baseline$`Patient Number`
  stats_summary_celltype$NBM = stats_summary_line_baseline$`Treatment`
  stats_summary_celltype$Cluster = stats_summary_line_baseline$Cluster
  stats_summary_celltype$Proportion = stats_summary_line_baseline$Percent
write.csdevtools::install_github("constantAmateur/SoupX")
v(stats_summary_celltype,'/home/sujwary/Desktop/scRNA/Output/Integrate All/PrePostEOTNBM/Regkit/SubsetIntegrate/NK/Analysis/stats_summary_celltype.csv')
  # ---- Wide patient x (celltype,timepoint) proportion table ----
  stats_summary_celltype = data.frame(matrix(ncol = 2, nrow = length(patient_list) ))
  colnames(stats_summary_celltype) = c('Patient','Time')
  rownames(stats_summary_celltype) = patient_list
  # First pass: create one NA column per (celltype, timepoint) combination.
  # NOTE(review): data.frame assignment by [row, newcol] also creates
  # missing columns, so this pass is largely redundant with the fill pass
  # below; kept to preserve column creation order.
  for (patient in patient_list){
    rowdata = stats_summary_line[stats_summary_line$`Patient Number` == patient,]
    for(i in 1:nrow(rowdata)){
      print(i)
      patient = (rowdata$`Patient Number`)[i]
      celltype = rowdata$Cluster[i]
      time = rowdata$Treatment[i]
      stats_summary_celltype[patient,'Patient'] = patient
      colname = paste0(celltype,' ', time)
      stats_summary_celltype[[colname]] = NA
    }
  }
  # Second pass: fill each patient's percentage into its
  # "<celltype> <timepoint>" column.
  for (patient in patient_list){
    rowdata = stats_summary_line[stats_summary_line$`Patient Number` == patient,]
    for(i in 1:nrow(rowdata)){
      print(i)
      patient = (rowdata$`Patient Number`)[i]
      celltype = rowdata$Cluster[i]
      print(celltype)
      time = rowdata$Treatment[i]
      stats_summary_celltype[patient,'Patient'] = patient
      colname = paste0(celltype,' ', time)
      stats_summary_celltype[patient,colname] = rowdata$Percent[i]
    }
  }
  # Alphabetize columns and write out.
  stats_summary_celltype = stats_summary_celltype[ , order(colnames(stats_summary_celltype))]
  write.csv(stats_summary_celltype, file = paste0(filepath_cluster,'Stats/stats_summary_celltype.csv'),row.names = T)
  stats_summary_celltype
  ####################
  # Plot known markers
  ####################
  #data_run = getCluster (data_run,resolution_val, PCA_dim)
  cell_features = getCellMarkers(folder_base)
  # Full-object marker plots in three styles.
  PlotKnownMarkers(data_run, folder = paste0(filepath_cluster,'Cell Type/HeatMap/'), cell_features = cell_features,
                   plotType ='HeatMap' , str = '')
  PlotKnownMarkers(data_run, folder = paste0(filepath_cluster,'Cell Type/Violin/'), cell_features = cell_features,
                   plotType ='Violin' , str = '')
  PlotKnownMarkers(data_run, folder = paste0(filepath_cluster,'Cell Type/FeaturePlotFix/'), cell_features = cell_features,
                   plotType ='FeaturePlotFix' , str = '')
  # Split clusters into size bins so heatmaps stay readable.
  # NOTE(review): the half-split subsets below are immediately overwritten
  # by the quarter-split versions.
  idents_unique = unique(Idents(data_run))
  data_run_subset_small = subset(data_run, idents = round(length(idents_unique)/2):(length(idents_unique) -1))
  data_run_subset_large = subset(data_run, idents = 0:round(length(idents_unique)/2))
  inc = round(length(idents_unique)/4)
  data_run_subset_small = subset(data_run, idents = (inc*3 + 1):(length(idents_unique)-1))
  data_run_subset_medium = subset(data_run, idents = (inc*2 + 1):(inc*3))
  data_run_subset_large = subset(data_run, idents = (inc-1):(inc*2))
  data_run_subset_extralarge = subset(data_run, idents = 0:(inc-2))
  PlotKnownMarkers(data_run_subset_small, folder = paste0(filepath_cluster,'Cell Type/HeatMap/Small Clusters/'), cell_features = cell_features,
                   plotType ='HeatMap' ,split = F, str = '')
  PlotKnownMarkers(data_run_subset_medium, folder = paste0(filepath_cluster,'Cell Type/HeatMap/Medium Clusters/'), cell_features = cell_features,
                   plotType ='HeatMap' ,split = F, str = '')
  PlotKnownMarkers(data_run_subset_large, folder = paste0(filepath_cluster,'Cell Type/HeatMap/Large Clusters/'), cell_features = cell_features,
                   plotType ='HeatMap' ,split = F, str = '')
  PlotKnownMarkers(data_run_subset_extralarge, folder = paste0(filepath_cluster,'Cell Type/HeatMap/Extra Large Clusters/'), cell_features = cell_features,
                   plotType ='HeatMap' ,split = F,str = '')
  # T-cell marker heatmap / violin on the cleaned labeled subset.
  # NOTE(review): the first data_run_subset assignment is overwritten by the
  # second; gene_list is used here but defined further below -- this section
  # is evidently run interactively, out of textual order.
  data_run_subset = subset(data_run, idents = c('0','1','2','3','4','5','6','7','9','17','20'))
  data_run_subset = data_run_label_clean
  pathName = paste0(folder_base_output,
                    paste0('HeatMapTCellMarkers_Clean','.png'))
  png(file=pathName,width=2000, height=2500, res = 100)
  plot = DoHeatmap(object = data_run_subset, features = gene_list,assay = 'RNA', slot = "data",
                   group.by = "ident", label = T)
  plot = plot +  theme(
    axis.text= element_text(color="black", size=38))
  print(plot)
  dev.off()
  pathName <- paste0(folder_base_output,
                     paste0('ViolinTCellMarkers8_14','.png'))
  png(file=pathName,width=1000, height=1000, res = 100)
  plot = StackedVlnPlot(obj = data_run_subset, features = gene_list ) +
    ggtitle('' ) +
    theme(plot.title = element_text(hjust = 0.5)) +
    theme(plot.title = element_text(size=24))
  plot = plot +  theme(
    axis.text= element_text(color="black", size=24))
  print(plot)
  dev.off()
  # T/NK marker gene panels. The second (shorter) assignment overwrites the
  # first; both kept for reference.
  gene_list = c("PTPRC", "HNRNPLL", "CD3D","CD3E","CD3G","CD4","CD8A","CD8B","SELL","CCR7","CD27",
                "CD28", "CD69", "IL2RA", "IL2RB", "IL2RG", "CD38", "FAS", "IL7R", "KLRG1", "ZEB1","ZEB2",
                "ZEB2", "PRF1", "GNLY", "NKG7","FCGR3A", "ITGAL", "CX3CR1", "B3GAT1", "BCL2", "MCL1", "LEF1",
                "TRDC", "TRGC1", "TRGC2", "TRAV10", "KLRB1","LAMP1","TRAC",'TCF7',"FOXP3",'IL10', 'TGFB1',
                'CTLA4', 'TNFRSF18', 'LTB','NOSIP','NTDP1','GZMA','GZMB','GZMK','GZMH','GZMM',
                'CCL3','IFNG','KLRD1','ITGAM','HAVCR2','LAG3','PDCD1','TIGIT','TBX21','ADGRG1',
                'NCAM1','SRGN',"HLA-DRA" , "HLA-DRB5", "HLA-DRB1","ENTB1","TRDC")
  gene_list = c( "CD3D","CD3E","CD3G","CD4","CD8A","CD8B",'NCAM1','CX3CR1', 'SELL', 'CXCR4','GZMA','GZMB','GZMK','GZMH','GZMM',
                 'NKG7', 'PRF1', 'GNLY', 'FCGR3A', 'ITGAL', 'KLRG1', 'KLRB1',
                 'TRDC', 'ZEB1', 'ZEB2', 'IL7R','CD27','TRDC','TRGC1','TRGC2','TCF7')
  ##################
  # Retired NK-gene re-clustering experiment, kept commented for reference.
  #data_run_NKGenes = data_run[,Idents(data_run) != '6']
  #VariableFeatures(data_run_NKGenes) = gene_list
  #file_str = '_NKGenes_removeTgd_label'
  #data_run_NKGenes = ScaleData(data_run_NKGenes)
  #data_run_NKGenes = RunPCA(data_run_NKGenes, npcs = 10)
  #data_run_NKGenes = FindNeighbors(data_run_NKGenes, dims = 1:10)
  #data_run_NKGenes = FindClusters(data_run_NKGenes, resolution = 0.5) # Value of the resolution parameter, use a value above (below) 1.0 if you want to obtain a larger (smaller) number of communities.)
  #data_run_NKGenes = RunUMAP(data_run_NKGenes, dims = 1:10)
  #data_run_NKGenes = data_run_NKGenes[,Idents(data_run_NKGenes)!= '4']
  #data_run_NKGenes = ScaleData(data_run_NKGenes)
  #data_run_NKGenes = RunPCA(data_run_NKGenes, npcs = 10)
  #data_run_NKGenes = FindNeighbors(data_run_NKGenes, dims = 1:10)
  #data_run_NKGenes = FindClusters(data_run_NKGenes, resolution = 2) # Value of the resolution parameter, use a value above (below) 1.0 if you want to obtain a larger (smaller) number of communities.)
  #data_run_NKGenes = RunUMAP(data_run_NKGenes, dims = 1:10)
  #data_run_NKGenes = label_cells(data_run_NKGenes,'CX3CR1+GZMK+, CXCR4+GZMK-, CXCR4+GZMK+, SELL+GZMK+, CX3CR1+ GZMK-, CXCR4+GZMK-')
  #pathName <- paste0(filepath_cluster,paste0('ClusterUmap', '_PCA',10,'_res',2,file_str,'.png'))
  #png(file=pathName,width=1000, height=1000, res = 100)
  #print(DimPlot(data_run_NKGenes,pt.size = 0.8, reduction = "umap",label = TRUE))
  #dev.off()
  ###################
  # Heatmap of the current gene_list across all clusters.
  file_str = ''
  gene_list = unique(gene_list)
  folder = paste0(filepath_cluster,'Cell Type/HeatMap Gene/')
  dir.create(folder,recursive = T)
  # NOTE(review): the first pathName assignment is immediately overwritten --
  # only the 'LymphoGenes' file is actually written.
  pathName = paste0(folder,'NKGenes','_heatmap',file_str,'.png')
  pathName = paste0(folder,'LymphoGenes','_heatmap','',file_str,'.png')
  plot = DoHeatmap(object = data_run, features = gene_list,assay = 'RNA', slot = "data",
                   group.by = "ident", label = T) +
    ggtitle('' ) +
    theme(plot.title = element_text(hjust = 0.5)) +
    theme(plot.title = element_text(size=24))
  plot = plot +  theme(
    axis.title.x = element_text(color="black", size=24 ),
    axis.title.y = element_text(color="black", size=24),
    axis.text= element_text(color="black", size=12),
    legend.text=element_text(size=24),
    legend.title=element_text(size=24),
    text = element_text(size = 20))
  png(file=pathName,width=1000, height=1000,res=100)
  print(plot)
  dev.off()
  # One feature plot per gene in gene_list.
  for (j in 1:length(gene_list)){
    gene = gene_list[j]
    print(gene)
    #browser()
    # NOTE(review): folder and pathName are each assigned twice; the second
    # assignment wins (output goes to the NK folder, filename without
    # file_str).
    folder = paste0(filepath_cluster,'Cell Type/FeaturePlotFix/T Cell/')
    folder = paste0(filepath_cluster,'Cell Type/FeaturePlotFix/NK/')
    dir.create(folder,recursive = T)
    plot = FeaturePlotFix(data_run, feature = gene, folder =folder,
                          str = '',split = F, markerSize = 3,gene_TF = TRUE,title = '',saveTF = FALSE)
    plot = plot +  theme(
      axis.title.x = element_text(color="black", size=24 ),
      axis.title.y = element_text(color="black", size=24),
      axis.text= element_text(color="black", size=24),
      legend.text=element_text(size=24),
      legend.title=element_text(size=24),
      text = element_text(size = 20)
    )
    pathName = paste0(folder,gene,file_str,'.png')
    pathName = paste0(folder,gene,'','.png')
    png(filename = pathName,width=2000, height=2000)
    print(plot)
    dev.off()
    # Free the ggplot object between iterations (plots over a large Seurat
    # object can hold substantial memory).
    remove(plot)
  }
  ##################################
  # T-cell marker heatmap for the small-cluster subset.
  pathName <- paste0(folder_base_output,
                     paste0('FeaturePlotTCellMarkersSmall','.png'))
  png(file=pathName,width=2000, height=2500, res = 100)
  plot = DoHeatmap(object = data_run_subset_small, features = gene_list,assay = 'RNA', slot = "data",
                   group.by = "ident", label = T)
  plot = plot +  theme(
    axis.text= element_text(color="black", size=42))
  print(plot)
  dev.off()
  # Feature plots per cluster-size subset, plus the FeaturePlotFix variant.
  PlotKnownMarkers(data_run_subset_large,  paste0(filepath_cluster,'Cell Type/FeaturePlot/Large Clusters/'), cell_features = cell_features,
                   plotType ='FeaturePlot' ,split = F,featurePlotFix = F,str = '')
  PlotKnownMarkers(data_run_subset_small,  paste0(filepath_cluster,'Cell Type/FeaturePlot/Small Clusters/'), cell_features = cell_features,
                   plotType ='FeaturePlot' ,split = F,featurePlotFix = F,str = '')
  PlotKnownMarkers(data_run,data_run, paste0(filepath_cluster,'Cell Type/FeaturePlotFix/'), cell_features = cell_features,
                   plotType ='FeaturePlot' ,split = F,featurePlotFix = TRUE,str = '_FIX')
  # Labeled-cluster versions of the marker plots.
  data_run_label = label_cells(data_run,cluster_IDs)
  PlotKnownMarkers(data_run_label,data_run_label, folder = paste0(filepath_cluster,'Cell Type/HeatMapLabel/'), cell_features = cell_features,
                   plotType ='HeatMap' ,split = F,featurePlotFix = TRUE, str = '')
  PlotKnownMarkers(data_run_label,data_run_label, folder = paste0(filepath_cluster,'Cell Type/ViolinLabel/'), cell_features = cell_features,
                   plotType ='Violin' ,split = F,featurePlotFix = TRUE, str = '')
  # Size-binned labeled subsets (half-split versions are overwritten by the
  # third-split versions below).
  idents_unique = levels(unique(Idents(data_run_label)))
  data_run_subset_small = subset(data_run_label, idents = idents_unique[round(length(idents_unique)/2):(length(idents_unique) -1)])
  data_run_subset_large = subset(data_run_label, idents = idents_unique[0:round(length(idents_unique)/2)])
  inc = round(length(idents_unique)/3)
  data_run_subset_small = subset(data_run_label, idents = idents_unique[(inc*2):(length(idents_unique)-1)])
  data_run_subset_medium = subset(data_run_label, idents = idents_unique[inc:(inc*2)])
  data_run_subset_large = subset(data_run_label, idents = idents_unique[0:(inc-1)])
  PlotKnownMarkers(data_run_subset_small, folder = paste0(filepath_cluster,'Cell Type/HeatMapLabel/Small Clusters/'), cell_features = cell_features,
                   plotType ='HeatMap' ,split = F,featurePlotFix = TRUE, str = '')
  PlotKnownMarkers(data_run_subset_medium, folder = paste0(filepath_cluster,'Cell Type/HeatMapLabel/Medium Clusters/'), cell_features = cell_features,
                   plotType ='HeatMap' ,split = F,featurePlotFix = TRUE, str = '')
  PlotKnownMarkers(data_run_subset_large, folder = paste0(filepath_cluster,'Cell Type/HeatMapLabel/Large Clusters/'), cell_features = cell_features,
                   plotType ='HeatMap' ,split = F,featurePlotFix = TRUE, str = '')
  ############
  # Top-N DE-marker heatmaps, from the Features CSVs written by plotAll.
  data_run_subset = data_run[,Idents(data_run) %in% c('1','4', '18', '34', '4', '3', '0')]
  num_markers = 100
  file_str = ''
  file = paste0(filepath_cluster,'Features',file_str,'.csv')
  markers = read.csv(file)
  # Focus on cluster 1 only for this pass.
  markers = markers[markers$cluster == '1',]
  str = '_Cluster1'
  markers_small = markers[markers$cluster %in% round(length(idents_unique)/2):(length(idents_unique) -1) ,]
  markers_large = markers[markers$cluster %in% 1:round(length(idents_unique)/2) ,]
  TopNHeatmap(data_run_subset,markers = markers,filepath_cluster,PCA_dim,resolution_val,num_markers,file_str = paste0('_',num_markers,str))
  TopNHeatmap(data_run_subset_small,markers = markers_small,filepath_cluster,PCA_dim,resolution_val,num_markers,file_str = paste0('smallCluster_',num_markers,str))
  TopNHeatmap(data_run_subset_large,markers = markers_large,filepath_cluster,PCA_dim,resolution_val,num_markers,file_str = paste0('largeCluster_',num_markers,str))
  # Labeled-cluster variant.
  file_str = '_label'
  file = paste0(filepath_cluster,'Features',file_str,'.csv')
  markers = read.csv(file)
  TopNHeatmap(data_run_label,markers = markers,filepath_cluster,PCA_dim,resolution_val,num_markers,file_str = paste0('label_',num_markers))
  ## Plot Markers from DE between each cluster and all other clusters combined
  ## The files are saved from plotAll when markersTF == T
  #
  # Saving subset of data if desired
  if (saveClean){
    # cluster_IDs_list = unlist(strsplit(cluster_IDs, ","))
    # cluster_IDs_list = trimws(cluster_IDs_list, which = c("both"), whitespace = "[ \t\r\n]")
    # #cluster_IDs_new = cluster_IDs_list[cluster_IDs_list!='Erythrocyte' & cluster_IDs_list!='Ignore','cluster_IDs_new']
    # cluster_IDs_new = cluster_IDs_list[cluster_IDs_list!='CD14+ Mono']
    # data_run_clean = subset(data_run_label, idents = cluster_IDs_new)
    # data_run_clean = FindVariableFeatures(data_run_clean, selection.method = "vst", nfeatures = 2000)
    # #
    # path = paste0(folder_base_output,
    #               '/data_run_clean_',integrate_merge,rpca,'_PCAdim',PCA_dim,'_',sample_type,'.Robj')
    # save(data_run_clean,file= path)
    # Parse the comma-separated cluster label string and drop unwanted types.
    cluster_IDs_list = unlist(strsplit(cluster_IDs, ","))
    cluster_IDs_list = trimws(cluster_IDs_list, which = c("both"), whitespace = "[ \t\r\n]")
    #cluster_IDs_new = cluster_IDs_list[cluster_IDs_list!='Erythrocyte' & cluster_IDs_list!='Ignore','cluster_IDs_new']
    remove_cluster_list = c('CD14+ Mono','Mast','CXCL9 High','Basal','DC')
    cluster_IDs_new = cluster_IDs_list[!(cluster_IDs_list %in% remove_cluster_list)]
    # Keep only patients with all timepoints present.
    patient_complete_list = c('5','6','12','16','20','30','31','40','51')
    data_run_clean = data_run_label[,data_run$Patient %in% patient_complete_list]
    data_run_clean = subset(data_run_clean, idents = cluster_IDs_new)
    # NOTE(review): this line overwrites the filtered object above with a
    # simple T/NK subset of the full labeled object -- TODO confirm which
    # version was intended to be saved.
    data_run_clean = subset(data_run_label, idents = c('T Cell','CD8+ T Cell','NK'))
    path = paste0(folder_base_output,
                  '/data_run','_clean','_PCAdim',PCA_dim,'_','features2000','.Robj')
    save(data_run_clean,file= path)
  }
  ################
  ## DE Between conditions
  ################
  # sortby: column used to rank genes in the heatmaps; gene_num: number of
  # up/down genes to show.
  sortby = 'avg_logFC'
  gene_num = c(40,40)
  category_list = c('Pre ND','Pre D', 'Post ND', 'Post D')
cluster_ids_new = gsub("CD14+ Mono1", "CD14+ Mono", cluster_ids_new)
  # Cell types to run condition-vs-condition DE on.
  # NOTE(review): the second uncommented assignment below (adding
  # 'CD16+ Mono') overwrites this one.
  celltype_iterate = c('NK', 'T Cell','CD8+ T Cell','CD14+ Mono')
  #celltype_iterate = c('CD14+ Mono')
  #
  split_var = 'split_var'
  # Split by pre/post + response
  # Compare markers using pre/post response
  celltype_iterate = c('NK', 'T Cell','CD8+ T Cell','CD14+ Mono','CD16+ Mono')
  #celltype_iterate = c('NK', 'T Cell','CD14+ Mono')
  #celltype_iterate = c('CD8+ T Cell')
  #celltype_iterate = c('NK', 'T Cell','CD8+ T Cell','CD14+ Mono')
  #data_run_remove10 = data_run[,data_run$Patient !='10']
  #data_run_remove21 = data_run[,data_run$Patient !='21']
  # Go through each cell type and compare conditions in that cell type
  # For each cell type: run two DE comparisons (baseline Poor vs Good
  # Response; C9D1 vs EOT) and render DoHeatmap panels per condition/sample.
  for (celltype in celltype_iterate){
    print(celltype)
    celltype_list = c(celltype)
    #########################
    # Relabel clusters, then collapse dexa suffixes so split_var is just
    # Pre/Post, and collapse MRP into MR via the TMP placeholder.
    data_run_label = label_cells(data_run, cluster_ids_new)
    #data_run_label = RenameIdents(object = data_run_label, 'dCD14+ Mono' = 'CD14+ Mono')
    #data_run_label = data_run
    data_run_label@meta.data$split_var = gsub("Pre ND", "Pre", data_run_label@meta.data$split_var)
    data_run_label@meta.data$split_var = gsub("Pre D", "Pre", data_run_label@meta.data$split_var)
    data_run_label@meta.data$split_var = gsub("Post ND", "Post", data_run_label@meta.data$split_var)
    data_run_label@meta.data$split_var = gsub("Post D", "Post", data_run_label@meta.data$split_var)
    # NOTE(review): the three-step MRP/MR/TMP dance maps BOTH "MRP" and "MR"
    # to "MR" (MRP->TMP, MR->TMP, TMP->MR) -- i.e. it merges the two
    # response categories; confirm this collapse is intended.
    data_run_label@meta.data$Response = gsub("MRP", "TMP", data_run_label@meta.data$Response)
    data_run_label@meta.data$Response = gsub("MR", "TMP", data_run_label@meta.data$Response)
    data_run_label@meta.data$Response = gsub("TMP", "MR", data_run_label@meta.data$Response)
    data_run_label$response_split_var = paste0(data_run_label$split_var ,' ',data_run_label$Response )
    # --- Comparison 1: baseline Poor vs Good Response within this celltype.
    # Idents become "<celltype> <timepoint> <response>" so FindMarkers can
    # compare the two groups directly.
    data_run_input = data_run_label
    Idents(data_run_input) = paste0(Idents(data_run_input),' ',
                                    data_run_input@meta.data[,'response_split_var'])
    ident1 = paste0(celltype, ' ', 'baseline',' ','Poor Response')
    ident2 = paste0(celltype, ' ', 'baseline',' ','Good Response')
    # NOTE(review): folder_name is assigned twice; the short second form wins.
    folder_name = paste0('DoHeatmap/Response/',ident1, '_', ident2)
    folder_name = paste0('',ident1, '_', ident2)
    folder_heatMap = paste0(folder_base_output,'Analysis/', folder_name,'/')
    #folder_heatMap = paste0(folder_base_output,'', folder_name,'/')
    Features = FindMarkers(data_run_input, ident.1 = ident1, ident.2 = ident2
                           ,min.pct = 0.1, logfc.threshold = 0.1, only.pos = FALSE)
    cluster_ids_new = cluster_IDs
    data_run_input = data_run_label
    DoHeatMapHelper(data_run_input,folder_base_output,folder_heatMap, Features,
                    ident1,ident2,celltype_list,category_list,sortby,split_var = 'response_split_var',
                    cluster_ids_new,cellPair = FALSE,gene_num,str = '')
    ################################
    # Per-category heatmaps split by sample.
    # NOTE(review): SubsetData is deprecated in Seurat 3+; subset() is the
    # current API.
    category_list = unique(data_run_label$response_split_var)
    for (category in category_list){
      data_run_input = data_run_label
      data_run_input = SubsetData(object = data_run_input, cells = data_run_input$response_split_var == category )
      DoHeatMapHelper(data_run_input,folder_base_output,folder_heatMap, Features,
                      ident1,ident2,celltype_list,category_list,sortby,split_var = 'sample_name',
                      cluster_ids_new,cellPair = FALSE,gene_num,str = paste0('_',category))
      Idents(data_run_input) = paste0(Idents(data_run_input),' ',
                                      data_run_input@meta.data[,'response_split_var'])
      #stats = clusterStats(data_run_input)
      #write.csv(stats, file = paste0(folder_heatMap,'Stats_',category,'.csv'),row.names = FALSE)
    }
    # --- Comparison 2: C9D1 vs EOT within this celltype.
    data_run_input = data_run_label
    Idents(data_run_input) = paste0(Idents(data_run_input),' ',
                                    data_run_input@meta.data[,'split_var'])
    ident1 = paste0(celltype, ' ', 'C9D1')
    ident2 = paste0(celltype, ' ', 'EOT')
    folder_name = paste0('DoHeatmap/Response/',ident1, '_', ident2)
    folder_name = paste0('',ident1, '_', ident2)
    folder_heatMap = paste0(folder_base_output,'Analysis/', folder_name,'/')
    #folder_heatMap = paste0(folder_base_output,'', folder_name,'/')
    Features = FindMarkers(data_run_input, ident.1 = ident1, ident.2 = ident2
                           ,min.pct = 0.1, logfc.threshold = 0.1, only.pos = FALSE)
    data_run_input = data_run_label
    # Normalize the doubled NBM label produced by the paste0 above.
    data_run_input$response_split_var[data_run_input$response_split_var == 'NBM NBM'] = 'NBM'
    DoHeatMapHelper(data_run_input,folder_base_output,folder_heatMap, Features,
                    ident1,ident2,celltype_list,category_list,sortby,split_var = 'split_var',
                    cluster_ids_new,cellPair = FALSE,gene_num,str = '', Ident_order = NA)
    Ident_order = c('NBM','Pre MR','Pre VGPR','Post MR', 'Post VGPR')
    Ident_order = paste0(celltype,' ', Ident_order)
    # DoHeatMapHelper(data_run_input,folder_base_output,folder_heatMap, Features,
    #                 ident1,ident2,celltype_list,category_list,sortby,split_var = 'response_split_var',
    #                 cluster_ids_new,cellPair = FALSE,gene_num,str = '', Ident_order = Ident_order)
    # Per-category heatmaps split by sample for comparison 2.
    category_list = unique(data_run_label$response_split_var)
    for (category in category_list){
      data_run_input = data_run_label
      data_run_input = SubsetData(object = data_run_input, cells = data_run_input$response_split_var == category )
      DoHeatMapHelper(data_run_input,folder_base_output,folder_heatMap, Features,
                      ident1,ident2,celltype_list,category_list,sortby,split_var = 'sample_name',
                      cluster_ids_new,cellPair = FALSE,gene_num,str = paste0('_',category))
      Idents(data_run_input) = paste0(Idents(data_run_input),' ',
                                      data_run_input@meta.data[,'response_split_var'])
      stats = clusterStats(data_run_input)
      #write.csv(stats, file = paste0(folder_heatMap,'Stats_',category,'.csv'),row.names = FALSE)
    }
  }
############################################################
## Flag clusters whose cells all come from a single sample.
##
## Builds `ignore_cluster_list`: cluster identities represented by exactly one
## sample (typically excluded from cross-sample comparisons downstream).
## Fixes: the diagnostic messages previously said "patient" although the check
## is on `sample_name`; the unused `patient_list` variable was removed.
ignore_cluster_list = c()
for (cluster in unique(data_run@active.ident)){
print('Cluster')
print(cluster)
data_subset = subset(data_run, idents = cluster)
sample_list = unique(data_subset$sample_name)
if (length(sample_list) == 1){
print('Sample List')
print(sample_list)
print('Cluster with only 1 sample')
ignore_cluster_list = c(ignore_cluster_list,cluster)
}
print('')
}
#####
# Module scores for the CD14 marker sets (M2 / M7 signatures, Oksana paper).
m2_markers = c('HLA-DPB1','HLA-DPA1','TAGLN2','HLA-DRB5','F13A1','LIPA','HLA-DQA1')
m7_markers = c('ZFP36L1','PSME2','IFITM2','PLAC8','APOBEC3A','TNFSF10','LY6E','ISG15','IFITM3','IFI44L')
# Fix: AddModuleScore expects a *list* of feature vectors; passing a bare
# character vector scores every gene as its own one-gene module. With
# name = 'm2_markers' the resulting metadata column is 'm2_markers1'
# (Seurat appends the module index to the supplied name).
data_run <- AddModuleScore(
object = data_run,
features = list(m2_markers),
name = 'm2_markers',
assay = 'RNA'
)
data_run <- AddModuleScore(
object = data_run,
features = list(m7_markers),
name = 'm7_markers',
assay = 'RNA'
)
# Fix: the original plotted c("m2", "m7"), which do not exist as features or
# metadata columns and would make FeaturePlot fail; the module scores live in
# the m2_markers1 / m7_markers1 metadata columns.
print(FeaturePlot(data_run,pt.size = 0.5, features = c("m2_markers1", "m7_markers1")))
##
# Permutation-test p-values per cluster/cell type.
#
# Runs diff_exp() once per unique cluster identity and collects the percent
# difference and permutation p-value into perm_df, written to
# Stats/perm_pval.csv under filepath_cluster.
#
# Fixes: perm_df was previously allocated *before* cluster_IDs_list was
# (re)defined, so its row count depended on a stale value from earlier in the
# script; the strsplit/trimws assignments were dead code, immediately
# overwritten by unique(Idents(data_run_label)). Also uses seq_along() and
# TRUE instead of 1:length() and T.
##
cluster_IDs_list = unique(Idents(data_run_label))
perm_df = data.frame(matrix(ncol = 2, nrow = length(cluster_IDs_list)))
colnames(perm_df) = c("PercentDiff",'p_val')
rownames(perm_df) = cluster_IDs_list
for (j in seq_along(cluster_IDs_list)){
cell = cluster_IDs_list[j]
print(cell)
diff_result = diff_exp (data_run_label, cell)
print(diff_result)
perm_df[j,] = diff_result
}
write.csv(perm_df, file = paste0(filepath_cluster,'Stats/perm_pval.csv'),row.names = TRUE)
#browser()
####
# Exploratory marker plots for manually curated gene sets.
# NOTE: the repeated cell_features assignments below overwrite each other --
# only the LAST uncommented assignment ('FCGR3A') is actually plotted.
# cell_features = getCellMarkers()
# ident1 = '4'
# ident2 = '5'
# Features_mvn = FindMarkers(data_run, ident.1 = ident1, ident.2 = ident2
# ,min.pct = 0.1, logfc.threshold = 0.01, only.pos = TRUE)
# Features_mvn$gene = rownames(Features_mvn)
# rownames(Features_mvn) = NULL
# Features_mvn = cellMarkers(Features_mvn,cell_features)
# filepath_mvn = paste0( filepath_cluster, 'DE/nVsm/')
# filepath = paste0(filepath_mvn
# ,'Features_',ident1,'Vs',ident2
# ,'.csv')
# write.csv(Features_mvn, file = filepath,row.names = FALSE)
#
#data_run_label@meta.data$split_var = gsub("Pre ND", "Pre", data_run_label@meta.data$split_var)
#data_run_label@meta.data$split_var = gsub("Pre D", "Pre", data_run_label@meta.data$split_var)
#data_run_label@meta.data$split_var = gsub("Post ND", "Post", data_run_label@meta.data$split_var)
#
#PlotKnownMarkers(data_run_label,data_run, paste0(filepath_cluster,'Cell Type/'), cell_features = NA,
#                plotType ='HeatMap' ,split = FALSE,featurePlotFix = TRUE, str = 'label')
#browser()
#cell_features = c('CD34','CDK6','CD24', 'CD9','MZB1', 'GYPA', 'HBB','MZB1', 'TIGIT', 'LAG3','CD8A','CD8B')
cell_features = c('FCER1A','ITGAX','CD83','THBD','CD209','CD1C','LYZ','MZB1')
cell_features = c('IL3RA','CLEC4C','NRP1', 'MZB1')
cell_features = c('CD19','MS4A1')
cell_features = c('CD27','CD38','SDC1','SLAMF7','IL6','CD138','TNFRSF17') # pDC
cell_features = c('CD21','CD5','CD10','PAX5','CD24','CD34','CD93', 'CD23'
,'CD19','CD21','MS4A1','CD27','IL10','CD1D','TGFB1','TGFB2','EBI3','PRDM1','IGHM','CD1D','CD4') # B Cell
cell_features = c('CD10')
cell_features = c('FCGR3A')
#cell_features = gene_list[13:22]
celltype = 'NK'
# NOTE(review): the cell-type subset on the next line is immediately discarded
# by the reassignment to the full object -- confirm which was intended.
data_run_subset = data_run[,Idents(data_run) == celltype]
data_run_subset = data_run
# Three-step swap via the sentinel 'TMP': net effect is that both 'MRP' and
# 'MR' response labels end up as 'MR' (MRP is merged into MR).
data_run_subset@meta.data$Response = gsub("MRP", "TMP", data_run_subset@meta.data$Response)
data_run_subset@meta.data$Response = gsub("MR", "TMP", data_run_subset@meta.data$Response)
data_run_subset@meta.data$Response = gsub("TMP", "MR", data_run_subset@meta.data$Response)
# Drop normal-bone-marrow cells, then tag idents with the response label.
data_run_subset = data_run_subset[,data_run_subset@meta.data$Response != 'NBM']
Idents(data_run_subset) = paste0(Idents(data_run_subset), ' ',data_run_subset@meta.data$Response)
Idents(data_run_subset) = factor(Idents(data_run_subset) , levels = paste0(celltype,c(' MR',' VGPR')))
data_run_subset_VGPR = data_run_subset[,data_run_subset$Response == 'VGPR']
data_run_subset_MR = data_run_subset[,data_run_subset$Response == 'MR']
# Feature plots per response group, then a heatmap of the combined subset.
PlotKnownMarkers(data_run_subset_VGPR,data_run_subset_VGPR, paste0(filepath_cluster,'Cell Type/'), cell_features = cell_features,
plotType ='FeaturePlot' ,split = FALSE,featurePlotFix = TRUE, str = paste0('_VGPR'))
PlotKnownMarkers(data_run_subset_MR,data_run_subset_MR, paste0(filepath_cluster,'Cell Type/'), cell_features = cell_features,
plotType ='FeaturePlot' ,split = FALSE,featurePlotFix = TRUE, str = paste0('_MR'))
PlotKnownMarkers(data_run_subset,data_run_subset, paste0(filepath_cluster,'Cell Type/'), cell_features = cell_features,
plotType ='HeatMap' ,split = FALSE,featurePlotFix = TRUE, str = paste0(celltype,' ', cell_features))
# Pre/post treatment splits by originating dataset.
data_pre = data_run[,data_run@meta.data$orig.ident == "data_pre"]
data_post = data_run[,data_run@meta.data$orig.ident == "data_post"]
PlotKnownMarkers(data_pre,data_pre, paste0(filepath_cluster,'Cell Type/'), cell_features = cell_features,
plotType ='FeaturePlot' ,split = FALSE,featurePlotFix = TRUE, str = '_Pre')
PlotKnownMarkers(data_post,data_post, paste0(filepath_cluster,'Cell Type/'), cell_features = cell_features,
plotType ='FeaturePlot' ,split = FALSE,featurePlotFix = TRUE, str = '_Post')
# NOTE(review): this third call runs on the FULL object but reuses
# str = '_Post', so it overwrites the data_post plots above -- likely a
# copy-paste slip; confirm the intended suffix (e.g. '_All' or '').
PlotKnownMarkers(data_run,data_run, paste0(filepath_cluster,'Cell Type/'), cell_features = cell_features,
plotType ='FeaturePlot' ,split = FALSE,featurePlotFix = TRUE, str = '_Post')
#data_run = getCluster (data_run,resolution_val, PCA_dim)
data_run_label = label_cells(data_run,cluster_IDs)
#browser()
###############################################################################
# Loop 1: per cell type, DE between 'Post ND' and 'Post D' (dexamethasone),
# then heatmaps of that SAME marker set across several Pre/Post ident pairs
# (paired-cell mode for the later comparisons).
for (celltype in celltype_iterate){
celltype_list = c(celltype)
data_run_label = label_cells(data_run, cluster_ids_new)
Idents(data_run_label) = paste0(Idents(data_run_label),' ', data_run_label@meta.data[,'split_var'])
ident1 = paste0(celltype, ' ', 'Post ND')
ident2 = paste0(celltype, ' ', 'Post D')
folder_name = paste0('DoHeatmap/',ident1, '_', ident2)
folder_heatMap = paste0(folder_base_output,'Analysis/', folder_name,'/')
Features = FindMarkers(data_run_label, ident.1 = ident1, ident.2 = ident2
,min.pct = 0.1, logfc.threshold = 0.1, only.pos = FALSE)
# Re-label to undo the ident mutation above before plotting.
data_run_label = label_cells(data_run, cluster_ids_new)
DoHeatMapHelper(data_run_label,folder_base_output,folder_heatMap, Features,
ident1,ident2,celltype_list,category_list,sortby,split_var = split_var,
cluster_ids_new,cellPair = FALSE,gene_num,str = '')
# NOTE(review): the following pairs reuse the Post-ND-vs-Post-D Features
# (FindMarkers is not re-run) -- presumably intentional, confirm.
ident1 = paste0(celltype, ' ', 'Pre ND')
ident2 = paste0(celltype, ' ', 'Post ND')
DoHeatMapHelper(data_run_label,folder_base_output,folder_heatMap, Features,
ident1,ident2,celltype_list,category_list,sortby,split_var = split_var,
cluster_ids_new,cellPair = TRUE,gene_num,str = '')
ident1 = paste0(celltype, ' ', 'Pre D')
ident2 = paste0(celltype, ' ', 'Post D')
DoHeatMapHelper(data_run_label,folder_base_output,folder_heatMap, Features,
ident1,ident2,celltype_list,category_list,sortby,split_var = split_var,
cluster_ids_new,cellPair = TRUE,gene_num,str = '')
ident1 = paste0(celltype, ' ', 'Post ND')
ident2 = paste0(celltype, ' ', 'Post D')
DoHeatMapHelper(data_run_label,folder_base_output,folder_heatMap, Features,
ident1,ident2,celltype_list,category_list,sortby,split_var = split_var,
cluster_ids_new,cellPair = TRUE,gene_num,str = '')
}
# Split by pre/post + response
# Use D/ND markers
# Loop 2: markers from 'Pre D vs Post D', plotted with cells split by the
# combined timepoint+response label (D/ND collapsed to Pre/Post first).
split_var = 'response_split_var'
for (celltype in celltype_iterate){
celltype_list = c(celltype)
data_run_label = label_cells(data_run, cluster_ids_new)
Idents(data_run_label) = paste0(Idents(data_run_label),' ', data_run_label@meta.data[,'split_var'])
ident1 = paste0(celltype, ' ', 'Pre D')
ident2 = paste0(celltype, ' ', 'Post D')
folder_name = paste0('DoHeatmap/Response/',ident1, '_', ident2)
folder_heatMap = paste0(folder_base_output,'Analysis/', folder_name,'/')
Features = FindMarkers(data_run_label, ident.1 = ident1, ident.2 = ident2
,min.pct = 0.1, logfc.threshold = 0.1, only.pos = FALSE)
data_run_label = label_cells(data_run, cluster_ids_new)
# Collapse dexamethasone status: 'Pre ND'/'Pre D' -> 'Pre',
# 'Post ND'/'Post D' -> 'Post' (order matters: 'ND' must go before 'D').
data_run_label@meta.data$split_var = gsub("Pre ND", "Pre", data_run_label@meta.data$split_var)
data_run_label@meta.data$split_var = gsub("Pre D", "Pre", data_run_label@meta.data$split_var)
data_run_label@meta.data$split_var = gsub("Post ND", "Post", data_run_label@meta.data$split_var)
data_run_label@meta.data$split_var = gsub("Post D", "Post", data_run_label@meta.data$split_var)
data_run_label$response_split_var = paste0(data_run_label$split_var ,' ',data_run_label$Response )
DoHeatMapHelper(data_run_label,folder_base_output,folder_heatMap, Features,
ident1,ident2,celltype_list,category_list,sortby,split_var = split_var,
cluster_ids_new,cellPair = FALSE,gene_num,str = '')
}
# Split by pre/post + response
# Don't Use D/ND markers
#data_run_label = RenameIdents(object = data_run_label, 'dCD14+ Mono' = 'CD14+ Mono')
# Loop 3: markers from plain 'Pre vs Post', plotted with both the
# response_split_var and split_var groupings.
for (celltype in celltype_iterate){
print(celltype)
celltype_list = c(celltype)
#data_run_label = label_cells(data_run, cluster_ids_new)
#
# NOTE(review): on the first iteration these gsubs act on whatever state
# data_run_label was left in by the previous loop -- confirm that is intended.
data_run_label@meta.data$split_var = gsub("Pre ND", "Pre", data_run_label@meta.data$split_var)
data_run_label@meta.data$split_var = gsub("Pre D", "Pre", data_run_label@meta.data$split_var)
data_run_label@meta.data$split_var = gsub("Post ND", "Post", data_run_label@meta.data$split_var)
data_run_label@meta.data$split_var = gsub("Post D", "Post", data_run_label@meta.data$split_var)
Idents(data_run_label) = paste0(Idents(data_run_label),' ',
data_run_label@meta.data[,'split_var'])
ident1 = paste0(celltype, ' ', 'Pre')
ident2 = paste0(celltype, ' ', 'Post')
folder_name = paste0('DoHeatmap/Response/',ident1, '_', ident2)
folder_heatMap = paste0(folder_base_output,'Analysis/', folder_name,'/')
Features = FindMarkers(data_run_label, ident.1 = ident1, ident.2 = ident2
,min.pct = 0.1, logfc.threshold = 0.1, only.pos = FALSE)
# Fresh labels, then rebuild the collapsed split/response annotations.
data_run_label = label_cells(data_run, cluster_ids_new)
data_run_label$split_var = gsub("Pre ND", "Pre", data_run_label$split_var)
data_run_label$split_var = gsub("Pre D", "Pre", data_run_label$split_var)
data_run_label$split_var = gsub("Post ND", "Post", data_run_label$split_var)
data_run_label$split_var = gsub("Post D", "Post", data_run_label$split_var)
data_run_label$response_split_var = paste0(data_run_label$split_var ,' ',data_run_label$Response )
DoHeatMapHelper(data_run_label,folder_base_output,folder_heatMap, Features,
ident1,ident2,celltype_list,category_list,sortby,split_var = 'response_split_var',
cluster_ids_new,cellPair = FALSE,gene_num,str = '')
DoHeatMapHelper(data_run_label,folder_base_output,folder_heatMap, Features,
ident1,ident2,celltype_list,category_list,sortby,split_var = 'split_var',
cluster_ids_new,cellPair = FALSE,gene_num,str = '')
}
#############################
################################
# Make heatmaps with specific genes
# NOTE: the successive celltype / folder_name / gene_list assignments below
# overwrite each other -- only the LAST one of each is in effect
# (celltype = 'T Cell', gene_list = the FOSB/IQGAP1/... set).
celltype = 'CD14+ Mono'
celltype = 'NK'
celltype = 'CD16+ Mono'
celltype = 'CD8+ T Cell'
celltype = 'T Cell'
celltype_list = c(celltype)
sortby = 'avg_logFC'
gene_num = c(40,40)
folder_name = paste0('DoHeatmap/Response/',celltype,' Genes/')
folder_name = paste0('HeatMap/',celltype, '')
folder_heatMap = paste0(folder_base_output,'Analysis/', folder_name,'/')
data_run_label = label_cells(data_run, cluster_IDs)
data_run_label = RenameIdents(object = data_run_label, 'dCD14+ Mono' = 'CD14+ Mono')
# Collapse dexa status to Pre/Post and merge MRP into MR (via TMP sentinel).
data_run_label@meta.data$split_var = gsub("Pre ND", "Pre", data_run_label@meta.data$split_var)
data_run_label@meta.data$split_var = gsub("Pre D", "Pre", data_run_label@meta.data$split_var)
data_run_label@meta.data$split_var = gsub("Post ND", "Post", data_run_label@meta.data$split_var)
data_run_label@meta.data$split_var = gsub("Post D", "Post", data_run_label@meta.data$split_var)
data_run_label@meta.data$Response = gsub("MRP", "TMP", data_run_label@meta.data$Response)
data_run_label@meta.data$Response = gsub("MR", "TMP", data_run_label@meta.data$Response)
data_run_label@meta.data$Response = gsub("TMP", "MR", data_run_label@meta.data$Response)
data_run_label$response_split_var = paste0(data_run_label$split_var ,' ',data_run_label$Response )
ident1 = paste0(celltype, ' ', 'Pre MR')
ident2 = paste0(celltype, ' ', 'Pre VGPR')
#ident1 = paste0(celltype, ' ', 'Pre')
#ident2 = paste0(celltype, ' ', 'Post')
Ident_order = c('Pre MR','Post MR','Pre VGPR', 'Post VGPR')
Ident_order = paste0(celltype,' ', Ident_order)
category_list = unique(data_run_label$response_split_var)
# Hand-picked gene lists per comparison; only the final assignment is used.
gene_list = c('FCER1G','CD302','ILF2','MYCBP2') # Mono pre VGPR Pre MR
gene_list = c('IRF8','AHNAK','DDAH2','PIK3R1','SKAP1','RIOK3','DUSP2') # NK pre MR Pre VGPR
gene_list = c('NKG7','CMTM6') # NK Pre Post
gene_list = c('PA2G4') # NK MR Pre Post VGPR Pre Post
gene_list = c('FOSB', 'KLF2', 'PPP4R3A', 'NKG7', 'CD58', 'SERB1') # CD8+ T Cells Pre MR Pre VGPR
#gene_list = c('HIPK2', 'ZFR') # CD16+
gene_list = c('FOSB', 'IQGAP1', 'HLA-DPA1', 'ITGB1', 'TRAM1')
# Build a dummy Features frame (zero p-values / logFC) so DoHeatMapHelper can
# be driven with an explicit gene list instead of FindMarkers output.
Features= data.frame(matrix(ncol = 0, nrow = length(gene_list)))
rownames(Features) =gene_list
Features$p_val_adj = integer( length(gene_list))
Features$avg_logFC = integer( length(gene_list))
split_var = 'response_split_var'
#split_var = 'split_var'
DoHeatMapHelper(data_run_label,folder_base_output,folder_heatMap, Features,
ident1,ident2,celltype_list,category_list,sortby,split_var = split_var,
cluster_IDs,cellPair = FALSE,gene_num,str = '', Ident_order = Ident_order)
# Subset to the chosen cell type and comparison groups for the per-gene
# violin/ridge plots that follow.
split_var = 'response_split_var'
data_run_label_subset_cell = subset(data_run_label, idents = celltype_list)
data_run_label_subset_cell =data_run_label_subset_cell[,data_run_label_subset_cell$response_split_var != 'NBM NBM']
Idents(data_run_label_subset_cell) = data_run_label_subset_cell@meta.data[,split_var]
# Only the second ident_list assignment ('Pre MR' vs 'Pre VGPR') is used.
ident_list = c('Pre MR','Post MR','Pre VGPR','Post VGPR')
ident_list = c('Pre MR','Pre VGPR')
data_run_label_subset = subset(data_run_label_subset_cell, idents = ident_list)
Idents(data_run_label_subset) = factor(Idents(data_run_label_subset) , levels = ident_list)
str = ''
# Blue = MR groups, Red = VGPR groups (paired per timepoint in the 4-group case).
if (all(ident_list == c('Pre MR','Post MR','Pre VGPR','Post VGPR'))){
color_list = c('Blue','Blue','Red','Red')
}else{
color_list = c('Blue','Red')
}
# Per-gene violin and ridge plots for the selected cell type, split by the
# chosen grouping. The large-font/no-legend ggplot theme was previously
# duplicated verbatim for both plot types; it is defined once here.
large_font_theme = theme(
title =element_text(size=24, color="black"),
axis.title.x = element_text(color="black", size=24 ),
axis.title.y = element_text(color="black", size=24),
axis.text= element_text(color="black", size=24),
legend.text=element_text(size=18),
legend.title=element_text(size=18),
legend.position = 'none'
)
for (gene in gene_list){
# Violin plot of per-cell expression for this gene.
folder_name = paste0('ViolinPlots/',celltype)
folder = paste0(folder_base_output,'Analysis/', folder_name,'/')
# showWarnings = FALSE: the directory already exists after the first gene.
dir.create(folder,recursive = TRUE, showWarnings = FALSE)
pathName = as.character(paste0(folder,paste0('Violin ',celltype,' Split_',split_var,str,gene,'.png')) )
png(file=pathName,width=900, height=600)
plot= VlnPlot(data_run_label_subset, gene, pt.size = 0.1,cols =color_list)
plot = plot + large_font_theme
print(plot)
dev.off()
# Ridge (density) plot of the same gene.
folder_name = paste0('Ridge Plots/',celltype)
folder = paste0(folder_base_output,'Analysis/', folder_name,'/')
dir.create(folder,recursive = TRUE, showWarnings = FALSE)
pathName = as.character(paste0(folder,paste0('Ridge ',celltype,' Split_',split_var,str,gene,'.png')) )
png(file=pathName,width=900, height=600)
plot= RidgePlot(data_run_label_subset, features = gene,cols = color_list)
plot = plot + large_font_theme
print(plot)
dev.off()
}
######################################33
# CompareCellNum
# Per-sample cell-count comparison: relabel cells, collapse dexa status to
# Pre/Post, merge MRP into MR (via TMP sentinel), then tabulate counts.
# NOTE(review): the first assignment below is immediately overwritten by
# label_cells on the next line.
data_run_label = data_run
data_run_label = label_cells(data_run, cluster_ids_new)
data_run_label = RenameIdents(object = data_run_label, 'dCD14+ Mono' = 'CD14+ Mono')
data_run_label@meta.data$split_var = gsub("Pre ND", "Pre", data_run_label@meta.data$split_var)
data_run_label@meta.data$split_var = gsub("Pre D", "Pre", data_run_label@meta.data$split_var)
data_run_label@meta.data$split_var = gsub("Post ND", "Post", data_run_label@meta.data$split_var)
data_run_label@meta.data$split_var = gsub("Post D", "Post", data_run_label@meta.data$split_var)
data_run_label@meta.data$Response = gsub("MRP", "TMP", data_run_label@meta.data$Response)
data_run_label@meta.data$Response = gsub("MR", "TMP", data_run_label@meta.data$Response)
data_run_label@meta.data$Response = gsub("TMP", "MR", data_run_label@meta.data$Response)
data_run_label$response_split_var = paste0(data_run_label$split_var ,' ',data_run_label$Response )
folder_stats = paste0(folder_base_output, 'Analysis/Stats/')
CompareCellNum(data_run_label,folder_stats,split_var = 'split_var',metaData = metaData)
# Cell counts per sample, joined with the sample-level metadata sheet.
stats = data.frame(table(data_run_label$sample_name))
names(stats)[names(stats) == "Var1"] <- "Sample"
stats =merge(stats, metaData, by = 'Sample')
# Box plot of per-sample cell counts by treatment, coloured by 10X kit.
pathName = paste0(folder_stats,'boxplot_', 'Cell Num','.png')
x_name = 'Treatment'
y_name = 'Freq'
png(file=pathName,width=600, height=600)
# Map the kit column through aes() from the plot data rather than reaching
# into `stats$` directly (which bypasses ggplot's data masking).
plot = ggplot(stats, aes(x = !!ensym(x_name), y = !!ensym(y_name), fill = `10X kit`)) +
geom_boxplot()+
coord_cartesian(ylim = c(0, 3000))+
ggtitle('Cell Num')+
xlab("") + ylab("%")
# Box plot with dot plot
plot = plot + geom_jitter(shape=16, position=position_jitter(0.2))
# Fix: the original theme() call ended with a trailing comma, which R treats
# as an empty argument and errors at runtime ("argument ... is empty").
plot = plot + theme(
plot.title = element_text(color="black", size=24, face="bold.italic"),
axis.title.x = element_text(color="black", size=24, face="bold"),
axis.title.y = element_text(color="black", size=24, face="bold"),
axis.text=element_text(size=24)
)
plot = plot + theme(plot.title = element_text(hjust = 0.5))
print(plot)
dev.off()
# QC violin plots (genes / UMIs / mito fraction) grouped by split_var.
# NOTE(review): `point.size.use` is the Seurat v2 argument name (v3+ uses
# `pt.size`), and ncol = 4 with 3 features -- confirm against the Seurat
# version in use.
plot = VlnPlot(data_run_label, features = c("nFeature_RNA", "nCount_RNA", "percent.mt"),
ncol = 4,group.by = 'split_var', point.size.use = 0.0001)
pathName <- paste0(folder_stats,'violin.png')
print(pathName)
png(file=pathName,width=1000, height=600)
print(plot)
dev.off()
# NOTE(review): interactive breakpoint left in the script.
browser()
celltype = 'NK1'
celltype_list = c('NK1','NK2')
# NOTE(review): ident1 and ident2 are identical ('NK1 Post D' vs itself) --
# looks like a copy-paste slip; confirm the intended comparison.
ident1 = paste0(celltype, ' ', 'Post D')
ident2 = paste0(celltype, ' ', 'Post D')
DoHeatMapHelper(data_run,folder_base_output,folder_heatMap = NA, Features = NA,
ident1,ident2,celltype_list,category_list,sortby,cluster_IDs,cellPair = FALSE)
#################################################
# T Cell: Post ND vs Post D markers -> feature plots of top/bottom genes.
data_run_label = label_cells(data_run,cluster_ids_new)
# NOTE(review): idents come from data_run_label but the split_var column is
# read from data_run -- only safe if label_cells preserves cell order; confirm.
Idents(data_run_label) = paste0(Idents(data_run_label),' ', data_run@meta.data$split_var)
ident1 = paste0('T Cell', ' ', 'Post ND')
ident2 = paste0('T Cell', ' ', 'Post D')
Features = FindMarkers(data_run_label, ident.1 = ident1, ident.2 = ident2
,min.pct = 0.1, logfc.threshold = 0.1, only.pos = FALSE)
Features = Features[Features$p_val_adj < 0.05,]
folder_name = paste0('Feature Plots/',ident1, '_', ident2 )
Features = Features[order(Features$avg_logFC),]
# 20 most down- and 20 most up-regulated genes.
gene_list = rownames(Features)
gene_list = c(gene_list[1:20], tail(gene_list, n=20))
FeaturePlot_GeneList(data_run_label,gene_list,folder_base_output,folder_name,sample_type, FeaturePlotFix = TRUE)
##############
# CD14+ Mono: Pre ND vs Post ND markers -> feature plots of top 50.
ident1 = paste0('CD14+ Mono', ' ', 'Pre ND')
ident2 = paste0('CD14+ Mono', ' ', 'Post ND')
Features = FindMarkers(data_run_label, ident.1 = ident1, ident.2 = ident2
,min.pct = 0.1, logfc.threshold = 0.1, only.pos = FALSE)
Features = Features[order(Features$avg_logFC),]
Features = Features[Features$p_val_adj < 0.05,]
gene_list = rownames(Features)
gene_list = gene_list[1:50]
folder_name = 'Dexa CD14+ ND Gene Feature Plots 12-17-2019'
FeaturePlot_GeneList(data_run,gene_list,folder_base_output,folder_name,sample_type, FeaturePlotFix = TRUE)
#################################################
# NOTE(review): this writes the CD14+ Mono Features (from above) under the
# 'DE/T/' folder -- the folder name and the data look mismatched; confirm.
path = paste0(folder_base_output,'DE/','T','/')
dir.create( path, recursive = TRUE)
path = paste0(path, 'DE ',ident1,' Vs ', ident2,'.csv')
print(path)
write.csv(Features, file = path,row.names=TRUE)
# NOTE(review): interactive breakpoint left in the script.
browser()
#PlotKnownMarkers(data_run,data_run, paste0(folder_base_output,'Cell Type/'), cell_features = NA,split = FALSE)
# Label:
#IntegrateAll_ClusterUmap(data_run,sample_type,folder_base_output,PCA_dim,resolution_val,label = FALSE)
#IntegrateAll_ClusterUmap(data_run_label,sample_type,folder_base_output,PCA_dim,resolution_val,label = TRUE)
#########################
# Cluster UMAP split by split_var, one panel per group.
data_run_label = label_cells(data_run,cluster_ids_new)
filepath_cluster = paste0( folder_base_output, 'Cluster/', 'PCA',PCA_dim,'/res',resolution_val,'/' )
pathName <- paste0(filepath_cluster,paste0('ClusterUmap',resolution_val,'_splitAll', '','.png'))
png(file=pathName,width=2600, height=500,res = 100)
print(DimPlot(data_run_label, label=T, repel=F, reduction = "umap", split.by = "split_var"))
dev.off()
###############################
#browser()
#data_run = label_cells(data_run,cluster_IDs)
#
# data = SubsetData(object = data_run, cells = data_run$split_var == 'Post D' )
# plot = DimPlot(data, label=T, repel=F, reduction = "umap",pt.size = 1) +
# ggtitle('Post D' ) +
# theme(plot.title = element_text(hjust = 0.5))
# pathName <- paste0(filepath_cluster,paste0('ClusterUmap',resolution_val,'_splitAll_PostD', '','.png'))
# png(file=pathName,width=1000, height=500,res = 100)
# print(plot)
# dev.off()
#
#browser()
####################
# Pairwise DE between every combination of treatment categories for selected
# cell types; one CSV per (cell, category_i, category_j) pair under DE/<cell>/.
print('Get Markers')
# Fix: "dCD14+ Mono" was previously passed to gsub() as a *regular
# expression*, where "4+" means "one or more 4s", so the literal label
# "dCD14+ Mono" was never matched or replaced. fixed = TRUE makes the match
# literal.
data_run_label = label_cells(data_run,gsub("dCD14+ Mono", "CD14+ Mono", cluster_IDs, fixed = TRUE))
# NOTE(review): idents from data_run_label but split_var read from data_run --
# only safe if label_cells preserves cell order; confirm.
Idents(data_run_label) = paste0(Idents(data_run_label),' ', data_run@meta.data$split_var)
cell_list = c('T Cell','Mono1','Mono2','Mono3', 'NK')
category_list = c('Pre ND','Pre D', 'Post ND', 'Post D')
# Iterates ordered pairs, so each comparison runs twice (i vs j and j vs i),
# producing a CSV for both directions.
for (cat_i in category_list){
for (cat_j in category_list){
for (cell in cell_list){
if (cat_i != cat_j){
ident1 = paste0(cell, ' ', cat_i)
ident2 = paste0(cell, ' ', cat_j)
Features = FindMarkers(data_run_label, ident.1 = ident1, ident.2 = ident2
,min.pct = 0.1, logfc.threshold = 0.1, only.pos = FALSE)
path = paste0(folder_base_output,'DE/',cell,'/')
dir.create( path, recursive = TRUE)
path = paste0(path, 'DE ',ident1,' Vs ', ident2,'.csv')
print(path)
write.csv(Features, file = path,row.names=TRUE)
}
}
}
}
######################################33
# Dexamethasone feature plots.
# Fix: `&&` is scalar-only (and errors on length > 1 conditions in R >= 4.3);
# an elementwise cell mask requires `&`.
data = SubsetData(object = data_run_label, cells = (data_run$dexa == "Yes" & data_run$orig.ident == "data_post"))
data_run_label = label_cells(data_run,cluster_IDs)
# Curated dexa-associated gene sets; the second assignment below overwrites
# the first, so only the KLRK1/ITGAX/... list is plotted.
gene_list = c('GZMA','GZMB','GZMH','GZMK','FCER1G' ,'CXCR4','KLRF1',
'KLRB1', 'KLRD1', 'KLRC1', 'KLRG1', 'IL2RB', 'IL2RG', 'TSC22D3', 'NR4A2',
'EVL', 'IFITM2', 'TNFAIP3','TGFB1','NFKBIA', 'GNLY', 'NKG7','FCGR3A', 'CCND3'
, 'LTB','RGS1', 'CXCR4','TSC22D3','JUN', 'JUNB','JUND', 'FOS', 'GIMAP4',
'GIMAP7','FTH1','THBS1','CCR2','FCGR1A','HLA-DRB5','HLA-DQB1')
gene_list = c('KLRK1','ITGAX','CX3CR1','RGS1', 'CXCR4','TSC22D3','JUN', 'JUNB','JUND', 'FOS', 'GIMAP4',
'GIMAP7','FTH1','THBS1','CCR2','FCGR1A','HLA-DRB5','HLA-DQB1')
folder_name = 'Dexa Gene Feature Plots'
# Fix: gene_list was previously omitted from this call, shifting
# folder_base_output into the gene-list position (compare the calls earlier in
# the file: FeaturePlot_GeneList(data, gene_list, folder_base_output,
# folder_name, ...)).
FeaturePlot_GeneList(data_run_label,gene_list,folder_base_output,folder_name)
#
# gene_list = c('GZMA','GZMB','GZMH','GZMK','FCER1G' ,'CXCR4','KLRF1',
# 'KLRB1', 'KLRD1', 'KLRC1', 'KLRG1', 'IL2RB', 'IL2RG', 'TSC22D3', 'NR4A2',
# 'EVL', 'IFITM2', 'TNFAIP3','TGFB1','NFKBIA', 'GNLY', 'NKG7','FCGR3A', 'CCND3',
# 'LTB','RGS1', 'CXCR4','TSC22D3','JUN', 'JUNB','JUND', 'FOS', 'GIMAP4','GIMAP7',
# 'KLRK1','ITGAX','CX3CR1','RGS1', 'CXCR4','TSC22D3','JUN', 'JUNB','JUND', 'FOS', 'GIMAP4',
# 'GIMAP7','FTH1','THBS1','CCR2','FCGR1A','HLA-DRB5','HLA-DQB1','IL2RB')
#
###############################################################################
###############################################################################
# HeatMap Compare Cell Types
#gene_list = c('CXCR4','NFKBIA', 'TNFAIP3', 'NR4A2', 'TGFB1', 'RGS1','TSC22D3')
category_list = c('Pre ND','Pre D', 'Post ND', 'Post D')
data_run_label = label_cells(data_run,cluster_IDs)
# NOTE(review): ident1/ident2 here are whatever a previous section last set
# them to -- this FindMarkers call depends on earlier script state; confirm
# which comparison is intended.
Features = FindMarkers(data_run_label, ident.1 = ident1, ident.2 = ident2
,min.pct = 0.1, logfc.threshold = 0.1, only.pos = FALSE)
Features = Features[Features$p_val_adj < 0.05,]
Features = Features[order(Features$avg_logFC),]
# 20 most down- and 20 most up-regulated significant genes.
gene_list = rownames(Features)
gene_list = c(gene_list[1:20], tail(gene_list, n=20))
# One heatmap per treatment category, restricted to celltype_list.
for (category in category_list){
folder_name = paste0('DoHeatmap/',ident1, '_', ident2)
folder_featureplot = paste0(folder_base_output,'Analysis/', folder_name,'/')
dir.create( folder_featureplot, recursive = TRUE)
data = SubsetData(object = data_run_label, cells = data_run_label$split_var == category )
print(category)
print(data)
data = subset(data, idents =celltype_list)
plot = DoHeatmap(object = data, features = gene_list,
group.by = "ident") +
ggtitle(category ) +
theme(plot.title = element_text(hjust = 0.5)) +
theme(plot.title = element_text(size=24))
pathName <- paste0(folder_featureplot,paste0(category,'.png'))
png(file=pathName,width=600, height=600)
print(plot)
dev.off()
# The marker CSV is identical each iteration; it is simply rewritten per
# category.
pathName <- paste0(folder_featureplot,paste0(ident1, '_', ident2,' Markers','.csv'))
write.csv(Features, file = pathName,row.names = TRUE)
}
# Combined heatmap across both comparison groups (idents tagged with split_var).
folder_name = paste0('DoHeatmap/',ident1, '_', ident2)
folder_featureplot = paste0(folder_base_output,'Analysis/', folder_name,'/')
data_run_label = label_cells(data_run, cluster_IDs)
# NOTE(review): split_var read from data_run while idents come from
# data_run_label -- only safe if label_cells preserves cell order; confirm.
Idents(data_run_label) = paste0(Idents(data_run_label),' ', data_run@meta.data$split_var)
celltype_list = c(ident1,ident2)
data = subset(data_run_label, idents = celltype_list)
plot = DoHeatmap(object = data, features = gene_list,
group.by = "ident") +
ggtitle('' ) +
theme(plot.title = element_text(hjust = 0.5)) +
theme(plot.title = element_text(size=24))
pathName <- paste0(folder_featureplot,paste0(ident1, '_', ident2,'.png'))
png(file=pathName,width=600, height=1000)
print(plot)
dev.off()
#################################################
# HeatMap: dexamethasone-associated genes in CD14+ monocytes, one heatmap per
# treatment category, saved under Analysis/"Dexa Gene DoHeatmap <category>"/.
gene_list = c('HLA-DQB1', 'HLA-DRB5', 'FCGR1A', 'CCR2', 'CX3CR1')
for (category in category_list){
# Restrict to cells from this category, then to the CD14+ monocyte cluster.
category_cells = SubsetData(object = data_run, cells = data_run$split_var == category )
mono_cells = subset(category_cells, idents = c('CD14+ Mono'))
# Expression heatmap clipped to [0, 2.5], titled with the category name.
heatmap_plot = DoHeatmap(object = mono_cells, features = gene_list,
group.by = "ident", disp.min = 0, disp.max = 2.5) +
ggtitle(category ) +
theme(plot.title = element_text(hjust = 0.5, size = 24))
out_dir = paste0(folder_base_output,'Analysis/', paste0('Dexa Gene DoHeatmap ',category),'/')
dir.create( out_dir, recursive = TRUE)
out_file = paste0(out_dir, category, ' All Mono', '.png')
png(file = out_file, width = 600, height = 600)
print(heatmap_plot)
dev.off()
}
###############################
## Save Data in matrix
###############################
#SaveAsMatrix(data_run,folder_base_output)
###############################
## Plot samples seperately
###############################
# One output directory tree (Cluster/DE/Stats/PCA) and full plot set per sample.
sample_list = unique(data_run$sample_name)
data_run_label = label_cells(data_run,cluster_IDs)
for (sample in sample_list){
folder_output = paste0(folder_base_output,'Samples Seperate/',sample,'/')
print(folder_output)
pathName <- paste0(folder_output,'Cluster')
dir.create( pathName, recursive = TRUE)
pathName <- paste0(folder_output,'DE')
dir.create( pathName, recursive = TRUE)
pathName <- paste0(folder_output,'Stats')
dir.create( pathName, recursive = TRUE)
pathName <- paste0(folder_output,'PCA')
dir.create( pathName, recursive = TRUE)
data = SubsetData(object = data_run_label, cells = data_run_label$sample_name == sample )
# NOTE(review): plotAll is passed `sample_name`, not the loop variable
# `sample` -- if sample_name is not defined at script scope this errors, and
# if it is, every iteration gets the same value; confirm intent.
plotAll(data,folder_output,sample_name,sampleParam,label_TF = TRUE,integrate_TF = FALSE, DE_perm_TF = FALSE,clusterTF = FALSE)
# Per-sample clustering parameters from the sample parameter sheet.
PCA_dim = sampleParam$PCA_dim[sampleParam['Sample'] == sample]
resolution_val = sampleParam$resolution_val[sampleParam['Sample'] == sample]
#data_run_label = getCluster (data,resolution_val, PCA_dim)
#data_run_label = data
#cluster_IDs_sample <- sampleParam$Cluster_IDs[sampleParam['Sample'] == sample]
#browser()
#data_run_label = label_cells(data_run_label,cluster_IDs)
sample_type = ''
folder_name = 'Analysis/Feature Plots'
#FeaturePlot_GeneList(gene_list,folder_output,folder_name,sample_type, FeaturePlotFix = TRUE)
# NOTE(review): this subset result is never used (data is reassigned on the
# next iteration) -- looks like leftover code.
data = subset(data, idents = celltype)
}
data_run_label = label_cells(data_run,cluster_IDs)
celltype = 'T Cell'
ident1 = paste0(celltype, ' ', 'Post ND')
ident2 = paste0(celltype, ' ', 'Post D')
folder_name = paste0('DoHeatmap/',ident1, '_', ident2)
folder_featureplot = paste0(folder_base_output,'Analysis/', folder_name,'/')
filename = paste0(folder_featureplot, ident1, '_', ident2,' Markers.csv')
Features = read.csv(filename)
Features = Features[Features$p_val_adj < 0.05,]
Features = Features[order(Features$avg_logFC),]
gene_list = as.character(Features$X)
gene_list = c(gene_list[1:10], tail(gene_list, n=50))
category_list = unique(data_run_label$split_var)
folder_output = paste0(folder_base_output,'Samples Seperate/')
for (category in category_list){
data = SubsetData(object = data_run_label, cells = data_run$split_var == category )
data = subset(data, idents = celltype)
plot = DoHeatmap(object = data, features = gene_list,
group.by = "sample_name") +
ggtitle(celltype ) +
theme(plot.title = element_text(hjust = 0.5)) +
theme(plot.title = element_text(size=24))
folder_name = paste0('DoHeatmap/',ident1, '_', ident2)
folder_heatMap = paste0(folder_output,'Analysis/', folder_name,'/')
dir.create( folder_heatMap, recursive = TRUE)
pathName <- paste0(folder_heatMap,paste0(ident1, '_', ident2,'_',category,'.png'))
png(file=pathName,width=1500, height=1000)
print(plot)
dev.off()
}
}
|
de8e2b31b49d15bbe01c77a1e626e0bc323001d1 | 6453df600f99837257c753829b49734751e7f280 | /R/upload_paper.R | 63e1c34e31d6e875f3625582176e3e1cee37ce51 | [] | no_license | ciaranmcmonagle/SCRCdataAPI | 6b9922df2aa51cbc78c5d236a9b7e1d4798ed25b | 2f7994dc42344482cdf4b8c43c61de512c8df3f4 | refs/heads/master | 2022-11-26T08:23:23.339651 | 2020-07-27T10:46:54 | 2020-07-27T10:46:54 | 272,692,052 | 0 | 0 | null | 2020-07-27T10:29:15 | 2020-06-16T11:40:10 | R | UTF-8 | R | false | false | 4,193 | r | upload_paper.R | #' upload_paper
#'
#' @param title a \code{string} specifying the title of the paper
#' @param authors a \code{string} specifying the authors
#' @param journal a \code{string} specifying the full journal name
#' @param journal_abbreviation a \code{string} specifying the journal
#' abbreviation
#' @param journal_website a \code{string} specifying the journal homepage
#' @param release_date a \code{POSIXct} of format "%Y-%m-%d %H:%M:%S"
#' specifying the release date of the paper
#' @param abstract a \code{string} specifying the abstract
#' @param keywords a \code{string} specifying keywords or keyphrases
#' separated by "and", e.g. "keyword1 and keyword2 and key phrase1 and keyword3"
#' @param doi a \code{string} specifying the doi
#' @param primary_not_supplement (optional) an object of class \code{logical}
#' where \code{TRUE} is the primary source (default) and false is a supplement
#' @param version (optional) a \code{string} specifying the version number
#' @param key key
#'
#' @export
#'
#' @examples
#' title <- "Covid-19: A systemic disease treated with a wide-ranging approach: A case report"
#' authors <- "Massabeti, Rosanna and Cipriani, Maria Stella and Valenti, Ivana"
#' journal <- "Journal of Population Therapeutics and Clinical Pharmacology"
#' journal_abbreviation <- "J Popul Ther Clin Pharmacol"
#' journal_website <- "https://www.jptcp.com/index.php/jptcp"
#' release_date <- as.POSIXct("2020-01-01 12:00:00", format = "%Y-%m-%d %H:%M:%S")
#' abstract <- "At the end of December 2019, the Health Commission of the"
#' keywords <- "covid-19 and coronavirus disease and monoclonal antibodies and non-invasive mechanical ventilation and treatment"
#' doi <- "10.15586/jptcp.v27iSP1.691"
#'
upload_paper <- function(title,
                         authors,
                         journal,
                         journal_abbreviation,
                         journal_website,
                         release_date,
                         abstract,
                         keywords,
                         doi,
                         primary_not_supplement = TRUE,
                         version = "1.0.0",
                         key) {
  # Check if paper exists in the data registry ----------------------------
  check_exists("external_object",
               list(doi_or_unique_name = paste0("doi://", doi)))
  # Check if journal exists in the data registry --------------------------
  # Reuse the registered source when present, otherwise register it.
  if (check_exists("source", list(name = journal))) {
    sourceId <- get_url("source", list(name = journal))
  } else {
    sourceId <- new_source(name = journal,
                           abbreviation = journal_abbreviation,
                           website = journal_website,
                           key = key)
  }
  # Add paper metadata -----------------------------------------------------
  objectId <- new_object(storage_location_id = "",
                         key = key)
  # Authors: split on the literal " and " separator. strsplit() returns the
  # whole string unchanged when the separator is absent, so no single-author
  # special case is needed (the old grepl("and", ...) check was redundant and
  # fragile -- it also matched "and" inside names such as "Alexander").
  authorList <- as.list(strsplit(authors, " and ", fixed = TRUE)[[1]])
  for (i in seq_along(authorList)) {
    # Each author is "family, personal".
    tmp <- strsplit(authorList[[i]], ", ", fixed = TRUE)[[1]]
    new_author(family_name = tmp[1],
               personal_name = tmp[2],
               object_id = objectId,
               key = key)
  }
  # Keywords: same " and " separated convention; skipped when NA.
  if (!is.na(keywords)) {
    keywordList <- as.list(strsplit(keywords, " and ", fixed = TRUE)[[1]])
    for (i in seq_along(keywordList)) {
      new_keyword(keyphrase = keywordList[[i]],
                  object_id = objectId,
                  key = key)
    }
  }
  # Paper metadata
  new_external_object(doi_or_unique_name = paste0("doi://", doi),
                      primary_not_supplement = primary_not_supplement,
                      release_date = release_date,
                      title = title,
                      description = abstract,
                      version = version,
                      object_id = objectId,
                      source_id = sourceId, # journal
                      original_store_id = "", # pdf website
                      key = key)
}
|
b9ece20146c37fbdf1495a057248b7a92950635e | 3d16db3b9a795c87cc899f3012ee3731e158b9c7 | /shiny_pokerena/mods/player_mod.R | 727cd2de521b5b1d1368d3b8dbe90e93988aaea4 | [] | no_license | systats/pokerena | 5e710d157839693462d3132a50f72f1c782d09c8 | 4c0386e1c4a10de2787a782a20537c320f577f71 | refs/heads/master | 2022-12-18T13:18:50.280356 | 2020-09-14T12:40:32 | 2020-09-14T12:40:32 | 286,004,561 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 724 | r | player_mod.R | init <- 'rbind(
# tibble(name = "aaaaa", fun = list(player_fish)),
# tibble(name = "yyyyy", fun = list(player_random)),
# tibble(name = "meeeee", fun = list(player_app))
tibble(name = "potman", fun = list(player_api)),
tibble(name = "me", fun = list(player_app))
) %>% mutate(credit = 100, bb = 2)'
player_ui <- function(id) {
  # Module UI: an ace code editor (pre-filled with the default player table
  # definition in `init`) plus a verbatim output used as a debug preview.
  ns <- NS(id)
  editor <- aceEditor(ns("code"), mode = "r", value = init)
  preview <- verbatimTextOutput(ns("dev"))
  tagList(editor, br(), preview)
}
player_server <- function(input, output, session){
  # Reactive player table built by evaluating the R code typed into the ace
  # editor; req() guards against an empty/NULL editor value.
  players <- reactive({
    req(input$code)
    # SECURITY(review): eval(parse(text = ...)) executes arbitrary user input;
    # acceptable for a local tool, dangerous if the app is exposed publicly.
    eval(parse(text = isolate(input$code)))
  })
  # Debug view: glimpse() of the evaluated player table.
  output$dev <- renderPrint({
    players() %>% glimpse
  })
  # Expose the reactive so the calling module can consume the player list.
  return(players)
}
|
d6671748c18e04a6364773ee17844c308eb988a5 | abccc6e1ef3a0211c77ce89c5249d84708f525e2 | /R/generate_dataset.R | 2039d6f71d1c8b85a26c19d91374dc2cd017c86c | [] | no_license | FedericoCortese/R4DScm | c6658e24a734d2d6aa6a4aa8d7a8190f76040ba8 | f39aba478d153a6c4a9c7ae017846658b1deb2b9 | refs/heads/master | 2023-06-20T10:40:40.041378 | 2021-07-08T06:22:45 | 2021-07-08T06:22:45 | 278,444,948 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 540 | r | generate_dataset.R | #' Generate dataset for examples
#'
#' This function generates the dataset used for examples
#'
#' @param n: number of observations in the dataset
#'
#' @return A list containing the vector of observations for the dependent variable and a matrix containing the observations of the independent variables
#'
#' @examples
#' set.seed(8675309)
#' generate_dataset(n)
#' @export
generate_dataset <- function(n) {
  # Fixed seed so the example data is reproducible on every call.
  set.seed(8675309)
  # Two independent standard-normal predictors
  # (was `=`; use `<-` consistently for assignment).
  x1 <- rnorm(n)
  x2 <- rnorm(n)
  X <- cbind(x1, x2)
  # Linear response with known coefficients plus standard-normal noise.
  y <- 1 + 0.5 * x1 + 0.2 * x2 + rnorm(n)
  # List of (response vector, predictor matrix), as documented.
  list(y, X)
}
|
ad36d7d1bccc21e0020b48022b42fc9be0273a58 | a114996ecfdd212ea60e54238a24b3bf51112b13 | /Problems/Problem1.R | 69de1567cd6f4b4751a584608d503209bd67738b | [] | no_license | Adam-Hoelscher/ProjectEuler.R | 35610697d5bc4c61e9adfaec6d923a424df20486 | e060484365cafcfcd400c3cecacc189293dfc682 | refs/heads/master | 2021-01-23T01:29:54.585429 | 2017-09-11T18:28:46 | 2017-09-11T18:28:46 | 102,432,553 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 83 | r | Problem1.R | Problem1<-function(){
return (sum((1:999)[1:999 %% 3 == 0 | 1:999 %% 5 == 0]))
}
|
5b763ae14e62cfd65a5aacb3284ebc7f4c87b751 | 04fd68830067728bd3d93c0c081e6d10889d69ea | /man/SampleImpPars.Rd | 06555c383720eef5f15828de1bda6f1eb66b0b09 | [] | no_license | DLMtool/DLMtool | e13ecd9a7a03da70cb62c1041eb2cdbc083757cb | 9fadad574086a7af10cadcffe11c1a1ac2834972 | refs/heads/master | 2021-07-17T05:49:07.717089 | 2021-06-18T15:39:41 | 2021-06-18T15:39:41 | 72,065,930 | 20 | 13 | null | 2021-06-18T15:39:42 | 2016-10-27T03:05:51 | R | UTF-8 | R | false | true | 595 | rd | SampleImpPars.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/SampleOM.R
\name{SampleImpPars}
\alias{SampleImpPars}
\title{Sample Implementation Error Parameters}
\usage{
SampleImpPars(Imp, nsim = NULL, cpars = NULL)
}
\arguments{
\item{Imp}{An object of class 'Imp' or class 'OM'}
\item{nsim}{Number of simulations. Ignored if 'Imp' is class 'OM'}
\item{cpars}{Optional named list of custom parameters. Ignored if 'Imp' is class 'OM'}
}
\value{
A named list of sampled Implementation Error parameters
}
\description{
Sample Implementation Error Parameters
}
\keyword{internal}
|
7706765ad1df0be429bba184aa6cde94a18129ce | a8a6e75a91354ef2b0394b9aedd9e336b76ed039 | /man/planBMP.Rd | 4b6add625e84828ad60313e14bcd2807cb9e6e4f | [] | no_license | sashahafner/biogas | 577e0eaf166ba4905c0cb8f17f7088bf9b1ace12 | 0d31b2bad37d5dccf717a7ec53c703b927a68af7 | refs/heads/master | 2023-08-21T16:31:05.246176 | 2020-04-07T20:30:16 | 2020-04-07T20:30:16 | 131,808,750 | 11 | 2 | null | null | null | null | UTF-8 | R | false | false | 5,412 | rd | planBMP.Rd | \name{planBMP}
\alias{planBMP}
%- Also NEED an '\alias' for EACH other topic documented here.
\title{
Claculate Inoculum and Substrate Mass for BMP Experiments
}
\description{
\code{planBMP} assists in the design of BMP experiments.
It can be used to determine inoculum and substrate masses based on inoculum-to-substrate ratio and volatile solids concentrations, or to calculate inoculum-to-substrate ratio based on masses.
}
\usage{
planBMP(vs.inoc, vs.sub, isr = NA, m.inoc = NA, m.sub = NA,
m.tot = NA, m.vs.sub = vs.sub * m.sub, digits = 3, warn = TRUE,
nice = TRUE)
}
%- maybe also 'usage' for other objects documented here.
\arguments{
\item{vs.inoc}{
volatile solids (VS) concentration of inoculum (g/g = g VS per g inoculum).
Required.
Numeric vector.
}
\item{vs.sub}{
volatile solids (VS) concentration of substrate (g/g = g VS per g substrate).
Required.
Numeric vector.
}
\item{isr}{
inoculum-to-substrate ratio, VS mass basis.
Optional.
Numeric vector.
}
\item{m.inoc}{
total mass of inoculum (g).
Optional.
Numeric vector.
}
\item{m.sub}{
total mass of substrate (g).
Optional.
Numeric vector.
}
\item{m.tot}{
total mass of mixture (inoculum plus substrate) (g).
Optional.
Numeric vector.
}
\item{m.vs.sub}{
VS mass of substrate (g).
Optional.
Numeric vector.
}
\item{digits}{
number of significant digits to display in output.
Default of 3.
Integer vector with length 1.
}
\item{warn}{
control whether warnings are displayed.
Default of TRUE.
Logical vector with length 1.
}
\item{nice}{
control whether output is formatted to look nice and make reading easier.
Default of TRUE.
Only applied for non-vectorized (length 1) calls.
Logical vector with length 1.
}
}
\details{
BMP experiments should be designed giving consideration to the inoculum-to-substrate ratio (ISR), the substrate VS mass, and the mixture VS concentration.
This function calculates inoculum and substrate masses based on VS concentrations and ISR, along with either total mixture mass or substrate VS mass.
Alternatively, it can be used to calculate ISR if the masses have been selected.
Warnings are based on the guidelines of Holliger et al. (2016).
}
\value{
A named numeric vector, or (if any of the first 7 input arguments have a length > 1, i.e., a vectorized call), a data frame.
Names and interpretation are identical to the first 7 input arguments, and also include:
\item{vs.mix}{VS concentration in mixture (g/g)}
\item{m.vs.tot}{total VS mass in mixture (g)}
For non-vectorized calls, the results is returned invisibly and a easy-to-read summary is printed (see \code{nice} argument).
}
\references{
Holliger, C., Alves, M., Andrade, D., Angelidaki, I., Astals, S., Baier, U., Bougrier, C., Buffiere, P., Carbella, M., de Wilde, V., Ebertseder, F., Fernandez, B., Ficara, E., Fotidis, I., Frigon, J.-C., Fruteau de Laclos, H., S. M. Ghasimi, D., Hack, G., Hartel, M., Heerenklage, J., Sarvari Horvath, I., Jenicek, P., Koch, K., Krautwald, J., Lizasoain, J., Liu, J., Mosberger, L., Nistor, M., Oechsner, H., Oliveira, J.V., Paterson, M., Pauss, A., Pommier, S., Porqueddu, I., Raposo, F., Ribeiro, T., Rusch Pfund, F., Stromberg, S., Torrijos, M., van Eekert, M., van Lier, J., Wedwitschka, H., Wierinck, I., 2016. Towards a standardization of biomethane potential tests. \emph{Water Science and Technology} \bold{74}, 2515-2522.
}
\note{
Calculations used in this function are trivial, and they could also be done with a spreadsheet or even pencil and paper.
The advantage here is ease and some flexibility.
In addition to ISR and the other parameters included in this function, expected biogas production rate and bottle headspace volume are important, depending on the method.
For more details, see Holliger et al. (2016).
}
\author{
Sasha D. Hafner, based on suggestion by Konrad Koch
}
\seealso{
\code{\link{calcBgVol}},
\code{\link{calcBgMan}},
\code{\link{calcBgGD}},
\code{\link{cumBg}},
\code{\link{summBg}},
\code{\link{predBg}}
}
\examples{
# Bottles are 500 mL, substrate is wastewater sludge.
# Assume we want no more than 250 mL reacting volume (~250 g)
# First try setting ISR and total mass.
# VS concentrations: 0.02 g/g in inoculum, 0.07 g/g for substrate, ISR = 2.
planBMP(vs.inoc = 0.02, vs.sub = 0.07, isr = 2, m.tot = 250)
# Get 31 g substrate, 220 g inoculum.
# After setup, we can check final values.
planBMP(vs.inoc = 0.018, vs.sub = 0.072, m.sub = 32, m.inoc = 218)
# We didn't quite meet our target in this case--next time use more inoculum to be sure
# We can alternatively specify substrate VS mass
planBMP(vs.inoc = 0.02, vs.sub = 0.07, isr = 2, m.vs.sub = 2)
# Some options
planBMP(vs.inoc = 0.02, vs.sub = 0.07, isr = 2, m.vs.sub = 2, nice = FALSE)
# Perhaps we want to use three different ISRs
planBMP(vs.inoc = 0.02, vs.sub = 0.07, isr = 2:4, m.vs.sub = 2, nice = FALSE)
}
% Add one or more standard keywords, see file 'KEYWORDS' in the
% R documentation directory.
\keyword{manip}
\concept{biogas}
|
fbcf1750abaec86a70af7cd5dabc4ec21b4f439f | de7cc9c6b9d4d3898875d4fb37363728f029d09c | /cachematrix.R | 97f51ce5217b4f68168a2a9f7b5e567a8c0e878f | [] | no_license | nyishamoten/ProgrammingAssignment2 | a74fd70428a2343c27726b3a2a43ca83e8dca59d | 986ece65b9440c48ba7d50fedae189488683fe5a | refs/heads/master | 2021-01-16T22:48:20.229870 | 2014-05-25T19:47:33 | 2014-05-25T19:47:33 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 880 | r | cachematrix.R | ## Put comments here that give an overall description of what your
## functions do
## makeCacheMatrix: This function creates a special "matrix" object that can cache its inverse.
makeCacheMatrix <- function(x = matrix()) {
  # Cached inverse; NULL until cacheSolve() stores it.
  # Fix: was `i <- Null` -- `Null` is an undefined symbol, so calling the
  # function raised "object 'Null' not found". The constant is NULL.
  i <- NULL
  # Replace the stored matrix and invalidate the cached inverse.
  set <- function(y) {
    x <<- y
    i <<- NULL
  }
  # Accessors for the matrix and its cached inverse.
  get <- function() x
  setinverse <- function(inverse) i <<- inverse
  getinverse <- function() i
  # Expose the closure interface consumed by cacheSolve().
  list(set = set, get = get,
       setinverse = setinverse,
       getinverse = getinverse)
}
## cacheSolve: This function computes the inverse of the special "matrix" returned by makeCacheMatrix above.
## If the inverse has already been calculated (and the matrix has not changed), then the cachesolve should retrieve the inverse from the cache
cacheSolve <- function(x, ...) {
  # Return the cached inverse when one exists.
  i <- x$getinverse()
  if (!is.null(i)) {
    return(i)
  }
  # Otherwise compute the inverse, cache it, and return it.
  data <- x$get()
  m <- solve(data, ...)
  # Fix: the original called x$setinverse(i) and returned i, i.e. it cached
  # and returned NULL instead of the freshly computed inverse `m`.
  x$setinverse(m)
  m
}
|
622f7497b9ad2a59f085307699586c8e377a956c | 8a99fce65d3a5aa4bf466eb7054caa4ff652aa23 | /xpathDemo1.R | d500356c0fe1ec2a02c631b9e9ba5a4a46feb226 | [] | no_license | datasci-info/Data-Parsers-in-R | b475d71e43feeac1a7f0801f5f6b312f2fdfaac7 | f9576dfbfc06c498d517468c918055cbe8e55bbb | refs/heads/master | 2020-06-05T11:33:33.872040 | 2015-05-22T23:22:48 | 2015-05-22T23:22:48 | 40,424,547 | 1 | 1 | null | null | null | null | UTF-8 | R | false | false | 591 | r | xpathDemo1.R | library(XML)
# Demo: extracting node values from parsed HTML with XPath (XML package)
# versus CSS selectors (CSS package) on a small inline document.
doc <- "<html>
<head></head>
<body>
<div id='character1' class='character'>
<span class='name'>Mike</span>
<span class='level digit'>10</span>
</div>
<div id='character2' class='character'>
<span class='name'>Stan</span>
</div>
</body>
</html>"
# Parse the HTML string into an XML document tree.
doc <- htmlParse(doc)
# try ...
# Indexing the parsed document with an XPath string returns the matching nodes;
# xmlValue() extracts the text content of the first match.
doc["//*[@class='name']"]
xmlValue(doc["//*[@class='name']"][[1]])
# extract data via xpath: apply xmlValue over every node matching the XPath,
# then flatten the result list into a character vector.
xpathApply(doc,"//*[@class='name']",xmlValue)
unlist(xpathApply(doc,"//*[@class='name']",xmlValue))
# compare with css selector: cssToXpath() translates ".name" to the XPath
# used above; cssApply() does the select-and-extract in one call.
library(CSS)
doc[cssToXpath(".name")]
cssApply(doc, ".name", cssCharacter)
|
83127ce5357001f1840ef6d1e10bd1cd7ab230ce | ce091b228f0090a94e9723e9bae9e8d8c2ef3b94 | /R/newsfreq.R | c5b34658e73b121329b1e061fa530b1f5c8b4f31 | [] | no_license | hrbrmstr/newsfreq | 2034e06611df8d7c08b8cc5b8bfda4c6b1db1f29 | 746bfed43267158e883a07ec224a3b22a848a14f | refs/heads/master | 2021-01-13T02:02:53.901899 | 2015-02-02T11:48:40 | 2015-02-02T11:48:40 | 30,116,907 | 9 | 0 | null | null | null | null | UTF-8 | R | false | false | 10,359 | r | newsfreq.R | #' @title Search newsfreq.com API
#' @description \code{news_search} provides an interface to the \code{newsfreq.com} news keyword search API
#' @details \code{newsfreq.com}'s interface shows you the frequency that given keywords appear in
#' American News Media and can be used to look at trends in news reporting.
#' \code{news_search} provides programmatic access to the search function, returning results in an R data frame where
#' additional analyses can be performed or data can be visualized.\cr
#' \cr
#' You can use boolean operators \code{AND} and \code{OR} with parentheses to specify multiple terms in \code{keywords}.\cr
#' \cr
#' The \code{target} parameter controls which "field" in news stories are queried. Valid values are:
#' \itemize{
#' \item \code{""} Search all article fields (the default)
#' \item \code{lead} Search in article lead paragraph
#' \item \code{title} Search in article headlines
#' \item \code{topics} Search in terms identified as index terms by the article provider
#' \item \code{author} Search for articles by a particular author
#' \item \code{section} Search for articles appearing in a particular news section
#' \item \code{source} Search for articles from a particular news source
#' }
#' Search dates must not be in the future and must also not be on the current day (they won't be in the API database).
#' They can be an atomic character vector in a format \code{as.Date} will recognize or
#' anything that can be coerced to a \code{Date} class.
#' @param keywords search term(s) to query (see Details for specifics)
#' @param target news article component to search in (see Details for valid values and their meaning)
#' @param date_from start date for the search (<= \code{date_to} and not current day or future date).
#' Defaults to yesterday. See Details for information on date formatting.
#' @param date_to end date for search (>= \code{date_from} and not current day). Defaults to yesterday.
#' See Details for information on date formatting.
#' @param source filter search by news source. You can filter your search by news organization.
#' Entering 'Fox News' or 'The Boston Globe' will search only articles from sources matching that name.
#' Full list available on \href{http://bit.ly/newsbanksources}{NewsBank}
#' @param summarize Either "\code{monthly}" or "\code{annual}"
#' @return \code{data.frame} (with additional class of \code{newsfreq}) of search
#' results. If \code{summarize} is "\code{monthly}",
#' then the results will also include \code{year}, \code{month} and
#' \code{month_abb}. For "\code{annual}", only \code{year} will be added to
#' the results.
#' @note The "\code{percent}" value is a per-annum calculation and will only be calculated
#' and added to the results of searches that are summarized \code{monthly} and span full years.
#' It is at best a crude way to normalize the results since the number of news sources
#' (and, hence, articles) changes over time.
#' @export
#' @examples \dontrun{
#' news_search("data breach", date_from="2014-01-01", date_to="2014-12-31")
#' ## date_from date_to year month_abb month count search_date search_terms pct
#' ## 1 2014-01-01 2014-01-31 2014 Jan 01 3963 2015-02-02 data breach 0.12539155
#' ## 2 2014-02-01 2014-02-28 2014 Feb 02 2856 2015-02-02 data breach 0.09036545
#' ## 3 2014-03-01 2014-03-31 2014 Mar 03 2589 2015-02-02 data breach 0.08191742
#' ## 4 2014-04-01 2014-04-30 2014 Apr 04 2170 2015-02-02 data breach 0.06866002
#' ## 5 2014-05-01 2014-05-31 2014 May 05 2680 2015-02-02 data breach 0.08479671
#' ## 6 2014-06-01 2014-06-30 2014 Jun 06 1973 2015-02-02 data breach 0.06242683
#' ## 7 2014-07-01 2014-07-31 2014 Jul 07 1962 2015-02-02 data breach 0.06207879
#' ## 8 2014-08-01 2014-08-31 2014 Aug 08 2585 2015-02-02 data breach 0.08179086
#' ## 9 2014-09-01 2014-09-30 2014 Sep 09 3326 2015-02-02 data breach 0.10523651
#' ## 10 2014-10-01 2014-10-31 2014 Oct 10 2862 2015-02-02 data breach 0.09055529
#' ## 11 2014-11-01 2014-11-30 2014 Nov 11 2473 2015-02-02 data breach 0.07824711
#' ## 12 2014-12-01 2014-12-31 2014 Dec 12 2166 2015-02-02 data breach 0.06853346
#' }
news_search <- newsfreq <- function(keywords=NA, target="",
                                    date_from=(Sys.Date()-1), date_to=(Sys.Date()-1),
                                    source="",
                                    summarize="monthly") {
  # --- Validate inputs ----------------------------------------------------
  # Length check must come first: is.na(character(0)) is logical(0), which
  # crashes `if` (the original `is.na(x) | length(x)==0` had this bug and
  # also used the vectorized `|` in a scalar condition).
  if (length(keywords) == 0 || all(is.na(keywords))) {
    stop("Must specify keywords to search for")
  }
  date_from <- try(as.Date(date_from), silent=TRUE)
  date_to <- try(as.Date(date_to), silent=TRUE)
  # inherits() instead of class(x) == "try-error": class() may have length > 1.
  if (inherits(date_from, "try-error") || inherits(date_to, "try-error")) {
    stop("Must use valid dates")
  }
  if (date_to < date_from) stop("date_to must be >= date_from")
  if (date_to >= Sys.Date()) stop("Cannot search in the future or current day")
  target <- tolower(target)
  # "title" is documented as a valid target but was missing from this check.
  if (!target %in% c("", "lead", "title", "topics", "author", "section", "source")) {
    stop("invalid search target specified")
  }
  summarize <- tolower(summarize)
  if (!summarize %in% c("monthly", "annual")) {
    stop("summarize must be one of 'monthly' or 'annual'")
  }
  # --- Build the form fields expected by the newsfreq.com endpoint --------
  DateFrom <- format(date_from, "%m/%d/%Y")
  DateTo <- format(date_to, "%m/%d/%Y")
  DateString <- sprintf("%s to %s", date_from, date_to)
  SearchString <- keywords
  SearchTarget <- ucfirst(target)
  SearchSource <- source
  SearchType <- ucfirst(summarize)
  url <- "http://www.newsfreq.com/DataNervesApi/api/NewsLibrary"
  ua <- "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_10_2) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/41.0.2272.17 Safari/537.36 R/3.0.0"
  # --- Query the API (mimics the site's own AJAX request) -----------------
  resp <- POST(url,
               encode="form",
               user_agent(ua),
               add_headers(Referer="http://www.newsfreq.com/",
                           `X-Requested-With`="XMLHttpRequest",
                           Accept="application/json, text/javascript, */*; q=0.01"),
               body=list(`query[DateFrom]`=DateFrom,
                         `query[DateTo]`=DateTo,
                         `query[DateString]`=DateString,
                         `query[SearchString]`=SearchString,
                         `query[SearchTarget]`=SearchTarget,
                         `query[SearchSource]`=SearchSource,
                         searchType=SearchType))
  # The endpoint returns JSON for a JS consumer; parse it through a V8 context.
  ct <- new_context()
  dat <- fromJSON(ct$call("JSON.parse", content(resp, as="text")))
  # --- Tidy the response --------------------------------------------------
  dat <- dat %>%
    mutate(DateFrom=js_date_parse(DateFrom),
           DateTo=js_date_parse(DateTo),
           CreatedDate=js_date_parse(CreatedDate),
           year=format(DateFrom, "%Y"),
           month=format(DateFrom, "%m"),
           month_abb=format(DateFrom, "%b")) %>%
    select(date_from=DateFrom,
           date_to=DateTo,
           year,
           month_abb,
           month, count=Count,
           search_date=CreatedDate,
           search_terms=SearchString)
  if (summarize == "annual") {
    dat <- dat %>% select(-month_abb, -month)
  } else {
    # Per-annum percentage is only meaningful when every year has 12 months.
    mods <- dat %>% group_by(year) %>% summarize(tot = n() %% 12)
    if (all(mods$tot == 0)) {
      dat <- dat %>%
        group_by(year) %>%
        mutate(tot=sum(count),
               pct=count/tot) %>%
        ungroup %>%
        select(-tot)
    }
  }
  class(dat) <- c("newsfreq", "data.frame")
  dat
}
#' @rdname news_search
#' @export
is.newsfreq <- function(x) {
  # TRUE when `x` carries the "newsfreq" S3 class assigned by news_search().
  inherits(x, "newsfreq")
}
#' @title Autoplot for \code{newsfreq} objects
#' @description Quick method with sane defaults for generating a \code{ggplot}
#' plot for \code{news_search} results.
#' @param data a \code{newsfreq} object
#' @param breaks A character string, containing one of "\code{day}", "\code{week}",
#' "\code{month}", "\code{quarter}" or "\code{year}". This can optionally
#' be preceded by a (positive or negative) integer and a space, or followed
#' by "\code{s}". Passed to \code{scale_x_date}. Defaults to "\code{year}".
#' @param label_format passed to \code{scale_x_date}. Defaults to "\code{\%Y}"
#' @param value either "\code{count}" for raw article counts or "\code{percent}"
#' Defaults to "\code{count}".
#' @param add_point add points to the lines? Default "\code{FALSE}" (do not plot points)
#' @return \code{ggplot} object
#' @export
autoplot.newsfreq <- function(data, breaks="year", label_format="%Y",
                              value="count", add_point=FALSE) {
  # Fix: the original tested `dat` (undefined in this function's scope)
  # instead of the `data` argument, so the class check never validated input.
  if (!inherits(data, "newsfreq")) {
    stop("'data' must be a 'newsfreq' object")
  }
  if (!value %in% c("count", "percent")) {
    stop("'value' must be either 'count' or 'percent'")
  }
  # Scalar condition: use && (the original used the vectorized &).
  if (value == "percent" && !"pct" %in% names(data)) {
    value <- "count"
    message("'pct' column not found, using 'count'")
  }
  # Pick the y aesthetic and the matching axis label together.
  if (value == "count") {
    gg <- ggplot(data, aes(x=date_from, y=count))
    y_lab <- "# Articles"
  } else {
    gg <- ggplot(data, aes(x=date_from, y=pct))
    y_lab <- "% Articles"
  }
  gg <- gg + geom_line()
  if (add_point) gg <- gg + geom_point(size=1.5)
  gg <- gg + scale_x_date(breaks=date_breaks(breaks),
                          labels=date_format(label_format), expand=c(0,0))
  # Axis formatter: thousands separator for counts, percent for shares.
  if (value == "count") {
    gg <- gg + scale_y_continuous(label=comma)
  } else {
    gg <- gg + scale_y_continuous(label=percent)
  }
  gg <- gg + labs(x=NULL, y=y_lab)
  # Fix: ggtitle(sprintf(title)) used the title as a format string and would
  # error on search terms containing '%'; use the string verbatim instead.
  gg <- gg + ggtitle(unique(data$search_terms))
  gg <- gg + theme_bw()
  gg <- gg + theme(panel.grid=element_blank())
  gg <- gg + theme(panel.border=element_blank())
  gg
}
.onAttach <- function(...) {
  # Show the data-attribution notice only in interactive sessions.
  if (interactive()) {
    packageStartupMessage("Data provided by DataNerves; Quantitative data sourced from NewsLibrary.com for use for educational, scholarly and news reporting purposes only")
  }
}
# from Hmisc
ucfirst <- function (string) {
  # Capitalise the first character of every element that contains no
  # uppercase letter at all; elements with any capital are left untouched.
  plain <- grep("^[^A-Z]*$", string, perl = TRUE)
  string[plain] <- sub("^(.)", "\\U\\1", string[plain], perl = TRUE)
  string
}
# helper for fugly "Date(#)" components
js_date_parse <- function(x) {
str_extract(x, "[[:digit:]]+") %>%
as.numeric %>%
divide_by(1000) %>%
as.POSIXct(origin="1970-01-01 00:00:00") %>%
as.Date()
} |
e84fa9007af1975ec4e8a7b90efe3e60addc9ec6 | 2bec5a52ce1fb3266e72f8fbeb5226b025584a16 | /metadynminer3d/man/read.hills3d.Rd | 05a4ac0151e8738c17af49875560547a922a9c64 | [] | no_license | akhikolla/InformationHouse | 4e45b11df18dee47519e917fcf0a869a77661fce | c0daab1e3f2827fd08aa5c31127fadae3f001948 | refs/heads/master | 2023-02-12T19:00:20.752555 | 2020-12-31T20:59:23 | 2020-12-31T20:59:23 | 325,589,503 | 9 | 2 | null | null | null | null | UTF-8 | R | false | true | 1,142 | rd | read.hills3d.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/readingandfes.R
\name{read.hills3d}
\alias{read.hills3d}
\title{Read 3D HILLS from Plumed}
\usage{
read.hills3d(file = "HILLS", per = c(FALSE, FALSE, FALSE),
pcv1 = c(-pi, pi), pcv2 = c(-pi, pi), pcv3 = c(-pi, pi),
ignoretime = FALSE)
}
\arguments{
\item{file}{HILLS file from Plumed.}
\item{per}{logical vector specifying periodicity of collective variables.}
\item{pcv1}{periodicity of CV1.}
\item{pcv2}{periodicity of CV2.}
\item{pcv3}{periodicity of CV3.}
\item{ignoretime}{time in the first column of the HILLS file will be ignored.}
}
\value{
hillsfile3d object.
}
\description{
`read.hills3d` reads a HILLS file generated by Plumed and returns a hillsfile3d object.
User can specify whether some collective variables are periodic.
}
\examples{
l1<-"1 -1.587 -2.969 3.013 0.3 0.3 0.3 1.111 10"
l2<-"2 -1.067 2.745 2.944 0.3 0.3 0.3 1.109 10"
l3<-"3 -1.376 2.697 3.049 0.3 0.3 0.3 1.080 10"
l4<-"4 -1.663 2.922 -3.065 0.3 0.3 0.3 1.072 10"
fourhills<-c(l1,l2,l3,l4)
tf <- tempfile()
writeLines(fourhills, tf)
read.hills3d(tf, per=c(TRUE,TRUE))
}
|
353d7a2b5f1a219052510a2b6cec18bb5f6c495f | 94fcea6865cbdcdf878238e14e475209e4c55819 | /Task 3- Day 1-Introductory to R.R | 589291bec9cfe06ec59b4fb2953cf6e61c0d534b | [] | no_license | yayazelinsky/R_newprouwc | 67ce31cd0faa6230c5db116c7696cac56da0adf6 | 0e33a854ef49d14c476e5920720ebcac859e2bce | refs/heads/master | 2020-04-19T09:22:13.611118 | 2019-02-01T12:19:09 | 2019-02-01T12:19:09 | 168,108,114 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 3,363 | r | Task 3- Day 1-Introductory to R.R | #Day 1 Introdctory R
#Author: Ayanda
#Laminaria dataset exploring and learning
#date: 29 January 2019
#Loading Libraries
library(tidyverse)
# Read the kelp morphometrics data; read_csv() returns a tibble.
lam <- read_csv("data/laminaria.csv")
head(lam)#head shows you the first six rows of your dataset, default
tail(lam)#tail shows you the last six rows of your dataset, default
head(lam, n = 3)#first three rows, because n = 3 was requested
tail(lam, n = 3)#last three rows, because n = 3 was requested
lam_select <- lam %>% #assigning
  select(site, total_length) #in the laminaria dataset, select only the site and total_length variables
# NOTE(review): this slice() is NOT part of the pipe above (the %>% chain
# ended on the previous line), so it is called on its own with no data --
# presumably it was meant to be piped after select(). Confirm intent.
slice (54:80)#Slice row 54 to 80
View(lam)
lam_kom <- lam %>%
  filter(site == "Kommetjie") #keep only the Kommetjie rows
lam_Sea <- lam %>%
  filter(site == "Sea Point")
lam_Sea2 <- lam %>%
  select(site, blade_length) %>%
  filter(site == "Sea Point")
View(lam_Sea2)
# Rows with the largest / smallest total_length (printed, not stored).
lam %>%
  filter(total_length == max(total_length))
lam %>%
  filter(total_length == min(total_length))#as above, this time the minimum
summary(lam)
dim(lam) #dimensions: observations (rows) and variables (columns)
# Whole-dataset summary statistics for blade_length.
lam %>%
  summarise(avrg_bl = mean(blade_length),
            med_bl = median(blade_length),
            stdev_bl = sd(blade_length))
# Per-site variance and sample size, then standard error = sqrt(var / n).
lam %>%
  group_by(site) %>% #group my data by site
  summarise(var_bl = var(blade_length), #then summarise
            n = n()) %>%
  mutate(se = sqrt(var_bl/n)) #creating a new column (standard error) with mutate
lam_2 <- lam %>% #new dataset so the original data is left unchanged
  select (-blade_length, -blade_thickness)# the minus sign drops those columns
lam_count <- lam %>%
  select(stipe_mass) %>%
  summarise(n = n()) #how many entries for stipe_mass
lam_count <- lam %>%
  select(stipe_mass) %>%
  na.omit %>% #drop rows with NA before counting
  summarise(n = n()) #how many entries for stipe_mass
lam %>%
  select(blade_length) %>%
  summarise(n = n()) #how many entries for blade_length
lam %>%
  select(blade_length) %>%
  na.omit %>% #drop rows with NA before counting
  summarise(n = n()) #how many entries for blade_length
#there is no difference since there are no NA entries in the blade_length column
#Exercise 1
total_length_half <- lam %>%
  mutate(total_length_half = total_length/2) %>%
  na.omit %>%
  filter(total_length_half >100)%>%
  select (site, total_length_half)
#Exercise: Use group_by and summarize () to find the mean ,
#and max_blade_length for each size. Also add the number of observations (hint: see "7n")\
lam %>%
group_by (site) %>%
summarise(mean_blade_length = mean(blade_length),
min_blade_length = min(blade_
|
3e2bcbdf5a455aa04dd88f9c8ff9ed0f2e505d1f | 28fb670db2ea145c66e7c83d71596fb1ef1d8bef | /man/s.caretModelList.Rd | ac24c039496a5b5f7853ce5c055fe1d4227b9871 | [] | no_license | rajkar86/shinypipe | c9e886296ac99fa67a8426909be20b1cc9b2c9cf | 26a788362554372bdc9b44b8f42ef0fd95e3429c | refs/heads/master | 2023-07-16T15:29:53.780104 | 2021-08-19T06:06:55 | 2021-08-19T06:07:08 | 114,509,849 | 6 | 1 | null | null | null | null | UTF-8 | R | false | true | 583 | rd | s.caretModelList.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/p_caretModelList.R
\name{s.caretModelList}
\alias{s.caretModelList}
\title{shinypipe server function for returning a data frame that can potentially be used
as a tuning grid for caret::train}
\usage{
s.caretModelList(input, output, session)
}
\arguments{
\item{input}{shiny input}
\item{output}{shiny output}
\item{session}{shiny session}
\item{return}{return value specification} % NOTE(review): the original description ("shiny session") was copy-pasted from \code{session}; verify this argument against the function definition in R/p_caretModelList.R
}
\description{
shinypipe server function for returning a data frame that can potentially be used
as a tuning grid for caret::train
}
|
87ded31dd2c0dfed3699f4910071d274ed7100f7 | 6c9848cb57ac23a4a2b57a0fe6c227366015b4c1 | /multivariada_2017/scripts/Script 1.R | 619173041b1d7d7e01091d64bc7a4c82e54aef18 | [] | no_license | marcosvital/scriptsR | 543c044c7efd29a9b4443cbb863bda8414e798e9 | b1fc07e8081a282a20175cee8ffb2d95cab34777 | refs/heads/master | 2022-10-11T03:02:00.463112 | 2022-09-27T19:50:25 | 2022-09-27T19:50:25 | 80,924,221 | 3 | 7 | null | null | null | null | ISO-8859-1 | R | false | false | 3,368 | r | Script 1.R | ###############################################################
# MULTIVARIATE STATISTICS - CLASS 2 IN R                      #
# 2017-08-15                                                  #
# Marcos V C Vital                                            #
# Multivariate statistics, ppg-dibict, UFAL                   #
###############################################################
# Set the working directory and read the data:
setwd("D:/R/multivariada 2017")
# Change the path above to your own working directory,
# or, if you prefer, use: setwd(choose.dir())
# In that case R opens a dialog and you select the folder manually.
# List the files in the folder:
dir()
spe<-read.csv("DoubsSpe.csv", row.names=1) # fish species data
env<-read.csv("DoubsEnv.csv", row.names=1) # environmental variables at the sampling sites
spa<-read.csv("DoubsSpa.csv", row.names=1) # coordinates of the sampling sites
# The argument row.names=1 says that the first column of each file holds the sample unit names.
# Check the data with str():
str(spe)
str(env)
str(spa)
# Alternatively, you can inspect the data with summary(),
# but beware: it is not very practical when the data have many columns (as in our spe object).
# Load the required package:
library(vegan)
# If you do not have the package, install it with install.packages("vegan").
##################################################
# Starting to work with the data
# Plot the points in space:
plot(spa$X, spa$Y)
# "Draw" the course of the river:
plot(spa$X, spa$Y, type="n")
lines(spa$X, spa$Y, col="blue4")
text(spa$X, spa$Y, row.names(spa), col="red")
# Add species richness:
riqueza<-specnumber(spe)
plot(spa$X, spa$Y, type="n", ylim=c(20, 120), xlab="Coordenada X (km)", ylab="Coordenada Y (km)", las=1)
lines(spa$X, spa$Y, col="blue4")
points(spa$X, spa$Y, cex=riqueza/3.5, pch=16, col="gray") # add gray circles sized by the species count of each site
points(spa$X, spa$Y, cex=riqueza/3.5, pch=1) # add borders to the circles
points(spa$X, spa$Y, cex=0.5, pch=16) # finally, add a central dot at each site
###########################################
# Exploring correlations:
# Create an object with the physico-chemical water variables (columns 5-11 of env):
fisqui<-env[ , 5:11]
# Correlations among the variables:
cor(fisqui, method="pearson")
# Scatterplot matrix:
pairs(fisqui)
###
# Scatterplot matrix with histograms and correlations
# (helper panels adapted from the ?pairs help page):
# Diagonal panel: scaled histogram of each variable.
panel.hist <- function(x, ...)
{
usr <- par("usr"); on.exit(par(usr))
par(usr = c(usr[1:2], 0, 1.5) )
h <- hist(x, plot = FALSE)
breaks <- h$breaks; nB <- length(breaks)
y <- h$counts; y <- y/max(y)
rect(breaks[-nB], 0, breaks[-1], y, col = "cyan", ...)
}
# Upper panel: absolute Pearson correlation printed in the cell.
panel.cor <- function(x, y, digits = 2, prefix = "", cex.cor, ...)
{
usr <- par("usr"); on.exit(par(usr))
par(usr = c(0, 1, 0, 1))
r <- abs(cor(x, y))
txt <- format(c(r, 0.123456789), digits = digits)[1]
txt <- paste0(prefix, txt)
if(missing(cex.cor)) cex.cor <- 0.8/strwidth(txt)
text(0.5, 0.5, txt, cex = 2)
}
pairs(fisqui, diag.panel=panel.hist, upper.panel = panel.cor)
957a0523ab92d05c3ef1c46b3d612e42f2c09f9a | 8f53b91d4c9fc6b672f16496f7b28e5154e07bc0 | /plot3.R | b6a08009facbd7659f2d56e52416117887235475 | [] | no_license | jorditejedor/ExData_Plotting1 | 8b22035b424d3159ba5e92fdfa48fb3a6fcaa1d3 | 71c4139e73b95a6e290378b6b685ac21028aebf7 | refs/heads/master | 2023-08-18T01:34:56.102792 | 2021-10-13T20:54:17 | 2021-10-13T20:54:17 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 488 | r | plot3.R | source("~/R/ExData_Plotting1/getData.R")
# Plot 3: the three energy sub-metering series over time, saved to plot3.png.
# `emc` comes from getData() (sourced above); it is assumed to hold columns
# DateTime and Sub_metering_1..3 -- TODO confirm against getData().
emc <- getData()

# Series 2 and 3 are overlaid on the base plot in red and blue.
series_labels <- c("Sub_metering_1", "Sub_metering_2", "Sub_metering_3")
series_colors <- c("black", "red", "blue")

png("plot3.png")
with(emc, {
  plot(DateTime, Sub_metering_1, type = "l", xlab = "",
       ylab = "Energy sub metering")
  lines(DateTime, Sub_metering_2, col = "red", lty = "solid", lwd = 1)
  lines(DateTime, Sub_metering_3, col = "blue", lty = "solid", lwd = 1)
})
legend("topright", legend = series_labels,
       lty = c(1, 1, 1), col = series_colors)
dev.off()
243d0043ea0e1ac859101da58da297505fd0c71e | 4cd406c50f327914abfc75a0e5b1f99fe28ad903 | /graph_aaf.R | b7f51e13713a6dae3d61768008b528c7539a4205 | [
"LicenseRef-scancode-warranty-disclaimer",
"LicenseRef-scancode-public-domain"
] | permissive | niaid/primer-id-progs | 80c51acc65f36b069b47475c3fa7c33bfdfeb682 | 87e5770f55c3799513cbe26a885d4f787142c06f | refs/heads/master | 2021-03-19T13:28:24.270943 | 2018-01-31T15:28:18 | 2018-01-31T15:28:18 | 73,227,663 | 0 | 1 | null | null | null | null | UTF-8 | R | false | false | 3,131 | r | graph_aaf.R | #!/usr/bin/env Rscript
# Example input file
# name position coverageDepth numUnambigNonConsensus
# 4 4 3265 5
# 5 5 3265 0
# 6 6 3265 1
# graph_aaf.R <input>
# Makes a scatterplot of alternate allele frequency by position
# Takes output of convert_reads_to_amino_acid.pl or merge_tally.pl or add_consensus_columns_to_frequency_tables.pl or ( add_consensus_columns_to_frequency_tables.pl + merge_tally_overlapping_regions.pl )
args <- commandArgs(TRUE)
pdfname <- sub("[.][^.]*$", ".pdf", args[1], perl=TRUE)
#print(pdfname)
data_in <- read.delim(args[1])
#head(data_in)
data <- subset(data_in, unambigCoverageDepth != 0) # To avoid dividing by zero
#data$freqNonConsensus = data$numUnambigNonConsensus/data$coverageDepth
data$freqNonConsensus = data$numUnambigNonConsensus/data$unambigCoverageDepth
#head(data)
meanfreq <- mean(data$freqNonConsensus)
medianfreq <- median(data$freqNonConsensus)
medianfreq.round <- signif(medianfreq, digits=3)
print(paste("Replacing zero with median frequency:", medianfreq.round))
# How do I get the average background?? medianfreq is closer to the average background.
for (i in row.names(data)){
if(data[i,"freqNonConsensus"] == 0){
data[i,"freqNonConsensus"] = medianfreq
}
} # Replace zero values with median frequency
# Make sure we have a 'position' column.
if (!('position' %in% colnames(data))){
#print("no position");
if ('aminoAcidPosition' %in% colnames(data)){
data$position = data$aminoAcidPosition
} else if ('nucleotidePosition' %in% colnames(data)){
data$position = data$nucleotidePosition;
} else {
print("no position column")
quit(save = "no", status = 1, runLast = FALSE)
}
}
#print(head(data))
library(ggplot2)
pdf(pdfname, width=12, height=8)
#p <- ggplot(data, aes(x=position, y=freqNonConsensus)) + geom_point(stat = "identity") + theme_bw() + coord_trans(y="log") + scale_y_continuous(limits=c(0.00001,1), breaks=c(0.00001,0.0001,0.001,0.01,0.1,1), labels=c("0.00001","0.0001","0.001","0.01","0.1","1"), minor_breaks = c(0.00005,0.0005,0.005,0.05,0.5)) + labs(x="Position", y="Alternate Allele Frequency") + theme(axis.text.x = element_text(size=9, hjust=1, vjust = 0.5, angle=90 ))
#p + geom_hline(yintercept=medianfreq) + annotate("text", x=max(data$position) * 1.05, y=medianfreq * 1.2, label=paste("Median:", medianfreq.round), size=2)
if ('merged' %in% colnames(data)){
p <- ggplot(data, aes(x=position, y=freqNonConsensus, color=merged))
} else {
p <- ggplot(data, aes(x=position, y=freqNonConsensus))
}
p <- p + geom_point(stat = "identity") + theme_bw() + coord_trans(y="log") + scale_y_continuous(limits=c(0.00001,1), breaks=c(0.00001,0.0001,0.001,0.01,0.1,1), labels=c("0.00001","0.0001","0.001","0.01","0.1","1"), minor_breaks = c(0.00005,0.0005,0.005,0.05,0.5)) + labs(x="Position", y="Alternate Allele Frequency") + theme(axis.text.x = element_text(size=9, hjust=1, vjust = 0.5, angle=90 )) + scale_colour_manual(values = c("TRUE" = "red","FALSE" = "black"))
p + geom_hline(yintercept=medianfreq) + annotate("text", x=max(data$position) * 1.05, y=medianfreq * 1.2, label=paste("Median:", medianfreq.round), size=2)
dev.off()
|
938446d6501460d53216cea454e0c8cadbcc1e10 | 5350e973f5f296db9d3e53dac27bcacf157b6592 | /tests/testthat/test_Models.R | 157a1349d384727f11992561c32d784d39352d82 | [
"MIT"
] | permissive | five-dots/ml4e | 781e3e501dff1703c7e691d6fe59b70361dbfbdd | 1275f9aae162d169c0cfd7eeb9174046e81b5de0 | refs/heads/master | 2021-03-11T00:32:38.877672 | 2020-06-08T05:28:23 | 2020-06-08T05:28:23 | 246,498,466 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 654 | r | test_Models.R |
test_that("Models instance works", {
m <- models$clone(deep = TRUE)
m
self <- m
private <- m$.__enclos_env__$private
super <- m$.__enclos_env__$super
expect_is(m, "Models")
expect_is(m[1L], "Model")
expect_equal(dim(m$items), c(2L, 2L))
mod_lm <- new_model("lm")
expect_is(m$add(mod_lm), "Models")
expect_equal(dim(m$items), c(3L, 2L))
## .is_equal() prevent not to add the same engine's model
mod_lm2 <- new_model("lm")
expect_is(m$add(mod_lm2), "Models")
expect_equal(dim(m$items), c(3L, 2L))
## error check
expect_error(m$add(test = 1L),
"A value for key = \"test\" must be a Model object.")
})
|
e2665dc012cc84b79aaa364185dcfd25b2590c7f | e9becf49064780b061251d32bcf11fcf84fc5f8f | /ICC_Script.R | 5f5608b5afa4474c3f3416a9220189f187e9ce60 | [] | no_license | hrdashora/pet-scoring | f7811ec9dd58881025681e9afbc2dd9a1858b4bc | 05d27a17c757be50d837f23e02b0ae5970d91afa | refs/heads/master | 2022-11-07T01:51:03.883738 | 2020-06-12T19:19:47 | 2020-06-12T19:19:47 | 271,389,485 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 490 | r | ICC_Script.R | # Computing ICCs in R
library(irr)
# Joel-Armin ICC
SUVScorecard <- read.csv("data/JoelJoel_ICC_FormattedData.csv", header=TRUE)
SUVRatings <- SUVScorecard[,2:3]
SUVICC <- icc(SUVRatings, model="twoway", type="consistency", unit="single")
print(SUVICC)
# Mark-Mark ICC
PETVASScorecard <- read.csv("data/MarkMark_ICC_FormattedData.csv", header=TRUE)
PETVASRatings <- PETVASScorecard[,2:3]
PETVASICC <- icc(PETVASRatings, model="twoway", type="consistency", unit="single")
print(PETVASICC)
|
313f7553ae9c7e7f908d174c8a298757ab81608b | ee62daaa8a34abd342ed79730a9fdd5093728f57 | /scripts/03_Data_Visualization_NCAA.R | 01a6b3f18fe2a877a9272783929e547140554a8b | [] | no_license | joshorenstein/SPRTPS5323 | 1bea763ed184e6e76db466d264ca1d087299094e | f660afbd35df45b278f08d9ba58a19ab9d021604 | refs/heads/master | 2023-06-08T01:21:37.527857 | 2021-06-14T19:31:53 | 2021-06-14T19:31:53 | 366,211,363 | 1 | 0 | null | null | null | null | UTF-8 | R | false | false | 3,724 | r | 03_Data_Visualization_NCAA.R | #install.packages("tidyverse") #install the packages if need be
#install.packages("tidyverse") #install the packages if need be
#install.packages("mdsr")
library(tidyverse)
library(mdsr)
df <- read_csv("data/nba_basic.csv") # NBA data (not used in this chunk -- presumably for later exercises)
kp <- read_csv("data/kenpom.csv") # NCAA team data from kenpom.com
names(kp) # look at the column names
kp <- kp %>% filter(OE!=0) %>% filter(TOPct_off!=100) # drop teams that didn't play in the 20-21 season (zero/placeholder stats)
View(kp) # open the data viewer (interactive sessions only)
kp %>% distinct(Conference) # list all of the conferences
main <- kp %>% filter(Conference %in% c("B10","P12","SEC","B12","ACC","BE","AAC","WCC")) # keep the major conferences in a separate data frame
#Aesthetics ----
names(main)
# Base plot object: offensive efficiency (y) vs 3-point % (x); layers are added below.
g <- ggplot(data = main,aes(y=OE,x=FG3Pct))
g
g + geom_point(size = 3) # scatterplot
g + geom_point(aes(color = Conference), size = 3) # with color by conference
g + geom_text(aes(label = TeamName, color = Conference), size = 3) # use team names as labels
g + geom_point(aes(color = Conference, size = Tempo)) # map Tempo to the size aesthetic
#Facets ----
# Same scatter, log-scaled y, one panel per conference.
g +
geom_point(alpha = 0.9, aes(size = Tempo)) +
coord_trans(y = "log10") +
facet_wrap(~Conference, nrow = 1) + # one facet per Conference
theme(legend.position = "top")
head(kp)
c <- kp %>%
group_by(Conference) %>%
summarise_if(is.numeric, mean, na.rm = TRUE) #summarize all numeric data by conf
conf <-
kp %>%
group_by(Conference) %>%
summarise(TOPct=mean(TOPct_off)) #group by conference and get average TO
#Bar graph for all conference by Turnover %
p <- ggplot(
data = conf,
aes(x = reorder(Conference, TOPct), y = TOPct)
) +
geom_col(fill = "gray") +
ylab("Turnover %") +
xlab("Conference") +
theme(axis.text.x = element_text(angle = 90, hjust = 1, size = rel(1)))
p
names(main)
p + geom_point(data = conf, size = 1, alpha = 0.3) #add a point
#Univariate displays
names(kp)
g <- ggplot(data = kp, aes(x = OE))
#histogram with binwidth of 1
g + geom_histogram(binwidth = 1) + labs(x = "Average Offensive Eff")
#histogram as density plot
g + geom_density(adjust = 0.3)
names(kp)
#keep pac 12 teams only
p12 <- kp %>%
filter(Conference=="P12")
#bar graph
ggplot(
data = head(p12),#keep top 6 teams
aes(x = reorder(TeamName, -OE), y = OE)
) +
geom_col() +
labs(x = "Team", y = "Average Offensive Eff")
#Multivariate displays
names(main)
g <- ggplot(
data = main,
aes(y = OE, x = FG3Pct)
) +
geom_point()
g #scatterplot
g <- g +
geom_smooth(method = "lm", se = FALSE) +
xlab("Average 3P%") +
ylab("Average Offensive Rating")
g # add a regression line
g <- g +
geom_smooth(method = "lm", se = TRUE) + #add error bars to regression line
xlab("Average 3P%") +
ylab("Average Offensive Rating")
g
summary(kp$FG3Pct)
kp <- kp %>%
mutate(
FG3Pct_rate = cut(
FG3Pct,
breaks = c(25, 31.84, 35.45, 100), #cut the data at the quartiles
labels = c("low", "medium", "high") #split into 3 categories
)
)
View(kp)
g <- g %+% kp # redo the plot with the new dataset
g + aes(color = FG3Pct_rate) # add color of new category
g + facet_wrap(~ FG3Pct_rate) #facet wrap
height <- read_csv('data/height.csv') #load height data
kp <- kp %>%
inner_join(height) #join int
summary(kp$Size)
kp <- kp %>%
mutate(
Size_rate = cut(
Size,
breaks = c(70, 76.32, 77.52, 100),
labels = c("short", "medium", "tall")
)
)
#similar plot to above but with height and block %
ggplot(
data = kp,
aes(x = Size, y = BlockPct, color = Size_rate)
) +
geom_point() +
geom_smooth() +
ylab("Block %") +
xlab("Average Height (in)") +
labs(color = "Team Size")
#boxplots of Tempo/Pace by Conference.
ggplot(
data = main,
aes(
x = Conference,
y = Tempo
)
) +
geom_boxplot() +
xlab("Conference") +
ylab("Tempo")
|
a1dfb5aec0f0608d03467056cdf558f77874d7a0 | 98cfb034e88c36aea1567b4be939f815eaa16da8 | /Classificadores/Tree.r | e9e4d975fcd4d88673c2f39285c2bf86aaff6fe4 | [] | no_license | laendle1999/NuncaFuiContaminada | a6d518c35aa882b47ed07b8389a4c2914ec82cf2 | 57499c2e1351e660e88f4c3ef19ea8f1e1ab669d | refs/heads/main | 2023-06-22T16:53:40.058706 | 2021-07-18T13:41:50 | 2021-07-18T13:41:50 | 381,188,855 | 3 | 1 | null | 2021-07-06T22:04:07 | 2021-06-28T23:52:14 | R | UTF-8 | R | false | false | 845 | r | Tree.r | # Installing Packages
install.packages("e1071")
install.packages("caTools")
install.packages("caret")
# Loading package
library(e1071)
library(caTools)
library(caret)
library(readxl)
library(class)
library(rpart)
library(rattle)
library(RColorBrewer)
# Loading Files
source("Classificadores/CalculaIndicadores.R")
#Loading Train and Test
train.set <- treino_evolucao1_datas
test.set <- teste_evolucao1_datas
#Formatting
train.set$X <- NULL
test.set$X <- NULL
test.set$EVOLUCAO <- ifelse(test.set$EVOLUCAO < 2, 'Cura', 'Obito')
train.set$EVOLUCAO <- ifelse(train.set$EVOLUCAO < 2, 'Cura', 'Obito')
#Training
rpart.tree <- rpart(EVOLUCAO ~ ., data=train.set)
#Predictiong
predictions <- predict(rpart.tree, test.set, type="class")
tb = table(test.set$EVOLUCAO, predictions)
fancyRpartPlot(rpart.tree, caption = NULL)
acccFromTable(tb)
|
3739adf1f37472998beb832e1d1df676c09f12d1 | d4ae1c925add14251620f7fa4b92d52573185829 | /SNU/R/Business_analytics/171115_실습자료_with practice/3_marketbasket_code with practice2.R | f41f888b64b366e872ac51bba1cb2abc0df4713e | [] | no_license | kookoowaa/Repository | eef9dce70f51696e35cec6dc6a5d4ce5ba28c6d7 | 26f9016e65dbbc6a9669a8a85d377d70ca8a9057 | refs/heads/master | 2023-03-08T01:36:29.524788 | 2023-02-21T16:11:43 | 2023-02-21T16:11:43 | 102,667,024 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 8,918 | r | 3_marketbasket_code with practice2.R | rm(list=ls())
gc()
setwd("D:/Dropbox/조교자료/고용노동부_추천_201710/recommender_실습자료")
#install.packages("dplyr")
#install.packages("tidyr")
#install.packages("glmnet")
#install.packages("xgboost")
#########################################
# Market Basket Regression #
#########################################
# Practice 1: Card dataset
data <- read.csv("card.csv") #5027693 by 6
# 고객 "P223597622"
data[data$CLNN=="P223597622" & data$APV_TS_D < 20140700,]
data[which(data$CLNN=="P223597622" & APV_TS_D > 20140700),]
library(dplyr)
#month 추출
data1 <- data %>% mutate(month=ifelse(APV_TS_D>20140700, 7, 6),
month=ifelse(APV_TS_D<20140601, 5, month)) %>% select(-APV_TS_D)
#user정보 추출. 60879명
user <- data1 %>% select(CLNN, SEX_CCD, CLN_AGE, AVG_Y_INA) %>%
distinct(CLNN, .keep_all=TRUE)
#나이, 성별 더미
user <- user %>% mutate(age2 =ifelse( (CLN_AGE>=40 & CLN_AGE <60), 1, 0),
age3 =ifelse(CLN_AGE >=60, 1, 0)) %>% select(-CLN_AGE)
user$SEX_CCD <- ifelse(user$SEX_CCD =="F", 1, 0)
library(tidyr)
#5,6월 자료로 설명변수 만듦
input <- data1 %>% filter(month !=7) %>% group_by(CLNN, MCT_RY_NM) %>%
summarise(count=n()) %>% spread(MCT_RY_NM, count) %>% ungroup()
input <- input %>% inner_join(user, by="CLNN")
input[is.na(input)]=0
head(input)
#7월 자료로 종속변수 만듦
label <- data1 %>% filter(month==7) %>% group_by(CLNN, MCT_RY_NM) %>% summarise(label=1)%>% ungroup()
label <- label %>% group_by(CLNN) %>% spread(MCT_RY_NM, label) %>% ungroup()
label[is.na(label)]=0
head(label)
#고객 순서 똑같은지 check
sum(input$CLNN != label$CLNN)
#30% 는 평가자료로 사용하자.
set.seed(1001)
idx.ts = sample(1:nrow(input), round(nrow(input)*0.3))
idx.ts = sort(idx.ts)
train=input[-idx.ts,]; label.tr = label[-idx.ts,]
test=input[idx.ts,]; label.ts = label[idx.ts,]
#user index는 따로 저장
user.tr = train$CLNN; user.ts = test$CLNN
train = train[,-1]; test = test[,-1]
label.tr = label.tr[,-1]; label.ts = label.ts[,-1]
#구매횟수 많거나 적은 품목 추천
item.count=apply(train[,1:30], 2, sum)
item.count=sort(item.count, decreasing = T)
head(item.count)
#---------- 모형 1: 추천횟수 많은 품목 추천------------------------------------------------#
real.item=colSums(label.ts)
real.item #29: 할/슈, 28: 한식, 27: 편의점
real.item[29]/length(user.ts) #할인점/슈퍼마켓 추천
sum(real.item[c(29,28)])/(2*length(user.ts)) #할인점/슈퍼마켓, 한식 추천
sum(real.item[c(29,28,27)])/(3*length(user.ts)) #할인점/슈퍼마켓,한식, 편의점 추천
sum(real.item[c(25,9,21)])/(3*length(user.ts)) #커피전문점, 백화점, 제과점. 강의노트 틀림
#---------- 모형 2: 로지스틱 모형 ------------------------------------------------#
p.logis = label.ts #확률 저장할 table
library(glmnet)
for(i in 1:30){
lm=glmnet(x=as.matrix(train), y=as.matrix(label.tr[,i]), family="binomial", alpha=0, lambda = 0.02)
p.logis[,i]=predict(lm, as.matrix(test), type="response")
rm(lm); gc()
}
#user별 첫번째, 두번째, 세번째 확률 높은 아이템 인덱스 추출
index1=apply(p.logis, 1, function(x) sort.int(t(x), index.return=TRUE, decreasing = T)$ix[1])
index2=apply(p.logis, 1, function(x) sort.int(t(x), index.return=TRUE, decreasing = T)$ix[2])
index3=apply(p.logis, 1, function(x) sort.int(t(x), index.return=TRUE, decreasing = T)$ix[3])
#Hit ratio (Precision)
sum(as.matrix(label.ts)[cbind(1:nrow(label.ts),index1)])/length(user.ts)
(sum(as.matrix(label.ts)[cbind(1:nrow(label.ts),index1)]) + sum(as.matrix(label.ts)[cbind(1:nrow(label.ts),index2)]))/
(2*length(user.ts))
(sum(as.matrix(label.ts)[cbind(1:nrow(label.ts),index1)]) + sum(as.matrix(label.ts)[cbind(1:nrow(label.ts),index2)])+
sum(as.matrix(label.ts)[cbind(1:nrow(label.ts),index3)]))/
(3*length(user.ts))
#추천 품목수
length(unique(index1))
length(unique(index2))
length(unique(index3))
#품목별로 구매가능성 높은 일부 고객에게 추천
#커피전문점, 백화점, 제과점
colnames(p.logis)[25]; colnames(p.logis)[9]; colnames(p.logis)[21]
sum(label.ts[,25]); sum(label.ts[,9]); sum(label.ts[,21])
#품목별 구매가능성 높은 고객7000명에게 추천
(sum(label.ts[sort.int(t(p.logis[,25]), index.return=TRUE, decreasing = T)$ix[1:7000],25]) +
sum(label.ts[sort.int(t(p.logis[,9]), index.return=TRUE, decreasing = T)$ix[1:7000],9]) +
sum(label.ts[sort.int(t(p.logis[,21]), index.return=TRUE, decreasing = T)$ix[1:7000],21])) / (7000*3)
#---------- Model 3: gradient boosting (xgboost), one model per item ----------#
p.boost = label.ts # table to hold the predicted probabilities
library(xgboost)
for(i in 1:30){
X=xgb.DMatrix(as.matrix(train), label=as.matrix(label.tr)[,i])
model <- xgboost(X, max_depth=3, eta=0.1, nrounds = 200, objective="binary:logistic", verbose = F)
p.boost[,i]=predict(model, as.matrix(test), type="response")
rm(model);gc()
}
# For each user: indices of the 1st, 2nd and 3rd highest-probability items
ind1=apply(p.boost, 1, function(x) sort.int(t(x), index.return=TRUE, decreasing = T)$ix[1])
ind2=apply(p.boost, 1, function(x) sort.int(t(x), index.return=TRUE, decreasing = T)$ix[2])
ind3=apply(p.boost, 1, function(x) sort.int(t(x), index.return=TRUE, decreasing = T)$ix[3])
# Hit ratio for top-1, top-2 and top-3
# NOTE(review): the middle terms below mix index2 (from the logistic model)
# with ind1/ind3 -- possibly a copy-paste slip; verify.
sum(as.matrix(label.ts)[cbind(1:nrow(label.ts),ind1)])/length(user.ts)
(sum(as.matrix(label.ts)[cbind(1:nrow(label.ts),ind1)]) + sum(as.matrix(label.ts)[cbind(1:nrow(label.ts),index2)]))/
(2*length(user.ts))
(sum(as.matrix(label.ts)[cbind(1:nrow(label.ts),ind1)]) + sum(as.matrix(label.ts)[cbind(1:nrow(label.ts),index2)])+
sum(as.matrix(label.ts)[cbind(1:nrow(label.ts),ind3)]))/
(3*length(user.ts))
# Number of distinct items recommended in each slot
length(unique(ind1))
length(unique(ind2))
length(unique(ind3))
# Recommend each item only to the customers most likely to buy it
# coffee shop (25), department store (9), bakery (21): precision at 7,000 customers each
(sum(label.ts[sort.int(t(p.boost[,25]), index.return=TRUE, decreasing = T)$ix[1:7000],25]) +
sum(label.ts[sort.int(t(p.boost[,9]), index.return=TRUE, decreasing = T)$ix[1:7000],9]) +
sum(label.ts[sort.int(t(p.boost[,21]), index.return=TRUE, decreasing = T)$ix[1:7000],21])) / (7000*3)
# Actual number of purchasing customers per item
sum(label.ts[,25], na.rm=T)
sum(label.ts[,9], na.rm=T)
sum(label.ts[,21], na.rm=T)
# Practice 2: try it yourself!
rm(list=ls()) # clear the workspace before starting fresh
gc()
insta <- read.csv("instacart.csv")
#---------------------------------------------------------------------------------------------------#
# Model -------------------------------------------------------------------
head(insta)
length(unique(insta$user_id)); length(unique(insta$product_id))
# Use 30% of the customers as the validation set
set.seed(1000)
idx.ts<- sample(1:length(unique(insta$user_id)),length(unique(insta$user_id))*0.3 , replace=FALSE)
idx.ts<- sort(idx.ts)
# The data are (customer, product) pairs, so store customer IDs rather than row indices
user.tr <- as.data.frame(unique(insta$user_id)[-idx.ts])
user.ts <- as.data.frame(unique(insta$user_id)[idx.ts])
colnames(user.tr)<- c("user_id")
colnames(user.ts)<- c("user_id")
library(dplyr)
tr.mat <- insta %>% inner_join(user.tr, by='user_id')
ts.mat <- insta %>% inner_join(user.ts, by='user_id')
# install xgboost if needed
#install.packages("xgboost")
library(xgboost)
colnames(tr.mat)
# Feature matrix: drop columns 1, 2 and 37 (IDs and the response, presumably -- verify against colnames above)
X <- xgb.DMatrix(as.matrix(tr.mat[,-c(1,2,37)]), label = tr.mat$reordered)
model <- xgboost(data = X, max_depth=5, eta=0.1, nrounds = 200, objective="binary:logistic")
importance <- xgb.importance(colnames(X), model = model)
xgb.ggplot.importance(importance)
# Apply model -------------------------------------------------------------
test.mat <- xgb.DMatrix(as.matrix(ts.mat[,-c(1,2,37)]))
ts.mat$fitted <- predict(model, test.mat)
# Classify as reorder (1) when the predicted probability exceeds the threshold
thres <- 0.2
ts.mat$fitted.y <- ifelse(ts.mat$fitted > thres, 1, 0)
Con.mat=table(Actual = ts.mat$reordered, Predicted = ts.mat$fitted.y)
print(Con.mat)
# accuracy = trace of the confusion matrix over its total
sum(diag(Con.mat))/ sum(Con.mat)
#---------- Precision, recall and F1 on the test set, per user ----------#
# NOTE(review): `table` shadows base::table() as a data object for the rest of the session.
table <- ts.mat %>% group_by(user_id) %>% summarize(den.rec = sum(reordered),
den.pre = sum(fitted.y),
nom = sum(reordered==1 & fitted.y==1))
head(table)
# "None" prediction: if both actual and predicted are all 0, count the numerator
# as 1 (and set denominators to 1) so the user scores a perfect match
table <- table %>% mutate(nom=ifelse(den.rec==0 & den.pre==0, 1, nom),
den.pre=ifelse(den.pre==0, 1, den.pre),
den.rec = ifelse(den.rec==0, 1, den.rec))
# precision and recall per user
table <- table %>% mutate(precision = nom/den.pre, recall = nom/den.rec)
# F1 score (defined as 0 when precision and recall are both 0)
table <- table %>% mutate(f1 = ifelse(precision ==0 & recall==0, 0,
2* precision * recall/(precision + recall)))
# Mean F1 across users
print(mean(table$f1))
|
933480f21ed521e59d2891a5585bd179ec186d04 | 36a2cfd1b36a4907232bca2bb3764e766cbf8c79 | /expected-points/archive/modeling/pbp team rate calculations.R | 233ffc15050ed1972efaf5de22dce3c9c5d9ad3e | [] | no_license | jkope892/research | 8afbe3e2b0324be33d0b54c1c0729d7a6705796c | 508c4fd9f6d6001f0acb22bc53f0f12b123fdac0 | refs/heads/master | 2023-05-02T12:57:50.174845 | 2021-05-24T04:45:39 | 2021-05-24T04:45:39 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 4,051 | r | pbp team rate calculations.R | group_by(game_id, posteam) %>%
mutate(team_air_yards = slide_dbl(air_yards, ~sum(.x, na.rm = TRUE), .before = Inf, .after = -1),
team_rec_yards = slide_dbl(yards_gained, ~sum(.x, na.rm = TRUE), .before = Inf, .after = -1),
team_tds = slide_dbl(pass_touchdown, ~sum(.x, na.rm = TRUE), .before = Inf, .after = -1),
team_attempts = slide_dbl(pass_attempt, ~sum(.x, na.rm = TRUE), .before = Inf, .after = -1)) %>%
ungroup() %>%
group_by(game_id, posteam) %>%
mutate(team_rush_yards = slide_dbl(yards_gained, ~sum(.x, na.rm = TRUE), .before = Inf, .after = -1),
team_touchdowns = slide_dbl(rush_touchdown, ~sum(.x, na.rm = TRUE), .before = Inf, .after = -1),
team_attempts = slide_dbl(rush_attempt, ~sum(.x, na.rm = TRUE), .before = Inf, .after = -1)) %>%
ungroup() %>%
new_game_flag = ifelse(lag(game_id) == game_id, 0, 1),
new_game_flag = ifelse(is.na(new_game_flag),1,new_game_flag),
new_team_airyards = ifelse(new_game_flag == 1, team_air_yards, team_air_yards - lag(team_air_yards)),
new_team_attempts = ifelse(new_game_flag == 1, team_attempts, team_attempts - lag(team_attempts)),
new_team_yards = ifelse(new_game_flag == 1, team_rec_yards, team_rec_yards - lag(team_rec_yards)),
new_team_tds = ifelse(new_game_flag == 1, team_tds, team_tds - lag(team_tds)),
air_yards_share_ToDate = slide2_dbl(lag(air_yards), new_team_airyards, ~get_rate(.x,.y), .before = Inf, .after = 0),
air_yards_share_ToDate = ifelse(is.nan(air_yards_share_ToDate) | is.infinite(air_yards_share_ToDate),0,air_yards_share_ToDate),
yards_share_ToDate = slide2_dbl(lag(yards_gained), new_team_yards, ~get_rate(.x,.y), .before = Inf, .after = 0),
yards_share_ToDate = ifelse(is.nan(yards_share_ToDate) | is.infinite(yards_share_ToDate),0,yards_share_ToDate),
td_share_ToDate = slide2_dbl(lag(pass_touchdown), new_team_tds, ~get_rate(.x,.y), .before = Inf, .after = 0),
td_share_ToDate = ifelse(is.nan(td_share_ToDate) | is.infinite(td_share_ToDate),0,td_share_ToDate),
target_share_ToDate = slide2_dbl(lag(pass_attempt), new_team_attempts, ~get_rate(.x,.y), .before = Inf, .after = 0),
target_share_ToDate = ifelse(is.nan(target_share_ToDate) | is.infinite(target_share_ToDate),0,target_share_ToDate),
new_game_flag = ifelse(lag(game_id) == game_id, 0, 1),
new_game_flag = ifelse(is.na(new_game_flag),1,new_game_flag),
new_team_yards = ifelse(new_game_flag == 1, team_rush_yards, team_rush_yards - lag(team_rush_yards)),
new_team_attempts = ifelse(new_game_flag == 1, team_attempts, team_attempts - lag(team_attempts)),
new_team_tds = ifelse(new_game_flag == 1, team_touchdowns, team_touchdowns - lag(team_touchdowns)),
rush_yards_share_ToDate = slide2_dbl(lag(yards_gained), new_team_yards, ~get_rate(.x,.y), .before = Inf, .after = 0),
rush_yards_share_ToDate = case_when(is.nan(rush_yards_share_ToDate) | is.infinite(rush_yards_share_ToDate) | rush_yards_share_ToDate < 0 ~ 0,
rush_yards_share_ToDate > 1 ~ 1,
TRUE ~ rush_yards_share_ToDate),
rush_td_share_ToDate = slide2_dbl(lag(rush_touchdown), new_team_tds, ~get_rate(.x,.y), .before = Inf, .after = 0),
rush_td_share_ToDate = case_when(is.nan(rush_td_share_ToDate) | is.infinite(rush_td_share_ToDate) | rush_td_share_ToDate < 0 ~ 0,
rush_td_share_ToDate > 1 ~ 1,
TRUE ~ rush_td_share_ToDate),
yards_per_team_attempt = slide2_dbl(lag(yards_gained), new_team_attempts, ~get_rate(.x,.y), .before = Inf, .after = 0),
yards_per_team_attempt = case_when(is.nan(yards_per_team_attempt) | is.infinite(yards_per_team_attempt) ~ 0,
TRUE ~ yards_per_team_attempt),
attempt_share_ToDate = slide2_dbl(lag(rush_attempt), new_team_attempts, ~get_rate(.x,.y), .before = Inf, .after = 0),
attempt_share_ToDate = ifelse(is.na(attempt_share_ToDate) | is.infinite(attempt_share_ToDate),0,attempt_share_ToDate),
rush_share_attempt_share_diff = rush_yards_share_ToDate - attempt_share_ToDate, |
bd5381024fb56502dc3c68368d949a23c6a2d409 | 5358ae49ad5701229b6a9a0f2a215b86ccc2ef49 | /Vancouver_CC-SO.R | 0df658cbc44bf4342a131cbe87eff7034871bc33 | [] | no_license | jumorris2017/SQL-queries | 3f22bd1e1f827523a98fb8d62621c1f739d0cdcd | 32aaa4557737e924d5ffa38bd2d53a36f548cc4d | refs/heads/master | 2018-09-17T17:21:55.939702 | 2018-08-16T23:49:39 | 2018-08-16T23:49:39 | 110,997,468 | 1 | 0 | null | null | null | null | UTF-8 | R | false | false | 2,418 | r | Vancouver_CC-SO.R | ##SR/Non-SR Analysis using Brand Equity Study##
##December FY 18 data##
#load libraries
library(data.table)
library(tidyverse)
library(ggthemes)
#load data
c1 <- fread("O:/CoOp/CoOp194_PROReportng&OM/Julie/canada-all_cc-so.csv")
c1[, area := "Canada"]
c2 <- fread("O:/CoOp/CoOp194_PROReportng&OM/Julie/canada-vancouver_cc-so.csv")
c2[, area := "Vancouver"]
#rbind
l <- list(c1,c2)
can <- rbindlist(l,use.names=T,fill=T)
#melt
can <- melt(can, id.vars=c("FSCL_PER_IN_YR_NUM","FSCL_YR_NUM","area"))
#reduce to the past year
can <- can[(FSCL_YR_NUM==2018&FSCL_PER_IN_YR_NUM<=5)|(FSCL_YR_NUM==2017&FSCL_PER_IN_YR_NUM>5)]
#make year-month variable for plotting
can[, fyfp := paste0(FSCL_YR_NUM,".",str_pad(can[,FSCL_PER_IN_YR_NUM],2,pad="0"))]
#make a plotting height label
can[variable=="SO_SCORE"&area=="Canada", value_y := value+2.5]
can[variable=="SO_SCORE"&area=="Vancouver", value_y := value-2.5]
can[variable=="CC_SCORE"&area=="Canada", value_y := value+2.5]
can[variable=="CC_SCORE"&area=="Vancouver", value_y := value-2.5]
#set labels
xlabels <- c("Mar 17", "Apr 17", "May 17", "June 17", "July 17", "Aug 17",
"Sep 17", "Oct 17", "Nov 17", "Dec 17", "Jan 18", "Feb 18")
ylabel <- "TB Score"
tlabel <- "Vancouver, Canada Customer Experience"
sublabel <- "Company-Operated Stores, March 2017 - February 2018"
caption <- "Canada Store N = 1,750\nVancouver Store N = 122"
#manual legend labels
lname1 <- "Area"
llabels1 <- c("Canada","Vancouver")
lname2 <- "Metric"
llabels2 <- c("Customer Connection","Store Operations")
#values
pdata <- can
px <- can[,fyfp]
py <- can[,value]
groupvar <- can[,variable]
colourvar <- can[,area]
#plot itself
plot2 <- ggplot(data=pdata, aes(x=px, y=py, colour=factor(colourvar), group=interaction(groupvar, colourvar))) +
geom_point(size=1) + geom_line(size=1) +
xlab("") + ylab(ylabel) +
scale_x_discrete(labels=xlabels) +
scale_colour_discrete(name="", labels=llabels1, guide=guide_legend(order=1)) +
guides(colour = guide_legend(override.aes = list(size = 7))) +
scale_y_continuous(limits=c(0,70)) + theme_economist_white(gray_bg = FALSE) +
ggtitle(tlabel) + labs(subtitle=sublabel,caption=caption) +
annotate(size=5, geom="text", x=1, y=63, label= "Store Operations",hjust = 0) +
annotate(size=5, geom="text", x=1, y=38, label= "Customer Connection",hjust = 0) +
geom_text(size = 4, aes(label=py,y=value_y), stat="identity")
print(plot2)
|
a14735d2d4fb279765c9a200077e60cbe31ad884 | cf8992d5754c0dd02a4f0f14ac794b4200e6c699 | /man/lflt_bubbles_size_GcdCat.Rd | 84accb04a9e52653436a085125f497159f39ac91 | [] | no_license | isciolab/lfltmagic | 290541df91e9785aec46ae7c0cdf78a8731f4e72 | effa73c0bd395349a34797fb3d47ab99c4d347d0 | refs/heads/master | 2020-05-01T18:42:28.774587 | 2017-11-20T17:07:23 | 2017-11-20T17:07:23 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | true | 618 | rd | lflt_bubbles_size_GcdCat.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/bubbles.R
\name{lflt_bubbles_size_GcdCat}
\alias{lflt_bubbles_size_GcdCat}
\title{Leaflet bubbles size by categorical variable}
\usage{
lflt_bubbles_size_GcdCat(data, color = "navy", fillOpacity = 0.5,
label = NULL, popup = NULL, minSize = 3, maxSize = 20,
scope = "world_countries", tiles = "CartoDB.Positron")
}
\arguments{
\item{x}{A data.frame}
}
\value{
leaflet viz
}
\description{
Leaflet bubbles size by categorical variable
}
\section{ctypes}{
Gcd-Cat
}
\examples{
lflt_bubbles_size_GcdCat(sampleData("Gcd-Cat", nrow = 10))
}
|
a0eb58ac8a20c1597a14856cd94a4f6168b17137 | ba126cda1f26a903e5373715b027cf6ca31b8293 | /Development/UI/pages/model_moderator_results_page.R | 50d05ab9cbbd23f41b3d40c407c771a43cfa5688 | [
"MIT"
] | permissive | nsalani22/thinkCausal_dev | 1776ea77e7dbf8270b5ea24d688fc1e378bf8b2f | 90b5972845a2be202059691ed7786055dced28e8 | refs/heads/master | 2023-08-04T03:41:19.947925 | 2021-10-06T20:56:58 | 2021-10-06T20:56:58 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 4,796 | r | model_moderator_results_page.R | moderator_page <- tabPanel(
title = "Subgroup Results",
tabsetPanel(
id = "moderator_tabs",
tabPanel(
title = 'ICATE',
sidebarLayout(
sidebarPanel(
awesomeRadio(inputId = "icate_type",
label = 'Plot:',
choices = list("Ordered ICATE" = 'ordered', "Histagram of ICATE" = 'histagram')),
br(),
br(),
div(class = 'backNextContainer',
actionButton(inputId = 'back_results',
label = 'Back'),
actionButton(inputId = 'next_icate_tree',
label = 'Next'))
),
mainPanel(
br(),
plotOutput(outputId = "histigram_icate",
height = 500)
)
)),
tabPanel(title = 'ICATE Regression Tree',
sidebarLayout(
sidebarPanel(
h5("Variable importance interpretation"),
p("Lorem ipsum dolor sit amet, consectetur adipiscing elit, sed do eiusmod tempor incididunt ut labore et dolore magna aliqua. Ut enim ad minim veniam, quis nostrud exercitation ullamco laboris nisi ut aliquip ex ea commodo consequat. Duis aute irure dolor in reprehenderit in voluptate velit esse cillum dolore eu fugiat nulla pariatur. Excepteur sint occaecat cupidatat non proident, sunt in culpa qui officia deserunt mollit anim id est laborum."),
br(),
awesomeRadio(inputId = 'set_tree_depth',
label = 'Tree Depth',
choices = list('1' = 1, '2' = 2, '3' = 3),
inline = T,
selected = '2'),
br(),
br(),
div(class = 'backNextContainer',
actionButton(inputId = 'back_icate',
label = 'Back'),
actionButton(inputId = 'next_subgroup',
label = 'Next'))
),
mainPanel(
br(),
plotOutput(outputId = "analysis_moderator_single_tree",
height = 500)
)
)),
tabPanel(
title = 'Subgroup Analyses',
sidebarLayout(
sidebarPanel(
conditionalPanel(condition = "input.analysis_model_moderator_yes_no == 'Yes'",
awesomeRadio(inputId = 'moderation_type_class',
label = 'Type of Subgroup Analysis:',
choices = c('Prespecified',
'Exploratory'),
selected = 'Prespecified')
),
conditionalPanel(condition = "input.moderation_type_class == 'Prespecified' & input.analysis_model_moderator_yes_no == 'Yes'",
selectInput(inputId = "analysis_moderator_vars",
label = "Group by:",
multiple = FALSE,
choices = NULL,
selected = NULL)),
conditionalPanel(condition = "input.moderation_type_class != 'Prespecified' & input.analysis_model_moderator_yes_no == 'Yes'",
selectInput(inputId = "analysis_moderators_explore_select",
label = "Group by:",
multiple = FALSE,
choices = NULL,
selected = NULL)),
conditionalPanel(condition = "input.analysis_model_moderator_yes_no == 'No'",
selectInput(inputId = "analysis_moderators_explore_only",
label = "Group by:",
multiple = FALSE,
choices = NULL,
selected = NULL)),
br(),
br(),
div(class = 'backNextContainer',
actionButton(inputId = 'back_icate_tree',
label = 'Back')),
br(),
div(class = 'backNextContainer',
actionButton(inputId = 'to_results',
label = 'Back to Results')),
br(),
div(class = 'backNextContainer',
actionButton(inputId = 'to_download',
label = 'Log Analyses'))
),
mainPanel(
br(),
plotOutput(outputId = "analysis_moderators_explore_plot",
height = 500)
)))
)
)
|
483340a28e749903ebf0501d295feab3a499bfbb | 7aa529cf113c927bc2afe300d473e4dd2b916b05 | /tests/testthat.R | 066ade41ed2eb6dc5e03f7be75c49fcb27d6ffb2 | [
"MIT"
] | permissive | zeta1999/saasr | 3943489b39ec338fd1832ebd9f4854841e54d842 | 484f0b0b73be6936e8347c291196899942db8ebd | refs/heads/master | 2021-05-18T23:58:40.311743 | 2019-01-04T21:10:15 | 2019-01-04T21:10:15 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 54 | r | testthat.R | library(testthat)
library(saasr)
test_check("saasr")
|
0e2c3bfbb6a0a027652132ff3f22491718e90e86 | 0a84589c859e409ceefe5ce1f8778979c1a082e9 | /FSGLSimulationStudy/scripts/Tables/FSGLassoSimulation_Table2.R | 0cf70e421ccb18a349cf27bf25cfd3ef8e7606af | [
"MIT"
] | permissive | jcbeer/fsgl | 889a1c664e70066e2eea8cc4dddcd11568c5512d | e28b2fd876079ec5e77511d6e82cfd77deedf85a | refs/heads/master | 2021-04-27T00:53:08.535075 | 2020-07-08T19:20:57 | 2020-07-08T19:20:57 | 122,661,954 | 6 | 1 | null | null | null | null | UTF-8 | R | false | false | 5,231 | r | FSGLassoSimulation_Table2.R | ####################################################################
# Fused Sparse Group Lasso Simulation Study
# Make Table 2: Combinations of (alpha, gamma) yielding the most frequent
# lowest error out of 100 simulation replications
####################################################################
# This script is used to analyze the simulation results
# reported in the manuscript
# "Incorporating Prior Information with Fused Sparse Group Lasso:
# Application to Prediction of Clinical Measures from Neuroimages"
### INPUTS:
## original simulation statistics plus test set MSE
# compaggplot.csv, partaggplot.csv, compdistplot.csv,
# spcompaggplot.csv, sppartaggplot.csv, spcompdistplot.csv,
# exspplot.csv, misspplot.csv, misspspplot.csv
### OUTPUTS:
# text printed to console that can be cut and pasted into LaTeX document
####################################################################
library(xtable) # creates LaTeX tables
########################################
# LOAD DATASETS
########################################
# set the working and data directories
# setwd()
datadir1 <- paste0(getwd(), '/DataForPlots/')
compagg <- read.csv(paste0(datadir1, 'compaggplot.csv'))
partagg <- read.csv(paste0(datadir1, 'partaggplot.csv'))
compdist <- read.csv(paste0(datadir1, 'compdistplot.csv'))
spcompagg <- read.csv(paste0(datadir1, 'spcompaggplot.csv'))
sppartagg <- read.csv(paste0(datadir1, 'sppartaggplot.csv'))
spcompdist <- read.csv(paste0(datadir1, 'spcompdistplot.csv'))
exsp <- read.csv(paste0(datadir1, 'exspplot.csv'))
missp <- read.csv(paste0(datadir1, 'misspplot.csv'))
misspsp <- read.csv(paste0(datadir1, 'misspspplot.csv'))
########################################
# function to determine optimal alpha, gamma combo
# based on most frequent lowest error
########################################
optparam <- function(data, errorvar){
lowest <- rep(NA, 100)
for (i in 1:100){
seeddata <- data[data$seed==i,]
seeddata$alphagamma <- as.character(interaction(seeddata$alpha, seeddata$gamma, sep=', '))
lowest[i] <- seeddata$alphagamma[which.min(seeddata[,errorvar])]
}
mostfreqlowest <- names(which.max(table(lowest)))
return(mostfreqlowest)
}
########################################
# determine optimal alpha, gamma combos
# based on most frequent lowest error
# mean CVE
########################################
meancve <- c(
paste0('(', optparam(compagg, 'mean.cve'), ')'),
paste0('(', optparam(partagg, 'mean.cve'), ')'),
paste0('(', optparam(compdist, 'mean.cve'), ')'),
paste0('(', optparam(spcompagg, 'mean.cve'), ')'),
paste0('(', optparam(sppartagg, 'mean.cve'), ')'),
paste0('(', optparam(spcompdist, 'mean.cve'), ')'),
paste0('(', optparam(exsp, 'mean.cve'), ')'),
paste0('(', optparam(missp, 'mean.cve'), ')'),
paste0('(', optparam(misspsp, 'mean.cve'), ')')
)
########################################
# determine optimal alpha, gamma combos
# based on most frequent lowest error
# mse.beta
########################################
msebeta <- c(
paste0('(', optparam(compagg, 'mse.betas'), ')'),
paste0('(', optparam(partagg, 'mse.betas'), ')'),
paste0('(', optparam(compdist, 'mse.betas'), ')'),
paste0('(', optparam(spcompagg, 'mse.betas'), ')'),
paste0('(', optparam(sppartagg, 'mse.betas'), ')'),
paste0('(', optparam(spcompdist, 'mse.betas'), ')'),
paste0('(', optparam(exsp, 'mse.betas'), ')'),
paste0('(', optparam(missp, 'mse.betas'), ')'),
paste0('(', optparam(misspsp, 'mse.betas'), ')')
)
########################################
# determine optimal alpha, gamma combos
# based on most frequent lowest error
# mse y.test
########################################
mseytest <- c(
paste0('(', optparam(compagg, 'mse.y.test'), ')'),
paste0('(', optparam(partagg, 'mse.y.test'), ')'),
paste0('(', optparam(compdist, 'mse.y.test'), ')'),
paste0('(', optparam(spcompagg, 'mse.y.test'), ')'),
paste0('(', optparam(sppartagg, 'mse.y.test'), ')'),
paste0('(', optparam(spcompdist, 'mse.y.test'), ')'),
paste0('(', optparam(exsp, 'mse.y.test'), ')'),
paste0('(', optparam(missp, 'mse.y.test'), ')'),
paste0('(', optparam(misspsp, 'mse.y.test'), ')')
)
########################################
# scenarios
########################################
scenarios <- c(
'1A. Completely aggregated',
'2B. Partially aggregated',
'3C. Completely distributed',
'4A. Sparse completely aggregated',
'5B. Sparse partially aggregated',
'6C. Sparse completely distributed',
'7B. Extra sparse partially aggregated',
'8B. Misspecified partially aggregated',
'9B. Misspecified sparse partially aggregated'
)
########################################
# make table
# NOTE: There was a tie for 7B test MSE
# (See Supplementary Table S8)
# (0.8, 0.8) needs to be added manually
########################################
header <- c('True coefficient scenario', 'Mean CVE', 'MSE Beta', 'MSE Y Test')
table2 <- cbind(scenarios, meancve, msebeta, mseytest)
colnames(table2) <- header
print(xtable(table2, caption='Combinations of ($\\alpha$, $\\gamma$) yielding the most frequent lowest error out of 100 simulation replications', label='tab:simresults', align=c('l', 'l', 'r', 'r', 'r')), include.rownames=FALSE)
|
58975b07070a85b1aac6b68faa667e933148de98 | 8cfbb80abcbb830227239f10d305b550892d821a | /NumericalAnalysis.R | 4bf0f24a0a3a84ca60adbb0ca03101ca60583b83 | [] | no_license | mcastagnaa/PerfNCCF | e0d535479c274dd42ce0c9804deb7a179008695f | f0fb47e2aa28fcb2a1cdc199e88a8e1e5557cd5f | refs/heads/master | 2021-01-19T09:23:46.608291 | 2014-12-09T20:59:35 | 2014-12-09T20:59:35 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 2,187 | r | NumericalAnalysis.R | #exploring plots
hist(panelA$NCCF, prob = TRUE)
curve(dnorm(x, mean = mean(panelA$NCCF, na.rm = TRUE),
sd = sd(panelA$NCCF, na.rm = TRUE)),
add=TRUE, col = "red")
plot(panelA$RelPerf3y, panelA$NCCF)
plot(panelA$Rank1y, panelA$NCCF)
# TODO:
# add sector information
modelKitchenSink <- lm(NCCF ~
AbsPerf3m +
AbsPerf6m +
AbsPerf1y +
AbsPerf2y +
AbsPerf3y +
RelPerf3m +
RelPerf6m +
RelPerf1y +
RelPerf2y +
RelPerf3y +
Rank3m +
Rank6m +
Rank1y +
Rank2y +
Rank3y,
data = panelA, na.action = na.omit)
summary(modelKitchenSink)
model <- step(modelKitchenSink, direction="both")
#
#
# model1 <- lm(NCCF ~ AbsPerf3m +
# AbsPerf6m +
# AbsPerf1y +
# AbsPerf2y +
# AbsPerf3y, data = panelA, na.action = na.omit)
# summary(model1)
#
# model2 <- lm(NCCF ~ RelPerf3m +
# RelPerf6m +
# RelPerf1y +
# RelPerf2y +
# RelPerf3y, data = panelA, na.action = na.omit)
# summary(model2)
#
# model3 <- lm(NCCF ~ Rank3m +
# Rank6m +
# Rank1y +
# Rank2y +
# Rank3y, data = panelA, na.action = na.omit)
# summary(model3)
#
# model4 <- lm(NCCF ~
# RelPerf3y +
# Rank1y, data = panelA, na.action = na.omit)
#
# summary(model4)
# confint(model4)
#
# model4s <- lm(NCCF ~
# RelPerf3y +
# Rank1y,
# data = modelScaled, na.action = na.omit)
summary(model)
confint(model)
rm(modelKitchenSink)
#nice plots
# xyplot(NCCF ~ RelPerf3y | Vehicle,
# data=panelA, #type=c("p","r"),
# panel = function(x,y,...){
# panel.xyplot(x, y, pch = 21,col = "black")
# panel.lmline(x,y,col = "red")}
# )
|
f3091b034a27656761762923d16758a0249e7c2d | 780b7cd922960389411c2dd7134b37bfaf7779f9 | /03UcenjeModela/Sonce/Vkljuci validacijsko mnozico/randomForest_pVkljuciValid.r | cdc9905ea378ec1f725ebb06db60f1be504d1eaf | [] | no_license | JureP/Napove-elektro | 8b77f2d5faf7fd0c55994ebeb3dfa5aacf45c4cc | 51f2de6dedd85031693d17c9f2531cbd24152677 | refs/heads/master | 2021-01-10T15:57:05.264566 | 2015-11-15T22:25:09 | 2015-11-15T22:25:09 | 44,756,358 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 2,897 | r | randomForest_pVkljuciValid.r | ## random forest model (samo soncen elektrarne) UCENJE Z VKLJUCNO VALIDACIJSKO MNOZICO
## odstrani obdobja ko elektrarna ne deluje
## iz nocnih ur vzame samo nekaj nakljucnih ur na dan
library(randomForest)
library(xts)
## okolje kjer so funckije
OkoljeFunkcije <- 'C:/Users/Podlogar/Documents/Projekt Elektro/Funkcije'
## okolje kjer so feature matrike (train)
OkoljeFM <- 'C:/Users/Podlogar/Documents/Projekt Elektro/00_Podatki/featureMatrix/Train'
## okolje kjer so feature matrike (valid)
OkoljeFM_valid <- 'C:/Users/Podlogar/Documents/Projekt Elektro/00_Podatki/featureMatrix/Valid'
## okolje kjer so realizacije (train)
OkoljeReal <- 'C:/Users/Podlogar/Documents/Projekt Elektro/00_Podatki/realizacija/Train'
## okolje kjer so realizacije (valid)
OkoljeReal_valid <- 'C:/Users/Podlogar/Documents/Projekt Elektro/00_Podatki/realizacija/Valid'
## okolje kjer se ustvari mapa v katero se shranijo modeli
Okolje1 <- 'C:/Users/Podlogar/Documents/Projekt Elektro/03_Ucenje modela/Sonce/VkljuciValid/Model/RF_vkljuciValid'
setwd(OkoljeFunkcije)
for(fun in dir()){
print(fun)
source(fun)
}
stDreves = 500
setwd(OkoljeFM)
for(kraj in dir()){
print(kraj)
## nalozi vremensko napvoed za kraj
if(substr(kraj, 1, 5) == 'vreme'){
## ucna matrika
featureMatrix <- readRDS(kraj)
## validacijska matrika
setwd(OkoljeFM_valid)
krajValid <- paste0(substr(kraj, 1, nchar(kraj)-9), 'Valid.rds')
featureMatrixValid <- readRDS(krajValid)
## zdruzevanje matrik
featureMatrix <- rbind(featureMatrix, featureMatrixValid)
setwd(OkoljeReal)
## za vse elektrarne v blizini kraja kraj uporabi vremensko napvoed
for(elekt in dir()){
setwd(Okolje1)
if (gsub(".*vreme_|_FM.*", "", kraj) == gsub(".*K]|_.*", "", elekt) &
gsub(".*T]|_.*", "", elekt) == 'Sonce' &
!(substr(elekt,1,8) %in% substr(dir(),1,8))){
setwd(OkoljeReal)
print(elekt)
realizacija <- readRDS(elekt)
setwd(OkoljeReal_valid)
imeValid <- dir()[substr(dir(),1,8) == substr(elekt,1,8)]
realizacijaValid <- readRDS(imeValid)
realizacija <- c(realizacija, realizacijaValid)
## ciscenje pdoatkov
## odstrani obdobje ko elektrarna ne dela
realizacija <- nedelovanje(realizacija, 1)
## odstranitev dela noci
realizacija <- odstNoc(realizacija, 2)
## odstranjevanje NA-jev
X <- as.matrix(featureMatrix)
Y <- as.vector(realizacija)
A <- cbind(X,Y)
A <- na.omit(A)
## ucenje modela
model <- randomForest(A[,1:(ncol(A)-1)], A[, ncol(A)], ntree = stDreves, do.trace = TRUE) ## glej clanek
## validacija modela
napovedRF <- predict(model, featureMatrixValid)
napovedRF <- xts(napovedRF, index(featureMatrixValid))
## shranjevanje modela
setwd(Okolje1)
## ime modela
imeModel <- paste0(substr(elekt, 1, 8), '_model_', stDreves,'_RF_vkljuciValid.rds')
saveRDS(model, imeModel)
}
}
}
setwd(OkoljeFM)
}
|
e1df180611dc28d1ab3e9cd98a538f70548f7299 | e48983167846b89c4d3fdb1ed4e5a770f465ab02 | /R/check_list.R | d5b2715cbe0adfc79becc76c22cc1e361d6812d5 | [] | no_license | Dustin21/GP.Bagging | edac2c00153a945df1e096e0281ca7e28919031a | 9f48dbe5a6b5f4f9496ed30aafc0e306249e4643 | refs/heads/master | 2016-09-01T18:33:50.178749 | 2014-11-27T19:47:26 | 2014-11-27T19:47:26 | 27,163,009 | 1 | 0 | null | null | null | null | UTF-8 | R | false | false | 46 | r | check_list.R | check_list <- function(mod) {
is.list(mod)
}
|
45e463dbfca0e29681c6c03a093b1aa62fb7834f | 9a5bb87f55bb92b94ee5c930fee6555340909b9c | /process-gbk.R | e38343ff7560f8af6ed9ab7235c99d5bafb93422 | [] | no_license | michbur/csgA-biofilm | 622be7d10e8c2dc0adc9da08ef96533236b5b55f | 0ff945c1a19b9f79390b94c476b2c16c4b79de89 | refs/heads/master | 2021-08-04T03:34:22.372290 | 2019-04-05T09:21:42 | 2019-04-05T09:21:42 | 102,089,201 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 7,772 | r | process-gbk.R | prime_field_re = "^[[:upper:]]+[[:space:]]+"
sec_field_re = "^( {5}|\\t)[[:alnum:]'_]+[[:space:]]+(complement|join|order|[[:digit:]<,])"
strip_fieldname = function(lines) gsub("^[[:space:]]*[[:upper:]]*[[:space:]]+", "", lines)
readGenBank2 <- function(file, text = readLines(file), partial = NA,
ret.seq = TRUE, verbose = FALSE) {
if(missing(text)) {
if(missing(file))
stop("One of text or file must be specified.")
if(is(file, "GBAccession"))
text = .getGBfromNuccore(file)
else if (!file.exists(file))
stop("file does not appear to an existing file or a GBAccession object. Valid file or text argument is required.")
}
if(is(text, "list"))
return(pblapply(text, function(txt) readGenBank(text = txt, partial = partial, ret.seq= ret.seq, verbose = verbose)))
## we always read in sequence because it is required for variant annotations
## we throw it away after if the user set ret.seq=FALSE
prsed = parseGenBank2(text = text, partial = partial, verbose = verbose,
ret.seq = TRUE)
lineage <- gsub("[ .]", "", prsed[["SOURCE"]][["lineage"]])
all_features <- lapply(prsed[["FEATURES"]], function(ith_feature) {
if(class(ith_feature) == "data.frame") {
data.frame(protein_id = ith_feature[["protein_id"]], translation = ith_feature[["translation"]],
stringsAsFactors = FALSE)
} else {
NULL
}
}) %>%
bind_rows() %>%
mutate(organism = prsed[["SOURCE"]][["organism"]],
lineage = paste0(gsub("[ .]", "", prsed[["SOURCE"]][["lineage"]]), collapse = "|"),
definition = prsed[["DEFINITION"]]) %>%
select(definition, organism, lineage, protein_id, translation)
}
parseGenBank2 = function(file, text = readLines(file), partial = NA,
verbose = FALSE,
ret.anno = TRUE,
ret.seq = TRUE) {
if(!ret.anno && !ret.seq)
stop("Must return at least one of annotations or sequence.")
bf = proc.time()["elapsed"]
if(missing(text) && !file.exists(file))
stop("No text provided and file does not exist or was not specified. Either an existing file or text to parse must be provided.")
if(length(text) == 1)
text = fastwriteread(text)
fldlines = grepl(prime_field_re, text)
fldfac = cumsum(fldlines)
fldnames = gsub("^([[:upper:]]+).*", "\\1", text[fldlines])[fldfac]
spl = split(text, fldnames)
resthang = list(LOCUS = genbankr:::readLocus(spl[["LOCUS"]]))
resthang[["FEATURES"]] = readFeatures2(spl[["FEATURES"]],
source.only=!ret.anno,
partial = partial)
seqtype = genbankr:::.seqTypeFromLocus(resthang$LOCUS)
resthang$ORIGIN = if(ret.seq)
genbankr:::readOrigin(spl[["ORIGIN"]],
seqtype = seqtype)
else NULL
if(ret.anno) {
resthang2 = mapply(function(field, lines, verbose) {
switch(field,
DEFINITION = genbankr:::readDefinition(lines),
ACCESSION = genbankr:::readAccession(lines),
VERSION = genbankr:::readVersions(lines),
KEYWORDS = genbankr:::readKeywords(lines),
SOURCE = genbankr:::readSource(lines),
## don't read FEATURES, ORIGIN, or LOCUS because they are
## already in resthang from above
NULL)
}, lines = spl, field = names(spl), SIMPLIFY=FALSE, verbose = verbose)
resthang2$FEATURES = resthang2$FEATURES[sapply(resthang2$FEATURES,
function(x) length(x)>0)]
resthang2 = resthang2[!names(resthang2) %in% names(resthang)]
resthang = c(resthang, resthang2)
}
##DNAString to DNAStringSet
origin = resthang$ORIGIN
if(ret.seq && length(origin) > 0) {
typs = sapply(resthang$FEATURES, function(x) x$type[1])
srcs = genbankr:::fill_stack_df(resthang$FEATURES[typs == "source"])
## dss = DNAStringSet(lapply(GRanges(ranges(srcs), function(x) origin[x])))
dss = switch(seqtype,
bp = DNAStringSet(lapply(ranges(srcs), function(x) origin[x])),
aa = AAStringSet(lapply(ranges(srcs), function(x) origin[x])),
stop("Unrecognized origin sequence type: ", seqtype)
)
names(dss) = sapply(srcs,
function(x) as.character(GenomeInfoDb:::seqnames(x)[1]))
if(!ret.anno)
resthang = dss
else
resthang$ORIGIN = dss
} else if (!ret.anno) { ##implies ret.seq is TRUE
stop("Asked for only sequence (ret.anno=FALSE) from a file with no sequence information")
}
af = proc.time()["elapsed"]
if(verbose)
message("Done Parsing raw GenBank file text. [ ", af-bf, " seconds ]")
resthang
}
readFeatures2 = function(lines, partial = NA, verbose = FALSE,
source.only = FALSE) {
if(substr(lines[1], 1, 8) == "FEATURES")
lines = lines[-1] ## consume FEATURES line
fttypelins = grepl(sec_field_re, lines)
featfactor = cumsum(fttypelins)
if(source.only) {
srcfeats = which(substr(lines[fttypelins], 6, 11) == "source")
keepinds = featfactor %in% srcfeats
lines = lines[keepinds]
featfactor = featfactor[keepinds]
}
##scope bullshittery
chr = "unk"
totsources = length(grep("[[:space:]]+source[[:space:]]+[<[:digit:]]", lines[which(fttypelins)]))
numsources = 0
everhadchr = FALSE
do_readfeat = function(lines, partial = NA) {
## before collapse so the leading space is still there
type = gsub("[[:space:]]+([[:alnum:]_']+).*", "\\1", lines[1])
##feature/location can go across multpiple lines x.x why genbank? whyyyy
attrstrts = cumsum(grepl("^[[:space:]]+/[^[:space:]]+($|=([[:digit:]]|\"))", lines))
lines = tapply(lines, attrstrts, function(x) {
paste(gsub("^[[:space:]]+", "", x), collapse="")
}, simplify=TRUE)
rawlocstring = lines[1]
rngstr = genbankr:::strip_feat_type(rawlocstring)
## consume primary feature line
lines = lines[-1]
if(length(lines)) {
attrs = genbankr:::read_feat_attr(lines)
names(attrs) = gsub("^[[:space:]]*/([^=]+)($|=[^[:space:]].*$)", "\\1", lines)
if(type == "source") {
numsources <<- numsources + 1
if("chromosome" %in% names(attrs)) {
if(numsources > 1 && !everhadchr)
stop("This file appears to have some source features which specify chromosome names and others that do not. This is not currently supported. Please contact the maintainer if you need this feature.")
everhadchr <<- TRUE
chr <<- attrs$chromosome
} else if(everhadchr) {
stop("This file appears to have some source features which specify chromosome names and others that do not. This is not currently supported. Please contact the maintainer if you need this feature.")
## this assumes that if one source has strain, they all will.
## Good assumption?
} else if("strain" %in% names(attrs)) {
chr <<- if(totsources == 1) attrs$strain else paste(attrs$strain, numsources, sep=":")
} else {
chr <<- if(totsources == 1) attrs$organism else paste(attrs$organism, numsources, sep=":")
}
}
} else {
attrs = list()
}
genbankr:::make_feat_gr(str = rngstr, chr = chr, ats = c(type = type, attrs),
partial = partial)
}
if(verbose)
message(Sys.time(), " Starting feature parsing")
resgrs = tapply(lines, featfactor, function(i) {
do_readfeat(i, partial = partial)
}, simplify=FALSE)
if(verbose)
message(Sys.time(), " Done feature parsing")
resgrs
}
|
473bf40ded4f39d8672ce1ebf5f1c7ffcf60f2eb | afc067cc21eb8058a93e9c4f9af1d648e22ce9b8 | /hyphy-src/src/lib/LibraryModules/R/HyPhy.R | fb4a08d442ece57a8318fb279977707db1aa1145 | [
"MIT"
] | permissive | veg/hyphy-python | 76c0ea7d6a6a30735903b1182327762d64cc104a | 6f3e2d19bafe27d0b8a6626201a3326022010811 | refs/heads/master | 2023-05-25T19:00:26.404623 | 2023-05-15T19:17:33 | 2023-05-15T19:17:33 | 39,527,089 | 0 | 1 | NOASSERTION | 2023-05-25T02:19:32 | 2015-07-22T19:52:43 | C++ | UTF-8 | R | false | false | 39,161 | r | HyPhy.R | # This file was automatically generated by SWIG (http://www.swig.org).
# Version 2.0.4
#
# Do not make changes to this file unless you know what you are doing--modify
# the SWIG interface file instead.
## Generated via the command line invocation:
## swig -c++ -r THyPhy.h
# srun.swg #
#
# This is the basic code that is needed at run time within R to
# provide and define the relevant classes. It is included
# automatically in the generated code by copying the contents of
# srun.swg into the newly created binding code.
# This could be provided as a separate run-time library but this
# approach allows the code to to be included directly into the
# generated bindings and so removes the need to have and install an
# additional library. We may however end up with multiple copies of
# this and some confusion at run-time as to which class to use. This
# is an issue when we use NAMESPACES as we may need to export certain
# classes.
######################################################################
# ---- SWIG runtime: base S4 classes and generics ---------------------------
# Each definition is guarded by getClassDef()/isGeneric() so that sourcing
# this generated file more than once (or loading several SWIG modules into
# one session) does not clobber classes that already exist.

# Root virtual class for all SWIG-wrapped C/C++ structs.
if(length(getClassDef("RSWIGStruct")) == 0)
  setClass("RSWIGStruct", representation("VIRTUAL"))

# Handle holding the external pointer to the underlying C/C++ object.
if(length(getClassDef("ExternalReference")) == 0)
# Should be virtual but this means it loses its slots currently
#representation("VIRTUAL")
  setClass("ExternalReference", representation( ref = "externalptr"))

# Pointer to a native routine, carrying its declared signature as strings.
if(length(getClassDef("NativeRoutinePointer")) == 0)
  setClass("NativeRoutinePointer",
           representation(parameterTypes = "character",
                          returnType = "character",
                          "VIRTUAL"),
           contains = "ExternalReference")

if(length(getClassDef("CRoutinePointer")) == 0)
  setClass("CRoutinePointer", contains = "NativeRoutinePointer")

# Integer-backed enumeration values; concrete enum classes are created by
# defineEnumeration() below.
if(length(getClassDef("EnumerationValue")) == 0)
  setClass("EnumerationValue", contains = "integer")

# Generic: copy a C-level reference into a plain R object.  By default the
# target is a new instance of the class name with any trailing "Ref" removed.
if(!isGeneric("copyToR"))
  setGeneric("copyToR",
             function(value, obj = new(gsub("Ref$", "", class(value))))
               standardGeneric("copyToR"
                               ))

# Generic: explicitly release the underlying native object.
setGeneric("delete", function(obj) standardGeneric("delete"))
# Allocate a fresh C-level reference for class `className` by dispatching to
# the generated constructor named "new_<className>".  Any additional
# arguments are forwarded to that constructor.  `append` is accepted for
# interface compatibility with callers but is not consulted here.
SWIG_createNewRef =
function(className, ..., append = TRUE)
{
  ctorName <- paste("new", className, sep = "_")
  ctor <- get(ctorName, mode = "function")
  ctor(...)
}
# Generic: copy an R object's fields back into a corresponding C-level
# reference.
#
# Fix: the default for `obj` previously called RSWIG_createNewRef(), a name
# that is not defined anywhere in this runtime; the allocator defined above
# is SWIG_createNewRef().  Because default arguments are evaluated lazily,
# the typo only surfaced when copyToC() was invoked without an explicit
# `obj` — at which point it raised "could not find function".
if(!isGeneric("copyToC"))
  setGeneric("copyToC",
             function(value, obj = SWIG_createNewRef(class(value)))
               standardGeneric("copyToC"
                               ))
#
# Register a SWIG enumeration named `name`:
#  * stores the named value vector as ".__E__<name>" in `where` (mirroring
#    the ".__C__" convention R uses for class metadata), and
#  * defines an S4 class "<name><suffix>" extending EnumerationValue.
# `where` defaults to the caller's top-level environment so the definitions
# land in the package/namespace currently being loaded.
defineEnumeration =
function(name, .values, where = topenv(parent.frame()), suffix = "Value")
{
  # Mirror the class definitions via the E analogous to .__C__
  defName = paste(".__E__", name, sep = "")
  assign(defName, .values, envir = where)

  # The value class is normally "<name>Value"; an empty suffix reuses the
  # bare enumeration name.
  if(nchar(suffix))
     name = paste(name, suffix, sep = "")

  setClass(name, contains = "EnumerationValue", where = where)
}
# Map an enumeration element name (a character scalar) to its integer code.
# The lookup table is the named vector stored as ".__E__<type>" by
# defineEnumeration().  An unknown name yields NA_integer_ together with a
# warning; a non-character `name` falls through the `if` and yields NULL,
# preserving the historical behavior of this runtime.
enumToInteger <- function(name, type)
{
  if (is.character(name)) {
    codes <- get(paste0(".__E__", type))
    value <- as.integer(codes[name])
    if (is.na(value)) {
      warning("enum not found ", name, " ", type)
    }
    value
  }
}
# Inverse of enumToInteger(): map an integer code back to the element name
# recorded in the ".__E__<type>" lookup vector.  A code with no match
# produces NA, as match() does.
enumFromInteger =
function(i, type)
{
  codes <- get(paste0(".__E__", type))
  names(codes)[match(i, codes)]
}
# Return `obj` unchanged when it already is (or extends) `type`; otherwise
# coerce it with methods::as().  Used by generated marshalling code to avoid
# redundant coercions.
coerceIfNotSubclass =
function(obj, type)
{
  if (is(obj, type)) {
    obj
  } else {
    as(obj, type)
  }
}
# Proxy for a native array: the external pointer plus a cached `dims`
# vector, so length() can answer without crossing into C code.
setClass("SWIGArray", representation(dims = "integer"), contains = "ExternalReference")
setMethod("length", "SWIGArray", function(x) x@dims[1])

# Copy-policy enumeration used when marshalling references.  The codes are
# FALSE = 0, TRUE = 1, DEEP = 2; presumably increasing copy depth — the
# consumers live in compiled code outside this file.
defineEnumeration("SCopyReferences",
                  .values = c( "FALSE" = 0, "TRUE" = 1, "DEEP" = 2))
# Minimal assertion helper: raise an error carrying `message` when
# `condition` does not hold; otherwise return TRUE.
assert =
function(condition, message = "")
{
  if (!condition) {
    stop(message)
  }
  TRUE
}
# Disabled placeholder kept from the SWIG runtime template: a print method
# for closures tagged with the "SWIGFunction" class (see the class(...)
# assignments on the generated wrappers below).  The if(FALSE) guard means
# this block is parsed but never evaluated.
if (FALSE) {
  print.SWIGFunction <-
    function(x, ...)
    {
    }
}
#######################################################################
# Debug helper: fetch the stack of R callback functions registered with the
# SWIG runtime (see R_SWIG_addCallbackFunctionStack below).  Delegates to
# the native routine "R_SWIG_debug_getCallbackFunctionData".
R_SWIG_getCallbackFunctionStack =
function()
{
  # No PACKAGE argument as we don't know what the DLL is.
  .Call("R_SWIG_debug_getCallbackFunctionData")
}
# Push an R function (and optional user data) onto the SWIG runtime's
# callback stack so native code can invoke it later.  Delegates to the
# native routine "R_SWIG_R_pushCallbackFunctionData".
R_SWIG_addCallbackFunctionStack =
function(fun, userData = NULL)
{
  # No PACKAGE argument as we don't know what the DLL is.
  .Call("R_SWIG_R_pushCallbackFunctionData", fun, userData)
}
#######################################################################
# Proxy classes for the HyPhy pointer types.  Each "_p__X" class stands for
# a C++ "X*" handle; the hierarchy mirrors the C++ inheritance, with
# string/number/matrix results all extending _THyPhyReturnObject.
setClass('C++Reference', contains = 'ExternalReference')
setClass('_p__THyPhyReturnObject', contains = 'C++Reference')
setClass('_p__THyPhyString', contains = c('_p__THyPhyReturnObject'))
setClass('_p__THyPhyNumber', contains = c('_p__THyPhyReturnObject'))
setClass('_p__THyPhyMatrix', contains = c('_p__THyPhyReturnObject'))
setClass('_p__THyPhy', contains = 'C++Reference')
# Convenience S4 methods on ExternalReference handles.
#
# `[`   : 1-based R indexing delegated to the object's "__getitem__" hook
#         (0-based on the C side, hence the n - 1 translation).
# `[<-` : element assignment delegated to "__setitem__"; returns `x` so the
#         usual replacement-function contract holds.
# as(x, "character") and print() delegate to the "__str__" hook when the
# object provides one.
#
# Fix: the replacement method iterated with 1:length(i), which evaluates to
# c(1, 0) when the index vector is empty and would then call "__setitem__"
# with bogus positions; seq_along(i) degrades gracefully to no calls.
setMethod('[', "ExternalReference",
          function(x, i, j, ..., drop = TRUE)
            if (!is.null(x$"__getitem__"))
              sapply(i, function(n) x$"__getitem__"(i = as.integer(n - 1))))

setMethod('[<-', "ExternalReference",
          function(x, i, j, ..., value)
            if (!is.null(x$"__setitem__")) {
              sapply(seq_along(i), function(n)
                x$"__setitem__"(i = as.integer(i[n] - 1), x = value[n]))
              x
            })

setAs('ExternalReference', 'character',
      function(from) {if (!is.null(from$"__str__")) from$"__str__"()})

setMethod('print', 'ExternalReference',
          function(x) {print(as(x, "character"))})
# Generated wrapper: query the runtime type tag of a _THyPhyReturnObject.
# `self` is a _p__THyPhyReturnObject handle; `.copy` is coerced to logical
# and forwarded.  Delegates to the compiled routine registered as
# 'R_swig__THyPhyReturnObject_myType' in the 'HyPhy' shared library and
# returns its integer result.
`_THyPhyReturnObject_myType` = function(self, .copy = FALSE)
{
  ;.Call('R_swig__THyPhyReturnObject_myType', self, as.logical(.copy), PACKAGE='HyPhy');
}

# SWIG bookkeeping: record the declared C-side signature and tag the
# closure as a "SWIGFunction" (see the disabled print stub above).
attr(`_THyPhyReturnObject_myType`, 'returnType') = 'integer'
attr(`_THyPhyReturnObject_myType`, "inputTypes") = c('_p__THyPhyReturnObject')
class(`_THyPhyReturnObject_myType`) = c("SWIGFunction", class('_THyPhyReturnObject_myType'))
# Generated wrapper: free the C++ object behind a _p__THyPhyReturnObject
# handle.  Invoked via the `delete` generic (method registered below);
# returns nothing useful.
`delete__THyPhyReturnObject` = function(self)
{
  ;.Call('R_swig_delete__THyPhyReturnObject', self, PACKAGE='HyPhy');
}

attr(`delete__THyPhyReturnObject`, 'returnType') = 'void'
attr(`delete__THyPhyReturnObject`, "inputTypes") = c('_p__THyPhyReturnObject')
class(`delete__THyPhyReturnObject`) = c("SWIGFunction", class('delete__THyPhyReturnObject'))
# Generated wrapper: downcast a generic HyPhy return object to its string
# form.  The external pointer returned from the C layer is re-tagged with
# the proxy class "_p__THyPhyString" so S4 dispatch works on it.
`_THyPhyReturnObject_castToString` = function(self)
{
  ;ans = .Call('R_swig__THyPhyReturnObject_castToString', self, PACKAGE='HyPhy');
  class(ans) <- "_p__THyPhyString";
  ans
}

attr(`_THyPhyReturnObject_castToString`, 'returnType') = '_p__THyPhyString'
attr(`_THyPhyReturnObject_castToString`, "inputTypes") = c('_p__THyPhyReturnObject')
class(`_THyPhyReturnObject_castToString`) = c("SWIGFunction", class('_THyPhyReturnObject_castToString'))
# Generated wrapper: downcast a generic HyPhy return object to its numeric
# form, re-tagging the returned handle as "_p__THyPhyNumber".
`_THyPhyReturnObject_castToNumber` = function(self)
{
  ;ans = .Call('R_swig__THyPhyReturnObject_castToNumber', self, PACKAGE='HyPhy');
  class(ans) <- "_p__THyPhyNumber";
  ans
}

attr(`_THyPhyReturnObject_castToNumber`, 'returnType') = '_p__THyPhyNumber'
attr(`_THyPhyReturnObject_castToNumber`, "inputTypes") = c('_p__THyPhyReturnObject')
class(`_THyPhyReturnObject_castToNumber`) = c("SWIGFunction", class('_THyPhyReturnObject_castToNumber'))
# Generated wrapper: downcast a generic HyPhy return object to its matrix
# form, re-tagging the returned handle as "_p__THyPhyMatrix".
`_THyPhyReturnObject_castToMatrix` = function(self)
{
  ;ans = .Call('R_swig__THyPhyReturnObject_castToMatrix', self, PACKAGE='HyPhy');
  class(ans) <- "_p__THyPhyMatrix";
  ans
}

attr(`_THyPhyReturnObject_castToMatrix`, 'returnType') = '_p__THyPhyMatrix'
attr(`_THyPhyReturnObject_castToMatrix`, "inputTypes") = c('_p__THyPhyReturnObject')
class(`_THyPhyReturnObject_castToMatrix`) = c("SWIGFunction", class('_THyPhyReturnObject_castToMatrix'))
# Accessor dispatch for _THyPhyReturnObject proxies: obj$myType,
# obj$castToString, obj$castToNumber and obj$castToMatrix return the
# matching generated wrapper with its first formal pre-bound to `x`, so the
# result can be called with any remaining arguments.  Names that do not
# (partially) match fall through to the next `$` method.
#
# Fix: the wrapper names begin with an underscore and are therefore not
# syntactically valid bare R symbols — they must be backtick-quoted, as the
# generated definitions above already do.  Without the quoting this
# expression fails to parse ("unexpected input").
setMethod('$', '_p__THyPhyReturnObject', function(x, name)
{
  accessorFuns = list('myType' = `_THyPhyReturnObject_myType`,
                      'castToString' = `_THyPhyReturnObject_castToString`,
                      'castToNumber' = `_THyPhyReturnObject_castToNumber`,
                      'castToMatrix' = `_THyPhyReturnObject_castToMatrix`);
  # pmatch() permits unambiguous partial names, e.g. obj$castToS
  idx = pmatch(name, names(accessorFuns));
  if(is.na(idx))
    return(callNextMethod(x, name));
  f = accessorFuns[[idx]];
  # Pre-bind the receiver as the wrapper's first argument (via its default).
  formals(f)[[1]] = x;
  f;
}
);
# end of accessor method for _THyPhyReturnObject

# `delete` method: explicitly release the underlying C++ object.
setMethod('delete', '_p__THyPhyReturnObject', function(obj) {delete__THyPhyReturnObject(obj)})
# Start of new__THyPhyString
`_THyPhyString__SWIG_0` = function(s_arg1, s_arg2)
{
s_arg1 = as(s_arg1, "character");
s_arg2 = as.integer(s_arg2);
if(length(s_arg2) > 1) {
warning("using only the first element of s_arg2");
};
;ans = .Call('R_swig_new__THyPhyString__SWIG_0', s_arg1, s_arg2, PACKAGE='HyPhy');
class(ans) <- "_p__THyPhyString";
reg.finalizer(ans, delete__THyPhyString)
ans
}
attr(`_THyPhyString__SWIG_0`, 'returnType') = '_p__THyPhyString'
attr(`_THyPhyString__SWIG_0`, "inputTypes") = c('character', 'integer')
class(`_THyPhyString__SWIG_0`) = c("SWIGFunction", class('_THyPhyString__SWIG_0'))
# Start of new__THyPhyString
`_THyPhyString__SWIG_1` = function(s_arg1)
{
s_arg1 = as(s_arg1, "character");
;ans = .Call('R_swig_new__THyPhyString__SWIG_1', s_arg1, PACKAGE='HyPhy');
class(ans) <- "_p__THyPhyString";
reg.finalizer(ans, delete__THyPhyString)
ans
}
attr(`_THyPhyString__SWIG_1`, 'returnType') = '_p__THyPhyString'
attr(`_THyPhyString__SWIG_1`, "inputTypes") = c('character')
class(`_THyPhyString__SWIG_1`) = c("SWIGFunction", class('_THyPhyString__SWIG_1'))
# Start of new__THyPhyString
`_THyPhyString__SWIG_2` = function()
{
;ans = .Call('R_swig_new__THyPhyString__SWIG_2', PACKAGE='HyPhy');
class(ans) <- "_p__THyPhyString";
reg.finalizer(ans, delete__THyPhyString)
ans
}
attr(`_THyPhyString__SWIG_2`, 'returnType') = '_p__THyPhyString'
class(`_THyPhyString__SWIG_2`) = c("SWIGFunction", class('_THyPhyString__SWIG_2'))
# Dispatch wrapper for the overloaded _THyPhyString constructor.  Selects
# the matching SWIG-generated constructor by argument count and class:
#   ()                      -> _THyPhyString__SWIG_2
#   (character)             -> _THyPhyString__SWIG_1
#   (character, integerish) -> _THyPhyString__SWIG_0
`_THyPhyString` <- function(...) {
  argtypes <- mapply(class, list(...));
  argv <- list(...);
  argc <- length(argtypes);
  # dispatch functions 3
  f <- NULL;
  if (argc == 0) {
    f <- _THyPhyString__SWIG_2;
  } else if (argc == 1) {
    if (is.character(argv[[1]])) {
      f <- _THyPhyString__SWIG_1;
    }
  } else if (argc == 2) {
    if (is.character(argv[[1]]) && (is.integer(argv[[2]]) || is.numeric(argv[[2]]))) {
      f <- _THyPhyString__SWIG_0;
    }
  }
  # BUGFIX: the generated code only raised this error for an unexpected
  # argument count; a matching count with mismatched argument types fell
  # through to `f(...)` with `f` unbound ("object 'f' not found").
  if (is.null(f)) {
    stop("cannot find overloaded function for _THyPhyString with argtypes (",toString(argtypes),")");
  }
  f(...);
}
# Dispatch function
# Start of _THyPhyString_myType
`_THyPhyString_myType` = function(self, .copy = FALSE)
{
;.Call('R_swig__THyPhyString_myType', self, as.logical(.copy), PACKAGE='HyPhy');
}
attr(`_THyPhyString_myType`, 'returnType') = 'integer'
attr(`_THyPhyString_myType`, "inputTypes") = c('_p__THyPhyString')
class(`_THyPhyString_myType`) = c("SWIGFunction", class('_THyPhyString_myType'))
# Start of delete__THyPhyString
`delete__THyPhyString` = function(self)
{
;.Call('R_swig_delete__THyPhyString', self, PACKAGE='HyPhy');
}
attr(`delete__THyPhyString`, 'returnType') = 'void'
attr(`delete__THyPhyString`, "inputTypes") = c('_p__THyPhyString')
class(`delete__THyPhyString`) = c("SWIGFunction", class('delete__THyPhyString'))
# Start of _THyPhyString_sLength_set
`_THyPhyString_sLength_set` = function(self, s_sLength)
{
s_sLength = as.integer(s_sLength);
if(length(s_sLength) > 1) {
warning("using only the first element of s_sLength");
};
;.Call('R_swig__THyPhyString_sLength_set', self, s_sLength, PACKAGE='HyPhy');
}
attr(`_THyPhyString_sLength_set`, 'returnType') = 'void'
attr(`_THyPhyString_sLength_set`, "inputTypes") = c('_p__THyPhyString', 'integer')
class(`_THyPhyString_sLength_set`) = c("SWIGFunction", class('_THyPhyString_sLength_set'))
# Start of _THyPhyString_sLength_get
`_THyPhyString_sLength_get` = function(self, .copy = FALSE)
{
;.Call('R_swig__THyPhyString_sLength_get', self, as.logical(.copy), PACKAGE='HyPhy');
}
attr(`_THyPhyString_sLength_get`, 'returnType') = 'integer'
attr(`_THyPhyString_sLength_get`, "inputTypes") = c('_p__THyPhyString')
class(`_THyPhyString_sLength_get`) = c("SWIGFunction", class('_THyPhyString_sLength_get'))
# Start of _THyPhyString_sData_set
`_THyPhyString_sData_set` = function(self, s_sData)
{
s_sData = as(s_sData, "character");
;.Call('R_swig__THyPhyString_sData_set', self, s_sData, PACKAGE='HyPhy');
}
attr(`_THyPhyString_sData_set`, 'returnType') = 'void'
attr(`_THyPhyString_sData_set`, "inputTypes") = c('_p__THyPhyString', 'character')
class(`_THyPhyString_sData_set`) = c("SWIGFunction", class('_THyPhyString_sData_set'))
# Start of _THyPhyString_sData_get
`_THyPhyString_sData_get` = function(self)
{
;.Call('R_swig__THyPhyString_sData_get', self, PACKAGE='HyPhy');
}
attr(`_THyPhyString_sData_get`, 'returnType') = 'character'
attr(`_THyPhyString_sData_get`, "inputTypes") = c('_p__THyPhyString')
class(`_THyPhyString_sData_get`) = c("SWIGFunction", class('_THyPhyString_sData_get'))
# Start of accessor method for _THyPhyString
setMethod('$', '_p__THyPhyString', function(x, name)
{
accessorFuns = list('myType' = _THyPhyString_myType, 'sLength' = _THyPhyString_sLength_get, 'sData' = _THyPhyString_sData_get);
vaccessors = c('sLength', 'sData');
; idx = pmatch(name, names(accessorFuns));
if(is.na(idx))
return(callNextMethod(x, name));
f = accessorFuns[[idx]];
formals(f)[[1]] = x;
if (is.na(match(name, vaccessors))) f else f(x);
}
);
# end of accessor method for _THyPhyString
# Start of accessor method for _THyPhyString
setMethod('$<-', '_p__THyPhyString', function(x, name, value)
{
accessorFuns = list('sLength' = _THyPhyString_sLength_set, 'sData' = _THyPhyString_sData_set);
; idx = pmatch(name, names(accessorFuns));
if(is.na(idx))
return(callNextMethod(x, name, value));
f = accessorFuns[[idx]];
f(x, value);
x;
}
);
setMethod('[[<-', c('_p__THyPhyString', 'character'),function(x, i, j, ..., value)
{
name = i;
accessorFuns = list('sLength' = _THyPhyString_sLength_set, 'sData' = _THyPhyString_sData_set);
; idx = pmatch(name, names(accessorFuns));
if(is.na(idx))
return(callNextMethod(x, name, value));
f = accessorFuns[[idx]];
f(x, value);
x;
}
);
# end of accessor method for _THyPhyString
setMethod('delete', '_p__THyPhyString', function(obj) {delete__THyPhyString(obj)})
# Start of new__THyPhyNumber
`_THyPhyNumber__SWIG_0` = function(s_arg1)
{
;ans = .Call('R_swig_new__THyPhyNumber__SWIG_0', s_arg1, PACKAGE='HyPhy');
class(ans) <- "_p__THyPhyNumber";
reg.finalizer(ans, delete__THyPhyNumber)
ans
}
attr(`_THyPhyNumber__SWIG_0`, 'returnType') = '_p__THyPhyNumber'
attr(`_THyPhyNumber__SWIG_0`, "inputTypes") = c('numeric')
class(`_THyPhyNumber__SWIG_0`) = c("SWIGFunction", class('_THyPhyNumber__SWIG_0'))
# Start of new__THyPhyNumber
`_THyPhyNumber__SWIG_1` = function()
{
;ans = .Call('R_swig_new__THyPhyNumber__SWIG_1', PACKAGE='HyPhy');
class(ans) <- "_p__THyPhyNumber";
reg.finalizer(ans, delete__THyPhyNumber)
ans
}
attr(`_THyPhyNumber__SWIG_1`, 'returnType') = '_p__THyPhyNumber'
class(`_THyPhyNumber__SWIG_1`) = c("SWIGFunction", class('_THyPhyNumber__SWIG_1'))
# Dispatch wrapper for the overloaded _THyPhyNumber constructor:
#   ()        -> _THyPhyNumber__SWIG_1
#   (numeric) -> _THyPhyNumber__SWIG_0
`_THyPhyNumber` <- function(...) {
  argtypes <- mapply(class, list(...));
  argv <- list(...);
  argc <- length(argtypes);
  # dispatch functions 2
  f <- NULL;
  if (argc == 0) {
    f <- _THyPhyNumber__SWIG_1;
  } else if (argc == 1) {
    if (is.numeric(argv[[1]])) {
      f <- _THyPhyNumber__SWIG_0;
    }
  }
  # BUGFIX: a single non-numeric argument previously left `f` unbound and
  # crashed with "object 'f' not found" instead of this diagnostic.
  if (is.null(f)) {
    stop("cannot find overloaded function for _THyPhyNumber with argtypes (",toString(argtypes),")");
  }
  f(...);
}
# Dispatch function
# Start of _THyPhyNumber_myType
`_THyPhyNumber_myType` = function(self, .copy = FALSE)
{
;.Call('R_swig__THyPhyNumber_myType', self, as.logical(.copy), PACKAGE='HyPhy');
}
attr(`_THyPhyNumber_myType`, 'returnType') = 'integer'
attr(`_THyPhyNumber_myType`, "inputTypes") = c('_p__THyPhyNumber')
class(`_THyPhyNumber_myType`) = c("SWIGFunction", class('_THyPhyNumber_myType'))
# Start of delete__THyPhyNumber
`delete__THyPhyNumber` = function(self)
{
;.Call('R_swig_delete__THyPhyNumber', self, PACKAGE='HyPhy');
}
attr(`delete__THyPhyNumber`, 'returnType') = 'void'
attr(`delete__THyPhyNumber`, "inputTypes") = c('_p__THyPhyNumber')
class(`delete__THyPhyNumber`) = c("SWIGFunction", class('delete__THyPhyNumber'))
# Start of _THyPhyNumber_nValue_set
`_THyPhyNumber_nValue_set` = function(self, s_nValue)
{
;.Call('R_swig__THyPhyNumber_nValue_set', self, s_nValue, PACKAGE='HyPhy');
}
attr(`_THyPhyNumber_nValue_set`, 'returnType') = 'void'
attr(`_THyPhyNumber_nValue_set`, "inputTypes") = c('_p__THyPhyNumber', 'numeric')
class(`_THyPhyNumber_nValue_set`) = c("SWIGFunction", class('_THyPhyNumber_nValue_set'))
# Start of _THyPhyNumber_nValue_get
`_THyPhyNumber_nValue_get` = function(self, .copy = FALSE)
{
;.Call('R_swig__THyPhyNumber_nValue_get', self, as.logical(.copy), PACKAGE='HyPhy');
}
attr(`_THyPhyNumber_nValue_get`, 'returnType') = 'numeric'
attr(`_THyPhyNumber_nValue_get`, "inputTypes") = c('_p__THyPhyNumber')
class(`_THyPhyNumber_nValue_get`) = c("SWIGFunction", class('_THyPhyNumber_nValue_get'))
# Start of accessor method for _THyPhyNumber
setMethod('$', '_p__THyPhyNumber', function(x, name)
{
accessorFuns = list('myType' = _THyPhyNumber_myType, 'nValue' = _THyPhyNumber_nValue_get);
vaccessors = c('nValue');
; idx = pmatch(name, names(accessorFuns));
if(is.na(idx))
return(callNextMethod(x, name));
f = accessorFuns[[idx]];
formals(f)[[1]] = x;
if (is.na(match(name, vaccessors))) f else f(x);
}
);
# end of accessor method for _THyPhyNumber
# Start of accessor method for _THyPhyNumber
setMethod('$<-', '_p__THyPhyNumber', function(x, name, value)
{
accessorFuns = list('nValue' = _THyPhyNumber_nValue_set);
; idx = pmatch(name, names(accessorFuns));
if(is.na(idx))
return(callNextMethod(x, name, value));
f = accessorFuns[[idx]];
f(x, value);
x;
}
);
setMethod('[[<-', c('_p__THyPhyNumber', 'character'),function(x, i, j, ..., value)
{
name = i;
accessorFuns = list('nValue' = _THyPhyNumber_nValue_set);
; idx = pmatch(name, names(accessorFuns));
if(is.na(idx))
return(callNextMethod(x, name, value));
f = accessorFuns[[idx]];
f(x, value);
x;
}
);
# end of accessor method for _THyPhyNumber
setMethod('delete', '_p__THyPhyNumber', function(obj) {delete__THyPhyNumber(obj)})
# Start of new__THyPhyMatrix
`_THyPhyMatrix__SWIG_0` = function()
{
;ans = .Call('R_swig_new__THyPhyMatrix__SWIG_0', PACKAGE='HyPhy');
class(ans) <- "_p__THyPhyMatrix";
reg.finalizer(ans, delete__THyPhyMatrix)
ans
}
attr(`_THyPhyMatrix__SWIG_0`, 'returnType') = '_p__THyPhyMatrix'
class(`_THyPhyMatrix__SWIG_0`) = c("SWIGFunction", class('_THyPhyMatrix__SWIG_0'))
# Start of new__THyPhyMatrix
`_THyPhyMatrix__SWIG_1` = function(s_arg1, s_arg2, s_arg3)
{
s_arg1 = as.integer(s_arg1);
if(length(s_arg1) > 1) {
warning("using only the first element of s_arg1");
};
s_arg2 = as.integer(s_arg2);
if(length(s_arg2) > 1) {
warning("using only the first element of s_arg2");
};
;ans = .Call('R_swig_new__THyPhyMatrix__SWIG_1', s_arg1, s_arg2, s_arg3, PACKAGE='HyPhy');
class(ans) <- "_p__THyPhyMatrix";
reg.finalizer(ans, delete__THyPhyMatrix)
ans
}
attr(`_THyPhyMatrix__SWIG_1`, 'returnType') = '_p__THyPhyMatrix'
attr(`_THyPhyMatrix__SWIG_1`, "inputTypes") = c('integer', 'integer', 'numeric')
class(`_THyPhyMatrix__SWIG_1`) = c("SWIGFunction", class('_THyPhyMatrix__SWIG_1'))
# Dispatch wrapper for the overloaded _THyPhyMatrix constructor:
#   ()                                   -> _THyPhyMatrix__SWIG_0
#   (integerish, integerish, numeric)    -> _THyPhyMatrix__SWIG_1
`_THyPhyMatrix` <- function(...) {
  argtypes <- mapply(class, list(...));
  argv <- list(...);
  argc <- length(argtypes);
  # dispatch functions 2
  f <- NULL;
  if (argc == 0) {
    f <- _THyPhyMatrix__SWIG_0;
  } else if (argc == 3) {
    if ((is.integer(argv[[1]]) || is.numeric(argv[[1]])) && (is.integer(argv[[2]]) || is.numeric(argv[[2]])) && is.numeric(argv[[3]])) {
      f <- _THyPhyMatrix__SWIG_1;
    }
  }
  # BUGFIX: three arguments of the wrong types previously left `f` unbound
  # and crashed with "object 'f' not found" instead of this diagnostic.
  if (is.null(f)) {
    stop("cannot find overloaded function for _THyPhyMatrix with argtypes (",toString(argtypes),")");
  }
  f(...);
}
# Dispatch function
# Start of _THyPhyMatrix_myType
`_THyPhyMatrix_myType` = function(self, .copy = FALSE)
{
;.Call('R_swig__THyPhyMatrix_myType', self, as.logical(.copy), PACKAGE='HyPhy');
}
attr(`_THyPhyMatrix_myType`, 'returnType') = 'integer'
attr(`_THyPhyMatrix_myType`, "inputTypes") = c('_p__THyPhyMatrix')
class(`_THyPhyMatrix_myType`) = c("SWIGFunction", class('_THyPhyMatrix_myType'))
# Start of delete__THyPhyMatrix
`delete__THyPhyMatrix` = function(self)
{
;.Call('R_swig_delete__THyPhyMatrix', self, PACKAGE='HyPhy');
}
attr(`delete__THyPhyMatrix`, 'returnType') = 'void'
attr(`delete__THyPhyMatrix`, "inputTypes") = c('_p__THyPhyMatrix')
class(`delete__THyPhyMatrix`) = c("SWIGFunction", class('delete__THyPhyMatrix'))
# Start of _THyPhyMatrix_MatrixCell
# SWIG wrapper for the compiled entry point R_swig__THyPhyMatrix_MatrixCell:
# returns the value stored at row s_arg2, column s_arg3 of the matrix proxy
# `self`.  `.copy` is coerced to logical and forwarded to the C wrapper
# (presumably controls whether the result is duplicated -- TODO confirm
# against the SWIG R runtime).
`_THyPhyMatrix_MatrixCell` = function(self, s_arg2, s_arg3, .copy = FALSE)
{
# Indices are coerced to scalar integers; extra elements are dropped with a
# warning rather than an error.
s_arg2 = as.integer(s_arg2);
if(length(s_arg2) > 1) {
warning("using only the first element of s_arg2");
};
s_arg3 = as.integer(s_arg3);
if(length(s_arg3) > 1) {
warning("using only the first element of s_arg3");
};
;.Call('R_swig__THyPhyMatrix_MatrixCell', self, s_arg2, s_arg3, as.logical(.copy), PACKAGE='HyPhy');
}
attr(`_THyPhyMatrix_MatrixCell`, 'returnType') = 'numeric'
attr(`_THyPhyMatrix_MatrixCell`, "inputTypes") = c('_p__THyPhyMatrix', 'integer', 'integer')
class(`_THyPhyMatrix_MatrixCell`) = c("SWIGFunction", class('_THyPhyMatrix_MatrixCell'))
# Start of _THyPhyMatrix_mRows_set
`_THyPhyMatrix_mRows_set` = function(self, s_mRows)
{
s_mRows = as.integer(s_mRows);
if(length(s_mRows) > 1) {
warning("using only the first element of s_mRows");
};
;.Call('R_swig__THyPhyMatrix_mRows_set', self, s_mRows, PACKAGE='HyPhy');
}
attr(`_THyPhyMatrix_mRows_set`, 'returnType') = 'void'
attr(`_THyPhyMatrix_mRows_set`, "inputTypes") = c('_p__THyPhyMatrix', 'integer')
class(`_THyPhyMatrix_mRows_set`) = c("SWIGFunction", class('_THyPhyMatrix_mRows_set'))
# Start of _THyPhyMatrix_mRows_get
`_THyPhyMatrix_mRows_get` = function(self, .copy = FALSE)
{
;.Call('R_swig__THyPhyMatrix_mRows_get', self, as.logical(.copy), PACKAGE='HyPhy');
}
attr(`_THyPhyMatrix_mRows_get`, 'returnType') = 'integer'
attr(`_THyPhyMatrix_mRows_get`, "inputTypes") = c('_p__THyPhyMatrix')
class(`_THyPhyMatrix_mRows_get`) = c("SWIGFunction", class('_THyPhyMatrix_mRows_get'))
# Start of _THyPhyMatrix_mCols_set
`_THyPhyMatrix_mCols_set` = function(self, s_mCols)
{
s_mCols = as.integer(s_mCols);
if(length(s_mCols) > 1) {
warning("using only the first element of s_mCols");
};
;.Call('R_swig__THyPhyMatrix_mCols_set', self, s_mCols, PACKAGE='HyPhy');
}
attr(`_THyPhyMatrix_mCols_set`, 'returnType') = 'void'
attr(`_THyPhyMatrix_mCols_set`, "inputTypes") = c('_p__THyPhyMatrix', 'integer')
class(`_THyPhyMatrix_mCols_set`) = c("SWIGFunction", class('_THyPhyMatrix_mCols_set'))
# Start of _THyPhyMatrix_mCols_get
`_THyPhyMatrix_mCols_get` = function(self, .copy = FALSE)
{
;.Call('R_swig__THyPhyMatrix_mCols_get', self, as.logical(.copy), PACKAGE='HyPhy');
}
attr(`_THyPhyMatrix_mCols_get`, 'returnType') = 'integer'
attr(`_THyPhyMatrix_mCols_get`, "inputTypes") = c('_p__THyPhyMatrix')
class(`_THyPhyMatrix_mCols_get`) = c("SWIGFunction", class('_THyPhyMatrix_mCols_get'))
# Start of _THyPhyMatrix_mData_set
`_THyPhyMatrix_mData_set` = function(self, s_mData)
{
;.Call('R_swig__THyPhyMatrix_mData_set', self, s_mData, PACKAGE='HyPhy');
}
attr(`_THyPhyMatrix_mData_set`, 'returnType') = 'void'
attr(`_THyPhyMatrix_mData_set`, "inputTypes") = c('_p__THyPhyMatrix', 'numeric')
class(`_THyPhyMatrix_mData_set`) = c("SWIGFunction", class('_THyPhyMatrix_mData_set'))
# Start of _THyPhyMatrix_mData_get
`_THyPhyMatrix_mData_get` = function(self)
{
;ans = .Call('R_swig__THyPhyMatrix_mData_get', self, PACKAGE='HyPhy');
class(ans) <- "_p_double";
ans
}
attr(`_THyPhyMatrix_mData_get`, 'returnType') = 'numeric'
attr(`_THyPhyMatrix_mData_get`, "inputTypes") = c('_p__THyPhyMatrix')
class(`_THyPhyMatrix_mData_get`) = c("SWIGFunction", class('_THyPhyMatrix_mData_get'))
# Start of accessor method for _THyPhyMatrix
setMethod('$', '_p__THyPhyMatrix', function(x, name)
{
accessorFuns = list('myType' = _THyPhyMatrix_myType, 'MatrixCell' = _THyPhyMatrix_MatrixCell, 'mRows' = _THyPhyMatrix_mRows_get, 'mCols' = _THyPhyMatrix_mCols_get, 'mData' = _THyPhyMatrix_mData_get);
vaccessors = c('mRows', 'mCols', 'mData');
; idx = pmatch(name, names(accessorFuns));
if(is.na(idx))
return(callNextMethod(x, name));
f = accessorFuns[[idx]];
formals(f)[[1]] = x;
if (is.na(match(name, vaccessors))) f else f(x);
}
);
# end of accessor method for _THyPhyMatrix
# Start of accessor method for _THyPhyMatrix
setMethod('$<-', '_p__THyPhyMatrix', function(x, name, value)
{
accessorFuns = list('mRows' = _THyPhyMatrix_mRows_set, 'mCols' = _THyPhyMatrix_mCols_set, 'mData' = _THyPhyMatrix_mData_set);
; idx = pmatch(name, names(accessorFuns));
if(is.na(idx))
return(callNextMethod(x, name, value));
f = accessorFuns[[idx]];
f(x, value);
x;
}
);
setMethod('[[<-', c('_p__THyPhyMatrix', 'character'),function(x, i, j, ..., value)
{
name = i;
accessorFuns = list('mRows' = _THyPhyMatrix_mRows_set, 'mCols' = _THyPhyMatrix_mCols_set, 'mData' = _THyPhyMatrix_mData_set);
; idx = pmatch(name, names(accessorFuns));
if(is.na(idx))
return(callNextMethod(x, name, value));
f = accessorFuns[[idx]];
f(x, value);
x;
}
);
# end of accessor method for _THyPhyMatrix
setMethod('delete', '_p__THyPhyMatrix', function(obj) {delete__THyPhyMatrix(obj)})
# Start of new__THyPhy
`_THyPhy__SWIG_0` = function(s_arg1, s_arg2, s_arg3)
{
s_arg2 = as(s_arg2, "character");
s_arg3 = as.integer(s_arg3);
if(length(s_arg3) > 1) {
warning("using only the first element of s_arg3");
};
;ans = .Call('R_swig_new__THyPhy__SWIG_0', s_arg1, s_arg2, s_arg3, PACKAGE='HyPhy');
class(ans) <- "_p__THyPhy";
reg.finalizer(ans, delete__THyPhy)
ans
}
attr(`_THyPhy__SWIG_0`, 'returnType') = '_p__THyPhy'
attr(`_THyPhy__SWIG_0`, "inputTypes") = c('_p_f_p_char_int_double__bool', 'character', 'integer')
class(`_THyPhy__SWIG_0`) = c("SWIGFunction", class('_THyPhy__SWIG_0'))
# Start of new__THyPhy
`_THyPhy__SWIG_1` = function(s_arg1, s_arg2)
{
s_arg2 = as(s_arg2, "character");
;ans = .Call('R_swig_new__THyPhy__SWIG_1', s_arg1, s_arg2, PACKAGE='HyPhy');
class(ans) <- "_p__THyPhy";
reg.finalizer(ans, delete__THyPhy)
ans
}
attr(`_THyPhy__SWIG_1`, 'returnType') = '_p__THyPhy'
attr(`_THyPhy__SWIG_1`, "inputTypes") = c('_p_f_p_char_int_double__bool', 'character')
class(`_THyPhy__SWIG_1`) = c("SWIGFunction", class('_THyPhy__SWIG_1'))
# Start of new__THyPhy
`_THyPhy__SWIG_2` = function(s_arg1, s_arg2)
{
s_arg1 = as(s_arg1, "character");
s_arg2 = as.integer(s_arg2);
if(length(s_arg2) > 1) {
warning("using only the first element of s_arg2");
};
;ans = .Call('R_swig_new__THyPhy__SWIG_2', s_arg1, s_arg2, PACKAGE='HyPhy');
class(ans) <- "_p__THyPhy";
reg.finalizer(ans, delete__THyPhy)
ans
}
attr(`_THyPhy__SWIG_2`, 'returnType') = '_p__THyPhy'
attr(`_THyPhy__SWIG_2`, "inputTypes") = c('character', 'integer')
class(`_THyPhy__SWIG_2`) = c("SWIGFunction", class('_THyPhy__SWIG_2'))
# Start of new__THyPhy
`_THyPhy__SWIG_3` = function(s_arg1)
{
s_arg1 = as(s_arg1, "character");
;ans = .Call('R_swig_new__THyPhy__SWIG_3', s_arg1, PACKAGE='HyPhy');
class(ans) <- "_p__THyPhy";
reg.finalizer(ans, delete__THyPhy)
ans
}
attr(`_THyPhy__SWIG_3`, 'returnType') = '_p__THyPhy'
attr(`_THyPhy__SWIG_3`, "inputTypes") = c('character')
class(`_THyPhy__SWIG_3`) = c("SWIGFunction", class('_THyPhy__SWIG_3'))
# Dispatch wrapper for the overloaded _THyPhy constructor:
#   (character)                       -> _THyPhy__SWIG_3
#   (callback, character)             -> _THyPhy__SWIG_1
#   (character, integerish)           -> _THyPhy__SWIG_2
#   (callback, character, integerish) -> _THyPhy__SWIG_0
# where "callback" is the SWIG pointer class _p_f_p_char_int_double__bool.
`_THyPhy` <- function(...) {
  argtypes <- mapply(class, list(...));
  argv <- list(...);
  argc <- length(argtypes);
  # dispatch functions 4
  f <- NULL;
  if (argc == 1) {
    if (is.character(argv[[1]])) {
      f <- _THyPhy__SWIG_3;
    }
  } else if (argc == 2) {
    if (extends(argtypes[1], '_p_f_p_char_int_double__bool') && is.character(argv[[2]])) {
      f <- _THyPhy__SWIG_1;
    }
    else if (is.character(argv[[1]]) && (is.integer(argv[[2]]) || is.numeric(argv[[2]]))) {
      f <- _THyPhy__SWIG_2;
    }
  } else if (argc == 3) {
    if (extends(argtypes[1], '_p_f_p_char_int_double__bool') && is.character(argv[[2]]) && (is.integer(argv[[3]]) || is.numeric(argv[[3]]))) {
      f <- _THyPhy__SWIG_0;
    }
  }
  # BUGFIX: argument counts of 1, 2 or 3 with non-matching types previously
  # left `f` unbound and crashed with "object 'f' not found" instead of
  # raising this diagnostic.
  if (is.null(f)) {
    stop("cannot find overloaded function for _THyPhy with argtypes (",toString(argtypes),")");
  }
  f(...);
}
# Dispatch function
# Start of delete__THyPhy
`delete__THyPhy` = function(self)
{
;.Call('R_swig_delete__THyPhy', self, PACKAGE='HyPhy');
}
attr(`delete__THyPhy`, 'returnType') = 'void'
attr(`delete__THyPhy`, "inputTypes") = c('_p__THyPhy')
class(`delete__THyPhy`) = c("SWIGFunction", class('delete__THyPhy'))
# Start of _THyPhy_ExecuteBF
`_THyPhy_ExecuteBF__SWIG_0` = function(self, s_arg2, s_arg3)
{
s_arg2 = as(s_arg2, "character");
s_arg3 = as.logical(s_arg3);
;ans = .Call('R_swig__THyPhy_ExecuteBF__SWIG_0', self, s_arg2, s_arg3, PACKAGE='HyPhy');
class(ans) <- "_p__THyPhyString";
ans
}
attr(`_THyPhy_ExecuteBF__SWIG_0`, 'returnType') = '_p__THyPhyString'
attr(`_THyPhy_ExecuteBF__SWIG_0`, "inputTypes") = c('_p__THyPhy', 'character', 'logical')
class(`_THyPhy_ExecuteBF__SWIG_0`) = c("SWIGFunction", class('_THyPhy_ExecuteBF__SWIG_0'))
# Start of _THyPhy_ExecuteBF
`_THyPhy_ExecuteBF__SWIG_1` = function(self, s_arg2)
{
s_arg2 = as(s_arg2, "character");
;ans = .Call('R_swig__THyPhy_ExecuteBF__SWIG_1', self, s_arg2, PACKAGE='HyPhy');
class(ans) <- "_p__THyPhyString";
ans
}
attr(`_THyPhy_ExecuteBF__SWIG_1`, 'returnType') = '_p__THyPhyString'
attr(`_THyPhy_ExecuteBF__SWIG_1`, "inputTypes") = c('_p__THyPhy', 'character')
class(`_THyPhy_ExecuteBF__SWIG_1`) = c("SWIGFunction", class('_THyPhy_ExecuteBF__SWIG_1'))
# Dispatch wrapper for the overloaded ExecuteBF method:
#   (self, character)          -> _THyPhy_ExecuteBF__SWIG_1
#   (self, character, logical) -> _THyPhy_ExecuteBF__SWIG_0
`_THyPhy_ExecuteBF` <- function(...) {
  argtypes <- mapply(class, list(...));
  argv <- list(...);
  argc <- length(argtypes);
  # dispatch functions 2
  f <- NULL;
  if (argc == 2) {
    if (extends(argtypes[1], '_p__THyPhy') && is.character(argv[[2]])) {
      f <- _THyPhy_ExecuteBF__SWIG_1;
    }
  } else if (argc == 3) {
    if (extends(argtypes[1], '_p__THyPhy') && is.character(argv[[2]]) && extends(argtypes[3], 'logical')) {
      f <- _THyPhy_ExecuteBF__SWIG_0;
    }
  }
  # BUGFIX: two or three arguments of the wrong types previously left `f`
  # unbound and crashed with "object 'f' not found" instead of this error.
  if (is.null(f)) {
    stop("cannot find overloaded function for _THyPhy_ExecuteBF with argtypes (",toString(argtypes),")");
  }
  f(...);
}
# Dispatch function
# Start of _THyPhy_InitTHyPhy
`_THyPhy_InitTHyPhy` = function(self, s_arg2, s_arg3, s_arg4)
{
s_arg3 = as(s_arg3, "character");
s_arg4 = as.integer(s_arg4);
if(length(s_arg4) > 1) {
warning("using only the first element of s_arg4");
};
;.Call('R_swig__THyPhy_InitTHyPhy', self, s_arg2, s_arg3, s_arg4, PACKAGE='HyPhy');
}
attr(`_THyPhy_InitTHyPhy`, 'returnType') = 'void'
attr(`_THyPhy_InitTHyPhy`, "inputTypes") = c('_p__THyPhy', '_p_f_p_char_int_double__bool', 'character', 'integer')
class(`_THyPhy_InitTHyPhy`) = c("SWIGFunction", class('_THyPhy_InitTHyPhy'))
# Start of _THyPhy_ClearAll
`_THyPhy_ClearAll` = function(self)
{
;.Call('R_swig__THyPhy_ClearAll', self, PACKAGE='HyPhy');
}
attr(`_THyPhy_ClearAll`, 'returnType') = 'void'
attr(`_THyPhy_ClearAll`, "inputTypes") = c('_p__THyPhy')
class(`_THyPhy_ClearAll`) = c("SWIGFunction", class('_THyPhy_ClearAll'))
# Start of _THyPhy_AskFor
`_THyPhy_AskFor` = function(self, s_arg2)
{
s_arg2 = as(s_arg2, "character");
;ans = .Call('R_swig__THyPhy_AskFor', self, s_arg2, PACKAGE='HyPhy');
class(ans) <- "_p_void";
ans
}
attr(`_THyPhy_AskFor`, 'returnType') = '_p_void'
attr(`_THyPhy_AskFor`, "inputTypes") = c('_p__THyPhy', 'character')
class(`_THyPhy_AskFor`) = c("SWIGFunction", class('_THyPhy_AskFor'))
# Start of _THyPhy_DumpResult
`_THyPhy_DumpResult` = function(self, s_arg2)
{
;.Call('R_swig__THyPhy_DumpResult', self, s_arg2, PACKAGE='HyPhy');
}
attr(`_THyPhy_DumpResult`, 'returnType') = 'void'
attr(`_THyPhy_DumpResult`, "inputTypes") = c('_p__THyPhy', '_p_void')
class(`_THyPhy_DumpResult`) = c("SWIGFunction", class('_THyPhy_DumpResult'))
# Start of _THyPhy_CanCast
`_THyPhy_CanCast` = function(self, s_arg2, s_arg3, .copy = FALSE)
{
s_arg3 = as.integer(s_arg3);
if(length(s_arg3) > 1) {
warning("using only the first element of s_arg3");
};
;.Call('R_swig__THyPhy_CanCast', self, s_arg2, s_arg3, as.logical(.copy), PACKAGE='HyPhy');
}
attr(`_THyPhy_CanCast`, 'returnType') = 'logical'
attr(`_THyPhy_CanCast`, "inputTypes") = c('_p__THyPhy', '_p_void', 'integer')
class(`_THyPhy_CanCast`) = c("SWIGFunction", class('_THyPhy_CanCast'))
# Start of _THyPhy_CastResult
`_THyPhy_CastResult` = function(self, s_arg2, s_arg3)
{
s_arg3 = as.integer(s_arg3);
if(length(s_arg3) > 1) {
warning("using only the first element of s_arg3");
};
;ans = .Call('R_swig__THyPhy_CastResult', self, s_arg2, s_arg3, PACKAGE='HyPhy');
class(ans) <- "_p__THyPhyReturnObject";
ans
}
attr(`_THyPhy_CastResult`, 'returnType') = '_p__THyPhyReturnObject'
attr(`_THyPhy_CastResult`, "inputTypes") = c('_p__THyPhy', '_p_void', 'integer')
class(`_THyPhy_CastResult`) = c("SWIGFunction", class('_THyPhy_CastResult'))
# Start of _THyPhy_SetCallbackHandler
`_THyPhy_SetCallbackHandler` = function(self, s_arg2)
{
;.Call('R_swig__THyPhy_SetCallbackHandler', self, s_arg2, PACKAGE='HyPhy');
}
attr(`_THyPhy_SetCallbackHandler`, 'returnType') = 'void'
attr(`_THyPhy_SetCallbackHandler`, "inputTypes") = c('_p__THyPhy', '_p_f_p_char_int_double__bool')
class(`_THyPhy_SetCallbackHandler`) = c("SWIGFunction", class('_THyPhy_SetCallbackHandler'))
# Start of _THyPhy_GetCallbackHandler
`_THyPhy_GetCallbackHandler` = function(self)
{
;ans = .Call('R_swig__THyPhy_GetCallbackHandler', self, PACKAGE='HyPhy');
class(ans) <- "_p_f_p_char_int_double__bool";
ans
}
attr(`_THyPhy_GetCallbackHandler`, 'returnType') = '_p_f_p_char_int_double__bool'
attr(`_THyPhy_GetCallbackHandler`, "inputTypes") = c('_p__THyPhy')
class(`_THyPhy_GetCallbackHandler`) = c("SWIGFunction", class('_THyPhy_GetCallbackHandler'))
# Start of _THyPhy_GetWarnings
`_THyPhy_GetWarnings` = function(self)
{
;ans = .Call('R_swig__THyPhy_GetWarnings', self, PACKAGE='HyPhy');
class(ans) <- "_p__THyPhyString";
ans
}
attr(`_THyPhy_GetWarnings`, 'returnType') = '_p__THyPhyString'
attr(`_THyPhy_GetWarnings`, "inputTypes") = c('_p__THyPhy')
class(`_THyPhy_GetWarnings`) = c("SWIGFunction", class('_THyPhy_GetWarnings'))
# Start of _THyPhy_GetErrors
`_THyPhy_GetErrors` = function(self)
{
;ans = .Call('R_swig__THyPhy_GetErrors', self, PACKAGE='HyPhy');
class(ans) <- "_p__THyPhyString";
ans
}
attr(`_THyPhy_GetErrors`, 'returnType') = '_p__THyPhyString'
attr(`_THyPhy_GetErrors`, "inputTypes") = c('_p__THyPhy')
class(`_THyPhy_GetErrors`) = c("SWIGFunction", class('_THyPhy_GetErrors'))
# Start of _THyPhy_GetStdout
`_THyPhy_GetStdout` = function(self)
{
;ans = .Call('R_swig__THyPhy_GetStdout', self, PACKAGE='HyPhy');
class(ans) <- "_p__THyPhyString";
ans
}
attr(`_THyPhy_GetStdout`, 'returnType') = '_p__THyPhyString'
attr(`_THyPhy_GetStdout`, "inputTypes") = c('_p__THyPhy')
class(`_THyPhy_GetStdout`) = c("SWIGFunction", class('_THyPhy_GetStdout'))
# Start of _THyPhy_PushWarning
`_THyPhy_PushWarning` = function(self, s_arg2)
{
;.Call('R_swig__THyPhy_PushWarning', self, s_arg2, PACKAGE='HyPhy');
}
attr(`_THyPhy_PushWarning`, 'returnType') = 'void'
attr(`_THyPhy_PushWarning`, "inputTypes") = c('_p__THyPhy', '_p_void')
class(`_THyPhy_PushWarning`) = c("SWIGFunction", class('_THyPhy_PushWarning'))
# Start of _THyPhy_PushError
`_THyPhy_PushError` = function(self, s_arg2)
{
;.Call('R_swig__THyPhy_PushError', self, s_arg2, PACKAGE='HyPhy');
}
attr(`_THyPhy_PushError`, 'returnType') = 'void'
attr(`_THyPhy_PushError`, "inputTypes") = c('_p__THyPhy', '_p_void')
class(`_THyPhy_PushError`) = c("SWIGFunction", class('_THyPhy_PushError'))
# Start of _THyPhy_PushOutString
`_THyPhy_PushOutString` = function(self, s_arg2)
{
;.Call('R_swig__THyPhy_PushOutString', self, s_arg2, PACKAGE='HyPhy');
}
attr(`_THyPhy_PushOutString`, 'returnType') = 'void'
attr(`_THyPhy_PushOutString`, "inputTypes") = c('_p__THyPhy', '_p_void')
class(`_THyPhy_PushOutString`) = c("SWIGFunction", class('_THyPhy_PushOutString'))
# Start of accessor method for _THyPhy
setMethod('$', '_p__THyPhy', function(x, name)
{
accessorFuns = list('ExecuteBF' = _THyPhy_ExecuteBF, 'InitTHyPhy' = _THyPhy_InitTHyPhy, 'ClearAll' = _THyPhy_ClearAll, 'AskFor' = _THyPhy_AskFor, 'DumpResult' = _THyPhy_DumpResult, 'CanCast' = _THyPhy_CanCast, 'CastResult' = _THyPhy_CastResult, 'SetCallbackHandler' = _THyPhy_SetCallbackHandler, 'GetCallbackHandler' = _THyPhy_GetCallbackHandler, 'GetWarnings' = _THyPhy_GetWarnings, 'GetErrors' = _THyPhy_GetErrors, 'GetStdout' = _THyPhy_GetStdout, 'PushWarning' = _THyPhy_PushWarning, 'PushError' = _THyPhy_PushError, 'PushOutString' = _THyPhy_PushOutString);
; idx = pmatch(name, names(accessorFuns));
if(is.na(idx))
return(callNextMethod(x, name));
f = accessorFuns[[idx]];
formals(f)[[1]] = x;
f;
}
);
# end of accessor method for _THyPhy
setMethod('delete', '_p__THyPhy', function(obj) {delete__THyPhy(obj)})
# Start of _THyPhyGetLongStatus
`_THyPhyGetLongStatus` = function(.copy = FALSE)
{
;.Call('R_swig__THyPhyGetLongStatus', as.logical(.copy), PACKAGE='HyPhy');
}
attr(`_THyPhyGetLongStatus`, 'returnType') = 'integer'
class(`_THyPhyGetLongStatus`) = c("SWIGFunction", class('_THyPhyGetLongStatus'))
# Start of _THyPhyGetStringStatus
`_THyPhyGetStringStatus` = function()
{
;.Call('R_swig__THyPhyGetStringStatus', PACKAGE='HyPhy');
}
attr(`_THyPhyGetStringStatus`, 'returnType') = 'character'
class(`_THyPhyGetStringStatus`) = c("SWIGFunction", class('_THyPhyGetStringStatus'))
# Start of _THyPhyGetDoubleStatus
`_THyPhyGetDoubleStatus` = function(.copy = FALSE)
{
;.Call('R_swig__THyPhyGetDoubleStatus', as.logical(.copy), PACKAGE='HyPhy');
}
attr(`_THyPhyGetDoubleStatus`, 'returnType') = 'numeric'
class(`_THyPhyGetDoubleStatus`) = c("SWIGFunction", class('_THyPhyGetDoubleStatus'))
# Start of globalInterfaceInstance_set
`globalInterfaceInstance_set` = function(s_globalInterfaceInstance)
{
;.Call('R_swig_globalInterfaceInstance_set', s_globalInterfaceInstance, PACKAGE='HyPhy');
}
attr(`globalInterfaceInstance_set`, 'returnType') = 'void'
attr(`globalInterfaceInstance_set`, "inputTypes") = c('_p__THyPhy')
class(`globalInterfaceInstance_set`) = c("SWIGFunction", class('globalInterfaceInstance_set'))
# Start of globalInterfaceInstance_get
`globalInterfaceInstance_get` = function()
{
;ans = .Call('R_swig_globalInterfaceInstance_get', PACKAGE='HyPhy');
class(ans) <- "_p__THyPhy";
ans
}
attr(`globalInterfaceInstance_get`, 'returnType') = '_p__THyPhy'
class(`globalInterfaceInstance_get`) = c("SWIGFunction", class('globalInterfaceInstance_get'))
# Combined accessor for the package-global interface instance: called with
# no argument it returns the current instance; called with a value it
# installs that value as the new instance.
globalInterfaceInstance <- function(value) {
  if (missing(value)) {
    return(globalInterfaceInstance_get())
  }
  globalInterfaceInstance_set(value)
}
|
36e05b6edd42097e79645d2496e8c76f40bb1a7c | 96e7b59e6d92c8ee06cb22ff3711299eec7e7834 | /ui.R | e2e623dee6bb6414bf969e4a2ac4e56284119768 | [] | no_license | rflsierra/DevelopingDataProducts-PeerAssessment | 270293b38bd750c41b6aa0a7996b899bc3cfb160 | 61a65e2788efb479ea207c53145935e6efbe92a6 | refs/heads/master | 2021-01-10T05:10:44.258322 | 2016-01-30T15:16:48 | 2016-01-30T15:16:48 | 50,710,538 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 3,509 | r | ui.R | library(shiny)
library(shiny)

# Shiny UI for the "Developing Data Products" project: a two-tab navbar page.
# Tab 1 documents what the app does and how to use it; tab 2 collects the
# statistical inputs (error, confidence, universe) and budget inputs (cost per
# interview, total budget) and shows the computed sample size plus two plots.
# BUGFIX: removed the trailing data-extraction artifact ("|") that was fused
# onto the closing parenthesis and made the file unparsable, and restored the
# library(shiny) line that had been swallowed by the surrounding metadata.
ui <- navbarPage(
  "Developing Data Products' Project",
  tabPanel("Instructions",
    fluidRow(),
    tags$h2("What this app does:"),
    tags$p("This app calculates the required amount of sample cases to be applied in a poll in order to be able to fill three requirements that are defined by the user: max error allowed, level of confidence and the size of the Universe (the proportion has been set as a fixed value in 50%)."),
    tags$p("The user may also have the opportunity to study the costs of the poll required with the number of sample cases determined, and compare it to the available budget in each case."),
    tags$p("There are two outputs:"),
    tags$p("1. A normal distribution simulating the expected behaviour of the sample size determined, showing the standar deviations that apply for each set of inputs."),
    tags$p("2. A x-y plot showing the costs associated with the number of cases to be applied and the cost per case accompanied by two lines showing the required sample size and total budget available, to understand the economical feasibility of the proposed sample size given the requirements of the user."),
    fluidRow(),
    tags$h2("How to use this app:"),
    tags$p("1. Choose values for the margin of error (min = 0.5%, max = 10.0%), for the confidence level (just 3 options, 90%, 95% and 99%), and the size of the Universe (min = 10000, max = 100000)."),
    tags$p("2. Choose values for the cost of each interview in the poll (from 10 to 100 USD) and the complete budget (from 500 to 1000000 USD)."),
    tags$p("3. Review the results in the two plots provided."),
    tags$p("4. Determine the gaps from the ideal situation for the user."),
    tags$p("5. Adjust the inputs and retry until satisfied with the result."),
    fluidRow(),
    tags$h2("Github Repository:"),
    tags$p("https://github.com/rflsierra/DevelopingDataProducts-PeerAssessment")
  ),
  tabPanel("Determine a poll's sample size",
    tags$h3("Sample's statistical requirements"),
    fluidRow(
      column(4, sliderInput(inputId = "error", label = "Desired error (%)", min = 0.5, max = 10.0, step = 0.1, value = 0.5)),
      column(4, selectInput(inputId = "confidence", label = "Confidence level (%)", choices = c("90%" = 1.645, "95%" = 1.96, "99%" = 2.575))),
      column(4, sliderInput(inputId = "universe", label = "Universe", min = 10000, max = 100000, step = 100, value = 10000))
    ),
    fluidRow(),
    tags$h3("Sample's budgetary requirements"),
    fluidRow(
      column(4, numericInput(inputId = "cost", label = "Cost per interview (USD)", min = 10, max = 100, step = 5, value = 10)),
      column(4, numericInput(inputId = "budget", label = "Total available budget (USD)", min = 500, max = 1000000, step = 500, value = 70000))
    ),
    fluidRow(),
    tags$h4("Determined sample size"),
    textOutput(outputId = "samplecalc"),
    fluidRow(
      column(7, plotOutput(outputId = "sample")),
      column(5, plotOutput(outputId = "breakeven"))
    )
  )
)
a0334463c42f0ba49d28de428542ebf3cc4f58b0 | 9883964ebda2b4d9cb5979be66f91406c8da247b | /cachematrix.R | b6ceb4afdd3dcdc453e5b542c59c2e377ae65a07 | [] | no_license | LesEvans/ProgrammingAssignment2 | 6d8bd1a93174be42a5bb88bedf8ad0d086481444 | 9836243aa1c81926d026423cadf82090a7a566ca | refs/heads/master | 2020-12-30T17:20:26.454613 | 2014-07-13T10:00:03 | 2014-07-13T10:00:03 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 876 | r | cachematrix.R | ##Acting as a pair the first function creates a square matrix and its inverse.
## The second returns the inverse from its cache.
## Creates a square x-by-x matrix with Poisson(20) entries and exposes
## getters/setters so the inverse can be computed once and cached.
##
## Fixes relative to the original:
##  * `setinverse` is now a setter function. It previously held an eagerly
##    computed inverse, so the cache variable `i` was never used and the
##    `x$setinverse(m)` call in cacheSolve() would have failed.
##  * `getinverse` now returns the cached inverse `i`, not the matrix itself.
makeCacheMatrix <- function(x = matrix()) {
  # x is the desired dimension of the square matrix (a positive integer)
  m <- matrix(rpois(x^2, 20), x, x)
  i <- NULL  # cached inverse; NULL until cacheSolve() stores it
  set <- function(y) {
    m <<- y
    i <<- NULL  # replacing the matrix invalidates the cached inverse
  }
  get <- function() m
  setinverse <- function(inverse) i <<- inverse
  getinverse <- function() i
  list(set = set, get = get,
       setinverse = setinverse,
       getinverse = getinverse)
}
## Returns the inverse of the matrix held by `x` (a makeCacheMatrix object),
## computing it with solve() on the first call and serving the cached value
## on subsequent calls. Extra arguments are forwarded to solve().
cacheSolve <- function(x, ...) {
  ## Return a matrix that is the inverse of 'x'
  i <- x$getinverse()
  if (!is.null(i)) {
    message("getting cached data")
    return(i)
  }
  data <- x$get()
  i <- solve(data, ...)
  x$setinverse(i)  # store for future calls
  i
}
|
af45344be0d3e313c4e3a0611e025d429a061588 | db83b472fa766185bf16cf50dbacb867f913947c | /mart_range.R | 120f4951a1a6fa682c0d8dc8bb17d9b7d59adb2c | [] | no_license | alexpenson/scripts | c9e249959f7660bb69f850624ec8b422c47b3f86 | 5a8ad0cec4f6cc1516d2deb13cbc50ab3ff426a4 | refs/heads/master | 2016-09-11T06:43:11.470710 | 2013-12-10T20:12:00 | 2013-12-10T20:12:00 | 7,923,894 | 2 | 0 | null | null | null | null | UTF-8 | R | false | false | 552 | r | mart_range.R | #!/usr/bin/env Rscript
# Format a (chrom, start, end) triple as a samtools-style region string,
# e.g. c("chr1", "100", "200") -> "chr1:100-200".
samtools_range <- function(x) {
  sprintf("%s:%d-%d", x[1], as.integer(x[2]), as.integer(x[3]))
}
# Query Ensembl BioMart for the genomic coordinates of the HGNC gene symbols
# given on the command line, then print one samtools-style region string
# ("chrom:start-end") per gene to stdout. Requires network access.
suppressPackageStartupMessages(suppressWarnings(library(biomaRt)))
# Gene symbols come from the command-line arguments.
args <- commandArgs(trailingOnly=TRUE)
# Connect to the human-gene Ensembl mart.
ensembl <- useMart(biomart="ensembl", dataset="hsapiens_gene_ensembl")
# Look up chromosome/start/end for each symbol; apply() formats each result
# row with samtools_range() (defined above).
ranges <- apply(
  getBM(mart=ensembl,
        attributes=c('chromosome_name',
                     'start_position',
                     'end_position'),
        filters='hgnc_symbol',
        values=args),
  1,
  samtools_range
)
cat(ranges, sep="\n")
0e90183870d8a5e60502a20a5cd42f14839dcd6e | 6a883d4bd103405a90abb0c25b3956de029e810d | /statistical_learning/auto_EDA.R | e0a8429e6a3314c041b1407e54bdfc0b8ac4e475 | [] | no_license | qiaozhi9092/machine_learning_projects_in_R | 716b8ad1bae948410b9b56abb64bb795286170a3 | 1654394ed1581ef608d6724e6ebc5f2e09749397 | refs/heads/master | 2020-04-30T14:16:54.365839 | 2019-03-31T05:13:43 | 2019-03-31T05:13:43 | 176,885,891 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 601 | r | auto_EDA.R | auto = read.csv("Auto.csv", header=T, na.strings="?")
# Drop rows with missing values, then take a first look at the cleaned data.
auto <- na.omit(auto)
summary(auto)
dim(auto)
# Quantitative summaries of the first seven (numeric) features.
first_seven <- auto[, 1:7]
sapply(first_seven, range)
sapply(first_seven, mean)
sapply(first_seven, sd)
# Same summaries after dropping observations 10 through 85.
new_auto <- auto[-(10:85), ]
sapply(new_auto[, 1:7], range)
sapply(new_auto[, 1:7], mean)
sapply(new_auto[, 1:7], sd)
# Pairwise scatterplots of all variables, then targeted mpg plots.
pairs(auto)
par(mfrow = c(1, 1))
plot(auto$mpg, auto$weight)
# Heavier weight correlates with lower mpg.
plot(auto$mpg, auto$cylinders)
# More cylinders, less mpg.
plot(auto$mpg, auto$year)
# Cars become more efficient over time.
72d480043be2a9dc81c1b5e646f414baeabde256 | 3f37a013c6e91872be88a9c42252e7d11662caac | /R/column_letter_to_num.R | 82356472a4e9b810e07f11e6bff0b6990fdabb08 | [] | no_license | michaeldorman/geobgu | 72d7d276c647cf7d7ff297fca6382107d5fa4edb | 5d31c91f758292624ad314eb67af484ef3833024 | refs/heads/master | 2021-06-28T02:01:52.448363 | 2021-01-06T08:14:19 | 2021-01-06T08:14:19 | 67,873,299 | 2 | 1 | null | 2020-04-09T15:42:00 | 2016-09-10T13:43:44 | R | UTF-8 | R | false | false | 934 | r | column_letter_to_num.R | #' Find the corresponding column number for an Excel column letter
#'
#' Treats an Excel column label as a base-26 number over the letters A-Z
#' (A = 1, ..., Z = 26, AA = 27, ...). Lower-case letters are accepted.
#'
#' @param x A vector of Excel column letters
#'
#' @return A numeric vector of column numbers; `NA` for inputs that are
#'   `NA`, empty, or contain non-letter characters (the original code
#'   raised an error on such inputs, and on zero-length `x`)
#'
#' @references
#' \url{https://stackoverflow.com/questions/34537243/convert-excel-column-names-to-numbers}
#'
#' @export
#'
#' @examples
#' column_letter_to_num("A")
#' column_letter_to_num("Z")
#' column_letter_to_num(c("Z", "AA"))
#' column_letter_to_num("BA")
#' column_letter_to_num("AAA")
#' column_letter_to_num(LETTERS)
#' column_letter_to_num(NA)
column_letter_to_num <- function(x) {
  vapply(x, function(s) {
    if (is.na(s)) {
      return(NA_real_)
    }
    # Split into single characters and map each onto its alphabet position.
    chars <- strsplit(toupper(s), split = "")[[1]]
    digits <- match(chars, LETTERS)
    # Empty strings or non-letter characters have no column number.
    if (length(digits) == 0 || anyNA(digits)) {
      return(NA_real_)
    }
    # Interpret as a base-26 number with A = 1 (there is no zero digit).
    sum(digits * 26^(rev(seq_along(digits)) - 1))
  }, numeric(1), USE.NAMES = FALSE)
}
|
1a1a4d93401be4887ffcf6c732337d45a75a06c6 | f45333719c9d6f1b55c95be0f9dc03dea9b6c086 | /R/set_params.R | 6d67b792d128f92274c1a07d8c5d288ba99d29ea | [] | no_license | fintzij/stemr | 7c95bde20622142c7f62aad49cfb764a46342b47 | 185375e0933331da49bdc53563ce61338de4c450 | refs/heads/master | 2022-05-19T18:29:05.622681 | 2022-03-16T17:16:52 | 2022-03-16T17:16:52 | 52,254,628 | 8 | 6 | null | 2022-03-16T01:35:15 | 2016-02-22T07:16:45 | R | UTF-8 | R | false | false | 620 | r | set_params.R | #' Set the parameter values for a stochastic epidemic model object.
#'
#' @param stem_object stochastic epidemic model object whose parameter
#'   values should be updated.
#' @param ... named vectors of new parameter values; names must match
#'   existing entries of \code{stem_object$dynamics$parameters}.
#'
#' @return stem object with updated parameters
#' @export
set_params <- function(stem_object, ...) {
    updates <- list(...)
    current_names <- names(stem_object$dynamics$parameters)
    # Overwrite each named parameter in place, matching by name.
    for (upd in updates) {
        positions <- match(names(upd), current_names)
        stem_object$dynamics$parameters[positions] <- upd
    }
    stem_object
}
687806c3299188c78bf6d15de1ba0eefb7beed11 | 6f732726e35e370fbb5074d8b1f57159b1b17a49 | /DM WBP Survival Estimates RMark Nest.R | c9cc9f2a4fd629989546f7621e03e6c78f4142eb | [] | no_license | erpansing/whitebark-pine-demographic-model-master | a31adb2625e62fad48716989ca589bc61b69710b | 402d2f65d78fb5cd65813178b4acd4502aeb2551 | refs/heads/master | 2021-04-27T01:42:36.989110 | 2018-06-16T17:09:29 | 2018-06-16T17:09:29 | 122,680,280 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 9,136 | r | DM WBP Survival Estimates RMark Nest.R | ## Convert to nest survival data frame
# Data preparation: load the 2017 Yellowstone whitebark pine survey data and
# clean it down to the post-fire cohort used for the survival analysis.
# rm(list = ls()) # only use when playing with this script, and be sure to comment out before saving as it can ruin dependent scripts
library(dplyr)
library(RMark)
library(ggplot2)
library(reshape2)
# Loads the `pial` data frame (one row per tree per survey year).
load("/Users/elizabethpansing/Box Sync/Yellowstone/88-Fires-Analysis/2017 YNP Data.Rda")
# Omit IDs whose only recorded status is "D" (dead, never seen alive)
StatusSummarybyID <- pial %>% # create table of number of statuses
  group_by(., IDNumber, Status) %>% # for each ID
  tally() %>%
  ungroup(.)
# Wide table: one row per ID, columns D/L hold the status counts.
StatusSummarybyID <- dcast(StatusSummarybyID, IDNumber ~ Status, value.var = "n", fill = 0) %>%
  filter(., D > 0 & L == 0) # ID trees with only one status (D)
IDs <- StatusSummarybyID$IDNumber # Collect IDNumbers as a vector
pial_SD <- pial %>%
  filter(., !IDNumber %in% IDs) # Remove those trees from dataframe
rm(IDs, pial, StatusSummarybyID) # clean up environment
## Remove IDs with duplicate ID_Year combinations (data-entry artifacts)
ID <- unique(pial_SD$IDNumber[duplicated(pial_SD$ID_Year)]) # ID trees
# with identical ID_Year combinations
pial_SD <- pial_SD %>%
  filter(., !IDNumber %in% ID) # Omit these trees. Once QC is complete, this
# step will be unnecessary
rm(ID) # clean up environment
## Remove IDs of pre-fire regen (germinated before the 1988/1990 fires)
ID <- unique(pial_SD$IDNumber[pial_SD$YearGerminated < 1990]) # ID prefire trees
pial_SD <- pial_SD %>%
  filter(., !IDNumber %in% ID) # Omit these trees.
rm(ID) # clean up environment
# Treat records with a missing status as dead.
pial_SD$Status[is.na(pial_SD$Status)] <- "D"
#
# FirstFound <- pial_SD %>%
# group_by(., IDNumber) %>%
# filter(., Year == min(Year)) %>%
# ungroup(.) %>%
# dplyr::rename(FirstFound = Year) %>%
# select(., IDNumber, FirstFound) %>%
# mutate(., FirstFound = FirstFound - 1989)
#
# LastChecked <- pial_SD %>%
# group_by(., IDNumber) %>%
# filter(., Year == max(Year)) %>%
# ungroup(.) %>%
# dplyr::rename(LastChecked = Year) %>%
# select(., IDNumber, LastChecked) %>%
# mutate(., LastChecked = LastChecked - 1989)
#
# LastPresent <- pial_SD %>%
# group_by(., IDNumber) %>%
# filter(., Status == "L") %>%
# filter(., Year == max(Year)) %>%
# ungroup(.) %>%
# dplyr::rename(LastPresent = Year) %>%
# select(., IDNumber, LastPresent) %>%
# mutate(LastPresent = LastPresent - 1989)
#
#
# AgeFound <- pial_SD %>%
# group_by(., IDNumber) %>%
# filter(., Year == min(Year)) %>%
# ungroup(.) %>%
# mutate(., AgeFound = Age) %>%
# select(., IDNumber, AgeFound)
#
#
# Fate <- pial_SD %>%
# group_by(., IDNumber) %>%
# filter(., Year == max(Year)) %>%
# mutate(., Fate = ifelse(Status == "L", 0, 1)) %>%
# select(., IDNumber, Fate)
#
# pial_SD_ns <- merge(FirstFound, LastChecked, by = "IDNumber") %>%
# merge(., LastPresent) %>%
# merge(., Fate) %>%
# merge(., AgeFound) %>%
# # mutate(., Freq = 1) %>%
# select(., FirstFound, LastPresent, LastChecked,
# Fate, AgeFound, IDNumber) %>%
# filter(., AgeFound >= 0 | is.na(AgeFound)) %>%
# filter(., !(FirstFound == LastPresent))
#
# rm(AgeFound, Fate, FirstFound, LastChecked, LastPresent, pial_SD)
#
#
# run.wbp.models=function()
# {
# # 1. A model of constant survival rate (SR)
#
# Dot = mark(pial_SD_ns, nocc = 28, model="Nest",
# model.parameters = list(S = list(formula = ~1)))
#
# # 2. SR varies as a function of time
#
# Time = mark(pial_SD_ns, nocc = 28, model = "Nest",
# model.parameters = list(S = list(formula = ~ Time)))
#
# # # 3. SR varies with discrete time
#
# # time = mark(pial_SD_ns, nocc = 28, model = "Nest",
# # model.parameters = list(S = list(formula = ~ time)))
#
# # Return model table and list of models
# #
# return(collect.models() )
# }
#
# wbp.results=run.wbp.models()
#
# #~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~#
# # Examine table of model-selection results #
# #~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~#
#
# wbp.results # print model-selection table to screen
#
# SD_survival_mean <- wbp.results$Dot$results$real$estimate
# SD_survival_var <- nrow(pial_SD_ns) * (wbp.results$Dot$results$real$se)^2
#
# #
# rm(pial_SD_ns,wbp.results, run.wbp.models)
# predictions <- data.frame(time = seq(from = 1991, to = 2017, by = 1),
# prob = wbp.results$Dot$results$real$estimate,
# lcl = wbp.results$Dot$results$real$lcl,
# ucl = wbp.results$Dot$results$real$ucl)
# #
# #
# ggplot(predictions, aes(x = time, y = prob)) +
# # geom_point() +
# geom_ribbon(aes(ymin = lcl, ymax = ucl), alpha = 0.5, fill = "#ef8a62") +
# geom_line(size = 0.5) +
# xlab("Year") +
# ylab("Annual survival rate") +
# scale_y_continuous(limits = c(0.75,1)) +
# scale_x_continuous(limits = c(1990, 2017),
# breaks = seq(1990, 2017, 2)) +
# theme(axis.title = element_text(size = 18, face = "bold")) +
# theme(axis.text = element_text(size = 15))
##--------------------------------------------------------------------##
## ##
## Age > 10 considered success ##
## ##
##--------------------------------------------------------------------##
# Build the RMark "nest survival" input: one row per tree with first/last
# observation occasions, fate, and age. Years are rescaled so 1990 is
# occasion 1. Survival to age >= 10 is treated as "success", so only trees
# that could have reached age 10 by 2017 (germinated before 2006) are kept.
olduns <- pial_SD %>% filter(., YearGerminated < 2006)
# Occasion on which each tree was first observed.
FirstFound_10 <- olduns %>%
  dplyr::group_by(., IDNumber) %>%
  dplyr::filter(., Year == min(Year)) %>%
  dplyr::ungroup(.) %>%
  dplyr::rename(., FirstFound = Year) %>%
  dplyr::select(., IDNumber, FirstFound) %>%
  dplyr::mutate(., FirstFound = FirstFound - 1989)
# Last occasion on which each tree was checked (alive or dead).
LastChecked_10 <- olduns %>%
  dplyr::group_by(., IDNumber) %>%
  dplyr::filter(., Year == max(Year)) %>%
  dplyr::ungroup(.) %>%
  dplyr::rename(LastChecked = Year) %>%
  dplyr::select(., IDNumber, LastChecked) %>%
  dplyr::mutate(., LastChecked = LastChecked - 1989)
# Last occasion on which each tree was observed alive (Status == "L").
LastPresent_10 <- olduns %>%
  dplyr::group_by(., IDNumber) %>%
  dplyr::filter(., Status == "L") %>%
  dplyr::filter(., Year == max(Year)) %>%
  dplyr::ungroup(.) %>%
  dplyr::rename(LastPresent = Year) %>%
  dplyr::select(., IDNumber, LastPresent) %>%
  dplyr::mutate(LastPresent = LastPresent - 1989)
# Age at first observation (covariate for the age-dependent model).
AgeFound_10 <- olduns %>%
  dplyr::group_by(., IDNumber) %>%
  dplyr::filter(., Year == min(Year)) %>%
  dplyr::ungroup(.) %>%
  dplyr::mutate(., AgeFound = Age) %>%
  dplyr::select(., IDNumber, AgeFound)
# Fate at/after age 10: 0 = alive (success), 1 = dead (failure).
Fate_10 <- olduns %>%
  dplyr::group_by(., IDNumber) %>%
  dplyr::filter(., Age >= 10) %>%
  dplyr::mutate(., Fate = ifelse(Status == "L", 0, 1)) %>%
  dplyr::select(., IDNumber, Fate)
# Assemble the per-tree encounter history table expected by mark(model="Nest").
# Note: the inner-join merges implicitly drop trees missing from any piece
# (e.g. never seen alive, or never observed at age >= 10).
pial_SD_ns_10 <- merge(FirstFound_10, LastChecked_10, by = "IDNumber") %>%
  merge(., LastPresent_10) %>%
  merge(., Fate_10) %>%
  merge(., AgeFound_10) %>%
  # mutate(., Freq = 1) %>%
  dplyr::select(., FirstFound, LastPresent, LastChecked,
         Fate, AgeFound, IDNumber) # %>%
  # filter(., AgeFound >= 0 | is.na(AgeFound)) %>%
  # filter(., !(FirstFound == LastPresent))
# Fit a candidate set of RMark nest-survival models to `pial_SD_ns_10`
# (read from the global environment; 28 occasions = 1990-2017) and return
# the collected model list with its AIC model-selection table.
# NOTE: collect.models() gathers the model objects created in this
# function's frame, and the local names (Dot, Time, time, age) become the
# model names in the returned table -- do not rename them.
run.wbp.models_10=function()
{
  # 1. A model of constant survival rate (SR)
  Dot = mark(pial_SD_ns_10, nocc = 28, model="Nest",
             model.parameters = list(S = list(formula = ~1)))
  # 2. SR varies as a function of time (continuous linear trend)
  Time = mark(pial_SD_ns_10, nocc = 28, model = "Nest",
              model.parameters = list(S = list(formula = ~ Time)))
  # 3. SR varies with discrete time (separate estimate per occasion)
  time = mark(pial_SD_ns_10, nocc = 28, model = "Nest",
              model.parameters = list(S = list(formula = ~ time)))
  # 4. SR varies with age at first observation
  age = mark(pial_SD_ns_10, nocc = 28, model = "Nest",
             model.parameters = list(S = list(formula = ~AgeFound)))
  # Return model table and list of models
  return(collect.models() )
}
# Fit all candidate models (RMark runs program MARK for each, so this is slow).
wbp.results_10=run.wbp.models_10()
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~#
# Examine table of model-selection results #
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~#
wbp.results_10 # print model-selection table to screen
# Survival estimate from the constant-survival ("Dot") model, plus its
# variance scaled by sample size (n * SE^2) for use by downstream scripts.
SD_survival_mean <- wbp.results_10$Dot$results$real$estimate
SD_survival_var <- nrow(pial_SD_ns_10) * (wbp.results_10$Dot$results$real$se)^2
# predictions <- data.frame(time = seq(from = 1991, to = 2017, by = 1),
# prob = wbp.results_10$Dot$results$real$estimate,
# lcl = wbp.results_10$Dot$results$real$lcl,
# ucl = wbp.results_10$Dot$results$real$ucl)
# #
# #
# ggplot(predictions, aes(x = time, y = prob)) +
# # geom_point() +
# geom_ribbon(aes(ymin = lcl, ymax = ucl), alpha = 0.5, fill = "#ef8a62") +
# geom_line(size = 0.5) +
# xlab("Year") +
# ylab("Annual survival rate") +
# scale_y_continuous(limits = c(0.75,1)) +
# scale_x_continuous(limits = c(1990, 2017),
# breaks = seq(1990, 2017, 2)) +
# theme(axis.title = element_text(size = 18, face = "bold")) +
# theme(axis.text = element_text(size = 15))
#
#
#
# rm(list = ls()[!ls() %in% c("SD_survival_mean", "SD_survival_var")])
# Clean up: remove every object whose name matches the pattern -- anything
# containing "10", "old", or "pial", plus the helper `del` itself -- leaving
# only the summary statistics (SD_survival_mean / SD_survival_var) behind
# for dependent scripts.
del <- paste0("10|old|pial|^del$")
rm(list = ls(pattern = del))
|
38831a1c8397ad4ba328e0fa16acc15b08dc19bc | 81d7581130d9101d10af646fcaca68f91b98b1f3 | /learn_data_manipulation.R | 7df0957bb6538b4988a6acee95b791f0f35c8413 | [] | no_license | jocelinojr/Codigos-R | cd52ee1b896fc2656d4f1e3db73c2f128504db05 | 9ed3a7b29f7a0ae5faa4255c677f048713bf0dad | refs/heads/master | 2020-03-28T02:14:47.110813 | 2019-08-09T20:10:14 | 2019-08-09T20:10:14 | 147,557,825 | 0 | 0 | null | null | null | null | ISO-8859-1 | R | false | false | 8,125 | r | learn_data_manipulation.R | ################################################################################
install.packages("nycflights13")
###############################################################################
library(tidyverse)
library(nycflights13)
library(dplyr)
library(magrittr)
###############################################################################
# dados do ambiente
# pega qual é a pasta atual de traballho
getwd()
# lista os arquivos da pasta atual
list.files()
# Open the dataset in the RStudio Viewer
View(flights)
# The six verbs of the data manipulation grammar (language) of dplyr
# filter, select, arrange, mutate, summarise, group_by
# filter, assigns the result dataset to a variable and print it out by the using of parenthesis
(jan <- filter(flights, month==1, day==1))
##############################################
# HOT TIP ON LEADING WITH REAL NUMBERS
# the square root of two raised to 2 should be equal to two., however...
sqrt(2) ^ 2 == 2
# instead, we should use, near
near(sqrt(2) ^ 2, 2)
# using boolean operators
# finding all 2013 january flights
jan_dez_2013 <- filter(flights, month == 1 | month == 12)
###########################################################
# AWESOME OPERATOR!
nov_dez <- filter(flights, month %in% c(11, 12))
(jfk_lga_ewr <- filter(flights, origin %in% c("JFK", "LGA", "EWR")))
###################################################################
# The NA problem. NA is contagious!
NA > 5
df <- tibble(x = c(1, NA, 3))
df_1 = filter(df, x>1)
df_2 <- filter(df, is.na(x)|x>1)
# criando tabelas
df2 <- tibble(x = c(1, 4, 10), y = c(9, 10, 12.5))
?flights
#######################################################################
# Exercises from R for Data Science
# Find all flights that had an arrival delay of two or more hours
arr_dly_2_more <- filter(flights, arr_delay >= 120)
# Flew to houston
flew_houston <- filter(flights, dest %in% c("IAH", "HOU"))
# Departed in summer (July, August or Spetember)
summer_months <- c(7, 8, 9)
summer_departs <- filter(flights, month %in% summer_months)
m# com o pipe
summer_departs1 <- flights %>% filter(month %in% summer_months)
# Arrived more than two hours late, but didn't leave late
arr_2late_dept_on <- filter(flights, arr_delay > 120 & dep_delay <=0)
# Departed between midnight and 6 am (inclusive)
departed_mid_six <- filter(flights, between(dep_time, 1, 600) & !is.na(dep_time))
# arrange (changing the order of the rows)
# the most delayed flights
(arrange(flights, desc(dep_delay)))
# the flights that left earliest
(arrange(flights, dep_delay))
########################################
# Select
(select(flights, year, dep_time, day))
# com o pipe
flights %>% select(., year, dep_time, day)
# we can select a range of columns names!
(select(flights, day:arr_time))
# we can even use regular expressions to select columns names!
?flights
# cria uma nova variável com a diferença
(flights %>%
select(arr_time, dep_time, air_time, arr_delay, dep_delay) %>%
mutate(air_calc = arr_time - dep_time) %>%
mutate(air_dif = air_calc - air_time)
)
# investigando a relação entre distância percorrida e atraso médio
by_dest <- group_by(flights, dest)
delay <- summarise(by_dest,
qtde = n(),
distancia_media = mean(distance, na.rm = TRUE),
atraso_medio = mean(arr_delay, na.rm = TRUE))
# pega os maiores atrasos
delay <- filter(delay, qtde > 20, dest !="HNL")
ggplot(data = delay, mapping = aes(x = distancia_media, y =atraso_medio)) +
geom_point(aes(size=qtde), alpha = 1/2) +
geom_smooth(se= FALSE)
#################################################################################
# Working with the government-agreements ("convenios") file from the
# Brazilian transparency portal
##################################################################################
# To read the file correctly we need to:
# 1 - use read_csv2, for CSV files separated by ";"
# 2 - change the locale so the file is decoded as ISO-8859-1
convenios <- read_csv2("20180907_Convenios.csv", locale = locale(encoding = 'ISO-8859-1'))
# keep only the agreements from the state of Paraiba (PB)
convenios_PB <- convenios %>%
  filter(UF == "PB")
# getting to know our data frame
str(convenios_PB)
# convert our data frame to a tibble
conv_pb_ti <- as_tibble(convenios_PB)
conv_pb_ti
# column names that don't fit R's naming rules can be referred to with backticks ``
str(conv_pb_ti)
# FIX: the original called as.date(), which does not exist in base R and
# errors when sourced; base R's Date coercion is as.Date().
# NOTE(review): as.Date() assumes ISO "YYYY-MM-DD" strings -- pass format=
# if the column uses "dd/mm/yyyy".
conv_pb_ti$`DATA FINAL VIGÊNCIA` <- as.Date(conv_pb_ti$`DATA FINAL VIGÊNCIA`)
View(conv_pb_ti)
# total value released per municipality for agreements still in execution
por_muni <- conv_pb_ti %>%
  group_by(`NOME MUNICÍPIO`) %>%
  filter(`SITUAÇÃO CONVÊNIO` == "EM EXECUÇÃO") %>%
  #filter(`VALOR LIBERADO` > 1000000) %>%
  summarise(qt_conv = n(), total_libe = sum(`VALOR LIBERADO`)) %>%
  arrange(desc(total_libe))
# number of agreements vs. total value released, per municipality
ggplot(data = por_muni, mapping = aes(x = qt_conv, y = total_libe)) +
  geom_point(alpha = 1/2) +
  geom_smooth(se = FALSE)
##################################################################
# Exploratory Data Analysis
# variation in the values of CATEGORICAL variables (use a BAR CHART)
ggplot(data = diamonds) +
  geom_bar(mapping = aes(x = cut))
# getting the exact counts with count()
diamonds %>%
  count(cut) %>%
  arrange(desc(n))
# variation in the values of continuous variables (histogram)
str(diamonds)
ggplot(data = diamonds) +
  geom_histogram(mapping = aes(x = carat), binwidth = 0.5)
# grouping the numeric values into bins (discretizing a continuous variable)
carat_categorizado <- diamonds %>%
  count(cut_width(carat, 0.5)) %>%
  arrange(desc(n))
# putting it in a chart (we must change the bar chart's default stat, count)
ggplot(data = carat_categorizado, mapping = aes(x = "", y = n, fill = `cut_width(carat, 0.5)`)) +
  geom_bar(width = 0.75, stat = "identity")
# zooming in on the small diamonds
pequenos <- diamonds %>%
  filter(carat < 3)
ggplot(data = pequenos, mapping = aes(x = carat)) +
  geom_histogram(binwidth = 0.1)
# overlaying histograms - use frequency polygons (lines) instead of bars
ggplot(data = pequenos, mapping = aes(x = carat, color = cut)) +
  geom_freqpoly(binwidth = 0.1)
# identifying patterns with a much finer bin width
ggplot(data = pequenos, mapping = aes(x = carat)) +
  geom_histogram(binwidth = 0.01)
str(mtcars)
mean(mtcars$mpg)
ggplot(data = mtcars, mapping = aes(x = mpg)) +
  geom_histogram(binwidth = 5)
mtcars %>%
  count(cut_width(mpg, 5))
# OUTLIERS ################################################
# Exploring the y variable
ggplot(data = diamonds, mapping = aes(x = y)) +
  geom_histogram(binwidth = 0.5)
# zooming in on the rarely occurring values
ggplot(data = diamonds, mapping = aes(x = y)) +
  geom_histogram(binwidth = 0.5) +
  coord_cartesian(ylim = c(0, 50))
# Exploring x
ggplot(data = diamonds, mapping = aes(x = x)) +
  geom_histogram(binwidth = 0.5)
# Exploring z
ggplot(data = diamonds, mapping = aes(x = z)) +
  geom_histogram(binwidth = 0.5)
# seeing the distribution over bins
diamonds %>%
  count(cut_width(x, 0.5)) %>%
  arrange(desc(n))
diamonds %>%
  count(cut_width(y, 0.5)) %>%
  arrange(desc(n))
diamonds %>%
  count(cut_width(z, 0.5)) %>%
  arrange(desc(n))
# Exploring diamond prices by cut
ggplot(data = diamonds, mapping = aes(x = price, fill = cut)) +
  geom_histogram(binwidth = 10000)
# FIX: arrange() referred to `cut_width(price, 1000)`, a column that does not
# exist in the counted data (the bin width used above is 10000), which errors.
price_by_cut <- diamonds %>%
  count(cut_width(price, 10000), cut) %>%
  arrange(`cut_width(price, 10000)`)
# Exploring diamond prices by color
ggplot(data = diamonds, mapping = aes(x = price, fill = color)) +
  geom_histogram(binwidth = 1000)
# Price as a function of carat, colored by cut
ggplot(data = diamonds, mapping = aes(x = carat, y = price)) +
  geom_point(aes(color = cut)) +
  geom_smooth(se = FALSE)
|
07d292646a751efa6cbfe34c8bf56124689b46d3 | a34687bd7f646ed793ef65b94136f1e52e1e0abc | /scripts/DGE_analysis/pro_scripts/DGE_intersect_w_logfc.r | a69aff5295f66d74f5b888494dae655e35526ac2 | [] | no_license | almaan/breast_dge | a1722cd7b660a843e313f922d441cd67f24ed13b | 30582543435733ac3e0ddaecdd25c80cde1f49b9 | refs/heads/master | 2020-04-06T12:35:02.979021 | 2019-04-04T09:37:36 | 2019-04-04T09:37:36 | 157,461,458 | 1 | 0 | null | null | null | null | UTF-8 | R | false | false | 3,411 | r | DGE_intersect_w_logfc.r | library(gplots)
# Input tables: per-contrast DE gene lists (comma-separated strings in the
# all_genes / neg_genes / pos_genes columns) and a summary of what each
# contrast tested, accounted for, and conditioned on.
pth <- "/home/alma/ST-2018/CNNp/DGE/res/DGEresults_all/logfccounts2.tsv"
pth_map <- "/home/alma/ST-2018/CNNp/DGE/res/DGEresults_all/results_summary.tsv"
sets <- read.csv(pth, sep = "\t", header = TRUE, row.names = 1)
ressum <- read.csv(pth_map, sep = "\t", header = TRUE, row.names = 1,
                   stringsAsFactors = FALSE)
# Split a comma-separated gene string into a character vector of gene names.
split_genes <- function(gene_string) {
  strsplit(as.character(gene_string), ",")[[1]]
}
# Symmetric matrix of pairwise intersection sizes between gene sets.
# Fixes relative to the original duplicated loops: the diagonal used
# length(<length-1 string>) (always 1) instead of the number of genes in the
# set, the diagonal of the last row/column was never filled, and 1:(n-1)
# broke for a single set. The debug print() calls were dropped, as were the
# unused sep1/sep2/sep3 vectors.
intersection_matrix <- function(gene_strings) {
  gene_lists <- lapply(gene_strings, split_genes)
  n <- length(gene_lists)
  m <- matrix(0, nrow = n, ncol = n)
  for (xx in seq_len(n)) {
    m[xx, xx] <- length(gene_lists[[xx]])  # a set's overlap with itself
    if (xx < n) {
      for (jj in (xx + 1):n) {
        shared <- length(intersect(gene_lists[[xx]], gene_lists[[jj]]))
        m[xx, jj] <- shared
        m[jj, xx] <- shared
      }
    }
  }
  m
}
# Build "T_<tested>_A_<accounted>_C_<conditioned>[_<suffix>]" labels, one per
# row of the results summary; row names are kept for lookup by file name.
make_labels <- function(summary_df, suffix = NULL) {
  labels <- vapply(seq_len(nrow(summary_df)), function(i) {
    paste(c("T", summary_df[i, "tested_for"],
            "A", summary_df[i, "accounted_for"],
            "C", summary_df[i, "conditioned_on"], suffix),
          collapse = "_")
  }, character(1))
  as.data.frame(labels, row.names = rownames(summary_df))
}
new_names <- make_labels(ressum)
# --- All DE genes, regardless of log-fold-change sign ----------------------
intermat <- intersection_matrix(sets$all_genes)
# Map each result file to its short label (strip path and ".fancy.tsv").
cn <- gsub("\\.fancy\\.tsv", "", basename(rownames(sets)))
colnames(intermat) <- new_names[cn, 1]
rownames(intermat) <- new_names[cn, 1]
df <- as.data.frame(intermat)
pdf("/home/alma/ST-2018/CNNp/DGE/res/heatmaps/heatmap_all_gens.pdf", width = 40, height = 40)
# log1p keeps the large self-intersection counts from washing out the colors
heatmap.2(log1p(as.matrix(df)), scale = "none", Rowv = TRUE, Colv = TRUE, dendrogram = "none", key = FALSE,
          margins = c(60, 60), trace = "none", cexRow = 2.3, cexCol = 2.3, offsetRow = 1.0)
dev.off()
# --- Split by direction of change: "_D" = down, "_U" = up ------------------
new_names_down <- as.character(make_labels(ressum, "D")[cn, 1])
new_names_up <- as.character(make_labels(ressum, "U")[cn, 1])
# Stack the down-regulated gene sets on top of the up-regulated ones.
n_sets <- nrow(sets)
dirdf <- as.data.frame(matrix(0, nrow = 2 * n_sets, ncol = 1),
                       row.names = c(new_names_down, new_names_up))
dirdf[1:n_sets, ] <- as.character(sets$neg_genes)
dirdf[(n_sets + 1):(2 * n_sets), ] <- as.character(sets$pos_genes)
intermat_lfc <- intersection_matrix(dirdf[, 1])
rownames(intermat_lfc) <- rownames(dirdf)
colnames(intermat_lfc) <- rownames(dirdf)
pdf("/home/alma/ST-2018/CNNp/DGE/res/heatmaps/heatmap_all_genes_lfc.pdf", width = 65, height = 65)
heatmap.2(log1p(intermat_lfc), scale = "none", Rowv = TRUE, Colv = TRUE, dendrogram = "none", key = FALSE,
          margins = c(60, 60), trace = "none", cexRow = 2.3, cexCol = 2.3, offsetRow = 1.0)
dev.off()
696e51f7b9a205d3ab203127ea179940e3ca4298 | a4b54463ecff70130460e892d2eacdb0737442f0 | /man/find_extreme_cells.Rd | ec1003035c2ca19248f5e0bdb77f5ee57c06c750 | [] | no_license | dynverse/SLICER | 7ce68813600d6231fa1657ca76aba8f8462cfd37 | 32acfb4e60acd9210e9753de6975f7ee01e6a85b | refs/heads/master | 2018-09-20T06:48:25.138042 | 2017-10-17T08:40:48 | 2017-10-17T09:50:13 | 107,240,181 | 0 | 0 | null | 2017-10-17T08:31:29 | 2017-10-17T08:31:28 | null | UTF-8 | R | false | true | 889 | rd | find_extreme_cells.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/SLICER.R
\name{find_extreme_cells}
\alias{find_extreme_cells}
\title{Identify candidate start cells for the trajectory}
\usage{
find_extreme_cells(traj_graph, embedding, do_plot = TRUE)
}
\arguments{
\item{traj_graph}{Nearest neighbor graph built from LLE embedding}
\item{embedding}{Low-dimensional LLE embedding of cells}
\item{do_plot}{Whether or not to plot results}
}
\value{
Indices of potential starting cells
}
\description{
Plots the embedding generated by LLE and highlights
potential starting cells for the trajectory. The candidates
are chosen based on the longest shortest path through the
nearest neighbor graph.
}
\examples{
genes=1:200
cells=sample(1:500,30)
k=10
traj_lle = lle::lle(traj[cells,genes],m=2,k)$Y
traj_graph = conn_knn_graph(traj_lle,5)
find_extreme_cells(traj_graph,traj_lle)
}
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.