| column | type | length / range |
|---|---|---|
| blob_id | string | length 40–40 |
| directory_id | string | length 40–40 |
| path | string | length 2–327 |
| content_id | string | length 40–40 |
| detected_licenses | list | length 0–91 |
| license_type | string | 2 classes |
| repo_name | string | length 5–134 |
| snapshot_id | string | length 40–40 |
| revision_id | string | length 40–40 |
| branch_name | string | 46 classes |
| visit_date | timestamp[us] | 2016-08-02 22:44:29 – 2023-09-06 08:39:28 |
| revision_date | timestamp[us] | 1977-08-08 00:00:00 – 2023-09-05 12:13:49 |
| committer_date | timestamp[us] | 1977-08-08 00:00:00 – 2023-09-05 12:13:49 |
| github_id | int64 | 19.4k – 671M (nullable ⌀) |
| star_events_count | int64 | 0 – 40k |
| fork_events_count | int64 | 0 – 32.4k |
| gha_license_id | string | 14 classes |
| gha_event_created_at | timestamp[us] | 2012-06-21 16:39:19 – 2023-09-14 21:52:42 (nullable ⌀) |
| gha_created_at | timestamp[us] | 2008-05-25 01:21:32 – 2023-06-28 13:19:12 (nullable ⌀) |
| gha_language | string | 60 classes |
| src_encoding | string | 24 classes |
| language | string | 1 class |
| is_vendor | bool | 2 classes |
| is_generated | bool | 2 classes |
| length_bytes | int64 | 7 – 9.18M |
| extension | string | 20 classes |
| filename | string | length 1–141 |
| content | string | length 7 – 9.18M |

Each record below lists its field values in the column order above, one `|`-separated field per line, with the file content included inline.
8c130a37e30802dc2d06b5bd369609cbdf61d0a3
|
8002fafcdfcce506ece983de89d8f0abbbd6f5bc
|
/man/BMMpriors.Rd
|
097a707f381750e8aa052ac7a9227dfa33591d0a
|
[] |
no_license
|
cran/bayesmix
|
a07c5752c71ee28ac3787136860edb325c1a8642
|
a575c8b08acc5cb4c339e48932a93d15037f436b
|
refs/heads/master
| 2023-06-21T21:09:56.036478
| 2023-04-12T12:50:02
| 2023-04-12T12:50:02
| 17,694,645
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,468
|
rd
|
BMMpriors.Rd
|
\name{BMMpriors}
\alias{BMMpriors}
\title{Create a 'BMMpriors' object}
\description{This function provides a convenient way to create
\code{BMMpriors} objects, which can be used for specifying the priors
of a \code{BMMmodel}.
}
\usage{
BMMpriors(specification, y, eps = 10^-16)
}
\arguments{
\item{specification}{named list including \code{kind}, \code{parameter},
\code{hierarchical} and \code{mod}.}
\item{y}{a numeric vector.}
\item{eps}{a numeric value indicating the smallest value for flat priors.}
}
\details{In \code{specification}, \code{kind} specifies whether an
"independent" or a "conditionallyconjugate"
prior shall be used. \code{parameter} can be a named list of values for
the prior variables or a function name (e.g., "priorsUncertain",
"priorsFish", "priorsRaftery"). \code{hierarchical} can be \code{NULL},
or "tau" if a hierarchical prior shall be taken for
\eqn{\tau}. \code{mod} is a named list which makes it possible
to override the values from \code{parameter}.
}
\value{
An object of class \code{BMMpriors} is returned, with components
\item{name}{vector indicating which kind of prior is specified, whether
it is a hierarchical prior and, if so, with respect to which
variable.}
\item{var}{list of variables in the prior and their specified values.}
}
\author{Bettina Gruen}
\examples{
data("fish", package = "bayesmix")
priors <- BMMpriors(y = fish)
}
\keyword{utilities}
|
dac4f3fdeba6ec2cf2d2e3324d2d019d340b288a
|
ffdea92d4315e4363dd4ae673a1a6adf82a761b5
|
/data/genthat_extracted_code/locfit/examples/lscv.exact.Rd.R
|
f51a5623ee495dd53b3912cc9d7e637bd830c696
|
[] |
no_license
|
surayaaramli/typeRrh
|
d257ac8905c49123f4ccd4e377ee3dfc84d1636c
|
66e6996f31961bc8b9aafe1a6a6098327b66bf71
|
refs/heads/master
| 2023-05-05T04:05:31.617869
| 2019-04-25T22:10:06
| 2019-04-25T22:10:06
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 265
|
r
|
lscv.exact.Rd.R
|
library(locfit)
### Name: lscv.exact
### Title: Exact LSCV Calculation
### Aliases: lscv.exact
### Keywords: htest
### ** Examples
data(geyser, package="locfit")
lscv.exact(lp(geyser,h=0.25))
# equivalent form using lscv
lscv(lp(geyser, h=0.25), exact=TRUE)
|
630591f028d2cc5cdd0ca53be2cc276d909ae8cb
|
3e2dda52abb7af3fa346f700a821cf0a45fd4d4f
|
/Lumbar2.R
|
39f3b62a13cfa172defbc39c67d6da42720b9d78
|
[] |
no_license
|
AnhKhoaVo/LumbarDiscHerniation
|
671d0860c91312aa0bacfd5a7d155839a5d39218
|
2aad140fa00e4324330aa376ed205eb5a5f42e10
|
refs/heads/master
| 2023-07-11T04:13:37.476544
| 2021-08-15T02:16:14
| 2021-08-15T02:16:14
| 276,206,176
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 25,253
|
r
|
Lumbar2.R
|
# Packages assumed by this script (it calls them without loading them anywhere
# in the original source; jmv and table1 are loaded/qualified later on):
library(party)      # ctree(), ctree_control(), where()
library(ggplot2)    # plotting
library(lattice)    # xyplot()
library(reshape2)   # melt()
library(lme4)       # lmer()
library(data.table) # setDT()
library(expss)      # apply_labels(), use_labels()
library(dplyr)      # group_by(), summarise(), count(), select(), filter()
library(ggpubr)     # ggboxplot(), ggarrange()
library(nnet)       # multinom()
library(ggcorrplot) # ggcorrplot()
plot(ctree(Postop_grade_paresis_3m~Preop_grading_paresis+Myotoma+Duration_paresis+Duration_initial+Age+Levels, data=subset(lumbar_disc_herniation, !is.na(Postop_grade_paresis_3m))))
plot(ctree(Postop_muscle_strength_3m~Preop_muscle_strength+Duration_initial+Myotoma+Duration_paresis+Age+Levels, data=subset(lumbar_disc_herniation, !is.na(Postop_muscle_strength_3m))))
ggplot(data=lumbar_disc_herniation, aes(x=Postop_muscle_strength_3m, y=Postop_muscle_strength_last, colour=Group_paresis))+
geom_jitter(alpha=.6)+
geom_smooth(method="lm")+
scale_colour_manual(name = "Groups of Paresis", values=c("mediumseagreen", "tan2", "red")
, labels = c("1" = "<48h", "2" = "2-7 days", "3" = ">7 days"))+
theme_bw()+
facet_grid(.~OPwithin48h, scales = "free_x")+
xlab("Pre Operative Muscle Strength")+
ylab("Post Operative Muscle Strength 3 months")+
ggtitle("Correlation between pre and post op muscle strength across groups of paresis")
last3m <- ggplot(data=lumbar_disc_herniation, aes(x=Postop_muscle_strength_3m, y=Postop_muscle_strength_last))+
geom_jitter(alpha=.6)+
geom_smooth(method="lm")+
theme_bw()+
xlab("Post Operative Muscle Strength 3 months")+
ylab("Post Operative Muscle Strength last follow up")+
ggtitle("Relationship between 3 month and last follow up muscle strength")
outlierKD <- function(dt, var) {
var_name <- eval(substitute(var),eval(dt))
na1 <- sum(is.na(var_name))
m1 <- mean(var_name, na.rm = T)
par(mfrow=c(2, 2), oma=c(0,0,3,0))
boxplot(var_name, main="With outliers")
hist(var_name, main="With outliers", xlab=NA, ylab=NA)
outlier <- boxplot.stats(var_name)$out
mo <- mean(outlier)
var_name <- ifelse(var_name %in% outlier, NA, var_name)
boxplot(var_name, main="Without outliers")
hist(var_name, main="Without outliers", xlab=NA, ylab=NA)
title("Outlier Check", outer=TRUE)
na2 <- sum(is.na(var_name))
cat("Outliers identified:", na2 - na1, "n")
cat("Propotion (%) of outliers:", round((na2 - na1) / sum(!is.na(var_name))*100, 1), "n")
cat("Mean of the outliers:", round(mo, 2), "n")
m2 <- mean(var_name, na.rm = T)
cat("Mean without removing outliers:", round(m1, 2), "n")
cat("Mean if we remove outliers:", round(m2, 2), "n")
response <- readline(prompt="Do you want to remove outliers and to replace with NA? [yes/no]: ")
if(response == "y" | response == "yes"){
dt[as.character(substitute(var))] <- invisible(var_name)
assign(as.character(as.list(match.call())$dt), dt, envir = .GlobalEnv)
cat("Outliers successfully removed", "n")
return(invisible(dt))
} else{
cat("Nothing changed", "n")
return(invisible(var_name))
}
}
outlierKD(lumbar_disc_herniation, Duration_paresis)
#Changes of muscle strength over time based on LMER
Lumbar_50 <- head(lumbar_disc_herniation, 50)
Lumbar_long_50<- melt(Lumbar_50, id.vars=c("PatID", "Levels", "Age", "Gender", "Group_paresis"),
measure.vars=c("Preop_muscle_strength", "Postop_muscle_strength", "Postop_muscle_strength_6w",
"Postop_muscle_strength_3m", "Postop_muscle_strength_last"),
variable.name ="Time",
value.name ="Muscle_strength")
Lumbar_long_50$Time <- as.numeric(Lumbar_long_50$Time)
Lumbar_long<- melt(lumbar_disc_herniation, id.vars=c("PatID", "Levels", "Age", "Gender", "Group_paresis"),
measure.vars=c("Preop_muscle_strength", "Postop_muscle_strength", "Postop_muscle_strength_6w",
"Postop_muscle_strength_3m", "Postop_muscle_strength_last"),
variable.name ="Time",
value.name ="Muscle_strength")
Lumbar_long_3mlast <- subset(Lumbar_long, as.numeric(Time) != 5) # Time levels are variable names, not digits; the 5th level is the last follow-up
lattice_plot<-xyplot(Muscle_strength~Time|PatID, col.line = "red", layout = c(10,5),
grid = TRUE, type = c("p", "r"), data = Lumbar_long_50, ylab="Muscle Strength",
main="Trellis Plot of Muscle Strength of the First 50 Individuals Over Time")
plot(lattice_plot)
ggplot(data = Lumbar_long_50, aes(x=Time, y=Muscle_strength)) +
geom_jitter()+
geom_line()+
facet_grid(.~PatID)+
geom_smooth(method="lm",formula = y~poly(x,3), se=FALSE,colour="red")
Lumbar_lmer <- lmer(Muscle_strength ~ Time + (1+Time|PatID), REML = FALSE,
data = Lumbar_long)
coef_lumbar <- coef(Lumbar_lmer)$PatID[1:2]
setDT(coef_lumbar, keep.rownames = TRUE)[]
colnames(coef_lumbar) <- c("PatID","Intercept", "Slope")
lumbar_coef <- merge(lumbar_disc_herniation, coef_lumbar, by="PatID")
lumbar_coef$Group_paresis <- as.factor(ifelse(lumbar_coef$Group_paresis=="1", "<48h(ultra early)",
ifelse(lumbar_coef$Group_paresis=="2" ,"2-7d(early)", ">7d(delayed)")))
lumbar_coef$Group_paresis2 <- relevel(lumbar_coef$Group_paresis, ref = "<48h(ultra early)")
lumbar_coef = apply_labels(lumbar_coef, Preop_muscle_strength="Pre-operation muscle strength",
Group_paresis="Groups of Paresis", Duration_paresis="Duration of Paresis")
URP_slope<-use_labels(lumbar_coef, party::ctree(Slope~Duration_initial+Myotoma+Duration_paresis+Age+Levels+Preop_muscle_strength+BMI+Gender,
data=..data,controls=ctree_control(testtype = "Bonferroni", maxdepth = 2)))
plot(URP_slope, main="Decision Tree for Slope of Recovery")
lumbar_coef$Preop_muscle_strength_factor <- as.factor(ifelse(lumbar_coef$Preop_muscle_strength>2, "Pre-operation muscle strength>2", "Pre-operation muscle strength=<2"))
# the expss package masks where(), so we must qualify which package where() comes from
lumbar_coef$node_slope <- party::where(URP_slope)
lumbar_coef %>% group_by(node_slope) %>% summarise(mean=mean(Slope), median=median(Slope))
lumbar_coef$node_slope2 <- as.factor(ifelse(lumbar_coef$node_slope==2,2,
ifelse(lumbar_coef$node_slope==4,4,5)))
lumbar_coef$Group_paresis2 <- relevel(lumbar_coef$Group_paresis, ref = ">7d(delayed)") # the levels were renamed above, so reference group 3 by its new label
lumbar_coef$Group_paresis3 <- as.factor(ifelse(lumbar_coef$Duration_paresis>3, ">3d", "=<3d"))
Slope_boxplot <-ggboxplot(data=subset(lumbar_coef, !is.na(Group_paresis3)), x="node_slope", y="Slope", fill="Group_paresis3")+
ylab("Slope of Recovery")+
facet_grid(.~Preop_muscle_strength_factor, scales = "free_x")+
scale_fill_manual(values=c("steelblue4", "lightblue3"))+
labs(fill='Timing of Surgery')+
theme(
axis.text = element_text(size=18),
axis.text.y = element_text(size=16),
legend.text=element_text(size=16),
legend.title = element_text(size=18),
strip.text.x = element_text(size = 16),
axis.title.y = element_text(size=18),
strip.background = element_rect(fill="grey60", colour="grey60"),
axis.text.x = element_blank(),
axis.ticks.x = element_blank(),
axis.title.x=element_blank())+
ggtitle("Association of Slope of Recovery between Muscle Strength and Time of Surgery")
ggboxplot(data=subset(lumbar_coef, !is.na(Group_paresis2)), x="node_slope", y="Slope", fill="Preop_muscle_strength_factor")+
ylab("Slope of Recovery")+
facet_grid(.~Group_paresis2, scales = "free_x")+
scale_fill_manual(values=c("steelblue4", "lightblue3"))+
labs(fill='Pre-operation Muscle Strength')+
theme(
axis.text = element_text(size=12),
axis.text.y = element_text(size=10),
axis.text.x = element_blank(),
axis.ticks.x = element_blank(),
axis.title.x=element_blank())+
ggtitle("Association of Slope of Recovery between Muscle Strength and Time of Surgery")
ggboxplot(data=subset(lumbar_coef, node_slope %in% c(3, 4)), x="node_slope", y="Slope")+ # %in%, not ==, to keep all rows from both nodes
xlab("Time of Surgery")+
scale_x_discrete(labels=c("3" = "Early", "4" = "Late/Very Late"))+
ylab("Slope of Recovery")+
ggtitle("Association between Time of Surgery and Slope of Recovery")
summary(aov(Muscle_strength ~ as.factor(Time)*Group_paresis, data = Lumbar_long))
jmv::ANOVA(
formula = Muscle_strength ~ Gender*Group_paresis,
data = Lumbar_long,
effectSize = "eta",
postHoc = ~ Gender*Group_paresis,
postHocCorr = c("none", "bonf"),
emmPlotData = TRUE,
emmTables = TRUE)
#Change of muscle strength based on substraction
lumbar_coef$Change_prelast <- lumbar_coef$Postop_muscle_strength_3m - lumbar_coef$Preop_muscle_strength
plot(ctree(Change_prelast~Duration_initial+Myotoma+Group_paresis+Age+Levels, data=subset(lumbar_coef, !is.na(Change_prelast))))
#Factor-ing of muscle strength?
lumbar_coef$Muscle_Factor <- as.factor(ifelse(lumbar_coef$Postop_muscle_strength_3m<3, "severe",
ifelse(lumbar_coef$Postop_muscle_strength_3m == 5, "recovered", "moderate")))
lumbar_coef$Muscle_Factor2 <- relevel(lumbar_coef$Muscle_Factor, ref = "severe")
lumbar_coef$Muscle_Factor3 <- as.factor(ifelse(lumbar_coef$Postop_muscle_strength_3m<4, "deteriorate",
ifelse(lumbar_coef$Postop_muscle_strength_3m == 5, "recovered", "unchanged")))
lumbar_coef_noNA <-subset(lumbar_coef, !is.na(Muscle_Factor2))
URP_muscle <- use_labels(lumbar_coef_noNA, ctree(Muscle_Factor2~Preop_muscle_strength+Duration_initial+Myotoma+Duration_paresis+Age+Levels+BMI+Gender,
data=..data, controls = ctree_control(testtype = "Bonferroni", maxdepth = 2)))
plot(URP_muscle, main="Decision Tree for Muscle Groups at 3 months")
# Muscle factor last
lumbar_coef$Muscle_Factor_last <- as.factor(ifelse(lumbar_coef$Postop_muscle_strength_last<3, "severe",
ifelse(lumbar_coef$Postop_muscle_strength_last == 5, "recovered", "moderate")))
lumbar_coef$Muscle_Factor_last <- relevel(lumbar_coef$Muscle_Factor_last, ref = "severe")
lumbar_coef$Muscle_Factor_last2 <- as.factor(ifelse(lumbar_coef$Postop_muscle_strength_last<4, "deteriorate",
ifelse(lumbar_coef$Postop_muscle_strength_last == 5, "recovered", "unchanged")))
lumbar_coef_noNA_last <-subset(lumbar_coef, !is.na(Muscle_Factor_last))
URP_muscle_last <- use_labels(lumbar_coef_noNA_last, ctree(Muscle_Factor_last~Preop_muscle_strength+Duration_initial+Myotoma+Duration_paresis+Age+Levels+BMI+Gender,
data=..data, controls = ctree_control(testtype = "Bonferroni")))
plot(URP_muscle_last, main="Decision Tree for Muscle Groups at LAST follow-up")
# reminder: expss masks where(), so always call party::where() explicitly
lumbar_coef_noNA$node_muscle <- party::where(URP_muscle)
URP_muscle_node <-lumbar_coef_noNA %>% group_by(node_muscle) %>% count(Muscle_Factor2)
URP_muscle_node$sum <- ifelse(URP_muscle_node$node_muscle==2, 74,
ifelse(URP_muscle_node$node_muscle==4,153,103))
URP_muscle_node$percent <- (URP_muscle_node$n/URP_muscle_node$sum)*100
URP_muscle_node$Preop_muscle_strength_factor <- as.factor(ifelse(URP_muscle_node$node_muscle=="4"|URP_muscle_node$node_muscle=="5","Pre-operation muscle strength>2",
"Pre-operation muscle strength=<2"))
URP_muscle_node$Group_paresis <- as.factor(ifelse(URP_muscle_node$node_muscle==2, "Not Significant",
ifelse(URP_muscle_node$node_muscle==4, "=<3d", ">3d")))
Muscle_barplot<-ggplot(subset(URP_muscle_node, !(node_muscle==2)), aes(x=Group_paresis, y=percent, fill=Muscle_Factor2))+
geom_bar(colour="black", stat="identity", position = "stack" )+
scale_fill_manual(values=c("darkolivegreen3", "darkseagreen4"))+
theme_bw() +
facet_grid(.~Preop_muscle_strength_factor, scales = "free_x")+
theme(axis.line = element_line(colour = "black"),
axis.text.x = element_text(size=16, colour = "black"),
axis.text.y = element_text(size=16, colour = "black"),
legend.text=element_text(size=16),
legend.title = element_text(size=18),
strip.text.x = element_text(size = 16),
axis.title.y = element_text(size=18),
axis.title.x = element_text(size=18),
text=element_text(family="NimbusRom"),
strip.background = element_rect(fill="grey60",colour="grey60"),
panel.grid.major = element_blank(),
panel.grid.minor = element_blank(),
panel.border = element_blank(),
panel.background = element_blank())+
labs(fill="Groups of Muslce Strength at 3 months", y = "Percentage of Individuals",
x="Timing of Surgery")
#Create barplot for muscle factor at last time point
lumbar_coef_noNA_last$node_muscle <- party::where(URP_muscle_last)
URP_muscle_node_last <-lumbar_coef_noNA_last %>% group_by(node_muscle) %>% count(Muscle_Factor_last)
URP_muscle_node_last$sum <- ifelse(URP_muscle_node_last$node_muscle==3, 57,
ifelse(URP_muscle_node_last$node_muscle==4,24,
ifelse(URP_muscle_node_last$node_muscle==6, 191,117)))
URP_muscle_node_last$percent <- (URP_muscle_node_last$n/URP_muscle_node_last$sum)*100
URP_muscle_node_last$Preop_muscle_strength_factor <- as.factor(ifelse(URP_muscle_node_last$node_muscle==3|URP_muscle_node_last$node_muscle==4,"Pre-operation muscle strength=<2",
"Pre-operation muscle strength>2"))
URP_muscle_node_last$Group_paresis <- as.factor(ifelse(URP_muscle_node_last$node_muscle==6|URP_muscle_node_last$node_muscle==7, "NS",
ifelse(URP_muscle_node_last$node_muscle==3, "=<6d", ">6d")))
ggplot(subset(URP_muscle_node_last, !(Preop_muscle_strength_factor=="Pre-operation muscle strength>2")), aes(x=Group_paresis, y=percent, fill=Muscle_Factor_last))+
geom_bar(stat="identity", position = position_stack(), width = 0.94)+
scale_fill_manual(values=c("steelblue4", "lightblue3", "palegreen1"))+
theme_bw() +
facet_grid(.~Preop_muscle_strength_factor, scales = "free_x")+
theme(axis.line = element_line(colour = "black"),
axis.text = element_text(size=14),
axis.text.x = element_text(size=12),
axis.text.y = element_text(size=12),
legend.text=element_text(size=12),
strip.text.x = element_text(size = 12),
panel.grid.major = element_blank(),
panel.grid.minor = element_blank(),
panel.border = element_blank(),
panel.background = element_blank())+
labs(title="Association between Muscle Strength and Time of Surgery",
fill="Groups of Muscle Strength at last follow-up", y = "Percentage of Individuals",
x="Timing of Surgery")
ggarrange(Outcomes_corplot, ggarrange(Muscle_barplot, Slope_boxplot,labels=c("B", "C"), legend="bottom", ncol=2), nrow=2, labels="A")
ggarrange(Muscle_barplot, Slope_boxplot,labels=c("A", "B"), legend="bottom", ncol=2)
#Subset data to create another barplot
Lumbar_3m <- select(lumbar_coef, "PatID", "Duration_paresis", "Group_paresis3", "Preop_muscle_strength", "Postop_muscle_strength_3m", "Postop_muscle_strength_last")
#3m vs last diff scores
Lumbar_3m$Diff_scores <- Lumbar_3m$Postop_muscle_strength_3m-Lumbar_3m$Preop_muscle_strength
Lumbar_3m$Diff_scores_last <- Lumbar_3m$Postop_muscle_strength_last-Lumbar_3m$Preop_muscle_strength
#3m vs last change scores
Lumbar_3m$Change_scores <- as.factor(ifelse(Lumbar_3m$Diff_scores>=1&Lumbar_3m$Postop_muscle_strength_3m==5, "recovered",
ifelse(Lumbar_3m$Diff_scores>=1&Lumbar_3m$Postop_muscle_strength_3m==4,"improved", "unchanged")))
Lumbar_3m$Change_scores_last <- as.factor(ifelse(Lumbar_3m$Diff_scores_last>=1&Lumbar_3m$Postop_muscle_strength_last==5, "recovered",
ifelse(Lumbar_3m$Diff_scores_last>=1&Lumbar_3m$Postop_muscle_strength_last==4,"improved", "unchanged")))
Lumbar_3m <- na.omit(Lumbar_3m)
Lumbar_3m$Muscle_Factor_pre <- as.factor(ifelse(Lumbar_3m$Preop_muscle_strength<3, "severe",
ifelse(Lumbar_3m$Preop_muscle_strength== 4, "mild", "moderate")))
#New data to create graph for change scores of 3m
Lumbar_3m_factor <-Lumbar_3m %>% group_by(Group_paresis3, Muscle_Factor_pre) %>% count(Change_scores)
Lumbar_3m_factor$percent <- ifelse(Lumbar_3m_factor$Group_paresis3=="=<3d"&Lumbar_3m_factor$Muscle_Factor_pre=="severe", (Lumbar_3m_factor$n/38)*100,
ifelse(Lumbar_3m_factor$Group_paresis3=="=<3d"&Lumbar_3m_factor$Muscle_Factor_pre=="mild",(Lumbar_3m_factor$n/44)*100,
ifelse(Lumbar_3m_factor$Group_paresis3=="=<3d"&Lumbar_3m_factor$Muscle_Factor_pre=="moderate", (Lumbar_3m_factor$n/109)*100,
ifelse(Lumbar_3m_factor$Group_paresis3==">3d"&Lumbar_3m_factor$Muscle_Factor_pre=="severe", (Lumbar_3m_factor$n/36)*100,
ifelse(Lumbar_3m_factor$Group_paresis3==">3d"&Lumbar_3m_factor$Muscle_Factor_pre=="mild", (Lumbar_3m_factor$n/36)*100, (Lumbar_3m_factor$n/67)*100
)))))
Lumbar_3m_factor$percent <- round(Lumbar_3m_factor$percent, digits = 1)
Lumbar_3m_factor$Change_scores <- relevel(Lumbar_3m_factor$Change_scores, ref = "unchanged")
#new data to create graph for change scores at last follow up
Lumbar_last <- subset(Lumbar_3m, !is.na(Postop_muscle_strength_last))
Lumbar_last_factor <-Lumbar_last %>% group_by(Group_paresis3, Muscle_Factor_pre) %>% count(Change_scores_last)
Lumbar_last_factor$percent <- ifelse(Lumbar_last_factor$Group_paresis3=="=<3d"&Lumbar_last_factor$Muscle_Factor_pre=="severe", (Lumbar_last_factor$n/41)*100,
ifelse(Lumbar_last_factor$Group_paresis3=="=<3d"&Lumbar_last_factor$Muscle_Factor_pre=="mild",(Lumbar_last_factor$n/56)*100,
ifelse(Lumbar_last_factor$Group_paresis3=="=<3d"&Lumbar_last_factor$Muscle_Factor_pre=="moderate", (Lumbar_last_factor$n/115)*100,
ifelse(Lumbar_last_factor$Group_paresis3==">3d"&Lumbar_last_factor$Muscle_Factor_pre=="severe", (Lumbar_last_factor$n/40)*100,
ifelse(Lumbar_last_factor$Group_paresis3==">3d"&Lumbar_last_factor$Muscle_Factor_pre=="mild", (Lumbar_last_factor$n/61)*100, (Lumbar_last_factor$n/76)*100
)))))
Lumbar_last_factor$percent <- round(Lumbar_last_factor$percent, digits = 1)
Lumbar_last_factor$Change_scores_last <- relevel(Lumbar_last_factor$Change_scores_last, ref = "unchanged")
#graph for 3m change scores
ggplot(Lumbar_3m_factor, aes(x=Muscle_Factor_pre, y=percent, fill=Change_scores))+
geom_bar(colour="black", stat="identity", position = "stack" )+
scale_fill_manual(values=c("coral1", "goldenrod2", "seagreen4"))+
theme_bw() +
facet_grid(.~Group_paresis3, scales = "free_x")+
theme(axis.line = element_line(colour = "black"),
axis.text.x = element_text(size=14, colour = "black"),
axis.text.y = element_text(size=14, colour = "black"),
legend.text=element_text(size=14),
legend.title = element_text(size=16),
legend.position="top",
strip.text.x = element_text(size = 14),
axis.title.y = element_text(size=16),
axis.title.x = element_text(size=16),
panel.grid.major = element_blank(),
panel.grid.minor = element_blank(),
panel.border = element_blank(),
panel.background = element_blank())+
geom_text(aes(label=paste0((percent))),
position=position_stack(vjust=0.5),size=3)+
labs(fill="Change of Muscle Strength at 3 months", y = "Percentage of Individuals",
x="Muscle Groups Pre-operation")
#graph for last follow up change scores
ggplot(Lumbar_last_factor, aes(x=Muscle_Factor_pre, y=percent, fill=Change_scores_last))+
geom_bar(colour="black", stat="identity", position = "stack" )+
scale_fill_manual(values=c("coral1", "goldenrod2", "seagreen4"))+
theme_bw() +
facet_grid(.~Group_paresis3, scales = "free_x")+
theme(axis.line = element_line(colour = "black"),
axis.text.x = element_text(size=14, colour = "black"),
axis.text.y = element_text(size=14, colour = "black"),
legend.text=element_text(size=14),
legend.title = element_text(size=16),
legend.position="top",
strip.text.x = element_text(size = 14),
axis.title.y = element_text(size=16),
axis.title.x = element_text(size=16),
panel.grid.major = element_blank(),
panel.grid.minor = element_blank(),
panel.border = element_blank(),
panel.background = element_blank())+
geom_text(aes(label=paste0((percent))),
position=position_stack(vjust=0.5),size=3)+
labs(fill="Change of Muscle Strength at last follow up", y = "Percentage of Individuals",
x="Muscle Groups Pre-operation")
lumbar_log <- multinom(Muscle_Factor2 ~ Preop_muscle_strength + Group_paresis, data=lumbar_coef)
summary(lumbar_log)
z_log <- summary(lumbar_log)$coefficients/summary(lumbar_log)$standard.errors
p_log <- (1 - pnorm(abs(z_log), 0, 1)) * 2
exp(coef(lumbar_log))
head(prob <- fitted(lumbar_log))
# use the relabelled factor levels, otherwise predict() fails on unseen levels
paresis_levels <- c("<48h(ultra early)", "2-7d(early)", ">7d(delayed)")
prob_paresis <- data.frame(Group_paresis = paresis_levels, Preop_muscle_strength = mean(lumbar_coef$Preop_muscle_strength))
predict(lumbar_log, newdata = prob_paresis, "probs")
prob_muscle <- data.frame(Group_paresis = rep(paresis_levels), Preop_muscle_strength = rep(c(0:5),
3))
prob_both <- cbind(prob_muscle, predict(lumbar_log, newdata =prob_muscle, type = "probs", se = TRUE))
prob_long <- melt(prob_both, id.vars = c("Group_paresis", "Preop_muscle_strength"), value.name = "probability")
ggplot(prob_long, aes(x = Preop_muscle_strength, y = probability, colour = Group_paresis)) + geom_line() + facet_grid(variable ~
., scales = "free")
#Descriptive Stats
library(table1)
descrip <- table1(~as.factor(Levels)+as.factor(Gender)+Age+BMI+Preop_muscle_strength+Muscle_Factor2+Slope+Muscle_Factor_last|Group_paresis3, data=lumbar_coef)
write.table(descrip, file="descrip.txt")
#Correlation matrix
Lumbar_330 = filter(lumbar_coef, PatID %in% c(1:330))
Outcomes <- select(Lumbar_330, starts_with("Preop_muscle"), starts_with("Postop_muscle"))
colnames(Outcomes)=c("Pre-op Muscle Strength","Post-op Muscle Strength","Post-op Muscle Strength 6w","Post-op Muscle Strength 3m","Post-op Muscle Strength last")
Cor_outcomes <- cor(Outcomes, use = "complete.obs")
Outcomes_corplot<-ggcorrplot(Cor_outcomes, type="lower", lab = TRUE, outline.color = "black")
# New dataset for surgery
Surgery$`time of injury` <- format(as.POSIXct(Surgery$`time of injury` ,format="%H:%M:%S"),"%H:%M")
Surgery$`Time of surgery` <- format(as.POSIXct(Surgery$`Time of surgery` ,format="%H:%M:%S"),"%H:%M")
Surgery$Date_time_injury <- as.POSIXct(paste(Surgery$`Date of injury`, Surgery$`time of injury`), format="%Y-%m-%d %H:%M")
Surgery$Date_time_surgery <- as.POSIXct(paste(Surgery$`Surgery`, Surgery$`Time of surgery`), format="%Y-%m-%d %H:%M")
Surgery$Time_diff_surgery <- as.numeric(difftime(Surgery$Date_time_surgery, Surgery$Date_time_injury, units="mins"))
Surgery$Time_diff_discharge <- as.numeric(difftime(Surgery$Discharge, Surgery$Admission, units="days"))
Surgery$Time_diff_EMSCI <- as.numeric(difftime(Surgery$`Last EMSCI`, Surgery$`First EMSCI`, units="days"))
Last_LEMS<-ctree(`Last LEMS`~Time_diff_surgery+Time_diff_discharge+Time_diff_EMSCI+`1_LEMS`, data=subset(Surgery, !is.na(`Last LEMS`)), controls = ctree_control(testtype = "Bonferroni"))
plot(Last_LEMS, main="Last LEMS")
Surgery$node_LEMS <- party::where(Last_LEMS)
Surgery %>% group_by(node_LEMS) %>% summarise(mean=mean(`Last LEMS`), median=median(`Last LEMS`))
Last_UEMS<-ctree(`Last UEMS`~Time_diff_surgery+Time_diff_discharge+Time_diff_EMSCI+`1_UEMS`, data=subset(Surgery, !is.na(`Last UEMS`)&Levels==1))
plot(Last_UEMS, main="Last UEMS (Cervicals)")
Last_TMS<-ctree(`Last TMS`~Time_diff_surgery+Time_diff_discharge+Time_diff_EMSCI+Levels+`1_TMS`, data=subset(Surgery, !is.na(`Last TMS`)))
plot(Last_TMS, main="Last TMS")
Last_PP<-ctree(`Last PP`~Time_diff_surgery+Time_diff_discharge+Time_diff_EMSCI+Levels+`1_PP`, data=subset(Surgery, !is.na(`Last PP`)))
plot(Last_PP, main="Last PP")
Last_LT<-ctree(`Last LT`~Time_diff_surgery+Time_diff_discharge+Time_diff_EMSCI+Levels+`1_LT`, data=subset(Surgery, !is.na(`Last LT`)))
plot(Last_LT, main="Last LT")
Last_AIS<-ctree(`last AIS`~Time_diff_surgery+Time_diff_discharge+Time_diff_EMSCI+Levels+`first AIS`, data=subset(Surgery, !is.na(`last AIS`)))
plot(Last_AIS, main="Last AIS")
Last_SCIM<-ctree(`Last SCIM`~Time_diff_surgery+Time_diff_discharge+Time_diff_EMSCI+Levels+`1_SCIM`, data=subset(Surgery, !is.na(`Last SCIM`)))
plot(Last_SCIM, main="Last SCIM")
|
ea42ab876b1c73a4a589077b2c125df0a3337bda
|
b2f61fde194bfcb362b2266da124138efd27d867
|
/code/dcnf-ankit-optimized/Results/QBFLIB-2018/E1/Experiments/Faber-Leone-Maratea-Ricca/Strategic_Companies/x165.0/x165.0.R
|
bd84812fe97ea991228a8f77b64f85df2db927db
|
[] |
no_license
|
arey0pushpa/dcnf-autarky
|
e95fddba85c035e8b229f5fe9ac540b692a4d5c0
|
a6c9a52236af11d7f7e165a4b25b32c538da1c98
|
refs/heads/master
| 2021-06-09T00:56:32.937250
| 2021-02-19T15:15:23
| 2021-02-19T15:15:23
| 136,440,042
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 672
|
r
|
x165.0.R
|
c DCNF-Autarky [version 0.0.1].
c Copyright (c) 2018-2019 Swansea University.
c
c Input Clause Count: 15429
c Performing E1-Autarky iteration.
c Remaining clauses count after E-Reduction: 15429
c
c Input Parameter (command line, file):
c input filename QBFLIB/Faber-Leone-Maratea-Ricca/Strategic_Companies/x165.0.qdimacs
c output filename /tmp/dcnfAutarky.dimacs
c autarky level 1
c conformity level 0
c encoding type 2
c no.of var 5119
c no.of clauses 15429
c no.of taut cls 0
c
c Output Parameters:
c remaining no.of clauses 15429
c
c QBFLIB/Faber-Leone-Maratea-Ricca/Strategic_Companies/x165.0.qdimacs 5119 15429 E1 [] 0 165 4954 15429 NONE
|
52fe0edbadf9237761306e29a6c15a4f908ecf75
|
ffdea92d4315e4363dd4ae673a1a6adf82a761b5
|
/data/genthat_extracted_code/CARLIT/examples/Community_data.Rd.R
|
4bc542a66bb50f2328689eb3eb3a72bd7e14f9da
|
[] |
no_license
|
surayaaramli/typeRrh
|
d257ac8905c49123f4ccd4e377ee3dfc84d1636c
|
66e6996f31961bc8b9aafe1a6a6098327b66bf71
|
refs/heads/master
| 2023-05-05T04:05:31.617869
| 2019-04-25T22:10:06
| 2019-04-25T22:10:06
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 225
|
r
|
Community_data.Rd.R
|
library(CARLIT)
### Name: Community_data
### Title: Example Data Set for the Function carlit().
### Aliases: Community_data
### Keywords: datasets CARLIT carlit
### ** Examples
data(Community_data)
str(Community_data)
|
842fad0be12d8d3deab83c748dfc4fb62fe174d3
|
eaf9c4aa37543dda596832f4e20a2da8c327392b
|
/ptgbtwn.R
|
cd649f5d1f1b0f04cb6655af824c600c2d3ecd2d
|
[] |
no_license
|
AceFire6/ashci_analysis
|
97ba8417c676b8c953e3b8d4b3131727783ce880
|
1c0aaaa29d11c8169755feedc37eb2f8f8e7008d
|
refs/heads/master
| 2021-01-19T02:19:06.807733
| 2016-08-14T17:04:33
| 2016-08-14T17:04:33
| 65,676,968
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,982
|
r
|
ptgbtwn.R
|
data = read.csv("ptgbtwn.csv")
View(data)
data$Subject = factor(data$Subject)
summary(data)
library(plyr)
ddply(data, ~ Device * Cursor, function(d) summary(d$Errors))
ddply(data, ~ Device * Cursor, summarize, Errors.mean=mean(Errors), Errors.sd=sd(Errors))
boxplot(Errors ~ Cursor * Device, data=data, ylab="Errors")
with(data, interaction.plot(Cursor, Device, Errors, ylim=c(0, max(data$Errors)))) # interaction plot
library(fitdistrplus)
fit = fitdist(data[data$Cursor == "point" & data$Device == "mouse",]$Errors, "pois", discrete=TRUE)
gofstat(fit) # goodness-of-fit test
fit = fitdist(data[data$Cursor == "point" & data$Device == "touchpad",]$Errors, "pois", discrete=TRUE)
gofstat(fit) # goodness-of-fit test
fit = fitdist(data[data$Cursor == "point" & data$Device == "trackball",]$Errors, "pois", discrete=TRUE)
gofstat(fit) # goodness-of-fit test
fit = fitdist(data[data$Cursor == "area" & data$Device == "mouse",]$Errors, "pois", discrete=TRUE)
gofstat(fit) # goodness-of-fit test
fit = fitdist(data[data$Cursor == "area" & data$Device == "touchpad",]$Errors, "pois", discrete=TRUE)
gofstat(fit) # goodness-of-fit test
fit = fitdist(data[data$Cursor == "area" & data$Device == "trackball",]$Errors, "pois", discrete=TRUE)
gofstat(fit) # goodness-of-fit test
fit = fitdist(data[data$Cursor == "bubble" & data$Device == "mouse",]$Errors, "pois", discrete=TRUE)
gofstat(fit) # goodness-of-fit test
fit = fitdist(data[data$Cursor == "bubble" & data$Device == "touchpad",]$Errors, "pois", discrete=TRUE)
gofstat(fit) # goodness-of-fit test
fit = fitdist(data[data$Cursor == "bubble" & data$Device == "trackball",]$Errors, "pois", discrete=TRUE)
gofstat(fit) # goodness-of-fit test
contrasts(data$Cursor) <- "contr.sum"
contrasts(data$Device) <- "contr.sum"
m = glm(Errors ~ Cursor * Device, data=data, family=poisson)
library(car) # Anova() with Type III tests comes from car
Anova(m, type=3)
library(multcomp) # for glht
library(lsmeans) # for lsm
summary(glht(m, lsm(pairwise ~ Device * Cursor)), test=adjusted(type="holm"))
|
d7c79c111d72985e38b117a19cfe50068467bd78
|
b93485a8888c11b43df2c974dfc695fee7109f75
|
/plot3.R
|
90c15a20b24ff70ec38b067e971f7ddc62c2e55d
|
[] |
no_license
|
Ecofred/ExData_Plotting1
|
545489ba4f7daede718ccb998ccf75f1723ed571
|
059d48504cb4280924597248c9bc3136a4477527
|
refs/heads/master
| 2020-04-14T12:48:06.897960
| 2019-01-02T21:50:41
| 2019-01-02T21:50:41
| 163,851,053
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 687
|
r
|
plot3.R
|
# TITLE: plot3
# avoid time consuming process if object is loaded
if(!exists('house_power')) {source('household_power_import.R')}
png(filename = 'plot3.png', width = 480, height = 480)
# names(house_power)
with(house_power,
{
# 'none' plot
plot(datetime, Sub_metering_1,
type = 'n',
ylab = 'Energy sub metering')
# the 3 lines
lines(datetime, Sub_metering_1, col='black')
lines(datetime, Sub_metering_2, col='red')
lines(datetime, Sub_metering_3, col='blue')
legend('topright', paste0('Sub_metering_', 1:3),
col = c(1,'red','blue'),
lty=1)
})
dev.off()
|
5f8e8988440229cf6fbd7f13d709bf5e82603ce2
|
a22be77ec187759412eb82debf250adb038e7939
|
/R HW/Stat HM1 MES5AC.R
|
81b855d93dc005582e1ba6a2212680c7b32997dd
|
[] |
no_license
|
meganstiles/Stat_6021
|
47ce8d0fac3414025d3ca8560ee8880ef395dd85
|
79b45e8fe7b6dc37a96f1fd04c8922861f5e4b61
|
refs/heads/master
| 2021-01-11T15:09:10.465593
| 2017-01-28T18:14:57
| 2017-01-28T18:14:57
| 80,301,097
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 4,061
|
r
|
Stat HM1 MES5AC.R
|
#Megan Stiles MES5AC
library(MPV)
#Question 2.1
data(table.b1)
attach(table.b1)
#Part A
nfl.lm<- lm(y~x8, data= table.b1)
nfl.lm #B1 =-0.007025, B0 = 21.788251
#Part B
anova(nfl.lm) #p-value = 7.381e-06; because the p-value is < 0.05, we reject Ho: the regression is significant
#Analysis of Variance Table
#Response: y
# Df Sum Sq Mean Sq F value Pr(>F)
#x8 1 178.09 178.092 31.103 7.381e-06 ***
#Residuals 26 148.87 5.726
#Part C
confint(nfl.lm, level = 0.95) #(-0.009614347, -0.004435854)
#Part D
summary(nfl.lm)
#R^2 = 0.5447 = 54.47% of the total variability in y is explained by this model
#Part E
new_table<- data.frame(x8 = 2000)
predict(nfl.lm, new_table, interval = "confidence") #(6.765753, 8.710348)
#Problem 2.2
table2<- data.frame(x8=1800)
predict(nfl.lm, table2, interval = "none") #Point Estimate = 9.14307 games
#Prediction Interval
predict(nfl.lm,table2, interval = "prediction", level= 0.90) #(13.34975, 4.936392)
#Problem 2.4
data("table.b3")
attach(table.b3)
#Part A
lm.mpg<- lm(y~x1, data=table.b3)
lm.mpg # B1 = -0.04736 B0 = 33.72268
#Part b
anova(lm.mpg) #P-value = 3.743e-11, because the p value is < 0.05 we reject H0 and the regression is significant.
#Analysis of Variance Table
#Response: y
# Df Sum Sq Mean Sq F value Pr(>F)
#x1 1 955.72 955.72 101.74 3.743e-11 ***
#Residuals 30 281.82 9.39
#Part C
summary(lm.mpg)
#R^2 = 0.7723 = 77.23% percent of total mpg variability accounted for by the linear relationship with engine displacement
#Part D
new_displacement<- data.frame(x1= 275)
predict(lm.mpg, new_displacement, interval = "confidence") #(19.58807, 21.80952)
#Part E
predict(lm.mpg, new_displacement, interval = "none") #20.69879
predict(lm.mpg, new_displacement, interval = "prediction") #(14.34147, 27.05611)
#Part F:
#The prediction interval is wider than the confidence interval. This is because the confidence interval is trying to
#estimate the mean of y given a value of x, whereas the prediction interval is trying to predict a future value of y, given a
#value of x. There is more variance in trying to predict a future value than just the mean so the prediction interval is wider.
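#In formula terms: se(mean at x0)^2 = MSRes*(1/n + (x0 - xbar)^2/Sxx), while
#se(prediction at x0)^2 = MSRes*(1 + 1/n + (x0 - xbar)^2/Sxx); the extra MSRes
#term is the irreducible noise of a single new observation, hence the wider interval.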
#Problem 2.5
#Part a
lm.mpg2<- lm(y~x10, data= table.b3)
lm.mpg2 #B1 = -0.005752, B0 = 40.852431
#The linear models have a similar R^2 which could indicate collinearity between vehicle weight and engine displacement.
#The engine displacement model also has a smaller SSRes, as well as a slightly higher R^2, so this model is a slightly better fit.
#Part B
anova(lm.mpg2)
#Analysis of Variance Table
#Response: y
# Df Sum Sq Mean Sq F value Pr(>F)
#x10 1 921.53 921.53 87.482 2.121e-10 ***
#Residuals 30 316.02 10.53
#Part c
summary(lm.mpg2)
#R^2 = 0.7446 = 74.46 % of variability is explained by linear relationship between vehicle weight and MPG
#Problem 2.12
data(p2.12)
attach(p2.12)
#Part a
usage.lm<- lm(usage~temp, data = p2.12)
usage.lm #B1 = 9.208, B0 = -6.332
#Part b
anova(usage.lm) #P-value = 2.2e-16 which is <0.05, so we reject Ho and the regression model is significant.
#Analysis of Variance Table
#Response: usage
# Df Sum Sq Mean Sq F value Pr(>F)
#temp 1 280590 280590 74123 < 2.2e-16 ***
#Residuals 10 38 4
#Part c
usage.lm
#slope = 9.208, meaning that for every 1 degree increase, usage increases by 9.2 units (1=1000 units), so approximately 9,200 units
#Part D
new_temp<- data.frame(temp = 58)
predict(usage.lm, new_temp, interval= "prediction", level = 0.99) #(521.2237, 534.2944)
# Problem 2.29
#Since the standard error of the slope = sqrt(MSRes/Sxx), we want to either maximize Sxx or minimize MSRes in order
#to keep the standard error low. Since Sxx = the sum of (xi - x-bar)^2, we would want to take samples of x close to -1 and 1
#as this would increase Sxx and thus lower the standard error of the slope. Depending on the type of data, this may be impractical
#since it may not be possible to sample this specific subset.
|
58cf50d2221343bbb6013bc2340a5e9816f8dc6c
|
c1de86148c60c665bc59475a279df6a07651773c
|
/scripts/regressions/Reg_3_script.R
|
d1087b2ae091f81fe7049d619cf0fb773e122c2f
|
[] |
no_license
|
jakewalker56/ml-lab
|
ebbbabb1c7b50341cd9bf6154b2d42d6d3898336
|
a7c4839991bd6d95941216589482672093bed446
|
refs/heads/master
| 2020-05-21T22:43:00.190371
| 2017-02-11T10:34:02
| 2017-02-11T10:34:02
| 47,048,259
| 1
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,902
|
r
|
Reg_3_script.R
|
setwd("~/Dropbox/Booth/Fall 2013/Regression/Data")
#1.1.i
news = read.csv("newspaper.csv")
plot(news$daily, news$Sunday,pch=20)
#1.1.ii
plot(news$daily, news$Sunday, pch=20)
news_lm = lm(Sunday ~ daily, news)
abline(news_lm, col="red")
confint(news_lm)
legend("topleft", "y = 1.2767*x + 76.01", fill = 2)
#TODO: figure out how to do this by hand
# qt(0.025, length(news[,1]) - 2)
# qt(0.975, length(news[,1]) - 2) * summary(news_lm)$sigma
# sx =
# summary(news_lm)$sigma
#1.1.iii
summary(news_lm)
#1.1.iv
Xf <- data.frame(daily=c(225))
predict(news_lm, newdata=Xf, interval="prediction", se.fit=TRUE)
summary(news_lm)$sigma
(27.57 ^2 + 154.32 ^2)^0.5
#todo: do by hand
#expected_value = ((summary(news_lm)$sigma)^2 + (sfit)^2)^0.5
#1.3.i
#1.3.ii
#1.3.iii
crime = read.csv("crime.csv")
crime$Ed
crime$LF
crime$W
summary(crime)
plot(crime$Ed, crime$CR, xlab="Ed", ylab="CR")
model=lm(CR ~ Ed, crime)
abline(model)
legend("topleft", paste("y = ", round(model$coefficients[2], digits = 2), "x ", round(model$coefficients[1], digits=2)))
summary(model)
plot(crime$LF, crime$CR, xlab = "LF", ylab="CR")
model=lm(CR ~ LF, crime)
abline(model)
legend("topleft", paste("y = ", round(model$coefficients[2], digits = 2), "x ", round(model$coefficients[1], digits=2)))
summary(model)
plot(crime$W, crime$CR, xlab = "W", ylab="CR")
model=lm(CR ~ W, crime)
abline(model)
legend("topleft", paste("y = ", round(model$coefficients[2], digits = 2), "x + ", round(model$coefficients[1], digits=2)))
summary(model)
plot(crime$Ed, crime$LF, xlab = "Ed", ylab="LF")
abline(lm(LF ~ Ed, crime))
plot(crime$Ed, crime$W, xlab="Ed", ylab="W")
abline(lm(W ~ Ed, crime))
plot(crime$LF, crime$W, xlab="LF", ylab="W")
abline(lm(W ~ LF, crime))
residuals = crime$CR - predict(lm(CR ~ W, crime))
residuals
plot(crime$W, residuals, xlab="W")
#1.3.iv
crime_lm = lm(CR ~ W, crime)
w = data.frame(W=c(275))
predict(crime_lm, w, interval="prediction", level=0.9)
#1.4.i
tractors = read.csv("tractor.csv")
model=lm(cost ~ age, tractors)
age = data.frame(age=c(3))
predict(model, age, interval="prediction", se.fit=TRUE)
summary(model)$sigma^2
76.94126^2
(5919 + 96277)^0.5
ranges = data.frame(age=seq(0,10, 0.5))
predictions=predict(model,ranges, interval="prediction")
plot(ranges$age, predictions[,1], ylim=c(-500,2500), xlab="years", ylab="cost")
lines(ranges$age, predictions[,2])
lines(ranges$age, predictions[,3])
anova(model)$Sum[2]
anova(model)$Sum[1]
#1.2
SST=1.50773 * 10^12
SSE=18007.56^2*16
SSR = SST - SSE
SSE
SSR
R_2 = SSR/SST
e_val = 0.9821 * 822000 + 6805
R_2
(822000 - 622186)^2/(17 * 302724^2)
(1 + 1/18 + (822000 - 622186)^2/(17 * 302724^2))^0.5
se = 18007.56 * 1.0398
qt(0.975, 16) * se + e_val
qt(0.025, 16) * se + e_val
se = 0.01443
e_val = 0.9821
qt(0.975, 16) * se + e_val
qt(0.025, 16) * se + e_val
summary(lm(Ed ~ CR, crime))
summary(lm(CR ~ Ed, crime))
18007.56^2
0.9966^0.5
SST/17
?predict
|
6e54e784dcc546b50c7794f0c0c44fb701559b6d
|
45d84d3fcb3bda679bbab5b153fdff66c9565930
|
/R/heatmap.R
|
34085f819e4e35cb802c984481d4442b872a3a02
|
[] |
no_license
|
jamebluntcc/myRtools
|
14e3ad3dba1405389be3e786d77e53f036b13d85
|
2fae680d1d6fe8ad512bfd50f4fdc8f4f0c5b81d
|
refs/heads/master
| 2020-12-02T19:50:16.728643
| 2017-07-11T08:46:22
| 2017-07-11T08:46:22
| 96,397,179
| 2
| 0
| null | 2017-07-11T02:43:59
| 2017-07-06T06:40:20
|
R
|
UTF-8
|
R
| false
| false
| 7,674
|
r
|
heatmap.R
|
#' Creates a heatmap plot; forked from raivokolde/pheatmap and wrapped to make it simpler
#'
#' The function also allows aggregating the rows using kmeans clustering. This is
#' advisable if the number of rows is so big that R cannot handle their hierarchical
#' clustering.
#'
#' @param data numeric matrix of the values to be plotted.
#' @param Color vector of colors used in the heatmap.
#' @param borderColor color of cell borders on the heatmap; use \code{NA} if no border should be drawn.
#' @param Scale character indicating if the values should be centered and scaled in
#' either the row direction or the column direction, or none. Corresponding values are
#' \code{"row"}, \code{"column"} and \code{"none"}; default is \code{"column"}.
#' @param showRowNames boolean specifying if row names are shown.
#' @param showColNames boolean specifying if column names are shown.
#' @param clusterRows boolean value determining if rows should be clustered, or an \code{hclust} object.
#' @param clusterCols boolean value determining if columns should be clustered, or an \code{hclust} object.
#' @param clusterDistanceRows distance measure used in clustering rows. Possible
#' values are \code{"correlation"} for Pearson correlation and all the distances
#' supported by \code{\link{dist}}, such as \code{"euclidean"}, etc. If the value is none
#' of the above, it is assumed that a distance matrix is provided.
#' @param clusterDistanceCols distance measure used in clustering columns. Possible
#' values are the same as for \code{clusterDistanceRows}.
#' @param clusterMethod clustering method used. Accepts the same values as
#' \code{\link{hclust}}.
#' @param treeHeightRow the height of the row dendrogram, if rows are clustered.
#' Default value 0 (hidden).
#' @param clusterLegend logical to determine if the legend should be drawn or not.
#' @param Main the title of the plot.
#' @param fontSize base fontsize for the plot.
#' @param fontSizeRow fontsize for rownames (default: \code{fontSize}).
#' @param fontSizeCol fontsize for colnames (default: \code{fontSize}).
#' @param clusterGroup a list of character vectors dividing the columns into groups; default is \code{NA}.
#' @param saveType output format: \code{"both"}, \code{"pdf"} or \code{"png"}.
#' @param fileName file name without extension; defaults to \code{"pheatmap"}.
#' @param filePath file path where to save the picture.
#' @param width manual option for determining the output file width in inches.
#' @param height manual option for determining the output file height in inches.
#' @return A heatmap plot file of pdf and/or png.
#' @export
#'
heatmap_plot <- function(data,Scale = "column",showRowNames = F,showColNames=TRUE,borderColor = "grey",
clusterRows = TRUE,clusterCols = TRUE,
clusterDistanceRows = "euclidean",clusterDistanceCols = "euclidean",
clusterMethod = "complete",treeHeightRow = 0,
clusterLegend = TRUE,Main = NA,fontSize = 10,
fontSizeRow = fontSize,fontSizeCol = fontSize,
clusterGroup = NA,
Color = colorRampPalette(rev(RColorBrewer::brewer.pal(n=7,name="RdYlGn")))(100),
saveType = c("both","pdf","png"),fileName = NULL,filePath = "",width = 8,height = 6
){
if(!any(is.na(clusterGroup))){
cluster_group <- clusterGroup #list no factor
if(is.null(names(cluster_group))){
names(cluster_group) <- paste0('group',1:length(cluster_group))
}
cluster_group_vector = NULL
for(i in 1:length(cluster_group)){
cluster_group_vector <- c(cluster_group_vector,rep(names(cluster_group)[i],length(cluster_group[[i]])))
}
annotationCol <- data.frame(group = rep('group',length(cluster_group_vector)))
annotationCol$group <- cluster_group_vector
annColors <- list(group=NULL)
totalColor <- colorRampPalette(om_pal()(9))(length(cluster_group))
names(totalColor) <- unique(cluster_group_vector)
annColors$group <- totalColor
rownames(annotationCol) <- colnames(data) #mapping
}else{
annotationCol <- NA
annColors <- NA
}
if(is.null(fileName)){
heatmap_file <- 'pheatmap'
}else{
heatmap_file <- fileName
}
savetype <- match.arg(saveType)
# single drawing routine shared by every output device
draw_heatmap <- function(){
pheatmap::pheatmap(mat = data,scale = Scale,color = Color,border_color = borderColor,
cluster_rows = clusterRows,cluster_cols = clusterCols,
clustering_distance_rows = clusterDistanceRows,
clustering_distance_cols = clusterDistanceCols,
show_rownames = showRowNames,show_colnames = showColNames,
clustering_method = clusterMethod,main = Main,legend = clusterLegend,
fontsize = fontSize,fontsize_col = fontSizeCol,fontsize_row = fontSizeRow,
annotation_col = annotationCol,annotation_colors = annColors,treeheight_row = treeHeightRow)
}
if(savetype %in% c('both','pdf')){
pdf(file = paste0(filePath,heatmap_file,'.pdf'),width = width,height = height)
draw_heatmap()
dev.off()
}
if(savetype %in% c('both','png')){
png(filename = paste0(filePath,heatmap_file,'.png'),width = width,height = height,units = "in",res = 300,type = "cairo")
draw_heatmap()
dev.off()
}
}
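# A minimal usage sketch (illustrative only; the matrix and group names below
# are invented, and writing files assumes filePath is writable). Guarded with
# if (FALSE) so it never runs when the package file is sourced.
if (FALSE) {
  mat <- matrix(rnorm(200), nrow = 20,
                dimnames = list(paste0("gene", 1:20), paste0("sample", 1:10)))
  heatmap_plot(mat, Scale = "row",
               clusterGroup = list(control = paste0("sample", 1:5),
                                   treated  = paste0("sample", 6:10)),
               saveType = "png", fileName = "demo_heatmap")
}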
|
91e66e88192ee247b1cefd83ac5f75b9ed3e5cb9
|
82f144c9d095217772f8bac0722c4012652fa14f
|
/man/ttest1.Rd
|
1a4dc1333f245911ff9f32db17cff6175be3f18e
|
[] |
no_license
|
RfastOfficial/Rfast
|
41cb1a922c3fc77b8045e5e00a031e012374c261
|
11cc9fc68b679eccfd6d0453c7f267e7f163cf41
|
refs/heads/master
| 2023-09-01T08:32:01.112957
| 2023-07-03T13:44:28
| 2023-07-03T13:44:28
| 213,182,742
| 107
| 15
| null | 2023-07-03T13:44:30
| 2019-10-06T14:25:53
|
C++
|
UTF-8
|
R
| false
| false
| 1,212
|
rd
|
ttest1.Rd
|
\name{One sample t-test for a vector}
\alias{ttest1}
\title{
One sample t-test for a vector
}
\description{
One sample t-test for a vector.
}
\usage{
ttest1(x, m, alternative = "unequal", logged = FALSE, conf = NULL)
}
\arguments{
\item{x}{
A numerical vector with the data.
}
\item{m}{
The mean value under the null hypothesis.
}
\item{alternative}{
The alternative hypothesis, "unequal", "greater" or "less".
}
\item{logged}{
Should the p-values be returned (FALSE) or their logarithm (TRUE)?
}
\item{conf}{
If you want a confidence interval supply the confidence level.
}
}
\details{
The usual one sample t-test is implemented, only faster.
}
\value{
A list including:
\item{res}{
A two valued vector with the test statistic and its (logged) p-value.
}
\item{ci}{
In the case you supplied a number in the input argument "conf" the relevant confidence interval will
be returned as well.
}
}
\author{
Michail Tsagris
R implementation and documentation: Michail Tsagris <mtsagris@uoc.gr>
}
%\note{
%% ~~further notes~~
%}
\seealso{
\code{\link{ttest}, \link{anova1}, \link{ttests}
}
}
\examples{
x = rnorm(500)
res<-t.test(x, mu = 0)
res<-ttest1(x, 0, conf = 0.95)
}
\keyword{ One sample t-test }
|
008ff317b919535e74ee98ac21b44095f810c72c
|
a722dfeef06ecf4e3a3820039eb1a1942c29b546
|
/src/01_process_data.R
|
76fbff39d20d13e5166917cf2988ef5b55f209cb
|
[] |
no_license
|
narenschandran/cytopred
|
5e5fcb6e2abe4c432c3d8afeda17df0a0dba277f
|
617666e0638733f52c08861b89dac36a86004775
|
refs/heads/master
| 2020-03-25T15:42:55.750421
| 2018-11-03T12:44:55
| 2018-11-03T12:44:55
| 143,897,992
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 952
|
r
|
01_process_data.R
|
# Ensuring that nothing else is loaded
rm(list=ls())
gc()
# Loading basic data
source("./setup_scripts/base_paths.R")
source("./setup_scripts/prognosis.R")
clear_temp_data <- function(bdata) {
env_dat <- ls(envir=globalenv())
clear_dat <- env_dat[!(env_dat %in% bdata)]
rm(list=clear_dat, envir=globalenv())
gc()
}
basic_data <- c(ls(), "basic_data")
# Loading the main scripts
print("Processing microarray .soft files")
source("./processing_scripts/01_process_microarray_sample_description.R")
clear_temp_data(basic_data)
print("Processing microarray expression data")
source("./processing_scripts/02_process_microarray_expression_data.R")
clear_temp_data(basic_data)
print("Processing rnaseq data")
source("./processing_scripts/03_process_rnaseq_data.R")
clear_temp_data(basic_data)
print("All done!")
|
287a52069fbd32471710263263e647dced7f56d5
|
e978ec2ee5db0ec11acfde6a317098b1768c0dbe
|
/loading-data/lsoa-flow.R
|
563c8f68eda34263d2069401d919acfa9aada25e
|
["MIT"] |
permissive
|
Robinlovelace/pct
|
192d664fedb91bb73dcd6d728a00cca2eefa3643
|
c411c09ac3cbd095791f316642593ccb9e345a3c
|
refs/heads/master
| 2021-01-15T22:28:39.830046
| 2015-11-18T15:14:39
| 2015-11-18T15:14:39
| 29,705,312
| 11
| 7
| null | 2015-08-26T07:50:39
| 2015-01-22T23:12:34
|
TeX
|
UTF-8
|
R
| false
| false
| 807
|
r
|
lsoa-flow.R
|
source("set-up.R")
dsource <- "F:/flow-data/WM12EW[CT0489]_lsoa.zip"
unzip(zipfile = dsource, exdir = tempdir())
list.files(tempdir())
flow <- read_csv(file.path(tempdir(), "WM12EW[CT0489]_lsoa.csv"))
object.size(flow) / 1000000000 # 8 gb of data!!!
# reduce to top n flows
flo <- top_n(flow, n = 10000, wt = flow$AllMethods_AllSexes_Age16Plus)
flo <- flo[ !flo$`Area of usual residence` == flo$`Area of Workplace`,]
head(flo[1:4])
flo <- dplyr::select(flo, `Area of Workplace`, everything())
flo <- dplyr::select(flo, `Area of usual residence`, everything())
# load lsoas - see load-uk-centroids.R
head(cents_lsoa)
flo <- data.frame(flo)
l <- od2line(flo, cents_lsoa)
plot(l)
l <- spTransform(l, CRS("+init=epsg:4326"))
bbox(l)
library(leaflet)
leaflet() %>% addTiles() %>% addPolylines(data = l)
|
d3534de99fbd1aa1c34695e059b01a7089cf1d13
|
5567e79bdc5f5ae037fe310849e60f6b681e3092
|
/man/util_POSIXTimeToDate.Rd
|
a96fb0af68ba3b928d12c8c73ae0abcc33514463
|
[] |
no_license
|
IanMadlenya/PortfolioEffectHFT
|
baf63cd36761d50ccbb74fdb1e7937054aadb355
|
4bf854cc3858a822bf75fa1c03ffe00aa47165d5
|
refs/heads/master
| 2020-12-30T15:55:13.066226
| 2017-03-24T18:54:25
| 2017-03-24T18:54:25
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 960
|
rd
|
util_POSIXTimeToDate.Rd
|
\name{util_POSIXTimeToDate}
\alias{util_POSIXTimeToDate}
\title{POSIX Time To Date}
\usage{util_POSIXTimeToDate(time)
}
\arguments{
\item{time}{One dimensional vector of milliseconds since the beginning of epoch.}
}
\value{One dimensional vector of time values in "yyyy-MM-dd hh:mm:ss" string format.
}
\description{Converts timestamps in milliseconds to corresponding date strings.}
\author{Kostin Andrey <andrey.kostin@portfolioeffect.com>}
\examples{
\dontrun{
data(aapl.data)
data(goog.data)
data(spy.data)
portfolio=portfolio_create(priceDataIx=spy.data)
portfolio_settings(portfolio,windowLength = '3600s',resultsSamplingInterval='60s')
positionGOOG=position_add(portfolio,'GOOG',100,priceData=goog.data)
positionAAPL=position_add(portfolio,'AAPL',300,priceData=aapl.data)
util_POSIXTimeToDate(compute(kurtosis(portfolio))[[1]][,1])
}}
\keyword{PortfolioEffectHFT}
%\concept{ market data}
\keyword{util_POSIXTimeToDate}
|
aa8508b3353332e8d061983405a417107bccb2c4
|
45a3d4bf1aaceab207cdef4dd50d36d965bae686
|
/tiempos.R
|
727e3fb29cbf701a4113c06bb2275d27883c707b
|
[] |
no_license
|
espinosabouvy/DesviacionesCriticas
|
6c67bc6de1b0d9dec2e48c0f09f0b30a4a90e2da
|
f485c9339f32b448970c121ec694f238045d7b8a
|
refs/heads/master
| 2021-01-19T10:45:47.181140
| 2017-04-04T00:32:25
| 2017-04-04T00:32:25
| 82,220,714
| 1
| 0
| null | 2017-04-04T00:32:25
| 2017-02-16T19:59:54
|
R
|
UTF-8
|
R
| false
| false
| 3,230
|
r
|
tiempos.R
|
# Cleans the file exported from the system; a pivot table is built with columns:
# DEPTO, NLISTA, VCESTIL, FUNCION, TIEMPO, PARES, FAMILIA
# MUST PRODUCE: keep the style, with times for each job function (grouping PESP-PREL)
preparar <- function(agrupar = FALSE){
require(tidyr)
require(dplyr)
tiempos.raw <- read.csv("tiempos.csv")
# group equivalent raw job-function labels under a single name.
# Note: the more specific pattern "CA-PREC" must be matched before "CA-PRE",
# otherwise preconformer rows are relabelled as preliminary first.
relabel <- c("PESPUNTADOR" = "PESPUNTADOR",
"CA-PES"      = "PESPUNTADOR",
"PRELIM"      = "PRELIMINAR",
"CA-PREC"     = "PRECONFORMADOR",
"CA-PRE"      = "PRELIMINAR",
"CA-COR"      = "CORTADOR PIEL",
"PRECONF"     = "PRECONFORMADOR",
"CA-DOB"      = "DOBLILLADOR",
"CA-REB"      = "REBAJADOR")
tiempos.raw$FUNCION <- as.character(tiempos.raw$FUNCION)
for (pat in names(relabel)) {
tiempos.raw$FUNCION <- ifelse(grepl(pat, tiempos.raw$FUNCION, fixed = TRUE),
relabel[[pat]], tiempos.raw$FUNCION)
}
tiempos.raw <<- tiempos.raw
# keep only the relevant job functions
funciones <- c("PESPUNTADOR","PRELIMINAR","CORTADOR FLASH", "CORTADOR.FORRO","CORTADOR PIEL",
"DOBLILLADOR", "REBAJADOR")
if(agrupar){
# group by style, regardless of department (a single production line for cutting, prep, family)
tiempos.funcion <<- tiempos.raw%>%
filter(FUNCION %in% funciones)%>%
select("ESTILO" = VCESTIL, FUNCION, TIEMPO)%>%
group_by(ESTILO, FUNCION)%>%
summarise("TIEMPO" = sum(TIEMPO))%>%
spread(FUNCION,TIEMPO)
} else {
# group by department and style
tiempos.funcion <<- tiempos.raw%>%
filter(FUNCION %in% funciones)%>%
select(DEPTO, "ESTILO" = VCESTIL, FUNCION, TIEMPO)%>%
group_by(DEPTO, ESTILO, FUNCION)%>%
summarise("TIEMPO" = sum(TIEMPO))%>%
spread(FUNCION,TIEMPO)
}
write.csv(tiempos.funcion, "Tiempos-funcion.csv", row.names = F)
}
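# Usage sketch (illustrative): expects tiempos.csv in the working directory.
# preparar(agrupar = TRUE)  # one production line: group by style only
# preparar()                # group by department and style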
|
517c763af693a8f3f36db920eb8af79600b5bfb5
|
0c6668ba6be520469d2b35fcd5f975e5bc559225
|
/r/compare_regions.R
|
05f1990455dc45d40e886191c5b68c30d4873cbf
|
["MIT"] |
permissive
|
Alerovere/WALIS_Visualization
|
a962e437cee6c0a5c11c6db19cac87c6005da457
|
4d1b221a729c12c76fa2639690ebbbbb09f78742
|
refs/heads/main
| 2023-06-25T18:44:04.140200
| 2023-06-06T23:40:52
| 2023-06-06T23:40:52
| 329,045,377
| 4
| 1
|
MIT
| 2023-06-06T23:40:53
| 2021-01-12T16:26:34
|
R
|
UTF-8
|
R
| false
| false
| 1,036
|
r
|
compare_regions.R
|
# Packages assumed by this function (presumably loaded elsewhere in the project):
library(pbapply) # pboptions(), pblapply()
library(dplyr)   # bind_rows()
# define_peaks_ranges(), extract_age(), extract_rsl() and join_age_rsl() are project helpers.
extract_slip_region <- function(area_sli, sl_stack = c(),n_sampling=10000){
## 1. Define Sea level peaks
sl_peaks = c()
if (length(sl_stack)>0){
sl_peaks <- define_peaks_ranges(sl_stack)
}
## 2. Sample Sea level indicators
if(nrow(area_sli)>0){
# Age
pboptions(type = "timer")
system.time(age <- pblapply(unique(area_sli$WALIS_ID), function(x)
extract_age(area_sli[area_sli$WALIS_ID==x,], n_samples = n_sampling, peaks= sl_peaks)))
# RSL
system.time(rls <- pblapply(unique(area_sli$WALIS_ID), function(x)
extract_rsl(area_sli[area_sli$WALIS_ID==x,], n_samples = n_sampling)))
# Join Age and RSL
age_rsl_area <- lapply(1:length(age),function(x) join_age_rsl(age[[x]],rls[[x]]))
# 3. Extract features
# Extract Sea level indicators
sli_sample <- lapply(age_rsl_area, '[[','sli_sample')
sli_sample <- sli_sample[!sapply(sli_sample,function(x) is.null(x))]
sli_area <- bind_rows(sli_sample)
}
else{
sli_area = data.frame()
}
return(sli_area)
}
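
# Hypothetical call (the object names below are illustrative, not from the
# original script): given a WALIS sea-level-indicator table `walis_sli` and a
# sea-level stack `sl_curve` prepared elsewhere in the repository:
# sli_med <- extract_slip_region(walis_sli, sl_stack = sl_curve, n_sampling = 5000)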
|
e8922083a98a9938e6ee38b903faff538a2c10d7
|
a9e199b32183546673d485ac5422510bb3504cb3
|
/Week8/Week8Exercises.R
|
aab9ba869af2b4c30436b26d5c69969aa35555ff
|
[] |
no_license
|
ali-fairbrass/IRDR04_Tutorials
|
0f9cc1347bc060679bc48cb909d8a2c7289486c8
|
8c49f55961942ff71f725f0b14e9baa6ca9634d5
|
refs/heads/master
| 2020-04-02T00:16:27.599673
| 2018-12-20T16:55:13
| 2018-12-20T16:55:13
| 153,796,843
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,170
|
r
|
Week8Exercises.R
|
# IRDR04 Research Tools Module
# Week 8 Tutorial
# 27/11/18 9-11am @ Birkbeck, Malet Street 414/415 Public Cluster
# Tutor: Dr Alison Fairbrass, UCL Centre for Biodiversity & Environment Research, alison.fairbrass@gmail.com
# # # Exercises # # #
# Exercise 1
# Create a high quality plot using the datafile: Nelson.csv
# Exercise 2
# Load a new dataset mussel.csv
# The study investigated abundance-area effects for invertebrates living in mussel beds in intertidal areas
# 25 mussel beds
# response = number of invertebrates (INDIV)
# Explanatory = the area of each clump (AREA)
# additional possible response - Species richness of invertebrates (SPECIES)
# We're going to look at species richness
# a) Plot the data and assess whether a linear regression is appropriate.
# b) Fit a linear model of AREA against SPECIES. What do the results of the model tell you about
# the relationship between area and species richness?
# c) Apply a log transformation to the AREA variable (as it is not normally distributed)
# and re-run the model.
# d) Use appropriate plots to check which model fits the assumptions of linear regression better.
# Explain your answer. (One possible workflow for a-d is sketched below.)
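
# A minimal sketch of Exercise 2 (assumes mussel.csv sits in the working
# directory with columns AREA, INDIV and SPECIES as described above):
mussel <- read.csv("mussel.csv")
plot(SPECIES ~ AREA, data = mussel)               # (a) inspect the relationship
fit <- lm(SPECIES ~ AREA, data = mussel)          # (b) untransformed model
summary(fit)
fit.log <- lm(SPECIES ~ log(AREA), data = mussel) # (c) log-transformed AREA
summary(fit.log)
par(mfrow = c(2, 2)); plot(fit)                   # (d) diagnostics, model 1
par(mfrow = c(2, 2)); plot(fit.log)               #     diagnostics, model 2
par(mfrow = c(1, 1))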
|
93a393a479754a5595beaa1d8ba236bb77f32409
|
15d741907d15d3b816599b09d320520fbf97daa5
|
/exercises/week-3-exercises-unit-tests.R
|
b971b9ea910c62564689c620462de6b7db86b0e1
|
[] |
no_license
|
bcbeidel/clean-code-fundamentals
|
53769a86417c7ecc6a8a64ee054edf6ba68467fa
|
613613c6414cff0cc5da865077de31e2e573b95c
|
refs/heads/main
| 2021-07-01T14:53:00.748933
| 2020-09-22T02:43:23
| 2020-09-22T02:43:23
| 170,702,481
| 3
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 4,479
|
r
|
week-3-exercises-unit-tests.R
|
#' Week 3 | Unit Testing | Exercise
#' -----------------------------------------------------------------------------
#' Unit testing is a tool that can help provide additional assurance that code
#' is functioning as expected. `testthat` is a powerful automated testing tool
#' in R that helps ensure that your small, modular, well-written functions do
#' the things you say they do.
#' -----------------------------------------------------------------------------
#' testthat is required to run this script. Install it. Library it.
#' https://github.com/r-lib/testthat
install.packages("testthat")
library(testthat)
#' -----------------------------------------------------------------------------
#' ---------------------------- 1 - Fixing Unit Tests --------------------------
#' -----------------------------------------------------------------------------
#'
#' Directions:
#'
#' 1. Run the code in the next few lines. Answer the following questions:
#' - What comes out of the console when you run the first test?
#' - What information does the console provide when you run the second test?
#'
#' 2. Refactor `add_two_numbers` so that both tests cases pass
#' - Rerun the tests, how does the output change in the console?
#'
add_two_numbers = function(a, b){
return(a - b)
}
#' This is a passing test. Run these lines, nothing bad should happen.
testthat::test_that("Ensure one plus zero is one", {
expected = 1
actual = add_two_numbers(1, 0)
testthat::expect_true(actual == expected)
})
#' This is a failing test.
testthat::test_that("Ensure two plus two is four.", {
expected = 4
actual = add_two_numbers(2, 2)
testthat::expect_true(actual == expected)
})
#' -----------------------------------------------------------------------------
#' ---------------------- 2 - Writing Unit Tests -------------------------------
#' -----------------------------------------------------------------------------
#' Based on the trivial example above, we now have an example of the testthat
#' syntax. Now we write our own.
#'
#' Exercise:
#' - Based on that syntax write unit tests that help ensure that `get_greeting`
#' returns the appropriate greeting at the following times: 5 am, 10 am, 11 pm
#' Takes a numeric hour_of_day, returns the appropriate greeting.
#'
#' @param hour_of_day numeric. hour component current time, on 24h clock.
#' valid range 0-24
#'
#' @return string. Greeting to humans.
get_greeting = function(hour_of_day){
if (hour_of_day < 3) {
return("Good night.")
}
if (hour_of_day < 12) {
return("Good morning")
}
if (hour_of_day < 17) {
return("Good afternoon")
}
return("Good night.")
}
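#' A sketch of one such test (the 10 am case; the 5 am and 11 pm tests follow
#' the same pattern):
testthat::test_that("get_greeting says good morning at 10 am", {
  testthat::expect_equal(get_greeting(10), "Good morning")
})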
#' -----------------------------------------------------------------------------
#' ---------------------- 3 - Testing the edges --------------------------------
#' -----------------------------------------------------------------------------
#' So our tests above are at seemingly arbitrary times, and might not be the
#' most effective way to test that times where the behavior of our function is
#' expected to change. These points of behavior change are often referred to as
#' `edge cases`. The following questions are intended to help identify and test
#' edge cases. Doing so, can help ensure your unit tests catch the full extremes
#' of functionality
#'
#' Questions:
#' - Based on the way `get_greeting` is written, what hours of the day do we
#' expect the behavior to change?
#' - How does the behavior change if you chose an input just above or below your
#' identified `edge cases`?
#'
#' Exercise:
#' - Write the tests required to validate your edge cases.
#' -----------------------------------------------------------------------------
#' ---------------------- 4. Bonus Round ---------------------------------------
#' -----------------------------------------------------------------------------
#' Failure is always an option. Errors can be considered expected behavior.
#' Our `get_greeting` function hasn't been written in such a way to protect
#' ourselves from bad inputs. Think values out of range, wrong data types, NA, and NULL.
#'
#' Exercise:
#' - Refactor `get_greeting` to validate the inputs
#' into the function meet expectations, and if they don't fail appropriately.
#' - Write unit tests to ensure that your refactored function acts as expected
#' with bad inputs. `testthat::expect_error` is your friend here.
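
#' One possible validation sketch (the function name, checks and test below are
#' ours, not prescribed by the exercise):
get_greeting_safe = function(hour_of_day){
  stopifnot(is.numeric(hour_of_day), length(hour_of_day) == 1,
            !is.na(hour_of_day), hour_of_day >= 0, hour_of_day <= 24)
  get_greeting(hour_of_day)
}
testthat::test_that("non-numeric input raises an error", {
  testthat::expect_error(get_greeting_safe("noon"))
})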
|
28705f1d09a102556ce57bc31d355ddd4fbb740c
|
50e3b8b917eca040eafa4b6d61221a753c0f1b9a
|
/R/validate_domain.R
|
0987a5fc7690540d324f3734bfe3af5b3e4de082
|
[
"MIT"
] |
permissive
|
ryan-hall/tidysocrata
|
453a639bf466e5771fe5c73f7e5aa36ba375acb8
|
b5a91a18a872ee60b622ef1d0e6b8b96b2caa41d
|
refs/heads/master
| 2023-07-19T08:08:07.731815
| 2021-08-12T22:27:16
| 2021-08-12T22:27:16
| 404,506,615
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 793
|
r
|
validate_domain.R
|
#' Validates the format of a provided Socrata domain
#'
#' @param domain A Socrata hostname.
#'
#' @return A Socrata hostname.
#' @export
#'
validate_domain <- function(domain) {
domain <- casefold(domain)
domain_parse <- httr::parse_url(domain)
if(!is.null(domain_parse$hostname)) {
domain_valid <- domain_parse$hostname
  } else if(is.null(domain_parse$scheme) & is.null(domain_parse$hostname) &
            !is.null(domain_parse$path)) {
domain_parse_edit <- httr::parse_url(paste0("https://", domain_parse$path))
domain_valid <- domain_parse_edit$hostname
  } else if(is.null(domain_parse$scheme) & is.null(domain_parse$hostname) &
            is.null(domain_parse$path)) {
stop(domain, " does not appear to be a valid domain name")
}
return(domain_valid)
}
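
# Quick illustration (illustrative inputs; both should resolve to the bare
# hostname):
# validate_domain("https://data.seattle.gov/resource")  # "data.seattle.gov"
# validate_domain("data.seattle.gov")                   # "data.seattle.gov"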
|
f89902ff53bfe884c4d0a7f4e44ca9fddca8bc64
|
54d9c57ffc500687351595d62ef73748f75e0aae
|
/pca-iris.R
|
1e6d91aad778a5b297483f201c2083456e1de608
|
[] |
no_license
|
shionguha/scripts
|
ddfb0c5290e331492925029c4428de11639ec67c
|
a37640fb1ad53ff7a273433e7f54ff857bb0dcdf
|
refs/heads/master
| 2022-11-14T16:07:11.049358
| 2020-07-08T18:49:40
| 2020-07-08T18:49:40
| 8,860,971
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,537
|
r
|
pca-iris.R
|
#this script will use Fisher's iris dataset to perform Principal Components analysis and its diagnostics
#loading the data
data(iris)
#inspecting the data
str(iris); summary(iris[1:4])
pairs(iris[1:4],main="Iris Data", pch=19, col=as.numeric(iris$Species)+1)
mtext("Type of iris species: red-> setosa; green-> versicolor; blue-> virginica", 1, line=3.7,cex=.8)
#To examine variability of all numeric variables
sapply(iris[1:4],var)
range(sapply(iris[1:4],var))
# this range of variability is large in this context,
# so we will use the correlation matrix rather than the covariance matrix.
# For this, we standardize our variables with the scale() function:
iris.stand <- as.data.frame(scale(iris[,1:4]))
sapply(iris.stand,sd) #now, standard deviations are 1
#If we use prcomp() function, we indicate 'scale=TRUE' to use correlation matrix
pca <- prcomp(iris.stand,scale=T)
# equivalent to: prcomp(iris[,1:4],scale=T) and prcomp(iris.stand)
# and similarly with princomp(): princomp(iris.stand, cor=T)
pca
summary(pca)
#This gives us the standard deviation of each component, and the proportion of variance explained by each component.
#The standard deviation is stored in (see 'str(pca)'):
pca$sdev
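# (added) the "Proportion of Variance" row of summary(pca) can be recovered
# by hand from these standard deviations:
pca$sdev^2 / sum(pca$sdev^2)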
# scree plot of the variance of each principal component;
# useful for deciding how many components should be retained.
screeplot(pca, type="lines",col=3)
#The loadings for the principal components are stored in:
pca$rotation # with princomp(): pca$loadings
#biplot of first two principal components
biplot(pca,cex=0.8)
abline(h = 0, v = 0, lty = 2, col = 8)
|
0866f4730c68ad1cc9120dce5ca8614497bb0381
|
ffdea92d4315e4363dd4ae673a1a6adf82a761b5
|
/data/genthat_extracted_code/gamlss.dist/examples/PARETO2.Rd.R
|
ca50803bd027a04ccd7040b0df2791be488a6fe4
|
[] |
no_license
|
surayaaramli/typeRrh
|
d257ac8905c49123f4ccd4e377ee3dfc84d1636c
|
66e6996f31961bc8b9aafe1a6a6098327b66bf71
|
refs/heads/master
| 2023-05-05T04:05:31.617869
| 2019-04-25T22:10:06
| 2019-04-25T22:10:06
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 572
|
r
|
PARETO2.Rd.R
|
library(gamlss.dist)
### Name: PARETO2
### Title: Pareto Type 2 distribution for fitting a GAMLSS
### Aliases: PARETO2 dPARETO2 pPARETO2 qPARETO2 rPARETO2 PARETO2o dPARETO2o
### pPARETO2o qPARETO2o rPARETO2o
### Keywords: distribution regression
### ** Examples
par(mfrow=c(2,2))
y<-seq(0.2,20,0.2)
plot(y, dPARETO2(y), type="l" , lwd=2)
q<-seq(0,20,0.2)
plot(q, pPARETO2(q), ylim=c(0,1), type="l", lwd=2)
p<-seq(0.0001,0.999,0.05)
plot(p, qPARETO2(p), type="l", lwd=2)
dat <- rPARETO2(100)
hist(rPARETO2(100), nclass=30)
#summary(gamlss(a~1, family="PARETO2"))
|
7a80ab2d0afe86bc1e43db0ca9e6bb2e7a43d041
|
37e0c547fc64f1d18e698d041e2f37e6bd240018
|
/dimerSearch/dimerSearch.R
|
6593001d0a350ddd2fd55295c5616d225d286612
|
[] |
no_license
|
metabdel/genomescale_scripts
|
bbd947e876f0267299fd93ef53ab51974bf94b5d
|
cd2e6ce87020dffeee74580620566c884fe70b1b
|
refs/heads/master
| 2022-12-01T01:47:54.717618
| 2020-08-20T23:14:57
| 2020-08-20T23:14:57
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 5,039
|
r
|
dimerSearch.R
|
library(RUnit)
library(gplots) # for heatmap.2
library(PSICQUIC)
library(trenaSGM) # if you want to use allKnownTFs()
#------------------------------------------------------------------------------------------------------------------------
if(!exists("psicquic")){
psicquic <- PSICQUIC(test=FALSE)
providers <- providers(psicquic)
psicquic.id.mapper <- IDMapper("9606")
}
#------------------------------------------------------------------------------------------------------------------------
tbl.models <- get(load("tbl.models.all.RData"))
#------------------------------------------------------------------------------------------------------------------------
# extract all models which have <tf> at or below rank.max
# determine which tfs accompany <tf> at or below that rank
# return a data.frame with those counts. for example
#
# head(tf.partner.distribution(tbl.models, "TAL1", 2))
# tf.1 tf.2 count
# 1 TAL1 TAL1 21 # 21 models in which TAL1 ranks <= 2
# 2 TAL1 IRF5 4 # there are 4 models in which TAL1 and IRF5 both rank <= 2
# 3 TAL1 THAP1 3
# 4 TAL1 JDP2 2
# 5 TAL1 ATF1 1
# 6 TAL1 FOSB 1
#
tf.partner.distribution <- function(tbl.models, tf, rank.max)
{
tbl.tf.top <- subset(tbl.models, tf.symbol==tf & rank <= rank.max)
tf.targets <- tbl.tf.top$target.symbol
tbl.targets <- subset(tbl.models, target.symbol %in% tf.targets & rank <= rank.max)
tbl.partners.dist <- as.data.frame(table(tbl.targets$tf.symbol), stringsAsFactors=FALSE)
tbl.out <- tbl.partners.dist[order(tbl.partners.dist$Freq, decreasing=TRUE),]
tbl.out$tf.1 <- tf
colnames(tbl.out) <- c("tf.2", "count", "tf.1")
rownames(tbl.out) <- NULL
tbl.out[, c("tf.1", "tf.2", "count")]
} # tf.partner.distribution
#------------------------------------------------------------------------------------------------------------------------
test_tf.partner.distribution <- function()
{
   printf("--- test_tf.partner.distribution")
tbl.dist <- head(tf.partner.distribution(tbl.models, "TAL1", 2))
checkEquals(tbl.dist$tf.2, c("TAL1", "IRF5", "THAP1", "JDP2", "ATF1", "FOSB"))
checkEquals(tbl.dist$count, c(21, 4, 3, 2, 1, 1))
} # test_tf.partner.distribution
#------------------------------------------------------------------------------------------------------------------------
# get all direct interactions between gene1 and gene2. slow!
psicquic.pair <- function(gene1, gene2, quiet=TRUE)
{
interactionType <- "direct interaction"
tbl.i <- interactions(psicquic, id=c(gene1, gene2), species="9606", speciesExclusive=TRUE, quiet=quiet,
type=interactionType)
if(nrow(tbl.i) == 0)
return(data.frame())
dim(tbl.i)
tbl.i <- addGeneInfo(psicquic.id.mapper, tbl.i)
all.partners <- sort(unique(c(tbl.i$A.name, tbl.i$B.name)))
subset(tbl.i, A.name %in% all.partners & B.name %in% all.partners)[,c("A.name", "B.name", "type", "detectionMethod",
"confidenceScore", "provider", "firstAuthor")]
} # psicquic.pair
#------------------------------------------------------------------------------------------------------------------------
direct.interactors <- function(gene, quiet=TRUE)
{
interactionType <- "direct interaction"
tbl.i <- interactions(psicquic, id=gene, species="9606", speciesExclusive=TRUE, quiet=quiet,
type=interactionType)
if(nrow(tbl.i) == 0)
return(c())
tbl.i <- addGeneInfo(psicquic.id.mapper, tbl.i)
all.partners <- sort(unique(c(tbl.i$A.name, tbl.i$B.name)))
if(gene %in% all.partners)
all.partners <- all.partners[-grep(gene, all.partners)]
if("-" %in% all.partners)
all.partners <- all.partners[-grep("-", all.partners, fixed=TRUE)]
all.partners
} # direct.interactors
#------------------------------------------------------------------------------------------------------------------------
tfs.le.5 <- unique(subset(tbl.models, rank <= 5)$tf.symbol) # 494
tfs.le.2 <- unique(subset(tbl.models, rank <= 2)$tf.symbol) # 477
tfs.this.run <- tfs.le.5
tbls.all <- lapply(tfs.this.run, function(tf) tf.partner.distribution(tbl.models, tf, 5))
tbl.combined <- do.call(rbind, tbls.all)
tbl.combined$tf.1 <- as.character(tbl.combined$tf.1)
tbl.combined$tf.2 <- as.character(tbl.combined$tf.2)
#--------------------------------------------------------------------------------
# sample exploration: of the high-ranking tfs found in MEF2C-involved models
# do any have known physical interactions? this is a weak example
#--------------------------------------------------------------------------------
tbl.study <- subset(tbl.combined, tf.1 == "MEF2C")
possible.interactors <- tbl.study$tf.2[-1]
mef2c.known.interactors <- direct.interactors("MEF2C", quiet=TRUE)
intersect(possible.interactors, mef2c.known.interactors) # just SMAD2 and SP1, in 7 and 19 models respectively
psicquic.pair("SMAD2", "MEF2C") # a pull down reference, and some others
|
17f3bbf9dc06508a5c47680f9a5841f77fbf5c52
|
d8f5550dbbe4696c170d391e09d00738714c5dd0
|
/man/plotMutTypeProfile.Rd
|
b4017358c403e7bcab4990fcdd726e2db3e73e1a
|
[] |
no_license
|
dami82/mutSignatures
|
19e044eb99ee8c65c4123c131fa978bd5bd9d93c
|
ad406018266f80a3048ae6219bf6c09cf6f69134
|
refs/heads/master
| 2023-01-24T00:39:46.026623
| 2023-01-18T21:22:55
| 2023-01-18T21:22:55
| 123,127,851
| 12
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 2,142
|
rd
|
plotMutTypeProfile.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/s03_all_functions.R
\name{plotMutTypeProfile}
\alias{plotMutTypeProfile}
\title{Plot Mutation Signature Profiles.}
\usage{
plotMutTypeProfile(
mutCounts,
mutLabs,
freq = TRUE,
ylim = "auto",
ylab = "Fraction of Variants",
xlab = "Sequence Motifs",
xaxis_cex = 0.475,
cols = c("#4eb3d3", "#040404", "#b30000", "#bdbdbd", "#41ab5d", "#dd3497"),
main = "MutType Profile"
)
}
\arguments{
\item{mutCounts}{data.frame including mutation types counts or frequencies, such as a
data.frame of mutation counts from samples, or mutation type frequencies from a mutational signature.}
\item{mutLabs}{character vector, labels to be used for the mutation types}
\item{freq}{logical, shall frequency be plotted rather than counts. Defaults to TRUE}
\item{ylim}{values used for ylim. Defaults to "auto" (ylim automatically set)}
\item{ylab}{string, used as y-axis title. Defaults to "Fraction of Variants"}
\item{xlab}{string, used as x-axis title. Defaults to "Sequence Motifs"}
\item{xaxis_cex}{numeric, cex value for the xaxis}
\item{cols}{character vector, indicates the colors to be used for the bars. It typically requires 6 colors.}
\item{main}{string, title of the plot. Defaults to "MutType Profile"}
}
\value{
NULL. A plot is printed to the active device.
}
\description{
Build a barplot to visualize the relative abundance of mutation counts in a mutational
signature or biological sample of interest.
}
\details{
This function is part of the user-interface set of tools included in mutSignatures. This is an exported function.
}
\references{
More information and examples about mutational signature analysis can be found here:
\enumerate{
\item \bold{Official website}: \url{http://www.mutsignatures.org}
\item \bold{More info and examples} about the mutSignatures R library: \url{https://www.data-pulse.com/dev_site/mutsignatures/}
\item \bold{Oncogene paper}, Mutational Signatures Operative in Bladder Cancer: \url{https://www.nature.com/articles/s41388-017-0099-6}
}
}
\author{
Damiano Fantini, \email{damiano.fantini@gmail.com}
}
|
4ed17566d8229d0751d5c7f2445aa3f3eb94d2f4
|
962daa964ca92b1a363c3363abb05e6d2c2708af
|
/man/extractSig.Rd
|
71cd83d5a923f9176637ecd8cdb5e79b375e70c9
|
[] |
no_license
|
anishaluthra/tempoSig
|
5e36fde49630e40067a27495355c9a90b639ede0
|
edec6ffecc6c51c113f8a490beeb15ff225528ad
|
refs/heads/master
| 2023-01-23T01:01:03.050728
| 2020-12-08T20:57:18
| 2020-12-08T20:57:18
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 1,514
|
rd
|
extractSig.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/extract.R
\name{extractSig}
\alias{extractSig}
\title{Infer Signature Proportions}
\usage{
extractSig(
object,
method = "mle",
itmax = 1000,
tol = 1e-04,
min.tmb = 2,
compute.pval = FALSE,
nperm = 1000,
progress.bar = FALSE,
pvtest = "permutation",
cosmic = TRUE,
...
)
}
\arguments{
\item{object}{Object of class \code{tempoSig}}
\item{method}{Refitting method; \code{mle} for maximum likelihood (default) or
\code{mutCone} for mutationalCone.}
\item{itmax}{Maximum number of iterations for maximum likelihood estimate}
\item{tol}{Tolerance for convergence}
\item{min.tmb}{Minimum number of mutations in each sample. If \code{tmb} is less,
\code{NA} will be returned for the sample.}
\item{compute.pval}{Estimate p-values}
\item{nperm}{Number of permutations}
\item{progress.bar}{Display progress bar}
\item{pvtest}{Algorithm for p-value computation;
\code{c('permutation','lrt','x.permutation')} for permutation resampling of
signatures, likelihood ratio test (asymptotic formula), or
permutation of count data.}
\item{...}{Other parameters for \code{denovo} with \code{method = 'hnmf'}}
}
\description{
Use known signature list to find the most likely exposures in samples
}
\examples{
data <- read.table(system.file('extdata', 'tcga-brca_catalog.txt', package='tempoSig'))
b <- tempoSig(data)
b <- extractSig(b, progress.bar = TRUE)
b_pv <- extractSig(b, compute.pval = TRUE, progress.bar = TRUE)
}
|
720651b2aea7dbac33363c9f5bc060e85ca78bba
|
f21c8b03b3c4d882fd7709b9aaec5cb347bf7e0e
|
/man/som_mep.Rd
|
735161a5fdf523082eab0a9f84de012ec956a4a9
|
[] |
no_license
|
mschmidt000/oposSOM.PT
|
a2e67e7da2cb70887d884c65020ba8a46787167a
|
e1c49dc2c849eb46dba4c7389adb21ea43508938
|
refs/heads/main
| 2022-12-27T13:30:21.171303
| 2020-10-05T11:15:01
| 2020-10-05T11:15:01
| 301,334,606
| 1
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 751
|
rd
|
som_mep.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/som_mep.R
\name{som_mep}
\alias{som_mep}
\title{SOM Maximum Expression Path (MEP) Function}
\usage{
som_mep(env, root = c(), tips = c(), lineages = c())
}
\arguments{
\item{env}{An environment produced by the oposSOM pipeline.}
\item{root}{A single group label defined as the developmental source.}
\item{tips}{A vector of group labels defined as the developmental sinks.}
\item{lineages}{A list of vectors of group labels sorted by pseudotime.}
}
\description{
This function connects root and tips along the SOM clusters that provide the maximum cumulative
weights, and then defines the gene state trajectory, ensuring maximum cumulative expression
between source and sink.
}
|
c9a4c2625e3a32588f35a6624bb2e76b649fe0bd
|
d060ea9e03f24f924256e4fbc3bc7905f13952c8
|
/R语言代码包/ch3 数据预处理.R
|
3b4c445c0c61b7617d0907113cb941c5d3a198da
|
[] |
no_license
|
DongaiLiu/data-analysis-r-in-action
|
c8d269fb31279e0c5f6ba36e63c97a5fd30eaae8
|
5158177ce35ca25ce714e1e482d10d0b78a97788
|
refs/heads/master
| 2020-03-21T09:39:03.246811
| 2017-09-11T08:21:35
| 2017-09-11T08:21:35
| null | 0
| 0
| null | null | null | null |
GB18030
|
R
| false
| false
| 3,549
|
r
|
ch3 数据预处理.R
|
#3.1
data=read.table("d:/data/salary.txt",header=T)
attach(data)
mean(Salary) # mean salary
length(Salary) # number of observations
cumsum(Salary) # cumulative salary
salary1=cut(Salary,3,labels=c("low","medium","high")) # label each interval
table(salary1)
breakpoints=c(0,30,40,50,60,70)
salary2=cut(Salary,breaks=breakpoints)
table(salary2)
pic=function(x){
par(mfrow=c(2,2)) # split the plotting region into four panels
hist(x) # histogram
dotchart(x) # dot chart
boxplot(x) # boxplot
qqnorm(x);qqline(x) # normal probability (Q-Q) plot
par(mfrow=c(1,1)) # restore the single-panel region
}
pic(Salary) # call the pic() function defined above
#3.2.1 rename the data labels
data=read.table("d:/data/salary.txt",header=T,stringsAsFactors=F)
names(data)=c("CITY","WORK","PRICE","SALARY")
names(data)
#3.2.2 drop rows and columns
data2=data[-1,-3]
data2
#3.3.1
attach(data)
data$SALARY=replace(SALARY,SALARY>65,NA)
#data1=data
#data$PRICE=replace(PRICE,PRICE>80,NA)
is.na(SALARY)
sum(is.na(SALARY))
complete.cases(data$SALARY)
#3.3.2
data$PRICE=replace(PRICE,PRICE>80,NA)
install.packages("mice")
library(mice)
md.pattern(data)
install.packages("VIM")
library(VIM)
aggr(data)
#3.3.3
data1=data[complete.cases(data$SALARY),]
dim(data1)
data2=data[!is.na(SALARY),]
dim(data2)
data3=na.omit(data)
dim(data3)
data[is.na(data)]=mean(SALARY[!is.na(SALARY)]) # mean() here averages only the non-NA SALARY values
data=read.table("d:/data/salary.txt",header=T)
names(data)=c("CITY","WORK","PRICE","SALARY")
attach(data)
data$SALARY=replace(SALARY,SALARY>65,NA)
imp=mice(data,seed=1) # multiple imputation of the missing data
fit=with(imp,lm(SALARY~WORK+PRICE)) # linear regression on each imputed data set
pooled=pool(fit) # pooled regression results
options(digits=3) # print three significant digits
summary(pooled)
data.pre=data[is.na(data$SALARY),][,2:3] # WORK and PRICE values of the samples with missing SALARY
data.pre=as.matrix(cbind(rep(1,4),data.pre))
q=pooled$qbar # pooled coefficients, used to predict SALARY
pre=data.pre%*%q;pre # predicted values
index=is.na(data$SALARY)
data$SALARY[index]=pre # fill in the missing values
data[index,]
#3.4.1
a=c("Hongkong",1910,75.0,41.8)
data1=rbind(data,a)
data1[14:16,]
weight=c(150,135,210,140) #数值型向量
height=c(65,61,70,65)
gender=c("F","F","M","F") #字符型向量
stu=data.frame(weight,height,gender)
row.names(stu)=c("Alice","Bob","Cal","David")
stu[,"weight"]
stu["Cal",] #获取行
stu[1:2,1:2]
stu$weightt # ”$”用于取列
stu[["weight"]] #双括号+名称
stu[[1]] #双括号+下标,用于数据框和列表数据的获取
index=list("City"=data$City,"Index"=1:15)
index
data.index=merge(data,index,by="City")
data.index
#3.4.2
data[data$SALARY>65,]
data[c(2,4),]
data[data$PRICE==65.6,]
#3.4.3
order.salary=order(data$SALARY)
order.salary
sort.list(data$SALARY)
data[order.salary,]
rank(data$SALARY)
#3.5.1
t(data)
x=data.frame(A=1:4,B=seq(1.2,1.5,0.1),C=rep(1,4))
x
x1=stack(x)
x1
unstack(x1,form=values~ind)
#3.5.2
install.packages("reshape2")
library("reshape2", lib.loc="D:/R-3.0.1/library")
melt(x)
data(airquality)
str(airquality) # display the internal structure of an R object, similar in purpose to summary()
longdata=melt(airquality,id.vars=c("Ozone","Month","Day"),measure.vars=2:4)
str(longdata)
library(ggplot2)
p=ggplot(data=longdata,aes(x=Ozone,y=value,color=factor(Month)))
p+geom_point(shape=20,size=4)+facet_wrap(~variable,scales="free_y")+ geom_smooth(aes(group=1), fill="gray80") # scales="free_y" lets each panel set its own y-axis range
shortdata=dcast(longdata,formula=Ozone+Month+Day~variable)
head(shortdata,5)
|
44be7b0d1276662af3cc7f5621ddc67277b80ed7
|
6d1d6b350d87abe868b00b4061751457cf3f61cd
|
/tests/testthat.R
|
8a146a86436dc11f41beb2b6755e836f427ab06c
|
[
"MIT"
] |
permissive
|
JimMeister/capstoneJH
|
a8745b4a3f39d8fecbceb89c3b0e578b5bc1f337
|
821ddefb2ee1042f3f78d947fba98b403c96293a
|
refs/heads/master
| 2022-12-07T11:54:25.796808
| 2020-09-02T09:16:14
| 2020-09-02T09:16:14
| 108,685,216
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 64
|
r
|
testthat.R
|
library(testthat)
library(capstoneJH)
test_check("capstoneJH")
|
53c1bbdccb96e172e2e4a38f74586cd60cba1490
|
cb4faa50b08a0b1782e3ae847b7a92be82b1e86e
|
/plot1.R
|
93f0f52e904e5104402230cb3d20c98f1e65e6f7
|
[] |
no_license
|
dacherta/ExData_Plotting1
|
1f17b8f596950e7beba212c488c4b068668aceed
|
eb3a66ee4aff2d08914eb3435ceb8efb6ba4ecf4
|
refs/heads/master
| 2021-08-23T02:42:12.785977
| 2017-12-02T16:37:53
| 2017-12-02T16:37:53
| 112,854,470
| 0
| 0
| null | 2017-12-02T15:45:33
| 2017-12-02T15:45:33
| null |
UTF-8
|
R
| false
| false
| 994
|
r
|
plot1.R
|
# Read data:
data <- read.table("household_power_consumption.txt", header = TRUE, sep = ";", na.strings = "?", stringsAsFactors = FALSE)
dim(data) # 2075259 9, Ok
str(data)
# Only "2007-02-01" and "2007-02-02":
data <- subset(data, Date %in% c("1/2/2007", "2/2/2007"))
dim(data) # 2880 9, Ok: 24h * 60min * 2days
# Convert the Date and Time variables to Date/Time classes:
data$Date <- as.Date(data$Date, format = "%d/%m/%Y")
class(data$Date) # "Date"
head(data$Date) # "2007-02-01"...
data$Time <- strptime(paste(data$Date, data$Time), format = "%Y-%m-%d %H:%M:%S")
class(data$Time) # "POSIXlt" "POSIXt"
head(data$Time) # "2007-02-01 00:00:00 CET"...
# Check the variables:
summary(data)
View(data)
# Construct the plot and save it to a PNG file with a width of 480 pixels and a height of 480 pixels:
png("plot1.png", width = 480, height = 480)
hist(data$Global_active_power, main = "Global Active Power", col = "red", xlab = "Global Active Power (kilowatts)")
dev.off()
|
d3b9afacf7d04d3730eef10594d4aca8373c80e8
|
27f53c5a9aa2d0962b5cd74efd373d5e9d9e0a99
|
/R/tuneThreshold.R
|
8fa9f371142e577203a72cec33cd00c677f62f2d
|
[] |
no_license
|
dickoa/mlr
|
aaa2c27e20ae9fd95a0b63fc5215ee373fa88420
|
4e3db7eb3f60c15ce2dfa43098abc0ed84767b2d
|
refs/heads/master
| 2020-12-24T13:44:59.269011
| 2015-04-18T19:57:42
| 2015-04-18T19:57:42
| 31,710,800
| 2
| 0
| null | 2015-04-18T19:57:43
| 2015-03-05T11:29:18
|
R
|
UTF-8
|
R
| false
| false
| 2,339
|
r
|
tuneThreshold.R
|
#' @title Tune prediction threshold.
#'
#' @description
#' Optimizes the threshold of prediction based on probabilities.
#' Uses \code{\link[BBmisc]{optimizeSubInts}} for 2class problems and \code{\link[cmaes]{cma_es}}
#' for multiclass problems.
#'
#' @template arg_pred
#' @param measure [\code{\link{Measure}}]\cr
#' Performance measure to optimize.
#' @param task [\code{\link{Task}}]\cr
#'   Learning task. Rarely needed,
#'   only when required for the performance measure.
#' @param model [\code{\link{WrappedModel}}]\cr
#'   Fitted model. Rarely needed,
#'   only when required for the performance measure.
#' @param nsub [\code{integer(1)}]\cr
#'   Passed to \code{\link[BBmisc]{optimizeSubInts}} for 2-class problems.
#' Default is 20.
#' @param control [\code{list}]\cr
#' Control object for \code{\link[cmaes]{cma_es}} when used.
#' Default is empty list.
#' @return [\code{list}]. A named list with with the following components:
#' \code{th} is the optimal threshold, \code{perf} the performance value.
#' @family tune
#' @export
tuneThreshold = function(pred, measure, task, model, nsub = 20L, control = list()) {
  assertClass(pred, classes = "Prediction")
  td = pred$task.desc
  # resolve the default before asserting, otherwise a missing `measure`
  # would already fail the class check
  if (missing(measure))
    measure = default.measures(td)[[1]]
  assertClass(measure, classes = "Measure")
  if (!missing(task))
    assertClass(task, classes = "SupervisedTask")
  if (!missing(model))
    assertClass(model, classes = "WrappedModel")
  assertList(control)
probs = getProbabilities(pred)
# brutally return NA if we find any NA in the predicted probs...
if (anyMissing(probs)) {
return(list(th = NA, pred = pred, th.seq = numeric(0), perf = numeric(0)))
}
cls = pred$task.desc$class.levels
k = length(cls)
fitn = function(x) {
if (k > 2)
names(x) = cls
performance(setThreshold(pred, x), measure, task, model)
}
if (k == 2) {
or = optimizeSubInts(f = fitn, lower = 0, upper = 1, maximum = !measure$minimize, nsub = nsub)
th = or[[1]]
perf = or$objective
} else {
requirePackages("cmaes", why = "tuneThreshold", default.method = "load")
start = rep(0.5, k)
or = cmaes::cma_es(par = start, fn = fitn, lower = 0, upper = 1, control = control)
th = or$par / sum(or$par)
names(th) = cls
perf = or$val
}
return(list(th = th, perf = perf))
}
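
# Hedged usage sketch (assumes mlr is loaded; the names below follow mlr's
# standard API, with `sonar.task` as an example task and `mmce` as measure):
# lrn  = makeLearner("classif.rpart", predict.type = "prob")
# mod  = train(lrn, sonar.task)
# pred = predict(mod, sonar.task)
# res  = tuneThreshold(pred, measure = mmce)
# res$th   # tuned threshold
# res$perf # performance at that threshold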
|
f52d2c1dd30cc668983010018b6ce4a0790bf72c
|
0e2ce546312ee44206bc05d0863b30503a5dbaa6
|
/FindIncompleteRowsHW4P10.R
|
5f824eb8e49da488a42e632ef1b151a91a6385e5
|
[] |
no_license
|
BethCH/es207hw7
|
4ffc5c052ed8c6a8452781c6a08c5c87f96edfe5
|
a6057d2e17bd500228a5f5c81a58960a7e6f9a2d
|
refs/heads/master
| 2020-04-28T19:33:50.002370
| 2019-03-20T00:12:09
| 2019-03-20T00:12:09
| 175,515,333
| 1
| 1
| null | 2019-03-20T18:31:58
| 2019-03-13T23:40:05
|
HTML
|
UTF-8
|
R
| false
| false
| 623
|
r
|
FindIncompleteRowsHW4P10.R
|
# Elizabeth Clifton Holcomb
# March 13, 2019
# ES 207
# HW9 P10
# Find addresses that are incomplete
# Find street address rows that don't start with a number
# Find zip codes that have less than 5 rows
LocAddress <- length(loc$Address) - sum(str_detect(loc$Address, "^\\d") & str_detect(loc$`Zip Code`,"\\d{5}"), na.rm = TRUE)
# LocAddress counts the rows of loc (assigned above) whose Address does not
# start with a number (i.e., no house number) or whose Zip Code does not
# contain 5 digits (incomplete zip codes); str_detect() comes from stringr
LocAddress
# show the result
|
63d52c351e27c6e2aa58dc27062fa4196c85369d
|
8baa46a398bd5860333ced1e245535fbe2d1762c
|
/man/mode_cell.rd
|
ce85be44b8b9aec62328439715990a800b19ecd8
|
[] |
no_license
|
cran/etable
|
e5942a73d0f470ee23879f3dbd5317df85f9b79a
|
2edff06431e86cc6f325be6d01198dde1ded708a
|
refs/heads/master
| 2021-06-10T06:21:43.922726
| 2021-05-22T15:50:02
| 2021-05-22T15:50:02
| 17,695,860
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,275
|
rd
|
mode_cell.rd
|
\name{mode_cell}
\alias{mode_cell}
\title{
Mode cell function
}
\description{
Shows the most frequent value (mode)
}
\usage{
mode_cell(x, y, z, w, cell_ids, row_ids, col_ids, vnames, vars, n_min,
digits=3)
}
\arguments{
\item{x}{
The x variable
}
\item{y}{
NOT USED
}
\item{z}{
NOT USED
}
\item{w}{
Weights for x variable. Only if calculating weighted mode.
}
\item{cell_ids}{
Index vector for selecting values in cell.
}
\item{row_ids}{
Index vector for selecting values in row.
}
\item{col_ids}{
Index vector for selecting values in col.
}
\item{vnames}{
NOT USED
}
\item{vars}{
NOT USED
}
\item{n_min}{
NOT USED
}
\item{digits}{
Integer indicating the number of significant digits.
}
}
\author{
Andreas Schulz <ades-s@web.de>
}
\examples{
sex <- factor(rbinom(1000, 1, 0.4), labels=c('Men', 'Women'))
note <- as.factor(rbinom(1000, 4, 0.5)+1)
decades <- rbinom(1000, 3, 0.5)
decades <- factor(decades, labels=c('[35,45)','[45,55)','[55,65)','[65,75)'))
d<-data.frame(sex, decades, note)
tabular.ade(x_vars=c('note'), xname=c('Noten'),
rows=c('sex','ALL','decades'), rnames=c('Gender', 'Age decades'),
data=d, FUN=mode_cell)
}
\keyword{ mode }
\keyword{ frequency }
|
d4a1afabc5c1d507df15305eb888ab5cc4a5a6e3
|
7e4251b7631ab116a179ae72251425f798fbf8b5
|
/scripts/3_visualize/Explore_seasonality.R
|
ca093989e9cb9c2524603da1f36d7aeb2f39da85
|
[] |
no_license
|
srcorsi-USGS/MMSDOptical
|
d9ec095dba8d65d46a15af3a8ea9ea92b7f50cd8
|
447456369a2018c105d19b2747a78bf8c9d5f489
|
refs/heads/master
| 2021-05-23T05:21:33.414676
| 2019-05-22T22:15:17
| 2019-05-22T22:15:17
| 95,141,444
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,248
|
r
|
Explore_seasonality.R
|
#Explore seasonality of optical signals and bacteria
library(smwrBase)
library(dataRetrieval)
#set data directories
raw.path <- "raw_data"
cached.path <- "cached_data"
summary.path <- "SummaryVariables"
summary.save <- "1_SummaryVariables"
cached.save <- "0_munge"
df <- readRDS(file.path(cached.path,summary.save,"dfOptP3P4Combined.rds"))
response <- "lachno2"
which(substr(names(df),1,1)=="A")
AbsVars <- names(df)[c(61:138,232:240)]
FlVars <- names(df)[c(17:60,139:231)]
IVs <- c(AbsVars,FlVars)
month <- as.POSIXlt(df$psdate)$mon + 1
jday <- as.POSIXlt(df$psdate)$yday + 1
boxplot(df$A~month)
boxplot(df$T~month)
boxplot(df$lachno2~month,log="y")
bp <- boxplot((df$lachno2+df$bacHum)~month,log="y")
mtext("n = ", line = 0.1, side = 3,adj=0, cex=1)
mtext(paste(bp$n, sep = ""), at = seq_along(bp$n), line = 0.1, side = 3,cex=1)
plot(log10(df$lachno2+df$bacHum)~jday)
lines(lowess(log10(df$lachno2+df$bacHum)~jday,f = 0.4))
fourier(df$psdate)
jdate <- as.POSIXlt(df$psdate)$yday
jsecs <- as.numeric(df$psdate) -
as.numeric(as.POSIXct(paste0(as.POSIXlt(df$psdate)$year+1900,"-01-01 00:00"),tz='Etc/GMT-6'))
dfQ <- readNWISuv(siteNumbers = "04087120",parameterCd = "00060",startDate = "2014-03-01",endDate = "2014-03-30")
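
# A possible next step (a sketch we added, not part of the original analysis):
# regress the log human-marker sum on the fourier() harmonics from smwrBase,
# as called above, to quantify the seasonal signal.
# fit.seas <- lm(log10(df$lachno2 + df$bacHum) ~ fourier(df$psdate))
# summary(fit.seas)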
|
41c77d9278a1fff3206806a06421e00b32c6bbb0
|
c55d7812cfc02b401397f3ef4acbb4269ef18284
|
/10x-pilot_region-specific_Amyg_step04_correl-mm-MeA.R
|
b4800e0af1e35178b27d79feb1f437993f298775
|
[] |
no_license
|
BertoLabMUSC/10xPilot_snRNAseq-human
|
af8836a0d1019260c691dafef692a24885c7f93b
|
4da6e15b7d9a4507c0997e375d6984bbaecb8995
|
refs/heads/master
| 2022-12-28T03:19:08.700352
| 2020-10-08T22:38:26
| 2020-10-08T22:38:26
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 39,708
|
r
|
10x-pilot_region-specific_Amyg_step04_correl-mm-MeA.R
|
### MNT 10x snRNA-seq workflow: step 04 - downstream comparisons
### **Region-specific analyses**
### - (3x) NAc samples from: Br5161 & Br5212 & Br5287
### * Comparison to UCLA's Drop-seq on mouse medial amyg (MeA)
#####################################################################
library(SingleCellExperiment)
library(EnsDb.Hsapiens.v86)
library(org.Hs.eg.db)
library(scater)
library(scran)
library(batchelor)
library(DropletUtils)
library(jaffelab)
library(limma)
library(lattice)
library(RColorBrewer)
library(pheatmap)
### Palette taken from `scater`
tableau10medium = c("#729ECE", "#FF9E4A", "#67BF5C", "#ED665D",
"#AD8BC9", "#A8786E", "#ED97CA", "#A2A2A2",
"#CDCC5D", "#6DCCDA")
tableau20 = c("#1F77B4", "#AEC7E8", "#FF7F0E", "#FFBB78", "#2CA02C",
"#98DF8A", "#D62728", "#FF9896", "#9467BD", "#C5B0D5",
"#8C564B", "#C49C94", "#E377C2", "#F7B6D2", "#7F7F7F",
"#C7C7C7", "#BCBD22", "#DBDB8D", "#17BECF", "#9EDAE5")
# ===
### Pseudobulk>modeling approach ============================================
# * Skip this -> Now using sn-level stats for this comparison
## load modeling outputs
# 10x-pilot human Amyg
load("/dcl01/lieber/ajaffe/Matt/MNT_thesis/snRNAseq/10x_pilot_FINAL/rdas/markers-stats_Amyg-n2_manualContrasts_MNTMar2020.rda", verbose=T)
# eb_contrasts.amy.broad, eb_list.amy.broad, sce.amy.PB
# UCLA mouse MeA Drop-seq
load("/dcl01/ajaffe/data/lab/singleCell/ucla_mouse-MeA/markers-stats_mouse-MeA-Drop-seq_manualContrasts_MNTApr2020.rda", verbose=T)
# eb_list.amy.mm, corfit.amy.mm, sce.amy.mm.PB
# Add EntrezID for human
hs.entrezIds <- mapIds(org.Hs.eg.db, keys=rowData(sce.amy.PB)$ID,
column="ENTREZID", keytype="ENSEMBL")
# "'select()' returned 1:many mapping between keys and columns"
table(!is.na(hs.entrezIds))
# 20,578 valid entries (remember this is already subsetted for those non-zero genes only)
# Add to rowData
rowData(sce.amy.PB) <- cbind(rowData(sce.amy.PB), hs.entrezIds)
## Bring in 'HomoloGene.ID' for human (already in rowData for mm SCE) ===
## JAX annotation info
hom = read.delim("http://www.informatics.jax.org/downloads/reports/HOM_AllOrganism.rpt",
as.is=TRUE)
hom_hs <- hom[hom$Common.Organism.Name == "human", ]
# of 19,124 entries
table(rowData(sce.amy.PB)$hs.entrezIds %in% hom_hs$EntrezGene.ID)
# 17,261
table(rowData(sce.amy.PB)$Symbol %in% hom_hs$Symbol)
# 16,916 - not a bad difference
# So for mapping === == === ===
# human.entrez > HomoloGene.ID < mm.Symbol
# ^ filter SCE's on this
# Human (by Entrez)
rowData(sce.amy.PB)$HomoloGene.ID <- hom_hs$HomoloGene.ID[match(rowData(sce.amy.PB)$hs.entrezIds,
hom_hs$EntrezGene.ID)]
## Now set/match to shared homologous genes ===
length(intersect(rowData(sce.amy.PB)$HomoloGene.ID,
rowData(sce.amy.mm.PB)$HomoloGene.ID)) # 13,444
sharedHomologs <- intersect(rowData(sce.amy.PB)$HomoloGene.ID,
rowData(sce.amy.mm.PB)$HomoloGene.ID)
# # That first one is NA - get rid of it
# sharedHomologs <- sharedHomologs[-1]
# Human not in mm
length(setdiff(rowData(sce.amy.PB)$HomoloGene.ID,
rowData(sce.amy.mm.PB)$HomoloGene.ID)) # 3657
# mm not in human
length(setdiff(rowData(sce.amy.mm.PB)$HomoloGene.ID,
rowData(sce.amy.PB)$HomoloGene.ID)) # 928
# Subset for those
sce.mm.PBsub <- sce.amy.mm.PB[rowData(sce.amy.mm.PB)$HomoloGene.ID %in% sharedHomologs, ] # 14247
sce.hsap.PBsub <- sce.amy.PB[rowData(sce.amy.PB)$HomoloGene.ID %in% sharedHomologs, ] # 14178
## Many are duplicated...
rowData(sce.mm.PBsub)$Symbol[duplicated(rowData(sce.mm.PBsub)$HomoloGene.ID)]
# shoot many genes are orthologs
rowData(sce.hsap.PBsub)$Symbol[duplicated(rowData(sce.hsap.PBsub)$HomoloGene.ID)]
# same here, slightly less
### -> Take the higher-expressing of the duplicated - just mean across PB clusters:
## mm ===
duplicatedSet.mm <- which(duplicated(rowData(sce.mm.PBsub)$HomoloGene.ID))
genes2compare.mm <- list()
gene2keep.mm <- character()
for(g in 1:length(duplicatedSet.mm)){
genes2compare.mm[[g]] <- rownames(sce.mm.PBsub)[rowData(sce.mm.PBsub)$HomoloGene.ID ==
rowData(sce.mm.PBsub)$HomoloGene.ID[duplicatedSet.mm[g]]]
rowmeansmat <- rowMeans(assay(sce.mm.PBsub[genes2compare.mm[[g]], ], "logcounts"))
gene2keep.mm[g] <- names(rowmeansmat[order(rowmeansmat, decreasing=TRUE)])[1]
}
# Now pull out those that are not being compared, so we can `c()` them
table(rownames(sce.mm.PBsub) %in% unlist(genes2compare.mm)) # 133 genes are involved in a comparison
sum(lengths(genes2compare.mm)) # 328 - larger because each duplicated ID re-lists its whole group
length(unique(unlist(genes2compare.mm))) # 133 unique (and `length(unique(gene2keep.mm)) == 52`)
genesNoCompare.mm <- rownames(sce.mm.PBsub)[!(rownames(sce.mm.PBsub) %in% unlist(genes2compare.mm))]
# Finally combine and subset
sce.mm.PBsub <- sce.mm.PBsub[c(genesNoCompare.mm, unique(gene2keep.mm)), ]
table(rowData(sce.mm.PBsub)$HomoloGene.ID %in% sharedHomologs) # 13444 TRUE
table(duplicated(rowData(sce.mm.PBsub)$HomoloGene.ID)) # 13444 FALSE dope.
## Human ===
# First change rownames to EnsemblID
rowData(sce.hsap.PBsub)$Symbol.unique <- rownames(sce.hsap.PBsub)
rownames(sce.hsap.PBsub) <- rowData(sce.hsap.PBsub)$ID
duplicatedSet.hsap <- which(duplicated(rowData(sce.hsap.PBsub)$HomoloGene.ID))
genes2compare.hsap <- list()
gene2keep.hsap <- character()
for(g in 1:length(duplicatedSet.hsap)){
genes2compare.hsap[[g]] <- rownames(sce.hsap.PBsub)[rowData(sce.hsap.PBsub)$HomoloGene.ID ==
rowData(sce.hsap.PBsub)$HomoloGene.ID[duplicatedSet.hsap[g]]]
rowmeansmat <- rowMeans(assay(sce.hsap.PBsub[genes2compare.hsap[[g]], ], "logcounts"))
gene2keep.hsap[g] <- names(rowmeansmat[order(rowmeansmat, decreasing=TRUE)])[1]
}
# Now pull out those that are not being compared, so we can `c()` them
table(rownames(sce.hsap.PBsub) %in% unlist(genes2compare.hsap)) # 109 genes are involved in a comparison
sum(lengths(genes2compare.hsap)) # 136 - larger because each duplicated ID re-lists its whole group
length(unique(unlist(genes2compare.hsap))) # 109 unique (and `length(unique(gene2keep.hsap)) == 52`)
genesNoCompare.hsap <- rownames(sce.hsap.PBsub)[!(rownames(sce.hsap.PBsub) %in% unlist(genes2compare.hsap))]
# of length 13392 (which + 52 == 13444)
# Finally combine and subset
sce.hsap.PBsub <- sce.hsap.PBsub[c(genesNoCompare.hsap, unique(gene2keep.hsap)), ]
table(rowData(sce.hsap.PBsub)$HomoloGene.ID %in% sharedHomologs) # 13444 TRUE
table(duplicated(rowData(sce.hsap.PBsub)$HomoloGene.ID)) # 13444 FALSE dope.
## Match order and save
sce.mm.PBsub <- sce.mm.PBsub[match(rowData(sce.hsap.PBsub)$HomoloGene.ID,
rowData(sce.mm.PBsub)$HomoloGene.ID), ]
table(rowData(sce.mm.PBsub)$HomoloGene.ID == rowData(sce.hsap.PBsub)$HomoloGene.ID)
# all TRUE - good
pheatmap(cor(assay(sce.mm.PBsub, "logcounts"), assay(sce.hsap.PBsub, "logcounts")), fontsize=5)
# (ah but this is at the sample:cluster level)
Readme <- "These two SCEs are subsetted and ordered for matching HomoloGene.ID in the rowData. This can be used to subset the nucleus-level SCEs in their respective Rdata files."
save(sce.mm.PBsub, sce.hsap.PBsub, Readme, file="/dcl01/ajaffe/data/lab/singleCell/ucla_mouse-MeA/SCE_mm-MeA-PBd_w_matchingHsap-Amyg-PBd_HomoloGene.IDs_MNT.rda")
### FINALLY resume comparisons === === === === ===
## mm stats
pvals_mm <- sapply(eb_list.amy.mm, function(x) {
x$p.value[, 2, drop = FALSE]
})
rownames(pvals_mm) = rownames(sce.amy.mm.PB)
ts_mm <- sapply(eb_list.amy.mm, function(x) {
x$t[, 2, drop = FALSE]
})
rownames(ts_mm) = rownames(sce.amy.mm.PB)
## Human stats
pvals_hsap <- sapply(eb_list.amy.broad, function(x) {
x$p.value[, 2, drop = FALSE]
})
rownames(pvals_hsap) = rowData(sce.amy.PB)$ID
ts_hsap <- sapply(eb_list.amy.broad, function(x) {
x$t[, 2, drop = FALSE]
})
rownames(ts_hsap) = rowData(sce.amy.PB)$ID
### Subset and check matching 'HomoloGene.ID' === === === ===
pvals_mm <- pvals_mm[rownames(sce.mm.PBsub), ]
ts_mm <- ts_mm[rownames(sce.mm.PBsub), ]
pvals_hsap <- pvals_hsap[rowData(sce.hsap.PBsub)$ID, ]
ts_hsap <- ts_hsap[rowData(sce.hsap.PBsub)$ID, ]
rownames(ts_mm) <- rowData(sce.mm.PBsub)$HomoloGene.ID
rownames(pvals_mm) <- rowData(sce.mm.PBsub)$HomoloGene.ID
rownames(ts_hsap) <- rowData(sce.hsap.PBsub)$HomoloGene.ID
rownames(pvals_hsap) <- rowData(sce.hsap.PBsub)$HomoloGene.ID
table(rownames(ts_mm) == rownames(ts_hsap))
## all 14121 TRUE - good
## Now run correlation
cor_t = cor(ts_mm, ts_hsap)
signif(cor_t, 2)
## On just hsap cluster-specific homologous genes ===
hsap_specific_indices = mapply(function(t, p) {
oo = order(t, decreasing = TRUE)[1:100]
},
as.data.frame(ts_hsap),
as.data.frame(pvals_hsap)
)
hsap_ind = unique(as.numeric(hsap_specific_indices))
cor_t_hsap = cor(ts_mm[hsap_ind, ],
ts_hsap[hsap_ind, ])
signif(cor_t_hsap, 3)
## On just mouse cluster-specific homologous genes ===
mm_specific_indices = mapply(function(t, p) {
oo = order(t, decreasing = TRUE)[1:100]
},
as.data.frame(ts_mm),
as.data.frame(pvals_mm)
)
mm_ind = unique(as.numeric(mm_specific_indices))
cor_t_mm = cor(ts_mm[mm_ind, ],
ts_hsap[mm_ind, ])
signif(cor_t_mm, 3)
### Heatmap
theSeq.all = seq(-.5, .5, by = 0.01)
my.col.all <- colorRampPalette(brewer.pal(7, "PRGn"))(length(theSeq.all))
ct = colData(sce.hsap.PBsub)
ct = ct[!duplicated(sce.hsap.PBsub$cellType.final), ]
cor_t_hsap_toPlot = cor_t_hsap
rownames(cor_t_hsap_toPlot) = paste0(rownames(cor_t_hsap_toPlot),"_","M.mus")
colnames(cor_t_hsap_toPlot) = paste0(colnames(cor_t_hsap_toPlot),"_","H.sap")
cor_t_mm_toPlot = cor_t_mm
rownames(cor_t_mm_toPlot) = paste0(rownames(cor_t_mm_toPlot),"_","M.mus")
colnames(cor_t_mm_toPlot) = paste0(colnames(cor_t_mm_toPlot),"_","H.sap")
cor_t_all_toPlot = cor_t
rownames(cor_t_all_toPlot) = paste0(rownames(cor_t_all_toPlot),"_","M.mus")
colnames(cor_t_all_toPlot) = paste0(colnames(cor_t_all_toPlot),"_","H.sap")
## MNT added 14Apr2020: Reorder to diagonal & threshold at 0.4 for all-gene correlation === === ===
# Start from descending - easier to manually order
#cor_t_all_toPlot <- cor_t_all_toPlot[ ,rev(1:ncol(cor_t_all_toPlot))]
# This is useful:
apply(cor_t_all_toPlot, 2, which.max)
# If want to re-order human labels (but prefer re-ordering mm labels)
#cor_t_all_toPlot <- cor_t_all_toPlot[ ,rev(c(14,5,3,4, 7,10,12,6, 9,8,2,1, 11,13))]
cor_t_all_toPlot <- cor_t_all_toPlot[c(14,11:13,4,3, 2,8,7,9, 6,15,5,16,1,10), ]
# Threshold at 0.4
range(cor_t_all_toPlot)
cor_t_all_toPlot <- ifelse(cor_t_all_toPlot >= 0.4, 0.4, cor_t_all_toPlot)
## Do for other gene subsets ===
# Human
#cor_t_hsap_toPlot <- cor_t_hsap_toPlot[ ,rev(1:ncol(cor_t_hsap_toPlot))]
#cor_t_hsap_toPlot <- cor_t_hsap_toPlot[ ,rev(c(14,5,3,4, 7,10,12,6, 9,8,2,1, 11,13))]
cor_t_hsap_toPlot <- cor_t_hsap_toPlot[c(14,11:13,4,3, 2,8,7,9, 6,15,5,16,1,10), ]
# Threshold at 0.4
range(cor_t_hsap_toPlot)
cor_t_hsap_toPlot <- ifelse(cor_t_hsap_toPlot >= 0.4, 0.4, cor_t_hsap_toPlot)
# mm
#cor_t_mm_toPlot <- cor_t_mm_toPlot[ ,rev(1:ncol(cor_t_mm_toPlot))]
#cor_t_mm_toPlot <- cor_t_mm_toPlot[ ,rev(c(14,5,3,4, 7,10,12,6, 9,8,2,1, 11,13))]
cor_t_mm_toPlot <- cor_t_mm_toPlot[c(14,11:13,4,3, 2,8,7,9, 6,15,5,16,1,10), ]
# Threshold at 0.4
range(cor_t_mm_toPlot)
cor_t_mm_toPlot <- ifelse(cor_t_mm_toPlot >= 0.4, 0.4, cor_t_mm_toPlot)
pdf("/dcl01/lieber/ajaffe/Matt/MNT_thesis/snRNAseq/10x_pilot_FINAL/pdfs/exploration/overlap-mouse-MeA_with_LIBD-10x-Amyg_top100-or-all_Apr2020.pdf")
# Most human-specific
print(
levelplot(
cor_t_hsap_toPlot,
aspect = "fill",
at = theSeq.all,
col.regions = my.col.all,
ylab = "",
xlab = "",
scales = list(x = list(rot = 90, cex = 1), y = list(cex = 1)),
main="Correlation of cluster-specific t's \n (top 100 genes/human (LIBD) clusters)"
)
)
# Most mm-specific
print(
levelplot(
cor_t_mm_toPlot,
aspect = "fill",
at = theSeq.all,
col.regions = my.col.all,
ylab = "",
xlab = "",
scales = list(x = list(rot = 90, cex = 1), y = list(cex = 1)),
main="Correlation of cluster-specific t's \n (top 100 genes/mouse MeA (UCLA) clusters)"
)
)
# All
print(
levelplot(
cor_t_all_toPlot,
aspect = "fill",
at = theSeq.all,
col.regions = my.col.all,
ylab = "",
xlab = "",
scales = list(x = list(rot = 90, cex = 1), y = list(cex = 1)),
main="Correlation of cluster-specific t's \n (all shared 13,444 homologs)",
fontsize = 20
)
)
dev.off()
### Comparison to UCLA mouse MeA with SN-LEVEL stats ==================================
# Added MNT 14May2020 - UPDATED 22May2020 to compare to 2019 dataset
# (previously only 2017 neuronal subclusters), now with neuronal subcluster info
# Load mouse stats
# load("/dcl01/ajaffe/data/lab/singleCell/ucla_mouse-MeA/2017Neuron/markers-stats_mouseMeA-2017-neuSubs_findMarkers-SN-LEVEL_MNTMay2020.rda", verbose=T)
# # markers.mmMeAneu.t.1vAll
load("/dcl01/ajaffe/data/lab/singleCell/ucla_mouse-MeA/2019Cell/markers-stats_mouseMeA-2019-with-16neuSubs_findMarkers-SN-LEVEL_MNTMay2020.rda", verbose=T)
# markers.mmMeA.t.1vAll
# Load mouse SCE
# load("/dcl01/ajaffe/data/lab/singleCell/ucla_mouse-MeA/2017Neuron/SCE_mouse-MeA-2017_neuronalSubclusters_HVGs_MNT.rda", verbose=T)
# # sce.amy.mm17hvgs
load("/dcl01/ajaffe/data/lab/singleCell/ucla_mouse-MeA/2019Cell/SCE_mouse-MeA_downstream-processing_MNT.rda", verbose=T)
# sce.amy.mm, chosen.hvgs.amy.mm
## Calculate and add t-statistic (= std.logFC * sqrt(N)) for mouse clusters
# and fix row order to the first entry "Astrocyte"
fixTo <- rownames(markers.mmMeA.t.1vAll[[1]])
for(x in names(markers.mmMeA.t.1vAll)){
markers.mmMeA.t.1vAll[[x]]$t.stat <- markers.mmMeA.t.1vAll[[x]]$std.logFC * sqrt(ncol(sce.amy.mm))
markers.mmMeA.t.1vAll[[x]] <- markers.mmMeA.t.1vAll[[x]][fixTo, ]
}
# Pull out the t's
ts.mmMeA <- sapply(markers.mmMeA.t.1vAll, function(x){x$t.stat})
rownames(ts.mmMeA) <- fixTo
## Human t stats subset/re-ordering ===
# Bring in human stats; create t's
load("/dcl01/lieber/ajaffe/Matt/MNT_thesis/snRNAseq/10x_pilot_FINAL/rdas/markers-stats_Amyg-n2_findMarkers-SN-LEVEL_MNTMay2020.rda", verbose=T)
# markers.amy.t.1vAll, markers.amy.t.design, markers.amy.wilcox.block
rm(markers.amy.t.design, markers.amy.wilcox.block)
# Need to add t's with N nuclei used in contrasts
load("/dcl01/lieber/ajaffe/Matt/MNT_thesis/snRNAseq/10x_pilot_FINAL/rdas/regionSpecific_Amyg-n2_cleaned-combined_SCE_MNTFeb2020.rda", verbose=T)
#sce.amy, chosen.hvgs.amy, pc.choice.amy, clusterRefTab.amy, ref.sampleInfo
rm(chosen.hvgs.amy, pc.choice.amy, clusterRefTab.amy,ref.sampleInfo)
# First drop "Ambig.lowNtrxts" (50 nuclei)
sce.amy <- sce.amy[ ,sce.amy$cellType.split != "Ambig.lowNtrxts"]
sce.amy$cellType.split <- droplevels(sce.amy$cellType.split)
## As above, calculate and add t-statistic (= std.logFC * sqrt(N)) from contrasts
# and fix row order to the first entry "Astro"
fixTo <- rownames(markers.amy.t.1vAll[["Astro"]])
for(s in names(markers.amy.t.1vAll)){
markers.amy.t.1vAll[[s]]$t.stat <- markers.amy.t.1vAll[[s]]$std.logFC * sqrt(ncol(sce.amy))
markers.amy.t.1vAll[[s]] <- markers.amy.t.1vAll[[s]][fixTo, ]
}
# Pull out the t's
ts.amy <- sapply(markers.amy.t.1vAll, function(x){x$t.stat})
rownames(ts.amy) <- fixTo
## Bring in HomoloGene.ID info to subset/match order
# load("/dcl01/ajaffe/data/lab/singleCell/ucla_mouse-MeA/2019Cell/SCE_mm-MeA-PBd_w_matchingHsap-Amyg-PBd_HomoloGene.IDs_MNT.rda",
# verbose=T)
# # sce.mm.PBsub, sce.hsap.PBsub, Readme
#
# table(rowData(sce.mm.PBsub)$HomoloGene.ID == rowData(sce.hsap.PBsub)$HomoloGene.ID) # all TRUE - dope
# # (see above - these are the intersecting homologs)
#
# ## However!
# table(rownames(ts.mmMeA) %in% rownames(sce.mm.PBsub)) # not all - so will need to get union
# rm(sce.mm.PBsub, sce.hsap.PBsub, Readme)
## HomoloGene.ID for all human genes ====
hom = read.delim("http://www.informatics.jax.org/downloads/reports/HOM_AllOrganism.rpt",
as.is=TRUE)
hom_hs <- hom[hom$Common.Organism.Name == "human", ]
# of 19,124 entries
# First Add EntrezID for human
hs.entrezIds <- mapIds(org.Hs.eg.db, keys=rowData(sce.amy)$ID,
column="ENTREZID", keytype="ENSEMBL")
# "'select()' returned 1:many mapping between keys and columns"
table(!is.na(hs.entrezIds))
# 22,818 valid entries (remember this is already subsetted for those non-zero genes only)
# Add to rowData
rowData(sce.amy) <- cbind(rowData(sce.amy), hs.entrezIds)
# Now how many in JAX db?
table(rowData(sce.amy)$hs.entrezIds %in% hom_hs$EntrezGene.ID)
# 18,865
table(rowData(sce.amy)$Symbol %in% hom_hs$Symbol)
# 18,472 - not a bad difference
# So for mapping === === ===
# human.entrez > HomoloGene.ID < mm.Symbol
# ^ filter SCE's on this
# Human (by Entrez)
rowData(sce.amy)$HomoloGene.ID <- hom_hs$HomoloGene.ID[match(rowData(sce.amy)$hs.entrezIds,
hom_hs$EntrezGene.ID)]
# end chunk ====
# Intersection?
table(rowData(sce.amy.mm)$HomoloGene.ID %in% rowData(sce.amy)$HomoloGene.ID)
# FALSE TRUE
# 665 13845
# First give [human] ts.amy rownames their respective EnsemblID
# (have to use the full sce bc rownames(sce.hsap.PBsub) is EnsemblID and we uniquified the $Symbol)
rownames(ts.amy) <- rowData(sce.amy)$ID[match(rownames(ts.amy), rownames(sce.amy))]
# Then to HomoloGene.ID
rownames(ts.amy) <- rowData(sce.amy)$HomoloGene.ID[match(rownames(ts.amy), rowData(sce.amy)$ID)]
# Btw half are NA
table(is.na(rownames(ts.amy)))
# FALSE TRUE
# 17261 11203
# So subset for those with HomoloGene.ID
ts.amy <- ts.amy[!is.na(rownames(ts.amy)), ]
# Mouse - can just go to HomoloGene.ID
rownames(ts.mmMeA) <- rowData(sce.amy.mm)$HomoloGene.ID[match(rownames(ts.mmMeA), rownames(sce.amy.mm))]
# Intersecting?
table(rownames(ts.mmMeA) %in% rownames(ts.amy))
# FALSE TRUE
# 985 13525 - so we'll be running correlation across these genes
# Subset and match order
ts.mmMeA <- ts.mmMeA[rownames(ts.mmMeA) %in% rownames(ts.amy), ]
ts.amy <- ts.amy[rownames(ts.mmMeA), ]
cor_t_amy <- cor(ts.amy, ts.mmMeA)
rownames(cor_t_amy) = paste0(rownames(cor_t_amy),"_","H")
colnames(cor_t_amy) = paste0(colnames(cor_t_amy),"_","M")
range(cor_t_amy)
#[1] -0.2203968 0.5023080 (previously {-0.2557751, 0.2577207} on only 2017 neuronal subsets)
### Heatmap - typically use levelplot (e.g. below), but will want pheatmap bc can cluster cols/rows
theSeq.all = seq(-.6, .6, by = 0.01)
my.col.all <- colorRampPalette(brewer.pal(7, "BrBG"))(length(theSeq.all)-1)
# Re-order mouse labels - move EN/MG/MU to after neuronal subclusters
cor_t_amy <- cor_t_amy[ ,c(1, 5,13:20,6:12, 3,2,4, 21,23,22)]
# #pdf("/dcl01/lieber/ajaffe/Matt/MNT_thesis/snRNAseq/10x_pilot_FINAL/pdfs/exploration/HongLab-UCLA_mmMeA/overlap-mouseMeA-2017-neuSubs_with_LIBD-10x-AMY_SN-LEVEL-stats_May2020.pdf")
# pdf("/dcl01/lieber/ajaffe/Matt/MNT_thesis/snRNAseq/10x_pilot_FINAL/pdfs/exploration/HongLab-UCLA_mmMeA/overlap-mouseMeA-2019-fullSubclusters_with_LIBD-10x-AMY_SN-LEVEL-stats_May2020.pdf")
# pheatmap(cor_t_amy,
# color=my.col.all,
# cluster_cols=F, cluster_rows=F,
# breaks=theSeq.all,
# fontsize=11, fontsize_row=15, fontsize_col=12,
# #main="Correlation of cluster-specific t's for mouse MeA neuronal subclusters \n (Wu et al., Neuron 2017)")
# main="Correlation of cluster-specific t's to mouse MeA \n complete subclusters (Chen-Hu-Wu et al., Cell 2019)")
# # Version with mouse glial cell types 'missing' in LIBD data dropped:
# pheatmap(cor_t_amy_sub,
# color=my.col.all,
# cluster_cols=F, cluster_rows=F,
# breaks=theSeq.all,
# fontsize=11.5, fontsize_row=15, fontsize_col=13.5,
# main="Correlation of cluster-specific t's to mouse MeA \n complete subclusters (Chen-Hu-Wu et al., Cell 2019)")
#
# dev.off()
## Version with mouse glial cell types 'missing' in LIBD data dropped:
cor_t_amy_sub <- cor_t_amy[ ,-which(colnames(cor_t_amy) %in% c("EN_M", "MU_M", "OPC.OL_M"))]
## Actually just print as second page to the above - will probably get some suggested edits
## Iteration with top N spp:subcluster-specific genes: ========
# Added MNT 25May2020
# -> Basically just run through line 488, under ("Subset and match order")
# Save the ts matrices to reduce work next time
Readme <- "These t-statistic matrices are subsetted and matched for shared 'HomoloGene.ID', so `cor()` can simply be run or other gene subsets applied first."
save(ts.amy, ts.mmMeA, Readme, file="rdas/zTsMats_libd-AMY_and_ucla-mouseMeA-2019Cell_sharedGenes_25May2020.rda")
# # Have to remove the markers objects bc the rows have been fixed (actually don't need to lol)
# rm(markers.amy.t.1vAll, markers.mmMeA.t.1vAll)
#
# # Re-load them
# load("/dcl01/lieber/ajaffe/Matt/MNT_thesis/snRNAseq/10x_pilot_FINAL/rdas/markers-stats_Amyg-n2_findMarkers-SN-LEVEL_MNTMay2020.rda", verbose=T)
# # markers.amy.t.1vAll, markers.amy.t.design, markers.amy.wilcox.block
# rm(markers.amy.t.design, markers.amy.wilcox.block)
#
# load("/dcl01/ajaffe/data/lab/singleCell/ucla_mouse-MeA/2019Cell/markers-stats_mouseMeA-2019-with-16neuSubs_findMarkers-SN-LEVEL_MNTMay2020.rda", verbose=T)
# # markers.mmMeA.t.1vAll
#
## On just hsap cluster-specific homologous genes ===
hsap_specific_indices = mapply(function(t) {
oo = order(t, decreasing = TRUE)[1:100]
},
as.data.frame(ts.amy)
)
hsap_ind = unique(as.numeric(hsap_specific_indices))
length(hsap_ind) # so of 1200 (100 x 12 cellType.split), 919 unique
cor_t_hsap = cor(ts.amy[hsap_ind, ],
ts.mmMeA[hsap_ind, ])
range(cor_t_hsap)
#[1] -0.2738376 0.6612352
## On just mouse cluster-specific homologous genes ===
mouse_specific_indices = mapply(function(t) {
oo = order(t, decreasing = TRUE)[1:100]
},
as.data.frame(ts.mmMeA)
)
mouse_ind = unique(as.numeric(mouse_specific_indices))
length(mouse_ind) # so of 2300 (100 x 23 subCluster), 1543 unique
cor_t_mouse = cor(ts.amy[mouse_ind, ],
ts.mmMeA[mouse_ind, ])
range(cor_t_mouse)
# [1] -0.2731605 0.6113445
## UPDATED heatmap:
theSeq.all = seq(-.65, .65, by = 0.01)
my.col.all <- colorRampPalette(brewer.pal(7, "BrBG"))(length(theSeq.all)-1)
# Re-order mouse labels - move EN/MG/MU to after neuronal subclusters
cor_t_hsap <- cor_t_hsap[ ,c(1, 5,13:20,6:12, 3,2,4, 21,23,22)]
# Then threshold this one to 0.65 max (max is 0.6612)
cor_t_hsap <- ifelse(cor_t_hsap >= 0.65, 0.65, cor_t_hsap)
# (and)
cor_t_amy_sub <- cor_t_amy[ ,-which(colnames(cor_t_amy) %in% c("EN_M", "MU_M", "OPC.OL_M"))]
cor_t_mouse <- cor_t_mouse[ ,c(1, 5,13:20,6:12, 3,2,4, 21,23,22)]
#pdf("/dcl01/lieber/ajaffe/Matt/MNT_thesis/snRNAseq/10x_pilot_FINAL/pdfs/exploration/HongLab-UCLA_mmMeA/overlap-mouseMeA-2019-fullSubclusters_with_LIBD-10x-AMY_SN-LEVEL-stats_May2020.pdf")
# or
pdf("/dcl01/lieber/ajaffe/Matt/MNT_thesis/snRNAseq/10x_pilot_FINAL/pdfs/exploration/HongLab-UCLA_mmMeA/overlap-mouseMeA-2019-fullSubclusters_with_LIBD-10x-AMY_SN-LEVEL-stats_numbersPrinted_May2020.pdf")
pheatmap(cor_t_amy,
color=my.col.all,
cluster_cols=F, cluster_rows=F,
breaks=theSeq.all,
fontsize=11, fontsize_row=15, fontsize_col=12,
display_numbers=T, number_format="%.2f", fontsize_number=6.5,
main="Correlation of cluster-specific t's to mouse MeA \n complete subclusters (Chen-Hu-Wu et al., Cell 2019)")
# Version with mouse glial cell types 'missing' in LIBD data dropped:
pheatmap(cor_t_amy_sub,
color=my.col.all,
cluster_cols=F, cluster_rows=F,
breaks=theSeq.all,
fontsize=11.5, fontsize_row=15, fontsize_col=13.5,
display_numbers=T, number_format="%.2f", fontsize_number=6.5,
main="Correlation of cluster-specific t's to mouse MeA \n complete subclusters (Chen-Hu-Wu et al., Cell 2019)")
# On human-specific genes (slightly thresholded)
pheatmap(cor_t_hsap,
color=my.col.all,
cluster_cols=F, cluster_rows=F,
breaks=theSeq.all,
fontsize=11.5, fontsize_row=15, fontsize_col=13.5,
display_numbers=T, number_format="%.2f", fontsize_number=6.5,
main="Correlation of top-100 cluster-specific t's to \n (Chen-Hu-Wu et al., Cell 2019) subclusters")
# On mm-MeA-specific genes
pheatmap(cor_t_mouse,
color=my.col.all,
cluster_cols=F, cluster_rows=F,
breaks=theSeq.all,
fontsize=11.5, fontsize_row=15, fontsize_col=13.5,
display_numbers=T, number_format="%.2f", fontsize_number=6.5,
main="Correlation of LIBD-AMY subclusters to \n (Chen-Hu-Wu et al., Cell 2019) top-100 subcluster t's")
dev.off()
## Intersecting some of the top markers =====================
load("/dcl01/lieber/ajaffe/Matt/MNT_thesis/snRNAseq/10x_pilot_FINAL/rdas/markers-stats_Amyg-n2_findMarkers-SN-LEVEL_MNTMay2020.rda", verbose=T)
# markers.amy.t.1vAll, markers.amy.t.design, markers.amy.wilcox.block
rm(markers.amy.t.design, markers.amy.wilcox.block)
# Take top 100
markerList.t.hsap <- lapply(markers.amy.t.1vAll, function(x){
rownames(x)[x$log.FDR < log10(1e-6)]
}
)
genes.top100.hsap <- lapply(markerList.t.hsap, function(x){head(x, n=100)})
load("/dcl01/ajaffe/data/lab/singleCell/ucla_mouse-MeA/2019Cell/markers-stats_mouseMeA-2019-with-16neuSubs_findMarkers-SN-LEVEL_MNTMay2020.rda", verbose=T)
# markers.mmMeA.t.1vAll
# Just `toupper()` it
markerList.t.mm <- lapply(markers.mmMeA.t.1vAll, function(x){
rownames(x)[x$log.FDR < log10(1e-6)]
}
)
genes.top100.mm <- lapply(markerList.t.mm, function(x){toupper(head(x, n=100))})
genes.top100.mm <- sapply(genes.top100.mm, cbind)
## Cross-tabulate overlaps of the top-100 marker sets (rows: mouse subclusters, cols: human clusters)
sapply(genes.top100.hsap, function(x){
apply(genes.top100.mm,2,function(y){length(intersect(x,y))})
})
# Astro Excit.1 Excit.2 Excit.3 Inhib.1 Inhib.2 Inhib.3 Inhib.4 Inhib.5 Micro Oligo OPC
# AS 20 0 1 0 0 1 0 0 1 0 0 1
# EN 2 1 1 0 1 2 1 0 0 0 0 2
# MG 0 0 0 1 0 0 0 0 0 19 0 0
# MU 2 1 0 1 0 0 1 1 0 0 0 0
# N.1 1 4 2 0 14 8 3 4 9 0 1 1
# N.10 0 6 1 4 7 7 2 0 6 0 0 0
# N.11 1 10 5 2 8 3 4 6 8 0 0 4
# N.12 2 7 4 3 7 5 2 3 5 0 2 2
# N.13 1 2 1 3 1 1 0 0 5 1 0 1
# N.14 0 7 2 4 9 6 0 4 7 1 1 2
# N.15 0 7 1 6 0 1 1 0 1 0 0 1
# N.16 1 3 4 1 7 3 3 6 4 0 0 4
# N.2 2 6 2 1 9 5 2 3 6 0 0 3
# N.3 2 3 1 4 0 3 0 0 2 0 0 0
# N.4 2 5 3 1 10 7 3 10 6 1 1 3
# N.5 0 4 3 2 4 4 1 2 5 0 0 2
# N.6 1 2 3 0 13 10 6 8 9 0 3 2
# N.7 0 4 10 1 1 3 1 2 2 0 0 1
# N.8 1 7 4 4 6 6 2 3 19 1 1 3
# N.9 0 3 1 1 10 5 2 5 4 0 0 1
# OL 0 0 2 0 0 0 0 0 0 0 19 0
# OPC 0 0 0 0 0 1 0 0 0 0 0 26
# OPC.OL 0 0 0 1 0 0 0 0 1 0 5 7
## Amongst top 40 ===
genes.top40.hsap <- lapply(markerList.t.hsap, function(x){head(x, n=40)})
genes.top40.mm <- lapply(markerList.t.mm, function(x){toupper(head(x, n=40))})
genes.top40.mm <- sapply(genes.top40.mm, cbind)
sapply(genes.top40.hsap, function(x){
apply(genes.top40.mm,2,function(y){length(intersect(x,y))})
})
# Astro Excit.1 Excit.2 Excit.3 Inhib.1 Inhib.2 Inhib.3 Inhib.4 Inhib.5 Micro Oligo OPC
# AS 7 0 0 0 0 0 0 0 0 0 0 0
# EN 1 0 0 0 0 0 0 0 0 0 0 0
# MG 0 0 0 0 0 0 0 0 0 4 0 0
# MU 0 0 0 0 0 0 0 0 0 0 0 0
# N.1 0 0 1 0 1 0 0 0 0 0 0 0
# N.10 0 0 0 0 1 2 0 0 2 0 0 0
# N.11 0 4 0 0 2 0 0 2 0 0 0 1
# N.12 1 2 2 0 0 1 0 0 2 0 0 1
# N.13 0 0 0 1 0 0 0 0 1 0 0 0
# N.14 0 2 0 1 0 1 0 0 1 1 0 0
# N.15 0 3 0 0 0 0 0 0 1 0 0 0
# N.16 0 1 1 0 0 1 0 0 1 0 0 2
# N.2 0 1 1 0 1 0 0 0 1 0 0 0
# N.3 0 1 0 0 0 2 0 0 0 0 0 0
# N.4 0 1 1 0 3 3 0 1 0 0 0 1
# N.5 0 0 0 0 1 0 0 0 0 0 0 1
# N.6 0 1 0 0 2 2 0 0 0 0 0 0
# N.7 0 0 2 0 0 1 0 1 0 0 0 0
# N.8 0 1 0 0 1 1 1 2 1 0 0 0
# N.9 0 0 0 0 0 1 0 1 1 0 0 0
# OL 0 0 1 0 0 0 0 0 0 0 7 0
# OPC 0 0 0 0 0 0 0 0 0 0 0 10
# OPC.OL 0 0 0 0 0 0 0 0 0 0 1 1
# Inhib.5 : N.8 genes ==
intersect(genes.top40.hsap[["Inhib.5"]], genes.top100.mm[ ,"N.8"])
# [1] "NPFFR2" "SV2C" "OTOF" "GRM8" "OLFM3" "FOXP2"
# round(ts.mmMeA["49202", ],3) # (Tll1 - looking because a highlighted gene in text)
# # AS EN MG MU N.1 N.10 N.11 N.12 N.13 N.14 N.15 N.16
# # -5.939 -5.932 -6.699 1.698 8.835 2.691 107.521 -5.323 20.345 86.122 -5.484 -5.423
# # N.2 N.3 N.4 N.5 N.6 N.7 N.8 N.9 OL OPC OPC.OL
# # 13.117 -5.297 33.339 16.283 -6.203 -5.520 108.310 22.783 -5.886 -4.273 -5.318
#
plotExpression(sce.amy.mm, exprs_values="logcounts", x="subCluster", colour_by="subCluster", features="Tll1")
# # ahh nothing but a few outliers
sce.amy.mm.sub <- sce.amy.mm[ ,grep("N.", sce.amy.mm$subCluster)]
sce.amy.mm.sub$subCluster <- droplevels(sce.amy.mm.sub$subCluster)
plotExpression(sce.amy.mm.sub, exprs_values="logcounts", x="subCluster", colour_by="subCluster",
features=c("Npffr2","Sv2c","Otof","Grm8","Olfm3","Foxp2"))
    # Actually nothing super convincing - mostly outliers; these genes just happen to have _more_ of them
# N.8 top genes include Pcdh8 & Lamp5
plotExpression(sce.amy.mm.sub, exprs_values="logcounts", x="subCluster", colour_by="subCluster",
features=c("Pcdh8","Lamp5"))
# N.12 reported marker genes (reported in supplementals "mmc2.xlsx" with paper)
plotExpression(sce.amy.mm.sub, exprs_values="logcounts", x="subCluster", colour_by="subCluster",
features=c("Eomes","Dsp","Nhlh2","Samd3","Trpc3","Cdhr1","Lhx1"))
    # Six of these were among the top 10 from my own test and already plotted above - good agreement.
# (and btw) ===
table(sce.amy$cellType.split, sce.amy$donor)
# Br5161 Br5212
# Ambig.lowNtrxts 34 16
# Astro 489 363
# Excit.1 141 193
# Excit.2 0 40
# Excit.3 0 55
# Inhib.1 16 155
# Inhib.2 33 76
# Inhib.3 11 24
# Inhib.4 24 0
# Inhib.5 85 13
# Micro 425 339
# Oligo 1697 1776
# OPC 335 292
# Glucocorticoid receptors? (in relation to TLL1, as per https://doi.org/10.1016/j.molbrainres.2005.09.016)
plotExpression(sce.amy, exprs_values="logcounts", x="cellType.split", colour_by="cellType.split",
features=c("NR3C1","NR3C2")) + stat_summary(fun.y = median, fun.ymin = median, fun.ymax = median,
geom = "crossbar", width = 0.3,
colour=rep(tableau20[1:12], 2)) +
theme(axis.text.x = element_text(angle = 90, hjust = 1), plot.title = element_text(size = 25))
    # No particularly high/specific expression in Inhib.5
### FINAL GRANT VERSION ===
# Remove EN, MU, OPC.OL, N.12 & N.15
load("rdas/zTsMats_libd-AMY_and_ucla-mouseMeA-2019Cell_sharedGenes_25May2020.rda", verbose=T)
# ts.amy, ts.mmMeA, Readme
cor_t_amy <- cor(ts.amy, ts.mmMeA)
rownames(cor_t_amy) = paste0(rownames(cor_t_amy),"_","H")
colnames(cor_t_amy) = paste0(colnames(cor_t_amy),"_","M")
# Re-order mouse labels - move EN/MG/MU to after neuronal subclusters
cor_t_amy <- cor_t_amy[ ,c(1, 5,13:20,6:12, 3,2,4, 21,23,22)]
# Remove those selected
cor_t_amy_sub <- cor_t_amy[ ,-which(colnames(cor_t_amy) %in% c("EN_M", "MU_M", "OPC.OL_M",
"N.12_M", "N.15_M"))]
range(cor_t_amy_sub)
#[1] -0.2203968 0.5023080 --> Threshold to 0.5
cor_t_amy_sub <- ifelse(cor_t_amy_sub >= 0.5, 0.5, cor_t_amy_sub)
### Heatmap - typically use levelplot (e.g. below), but will want pheatmap bc can cluster cols/rows
theSeq.all = seq(-.5, .5, by = 0.01)
my.col.all <- colorRampPalette(brewer.pal(7, "BrBG"))(length(theSeq.all)-1)
pdf("/dcl01/lieber/ajaffe/Matt/MNT_thesis/snRNAseq/10x_pilot_FINAL/pdfs/exploration/HongLab-UCLA_mmMeA/overlap-mouseMeA-2019-fullSubclusters_with_LIBD-10x-AMY_SN-LEVEL-stats_FINAL_May2020.pdf",width=8)
pheatmap(cor_t_amy_sub,
color=my.col.all,
cluster_cols=F, cluster_rows=F,
angle_col=90,
breaks=theSeq.all,
fontsize=11.5, fontsize_row=17, fontsize_col=15,
legend_breaks=c(seq(-0.5,0.5,by=0.25)),
main="Correlation of cluster-specific t's to mouse MeA \n subclusters (Chen-Hu-Wu et al., Cell 2019)")
dev.off()
## For supplement: Print top markers for 'Inhib.5' & corresponding in MeA 'N.8' === ===
# (load AMY SCE - already done in session)
# Prep mouse MeA
load("/dcl01/ajaffe/data/lab/singleCell/ucla_mouse-MeA/2019Cell/SCE_mouse-MeA_downstream-processing_MNT.rda", verbose=T)
# sce.amy.mm, chosen.hvgs.amy.mm
sce.amy.mm.sub <- sce.amy.mm[ ,grep("N.", sce.amy.mm$subCluster)]
sce.amy.mm.sub$subCluster <- droplevels(sce.amy.mm.sub$subCluster)
genes2print <- c("Npffr2", "Tll1", "Grm8", "Foxp2")
pdf("pdfs/pubFigures/suppFig_AMY-vs-MeA_topInhib.5markers_MNTSep2020.pdf", height=2.5, width=5)
# Human AMY
print(
plotExpression(sce.amy, exprs_values = "logcounts", features=toupper(genes2print),
x="cellType.split", colour_by="cellType.split", point_alpha=0.5, point_size=1.0, ncol=4,
add_legend=F) + stat_summary(fun.y = median, fun.ymin = median, fun.ymax = median,
geom = "crossbar", width = 0.3,
colour=rep(tableau20[1:12], length(genes2print))) +
xlab("") +
theme(axis.text.x = element_text(angle = 90, hjust = 1, size = 9.5),
axis.title.y = element_text(angle = 90, size = 10),
panel.grid.major=element_line(colour="grey95", size=0.8),
panel.grid.minor=element_line(colour="grey95", size=0.4))
)
# mouse MeA
print(
plotExpression(sce.amy.mm.sub, exprs_values = "logcounts", features=genes2print,
x="subCluster", colour_by="subCluster", point_alpha=0.5, point_size=1.0, ncol=4,
add_legend=F) + stat_summary(fun.y = median, fun.ymin = median, fun.ymax = median,
geom = "crossbar", width = 0.3,
colour=rep(tableau20[1:16], length(genes2print))) +
xlab("") +
theme(axis.text.x = element_text(angle = 90, hjust = 1, size = 9.5),
axis.title.y = element_text(angle = 90, size = 10),
panel.grid.major=element_line(colour="grey95", size=0.8),
panel.grid.minor=element_line(colour="grey95", size=0.4))
)
dev.off()
## Heatmap version ===
# Take more overlapping, from above exploration
genes2print <- c("Npffr2", "Tll1", "Grm8", "Foxp2", "Sv2c", "Olfm3")
pdf("pdfs/pubFigures/suppFig_AMY-vs-MeA_topInhib.5markers_heatmap_MNTSep2020.pdf", width=5, height=5)
dat <- assay(sce.amy, "logcounts")
cell.idx <- splitit(sce.amy$cellType.split)
current_dat <- do.call(cbind, lapply(cell.idx, function(ii) rowMeans(dat[toupper(genes2print), ii])))
pheatmap(current_dat, cluster_rows = FALSE, cluster_cols = FALSE, breaks = seq(0.02, 4.0, length.out = 101),
color = colorRampPalette(RColorBrewer::brewer.pal(n = 7, name = "BuGn"))(100),
fontsize_row = 18, fontsize_col=16)
dat <- assay(sce.amy.mm.sub, "logcounts")
cell.idx <- splitit(sce.amy.mm.sub$subCluster)
current_dat <- do.call(cbind, lapply(cell.idx, function(ii) rowMeans(dat[genes2print, ii])))
pheatmap(current_dat, cluster_rows = FALSE, cluster_cols = FALSE, breaks = seq(0.02, 1, length.out = 101),
color = colorRampPalette(RColorBrewer::brewer.pal(n = 7, name = "BuGn"))(100),
fontsize_row = 16, fontsize_col=16)
dev.off()
|
e6a1561d542e5d40b0f289e603a7fe723daa148a
|
63b6a7e1890c956ba6082c65ed035e478aaec0f1
|
/Item_item _based_CF.R
|
81f47c2a77a3745487da06cbbe893d239fdac073
|
[] |
no_license
|
abhiraj24/RecommendationSystem
|
20c56493915b8b24816ec29f4cd2cbcebfbac22b
|
54374f92edda55cf93a5c264b355020a4e38bf07
|
refs/heads/master
| 2022-04-15T09:27:59.674159
| 2020-04-12T20:40:12
| 2020-04-12T20:40:12
| 255,165,820
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 3,307
|
r
|
Item_item _based_CF.R
|
setwd("C:/Users/User/Desktop/product_recommendation_engine")
library(data.table)
library(dplyr)
library(reshape2)
library(Matrix)   # needed below for the sparse xtabs() result, Diagonal() and sparse crossprod()
#Loading the datasets
prod_mast<-fread("ProductMasterNovDec18.csv")
trans_data<-fread("TransactionDataNov18Dec18.csv")
nei_lo<-fread("NeighborLoyaltyCardData.csv")
nei_seg<-fread("NeighborSegmentJan19.csv")
#######################Joining Product Master and Transaction Data##################################
prod_trans<-trans_data[prod_mast,on=.(ItemId),nomatch=0][order(TransactionId)]
trans_nei<-merge(trans_data,nei_lo,by.x="LoyaltyCardId",by.y="LoyaltyCardNumber")
prod_nei<-merge(trans_nei,prod_mast,by="ItemId")
final_db<-merge(nei_seg,prod_nei,by="NEIGHBORID")
#######################Calculating the Score for Item Similarity####################################
final_demo<-final_db[,DSCS:=paste(Department,"/",SubDepartment,"/",Class,"/",SubClass)][,DSCS_Description:=paste(DSCS,"_",Description)]
final_demo<-final_db[,c("TransactionId","DSCS_Description","ItemId","NetAmount","NEIGHBORID","Qty","Segment","DepartmentId","SubDepartmentId")][order(TransactionId)][NetAmount>=0]
final_demo<-final_demo[,TotalAmount:=sum(NetAmount),by=.(ItemId,NEIGHBORID)]
final_demo<-final_demo[,TotalQty:=sum(Qty),by=.(ItemId,NEIGHBORID)]
final_demo<-unique(final_demo)
final_demo<-final_demo[,QtyScore:=TotalQty/sum(TotalQty),by=.(NEIGHBORID)]
final_demo<-final_demo[,Score:=(final_demo$DepartmentId*10+final_demo$SubDepartmentId+final_demo$QtyScore)]
###################function to generate the recommendations########################################
fly<-function(user)
{
User_prof<-final_demo[NEIGHBORID==user,]
  Neigh_prof<-final_demo[(final_demo$Segment=="A1"|final_demo$Segment=="A2"|final_demo$Segment=="A3"),]
  # filter the already-segmented neighbours; previously this line re-filtered
  # final_demo, silently discarding the segment filter above
  Neigh_prof<-Neigh_prof[(Neigh_prof$DepartmentId==User_prof$DepartmentId & Neigh_prof$SubDepartmentId==User_prof$SubDepartmentId),]
UImatrix<-xtabs(Neigh_prof$Score~Neigh_prof$NEIGHBORID+Neigh_prof$ItemId,sparse = TRUE)
cal_cos <- function(X, Y)
{
ones <- rep(1,nrow(X))
means <- drop(crossprod(X^2, ones)) ^ 0.5
diagonal <- Diagonal( x = means^-1 )
X <- X %*% diagonal
ones <- rep(1,nrow(Y))
means <- drop(crossprod(Y^2, ones)) ^ 0.5
diagonal <- Diagonal( x = means^-1 )
Y <- Y %*% diagonal
crossprod(X, Y)
}
sim<-cal_cos(UImatrix,UImatrix)
sim<-as.matrix(sim)
  # melt the full similarity matrix into long form (two earlier intermediate
  # subsets of `sim` were dead code - immediately overwritten - and are dropped)
  x<-melt(sim,value.name = "Similarity")
  x<-as.data.table(x)
x<-x[x$Var2==User_prof$ItemId,]
x<-x[order(desc(x$Similarity)),]
colnames(x)[colnames(x)=="Var2"] <- "Items_bought"
colnames(x)[colnames(x)=="Var1"] <- "ItemId"
y<-final_demo[,c("DSCS_Description","ItemId")]
x<-merge(y,x,by="ItemId")
x<-x[order(desc(Similarity))]
x<-unique(x)
x$ItemId <-as.numeric(x$ItemId)
x$Similarity <-as.numeric(x$Similarity)
x<-anti_join(x,User_prof,by="ItemId")
View(x)
#z<-Neigh_prof[,c("DSCS_Description","SubDepartmentId","DepartmentId")]
#z<-merge(x,z,by="DSCS_Description")
#z<-z[order(desc(z$Similarity)),]
#rownames(z)<-NULL
#z<-unique(z)
#z<-as.data.table(z)
#z<-z[,head(.SD, 10), by =.(DepartmentId,SubDepartmentId)]
#View(z)
#print(User_prof)
}
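## Hedged aside (not in the original script): a standalone illustration of what
## cal_cos() inside fly() computes - cosine similarity between the columns of X
## and Y, via L2-normalising each column and taking crossprod(). Toy data only;
## requires the Matrix package loaded above.
cosine_columns <- function(X, Y) {
  Xn <- X %*% Diagonal(x = 1 / sqrt(colSums(X^2)))  # normalise columns of X
  Yn <- Y %*% Diagonal(x = 1 / sqrt(colSums(Y^2)))  # normalise columns of Y
  crossprod(Xn, Yn)                                 # t(Xn) %*% Yn = cosine matrix
}
# toy <- Matrix(c(1, 0, 2,  2, 0, 4,  0, 1, 0), nrow = 3, sparse = TRUE)
# round(as.matrix(cosine_columns(toy, toy)), 3)  # columns 1 and 2 are parallel -> 1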
|
37ab418cfc1010ca08a95bf6e0132bfbfcdbae33
|
9daa542b71edc3f698fd7cc2ab6f1d7a1c48d7f2
|
/blc_export/postprocessing.R
|
f8340821d5a7d1444d890651b7e4684365d1d1d6
|
[] |
no_license
|
patrickrd/SMLM
|
74b5d745633f2a4574d8e247d8dccee6824a658f
|
1bdc80b5e327787593b6e7117c963218309b67ab
|
refs/heads/master
| 2020-06-11T13:17:44.409661
| 2016-12-05T19:00:38
| 2016-12-05T19:00:38
| 75,655,578
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 8,350
|
r
|
postprocessing.R
|
source("internal.R")
foldernames=c("ROIs")
sapply(foldernames, function(expname){
nexpname=expname
r = readLines(con=file.path(paste(nexpname, "/config.txt", sep="")))
get <- function(type){i = grep(type,r); strsplit(r[i], "=")[[1]][2]}
as.v <- function(ch){as.numeric(strsplit(ch,",")[[1]])}
if (length(grep("skeleton",r))==0) skeleton=FALSE else skeleton=as.logical(as.numeric(get("skeleton")))
if (skeleton){
nexpname=paste("R_", expname, sep="")
dir.create(file.path(nexpname))
file.copy(file.path(paste(expname, "/config.txt", sep="")), paste(nexpname, "/config.txt", sep=""))
file.copy(file.path(paste(expname, "/sim_params.txt", sep="")), paste(nexpname, "/sim_params.txt", sep=""))
}
model=get("model")
{if (model=="Gaussian(prec)"){
xlim = as.v(get("xlim"))
ylim = as.v(get("ylim"))
histbins = as.v(get("histbins"))
histvalues = as.v(get("histvalues"))
if (length(grep("pbackground",r))==0 | length(grep("alpha",r))==0){
useplabel=FALSE; pb=NULL; alpha=NULL
}
else {
useplabel=TRUE;
pb=as.numeric(get("pbackground"))
alpha=as.numeric(get("alpha"))
}
}
else {stop("Haven't implemented anything else!")}}
if (length(grep("makeplot",r))==0) makeplot=FALSE else makeplot=as.logical(as.numeric(get("makeplot")))
if (length(grep("superplot",r))==0) superplot=FALSE else superplot=as.logical(as.numeric(get("superplot")))
all=list.files(expname)
dirnames=all[file.info(file.path(paste(expname, "/", all, sep="")))$isdir]
axes=FALSE;
cex=1/(3*sqrt(length(dirnames)))
if (makeplot & superplot) {
##These settings control image size and resolution
png(file.path(paste(nexpname, "/together.png",sep="")), width=10, height=10, units="cm", res=1200)
nrow=ceiling(sqrt(length(dirnames)))
par(mfrow=c(nrow, nrow))
}
res=lapply(dirnames, function(dirname){
foldername=file.path(paste(expname, "/", dirname, sep=""))
nfoldername=file.path(paste(nexpname, "/", dirname, sep=""))
if (skeleton){
dir.create(nfoldername)
file.copy(file.path(paste(foldername, "/data.txt", sep="")), file.path(paste(nfoldername, "/data.txt", sep="")))
}
data=read.csv(file.path(paste(nfoldername, "/data.txt", sep="")))
pts = data[,1:2]; sds = data[,3];
if (skeleton){
file.copy(file.path(paste(foldername, "/r_vs_thresh.txt", sep="")), file.path(paste(nfoldername, "/r_vs_thresh.txt", sep="")))
}
r = read.csv(file.path(paste(nfoldername, "/r_vs_thresh.txt",sep="")), header=FALSE, sep="\t")
m = as.matrix(r)
cs=(m[1,])[-1]
thr=(m[,1])[-1]
m = as.matrix(m[2:length(m[,1]),2:length(m[1,])])
which.maxm <- function(mat){
indcol <- rep(1:ncol(mat), each=nrow(mat))[which.max(mat)]
indrow <- rep(1:nrow(mat), ncol(mat))[which.max(mat)]
c(indrow, indcol)
}
best=which.maxm(m)
bestcs=cs[best[2]]
bestthr=thr[best[1]]
bfile=file.path(paste(foldername, "/labels/clusterscale", bestcs, " thresh", bestthr, "labels.txt", sep=""))
nbfile=bfile
if (skeleton){
dir.create(paste(nfoldername, "/labels", sep=""))
nbfile=file.path(paste(nfoldername, "/labels/clusterscale", bestcs, " thresh", bestthr, "labels.txt", sep=""))
file.copy(bfile, nbfile)
}
labelsbest = strsplit(readLines(nbfile),",")[[1]]
##Some summaries
wfile=file.path(paste(nfoldername, "/summary.txt", sep=""))
cat("The best: clusterscale", bestcs, " thresh", bestthr, "labels.txt\nNumber of clusters:", nClusters(labelsbest), "\nPercentage in clusters: ", percentageInCluster(labelsbest), "%\nMean number of molecules per cluster: ", nMolsPerCluster(labelsbest), "\nMean radius: ", mean(clusterRadii(pts, labelsbest)), sep="", file=wfile)
s=clusterStatistics(pts, labelsbest)
if (!is.null(s)){
wfile=file.path(paste(nfoldername, "/cluster-statistics.txt", sep=""))
cat("x,y,sd,nmol\n", file=wfile)
for (i in 1:dim(s)[2]){
cat(s[,i], sep=",", append=TRUE, file=wfile); cat("\n",append=TRUE,file=wfile)
}
}
if (makeplot){
if (!superplot){
pdf(file.path(paste(nfoldername, "/plot.pdf", sep="")))
axes=TRUE
cex=1
}
if ("clusterID" %in% colnames(data) & !superplot){
labelstrue=sapply(as.numeric(data[,4]), function(n){if (n==0) paste(runif(1)) else {paste(n)}})
par(pty="s")
par(mfrow=c(1,2))
par(mar=c(4,4,.5, .5))
par(oma=c(1,1,1,1))
plot(pts, xlim=xlim, ylim=ylim, col=mkcols(labelstrue), sub="True labels", xlab="",ylab="")
par(mar=c(4,4,.5, .5))
plot(pts, xlim=xlim, ylim=ylim, col=mkcols(labelsbest), sub="Estimated",xlab="",ylab="")
if (!superplot) dev.off()
}
else {
par(pty="s")
par(mar=c(0,0,0,0))
plot(pts, xlim=xlim, ylim=ylim, col=mkcols(labelsbest), sub="Clustering",xlab="",ylab="", pch=16, cex=cex, axes=axes)
box()
if (!superplot) dev.off()
}
}
list(radii=clusterRadii(pts, labelsbest), nmols=molsPerCluster(labelsbest), nclusters=nClusters(labelsbest), pclustered=percentageInCluster(labelsbest), totalmols=length(labelsbest), reldensity=reldensity(pts, labelsbest, xlim, ylim))
})
if (makeplot & superplot) dev.off()
nmols=c()
for (i in 1:length(res)){
nmols=c(nmols, res[[i]]$nmols)
}
h=hist(nmols, plot=FALSE)
pdf(file.path(paste(nexpname, "/nmols.pdf", sep="")))
plot(h, xlab="Number of molecules", ylab="Number of clusters", main="")
dev.off()
f=file.path(paste(nexpname, "/nmols.txt", sep="")); cat(nmols, file=f, sep=","); cat("\n", file=f, append=TRUE)
radii=c()
for (i in 1:length(res)){
radii=c(radii, res[[i]]$radii)
}
h=hist(radii, plot=FALSE)
pdf(file.path(paste(nexpname, "/radii.pdf", sep="")))
plot(h, xlab="Cluster radius", ylab="Number of clusters", main="")
dev.off()
f=file.path(paste(nexpname, "/radii.txt", sep="")); cat(radii, file=f, sep=","); cat("\n", file=f, append=TRUE)
nclusters=c()
for (i in 1:length(res)){
nclusters=c(nclusters, res[[i]]$nclusters)
}
h=hist(nclusters, plot=FALSE)
pdf(file.path(paste(nexpname, "/nclusters.pdf", sep="")))
plot(h, xlab="Number of clusters", ylab="Number of regions", main="")
dev.off()
f=file.path(paste(nexpname, "/nclusters.txt", sep="")); cat(nclusters, file=f, sep=","); cat("\n", file=f, append=TRUE)
pclustered=c()
for (i in 1:length(res)){
pclustered=c(pclustered, res[[i]]$pclustered)
}
h=hist(pclustered, plot=FALSE)
pdf(file.path(paste(nexpname, "/pclustered.pdf", sep="")))
plot(h, xlab="Percentage clustered", ylab="Number of regions", main="")
dev.off()
f=file.path(paste(nexpname, "/pclustered.txt", sep="")); cat(pclustered, file=f, sep=","); cat("\n", file=f, append=TRUE)
totalmols=c()
for (i in 1:length(res)){
totalmols=c(totalmols, res[[i]]$totalmols)
}
h=hist(totalmols, plot=FALSE)
pdf(file.path(paste(nexpname, "/totalmols.pdf", sep="")))
plot(h, xlab="Total Mols per ROI", ylab="Number of regions", main="")
dev.off()
f=file.path(paste(nexpname, "/totalmols.txt", sep="")); cat(totalmols, file=f, sep=","); cat("\n", file=f, append=TRUE)
reldensity=c()
for (i in 1:length(res)){
reldensity=c(reldensity, res[[i]]$reldensity)
}
h=hist(reldensity, plot=FALSE)
pdf(file.path(paste(nexpname, "/reldensity.pdf", sep="")))
plot(h, xlab="Total Mols per ROI", ylab="Number of regions", main="")
dev.off()
f=file.path(paste(nexpname, "/reldensity.txt", sep="")); cat(reldensity, file=f, sep=","); cat("\n", file=f, append=TRUE)
})
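## Hedged refactoring sketch (not part of the original script): the six
## histogram/export blocks above all share one pattern, so they could be
## generated by a single helper; the field names match those returned in `res`.
summariseField <- function(res, nexpname, field, xlab, ylab = "Number of regions") {
  vals <- unlist(lapply(res, function(r) r[[field]]))
  h <- hist(vals, plot = FALSE)
  pdf(file.path(paste(nexpname, "/", field, ".pdf", sep = "")))
  plot(h, xlab = xlab, ylab = ylab, main = "")
  dev.off()
  f <- file.path(paste(nexpname, "/", field, ".txt", sep = ""))
  cat(vals, file = f, sep = ","); cat("\n", file = f, append = TRUE)
}
# e.g. summariseField(res, nexpname, "radii", "Cluster radius", "Number of clusters")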
|
4b57965b8db58606b12d2d84d71e7bec0efaf263
|
7917fc0a7108a994bf39359385fb5728d189c182
|
/cran/paws.networking/man/apigateway_get_gateway_responses.Rd
|
c3ea6e0a590f7c3e600be4490570654f91ec6b31
|
[
"Apache-2.0"
] |
permissive
|
TWarczak/paws
|
b59300a5c41e374542a80aba223f84e1e2538bec
|
e70532e3e245286452e97e3286b5decce5c4eb90
|
refs/heads/main
| 2023-07-06T21:51:31.572720
| 2021-08-06T02:08:53
| 2021-08-06T02:08:53
| 396,131,582
| 1
| 0
|
NOASSERTION
| 2021-08-14T21:11:04
| 2021-08-14T21:11:04
| null |
UTF-8
|
R
| false
| true
| 1,961
|
rd
|
apigateway_get_gateway_responses.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/apigateway_operations.R
\name{apigateway_get_gateway_responses}
\alias{apigateway_get_gateway_responses}
\title{Gets the GatewayResponses collection on the given RestApi}
\usage{
apigateway_get_gateway_responses(restApiId, position, limit)
}
\arguments{
\item{restApiId}{[required] [Required] The string identifier of the associated RestApi.}
\item{position}{The current pagination position in the paged result set. The
GatewayResponse collection does not support pagination and the position
does not apply here.}
\item{limit}{The maximum number of returned results per page. The default value is 25
and the maximum value is 500. The GatewayResponses collection does not
support pagination and the limit does not apply here.}
}
\value{
A list with the following syntax:\preformatted{list(
position = "string",
items = list(
list(
responseType = "DEFAULT_4XX"|"DEFAULT_5XX"|"RESOURCE_NOT_FOUND"|"UNAUTHORIZED"|"INVALID_API_KEY"|"ACCESS_DENIED"|"AUTHORIZER_FAILURE"|"AUTHORIZER_CONFIGURATION_ERROR"|"INVALID_SIGNATURE"|"EXPIRED_TOKEN"|"MISSING_AUTHENTICATION_TOKEN"|"INTEGRATION_FAILURE"|"INTEGRATION_TIMEOUT"|"API_CONFIGURATION_ERROR"|"UNSUPPORTED_MEDIA_TYPE"|"BAD_REQUEST_PARAMETERS"|"BAD_REQUEST_BODY"|"REQUEST_TOO_LARGE"|"THROTTLED"|"QUOTA_EXCEEDED",
statusCode = "string",
responseParameters = list(
"string"
),
responseTemplates = list(
"string"
),
defaultResponse = TRUE|FALSE
)
)
)
}
}
\description{
Gets the GatewayResponses collection on the given RestApi. If an API
developer has not added any definitions for gateway responses, the
result will be the API Gateway-generated default GatewayResponses
collection for the supported response types.
}
\section{Request syntax}{
\preformatted{svc$get_gateway_responses(
restApiId = "string",
position = "string",
limit = 123
)
}
}
\keyword{internal}
|
3941dd4ac9d6d47bc00544711b4306d50d1b8b20
|
e03daf38d0e6b8755a28b321bd3f9102c47a409e
|
/man/kmerFit.Rd
|
1973e36043e25ca5d5caa72644484c59664c3a3d
|
[
"MIT"
] |
permissive
|
pkimes/upbm
|
9709b857c5bb9e5b468785d567728094a30eccba
|
019977afd48c75534e5ce5e87e9d2fcfe46da53e
|
refs/heads/master
| 2021-03-22T03:02:25.365308
| 2020-10-16T15:23:24
| 2020-10-16T15:23:24
| 118,683,405
| 2
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 6,162
|
rd
|
kmerFit.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/kmerFit.R
\name{kmerFit}
\alias{kmerFit}
\title{Fit k-mer probe set models}
\usage{
kmerFit(
pe,
kmers = uniqueKmers(8L),
positionbias = TRUE,
method = c("dl2", "dl"),
contrasts = TRUE,
baseline = NULL,
outlier_cutoff = stats::qnorm(0.995),
outlier_maxp = 0.2,
verbose = FALSE
)
}
\arguments{
\item{pe}{a PBMExperiment object containing probe-level summarized intensity data
returned by \code{\link{probeFit}}.}
\item{kmers}{a character vector of k-mers. (default = \code{uniqueKmers(8L)})}
\item{positionbias}{a logical value whether to correct for bias due to position
of k-mer along probe sequence. (default = TRUE)}
\item{method}{a character name specifying the method to use for estimating cross-probe
variance in each k-mer probe set. Currently, the two-step DerSimonian-Kacker ("dl2") and
non-iterative DerSimonian-Laird ("dl") methods are supported. (default = "dl2")}
\item{contrasts}{a logical value whether to compute contrasts for all columns against a
specified \code{baseline} column. (default = TRUE)}
\item{baseline}{a character string specifying the baseline condition across \code{pe} columns to
use when calculating contrasts. If not specified and set to NULL, the baseline
value is guessed by looking for ``ref" in the column names of \code{pe}. If a unique
matching value is not found, an error is thrown.
This parameter is ignored when \code{contrasts = FALSE}.
(default = NULL)}
\item{outlier_cutoff}{a numeric threshold used for filtering probes from k-mer
probe sets before fitting each k-mer level model. The threshold is
applied to the absolute value of an approximate robust studentized residual
computed for each probe in each probe set and can be turned off by
setting the value to NULL. By default, approximate 0.5% tails are trimmed.
(default = \code{stats::qnorm(0.995)})}
\item{outlier_maxp}{a numeric threshold on the maximum proportion of probes to filter
for each k-mer probe set according to \code{outlier_cutoff}. This should be
set to a reasonably small value to avoid over-filtering based on the approximate
residual threshold. (default = 0.2)}
\item{verbose}{a logical value whether to print verbose output during analysis. (default = FALSE)}
}
\value{
SummarizedExperiment of estimated k-mer affinities and differences with some or all
of the following assays:
\itemize{
\item \code{"affinityEstimate"}: k-mer affinities.
\item \code{"affinityVariance"}: k-mer affinity variances.
\item \code{"contrastDifference"}: (optional) k-mer differential affinities with \code{baseline} condition.
\item \code{"contrastAverage"}: (optional) k-mer average affinities with \code{baseline} condition.
\item \code{"contrastVariance"}: (optional) k-mer differential affinity variances.
}
If computed, the values of the \code{"contrast"} assays will be NA for the specified
baseline condition.
}
\description{
Given a PBMExperiment of probe-level summaries returned by \code{probeFit} and a list of k-mers,
this function applies probe set aggregation to obtain k-mer level estimates of affinity and
variance on the scale of log2 signal intensities. Additionally, if \code{contrasts = TRUE},
effect size and variance estimates are also returned for differential k-mer affinities against
a baseline condition specified with \code{baseline=}.
The output can be passed to \code{\link{kmerTestContrast}}, \code{\link{kmerTestAffinity}},
\code{\link{kmerTestSpecificity}} to perform various statistical tests with the estimated
k-mer statistics.
}
\details{
By default, probe intensities are corrected within each k-mer probe set
to account for biases introduced by where the k-mer lies along the probe sequence. Bias
correction is performed such that the mean cross-probe intensity for each k-mer is
(typically) unchanged. This bias correction step only serves to reduce the cross-probe variance
and improve downstream inference for each k-mer.
For many low affinity k-mers, probe sets may include several probes with high intensity due to
the k-mer sharing a probe with a separate high affinity k-mer. These probes do not provide
meaningful affinity information for the lower affinity k-mer. To adjust for this possibility,
outlier probes are filtered from each k-mer probe set prior after position bias correction, but
before aggregation. Probes with large approximate studentized residuals are
filtered from each probe set according to a user-specified threshold (\code{outlier_cutoff}).
However, to prevent overfiltering, a maximum proportion of probes to filter from any probe set
should also be specified (\code{outlier_maxp}).
After bias correction and probe filtering, a meta analysis model is fit to each probe set.
Under this model, cross-probe variances are estimated using either the DerSimonian and Kacker (2007)
or DerSimonian and Laird (1986) estimator. The estimated k-mer affinities and variances are
included in the returned SummarizedExperiment as two assays, \code{"affinityEstimate"} and
\code{"affinityVariance"}.
If \code{contrast = TRUE}, k-mer differential affinities, the corresponding variances, and
average affinities are also returned as three assays, \code{"contrastDifference"},
\code{"contrastVariance"}, and \code{"contrastAverage"}. Positive differential affinities indicate
higher affinity relative to the baseline condition.
}
\references{
If using \code{method = "dl2"} cross-probe variance estimator:
\itemize{
\item DerSimonian, R., & Kacker, R. (2007). Random-effects model for meta-analysis of clinical trials: an update. Contemporary Clinical Trials, 28(2), 105-114.
}
If using \code{method = "dl"} cross-probe variance estimator:
\itemize{
\item DerSimonian, R., & Laird, N. (1986). Meta-analysis in clinical trials. Controlled Clinical Trials, 7(3), 177-188.
}
Cross-probe variance estimation code adapted from:
\itemize{
\item Viechtbauer, W. (2010). Conducting meta-analyses in R with the metafor package. Journal of Statistical Software, 36(3), 1-48. URL: http://www.jstatsoft.org/v36/i03/
}
}
\seealso{
\code{\link{probeFit}}, \code{\link{uniqueKmers}}
}
\author{
Patrick Kimes
}
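% Hedged illustrative sketch (not part of the generated documentation): the
% DerSimonian-Laird between-probe variance estimator referenced above, for
% per-probe effects y with within-probe variances v, can be written in R as:
%   dl_tau2 <- function(y, v) {
%     w <- 1 / v
%     ybar <- sum(w * y) / sum(w)    # fixed-effect (inverse-variance) mean
%     Q <- sum(w * (y - ybar)^2)     # Cochran's Q statistic
%     k <- length(y)
%     max(0, (Q - (k - 1)) / (sum(w) - sum(w^2) / sum(w)))
%   }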
|
e6ba49e288c07d58372b91fb69b174d8b64d7185
|
7d31f360f1ece69b09a4b51e3986ac44025efc7c
|
/package/clinUtils/man/dataSDTMCDISCP01.Rd
|
8c72cf3e9f4b2ededcaaf49e5b38cbc46548ef7f
|
[] |
no_license
|
Lion666/clinUtils
|
a0500a773225ffafc29b7d7f7bcc722dd416743c
|
dc6118f32d311657d410bdeba02f3720f01d62df
|
refs/heads/master
| 2023-08-12T08:48:42.923950
| 2021-09-21T14:56:18
| 2021-09-21T16:14:51
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 1,229
|
rd
|
dataSDTMCDISCP01.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/data.R
\docType{data}
\name{dataSDTMCDISCP01}
\alias{dataSDTMCDISCP01}
\title{Example of SDTM datasets from the CDISC original Pilot 01 study}
\format{
List of data.frames containing the SDTM
dataset for each selected domain.\cr
Labels for the different variables across datasets
is available via the \code{labelVars} attribute.
}
\source{
Original (and entire) datasets are available in: \cr
\url{https://github.com/phuse-org/phuse-scripts/tree/master/data/sdtm/cdiscpilot01}
See in particular the \emph{define.xml} file
for further description of the
datasets and variables name.
}
\description{
This contains a subset of the CDISC original Pilot 01 study dataset for:
\itemize{
\item{a selected subset of subjects}
\item{a selected subset of domains:
\itemize{
\item{demographics ('dm')}
\item{treatment exposure ('ex')}
\item{concomitant medications ('cm')}
\item{medical history ('mh')}
\item{adverse event ('ae')}
\item{laboratory ('lb')}
\item{vital signs ('vs')}
\item{subject visit ('sv')}
}
}
}
This dataset was created following the SDTM Version 1.1
standard.
}
\seealso{
\link{loadDataADaMSDTM}
}
\author{
Laure Cougnaud
}
\keyword{data}
|
d09f1ef96f01a03281f9d2b3875d3c82e8c90f86
|
43d1ad2ef32ead801ce22ad12dfad8788a2fb1db
|
/Normality test.R
|
9a9c7fc549ac0096884ddeb59caf06acf69f2cd9
|
[] |
no_license
|
SinghAbhi3/Myfiles
|
ecf0c902f4f6bda97000c6e97712da22fb6c0459
|
a321b69831b62e66ddd99f5b2c2da02f486e03bc
|
refs/heads/master
| 2022-12-08T17:25:03.933963
| 2020-09-08T16:52:57
| 2020-09-08T16:52:57
| 256,956,643
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 470
|
r
|
Normality test.R
|
getwd()
cars1 <- read.csv("Cars.csv")
cars1
View(cars1)
MPGcars1 <- cars1$MPG
MPGcars1
shapiro.test(MPGcars1)
# Since p-value is greater than 0.05, we can assume normality
waistat <- read.csv("wc-at.csv")
waistat
waist <- waistat$Waist
waist
shapiro.test(waist)
# Since p-value is less than 0.05, data set is not normally distributed
at <- waistat$AT
at
shapiro.test(at)
# Since p-value is less than 0.05, data set is not normally distributed
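# Hedged helper sketch (not in the original script): wraps the Shapiro-Wilk
# test used above and prints the same alpha = 0.05 interpretation rule.
check_normality <- function(x, alpha = 0.05) {
  p <- shapiro.test(x)$p.value
  if (p > alpha) {
    cat("p-value =", signif(p, 3), "> alpha: we can assume normality\n")
  } else {
    cat("p-value =", signif(p, 3), "<= alpha: data are not normally distributed\n")
  }
  invisible(p)
}
# check_normality(MPGcars1)
# check_normality(waist)
# check_normality(at)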
|
45ad0a8c336c4682a834885c0497cb4689c83d19
|
6a28ba69be875841ddc9e71ca6af5956110efcb2
|
/An_Introduction_To_Statistical_Methods_And_Data_Analysis_by_R_Lyman_Ott_And_Michael_Longnecker/CH14/EX14.10/Ex14_10.r
|
5b2731bdf09e1211be7539f1027d6c886c915544
|
[] |
permissive
|
FOSSEE/R_TBC_Uploads
|
1ea929010b46babb1842b3efe0ed34be0deea3c0
|
8ab94daf80307aee399c246682cb79ccf6e9c282
|
refs/heads/master
| 2023-04-15T04:36:13.331525
| 2023-03-15T18:39:42
| 2023-03-15T18:39:42
| 212,745,783
| 0
| 3
|
MIT
| 2019-10-04T06:57:33
| 2019-10-04T05:57:19
| null |
UTF-8
|
R
| false
| false
| 174
|
r
|
Ex14_10.r
|
my_data <- read.table(file.choose(), header=T)
attach(my_data)
str(my_data)
model1 <- aov(Time ~Emergency+Display+Emergency:Display, data = my_data)
summary(model1)
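# Hedged aside (not in the original script): the crossed formula above can be
# written more compactly with the * operator, which expands to both main
# effects plus their interaction and gives an identical ANOVA table:
# model2 <- aov(Time ~ Emergency * Display, data = my_data)
# summary(model2)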
|
ee493dbcbe9e1cb79a488c63ea03d8dcb7d5c2b5
|
ffdea92d4315e4363dd4ae673a1a6adf82a761b5
|
/data/genthat_extracted_code/iNextPD/examples/ggiNEXT.Rd.R
|
64c7ca1e3682033a0e594b48c8359c154383ea27
|
[] |
no_license
|
surayaaramli/typeRrh
|
d257ac8905c49123f4ccd4e377ee3dfc84d1636c
|
66e6996f31961bc8b9aafe1a6a6098327b66bf71
|
refs/heads/master
| 2023-05-05T04:05:31.617869
| 2019-04-25T22:10:06
| 2019-04-25T22:10:06
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,021
|
r
|
ggiNEXT.Rd.R
|
library(iNextPD)
### Name: ggiNEXT
### Title: ggplot2 extension for an iNextPD object
### Aliases: ggiNEXT ggiNEXT.default ggiNEXT.iNextPD
### ** Examples
# single-assemblage abundance data
data(bird)
bird.phy <- ade4::newick2phylog(bird$tre)
bird.lab <- rownames(bird$abun)
out1 <- iNextPD(bird$abun$North.site, bird.lab, bird.phy,
q=1, datatype="abundance", endpoint=400, se=TRUE)
ggiNEXT(x=out1, type=1)
ggiNEXT(x=out1, type=2)
ggiNEXT(x=out1, type=3)
## Not run:
##D # single-assemblage incidence data with three orders q
##D out2 <- iNextPD(bird$inci$North.site, bird.lab, bird.phy,
##D q=c(0,1,2), datatype="incidence_raw", endpoint=25)
##D ggiNEXT(out2, se=FALSE, color.var="order")
##D
##D # multiple-assemblage abundance data with three orders q
##D out3 <- iNextPD(bird$abun, bird.lab, bird.phy,
##D q=c(0,1,2), datatype="abundance", endpoint=400)
##D ggiNEXT(out3, facet.var="site", color.var="order")
##D ggiNEXT(out3, facet.var="both", color.var="both")
## End(Not run)
|
3b42979140db3994dc02a53d0993b0ac943c7665
|
d2faae85589717f37402338fe40f5990530edb28
|
/tidyr_package.R
|
119f98bc34580d31778dfa6ed9d9b60519c96db2
|
[] |
no_license
|
charan2000/Intro-to-R
|
50a2f235d59f947eb8afbd99f0ed04d414c6a530
|
18de662af524384ed9dc1682243b9b107effa146
|
refs/heads/main
| 2023-04-02T18:16:42.510756
| 2021-04-06T17:29:38
| 2021-04-06T17:29:38
| 328,731,529
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 536
|
r
|
tidyr_package.R
|
library(tidyr)
bill_data <- read.csv(file.choose())
bill_data1 <- bill_data   # keep an untouched copy for comparison
str(bill_data1)
bill_data$Date <- as.Date(bill_data$Date, format = "%d-%m-%y")
View(bill_data1)          # the copy still has Date as character
str(bill_data)            # Date is now of class Date
#Gather
gather <- gather(bill_data,Bill_type,Bill_amount,gasbill:waterbill)
View(gather)
#Spread
unpivot <- spread(gather,Bill_type,Bill_amount)
View(unpivot)
## unite ##
dataunite <- unite(bill_data,"Location",c(city,state),sep=";")
dataunite
datasep <- separate(dataunite,Location,c("city","state"),sep=";")
datasep
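## Hedged aside (not in the original script): in current tidyr, gather()/spread()
## are superseded by pivot_longer()/pivot_wider(); the equivalents of the calls
## above would be roughly:
# gather2  <- pivot_longer(bill_data, gasbill:waterbill,
#                          names_to = "Bill_type", values_to = "Bill_amount")
# unpivot2 <- pivot_wider(gather2, names_from = Bill_type, values_from = Bill_amount)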
|
ab89d19580944170d055531c8fbe74b29f98cb2b
|
31362fdab2193f92b64f9a82b0fe1ca732fcf6df
|
/ComparatorSelectionExplorer/R/dataModel.R
|
150e2313891ea25fc962aa7d421a1b6d32626223
|
[
"LicenseRef-scancode-warranty-disclaimer",
"Apache-2.0"
] |
permissive
|
OHDSI/ShinyDeploy
|
a5c8bbd5341c96001ebfbb1e42f3bc60eeceee7c
|
a9d6f598b10174ffa6a1073398565d108e4ccd3c
|
refs/heads/master
| 2023-08-30T17:59:17.033360
| 2023-08-26T12:07:22
| 2023-08-26T12:07:22
| 98,995,622
| 31
| 49
| null | 2023-06-26T21:07:33
| 2017-08-01T11:50:59
|
R
|
UTF-8
|
R
| false
| false
| 15,082
|
r
|
dataModel.R
|
# Functions for querying data model
#
getCohortDefinitions <- function(qns) {
checkmate::assertClass(qns, "QueryNamespace")
qns$queryDb("select distinct
t.cohort_definition_id,
short_name,
atc_flag as is_atc
from @schema.@cohort_definition t
where t.cohort_definition_id is not null
--and atc_flag in (0, 1)
order by short_name")
}
getDbDataSourcesTable <- function(qns, reactableTable = TRUE) {
checkmate::assertClass(qns, "QueryNamespace")
dataSourceData <- qns$queryDb("select
cdm_source_abbreviation,
cdm_holder,
source_description,
cdm_version,
vocabulary_version,
source_release_date
from @schema.@cdm_source_info t")
if (reactableTable) {
colnames(dataSourceData) <- SqlRender::camelCaseToTitleCase(colnames(dataSourceData))
rt <- reactable::reactable(
data = dataSourceData,
columns = list(
"Source Description" = reactable::colDef(
minWidth = 300)),
defaultPageSize = 5
)
return(rt)
}
return(dataSourceData)
}
#' Get data for covariates that occur on the same day as the exposure event
#'
#'
getCoOccurenceTableData <- function(qns,
databaseIds,
prevInputHighMax,
prevInputHighMin,
prevInputLowMax,
prevInputLowMin,
cohortDefinitionId1,
cohortDefinitionId2) {
checkmate::assertClass(qns, "QueryNamespace")
qns$queryDb(
sql = "
with means_cte as (
select
c1.database_id,
@cohortDefinitionId1 as cohort_definition_id_1,
@cohortDefinitionId2 as cohort_definition_id_2,
case
when c1.covariate_type is null then c2.covariate_type
when c2.covariate_type is null then c1.covariate_type
else c1.covariate_type
end as covariate_type,
case
when c1.covariate_id is null then c2.covariate_id
when c2.covariate_id is null then c1.covariate_id
else c1.covariate_id
end as covariate_id,
case
when c1.covariate_name is null then c2.covariate_name
when c2.covariate_name is null then c1.covariate_name
else c1.covariate_name
end as covariate_short_name,
case
when c1.covariate_mean is null then 0.0
else c1.covariate_mean
end as mean_1,
case
when c2.covariate_mean is null then 0.0
else c2.covariate_mean
end as mean_2
from (
select t.*, covd.covariate_name, covd.covariate_type
from @schema.@covariate_mean t
inner join @schema.@covariate_definition covd on covd.covariate_id = t.covariate_id
where t.cohort_definition_id = @cohortDefinitionId1
and t.database_id IN (@database_ids)
) as c1
left join (
select t.*, covd.covariate_name, covd.covariate_type
from @schema.@covariate_mean t
inner join @schema.@covariate_definition covd on covd.covariate_id = t.covariate_id
where t.cohort_definition_id = @cohortDefinitionId2
and t.database_id IN (@database_ids)
) as c2
on c1.covariate_id = c2.covariate_id AND c1.database_id = c2.database_id
WHERE (c1.covariate_type IS NULL OR c1.covariate_type = 'Co-occurrence')
AND (c2.covariate_type IS NULL OR c2.covariate_type = 'Co-occurrence')
)
select
m.*,
d.cdm_source_abbreviation,
case
when m.mean_1 = m.mean_2 then 0.0
when m.mean_1 = 0.0 and m.mean_2 = 1.0 then null
when m.mean_1 = 1.0 and m.mean_2 = 0.0 then null
else (mean_1 - mean_2) / (sqrt((mean_1 * (1 - mean_1) + mean_2 * (1 - mean_2)) / 2))
end as std_diff,
c1.num_persons as n_1,
c2.num_persons as n_2
from means_cte as m
inner join @schema.@cohort_count as c1
on m.cohort_definition_id_1 = c1.cohort_definition_id and c1.database_id = m.database_id
inner join @schema.@cohort_count as c2
on m.cohort_definition_id_2 = c2.cohort_definition_id and c2.database_id = m.database_id
inner join @schema.@cdm_source_info as d on d.database_id = m.database_id
WHERE (
(m.mean_1 > @prevInputHighMax AND m.mean_2 < @prevInputHighMin) OR
(m.mean_2 > @prevInputHighMax AND m.mean_1 < @prevInputHighMin)
) OR (
(m.mean_1 > @prevInputLowMax AND m.mean_2 < @prevInputLowMin) OR
(m.mean_2 > @prevInputLowMax AND m.mean_1 < @prevInputLowMin)
)
;",
database_ids = databaseIds,
prevInputHighMax = prevInputHighMax,
prevInputHighMin = prevInputHighMin,
prevInputLowMax = prevInputLowMax,
prevInputLowMin = prevInputLowMin,
cohortDefinitionId1 = cohortDefinitionId1,
cohortDefinitionId2 = cohortDefinitionId2)
}
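# For reference (a hedged sketch, not used by the app): the std_diff computed in
# SQL above is the standardized difference of two binary-covariate prevalences,
# scaled by the pooled binomial standard deviation. The SQL additionally returns
# NULL when the pooled SD is zero (one prevalence 0, the other 1).
stdDiffBinary <- function(mean1, mean2) {
  pooledSd <- sqrt((mean1 * (1 - mean1) + mean2 * (1 - mean2)) / 2)
  ifelse(mean1 == mean2, 0, (mean1 - mean2) / pooledSd)
}
# stdDiffBinary(0.30, 0.10)  # ~0.516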
getCohortDefinitionsTable <- function(qns, databaseId, counts = TRUE) {
qns$queryDb(
sql = "select distinct
t.cohort_definition_id,
short_name,
atc_flag as is_atc,
c.num_persons,
c.database_id
from @schema.@cohort_definition t
inner join @schema.@cohort_count c ON c.cohort_definition_id = t.cohort_definition_id
where t.cohort_definition_id is not null
-- and atc_flag in (0, 1)
and c.database_id IN (@database_id)
order by short_name",
database_id = databaseId,
counts = counts
)
}
getPairwiseCovariateData <- function(qns, databaseId, cohortDefinitionId1, cohortDefinitionId2) {
checkmate::assertClass(qns, "QueryNamespace")
qns$queryDb(
sql = "with means as (
select
@cohortDefinitionId1 as cohort_definition_id_1,
@cohortDefinitionId2 as cohort_definition_id_2,
case
when c1.covariate_type is null then c2.covariate_type
when c2.covariate_type is null then c1.covariate_type
else c1.covariate_type
end as covariate_type,
case
when c1.covariate_id is null then c2.covariate_id
when c2.covariate_id is null then c1.covariate_id
else c1.covariate_id
end as covariate_id,
case
when c1.covariate_name is null then c2.covariate_name
when c2.covariate_name is null then c1.covariate_name
else c1.covariate_name
end as covariate_short_name,
case
when c1.covariate_mean is null then 0.0
else c1.covariate_mean
end as mean_1,
case
when c2.covariate_mean is null then 0.0
else c2.covariate_mean
end as mean_2
from (
select t.*, covd.covariate_name, covd.covariate_type from @schema.@covariate_mean t
inner join @schema.@covariate_definition covd on covd.covariate_id = t.covariate_id
where t.cohort_definition_id = @cohortDefinitionId1
and t.database_id = @database_id
) as c1
full join (
select t.*, covd.covariate_name, covd.covariate_type from @schema.@covariate_mean t
inner join @schema.@covariate_definition covd on covd.covariate_id = t.covariate_id
where t.cohort_definition_id = @cohortDefinitionId2
and t.database_id = @database_id
) as c2
on c1.covariate_id = c2.covariate_id)
select
m.*,
case
when m.mean_1 = m.mean_2 then 0.0
when m.mean_1 = 0.0 and m.mean_2 = 1.0 then null
when m.mean_1 = 1.0 and m.mean_2 = 0.0 then null
else (mean_1 - mean_2) / (sqrt((mean_1 * (1 - mean_1) + mean_2 * (1 - mean_2)) / 2))
end as std_diff,
c1.num_persons as n_1,
c2.num_persons as n_2
from means as m
join @schema.@cohort_count as c1
on m.cohort_definition_id_1 = c1.cohort_definition_id and c1.database_id = @database_id
join @schema.@cohort_count as c2
on m.cohort_definition_id_2 = c2.cohort_definition_id and c2.database_id = @database_id
;",
database_id = databaseId,
cohortDefinitionId1 = cohortDefinitionId1,
cohortDefinitionId2 = cohortDefinitionId2)
}
getCohortSimilarityScores <- function(qns, targetCohortId) {
checkmate::assertClass(qns, "QueryNamespace")
qns$queryDb(
sql = "
select distinct
csi.database_id,
csi.cdm_source_abbreviation,
CASE
WHEN t.cohort_definition_id_1 = @targetCohortId THEN t.cohort_definition_id_2
ELSE t.cohort_definition_id_1
END as cohort_definition_id_2,
CASE
WHEN t.cohort_definition_id_1 = @targetCohortId THEN cd2.atc_flag
ELSE cd.atc_flag
END as is_atc_2,
CASE
WHEN t.cohort_definition_id_1 = @targetCohortId THEN cd2.short_name
ELSE cd.short_name
END as short_name,
cosine_similarity,
atc.atc_4_related,
atc.atc_3_related,
CASE
WHEN t.cohort_definition_id_1 = @targetCohortId THEN ec.num_persons
ELSE ec2.num_persons
END as num_persons
from @schema.@cosine_similarity_score t
inner join @schema.@cohort_count ec ON ec.cohort_definition_id = t.cohort_definition_id_2 and ec.database_id = t.database_id
inner join @schema.@cohort_count ec2 ON ec2.cohort_definition_id = t.cohort_definition_id_1 and ec2.database_id = t.database_id
inner join @schema.@cdm_source_info csi ON csi.database_id = t.database_id
inner join @schema.@cohort_definition cd ON cd.cohort_definition_id = t.cohort_definition_id_1
inner join @schema.@cohort_definition cd2 ON cd2.cohort_definition_id = t.cohort_definition_id_2
left join @schema.@atc_level atc on (t.cohort_definition_id_1 = atc.cohort_definition_id_1 and t.cohort_definition_id_2 = atc.cohort_definition_id_2) or (t.cohort_definition_id_2 = atc.cohort_definition_id_1 and t.cohort_definition_id_1 = atc.cohort_definition_id_2)
where (t.cohort_definition_id_1 = @targetCohortId or t.cohort_definition_id_2 = @targetCohortId)
and t.covariate_type = 'average'
",
targetCohortId = targetCohortId)
}
getDatabaseSimilarityScores <- function(qns, targetCohortId, databaseIds) {
checkmate::assertClass(qns, "QueryNamespace")
qns$queryDb(
sql = "
select distinct
d.cdm_source_abbreviation,
CASE
WHEN t.cohort_definition_id_1 = @targetCohortId THEN t.cohort_definition_id_2
ELSE t.cohort_definition_id_1
END as cohort_definition_id_2,
CASE
WHEN t.cohort_definition_id_1 = @targetCohortId THEN cd2.atc_flag
ELSE cd.atc_flag
END as is_atc_2,
CASE
WHEN t.cohort_definition_id_1 = @targetCohortId THEN cd2.short_name
ELSE cd.short_name
END as short_name,
cosine_similarity,
CASE
WHEN t.cohort_definition_id_1 = @targetCohortId THEN ec.num_persons
ELSE ec2.num_persons
END as num_persons,
t.covariate_type
from @schema.@cosine_similarity_score t
inner join @schema.@cohort_count ec ON ec.cohort_definition_id = t.cohort_definition_id_2 and ec.database_id = t.database_id
inner join @schema.@cohort_count ec2 ON ec2.cohort_definition_id = t.cohort_definition_id_1 and ec2.database_id = t.database_id
inner join @schema.@cohort_definition cd ON cd.cohort_definition_id = t.cohort_definition_id_1
inner join @schema.@cohort_definition cd2 ON cd2.cohort_definition_id = t.cohort_definition_id_2
inner join @schema.@cdm_source_info d ON t.database_id = d.database_id
where (t.cohort_definition_id_1 = @targetCohortId or t.cohort_definition_id_2 = @targetCohortId)
and t.database_id IN (@database_ids)
",
targetCohortId = targetCohortId,
database_ids = databaseIds)
}
getDbCosineSimilarityTable <- function(qns, targetCohortId, comparatorCohortId, databaseId, returnReactable = FALSE) {
checkmate::assertClass(qns, "QueryNamespace")
sql <- "SELECT covariate_type, cosine_similarity FROM @schema.@cosine_similarity_score
WHERE database_id = @database_id
AND cohort_definition_id_1 in (@target, @comparator)
AND cohort_definition_id_2 in (@target, @comparator)
"
detailData <- qns$queryDb(sql,
database_id = databaseId,
target = targetCohortId,
comparator = comparatorCohortId)
detailData <- detailData %>%
dplyr::filter(!covariateType %in% c('Co-occurrence')) %>%
dplyr::mutate(
covariateType = factor(
covariateType,
levels = c("Demographics",
"Presentation",
"Medical history",
"prior meds",
"visit context",
"average"))) %>%
dplyr::arrange(covariateType)
if (returnReactable) {
rt <- reactable::reactable(
data = detailData,
columns = list(
"covariateType" = reactable::colDef(
name = "Covariate Domain"),
"cosineSimilarity" = reactable::colDef(
name = "Cosine Similarity",
          cell = function(value) { sprintf(fmtSim, value) }  # fmtSim: format string assumed to be defined elsewhere in the app
)
)
)
    return(rt)
  }
  # explicitly return the underlying data when a reactable is not requested
  # (previously the function fell through the if and returned NULL invisibly)
  return(detailData)
}
getDatabaseSources <- function(qns) {
checkmate::assertClass(qns, "QueryNamespace")
qns$queryDb(sql = "select distinct * from @schema.@cdm_source_info t")
}
|
c051a944438bdfc95f17b943efb605af3e2e80fe
|
ff0064f72d0652c242ca2b3b1f7807090f30ae26
|
/Twitter_MongoDB.R
|
915e135287abf67d98aa331f54b4bd6c8e75180a
|
[] |
no_license
|
scubjwu/CodeBase
|
1b1c10065de43dd62788bc20e3d457d1c90cbe2b
|
f4e1d9f13a7f076c5586b6631d0946cf6e52358f
|
refs/heads/master
| 2021-01-17T21:43:32.689058
| 2015-09-01T04:26:14
| 2015-09-01T04:26:14
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,214
|
r
|
Twitter_MongoDB.R
|
# Twitter Data analysis: using mongodb as database
# Yichu Li
# Aug 30, 2015
require(rmongodb)
rm(list=ls())
# Remember to run MongoDB daemon on localhost before running the code below!
mongo <- mongo.create()
mongo.is.connected(mongo)
NS <- "twitter.tweets"
# case 1: go through the whole db of twitter
if (FALSE)
{
cursor <- mongo.find(mongo=mongo, ns=NS)
while(mongo.cursor.next(cursor))
{
value <- mongo.cursor.value(cursor)
list.r <- mongo.bson.to.list(value)
}
mongo.cursor.destroy(cursor)
}
# case 2: go through filtered records: only choose records with geo NULL, and only choose id, geo and text fields
if (FALSE)
{
buf <- mongo.bson.buffer.create() # first create a bson so to filter
mongo.bson.buffer.append(buf, "geo", NULL)
null.filter <- mongo.bson.from.buffer(buf)
cursor <- mongo.find(mongo=mongo, ns=NS, query=null.filter, fields=list("id"=1L, "geo"=1L, "text"=1L))
geo.null <- list()
while(mongo.cursor.next(cursor))
{
value <- mongo.cursor.value(cursor)
geo.null <- c(geo.null, mongo.bson.to.list(value))
}
mongo.cursor.destroy(cursor)
}
# case 3: go through filtered records: only choose records with geo NOT NULL (geo is deprecated)
if (FALSE)
{
cursor <- mongo.find(mongo=mongo, ns=NS, query=list("geo"=list("$ne"=NULL)), fields=list("id"=1L, "geo"=1L, "text"=1L, "created_at"=1L, "source"=1L, "timestamp_ms"=1L, "place"=1L))
geo.nonull <- list()
i <- 1
while(mongo.cursor.next(cursor))
{
value <- mongo.cursor.value(cursor)
geo.nonull <- c(geo.nonull, list(mongo.bson.to.list(value)))
print(i)
i <- i+1
}
mongo.cursor.destroy(cursor)
}
# case 4: go through filtered records: only choose records with coordinates NOT NULL
{
cursor <- mongo.find(mongo=mongo, ns=NS, query=list("coordinates"=list("$ne"=NULL)), fields=list("id_str"=1L, "text"=1L, "user"=1L, "created_at"=1L, "place"=1L, "coordinates"=1L))
mongo.count(mongo=mongo, ns=NS, query=list("coordinates"=list("$ne"=NULL)))
mongo.count(mongo=mongo, ns=NS)
coord.nonull<- list()
i <- 1
while(mongo.cursor.next(cursor) & i<=2000)
{
value <- mongo.cursor.value(cursor)
coord.nonull<- c(coord.nonull, list(mongo.bson.to.list(value)))
if (i%%100==0)
print(i)
i <- i+1
}
mongo.cursor.destroy(cursor)
}
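# Hedged follow-up sketch (not in the original script): flatten the collected
# records into a data.frame of tweet id and coordinates. This assumes the
# standard Twitter GeoJSON layout, where coordinates$coordinates = (lon, lat).
# geo.df <- do.call(rbind, lapply(coord.nonull, function(rec) {
#   data.frame(id  = rec$id_str,
#              lon = rec$coordinates$coordinates[[1]],
#              lat = rec$coordinates$coordinates[[2]],
#              stringsAsFactors = FALSE)
# }))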
|
a364b6129233c2c12c8b5a803d2dbf28d3e3cdda
|
b739482757d868e077929c49ed0a1c2446d0fbee
|
/Paper/plotScripts/final_scripts/simulation_analysis/plot_expression_traces_player_piano_figure3.R
|
acba4238814e3a591ec749518e8ddba3cffcd97f
|
[] |
no_license
|
arjunrajlaboratory/lost-in-space-public
|
9bae5d2ba70c4433febca8716dc283aeadae9f8c
|
e9aacc03591c5451ca46140f1212271f14496488
|
refs/heads/main
| 2023-04-11T11:05:47.705168
| 2022-09-16T17:50:08
| 2022-09-16T17:50:08
| 537,553,932
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,991
|
r
|
plot_expression_traces_player_piano_figure3.R
|
library('R.matlab')
library(tidyverse)
library(cowplot)
rawMat = readMat.default('./../../../extractedData/20210528_for_paper/vary_off_add_k_700params/mat_files/S_outpar_20210524_vary_off_add_k_for_paper-rep1_3_1_1_summed.mat')
save_directory = './../../../plots/plots_for_figures/'
plot_three_gene_player_piano = function(data,time1,time2,param){
tdata = data[[1]][[param]][[1]]
tdata = t(tdata)[c(time1:time2),c(1:3)]
tdata = tdata %>% as_tibble(rownames = 'time')
tdata$time = as.integer(tdata$time)
colnames(tdata) = c('time', 'gene_1', 'gene_2', 'gene_3')
tdata = tdata %>% pivot_longer(-time, names_to = 'species')
tdata = tdata %>% mutate(binarized_value = value>3)
tdata = tdata %>% separate(species, into = c(NA, 'gene'), sep = '_')
tdata$gene = as.integer(tdata$gene)
if(length(unique(tdata$binarized_value)) == 2){
p1=ggplot(tdata) +
aes(y = factor(gene), x = time, color = factor(gene), alpha = as.integer(binarized_value)) +
geom_line(size = 5) +
scale_alpha(range=c(0,1), guide = 'none')+
theme_classic()
} else{
p1=ggplot(tdata) +
aes(y = factor(gene), x = time, color = factor(gene)) +
geom_line(size = 5) +
theme_classic()
}
p2 = ggplot(tdata) +
aes(y = value, x = time, color = factor(gene)) +
geom_step()+
theme_classic()+
theme(
strip.background = element_blank(),
strip.text = element_blank()
)
plots = list(p2,p1)
return(plots)
}
save_fun = function(plot_to_save, plot_name){
plot_to_save[[2]] = plot_to_save[[2]] + scale_y_discrete(limits = rev)
final_plot = plot_grid(plotlist=plot_to_save, nrow=2, align='v', axis = 'l',rel_heights = c(2,1))
ggsave(file.path(save_directory, plot_name), final_plot, width = 6, height = 4)
}
idx = c(341, 302, 304)
t1=1280
t2=1396
plots= lapply(idx, function(iter) plot_three_gene_player_piano(rawMat, t1, t2, iter))
lapply(1:3, function(i)
save_fun(plots[[i]], paste0('3dplot_example_', i, '.svg'))
)
|
b18a8c043d6b6f93c1b45ad2ed1f65ba0142cb21
|
5555383cebaa7375a04e6759c8d0e1644c7d39c3
|
/R/hello.R
|
1d1c7e09379290b81a888009f7b46253b11cde62
|
[
"MIT"
] |
permissive
|
ltrainstg/prismadiagramR
|
fc83ba1676b67490e0ce934b6ee3810f41ed4423
|
6b496a49658254e71b222bfcfa747accbc3fc2af
|
refs/heads/master
| 2023-07-25T21:41:09.354333
| 2021-09-07T17:31:58
| 2021-09-07T17:31:58
| 255,697,169
| 1
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 74
|
r
|
hello.R
|
hello <- function(){
print('HI')
}
bye <- function(){
print('BYE')
}
|
3060bc980b015c5f71ace8d596a742db027dc627
|
cd68b13f269ad9aef96449dfebe88f6df7a19d60
|
/run_analysis.R
|
6e7c96cde3f87d792ccfccadb0c1776020fe3d6e
|
[] |
no_license
|
rniggebrugge/Getting-and-Cleaning-Data-Course-Project
|
0d60641caf1b3d19565368b614443b5e2ab81666
|
9ab48057fd57b96196512e12d6f1c74dde1aab88
|
refs/heads/master
| 2016-09-05T13:05:23.836641
| 2015-01-20T10:21:56
| 2015-01-20T10:21:56
| 29,252,441
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 6,542
|
r
|
run_analysis.R
|
## R script file for the Getting and Cleaning Data Course Project
library(dplyr) # for grouping and summarizing data.frames
library(tidyr) # for "gather" function
# Reading the tables to be merged.
# - subject : list with numbers of subjects for each observation
# - activity : list of activities performed for each observation
# - measurements : the actual measurements for each observation
# - features : list with titles of all measurements, used to create
# meaningful column names
cat("\n\nReading train dataset.....please wait....\n")
subject <- read.table("./train/subject_train.txt")
activity <- read.table("./train/y_train.txt")
measurements <- read.table("./train/X_train.txt", sep="")
features <- read.table("./features.txt")
# The "features" table needs a bit of processing. The first colum
# contains a numeric reference of the measurement, this can be
# ignored. The second column, with the names of the measurements,
# contains many unwanted characters, these will be removed before
# using the values as column names of the dataset.
# A lot of processing is needed to whip them all into the same form.
# It could probably be achieved with fewer, more elegant regular expressions,
# but since it only runs on ~561 values it is not performance-critical.
features <- features[,2]
features <- gsub("angle\\(tBodyAccMean,gravity\\)","drop_this_column", features)
features <- gsub("angle\\((.*),(.*)gravityMean\\)", "angleWithGravity\\1Total_mean" , features)
features <- gsub("[)(,]","", features)
features <- gsub("^t","time", features)
features <- gsub("^f","fft", features)
features <- gsub("Mag","_mag", features)
features <- gsub("\\-","_", features)
features <- gsub("_mag_(mean|std)","Total_\\1", features)
features <- gsub("_(mean|std)_(.*)$","\\2_\\1", features)
features <- gsub("angle([XYZ])gravityMean","angle_gravity\\1_mean", features)
features <- gsub("_meanFreq_(X|Y|Z)$","\\1Freq_mean", features)
features <- gsub("_meanFreq$","\\1Freq_mean", features)
# Adding column names to the above data.frames. This will ensure the
# final table is understandable and ensures a smooth merge with the
# test-set data
names(subject) <- "subject"
names(activity) <- "activity"
names(measurements) <- features
# Now merge the three dataframes by binding the columns ("left-to-right")
train_frame <- cbind(subject, activity, measurements)
# Repeat the same steps for the tables in ./test/ directory
cat("Reading test dataset....please wait....\n")
subject <- read.table("./test/subject_test.txt")
activity <- read.table("./test/y_test.txt")
measurements <- read.table("./test/X_test.txt", sep="")
names(subject) <- "subject"
names(activity) <- "activity"
names(measurements) <- features
test_frame <- cbind(subject, activity, measurements)
# Now merge to train and test data.frames by binding the rows
# ("top-to-bottom") the resulting data.frame with all observations over
# all subjects is stored in "data"
data <- rbind(train_frame, test_frame)
# The activity column in the data.frame contains numeric values
# corresponding to the six observed activities. This needs to be
# replaced with more meaningful labels. These labels can be found
# in the "activity_labels.txt" file where they are stored in 2nd column.
act_labels <- read.table("./activity_labels.txt")
act_labels$V2 <- tolower(act_labels$V2)
data$activity <- factor(data$activity, labels=act_labels[,2])
# Using again the "features" list with all measurements, select those
# representing 'mean' and 'standard deviations' values.
# The 'grep' function will return a list with column numbers we need
# to keep in our data.frame. In order to take the first two columns
# into account (subject and activity) the discovered column numbers
# are all increased with 2, and the numbers 1 and 2 are added to list
# of columns we need to keep.
columns <- grep("std|[Mm]ean", features)
columns <- columns +2
columns <- c(1,2,columns)
# With that we can reduce our "data" data.frame, keeping only
# the columns described above.
data <- data[,columns]
# We can further reduce the data.frame by grouping over subjects
# and activities. For each subject-activity combination the mean value
# for each measurements is calculated. This will result
# in 180 observations (6 activities * 30 subjects).
#
# The function "summarise_each" is used to summarize over all columns
# other than those grouped by.
tdw <- group_by(data, subject, activity) %>% summarise_each(funs(mean))
# As a final step the dataset can be narrowed down by taking the
# _mean and _std parts out of the column names and storing this information
# in a new column called "calculation_type" instead. This will lead to a small
# portion of NA values, because mean and std values are not known for every
# feature.
#
# I have also experimented with further splitting the features, creating tables
# with columns for:
# * _time_ and _fft_ values (mathematical processing type)
# * X / Y / Z / total values (component of the measurement)
#
# The physics behind the experiment would justify such a further split, as these
# parts do not describe new observed features but calculations performed on the
# measurements of the features. However, because this resulted in tables with a
# very large number of missing values (up to 50% NA), I decided against it.
tdn<- tdw %>%
gather(temp_column, value, -subject, -activity) %>%
separate(temp_column, into=c("feature","calculation_type") , sep="_") %>%
spread(feature, value)
# End of processing!
cat("\n\nFINISHED PROCESSING.\n\n")
cat("Created dataframes: \n")
cat(" data: full dataset\n")
cat(" tdw : dataset with variables averaged over subject and activity\n")
cat(" tdn : dataset with variables averaged as above, and _mean and _std moved into column\n\n")
rm(activity); rm(act_labels); rm(columns);
rm(features); rm(measurements); rm(subject);
rm(test_frame); rm(train_frame);
# with(new_data_set_wide, xyplot(tBodyAccmeanX ~ activity | subject ))
# with(new_data_set_wide, xyplot(tBodyAccmeanX ~ subject | activity ))
# with(new_data_set_narrow, xyplot(measurement ~ subject| activity, groups=feature))
# qplot(measurement, feature, data=new_data_set_narrow, facets = subject~activity)
# str(new_data_set_wide)
# summary(new_data_set_wide)
# str(new_data_set_narrow)
# summary(new_data_set_narrow)
|
38e9d85dfc6894ceca6a92fa1cf50d8053fb84dd
|
3ec359b8fd8798f21c37d30c612e4b5bc0908de4
|
/man/Q7.Rd
|
3a83355ba68ffb30cfdd5a0d451d54abf7e329bf
|
[] |
no_license
|
JWooll/WoolleyFirstTools
|
bb8c6e4b7efe0e64a05da2903b8e4196949debb0
|
6a5841e80bb5a0da513af26cc9dab11314897a3d
|
refs/heads/master
| 2020-04-07T19:00:12.239274
| 2018-03-07T12:30:11
| 2018-03-07T12:30:11
| 124,227,371
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 860
|
rd
|
Q7.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/Q7.R
\name{Q7}
\alias{Q7}
\title{Three Methods of Probability Distribution}
\usage{
Q7(data, funct, interval)
}
\arguments{
\item{data}{the data used}
\item{funct}{which distribution function the user would like to use,
with 0, 1, and 2 corresponding to Gamma, Cauchy, and binomial}
\item{interval}{the interval across which the
optimize function will operate}
}
\value{
Sum of distribution
}
\description{
Depending on which probability distribution method the
user wants to use (0 for Gamma, 1 for Cauchy, 2 for Binomial),
the function takes the data and a given interval and
returns the maximum optimized sum of the distribution.
}
\details{
for gamma: Q7(x,0,c(0,10))
for Cauchy: Q7(x,1,c(-10,10))
for binomial: Q7(x,2,c(-10,10))
}
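\examples{
# Hedged sketch: `x` is assumed to be a numeric data vector; these calls
# mirror the usage shown in Details and are not from the original docs.
\dontrun{
x <- rgamma(100, shape = 2)
Q7(x, 0, c(0, 10))   # Gamma
Q7(x, 1, c(-10, 10)) # Cauchy
}
}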
|
ff3b3d6d7dee7a4740d8429240ce8f42132d9544
|
ffdea92d4315e4363dd4ae673a1a6adf82a761b5
|
/data/genthat_extracted_code/audiolyzR/examples/audioSplom.Rd.R
|
dffcd8ae13478828c7bb44ea39ead7621c723e66
|
[] |
no_license
|
surayaaramli/typeRrh
|
d257ac8905c49123f4ccd4e377ee3dfc84d1636c
|
66e6996f31961bc8b9aafe1a6a6098327b66bf71
|
refs/heads/master
| 2023-05-05T04:05:31.617869
| 2019-04-25T22:10:06
| 2019-04-25T22:10:06
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 277
|
r
|
audioSplom.Rd.R
|
library(audiolyzR)
### Name: audioSplom
### Title: audioSplom generates a scatterplot matrix-like series of tone
### matrices
### Aliases: audioSplom
### ** Examples
##Simple audioSplom example
## Not run:
##D data(NHANES)
##D audioSplom(data=NHANES)
## End(Not run)
|
548818cf784e02f3417fdf27ae5c5fef623750ac
|
51a885d284859c96bfb46e1adb8b52005138cab9
|
/teaching/teaching_files/CSP519_SP17/labs/Lab2/showmelm_WB.R
|
ba9b598a424bc932ca37afb806f5043af298e4e6
|
[] |
no_license
|
wbushong/wbushong.github.io
|
8ab3411993ec8844a55cb3d0f58eda4b209300c2
|
640539969c63b3f1121d7c4c4b80f1e86a7d7e6d
|
refs/heads/master
| 2022-07-19T12:53:14.737578
| 2022-06-21T16:16:01
| 2022-06-21T16:16:01
| 56,018,765
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 7,851
|
r
|
showmelm_WB.R
|
p.cor <- function(x) {
model.data <- as.data.frame(model.matrix(x)[,-1])
names(model.data) <- letters[1:length(names(model.data))]
model.data <- na.omit(model.data)
model.data$Y <- fitted(x) + resid(x)
predictors <- names(model.data)[-(which(names(model.data) =="Y"))]
if (length(predictors) < 2) {
(sr <- cor(model.data))[2,1]
} else {
resid.values <- model.data[, c("Y", predictors)]
for(i in names(model.data)[-(which(names(model.data) =="Y"))] ) {
resid.values[,predictors[predictors == i]] <- resid(lm(as.formula(paste(i, " ~ ", 1, paste(" +", predictors[-which(predictors == i)], collapse=""))), data=model.data))
}
(sr <- cor(resid.values)[,1][-1])
}
}
vif <- function(mod, ...) {
  ## Copied from the car package (thanks to John Fox!) ##
if (any(is.na(coef(mod))))
stop("there are aliased coefficients in the model")
v <- vcov(mod)
assign <- attributes(model.matrix(mod))$assign
if (names(coefficients(mod)[1]) == "(Intercept)") {
v <- v[-1, -1]
assign <- assign[-1]
}
else warning("No intercept: vifs may not be sensible.")
terms <- labels(terms(mod))
n.terms <- length(terms)
if (n.terms < 2) {
        result <- NA
    } else {
        R <- cov2cor(v)
detR <- det(R)
result <- matrix(0, n.terms, 3)
rownames(result) <- terms
colnames(result) <- c("GVIF", "Df", "GVIF^(1/2Df)")
for (term in 1:n.terms) {
subs <- which(assign == term)
result[term, 1] <- det(as.matrix(R[subs, subs])) * det(as.matrix(R[-subs,
-subs]))/detR
result[term, 2] <- length(subs)
}
if (all(result[, 2] == 1))
result <- result[, 1]
else result[, 3] <- result[, 1]^(1/(2 * result[, 2]))
result
}
}
########### strip zeros function #########
strip0 <- function(x) {
save.dims <- dim(x)
save.dimnames <-dimnames(x)
x.mat.1 <- as.matrix(x)
x.mat.2 <- matrix(x.mat.1, nrow=1)
x.stripped <- sub("-0.", "-.", x.mat.2, fixed=TRUE)
x.stripped <- sub("0.", ".", x.stripped, fixed=TRUE)
x.mat <- matrix(x.stripped)
dim(x.mat) <- save.dims
dimnames(x.mat) <- save.dimnames
x <- as.data.frame(x.mat, stringsAsFactors=FALSE)
return(x)
}
########## corstars function
corstars <- function(x){
require(Hmisc)
x <- as.matrix(x)
R <- rcorr(x)$r
p <- rcorr(x)$P
  ## define notations for significance levels; spacing is important.
mystars <- ifelse(p < .001, "*** ", ifelse(p < .01, "** ", ifelse(p < .05, "* ", " ")))
  ## truncate the matrix that holds the correlations to two decimals
R <- format(round(cbind(rep(-1.11, ncol(x)), R), 2))[,-1]
  ## build a new matrix that includes the correlations with their appropriate stars
Rnew <- matrix(paste(R, mystars, sep=""), ncol=ncol(x))
diag(Rnew) <- paste(diag(R), " ", sep="")
rownames(Rnew) <- colnames(x)
colnames(Rnew) <- paste(colnames(x), "", sep="")
## remove upper triangle
Rnew <- as.matrix(Rnew)
Rnew[upper.tri(Rnew, diag = TRUE)] <- ""
Rnew <- as.data.frame(Rnew, stringsAsFactors=FALSE)
## remove last column and return the matrix (which is now a data frame)
Rnew <- cbind(Rnew[1:length(Rnew)-1])
## strip leading zeros as per APA style
Rnew <- strip0(Rnew)
Rnew <- format(Rnew, justify="left")
## provide the result
return(Rnew)
}
## lm to df function ##
lm2df <- function(x) {
DF <- as.data.frame(model.matrix(x)[,-1])
names(DF) <- letters[1:length(names(DF))]
DF <- na.omit(DF)
DF$Y <- fitted(x) + resid(x)
names(DF) <- c(colnames(model.matrix(x))[-1], as.character(as.formula(x$call))[2])
return(DF)
}
higher.lm <- function(x, sets=NULL, ...) {
if(length(sets) > 0) {
l <- length(names(x$coefficients))
predictors <- names(x$coefficients)[2:l]
dat <- as.character(x$call["data"])
outcome <- as.character(x$terms)[2]
vars <- rownames(attributes(x$terms)$factors)
DF <- get(dat)[vars]
DF <- na.omit(DF)
MODS <- list()
keep <- numeric()
fo <- character()
for(i in 1:length(sets)) {
keep <- unique(c(keep, c(sets[[1]], sets[[i]])))
fo[i] <- paste(outcome, "~", paste(predictors[keep], collapse="+"))
MODS[[i]] <- lm(as.formula(fo[i]), data=DF)
}
AOV <- list()
DR2 <- numeric()
for(i in 1:(length(MODS)-1)) {
AOV[[i]] <- anova(MODS[[i]], MODS[[i+1]])
DR2[i] <- summary(MODS[[i+1]])$r.squared - summary(MODS[[i]])$r.squared
}
return(list(fo=fo, AOV=AOV, DR2=DR2, MODS=MODS))
}
}
showme.lm <- function(x, sr2=TRUE, tol=FALSE, vif=FALSE, cor=FALSE, sets=NULL, digits=4, verbose = FALSE) {
if(length(sets) < 1) {
tmp <- summary(x)
if(sr2==TRUE) {
R2 <- (p.cor(x))^2
tmp$coefficients <- cbind(tmp$coefficients, c(NA, R2))
colnames(tmp$coefficients)[which(colnames(tmp$coefficients) == "")] <- "sr2"
}
if(tol==TRUE) {
VIF <- vif(x)
TOL <- 1/VIF
tmp$coefficients <- cbind(tmp$coefficients, c(NA, TOL))
colnames(tmp$coefficients)[which(colnames(tmp$coefficients) == "")] <- "Tol."
}
if(vif==TRUE) {
VIF <- vif(x)
TOL <- 1/VIF
tmp$coefficients <- cbind(tmp$coefficients, c(NA, VIF))
colnames(tmp$coefficients)[which(colnames(tmp$coefficients) == "")] <- "VIF"
}
MODS <- x
MODS.sum <- tmp
AOV <- anova(x)
Rtable <- tmp
}
if(cor==TRUE) {
DF <- lm2df(x)
COR <- corstars(DF)
cat(" ### Correlation Matrix ### \n \n")
print(COR)
cat("\n ")
}
if(length(sets) > 1) {
HIGHER <- higher.lm(x, sets=sets)
AOV <- HIGHER$AOV
MODS <- HIGHER$MODS
DR2 <- HIGHER$DR2
fo <- HIGHER$fo
cat("### Multiple df Tests ### \n \n")
for(i in 1:length(AOV)) {
names(AOV)[[i]] <-paste("Comparison of models", i, "and", i+1)
print(AOV[i])
cat("Delta R2 =", DR2[i], "\n \n")
}
MODS.sum <- list()
for(i in 1:length(MODS)) {
MODS.sum[[i]] <- summary(MODS[[i]])
}
add.names <- character()
if(sr2==TRUE) {
add.names <- c(add.names, "sr2")
for(i in 1:length(MODS.sum)) {
R2 <- (p.cor(MODS[[i]]))^2
MODS.sum[[i]]$coefficients <- cbind(MODS.sum[[i]]$coefficients, c(NA, R2))
}
}
if(vif==TRUE) {
add.names <- c(add.names, "VIF")
for(i in 1:length(MODS.sum)) {
VIF <- vif(MODS[[i]])
MODS.sum[[i]]$coefficients <- cbind(MODS.sum[[i]]$coefficients, c(NA, VIF))
}
}
if(tol==TRUE) {
add.names <- c(add.names, "Tol.")
for(i in 1:length(MODS.sum)) {
VIF <- vif(MODS[[i]])
TOL <- 1/VIF
MODS.sum[[i]]$coefficients <- cbind(MODS.sum[[i]]$coefficients, c(NA, TOL))
}
}
Rtable <- MODS.sum
for(i in 1:length(Rtable)) {
colnames(Rtable[[i]]$coefficients)[which(colnames(Rtable[[i]]$coefficients) == "")] <- add.names
}
for(i in 1:length(Rtable)) {
Rtable[[i]]$call <- call("lm", as.formula(fo[i]))
names(Rtable[i]) <- paste("Model", i, "summary")
}
}
Rtable$coefficients <- round(Rtable$coefficients, digits = digits)
if (verbose) {
cat("### Model Summaries ### \n")
print(Rtable)
}
Result <- list(Models = MODS, ModelSummaries = MODS.sum, ANOVAtables = AOV, RegressionTables=Rtable)
invisible(Result)
}
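# ------------------------------------------------------------------
# Usage sketch (an illustrative addition, not part of the original lab
# functions): wrapped in if (FALSE) so sourcing this file stays
# side-effect free; run the body interactively to try it.
if (FALSE) {
  fit <- lm(mpg ~ wt + hp, data = mtcars)                       # toy model
  res <- showme.lm(fit, sr2 = TRUE, vif = TRUE, verbose = TRUE) # print summaries
  res$RegressionTables                                          # rounded coefficient table
}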
|
9a798bba850d8ef38b27b581abe16d0a87fc77b8
|
2eec2110f34267baaf3d0c61cc5872c6aa34d910
|
/src/05_PatternAnalysis.R
|
f0fc766db9993b9009e546020d24eb2189ae2bdf
|
[] |
no_license
|
anu-bioinfo/EGFRFeedback
|
c9219e931ac84635c7d3a722bf21bded2a8f644f
|
1b96801fca55d9b12206f33918a80a120ded5e76
|
refs/heads/master
| 2021-06-05T01:09:49.825329
| 2016-09-18T13:38:01
| 2016-09-18T13:38:01
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 8,689
|
r
|
05_PatternAnalysis.R
|
library('ProjectTemplate')
load.project()
pdf('graphs/DoseResponse_TotalArea.pdf')
par(mfrow=c(1,3))
drugs <- c('Cetuximab', 'Gefitinib', 'Afatinib')
names(drugs) <- c('CTX','GFT', 'AFT')
for (d in names(drugs)) {
drugMean <- get(paste('HaCaT',d,'TotalAreaMean',
sep='.'))
drugMin <- get(paste('HaCaT',d,'TotalAreaMin',
sep='.'))
drugMax <- get(paste('HaCaT',d,'TotalAreaMax',
sep='.'))
errbar(x=1:ncol(drugMean),
y=drugMean['mock',],
yplus=drugMax['mock',],
yminus=drugMin['mock',],
pch=getPCH('HaCaT-Mock'),type='o',axes=F,
xlab=paste(drugs[d],
'concentration (nM)'),
ylab='Total area',
ylim=c(0,1.1*max(drugMax)))
errbar(x=1:ncol(drugMean),
y=drugMean['EGFR',],
yplus=drugMax['EGFR',],
yminus=drugMin['EGFR',],
add=T,pch=getPCH('HaCaT-EGFR'),type='o',col='blue',
errbar.col = 'blue')
errbar(x=1:ncol(drugMean),
y=drugMean['HRAS',],
yplus=drugMax['HRAS',],
yminus=drugMin['HRAS',],
add=T,pch=getPCH('HaCaT-HRAS'),type='o',col='red',
errbar.col = 'red')
errbar(x=1:ncol(drugMean),
y=drugMean['PI3K',],
yplus=drugMax['PI3K',],
yminus=drugMin['PI3K',],
add=T,pch=getPCH('HaCaT-PIK3CA'),type='o',col='green',
errbar.col = 'green')
axis(2,las=1)
axis(1,at = 1:ncol(drugMean),
labels = colnames(drugMean))
legend('topright',
col=c('black','blue','red','green'),
pch=getPCH(c('HaCaT-Mock','HaCaT-EGFR',
'HaCaT-HRAS', 'HaCaT-PIK3CA')),
legend=c('HaCaT-Mock','HaCaT-EGFR',
'HaCaT-HRAS', 'HaCaT-PIK3CA'))
}
dev.off()
pdf('graphs/DoseResponse.pdf')
par(mfrow=c(1,3))
drugs <- c('Cetuximab', 'Gefitinib', 'Afatinib')
names(drugs) <- c('CTX','GFT', 'AFT')
for (d in names(drugs)) {
drugMean <- get(paste('HaCaT',d,'TrtMean',
sep='.'))
drugMin <- get(paste('HaCaT',d,'TrtMin',
sep='.'))
drugMax <- get(paste('HaCaT',d,'TrtMax',
sep='.'))
errbar(x=1:ncol(drugMean),
y=100*drugMean['mock',],
yplus=100*drugMax['mock',],
yminus=100*drugMin['mock',],
pch=getPCH('HaCaT-Mock'),type='o',axes=F,
xlab=paste(drugs[d],
'concentration (nM)'),
ylab='% Total area (relative to control)',
ylim=c(0,120))
errbar(x=1:ncol(drugMean),
y=100*drugMean['EGFR',],
yplus=100*drugMax['EGFR',],
yminus=100*drugMin['EGFR',],
add=T,pch=getPCH('HaCaT-EGFR'),type='o',col='blue',
errbar.col = 'blue')
errbar(x=1:ncol(drugMean),
y=100*drugMean['HRAS',],
yplus=100*drugMax['HRAS',],
yminus=100*drugMin['HRAS',],
add=T,pch=getPCH('HaCaT-HRAS'),type='o',col='red',
errbar.col = 'red')
errbar(x=1:ncol(drugMean),
y=100*drugMean['PI3K',],
yplus=100*drugMax['PI3K',],
yminus=100*drugMin['PI3K',],
add=T,pch=getPCH('HaCaT-PIK3CA'),type='o',col='green',
errbar.col = 'green')
axis(2,at=seq(from=0,to=120,by=20),
labels=c(seq(from=0,to=100,by=20),''),las=1)
axis(1,at = 1:ncol(drugMean),
labels = colnames(drugMean))
legend('topright',
col=c('black','blue','red','green'),
pch=getPCH(c('HaCaT-Mock','HaCaT-EGFR',
'HaCaT-HRAS', 'HaCaT-PIK3CA')),
legend=c('HaCaT-Mock','HaCaT-EGFR',
'HaCaT-HRAS', 'HaCaT-PIK3CA'))
}
dev.off()
# plot of relative survival rates
pdf('graphs/DrugSens.pdf')
plot(c(0,4),c(0,120),col='white',axes=F,
xlab='', ylab='Relative Cell survival rate (%)')
axis(2)
axis(1, at=seq(from=0.3,to=3.3),
labels=row.names(DrugSens),las=2)
for (i in 1:ncol(DrugSens)) {
errbar(seq(from=i*.2,to=3+i*.2),
100*DrugSens[,i],
100*DrugSens.Min[,i],
100*DrugSens.Max[,i],
col=getCol(colnames(DrugSens)[i]),
errbar.col = getCol(colnames(DrugSens)[i]),
pch=getPCH(row.names(DrugSens)),add=T,axes=F,ylab='',xlab='')
}
legend('topleft', legend=colnames(DrugSens),
pch=19, col=getCol(colnames(DrugSens)))
dev.off()
# heatmap of all data (w/ hierarchical clustering)
pdf('graphs/HeatmapTFGene.pdf')
heatmap.2(HaCaT.TFGene.TrtMean,scale='row',
trace='none',col=greenred,labRow=NA,
hclust=function(x) hclust(x,method="complete"),
distfun=function(x) as.dist((1-cor(t(x)))/2),
labCol=sub('HaCaT-','',
strspliti(colnames(HaCaT.TFGene.TrtMean),
split=" ",i=1)),
xlab='HaCaT Construct',
ColSideColors=getCol(strspliti(colnames(HaCaT.TFGene.TrtMean),
split=" ",i=3)))
dev.off()
# plots of CoGAPS patterns
pdf('graphs/CoGAPS_SDFIVE_Patterns.pdf',width=5) #,height=3.5)
par(mfrow=c(2,2))
for (p in 1:3) {
plot(c(0,4),
c(0,
max(CoGAPS_SDFIVE_3$Pmean[p,]+
1.5*CoGAPS_SDFIVE_3$Psd[p,])),
col='white',axes=F,
xlab='', ylab=paste('Pattern',p))
axis(2)
axis(1, at=seq(from=0.3,to=3.3),
labels=gsub('HaCaT-','',row.names(DrugSens)),las=2)
for (i in 1:3) {
exp <- colnames(DrugSens)[i] #c('Control',colnames(DrugSens))[i]
points(x=seq(from=(i-1)*.3,to=3+(i-1)*.3),
y=CoGAPS_SDFIVE_3$Pmean[p,paste(row.names(DrugSens),'HaCaT',exp,'NA')],
col=getCol(exp),
pch=getPCH(row.names(DrugSens)))
}
for (i in 1:nrow(DrugSens)) {
segments(x0=(i-1),x1=(i-1)+.6,col="grey",
y0=mean(CoGAPS_SDFIVE_3$Pmean[p,paste(row.names(DrugSens)[i],'HaCaT',colnames(DrugSens),'NA')]),
lty=2)
arrows(x0=(i-1)+.3,y0 = CoGAPS_SDFIVE_3$Pmean[p,paste(row.names(DrugSens)[i],'HaCaT','Control','NA')],
y1=mean(CoGAPS_SDFIVE_3$Pmean[p,paste(row.names(DrugSens)[i],'HaCaT',colnames(DrugSens),'NA')]),
lty=2,col="grey",length=0.1)
points(x=(i-1)+.3,y=CoGAPS_SDFIVE_3$Pmean[p,paste(row.names(DrugSens)[i],'HaCaT','Control','NA')],
pch=getPCH(row.names(DrugSens)[i]))
}
legend('bottomleft', legend=c('Control',colnames(DrugSens),'Treated','Average change'),
pch="-", col=c(getCol(c('Control',colnames(DrugSens))),'grey','grey'))
}
dev.off()
pdf('graphs/Pattern2_DrugSens.pdf',width=3.5,height=3.5)
CGTrtP <- CoGAPS_SDFIVE_3$Pmean[2,grep('Control',colnames(CoGAPS_SDFIVE_3$Pmean),invert=T)]
DrugSensP <- rep(NA, length(CGTrtP))
names(DrugSensP) <- names(CGTrtP)
for (i in names(DrugSensP)) {
DrugSensP[i] <- DrugSens[strsplit(i,split=" ")[[1]][1],
strsplit(i,split=" ")[[1]][3]]
CGTrtP[i] <- -CoGAPS_SDFIVE_3$Pmean[2,sub(strspliti(i,split=" ",i=3),
'Control',i)] +
CGTrtP[i]
}
plot(DrugSensP, CGTrtP, xlab='Cell Survival (%)',
ylab='Pattern 2 in treatment - control',
col=getCol(strspliti(names(DrugSensP),split=" ",i=3)),
pch=getPCH(strspliti(names(DrugSensP),split=" ",i=1)))
abline(lm(CGTrtP~DrugSensP),lty=2)
legend('bottomleft',col='black',
pch=getPCH(unique(strspliti(names(DrugSensP),split=" ",i=1))),
legend=unique(strspliti(names(DrugSensP),split=" ",i=1)))
corVals <- cor.test(DrugSensP,CGTrtP)
title(sprintf('Change in pattern 2 vs cell survival\ncor %0.2f (p %0.2f)',
corVals$estimate,corVals$p.value))
dev.off()
pdf('graphs/Pattern3_DrugSens.pdf',width=3.5,height=3.5)
CGTrtP <- CoGAPS_SDFIVE_3$Pmean[3,grep('Control',colnames(CoGAPS_SDFIVE_3$Pmean),invert=T)]
DrugSensP <- rep(NA, length(CGTrtP))
names(DrugSensP) <- names(CGTrtP)
for (i in names(DrugSensP)) {
DrugSensP[i] <- DrugSens[strsplit(i,split=" ")[[1]][1],
strsplit(i,split=" ")[[1]][3]]
CGTrtP[i] <- -CoGAPS_SDFIVE_3$Pmean[3,sub(strspliti(i,split=" ",i=3),
'Control',i)] +
CGTrtP[i]
}
plot(DrugSensP, CGTrtP, xlab='Cell Survival (%)',
ylab='Pattern 3 in treatment - control',
col=getCol(strspliti(names(DrugSensP),split=" ",i=3)),
pch=getPCH(strspliti(names(DrugSensP),split=" ",i=1)))
abline(lm(CGTrtP~DrugSensP),lty=2)
legend('bottomleft',col='black',
pch=getPCH(unique(strspliti(names(DrugSensP),split=" ",i=1))),
legend=unique(strspliti(names(DrugSensP),split=" ",i=1)))
corVals <- cor.test(DrugSensP,CGTrtP)
title(sprintf('Change in pattern 3 vs cell survival\ncor %0.2f (p %0.2f)',
corVals$estimate,corVals$p.value))
dev.off()
|
f8167a17bb700c3bc7861e727d818a8100ddfe7d
|
3cf764dabd9de4781791d1d6fb35ae2d3fce41ce
|
/RF_GLB_180815.R
|
7a25587b10c2b20705c5fbd572cdc009cf5b9e5d
|
[] |
no_license
|
wbsalls/GLB
|
2abe2382f574b7e24e290bc075866088f23e52b3
|
22124d962e4d9b64c6b24e071b94fbc37d37b3a1
|
refs/heads/master
| 2021-07-24T11:27:33.236875
| 2021-07-16T13:35:27
| 2021-07-16T13:35:27
| 145,010,895
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 16,411
|
r
|
RF_GLB_180815.R
|
# version on GitHub
library(randomForest)
#library(party) # cforest
#library(caret)
#library(h2o)
source("C:/Users/WSALLS/Git/GLB/RF_functions_180530.R")
setwd("O:\\PRIV\\NERL_ORD_CYAN\\Salls_working\\GLB\\Analysis")
#setwd("/Users/wilsonsalls/Desktop/EPA/GLB/")
output_path <- file.path(getwd(), "RF/out_check3/") # used later
# specify variable file to use for naming
variable_file <- read.csv("GLB_LandscapeAnalysis_variables_2020-01-23.csv", stringsAsFactors = FALSE)
# read in data
lake_data <- read.csv("GLB_LandscapeAnalysis_data_2018-11-06.csv")
lake_data <- lake_data[, -which(colnames(lake_data) == "X")] # remove X column
# select model type
model_type <- "randomForest" # "randomForest" or "conditionalForest"
## subset - ecoregion ------------
# set variable to subset by (or "all")
#colnames(lake_data)[which(grepl("Ecoregion", colnames(lake_data)))]
subset_var <- "all" # run with this one ***
#subset_var <- "Ecoregion_L1_code"
#subset_var <- "Ecoregion_L2_code"
#subset_var <- "Ecoregion_L2_elev_lat"
#subset_var <- "Ecoregion_L2_elev"
#subset_var <- "Ecoregion_L2_highelev_lat" # then run with this one ***
# subset each region for running summary stats
lake_data_lo <- lake_data[which(lake_data$Ecoregion_L2_highelev_lat == "lowElev"),]
lake_data_hi <- lake_data[which(lake_data$Ecoregion_L2_highelev_lat == "hiElevHiLat"),]
# select names of classes/regions meeting the minimum group size (threshold set below; currently 0, i.e. all)
if (subset_var == "all") {
data_subsets <- "all"
} else {
subset_table <- table(lake_data[, which(colnames(lake_data) == subset_var)])
data_subsets <- names(subset_table[subset_table >= 0]) # set minimum group size
}
## responses ----------
# check for NAs in response
sum(is.na(lake_data$CI_sp90th_tmedian))
# add binary bloom/no variable
lake_data$ci_median_bloom <- NA
lake_data$ci_median_bloom[which(lake_data$CI_sp90th_tmedian == 0.0001)] <- "no bloom"
lake_data$ci_median_bloom[which(lake_data$CI_sp90th_tmedian != 0.0001)] <- "bloom"
lake_data$ci_median_bloom <- as.factor(lake_data$ci_median_bloom)
# add variable with ND removed
lake_data$CI_sp90th_tmedian_rmND <- lake_data$CI_sp90th_tmedian
lake_data$CI_sp90th_tmedian_rmND[which(lake_data$CI_sp90th_tmedian_rmND == 0.0001)] <- NA
# select response
#responses <- c("CI_sp90th_tmedian", "CI_sp90th_tmax", "ci_median_WHO", "ci_max_WHO", "ci_median_Ross", "ci_max_Ross", "ci_median_bloom", "CI_sp90th_tmedian_rmND")
#responses <- c("CI_sp90th_tmedian", "ci_median_WHO", "ci_median_Ross", "ci_median_bloom", "CI_sp90th_tmedian_rmND")
responses <- c("CI_sp90th_tmedian")
# function to reorder classification factor levels
reorder_levels <- function(input_dat) {
if (length(levels(input_dat)) == 5) {
output_dat <- factor(input_dat, levels(input_dat)[c(4, 2, 3, 1, 5)])
} else if (length(levels(input_dat)) == 4) {
output_dat <- factor(input_dat, levels(input_dat)[c(4, 2, 3, 1)])
} else if (length(levels(input_dat)) == 3) {
output_dat <- factor(input_dat, levels(input_dat)[c(2, 3, 1)])
} else {
output_dat <- input_dat
}
return(output_dat)
}
#
## predictors ---------
# predictors
pred_vars_all <- colnames(lake_data)[4:(which(colnames(lake_data) == "Ecoregion_L2_code") - 1)]
pred_vars <- pred_vars_all[-which(pred_vars_all %in% c("ShorelineL",
"ShorelineD",
"MaxLength",
"MaxWidth",
"MeanWidth",
"FetchN",
"FetchNE",
"FetchE",
"FetchSE",
"MaxDepthCo",
"rNI90",
"Q"
))] # remove preds
# predictors - see NAs
#for (p in 4:(which(colnames(lake_data) == "Ecoregion_L1") - 1)) {print(paste0(sum(is.na(lake_data[, p])), " : ", colnames(lake_data)[p]))}
# use only top 25 predictors (if desired)
'
ranks_run <- read.csv("O:/PRIV/NERL_ORD_CYAN/Salls_working/GLB/Analysis/RF/out/current_all/var_ranks_summary_2018-11-13.csv", stringsAsFactors = FALSE)
# pick one (if desired)
vars_top <- ranks_run$var[which(ranks_run$rank_hiElevHiLat %in% 1:25)]
#vars_top <- ranks_run$var[which(ranks_run$rank_lowElev %in% 1:25)]
var_key <- read.csv("O:/PRIV/NERL_ORD_CYAN/Salls_working/GLB/Analysis/variable_key.csv", stringsAsFactors = FALSE)
pred_vars <- var_key$Variable[which(var_key$Label %in% vars_top)]
'
# correlation matrix
data_cor_all <- lake_data # lake_data, lake_data_lo, lake_data_hi
data_cor_preds <- data_cor_all[, which(colnames(data_cor_all) %in% pred_vars)]
cor_preds <- cor(data_cor_preds, use = "pairwise.complete.obs")
n_corrs <- ((ncol(cor_preds) ^ 2) - ncol(cor_preds)) / 2 # total num correlations
(sum(cor_preds > 0.7) - ncol(cor_preds)) / 2 # how many correlations exceed 0.7? 86
((sum(cor_preds > 0.7) - ncol(cor_preds)) / 2) / n_corrs # portion
sum(cor_preds < 0.7 & cor_preds > 0.3) / 2 # how many correlations in 0.3 - 0.7?
(sum(cor_preds < 0.7 & cor_preds > 0.3) / 2) / n_corrs # portion
(sum(cor_preds < 0.3)) / 2 # how many correlations are below 0.3? 3397
((sum(cor_preds < 0.3)) / 2) / n_corrs # portion
# top 8
preds8 <- pred_vars[c(87, 32, 46, 42, 21, 20, 19, 25)]
data_cor_preds8 <- data_cor_all[, which(colnames(data_cor_all) %in% preds8)]
cor_preds8 <- cor(data_cor_preds8, use = "pairwise.complete.obs")
library(corrplot) # needed here: this is the first corrplot() call in the script
corrplot(cor_preds8)
#cor_preds <- cor_preds8 # for use above
#write.csv(cor_preds8, "O:/PRIV/NERL_ORD_CYAN/Salls_working/GLB/Analysis/correlations_top8.csv")
# including response
data_cor <- cbind(data_cor_all[, which(colnames(data_cor_all) %in% c("CI_sp90th_tmedian")),],
data_cor_all[, which(colnames(data_cor_all) %in% pred_vars)])
cor_vars <- cor(data_cor, use = "pairwise.complete.obs")
#write.csv(cor_vars, "O:/PRIV/NERL_ORD_CYAN/Salls_working/GLB/Analysis/correlations.csv")
library(corrplot)
corrplot(cor_vars)
# ----------------------------------
rf_list <- list()
treeMSE_df <- data.frame()
Sys.time()
# for each subset (ecoregion)...
for (d in 1:length(data_subsets)) {
if ("all" %in% data_subsets) {
lake_data_use <- lake_data
} else {
lake_data_use <- lake_data[which(lake_data[, which(colnames(lake_data) == subset_var)] == data_subsets[d]), ]
}
# for each response variable...
for (nr in 1:length(responses)) {
resp <- responses[nr]
dataset_name <- paste0(data_subsets[d], "_", resp)
resp_data <- lake_data_use[, resp]
pred_data <- lake_data_use[, pred_vars]
# reorder classification factor levels
if (is.factor(resp_data)) {
resp_data <- reorder_levels(resp_data)
}
# remove records with NA response
pred_data <- pred_data[!is.na(resp_data), ] # this has to go before resp, so don't change order!
resp_data <- resp_data[!is.na(resp_data)]
# remove records with NA predictors, if any
index_na_pred <- which(apply(pred_data, 1, FUN = function(x) {sum(is.na(x))}) > 0)
if (length(index_na_pred) > 0) {
resp_data <- resp_data[-index_na_pred]
pred_data <- pred_data[-index_na_pred, ]
}
# multi RF runs -----------------------------------------------------------------------------------------------
setwd(output_path)
rf_eval_df <- data.frame()
var_imp_df <- data.frame(var = as.character(pred_vars), stringsAsFactors = FALSE)
var_rank_df <- data.frame(var = as.character(pred_vars), stringsAsFactors = FALSE)
if (is.numeric(resp_data)) {
imp_metric_label <- "IncNodePurity"
} else {
imp_metric_label <- "MeanDecreaseGini"
}
set.seed(1)
nruns <- 100
rf_list_subset <- list()
# for each model run...
for (j in 1:nruns) {
print(sprintf(" *** %s: run #%s of %s *** ", dataset_name, j, nruns))
# run rf function
#rf_i <- randomForest(x = pred_data, y = resp_data, na.action = na.omit, mtry = floor(sqrt(ncol(pred_data)))) # randomForest
rf_i <- randomForest(x = pred_data, y = resp_data, na.action = na.omit) # randomForest
# append this RF to list of RFs in this subset, and MSE to treeMSE_df
rf_list_subset[[j]] <- rf_i
treeMSE_df <- rbind(treeMSE_df, data.frame(subset = data_subsets[d], subset_num = d, run = j, tree = 1:rf_i$ntree, mse = rf_i$mse))
## evaluate -----------
# plot error rate
jpeg(sprintf('runs/ErrorRate_%s.jpg', paste0(dataset_name, "_run", j)), width = 1000, height = 700)
plot(rf_i)
dev.off()
if (is.numeric(resp_data)) {
# predict using this rf model
preds <- predict(rf_i)
# run evaluation function, updating
rf_eval_df_i <- eval_model(resp_data, preds, run_name = paste0(dataset_name, "_run", j),
plot_fit = FALSE, save_plot = TRUE, save_dir = "runs")
rf_eval_df <- rbind(rf_eval_df, cbind(rf_eval_df_i,
OOB_mse_mean = mean(rf_i$mse),
OOB_rsq_mean = mean(rf_i$rsq)))
} else {
      # run confusion matrix; round classification.error to 3 places
conf <- (rf_i$confusion)
conf[, ncol(conf)] <- round(conf[, ncol(conf)], 3)
# calculate commission error for each column
comerr_vect <- c()
for (c in 1:(ncol(conf) - 1)) {
comerr_vect <- c(comerr_vect, round((sum(conf[, c]) - conf[c, c]) / sum(conf[, c]), 3))
}
# subset to only values (not error rates) for summing
conf_vals <- conf[1:(nrow(conf) - 1), 1:(ncol(conf) - 1)]
# calculate overall error, append to bottom row
overall_err <- round((sum(conf_vals) - sum(diag(conf_vals))) / sum(conf_vals), 3)
comerr_vect <- c(comerr_vect, overall_err)
# append commission error
conf <- rbind(conf, comerr_vect)
# update names; print; export to csv
rownames(conf)[nrow(conf)] <- "commission error"
      colnames(conf)[ncol(conf)] <- "omission error" # conf is square at this point, but ncol is the clearer index
#print(conf)
write.csv(conf, sprintf("runs/confusionMatrix_%s_run%s.csv", dataset_name, j))
# error rate
rf_eval_df <- rbind(rf_eval_df, cbind(run = j,
OOB_err.rate.est = rf_i$err.rate[nrow(rf_i$err.rate), 1],
confusion.error = conf[nrow(conf), ncol(conf)]))
}
## variable importance -----------
# make importance dataframe for this run
var_imp_df_i <- data.frame(var = rownames(rf_i$importance),
importance_metr = rf_i$importance)
# merge this importance df to existing one
var_imp_df <- merge(var_imp_df, var_imp_df_i)
colnames(var_imp_df)[j + 1] <- paste0("run", j, "_", imp_metric_label)
# plot variable importance
var_import_plot(rf_output = rf_i, var_file = variable_file, response_name = paste0(dataset_name, "_run", j), nvar_show = 20,
save_plot = TRUE, save_dir = "runs", return_df = FALSE)
# variable ranks
imp_ordered <- var_imp_df_i[order(-var_imp_df_i[, 2]), ]
rownames(imp_ordered) <- 1:nrow(imp_ordered)
ranks <- c()
for (v in 1:length(var_rank_df$var)) {
ranks <- c(ranks, as.numeric(rownames(imp_ordered)[which(imp_ordered$var == var_rank_df$var[v])]))
}
var_rank_df <- cbind(var_rank_df, ranks)
colnames(var_rank_df)[j + 1] <- paste0("run", j, "_", dataset_name)
}
rf_list[[d]] <- rf_list_subset
# sum importance metric (Increase in Node Purity or Mean Decrease in Gini Index)
var_imp_df$imp_value_sum <- apply(var_imp_df[, 2:(nruns + 1)], 1, sum)
var_imp_df <- var_imp_df[order(var_imp_df$imp_value_sum, decreasing = TRUE), ]
var_imp_df$imp_value_rank <- 1:nrow(var_imp_df)
plot(var_imp_df$imp_value_rank, var_imp_df$imp_value_sum, xlab = "Overall Importance Rank", ylab = "Score (Sum of Importance Metric)",
main = sprintf("Distribution of Variable Importance Metric Values (%s RF runs)", nruns))
# summarize var_imp_df
for (v in 1:nrow(var_imp_df)) {
var_imp_df$mean[v] <- mean(as.numeric(var_imp_df[v, 2:(nruns + 1)]))
var_imp_df$median[v] <- median(as.numeric(var_imp_df[v, 2:(nruns + 1)]))
var_imp_df$min[v] <- min(as.numeric(var_imp_df[v, 2:(nruns + 1)]))
var_imp_df$max[v] <- max(as.numeric(var_imp_df[v, 2:(nruns + 1)]))
var_imp_df$range[v] <- max(as.numeric(var_imp_df[v, 2:(nruns + 1)])) - min(as.numeric(var_imp_df[v, 2:(nruns + 1)]))
var_imp_df$sd[v] <- sd(as.numeric(var_imp_df[v, 2:(nruns + 1)]))
var_imp_df$var[v] <- variable_file$Label[which(variable_file$Variable == var_imp_df$var[v])]
}
# var_rank_df
# sum ranks to create rank score; order based on rank sum; assign cumulative ranks; plot rank score distribution
var_rank_df$rank_sum <- apply(var_rank_df[, 2:(nruns + 1)], 1, sum)
var_rank_df <- var_rank_df[order(var_rank_df$rank_sum), ]
var_rank_df$cum_rank <- 1:nrow(var_rank_df)
plot(var_rank_df$cum_rank, var_rank_df$rank_sum, xlab = "Overall Rank", ylab = "Score (Sum of Ranks)",
main = sprintf("Distribution of Variable Rank Scores (%s RF runs)", nruns))
# summarize var_rank_df
for (v in 1:nrow(var_rank_df)) {
var_rank_df$mean[v] <- mean(as.numeric(var_rank_df[v, 2:(nruns + 1)]))
var_rank_df$median[v] <- median(as.numeric(var_rank_df[v, 2:(nruns + 1)]))
var_rank_df$min[v] <- min(as.numeric(var_rank_df[v, 2:(nruns + 1)]))
var_rank_df$max[v] <- max(as.numeric(var_rank_df[v, 2:(nruns + 1)]))
var_rank_df$range[v] <- max(as.numeric(var_rank_df[v, 2:(nruns + 1)])) - min(as.numeric(var_rank_df[v, 2:(nruns + 1)]))
var_rank_df$sd[v] <- sd(as.numeric(var_rank_df[v, 2:(nruns + 1)]))
var_rank_df$var[v] <- variable_file$Label[which(variable_file$Variable == var_rank_df$var[v])]
}
# write tables
write.csv(rf_eval_df, sprintf("rf_eval_%s_%s.csv", dataset_name, Sys.Date()))
#write.csv(var_imp_df, sprintf("var_imp_df_%s_%s.csv", dataset_name, Sys.Date()))
write.csv(var_rank_df, sprintf("var_rank_%s_%s.csv", dataset_name, Sys.Date()))
}
}
## write treeMSE_df
write.csv(treeMSE_df, sprintf("treeMSE_df_%s.csv", Sys.Date()))
## aggregate ranking tables
rank_files <- list.files(".", pattern = "var_rank_")
# initiate rank summary table with first rank file
rank_file1 <- read.csv(rank_files[1], stringsAsFactors = FALSE)
rank_summary <- rank_file1[, which(colnames(rank_file1) %in% c("var", "cum_rank"))]
colnames(rank_summary)[2] <- substr(rank_files[1], 5, (regexpr("_CI_sp90th_", rank_files[1]) - 1))
# for each rank file: read csv, pull var and rank columns, merge to rank_summary df, rename column
for (f in 2:length(rank_files)) {
fname <- rank_files[f]
rank_csv <- read.csv(fname, stringsAsFactors = FALSE)
ranks_f <- rank_csv[, which(colnames(rank_csv) %in% c("var", "cum_rank"))]
rank_summary <- merge(rank_summary, ranks_f, by = "var", all.y = TRUE)
colnames(rank_summary)[f + 1] <- substr(fname, 5, (regexpr("_CI_sp90th_", fname) - 1))
}
write.csv(rank_summary, sprintf("var_ranks_summary_%s.csv", Sys.Date()))
## aggregate eval tables
eval_files <- list.files(".", pattern = "rf_eval_")
eval_summary <- data.frame()
for (f in 1:length(eval_files)) {
fname <- eval_files[f]
eval_csv <- read.csv(fname, stringsAsFactors = FALSE)
colmeans <- as.data.frame(t(apply(eval_csv[, 3:11], 2, mean)))
eval_summary <- rbind(eval_summary, data.frame(name = sub("rf_eval_CI_sp90th_tmedian_", "", fname), colmeans))
}
write.csv(eval_summary, sprintf("rf_evals_summary_%s.csv", Sys.Date()))
### ----------------------
##
# individual
'
resp <- responses[1]
resp_data <- lake_data[, resp]
rf <- randomForest(x = pred_data, y = resp_data, na.action = na.omit)
importance(rf, type = 2)
'
|
437435c77b0a76dd9f1747ee1d44f9b18be0028d
|
47b0af173c6c08a2e6b4432230e9e5796d6cc387
|
/man/plotMeta.Rd
|
7249419987afdfc011ff2161f9981b459fd83278
|
[] |
no_license
|
berndweiss/tuma
|
db84b5ab4e292d81b50920c33edd21118d3d1877
|
dcc94460b6d957819f476b8c4fc5c8a63d92edcb
|
refs/heads/master
| 2020-05-15T08:05:10.588948
| 2019-11-29T04:05:00
| 2019-11-29T04:05:00
| 3,453,711
| 1
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 518
|
rd
|
plotMeta.Rd
|
\name{plotMeta}
\alias{plotMeta}
\title{Open new plot window and creates forest and density plot}
\usage{
plotMeta(T, se, k, studlab, myfontsize, mylinewidth)
}
\arguments{
\item{T}{numeric vector of effect sizes}
\item{se}{numeric vector of standard errors}
\item{k}{length of T}
\item{studlab}{character vector of study labels}
\item{myfontsize}{font size (cex)}
\item{mylinewidth}{line width (lwd)}
}
\description{
Open new plot window and creates forest and density plot
}
\keyword{internal}
|
bf184a080c273de71e778e85b0201fe5e548e086
|
fcdb816e104bc8e984aea3b50686a14241c4a92c
|
/hotspot_classification.R
|
30112413efe34df33cdf314f313eaf7cb65aa975
|
[] |
no_license
|
Litaa/hotspot_classification
|
1900723288a5803b936f3dff79f4c186096b1026
|
ab60ff35211f6e0258e1521ef0d204b9bd6b8022
|
refs/heads/master
| 2020-12-07T15:10:27.699376
| 2020-01-09T16:20:15
| 2020-01-09T16:20:15
| 232,743,372
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,327
|
r
|
hotspot_classification.R
|
titikpanas <- read.csv("E:/firms3350314631282061_NRT.csv")
View(titikpanas)
data.hotspot <- titikpanas
data.hotspot$geom <- NULL
data.hotspot$longitude <- NULL
data.hotspot$latitude <- NULL
data.hotspot$scan <- NULL
data.hotspot$track <- NULL
data.hotspot$acq_date <- NULL
data.hotspot$acq_time <- NULL
data.hotspot$satellite <- NULL
data.hotspot$version <- NULL
data.hotspot$frp <- NULL
View(data.hotspot)
#binning data
#range (-1 - 29) (30 - 79) (80 - 100)
data.hotspot$confidenceC <- cut (data.hotspot$confidence, c(-1,29,79,100))
View(data.hotspot)
library(infotheo)
# discretization: bin the result into 3 classes
#class 1 -> 0 <= confidence < 30
#class 2 -> 30 <= confidence < 80
#class 3 -> 80 <= confidence < 100
data.hotspot$confidence <- discretize(as.data.frame(data.hotspot$confidenceC), "equalwidth", 3)
View(data.hotspot)
#create data matrix
data.hotspot$confidence <- data.matrix(data.hotspot$confidence)
View(data.hotspot)
data.hotspot$confidenceC <- NULL
train <- data.hotspot[1:935,]
test <- data.hotspot[936:1169,]
View(train)
label_train <- train$confidence
label_train
label_test <- test$confidence
label_test
library(class)
result <- knn(train, test, label_train, k=5)
result
table(result, label_test)
plot(result)
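# Illustrative addition (a sketch): overall accuracy from the confusion
# table above, assuming predicted and true classes line up on the diagonal.
acc <- sum(diag(table(result, label_test))) / length(label_test)
acc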
|
9a72041bce8078d61d930b582ec4e17bfc6ace25
|
863aa7e71911423a9096c82a03ef755d1cf34654
|
/man/replace_data_value.Rd
|
b75b50f80e137673679e467d66dc1a03a98c72d0
|
[] |
no_license
|
BioSystemsUM/specmine
|
8bd2d2b0ee1b1db9133251b80724966a5ee71040
|
13b5cbb73989e1f84e726dab90ff4ff34fed68df
|
refs/heads/master
| 2023-08-18T05:51:53.650469
| 2021-09-21T13:35:11
| 2021-09-21T13:35:11
| 313,974,923
| 1
| 1
| null | 2021-09-21T13:35:12
| 2020-11-18T15:22:49
|
R
|
UTF-8
|
R
| false
| false
| 1,126
|
rd
|
replace_data_value.Rd
|
\name{replace_data_value}
\alias{replace_data_value}
%- Also NEED an '\alias' for EACH other topic documented here.
\title{
Replace data value
}
\description{
Replace a data value for a new value on the dataset.
}
\usage{
replace_data_value(dataset, x.axis.val, sample, new.value,
by.index = FALSE)
}
%- maybe also 'usage' for other objects documented here.
\arguments{
\item{dataset}{
list representing the dataset from a metabolomics experiment.
}
\item{x.axis.val}{
variable index or name.
}
\item{sample}{
sample name.
}
\item{new.value}{
new value to replace the old value.
}
\item{by.index}{
boolean value to indicate if the x.axis.val is an index or not.
}
}
\value{
Returns the dataset with the data value replaced.
}
\examples{
## Example of replacing a data value from the dataset
library(specmine.datasets)
data(cachexia)
dataset = replace_data_value(cachexia, "Creatine", "PIF_178", 10.3,
by.index = FALSE)
}
% Add one or more standard keywords, see file 'KEYWORDS' in the
% R documentation directory.
\keyword{ dataset }
\keyword{ value }% __ONLY ONE__ keyword per line
|
3549cd67d24ab64b8cd50858a8adfc2fb4d20be4
|
9aafde089eb3d8bba05aec912e61fbd9fb84bd49
|
/codeml_files/newick_trees_processed/1842_43/rinput.R
|
3fb3626ed607be079db25a9d665c862d1802764d
|
[] |
no_license
|
DaniBoo/cyanobacteria_project
|
6a816bb0ccf285842b61bfd3612c176f5877a1fb
|
be08ff723284b0c38f9c758d3e250c664bbfbf3b
|
refs/heads/master
| 2021-01-25T05:28:00.686474
| 2013-03-23T15:09:39
| 2013-03-23T15:09:39
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 137
|
r
|
rinput.R
|
library(ape)
testtree <- read.tree("1842_43.txt")
unrooted_tr <- unroot(testtree)
write.tree(unrooted_tr, file="1842_43_unrooted.txt")
|
1ae3425ccb046bdbeb5e496e3ed8602bbfb59b8c
|
c3d2fb0a02e5eabbd0234860186f246651c9fb39
|
/R/Visualization/ggplot2-color-ribon-for-discrete-variable.R
|
e79c4824be3b4e99e57bdef1f59b06a5140d73be
|
[] |
no_license
|
ppbppb001/Snippets
|
09216417c52af40c947114bc106aee97776674f7
|
49e254eecd55f5e777d87c3f06c720cb881fb2dd
|
refs/heads/master
| 2023-08-14T08:16:07.814866
| 2023-08-07T11:57:15
| 2023-08-07T11:57:15
| 93,360,154
| 0
| 1
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,969
|
r
|
ggplot2-color-ribon-for-discrete-variable.R
|
# 2019-05-06
library(ggplot2)
x <- rep(seq(from=100,to=104),6) # Company's id number
y <- rnorm(5*6)*5+15 # values
z <- c(rep(2001,5), rep(2002,5), rep(2003,5), rep(2004,5), rep(2005,5), rep(2006,5)) # years
z <- as.character(z) # convert 'years' from int to string
df <- data.frame(Company=x, Value=y, Year=z) # compose a data frame
p <- ggplot(df, aes(x=Company, y=Value, fill=Year)) + # colored ribbon for each year
  geom_area() + # area fill (ribbon)
labs(x="X=COMPANY", y="Y=VALUE", title="TITLE") + # x/y labels and title
theme(plot.title = element_text(hjust=0.5)) # center the title
p # draw the plot using default palette(hue)
p + scale_fill_brewer(palette = "Set1") # draw the plot using color brewer palette
p + scale_fill_brewer(palette = "Set2") # draw the plot using color brewer palette
p + scale_fill_brewer(palette = "Set3") # draw the plot using color brewer palette
p + scale_fill_brewer(palette = "Pastel1") # draw the plot using color brewer palette
p + scale_fill_brewer(palette = "Pastel2") # draw the plot using color brewer palette
p + scale_fill_brewer(palette = "Accent") # draw the plot using color brewer palette
p + scale_fill_brewer(palette = "Dark2") # draw the plot using color brewer palette
p + scale_fill_brewer(palette = "Paired") # draw the plot using color brewer palette
p + scale_fill_brewer(palette = "Spectral") # draw the plot using color brewer palette
p + scale_fill_brewer(palette = "Greens") # draw the plot using color brewer palette
p + scale_fill_brewer(palette = "Blues") # draw the plot using color brewer palette
p + scale_fill_brewer(palette = "Oranges") # draw the plot using color brewer palette
|
93616f0b0eb840203fcb010741cf30268192ea85
|
aba5794905d20a12d0207026b7d843a5d81c31ad
|
/man/any_greater_than.Rd
|
1a70857d9dea0a7fd0d471fb3ac069700adabc00
|
[] |
no_license
|
kgori/svfiltr
|
9097a4eba8c0792766d3b81f9e515d71b0e79980
|
106a22110cf10310fba7da38e95a389ca28acb54
|
refs/heads/master
| 2021-01-12T14:55:10.535079
| 2016-11-04T21:15:51
| 2016-11-04T21:15:51
| 68,912,542
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 446
|
rd
|
any_greater_than.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/filters.R
\name{any_greater_than}
\alias{any_greater_than}
\title{Find rows of a dataframe where any of the values
is above a threshold}
\usage{
any_greater_than(df, threshold)
}
\arguments{
\item{df}{A (numeric) dataframe.}
\item{threshold}{A number.}
}
\value{
TRUE or FALSE.
}
\description{
Find rows of a dataframe where any of the values
is above a threshold
}
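\examples{
# Hedged sketch with made-up data; not taken from the package sources.
\dontrun{
df <- data.frame(a = c(1, 5), b = c(2, 0.5))
any_greater_than(df, threshold = 4)
}
}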
|
7bad12c9418bebb1303e007e8a871028850b0590
|
56761570830ea13abd8282a6f0f8dafe665b97b2
|
/plot3.R
|
dda888083bca98fd1ce991e56fa988ef9155246e
|
[] |
no_license
|
jclopher/ExData_Plotting1
|
9bc7032750587a90a85ee09ea3e41012f48ccec2
|
f9fe3961ef265dbca2f0cf4328c3b9a426050ccf
|
refs/heads/master
| 2021-01-22T20:39:38.696940
| 2014-09-05T12:35:21
| 2014-09-05T12:35:21
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,598
|
r
|
plot3.R
|
## Read files and others...
## setwd("~/R/Course 4 Exploratory/household_power_consumption")
library(Defaults)
setDefaults('as.Date.character', format = '%d/%m/%Y')
## initial <- read.table("./household_power_consumption.txt",nrows=100)
## classes <- sapply(initial,class)
## taball <- read.table("./household_power_consumption.txt", colClasses=classes)
HousePow <- read.table("./household_power_consumption.txt",sep=";",header=TRUE,na.strings="?", as.is = TRUE, colClasses=c("Date", "character", "numeric", "numeric", "numeric", "numeric", "numeric", "numeric", "numeric"))
HousePowData <- HousePow[which((as.character(HousePow$Date)=="2007-02-01") | (as.character(HousePow$Date)=="2007-02-02") ),]
#str(HousePowData)
Sys.setlocale("LC_TIME","C")
## Open PNG device; create 'Plot3.png' in my working directory
## Create plot and send to a file (no plot appears on screen)
png(file = "Plot3.png", width = 480, height = 480)
## Construct Plot 3
HousePowData$Date_Time <- paste(HousePowData$Date, HousePowData$Time)
HousePowData$Date_Time <- strptime(HousePowData$Date_Time, format="%Y-%m-%d %H:%M:%S")
with(HousePowData,plot(Date_Time, Sub_metering_1, xlab="", ylab="Energy sub metering", type="l", col = "black"))
legend("topright", c('Sub_metering_1','Sub_metering_2','Sub_metering_3'), lty=1, col=c('black','red','blue'))
lines(HousePowData$Date_Time, HousePowData$Sub_metering_2, type = "l", col = "red")
lines(HousePowData$Date_Time, HousePowData$Sub_metering_3, type = "l", col = "blue")
## Close the PNG file device
## Now you can view the file 'Plot3.png' on your computer
dev.off()
|
b248f11df207525f744da198ecb6014a45d82e7d
|
493623e05e3ad8b3f9b874e718913221f6dd3dee
|
/MUV27_G4_Heatmap.R
|
2f0708eb7b370fc267254b1cec4a0785a2d565ca
|
[] |
no_license
|
mayagarg/MDASummer2020
|
0afe884cdbfcf373f870c69c4b2161123a1ab07e
|
c9179393006f9a7069e0d991000d83d0335c59f4
|
refs/heads/master
| 2022-12-10T11:36:26.199576
| 2020-08-28T22:14:52
| 2020-08-28T22:14:52
| 287,127,165
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 4,910
|
r
|
MUV27_G4_Heatmap.R
|
library(Seurat)
library("devtools")
library(Matrix)
library(dplyr)
#load in the data
MUV27.data <- read.table("/Users/mayagarg/Downloads/GSE119926_RAW copy/GSM3905412_MUV27.txt", header=TRUE)
MUV27mg.data <- matrix()
MUV27mg.data <- rbind(MUV27.data["SPARC", ], MUV27.data["P2RY12", ], MUV27.data["TMEM119", ], MUV27.data["SLC2A5", ], MUV27.data["SALL1", ], MUV27.data["ECSCR", ],
MUV27.data["SCAMP5", ], MUV27.data["PTPRM", ], MUV27.data["KCND1", ], MUV27.data["SDK1", ], MUV27.data["CLSTN1", ], MUV27.data["SGCE", ], MUV27.data["SLC12A2", ],
MUV27.data["CADM1", ], MUV27.data["SEMA4G", ], MUV27.data["PROS1", ], MUV27.data["SLCO2B1", ], MUV27.data["RTN4RL1", ], MUV27.data["CMTM4", ],
MUV27.data["FAT3", ], MUV27.data["SMO", ], MUV27.data["MRC2", ], MUV27.data["JAM2", ], MUV27.data["PLXNA4", ], MUV27.data["SLC46A1", ],
MUV27.data["AGMO", ], MUV27.data["TMEM204", ], MUV27.data["BST2", ], MUV27.data["C2", ], MUV27.data["C4B", ], MUV27.data["CD74", ],
MUV27.data["CFB", ], MUV27.data["CIITA", ], MUV27.data["CTSC", ], MUV27.data["EXO1", ], MUV27.data["GBP2", ],
MUV27.data["IFNB1", ], MUV27.data["IRF1", ], MUV27.data["ITK", ], MUV27.data["JAK3", ], MUV27.data["LY86", ], MUV27.data["MASP1", ],
MUV27.data["OAS2", ], MUV27.data["P2RY14", ], MUV27.data["SEMA4A", ], MUV27.data["SERPING1", ], MUV27.data["STAT1", ],
MUV27.data["TLR2", ], MUV27.data["TLR3", ], MUV27.data["TNF", ], MUV27.data["TNFSF10", ], MUV27.data["TNFSF13b", ], MUV27.data["TNFSF15", ],MUV27.data["TNFSF8", ],
MUV27.data["CX3CR1", ])
MUV27mg.data[is.na(MUV27mg.data)] <- 0
#initialize the Seurat data with the raw (non-normalized) data
#min.cells= include features (genes) detected in at least X many cells
#min.features = include cells where at least Y many features (genes) are detected
MUV27 <- CreateSeuratObject(counts = MUV27mg.data, project= "GSM3905412_MUV27")
MUV27
#An object of class Seurat
#55 features (genes) across 314 samples (cells) within 1 assay
#Active assay: RNA (55 features, 0 variable features)
#QC pre processing
#MT is mitochondrial gene marker
MUV27[["percent.mt"]] <- PercentageFeatureSet(MUV27, pattern = "^MT-")
#Normalizing data
MUV27 <- NormalizeData(MUV27, normalization.method = "LogNormalize", scale.factor = 10000)
#identification of highly variable features (feature selection)
MUV27 <- FindVariableFeatures(MUV27, selection.method = "vst")
# Identify the 10 most highly variable genes
top10 <- head(VariableFeatures(MUV27), 10)
# plot variable features with and without labels
#check to see if there are any immune related genes here
plot1 <- VariableFeaturePlot(MUV27)
plot2 <- LabelPoints(plot = plot1, points = top10, repel = TRUE, xnudge = 0, ynudge = 0)
CombinePlots(plots = list(plot1, plot2))
#scaling the data
all.genes <- rownames(MUV27)
MUV27 <- ScaleData(MUV27, features = all.genes)
#perform PCA on scaled data using linear dimension reduction
#heat maps require PCA
MUV27 <- RunPCA(MUV27, features = VariableFeatures(object = MUV27), approx=FALSE)
#clustering cells
MUV27 <- FindNeighbors(MUV27, dims = 1:15)
MUV27 <- FindClusters(MUV27)
#determine marker groups and cluster data
MUV27.markers <- FindAllMarkers(MUV27, only.pos = TRUE, min.pct = 0.25, logfc.threshold = 0.25)
MUV27.markers %>% group_by(cluster) %>% top_n(n = 2, wt = avg_logFC)
#find top markers and plot
top10 <- MUV27.markers %>% group_by(cluster) %>% top_n(n = 10, wt = avg_logFC)
DoHeatmap(MUV27) #+ theme(axis.text.y = element_text(size=3))
#want to extract the different clusters as individual matrices
table(Idents(MUV27)) #how many cells are in each cluster
# 0 1 2 --> cluster
# 111 110 93 --> number of cells per cluster
#extract cluster 0
cluster0.matrix.MUV27 <- as.matrix(GetAssayData(MUV27, slot = "counts")[, WhichCells(MUV27, ident = "0")])
ncol(cluster0.matrix.MUV27) #111 --> correct (column is cell)
#extract cluster 1
cluster1.matrix.MUV27 <- as.matrix(GetAssayData(MUV27, slot = "counts")[, WhichCells(MUV27, ident = "1")])
ncol(cluster1.matrix.MUV27) #110 --> correct (column is cell)
#extract cluster 2
cluster2.matrix.MUV27 <- as.matrix(GetAssayData(MUV27, slot = "counts")[, WhichCells(MUV27, ident = "2")])
ncol(cluster2.matrix.MUV27) #93 --> correct (column is cell)
#find the average expression value per cluster (sums all of the expression levels in the
#cluster and divides by the number of cells --> average sum of expression per cell)
avg.c0.MUV27 <- sum(cluster0.matrix.MUV27)/ncol(cluster0.matrix.MUV27)
avg.c1.MUV27 <- sum(cluster1.matrix.MUV27)/ncol(cluster1.matrix.MUV27)
avg.c2.MUV27 <- sum(cluster2.matrix.MUV27)/ncol(cluster2.matrix.MUV27)
c("C0", "C1", "C2", "C3")[which.max(c(avg.c0.MUV27, avg.c1.MUV27, avg.c2.MUV27))] #C1
|
2aee54e9238290f281d93b528926a3904afb3c05
|
abbc6395e0f7d3987145818c961d1792157a17b3
|
/man/as.numeric2.Rd
|
cc6acaa233caee2139153a3189ef3d0182f2c257
|
[] |
no_license
|
cma1/JGmisc
|
04e0d2cdec68c2364cbd62a19da1aaf5ac4058cb
|
ca68d2c995bab52e4b6606c291cff4f3881a663f
|
refs/heads/master
| 2020-03-14T22:40:57.834088
| 2018-05-02T09:13:15
| 2018-05-02T09:13:15
| 131,826,546
| 0
| 0
| null | 2018-05-02T09:12:24
| 2018-05-02T09:12:23
| null |
UTF-8
|
R
| false
| false
| 776
|
rd
|
as.numeric2.Rd
|
\name{as.numeric2}
\alias{as.numeric2}
\title{Numeric Vectors in Dataframe}
\usage{
as.numeric2(x, as.char = T)
}
\description{
Creates or coerces objects of type \code{"numeric"}.
}
\arguments{
\item{x}{ a vector, matrix, dataframe or other R object which is coercible to one by \code{as.vector(x, "numeric")}. }
\item{as.char}{ logical. Should \code{as.character()} wrapper be added? }
}
\author{Jason Grafmiller}
\details{
The default \code{as.char = T} ensures that coerced factors use their label values rather than the implicit factor codes used by R.
}
\seealso{
\code{\link{as.numeric}}, \code{\link{is.numeric}}
}
\examples{
df <- data.frame(A = rep(c(4, 5, 6), each = 3),
B = rep(c("10","45", "3.14"), 3))
as.numeric2(df)
as.numeric2(df, as.char = F)
}
|
57fbc69c0ecdb7c4d74d59286c22b098b6131d33
|
600144623603ba5825d11e5ac01b00567cdb28a9
|
/simple_mapbox_map.R
|
ba6efa0da420fef095702ea9d348d951bc174909
|
[] |
no_license
|
talonendm/Rmap
|
751132ca1706695892534dbe1527c89a0bb61419
|
849af077d819755b4c416c0cfbb6ccb9556bad01
|
refs/heads/master
| 2022-12-23T17:30:23.200726
| 2020-09-25T03:10:25
| 2020-09-25T03:10:25
| 296,081,245
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 3,721
|
r
|
simple_mapbox_map.R
|
# simple_mapbox_map.R
# apikey: ------------
# https://www.r-bloggers.com/2020/09/running-an-r-script-on-a-schedule-gh-actions/
# How do you add them to your local environment?
#
# Create a .Renviron file in your local project
# add a new line to your .gitignore file: .Renviron
# Now this file with secrets will be ignored by git and you
# can never accidentally add it to a repo.
# the .Renviron file is a simple text file where you can add ‘secrets’ like: apikey="aVerY5eCretKEy" on a new line.
# at github: https://github.com/talonendm/Rmap/settings/secrets
p <- list()
p$mapboxkey <- Sys.getenv("MAPBOX_TOKEN") # note restart session if not working at first time
if (FALSE) {
# setup
library(plotKML)
library(dplyr)
gpxlist <- c('9_21_20 16_53.gpx')
gpxfiles <- paste0('C:\\data\\sportstracker_ebike\\',gpxlist)
gpxfiles
# gpxfiles <- 'https://github.com/talonendm/Rmap/blob/master/data/ebike/9_21_20%2016_53.gpx'
# gpxfiles <- 'https://raw.githubusercontent.com/talonendm/Rmap/master/data/ebike/9_21_20%2016_53.gpx'
loca <- list()
i <- 0
for (file_i in gpxfiles) {
i <- i + 1
da <- readGPX(file_i, metadata = FALSE, bounds = FALSE, waypoints = FALSE, tracks = TRUE, routes = FALSE)
loca[[i]] <- data.frame(da$tracks)
}
data <- rbind_list(loca) %>% dplyr::select(lat = NA.lat, lon = NA.lon, datetime = NA.time)
head(data)
write.csv(data, file= 'C:\\github\\Rmap\\data\\ebike\\ebike.csv')
df <- readLines(gpxfiles)
# install.packages('RCurl')
# library(RCurl)
# myfile <- getURL('https://sakai.unc.edu/access/content/group/3d1eb92e-7848-4f55-90c3-7c72a54e7e43/public/data/bycatch.csv', ssl.verifyhost=FALSE, ssl.verifypeer=FALSE)
# not working fells_loop <- readGPX("http://www.topografix.com/fells_loop.gpx")
# fells_loop <- readGPX(getURL("https://github.com/talonendm/Rmap/blob/master/data/ebike/9_21_20%2016_53.gpx"))
} else {
# data2 <- read.csv('https://github.com/talonendm/Rmap/blob/master/data/ebike/ebike.csv')
# note the format:
# df <- read.csv(paste0('https://raw.githubusercontent.com/uber-common/deck.gl-data/master/','examples/3d-heatmap/heatmap-data.csv' ))
# raw; button
data <- read.csv('https://raw.githubusercontent.com/talonendm/Rmap/master/data/ebike/ebike.csv')
}
# .....................................
library(mapdeck)
options(mapbox.accessToken = p$mapboxkey)
library(mapdeck)
library(widgetframe)
library(dplyr)
# restricted not working:
# https://stackoverflow.com/questions/57764179/mapbox-access-token-restrict-to-url-does-not-work-with-github-pages
df <- data
df <- df[!is.na(df$lon), ]
location = as.vector(c(df$lon[1], df$lat[1]))
# do not hard-code the token in HTML output or exported files: token = p$mapboxkey / MAPBOX_TOKEN
map <- mapdeck(location = location, style = mapdeck_style("outdoors") , pitch = 40, zoom = 11, show_view_state = FALSE ) %>%
add_heatmap(
data = df
, lat = "lat"
, lon = "lon"
, weight = "weight"
, colour_range = colourvalues::colour_values(1:6, palette = "inferno")
, update_view = FALSE
)
# update field: https://stackoverflow.com/questions/56171231/add-polygon-in-mapdeck-zooms-out-the-map
# Access Tokens
# If the token argument is not used, the map will search for the token, firstly by checking if set_token() was used, then it will search environment variables using Sys.getenv() and the following values, in this order
# c("MAPBOX_TOKEN","MAPBOX_KEY","MAPBOX_API_TOKEN", "MAPBOX_API_KEY", "MAPBOX", "MAPDECK")
# If multiple tokens are found, the first one is used
print(map)
# test:
# source('C:/github/Rmap/simple_mapbox_map.R', encoding = 'UTF-8')
# https://rstudio.github.io/renv/articles/renv.html
# install.packages('renv')
|
958d9698ab3f55f5fc66fc84a0da22deeceb964a
|
faffd534f3f2b2fb47aaa958565091bf7c303fb7
|
/Test/nuevo-test.R
|
e8763ca6268926b3dc6c560500dcbcff49f56f1c
|
[] |
no_license
|
ruddyblip/Proyecto_0001
|
d5915da186437649d5112b53c8eb62759a7d2087
|
eb641d4443a0632ab03624a5e4344801137975d3
|
refs/heads/main
| 2023-02-16T20:04:02.684087
| 2021-01-11T23:04:12
| 2021-01-11T23:04:12
| 328,753,908
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 33
|
r
|
nuevo-test.R
|
### R scrip
x <- c(1:6)
mean(x)
|
d0a453cd321b23de78e38ccafef76d41411735ce
|
fb6e2ba2d21d370519997611531606bd8dfe282c
|
/man/generic_annual_report.Rd
|
3ffdd2061e7a19028a31c471a8cae2aabb5c5a4d
|
[] |
no_license
|
Arpae-it/arpautils
|
321cc302d240fbfb6745965eaef8d87fb658c073
|
16dae958622f147ebe7d2a9c044437eac9caf1f3
|
refs/heads/master
| 2023-06-20T08:40:24.644973
| 2021-07-06T09:13:24
| 2021-07-06T09:13:24
| 385,835,918
| 1
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 5,343
|
rd
|
generic_annual_report.Rd
|
\name{generic_annual_report}
\alias{prepare.annual_report}
\alias{calculate.annual_report}
\alias{write.annual_report}
%- Also NEED an '\alias' for EACH other topic documented here.
\title{
%% ~~function to do ... ~~
Functions to produce annual statistics (single station)
}
\description{
%% ~~ A concise (1-5 lines) description of what the function does. ~~
Functions to produce the typical annual statistics (annual mean, exceedances of the daily mean and of the daily maximum of the 8-hour running mean) for a single station: extraction, calculation, writing to the DB.
}
\usage{
prepare.annual_report(con, id.staz, id.param, year=NULL, tstep, ...)
calculate.annual_report(data, id.param, thr.daily.ave=NULL, thr.ave8h.max=NULL,
thr.hourly=NULL, thr.multihourly=NULL, NH=3,
critical.months=NULL)
write.annual_report(con, AR, id.param, verbose=F, ...)
}
%- maybe also 'usage' for other objects documented here.
\arguments{
  \item{con}{
identifier of the DB connection (string)
}
  \item{id.staz}{
numeric code identifying the station
}
  \item{id.param}{
numeric code identifying the pollutant
}
  \item{year}{
year for which the report is requested. If left \code{NULL}, the year of 5 months ago is used, relative to \code{Sys.Date()}.
}
  \item{tstep}{
character string identifying the time step ("H" or "d", hourly or daily)
}
  \item{data}{
data extracted by \code{prepare.annual_report}
}
  \item{thr.daily.ave}{
threshold for the daily mean
}
  \item{thr.ave8h.max}{
threshold for the daily maximum of the 8-hour running mean
}
  \item{thr.hourly}{
hourly threshold
}
  \item{thr.multihourly}{
threshold for hourly exceedances lasting several consecutive hours
}
  \item{NH}{
number of consecutive hours of exceedance to count if \code{thr.multihourly} is not NULL
}
  \item{critical.months}{
numeric vector of the months over which to compute the period mean (e.g. \code{c(1:3,10:12)} for the winter mean)
}
  \item{AR}{
list of \code{data.frame} produced by \code{calculate.annual_report} (see Value)
}
  \item{verbose}{
prints some information useful for debugging
}
  \item{\dots}{
optional parameters. The function \code{prepare.annual_report} passes them to \code{dbqa.get.datastaz}; the function \code{write.annual_report} passes them to \code{dbqa.insert}.
}
}
\details{
%% ~~ If necessary, more details than the description above ~~
}
\value{
%% ~Describe the value returned
%% If it is a LIST, use
%% \item{comp1 }{Description of 'comp1'}
%% \item{comp2 }{Description of 'comp2'}
%% ...
The function \code{calculate.annual_report} returns a \code{data.frame} with:
  \item{annual.mean }{annual mean}
  \item{annual.nValid }{number of valid data used to compute the annual mean}
  \item{annual.percValid}{percentage of valid data used to compute the annual mean}
  \item{annual.nExpected}{number of data expected in the year}
  \item{annual.efficiency}{ratio between the number of available and expected data in the year}
if \code{thr.daily.ave} is not \code{NULL}, the \code{data.frame} also has:
  \item{daily.nexc}{exceedances of the daily mean}
  \item{daily.nValid}{number of valid data used to compute the exceedances of the daily mean}
  \item{daily.percValid}{percentage of valid data used to compute the exceedances of the daily mean}
if \code{thr.ave8h.max} is not \code{NULL}, the \code{data.frame} also has:
  \item{ave8h.nexc}{exceedances of the daily maximum of the 8-hour running mean}
  \item{ave8h.nValid}{number of valid data used to compute the exceedances of the daily maximum of the 8-hour running mean}
  \item{ave8h.percValid}{percentage of valid data used to compute the exceedances of the daily maximum of the 8-hour running mean}
if \code{thr.hourly} or \code{thr.multihourly} are not \code{NULL}, the \code{data.frame} also has:
  \item{hourly.nValid}{number of valid hourly data}
  \item{hourly.percValid}{percentage of valid hourly data}
if \code{thr.hourly} is not \code{NULL}, the \code{data.frame} also has:
  \item{hourly.nexc}{number of hourly exceedances}
if \code{thr.multihourly} is not \code{NULL}, the \code{data.frame} also has:
  \item{multihourly.nexc}{number of hourly exceedances of at least NH consecutive hours}
if \code{critical.months} is not \code{NULL}, the \code{data.frame} also has:
  \item{critmonths.mean }{mean of the selected months}
  \item{critmonths.nValid }{number of valid data in the selected months}
  \item{critmonths.percValid}{percentage of valid data in the selected months}
  \item{critmonths.nExpected}{number of data expected in the selected months}
  \item{critmonths.efficiency}{ratio between the number of available and expected data in the selected months}
}
\references{
%% ~put references to the literature/web site here ~
}
\note{
%% ~~further notes~~
}
%% ~Make other sections like Warning with \section{Warning }{....} ~
%\section{Warning }{
% Use the \code{empty=TRUE} option only if strictly necessary: it empties the whole ozone ??? table in the DB.
%}
\seealso{
%% ~~objects to See Also as \code{\link{help}}, ~~~
}
\examples{
\dontrun{
db_usr="yourUsername"
db_pwd="yourPassword"
db_name="DBaddress"
con1 <- dbqa.connect(db_usr, db_pwd, db_name)
dat <- prepare.annual_report(con=con1, id.staz="2000003")
calculate.annual_report(data=dat)
dbDisconnect(con1)
}
}
|
d43da0091bf7204aaa198aa3ab6753c850106b4b
|
7d4613a3fcb34d032ae9227566132d8fcbf12682
|
/Rscripts/PrepareBulkData.R
|
244673817092f1038706d681a465e2d4fbadfd41
|
[
"BSD-2-Clause"
] |
permissive
|
junseonghwan/PhylExAnalysis
|
15f2b9d6ec4e67911fbfac5d2bd6e96034f44680
|
5bafad3fb6dc60e4144b18e85bbdba47a9a04fe7
|
refs/heads/main
| 2023-04-11T17:29:30.812185
| 2023-03-29T14:49:13
| 2023-03-29T14:49:13
| 335,060,186
| 1
| 1
| null | 2021-02-06T05:22:54
| 2021-02-01T19:26:43
|
R
|
UTF-8
|
R
| false
| false
| 4,416
|
r
|
PrepareBulkData.R
|
args = commandArgs(trailingOnly=TRUE)
print(args)
VCF_PATH <- as.character(args[1])
CNV_PATH <- as.character(args[2])
OUTPUT_PATH <- as.character(args[3])
#VCF_PATH <- "~/data/TNBC/BCSA1/BCSA1_filtered_hg38.vcf"
#CNV_PATH <- "~/data/TNBC/BCSA1/"
#OUTPUT_PATH <- "~/PhylExAnalysis/_temp/"
# path to TitanCNA output file within each bulk region.
CNV_SUFFIX_PATH <- "cna/results/titan/hmm/"
OPTIMAL_CNV_SOLN_FILE <- paste(CNV_SUFFIX_PATH, "optimalClusterSolution.txt", sep="/")
GERMLINE_COLUMN_IDX <- 1
MIN_VAF <- 0.03
APPLY_PASS_FILTER <- TRUE # Set to TRUE if there is a filter to be used.
#FILTER_HLA_GENES <- TRUE
library(biomaRt)
library(PhylExR)
library(vcfR)
vcf <- read.vcfR(VCF_PATH)
fix <- data.frame(vcf@fix)
fix <- ProcessFixedComponentVCF(fix, strip_chr_prefix = T)
allelic_depth <- extract.gt(vcf, element = "AD")
allelic_depth <- data.frame(allelic_depth)
# Select SNVs
filter_idx2 <- (nchar(fix$ALT) == 1) & (nchar(fix$REF) == 1)
# Pass filters
if (APPLY_PASS_FILTER) {
filter_idx1 <- (fix$FILTER == "PASS")
filter_idx <- filter_idx1 & filter_idx2
} else {
filter_idx <- filter_idx2
}
fix <- fix[filter_idx,]
allelic_depth <- allelic_depth[filter_idx,]
dim(fix)
dim(allelic_depth)
region_names <- names(allelic_depth)
ref_counts <- data.frame(matrix(0, ncol = length(region_names), nrow = sum(filter_idx)))
alt_counts <- data.frame(matrix(0, ncol = length(region_names), nrow = sum(filter_idx)))
for (i in 1:length(region_names)) {
ref_alt <- sapply(as.character(allelic_depth[,region_names[i]]), function(ad) {
ref <- strsplit(ad, split=",")[[1]][1]
alt <- strsplit(ad, split=",")[[1]][2]
return(matrix(c(ref, alt), ncol=2, byrow = T))
})
ref_alt <- t(ref_alt)
ref_counts[,i] <- as.numeric(ref_alt[,1])
alt_counts[,i] <- as.numeric(ref_alt[,2])
}
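# A vectorized sketch of the per-region parse above (an illustration, not
# used below; assumes every AD entry is a "ref,alt" pair):
# ad_split <- strsplit(as.character(allelic_depth[[region_names[i]]]), split = ",")
# ref_counts[, i] <- as.numeric(vapply(ad_split, `[`, character(1), 1))
# alt_counts[, i] <- as.numeric(vapply(ad_split, `[`, character(1), 2))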
names(ref_counts) <- region_names
names(alt_counts) <- region_names
depth <- ref_counts + alt_counts
# Filter out by MIN_VAF
vaf <- alt_counts[,-GERMLINE_COLUMN_IDX]/depth[,-GERMLINE_COLUMN_IDX]
region_count <- dim(vaf)[2]
filter_idx <- (rowSums(vaf >= MIN_VAF) == region_count)
fix <- fix[filter_idx,]
depth <- depth[filter_idx,]
alt_counts <- alt_counts[filter_idx,]
d <- apply(depth[,-GERMLINE_COLUMN_IDX], 1, function(row) {
ret <- paste(row, collapse = ",")
ret
})
b <- apply(alt_counts[,-GERMLINE_COLUMN_IDX], 1, function(row) {
ret <- paste(row, collapse = ",")
ret
})
# Construct multi-region data frame.
snv_count <- dim(fix)[1]
mut_ids <- paste("s", 0:(snv_count-1), sep="")
# Add copy number information.
snv.gr <- ConstructGranges(chr = fix[,c("CHROM")], start = fix[,c("POS")], width = 0)
major_cn <- matrix(1, nrow = snv_count, ncol = region_count)
minor_cn <- matrix(1, nrow = snv_count, ncol = region_count)
colnames(major_cn) <- region_names[-GERMLINE_COLUMN_IDX]
colnames(minor_cn) <- region_names[-GERMLINE_COLUMN_IDX]
for (i in 1:region_count) {
region <- region_names[-GERMLINE_COLUMN_IDX][i]
REGION_PATH <- paste(CNV_PATH, region, sep="/")
opt_soln <- read.table(paste(REGION_PATH, OPTIMAL_CNV_SOLN_FILE, sep="/"), header=F, skip = 1)
CNA_PATH <- paste(REGION_PATH, "/", CNV_SUFFIX_PATH, "/optimalClusterSolution/", opt_soln$V2, ".segs.txt", sep="")
cna <- read.table(CNA_PATH, header=T)
# Strip `chr`
cna_chrs <- gsub("chr", "", cna$Chromosome)
cna.gr <- ConstructGranges(cna_chrs, cna$Start_Position.bp., width = cna$End_Position.bp. - cna$Start_Position.bp.)
ret <- findOverlaps(snv.gr, cna.gr)
major_cn[ret@from,i] <- cna[ret@to,"MajorCN"]
minor_cn[ret@from,i] <- cna[ret@to,"MinorCN"]
}
major_cn <- apply(major_cn, 1, function(row) {
paste(row, collapse = ",")
})
minor_cn <- apply(minor_cn, 1, function(row) {
paste(row, collapse = ",")
})
bulk <- data.frame(ID = mut_ids,
b = b, d = d,
# major_cn=paste(rep(1, region_count), collapse = ","), minor_cn=paste(rep(1, region_count), collapse=","))
major_cn=major_cn, minor_cn=minor_cn)
if (!dir.exists(OUTPUT_PATH)) {
dir.create(OUTPUT_PATH, recursive = T)
}
write.table(bulk, file = paste(OUTPUT_PATH, "bulk.txt", sep=""), sep="\t", col.names = T, row.names = F, quote = F)
# We will output loci file needed to look up SNV to their CHR, POS, REF, ALT
loci <- data.frame(ID=mut_ids, fix[,c("CHROM", "POS", "REF", "ALT")])
write.table(loci, file = paste(OUTPUT_PATH, "loci.txt", sep=""), sep="\t", col.names = T, row.names = F, quote = F)
|
c055d24d8f8945f09d5ed14de4c5357fdb8ef43a
|
19c693fe101a36e79233fbfd0011309172cc1a80
|
/scripts/swedish_wild_reads.R
|
94f4b1a368112fb464e858e1dcb98ace45838ad9
|
[] |
no_license
|
tkarasov/controlled_metagenomics
|
bfe8d36142c2af9d9f6d9161ca698fdd903f8897
|
ef3846d1394cc95798db1fe1c7ecbc1ef7e1b148
|
refs/heads/master
| 2020-04-16T23:46:47.953377
| 2019-11-12T08:50:48
| 2019-11-12T08:50:48
| 166,025,291
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 3,478
|
r
|
swedish_wild_reads.R
|
library(reshape2)
library('dplyr')
library('tidyr')
library(ggplot2)
library(RColorBrewer)
library(wesanderson)
require(gridExtra)
'%!in%' <- function(x,y)!('%in%'(x,y))
meta=read.table("~/work_main/abt6_projects9/metagenomic_controlled/data/processed_reads/swedish_samples/meta_family_corrected_per_plant.csv", sep=",", header=T, row.names = 1)
top10=names(sort(rowSums(meta, na.rm=TRUE), decreasing=TRUE )[1:10])
meta_microbiome=meta[top10,]
rest=colSums(meta[rownames(meta) %!in% top10, ], na.rm=TRUE)
meta_microbiome=rbind(meta_microbiome, rest)
rownames(meta_microbiome)[11]="Rest"
tot=colSums(meta_microbiome)
meta_microbiome$Family=rownames(meta_microbiome)
microb_melt=melt(meta_microbiome, id=c("Family"))
microb_melt$Load=as.factor(-1*tot[microb_melt$variable])
#microb_melt$Day2=relevel(microb_melt$Day, "0")
#microb_melt$Genotype2=relevel(as.factor(microb_melt$Genotype), "C")
pdf("~/Dropbox/controlled_metagenomics/results_figures/swedish_metagenome.pdf")
p <- ggplot(data=microb_melt, aes(x=Load, y=value, fill=Family))
p + geom_bar(aes(), stat="identity", position="stack") +
scale_fill_manual(values = c("darkblue", "darkgoldenrod1", "darkseagreen", "darkorchid", "darkolivegreen1", "lightskyblue", "darkgreen", "deeppink", "khaki2", "firebrick", "brown1", "darkorange1", "cyan1", "royalblue4", "darksalmon", "darkblue",
"royalblue4", "dodgerblue3", "steelblue1", "lightskyblue", "darkseagreen", "darkgoldenrod1", "darkseagreen", "darkorchid", "darkolivegreen1", "brown1", "darkorange1", "cyan1", "darkgrey")) +
theme(legend.position="bottom", panel.background = element_blank(), axis.line = element_line(colour = "black"), axis.text.x = element_blank()) + guides(fill=guide_legend(nrow=5)) +
xlab("Plant Individuals") + ylab("Microbial Cov./Plant Cov.")
dev.off()
#Day3 avrB log10 -0.276
#Day3 C log10 -1.65
#Day3 EV log10 1.54
#Hpa
#Day11 0.0714 or -1.146302 (log10)
#Day5 0.00303 or -2.518557 (log10)
#How much Pseudomonas?
pdf("~/Dropbox/controlled_metagenomics/results_figures/sweden_pseud_hpa.pdf")
families=data.frame(t(meta))
pseud=ggplot(data=families, aes(log10(Pseudomonadaceae))) + geom_histogram(fill="GRAY", colour = "BLACK")+theme_bw()+xlab(expression(log[10]~("Pseudomonadaceae coverage"/"Plant coverage")))+geom_vline(aes(xintercept=-0.276, color="Resistant"))+geom_vline(aes(xintercept=1.54, color="Susceptible"))
hpa=ggplot(data=families, aes(log10(Peronosporaceae))) + geom_histogram(fill="GRAY", colour = "BLACK")+theme_bw()+xlab(expression(log[10]~("Peronosporaceae coverage"/"Plant coverage")))+geom_vline(aes(xintercept=-1.146302, color="Day 11"))+geom_vline(aes(xintercept=-2.518557 , color="Day 5"))
grid.arrange(pseud, hpa, ncol=1)
dev.off()
sd_meta=apply(meta, 1, sd, na.rm=TRUE) # MatVar() is undefined in this script; assuming a per-family (row-wise) SD was intended
mean_meta=rowMeans(meta, na.rm=TRUE) # rowSums/dim(meta)[1] divided by the number of rows, not columns; rowMeans gives the per-family mean
together=cbind(mean_meta, sd_meta)
max_t=apply(meta, 1, max)
together=cbind(together, max_t)
together=as.data.frame(together[order(-mean_meta),])
var_plot<-ggplot(data=together, aes(x=c(1:dim(together)[1]),y=mean_meta))
var_plot + geom_point() + geom_errorbar(ymin=mean_meta-sd_meta, ymax=mean_meta+sd_meta, width=.1)
# Pasted results (per-family mean, SD and max of microbial/plant coverage):
# mean_meta sd_meta max_t
# Burkholderiaceae 0.058878881 1.8177008 11.6311032
# Pseudomonadaceae 0.027161930 1.1359455 7.8252101
# Peronosporaceae 0.014652289 0.5844252 3.3207906
# Enterobacteriaceae 0.009363755 0.2652899 1.6110845
# Albuginaceae 0.002868011 0.1838265 1.3010689
# Sphingomonadaceae 0.010266232 0.1302916 0.4788446
|
e5504bfc20c4ca35f6b9e9716300161412ddde7b
|
ac961f7c20e60c955720e0d93483b4d92ce01726
|
/04_functionalAnnotation_vectORA.R
|
3cfbddb00ac54d53e592dba562ae1abf0ef913fd
|
[] |
no_license
|
mengchen18/RFunctionCollection
|
01b2936ea794297a87cf277ef0f583d02a1bb4ac
|
222d22fc8f12a622af01b91711a66c6e9da08be8
|
refs/heads/master
| 2022-05-24T17:49:05.385208
| 2022-05-17T12:58:42
| 2022-05-17T12:58:42
| 127,109,330
| 1
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 6,407
|
r
|
04_functionalAnnotation_vectORA.R
|
#' Vectorized over-representation analysis (ORA) based on the hypergeometric test.
#' @param pathways List of gene sets to check.
#' @param genelist A vector of genes/proteins you want to annotate, often from differential expression analysis.
#' The IDs need to be the same as in pathways
#' @param background An integer indicating the size of the background; or a character vector
#' storing all background genes, whose IDs need to be the same as in pathways
#' @param trimPathway remove IDs from pathways that are not present in the background;
#' only used if background is a gene/protein name list.
#' @param minOverlap the minimum required overlap between pathway and gene list; if the overlap is lower
#' than this value, no test is done on this pathway
#' @param minSize the minimum size of pathways to be tested
#' @param maxSize the maximum size of pathways to be tested
#' @param pathway_desc description of pathways: a named character vector storing the description
#' of each pathway; the names should be the same as the names in the "pathways" argument.
#' @param unconditional.or calculate the odds ratio using the Maximum Likelihood Estimate (the sample odds ratio).
#' Note that the conditional Maximum Likelihood Estimate (MLE) is used in fisher.test.
#' @param mtc.method multiple test correction method, passed to the p.adjust function
#' @param sort one of c("none", "p.value", "OR"), indicating how the result should be sorted.
#' @import fastmatch
require(fastmatch)
vectORA <- function(pathways, genelist, background, trimPathway = FALSE,
minOverlap = 3, minSize=5, maxSize=Inf, pathway_desc = NULL,
unconditional.or = TRUE, mtc.method = "fdr",
sort = c("none", "p.value", "OR")[1]) {
# check id, duplicates
genelist <- unique(genelist)
if (length(background) == 1 && is.integer(background)) {
bkgn <- background
} else if (length(background) > 1 && is.character(background)) {
if (!all(genelist %in% background)) {
background <- c(background, genelist)
message("Some IDs in genelist is not in background, background is expanded!")
}
background <- unique(background)
bkgn <- length(background)
if (trimPathway)
pathways <- lapply(pathways, function(x) x[x %fin% background])
} else
stop("Unknown background type!")
overlap <- lapply(pathways, function(x) x[x %fin% genelist])
nol <- sapply(overlap, length)
ngs <- sapply(pathways, length)
i <- nol >= minOverlap & ngs >= minSize & ngs <= maxSize
pathways <- pathways[i]
overlap <- overlap[i]
nol <- nol[i]
ngs <- ngs[i]
pathway_annot_x <- ""
if (!is.null(pathway_desc))
pathway_annot_x <- pathway_desc[names(pathways)]
bdf <- vectORA.core(
n.overlap = nol,
n.de = length(genelist),
n.gs = ngs,
n.bkg = bkgn,
unconditional.or = unconditional.or, mtc.method = mtc.method)
rs <- cbind(
pathway = names(pathways),
desc = pathway_annot_x,
bdf,
overlap_ids = sapply(overlap, paste0, collapse = ";")
)
sort <- sort[1]
if (sort == "p.value") {
rs <- rs[order(rs$p.value, decreasing = FALSE), ]
} else if (sort == "OR") {
rs <- rs[order(rs$OR, decreasing = TRUE), ]
} else if (sort != "none")
warning("Unknown sort method, the results are not sorted!")
rs
}
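# A minimal usage sketch (hypothetical gene IDs; note that an integer
# background must be passed as e.g. 1000L so that is.integer() holds):
# pws <- list(pw1 = paste0("g", 1:20), pw2 = paste0("g", 15:40))
# vectORA(pws, genelist = paste0("g", 1:10), background = 1000L,
#         minOverlap = 1, minSize = 2)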
#' @param n.overlap the number of overlapping genes between de and gs. The number of white balls drawn
#' without replacement from an urn which contains both black and white balls (compared to hyper).
#' @param n.de the number of DE gene. The number of balls drawn from the urn (compared to hyper).
#' @param n.gs the size of gene set. The number of white balls in the urn (compared to hyper).
#' @param n.bkg the background size.
#' @param unconditional.or calculate odds ratio using Maximum Likelihood Estimate (the sample odds ratio).
#' Note that the conditional Maximum Likelihood Estimate (MLE) is used in fisher.test.
#' @param mtc.method multiple test correction methods, passed to p.adjust function
#' @import fastmatch
#' @examples
#' xq <- rbind(c(4, 2, 4),
#' c(20, 40, 10),
#' c(11, 234, 10),
#' c(200, 1000, 100))
#'
#' vectORA.core(xq[1, ], xq[2, ], xq[3, ], xq[4, ])
#' vectORA.core(xq[1, ], xq[2, ], xq[3, ], xq[4, ], unconditional.or = TRUE)
#'
#' # fisher's test
#' t(apply(xq, 2, function(x1) {
#' v <- fisher.test(rbind(c(x1[1], x1[2]-x1[1]), c(x1[3]-x1[1], x1[4] - x1[2] - x1[3] + x1[1])), alternative = "greater")
#' c(p.value = v$p.value, v$estimate)
#' }))
vectORA.core <- function(n.overlap, n.de, n.gs, n.bkg, unconditional.or = TRUE, mtc.method = "fdr") {
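  # phyper() gives P(X <= q); with q = n.overlap - 1 and lower.tail = FALSE
  # this is P(X >= n.overlap), the one-sided enrichment p-value.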
pval <- phyper(q = n.overlap-1, m = n.gs, n = n.bkg - n.gs, k = n.de, lower.tail = FALSE)
if (unconditional.or)
or <- (n.overlap/(n.de - n.overlap))/((n.gs-n.overlap)/(n.bkg - n.gs - n.de + n.overlap)) else {
or <- function(n.overlap, n.gs, n.de, n.bkg) {
m <- n.gs
n <- n.bkg - n.gs
k <- n.de
x <- n.overlap
lo <- pmax(0L, k - n)
hi <- pmin(k, m)
supportl <- mapply(":", lo, hi, SIMPLIFY = FALSE)
sapply(1:length(x), function(i) {
support <- supportl[[i]]
logdc <- dhyper(support, m[i], n[i], k[i], log = TRUE)
dnhyper <- function(ncp) {
d <- logdc + log(ncp) * support
d <- exp(d - max(d))
d/sum(d)
}
mnhyper <- function(ncp) {
if (ncp == 0)
return(lo[i])
if (ncp == Inf)
return(hi[i])
sum(support * dnhyper(ncp))
}
mle <- function(x) {
if (x == lo[i])
return(0)
if (x == hi[i])
return(Inf)
mu <- mnhyper(1)
if (mu > x)
uniroot(function(t) mnhyper(t) - x, c(0, 1))$root
else if (mu < x)
1/uniroot(function(t) mnhyper(1/t) - x, c(.Machine$double.eps, 1))$root
else 1
}
mle(x[i])
})
}
or <- or(n.overlap=n.overlap, n.gs=n.gs, n.de=n.de, n.bkg=n.bkg)
}
data.frame(
p.value = pval,
p.adjusted = p.adjust(pval, method = mtc.method),
OR = or,
size_overlap = n.overlap,
size_geneset = n.gs,
size_input = n.de,
    size_background = n.bkg,
stringsAsFactors = FALSE
)
}
|
b37b3c2044fd94355a95c6f59301bca5b2f1cbf6
|
2d00505c7940f1bd1dcff122aa1e8bbd5d2edea2
|
/R/overloaded.R
|
677b3f5b8a64b4607f34b417d5ac855cfb4da791
|
[] |
no_license
|
kashenfelter/RGCCTranslationUnit
|
cb647e6c57e78656bb196e68834d60fd664e66cd
|
1bd45f5589516334afa57a75e936d2a36ff943b6
|
refs/heads/master
| 2020-03-23T00:32:22.815755
| 2013-04-21T21:38:45
| 2013-04-21T21:38:45
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 4,264
|
r
|
overloaded.R
|
getMaximalSignature =
function(x, supportDerivedClasses, outArgs = list(), verbose = FALSE) {
i = sapply(x, function(x) length(x$parameters))
if(all(i == 0))
return(character(0))
w = x[[ which.max(i) ]]$parameters
ans = names(w)
if(x[[1]]$name %in% outArgs)
ans = c(ans, ".copy")
if(supportDerivedClasses
&& any(w <- sapply(x, function(x) "virtual" %in% names(x) && x$virtual))) {
if(verbose)
cat("adding .inherited for", x[[1]]$name, "\n")
ans = c(ans, ".inherited")
} else {
if(verbose)
cat("no virtual methods for", x[[1]]$name, "\n")
}
ans
}
computeOverloadedSignatures =
#
# take the resolved methods for all the classes of interest
# and group them by method name and then find the names of the
# parameters for the largest one.
#
# Make this include the .copy and the .inherited arguments that
# may be introduced when we generate the code.
#
# Take into account derived classes and so a) additional overloading (for each class in which they are inherited/defined)
# and b) augmenting the arguments by 1
#
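# For example (roughly): overloads foo(x) and foo(x, y) collapse to the
# maximal signature c("x", "y"); ".copy" is appended when foo is in outArgs
# and ".inherited" when any of the overloads is virtual.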
function(resolvedMethods, routines = list(), outArgs = list(), supportDerivedClasses = TRUE, createGenerics = TRUE, ...)
{
tt = collapseMethods(resolvedMethods)
tt = tt[!sapply(tt, is, "ResolvedNativeClassDestructor")]
tt = c(tt, routines)
names(tt) = sapply(tt, function(x) if(isS4(x)) x@name else x$name)
# for each different method name, find the biggest signature.
signatures = tapply(tt, names(tt), getMaximalSignature, supportDerivedClasses, outArgs)
# f =
# function(methods) min(sapply(methods, function(m) which.min(is.na(sapply(m$parameters, function(x) x$defaultValue)))))
# numNonDefault = tapply(tt, names(tt), f)
# browser()
# take any name that has more than one method/routine
# and also any name for which there is a virtual method in any
# of the classes as we will need this when creating derived classes.
# We already have the .inherited in the signature.
i = table(names(tt))
#XXXX Discard static if not overloaded !
if(FALSE) {
# This uses names. The else clause uses indices.
x = tapply(tt, names(tt), function(x) { any(sapply(x, function(x) "virtual" %in% names(x) && x$virtual))})
# Any method that will be implemented in an R derived class needs to be made generic
# also. (There is one case where it does not and that is when it is a method in a leaf class
# in the hierarchy and so there will only be an implementation for one derived class. But this
# special case doesn't hurt us. So for now, ignore.)
methodNames = if(supportDerivedClasses)
names(sapply(tt, inherits, "ResolvedNativeClassMethod"))
else
character()
ids = unique(c(names(i)[ i > 1], names(x)[x], methodNames))
} else if(length(tt) > 0) {
ids = which(names(tt) %in% names(i)[i > 1])
o = which(sapply(tt, function(x) ("virtual" %in% names(x) && x$virtual) || (supportDerivedClasses && inherits(x, "ResolvedNativeClassMethod"))))
ids = unique(names(tt)[c(ids, o)])
} else
ids = character()
if(createGenerics) {
# numUniqueArgs(tt[ids])
createGenericsCode(signatures[ids], ...)
} else
structure(signatures[ ids ], class = "GenericSignatureList")
}
RGenericDefinition =
#
#
# XXX want the method too in order to fetch the default values?
#
function(name, sig, defaults = character(), addDots = TRUE)
{
if(addDots && !"..." %in% sig)
sig = c(sig, "...")
new("RGenericDefinition",
name = name,
signature = sig)
}
createGenericsCode =
function(sigs, addAllDefaults = FALSE, addDots = TRUE)
{
ans = mapply(RGenericDefinition, names(sigs), sigs, MoreArgs = list(addDots))
if(addAllDefaults)
ans = lapply(ans, function(x) {
defaults = rep("NULL", length(x@signature))
if(!is.na(i <- match("...", x@signature)))
defaults[i] = ""
x@paramDefaults = defaults
x
})
class(ans) = "GenericDefinitionList"
ans
}
|
f6fc6b1d0eaaafb55f429423784334384598c91c
|
a32b253ef7363d5fab6ea06069f68bd126d61553
|
/analysis-code/part1.R
|
fc0588d143d4ccf89aa4d5ae606bc31c1f07caae
|
[] |
no_license
|
jonahsmith/senior-thesis
|
f462561cff0c92523f2f1b871c89eb19090fd23d
|
480b30352c04a70a937e42d19a625265b785c811
|
refs/heads/master
| 2016-09-06T09:21:56.545552
| 2014-05-09T01:50:14
| 2014-05-09T01:50:14
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 3,253
|
r
|
part1.R
|
load('withScores.RData')
#Summary statistics
graduate=0
for (i in 1:length(withScores)){
data <- withScores[[i]]
graduate <- graduate + length(which(data$GraduateBin==1))
}
graduate/(40*length(data$GraduateBin))
#Noncognitive standardized regression
est <- c()
vars <- c()
intercept <- c()
for (i in 1:length(withScores)){
stNonCog <- scale(withScores[[i]]$nonCog, center=TRUE, scale=TRUE)
regression<-glm(withScores[[i]]$GraduateBin~stNonCog, family="binomial")
est[i] <- summary(regression)$coef[[2]] #est
vars[i]<-(summary(regression)$coef[[4]])^2 #within-variance
}
qHat=mean(est)
uHat=mean(vars)
B=var(est)
T=uHat+(1+1/40)*B
overallSE = sqrt(T)
d.f = (39)*(1+(40*uHat)/(41*B))^2
pt(-abs(qHat/overallSE), df=d.f)*2
exp(qHat)
exp(mean(intercept))/(1+exp(mean(intercept)))
exp(mean(intercept+qHat))/(1+exp(mean(intercept+qHat)))
exp(mean(intercept-qHat))/(1+exp(mean(intercept-qHat)))
#Cognitive standardized regression
est <- c()
vars <- c()
intercept <- c()
cogIndices <- which(names(withScores[[1]]) == "Q3LW_SS"):which(names(withScores[[1]]) == "Q3AP_SS")
for (i in 1:length(withScores)){
dataset <- withScores[[i]]
cog <- rowSums(dataset[,cogIndices])/length(cogIndices)
stCog <- scale(cog)
withScores[[i]]$stCog <- stCog
regression<-glm(dataset$GraduateBin~stCog, family="binomial")
intercept[i] <- summary(regression)$coef[[1]]
est[i] <- summary(regression)$coef[[2]] #est
vars[i]<-(summary(regression)$coef[[4]])^2 #variance
}
qHat=mean(est)
uHat=mean(vars)
B=var(est)
T=uHat+(1+1/40)*B
overallSE = sqrt(T)
d.f = (39)*(1+(40*uHat)/(41*B))^2
pt(-abs(qHat/overallSE), df=d.f)*2
exp(qHat)
#Size-adjusted Income
est <- c()
vars <- c()
intercept <- c()
for (i in 1:length(withScores)){
dataset <- withScores[[i]]
stIncome <- scale(dataset$fiasMean)
regression<-glm(dataset$GraduateBin~stIncome, family="binomial")
intercept[i] <- summary(regression)$coef[[1]]
  est[i] <- summary(regression)$coef[[2]] #est (slope; coef[[1]] above is the intercept)
  vars[i]<-(summary(regression)$coef[[4]])^2 #within-variance (squared SE of the slope)
}
qHat=mean(est)
uHat=mean(vars)
B=var(est)
T=uHat+(1+1/40)*B
overallSE = sqrt(T)
d.f = (39)*(1+(40*uHat)/(41*B))^2
pt(-abs(qHat/overallSE), df=d.f)*2
exp(qHat)
exp(mean(intercept))/(1+exp(mean(intercept)))
exp(mean(intercept+qHat))/(1+exp(mean(intercept+qHat)))
exp(mean(intercept-qHat))/(1+exp(mean(intercept-qHat)))
##Code race and gender
for (i in 1:length(withScores)){
withScores[[i]]$CHRACE[!(withScores[[i]]$CHRACE %in% c(1,2))] <- 3
withScores[[i]]$CHRACE <- as.factor(withScores[[i]]$CHRACE)
withScores[[i]]$ER32000 <- as.factor(withScores[[i]]$ER32000)
withScores[[i]]$BIOPR97[withScores[[i]]$BIOPR97 == 0] <- NA
}
#Controlling
est <- c()
vars <- c()
intercept <- c()
for (i in 1:length(withScores)){
data <- withScores[[i]]
stNonCog <- scale(data$nonCog, center=TRUE, scale=TRUE)
stIncome <- scale(data$fiasMean)
data$race <- 1
data$race[data$CHRACE %in% c(2,3)] <- 2
regression<-glm(data$GraduateBin~stNonCog+stIncome+data$ER32000+data$CHRACE+data$BIOPR97+data$ER11766, family="binomial")
est[i] <- summary(regression)$coef[[9]] #est
vars[i]<-(summary(regression)$coef[[18]])^2 #within-variance
}
qHat=mean(est)
uHat=mean(vars)
B=var(est)
T=uHat+(1+1/40)*B
overallSE = sqrt(T)
d.f = (39)*(1+(40*uHat)/(41*B))^2
|
de609c8831bded016f0772f2dd88ff86e967ee37
|
8ec62ca2bcfe19064797618f27a1c29934526ec6
|
/Fibonacci_Seq/R/Fibonacci_Seq.R
|
b6db815d13a87de77c258211967dcf12ccf6f032
|
[] |
no_license
|
slamatic/Assignment-for-R
|
9a11c8f08971376d6cae20b5691cdd0c872e0bd5
|
a87fea9209e5b189d24eed354b7f521d73e5c383
|
refs/heads/master
| 2021-03-12T20:19:08.171901
| 2014-12-07T10:20:32
| 2014-12-07T10:20:32
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 518
|
r
|
Fibonacci_Seq.R
|
fibonacciSeq <- function ( input = 0 ) {
print("This is a Fibonacci Sequence Assignment for R")
#check,
if (input < 0) {
    return("Input cannot be less than 0.")
  } else if (input > 50) {
    return("Exceeded the maximum of 50.")
}
#init,
len = ( input + 1 )
fibo <- array ( 0 , len )
#loop,
for ( n in 1 : length ( fibo ) ) {
if ( n > 2 ) {
fibo[n] = fibo[n-2] + fibo[n-1]
} else if ( n == 2 ) {
fibo[2] = 1
} else if ( n == 1 ) {
fibo[1] = 0
}
}
return(fibo)
}
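# Example call:
# fibonacciSeq(10)
# [1] "This is a Fibonacci Sequence Assignment for R"
# [1]  0  1  1  2  3  5  8 13 21 34 55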
|
4807413b460be74bddca078235b57867cf739a38
|
42c160cf51f06964a1224f5c077b5a80a99e2054
|
/scheduler.tests.R
|
61295a0bc6257ac556d03cf50da9e78f17a0bcf3
|
[] |
no_license
|
charlos123/bot
|
92b96d5b02df65e97f5f26b7477099e25ea04b14
|
3b78ae179e3390fb5bfdd12c73fd4877946cd50f
|
refs/heads/master
| 2021-01-17T07:55:52.713136
| 2017-03-11T09:33:49
| 2017-03-11T09:33:49
| 83,819,606
| 1
| 1
| null | null | null | null |
UTF-8
|
R
| false
| false
| 3,926
|
r
|
scheduler.tests.R
|
source("scheduler.R")
#
# Sample Data
#
scheduler.getSampleSchedule <- function(){
schedule <- list()
dish <- list(name = "meat", weight = 150)
  schedule[[1]] <- list(id = 1, date_time = as.POSIXct("2017-04-03 15:00 EEST"), dishes = list(dish)) # '=' (not '<-') so the element is actually named "dishes"
  schedule[[2]] <- list(
    id = 2,
    date_time = as.POSIXct("2017-04-03 16:00 EEST"),
    dishes = list(dish, dish)
  )
return(schedule)
}
#
# Tests
#
scheduler.showDay.test <- function(){
schedule <- scheduler.getSampleSchedule()
res <- scheduler.showDay(schedule, Sys.Date())
if (length(res) != 2) writeLines("scheduler.showDay Failed") else writeLines("scheduler.showDay Pass")
if (res[[1]]$id != "1")
writeLines("scheduler.showDay Failed") else writeLines("scheduler.showDay Pass")
}
scheduler.showDay.test()
scheduler.getEventsByDate.test <- function(){
schedule <- scheduler.getSampleSchedule()
cur_time <- Sys.time()
#delta_t = one day
delta_t <- 60*60*24
schedule[[1]]$date_time <- cur_time
schedule[[2]]$date_time <- cur_time + delta_t
res <- scheduler.getEventsByDate(schedule, as.Date(cur_time))
if (length(res) != 1) writeLines("scheduler.getEventsByDate Failed") else writeLines("scheduler.getEventsByDate Pass")
res <- scheduler.getEventsByDate(schedule, as.Date(cur_time)+1)
if (length(res) != 1) writeLines("scheduler.getEventsByDate Failed") else writeLines("scheduler.getEventsByDate Pass")
}
scheduler.getEventsByDate.test()
scheduler.getTodaysPastEvents.test <- function(){
schedule <- scheduler.getSampleSchedule()
res <- scheduler.getTodaysPastEvents(schedule, as.POSIXct("2017-04-03 15:30 EEST"))
if (length(res) != 1) writeLines("scheduler.getTodaysPastEvents Failed") else writeLines("scheduler.getTodaysPastEvents Pass")
res <- scheduler.getTodaysPastEvents(schedule, as.POSIXct("2017-04-03 14:59 EEST"))
if (length(res) != 0) writeLines("scheduler.getTodaysPastEvents Failed") else writeLines("scheduler.getTodaysPastEvents Pass")
}
scheduler.getTodaysPastEvents.test()
scheduler.execEvent.test <- function(){
schedule <- scheduler.getSampleSchedule()
user_event <- list(
date_time = as.POSIXct("2017-04-03 15:30 EEST"),
exec_date_time = as.POSIXct("2017-04-03 15:20 EEST"),
status = "all good"
)
res <- scheduler.execEvent(schedule, 1, user_event)
if (length(res) != 2) writeLines("scheduler.execEvent Failed") else writeLines("scheduler.execEvent Pass")
if (res[[1]]["user_event"][[1]]$status != "all good") writeLines("scheduler.execEvent Failed") else writeLines("scheduler.execEvent Pass")
}
scheduler.execEvent.test()
scheduler.findEventsByParam.test <- function() { # args removed: the test builds its own schedule and is called with no arguments
schedule <- scheduler.getSampleSchedule()
res <- scheduler.findEventsByParam(schedule, "id", 1)
if (length(res) != 1) writeLines("scheduler.findEventsByParam.test Failed") else writeLines("scheduler.findEventsByParam.test Pass")
res <- scheduler.findEventsByParam(schedule, "date", as.Date("2017-04-03"))
if (length(res) != 2) writeLines("scheduler.findEventsByParam.test Failed") else writeLines("scheduler.findEventsByParam.test Pass")
#+1 days to second event
schedule[[2]]$date_time = as.POSIXct("2017-04-04 16:00 EEST")
res <- scheduler.findEventsByParam(schedule, "date", as.Date("2017-04-03"))
if (length(res) != 1) writeLines("scheduler.findEventsByParam.test Failed") else writeLines("scheduler.findEventsByParam.test Pass")
}
scheduler.findEventsByParam.test()
scheduler.findEventPositionsByParam.test <- function() { # args removed: the test builds its own schedule and is called with no arguments
schedule <- scheduler.getSampleSchedule()
res <- scheduler.findEventPositionsByParam(schedule, "id", 1)
if (all(res != c(TRUE,FALSE))) writeLines("scheduler.findEventPositionsByParam.test Failed") else writeLines("scheduler.findEventPositionsByParam.test Pass")
}
scheduler.findEventPositionsByParam.test()
|
99595d61e5927452d6c5e27800d760e17dc0d1f6
|
5e80ccca0c6c6361bafe935fe7b0900c0973457e
|
/man/reverse_element.Rd
|
114bdecbf5166809ab57b10a1f88954b78550918
|
[
"MIT"
] |
permissive
|
GuangchuangYu/ggreverse
|
9dcb13203de82d41fa955e37b9d57843693c2053
|
bd6dd3117a5d61c198f6e12e2baee336a6842c70
|
refs/heads/master
| 2020-05-25T09:57:34.164944
| 2019-05-21T02:54:05
| 2019-05-21T02:54:05
| 187,750,028
| 1
| 0
| null | 2019-05-21T02:49:49
| 2019-05-21T02:49:49
| null |
UTF-8
|
R
| false
| true
| 404
|
rd
|
reverse_element.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/reverse-theme.R
\name{reverse_element}
\alias{reverse_element}
\title{Convert theme \code{element} into code to create it}
\usage{
reverse_element(element)
}
\arguments{
\item{element}{a ggplot2 theme element}
}
\value{
Character string "element_XXX(...)"
}
\description{
Convert theme \code{element} into code to create it
}
|
c1e13ac0dbae7b623837fe8f73e5cb4e401a3505
|
55133b0dbbe76e203af3024d51861e017585ba95
|
/fundamentos-de-analytics/lab004/resposta/gabriel-lab04-questao6.R
|
5d0e0bb5c400064f5744e9b689e0f4d5b22b8400
|
[] |
no_license
|
fernandojunior/analytics-gabriel
|
2244f27e49e130cbfaa6d46bc172296dfbcc93e6
|
364c725d79264335c2c50b5671139d65b683add7
|
refs/heads/master
| 2021-01-22T16:10:49.963023
| 2012-05-19T23:42:38
| 2012-05-19T23:42:38
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 4,338
|
r
|
gabriel-lab04-questao6.R
|
library("ggplot2")
source("intervalos-de-confianca.R")
## 6. Find the Brazilian state A where federal deputies spend the most on
## average. Also find the Brazilian state B where federal deputies spend the
## least, on average. Can we state with 95% confidence that the least-spending
## deputy of A spends more than the highest-spending deputy of B?
dados <- read.csv(file = "dados-deputados.csv")
nivel.de.significancia = 0.05
# Confidence intervals
ic.gastos.totais.por.estado <- aggregate(dados$gastos.total,
list(estado = dados$estado),
function(x) intervalo.de.confianca.para.a.media(x,
nivel.de.significancia))
names(ic.gastos.totais.por.estado) <- c("estado", "ic")
# Means
media.gastos.totais.por.estado <-
aggregate(dados$gastos.total,
list(estado = dados$estado),
mean)
names(media.gastos.totais.por.estado) <- c("estado", "media")
# Counts
n.gastos.totais.por.estado <-
aggregate(dados$gastos.total,
list(estado = dados$estado),
length)
names(n.gastos.totais.por.estado) <- c("estado", "n")
# Standard deviation
sd.gastos.totais.por.estado <-
aggregate(dados$gastos.total,
list(estado = dados$estado),
sd)
names(sd.gastos.totais.por.estado) <- c("estado", "desvio.padrao")
# Merge
gastos.totais.por.estado <- merge(media.gastos.totais.por.estado,
ic.gastos.totais.por.estado,
by = c("estado"))
gastos.totais.por.estado <- merge(gastos.totais.por.estado,
n.gastos.totais.por.estado,
by = c("estado"))
gastos.totais.por.estado <- merge(gastos.totais.por.estado,
sd.gastos.totais.por.estado,
by = c("estado"))
gastos.totais.por.estado
# Plot
# Order the states by mean spending
gastos.totais.por.estado$estado <- reorder(gastos.totais.por.estado$estado,
-gastos.totais.por.estado$media,
order = TRUE)
png(filename = "output-questao6-gastos-por-estado-media-gastos.png", width = 960, height = 480)
ggplot(gastos.totais.por.estado, aes(estado, media, fill = estado)) +
geom_bar() +
geom_errorbar(aes(ymin = ic[, 1], ymax = ic[, 2], width = 0.2)) +
  scale_x_discrete(name = "State") +
  scale_y_continuous(name = "Mean spending") +
  opts(legend.position = "none",
       title = "Mean spending of federal deputies by state in 2011")
dev.off()
# Analysis of the extreme deputies: the minimum of the highest-spending state and the maximum of the lowest-spending state
# Identify the deputies
estado.maior.gasto = with(gastos.totais.por.estado,
gastos.totais.por.estado[media == max(media),]$estado)
estado.menor.gasto = with(gastos.totais.por.estado,
gastos.totais.por.estado[media == min(media),]$estado)
deputados.estado.maior.gasto = with(dados,
dados[estado == as.character(estado.maior.gasto), ])
deputados.estado.menor.gasto = with(dados,
dados[estado == as.character(estado.menor.gasto), ])
deputado.menor.gasto.estado.maior.gasto =
with(deputados.estado.maior.gasto,
deputados.estado.maior.gasto[gastos.total == min(gastos.total), ])
deputado.maior.gasto.estado.menor.gasto =
with(deputados.estado.menor.gasto,
deputados.estado.menor.gasto[gastos.total == max(gastos.total), ])
# Direct comparison of the two deputies' spending
print(paste("Highest-spending state:", deputado.menor.gasto.estado.maior.gasto$estado))
print(paste("Its lowest-spending deputy:", deputado.menor.gasto.estado.maior.gasto$nome))
print(paste("Spent", deputado.menor.gasto.estado.maior.gasto$gastos.total))
print(paste("Lowest-spending state:", deputado.maior.gasto.estado.menor.gasto$estado))
print(paste("Its highest-spending deputy:", deputado.maior.gasto.estado.menor.gasto$nome))
print(paste("Spent", deputado.maior.gasto.estado.menor.gasto$gastos.total))
|
7a3c3c16ac96ecc24f6b85daa4f5ae266b86076c
|
8e8008d28e9aecfd56d5ccfb435541494cf8b5c2
|
/databricks-jump-start/1-Getting Started with Spark/Spark on Databricks/8 SparkR_Visualization.r
|
9d0261743fe5b0934dbb267b0b92253549a59120
|
[] |
no_license
|
bhavink/databricks
|
b383ec666fc03dc376a653893d119c3ec44d60de
|
b67856f9fcdea6db2a3e3f5a8e4ac0ed0f852e98
|
refs/heads/master
| 2023-09-05T07:34:24.612942
| 2023-08-28T14:45:45
| 2023-08-28T14:45:45
| 202,873,726
| 45
| 29
| null | 2023-04-19T18:44:34
| 2019-08-17T11:40:59
|
Jupyter Notebook
|
UTF-8
|
R
| false
| false
| 5,068
|
r
|
8 SparkR_Visualization.r
|
# Databricks notebook source
# MAGIC %md-sandbox
# MAGIC <div style="text-align: center; line-height: 0; padding-top: 9px;">
# MAGIC <img src="https://cdn2.hubspot.net/hubfs/438089/docs/training/dblearning-banner.png" alt="Databricks Learning" width="555" height="64">
# MAGIC </div>
# COMMAND ----------
# MAGIC %md
# MAGIC
# MAGIC <img src="http://curriculum-release.s3-website-us-west-2.amazonaws.com/images/Apache-Spark-Logo_TM_200px.png" width=80 height=40> <br>
# MAGIC
# MAGIC # Visualization
# MAGIC
# MAGIC * Time Estimate: 30 minutes
# MAGIC * Learning Objective: Understand what data visualization options are supported and what are not supported
# MAGIC * Main Topic: Illustrations of various visualization techniques
# MAGIC 1. R base plot
# MAGIC 2. Lattice
# MAGIC 3. ggplot
# MAGIC 4. Other R libraries
# MAGIC 5. Databricks built-in visualization
# MAGIC 6. Databricks Widgets
# MAGIC
# MAGIC
# MAGIC
# MAGIC ## Additional Resources
# MAGIC * **Plot.ly on Databricks**: https://docs.azuredatabricks.net/user-guide/visualizations/plotly.html
# COMMAND ----------
library(SparkR)
# COMMAND ----------
# MAGIC %md
# MAGIC # Introduction
# MAGIC
# MAGIC
# MAGIC Data visualization in SparkR is fundamentally the same as in R. Users can apply any visualization technique that is compatible with the R graphics system. These include the base R plots and packages such as `graphics`, `grid`, `lattice`, and `ggplot2`. When using a Databricks notebook, users get additional visualization capabilities such as `display()`, which visualizes the first 1,000 observations, and `widgets` such as text boxes, dropdown lists, etc.
# MAGIC
# MAGIC Keep in mind, however, that SparkR is an interface to Spark and by design facilitates distributed computation on large quantities of data. Due to the distributed nature of the data and computation, SparkR does not provide any special functionality for visualizing an entire large dataset. When the data size is large (which often motivates the use of SparkR), users will need to either select a subset of the data or compute summary statistics in order to render the visualization effectively.
# MAGIC
# MAGIC You can run RStudio on Spark clusters, and you can launch Shiny applications from within RStudio.
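# COMMAND ----------
# A common pattern for large SparkDataFrames (a sketch, not part of the
# original notebook; `df` is a placeholder SparkDataFrame): aggregate in
# Spark, collect() only the small summary, then plot locally.
# agg <- collect(summarize(groupBy(df, df$Species), n = count(df$Species)))
# barplot(agg$n, names.arg = agg$Species)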
# COMMAND ----------
# MAGIC %md
# MAGIC # R Base Plots
# COMMAND ----------
options(repr.plot.width = 1000)
# COMMAND ----------
require(stats)
set.seed(14)
x <- rchisq(100, df = 4)
## Comparing data with a model distribution should be done with qqplot()!
qqplot(x, qchisq(ppoints(x), df = 4)); abline(0, 1, col = 2, lty = 2)
## if you really insist on using hist() ... :
hist(x, freq = FALSE, ylim = c(0, 0.2), font=2, main="Histogram of Chi-sq Random Variable", las=1)
curve(dchisq(x, df = 4), col = 2, lty = 2, lwd = 2, add = TRUE)
# COMMAND ----------
# MAGIC %md
# MAGIC # lattice
# COMMAND ----------
head(iris)
# COMMAND ----------
options(repr.plot.height = 500)
# Lattice Examples
library(lattice)
attach(iris)
bin.Sepal.Length <- cut(Sepal.Length, breaks=4)
bwplot (~ Sepal.Width | Species * bin.Sepal.Length, main="Sepal Width Distribution by Species and Sepal Length Quartiles")
# COMMAND ----------
attach(mtcars)
# kernel density plots by factor level
densityplot(~mpg|cyl,
main="Density Plot by Number of Cylinders",
xlab="Miles per Gallon")
# COMMAND ----------
# MAGIC %md
# MAGIC # ggplot
# COMMAND ----------
library(ggplot2)
# COMMAND ----------
options(repr.plot.height = 600)
# COMMAND ----------
ggplot(as.data.frame(diamonds), aes(x = carat, y = price, color = color)) + geom_point(alpha = 0.2) + facet_grid(.~cut) + theme_bw()
# COMMAND ----------
# MAGIC %md
# MAGIC ## Other R Visualization Libraries
# COMMAND ----------
install.packages("DandEFA", repos = "http://cran.us.r-project.org")
library(DandEFA)
data(timss2011)
timss2011 <- na.omit(timss2011)
dandpal <- rev(rainbow(100, start = 0, end = 0.2))
facl <- factload(timss2011,nfac=5,method="prax",cormeth="spearman")
dandelion(facl,bound=0,mcex=c(1,1.2),palet=dandpal)
facl <- factload(timss2011,nfac=8,method="mle",cormeth="pearson")
dandelion(facl,bound=0,mcex=c(1,1.2),palet=dandpal)
# COMMAND ----------
# MAGIC %md ## Databricks built-in visualizations
# COMMAND ----------
library(ggplot2)
# COMMAND ----------
display(diamonds)
# COMMAND ----------
display(iris)
# COMMAND ----------
# MAGIC %md ## Databricks widgets
# COMMAND ----------
require(SparkR)
require(magrittr)
iris.df <- createDataFrame(iris)
# COMMAND ----------
dbutils.widgets.help() # See list of supported widgets
# COMMAND ----------
dbutils.widgets.combobox("species2", "setosa", as.list(as.character(unique(iris$Species))))
# COMMAND ----------
dbutils.widgets.dropdown("species", "setosa", as.list(as.character(unique(iris$Species))))
# COMMAND ----------
iris.df %>% where(.$Species == dbutils.widgets.get("species2")) %>% display
# COMMAND ----------
# MAGIC %md Modify the widget at the top and notice that the cell re-runs and produces new results
|
c8c376ce45ac203bbf613adbf5f90c24e06bfd02
|
d39badbd710155fed93ba7676bb42e0378523907
|
/titanicSVM.R
|
4d9b1b0de0946446db1fa678a4c1da9a03c04586
|
[] |
no_license
|
snaketron/Titanic
|
f851a2537fb2c4049545965f0f76cfe3cd368921
|
bbb4d94ed2d86c6ff55709f3ba51439d5cd44208
|
refs/heads/master
| 2021-01-19T08:22:05.368104
| 2015-12-24T20:25:01
| 2015-12-24T20:25:01
| 40,611,277
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 12,355
|
r
|
titanicSVM.R
|
##################### inputs and dependencies #################
train <- read.csv(file = "train.csv")
test <- read.csv(file = "test.csv")
require("e1071")
require("randomForest")
require("plyr")
train$Survived <- as.factor(train$Survived)
test$Survived <- NA
test$Survived <- as.factor(test$Survived)
p <- read.csv(file = "test.csv")
p$Survived <- NA
p$Survived <- as.factor(p$Survived)
pass.id <- p$PassengerId
rm(p)
##################### cluster cabins #################
train$Cabin <- as.character(train$Cabin)
decks <- LETTERS
for(d in decks) {
train$Cabin[which(grepl(pattern = paste(d, "+", sep = ''), x = as.character(train$Cabin), ignore.case = T) == T)] <- d
}
train$Cabin <- as.factor(train$Cabin)
rm(d)
rm(decks)
test$Cabin <- as.character(test$Cabin)
decks <- LETTERS
for(d in decks) {
test$Cabin[which(grepl(pattern = paste(d, "+", sep = ''), x = as.character(test$Cabin), ignore.case = T) == T)] <- d
}
test$Cabin <- as.factor(test$Cabin)
rm(d)
rm(decks)
################# get titles #################
train$Young <- F
train$Young[which(grepl(pattern = paste("+", "Master\\.", "+", sep = ''), x = as.character(train$Name), ignore.case = T) == T)] <- T
train$Young[which(grepl(pattern = paste("+", "Miss\\.", "+", sep = ''), x = as.character(train$Name), ignore.case = T) == T)] <- T
test$Young <- F
test$Young[which(grepl(pattern = paste("+", "Master\\.", "+", sep = ''), x = as.character(test$Name), ignore.case = T) == T)] <- T
test$Young[which(grepl(pattern = paste("+", "Miss\\.", "+", sep = ''), x = as.character(test$Name), ignore.case = T) == T)] <- T
getTitle <- function(data) {
sp <- strsplit(as.character(data$Name), split = "\\.")
sp <- unlist(sp)
r <- c()
for(s in sp) {
r <- c(r, unlist(strsplit(x = s, split = "\\, "))[2])
}
r <- r[!is.na(r)]
  # plyr::count(r)  # (debug: inspect title frequencies; the result was unused)
return (r)
}
train$Title <- as.factor(getTitle(data = train))
test$Title <- as.factor(getTitle(data = test))
############## get high class ###########
high.class <- c("the Countess", "Sir", "Mme", "Mlle", "Dr")
train$Title <- as.character(train$Title)
train$Title[which(train$Title %in% high.class)] <- "highclass"
test$Title <- as.character(test$Title)
test$Title[which(test$Title %in% high.class)] <- "highclass"
train$Title <- as.factor(train$Title)
test$Title <- as.factor(test$Title)
levels(test$Title) <- levels(train$Title)
rm(high.class)
########### get alone ####################
train$Alone <- ifelse(test = train$SibSp == 0 & train$Parch == 0, yes = T, no = F)
test$Alone <- ifelse(test = test$SibSp == 0 & test$Parch == 0, yes = T, no = F)
########### impute age ####################
temp <- rbind(train, test)
imputeAge <- function(boots = 100, in.ratio = 0.66, temp) {
age.predicts <- c()
train.temp <- temp[!is.na(temp$Age), ]
test.temp <- temp[is.na(temp$Age), ]
for(i in 1:boots) {
s <- sample(x = 1:nrow(train.temp), size = round(x = nrow(train.temp)*in.ratio, digits = 0), replace = T)
train.current <- train.temp[s, ]
rf.imput <- randomForest(Age~Pclass+Parch+SibSp+Young+Title+Fare+Embarked, data = train.current, ntree = 3000, na.action = na.omit)
x <- predict(object = rf.imput, newdata = test.temp[, -1], type = "response")
age.predicts <- rbind(age.predicts, x)
cat("i:", i, "\n")
}
return(age.predicts)
}
#updated train
train.temp <- temp[!is.na(temp$Age), ]
test.temp <- temp[is.na(temp$Age), ]
age.predicts <- imputeAge(boots = 100, in.ratio = 0.66, temp = temp)
temp$Age[which(is.na(temp$Age))] <- apply(age.predicts, MARGIN = 2, FUN = median)
train$Age <- temp$Age[1:891]
test$Age <- temp$Age[892:1309]
rm(x)
rm(temp)
rm(rf.imput)
rm(test.temp)
rm(train.temp)
rm(age.predicts)
rm(i)
################# impute cabin ###############
imputeCabin <- function(boots = 100, in.ratio = 0.66, train, test) {
temp <- rbind(train, test)
temp <- temp[, c("Cabin", "Pclass", "Fare", "Embarked")]
temp$Cabin <- as.character(temp$Cabin)
train.temp <- temp[temp$Cabin != "", ]
test.temp <- temp[temp$Cabin == "", ]
cabin.predicts <- c()
for(i in 1:boots) {
s <- sample(x = 1:nrow(train.temp), size = round(x = nrow(train.temp)*in.ratio, digits = 0), replace = T)
train.current <- train.temp[s, ]
train.current$Cabin <- as.factor(as.character(train.current$Cabin))
rf.imput <- randomForest(Cabin~., data = train.current, ntree = 2000, na.action = na.omit)
x <- predict(object = rf.imput, newdata = test.temp[, -1], type = "response")
cabin.predicts <- rbind(cabin.predicts, as.character(x))
cat("i:", i, "\n")
}
max.cabin <- apply(cabin.predicts, MARGIN = 2, FUN = count)
final.cabins <- c()
for(mc in max.cabin) {
if(max(mc$freq) >= (0.7*boots)) {
c <- as.character(mc$x[which(mc$freq >= (0.7*boots))])
}
else {
c <- as.character(mc$x[which(mc$freq == max(mc$freq))[1]])
}
final.cabins <- c(final.cabins, c)
}
  # browser()  # debugging leftover; would halt a non-interactive run
temp$Cabin <- as.character(temp$Cabin)
temp$Cabin[temp$Cabin == ""] <- final.cabins
return(temp)
}
test$Fare[153] <- 8
imputed.cabins <- imputeCabin(boots = 200, in.ratio = 0.66, train = train, test = test)
train$Cabin <- imputed.cabins$Cabin[1:891]
test$Cabin <- imputed.cabins$Cabin[892:1309]
rm(imputed.cabins)
train$Cabin <- as.factor(as.character(train$Cabin))
test$Cabin <- as.factor(as.character(test$Cabin))
############# ticked parse ##########
train$Ticket <- as.character(train$Ticket)
test$Ticket <- as.character(test$Ticket)
train$TicketNew <- NA
for(t in 1:nrow(train)) {
new.ticket <- unlist(strsplit(train$Ticket[t], split = " "))
train$TicketNew[t] <- new.ticket[1]
}
test$TicketNew <- NA
for(t in 1:nrow(test)) {
new.ticket <- unlist(strsplit(test$Ticket[t], split = " "))
if(length(new.ticket) > 1) {
test$TicketNew[t] <- new.ticket[1]
}
else {
test$TicketNew[t] <- "other"
}
}
rm(new.ticket)
rm(t)
############## impute embarked ##############
imputeEmbarked <- function(boots = 100, in.ratio = 0.66, train, test) {
temp <- rbind(train, test)
temp$Embarked <- as.character(temp$Embarked)
temp <- temp[, c("Embarked", "Cabin", "Alone", "SibSp", "Parch")]
train.temp <- temp[temp$Embarked != "", ]
test.temp <- temp[temp$Embarked == "", ]
temp$Embarked <- as.factor(as.character(temp$Embarked))
  # browser()  # debugging leftover; would halt a non-interactive run
embarked.predicts <- c()
for(i in 1:boots) {
s <- sample(x = 1:nrow(train.temp), size = round(x = nrow(train.temp)*in.ratio, digits = 0), replace = T)
train.current <- train.temp[s, ]
train.current$Embarked <- as.factor(as.character(train.current$Embarked))
rf.imput <- randomForest(Embarked~Cabin+Alone+SibSp+Parch, data = train.current, ntree = 2000, na.action = na.omit)
x <- predict(object = rf.imput, newdata = test.temp[, -1], type = "response")
embarked.predicts <- rbind(embarked.predicts, as.character(x))
cat("i:", i, "\n")
}
  # browser()  # debugging leftover; would halt a non-interactive run
max.embarked <- apply(embarked.predicts, MARGIN = 2, FUN = count)
final.embarked <- c()
for(me in max.embarked) {
if(max(me$freq) >= (0.7*boots)) {
c <- as.character(me$x[which(me$freq >= (0.7*boots))])
}
else {
c <- as.character(me$x[which(me$freq == max(me$freq))[1]])
}
final.embarked <- c(final.embarked, c)
}
  # browser()  # debugging leftover; would halt a non-interactive run
temp$Embarked <- as.character(temp$Embarked)
temp$Embarked[temp$Embarked == ""] <- final.embarked
return(temp)
}
imputed.embarked <- imputeEmbarked(boots = 200, in.ratio = 0.66, train = train, test = test)
train$EmbarkedNew <- imputed.embarked$Embarked[1:891]
test$EmbarkedNew <- imputed.embarked$Embarked[892:1309]
rm(imputed.embarked)
train$EmbarkedNew <- as.factor(as.character(train$EmbarkedNew))
test$EmbarkedNew <- as.factor(as.character(test$EmbarkedNew))
################ relevel ###############
levels(test$Sex) <- levels(train$Sex)
# levels(test$cabin.binary) <- levels(train$cabin.binary)  # cabin.binary is never created in this script; would error
levels(test$Embarked) <- levels(train$Embarked)
levels(test$EmbarkedNew) <- levels(train$EmbarkedNew)
# levels(test$CabinDiscrete) <- levels(train$CabinDiscrete)  # CabinDiscrete is never created in this script; would error
levels(test$Survived) <- levels(train$Survived)
train$Young <- as.factor(train$Young)
test$Young <- as.factor(test$Young)
levels(test$Young) <- levels(train$Young)
levels(test$Cabin) <- levels(train$Cabin)
levels(train$Title) <- levels(test$Title)
levels(train$Cabin) <- levels(test$Cabin)
levels(test$Alone) <- levels(train$Alone)
################# remove bad cols #################
# train$Name <- NULL
# test$Name <- NULL
#
# train$Ticket <- NULL
# test$Ticket <- NULL
#
# train$PassengerId <- NULL
# test$PassengerId <- NULL
save(train, file = "train1.RData")
save(test, file = "test1.RData")
############################ svm #########################
load(file = "train.RData")
load(file = "test.RData")
z <- rbind(train, test)
z$Cabin <- as.factor(as.character(z$Cabin))
z$TicketNew <- as.factor(as.character(z$TicketNew))
z$EmbarkedNew <- as.factor(as.character(z$EmbarkedNew))
# tune svm params
tuned <- tune.svm(Survived~Title+Sex+Pclass+Age+SibSp+Parch+Embarked+Cabin, data = z[1:891, ], type = "C-classification", gamma = 10^(seq(from = -1, to = 1, by = 0.25)), cost = 10^(seq(from = -1, to = 1, by = 0.25)))
summary(tuned)
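# The grid-search winner can be read off the tune object directly (a sketch):
# tuned$best.parameters  # data.frame with the chosen gamma and cost
# tuned$best.model       # svm refit with those parameters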
# rm(tuned)
#
#
# is.numeric(z$TicketNew[2])
#
# save(train, file = "train.cabin.RData")
# save(test, file = "test.cabin.RData")
# learn
svm <- e1071::svm(Survived~Title+Sex+Pclass+Age+SibSp+Parch+Embarked+Cabin, data = z[1:891, ], type = "C-classification", cost = 0.1778279, gamma = 0.1, kernel="radial") #0.1778279 #0.3162
length(which(as.numeric(as.character(svm$fitted)) != as.numeric(as.character(z$Survived[1:891]))))
# svm <- e1071::svm(Survived~Title+Sex+Pclass+Age+SibSp+Parch+EmbarkedNew+Cabin+TicketNew, data = z[1:891, ], type = "C-classification", cost = 0.17, gamma = 0.1, kernel="radial")
# length(which(as.numeric(as.character(svm$fitted)) != as.numeric(as.character(z$Survived[1:891]))))
predict.svm <- predict(object = svm, newdata = z[892:1309, ], type = "response")
o <- read.csv(file = "0.799.csv")
which(as.numeric(as.character(predict.svm)) != o$Survived)
rm(o)
predict.svm <- predict(object = svm, newdata = z[892:1309, ], type = "response")
l <- read.csv(file = "newest.csv")
which(as.numeric(as.character(predict.svm)) != l$Survived)
rm(l)
# export
export.svm <- data.frame(PassengerId = pass.id, Survived = predict.svm, row.names = NULL)
which(is.na(export.svm$Survived))
write.table(export.svm, file = "newest2.csv", quote = F, sep = ",", row.names = F, col.names = T)
bootstrapSurvived <- function(boots = 100, in.ratio = 0.66, z) {
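  # Majority vote over bootstrap refits: a label is kept outright when it
  # wins at least 70% of the runs, otherwise the most frequent label is used.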
predicted.survival <- c()
for(i in 1:boots) {
s <- sample(x = 1:891, size = round(x = 891*in.ratio, digits = 0), replace = T)
svm <- e1071::svm(Survived~Title+Sex+Pclass+Age+SibSp+Parch+Embarked+Cabin, data = z[s, ],
type = "C-classification", cost = 0.3162, gamma = 0.1, kernel="radial")
predict.svm <- predict(object = svm, newdata = z[892:1309, ], type = "response")
predicted.survival <- rbind(predicted.survival, as.character(predict.svm))
cat("i:", i, "\n")
}
max.survival <- apply(predicted.survival, MARGIN = 2, FUN = count)
final.survival <- c()
final.freq <- c()
for(mc in max.survival) {
if(max(mc$freq) >= (0.7*boots)) {
c <- as.character(mc$x[which(mc$freq >= (0.7*boots))])
}
else {
c <- as.character(mc$x[which(mc$freq == max(mc$freq))[1]])
}
f <- (max(mc$freq)[1])/boots
final.freq <- c(final.freq, f)
final.survival <- c(final.survival, c)
}
result <- list(final.survival = final.survival, final.freq = final.freq)
return (result)
}
boot.survival <- bootstrapSurvived(boots = 500, in.ratio = 0.66, z)
boot.survival$final.survival
which(as.numeric(boot.survival$final.survival) != o$Survived)
which(as.numeric(boot.survival$final.survival) != l$Survived)
# export
export.svm <- data.frame(PassengerId = pass.id, Survived = as.numeric(boot.survival$final.survival), row.names = NULL)
which(is.na(export.svm$Survived))
write.table(export.svm, file = "newest3.csv", quote = F, sep = ",", row.names = F, col.names = T)
plot(boot.survival$final.freq)
plot(test$Title, boot.survival$final.freq)
plot(test$Title, boot.survival$final.survival)
|
e3a406208accf72c0b787d0826f77a220e1f13b7
|
c53e367a5a155cfb1ee3a41e8b0351aeaa8d331d
|
/xts/unitTests/runit.plot.R
|
40fd20fcac8177841cae95d96d84ffece1df3619
|
[
"MIT"
] |
permissive
|
solgenomics/R_libs
|
bcf34e00bf2edef54894f6295c4f38f1e480b3fc
|
e8cdf30fd5f32babf39c76a01df5f5544062224e
|
refs/heads/master
| 2023-07-08T10:06:04.304775
| 2022-05-09T15:41:26
| 2022-05-09T15:41:26
| 186,859,606
| 0
| 2
|
MIT
| 2023-03-07T08:59:16
| 2019-05-15T15:57:13
|
C++
|
UTF-8
|
R
| false
| false
| 980
|
r
|
runit.plot.R
|
# Tests for plotting functions
data(sample_matrix)
x <- as.xts(sample_matrix, dateFormat = "Date")
# axTicksByTime
test.format_xts_yearqtr <- function() {
xq <- to.quarterly(x)
xtbt <- axTicksByTime(xq)
checkIdentical(names(xtbt), c("2007-Q1", "2007-Q2"))
}
test.format_zoo_yearqtr <- function() {
xq <- to.quarterly(x)
xtbt <- axTicksByTime(as.zoo(xq))
checkIdentical(names(xtbt), c("2007-Q1", "2007-Q2"))
}
test.axTicksByTime_ticks.on_quarter <- function() {
tick_marks <- setNames(c(1, 4, 7, 10, 13, 16, 19, 22, 25, 25),
c("\nJan\n2016", "\nApr\n2016", "\nJul\n2016", "\nOct\n2016",
"\nJan\n2017", "\nApr\n2017", "\nJul\n2017", "\nOct\n2017",
"\nJan\n2018", "\nJan\n2018"))
if (.Platform$OS.type != "unix") {
names(tick_marks) <- gsub("\n(.*)\n", "\\1 ", names(tick_marks))
}
ym <- as.yearmon("2018-01") - 24:0 / 12
x <- xts(seq_along(ym), ym)
xtbt <- axTicksByTime(x, ticks.on = "quarters")
checkIdentical(xtbt, tick_marks)
}
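# axTicksByTime returns a named vector: names are the formatted tick labels,
# values are row positions in the xts object. A minimal extra sanity check in
# the same RUnit style (a hedged sketch):
test.axTicksByTime_returns_named_vector <- function() {
  xtbt <- axTicksByTime(x)
  checkTrue(is.numeric(xtbt))
  checkTrue(!is.null(names(xtbt)))
}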
|
3d7900ebb1e3294b72d56ce0f191daab2c7d5a5c
|
b696f21b206f3cfa5bff79f3536c40880f76e7bf
|
/R/corr.R
|
a8bb07e04dee3ecbe4352f2123d92b0431ef3207
|
[] |
no_license
|
cran/lcmr
|
03b5b31dcedab0e5fb8d2a896e6ab536e0cbb342
|
2f807b8be13b6b1390f246d0e67078374d267194
|
refs/heads/master
| 2020-06-02T17:02:15.628802
| 2011-02-13T00:00:00
| 2011-02-13T00:00:00
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 4,493
|
r
|
corr.R
|
### Fit a latent class model with random effects
###
### Copyright 2005-2011 Liangliang Wang <l.wang@stat.ubc.ca>,
### Nandini Dendukuri <nandini.dendukuri@mcgill.ca>.
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# A copy of the GNU General Public License is available at
# http://www.r-project.org/Licenses/
#corr <- function(x, ...) UseMethod("corr")
#================== corr ========================
corr <- function(x, start, end, ...){
if(missing(end)){
end <- x$iter
}
C <- 2^x$nTest
nClass <- x$nClass
N <- x$N
nA <- x$nA
nB <- x$nB
nR <- x$nR
tb <- x$tb
ta <- x$ta
burnin <- start-1
n.col <- end - burnin
prev <- x$prev[start:end, ]
a.tmp <- cbind(x$a, 0)
a <- a.tmp[start:end, ]
bNzero <- x$bNzero
if(bNzero){
b.tmp <- cbind(x$b, 0)
b <- b.tmp[start:end, ]
}else{
b <- matrix(0, nrow=n.col, ncol=1)
}
nCorr <- (x$nTest-1)*x$nTest/2
corr <- rep(0, nCorr)
pCorr <- matrix(0, nrow=nCorr, ncol=n.col)
# observed correlations
s <- colSums(x$DAT)
k <- 0
for(i1 in 1:(x$nTest-1)){
for(i2 in (i1+1):x$nTest){
k <- k + 1
corr[k] <- (sum(x$DAT[,i1]*x$DAT[,i2])*N - s[i1]*s[i2])/max(sqrt(s[i1]*s[i2]*(N - s[i1])*(N - s[i2])), 0.01)
}
}
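  # the expression above is the phi correlation for two binary test outcomes:
  # phi = (N*sum(x1*x2) - s1*s2) / sqrt(s1*s2*(N - s1)*(N - s2)),
  # with the denominator floored at 0.01 to guard against division by zero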
if(nR <=1){
tr <- rep(1, x$nTest);
gq.out <- gauQuad()
gq <- gq.out$gq
w <- gq.out$w
for(i in 1:n.col){
# predicted (expected) correlations
s <- rep(0, x$nTest)
for(j in 1:x$nTest){
for(n.i in 1:nClass){
s[j] <- s[j] + prev[i, n.i]*pnorm(a[i,ta[n.i, j] ]/sqrt(1+b[i, tb[n.i, j, 1]]^2))
}
}
k <- 0
for(i1 in (1:(x$nTest-1))){
for(i2 in ((i1+1):x$nTest)){
k <- k+1
if(tr[i1]==tr[i2]){
I <- 0
for(n.i in 1:nClass){
I <- I + prev[i, n.i]*sum(w*pnorm(a[i,ta[n.i, i1]]+ b[i, tb[n.i, i1, 1]]*gq)*pnorm(a[i, ta[n.i, i2]]+ b[i, tb[n.i, i2, 1]]*gq))
}
pCorr[k, i] <- (I-s[i1]*s[i2])/sqrt(s[i1]*s[i2]*(1-s[i1])*(1-s[i2]))
}else{
I <- 0
for(n.i in 1:nClass){
I <- I + prev[i, n.i]*pnorm(a[i,ta[n.i, i1]]/sqrt(1+b[i, tb[n.i, i1, 1]]^2))*pnorm(a[i,ta[n.i, i2]]/sqrt(1+b[i, tb[n.i, i2, 1]]^2))
}
pCorr[k, i] <- (I-s[i1]*s[i2])/sqrt(s[i1]*s[i2]*(1-s[i1])*(1-s[i2]))
}
}
}
}
}else{
nrnd <- 1000;
nallrnd <- nrnd*nR;
for(i in 1:n.col){
# predicted (expected) correlations
s <- rep(0, x$nTest)
for(j in 1:x$nTest){
for(n.i in 1:nClass){
s[j] <- s[j] + prev[i, n.i]*pnorm(a[i,ta[n.i, j] ]/sqrt(1+sum(b[i, tb[n.i, j, ]]^2)))
}
} # end of for(j in 1:x$nTest)
   rndMat <- matrix(rnorm(nallrnd), nrnd, nR) # Monte Carlo draws, regenerated each iteration and shared by all test pairs
k <- 0
for(i1 in (1:(x$nTest-1))){
for(i2 in ((i1+1):x$nTest)){
k <- k+1
I <- 0
for(n.i in 1:nClass){
sumabrs1 <- a[i, ta[n.i, i1]]
sumabrs2 <- a[i, ta[n.i, i2]]
for(n.r in 1:nR){
sumabrs1 <- sumabrs1 + b[i, tb[n.i, i1,n.r]]*rndMat[,n.r]
sumabrs2 <- sumabrs2 + b[i, tb[n.i, i2,n.r]]*rndMat[,n.r]
}
I <- I + prev[i, n.i]*mean(pnorm(sumabrs1)*pnorm(sumabrs2))
}
pCorr[k, i] <- (I-s[i1]*s[i2])/sqrt(s[i1]*s[i2]*(1-s[i1])*(1-s[i2]))
}
}
} # end of for(i in 1:n.col)
} # end of if-else
list(corr=corr, pCorr=pCorr)
}
|
436d624a738922e99a7bb265425ea3ee2a78dded
|
0a906cf8b1b7da2aea87de958e3662870df49727
|
/diffrprojects/inst/testfiles/dist_mat_absolute/libFuzzer_dist_mat_absolute/dist_mat_absolute_valgrind_files/1609962208-test.R
|
bdf7635b40505b85c918025e1866596d95697f19
|
[] |
no_license
|
akhikolla/updated-only-Issues
|
a85c887f0e1aae8a8dc358717d55b21678d04660
|
7d74489dfc7ddfec3955ae7891f15e920cad2e0c
|
refs/heads/master
| 2023-04-13T08:22:15.699449
| 2021-04-21T16:25:35
| 2021-04-21T16:25:35
| 360,232,775
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 523
|
r
|
1609962208-test.R
|
testlist <- list(x = integer(0), y = c(1364283729L, 1364283729L, 1370640721L, 1364283729L, 1364283729L, 1364283729L, 1364283729L, 1364283729L, 1364283729L, 1364283729L, 1364283729L, 1364000588L, 690618188L, -16777216L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L))
result <- do.call(diffrprojects:::dist_mat_absolute,testlist)
str(result)
|
e78d2a73ed32e693fb90ca39ca522b946ca5d308
|
ae5225814dde38b7a3e08e5aa984209cf47ff1ed
|
/man/copyXDFFiles.Rd
|
8717410d70492826c040922165eb5100af4eb1be
|
[
"MIT"
] |
permissive
|
KrishAK47/jeeves
|
ccbba2d94b55d8325368b9f7e6c93a12fb440042
|
c0ddecad35cdfee1763cf54c8fc61dd477a41e46
|
refs/heads/master
| 2020-06-02T17:22:25.335004
| 2018-06-28T16:21:37
| 2018-06-28T16:21:37
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 402
|
rd
|
copyXDFFiles.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/installAlteryx.R
\name{copyXDFFiles}
\alias{copyXDFFiles}
\title{Copy XDF files from SVN}
\usage{
copyXDFFiles(svnDir = getOption("alteryx.svndir"), rVersion = getRversion())
}
\arguments{
\item{svnDir}{svn directory to copy from.}
\item{rVersion}{string indicating version of R.}
}
\description{
Copy XDF files from SVN
}
|
d8f53f0f0a3ded6dfd42c432d5ead6b082ca92d5
|
af82d640f48f0509d53feea49386785e3f912337
|
/proj_05_tasa/ta.R
|
40e12c306714adb541d568c31122013b4c14b0af
|
[] |
no_license
|
yangfanjun/GitHub_Learning-Analysis
|
b9f87826f5e18e5456717806a91277255109cfb7
|
67127523132049efcacc521bdc121b3369ce1cd4
|
refs/heads/master
| 2020-08-30T14:51:46.718368
| 2019-10-30T01:10:53
| 2019-10-30T01:10:53
| 218,414,074
| 1
| 1
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,155
|
r
|
ta.R
|
library(ggplot2)
df<-read.csv("df8.csv", header = TRUE, stringsAsFactors=FALSE)
#create alllevels for change shape (scale_shape_manual)
df$sortedid <- 1:nrow(df) # used on the x-axis to encode the temporal order of the threads
#The function factor is used to encode a vector as a factor (the terms ‘category’ and
# ‘enumerated type’ are also used for factors).
df$code <- factor(df$code, levels=c("IEx", "IEl", "IQE", "IQR", "IPA","GEx", "GEl-1","GEl-2","GQE", "GQR", "GPA","IML", "GML","ISP", "GSI-1","GSI-2"),
labels=c("IEx", "IEl", "IQE", "IQR", "IPA","GEx", "GEl-1","GEl-2","GQE",
"GQR", "GPA","IML", "GML","ISP", "GSI-1","GSI-2")) #用于y轴表示不同code
alllevels <- c(levels(df$code)) # used below to set the point shapes
#using ggplot2 - ggplot to make a graph
p <- ggplot(df) +
geom_point(aes(x=df$sortedid, y=df$code, shape=df$code, colour=df$student)) +
scale_shape_manual(values=1:length(alllevels))+
xlab("Discussion Threads") + ylab("Coding Categories")
# the following polishes the plot display, mainly the legend
p + guides(shape=FALSE) +
guides(col=guide_legend(ncol=2,title="Students"))
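# to save the last plot to disk (assuming a writable working directory):
# ggsave(filename = "coding_categories.png", width = 10, height = 6, dpi = 300)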
|
5b019be0d5fbb9812a4b9e310e075e3d051e1488
|
bbf1ae079309eca11270422d3f0d259d1515d430
|
/numerical-tours/r/nt_toolbox/toolbox_signal/plot_levelset.R
|
ffdea58f826487870b1c638c2e03e5b02f468f2a
|
[
"BSD-2-Clause"
] |
permissive
|
ZichaoDi/Di_MATLABTool
|
5e6a67b613c4bcf4d904ddc47c2744b4bcea4885
|
c071291c63685c236f507b2cb893c0316ab6415c
|
refs/heads/master
| 2021-08-11T07:28:34.286526
| 2021-08-04T18:26:46
| 2021-08-04T18:26:46
| 149,222,333
| 9
| 5
| null | null | null | null |
UTF-8
|
R
| false
| false
| 396
|
r
|
plot_levelset.R
|
plot_levelset <- function(Z, f=c(), title="", lw=1.5, sbpt=c()){
####
# f is supposed to be of the same shape as Z
####
if (length(sbpt) >0){
if (sbpt[3]==1){
par(mfrow=sbpt[1:2])
}
}
if (length(f)==0){
f <- Z }
Z <- as.cimg(t(Z))
ct <- contours(Z, nlevels=1)
imageplot(f, title, sbpt)
purrr::walk(ct,function(v) lines(v$x,v$y,col="red",lw=lw))
}
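# example usage, a hedged sketch assuming imager is attached (for as.cimg and
# contours) and the toolbox imageplot() is on the search path:
# Z <- outer(seq(-1, 1, length.out = 64), seq(-1, 1, length.out = 64),
#            function(u, v) sqrt(u^2 + v^2)) # radial distance field
# plot_levelset(Z, title = "level sets of a radial field")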
|
59cb70012355423059b38be6576e094a4ea23f84
|
a07b28cb54fd41d3102058cefbfe90a4f1e51aaa
|
/k01/k01 sample size.R
|
0f6746f6dd6f33cfad251131586eb3c303e12b39
|
[] |
no_license
|
amyrobyn/LaBeaud_Lab
|
ed1d05fa2dc1c1df412a106519b69dd79168d978
|
fab5c867027a07409006a1f0ed9b80e39db8357c
|
refs/heads/master
| 2021-06-05T20:31:30.379779
| 2021-04-30T18:56:59
| 2021-04-30T18:56:59
| 64,692,795
| 5
| 1
| null | 2017-05-03T05:19:28
| 2016-08-01T18:47:38
|
Stata
|
UTF-8
|
R
| false
| false
| 3,530
|
r
|
k01 sample size.R
|
library(pwr)
# aim 1 -------------------------------------------------------------------
#Hypothesis: clinicians with accurate and real-time diagnostic and reporting tools increase reporting compared to control.
#219 acute febrile patients will be enrolled for a single study visit over an enrollment period of two years from two clinics (total n = 438). This sample size calculation (Cohen's d) is based on 6% of dengue cases currently reported under the traditional surveillance system,29,30 a 10% expected increase in reporting in the FeverDX clinic vs. control clinics, with 80% power and 95% confidence.
pwr.t.test(n = NULL, d = 0.2, sig.level = 0.05, power = .8, type = c("paired"), alternative = c("two.sided"))
pwr.t.test(n = NULL, d = 0.4, sig.level = 0.05, power = .8, type = c("paired"), alternative = c("two.sided"))
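# with n = NULL, pwr.t.test solves for the required number of pairs; since n
# scales roughly with 1/d^2, halving the detectable effect (d = 0.4 down to
# d = 0.2) roughly quadruples the required sample size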
# aim 2 -------------------------------------------------------------------
#Neighborhood violence (measured as homicides) decreases proportion of acute febrile and arboviral cases captured in traditional surveillance systems.
#To estimate minimum expected effect (log odds of 4) of violence on care seeking rates, we will survey 480
#households (~1440 persons based on three persons per household 76) in each of three neighborhoods per catchment
#area of two clinics.
#This sample size is based on a conservative estimate of four times decreased odds of accessing healthcare92 with one level increase in violence, 26% of households reporting febrile illness in the past two weeks77 (between cluster variation = 5%), and 50% of febrile illness seeking care from a health facility or provider for febrile illness77 and 24% incidence of dengue among febrile patients.78
30*2*4*2
#Vector sampling: Based on data previously collected from Cali, Colombia (Ocampo et al. unpublished),
#over a three-month sampling period, on average, 70% of ovitraps were positive for at least one Aedes spp.
#(range = 40-80%) per month. To detect at least a 10% difference in monthly pupal positivity by ANOVA, our
#sample will contain 43 ovitraps per neighborhood per month assuming 9 neighborhoods with 95% confidence and
#80% power.
pwr.anova.test(k=2,f=.3,sig.level=.05,power=.8)
44*2*24
# aim 3 -------------------------------------------------------------------
#arboviral outbreaks two or more standard deviations above average are predictable two weeks pre-outbreak at the neighborhood level with high sensitivity and specificity.
#Sample size in the two catchment areas: Daily SIVIGLA febrile case data will be adjusted using hybrid surveillance methods49 to generate the case dataset (N = 26,985 suspected arboviral cases and 56.5% of suspected arboviral cases confirmed by laboratory diagnostics based on case data reported to SIVGILA in Cali 2014-20163). Climate and violence data will be collected daily for two years. Vector data will be collected monthly for 24 months. Expected effect size: Climate is expected to be strongly correlated with dengue cases111-cumulative precipitation correlated with dengue cases (correlation coefficient = 0.5, p-value<0.001) and minimum temperature (correlation coefficient = 0.6, p-value<0.001).112 Accuracy and precision Accounting for seasonality, time lags, community violence, and vector campaigns, an increase in accuracy and spatial and temporal precision is expected compared to previous studies using a lag of 1-12 weeks113 (defined during optimization), 80% sensitivity24 and 90% specificity,24 37 days pre-outbreak24 at a spatial scale of 0.4 km2.24
|
881c8371bbe0d4ac25e9d5685b4b440ff21b13ea
|
0618b568a1accbb864f4bb78d82230f7239128ec
|
/run_analysis.R
|
a44bd2e2c2be87e380d1b0e29647de7c24513cc6
|
[] |
no_license
|
richshaw/cleanDataCoursea
|
78dc0188e1443fe6d6f1dae4471fd6057aefeda1
|
8c093da909e67d2ed1d98ec29b5a9050ec1da8cd
|
refs/heads/master
| 2020-04-05T23:27:57.517371
| 2014-07-16T23:54:44
| 2014-07-16T23:54:44
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 7,099
|
r
|
run_analysis.R
|
##
# Script brief
#
# The purpose of this project is to demonstrate your ability to collect, work
# with, and clean a data set. The goal is to prepare tidy data that can be used
# for later analysis. You will be graded by your peers on a series of yes/no
# questions related to the project. You will be required to submit: 1) a tidy
# data set as described below, 2) a link to a Github repository with your script
# for performing the analysis, and 3) a code book that describes the variables,
# the data, and any transformations or work that you performed to clean up the
# data called CodeBook.md. You should also include a README.md in the repo with
# your scripts. This repo explains how all of the scripts work and how they are
# connected.
# You should create one R script called run_analysis.R that does the following.
# 1. Merges the training and the test sets to create one data set.
# 2. Extracts only the measurements on the mean and standard deviation for each
# measurement.
# 3. Uses descriptive activity names to name the activities in the data set
# 4. Appropriately labels the data set with descriptive variable names.
# 5. Creates a second, independent tidy data set with the average of each variable
# for each activity and each subject.
##
# File download
#
# Local file name for zipped data
file <- "Data.zip"
url <- "https://d396qusza40orc.cloudfront.net/getdata%2Fprojectfiles%2FUCI%20HAR%20Dataset.zip"
# Downloads and unzips data
# url is URL of zip file
# file is the local name of downloaded data
# exdir is where we want to store the unzipped files. Defaults to working dir.
# method = is download method, defaults to curl because this was created on a mac.
downloadUnzipData <- function(url,file,exdir = ".",method="curl") {
# Download zip
download.file(url,file,method=method)
# Unzip files
unzip(file,exdir = exdir)
}
# Brief assumes data is in working dir but will
# download data if it's not around
if(!file.exists(file)) {
# URL of zipped data
downloadUnzipData(url,file,method="curl")
}
##
# Start cleaning script
#
# Looking at the description of the data in README.txt and
# features_info.txt, we can see that the different parts of the study
# have been split into separate files...
# 'features.txt': Friendly labels for 'x', the dependent variables of the study
# 'activity_labels.txt': Friendly labels for 'y', the independent variables of the study
# 'train/X_train.txt': Phone data for training group
# 'train/y_train.txt': Activities data for training group
# 'train/subject_train.txt': Subject IDs for training data
# 'test/X_test.txt': Phone data for test group
# 'test/y_test.txt': Activities data for test group
# 'test/subject_test.txt': Subject IDs for test data
# Before we can do any analysis we need to re-combine and re-label the data into
# a more friendly format.
# Before starting the analysis we're aiming for a table like the following.
# Subject | Independent Variables | Dependent variables
# Load data into environment
# Dependent vars
# data.x.train: Training set.
# data.x.test: Test set.
data.x.train <- read.table("UCI HAR Dataset//train//X_train.txt", header = FALSE)
print("data.x.train loaded")
data.x.test <- read.table("UCI HAR Dataset//test//X_test.txt", header = FALSE)
print("data.x.test loaded")
# Independent vars
# data.y.train: Training labels.
# data.y.test: Test labels.
data.y.train <- read.table("UCI HAR Dataset//train//y_train.txt", header = FALSE)
print("data.y.train loaded")
data.y.test <- read.table("UCI HAR Dataset//test//y_test.txt", header = FALSE)
print("data.y.test loaded")
# Subjects
data.subject.train <- read.table("UCI HAR Dataset//train//subject_train.txt", header = FALSE)
print("data.subject.train loaded")
data.subject.test <- read.table("UCI HAR Dataset//test//subject_test.txt", header = FALSE)
print("data.subject.test loaded")
# Variable labels
# Features = labels for x, the dependent (measurement) vars
# Activity = labels for y, the independent (activity) var
data.label.features <- read.table("UCI HAR Dataset//features.txt", header = FALSE)
print("data.label.features loaded")
data.label.activity <- read.table("UCI HAR Dataset//activity_labels.txt", header = FALSE)
print("data.label.activity loaded")
# The train and test groups are just random samples so we can safely re-combine the data
# Merge the training and the test sets to create one data set
data.x <- rbind(data.x.train, data.x.test)
data.y <- rbind(data.y.train, data.y.test)
data.subject <- rbind(data.subject.train, data.subject.test)
# Sanity check. Each row in our 3 data sets should be the same person so each
# data set should have the same number of rows.
if( (nrow(data.x) != nrow(data.y)) |
(nrow(data.y) != nrow(data.subject)) |
(nrow(data.x) != nrow(data.subject)) ) stop('Unequal number of subjects in data')
### 4. Appropriately labels the data set with descriptive variable names.
# Give x the DV's friendly column labels
colnames(data.x) <- data.label.features[["V2"]]
# Give y the IV a friendly column label
colnames(data.y) <- c('activity')
# Give subject the subject id's a friendly column label
colnames(data.subject) <- c('subject')
print("Given data friendly column names")
### 3. Uses descriptive activity names to name the activities in the data set
# The activity data in y isn't really integers they're categories so lets make them so
data.y[["activity"]] <- factor(data.y[["activity"]],
levels = data.label.activity[["V1"]],
labels = data.label.activity[["V2"]])
print("Converted activity column to factor")
# Combine the data together to a representation of all the data in a tidy format
data.tidy.all <- cbind(data.subject,data.y,data.x)
print("Combined data into single tidy data set")
### 2. We're only interested in the measurements on the mean and standard deviation for each measurement...
# According to features.txt all mean and std dev vars are tagged with mean() and std()
# Extract names of mean() and std() columns
# Regex looks for columns that contains mean() OR std()
stdmeanColumns <- names(data.tidy.all)[grep('(mean\\(\\)|std\\(\\))',names(data.tidy.all))]
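# e.g. "tBodyAcc-mean()-X" and "tBodyAcc-std()-Y" match, while
# "fBodyAcc-meanFreq()-Z" and "angle(tBodyAccMean,gravity)" do not, because
# the pattern requires the literal substrings "mean()" or "std()"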
# Create vector of columns we want to subset from total data set. Subject ID, IV + DV's
subsetColumns <- c('subject','activity',stdmeanColumns)
# Subset data to required columns
data.tidy.stdmean <- subset(data.tidy.all, select = subsetColumns)
#Tidy column names
colnames(data.tidy.stdmean) <- gsub("^t", "time", colnames(data.tidy.stdmean))
colnames(data.tidy.stdmean) <- gsub("^f", "frequency", colnames(data.tidy.stdmean))
print("Extracted std and mean variables into tidy data set")
### 5. Create a second, independent tidy data set with the average of each
### variable for each activity and each subject.
data.tidy.means <- aggregate(. ~ subject+activity,data = data.tidy.stdmean, mean)
print("Calculated means for each variable for each subject and activity")
# Write output
write.table(data.tidy.means,"data_tidy_means.txt")
print("Tidy data written to data_tidy_means.txt")
print("DONE")
|
d7634a44e4f5f305a3546cc3c394bc0dc1679c88
|
d9598fcbf32fa2fd9eb6ad635308206113175a5e
|
/1-Basics-of-R-shiny/1-Hello-Shiny.R
|
5ff6357ea2f742b28d8224b3cb82de74382a19e5
|
[
"MIT"
] |
permissive
|
Yuhao0428/r-shiny-tutorial
|
0ea00cc1b08a39b820b659b7e6bc9e63aca6c96c
|
88c8a23592f1dfe060316dce060f888497f360ee
|
refs/heads/main
| 2023-07-09T02:06:28.630552
| 2021-08-06T02:47:48
| 2021-08-06T02:47:48
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 66
|
r
|
1-Hello-Shiny.R
|
library(shiny)
runExample("01_hello", port=9999, host="0.0.0.0")
|
e160e0684866f2bbef229c295a0a9b0ba10d570c
|
edc1b8bd5dfafd300baa1e47f7544cd4b69e1345
|
/proteomics/DPP_Up_regulator_percentage_SM.R
|
10b4b5aed59484876318cb94227674c47ee5aa79
|
[] |
no_license
|
mhptr/phd_code
|
fdbfb2fcd2b816a83f76854bb811cc582cfd3c41
|
8241737953a7b2293398068dd551991cfeb8d569
|
refs/heads/master
| 2022-10-10T10:18:10.905711
| 2020-03-09T09:18:01
| 2020-03-09T09:18:01
| 199,153,370
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 6,628
|
r
|
DPP_Up_regulator_percentage_SM.R
|
#06/09/19
setwd("~/OneDrive - University of Warwick/WORK/Results/Proteomics/FINAL Result/Analysis - R-Script /")
# regulation = read.csv("./SubtiWiki Exports /regulations.csv", header = T)
# regulation_freq = table(regulation$regulator)
# write.csv(regulation_freq, "./SubtiWiki Exports /regulation_freq.csv", row.names = F)
regulation_freq = read.csv("./SubtiWiki Exports /regulation_freq.csv", header = T)
colnames(regulation_freq) = c("BSU_regulators_all", "No_of_total_targets")
find_ratio = function(regulation_freq, my_table_freq) {
merged_table = merge(my_table_freq, regulation_freq, by.x="regulator", by.y="BSU_regulators_all", all.x = TRUE)
print(merged_table)
merged_percentage_table = transform(merged_table, percentage = 100 * merged_table$No_of_targets/merged_table$No_of_total_targets)
merged_percentage_table = merged_percentage_table[order(merged_percentage_table$percentage, decreasing = TRUE), ]
return(merged_percentage_table)
}
#DPP_LB
DPP_LB_geneRegulations = read.csv("./Data/combined/output/geneRegulations/old/regulations/DPP_LB_geneRegulations.csv", header = T)
DPP_Up_LB_geneRegulations = DPP_LB_geneRegulations[(DPP_LB_geneRegulations$logFC)>0,]
DPP_Up_LB_geneRegulations = table(DPP_Up_LB_geneRegulations$regulator)
write.csv(DPP_Up_LB_geneRegulations, "./Data/combined/output/geneRegulations/regulators/percentage_regulator/Up/DPP_Up_LB_reg_freq.csv", row.names = F)
DPP_Up_LB_reg_freq = read.csv("./Data/combined/output/geneRegulations/regulators/percentage_regulator/Up/DPP_Up_LB_reg_freq.csv", header = T)
colnames(DPP_Up_LB_reg_freq) = c("regulator", "No_of_targets")
DPP_Up_LB_reg_freq = DPP_Up_LB_reg_freq[(DPP_Up_LB_reg_freq$No_of_targets)>0,]
DPP_Up_LB_reg_freq = DPP_Up_LB_reg_freq[!DPP_Up_LB_reg_freq$regulator == "",]
DPP_Up_LB_reg_percentage = find_ratio(regulation_freq, DPP_Up_LB_reg_freq)
write.csv(DPP_Up_LB_reg_percentage, "./Data/combined/output/geneRegulations/regulators/percentage_regulator/output/Up/DPP_LB_reg_percentage.csv", row.names = FALSE)
View(DPP_Up_LB_reg_percentage)
#DPP_M9
DPP_M9_geneRegulations = read.csv("./Data/combined/output/geneRegulations/old/regulations/DPP_M9_geneRegulations.csv", header = T)
DPP_Up_M9_geneRegulations = DPP_M9_geneRegulations[(DPP_M9_geneRegulations$logFC)>0,]
DPP_Up_M9_geneRegulations = table(DPP_Up_M9_geneRegulations$regulator)
write.csv(DPP_Up_M9_geneRegulations, "./Data/combined/output/geneRegulations/regulators/percentage_regulator/Up/DPP_Up_M9_reg_freq.csv", row.names = F)
DPP_Up_M9_reg_freq = read.csv("./Data/combined/output/geneRegulations/regulators/percentage_regulator/Up/DPP_Up_M9_reg_freq.csv", header = T)
colnames(DPP_Up_M9_reg_freq) = c("regulator", "No_of_targets")
DPP_Up_M9_reg_freq = DPP_Up_M9_reg_freq[(DPP_Up_M9_reg_freq$No_of_targets)>0,]
DPP_Up_M9_reg_freq = DPP_Up_M9_reg_freq[!DPP_Up_M9_reg_freq$regulator == "",]
DPP_Up_M9_reg_percentage = find_ratio(regulation_freq, DPP_Up_M9_reg_freq)
write.csv(DPP_Up_M9_reg_percentage, "./Data/combined/output/geneRegulations/regulators/percentage_regulator/output/Up/DPP_M9_reg_percentage.csv", row.names = FALSE)
View(DPP_Up_M9_reg_percentage)
#DPP_SH2
DPP_SH2_geneRegulations = read.csv("./Data/combined/output/geneRegulations/old/regulations/DPP_SH2_geneRegulations.csv", header = T)
DPP_Up_SH2_geneRegulations = DPP_SH2_geneRegulations[(DPP_SH2_geneRegulations$logFC)>0,]
DPP_Up_SH2_geneRegulations = table(DPP_Up_SH2_geneRegulations$regulator)
write.csv(DPP_Up_SH2_geneRegulations, "./Data/combined/output/geneRegulations/regulators/percentage_regulator/Up/DPP_Up_SH2_reg_freq.csv", row.names = F)
DPP_Up_SH2_reg_freq = read.csv("./Data/combined/output/geneRegulations/regulators/percentage_regulator/Up/DPP_Up_SH2_reg_freq.csv", header = T)
colnames(DPP_Up_SH2_reg_freq) = c("regulator", "No_of_targets")
DPP_Up_SH2_reg_freq = DPP_Up_SH2_reg_freq[(DPP_Up_SH2_reg_freq$No_of_targets)>0,]
DPP_Up_SH2_reg_freq = DPP_Up_SH2_reg_freq[!DPP_Up_SH2_reg_freq$regulator == "",]
DPP_Up_SH2_reg_percentage = find_ratio(regulation_freq, DPP_Up_SH2_reg_freq)
write.csv(DPP_Up_SH2_reg_percentage, "./Data/combined/output/geneRegulations/regulators/percentage_regulator/output/Up/DPP_SH2_reg_percentage.csv", row.names = FALSE)
View(DPP_Up_SH2_reg_percentage)
#DPP_SH5
DPP_SH5_geneRegulations = read.csv("./Data/combined/output/geneRegulations/old/regulations/DPP_SH5_geneRegulations.csv", header = T)
DPP_Up_SH5_geneRegulations = DPP_SH5_geneRegulations[(DPP_SH5_geneRegulations$logFC)>0,]
DPP_Up_SH5_geneRegulations = table(DPP_Up_SH5_geneRegulations$regulator)
write.csv(DPP_Up_SH5_geneRegulations, "./Data/combined/output/geneRegulations/regulators/percentage_regulator/Up/DPP_Up_SH5_reg_freq.csv", row.names = F)
DPP_Up_SH5_reg_freq = read.csv("./Data/combined/output/geneRegulations/regulators/percentage_regulator/Up/DPP_Up_SH5_reg_freq.csv", header = T)
colnames(DPP_Up_SH5_reg_freq) = c("regulator", "No_of_targets")
DPP_Up_SH5_reg_freq = DPP_Up_SH5_reg_freq[(DPP_Up_SH5_reg_freq$No_of_targets)>0,]
DPP_Up_SH5_reg_freq = DPP_Up_SH5_reg_freq[!DPP_Up_SH5_reg_freq$regulator == "",]
DPP_Up_SH5_reg_percentage = find_ratio(regulation_freq, DPP_Up_SH5_reg_freq)
write.csv(DPP_Up_SH5_reg_percentage, "./Data/combined/output/geneRegulations/regulators/percentage_regulator/output/Up/DPP_SH5_reg_percentage.csv", row.names = FALSE)
View(DPP_Up_SH5_reg_percentage)
#DPP_SH5_vs_SH2
DPP_SH5_vs_SH2_geneRegulations = read.csv("./Data/combined/output/geneRegulations/old/regulations/DPP_SH5_vs_SH2_geneRegulations.csv", header = T)
DPP_Up_SH5_vs_SH2_geneRegulations = DPP_SH5_vs_SH2_geneRegulations[(DPP_SH5_vs_SH2_geneRegulations$logFC)>0,]
DPP_Up_SH5_vs_SH2_geneRegulations = table(DPP_Up_SH5_vs_SH2_geneRegulations$regulator)
write.csv(DPP_Up_SH5_vs_SH2_geneRegulations, "./Data/combined/output/geneRegulations/regulators/percentage_regulator/Up/DPP_Up_SH5_vs_SH2_reg_freq.csv", row.names = F)
DPP_Up_SH5_vs_SH2_reg_freq = read.csv("./Data/combined/output/geneRegulations/regulators/percentage_regulator/Up/DPP_Up_SH5_vs_SH2_reg_freq.csv", header = T)
colnames(DPP_Up_SH5_vs_SH2_reg_freq) = c("regulator", "No_of_targets")
DPP_Up_SH5_vs_SH2_reg_freq = DPP_Up_SH5_vs_SH2_reg_freq[(DPP_Up_SH5_vs_SH2_reg_freq$No_of_targets)>0,]
DPP_Up_SH5_vs_SH2_reg_freq = DPP_Up_SH5_vs_SH2_reg_freq[!DPP_Up_SH5_vs_SH2_reg_freq$regulator == "",]
DPP_Up_SH5_vs_SH2_reg_percentage = find_ratio(regulation_freq, DPP_Up_SH5_vs_SH2_reg_freq)
write.csv(DPP_Up_SH5_vs_SH2_reg_percentage, "./Data/combined/output/geneRegulations/regulators/percentage_regulator/output/Up/DPP_SH5_vs_SH2_reg_percentage.csv", row.names = FALSE)
View(DPP_Up_SH5_vs_SH2_reg_percentage)
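# the five blocks above repeat one pipeline per condition; a hedged refactor
# sketch (paths and condition labels as used above, CSV roundtrip skipped,
# otherwise hypothetical):
# for (cond in c("LB", "M9", "SH2", "SH5", "SH5_vs_SH2")) {
#   gr <- read.csv(sprintf("./Data/combined/output/geneRegulations/old/regulations/DPP_%s_geneRegulations.csv", cond), header = TRUE)
#   up <- gr[gr$logFC > 0, ]
#   freq <- as.data.frame(table(up$regulator))
#   colnames(freq) <- c("regulator", "No_of_targets")
#   freq <- freq[freq$No_of_targets > 0 & freq$regulator != "", ]
#   pct <- find_ratio(regulation_freq, freq)
#   write.csv(pct, sprintf("./Data/combined/output/geneRegulations/regulators/percentage_regulator/output/Up/DPP_%s_reg_percentage.csv", cond), row.names = FALSE)
# }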
|
e268f457f351ecf51859df63ed6278d74af74cff
|
4a90a8d09f1e4b8c79d3982e0f235c4d064e74c1
|
/man/load_txt.Rd
|
b5e0e99ff5287defd6200360656a3ac1187529fb
|
[] |
no_license
|
bsnouffer/atlantistools
|
3696d68837ac61256305e68a5a2ce49ca9cf9e86
|
3bcdb27696f9b16ce76869371658e41ddce1d374
|
refs/heads/master
| 2021-01-15T11:14:33.996939
| 2016-08-31T21:13:51
| 2016-08-31T21:13:51
| 67,061,727
| 0
| 0
| null | 2016-08-31T18:09:12
| 2016-08-31T18:09:11
| null |
UTF-8
|
R
| false
| true
| 1,332
|
rd
|
load_txt.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/load-txt.R
\name{load_txt}
\alias{load_txt}
\title{Function to load various txt files from Atlantis simulations}
\usage{
load_txt(dir, file, id_col = "Time")
}
\arguments{
\item{dir}{Character string giving the path of the Atlantis model folder.
If data is stored in multiple folders (e.g. main model folder and output
folder) you should use 'NULL' as dir.}
\item{file}{Character string of the file. In case you are using
multiple folders for your model files and outputfiles pass the complete
folder/filename string as to file. In addition set dir to 'NULL' in this
case.}
\item{id_col}{Character strings giving the names of the columns which are not variables.
Data from all other columns will be gathered with tidyr.}
}
\value{
Dataframe in tidy format!
}
\description{
Function to load various txt files from Atlantis simulations
}
\examples{
d <- system.file("extdata", "setas-model-new-becdev", package = "atlantistools")
load_txt(dir = d,
file = "outputSETASSSB.txt",
id_col = "Time")
}
\seealso{
Other load functions: \code{\link{load_box}},
\code{\link{load_bps}}, \code{\link{load_dietcheck}},
\code{\link{load_fgs}}, \code{\link{load_nc_physics}},
\code{\link{load_nc}}, \code{\link{load_rec}},
\code{\link{load_spec_mort}}
}
|
800e02169ff18b10fd1f36c304a61d70c71a84ea
|
ee0760e97724685ca8d1ab4ce2dd19f5b37e05e6
|
/bin/experiment_03_netherlands/comparison_agg.R
|
9f8bbf28975380dcf6df82151acc845064bc3fa3
|
[] |
no_license
|
imarkonis/EarthCare
|
f7d94ab939b92d1679a68d1e00dea257a71aa6cc
|
60fcfc0934a1c6c1c2ffed048088c19b421a2a79
|
refs/heads/master
| 2020-03-14T11:08:34.977062
| 2018-06-20T11:21:50
| 2018-06-20T11:21:50
| 131,584,029
| 0
| 1
| null | 2018-06-19T12:48:26
| 2018-04-30T10:43:19
|
HTML
|
UTF-8
|
R
| false
| false
| 15,781
|
r
|
comparison_agg.R
|
source("./source/graphics.R")
source("./source/spatial_tools.R")
load("./data/experiment_3_main.rdata") #Created in comparison prep
# The comparison between gpm_rdr_prcp & gpm_knmi_prcp showed very small differences and thus gpm_rdr_prcp is used
## Aggregation example for each day of 2016-09
my_date <- as.Date("2016-09-03")
no_points <- 1
#gravity_center_gpm <- get_gravity_center(gpm_rdr_prcp[time %in% my_date], no_points)
#gravity_center_knmi <- get_gravity_center(c, no_points)
#gravity_center_rdr <- get_gravity_center(rdr_prcp[time %in% my_date], no_points)
#prcp_day_gpm_rdr <- agg_prcp(gpm_rdr_prcp, my_date, gravity_center_gpm)
#prcp_day_knmi <- agg_prcp(prcp_knmi, my_date, gravity_center_knmi)
#prcp_day_rdr <- agg_prcp(rdr_prcp, my_date, gravity_center_rdr)
prcp_day_gpm_rdr <- gpm_rdr_prcp[time %in% my_date]
prcp_day_gpm_rdr <- agg_prcp(prcp_day_gpm_rdr, get_max_location(prcp_day_gpm_rdr))
prcp_day_knmi <- knmi_prcp[time %in% my_date]
prcp_day_knmi <- agg_prcp(prcp_day_knmi, get_max_location(prcp_day_knmi))
prcp_day_rdr <- rdr_prcp[time %in% my_date]
prcp_day_rdr <- agg_prcp(prcp_day_rdr, get_max_location(prcp_day_knmi))
transp <- 0.3
ggplot(prcp_day_gpm_rdr, aes(distance, mean)) +
geom_smooth(method = "loess", col = "orange", size = 1, span = 0.5, se = T, fill = "orange") +
geom_smooth(data = prcp_day_knmi, aes(distance, mean), method = "loess", col = "red", size = 1, span = 1, se = T, fill = "red") +
geom_smooth(data = prcp_day_rdr, aes(distance, mean), method = "loess", col = "dark green", size = 1, span = 1, se = T, fill = "dark green") +
labs(x = "Distance (km)", y = "Mean (mm)") +
theme_bw()
ggplot(prcp_day_gpm_rdr, aes(distance, sd/mean)) +
geom_smooth(method = "loess", col = "orange", size = 1, span = 0.5, se = T, fill = "orange") +
geom_smooth(data = prcp_day_knmi, aes(distance, sd/mean), method = "loess", col = "red", size = 1, span = 1, se = T, fill = "red") +
geom_smooth(data = prcp_day_rdr, aes(distance, sd/mean), method = "loess", col = "dark green", size = 1, span = 1, se = T, fill = "dark green") +
labs(x = "Distance (km)", y = "Mean (mm)") +
theme_bw()
ggplot(prcp_day_gpm_rdr, aes(distance, sum, size = prcp)) +
geom_point(col = "orange", alpha = transp) +
geom_point(data = prcp_day_knmi, aes(distance, sum, size = prcp), col = "red", alpha = transp) +
labs(x = "Distance (km)", y = "Precipitation sum (mm)") +
theme_bw()
ggplot(prcp_day_gpm_rdr, aes(distance, sum, size = prcp)) +
geom_point(col = "orange", alpha = transp) +
geom_point(data = prcp_day_rdr, aes(distance, sum, size = prcp), col = "dark green", alpha = transp) +
labs(x = "Distance (km)", y = "Precipitation sum (mm)") +
theme_bw()
ggplot(prcp_day_gpm_rdr, aes(distance, mean, size = prcp)) +
geom_point(col = "orange", alpha = transp) +
geom_point(data = prcp_day_knmi, aes(distance, mean, size = prcp), col = "red", alpha = transp) +
geom_point(data = prcp_day_rdr, aes(distance, mean, size = prcp), col = "dark green", alpha = transp) +
labs(x = "Distance (km)", y = "Mean (mm)") +
theme_bw()
ggplot(prcp_day_gpm_rdr, aes(distance, sd/mean, size = prcp)) +
geom_point(col = "orange", alpha = transp) +
geom_point(data = prcp_day_knmi, aes(distance, sd/mean, size = prcp), col = "red", alpha = transp) +
geom_point(data = prcp_day_rdr, aes(distance, sd/mean, size = prcp), col = "dark green", alpha = transp) +
labs(x = "Distance (km)", y = "Coef. of Variation") +
theme_bw()
map_plot(stations = prcp_day_knmi, radar = prcp_day_rdr, satellite = prcp_day_gpm_rdr, date = my_date)
## Aggregation in time
my_period <- rdr_prcp[time >= my_date, unique(time)]
#Spatial scaling in monthly aggregates
period <- my_period
prcp_month_gpm_rdr <- agg_prcp_period_sum(gpm_rdr_prcp, period,
get_max_location(gpm_rdr_prcp[time %in% period]))
prcp_month_knmi <- agg_prcp_period_sum(knmi_prcp, period,
get_max_location(knmi_prcp[time %in% period]))
prcp_month_rdr <- agg_prcp_period_sum(rdr_prcp, period,
get_max_location(rdr_prcp[time %in% period]))
ggplot(prcp_month_gpm_rdr, aes(distance, mean, size = prcp)) +
geom_point(col = "orange", alpha = transp) +
geom_point(data = prcp_month_knmi, aes(distance, mean, size = prcp), col = "red", alpha = transp) +
geom_point(data = prcp_month_rdr, aes(distance, mean, size = prcp), col = "dark green", alpha = transp) +
labs(x = "Distance (km)", y = "Mean (mm)") +
theme_bw()
ggplot(prcp_month_gpm_rdr, aes(distance, sd/mean, size = prcp)) +
geom_point(col = "orange", alpha = transp) +
geom_point(data = prcp_month_knmi, aes(distance, sd/mean, size = prcp), col = "red", alpha = transp) +
geom_point(data = prcp_month_rdr, aes(distance, sd/mean, size = prcp), col = "dark green", alpha = transp) +
labs(x = "Distance (km)", y = "Coef. of Variation") +
theme_bw()
#Mean daily precipitation for september
prcp_prd_gpm_rdr <- gpm_rdr_prcp[time %in% my_period[1]]
prcp_prd_gpm_rdr <- agg_prcp(prcp_prd_gpm_rdr, get_max_location(prcp_prd_gpm_rdr))
prcp_prd_gpm_rdr <- cbind(prcp_prd_gpm_rdr, time = my_period[1])
prcp_prd_knmi <- knmi_prcp[time %in% my_period[1]]
prcp_prd_knmi <- agg_prcp(prcp_prd_knmi, get_max_location(prcp_prd_knmi))
prcp_prd_knmi <- cbind(prcp_prd_knmi, time = my_period[1])
prcp_prd_rdr <- rdr_prcp[time %in% my_period[1]]
prcp_prd_rdr <- agg_prcp(prcp_prd_rdr, get_max_location(prcp_prd_rdr))
prcp_prd_rdr <- cbind(prcp_prd_rdr, time = my_period[1])
for(i in 2:length(my_period)){
prcp_day_gpm_rdr <- gpm_rdr_prcp[time %in% my_period[i]]
prcp_day_gpm_rdr <- agg_prcp(prcp_day_gpm_rdr, get_max_location(prcp_day_gpm_rdr))
prcp_day_gpm_rdr <- cbind(prcp_day_gpm_rdr, time = my_period[i])
prcp_prd_gpm_rdr <- rbind(prcp_prd_gpm_rdr, prcp_day_gpm_rdr)
prcp_day_knmi <- knmi_prcp[time %in% my_period[i]]
prcp_day_knmi <- agg_prcp(prcp_day_knmi, get_max_location(prcp_day_knmi))
prcp_day_knmi <- cbind(prcp_day_knmi, time = my_period[i])
prcp_prd_knmi <- rbind(prcp_prd_knmi, prcp_day_knmi)
prcp_day_rdr <- rdr_prcp[time %in% my_period[i]]
prcp_day_rdr <- agg_prcp(prcp_day_rdr, get_max_location(prcp_day_rdr))
prcp_day_rdr <- cbind(prcp_day_rdr, time = my_period[i])
prcp_prd_rdr <- rbind(prcp_prd_rdr, prcp_day_rdr)
print(i)
}
prcp_mean_gpm_rdr <- prcp_prd_gpm_rdr[, mean(mean), .(distance)]
colnames(prcp_mean_gpm_rdr)[2] <- "mean"
prcp_mean_knmi <- prcp_prd_knmi[, mean(mean), .(distance)]
colnames(prcp_mean_knmi)[2] <- "mean"
prcp_mean_rdr <- prcp_prd_rdr[, mean(mean), .(distance)]
colnames(prcp_mean_rdr)[2] <- "mean"
ggplot(prcp_mean_gpm_rdr, aes(distance, mean)) +
geom_smooth(method = "loess", col = "orange", size = 1, span = 1, se = T, fill = "orange") +
geom_smooth(data = prcp_mean_knmi, aes(distance, mean), method = "loess", col = "red", size = 1, span = 1, se = T, fill = "red") +
geom_smooth(data = prcp_mean_rdr, aes(distance, mean), method = "loess", col = "dark green", size = 1, span = 1, se = T, fill = "dark green") +
labs(x = "Distance (km)", y = "Mean (mm)") +
theme_bw()
prcp_cv_gpm_rdr <- prcp_prd_gpm_rdr[, mean(sd/mean), .(distance)]
colnames(prcp_cv_gpm_rdr)[2] <- "cv"
prcp_cv_knmi <- prcp_prd_knmi[, mean(sd/mean), .(distance)]
colnames(prcp_cv_knmi)[2] <- "cv"
prcp_cv_rdr <- prcp_prd_rdr[, mean(sd/mean), .(distance)]
colnames(prcp_cv_rdr)[2] <- "cv"
ggplot(prcp_cv_gpm_rdr, aes(distance, cv)) +
geom_smooth(method = "loess", col = "orange", size = 1, span = 1, se = T, fill = "orange") +
geom_smooth(data = prcp_cv_knmi, aes(distance, cv), method = "loess", col = "red", size = 1, span = 1, se = T, fill = "red") +
geom_smooth(data = prcp_cv_rdr, aes(distance, cv), method = "loess", col = "dark green", size = 1, span = 1, se = T, fill = "dark green") +
labs(x = "Distance (km)", y = "Coef. of Variation") +
theme_bw()
#Mean daily precipitation for whole period
my_period <- rdr_prcp[, unique(time)]
prcp_prd_gpm_rdr <- gpm_rdr_prcp[time %in% my_period[1]]
prcp_prd_gpm_rdr <- agg_prcp(prcp_prd_gpm_rdr, get_max_location(prcp_prd_gpm_rdr))
prcp_prd_gpm_rdr <- cbind(prcp_prd_gpm_rdr, time = my_period[1])
prcp_prd_knmi <- knmi_prcp[time %in% my_period[1]]
prcp_prd_knmi <- agg_prcp(prcp_prd_knmi, get_max_location(prcp_prd_knmi))
prcp_prd_knmi <- cbind(prcp_prd_knmi, time = my_period[1])
prcp_prd_rdr <- rdr_prcp[time %in% my_period[1]]
prcp_prd_rdr <- agg_prcp(prcp_prd_rdr, get_max_location(prcp_prd_rdr))
prcp_prd_rdr <- cbind(prcp_prd_rdr, time = my_period[1])
for(i in 2:length(my_period)){
prcp_day_gpm_rdr <- gpm_rdr_prcp[time %in% my_period[i]]
prcp_day_gpm_rdr <- agg_prcp(prcp_day_gpm_rdr, get_max_location(prcp_day_gpm_rdr))
prcp_day_gpm_rdr <- cbind(prcp_day_gpm_rdr, time = my_period[i])
prcp_prd_gpm_rdr <- rbind(prcp_prd_gpm_rdr, prcp_day_gpm_rdr)
prcp_day_knmi <- knmi_prcp[time %in% my_period[i]]
prcp_day_knmi <- agg_prcp(prcp_day_knmi, get_max_location(prcp_day_knmi))
prcp_day_knmi <- cbind(prcp_day_knmi, time = my_period[i])
prcp_prd_knmi <- rbind(prcp_prd_knmi, prcp_day_knmi)
prcp_day_rdr <- rdr_prcp[time %in% my_period[i]]
prcp_day_rdr <- agg_prcp(prcp_day_rdr, get_max_location(prcp_day_rdr))
prcp_day_rdr <- cbind(prcp_day_rdr, time = my_period[i])
prcp_prd_rdr <- rbind(prcp_prd_rdr, prcp_day_rdr)
print(i)
}
prcp_mean_gpm_rdr <- prcp_prd_gpm_rdr[, mean(mean), .(distance)]
colnames(prcp_mean_gpm_rdr)[2] <- "mean"
prcp_mean_knmi <- prcp_prd_knmi[, mean(mean), .(distance)]
colnames(prcp_mean_knmi)[2] <- "mean"
prcp_mean_rdr <- prcp_prd_rdr[, mean(mean), .(distance)]
colnames(prcp_mean_rdr)[2] <- "mean"
ggplot(prcp_mean_gpm_rdr, aes(distance, mean)) +
geom_smooth(method = "loess", col = "orange", size = 1, span = 1, se = T, fill = "orange") +
geom_smooth(data = prcp_mean_knmi, aes(distance, mean), method = "loess", col = "red", size = 1, span = 1, se = T, fill = "red") +
geom_smooth(data = prcp_mean_rdr, aes(distance, mean), method = "loess", col = "dark green", size = 1, span = 1, se = T, fill = "dark green") +
labs(x = "Distance (km)", y = "Mean (mm)") +
theme_bw()
prcp_cv_gpm_rdr <- prcp_prd_gpm_rdr[, mean(sd/mean), .(distance)]
colnames(prcp_cv_gpm_rdr)[2] <- "cv"
prcp_cv_knmi <- prcp_prd_knmi[, mean(sd/mean), .(distance)]
colnames(prcp_cv_knmi)[2] <- "cv"
prcp_cv_rdr <- prcp_prd_rdr[, mean(sd/mean), .(distance)]
colnames(prcp_cv_rdr)[2] <- "cv"
ggplot(prcp_cv_gpm_rdr, aes(distance, cv)) +
geom_smooth(method = "loess", col = "orange", size = 1, span = 1, se = T, fill = "orange") +
geom_smooth(data = prcp_cv_knmi, aes(distance, cv), method = "loess", col = "red", size = 1, span = 1, se = T, fill = "red") +
geom_smooth(data = prcp_cv_rdr, aes(distance, cv), method = "loess", col = "dark green", size = 1, span = 1, se = T, fill = "dark green") +
labs(x = "Distance (km)", y = "Coef. of Variation") +
theme_bw()
#Mean heavy daily precipitation for JAS
my_period <- rdr_prcp[time >= "2016-07-01" & prcp > 7.5, unique(time)]
prcp_prd_gpm_rdr <- gpm_rdr_prcp[time %in% my_period[1] & prcp > 7.5]
prcp_prd_gpm_rdr <- agg_prcp(prcp_prd_gpm_rdr, get_max_location(prcp_prd_gpm_rdr))
prcp_prd_gpm_rdr <- cbind(prcp_prd_gpm_rdr, time = my_period[1])
prcp_prd_knmi <- knmi_prcp[time %in% my_period[1] & prcp > 7.5]
prcp_prd_knmi <- agg_prcp(prcp_prd_knmi, get_max_location(prcp_prd_knmi))
prcp_prd_knmi <- cbind(prcp_prd_knmi, time = my_period[1])
prcp_prd_rdr <- rdr_prcp[time %in% my_period[1] & prcp > 7.5]
prcp_prd_rdr <- agg_prcp(prcp_prd_rdr, get_max_location(prcp_prd_rdr))
prcp_prd_rdr <- cbind(prcp_prd_rdr, time = my_period[1])
for(i in 2:length(my_period)){
prcp_day_gpm_rdr <- gpm_rdr_prcp[time %in% my_period[i]]
prcp_day_gpm_rdr <- agg_prcp(prcp_day_gpm_rdr, get_max_location(prcp_day_gpm_rdr))
prcp_day_gpm_rdr <- cbind(prcp_day_gpm_rdr, time = my_period[i])
prcp_prd_gpm_rdr <- rbind(prcp_prd_gpm_rdr, prcp_day_gpm_rdr)
prcp_day_knmi <- knmi_prcp[time %in% my_period[i] & prcp > 7.5]
prcp_day_knmi <- agg_prcp(prcp_day_knmi, get_max_location(prcp_day_knmi))
prcp_day_knmi <- cbind(prcp_day_knmi, time = my_period[i])
prcp_prd_knmi <- rbind(prcp_prd_knmi, prcp_day_knmi)
prcp_day_rdr <- rdr_prcp[time %in% my_period[i]]
prcp_day_rdr <- agg_prcp(prcp_day_rdr, get_max_location(prcp_day_rdr))
prcp_day_rdr <- cbind(prcp_day_rdr, time = my_period[i])
prcp_prd_rdr <- rbind(prcp_prd_rdr, prcp_day_rdr)
print(i)
}
prcp_mean_gpm_rdr <- prcp_prd_gpm_rdr[, mean(mean), .(distance)]
colnames(prcp_mean_gpm_rdr)[2] <- "mean"
prcp_mean_knmi <- prcp_prd_knmi[, mean(mean), .(distance)]
colnames(prcp_mean_knmi)[2] <- "mean"
prcp_mean_rdr <- prcp_prd_rdr[, mean(mean), .(distance)]
colnames(prcp_mean_rdr)[2] <- "mean"
ggplot(prcp_mean_gpm_rdr, aes(distance, mean)) +
geom_smooth(method = "loess", col = "orange", size = 1, span = 0.5, se = T, fill = "orange") +
geom_smooth(data = prcp_mean_knmi, aes(distance, mean),
method = "loess", col = "red", size = 1, span = 0.5, se = T, fill = "red") +
geom_smooth(data = prcp_mean_rdr, aes(distance, mean),
method = "loess", col = "dark green", size = 0.5, span = 1, se = T, fill = "dark green") +
labs(x = "Distance (km)", y = "Mean (mm)") +
theme_bw()
prcp_cv_gpm_rdr <- prcp_prd_gpm_rdr[, mean(sd/mean), .(distance)]
colnames(prcp_cv_gpm_rdr)[2] <- "cv"
prcp_cv_knmi <- prcp_prd_knmi[, mean(sd/mean), .(distance)]
colnames(prcp_cv_knmi)[2] <- "cv"
prcp_cv_rdr <- prcp_prd_rdr[, mean(sd/mean), .(distance)]
colnames(prcp_cv_rdr)[2] <- "cv"
ggplot(prcp_cv_gpm_rdr, aes(distance, cv)) +
geom_smooth(method = "loess", col = "orange", size = 1, span = 1, se = T, fill = "orange") +
geom_smooth(data = prcp_cv_knmi, aes(distance, cv), method = "loess", col = "red", size = 1, span = 1, se = T, fill = "red") +
geom_smooth(data = prcp_cv_rdr, aes(distance, cv), method = "loess", col = "dark green", size = 1, span = 1, se = T, fill = "dark green") +
labs(x = "Distance (km)", y = "Coef. of Variance") +
theme_bw()
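# the three day-by-day loops above share one pattern; a hedged helper sketch
# (assumes data.table is attached; agg_prcp/get_max_location come from the
# spatial_tools.R sourced at the top):
# agg_prcp_daily <- function(dat, period) {
#   rbindlist(lapply(period, function(d) {
#     day <- dat[time %in% d]
#     cbind(agg_prcp(day, get_max_location(day)), time = d)
#   }))
# }
# prcp_prd_rdr <- agg_prcp_daily(rdr_prcp, my_period)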
#p3 <- map_plot(radar = rdr_prcp, satellite = gpm_rdr_prcp,ground = knmi_prcp, date = period)
#Look more into this!!!
#period <- my_period
# <- gpm_rdr_prcp[time %in% period]
#prcp_prd_gpm_rdr <- agg_prcp(prcp_prd_gpm_rdr, get_max_location(prcp_prd_gpm_rdr))
#prcp_prd_knmi <- knmi_prcp[time %in% period]
#prcp_prd_knmi <- agg_prcp(prcp_prd_knmi, get_max_location(prcp_prd_knmi))
#prcp_prd_rdr <- rdr_prcp[time %in% period]
#prcp_prd_rdr <- agg_prcp(prcp_prd_rdr, get_max_location(prcp_prd_rdr))
#my_period <- my_period[c(1, 5, 15, 30, 90, length(my_period))]
ggplot(prcp_period_gpm_knmi, aes(log10(distance), log10(sum))) +
geom_line(col = "orange", alpha = 0.5, size = 1) +
geom_line(data = prcp_period_knmi, aes(log10(distance), log10(sum)), col = "red", size = 1, alpha = 0.5) +
labs(x = "Distance (km)", y = "Precipitation sum (mm)") +
theme_bw()
ggplot(prcp_period_gpm_rdr, aes(log10(distance), log10(sum))) +
geom_line(col = "orange", size = 1) +
geom_line(data = prcp_period_rdr, aes(log10(distance), log10(sum)), col = "dark green", size = 1) +
labs(x = "Distance (km)", y = "Precipitation sum (mm)") +
theme_bw()
## Comparison of gravity centers
gravity_center_gpm <- get_gravity_center(gpm_rdr_prcp[time %in% my_date & prcp > 0], no_points)
gravity_center_knmi <- get_gravity_center(knmi_prcp[time %in% my_date & prcp > 0], no_points)
gravity_center_rdr <- get_gravity_center(rdr_prcp[time %in% my_date & prcp > 0], no_points)
gDistance(SpatialPoints(gravity_center_gpm), SpatialPoints(gravity_center_knmi)) * 111 # degrees -> km (approx.)
gDistance(SpatialPoints(gravity_center_gpm), SpatialPoints(gravity_center_rdr)) * 111
gDistance(SpatialPoints(gravity_center_knmi), SpatialPoints(gravity_center_rdr)) * 111
#Can also compare sums of light vs heavy as light cells are more
|
bad168de1dc4a7b0292349f88533d7b9dc56bbd1
|
a92abde35302bdf856c9c81a89634116c4c5f115
|
/count_elements.R
|
d5039489adde4189db3f9285fdf79e154bda95e7
|
[] |
no_license
|
dashukvita/R
|
4afa08b8f3f783cedcd29323fe2902926e843500
|
172b0426c839d994b45d8585be42ee3644ced118
|
refs/heads/master
| 2022-12-18T07:38:28.256711
| 2020-09-29T14:44:37
| 2020-09-29T14:44:37
| 215,611,264
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 566
|
r
|
count_elements.R
|
# Let x be an integer vector. Write a function that returns a matrix with two
# rows: the first row lists all distinct elements of the vector in ascending
# order, and the second row gives the frequencies (number of occurrences) of
# those elements.
count_elements <- function(x) {
  tab <- table(x)                  # names = sorted distinct values, entries = counts
  rbind(as.numeric(names(tab)),    # row 1: distinct elements, ascending
        as.vector(tab))            # row 2: their frequencies
}
x <- c(5, 2, 7, 7, 7, 2, 0)        # example input so the call below runs
count_elements(x)
|
37dd1297fb2e403d626dedf4572db3d27f91c2dc
|
b45845ee528ed22b433c5e876c87d088ce5ac860
|
/essai_R_API_NIRS.R
|
054690c9aa126eb89bedbb24b85f0398d2237da0
|
[] |
no_license
|
martinEcarnot/vrac
|
00445d290377b68c812173c20761094654d5878e
|
47bdfc47fd15454e30fdcf777c6d461a95cb38dc
|
refs/heads/master
| 2022-12-21T10:01:16.702031
| 2022-12-13T08:07:24
| 2022-12-13T08:07:24
| 232,131,113
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 561
|
r
|
essai_R_API_NIRS.R
|
library(httr)
library(jsonlite)
response <- POST(url = "http://moleans.cirad.fr:8080/nirs_api/login", query = list("user"="test", "password"="test"))
json = fromJSON(rawToChar(response$content))
token <- json$token
t <- paste("Bearer", token)
print(json)
response <- GET(url = "http://moleans.cirad.fr:8080/nirs_api/spectra", add_headers("Authorization"=t), query = list("type"="spectrum"))
response <- GET(url = "http://moleans.cirad.fr:8080/nirs_api/spectra/1", add_headers("Authorization"=t))
json = fromJSON(rawToChar(response$content))
print(json)
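# a defensive variant would check the HTTP status before parsing the body:
# stopifnot(httr::status_code(response) == 200)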
|
e057cbf73775a43afe343d19298c129b662b4ddc
|
b06eae406fead055517e9c1cd6beb919773ca2bc
|
/scripts/Dev_Scripts/nice_mapping.R
|
2d41259d4f6fc279d220092ad406a5d6d1d81f2b
|
[] |
no_license
|
HiDef-Aerial-Surveying/SNH_Moray_Firth
|
8b242fb77cae0b56e111ede66f08b443ee26e82b
|
22afa9016221f5146e1a54deb0f4454f0a0cf628
|
refs/heads/master
| 2022-11-15T19:27:18.641971
| 2020-07-07T11:48:51
| 2020-07-07T11:48:51
| 261,455,303
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 5,509
|
r
|
nice_mapping.R
|
##############################################
### Create survey area maps ###
### Project HC0049
### Grant Humphries & Raul Vilela
### May 2020
### HiDef Aerial Surveying, BioConsult SH
##############################################
# Libraries ---------------------------------------------------------------
source("load_libs.R")
LIBS <- c("tidyverse","foreach","rgeos","sp","maptools","rgdal","raster","Hmisc","ggthemes",
"Cairo","doSNOW","readxl","INLA","mapproj","inlabru")
Load_Libs(LIBS)
# Species select ----------------------------------------------------------
source('scripts/species_list.R')
SppName <- 'Common Scoter'
if(length(names(which(Species==SppName)))>0){
Spp <- names(which(Species==SppName))
}else{
print("WARNING - CHECK SPELLING OF SppName")
}
# Spatial Projections -------------------------------------------------------------
WGS84<-CRS("+init=epsg:4326")
UTM30<-CRS("+init=epsg:32630")
# Load Shapefiles ---------------------------------------------------------
boundary.shape <- readOGR(dsn="./Data/Shapefile/Moray_Firth_Area_WGS84.shp",
layer="Moray_Firth_Area_WGS84")
boundaryUTM30 <- spTransform(boundary.shape,UTM30)
boundary_DF <- fortify(boundaryUTM30)
Transects <- readOGR(dsn="./Data/Aerial_Surveys/2020 - Month 01 - Survey 01/Output/Zone87_M01_S01_20_Output/Zone87_M01_S01_20_Output-Day1-Transects.shp",
layer="Zone87_M01_S01_20_Output-Day1-Transects")
TransectsUTM30 <- spTransform(Transects,UTM30)
transdf <- data.frame(coordinates(TransectsUTM30))
load(file = 'D:/GIS_DATA/UK_coastline.rda')
coastline_sp_utm <- spTransform(tt, UTM30)
coastData_DF <- fortify(coastline_sp_utm)
SppDatajan <- readOGR(dsn="./Data/Aerial_Surveys/2020 - Month 01 - Survey 01/Output/Zone87_M01_S01_20_Output/Zone87_M01_S01_20_Output-Day1-CentCount.shp",
layer="Zone87_M01_S01_20_Output-Day1-CentCount")
SppDatajanUTM30 <- spTransform(SppDatajan,UTM30)
Sppdfjan <- data.frame(coordinates(SppDatajanUTM30))
SppDatamar <- readOGR(dsn="./Data/2020 - Month 03 - Survey 01/Output/Zone87_M03_S01_20_Output/Zone87_M03_S01_20_Output-Day1-CentCount.shp",
layer="Zone87_M03_S01_20_Output-Day1-CentCount")
SppDatamarUTM30 <- spTransform(SppDatamar,UTM30)
Sppdfmar <- data.frame(coordinates(SppDatamarUTM30))
# Generate transect map ---------------------------------------------------
xlims <- c(min(transdf$coords.x1)*0.99, max(transdf$coords.x1)*1.01)
ylims <- c(min(transdf$coords.x2)*0.999, max(transdf$coords.x2)*1.001)
G <- ggplot(transdf,aes(x=coords.x1,y=coords.x2)) +
geom_polygon(aes(x = long, y = lat, group = group), data = coastData_DF, fill = "darkolivegreen3",
col = "grey35", alpha = 0.7) +
geom_polygon(aes(x=long,y=lat,group=group,fill="SPA"),data=boundary_DF,
col="black",alpha=0.6)+
scale_fill_manual(name="",values=c("SPA" = "lightblue"))+
geom_point(size=0.8,pch=21,fill="orange")+
scale_x_continuous(breaks=seq(425000,525000,25000),label=seq(4.25,5.25,.25))+
scale_y_continuous(breaks=seq(6380000,6440000,20000),label=seq(6.38,6.44,.02))+
xlab(expression(X(meters~x~10^{"6"})))+
ylab(expression(Y(meters~x~10^{"7"})))+
coord_equal(xlim = xlims, ylim = ylims)+
ggsn::scalebar(boundary_DF,location = "bottomright",st.size = 5,height = 0.04,dist = 20,dist_unit = "km",transform = FALSE)+
ggsn::north(boundary_DF,scale = 0.15,location = "topleft")+
ggtitle("Transects")+
theme_gdocs()+
theme(
#panel.grid.major = element_blank(),
panel.grid.minor = element_blank(),
panel.border = element_rect(colour = "black"),
plot.background = element_blank()
)
G
ggsave(plot=G,filename="Transects_Map.jpeg",device = "jpeg",width=10,height=10)
# Generate species map ----------------------------------------------------
source('scripts/species_list.R')
SppName <- 'Long-tailed Duck'
if(length(names(which(Species==SppName)))>0){
Spp <- names(which(Species==SppName))
}else{
print("WARNING - CHECK SPELLING OF SppName")
}
Sppdfjan$spp <- as.vector(SppDatajanUTM30@data[,Spp])
xlims <- c(min(transdf$coords.x1)*0.99, max(transdf$coords.x1)*1.01)
ylims <- c(min(transdf$coords.x2)*0.999, max(transdf$coords.x2)*1.001)
G <- ggplot(Sppdfjan,aes(x=coords.x1,y=coords.x2)) +
#geom_polygon(aes(x = long, y = lat, group = group), data = coastData_DF, fill = "darkolivegreen3",
# col = "grey35", alpha = 0.7) +
geom_polygon(aes(x=long,y=lat,group=group,fill="SPA"),data=boundary_DF,
col="black",alpha=0.6)+
scale_fill_manual(name="",values=c("SPA" = "lightblue"))+
geom_point(aes(size=spp),pch=21,fill="orange")+
scale_size_continuous(range=c(0.5,8),name="Count")+
scale_x_continuous(breaks=seq(425000,525000,25000),label=seq(4.25,5.25,.25))+
scale_y_continuous(breaks=seq(6380000,6440000,20000),label=seq(6.38,6.44,.02))+
xlab(expression(X(meters~x~10^{"6"})))+
ylab(expression(Y(meters~x~10^{"7"})))+
coord_equal(xlim = xlims, ylim = ylims)+
ggsn::scalebar(boundary_DF,location = "bottomright",st.size = 5,height = 0.04,dist = 20,dist_unit = "km",transform = FALSE)+
ggsn::north(boundary_DF,scale = 0.15,location = "topleft")+
ggtitle(SppName)+
theme_gdocs()+
theme(
#panel.grid.major = element_blank(),
panel.grid.minor = element_blank(),
panel.border = element_rect(colour = "black"),
plot.background = element_blank()
)
G
ggsave(plot=G,filename="Output.jpeg",device = "jpeg",width=10,height=10)
|
ab753a08d95546a85a848bd79b51af2f7960a156
|
b82009d15790e7a71dde583da6169f5ef478873e
|
/sqtl/optimal_covariate/calc_FDR.R
|
e9c2a0ae41314b86ad77e8a28f494a353c496a20
|
[] |
no_license
|
boxiangliu/rpe
|
9233e5b0ac6b9761fbb34e44a462b9e6cae87889
|
2125bf34e029d16a484f8530381af25bb9619d2d
|
refs/heads/master
| 2021-10-26T02:32:10.477848
| 2019-04-09T19:31:57
| 2019-04-09T19:31:57
| 51,119,068
| 2
| 1
| null | null | null | null |
UTF-8
|
R
| false
| false
| 808
|
r
|
calc_FDR.R
|
library(data.table)
library(foreach)
library(dplyr)
library(dtplyr)
args=commandArgs(T)
in_fn=args[1]
out_fn=args[2]
geno_pc=as.integer(args[3])
splice_pc=as.integer(args[4])
message('INFO - reading p-values')
fastqtl=fread(sprintf('zcat %s',in_fn),select=c(1,2,4),col.names=c('cluster','snp','p'))
fastqtl=fastqtl%>%
group_by(cluster)%>%
mutate(pmin=min(p))%>%
filter(p==pmin)
stopifnot(all(fastqtl$p==fastqtl$pmin))
message('INFO - calculating FDR')
padj=p.adjust(fastqtl$p,method='fdr')
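# method='fdr' is Benjamini-Hochberg: with m tests, padj_(i) = min over j >= i
# of m * p_(j) / j for the ordered p-values, so counting padj < FDR below
# controls the expected false-discovery proportion at that level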
message('INFO - counting significant sQTL')
sig=foreach(FDR=c(1e-8,1e-6,1e-4,0.01,0.05),.combine='rbind')%do%{
data.table(FDR=FDR,sig=sum(padj<FDR,na.rm=TRUE))
}
sig$geno_pc=geno_pc
sig$splice_pc=splice_pc
message('INFO - writing output')
fwrite(sig,out_fn,sep='\t',col.names=FALSE)
message('INFO - done')
|
6ab01d12b2beb11af85b18471681dd44e8f50bd1
|
062fe18dd13549672145247a24c2c0be90dc0a63
|
/man/get_origin_h3_index_from_unidirectional_edge.Rd
|
ad216d51cde0435e7f30e35831515fa5d62d360d
|
[
"Apache-2.0"
] |
permissive
|
crazycapivara/h3-r
|
48d5b70689578992bab6995f54c326ffdb86fc30
|
6b658e832f6907581d9d5c5296d611c4e4cf372a
|
refs/heads/master
| 2022-08-19T20:19:50.881920
| 2022-08-08T08:04:07
| 2022-08-08T08:04:07
| 165,286,703
| 61
| 10
|
NOASSERTION
| 2022-07-31T12:15:40
| 2019-01-11T17:58:47
|
R
|
UTF-8
|
R
| false
| true
| 482
|
rd
|
get_origin_h3_index_from_unidirectional_edge.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/h3_uniedge.R
\name{get_origin_h3_index_from_unidirectional_edge}
\alias{get_origin_h3_index_from_unidirectional_edge}
\title{Get the origin hexagon from the unidirectional H3 edge index.}
\usage{
get_origin_h3_index_from_unidirectional_edge(h3_edge_index)
}
\arguments{
\item{h3_edge_index}{character scalar; H3 edge index}
}
\description{
Get the origin hexagon from the unidirectional H3 edge index.
}
|
70666d670a873f03ab1d514b0eb66eff444e8607
|
a5d8d28a41439d2d00a6d8e6ed8deef0bc8028e7
|
/man/engineEdgeBayes.Rd
|
3d4008691d3b91e53814702dcd71b4f74cdff0c8
|
[] |
no_license
|
SSDALab/dnr
|
eedee42774029f4d30b833f6069cd4e0555c2d4f
|
4e9139421251be4e4e11c01f7c24fb0054eb0157
|
refs/heads/master
| 2021-03-27T19:32:15.492187
| 2020-12-27T00:47:22
| 2020-12-27T00:47:22
| 71,925,977
| 7
| 0
| null | 2020-12-27T00:48:31
| 2016-10-25T18:17:19
|
R
|
UTF-8
|
R
| false
| true
| 2,765
|
rd
|
engineEdgeBayes.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/engine_bayes.R
\name{engineEdgeBayes}
\alias{engineEdgeBayes}
\title{Implementation of simulation engine for dynamic networks using smoothing estimates of change statistics.}
\usage{
engineEdgeBayes(
start_network,
inputcoeff,
ns,
model.terms,
model.formula,
graph_mode,
group,
intercept,
exvar,
maxlag,
lagmat,
ylag,
lambda = NA,
method = "bayesglm",
alpha.glmnet,
paramout = TRUE,
Theta = NA
)
}
\arguments{
\item{start_network}{Initial list of networks}
\item{inputcoeff}{coefficient vector}
\item{ns}{number of time points for simulation}
\item{model.terms}{model terms in formula}
\item{model.formula}{model formula (ergm)}
\item{graph_mode}{'digraph' by default}
\item{group}{group terms}
\item{intercept}{intercept terms}
\item{exvar}{extraneous covariates}
\item{maxlag}{maximum lag}
\item{lagmat}{lag matrix}
\item{ylag}{lag vector for network lag terms}
\item{lambda}{NA}
\item{method}{'bayesglm' by default}
\item{alpha.glmnet}{NA}
\item{paramout}{logical; if \code{TRUE}, parameter estimates are returned.}
\item{Theta}{prior probability matrix.}
}
\description{
Implementation of simulation engine for dynamic networks using smoothing estimates of change statistics.
}
\examples{
\dontrun{
startNet <- rdNets[1:50]
model.terms=c("triadcensus.003", "triadcensus.012", "triadcensus.102", "triadcensus.021D", "gwesp")
model.formula = net~triadcensus(0:3)+gwesp(alpha=0, fixed=FALSE, cutoff=30)-1
graph_mode <- 'digraph'
group <- 'dnc'
alpha.glmnet <- 1
method <- 'bayesglm'
maxlag <- 3
lambda <- NA
intercept <- "edges"
cdim <- length(model.terms)
lagmat <- matrix(sample(c(0,1),(maxlag+1)*cdim,replace = TRUE),ncol = cdim)
ylag <- rep(1,maxlag)
lagmat[1,] <- rep(0,ncol(lagmat))
out.coef <- paramEdge(input_network = startNet,
model.terms = model.terms,
model.formula = model.formula,
graph_mode='digraph',
group=group,intercept = intercept,
exvar=NA,
maxlag = maxlag,
lagmat = lagmat,
ylag = ylag,
lambda = NA, method='bayesglm',
alpha.glmnet=1)
inputcoeff <- out.coef$coef$coef.edge
nvertex <- 47 ##find vertex here
ns <- 1
exvar <- NA
Theta <- matrix(0, nvertex, nvertex) # accumulator must be initialised before the summation below
for(i in seq_along(startNet)) Theta <- Theta + startNet[[i]][,]
Theta <- Theta/length(startNet)
Theta <- thresh(Theta)
out.bayes <- engineEdgeBayes(start_network=startNet,
inputcoeff=inputcoeff,
ns=ns,
model.terms=model.terms,
model.formula=model.formula,
graph_mode=graph_mode,
group=group,intercept=intercept,
exvar=exvar,
maxlag=maxlag,
lagmat=lagmat,
ylag=ylag,
lambda = NA, method='bayesglm',
alpha.glmnet=alpha.glmnet,
Theta = Theta)
}
}
|
2413238d6d9334488af37d89edff994f3b44432e
|
61180649c781ca23ee434754577acea001eb4bc0
|
/R/faostat3.R
|
866f8decac537f499341125e4a340b0408e575c5
|
[] |
no_license
|
malexan/fclhs
|
87e0b4c9b86eb1c954644bbdb699699677d4163b
|
f93e69dd96bbd15bdbc68a5c52db5fe0dde3a0fa
|
refs/heads/master
| 2020-05-31T15:48:22.614152
| 2015-08-09T07:59:08
| 2015-08-09T07:59:08
| 27,025,796
| 0
| 2
| null | null | null | null |
UTF-8
|
R
| false
| false
| 3,590
|
r
|
faostat3.R
|
#' Retrieve data from the FAOSTAT3 FENIX API
#'
#' JS source: https://gist.github.com/Kalimaha/80da85e6d7cbdbf92557
#'
#' @return A data frame of query results; the \code{Value} column is numeric.
getfenix <- function(list1Codes = NULL,
list2Codes = NULL,
list3Codes = NULL,
list4Codes = NULL,
list5Codes = NULL,
list6Codes = NULL,
list7Codes = NULL,
nullValues = FALSE,
thousand = "",
decimal = ".",
decPlaces = 2,
datasource = "faostatdb",
domainCode = "TM",
lang = "E",
url = "http://faostat3.fao.org/wds/rest/procedures/data") {
payload <- list(datasource = datasource,
domainCode = domainCode,
lang = lang,
list1Codes = list1Codes,
list2Codes = list2Codes,
list3Codes = list3Codes,
list4Codes = list4Codes,
list5Codes = list5Codes,
list6Codes = list6Codes,
list7Codes = list7Codes,
thousand = thousand,
decimal = decimal,
decPlaces = decPlaces,
limit = 10000000)
lnames <- names(payload)
payload <- lapply(names(payload), function(lname) {
x <- payload[[lname]]
if(stringr::str_detect(lname, "^list[1-7]Codes$")) {
if(is.null(x)) return(integer()) # Empty value has to be in [] after AJAXing
return(paste0("\'", as.character(x), "\'")) # FENIX wants additional single quotes around values in arrays
}
jsonlite::unbox(x) # Removing [] around settings' values
})
names(payload) <- lnames
payload <- as.character(jsonlite::toJSON(payload))
resp <- httr::POST(url, body = list(payload = payload), encode = "form")
t <- jsonlite::fromJSON(httr::content(resp, as = "text"))
tnames <- make.names(t[1,])
t <- as.data.frame(t[-1,], stringsAsFactors = F)
colnames(t) <- tnames
t <- t[, !(colnames(t) %in% c("NoRecords", "RecordOrder", "Year.Code",
"Var1Order", "Var2Order",
"Var3Order", "Var4Order",
"Var5Order"))]
t$Value <- as.numeric(t$Value)
t
}
# https://github.com/mkao006/FAOSTATpackage/blob/28bd0ba5606ea8570c0f33f919a83105de54c8ac/FAOSTAT/R/getFAO.R
# http://faostat3.fao.org/wds/api?db=faostatdb&select=A.AreaCode[FAOST_CODE],D.year[Year],D.value[Value]&from=data[D],element[E],item[I],area[A]&where=D.elementcode(2610),D.itemcode(15),D.domaincode('TM'),D.year(2011),JOIN(D.elementcode:E.elementcode),JOIN(D.itemcode:I.itemcode),JOIN(D.areacode:A.areacode)
# Available output fields (suffixes E/F/S/C/A/R denote language variants):
# AreaCode, ElementListCode, ItemCode, ItemName*, ElementListName*, AreaName*,
# ElementCode, Year, Value, Flag, LoadDate, ItemLevel, DomainCode, AreaLevel,
# AreaUNDPCode, GroupCode, ElementName*, ItemDescription*, AreaISO2Code,
# AreaISO3Code, Notes, AreaM49Code, AreaWBCode, AreaStartYear, AreaEndYear,
# ElementUnitName*, UnitCode, UnitName*, ElementDescription*
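# Editorial usage sketch (kept as comments): the dimension codes below are
# placeholders, since FAOSTAT list codes vary by domain; treat as illustrative.
# res <- getfenix(list1Codes = "2", list7Codes = "2011", domainCode = "TM")
# head(res)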
|
0e379103e590405e915a7c103b24b602f28b5875
|
e11c2dce6c69111a959dec53548b4e9c594ad5dd
|
/homework_4.R
|
a6d80ac752033f192de7220447a4a59e6e432c87
|
[] |
no_license
|
samarprit/Useful-programs
|
ddbcfe5f87371c9672769031f0a02201d45244a4
|
47150a85d99877d0224530c2cfb77586376edf7a
|
refs/heads/master
| 2021-01-19T23:56:56.511298
| 2018-09-01T19:51:13
| 2018-09-01T19:51:13
| 89,053,141
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,601
|
r
|
homework_4.R
|
datapath<-"E:/business analytics/Uchicago/subjects/machine learning/machine_learning_04_data/"
data<-read.csv(paste0(datapath,'test_sample.csv'))
set.seed(2001)
nEval = 20 # last nEval values - evaluation set
N=120
n = N - nEval # n values in train set
xTrain = data # working copy; polynomial columns of X are appended below
nFold = 10
testSize = floor(n/nFold)
resCV<-NULL
for(k in 1:6)
{ # k is the degree of fitted polynomial
if(k>1)
{
# only to create polynomial of degree k
xTrain = cbind(xTrain,xTrain$X^k)
names(xTrain)[ncol(xTrain)] = paste0('x',k)
}
for(i in 1:nFold)
{
# select train and test sets
testInd = (1+(i-1)*testSize):(i*testSize) #make test fold
#indices run 1 to 10, then 11 to 20, and so on
train = xTrain[(1:n)[-testInd],] #drop the 10 held-out rows
test = xTrain[testInd,] #keep only the 10 held-out rows
model <- lm(Y~.,data=train)
resCV[i] = sum((predict(model,test)-test$Y)^2)
}
cat(k,mean(resCV),'\n') #cat formats output without the [1] index prefix that print adds
#At every iteration it will print the value of the MSE obtd via CV
}
# xTest<-data[which(is.na(data$Y)==T),]
# xTest$'x2'<-xTest$X^2
# xTest$'x3'<-xTest$X^3
# xTest$'x4'<-xTest$X^4
# xTest$'x5'<-xTest$x^5
for_model<-xTrain[is.na(data$Y)==F,]
for_model<-lm(Y~(X+x2+x3),for_model)
predY<-predict.lm(for_model,newdata=xTrain[which(is.na(xTrain$Y)==T),])
fittedDegree<-3
res = matrix(c(fittedDegree,predY),ncol = 1,
dimnames = list(c("Degree",paste0(data$X[is.na(data$Y)])),c())
)
write.table(res,"W4answer.csv",quote=F,col.names = F,sep = ",")
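# Editorial design note: the manual power columns above can equivalently be
# built with poly(); degree 3 shown, assuming the same data frame holds only
# the X and Y columns used above.
fit_poly <- lm(Y ~ poly(X, 3, raw = TRUE), data = data[!is.na(data$Y), ])
pred_poly <- predict(fit_poly, newdata = data[is.na(data$Y), ])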
|
1b06d0b43f5e06415f734ca20d126c7b367e80fe
|
14d9d0ecb5ed703ec4644c7eb4cb8cf5ff5174d7
|
/Plot3.R
|
1e7dbcec01ba4c46f902d5be68d85d7e4db6cdb9
|
[] |
no_license
|
ogamonal/ExData_Plotting1
|
4daa612a0b8e3e918e2a17aa4567f27992c81bec
|
2e34c733756621d82f90cee49be8b630dbc82ff3
|
refs/heads/master
| 2022-12-01T03:46:31.307117
| 2020-08-19T23:10:23
| 2020-08-19T23:10:23
| 288,854,295
| 0
| 0
| null | 2020-08-19T23:10:24
| 2020-08-19T22:54:11
| null |
UTF-8
|
R
| false
| false
| 799
|
r
|
Plot3.R
|
FileName <- "household_power_consumption.txt"
Data <- read.table(FileName, sep = ";", header = TRUE, na.strings = "?",
nrows = -1)
Data <- Data[(Data$Date == "1/2/2007" | Data$Date == "2/2/2007"), ]
datetime <- paste(Data$Date, Data$Time)
Data$DateTime <- strptime(datetime, "%d/%m/%Y %H:%M:%S")
png("plot3.png",width = 480, height = 480)
with(Data, plot(DateTime, Sub_metering_1, type = "l", xlab = "", ylab = "Energy sub metering"))
with(Data, lines(DateTime, Sub_metering_2,col='Red'))
with(Data, lines(DateTime, Sub_metering_3,col='Blue'))
legend("topright", pch = -1, col=c("black", "red", "blue"), lty=1, lwd=2,
legend=c("Sub_metering_1", "Sub_metering_2", "Sub_metering_3"))
dev.off() ## Don't forget to close the PNG device!
|
1f540f2f19fffd3dfb127304d58da283902395e3
|
72f306ba9228c4723f3326c1ad3b3bcd20318c25
|
/R/connectivity.r
|
b592154541be01907cc2aab1623cb44bc06e2604
|
[] |
no_license
|
ashwini06/NEArender
|
a312a3cc13e0033fef983f2af053a465e09bd6e0
|
62df5386c203103245ca514c1d7b559238129dd7
|
refs/heads/master
| 2021-05-02T18:42:32.642427
| 2018-03-28T11:58:35
| 2018-03-28T11:58:35
| 64,827,884
| 2
| 1
| null | 2018-04-06T09:03:06
| 2016-08-03T08:09:19
|
R
|
UTF-8
|
R
| false
| false
| 3,389
|
r
|
connectivity.r
|
#' Connectivity
#'
#' Function for plotting node degree distribution in order to test if the network is scale-free
#' @param NET Input network file.
#' @param Lowercase If node IDs should be rendered lower-case (Default:1, i.e. 'yes').
#' @param col.1 Number of column where 1st node of each edge should be found (only needed when NET is a text file rather than a list).
#' @param col.2 Number of column where 2nd node of each edge should be found (only needed when NET is a text file rather than a list, i.e. passed down to import.net).
#' @param col.score Number of column where edge confidence score is found (only needed when NET is a text file rather than a list, i.e. passed down to import.net).
#' @param min.score Minimum confidence score for an edge to be included in the network (is alternative to n.top and is only used when NET is a text file rather than a list, i.e. passed down to import.net).
#' @param n.top Number of edges to be included in the network, top when ranked by confidence score (is alternative to min.score and only used when NET is a text file rather than a list, i.e. passed down to import.net).
#' @param echo If messages about execution progress should appear.
#' @param main title name for the plot, default: "Connectivity plot"
#' @param hex If the node degree distribution should be presented as density plot using package hexbin.
#' @seealso \code{\link{benchmark}} and \code{\link{import.net}}
#' @examples
#' file <- system.file("extdata", "Connectivity.FC1_full", package = "NEArender")
#' \donttest{
#' connect <- connectivity(file,hex=TRUE)
#' }
#' @import hexbin
#' @importFrom graphics axis plot.new
#' @export
connectivity <- function (NET, Lowercase = 1, col.1 = 1, col.2 = 2, col.score=3, echo=1, main="Connectivity plot", min.score=NA, n.top = NA, hex=FALSE) {
if (is.list(NET)) {
net.list <- NET;
} else {
print("Importing network from text file:");
net.list <- import.net(NET, Lowercase = Lowercase, col.1 = col.1, col.2 = col.2, col.score = col.score, min.score=min.score, n.top = n.top, echo = echo);
}
if (net.list$Ntotal < 300) {
	stop(paste("The network has ", net.list$Ntotal, " nodes, which is too few to analyze...", sep=""));
}
c0 <- unlist(sapply(net.list$links, length))
h1 <- hist(c0, breaks=seq(from=min(c0-1, na.rm=T), to=max(c0, na.rm=T)+1, by=1), plot=F)
Br = c(0, 2**seq(from=log2(min(c0, na.rm=T)), to=log2(max(c0, na.rm=T))+1, by=1));
t0 <- table(c0)
X = log2(as.numeric(names(t0)));
Y = log2(t0);
pick = which(!is.infinite(X) & !is.infinite(Y) & !is.na(X) & !is.na(Y));
X = X[pick];
Y = Y[pick];
if (hex) {
plot.new()
plot(hexbin(X, Y, xbins = 2*(log10(net.list$Ntotal) + 1) - 1), xlab="log2(edges per node)", ylab="log2(no. of nodes)", main=main)
} else {
plot(X, Y, log="", type="b", cex=0.5, pch=19, xaxt = "n", yaxt = "n", xlab="Edges per node", ylab="No. of nodes", main=main);
intX = seq(from=round(X[1]), to=round(X[length(X)]), by=1);
intY = 2**seq(from=round(Y[1]), to=round(Y[length(Y)]), by=-1);
axis(1, at=intX, labels=2**intX, col.axis="black", las=2);
axis(2, at=intY, labels=2**intY, col.axis="black", las=2);
}
Lm <- lm(Y ~ X, na.action=na.exclude);
abline(coef = coef(Lm), col="red", lty=2,untf=T);
legend("topright", legend=paste(c("Edge cut-off =", "Ntop =", "Nedges =", "Nnodes ="), c(min.score, n.top, net.list$Ntotal, length(net.list$links)), collapse="\n"), bty = "n")
}
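# Editorial sketch of the list-input branch (kept as comments since this file
# belongs to a package): it assumes only the two fields the function reads,
# $Ntotal and $links; the synthetic network is illustrative.
# set.seed(1)
# deg <- pmax(1, rpois(500, 2)) # fake node degrees
# fake <- list(Ntotal = 500, links = lapply(deg, function(d) sample(500, d)))
# connectivity(fake, main = "Synthetic network")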
|
772e1b506d0de3bb8dd6ff03317b4fad3b0aebca
|
49502f7574419b1504b5cf697ba3ef6ccf592ded
|
/man/tlnise.Rd
|
3c28fd2d9d79be68fc6690a53f21e6fdc905827e
|
[] |
no_license
|
ktp-forked-repos/tlnise
|
49ca083bc562815227f532fbfbae9fede9bc0dcd
|
02437145a77f7c9303e93611a76aab1d377cd21a
|
refs/heads/master
| 2020-05-01T07:45:24.172881
| 2018-10-23T17:09:24
| 2018-10-23T17:09:24
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 3,594
|
rd
|
tlnise.Rd
|
\name{tlnise}
\alias{tlnise}
\alias{initTLNise}
\concept{Hierarchical Normal model}
\title{TLNise}
\description{
Two level Normal independent sampling estimation
}
\usage{
tlnise(Y, V, w = NA, V0 = NA, prior = NA, N = 1000, seed = NULL,
Tol = 1e-06, maxiter = 1000, intercept = TRUE, labelY = NA,
labelYj = NA, labelw = NA, digits = 4, brief = 1, prnt = TRUE)
}
\arguments{
\item{Y}{Jxp (or pxJ) matrix of p-dimensional Normal outcomes}
\item{V}{pxpxJ array of pxp Level-1 covariances (assumed known)}
\item{w}{Jxq (or qxJ) covariate matrix (adds column of 1's if not included and \code{intercept = TRUE})}
\item{V0}{"typical" Vj (default is average of Vj's)}
\item{prior}{prior parameter (see Details)}
\item{N}{number of Constrained Wishart draws for inference}
\item{seed}{seed for the random number generator}
\item{Tol}{tolerance for determining modal convergence}
\item{maxiter}{maximum number of EM iterations for finding mode}
\item{intercept}{if \code{TRUE}, an intercept term is included in the regression}
\item{labelY}{optional names vector for the J observations}
\item{labelYj}{optional names vector for the p elements of Yj}
\item{labelw}{optional names vector for covariates}
\item{digits}{number of significant digits for reporting results}
\item{brief}{level of output, from 0 (minimum) to 2 (maximum)}
\item{prnt}{controls printing during execution}
}
\details{
The prior is \eqn{p(B0) = |B0|^{(prior-p-1)/2}}{p(B_0) =
|B_0|^{(prior - p - 1)/2}}.
Note that for the prior distribution, \code{prior = -(p+1)}
corresponds to a uniform on level-2 covariance matrix A (default),
\code{prior = 0} is the Jeffreys' prior, and \code{prior = (p+1)}
is the uniform prior on shrinkage matrix B0.
}
\value{
\code{tlnise} returns a list, the precise contents of which depend on
the value of the \code{brief} argument. Setting \code{brief = 2}
returns the maximum amount of information. Setting \code{brief = 1}
or \code{brief = 0} returns a subset of that information.
If \code{brief = 2}, a list with the following components is returned:
\item{gamma}{matrix of posterior mean and SD estimates of Gamma, and
their ratios}
\item{theta}{pxJ matrix of posterior mean estimates for thetaj's}
\item{SDtheta}{pxJ matrix of posterior SD estimates for thetaj's}
\item{A}{pxp estimated posterior mean of variance matrix A}
\item{rtA}{p-vector of between group SD estimates}
\item{Dgamma}{rxr estimated posterior covariance matrix for Gamma}
\item{Vtheta}{pxpxJ array of estimated covariances for thetaj's}
\item{B0}{pxpxN array of simulated B0 values}
\item{lr}{N-vector of log density ratios for each B0 value}
\item{lf}{N-vector of log f(B0|Y) evaluated at each B0}
\item{lf0}{N-vector of log f0(B0|Y) evaluated at each B0 (f0 is the CWish envelope density for f)}
\item{df}{degrees of freedom for f0}
\item{Sigma}{scale matrix for f0}
\item{nvec}{number of matrices begun, diagonal and off-diagonal elements simulated to get N CWish matrices}
\item{nrej}{number of rejections that occurred at each step 1,..,p}
}
\references{
Everson PJ, Morris CN (2000). \dQuote{Inference for Multivariate Normal
Hierarchical Models,} Journal of the Royal Statistical Society, Series
B, 62 (6) 399--412.
}
\author{S-PLUS original by Philip Everson; R port by Roger D. Peng}
\examples{
x <- rnorm(10) ## Second level
y <- rnorm(10, x) ## First level means
out <- tlnise(Y = y, V = rep(1, 10), w = rep(1, 10), seed = 1234)
}
\keyword{models}% at least one, from doc/KEYWORDS
|
f57832ccf624998731d664b2ba293d7ef79bf210
|
a7cc4fad4231cee4636d625df0bd0c6d10876b45
|
/R/simpleNN.R
|
f1d0ed55d4272c79f06bf9facb156f9573947144
|
[] |
no_license
|
gumeo/mnistr
|
7aa977e81c1935f57520f76942eb63d782c04038
|
d05be259520c30205b6bb2e03842c871e70e4d71
|
refs/heads/master
| 2020-12-24T20:15:23.603520
| 2018-01-31T10:11:25
| 2018-01-31T10:11:25
| 59,769,535
| 3
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 11,406
|
r
|
simpleNN.R
|
#' Convert factor to a dummy/indicator matrix
#'
#' \code{class.ind} takes a factor vector of length $n$ with $k$ levels and
#' outputs an $n$ x $k$ indicator matrix.
#'
#' @param cl factor vector
#'
#' @return Appropriate indicator matrix.
#'
#' @examples
#' # Random factor data
#' dat <- factor(sample(1:3,100, replace=TRUE))
#' # Get the ggplot object
#' indicatorMatrix <- class.ind(dat)
#'
#' @export
class.ind <- function(cl) {
Ik=diag(length(levels(cl)))
x=Ik[as.numeric(cl),]
dimnames(x) <- list(names(cl), levels(cl))
x
}
#' Sigmoid activation function
#'
#' \code{sigmoid} can take a scalar, vector or matrix and output
#' elementwise application of the sigmoid nonlinearity/
#' squashing function. There is an additional boolean
#' flag for calculating the derivative. The deriv argument
#' is needed for activation functions.
#'
#' @param X numeric scalar, vector or matrix
#' @param deriv boolean indicating whether we should evaluate the function
#' or the derivative at the input.
#'
#' @return Same format as the input \code{X}.
#'
#' @examples
#' sigmoid(-5)
#' sigmoid(0)
#' sigmoid(5)
#' sigmoid(0,deriv=TRUE)
#'
#' @export
sigmoid <- function(X,deriv=FALSE){
if(deriv==FALSE){
return(1/(1+exp(-X)))
}else{
return(sigmoid(X)*(1 - sigmoid(X)))
}
}
#' Rectified Linear Unit activation function
#'
#' \code{reLU} can take a scalar, vector or matrix and output
#' elementwise application of the reLU nonlinearity/
#' squashing function. There is an additional boolean
#' flag for calculating the derivative. The deriv argument
#' is needed for activation functions.
#'
#' @param X numeric scalar, vector or matrix
#' @param deriv boolean indicating whether we should evaluate the function
#' or the derivative at the input.
#'
#' @return Same format as the input \code{X}.
#'
#' @examples
#' reLU(-5)
#' reLU(0)
#' reLU(5)
#' reLU(1,deriv=TRUE)
#'
#' @export
reLU <- function(X,deriv=FALSE){
if(deriv==FALSE){
X[X<0] <- 0
return(X)
}else{
X[X<0] <- 0
X[X>0] <- 1
return(X)
}
}
#' Softmax activation function
#'
#' \code{softmax} takes a vector and applies a softmax to it for
#' transforming it into probabilities. This is
#' normally used for an output layer in a neural
#' network for doing classification.
#'
#' @param X numeric vector or matrix (for minibatches)
#'
#' @return Same format as the input \code{X}.
#'
#' @examples
#' softmax(matrix(1:10,5,2))
#' softmax(matrix(1:10,2,5))
#'
#' @export
softmax <- function(X){
Z <- rowSums(exp(X))
X <- exp(t(X))%*%diag(1/Z)
return(t(X))
}
#' Matrix closure for neural network layers
#'
#' \code{matrixInLayer} creates getter and setter closures around a matrix
#'
#' @param init boolean for whether we need to initialize values.
#' @param rS number of rows.
#' @param cS number of columns.
#' @param initPos boolean to indicate whether values need to be initialized as positive.
#' @param initScale scalar divisor that shrinks the random initial values toward zero.
#'
#' @return environment with the functions \code{setter} and \code{getter}
#'
#' @examples
#' testFun <- matrixInLayer(TRUE,10,10,TRUE,10)
#'
#' @export
#' @keywords internal
matrixInLayer <- function(init = FALSE, rS, cS, initPos = FALSE, initScale = 100){
intMat <- matrix(0, nrow=rS, ncol=cS)
if(init == TRUE){
intMat <- matrix(stats::rnorm(rS*cS)/initScale,nrow = rS,ncol = cS)
if(initPos == TRUE){
intMat <- abs(intMat)
}
}
getter <- function(){
return(intMat)
}
setter <- function(nMat){
intMat <<- nMat
}
return(list2env(list(setter = setter, getter=getter)))
}
#' Fully connected layer for neural network
#'
#' \code{Layer} encapsulates all the data needed for a fully connected layer.
#'
#' @param activation function for neural network. Must be able to do elementwise
#' calculations on a matrix and have the parameter \code{deriv},
#' which is a boolean indicating whether the derivative should be
#' calculated
#' @param minibatchSize Number of samples used for estimating the gradient
#' @param sizeP vector of two values, number of inputs to this layer and number
#' of outputs from this layer, ignoring bias values.
#' @param is_input boolean indicating whether this is an input
#' @param is_output boolean indicating whether this is output
#' @param initPos boolean indicating whether weights should be initialized as positive
#' @param initScale scalar for initialising wieghts, e.g. if it is 100, then
#' the randomly sampled initial weights are scaled by 1/100.
#'
#' @return environment with the functions to set all the internal matricies and
#' a function to forward propagate through the layer.
#'
#' @examples
#' testLayer <- Layer(mnistr::reLU, 3, c(10,10),FALSE,FALSE,TRUE,1000)
#' testLayer$W$getter() # Check random weights
#' @export
#' @keywords internal
Layer <- function(activation, minibatchSize,sizeP,is_input=FALSE,is_output=FALSE, initPos, initScale){
# Matrix holding the output values
Z <- matrixInLayer(FALSE,minibatchSize,sizeP[1])
# Outgoing Weights
W <- matrixInLayer(TRUE,sizeP[1],sizeP[2],initPos=initPos, initScale=initScale)
# Input to this layer
S <- matrixInLayer(FALSE,minibatchSize,sizeP[1])
# Deltas for this layer
D <- matrixInLayer(FALSE,minibatchSize,sizeP[1])
# Matrix holding derivatives of the activation function
Fp <- matrixInLayer(FALSE,sizeP[1],minibatchSize)
# Propagate minibatch through this layer
forward_propagate <- function(){
if(is_input == TRUE){
return(Z$getter()%*%W$getter())
}
Z$setter(activation(S$getter()))
if(is_output == TRUE){
return(Z$getter())
}else{
# Add bias for the hidden layer
Z$setter(cbind(Z$getter(),rep(1,nrow(Z$getter()))))
Fp$setter(t(activation(S$getter(),deriv = TRUE)))
#print(Fp$getter())
return(Z$getter()%*%W$getter())
}
}
# Return a list of these functions
myEnv <- list2env(list(forward_propagate=forward_propagate, S = S,
D = D, Fp = Fp, W = W, Z = Z))
class(myEnv) <- 'Layer'
return(myEnv)
}
#' Multi-Layer Perceptron
#'
#' \code{mlp} is a function that generates an MLP, for which you can train on data.
#'
#' @param structNet Vector indicating the sizes of the nodes in the network. E.g.
#' \code{c(100,70,40,10)} would be a network with 100 input nodes,
#' 2 hidden layers with 70 and 40 neurons respectively, and an
#' output layer with 10 neurons.
#' @param minibatchSize Number of samples used for estimating the gradient
#' @param activation function for neural network. Must be able to do elementwise
#' calculations on a matrix and have the parameter \code{deriv},
#' which is a boolean indicating whether the derivative should be
#' calculated
#' @param initPos boolean indicating whether weights should be initialized as positive
#' @param initScale scalar for initialising weights, e.g. if it is 100, then
#' the randomly sampled initial weights are scaled by 1/100.
#'
#' @return environment with the functions to train the network, forward propagate, and
#' a list with all the layers. The forward_prop function can be used to do
#' predictions.
#'
#' @examples
#' testMLP <- mlp(c(10,10,10),5,mnistr::reLU,TRUE,1000)
#' testMLP$network[[1]]$W$getter() # Check random weights
#' @export
mlp <- function(structNet, minibatchSize,activation, initPos =FALSE, initScale=100){
num_layers <- length(structNet)
#Create the network
layers <- list()
for(i in 1:length(structNet)){
if(i == 1){#inp layer
layers[[i]] <- mnistr::Layer(activation, minibatchSize, c(structNet[1]+1,structNet[2]),is_input=TRUE,initPos = initPos,initScale=initScale)
}else if(i == length(structNet)){
layers[[i]] <- mnistr::Layer(softmax, minibatchSize, c(structNet[num_layers],structNet[num_layers]),is_output=TRUE,initPos = initPos,initScale=initScale)
}else{
layers[[i]] <- mnistr::Layer(activation, minibatchSize, c(structNet[i]+1,structNet[i+1]),initPos = initPos,initScale=initScale)
}
}
print('Layers have been initialized!')
forward_prop <- function(dataBatch){
# Add bias to the input
layers[[1]]$Z$setter(cbind(dataBatch,rep(1,nrow(dataBatch))))
for(i in 1:(num_layers-1)){
layers[[i+1]]$S$setter(layers[[i]]$forward_propagate())
}
return(layers[[num_layers]]$forward_propagate())
}
backwards_prop <- function(yhat,labels){
layers[[num_layers]]$D$setter(t(yhat-labels))
# Special case when we have no hidden layers
val <- num_layers-1
if(val < 2){
return()
}
for(i in (num_layers-1):2){
W_nobias <- layers[[i]]$W$getter()
W_nobias <- W_nobias[1:(nrow(W_nobias)-1),]
mat <- layers[[i]]$Fp$getter()
layers[[i]]$D$setter((W_nobias%*%layers[[i+1]]$D$getter())*mat)
}
}
update_weights <- function(eta){
for(i in 1:(num_layers-1)){
W_grad <- -eta*t(layers[[i+1]]$D$getter()%*%layers[[i]]$Z$getter())
layers[[i]]$W$setter(layers[[i]]$W$getter()+W_grad)
}
}
# Labels here as dummy matrix
train <- function(trainData, trainLabels, num_epochs, eta, cvSplit = 0.3){
cvInds <- sample(1:nrow(trainData),round(cvSplit*nrow(trainData)))
cvData <- trainData[cvInds,]
cvLabels <- trainLabels[cvInds,]
trainData <- trainData[-cvInds,]
trainLabels <- trainLabels[-cvInds,]
extraCV <- length(cvInds)%%minibatchSize
if(extraCV > 0){
extraCV <- minibatchSize - extraCV
}
extraCV <- sample(1:nrow(cvData),extraCV)
cvData <- rbind(cvData,cvData[extraCV,])
cvLabels <- rbind(cvLabels,cvLabels[extraCV,])
k <- 0
for(i in 1:num_epochs){
print(paste('epoch number: ',i))
inds <- sample(1:nrow(trainData),nrow(trainData)) # Permutate data
extra <- length(inds)%%minibatchSize # Resample to make final batch correct size
if(extra > 0){
extra <- minibatchSize - extra
}
inds <- c(inds,sample(1:nrow(trainData),extra))
numIter <- length(inds)/minibatchSize
for(j in 1:numIter){
batchInds <- (1+(j-1)*minibatchSize):(j*minibatchSize) # exactly minibatchSize rows, no overlap between batches
tDat <- trainData[inds[batchInds],]
tLab <- trainLabels[inds[batchInds],]
preds <- forward_prop(tDat)
backwards_prop(preds,tLab)
update_weights(eta = eta)
}
# Calculate accuracy on CV set
diff <- c()
numCV <- nrow(cvData)/minibatchSize
for(j in 1:numCV){
batchInds <- (1+(j-1)*minibatchSize):(j*minibatchSize) # exactly minibatchSize rows, no overlap between batches
# Forward prop
cvDat <- cvData[batchInds,]
cvLab <- cvLabels[batchInds,]
preds <- forward_prop(cvDat)
diff <- c(diff,max.col(preds)-max.col(cvLab))
}
acc <- sum(diff==0)/length(diff)
print(paste('Accuracy for epoch',i,'is:',acc))
}
}
# Implement predict function
myEnv <- list2env(list(network=layers,
forward_prop=forward_prop,
train = train))
class(myEnv) <- 'mlp'
return(myEnv)
}
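# Editorial end-to-end sketch (hedged, kept as comments since this file is part
# of a package): synthetic data; assumes the mnistr package is loaded so the
# mnistr:: calls inside mlp() resolve.
# set.seed(42)
# X <- matrix(rnorm(200 * 4), 200, 4) # 200 samples, 4 features
# y <- factor(ifelse(rowSums(X) > 0, "pos", "neg")) # toy two-class labels
# Y <- class.ind(y) # 200 x 2 indicator matrix
# net <- mlp(c(4, 8, 2), minibatchSize = 20, activation = sigmoid)
# net$train(X, Y, num_epochs = 5, eta = 0.1)
# preds <- net$forward_prop(X[1:20, ]) # class probabilities for one minibatch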
|
98a44e245c7d186217afc34a13b6c4570647c48a
|
b0828ab1cd8d8271c66d6935416b32580fb93bb4
|
/BasicQTL.R
|
2fb029cf8ad5e042706c89a457152079c62150db
|
[] |
no_license
|
KonradZych/GBIC2011
|
ba0f145d4f51549d344a756abd578675f291cf0d
|
c84ea96728f260d030d661e66a0442e934cb58f3
|
refs/heads/master
| 2016-09-06T06:23:01.350627
| 2011-07-13T19:17:58
| 2011-07-13T19:17:58
| 1,341,280
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 18,871
|
r
|
BasicQTL.R
|
# Basic QTL Mapping functions
#
#(c) 2011-2015 Konrad Zych
# Version: 0.0.1
#Sweave
#
# Contains:
#clean - Removing NA from data matrix by replacing it with 0
#qtlbyttest - Basic single marker mapping by using a t.test statistic (For RIL)
#heatmapqtl - builds a marker-by-trait matrix of linkage scores for all traits
#pathway - creates probable pathway using values for specified marker
#controller - AIO function
#clean - Removing NA from data matrix by replacing it with 0
#matrix_to_be_cleaned: matrix containing data mixed with NA
#returns: matrix_to_be_cleaned - the same matrix with NAs replaced with 0
clean<-function(matrix_to_be_cleaned){
for(h in 1:nrow(matrix_to_be_cleaned)){
for(w in 1:ncol(matrix_to_be_cleaned)){
if(is.na(matrix_to_be_cleaned[h,w])){
matrix_to_be_cleaned[h,w]<-0
}
}
}
matrix_to_be_cleaned
}
#makebinary - making a binary matrix out of matrix containing data e.g. from gene expression data -> gene expressed/not expressed
#matrix_to_be_made_binary: matrix containing data
#returns: output - matrix containing 0s (value below the treshold) and 1 (value above the treshold) currently, the treshold is median
makebinary<-function(matrix_to_be_made_binary){
output<-matrix(0,nrow(matrix_to_be_made_binary),ncol(matrix_to_be_made_binary))
#resulting matrix should have the same labels as input
rownames(output)<-rownames(matrix_to_be_made_binary, do.NULL = FALSE)
colnames(output)<-colnames(matrix_to_be_made_binary, do.NULL = FALSE)
#using median as a treshold value
for(i in 1:ncol(matrix_to_be_made_binary)){
tres=median(matrix_to_be_made_binary[,i])
output[,i]<-(matrix_to_be_made_binary[,i]>tres)
}
output
}
#qtlbyttest - Basic single marker mapping by using a t.test statistic (For RIL)
#phenotypes: Matrix of row: individuals, columns: traits
#genotypes: Matrix of row: individuals, columns: markers
#trait: integer value of the column to analyse
#return: output - vector of signed significance (-log10 of the t-test p-value) of linkage to each marker
qtlbyttest <- function(phenotypes,genotypes,trait){
output<-NULL
phenotypes<-clean(phenotypes)
genotypes<-clean(genotypes)
for(m in 1:ncol(genotypes)){
pheno_class1 <- phenotypes[which(genotypes[,m]==1),trait]
pheno_class2 <- phenotypes[which(genotypes[,m]==2),trait]
if(mean(pheno_class1)>=mean(pheno_class2)){
output <- c(output,-log10(t.test(pheno_class1,pheno_class2)[[3]]))
}else{
output <- c(output,log10(t.test(pheno_class1,pheno_class2)[[3]]))
}
}
output
}
#heatmapqtl - builds a marker-by-trait matrix of linkage scores for all traits
#phenotypes: Matrix of row: individuals, columns: traits
#genotypes: Matrix of row: individuals, columns: markers
#return: output - transposed matrix of qtlbyttest results (rows: markers, columns: traits)
heatmapqtl <- function(phenotypes,genotypes){
output <- NULL
for(y in 1:ncol(phenotypes)){
output <-rbind(output,qtlbyttest(phenotypes,genotypes,y))
}
output <- t(output)
rownames(output)<-colnames(genotypes, do.NULL = FALSE)
colnames(output)<-colnames(phenotypes, do.NULL = FALSE)
output
}
#pathway - creates probable pathway using values for specified marker
#results: output matrix from heatmapqtl (mind labels, they are crucial)
#marker: specified marker (use 100 for current data)
#return: output - proposed pathway (linear!) of trait names, ordered by peak height at the marker
pathway<-function(results,marker){
	output<-NULL
	vect<-results[marker,]
	for(i in 1:ncol(results)){
		if(max(vect)<=0){break} #stop once no positive peak remains
		cur<-which(vect==max(vect))[1]
		output<-c(output,colnames(results)[cur]) #accumulate trait names, highest peak first
		vect[cur]<-0
	}
	output
}
#makecorvector - double mode:
#line 100!
#0 - using one matrix, producing matrix of correlations between neighbor values in one direction
#1 - using two matrices, one as above, second to add exact values
#input - one or two matrices of data, two_matrices (number 0 or 1)
#function returns matrix suitable for make_topo_pallete
makecorvector <- function(first_matrix,two_matrices=0,second_matrix=NULL){
first_matrix_cor <- cor(first_matrix,use="pairwise.complete.obs")
first_matrix_corv <- NULL
output <- NULL
for(i in 2:(ncol(first_matrix_cor)-1)){
first_matrix_corv <- c(first_matrix_corv,mean(c(first_matrix_cor[i-1,i],first_matrix_cor[i,i+1]))) #mean() takes a single data vector; a second argument would be read as 'trim'
}
first_matrix_corv <- c(first_matrix_corv,(first_matrix_cor[(ncol(first_matrix_cor)-1),(ncol(first_matrix_cor))])/2)
for(i in 1:23){ #23 columns hard-coded for the example data set
output <- rbind(output,first_matrix_corv)
}
output <- t(output)
if(two_matrices==1){
for(i in 1:nrow(output)){
for(j in 1:ncol(output)){
output[i,j]<-output[i,j]+second_matrix[i,j]
}
}
}
output
}
#make_topo_pallete - making a nice color palette using the rgb function, values >0 are red and less transparent the higher they are, <0 blue and less transparent the lower they are
#input - matrix produced by makecorvector
#returns - matrix of colors in RGB,alpha format, max value for color and alpha =255
make_topo_pallete <- function(result_matrix){
cur_mean <- mean(abs(result_matrix))
cur_sd <- mean(sd(abs(result_matrix)))
cur_range <- abs(max(abs(result_matrix))-min(abs(result_matrix)))
topo_pallete<-matrix(0,nrow(result_matrix),ncol(result_matrix))
for(i in 1:nrow(result_matrix)){
for(j in 1:ncol(result_matrix)){
if(result_matrix[i,j]>=0){
if(result_matrix[i,j]<(cur_mean-cur_sd)){
topo_pallete[i,j]<-rgb(0,0,abs(result_matrix[i,j]/cur_range)*255,255-abs(result_matrix[i,j]/cur_range)*255,maxColorValue=255)
}else if((result_matrix[i,j]>(cur_mean+cur_sd))){
topo_pallete[i,j]<-rgb(abs(result_matrix[i,j]/cur_range)*255,0,0,abs(result_matrix[i,j]/cur_range)*255,maxColorValue=255)
}else{
topo_pallete[i,j]<-rgb(0,abs(result_matrix[i,j]/cur_range)*255,0,55-abs(result_matrix[i,j]/cur_range)*55,maxColorValue=255)
}
}else{
cur<-abs(result_matrix[i,j])
if(cur>(cur_mean+cur_sd)){
topo_pallete[i,j]<-rgb(0,0,abs(cur/cur_range)*255,abs(cur/cur_range)*255,maxColorValue=255)
}else if((cur<(cur_mean-cur_sd))){
topo_pallete[i,j]<-rgb(abs(cur/cur_range)*255,0,0,255-abs(cur/cur_range)*255,maxColorValue=255)
}else{
topo_pallete[i,j]<-rgb(0,abs(cur/cur_range)*255,0,55-abs(cur/cur_range)*55,maxColorValue=255)
}
}
}
}
topo_pallete
}
#un_intonumeric - function formatting data to be usable, specifically for one file, but easily adapted to others; mind the comment below in the text
#input - matrix of data
#returns matrix with specified characters
un_intonumeric <- function(un_matrix){
inside <- un_matrix
#switching -, a, b to NA, 1, 2, respectively
un_matrix[which(un_matrix=="-")] <- NA
un_matrix[which(un_matrix=="a")] <- 1
un_matrix[which(un_matrix=="b")] <- 2
un_matrix <- as.numeric(un_matrix)
un_matrix <- matrix(un_matrix,nrow(inside),ncol(inside))
rownames(un_matrix)<-rownames(inside, do.NULL = FALSE)
colnames(un_matrix)<-colnames(inside, do.NULL = FALSE)
un_matrix
}
#un_rec - counts positions at which two genotype vectors disagree; result_matrix serves as the running counter
un_rec <- function(result_matrix,chrom_vector1,chrom_vector2){
cur<-result_matrix
for(i in 1:length(chrom_vector1)){
if(is.na(chrom_vector1[i])||is.na(chrom_vector2[i])){
cur <- cur
}else if(chrom_vector1[i]==chrom_vector2[i]){
cur <- cur
}else{
cur <- cur + 1
}
}
#result_matrix[colnames(chrom_vector1),colnames(chrom_vector2)] <- cur
#result_matrix
print(cur)
cur
}
#un_recombination - estimating recombination between COLUMNS (markers)
#input - matrix of data
#return - matrix of recombination percentages (0-100) between COLUMNS
un_recombination<-function(chrom_matrix){
	s <- proc.time()
	output <- matrix(0,ncol(chrom_matrix),ncol(chrom_matrix))
	rownames(output)<-colnames(chrom_matrix, do.NULL = FALSE)
	colnames(output)<-colnames(chrom_matrix, do.NULL = FALSE)
	for(k in 1:(ncol(chrom_matrix)-1)){
		for(m in (k+1):ncol(chrom_matrix)){
			cmp <- chrom_matrix[,k] != chrom_matrix[,m]
			#because we want to be able to use the same functions as for correlation, recombination is scaled to 0-100 (percent of discordant individuals), matching the (100-un_reco)/100 usage below
			output[k,m] <- 100 * sum(cmp, na.rm=TRUE) / max(1, sum(!is.na(cmp)))
			output[m,k] <- output[k,m]
		}
	}
	e <- proc.time()
	cat("Done in:",e[3]-s[3],"s. Thank you, the party is over.\n")
	output
}
#un_row_score - counting a score for the specified vector: how many values are above the specified treshold, divided by the vector's length
#input - vector of values, treshold (number 0-1)
#return - row score (number)
un_row_score <- function(cor_matrix_row,treshold=0.7){
row_score <- 0
for(j in 1:length(cor_matrix_row)){
if(cor_matrix_row[j]>treshold){
row_score <- row_score + 1/length(cor_matrix_row)
}
}
row_score
}
#un_drop_markers - removing columns that are highly correlated with more than a specified percentage of the others
#input - matrix of data, treshold (number 0-1)
#return - matrix of data of the same type
un_drop_markers <- function(chrom_matrix,treshold=0.25){
cor_matrix <- un_recombination(chrom_matrix)
result <- apply(cor_matrix,1,un_row_score)
i<-1
while(max(result)>treshold){
chrom_matrix <- chrom_matrix[,-(which(result==max(result)))]
cor_matrix <- un_recombination(chrom_matrix)
result <- apply(cor_matrix,1,un_row_score)
print(i)
print(max(result))
i<-i+1
}
chrom_matrix
}
#un_remove_background - rescales values below mean-2*sd by s1 and values above mean+sd by s2
un_remove_background <- function(cur_matrix,s1,s2){
t1 <- mean(cur_matrix)-2*mean(sd(cur_matrix))
t2 <- mean(cur_matrix)+mean(sd(cur_matrix))
for(i in 1:nrow(cur_matrix)){
for(j in 1:ncol(cur_matrix)){
if(cur_matrix[i,j]<t1){
cur_matrix[i,j]<-cur_matrix[i,j]*s1
}else if(cur_matrix[i,j]>t2){
cur_matrix[i,j]<-cur_matrix[i,j]*s2
}
}
}
cur_matrix
}
#un_best_clustering - needs improvement really badly! quadruple-for! :D - running a specified number of clusterings of the data,
#then producing a matrix of points, used later for further classification
#input - matrix of data, number of iterations(int), number of groups(int)
#return - matrix of numbers (0-nr_iterations)
un_best_clustering <- function(chrom_matrix,nr_iterations,groups=10){
cor_matrix <- cor(chrom_matrix,use="pairwise.complete.obs")
cor_matrix <- un_remove_background(cor_matrix,-10,10)
print("un_best_clustering starting")
res <- NULL
map <- matrix(0,nrow(cor_matrix),ncol(cor_matrix))
print("iteration starting")
#clustering with k-means
for(i in 1:nr_iterations){
r <- kmeans(cor_matrix,groups)
res <- rbind(res,(as.numeric(r[[1]])))
}
print("iteration done, starting pointing system")
#matrix of points
for(i in 1:nr_iterations){
for(j in 1:groups){
for(k in which(res[i,]==j)){
for(l in which(res[i,]==j)){
map[k,l] <- map[k,l] + 1
}
}
}
}
map <- un_remove_background(map,0,10)
print("pointing done, returning output")
#matrix should inherit colnames from input
colnames(map)<-colnames(chrom_matrix, do.NULL = FALSE)
map
}
#un_order_chromosome - ordering markers inside one group (chromosome)
#input - matrix of data (specified fragment to be sorted inside)
#return - names of columns in sorted order
un_order_chromosome_by_cor <- function(chrom_matrix){
cat(ncol(chrom_matrix)," markers\n")
output<-chrom_matrix
#sorting is made in number of iterations equal to number of columns
for(i in 1:ncol(chrom_matrix)){
cat("Starting iteration ",i,"\n")
chrom_cor_matrix <- cor(output,use="pairwise.complete.obs")
first_free <- 1
last_free <- ncol(chrom_cor_matrix)
col_means <- apply(chrom_cor_matrix,2,mean)
result <- as.vector(matrix(0,1,last_free))
result[first_free] <- which(col_means==min(col_means))
result[last_free] <- which(col_means==sort(col_means)[2])
chrom_cor_matrix[result[first_free],]<--10
chrom_cor_matrix[result[last_free],]<--10
current <- NULL
for(i in 2:ncol(chrom_cor_matrix)-1){
first_free_column <- chrom_cor_matrix[,result[first_free]]
last_free_column <- chrom_cor_matrix[,result[last_free]]
if(max(first_free_column) > max(last_free_column)){
result[first_free+1] <- which(first_free_column==max(first_free_column))[1]
chrom_cor_matrix[result[first_free],] <- -10
first_free <- first_free+1
}else{
result[last_free-1] <- which(last_free_column==max(last_free_column))[1]
chrom_cor_matrix[result[last_free],] <- -10
last_free <-last_free-1
}
}
}
print("Iterations done,saving result")
output <- colnames(chrom_matrix[,result])
output
}
un_order_chromosome_by_reco <- function(chrom_matrix){
output<-chrom_matrix
#for(i in 1:ncol(chrom_matrix)){
reco_matrix <- un_recombination(chrom_matrix)
#reco_matrix <- (100-100*cor(output,use="pairwise.complete.obs"))
for(j in 1:ncol(reco_matrix)){
reco_matrix[j,j]<-200
}
col_means <- apply(reco_matrix,2,mean)
result <- as.vector(matrix(0,1,ncol(reco_matrix)))
if(ncol(reco_matrix)%%2==0){
center <- ncol(reco_matrix)/2
}else{
center <- (ncol(reco_matrix)+1)/2
}
first_free <- center
last_free <- center
result[center] <- which(col_means==min(col_means))
cat("Center:",center,"value:",which(col_means==min(col_means)),"\n")
reco_matrix[,result[center]] <- 200
for(k in sort(reco_matrix[result[center],])){
cur <- which(k==reco_matrix[result[center],])[1]
cur_first <- reco_matrix[result[first_free],cur]
cur_last <- reco_matrix[result[last_free],cur]
if(first_free==1){
last_free <- last_free+1
result[last_free] <- cur
reco_matrix[,cur] <- 200
}else if(last_free==ncol(reco_matrix)){
first_free <- first_free-1
result[first_free] <- cur
reco_matrix[,cur] <- 200
}else if(cur_first>cur_last){
first_free <- first_free-1
result[first_free] <- cur
reco_matrix[,cur] <- 200
}else{
last_free <- last_free+1
result[last_free] <- cur
reco_matrix[,cur] <- 200
}
}
output <- output[,result[-length(result)]]
#}
result <- result[-length(result)]
result
}
#un_order_chromosome_by_seriation - ordering markers inside one group with the seriation package (requires library(seriation) for seriate/get_order)
un_order_chromosome_by_seriation <- function(chrom_matrix){
#reco_matrix <- dist(chrom_matrix)
cur <- abs(cor(chrom_matrix,use="pairwise.complete.obs"))
cat("Starting un_order_chromosome_by_seriation\n")
for(i in 1:ncol(chrom_matrix)){
cat("Segregating marker",i,"\n")
o <- seriate(t(clean(cur)))
cur <- cur[get_order(o),get_order(o)]
}
get_order(o)
}
#un_neighbor - heart of analysis!
#input: matrix of data with wrongly ordered columns, to be clustered, sorted inside groups, nr_iterations (int) groups(int)
#return: matrix of the same data with right order of columns
un_neighbor <- function(chrom_matrix,method=1,nr_iterations=1000,groups=5){
if(method==1){
print("Using un_order_chromosome_by_cor.")
r <- un_best_clustering(chrom_matrix,nr_iterations,groups)
#un_cor <- cor(chrom_matrix,use="pairwise.complete.obs")
#r <- un_best_clustering(r,nr_iterations,groups)
r<-kmeans(r,groups)
res <- NULL
for(i in 1:groups){
cat("Segregating chromosome: ",i,"nr of markers:",length(which(r[[1]]==i)),"\n")
cur <- un_order_chromosome_by_cor(chrom_matrix[,which(r[[1]]==i)])
res <- cbind(res,chrom_matrix[,cur])
}
}else{
print("Using un_order_chromosome_by_reco.")
r <- un_best_clustering(chrom_matrix,nr_iterations,groups)
r <- kmeans(r,groups)
res <- NULL
for(i in 1:groups){
cat("Segregating chromosome: ",i,"nr of markers:",length(which(r[[1]]==i)),"\n")
cur <- un_order_chromosome_by_seriation(chrom_matrix[,which(r[[1]]==i)])
res <- cbind(res,chrom_matrix[,cur])
}
}
res
}
#un_neighbor2 - heart of analysis!
un_neighbor2<-function(input){
reco_matrix<-input
j<-ncol(reco_matrix)
i<-1
for(i in 1:4){
cur_col <- reco_matrix[,1]
cur<-which(cur_col<(mean(cur_col)))
reco_matrix <- reco_matrix[-cur,-cur]
j<-ncol(reco_matrix)
cat(i,":",j," : ",length(cur)," : ",cur,"\n")
i<-i+1
}
cat(i,":",j," : ",ncol(reco_matrix)," : ",colnames(reco_matrix),"\n")
}
qtlbyttest_test <- function(){
#Creating vector of correct data
cor<-c(15.6152669361112, 10.9611214399831, 10.6377737262226,
6.66414029040903, 1.49439826143111, 8.4439633754547,
10.1596325117105, 18.6492514223787, 7.70593733223225,
2.83027804463293, 19.0918976951953, 6.13302622877962,
5.42781730179255, 15.1181942357603, 7.93420253276565,
10.9872557298082, 8.40230035811374, 5.13484444988022,
0.0513458276192633, 0.282071067944515, 0.623321252421781,
1.69995386811916, 0.395375105682745, 0.66074033328745)
setwd("d:/data")
#Loading data
phenotypes <- clean(as.matrix(read.table("measurements_ordered.txt", sep="")))
genotypes <- clean(as.matrix(read.table("genotypes_ordered.txt", sep="")))
#Testing loop
for(x in 1:ncol(phenotypes)){ #one reference value per trait
	res <- qtlbyttest(phenotypes,genotypes,x)
	if(isTRUE(all.equal(res[116],cor[x]))){ #== fails on floating point values, hence all.equal
		cat("Value for the trait ",x,"is correct.\n")
	}else{
		cat("Error wrong value for trait ",x," is ",res[116]," should be ",cor[x],"\n")
	}
}
cat("All values are correct.\n")
}
makebinary_test <- function(){
#Creating vector of correct data
correct<-c(0, 1, 0, 1, 0, 0, 1, 1, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 0, 1, 0, 0, 0, 0)
setwd("d:/data")
#Loading data
phenotypes <- clean(as.matrix(read.table("measurements_ordered.txt", sep="")))
#Testing here
res <- makebinary(phenotypes)
for(i in 1:ncol(phenotypes)){
if(res[100,i]!=correct[i]){
stop("Wrong value for trait ",i,". Disconnecting for your safety.")
}
}
cat("Test passed. Makebinary ready to serve.\n")
}
#examples to be used in package
#
#1 - basic qtl map, using data from gene expression (phenotypes.txt) and genotyping (genotypes.txt)
#
makebinary_test()
setwd("D:/data")
phenotypes <- as.matrix(read.table("phenotypes.txt", sep=""))
genotypes <- as.matrix(read.table("genotypes.txt", sep=""))
result_binary<-heatmapqtl(makebinary(clean(phenotypes)),genotypes)
covariance_matrix <- makecorvector(genotypes,1,result_binary)
color_pallete <- make_topo_pallete(covariance_matrix)
persp(result_binary,col=color_pallete)
#obviously, one can skip this fancy coloring, but, hell, it's nice looking
#
#2 - obtaining a simple pathway from observing the marker 100 peak
#
setwd("D:/data")
phenotypes <- as.matrix(read.table("phenotypes.txt", sep=""))
genotypes <- as.matrix(read.table("genotypes.txt", sep=""))
path<-pathway(result_binary, 100)
#
#3 - recreating a gene map from messed-up data
#
setwd("D:/data")
un_matrix <- un_intonumeric(as.matrix(read.table("genotypes_multitrait.txt", sep="", header=TRUE)))
un_result<-un_drop_markers(un_matrix)
un_reco <- un_recombination(un_matrix)
un_ord <- un_neighbor(un_reco,nr_iterations=1000,groups=5) #name the arguments so 1000 is not taken as 'method'
un_ord <- un_neighbor(un_matrix,2,1000,5)
un_ord_r <- un_neighbor((100-un_reco)/100,1,1000,5)
un_ord_cor <- cor(un_ord,use="pairwise.complete.obs")
un_rod_r_cor <- cor(un_ord_r,use="pairwise.complete.obs")
image(un_ord_cor)
image(un_rod_r_cor)
un_ord<-un_order_chromosome_by_reco(un_reco)
#un_order_chromosome_by_cor could be used here instead
un_reco_ord <- un_recombination(un_ord)
un_rec_cor <- cor(un_ord,use="pairwise.complete.obs")
image(un_rec_cor)
ord <- un_neighbor(un_result,nr_iterations=1000,groups=20)
ord_recombination <- un_recombination(ord)
image(ord_recombination)
|