blob_id
stringlengths 40
40
| directory_id
stringlengths 40
40
| path
stringlengths 2
327
| content_id
stringlengths 40
40
| detected_licenses
listlengths 0
91
| license_type
stringclasses 2
values | repo_name
stringlengths 5
134
| snapshot_id
stringlengths 40
40
| revision_id
stringlengths 40
40
| branch_name
stringclasses 46
values | visit_date
timestamp[us]date 2016-08-02 22:44:29
2023-09-06 08:39:28
| revision_date
timestamp[us]date 1977-08-08 00:00:00
2023-09-05 12:13:49
| committer_date
timestamp[us]date 1977-08-08 00:00:00
2023-09-05 12:13:49
| github_id
int64 19.4k
671M
⌀ | star_events_count
int64 0
40k
| fork_events_count
int64 0
32.4k
| gha_license_id
stringclasses 14
values | gha_event_created_at
timestamp[us]date 2012-06-21 16:39:19
2023-09-14 21:52:42
⌀ | gha_created_at
timestamp[us]date 2008-05-25 01:21:32
2023-06-28 13:19:12
⌀ | gha_language
stringclasses 60
values | src_encoding
stringclasses 24
values | language
stringclasses 1
value | is_vendor
bool 2
classes | is_generated
bool 2
classes | length_bytes
int64 7
9.18M
| extension
stringclasses 20
values | filename
stringlengths 1
141
| content
stringlengths 7
9.18M
|
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
9a0742e824c181ff2bfbc5c565ace599620ae95c
|
cdc6049c2731e6aa03c0cd93daba24b6b1e3de06
|
/opensesame/splitTrialsByCond.R
|
5c4a9adefb64fc4065ceb912df620ad9d1bf2ea4
|
[] |
no_license
|
disaltzman/TalkerTeam-Mapping
|
4ebee00fc36cd4b9ec644ffa32c66a808247cafd
|
1131d3ddf7d70fc705d75e3125390b647584802c
|
refs/heads/master
| 2023-07-15T11:08:47.397160
| 2021-08-27T13:47:04
| 2021-08-27T13:47:04
| 257,983,179
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 691
|
r
|
splitTrialsByCond.R
|
# Split each subject's randomization file into three condition files.
#
# Input:  every CSV in the randomizations directory; rows have a fixed layout:
#         1-48 mixed trials, 49-72 blocked male, 73-96 blocked female.
# Output: <subject>mixed.csv, <subject>blockedmale.csv, <subject>blockedfemale.csv
#         written to the same directory.
#
# Changes: removed `rm(list = ls())` (destructive side effect on the caller's
# workspace); iterate over the file vector directly instead of wrapping it in
# a data frame and indexing 1:length(); use tools::file_path_sans_ext()
# instead of splitting on "." (which broke for file names containing dots).
setwd("~/Documents/UConn/Research/Talker Team - Mapping/randomizations/")
files <- list.files()
for (f in files) {
  randomization <- read.csv(f)
  # Subject ID = file name without its extension.
  subject <- tools::file_path_sans_ext(f)
  # Fixed row layout: 1-48 mixed, 49-72 blocked male, 73-96 blocked female.
  mixed <- randomization[1:48, ]
  blockedmale <- randomization[49:72, ]
  blockedfemale <- randomization[73:96, ]
  write.csv(mixed, paste0(subject, "mixed.csv"), row.names = FALSE)
  write.csv(blockedmale, paste0(subject, "blockedmale.csv"), row.names = FALSE)
  write.csv(blockedfemale, paste0(subject, "blockedfemale.csv"), row.names = FALSE)
}
|
b14053cbb1f310f146cada36a1025a02925837a8
|
358bcdc100cedfa930b1b1f636b08c81dc906d8f
|
/Code/Fig 4.R
|
87f8562e8c832b38976c48aa97eb1a35f779e1b5
|
[] |
no_license
|
btmarti25/biophysical-basis-of-thermal-tolerance-in-aquatic-eggs-
|
2d5c4d5d236d0424db27115104f54219089a90b7
|
7e295600ea97d9bde76edb4eb4d065b3fd6be632
|
refs/heads/master
| 2022-08-31T11:22:53.028154
| 2020-05-25T09:15:07
| 2020-05-25T09:15:07
| 262,339,918
| 2
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 5,026
|
r
|
Fig 4.R
|
## Figure 4: (1) observed + model-predicted egg survival vs flow and
## temperature, (2) paired front-vs-back egg-pocket survival, and
## (3) alevin length, arranged as a 3-panel figure.
library(reshape2)
library(ggplot2)
library(lme4)
library(gridExtra)
library(MASS)
library(cowplot)
library(dplyr)
library(tidyverse)
library(viridis)  # scale_color_viridis()/scale_fill_viridis() are called below
                  # but viridis was never attached (fixed); also removed a
                  # duplicated library(cowplot)
GravelSurvivalData <- read.csv(file = "~/Google Drive/Projects/ChinookEggExperiment/Final/Data/GravelSurvivalData.csv", header = TRUE)
## Fit a binomial mixed model for survival as a function of flow and
## temperature, with a random intercept per incubation tube.
model <- glmer(
  cbind(survivors, morts) ~ factor(temp) * factor(flow) + (1 | Tube),
  family = binomial(link = logit),
  control = glmerControl(optimizer = "Nelder_Mead", optCtrl = list(maxfun = 1e+06)),
  data = GravelSurvivalData
)
summary(model)
## Fixed-effect predictions (re.form = NA) on the logit scale for the design grid.
predictionData <- expand.grid(temp = c(11, 15), flow = c(0.01, 0.04, 0.12), distance = 0)
predictionData$pred <- predict(model, predictionData, re.form = NA)
mm <- unique(model.matrix(model))
Tpred <- mm %*% fixef(model)
## 95% CI half-width on the link scale: 1.96 * sqrt(diag(X V X')).
## (A leftover, unused expression `mm * fixef(model) * t(mm)` was removed here.)
CI <- sqrt(diag(mm %*% tcrossprod(vcov(model), mm))) * 1.96
## NOTE(review): this relies on the rows of unique(model.matrix(model))
## matching the row order of predictionData -- confirm the ordering.
predictionData$CI <- CI
## Back-transform point estimates and CI bounds from logit to proportion scale.
predictionData$sPred <- exp(predictionData$pred) / (1 + exp(predictionData$pred))
predictionData$upr <- exp(predictionData$pred + predictionData$CI) / (1 + exp(predictionData$pred + predictionData$CI))
predictionData$lwr <- exp(predictionData$pred - predictionData$CI) / (1 + exp(predictionData$pred - predictionData$CI))
## Mean observed survival per tube (top and bottom egg pockets).
aggregateSurvival <- aggregate(GravelSurvivalData, by = list(GravelSurvivalData$Tube), FUN = mean, na.rm = TRUE)
## Panel 1: observed and predicted survival vs flow, colored by temperature.
pSurvival <- ggplot() +
  geom_line(data = predictionData, aes(x = flow, y = sPred, group = factor(temp),
      color = factor(temp)), size = 1) +
  ## (dropped an invalid `width` argument that geom_ribbon() ignores with a warning)
  geom_ribbon(data = predictionData, aes(x = flow, ymin = lwr,
      ymax = upr, fill = factor(temp)), alpha = 0.3, size = 1) +
  geom_jitter(data = aggregateSurvival, aes(x = flow, y = propS, fill = factor(temp)), width = 0.003, alpha = 0.6, shape = 21, size = 2) +
  theme_cowplot() +
  theme(legend.position = c(0.7, 0.2)) +
  scale_color_viridis(option = "plasma", discrete = TRUE, end = 0.65, direction = 1) +
  scale_fill_viridis(option = "plasma", discrete = TRUE, end = 0.65, direction = 1) +
  xlab("Flow (cm/s)") + ylab("Proportion surviving") + ylim(0, 1) + xlim(0, 0.126) +
  theme(legend.title = element_blank())
### Panel 3: sublethal effect on alevin length.
dat <- read.csv(file = "~/Google Drive/Projects/ChinookEggExperiment/Final/Data/Alevin_lengths.csv")
aggLengthData <- aggregate(dat, by = list(dat$Tube), FUN = mean, na.rm = TRUE)
tube <- read.csv(file = "~/Google Drive/Projects/ChinookEggExperiment/Final/Data/gravel_tudeIds.csv")
comb <- merge(dat, tube, by = "Tube")  # NOTE(review): unused afterwards and
                                       # reassigned below -- confirm intent
combAgg <- merge(aggLengthData, tube, by = "Tube")
meanMod <- lm(length ~ flow * temp, data = combAgg)
summary(meanMod)
DatComb <- cbind(combAgg, predict(meanMod, combAgg, interval = "predict"))
plength <- ggplot(DatComb, aes(x = flow, y = fit, group = temp)) +
  geom_line(aes(x = flow, y = fit, color = factor(temp)), size = 1) +
  geom_jitter(aes(x = flow, y = length, fill = factor(temp)), width = 0.003, alpha = 0.6, shape = 21,
      size = 2) +
  stat_smooth(aes(x = flow, y = length, color = factor(temp), fill = factor(temp)), method = "lm") +
  theme_cowplot() +
  scale_color_viridis(option = "plasma", discrete = TRUE, end = 0.65, direction = 1) +
  scale_fill_viridis(option = "plasma", discrete = TRUE, end = 0.65, direction = 1) +
  ylim(18, 23) + xlab("Flow (cm/s)") + ylab("Length (mm)") +
  theme(legend.position = c(0.7, 0.2)) +
  theme(legend.title = element_blank()) + xlim(0, 0.126)
## Panel 2: paired front vs back egg-pocket survival on the logit scale.
GravelSurvivalData$logitS <- log(GravelSurvivalData$propS / (1 - GravelSurvivalData$propS))
backPocket <- subset(GravelSurvivalData, Location == "Top")
## NOTE(review): renaming by column position (10) is fragile -- confirm that
## column 10 is logitS in both subsets.
names(backPocket)[10] <- "logitSback"
frontPocket <- subset(GravelSurvivalData, Location == "Bottom")
names(frontPocket)[10] <- "logitSfront"
comb <- merge(frontPocket, backPocket, by = c("temp", "flow", "rep"))
t.test(comb$logitSfront, comb$logitSback, paired = TRUE, alternative = "two.sided")
pairedDat <- data.frame(Front = comb$logitSfront, Back = comb$logitSback)
pairedDat$rep <- seq_len(nrow(pairedDat))  # was hard-coded 1:29
pairedDat <- melt(pairedDat, id = "rep")
pd <- pairedDat %>%
  tidyr::spread(variable, value) %>%
  dplyr::mutate(is_increasing = Front < Back) %>%
  tidyr::gather("variable", "value", 2:3)
pd <- pd %>% mutate(variable = as.factor(variable))
pd$variable <- factor(pd$variable, levels(pd$variable)[c(2, 1)])  # Front first
pFrontBack <- ggplot(pd, aes(x = variable, y = value)) +
  geom_boxplot(aes(fill = variable), alpha = 0.3, col = "grey") +
  geom_jitter(aes(fill = variable), height = 0, width = .03, alpha = 0.6, shape = 21, size = 2) +
  geom_line(aes(group = rep, col = is_increasing), alpha = .5) +
  theme_cowplot() +
  scale_color_viridis(option = "plasma", discrete = TRUE, begin = 0.35, end = .85, direction = 1) +
  scale_fill_viridis(option = "plasma", discrete = TRUE, begin = 0.35, end = .85, direction = 1) +
  theme(legend.position = "none") + xlab("Egg cluster position") + ylab("Logit survival")
## Combine the three panels.
grid.arrange(pSurvival, pFrontBack, plength, ncol = 3)
|
43651d658b53713242b0e220c82cdd9ed89e34ee
|
226779647e199fbb8c8fee155c2373f8fa82857f
|
/Plot1.R
|
5ee7f58eedc6046a533e5883ceffc6319049fb01
|
[] |
no_license
|
andrewplumb/ExData_Plotting1
|
b7d816a69a508e2d51c2436c33405f864703aaef
|
4fd20cfee5ceb5abaa5ba5124133b3f933c82cc6
|
refs/heads/master
| 2021-01-16T19:44:35.912201
| 2015-08-04T23:17:38
| 2015-08-04T23:17:38
| 40,208,840
| 0
| 0
| null | 2015-08-04T21:00:30
| 2015-08-04T21:00:30
| null |
UTF-8
|
R
| false
| false
| 478
|
r
|
Plot1.R
|
# Plot 1: histogram of Global Active Power for 1-2 Feb 2007, saved as plot1.png.
library(dplyr)
path2epc <- "C:\\Users\\XBBLXKV\\Documents\\household_power_consumption.txt"
# Semicolon-separated file; missing values are coded as "?", so the power
# column arrives as character (or factor on R < 4.0).
epc <- read.table(path2epc, header = TRUE, sep = ";")
epcfiltered <- filter(epc, Date == "1/2/2007" | Date == "2/2/2007")
# as.character() first makes the conversion correct whether the column is a
# factor or plain character; the old `as.numeric(levels(x))[x]` idiom returns
# garbage on character columns (levels() is NULL there).
gap <- as.numeric(as.character(epcfiltered$Global_active_power))
# Render straight to the PNG device: dev.copy() from a screen device can
# distort the output dimensions.
png(file = "plot1.png", width = 480, height = 480)
par(mfrow = c(1, 1))
par(mar = c(4, 4, 2, 2))
hist(gap, col = "red", xlab = "Global Active Power (kilowatts)", main = "Global Active Power")
dev.off()
|
38b5cff0fd9a15ccddcc96f7f4a8501fc699929b
|
0c1c0e8c68835ca908806372236af6f539490042
|
/lecture/2016.06.02/notes.R
|
ed2857420978aff5cb57eb90df62241ca01701cc
|
[] |
no_license
|
AylNels/notes
|
2343f78064d1bb07dc71d23dc2b70c69b45e52c3
|
e331c4ff2d736cb65f2313659ab42be469d80209
|
refs/heads/master
| 2021-01-15T20:47:57.257314
| 2016-06-05T20:29:02
| 2016-06-05T20:29:43
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 9,079
|
r
|
notes.R
|
## # STS 98 - Lecture 2016.06.02
##
## See [notes.R](notes.R) to follow along in RStudio.
##
## Also see the [R input](r_session.txt) from the lecture.
## Announcements
## -------------
## Extra OH: Today 5-7pm, Shields 360
##
## Final Exam: Monday (June 6), 10:30am - 12:30pm, Wellman 26
##
## Bring a blue book!
## Questions
## ---------
## Review
## ------
## Absence of a topic doesn't mean it won't be on the exam!
##
## Don't worry about memorizing every function in R.
## ### Density Plots
##
## A density plot is a smoothed histogram, rescaled so that the total area
## under the curve is 1.
##
## The area under the curve for any interval on the x-axis estimates the
## proportion of points that fall in that interval.
##
## For intuition, imagine simultaneously increasing the number of observations
## and decreasing the width of the bins until the tops of the histogram bars
## become a smooth curve.
## Histogram on the density scale with a kernel density curve overlaid.
df = readRDS("data/random_points.rds")
## freq = F rescales bar heights so total area is 1, matching the density curve.
hist(df$x, freq = F, col = "gray", breaks = 30)
lines(density(df$x))
## Density plots don't work well when:
##
## * The variable shown is not continuous. If the variable takes specific,
## discrete values, you should use a histogram instead.
##
## * There aren't many observations. Look at both the histogram and the density
## plot to decide; the histogram may have information that gets smoothed away
## on the density plot.
## ### Outliers
##
## An _outlier_ is an observation that doesn't fit the pattern made by the
## other observations. In other words, it's an unusual observation.
##
## An extreme value is the most common kind of outlier. This is an observation
## that's very far away from most of the other observations.
##
## There's no specific definition for "far away". In practice, it's common to
## label outliers as points...
##
## * more than 1.5 IQRs below 25th or above 75th quantile
##
## * more than 3 standard deviations above or below the mean
##
## However, these are not foolproof and you should always examine the
## observations graphically to determine which are outliers.
##
##
##
## When you find an outlier, try to explain the cause! Examining other
## variables for the outlying observation often helps. This may lead you to an
## interesting insight about the data.
##
## However, you should also consider the possibility that the outlier was
## caused by a mistake when the data was recorded. If the outlier is not a
## valid observation, try to correct it using...
##
## * information from other variables (for the same observation)
##
## * a location estimate based on "similar" observations
##
## The second strategy is called _imputation_. Imputing a value for the outlier
## is not always a good choice, because it can bias your analysis.
##
## If you know an outlier is invalid but cannot correct it, replace the value
## with a missing value.
## Outlier inspection: boxplot of total deaths from the significant-earthquakes
## file, then a map of event locations sized by deaths (needs ggmap + viridis).
q = read.delim("signif.txt")
boxplot(q$TOTAL_DEATHS)
library("ggmap")
map = qmap("Shanghai, CN", zoom = 4)
pts = geom_point(aes(LONGITUDE, LATITUDE, size = TOTAL_DEATHS,
color = EQ_PRIMARY), q, alpha = 0.75)
library("viridis")
## Reversed magma palette for the continuous color gradient.
map + pts + scale_color_gradientn(colors = rev(magma(5)))
# Locate extreme values.
head(q[order(q$TOTAL_DEATHS, decreasing = T), ])
## ### Missing Values
##
## R represents missing values as NA.
##
## Missing values can be:
##
## * missing not at random (MNAR)
##
## * missing at random (MAR)
##
## When values are MNAR, they're missing for a reason! Try to come up with an
## explanation for why they're missing, as this may give you additional insight
## into the data set.
##
## Ignoring values that are MNAR can bias your analysis. There's a reason the
## points are missing! You should correct these values if possible, using the
## same strategies you would use to correct outliers.
##
## When values are MAR, they are not likely to bias your analysis, so
## correction is less important and can even be detrimental.
##
##
##
## Identifying whether values are MNAR or MAR can be difficult.
##
## The strategy is to look for relationships between the missing values and
## other variables. If you see a relationship, the values are MNAR.
##
## However, it's possible values are MNAR but the relationship is with an
## external variable that wasn't included in the data set.
## Check whether missing TOTAL_DEATHS values relate to YEAR (MNAR vs MAR).
q_na = subset(q, is.na(TOTAL_DEATHS))
library("lattice")
densityplot(~ YEAR, q_na)
## Range of the non-missing death counts.
range(q$TOTAL_DEATHS, na.rm = T)
## ### Correlation
##
## _Correlation_ measures how well two variables make a line when plotted as
## (x, y) coordinates. In other words, correlation measures linear
## relationships.
##
## Correlation ranges from -1 to 1, with -1 meaning an inverse (negative slope)
## relationship and 1 meaning a direct (positive slope) relationship. 0 means
## no __linear__ relationship was detected.
##
## For intuition, imagine slicing a scatter plot vertically at the mean of the
## x-variable and horizontally at the mean of the y-variable. This divides the
## plot into 4 pieces. If most of the points are in...
##
## * the bottom left and top right pieces, the correlation will be close to 1
##
## * the top left and bottom right pieces, the correlation will be close to -1
##
## * every piece evenly, the correlation will be close to 0
##
## The actual correlation statistic also takes into account how far the points
## are from the means. Points farther away have a stronger effect.
##
##
##
## Beware of two easy mistakes with correlation:
##
## * Correlation DOES NOT imply causation, it's just a number computed with a
## formula. Statistics can detect patterns but cannot determine cause/effect
## relationships.
##
## * A correlation of 0 DOES NOT imply no relationship. Correlation only
## detects linear relationships. There could be a very strong non-linear
## relationship, and you'd never know unless you plotted the data.
##
## How else can you detect relationships between variables?
##
## First | Second | Plot
## ----------- | ----------- | ----
## categorical | categorical | mosaic
## categorical | numeric | box, density
## numeric | numeric | scatter
## Scatter-plot matrix of every pairwise combination of columns in df.
pairs(df)
## ### Confounding Variables
##
## Be careful to consider how each variable might affect your analysis.
##
## If you are examining some variables and grouping or rescaling by another
## variable changes your conclusions about the data, that variable is a
## _confounding variable_.
##
## Sometimes the relationship between variables is confounded by a variable
## that isn't even included in the data set. There's no easy way to detect
## this, so you should think hard about any conclusions you make and try to
## come up with ways they might be wrong.
##
## In other words, it's a good idea to have an attitude of _statistical
## skepticism_ when doing data analyses.
##
##
##
## The most dramatic example of confounding is called Simpson's paradox. In
## a Simpson's paradox, accounting for the confounding variable reverses the
## conclusion.
##
## See <http://vudlab.com/simpsons/> for an interactive example.
## ### Joins
##
## Data split across multiple tables is called _relational data_. A variable
## that appears in more than one table is called a _key_, and can be used to
## relate the tables.
##
## For example, a grocery chain's inventory is relational data. They might have
## a table that lists stores and another that lists food items. The key might
## be the store ID number; this connects stores to their particular food items.
##
## Relational formats are especially useful when the unit (subject) observed in
## each table is different.
##
## However, for some analyses you might want to combine two or more tables. You
## can do this by _joining_ the tables using the key(s).
##
## In some cases there may be multiple keys. Think carefully about what the
## observations are in each table you're joining!
##
## Join the three inventory tables via their keys: parts and supplier_parts
## share PartID; the combined result and suppliers share SupplierID.
parts = readRDS("data/inventory/parts.rds")
suplr = readRDS("data/inventory/suppliers.rds")
sp = readRDS("data/inventory/supplier_parts.rds")
x = merge(parts, sp, by = "PartID")
merge(x, suplr, by = "SupplierID")
## ### Tidying
##
## Variables can be divided into two groups:
##
## 1. _Identifier variables_ identify a unit of observation.
##
## 2. _Measurement variables_ are actual measurements on the unit.
##
## These definitions depend on what you want to do. However, identifier
## variables usually contain groups you'd like to compare.
##
##
##
## _Melting_ a data set puts all of the measurement variables in a single
## column and creates a new identifier variable to identify what's being
## measured.
##
## _Casting_ a molten data set puts each measurement variable in a separate
## column.
##
## Melting and casting are useful in two situations:
##
## 1. The data set doesn't have rows corresponding to observations and columns
## corresponding to variables. By melting and casting, you can rearrange the
## data set to satisfy this requirement.
##
## 2. You want to create a new unit of observation by aggregating the existing
## observations.
##
## Melt reshape2's built-in `smiths` example data: the measurement columns
## collapse into variable/value pairs keyed by subject and time.
library("reshape2")
melt(smiths, id.vars = c("subject", "time"))
|
eb2fdf40bddd94672be748839a4969eed53c2b41
|
f93ba2f8fb9269b3b9b1e08060ed90bf7983ea27
|
/testingdata/Code/gamma.R
|
ad0ff1a071552a270fddfbb1cb9dcb98d9e3d853
|
[] |
no_license
|
maniteja123/defect_prediction
|
b065c76e4b1ca0139d9f7e73a49c7315947f9963
|
f28a1383a933e51202a01bfde660a7adb9b56c82
|
refs/heads/master
| 2021-01-10T07:50:34.931522
| 2016-03-27T06:39:31
| 2016-03-27T06:39:31
| 54,816,070
| 1
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,001
|
r
|
gamma.R
|
## Fit gamma distributions to monthly bug-report counts per Tomcat release.
library(VGAM)
library(fitdistrplus)
library(actuar)
setwd("D:\\Studies\\Project\\BugPrediction\\Softwares\\testingdata")
## Release date of the NEXT version for each release; bugs opened on or after
## this date are excluded when fitting (see plotGamma).
nextReleaseDates = data.frame(
tomcat3.3="2002-09-06",
tomcat4.1="2003-12-03",
tomcat5="2007-02-28",
tomcat6="2011-01-14",
tomcat7="2014-06-25" )
## Releases to process. NOTE(review): these names contain spaces while the
## nextReleaseDates column names do not -- confirm the lookup in plotGamma.
files = c("tomcat 3.3","tomcat 4.1","tomcat 5","tomcat 6")
# Fit a gamma distribution to the monthly frequency of bug reports for one
# release and write diagnostic plots to "<file> gamma fitdist.pdf".
#
# Arguments:
#   file: base name of the release CSV, without the ".csv" extension.
# Returns: the fitdistrplus::fitdist fit object.
plotGamma <- function(file) {
r <- paste(file,"csv",sep = ".")
data <- read.csv(r)
# Keep only columns 1 and 9 -- presumably an ID and the "Opened" date;
# TODO confirm against the CSV layout.
data <- data[,c(1,9)]
# NOTE(review): nextReleaseDates column names have no spaces, but `file`
# values like "tomcat 3.3" do -- confirm this column lookup succeeds.
nextRelease <- as.Date(nextReleaseDates[file][1,],format = "%Y-%m-%d")
data$Opened <- as.Date(as.POSIXct(data$Opened,format = "%d-%m-%y"))
# Only bugs opened before the next release count toward this release.
data <- data[data$Opened < nextRelease,]
rownames(data) <- NULL
# Monthly report counts: one frequency per "YYYY-MM" bucket.
fre <- table(format(data$Opened,"%Y-%m"))
fre <- as.data.frame(fre)
y <- fre$Freq
x = 1:length(y)
fit <- fitdist(y, "gamma", start=list(shape = 1, scale = 500))
# Write the four standard fitdist diagnostic plots to a PDF.
file <- paste(file,"gamma fitdist",sep=" ")
r <- paste(file,"pdf",sep = ".")
pdf(r, width = 10, height = 10)
plot(fit)
dev.off()
return(fit)
}
|
9dc1a6705848953f8ae9d9d14286931396430530
|
d1f1fabdd43cb8d2ce376118d902f0b281082aef
|
/R/lc_gradient.R
|
87aa7d1b8385d1a83cabef4e55b79d3c39738c00
|
[] |
no_license
|
cran/GB2group
|
36f76d3a5997fd71e83de3ea771208d01bfb626c
|
47adc1f2ef13aa307d3141398df2f6a95913d195
|
refs/heads/master
| 2021-06-17T19:43:21.291134
| 2021-01-26T16:00:09
| 2021-01-26T16:00:09
| 157,565,794
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,108
|
r
|
lc_gradient.R
|
# Negated Lorenz-curve ordinate of the GB2 distribution.
#
# theta packs (a, b, p, q, pr): the four GB2 parameters followed by the
# population share pr at which the Lorenz curve is evaluated. Uses the GB2
# identity L(pr) = F(Q(pr; a, b, p, q); a, b, p + 1/a, q - 1/a) and returns
# the negative (so a minimizer can be used to maximize).
gr.gb2 <- function(theta) {
  shape_a <- theta[1]
  scale_b <- theta[2]
  shape_p <- theta[3]
  shape_q <- theta[4]
  share <- theta[5]
  quantile_at_share <- qgb2(share, shape_a, scale_b, shape_p, shape_q)
  lorenz_ordinate <- pgb2(quantile_at_share, shape_a, scale_b,
                          shape_p + 1 / shape_a, shape_q - 1 / shape_a)
  -lorenz_ordinate
}
# Negated Lorenz-curve ordinate of the Dagum distribution (GB2 with q = 1).
# theta = (a, b, p, pr); pr is the population share evaluated.
gr.da <- function(theta) {
a <- theta[1]
b <- theta[2]
p <- theta[3]
q <- 1
pr <- theta[4]
# GB2 Lorenz identity: L(pr) = F(Q(pr; a,b,p,q); a, b, p + 1/a, q - 1/a)
t <- qgb2(pr, a, b, p, q)
lcgb2 <- pgb2(t, a, b, p + 1 / a, q - 1 / a)
return(-lcgb2)
}
# Negated Lorenz-curve ordinate of the Beta-2 distribution (GB2 with a = 1).
# theta = (b, p, q, pr); pr is the population share evaluated.
gr.b2 <- function(theta) {
a <- 1
b <- theta[1]
p <- theta[2]
q <- theta[3]
pr <- theta[4]
t <- qgb2(pr, a, b, p, q)
lcgb2 <- pgb2(t, a, b, p + 1 / a, q - 1 / a)
return(-lcgb2)
}
# Negated Lorenz-curve ordinate of the Singh-Maddala distribution
# (GB2 with p = 1). theta = (a, b, q, pr).
gr.sm <- function(theta) {
a <- theta[1]
b <- theta[2]
p <- 1
q <- theta[3]
pr <- theta[4]
t <- qgb2(pr, a, b, p, q)
lcgb2 <- pgb2(t, a, b, p + 1 / a, q - 1 / a)
return(-lcgb2)
}
# Negated Lorenz-curve ordinate of the Fisk (log-logistic) distribution
# (GB2 with p = q = 1). theta = (a, b, pr).
gr.f <- function(theta) {
a <- theta[1]
b <- theta[2]
p <- 1
q <- 1
pr <- theta[3]
t <- qgb2(pr, a, b, p, q)
lcgb2 <- pgb2(t, a, b, p + 1 / a, q - 1 / a)
return(-lcgb2)
}
# Negated Lorenz-curve ordinate of the lognormal distribution.
#
# theta = (s, mu, pr). The lognormal Lorenz curve is
# L(pr) = Phi(Phi^{-1}(pr) - s), which depends only on the log-scale sd s,
# so mu is accepted (for interface symmetry with the other gr.* functions)
# but unused. Returned negated, consistent with the GB2-family variants.
gr.ln <- function(theta) {
  sigma <- theta[1]
  mu <- theta[2]
  share <- theta[3]
  -pnorm(qnorm(share) - sigma)
}
|
536dc760fac9f056857682a4329ec19d4c1d60cc
|
ffdea92d4315e4363dd4ae673a1a6adf82a761b5
|
/data/genthat_extracted_code/bayesQR/examples/prior.Rd.R
|
75b8200b0f74edabdca64e8207ad6cf03d1a4728
|
[] |
no_license
|
surayaaramli/typeRrh
|
d257ac8905c49123f4ccd4e377ee3dfc84d1636c
|
66e6996f31961bc8b9aafe1a6a6098327b66bf71
|
refs/heads/master
| 2023-05-05T04:05:31.617869
| 2019-04-25T22:10:06
| 2019-04-25T22:10:06
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 493
|
r
|
prior.Rd.R
|
## Example script extracted from the bayesQR package documentation for prior().
library(bayesQR)
### Name: prior
### Title: Create prior for Bayesian quantile regression
### Aliases: prior
### ** Examples
# Load the Prostate cancer dataset
data(Prostate)
# Create informative prior object; beta0/V0 cover the 9 coefficients
# (intercept + 8 predictors of lpsa)
prior <- prior(lpsa~., data=Prostate, beta0=rep(5,9), V0=diag(9))
# Investigate structure of bayesQR.prior object
str(prior)
# Estimate the model parameters with informative prior
out <- bayesQR(lpsa~., data=Prostate, prior=prior, ndraw=5000)
# Print results
summary(out)
|
416d6b26324e0fcfff837ea4961e8e92f5e552e7
|
e9aea5bfb926656cf440c0df1cc7835589d68736
|
/PPAC/R/cossim_sync_method1.R
|
5874cfdbe7b5279cae019eb5fefff6e092d971be
|
[
"MIT"
] |
permissive
|
Nian-Jingqing/emosync
|
1e639968d263ffba3d86d63dcad447a1f9f54e41
|
4bada30ab5af64691c0b881cca65af55591bf67e
|
refs/heads/master
| 2023-03-30T02:25:24.676248
| 2021-03-26T11:53:29
| 2021-03-26T11:53:29
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,172
|
r
|
cossim_sync_method1.R
|
## Method 1: cosine-similarity synchrony between physiological feature pairs,
## computed per emotion, then summarized across IDs / emotions / feature pairs.
library(tidyr)
library(tidyverse)
library(lmerTest)
library(lme4)
library(MuMIn)
library(lattice)
library(lsa)
library(here)
## Project-local helpers; defines sync_2feats_1emo() used throughout.
source(file.path(here(), 'R', 'cossim_sync_method_functions.R'))
polku <- file.path(here(), 'data', 'ppac_all.csv')
PPAC <- read.csv(polku, header = T, sep = ",")
## Smoke-test call. NOTE(review): orb_avg and fasym_rel are passed unquoted
## here but as strings below -- confirm sync_2feats_1emo() accepts both forms.
sync_2feats_1emo(PPAC, "anger", orb_avg, fasym_rel, TIMECOR = F)
###
# fnames <- subset somehow
featnames <- colnames(select(PPAC, 5:17))
fnames <- featnames[c(1, 5, 6, 7, 8, 9)]
flen <- length(fnames)
emotions <- unique(PPAC$event)
emosync <- NULL
## For each emotion: compute synchrony for every pair of selected features and
## merge the pairwise results into one data frame per emotion.
for (e in emotions){
allsync <- sync_2feats_1emo(PPAC, e, fnames[1], fnames[2], FALSE)
## NOTE(review): `2:flen - 1` parses as (2:flen) - 1, i.e. 1:(flen - 1), so
## the (1, 2) pair computed just above is recomputed on the first iteration --
## confirm whether 2:(flen - 1) was intended.
for (i in 2:flen - 1) {
for (j in (i+1) : flen) {
tmp <- sync_2feats_1emo(PPAC, e, fnames[i], fnames[j], FALSE)
allsync <- merge(allsync, tmp, sort = F)
}
}
emosync <- rbind(emosync, allsync)
rm(tmp, allsync)
}
## Long format: one row per ID x emotion x feature pair.
emosyncL <- emosync %>%
pivot_longer(cols = starts_with("cos.sim_"), names_to = "feat.pair", values_to = "cos.sim") %>%
mutate(feat.pair = gsub("cos.sim_", "", feat.pair))
# average ID x emotion across all feat pairs
mean.cos.sim <- emosyncL %>%
group_by(ID, emotion) %>%
summarise(sync = mean(abs(na.omit(cos.sim))))
# average each emotion across all feat pairs
mean.emo <- emosyncL %>%
group_by(emotion) %>%
summarise(sync = mean(na.omit(cos.sim)))
# average each feat pair across all ID x emotion
mean.feat.pair <- emosyncL %>%
group_by(feat.pair) %>%
summarise(sync = mean(na.omit(cos.sim)))
# filter to Zyg x Orb and average across all ID
mean.emo.zyg.orb <- emosyncL %>%
filter(feat.pair == "zyg_avg_orb_avg") %>%
group_by(emotion) %>%
summarise(zyg.orb.sync = mean(na.omit(cos.sim)))
# filter to Zyg x Orb and average across all emotions
mean.id.zyg.orb <- emosyncL %>%
filter(feat.pair == "zyg_avg_orb_avg") %>%
group_by(ID) %>%
summarise(zyg.orb.sync = mean(na.omit(cos.sim)))
# filter to Anger and average across all ID x emotion
mean.feat.pair.anger <- emosyncL %>%
filter(emotion == "anger") %>%
group_by(feat.pair) %>%
summarise(sync = mean(na.omit(cos.sim)))
## NOTE(review): `test5` and `perf` are not defined anywhere in this script --
## confirm where they come from before running this final model.
df <- merge(test5, perf)
summary(lm(visAcc ~ zyg.orb.sync, data = df))
|
e027b1be20bd907de146b0d19113bfb8b1c8d1ca
|
19a0a4d113de967aa4884b4053a7b3cb68f5c6b1
|
/tests/testthat/test-airzone_metric.R
|
8178078131b4e3d1d7b5002950b6e951d78d0bc3
|
[
"Apache-2.0"
] |
permissive
|
paulroberts68/rcaaqs
|
5ad8feeebb9d33593446d60282241b0e7e2bb3e2
|
435f20598089506d473aa264ed5fb28861aa0536
|
refs/heads/master
| 2021-01-20T14:34:06.947772
| 2017-02-15T18:30:29
| 2017-02-15T18:30:29
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,499
|
r
|
test-airzone_metric.R
|
## testthat unit tests for parse_incomplete() and airzone_metric().
context("airzone_metric")
## When some stations have 3 valid years, values backed by only 2 years
## should be blanked out (set to NA).
test_that("parsing valid years works with some n_years being 2", {
test_s2 <- data.frame(n_years = c(3,3,2,2,3), val = rnorm(5), foo = letters[1:5])
res <- parse_incomplete(test_s2, "n_years", "val")
expect_true(all(is.na(res$val[3:4])))
})
## ...but when every station has only 2 years, all values are kept.
test_that("parsing valid years works with all n_years being 2", {
test_a2 <- data.frame(n_years = rep(2,5), val = rnorm(5), foo = letters[1:5])
res <- parse_incomplete(test_a2, "n_years", "val")
expect_true(all(!is.na(res$val)))
})
## airzone_metric() should return one row per airzone with the maximum value
## among stations that have enough valid years.
test_that("airzone_metric works", {
df <- data.frame(Airzone = sort(rep(LETTERS[1:4],4)),
nyears = c(rep(2, 4), rep(3, 4), c(2,3,2,3,2,3,2,3)),
value = c(1:4, 5:8, 12:9, 13:16), stringsAsFactors = FALSE)
res <- airzone_metric(df, n_years = "nyears", az = "Airzone", val = "value")
expect_equal(dim(res), c(4,3))
expect_equal(res$Airzone, LETTERS[1:4])
expect_equal(res$nyears, c(2,3,3,3))
expect_equal(res$value, c(4,8,11,16))
})
## Extra columns listed in `keep` should be carried through from the
## representative (selected) station's row.
test_that("keep arg works", {
df <- data.frame(site_id = letters[1:16], Airzone = sort(rep(LETTERS[1:4],4)),
nyears = c(rep(2, 4), rep(3, 4), c(2,3,2,3,2,3,2,3)),
value = c(1:4, 5:8, 12:9, 13:16),
otherdata = rep(c("foo", "bar"), 8), stringsAsFactors = FALSE)
res <- airzone_metric(df, n_years = "nyears", az = "Airzone", val = "value",
keep = c("site_id", "otherdata"))
expect_equal(dim(res), c(4,5))
expect_equal(res$Airzone, LETTERS[1:4])
expect_equal(res$nyears, c(2,3,3,3))
expect_equal(res$value, c(4,8,11,16))
expect_equal(res$site_id, c("d", "h", "j", "p"))
})
## Named entries in `keep` should rename the carried-through columns,
## including the built-in output columns (e.g. nyears).
test_that("renaming keep cols works", {
df <- data.frame(site_id = letters[1:16], Airzone = sort(rep(LETTERS[1:4],4)),
nyears = c(rep(2, 4), rep(3, 4), c(2,3,2,3,2,3,2,3)),
value = c(1:4, 5:8, 12:9, 13:16),
otherdata = rep(c("foo", "bar"), 8), stringsAsFactors = FALSE)
res <- airzone_metric(df, n_years = "nyears", az = "Airzone", val = "value",
keep = c(rep_site = "site_id", foobar = "otherdata"))
expect_equal(names(res), c("Airzone", "nyears", "value", "rep_site", "foobar"))
res <- airzone_metric(df, n_years = "nyears", az = "Airzone", val = "value",
keep = c(rep_site = "site_id", "otherdata", yearssss = "nyears"))
expect_equal(names(res), c("Airzone", "yearssss", "value", "rep_site", "otherdata"))
})
|
cc1f32bf27cdce78a8f4305b223eee1ff6f2b9ef
|
bd8a7c215d851e6b3c44165baec15e3f13efb665
|
/man/es_fileset_present.Rd
|
4a1fbeef6a518811f97efe8c7b8d84b501f04ceb
|
[] |
no_license
|
mYstar/easyshiny
|
dfe36d11f97d390cb3e7e5548f64d6939b9de36a
|
9987d571a65ecdb6004cfa112ad80f027694b0fd
|
refs/heads/master
| 2020-04-12T15:02:55.283045
| 2019-06-19T08:19:46
| 2019-06-19T08:19:46
| 162,569,346
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 492
|
rd
|
es_fileset_present.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/es_filechecks.R
\name{es_fileset_present}
\alias{es_fileset_present}
\title{Check for Fileset}
\usage{
es_fileset_present(filesets, setnumber)
}
\arguments{
\item{filesets}{the fileset list}
\item{setnumber}{the number of the set to check}
}
\value{
a boolean value (TRUE if the fileset exists, else FALSE)
}
\description{
Checks if a fileset has been uploaded. Used when dynamically loading the upload fields.
}
|
c812bca4fa46ca93ec3af216b381e750e29703be
|
dbd79ac5dadffb31324ec696f33c948efdf9796d
|
/man/Gpext2terminal.Rd
|
d14824b5016586ba975384ac17b1d49f85e27be5
|
[] |
no_license
|
cran/Rgnuplot
|
a0c8659432434600da277ebd97690f6d4a1ae4f3
|
c4eb64e6c9dfab0bfb0e433cc4298acce86a8ab6
|
refs/heads/master
| 2016-09-15T18:54:37.170197
| 2015-07-28T00:00:00
| 2015-07-28T00:00:00
| 17,693,315
| 4
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 328
|
rd
|
Gpext2terminal.Rd
|
\name{Gpext2terminal}
\alias{Gpext2terminal}
\title{Determine a suitable terminal from a file extension}
\description{\code{Gpext2terminal} determines a suitable gnuplot terminal name from a file extension.}
\usage{Gpext2terminal(filetype='PNG')}
\arguments{ \item{filetype}{ file extension}
}
\value{ terminal name}
\seealso{ \code{\link{GpsetTerm}}}
\author{Jose' Gama}
\keyword{programming}
|
8e27d9702978c3ff68aa65bd545550b6710c61a4
|
0dc7ef54a4a4a0566fc7f1af18c06bf001c8a570
|
/L1/code/util.r
|
45d580c1d56c38583b5df6929f07e19c3288eb70
|
[] |
no_license
|
holypriest/MAE0217-2017
|
480f13daae41d7ab71552df79bdf5d7383380598
|
89c78f5da3ff54cc0c2ba33527a752b7eb8933d4
|
refs/heads/master
| 2020-12-03T02:19:24.724126
| 2017-06-30T22:30:08
| 2017-06-30T23:07:24
| 95,927,054
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 844
|
r
|
util.r
|
# Calculates basic statistical measures for the given data.
#
# Arguments:
#   data: Numeric vector for which the statistical measures are computed.
#   digits: Number of decimal places to be used.
#
# Prints (via write.table) a two-row table: the measure names
# (Min / 1st Qu. / Median / Mean / Std. Dev. / 3rd Qu. / Max) over their
# values, each formatted to `digits` decimal places.
basic_measures = function(data, digits=2) {
  # BUG FIX: the original computed everything from the global `df$Velocidade`,
  # silently ignoring the `data` argument. All measures now use `data`.
  avg = mean(data)
  stdev = sd(data)
  # Note: Algorithm used for quantile() depends on 'type' attribute!
  quants = quantile(data)
  measures = c("Min.", "1st Qu.", "Median", "Mean", "Std. Dev.", "3rd Qu.", "Max")
  # Force values to use the desired number of decimal places
  values = round(c(quants[1:3], avg, stdev, quants[4:5]), digits)
  values = format(values, nsmall=digits)
  # Join arrays in a matrix and print the result
  table = format(rbind(measures, values), justify="right")
  write.table(table, row.names=F, col.names=F, quote=F)
}
|
b14f87ae1633fcc1a66e16eed9f9a3cebc723932
|
060d160980d35f8cc59a4613319444af19c6a740
|
/R/studies_per_huc.R
|
54ff4ba8f89c470e5951de357fe6cff598c4acbd
|
[] |
no_license
|
mfoard/mountain_climate
|
ce4b799d34f817324af892a5b003e1bd3ab6bcc7
|
1f63a64e4c5e4f9b047e0b35c9eb55398e2f7540
|
refs/heads/master
| 2020-03-17T15:51:21.867803
| 2018-05-15T20:04:58
| 2018-05-15T20:04:58
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,471
|
r
|
studies_per_huc.R
|
# Make a map to show total number of studies in each HUC6 basin.
# Subset to studies that are HUC-specific.
library(tidyverse)
library(tidytext)  # unnest_tokens() is used below but was never attached (fixed)
library(sf)
library(ggthemes)
# Get data - should ultimately be combination of first and second round.
dat <- read_csv("../results/tabular/single_copy_results.csv")
# Get HUC6 polygons, reproject to WGS84, and simplify for faster plotting.
huc6_raw <- st_read("../data/spatial/crb_huc6.shp")
#laea <- st_crs("+proj=laea +lat_0=30 +lon_0=-95")
huc6_raw <- st_transform(huc6_raw, 4326)
huc6 <- st_simplify(huc6_raw, dTolerance = 0.1)
# Count studies per HUC: split the comma-separated huc6 field into one row per
# basin, count, and keep the first four characters of each code.
huc_dat <- dat %>%
  unnest_tokens(huc6, huc6, token = stringr::str_split, pattern = ", ") %>%
  group_by(huc6) %>%
  count(sort = TRUE) %>%
  ungroup() %>%
  mutate(huc6 = str_sub(huc6, 1, 4))
# Prefix region code "17" to the numeric basin codes to form full HUC6 IDs.
huc_dat$huc6[grep("[[:digit:]]", huc_dat$huc6)] <- paste0(17, huc_dat$huc6[grep("[[:digit:]]", huc_dat$huc6)])
# Get rid of all/both HUC designations.
huc_dat <- huc_dat %>%
  filter(!is.na(huc6)) %>%
  filter(!grepl("all", huc6)) %>%
  filter(!grepl("both", huc6)) %>%
  rename("HUC6" = "huc6")
huc6 <- left_join(huc6, huc_dat)
# Raw counts map.
pdf("../results/figures/studies_per_huc.pdf", width = 12, height = 8)
plot(huc6[5],
     main = "Studies per HUC6",
     graticule = TRUE)
dev.off()
# Normalize by area (studies per 1000 sq. km).
huc6 <- huc6 %>% mutate(n_per_area = 1000 * n / AREASQKM)
pdf("../results/figures/studies_per_huc_area.pdf", width = 12, height = 8)
# plot() draws as a side effect; the previous `p2 <-` assignment was unused.
plot(huc6[6],
     main = "Studies per HUC6 - area normalized",
     graticule = TRUE)
dev.off()
|
20016d59b04ee47d765c89015727919a6a538b74
|
bfd8b304248b6e65cb6b6d3ede810b53187af621
|
/doc/Topic Modeling Tutorial_Original.R
|
91cdabd371ce6bb7813cb70cf31aee0bd6fc1f49
|
[] |
no_license
|
TZstatsADS/Fall2016-proj4-CHuang0-0
|
47c9a0511887f2c8e09c3f3b1923a85efbbf181d
|
4588a56e49185cabb031ebd39918a730088b4427
|
refs/heads/master
| 2021-05-01T00:30:51.522807
| 2016-12-13T21:22:32
| 2016-12-13T21:22:32
| 72,900,689
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 5,629
|
r
|
Topic Modeling Tutorial_Original.R
|
---
title: "Topic Modelling"
date: "11/9/2016"
output: html_document
---
## References
The tutorial is replicated from the [A topic model for movie reviews](https://cpsievert.github.io/LDAvis/reviews/reviews.html) with added comment.
## Essential theories
### 1. Text problems, Unsupervises, Bag-of-Words Model, Bayesian
**Order doesn't matter!**

###2. Setup
### Definitions:
#### - Topics: Probability distributions on vocabulary
A topic is like a collection of distributions on words. Different topics have different distributions of words.
#### - Topic proportions: Probability distributions on the topics
Like Issac Asimov's *The Last Question* definitely has a topic distribution different from, say Donald Trump's speech script.
#### - Topic assignments: Assigns each observed word to a topic
###3. A Closer Look at the Model set-up

#### Slides taken from Professor John Paisley's lecture EECS E6720 Bayesian Models for Machine Learning.
For full information on this course please check [here](http://www.columbia.edu/~jwp2128/Teaching/E6720/Fall2016/E6720Fall2016.html)
###4. Intuitively, why does it work?

Again slides credit to Professor John Paisley.
## Topic Modelling in R
#### And for the next tutorial I will be following the tutorial available online [here](https://cpsievert.github.io/LDAvis/reviews/reviews.html)
##### Packages used
```{r setup}
data(reviews, package = "LDAvisData")
library(NLP)
library(tm)
library(lda)
library(LDAvis)
```
##### Pre-processing
```{r}
stop_words <- stopwords("SMART")
# pre-processing:
reviews <- gsub("'", "", reviews) # remove apostrophes
reviews <- gsub("[[:punct:]]", " ", reviews) # replace punctuation with space
reviews <- gsub("[[:cntrl:]]", " ", reviews) # replace control characters with space
reviews <- gsub("^[[:space:]]+", "", reviews) # remove whitespace at beginning of documents
reviews <- gsub("[[:space:]]+$", "", reviews) # remove whitespace at end of documents
reviews <- tolower(reviews) # force to lowercase
```
##### Prepare the Inputs!
**doc.list: A list of documents**
```{R}
# tokenize on space and output as a list:
doc.list <- strsplit(reviews, "[[:space:]]+")
length(doc.list) # Length: number of documents
length(doc.list[[1]]) # A splited string of words
```
**term.table: vocabulary and their frequency**
```{R}
# compute the table of terms:
term.table <- table(unlist(doc.list))
term.table <- sort(term.table, decreasing = TRUE)
head(term.table) # Just to give an idea of how it looks like
```
Term table is a table of ALL terms that exist in our collections of documents. Here we still see words that do not bear much information, so we move on to delete them.
**Vocab: all distinct words**
```{R}
# remove terms that are stop words or occur fewer than 5 times:
del <- names(term.table) %in% stop_words | term.table < 5
term.table <- term.table[!del]
vocab <- names(term.table) # Vocab: a vector of all words
```
**"Now put the documents into the format required by the lda package"**
```{R}
# now put the documents into the format required by the lda package:
get.terms <- function(x) {
index <- match(x, vocab)
index <- index[!is.na(index)]
rbind(as.integer(index-1), as.integer(rep(1, length(index))))
}
documents <- lapply(doc.list, get.terms)
documents[1]
```
`DOCUMENTS` is basically a large list of all the documents we have; each entry is a 2*length(document_i) matrix recording which words from the `VOCAB` appeared in that document.
```{R}
# Compute some statistics related to the data set:
D <- length(documents) # number of documents (2,000)
W <- length(vocab) # number of terms in the vocab (14,568)
doc.length <- sapply(documents, function(x) sum(x[2, ])) # number of tokens per document [312, 288, 170, 436, 291, ...]
N <- sum(doc.length) # total number of tokens in the data (546,827)
term.frequency <- as.integer(term.table) # frequencies of terms in the corpus [8939, 5544, 2411, 2410, 2143,
# MCMC and model tuning parameters:
K <- 20
G <- 5000
alpha <- 0.02
eta <- 0.02
```
```{R,eval=FALSE}
# Fit the model:
library(lda)
set.seed(357)
t1 <- Sys.time()
fit <- lda.collapsed.gibbs.sampler(documents = documents, K = K, vocab = vocab,
num.iterations = G, alpha = alpha,
eta = eta, initial = NULL, burnin = 0,
compute.log.likelihood = TRUE)
t2 <- Sys.time()
t2 - t1 # about 24 minutes on laptop
```
##### Visualization
```{R,eval=FALSE}
theta <- t(apply(fit$document_sums + alpha, 2, function(x) x/sum(x)))
phi <- t(apply(t(fit$topics) + eta, 2, function(x) x/sum(x)))
MovieReviews <- list(phi = phi,
theta = theta,
doc.length = doc.length,
vocab = vocab,
term.frequency = term.frequency)
library(servr)
# create the JSON object to feed the visualization:
json <- createJSON(phi = MovieReviews$phi,
theta = MovieReviews$theta,
doc.length = MovieReviews$doc.length,
vocab = MovieReviews$vocab,
term.frequency = MovieReviews$term.frequency)
serVis(json, out.dir = 'vissample', open.browser = FALSE)
```
```{r,include=FALSE,eval=FALSE}
# 1. separate logistics
# 2. Naive bayes
# 3. Tree structure, generative, create music vocabulary; association mining
# 4. Topic modelling: use features to predict topic profiles and then generate words
```
|
a6668b064d0d689767c302aca3129e40d97b89f4
|
6a2ca2c8c3e362ea3eb401124673a3e31b176586
|
/4_import_export.r
|
bc0a5f054ff8e38f781c662e5b055bc8166725be
|
[] |
no_license
|
shraban020/r-analytics
|
31986b333e4edf33436f55237ecd257ae2fbe787
|
bc1806c8bead8d9051fa5617c2578252ab23b0ce
|
refs/heads/master
| 2020-08-27T02:51:53.251793
| 2018-08-24T04:48:17
| 2018-08-24T04:48:17
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 924
|
r
|
4_import_export.r
|
# Demonstration script: importing and exporting data in R (CSV, text, Excel).
# Import a CSV file (interactive chooser, then a fixed path)
csv_file<-read.csv(file.choose())
csv_file<-read.csv(file="d://out5.csv")
# View the file
View(csv_file)
# Edit the file
csv_file<-edit(csv_file)
# Export CSV file
write.csv(csv_file,file.choose())
# Import a text file
txt_file<-read.table(file="d://out5.csv",header=TRUE,sep=" ")
# Using write.table
write.table(csv_file,file="d://out5.csv",sep=",",row.names=FALSE)
# Bug fix: this line previously exported the undefined object `datafile`;
# export the data frame that was actually imported above.
write.table(csv_file,file.choose(),sep=",",row.names=FALSE)
# Install the XLSX package to work with excel files
# ***Requires 32 bit version and java compatibility
# to work
install.packages("xlsx",dependencies=TRUE)
# Load the package into memory
library(xlsx)
# Import worksheet from an excel file (sheet index 1)
a<-read.xlsx(file.choose(),1)
a
# Write to excel file (append=TRUE adds a second sheet to the same workbook)
write.xlsx(a,file.choose(),sheetName="test",row.names=FALSE)
write.xlsx(a,file.choose(),sheetName="test2",append=TRUE,row.names=FALSE)
|
470242e691e13c559eac702ac782c0dc54251493
|
ab50d0187fff17c5e3576ca4d912760ad7fe245b
|
/R/ate.randomForest.R
|
ec4916a6db0be34f1c1ee1289fc099d3930f7b65
|
[
"MIT"
] |
permissive
|
ge-li/crossEstimation
|
a061651439daea43d8a57ad1e22913f92007fd62
|
cc0f1a3e7884475466673f5542fd795d556460d7
|
refs/heads/master
| 2021-12-12T21:52:09.400340
| 2017-02-20T23:03:39
| 2017-02-20T23:03:39
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,623
|
r
|
ate.randomForest.R
|
# Estimate the average treatment effect (ATE) with random-forest adjustment.
#
# Fits one random forest per treatment arm, forms predictions for every unit
# under both arms (out-of-bag on the arm the forest was trained on), and
# estimates the ATE from residuals around a sample-size-weighted average of
# the two arm predictions, with a normal-approximation confidence interval.
#
# Args:
#   X: covariate matrix, one row per observation.
#   Y: outcome vector; treated as binary classification when it takes only
#      two distinct values, as regression otherwise.
#   W: treatment assignment, encoded as a 0-1 vector.
#   nodesize: minimum terminal node size passed to randomForest.
#   conf.level: coverage level of the reported confidence interval.
#
# Returns:
#   list(tau = point estimate, var = variance estimate,
#        conf.int = confidence interval, conf.level = conf.level)
ate.randomForest = function(X, Y, W, nodesize = 20, conf.level = .9) {
  if (prod(W %in% c(0, 1)) != 1) {
    stop("Treatment assignment W must be encoded as 0-1 vector.")
  }
  nobs = nrow(X)
  yhat.0 = rep(NA, nobs)
  yhat.1 = rep(NA, nobs)
  if (length(unique(Y)) > 2) {
    # Regression forests. predict(rf) without newdata returns out-of-bag
    # predictions, avoiding in-sample overfitting bias on the training arm.
    rf.0 = randomForest::randomForest(X[W == 0, ], Y[W == 0], nodesize = nodesize)
    rf.1 = randomForest::randomForest(X[W == 1, ], Y[W == 1], nodesize = nodesize)
    yhat.0[W == 0] = stats::predict(rf.0)
    yhat.0[W == 1] = stats::predict(rf.0, newdata = X[W == 1, ])
    yhat.1[W == 1] = stats::predict(rf.1)
    yhat.1[W == 0] = stats::predict(rf.1, newdata = X[W == 0, ])
  } else {
    # Binary outcome: classification forests; use the predicted probability
    # of the second factor level as the fitted value.
    rf.0 = randomForest::randomForest(X[W == 0, ], factor(Y)[W == 0], nodesize = nodesize)
    rf.1 = randomForest::randomForest(X[W == 1, ], factor(Y)[W == 1], nodesize = nodesize)
    yhat.0[W == 0] = stats::predict(rf.0, type = "prob")[, 2]
    yhat.0[W == 1] = stats::predict(rf.0, newdata = X[W == 1, ], type = "prob")[, 2]
    yhat.1[W == 1] = stats::predict(rf.1, type = "prob")[, 2]
    yhat.1[W == 0] = stats::predict(rf.1, newdata = X[W == 0, ], type = "prob")[, 2]
  }
  # Weighted average of the two arm predictions (weights swap the arm sizes),
  # used as the regression adjustment.
  yhat.bar = (sum(W == 1) * yhat.0 + sum(W == 0) * yhat.1) / nobs
  tau.hat = mean((Y - yhat.bar)[W == 1]) - mean((Y - yhat.bar)[W == 0])
  # Cleanup: removed the unused local `pobs`; namespaced var/qnorm for
  # consistency with the stats:: calls above.
  var.hat = stats::var((Y - yhat.bar)[W == 1]) / sum(W == 1) +
    stats::var((Y - yhat.bar)[W == 0]) / sum(W == 0)
  z = stats::qnorm(1 - (1 - conf.level) / 2)
  ci = c(tau.hat - z * sqrt(var.hat), tau.hat + z * sqrt(var.hat))
  list(tau = tau.hat, var = var.hat, conf.int = ci, conf.level = conf.level)
}
|
d04213df90ba90856e5a224ac614ad74b7cdb50c
|
44ea20642e56ff6cc836029bcda5a29390335b30
|
/R/d.binormal.R
|
fe9abe75ec83957001536d82861d6721cce1ec37
|
[] |
no_license
|
cran/idr
|
e8906789b0be3ba0663d46da33f36ea46c2cfd96
|
4fa51a408935584f97292a091cf32c6e307d9cc6
|
refs/heads/master
| 2022-07-18T03:32:18.278592
| 2022-06-21T06:30:07
| 2022-06-21T06:30:07
| 17,696,749
| 0
| 1
| null | null | null | null |
UTF-8
|
R
| false
| false
| 209
|
r
|
d.binormal.R
|
# Log-density of a symmetric bivariate normal: both coordinates share the
# same mean `mu` and standard deviation `sigma`, with correlation `rho`.
# Vectorised over z.1 and z.2; returns the log-likelihood.
d.binormal <-
function(z.1, z.2, mu, sigma, rho){
  d1 <- z.1 - mu
  d2 <- z.2 - mu
  one.minus.rho2 <- 1 - rho^2
  # Mahalanobis-style quadratic form for the correlated pair.
  quad <- (d1^2 - 2 * rho * d1 * d2 + d2^2) / (one.minus.rho2 * sigma^2)
  -log(2) - log(pi) - 2 * log(sigma) - 0.5 * log(one.minus.rho2) - 0.5 * quad
}
|
80af5bb0d197d160ff2e228f31f1759f613dbde1
|
94908a285737843999c5acaaad60199538a5c8d6
|
/man/barplot.gety.Rd
|
22c777bdaabdaff3d450a5e555baa9a53cc9348a
|
[] |
no_license
|
drmjc/mjcgraphics
|
800716e07757066d992a6eb1ea0470012cb8f698
|
cd9e30472fea14591bc342b24cc8330307fb9b4c
|
refs/heads/master
| 2021-01-19T01:47:00.882419
| 2016-06-07T06:10:21
| 2016-06-07T06:10:21
| 12,447,176
| 1
| 1
| null | null | null | null |
UTF-8
|
R
| false
| false
| 608
|
rd
|
barplot.gety.Rd
|
\name{barplot.gety}
\alias{barplot.gety}
\title{Function to determine the height of each bar with extra padding.}
\usage{
barplot.gety(height, space = 0.05, ...)
}
\arguments{
\item{height}{the same vector that you passed to barplot}
\item{space}{the gap between top/bottom of bar and the
point, as a proportion of the y-range}
\item{\dots}{currently ignored}
}
\value{
a numeric vector. TODO: handle \code{beside=FALSE} and
matrices for \code{height}.
}
\description{
Computes a y position just beyond each bar; for example,
so you can add text above the positive bars and below
the negative bars.
}
\author{
Mark Cowley, 2009-03-20
}
|
f163cd22aae20ceced77559c8a3cb464dbae951b
|
f2e2b71783b916dfff5d6ee1b7b5dc93bb55d84b
|
/deprecated/plot-random-walk.R
|
13eec147a8d5f621360a00bcc7dddd09b43daf93
|
[
"MIT"
] |
permissive
|
bcow/GPP
|
7ca6937ea1093aad4ab145fdc52a4d7bba60f5d5
|
50ca55f7f8feb94b9caa8c0108338c6807fb0f09
|
refs/heads/master
| 2021-01-15T10:20:50.617154
| 2016-05-04T18:49:03
| 2016-05-04T18:49:03
| 53,826,500
| 0
| 0
| null | 2016-03-14T04:19:23
| 2016-03-14T04:19:23
| null |
UTF-8
|
R
| false
| false
| 1,849
|
r
|
plot-random-walk.R
|
# Fit a random walk
library(ggplot2)
library(gridExtra)
# Restores previously saved MODIS objects; presumably provides the
# `modis.list` iterated over below -- confirm against the .RData contents.
load("modis-download/modis.data.RData")
# Map a MODIS data-file name to its human-readable site name.
# Unrecognised names yield NULL, matching the original if/else chain
# (which had no final else branch).
sitenames <- function(name){
  switch(name,
    "MOD15A2.fn_uswiwill.txt" = "Willow-Creek",
    "MOD15A2.fn_uslcreek.txt" = "Lost-Creek",
    "MOD15A2.fn_ussylvan.txt" = "Sylvania")
}
# Run the state-space random-walk fit for each MODIS site, then plot
# posterior diagnostics and the fitted time series with a credible band.
source("modisRandomWalk.R")
b = "Fpar_1km"
for(i in seq_along(modis.list)){
# Map the raw MODIS file name to a readable site name.
modis.name <- sitenames(names(modis.list)[i])
# Restores `quants` (summarised observations) for this site/band.
load(sprintf("%s.%s.Rdata", modis.name, b))
out <- modisRandomWalk(quants$time,quants$mean, n.iter=10000, diag_plot=F)
out <- as.data.frame(out)
# Posterior histograms of the process (tau_add) and observation (tau_obs)
# precisions, plus their joint scatter, arranged on one page.
p <- ggplot(out)
p1 <- p + geom_histogram(aes(x = tau_add, y = ..density..), bins=30)
p2 <- p + geom_histogram(aes(x = tau_obs, y = ..density..), bins=30)
p3 <- p + geom_point(aes(x = tau_add,y = tau_obs))
grid.arrange(p1, p2, p3, layout_matrix = matrix(c(1,2,3,3),2,2,byrow=TRUE), top = modis.name)
# 95% credible interval and median for the latent states (columns 3+).
out_quants <- as.data.frame(t(apply(out[,3:ncol(out)],2,quantile,c(0.025,0.5,0.975))))
colnames(out_quants) <- c("low","mean","high")
# NOTE(review): `dat` is not defined anywhere in this script; presumably
# restored by one of the load() calls above -- confirm before reuse.
out_quants$time <- as.Date( as.POSIXlt(substr(dat$time,2,8),format="%Y%j"))
p4 <- ggplot(out_quants) +
geom_line(aes(y=mean, x=time, colour = "mean obvs")) +
geom_line(aes(y=quants$mean, x=quants$time, colour = "mean pred"), alpha =.3)+
geom_ribbon(aes(ymin=low, ymax=high, x=time, fill = "95%"), alpha = 0.3)+
scale_colour_manual("",values=c("darkred","darkblue"))+
scale_fill_manual("",values="red") +
labs(title = modis.name, y = b, x = "Time") +
scale_x_date(date_breaks = "1 years", date_minor_breaks = "2 months", date_labels = "%Y") +
scale_y_continuous(limits = c(-.5,1.5)) +
theme_bw() + theme(text = element_text(size=18)) +
theme(axis.text.x = element_text(angle = 45, hjust = 1))
plot(p4)
}
|
993ed0250b23113a16e2ae4f0342a9ee94771844
|
ffdea92d4315e4363dd4ae673a1a6adf82a761b5
|
/data/genthat_extracted_code/sfadv/examples/Farms.Rd.R
|
e820256aac33adeb82592dab95d2a69b467835b1
|
[] |
no_license
|
surayaaramli/typeRrh
|
d257ac8905c49123f4ccd4e377ee3dfc84d1636c
|
66e6996f31961bc8b9aafe1a6a6098327b66bf71
|
refs/heads/master
| 2023-05-05T04:05:31.617869
| 2019-04-25T22:10:06
| 2019-04-25T22:10:06
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 286
|
r
|
Farms.Rd.R
|
# Example script for the `Farms` data set shipped with the sfadv package:
# quick inspection followed by a linear model of farm output on inputs.
library(sfadv)
### Name: Farms
### Title: Data set of farm accountancy data
### Aliases: Farms
### Keywords: datasets
### ** Examples
# Inspect the data: first rows, structure, and summary statistics.
head(Farms)
str(Farms)
summary(Farms)
# OLS regression of farm output on land, labour, assets and costs.
lm.output <- lm(farm_output ~ agri_land + tot_lab + tot_asset + costs, data = Farms)
summary(lm.output)
|
8c8c72a954464760c9e7564f5848ee37d56b8cad
|
401213f0cb5fb3ebac8133262b6d0e65326fffc7
|
/man/conditional_np.Rd
|
cad113796d49ba0ba558343a96d4ded9a51c63b3
|
[] |
no_license
|
fhernanb/usefultools
|
41e6464990cb436d5e2c6adf546a891bde036cbb
|
6be5f5583152c7d966cdea7df503e484262e3b5c
|
refs/heads/master
| 2020-04-23T18:42:25.417048
| 2019-02-25T14:23:25
| 2019-02-25T14:23:25
| 171,377,234
| 1
| 1
| null | null | null | null |
UTF-8
|
R
| false
| true
| 1,131
|
rd
|
conditional_np.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/conditional_np.R
\name{conditional_np}
\alias{conditional_np}
\title{Conditional distribution of a multivariate normal distribution Np}
\usage{
conditional_np(x, mu, Sigma)
}
\arguments{
\item{x}{is a vector with the values that are known. For example, \code{c(3, NA, 5, NA)} means that we are interested in the conditional distribution of X2 and X4 when X1=3 and X3=5.}
\item{mu}{is the mean vector of the Np.}
\item{Sigma}{is the covariance matrix of the Np.}
}
\value{
a list with the mean and covariance matrix of the conditional distribution.
}
\description{
This function can be used to obtain the conditional distribution of any Np.
}
\examples{
# Consider a N3 normal distribution
mu <- c(3, -1, 1)
Sigma <- matrix(c(3, -1, 1,
-1, 1, 0,
1, 0, 2), ncol=3)
# We want the conditional distribution of X2 given X1=2 and X3=0
conditional_np(x=c(2, NA, 0), mu=mu, Sigma=Sigma)
# We want the conditional distribution of X1 and X3 given X2=2
conditional_np(x=c(NA, 2, NA), mu=mu, Sigma=Sigma)
}
|
f21f94f8fbab0ce0bad159cd1d17cabca561a6e6
|
234f7a4e847bd66a235ab998ada749c26fcaba8a
|
/analyses/ss2/check_reference/Rnotebooks/check_reference_pseudogene.R
|
2fe0cac92f8767884319aff44274fe01fc637b9d
|
[
"BSD-3-Clause"
] |
permissive
|
HumanCellAtlas/skylab-analysis
|
2ac1c4f70894b3d634281aee519a38d9b7ff40f4
|
983e7744d416ad2492f3367fea3f7b324dc1a567
|
refs/heads/master
| 2022-12-12T04:56:51.220490
| 2020-02-19T17:55:05
| 2020-02-19T17:55:05
| 118,922,733
| 3
| 0
|
BSD-3-Clause
| 2022-06-22T01:12:45
| 2018-01-25T14:24:47
|
HTML
|
UTF-8
|
R
| false
| false
| 13,632
|
r
|
check_reference_pseudogene.R
|
## ------------------------------------------------------------------------
# Load and parse four annotations for comparison: Gencode v27 comprehensive
# and basic, Ensembl 90, and RefSeq GRCh38.p11. system.time() wrappers just
# report how long each readGFF parse takes.
library(rtracklayer)
library(ggplot2)
library(plyr)
library(reshape2)
library(plotly)
library(VennDiagram)
require(gplots)
system.time(gtf_gencode_comp <- readGFF("~/Documents/HCA/reference/refs/gencode.v27.chr_patch_hapl_scaff.annotation.gtf", version=2L, tags = c("gene_name","gene_id", "transcript_id","gene_type")))
system.time(gtf_gencode_basic <- readGFF("~/Documents/HCA/reference/refs/gencode.v27.chr_patch_hapl_scaff.basic.annotation.gtf", version=2L, tags = c("gene_name","gene_id", "transcript_id","gene_type")))
system.time(gtf_ensembl <- readGFF("~/Documents/HCA/reference/refs/Homo_sapiens.GRCh38.90.gtf", version=2L,tags = c("gene_name","gene_id", "transcript_id","gene_biotype")))
system.time(gtf_refseq <- readGFF("~/Documents/HCA/reference/refs/ncbi-genomes-2017-10-05/GCF_000001405.37_GRCh38.p11_genomic.gff.gz", tags = c("ID", "Name","gbkey","gene","gene_biotype","Parent")))
# Unique gene-name sets per annotation (g1 = RefSeq, g2 = Ensembl,
# g3 = Gencode comprehensive, g4 = Gencode basic).
g4<-unique(gtf_gencode_basic$gene_name)
g3<-unique(gtf_gencode_comp$gene_name)
g2<-unique(gtf_ensembl$gene_name)
g1<-unique(na.omit(gtf_refseq$gene))
## ------------------------------------------------------------------------
# Pseudogene biotypes of interest; tabulate how many Gencode comprehensive
# gene records fall into each pseudogene class.
ptype<-c('processed_pseudogene','pseudogene','transcribed_unitary_pseudogene','transcribed_unprocessed_pseudogene','unprocessed_pseudogene')
g3.pseudo<-subset(gtf_gencode_comp,gtf_gencode_comp$gene_type %in% ptype & gtf_gencode_comp$type == "gene")
g3.tab<-as.matrix((table(g3.pseudo$gene_type)))
g3.tab
## ------------------------------------------------------------------------
## Two mapping categories: uniquely ('unq') and multiple ('mult') mapped reads.
## For each category and read length, read every per-sample count file and
## aggregate counts by Gencode comprehensive gene_type.
gene.counts<-list('unq'=list(),'mult'=list())
for(aln in c('unq','mult')){
## two read lengths, 25bp and 100bp
pcounts<-list('25'=c(),'100'=c())
sralist<-c()
for(nb in c('25','100')){
##load files
files<-list.files(path='~/Documents/HCA/reference/counts/',pattern=paste(nb,"_GRCh38_GencodeV27.gene.",aln,".counts.txt",sep=''))
for(fn in files){
## parse out sra ID (the leading underscore-separated token of the file name)
sra<-unlist(strsplit(fn,split='_'))[1]
sralist<-c(sralist,sra)
df<-read.delim(paste('~/Documents/HCA/reference/counts/',fn,sep=''),sep='\t',header=T,skip=1)
## only need the first and seventh columns: gene id (ensembl ID) and the actual counts
x<-df[,c(1,7)]
colnames(x)<-c('gene_id','counts')
## only need the gene annotation, not transcripts
y<-subset(gtf_gencode_comp,type == 'gene')
## left join the two tables by gene_id
z<-join(x,y,by='gene_id')
## aggregate sum by gene type, such as coding, lncRNA
z.agg<-aggregate(z$counts,by=list(z$gene_type), FUN=sum,na.rm=T)
colnames(z.agg)<-c('Group',sra)
## first sample starts the table; later samples are merged in as new columns
if(length(pcounts[[nb]])==0){
pcounts[[nb]]<-z.agg
}else{
pcounts[[nb]]<-merge(pcounts[[nb]],z.agg,by='Group')
}
}
}
gene.counts[[aln]]<-pcounts
}
## ------------------------------------------------------------------------
## Same aggregation as above, but against the Gencode *basic* annotation
## (files matching "_GRCh38_GencodeV27_basic").
gene.counts.basic<-list('unq'=list(),'mult'=list())
for(aln in c('unq','mult')){
pcounts.basic<-list('25'=c(),'100'=c())
sralist<-c()
for(nb in c('25','100')){
files<-list.files(path='~/Documents/HCA/reference/counts/',pattern=paste(nb,"_GRCh38_GencodeV27_basic.gene.",aln,".counts.txt",sep=''))
for(fn in files){
# SRA accession is the leading token of the file name.
sra<-unlist(strsplit(fn,split='_'))[1]
sralist<-c(sralist,sra)
df<-read.delim(paste('~/Documents/HCA/reference/counts/',fn,sep=''),sep='\t',header=T,skip=1)
# Keep gene id and the counts column only.
x<-df[,c(1,7)]
colnames(x)<-c('gene_id','counts')
# Gene-level records of the basic annotation.
y<-subset(gtf_gencode_basic,type == 'gene')
z<-join(x,y,by='gene_id')
# Total counts per gene_type for this sample.
z.agg<-aggregate(z$counts,by=list(z$gene_type), FUN=sum,na.rm=T)
colnames(z.agg)<-c('Group',sra)
if(length(pcounts.basic[[nb]])==0){
pcounts.basic[[nb]]<-z.agg
}else{
pcounts.basic[[nb]]<-merge(pcounts.basic[[nb]],z.agg,by='Group')
}
}
}
gene.counts.basic[[aln]]<-pcounts.basic
}
## ------------------------------------------------------------------------
## Summarise gene counts per annotation category for the Gencode
## comprehensive ('comp') and basic annotations, across both mapping
## categories, at read length 100. Builds `output` (absolute counts) and
## `output.per` (per-sample fraction of total gene counts).
atype<-c('protein_coding','antisense_RNA','processed_transcript','lincRNA','Mt_rRNA')
rlen<-c('100')
alns<-c('unq','mult')
for (rr in rlen){
  output<-c()
  output.per<-c()
  for(aln in alns ){
    pcounts<-gene.counts[[aln]]
    pcounts.basic<-gene.counts.basic[[aln]]
    ## load table
    s1<-pcounts[[rr]]
    s2<-pcounts.basic[[rr]]
    ## total gene counts from all annotation
    s1.tot<-apply(s1[,-1],2,sum)
    s2.tot<-apply(s2[,-1],2,sum)
    ## pseudo gene counts: all pseudogene biotypes collapsed into one row
    p1<-subset(s1,s1$Group %in% ptype)
    p2<-subset(s2,s2$Group %in% ptype)
    p1.sum<-data.frame('Group'='pseudo',t(apply(p1[,-1],2,sum)))
    p2.sum<-data.frame('Group'='pseudo',t(apply(p2[,-1],2,sum)))
    ## total counts in each gene category
    c1<-subset(s1,s1$Group %in% atype)
    c2<-subset(s2,s2$Group %in% atype)
    ## others: not in listed annotation types or pseudogene
    o1<-subset(s1,!(s1$Group %in% c(ptype,atype)))
    o2<-subset(s2,!(s2$Group %in% c(ptype,atype)))
    ## sum of category
    o1.sum<-data.frame('Group'='others',t(apply(o1[,-1],2,sum)))
    o2.sum<-data.frame('Group'='others',t(apply(o2[,-1],2,sum)))
    ## total gene counts
    tot1<-rbind(p1.sum,c1,o1.sum)
    tot2<-rbind(p2.sum,c2,o2.sum)
    tot1.ct<-data.frame('Mapped'=rep(aln,nrow(tot1)),'Annot'=rep('comp',nrow(tot1)),tot1)
    ## Bug fix: the basic-annotation rows were previously labelled 'comp' as
    ## well, making the two annotations indistinguishable in `output`; the
    ## percentage table below already used 'basic' correctly.
    tot2.ct<-data.frame('Mapped'=rep(aln,nrow(tot2)),'Annot'=rep('basic',nrow(tot2)),tot2)
    output<-rbind(output,tot1.ct,tot2.ct)
    ## percentage of each category relative to the sample's total gene counts
    tot1.per<-data.frame('Mapped'=rep(aln,nrow(tot1)),'Annot'=rep('comp',nrow(tot1)),'Group'=tot1[,1],t(apply(tot1[,-1],1,function(x){x/s1.tot})))
    tot2.per<-data.frame('Mapped'=rep(aln,nrow(tot2)),'Annot'=rep('basic',nrow(tot2)),'Group'=tot2[,1],t(apply(tot2[,-1],1,function(x){x/s2.tot})))
    output.per<-rbind(output.per,tot1.per,tot2.per)
  }
  ## visualize
  ##for(g in unique(output.per$Group)){
  ##  x<-subset(output.per,Group == g)
  ##  p<-ggplot(data=melt(x),aes(x=variable,y=value,color=Annot,shape=Mapped))+geom_point(size=4)
  ##  p<-p+theme(axis.text.y = element_text(color='black',size=10,face='bold'),axis.text.x = element_text(color='black',size=10,face='bold',hjust=1,angle = 45))
  ##  p<-p+xlab(' ')+ylab(paste('ratio of ',g,' counts in total gene counts',sep=''))+ggtitle(label = paste('Counts of ',g,' type',sep=''))
  ##  p<-p+theme(plot.title = element_text(hjust = 0.5))
  ##  ggsave(p,file=paste('/Users/jishuxu/Documents/HCA/reference/plots/',g,'_rlen_',rr,'_gencode_percent_gene_counts_annotation.png',sep=''),type='cairo-png')
  ##}
  ##write.table(output.per,file=paste('~/Documents/HCA/reference/rlen_',rr,'_gencode_percent_gene_counts_annotation.csv',sep=''),sep=',',col.names=T,row.names=F,quote=F)
  ##write.table(output,file=paste('~/Documents/HCA/reference/rlen_',rr,'_gencode_gene_counts_annotation.csv',sep=''),sep=',',col.names=T,row.names=F,quote=F)
}
## ------------------------------------------------------------------------
# Fraction of total counts assigned to pseudogenes, one point per sample,
# coloured by annotation and shaped by mapping category.
gencode.pseudo<-subset(output.per,Group == 'pseudo')
p<-ggplot(data=melt(gencode.pseudo),aes(x=variable,y=value,color=Annot,shape=Mapped))+geom_point(size=4)
p<-p+theme(axis.text.y = element_text(color='black',size=10,face='bold'),axis.text.x = element_text(color='black',size=10,face='bold',hjust=1,angle = 45))
p<-p+xlab(' ')+ylab('sum(pseudogene counts)/sum(all gene counts)')+ggtitle(label = paste('% of pseudogene',sep=''))
p<-p+theme(plot.title = element_text(hjust = 0.5))
p
## ------------------------------------------------------------------------
# Same plot for the protein-coding fraction; the subset is kept for the
# final cross-annotation comparison further below.
x<-subset(output.per,Group == 'protein_coding')
p<-ggplot(data=melt(x),aes(x=variable,y=value,color=Annot,shape=Mapped))+geom_point(size=4)
p<-p+theme(axis.text.y = element_text(color='black',size=10,face='bold'),axis.text.x = element_text(color='black',size=10,face='bold',hjust=1,angle = 45))
p<-p+xlab(' ')+ylab("sum(protein coding counts)/sum(all gene counts)")+ggtitle(label = paste('% of protein coding',sep=''))
p<-p+theme(plot.title = element_text(hjust = 0.5))
p
gencodes.coding<-x
## ------------------------------------------------------------------------
## Repeat the per-sample aggregation against the RefSeq annotation; counts
## are summed by RefSeq gene_biotype instead of Gencode gene_type.
gene.counts.refseq<-list('unq'=list(),'mult'=list())
for(aln in c('unq','mult')){
pcounts<-list('25'=c(),'100'=c())
sralist<-c()
for(nb in c('25','100')){
files<-list.files(path='~/Documents/HCA/reference/counts/',pattern=paste(nb,"_GRCh38_RefSeq.gene.",aln,".counts.txt",sep=''))
for(fn in files){
# SRA accession is the leading token of the file name.
sra<-unlist(strsplit(fn,split='_'))[1]
sralist<-c(sralist,sra)
df<-read.delim(paste('~/Documents/HCA/reference/counts/',fn,sep=''),sep='\t',header=T,skip=1)
# Keep feature ID and the counts column only.
x<-df[,c(1,7)]
colnames(x)<-c('ID','counts')
# Gene-level RefSeq records, joined on the GFF 'ID' attribute.
y<-as.data.frame(subset(gtf_refseq,type == 'gene'))
z<-join(x,y,by='ID')
z.agg<-aggregate(z$counts,by=list(z$gene_biotype), FUN=sum,na.rm=T)
colnames(z.agg)<-c('Group',sra)
if(length(pcounts[[nb]])==0){
pcounts[[nb]]<-z.agg
}else{
pcounts[[nb]]<-merge(pcounts[[nb]],z.agg,by='Group')
}
}
}
gene.counts.refseq[[aln]]<-pcounts
}
## ------------------------------------------------------------------------
## Summarise RefSeq counts into the listed biotypes plus an 'others' bucket;
## write absolute counts and per-sample fractions to CSV.
atype<-c('protein_coding','processed_transcript','lncRNA','Mt_rRNA','rRNA','pseudogene','transcribed_pseudogene')
rlen<-c('100')
alns<-c('unq','mult')
for (rr in rlen){
output<-c()
output.per<-c()
for(aln in alns ){
pcounts<-gene.counts.refseq[[aln]]
s1<-pcounts[[rr]]
## total gene counts
s1.tot<-apply(s1[,-1],2,sum)
## counts per listed biotype
c1<-subset(s1,s1$Group %in% atype)
## others
o1<-subset(s1,!(s1$Group %in% atype))
o1.sum<-data.frame('Group'='others',t(apply(o1[,-1],2,sum)))
## total gene counts
tot1<-rbind(c1,o1.sum)
tot1.ct<-data.frame('Mapped'=rep(aln,nrow(tot1)),'Annot'=rep('RefSeq',nrow(tot1)),tot1)
output<-rbind(output,tot1.ct)
## percentage relative to each sample's total gene counts
tot1.per<-data.frame('Mapped'=rep(aln,nrow(tot1)),'Annot'=rep('RefSeq',nrow(tot1)),'Group'=tot1[,1],t(apply(tot1[,-1],1,function(x){x/s1.tot})))
output.per<-rbind(output.per,tot1.per)
}
## visualize
##for(g in unique(output.per$Group)){
## x<-subset(output.per,Group == g)
## p<-ggplot(data=melt(x),aes(x=variable,y=value,color=Annot,shape=Mapped))+geom_point(size=4)
## p<-p+theme(axis.text.y = element_text(color='black',size=10,face='bold'),axis.text.x = element_text(color='black',size=10,face='bold',hjust=1,angle = 45))
## p<-p+xlab(' ')+ylab(paste('ratio of ',g,' counts in total gene counts',sep=''))+ggtitle(label = paste('Counts of ',g,' type',sep=''))
## p<-p+theme(plot.title = element_text(hjust = 0.5))
## ggsave(p,file=paste('/Users/jishuxu/Documents/HCA/reference/plots/',g,'_rlen_',rr,'_refseq_percent_gene_counts_annotation.png',sep=''),type='cairo-png')
##}
write.table(output.per,file=paste('~/Documents/HCA/reference/rlen_',rr,'_refseq_percent_gene_counts_annotation.csv',sep=''),sep=',',col.names=T,row.names=F,quote=F)
write.table(output,file=paste('~/Documents/HCA/reference/rlen_',rr,'_refseq_gene_counts_annotation.csv',sep=''),sep=',',col.names=T,row.names=F,quote=F)
}
## ------------------------------------------------------------------------
# RefSeq pseudogene fraction: combine the 'pseudogene' and
# 'transcribed_pseudogene' biotypes per mapping category.
x<-subset(output.per,Group %in% c('pseudogene','transcribed_pseudogene'))
refseq.pseudo<-aggregate(x[,-c(1,2,3)],by=list(x$Mapped),FUN=sum)
y<-melt(aggregate(x[,-c(1,2,3)],by=list(x$Mapped),FUN=sum))
colnames(y)<-c('Group','Sample','value')
p<-ggplot(data=y,aes(x=Sample,y=value,color=Group,shape=Group))+geom_point(size=4)
p<-p+theme(axis.text.y = element_text(color='black',size=10,face='bold'),axis.text.x = element_text(color='black',size=10,face='bold',hjust=1,angle = 45))
p<-p+xlab(' ')+ylab('ratio of pseudo gene counts in total gene counts')+ggtitle(label = paste('Counts of pseudo gene type',sep=''))
p<-p+theme(plot.title = element_text(hjust = 0.5))
p
## ------------------------------------------------------------------------
# RefSeq protein-coding fraction.
refseq.coding<-subset(output.per,Group %in% c('protein_coding') )
y<-melt(aggregate(refseq.coding[,-c(1,2,3)],by=list(refseq.coding$Mapped),FUN=sum))
colnames(y)<-c('Group','Sample','value')
p<-ggplot(data=y,aes(x=Sample,y=value,color=Group,shape=Group))+geom_point(size=4)
p<-p+theme(axis.text.y = element_text(color='black',size=10,face='bold'),axis.text.x = element_text(color='black',size=10,face='bold',hjust=1,angle = 45))
p<-p+xlab(' ')+ylab('ratio of coding gene counts in total gene counts')+ggtitle(label = paste('% of coding gene type',sep=''))
p<-p+theme(plot.title = element_text(hjust = 0.5))
p
## ------------------------------------------------------------------------
# Combine RefSeq and Gencode pseudogene fractions for a side-by-side plot.
# NOTE(review): `refseq.pseudo[,2:11]` hard-codes ten sample columns --
# confirm this still matches the number of samples if the inputs change.
xx<-data.frame('Mapped'=refseq.pseudo[,1],'Annot'=rep('RefSeq',2),'Group'=rep('pseudo',2),refseq.pseudo[,2:11])
yy<-rbind(xx,gencode.pseudo)
p<-ggplot(data=melt(yy),aes(x=variable,y=value,color=Annot,shape=Mapped))+geom_point(size=4)
p<-p+theme(axis.text.y = element_text(color='black',size=10,face='bold'),axis.text.x = element_text(color='black',size=10,face='bold',hjust=1,angle = 45))
p<-p+xlab(' ')+ylab('sum(pseduogene counts)/sum(all gene counts)')+ggtitle(label = paste('% of pseudogene',sep=''))
p<-p+theme(plot.title = element_text(hjust = 0.5))
p
## ------------------------------------------------------------------------
# Same comparison for protein-coding fractions.
xx<-refseq.coding
yy<-rbind(gencodes.coding,xx)
p<-ggplot(data=melt(yy),aes(x=variable,y=value,color=Annot,shape=Mapped))+geom_point(size=4)
p<-p+theme(axis.text.y = element_text(color='black',size=10,face='bold'),axis.text.x = element_text(color='black',size=10,face='bold',hjust=1,angle = 45))
p<-p+xlab(' ')+ylab('sum(protein counts)/sum(all gene counts)')+ggtitle(label = paste('% of protein coding',sep=''))
p<-p+theme(plot.title = element_text(hjust = 0.5))
p
|
9cb05229a4c211d7fe176d8b522fec24c086431f
|
53c8b2c6a0300d48682284fe357b2a74b2d373c3
|
/demand/demand_curves.R
|
46414d88027dc60c258a0bb7c65fc29d18b22a36
|
[] |
no_license
|
emlab-ucsb/future_food_from_sea
|
ce8140972b5ecf087d4ce9ea82db602742b88442
|
917496a5c29e2d804cc46ece3225729522d8e8e0
|
refs/heads/master
| 2022-11-04T21:31:01.031805
| 2020-07-27T17:28:29
| 2020-07-27T17:28:29
| 250,334,394
| 2
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 7,720
|
r
|
demand_curves.R
|
## Tracey Mangin
## December 4, 2019
## demand curves
## future of food from the sea
## note to user: must run files in init_cond folder first
## this creates demand curves
## Extracts demand parameters (gammas) from a table in the Cai 2017 PDF via
## tabulizer, then computes production-weighted gammas for the aggregate
## sector scenarios.
## attach libraries
library(tidyverse)
library(rJava)
library(tabulizer)
library(tabulizerjars)
library(rebus)
library(viridis)
library(reconPlots)
library(scales)
library(furrr)
## initial production and price
## note to user: change path, load file created in init_conditions.R
init_pq_df <- read_csv("init_pq_df.csv")
## for pulling out gammas
pdf1 <- '~/GitHub/future_food_from_sea/demand/cai_2017.pdf'
caietal <- file.path(pdf1)
pdf_txt <- tabulizer::extract_text(caietal)
# The gamma table sits on page 34 of the PDF; returned as a data frame.
tbls <- tabulizer::extract_tables(caietal, pages = 34, output = "data.frame")
## clean up the table
gamma_tbl <- tbls[[1]]
gamma_coln <- c("region", "marine_mean", "marine_lb", "marine_ub", "fresh_diad_mean", "fresh_diad_lb", "fresh_diad_ub",
"crust_mean", "crust_ub_lb", "shm_mean", "shm_lb", "shm_ub", "ceph_mean", "ceph_lb", "ceph_ub")
colnames(gamma_tbl) <- gamma_coln
# The crustacean lower/upper bounds were extracted into one column; split
# them apart with rebus regular-expression helpers.
gamma_tbl2 <- gamma_tbl %>%
mutate(crust_lb = str_extract(crust_ub_lb, pattern = START %R% one_or_more(DIGIT) %R% DOT %R% one_or_more(DIGIT)),
crust_ub = str_extract(crust_ub_lb, pattern = SPACE %R% one_or_more(DIGIT) %R% DOT %R% one_or_more(DIGIT)),
crust_ub = str_remove(crust_ub, pattern = SPACE)) %>%
select(region:crust_mean, crust_lb, crust_ub, shm_mean:ceph_ub)
# NOTE(review): assumes the first 20 extracted rows are regions and the next
# 20 are countries -- confirm against the table layout.
region_df <- data.frame(geography = c(rep("region", 20), rep("country", 20)))
gamma_tbl3 <- cbind(region_df, gamma_tbl2) %>%
select(region, geography, marine_mean:ceph_ub)
## mean/median gamma for regions -- marine
gamma_est <- gamma_tbl3 %>%
group_by(geography) %>%
summarise(mean_gamma_mf = mean(marine_mean),
med_gamma_mf = median(marine_mean))
gamma_mf <- gamma_est %>%
filter(geography == "region") %>%
select(mean_gamma_mf) %>%
as.numeric()
## mean/median gamma for regions -- bivalve
gamma_est_biv <- gamma_tbl3 %>%
group_by(geography) %>%
mutate(shm_mean = as.numeric(shm_mean)) %>%
summarise(mean_gamma_shm = mean(shm_mean, na.rm = T),
med_gamma_shm = median(shm_mean))
gamma_biv <- gamma_est_biv %>%
filter(geography == "region") %>%
select(mean_gamma_shm) %>%
as.numeric()
## mean/median gamma for regions -- freshwater
gamma_est_fresh <- gamma_tbl3 %>%
group_by(geography) %>%
mutate(fresh_mean = as.numeric(fresh_diad_mean)) %>%
summarise(mean_gamma_fresh = mean(fresh_mean, na.rm = T),
med_gamma_fresh = median(fresh_mean))
gamma_fresh <- gamma_est_fresh %>%
filter(geography == "region") %>%
select(mean_gamma_fresh) %>%
as.numeric()
## find gamma for aggregate scenarios
## -------------------------------------
# Three-sector scenario: marine capture and finfish mariculture share the
# marine gamma; bivalve mariculture uses the bivalve gamma. Quantities are
# summed per gamma group for the production-weighted mean below.
ps_gamma_3sect <- init_pq_df %>%
filter(sector %in% c("marine_capture", "finfish_mariculture", "bivalve_mariculture")) %>%
mutate(gamma = ifelse(sector == "marine_capture", gamma_mf,
ifelse(sector == "finfish_mariculture", gamma_mf, gamma_biv)),
gamma_grp = ifelse(sector == "marine_capture", "finfish",
ifelse(sector == "finfish_mariculture", "finfish", "bivalves"))) %>%
group_by(gamma_grp, gamma) %>%
summarise(sum_q = sum(quantity))
ps_gamma_3sect_val <- weighted.mean(ps_gamma_3sect$gamma, ps_gamma_3sect$sum_q)
ps_gamma_4sect <- init_pq_df %>%
filter(sector %in% c("marine_capture", "finfish_mariculture", "bivalve_mariculture", "inland_production")) %>%
mutate(gamma = ifelse(sector == "marine_capture", gamma_mf,
ifelse(sector == "finfish_mariculture", gamma_mf,
ifelse(sector == "bivalve_mariculture", gamma_biv, gamma_fresh))),
gamma_grp = ifelse(sector == "marine_capture", "finfish",
ifelse(sector == "finfish_mariculture", "finfish",
ifelse(sector == "bivalve_mariculture", "bivalves", "fresh")))) %>%
group_by(gamma_grp, gamma) %>%
summarise(sum_q = sum(quantity))
ps_gamma_4sect_val <- weighted.mean(ps_gamma_4sect$gamma, ps_gamma_4sect$sum_q)
## functions
## Compute quantity demanded from an isoelastic demand model:
##   Q = N * alpha * P^beta * Y^gamma
##
## pval   price per unit
## n_pop  population size (N)
## alpha  demand scale constant (calibrated with calc_alpha())
## beta   own-price elasticity (expected negative)
## y_gdpc per-capita income (Y)
## gamma  income elasticity
##
## Returns the implied consumption. The expression is returned directly
## (the original ended in an assignment, so the value came back invisibly
## and would not auto-print at the console).
calc_cons <- function(pval, n_pop, alpha, beta, y_gdpc, gamma) {
  n_pop * alpha * pval ^ beta * y_gdpc ^ gamma
}
## Invert the isoelastic demand model to recover the scale constant alpha
## from an observed (quantity, price, population, income) calibration point:
##   alpha = Q / (N * P^beta * Y^gamma)
##
## cons_val observed consumption (Q)
## n_pop    population size (N)
## pval     observed price (P)
## beta     own-price elasticity
## y_gdpc   per-capita income (Y)
## gamma    income elasticity
##
## Returns alpha such that calc_cons() reproduces cons_val at this point.
## The expression is returned directly (the original ended in an
## assignment, which made the return value invisible).
calc_alpha <- function(cons_val, n_pop, pval, beta, y_gdpc, gamma) {
  cons_val / (n_pop * pval ^ beta * y_gdpc ^ gamma)
}
## estimate beta val, taken from our report which cites Muhammed et al. 2013
low_ope <- -0.48
med_ope <- -0.382
high_ope <- -0.278
## define current values
n_pop0 <- 7.39e9 ## IMF, 2017
y_gdpc0 <- 17117.153 ## the world bank, 2017
beta_val <- mean(c(low_ope, med_ope, high_ope)) ## own price elasticity
## repeat with future estimates
n_popt <- 9.8e9 ## UN article
# y_gdpct <- y_gdpc0 + y_gdpc0 * 1.3 ## PWC https://www.pwc.com/gx/en/issues/economy/the-world-in-2050.html
##
gdp2016 <- 128.567 * 1e12
gdp2050 <- gdp2016 + gdp2016 * 1.3
y_gdpct <- gdp2050 / n_popt
## demand curve input df
input_df <- init_pq_df %>%
mutate(demand_scen = "current",
population = n_pop0,
beta = beta_val,
gamma = ifelse(sector %in% c("marine_capture", "finfish_mariculture"), gamma_mf,
ifelse(sector == "inland_production", gamma_fresh,
ifelse(sector == "bivalve_mariculture", gamma_biv,
ifelse(sector == "aggregate_lb", ps_gamma_4sect_val, ps_gamma_3sect_val)))),
yval = y_gdpc0) %>%
rowwise() %>%
mutate(alpha = calc_alpha(cons_val = quantity, n_pop = population, pval = price, beta = beta, y_gdpc = yval, gamma = gamma)) %>%
ungroup()
input_df_fut <- input_df %>%
mutate(demand_scen = "future",
population = n_popt,
yval = y_gdpct)
input_df_all <- rbind(input_df, input_df_fut) %>%
mutate(scenario = paste(sector, demand_scen, sep = "_"))
## Evaluate one (sector, price) point on a demand curve.
##
## dscen  a list with elements `sector_name` (which curve to evaluate) and
##        `price_val` (the price to evaluate it at); produced upstream by
##        purrr::cross().
## inputs calibrated parameters, one row per sector, with columns sector,
##        demand_scen, population, alpha, beta, yval, gamma.
##
## Returns a one-row tibble with sector, demand_scen, price and quantity.
create_demand <- function(dscen, inputs){
  ## Use the exact element name; fall back to `sector` so any caller that
  ## supplied that name (the original relied on `$` partial matching to
  ## resolve dscen$sector to the sector_name element) still works.
  sector_name <- if (!is.null(dscen$sector_name)) dscen$sector_name else dscen$sector
  p <- dscen$price_val

  ## Parameters calibrated for this sector.
  pars <- inputs %>%
    filter(sector == sector_name)

  ## Isoelastic demand: Q = N * alpha * P^beta * Y^gamma (see calc_cons()).
  cons <- calc_cons(pval = p,
                    n_pop = pars$population,
                    alpha = pars$alpha,
                    beta = pars$beta,
                    y_gdpc = pars$yval,
                    gamma = pars$gamma)

  ## Return the tibble directly so the result is visible to callers.
  tibble(sector = sector_name,
         demand_scen = pars$demand_scen,
         price = p,
         quantity = cons)
}
## make current demand curve
price_vals <- as.list(seq(0, 20000, 1))
sector_vec <- as.list(unique(input_df_all$sector))
#sector_vec <- as.list(c("marine_capture", "finfish_mari_a", "finfish_mari_b", "finfish_mari_c", "bivalve_mariculture", "aggregate"))
# plan(multiprocess)
demand_df0 <- cross(list(sector_name = sector_vec,
price_val = price_vals)) %>%
map(create_demand,
inputs = input_df_all %>% filter(demand_scen == "current")) %>%
bind_rows()
## now for future
demand_df_fut <- cross(list(sector_name = sector_vec,
price_val = price_vals)) %>%
map(create_demand,
inputs = input_df_all %>% filter(demand_scen == "future")) %>%
bind_rows()
## now do extreme for each sector
## double future demand curve
mult_val <- 2
demand_df_ext <- demand_df_fut %>%
mutate(quantity = quantity * mult_val) %>%
mutate(demand_scen = "extreme")
## combine all three curves
demand_df_all <- rbind(demand_df0, demand_df_fut, demand_df_ext)
## note to user: change path, save for scripts in analysis folder
saveRDS(demand_df_all, "demand_curves_final.rds")
|
3a4c7dc57640e1dce11f3fa942c3422b1c32125a
|
ae04c40d5560d5fd97c688909e6134b631fe4065
|
/cachematrix.R
|
742238e1f756d8d01871540435df049996bfc643
|
[] |
no_license
|
nodde/ProgrammingAssignment2
|
b75066c05a9125fd78773489089d94884b5d2b54
|
839780c4352409d5f859c82d795fc414a9a38f3f
|
refs/heads/master
| 2021-01-19T07:22:50.946643
| 2015-11-21T20:21:38
| 2015-11-21T20:21:38
| 44,532,474
| 0
| 0
| null | 2015-10-19T12:18:43
| 2015-10-19T12:18:42
| null |
UTF-8
|
R
| false
| false
| 3,011
|
r
|
cachematrix.R
|
## Matrix inversion can be a time-consuming computation.
## Caching the inverse of a matrix after computing once
## rather than computing it in each run can conserve
## resources and enhance efficiency. These 2 functions
## present the solution to this problem on the assumption
## of a square invertible matrix.
## Wrap a matrix together with a cache slot for its inverse.
##
## Returns a list of four accessor closures: set()/get() replace and read
## the stored matrix, setmatrix()/getmatrix() store and read the cached
## inverse. Installing a new matrix via set() invalidates any previously
## cached inverse so a stale result can never be returned.
makeCacheMatrix <- function(x = matrix()) {
  # Cached inverse; NULL means "not computed yet".
  cached_inverse <- NULL

  set <- function(value) {
    # Replace the stored matrix and drop the now-stale cache.
    x <<- value
    cached_inverse <<- NULL
  }

  get <- function() {
    x
  }

  setmatrix <- function(solve) {
    cached_inverse <<- solve
  }

  getmatrix <- function() {
    cached_inverse
  }

  list(set = set,
       get = get,
       setmatrix = setmatrix,
       getmatrix = getmatrix)
}
## Return the inverse of the special "matrix" created by makeCacheMatrix().
##
## On a cache hit the stored inverse is returned immediately (after a
## status message). On a miss the inverse is computed with solve(), written
## back into the cache via setmatrix(), and returned. Any extra arguments
## in ... are forwarded to solve().
cacheSolve <- function(x, ...) {
  # Fast path: a previously computed inverse is already cached.
  cached <- x$getmatrix()
  if (!is.null(cached)) {
    message("getting cached data")
    return(cached)
  }
  # Slow path: compute, cache, then return.
  result <- solve(x$get(), ...)
  x$setmatrix(result)
  result
}
## Test run:
## > xy <- rbind(c(2, 7, -4, 0), c(3, 12, 16, -1),
## c(5, 10, 0, 6), c(-2, 1, 13, 4))
## > m <- makeCacheMatrix(xy)
## > m$get()
## [,1] [,2] [,3] [,4]
## [1,] 2 7 -4 0
## [2,] 3 12 16 -1
## [3,] 5 10 0 6
## [4,] -2 1 13 4
## No cache in the first run
##
## > cacheSolve(m)
## [,1] [,2] [,3] [,4]
## [1,] -0.436906377 0.111261872 0.199457259 -0.2713704206
## [2,] 0.220081411 -0.007598372 -0.052645862 0.0770691995
## [3,] -0.083310719 0.042333786 0.007598372 -0.0008141113
## [4,] -0.002713704 -0.080054274 0.088195387 0.0976933514
## The second run retrieves the previous
## inverse matrix from the cahed data
##
## > cacheSolve(m)
## getting cached data.
## [,1] [,2] [,3] [,4]
## [1,] -0.436906377 0.111261872 0.199457259 -0.2713704206
## [2,] 0.220081411 -0.007598372 -0.052645862 0.0770691995
## [3,] -0.083310719 0.042333786 0.007598372 -0.0008141113
## [4,] -0.002713704 -0.080054274 0.088195387 0.0976933514
## >
|
7b147e229337c7c801586ab853cb76a7b819922f
|
5697ce07fbfd684465ca2e1864198e4a249bf526
|
/R/print.metaMDS.R
|
4128d74f987d2f477452e14d2cb1be8627f9482d
|
[] |
no_license
|
psolymos/vegan
|
be0f3a42fda647f1e96f01c53ce0363c447ff7a2
|
ccbd3479a29d0d4397b5726d79a6a975208c0572
|
refs/heads/master
| 2020-12-28T21:51:35.806024
| 2020-06-26T16:24:12
| 2020-06-26T16:24:12
| 47,991,367
| 1
| 1
| null | 2020-06-26T16:03:45
| 2015-12-14T17:46:13
|
R
|
UTF-8
|
R
| false
| false
| 1,618
|
r
|
print.metaMDS.R
|
## S3 print method for objects of class "metaMDS" (vegan's NMDS wrapper).
## Writes a human-readable summary of the fit to the console and returns
## the object invisibly, as print methods should.
`print.metaMDS` <-
    function (x, ...)
{
    cat("\nCall:\n")
    cat(deparse(x$call), "\n\n")
    ## Headline line depends on which engine produced the fit.
    if (x$engine == "monoMDS")
        cat(x$model, "Multidimensional Scaling using monoMDS\n\n")
    else if (x$engine == "isoMDS")
        cat("non-metric Multidimensional Scaling using isoMDS (MASS package)\n\n")
    cat("Data: ", x$data, "\n")
    cat("Distance:", x$distance, "\n\n")
    cat("Dimensions:", x$ndim, "\n")
    cat("Stress: ", x$stress, "\n")
    ## monoMDS fits also report the stress formula and, for non-linear
    ## models, whether weak or strong tie handling was used (x$ities
    ## indexes into c("weak", "strong")).
    if (inherits(x, "monoMDS")) {
        cat("Stress type", x$isform)
        if(x$model != "linear")
            cat(", ", c("weak", "strong")[x$ities], " ties", sep = "")
        cat("\n")
    }
    ## Convergence status over the random restarts (x$tries of them).
    if (x$converged) {
        cat("Two convergent solutions found after", x$tries,
            "tries\n")
    } else {
        cat("No convergent solutions - best solution after",
            x$tries, "tries\n")
    }
    ## Describe how the ordination scores were post-processed; the flags
    ## are stored as attributes on the points matrix.
    z <- x$points
    scal <- c(if (attr(z, "centre")) "centring",
              if (attr(z, "pc")) "PC rotation",
              if (attr(z, "halfchange")) "halfchange scaling")
    if (!length(scal))
        scal <- "as is"
    cat("Scaling:", paste(scal, collapse = ", "), "\n")
    ## Species scores: report whether they exist, whether they were
    ## expanded (shrinkage attribute present), and which data they used.
    if (all(is.na(x$species))) {
        cat("Species: scores missing\n")
    } else {
        spattr <- attr(x$species, "shrinkage")
        spdata <- attr(x$species, "data")
        if (is.null(spdata))
            spdata <- x$data
        if (is.null(spattr))
            cat("Species: non-expanded scores ")
        else
            cat("Species: expanded scores ")
        cat("based on", sQuote(spdata), "\n")
    }
    cat("\n")
    ## Return invisibly so printing does not echo the object again.
    invisible(x)
}
|
a4d4bf5fce045b818465baa4472e7ecb2670747b
|
f511c0143691c35244a6a02c353446e813e15cdd
|
/OldCode/TCHallBadgersTIDY.R
|
a8270c85e40fcd86e4a00c4aea3dec2374936535
|
[] |
no_license
|
davehudson67/Inbreeding
|
e581eccfc25e723cc0816dd88bfc049ffad6e43f
|
1b3212f96aa8026a6a2bd1f34df7e9cf84e18f87
|
refs/heads/master
| 2023-06-30T03:02:44.474462
| 2021-07-11T11:04:42
| 2021-07-11T11:04:42
| 264,190,703
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 6,222
|
r
|
TCHallBadgersTIDY.R
|
library(tidyverse)
library(lubridate)
rm(list=ls())
## read in data
CH <- read_csv("all.diag.results.csv")
## keep only required variables
CH <- select(CH, tattoo, date, pm, captureyear, sex, age, age_years)#, statpak, IFNgamma, cult_SUM, brock)
## set date variable correctly
CH$date <- dmy(CH$date)
## adjust columns
colnames(CH)[which(names(CH) == "tattoo")] <- "ID"
## keep only badgers with known age
CH <- CH %>%
drop_na(age_years)
## create infected variables
## change NAs
#CH$statpak[is.na(CH$statpak)] <- 3
#CH$brock[is.na(CH$brock)] <- 3
#CH$IFNgamma[is.na(CH$IFNgamma)] <- 3
#CH$cult_SUM[is.na(CH$cult_SUM)] <- -1
#CH <- CH %>%
# mutate(infected = if_else((CH$statpak == 1 | CH$brock == 1 | CH$cult_SUM > 0 | CH$IFNgamma ==1 ), 1, 0)) %>%
# mutate(infected_as_cub = if_else((CH$statpak == 1 | CH$brock == 1 | CH$cult_SUM > 0 | CH$IFNgamma == 1) & CH$age == "CUB", 1, 0))
# mutate(no_test = if_else((CH$statpak == 3 & CH$brock == 3 & CH$cult_SUM == -1 & CH$IFNgamma == 3), 1, 0))
#CH <- CH %>%
# group_by(ID) %>%
# mutate(infected = max(infected)) %>%
# mutate(infected_as_cub = max(infected_as_cub)) %>%
# ungroup()
## create a trap month variable
CH <- CH %>%
mutate(trap_month = as.numeric(month(date)))
## Assign each capture to a calendar quarter (1 = Jan-Mar, 2 = Apr-Jun,
## 3 = Jul-Sep, 4 = Oct-Dec). Vectorised replacement for the original
## per-row loop of twelve equality tests: months 1-3 map to 1, 4-6 to 2,
## and so on. A missing trap_month now yields NA for that row instead of
## stopping the loop with a "missing value where TRUE/FALSE needed" error.
CH$trap_season <- ceiling(CH$trap_month / 3)
## create occasion variable by combining capture year and trap season
CH <- unite(CH, "occasion", captureyear, trap_season, sep = ".", remove = FALSE)
CH$occ <- CH %>%
arrange(date) %>%
group_by(occasion) %>%
group_indices()
## adjust occasion to allow for the start of the study/badgers can be born before the first capture occasion
CH$occ <- CH$occ + 5
## create birth occasion (need to decide on the +1 to determine when most births happen)
CH <- CH %>%
group_by(ID) %>%
mutate(birth = if_else(age == "CUB", occ, 1000)) %>%
mutate(birth = min(birth)) %>%
mutate(birth = birth - (min(trap_season[age == "CUB"])) + 1) %>%
ungroup()
## check error message
filter(CH, is.infinite(birth))
## adjust birth for this individual
CH$birth[CH$ID=="UN574"] <- 154 - 4
## create death occasion
CH <- CH %>%
group_by(ID) %>%
mutate(death = if_else(pm == "Yes", occ, 0)) %>%
mutate(death = max(death)) %>%
ungroup()
CH$death[CH$death == 0] <- 1000
## check each individual only has 1 pm date
CH %>%
group_by(ID) %>%
filter(pm == "Yes") %>%
count(sort = TRUE)
## remove duplicate captures... individuals captured twice in same season.
#CH <- CH %>%
# arrange(ID, pm) %>%
# distinct(ID, occ, .keep_all = TRUE)
##-------------------------------------------- SPLIT DATA -----------------------------------------------##
## select individuals whose only entry is a recovered dead pm
CHdr <- CH %>%
group_by(ID) %>%
add_count(name = "captures") %>%
filter(captures == 1 & pm == "Yes") %>%
mutate(captures = captures - 1)
## remove all CHdr entries from CH
CH <- anti_join(CH, CHdr)
## create last_seen variable for this group
CHdr$last_seen <- CHdr$birth
## create max possible captures for this group
CHdr <- CHdr %>%
group_by(ID) %>%
mutate(max_captures = death - birth - 1) %>%
mutate(max_captures = if_else(max_captures < 0, 0, max_captures)) %>%
ungroup()
##------------------------------------------- DEAL WITH CH --------------------------------------------##
## check any individuals where birth >= death
CH %>%
filter(birth >= death)
## add last seen alive
CH <- CH %>%
group_by(ID) %>%
mutate(last_seen = min(max(occ), death - 1))
## add number of captures
CH <- CH %>%
group_by(ID) %>%
mutate(captures = n_distinct(occ)) %>%
ungroup()
## create max possible captures (-1 removes death recovery occasion)
eos <- max(CH$occ)
CH <- CH %>%
group_by(ID) %>%
mutate(max_captures = if_else(death == 1000, eos - birth, death - birth - 1)) %>%
mutate(max_captures = if_else(max_captures < 0, 0, max_captures)) %>%
ungroup()
##------------------------------------------- REJOIN DATA -----------------------------------------------##
## combine dfs
CH <- bind_rows(CH, CHdr)
rm(CHdr)
##----------------------------------------- run some checks ---------------------------------------------##
## check for any entries made after death
CH %>%
group_by(ID) %>%
filter(last_seen > death)
## any birth = death
CH %>%
group_by(ID) %>%
filter(birth == death)
## adjust these individuals
CH$death[CH$ID == "UN088"] <- 35
#CH$max_captures[CH$ID == "UN088"] <- 0
#CH$captures[CH$ID == "UN088"] <- 0
CH$death[CH$ID == "UN089"] <- 35
#CH$max_captures[CH$ID == "UN089"] <- 0
#CH$captures[CH$ID == "UN089"] <- 0
CH$death[CH$ID == "UN135"] <- 63
#CH$max_captures[CH$ID == "UN135"] <- 0
#CH$captures[CH$ID == "UN135"] <- 0
CH$death[CH$ID == "UN136"] <- 63
#CH$max_captures[CH$ID == "UN136"] <- 0
#CH$captures[CH$ID == "UN136"] <- 0
#CH$death[CH$ID == "UN129"] <- 56
#CH$max_captures[CH$ID == "UN129"] <- 0
#CH$captures[CH$ID == "UN136"] <- 0
#CH$death[CH$ID == "L081"] <- 40
#CH$max_captures[CH$ID == "L081"] <- 0
#CH$captures[CH$ID == "L081"] <- 0
#CH$last_seen[CH$ID == "L081"] <- 39
CH %>%
group_by(ID) %>%
filter(last_seen < birth)
CH %>%
group_by(ID) %>%
filter(captures > max_captures)
CH <- CH %>%
group_by(ID) %>%
mutate(captures = if_else(captures > max_captures, max_captures, captures))
## any last_seen = death
CH %>%
group_by(ID) %>%
filter(last_seen == death)
## change unknown death to NA
CH$death[CH$death == 1000] <- NA
## keep unique entries
CH <- distinct(CH, ID, .keep_all = TRUE)
## save output
saveRDS(CH, "CH.rds")
CH[52,]
122-119
CH %>%
filter(captures > last_seen - birth)
CH <- CH %>%
group_by(ID) %>%
mutate(captures = if_else(captures > last_seen - birth, captures - 1, captures))
|
e2f663f7c5a15bfae77dea11e8dce14cde8c5e63
|
9820b3e3da2974cb12f9aac79c25481825cbf3a5
|
/shiny/dvdstore/database_functions.R
|
7d0f90789084a508bfd6effe0f7f5e03e99c2f92
|
[] |
no_license
|
OliBravo/my_apps
|
152d96bc7e4e26bccf980c0c9b616e4940216952
|
1e21694bd0c1fce34c425f88a327a73a32be474f
|
refs/heads/master
| 2023-02-02T09:11:02.279128
| 2020-12-22T13:27:04
| 2020-12-22T13:27:04
| 111,607,649
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,487
|
r
|
database_functions.R
|
# database functions
## Open a connection to the local "dvdrental" PostgreSQL database.
##
## login, password: credentials entered by the user; blank credentials are
## rejected up front without touching the server. On any connection
## failure a shiny notification with a user-friendly message is shown and
## NULL is returned; on success the live DBI connection object is
## returned. Callers must check for NULL before using the result.
db_connect <- function(login, password){
  # Refuse to even attempt a connection with empty credentials.
  # (|| instead of |: these are scalar conditions.)
  if (login == "" || password == ""){
    return(NULL)
  }
  # valid logins-passwords are: web-web, mike-mike123, jon-jon123
  DB_NAME <- "dvdrental"
  DB_HOST <- "localhost"
  DB_PORT <- 5432
  DB_USER <- login
  DB_PASSWORD <- password

  cnx <- try(
    dbConnect(
      RPostgres::Postgres(),
      dbname = DB_NAME,
      host = DB_HOST,
      user = DB_USER,
      password = DB_PASSWORD,
      port = DB_PORT),
    silent = FALSE
  )

  # Connection failed: map the error text onto a friendlier message.
  # inherits() is the robust class test (class(x) can be length > 1).
  if (inherits(cnx, "try-error")){
    msg <- "Connection error"
    err_text <- as.character(cnx)
    # database or server issues:
    if (grepl("could not connect to server", err_text)){
      msg <- "Could not connect to the server."
    }
    # authorization issues:
    if (grepl("authentication failed", err_text) || grepl("permission denied", err_text))
      msg <- "You are not allowed to access the content."
    showNotification(msg)
    return(NULL)
  }
  cnx
}
## Run `sql` against `connection` and return the fetched rows as a data
## frame, or NULL if the query fails (the error text is printed).
## `params` is forwarded to RPostgres::dbSendQuery for parameterized
## queries -- prefer these over pasting values into the SQL string.
queryDB <- function(connection, sql, params = NULL){
  tryCatch({
    res <- RPostgres::dbSendQuery(
      connection,
      sql,
      params = params
    )
    ## Release the server-side result set even if dbFetch() errors.
    ## The original never called dbClearResult(), which leaks open
    ## results and triggers DBI warnings.
    on.exit(RPostgres::dbClearResult(res), add = TRUE)
    dbFetch(res)
  },
  error = function(e) {
    print(as.character(e))
    NULL
  })
}
|
da871c8cd1f0dd397ac594548126dfe510abec8c
|
24975c66d61805ffd50147890b9fc34769f18324
|
/Notes_scripts_bank/ex1_twitter_compare_STAN.R
|
2a9270a6c5e83e2eb0b4fc191db622e10f44f229
|
[] |
no_license
|
npetraco/MATFOS705
|
53081de4e38a1aae8e0d67bf093a1bcd6b9f0258
|
dc54407b7b13ebf8315282cbaf0c9b742212884d
|
refs/heads/master
| 2023-05-28T03:08:46.839815
| 2023-05-16T14:47:15
| 2023-05-16T14:47:15
| 121,066,670
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,352
|
r
|
ex1_twitter_compare_STAN.R
|
library(bayesutils)
library(loo)
# Extra options to set for Stan:
options(mc.cores = 1)
rstan_options(auto_write = TRUE)
# Load a Stan model:
stan.code <- paste(readLines(system.file("stan/poisson-gamma_multiple_wloglik.stan", package = "bayesutils")),collapse='\n')
# Translate Stan code into C++
model.c <- stanc(model_code = stan.code, model_name = 'model', verbose=T)
# Compile the Stan C++ model:
sm <- stan_model(stanc_ret = model.c, verbose = T)
M <- c(1154, 1062, 1203, 1125, 1091, 1120, 1202, 1129, 1103, 1098, 1169, 1142, 1174, 1111, 1148,
1134, 1146, 1179, 1165, 1076, 1152, 1209, 1205, 1139, 1227, 1145, 1140, 1220, 1059, 1165)
A <- c(1326, 1362, 1297, 1350, 1324, 1384, 1343, 1373, 1345, 1399, 1364, 1380, 1303, 1232, 1330,
1306, 1309, 1336, 1367, 1291, 1325, 1348, 1318, 1351, 1382, 1340, 1305, 1306, 1333, 1337)
N <- c(1251, 1234, 1337, 1235, 1189, 1289, 1318, 1190, 1307, 1224, 1279, 1331, 1310, 1244, 1246,
1168, 1267, 1274, 1262, 1254, 1139, 1236, 1310, 1227, 1310, 1255, 1230 ,1327, 1242, 1269)
#s <- array(c(M,A,N), c(length(c(M,A,N)), 1)) # Model a. One lambda. Feed data in as a column vector
#s <- cbind(M,A,N) # Model b. Three lambdas
s <- rbind(M,A,N) # Model c. 30 lambdas. One for each day.
dat <- list(
"n" = nrow(s),
"m" = ncol(s),
"s" = s,
"a" = 25/16,
"b" = 1/16000
)
# Run the model:
fit3 <- sampling(sm, data = dat, iter=5000, thin = 1, chains = 4)
fit3
# ---------------------------------------------------------------------
# Model adequacy metrics (PSIS-LOO and WAIC) for the three candidates.
# NOTE(review): only fit3 is created above; fit1 and fit2 must still be
# in the workspace from earlier runs with the alternative `s` layouts
# (the commented-out column-vector / cbind versions) -- confirm before
# sourcing this file top to bottom.
# Fixed: the loo package exports extract_log_lik(), not extract.log.lik()
# (the original name does not exist and errors at run time).
# ---------------------------------------------------------------------

# Model a. adequacy metrics
log.lik1 <- extract_log_lik(fit1, merge_chains = FALSE)
r.eff1 <- relative_eff(exp(log.lik1), cores = 2)
loo.est1 <- loo(log.lik1, r_eff = r.eff1, cores = 2)
waic.est1 <- waic(log.lik1)
print(loo.est1)
print(waic.est1)

# Model b. adequacy metrics
log.lik2 <- extract_log_lik(fit2, merge_chains = FALSE)
r.eff2 <- relative_eff(exp(log.lik2), cores = 2)
loo.est2 <- loo(log.lik2, r_eff = r.eff2, cores = 2)
waic.est2 <- waic(log.lik2)
print(loo.est2)
print(waic.est2)

# Model c. adequacy metrics
log.lik3 <- extract_log_lik(fit3, merge_chains = FALSE)
r.eff3 <- relative_eff(exp(log.lik3), cores = 2)
loo.est3 <- loo(log.lik3, r_eff = r.eff3, cores = 2)
waic.est3 <- waic(log.lik3)
print(loo.est3)
print(waic.est3)

# Pareto-k diagnostic plot for the day-level model.
plot(loo.est3, label_points = FALSE)

# Intercompare models. Best on top:
loo_compare(loo.est1, loo.est2, loo.est3)
|
deea11b974bb8c336799c5a89dc939d8eb46ee38
|
bdd00c8db273e7b8557598dd021931265c6b8aa9
|
/run_analysis.r
|
1c585ea4a4a741ec6d1068acdc05d4bcb50ca0e2
|
[] |
no_license
|
gyurisc/coursera-getting-and-cleaning-data
|
734bc3ae980f80ac95f782ba031283ef5b573c9d
|
3fd6c0fbce4f64c47e3f9897e725849201b77fb7
|
refs/heads/master
| 2020-06-12T17:47:02.528753
| 2014-06-21T14:43:51
| 2014-06-21T14:43:51
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,896
|
r
|
run_analysis.r
|
# Step 1. Merge the training and the test sets to create one data set.
# Setting working directory to point to the location of the git repository root
# e.g setwd("/Users/krisztiangyuris/Desktop/r_workingdir/coursera-getting-and-cleaning-data")
# Loading files
print("Loading data...")
trainData <- read.table("data/train/X_train.txt")
trainLabels <- read.table("data/train/y_train.txt")
trainSubject <- read.table("data/train/subject_train.txt")
testData <- read.table("data/test/X_test.txt")
testLabels <- read.table("data/test/y_test.txt")
testSubject <- read.table("data/test/subject_test.txt")
activityLabels <- read.table("data/activity_labels.txt")
features <- read.table("data/features.txt")
# combining test and set data
print("Combining test and train data.")
allData <- rbind(trainData, testData)
allLabels <- rbind(trainLabels, testLabels)
allSubject <- rbind(trainSubject, testSubject)
# Step 2. Extracts only the measurements on the mean and standard deviation for each measurement.
print("Extracting and renaming.")
meanstdIndexes <- grep("mean\\(\\)|std\\(\\)", features[,2])
allData <- allData[, meanstdIndexes]
names(allData) <- gsub("\\(\\)", "", features[meanstdIndexes, 2])
names(allData) <- gsub("mean", "Mean", names(allData))
names(allData) <- gsub("std", "Standard", names(allData))
# Step 3. Uses descriptive activity names to name the activities in the data set
print("Adding activity names.")
activityLabels[, 2] <- tolower(activityLabels[,2])
activityLabels[, 2] <- gsub("_u", "U", activityLabels[,2])
activityLabels[, 2] <- gsub("_d", "D", activityLabels[,2])
substr(activityLabels[, 2], 1,1) <- toupper(substr(activityLabels[, 2], 1, 1))
al <- activityLabels[allLabels[, 1], 2]
allLabels[, 1] <- al
names(allLabels) <- "activity"
# Step 4. Appropriately labels the data set with descriptive variable names.
names(allSubject) <- "subject"
print("Writing out final data set")
finalData <- cbind(allSubject, allLabels, allData)
write.table(finalData, "final_data.txt")
# Step 5. Creates a second, independent tidy data set with the average of each variable for each activity and each subject.
print("Creating the final average data")

# Output dimensions: one row per (subject, activity) pair, same columns
# as the tidy data set built in step 4.
activityLength <- dim(activityLabels)[1]
subjectLength <- length(table(allSubject))
columnLength <- dim(finalData)[2]

# Preallocate the result frame (never grow it inside the loop) and reuse
# the tidy column names.
finalDataAvg <- as.data.frame(matrix(NA, nrow = activityLength * subjectLength,
                                     ncol = columnLength))
colnames(finalDataAvg) <- colnames(finalData)

# Fill one row per subject/activity combination with the column means of
# the matching observations. seq_len() is safe even if a count were 0.
r <- 1
for (i in seq_len(subjectLength))
{
  for (j in seq_len(activityLength))
  {
    finalDataAvg[r, 1] <- i
    activity <- activityLabels[j, 2]
    finalDataAvg[r, 2] <- activity
    # Logical masks selecting this subject's rows for this activity.
    subjectVector <- i == finalData$subject
    activityVector <- activity == finalData$activity
    finalDataAvg[r, 3:columnLength] <- colMeans(finalData[subjectVector & activityVector, 3:columnLength])
    r <- r + 1
  }
}
write.table(finalDataAvg, "final_data_average.txt")
|
6ffad53b15f6c208f9e2dbdacb3c52ddf7e64f67
|
ffdea92d4315e4363dd4ae673a1a6adf82a761b5
|
/data/genthat_extracted_code/Matching/examples/Match.Rd.R
|
27fecbfc0ab79c63535cc7c849a734baac56fa88
|
[] |
no_license
|
surayaaramli/typeRrh
|
d257ac8905c49123f4ccd4e377ee3dfc84d1636c
|
66e6996f31961bc8b9aafe1a6a6098327b66bf71
|
refs/heads/master
| 2023-05-05T04:05:31.617869
| 2019-04-25T22:10:06
| 2019-04-25T22:10:06
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,388
|
r
|
Match.Rd.R
|
library(Matching)
### Name: Match
### Title: Multivariate and Propensity Score Matching Estimator for Causal
### Inference
### Aliases: Match
### Keywords: nonparametric
### ** Examples
# Replication of Dehejia and Wahba psid3 model
#
# Dehejia, Rajeev and Sadek Wahba. 1999.``Causal Effects in
# Non-Experimental Studies: Re-Evaluating the Evaluation of Training
# Programs.''Journal of the American Statistical Association 94 (448):
# 1053-1062.
data(lalonde)
#
# Estimate the propensity model
#
glm1 <- glm(treat~age + I(age^2) + educ + I(educ^2) + black +
hisp + married + nodegr + re74 + I(re74^2) + re75 + I(re75^2) +
u74 + u75, family=binomial, data=lalonde)
#
#save data objects
#
X <- glm1$fitted
Y <- lalonde$re78
Tr <- lalonde$treat
#
# one-to-one matching with replacement (the "M=1" option).
# Estimating the treatment effect on the treated (the "estimand" option defaults to ATT).
#
rr <- Match(Y=Y, Tr=Tr, X=X, M=1);
summary(rr)
# Let's check the covariate balance
# 'nboots' is set to small values in the interest of speed.
# Please increase to at least 500 each for publication quality p-values.
mb <- MatchBalance(treat~age + I(age^2) + educ + I(educ^2) + black +
hisp + married + nodegr + re74 + I(re74^2) + re75 + I(re75^2) +
u74 + u75, data=lalonde, match.out=rr, nboots=10)
|
9092b978220addb57abb25c5682436a283093e58
|
0363e9059653e5ce2a8fd4dfa1bcfe981072ea82
|
/man/circle.Rd
|
e359d76173b4b08c27b6c331a76f8523d42ec710
|
[] |
no_license
|
mwrowe/microRutils
|
7725bd4d5e2ac60337932f384562ed39abcf86a1
|
654cd867bafe126593089441f63c88906ecf60ed
|
refs/heads/master
| 2021-07-07T19:59:43.732449
| 2021-06-10T16:59:33
| 2021-06-10T16:59:33
| 245,310,935
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 1,702
|
rd
|
circle.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/circle.R
\name{circle}
\alias{circle}
\title{Plot a Circle or Regular Polygon}
\usage{
circle(x, y = 0, add = T, segs = 100, how = "radius", ...)
}
\arguments{
\item{x}{A numeric vector of length 1 or 2:
\itemize{
\item If length(x)==1: origin is assumed to be c(0,0); x-coordinate of
a radial segment.
\item If length(x)==2: x-coordinates of origin (x[1]) and radius
endpoint.
}}
\item{y}{Optional numeric vector of length 1 or 2:
\itemize{
\item If omitted: assume y-coordinates of zero.
\item If length(y)==1: origin is assumed to be c(0,0); y-coordinate of
a radial segment.
\item If length(y)==2: y-coordinates of origin (x[1]) and radius
endpoint.
}}
\item{add}{Logical value indicating whether circle should be added to an
existing plot (default) or to a new plot.}
\item{segs}{Integer; number of segments used to approximate a circle. A small number
will produce a regular polygon inscribed within the radius, with a vertex
at x, y}
\item{how}{Character string specifying how x and y relate to the circle geometry.
Currently only radius is implemented.}
\item{...}{Other named arguments passed to \code{\link[graphics]{lines}} or
\code{\link[graphics]{plot}} (such as lwd, lty, col, etc.)}
}
\value{
Returns a data.frame with columns x and y specifying coordinates of
the vertices.
}
\description{
Plot a circle or regular polygon of a given radius at a given location.
}
\seealso{
\code{\link[graphics]{par}} and \code{\link[graphics]{plot.default}} give
more detailed descriptions of the ... plotting parameters.
}
\author{
M.W.Rowe, \email{mwr.stats@gmail.com}
}
|
ba3e36baa67ed21ba11d5219784a581d3adf17ac
|
207b4bb6f3aaeef7eaaf043874578fe051ff63b8
|
/man/get_batch_details.Rd
|
a9ccc9eca7ab7d371b2568a1b97ae73990850f30
|
[
"MIT"
] |
permissive
|
cran/captr
|
451359bf791c0aef9e3f86a92e0c3df5c6b12274
|
0aa5de3e9d1e35f00797cda47438668e0f66546f
|
refs/heads/master
| 2021-01-10T13:17:44.930177
| 2017-04-15T19:29:34
| 2017-04-15T19:29:34
| 48,077,495
| 1
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 749
|
rd
|
get_batch_details.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/get_batch_details.R
\name{get_batch_details}
\alias{get_batch_details}
\title{Get Details of a particular batch}
\usage{
get_batch_details(batch_id = "", ...)
}
\arguments{
\item{batch_id}{ID for the batch}
\item{\dots}{Additional arguments passed to \code{\link{captr_GET}}.}
}
\value{
list of length 26.
}
\description{
Get details such as who the batch was created by, user id of the creator, name of the batch, whether or not the batch has been submitted for processing, files in the batch, etc.
}
\examples{
\dontrun{
get_batch_details("batch_id")
}
}
\references{
\url{https://shreddr.captricity.com/developer/api-reference/#v1-batch}
}
|
fd25f5a98700a632d8838894613165b66e59e663
|
ee6d73b5c686b08448d2ce4a6e86e8355717acec
|
/R/tidytext_topic_models.R
|
cbe7cbf7cfd0a39c8c6c0c278faaf47e659097f3
|
[] |
no_license
|
codymg/lab_work
|
c5a232d255a9d08f98a96570014ab06a3d574a87
|
92d8fabc230b40eb868a31fe8e7b808093a5cab7
|
refs/heads/master
| 2021-01-06T21:44:52.591920
| 2020-02-27T21:23:51
| 2020-02-27T21:23:51
| 241,488,982
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 7,781
|
r
|
tidytext_topic_models.R
|
library(rvest)
library(purrr)
library(tidyverse)
library(SnowballC)
library(tidytext)
library(stopwords)
library(corpus)
library(textstem)
library(ggwordcloud)
library(ggthemes)
library(caret)
library(igraph)
library(ggraph)
library(topicmodels)
library(tictoc)
library(tm)
library(drlib)
trump_dat <- readRDS(url("https://github.com/codymg/lab_work/raw/master/data/trump_dat.rds"))
warren_dat <- readRDS(url("https://github.com/codymg/lab_work/raw/master/data/warren_dat.rds"))
sanders_dat <- readRDS(url("https://github.com/codymg/lab_work/raw/master/data/sanders_dat.rds"))
romney_dat <- readRDS(url("https://github.com/codymg/lab_work/raw/master/data/romney_dat.rds"))
dat <- trump_dat %>%
bind_rows(warren_dat, sanders_dat, romney_dat) %>%
mutate(speech_text = str_squish(speech_text)) %>%
mutate(speech_text = str_trim(speech_text, "both")) %>%
mutate(speech_text = str_remove_all(.$speech_text, "applause|laughter|thank|thanks|presidential|president|realdonaldtrump|donald|
trump|bernie|senator|klobuchar|trump|white house|yang|sanders|warren|buttigieg|
biden|joe|jr|mike|bloomberg|mayor|hillary|clinton|view document|pdf format|bush|
romney|mitt|mitch|mcconnell|kevin|mccarthy|john|thune"))
#tokenizing
# One row per speech: raw text plus speaker and speech-type metadata.
token_df <- data.frame(txt = dat$speech_text, politician = dat$politician, speech_type = as.character(dat$speech_type), stringsAsFactors = FALSE)
# Stop-word list: the SMART lexicon plus a few domain-specific terms.
stopwrds <- data.frame(words = stopwords("SMART"))
otherwrds <- data.frame(words = c("applause", "laughter", "president", "thank", "thanks"))
otherwrds$words <- as.character(otherwrds$words)
stopwrds <- stopwrds %>%
bind_rows(otherwrds) %>%
dplyr::select(words) %>%
distinct(words)
#head(stopwords::stopwords("russian"), 30) provides preview of stop_words being used
# Tokenize to one row per word occurrence, tagging each with its speech id
# and a running word id.
word_list <- token_df %>%
mutate(speech_id = 1:n()) %>%
tidytext::unnest_tokens(word, txt) %>%
anti_join(stopwrds, by = c("word" = "words")) %>% #removing stop words (SMART lexicon plus additions above)
filter(nchar(word) > 3) %>% #keep only words longer than 3 characters
mutate(word_id = 1:n())
# Corpus-wide word frequencies; the join + distinct attaches the metadata of
# each word's first occurrence in word_list.
word_count <- token_df %>%
tidytext::unnest_tokens(word, txt) %>%
dplyr::anti_join(stopwrds, by = c("word" = "words")) %>% #removing stop words (SMART lexicon plus additions above)
dplyr::count(word, sort = TRUE) %>%
dplyr::filter(nchar(word) > 3) %>%
left_join(word_list, by = c("word" = "word")) %>%
distinct(word, .keep_all = TRUE)
################################################
################################################
#Sentiment
###############################################
###############################################
# Preview the AFINN rows of tidytext's built-in `sentiments` table.
sentiments %>%
filter(lexicon == "AFINN")
# a list of English words rated for valence with an integer between minus five (negative) and plus five (positive)
word_list %>%
head(10)
# Mean AFINN valence per politician, plus coverage counts.
# NOTE(review): `sentiment_afinn` is not defined anywhere in this script and
# its columns (term/score) differ from tidytext's `sentiments` -- confirm
# where it is loaded from before running this chunk.
word_list %>%
left_join(., sentiment_afinn, by = c("word" = "term")) %>%
group_by(politician) %>%
summarize(mean_score = mean(score, na.rm=TRUE),
total_words = n(),
scored_words = sum(!is.na(score))) %>%
arrange(desc(mean_score))
################################################
################################################
#Topic Modeling
###############################################
###############################################
# Rebuild token_df (same construction as above) and tokenize again, this
# time with Snowball stemming for the topic model.
token_df <- data.frame(txt = dat$speech_text, politician = dat$politician, speech_type = as.character(dat$speech_type), stringsAsFactors = FALSE)
pol_tokens <- token_df %>%
mutate(speech_id = 1:n()) %>%
unnest_tokens(output = word, input = txt) %>%
anti_join(stop_words) %>%
mutate(word = SnowballC::wordStem(word)) %>%
filter(nchar(word) > 2)
# Document-term matrix (documents = speeches, terms = stems, tf weights)
# consumed by the LDA fit further below.
pol_dtm <- pol_tokens %>%
# get count of each token in each document
dplyr::count(speech_id, word) %>%
# create a document-term matrix with all features and tf weighting
cast_dtm(document = speech_id, term = word, value = n)
pol_dtm
################################################
################################################
#ngrams
###############################################
###############################################
# Tokenize speeches into bigrams, keeping the two words in separate columns
# alongside the raw bigram string.
pol_digram <- dat %>%
  unnest_tokens(bigram, speech_text, token = "ngrams", n = 2, collapse = FALSE) %>%
  separate(bigram, c("word1", "word2"), sep = " ", remove = FALSE)
# Quick look at skip-grams (window k = 1) for comparison; result not stored.
dat %>%
  unnest_tokens(bigram, speech_text, token ="skip_ngrams", n = 2, k = 1, collapse = FALSE) %>%
  head(10)
# Keep "not" out of the stop-word list so negated bigrams ("not good")
# survive for the negation-aware sentiment score below.
my_stop_words <- setdiff(stop_words$word, "not")
pol_digram <- pol_digram %>%
  filter(!word1 %in% my_stop_words, !word2 %in% my_stop_words) %>%
  mutate_at(vars(word1, word2), SnowballC::wordStem) %>%
  mutate(stemmed_digram = paste0(word1, " ", word2))
# Top 30 stemmed bigrams per politician; the tiny runif jitter randomly
# breaks ties at the cutoff.
res_digram <- pol_digram %>%
  group_by(politician, stemmed_digram) %>%
  summarize(n = n()) %>%
  group_by(politician) %>%
  top_n(30, n + runif(n(), 0, 0.01)) %>%
  ungroup() %>%
  arrange(politician, n) %>%
  mutate(.r = row_number())
res_digram %>% head(20)
# Faceted bar chart; .r is a global row rank used as the x position so each
# facet keeps its own ordering, with axis labels mapped back to the bigrams.
gg <- ggplot(res_digram, aes(x=.r, y=n)) +
  facet_wrap(~politician,
             scales="free_y") +
  geom_col() +
  coord_flip() + xlab("") +
  scale_x_continuous( # This handles replacement of .r for x
    breaks = res_digram$.r, # notice need to reuse data frame
    labels = res_digram$stemmed_digram )
gg
# Negation-aware AFINN sentiment: word2's score is sign-flipped when word1
# is "not" (1 - 2*(word1 == "not") evaluates to +1/-1).
# NOTE(review): stemming the lexicon can map several lexicon entries onto
# one stem, and the left_join then duplicates rows -- deduplicate the
# stemmed lexicon if exact counts matter.
pol_digram %>%
  left_join(sentiments %>% filter(lexicon=="AFINN") %>% mutate(word = SnowballC::wordStem(word)), by=c("word2"="word")) %>%
  group_by(politician) %>%
  summarize(mean_score=mean(score*(1-2*(word1=="not")), na.rm=TRUE),
            total_words = n(), total_nots = sum(word1=="not", na.rm=TRUE),
            scored_words = sum(!is.na(score))) %>%
  arrange(mean_score)
# Bigram co-occurrence graph of the most frequent digrams.
# NOTE(review): the filter compares against the STRING "NA"; genuinely
# missing words (real NA) still pass through -- confirm that is intended.
plt <- res_digram %>%
  filter(n > 10) %>%
  tidyr::separate(stemmed_digram, into = c("word1", "word2"), sep = " ") %>%
  dplyr::select(word1, word2) %>%
  filter(word1 !="NA" | word2 !="NA") %>%
  igraph::graph_from_data_frame()
# Plot with no margins, then RESTORE the saved settings.  The original
# ended with a bare par(), which only QUERIES the parameters and left the
# zero margins in effect for every subsequent plot.
par.old <- par(mai=c(0,0,0,0))
plot(plt)
par(par.old)
# Draw a directed network of bigrams: nodes are words, edges run
# word1 -> word2, with edge transparency mapped to the bigram count `n`.
# The fixed seed makes the force-directed ("fr") layout reproducible.
visualize_bigrams <- function(bigrams) {
  set.seed(2016)
  edge_arrow <- grid::arrow(type = "closed", length = unit(.15, "inches"))
  bigram_graph <- graph_from_data_frame(bigrams)
  ggraph(bigram_graph, layout = "fr") +
    geom_edge_link(aes(edge_alpha = n), show.legend = FALSE, arrow = edge_arrow) +
    geom_node_point(color = "lightblue", size = 5) +
    geom_node_text(aes(label = name), vjust = 1, hjust = 1) +
    theme_void()
}
# Draw the bigram network for the most frequent digrams.
res_digram %>%
filter(n > 40) %>%
visualize_bigrams()
# Fit a 5-topic LDA on the speech document-term matrix (seeded for
# reproducibility).
# NOTE(review): the fitted model object is immediately overwritten by its
# tidy() per-topic beta table, so the model itself is lost -- keep a
# separate variable if the model is needed later.
pol_lda <- LDA(pol_dtm, k = 5, control = list(seed = 123))
pol_lda <- tidy(pol_lda)
# Top 5 terms per topic by beta (per-topic word probability).
top_terms <- pol_lda %>%
group_by(topic) %>%
top_n(5, beta) %>%
ungroup() %>%
arrange(topic, -beta)
top_terms
# Bar chart of top terms, one facet per topic; reorder_within +
# scale_x_reordered (from drlib) keep per-facet term ordering.
top_terms %>%
mutate(topic = factor(topic),
term = drlib::reorder_within(term, beta, topic)) %>%
ggplot(aes(term, beta, fill = topic)) +
geom_bar(alpha = 0.8, stat = "identity", show.legend = FALSE) +
scale_x_reordered() +
facet_wrap(~ topic, scales = "free", ncol = 2) +
coord_flip()
# Contrast topics 1 and 2: log2 ratio of term probabilities, keeping terms
# with non-negligible probability in at least one topic.
beta_spread <- pol_lda %>%
filter(topic == 1 | topic == 2) %>%
mutate(topic = paste0("topic", topic)) %>%
spread(topic, beta) %>%
filter(topic1 > .001 | topic2 > .001) %>%
mutate(log_ratio = log2(topic2 / topic1))
# Terms most characteristic of one topic over the other.
beta_spread %>%
filter(log_ratio > 20 | log_ratio < -20) %>%
mutate(term = fct_reorder(term, log_ratio)) %>%
ggplot(aes(term, log_ratio)) +
geom_bar(alpha = 0.8, stat = "identity", show.legend = FALSE) +
scale_x_reordered() +
coord_flip()
|
4bebb532add44258ec57419aeb74828a59951b7b
|
57ee4c4f40ad9e168f8ab27b80f1e81ef20bf76a
|
/pkg/sdam/man/rpmp.Rd
|
c1f679ded930694823a0e9bb07094075a84a476b
|
[] |
no_license
|
mplex/cedhar
|
72d93228e9436ebcccce4c183f966c0ba488fd24
|
c951f25a1edaa5e5cda315051c264a97ebe69db1
|
refs/heads/master
| 2023-08-31T01:07:10.424651
| 2023-08-29T17:33:07
| 2023-08-29T17:33:07
| 215,776,073
| 1
| 2
| null | 2019-11-27T08:27:52
| 2019-10-17T11:29:53
|
TeX
|
UTF-8
|
R
| false
| false
| 976
|
rd
|
rpmp.Rd
|
\name{rpmp}
\docType{data}
\alias{rpmp}
\title{
Maps of ancient Roman provinces and Italian regions
}
\description{
This is a list with specifications to plot cartographical maps of ancient Roman provinces and Italian regions.
}
\usage{
data("rpmp")
}
\format{
A list of lists with 59 Roman provinces and Italian regions in the year 117 AD, where \code{names(rpmp)} gives the
province acronyms according to the \code{\link{EDH}} dataset.
Each province in \code{rpmp} is a two-element list holding, in separate slots, the province name and the shape data
for a cartographical map.
}
\source{
\url{https://commons.wikimedia.org/wiki/File:RomanEmpire_117.svg}
\url{https://commons.wikimedia.org/wiki/File:Roman_provinces_trajan.svg}
\url{https://commons.wikimedia.org/wiki/File:Regioni_dell'Italia_Augustea.svg}
}
%\references{
%}
\seealso{
\code{\link{plot.map}}, \code{\link{rpmcd}}, \code{\link{rp}}, \code{\link{retn}}, \code{\link{EDH}}
}
\keyword{datasets}
|
0a7ee561d07c0c8efe9a24a0b95aec2f0a776ccc
|
12ae74bd0ba9d5494d7301b521b45d1bfa5ff84a
|
/R/grapes_equals_grapes.R
|
e8e09367ed628419e2d0a52f24c0a68d6cb04e9f
|
[] |
no_license
|
cran/do
|
62b609a0f0cc0f0c0cc879adb821b1d9d95b6632
|
fa0d7c8f9799326ffa6f0763f490c2873597131b
|
refs/heads/master
| 2021-08-15T11:59:00.793187
| 2021-08-03T10:40:02
| 2021-08-03T10:40:02
| 206,034,685
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 658
|
r
|
grapes_equals_grapes.R
|
#' Locate Accurately
#'
#' Find the positions in \code{b} of every element of \code{a}.
#'
#' @param a vector of values to match
#' @param b vector to search in
#'
#' @return If \code{a} has length one, an integer vector of the positions in
#'   \code{b} that equal \code{a}. If \code{a} has length greater than one, a
#'   named list with one vector of positions per element of \code{a}.
#' @export
#'
#' @examples
#' a=c(1,2,3,4)
#' b=c(1,2,3,1,4,1,5,6,1,4,1)
#' a %==% b
"%==%" <- function(a, b) {
  if (length(a) == 1) {
    # seq_along(b) is safe for empty b (1:length(b) would yield c(1, 0));
    # logical subsetting (rather than which()) preserves the original's
    # NA-propagation behaviour.
    seq_along(b)[a == b]
  } else if (length(a) > 1) {
    # lapply preallocates the result instead of growing a list element by
    # element inside a loop (the original was O(n^2) in length(a)).
    location <- lapply(a, function(el) seq_along(b)[el == b])
    names(location) <- a
    location
  }
}
|
866e3cd9b4d0f64fc094413e1a39a9d993f8a184
|
de5acbf5d3d770f5a8a6805d4ded21265a9465a7
|
/tree_generator.R
|
547772cf07de1ff743df8465200a7331b72a0748
|
[] |
no_license
|
arodgers11/R
|
901dc35737281ce00b51be04c36014ccbb84ccec
|
53388609f76d38857bf34070fcb79553d3e897c1
|
refs/heads/master
| 2022-07-22T00:03:57.378771
| 2020-05-14T05:37:42
| 2020-05-14T05:37:42
| 192,859,206
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 531
|
r
|
tree_generator.R
|
#Generates random binary trees with n nodes
# For each n in 6..20: build a random unrooted binary tree, then write a
# MATLAB-style script ./Test Trees/d<n>.m containing n, the pairwise
# leaf-to-leaf edge counts, and a "true" solution vector xtrue.
library("phytools", lib.loc="~/R/win-library/3.4")
for(n in 6:20) {
d<-c()
t=unroot(rtree(n))
# Output file for this tree, e.g. ./Test Trees/d6.m
s=paste('./Test Trees/','d',n,'.m',sep='')
cat(paste('n=',n,';\n\n',"d = [ ",sep=''),file=s,append=FALSE)
# Upper triangle of the leaf distance matrix.  nodepath() returns the node
# sequence between leaves i and j, so length-1 is the number of edges.
# NOTE(review): `d` stores length-2 while the file gets length-1 -- the
# off-by-one offset is presumably intentional for the 2^(n-2-d) formula
# below, but confirm against the MATLAB consumer.
for(i in 1:(n-1)) {
for(j in (i+1):n) {
d<-c(d,length(nodepath(t,i,j))-2)
cat(length(nodepath(t,i,j))-1,' ',sep='',file=s,append=T)
}
}
cat("];\n",file=s,append=T)
# xtrue: one value per leaf pair, 2^(n-2-d).
cat('xtrue = [',2^(n-2-d),sep=' ',file=s,append=T)
cat(' ];\n\n',file=s,append=T)
}
|
26883be0fc9403846a5a800b5aaa0bb6052b1e63
|
334c555684570d5499b70b2ff35476a84d6f392a
|
/ui.R
|
ce7ec4b2242de68a495a2557075c110ed486aa10
|
[] |
no_license
|
mndrake/leaflet-demo
|
57342a582935c8c634fc30b4e3fba7ff34eff454
|
c9c9c2ad96e6504171b0e61c5353cd8693fb781f
|
refs/heads/master
| 2021-01-21T05:23:11.073297
| 2017-02-27T01:43:11
| 2017-02-27T01:43:11
| 83,180,863
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,325
|
r
|
ui.R
|
library(shiny)
library(DT)
library(leaflet)
# Two-tab Shiny UI: a full-screen leaflet map with a draggable control
# panel, plus a raw data table.
navbarPage('Map Demo', id = 'main',
tabPanel('map', value = 'map',
div(class = 'outer',
tags$head(includeCSS('assets/style.css'),
includeScript("assets/gomap.js")),
# Map fills the tab; the sizing relies on the .outer class in assets/style.css.
leafletOutput('county_map', width = '100%', height = '100%'),
# Floating, draggable control panel overlaid on the map.
absolutePanel(id = 'controls', class = 'panel panel-default', fixed = TRUE,
draggable = TRUE, top = 60, left = 'auto', right = 20,
bottom = 'auto', width = 330, height = 'auto',
# choices = NULL here -- presumably populated server-side via
# updateSelectInput; confirm against server.R.
selectInput('stat', label = 'statistic', choices = NULL, selected = NULL),
selectInput('polyname', label = 'county', choices = NULL, selected = NULL),
actionButton("reset_map", "reset map")
)
)
),
# NOTE(review): this tab's value is also 'map' (same as the first tab),
# which makes the two tabs indistinguishable via input$main /
# updateNavbarPage -- presumably it was meant to be 'data'; confirm
# against the server code before changing.
tabPanel('data', value = 'map',
DT::dataTableOutput('data'),
# Hidden panel referencing the icon -- presumably forces the icon font
# dependency to load for dynamically generated UI; confirm.
conditionalPanel("false", icon("crosshair"))
)
)
|
ed410d106a8f46373293440c1568d21160d2f3dd
|
ffdea92d4315e4363dd4ae673a1a6adf82a761b5
|
/data/genthat_extracted_code/forestSAS/examples/shrinkedge.Rd.R
|
7e825485b31a8ed92362ab74a52987a85d787d7b
|
[] |
no_license
|
surayaaramli/typeRrh
|
d257ac8905c49123f4ccd4e377ee3dfc84d1636c
|
66e6996f31961bc8b9aafe1a6a6098327b66bf71
|
refs/heads/master
| 2023-05-05T04:05:31.617869
| 2019-04-25T22:10:06
| 2019-04-25T22:10:06
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 613
|
r
|
shrinkedge.Rd.R
|
library(forestSAS)
### Name: shrinkedge
### Title: Shrink the edges for the point pattern
### Aliases: shrinkedge
### ** Examples
library(spatstat)
data(finpines)
finpines$window
# window: rectangle = [-5, 5] x [-8, 2] metres
#Shrink the rectangle [-5,5]x[-8,2] to [-3,3]x[-5,-1]
# xwidth/ywidth give the margin trimmed from each side; id labels the
# points so originals can be matched after shrinking (old.id below).
shrink.trees<- shrinkedge(finpines,xwidth=2,ywidth=3,id=1:126)
shrink.trees
# Show the changes by figures
# Left: full pattern with the retained window outlined in red.
# Right: the shrunken pattern, labelled by original point id.
opar<-par(mfrow=c(1,2))
plot(finpines$x,finpines$y)
text(finpines$x,finpines$y,1:126)
rect(-3,-5,3,-1,border="red")
plot(shrink.trees$x,shrink.trees$y)
text(shrink.trees$x,shrink.trees$y,shrink.trees$old.id)
par(opar)
|
fb0d381313e78880fe557c70288592109dfcefd6
|
a3d179a426b333958fbe885ba39334225d5559f4
|
/Machine-learning-model/Model_building.R
|
70ead5bb70a3b6252ab9297ef50a5664f5e11313
|
[] |
no_license
|
hwu84/Small-Molecule-Screen-Facility
|
e0d1e43fb102e0b5126eccd874ba30a1f29ef177
|
33c47df9b59d7cc584d4cc87674967484c1ffd33
|
refs/heads/master
| 2021-01-10T10:14:13.052893
| 2016-03-17T20:52:50
| 2016-03-17T20:52:50
| 52,896,901
| 0
| 0
| null | 2016-03-17T20:52:51
| 2016-03-01T17:44:08
|
R
|
UTF-8
|
R
| false
| false
| 28,030
|
r
|
Model_building.R
|
# Code include preprocess data, generate training and test set, 5 fold cross validation, model training based on optimal parameters,
# generate holdout predictions from 5 fold cv, evaluate enrichment from test results.
# Using rank percentile ( rank/row length).
# Author: Haozhen Wu
library(AUC)
library(data.table)
library(ggplot2)
library(gridExtra)
# NOTE(review): hardcoded setwd() makes the script machine-specific; every
# relative path below (cv_output/, cv_optimal/, model files) resolves here.
setwd("~/Desktop/study/SmallMolecular/stacking")
# Raw docking scores: 171 columns, all read as character and converted
# per-column later in Get_feature_oriMol().
docking <- read.csv("~/Desktop/study/SmallMolecular/stacking/docking_data_dude14_20160111.txt", sep=" ",colClasses = c(rep("character",171)))
molid = docking$molid
head(docking)
########## functions
# Convert a numeric vector to rank percentiles in (0, 1], averaging ties.
# NA entries are flagged with the sentinel value -100 (treated downstream
# as "missing score") instead of propagating NA.
#
# Uses base::rank(na.last = "keep") instead of data.table::frank: the
# non-NA ranks are identical and the NA positions are overwritten with the
# sentinel anyway, so the result is unchanged while the data.table
# dependency is removed from this helper.
rank_percentile_na = function(x){
  na_index = which(is.na(x))
  rankPercentile = rank(x, ties.method = "average", na.last = "keep") / length(x)
  rankPercentile[na_index] = -100
  return(rankPercentile)
}
# Build the rank-percentile feature block for target i, restricted to that
# target's own ("original") molecules.
# Relies on globals: `docking` (raw scores), `index` (per-target row
# indices, built further below) and rank_percentile_na().
# Returns a data.frame with 12 docking-score columns converted to rank
# percentiles plus three row-mean aggregate columns.
Get_feature_oriMol = function(i){
# index_target is unused inside this function (leftover from the
# commented-out target extraction below).
index_target = c(1:14)
index_feature = c(0:13)
target_name = data.frame(name = c("ace","adrb1","braf","cdk2","drd3","esr1","fa10","fabp4","gria2","hdac8","mmp13","pde5a","ptn1","src"))
# target = sapply(docking$classifications,function(x) substr(x,index_target[i],index_target[i]))
# cat("table for target",paste0("feature_",target_name$name)[i],":\n",table(target))
# Columns named X<k>_* hold the k-th target's raw docking scores (0-based).
feature = docking[,grep(paste0("X",index_feature[i],"_"),colnames(docking))]
feature = sapply(feature,as.numeric)
# Keep only the rows belonging to target i's own molecules.
feature = apply(feature,2,function(x) x[index[[i]]])
cat("\ndim for features",paste0("feature_",target_name$name)[i],":\n",dim(feature))
# Per-column rank percentiles; NA scores become the -100 sentinel.
feature = apply(feature,2,rank_percentile_na)
feature = data.frame(feature)
# Aggregates: mean over score type 0 (cols 1-6), type 1 (cols 7-12), all 12.
feature$mean_scoreType0 = apply(feature[,1:6],1,mean)
feature$mean_scoreType1 = apply(feature[,7:12],1,mean)
feature$mean_scoreTypeAll = apply(feature[,1:12],1,mean)
return(feature)
}
# 5-fold CV parameter search for one target's xgboost model.
#
# TarName: target acronym, e.g. "ace"; the global DMatrix
#          dtrain_<TarName>_v8 must already exist.
# Side effects: writes the CV log to ./cv_output/bst_<TarName>_8_5foldcv.txt
# and appends the optimal-round / AUC summary row to
# ./cv_optimal/stacking8_cvOptimal.csv.
boost_cv = function(TarName){
  sink(paste0('./cv_output/bst_',TarName,'_8_5foldcv.txt'))
  # Close the log even if xgb.cv errors: the original's trailing sink() was
  # skipped on error, leaving all later console output diverted to the file.
  on.exit(sink(), add = TRUE)
  time_start = proc.time()
  param <- list( objective = "binary:logistic",
                 booster = "gbtree",
                 eval_metric = "auc",
                 eta = 0.03, # 0.023
                 max_depth = 7, #changed from default of 8
                 subsample = 0.83,
                 colsample_bytree = 1,
                 num_parallel_tree = 1,
                 min_child_weight = 5,
                 gamma = 5
                 # alpha = 0.0001,
                 # lambda = 1
  )
  set.seed(1120)
  # Look the DMatrix up by name instead of eval(parse(text = ...)).
  dtrain = get(paste0("dtrain_", TarName, "_v8"))
  cv = xgb.cv(param, dtrain,
              nrounds = 1500,
              nfold = 5,
              metrics = "error",
              verbose = 1,
              print.every.n = 10,
              maximize = TRUE,
              nthread = 12
  )
  # Elapsed time; the original computed time_start - proc.time() (negated).
  time = proc.time() - time_start
  print(time)
  best <- max(cv$test.auc.mean)
  bestIter <- min(which(cv$test.auc.mean == best))
  print(cv[bestIter])
  cat("Best round and test result auc\n", bestIter, best, "\n")
  cat("Recommend round for training fullset:\n", bestIter * 1.2)
  # Append the summary row for the stacking bookkeeping file.
  df = data.frame(targetName = TarName, round = bestIter, roundForTrain = bestIter * 1.2,
                  train.auc.mean = cv$train.auc.mean[bestIter], train.auc.std = cv$train.auc.std[bestIter],
                  test.auc.mean = cv$test.auc.mean[bestIter], test.auc.std = cv$test.auc.std[bestIter])
  write.table(df, "./cv_optimal/stacking8_cvOptimal.csv", append = TRUE, col.names = FALSE, sep = ",", row.names = FALSE)
}
# 5-fold CV at a fixed round count, capturing out-of-fold ("holdout")
# predictions for stacking.
#
# TarName: target acronym; Rounds: boosting rounds (from the CV optimum).
# Side effects: writes the log to bst_<TarName>_8_5foldcv2.txt and the
# holdout predictions to bst_<TarName>_8_5cv_holdout.csv.
boost_cv_holdout_8 = function(TarName, Rounds){
  sink(paste0('bst_',TarName,'_8_5foldcv2.txt'))
  # Guarantee the log is closed even if xgb.cv errors (the original left
  # the sink open on error).
  on.exit(sink(), add = TRUE)
  time_start = proc.time()
  param <- list( objective = "binary:logistic",
                 booster = "gbtree",
                 eval_metric = "auc",
                 eta = 0.04, # 0.023
                 max_depth = 7, #changed from default of 8
                 subsample = 0.83,
                 colsample_bytree = 0.77,
                 num_parallel_tree = 1,
                 min_child_weight = 1,
                 gamma = 5
                 # alpha = 0.0001,
                 # lambda = 1
  )
  set.seed(1120)
  # Fetch the DMatrix by name (replaces eval(parse(text = ...))).
  bst_5cv = xgb.cv(param, get(paste0("dtrain_", TarName, "_v8")),
                   nrounds = Rounds,
                   nfold = 5,
                   metrics = "error",
                   verbose = 1,
                   print.every.n = 10,
                   maximize = TRUE,
                   prediction = TRUE, # keep the out-of-fold predictions
                   nthread = 12
  )
  # Elapsed time; the original computed time_start - proc.time() (negated).
  time = proc.time() - time_start
  print(time)
  bst_holdout = data.frame(Holdout_Pred = bst_5cv$pred)
  write.csv(bst_holdout, file = paste0("bst_",TarName,"_8_5cv_holdout.csv"), row.names = FALSE)
}
# Train the final xgboost model for one target on its full DMatrix and save
# it to disk as bst_<TarName>_8.
#
# TarName: target acronym; Rounds: boosting rounds (from the CV optimum).
# Side effects: writes the training log to bst_<TarName>_8_out.txt and the
# model file bst_<TarName>_8.
boost_train = function(TarName, Rounds){
  sink(paste0('bst_',TarName,'_8_out.txt'))
  # Guarantee the log is closed even if xgb.train errors (the original left
  # the sink open on error).
  on.exit(sink(), add = TRUE)
  time_start = proc.time()
  param <- list( objective = "binary:logistic",
                 booster = "gbtree",
                 eval_metric = "auc",
                 eta = 0.04, # 0.023
                 max_depth = 7, #changed from default of 8
                 subsample = 0.83,
                 colsample_bytree = 0.77,
                 num_parallel_tree = 1,
                 min_child_weight = 1,
                 gamma = 5
                 # alpha = 0.0001,
                 # lambda = 1
  )
  set.seed(1120)
  # DMatrix and watchlist fetched by name (replaces eval(parse(text = ...))).
  bst <- xgb.train( params = param,
                    data = get(paste0("dtrain_", TarName, "_v8")),
                    nrounds = Rounds,
                    verbose = 1, #1
                    early.stop.round = 100,
                    watchlist = get(paste0("watchlist_", TarName, "_v8")),
                    maximize = TRUE,
                    print.every.n = 10,
                    nthread = 6
  )
  # Elapsed time; the original computed time_start - proc.time() (negated).
  time = proc.time() - time_start
  print(time)
  xgb.save(bst, paste0("bst_",TarName,"_8"))
}
# Score every target's molecule set with TarName's trained model and log the
# AUC against each target's labels (how well one target's model transfers).
# Relies on globals: bst_<TarName>_8, feature_v8_oriMol, target_oriMol.
#
# Returns: list of 14 AUC values, one per target.
#
# Fixes the original's unreachable sink(): it appeared AFTER return(pred_auc),
# so the log diversion was never closed and all subsequent console output
# went to the file.  (The unused local target_name table was dropped.)
pred_newTarget = function(TarName){
  pred_auc = list()
  sink(paste0("pred_auc_F", TarName, "_8.txt"))
  on.exit(sink(), add = TRUE)
  # Fetch the model by name (replaces eval(parse(text = ...))).
  model = get(paste0("bst_", TarName, "_8"))
  for (i in 1:14) {
    pred = predict(model, data.matrix(feature_v8_oriMol[[i]]))
    pred_auc[[i]] = auc(roc(pred, as.factor(target_oriMol[[i]])))
    print(pred_auc[[i]])
  }
  pred_auc
}
# 14 predictions from one target, total 14 targets. store in a list
# pred_target[[i]][[j]] = model j's predicted probabilities for target i's
# molecules.  Relies on globals bst_<name>_8, feature_v8_oriMol and
# target_name (see the NOTE on target_name below this loop's neighbours).
# NOTE(review): print(str(...)) prints the structure and then an extra
# NULL, since str() returns NULL invisibly -- str(pred_target) alone would
# suffice.
pred_target = list()
for( i in 1:14){
temp = list()
for( j in 1:14){
temp[[j]] = predict(eval(parse(text = paste0("bst_",target_name$name[j],"_8"))),data.matrix(feature_v8_oriMol[[i]]))
}
pred_target[[i]] = temp
print(str(pred_target))
print(i)
}
# get targets
# Per-target 0/1 labels: character position i of docking$classifications
# encodes molecule activity for target i.
# NOTE(review): `target_name` is only defined inside functions above, not at
# the top level -- confirm it exists in the global environment before this
# loop runs, otherwise the cat() call errors.
index_target = c(1:14)
target = list()
for ( i in 1:14){
target[[i]] = as.numeric(sapply(docking$classifications,function(x) substr(x,index_target[i],index_target[i])))
cat("\ntable for target",paste0("feature_",target_name$name)[i],":\n",table(target[[i]]))
}
#
# Row positions in the global `molid` belonging to target i's own molecules
# (ids are expected to end in "_<target>").
# NOTE(review): the grep pattern "*_<name>" looks like a glob, not a regex --
# a leading "*" has no preceding atom.  Verify it matches the intended ids;
# a regex such as paste0("_", name, "$") is presumably what was meant.
original_molecule = function(i){
target_name = data.frame(name = c("ace","adrb1","braf","cdk2","drd3","esr1","fa10","fabp4","gria2","hdac8","mmp13","pde5a","ptn1","src"))
index = grep(paste0("*_",target_name$name[i]),molid)
return(index)
}
# Row indices of each target's own molecules, one list element per target.
# A single lapply replaces the original 14 copy-pasted assignments; the
# resulting list is identical element-for-element.
index = lapply(1:14, original_molecule)
# generate original unique molecule labels for each target
# Subset each target's 0/1 label vector down to its own molecules; one
# lapply replaces the original 14 copy-pasted assignments (identical result).
target_oriMol = lapply(1:14, function(i) target[[i]][index[[i]]])
# get features
# Rank-percentile feature block per target, restricted to its own molecules;
# one lapply replaces the original 14 copy-pasted assignments (identical
# result, same order).
feature_v8_oriMol = lapply(1:14, Get_feature_oriMol)
#feature_v8_oriMol[[1]] = apply(feature_v8[[1]],2,function(x) x[index[[1]]])
# NOTE(review): `feature_v8` is never defined in this script -- these two
# summary() calls look like leftovers from an earlier interactive version
# and will error if the file is source()d top to bottom.
summary(feature_v8[[1]])
summary(feature_v8_oriMol[[1]])
# get xgboost dataset
# Build, expose and cache one xgb.DMatrix (plus watchlist) per target.
# A loop with assign() replaces 14 copy-pasted three-line stanzas; the
# objects keep their original global names (dtrain_<t>_v8, watchlist_<t>_v8,
# dtrain_<t>_v8.buffer) and creation order, so the rest of the script is
# unchanged.
v8_targets = c("ace", "adrb1", "braf", "cdk2", "drd3", "esr1", "fa10",
               "fabp4", "gria2", "hdac8", "mmp13", "pde5a", "ptn1", "src")
for (i in seq_along(v8_targets)) {
  nm = v8_targets[i]
  dmat = xgb.DMatrix(data = data.matrix(feature_v8_oriMol[[i]]),
                     label = as.numeric(target_oriMol[[i]]))
  assign(paste0("dtrain_", nm, "_v8"), dmat)
  assign(paste0("watchlist_", nm, "_v8"), list(train = dmat))
  xgb.DMatrix.save(dmat, paste0("dtrain_", nm, "_v8.buffer"))
}
# load xgboost dataset
# Reload the cached DMatrix buffers and rebuild the watchlists.  One loop
# (and a single library() call -- the original repeated library(xgboost) 14
# times) replaces 14 copy-pasted stanzas; the objects keep their original
# global names.
library(xgboost)
v8_targets = c("ace", "adrb1", "braf", "cdk2", "drd3", "esr1", "fa10",
               "fabp4", "gria2", "hdac8", "mmp13", "pde5a", "ptn1", "src")
for (nm in v8_targets) {
  dmat = xgb.DMatrix(paste0("dtrain_", nm, "_v8.buffer"))
  assign(paste0("dtrain_", nm, "_v8"), dmat)
  assign(paste0("watchlist_", nm, "_v8"), list(train = dmat))
}
## model cv
# Run 5-fold CV for every target (replaces 14 copy-pasted calls; same order).
for (nm in c("ace", "adrb1", "braf", "cdk2", "drd3", "esr1", "fa10",
             "fabp4", "gria2", "hdac8", "mmp13", "pde5a", "ptn1", "src")) {
  boost_cv(nm)
}
## train model
# Final training round counts per target, taken from each target's CV
# optimum (see the cv_optimal output).  A named vector + loop replaces 14
# copy-pasted calls; order and arguments are identical.
train_rounds = c(ace = 1350, adrb1 = 310, braf = 1480, cdk2 = 920,
                 drd3 = 270, esr1 = 400, fa10 = 1490, fabp4 = 1310,
                 gria2 = 670, hdac8 = 1120, mmp13 = 1470, pde5a = 940,
                 ptn1 = 1320, src = 340)
for (nm in names(train_rounds)) {
  boost_train(nm, train_rounds[[nm]])
}
# load model
# Reload each target's saved booster into bst_<name>_8 (replaces 14
# copy-pasted xgb.load calls; identical global names).
library(xgboost)
for (nm in c("ace", "adrb1", "braf", "cdk2", "drd3", "esr1", "fa10",
             "fabp4", "gria2", "hdac8", "mmp13", "pde5a", "ptn1", "src")) {
  assign(paste0("bst_", nm, "_8"), xgb.load(paste0("bst_", nm, "_8")))
}
## predict new target
# Cross-target AUCs: pred_auc_8[[i]] holds the 14 AUCs produced by target
# i's model scored against every target's molecules (replaces 14
# copy-pasted calls; list contents and order are identical).
pred_auc_8 = lapply(c("ace", "adrb1", "braf", "cdk2", "drd3", "esr1", "fa10",
                      "fabp4", "gria2", "hdac8", "mmp13", "pde5a", "ptn1", "src"),
                    pred_newTarget)
## # equal weight blending
# For each target i, average the predictions from the 13 models trained on
# the OTHER targets, then report the AUC of that blended score against
# target i's labels.  This loop replaces 13 near-identical copy-pasted
# stanzas; pred_target[[i]][-i] selects exactly the same 13 model
# predictions each stanza cbind-ed (column order differs, but a row-wise
# mean is order-invariant, so the blended scores and AUCs are identical).
# The globals pred_<t>_all_8 and pred_<t>_mean_8 keep their original names.
# (Target 14, "src", is completed by the stanza that follows this block.)
blend_targets = c("ace", "adrb1", "braf", "cdk2", "drd3", "esr1", "fa10",
                  "fabp4", "gria2", "hdac8", "mmp13", "pde5a", "ptn1")
for (i in seq_along(blend_targets)) {
  nm = blend_targets[i]
  # All models except target i's own model.
  pred_all = do.call(cbind, pred_target[[i]][-i])
  pred_mean = apply(pred_all, 1, mean)
  assign(paste0("pred_", nm, "_all_8"), pred_all)
  assign(paste0("pred_", nm, "_mean_8"), pred_mean)
  # print() is needed inside the loop; the original relied on top-level
  # auto-printing of the auc() result.
  print(auc(roc(pred_mean, as.factor(target_oriMol[[i]]))))
}
## # equal weight blending
# # mean: equal weight blending models for src
pred_src_all_8 = cbind(unlist(pred_target[[14]][2]),unlist(pred_target[[14]][3]),unlist(pred_target[[14]][4]),
unlist(pred_target[[14]][5]),unlist(pred_target[[14]][6]),unlist(pred_target[[14]][7]),
unlist(pred_target[[14]][8]),unlist(pred_target[[14]][9]),unlist(pred_target[[14]][10]),
unlist(pred_target[[14]][11]),unlist(pred_target[[14]][12]),unlist(pred_target[[14]][13]),
unlist(pred_target[[14]][1]))
pred_src_mean_8 = apply(pred_src_all_8,1,mean)
auc(roc(pred_src_mean_8,as.factor(target_oriMol[[14]])))
# store blending probability result
# Write one blended-probability vector to CSV.
#
# Looks up the global object pred_<target>_mean_8 for the i-th target and
# writes it to pred_<target>_mean_8.csv in the working directory.
#
# Args:
#   i: integer index (1-14) into the fixed list of target names below.
#
# Side effects: writes a CSV file; returns write.csv()'s invisible NULL,
# matching the original behavior.
store_blending_prob <- function(i) {
  target_name <- c("ace", "adrb1", "braf", "cdk2", "drd3", "esr1", "fa10",
                   "fabp4", "gria2", "hdac8", "mmp13", "pde5a", "ptn1", "src")
  stopifnot(length(i) == 1, i >= 1, i <= length(target_name))
  stem <- paste0("pred_", target_name[i], "_mean_8")
  # get() replaces the fragile eval(parse(text = ...)) lookup.
  write.csv(get(stem), paste0(stem, ".csv"), row.names = FALSE)
}
# Export every target's blended probabilities (indices 1-14).
# write.csv() returns invisibly, so the loop produces the same console
# output as the original fourteen literal calls.
for (target_idx in seq_len(14)) {
  store_blending_prob(target_idx)
}
# roc plot -- interactive exploration snippet: re-run these lines by hand,
# letting `i` step through the targets one at a time.
# NOTE(review): relies on a global data.frame `target_name` with a `name`
# column; the one inside store_blending_prob() is function-local, so
# `target_name` must be defined in the workspace before running -- confirm.
i = 1
# Plot the ROC curve for target i (blended predictions vs. true labels).
plot(roc(eval(parse(text = paste0("pred_",target_name$name[i],"_mean_8"))) ,as.factor(target_oriMol[[i]])))
i=i+1
# Threshold lookup: component [[3]] of the roc object, indexed at the
# largest position where component [[2]] is still below 0.01 -- presumably
# the cutoff at a 1% false-positive rate; verify against the roc() object
# layout of the loaded package.
roc(eval(parse(text = paste0("pred_",target_name$name[i],"_mean_8"))) ,as.factor(target_oriMol[[i]]))[[3]][
max(which(roc(eval(parse(text = paste0("pred_",target_name$name[i],"_mean_8"))) ,as.factor(target_oriMol[[i]]))[[2]]<0.01))
]
i=i+1
# holdout prediction -- one call per target. The second argument is
# presumably the per-target boosting round count chosen earlier (values
# differ per target) -- TODO confirm against the definition of
# boost_cv_holdout_8(), which is not visible in this file.
boost_cv_holdout_8("ace",1350)
boost_cv_holdout_8("adrb1",310)
boost_cv_holdout_8("braf",1480)
boost_cv_holdout_8("cdk2",920)
boost_cv_holdout_8("drd3",270)
boost_cv_holdout_8("esr1",400)
boost_cv_holdout_8("fa10",1490)
boost_cv_holdout_8("fabp4",1310)
boost_cv_holdout_8("gria2",670)
boost_cv_holdout_8("hdac8",1120)
boost_cv_holdout_8("mmp13",1470)
boost_cv_holdout_8("pde5a",940)
boost_cv_holdout_8("ptn1",1320)
boost_cv_holdout_8("src",340)
|
fe5b58a7c05f7ac2b4095cea2456d21b526a8f6d
|
67f566943ef74373bef603f2a6b0f3ebe914be2b
|
/man/Mqrcm-package.Rd
|
eabb56ac0eba8473b7f15533ced0a6fb1323609e
|
[] |
no_license
|
cran/Mqrcm
|
78aac457f7fa191e73c7af81cac49c1ca1cd1e89
|
7776ed3d279c94cf2cc40dd1a72c540ad6184d6c
|
refs/heads/master
| 2021-06-16T23:40:08.883883
| 2021-02-02T02:10:06
| 2021-02-02T02:10:06
| 145,909,724
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,849
|
rd
|
Mqrcm-package.Rd
|
\name{Mqrcm-package}
\alias{Mqrcm-package}
\docType{package}
\title{
M-Quantile Regression Coefficients Modeling
}
\description{
This package implements Frumento and Salvati (2020) method for M-quantile regression
coefficients modeling (Mqrcm), in which M-quantile regression coefficients are described
by (flexible) parametric functions of the order of the quantile. This permits modeling
the entire conditional M-quantile function of a response variable.
}
\details{
\tabular{ll}{
Package: \tab Mqrcm\cr
Type: \tab Package\cr
Version: \tab 1.2\cr
Date: \tab 2021-01-29\cr
License: \tab GPL-2\cr
}
The function \code{\link{iMqr}} permits specifying the regression model.
Two special functions, \code{\link{slp}} and \code{\link{plf}}, are provided to facilitate model building.
The auxiliary functions \code{\link{summary.iMqr}}, \code{\link{predict.iMqr}}, and \code{\link{plot.iMqr}}
can be used to extract information from the fitted model.
}
\author{
Paolo Frumento
Maintainer: Paolo Frumento <paolo.frumento@unipi.it>
}
\references{
Frumento, P., Salvati, N. (2020). \emph{Parametric modeling of M-quantile regression coefficient functions with application to small area estimation}, Journal of the Royal Statistical Society, Series A, 183(1), p. 229-250.
}
\keyword{ package }
\examples{
# use simulated data
n <- 250
x <- rexp(n)
y <- runif(n, 0, 1 + x)
model <- iMqr(y ~ x, formula.p = ~ p + I(p^2))
summary(model)
summary(model, p = c(0.1,0.2,0.3))
predict(model, type = "beta", p = c(0.1,0.2,0.3))
predict(model, type = "CDF", newdata = data.frame(x = c(1,2,3), y = c(0.5,1,2)))
predict(model, type = "QF", p = c(0.1,0.2,0.3), newdata = data.frame(x = c(1,2,3)))
predict(model, type = "sim", newdata = data.frame(x = c(1,2,3)))
par(mfrow = c(1,2)); plot(model, ask = FALSE)
}
|
78ad948d2c0d5d8b9973bcd38b1b498d28af0ab3
|
4df13b51cd129b9471cc4fdf53cdc45fe2e4a8c3
|
/stats/real-time/server.R
|
ea96e62d86cd4984572905ec8c9aa73bbafd1d25
|
[] |
no_license
|
jacoboqc/CountingPeople
|
3e1b56c6680059eba25f98fe5f8030b7ebc7c188
|
f5875d3215cc00f05d3e90ee059764d403a444c8
|
refs/heads/master
| 2021-01-19T12:30:54.054027
| 2017-04-27T22:52:55
| 2017-04-27T22:52:55
| 82,319,529
| 1
| 0
| null | 2017-04-25T11:25:23
| 2017-02-17T17:06:53
|
JavaScript
|
UTF-8
|
R
| false
| false
| 3,960
|
r
|
server.R
|
# This is the server logic for a Shiny web application.
# You can find out more about building applications with Shiny here:
#
# http://shiny.rstudio.com
#
library(shiny)
library(dplyr)
library(tidyr)
library(httr)
library(ggplot2)
library(scales)
library(grid)
library(RColorBrewer)
library(rmarkdown)
source("./server-functions.R")
source("../static-analysis.R")
# Poll/refresh interval in seconds (overwritten from input$s at runtime).
refresh <- 5
# begin <- Sys.time() - 1200
# Fixed start of the analysed capture window; `end` advances by `refresh`
# on every reactive tick (see output$time_evol below).
begin <- as.POSIXct("2017/04/06 09:24:46 CEST")
end <- begin + refresh
# Cumulative (`mac_df`) and per-interval (`mac_temp`) observations; both
# are mutated from inside the server via `<<-`.
mac_df <- data.frame()
mac_temp <- data.frame()
# Convert a duration in seconds to milliseconds (invalidateLater()
# expects milliseconds).
sec2milis <- function(x) {
  x * 1000
}
# NOTE(review): `delete` does not appear to be used anywhere in this file.
delete <- TRUE
# Shared bar styling reused by the interval plots.
theme <- geom_col(color="#99ccff", fill="#99ccff")
# Shiny server: live-updating MAC-capture dashboard.
# Script-level globals (begin/end/refresh/mac_df/mac_temp) are mutated with
# `<<-` so state persists across reactive invalidations.
# NOTE(review): getAllMacsByTimestamp(), count_macs_interval(),
# plot_date_count(), count_new_devices_interval() and time_between_bursts()
# come from the source()d helper files, not from this file.
shinyServer(function(input, output, session) {

  # Main clock: every `refresh` seconds pull the observations for the
  # current [begin, end) window, append them to the cumulative data set,
  # print the window bounds, then slide the window forward.
  output$time_evol <- renderPrint({
    refresh <<- input$s
    cat(file=stderr(),"Before the API request", "\n")
    invalidateLater(sec2milis(refresh), session)
    mac_temp <<- getAllMacsByTimestamp(begin=begin, end=end)
    # Would not be necessary if the filtering were done at the API.
    # Important: the interval is open on one side ([begin, end)).
    mac_temp <<- mac_temp[mac_temp$time >= begin & mac_temp$time < end,]
    cat(file=stderr(),"rows in temporal list", nrow(mac_temp), "\n")
    cat(file=stderr(),"columns in temporal list", ncol(mac_temp), "\n")
    cat(file=stderr(),"rows in permanent list", nrow(mac_df), "\n")
    cat(file=stderr(),"columns in permanent list", ncol(mac_df), "\n")
    # Renaming must happen before rbind() -- important, don't move:
    # it solves match.names problems in rbind.
    names(mac_temp) <<- c("mac", "device", "ID","time", "type")
    mac_df <<- rbind(mac_df, mac_temp)
    cat(file=stderr(), "names of permanent and temporal dataset",
        names(mac_df), names(mac_temp), "\n")
    cat("Actual annalyse interval from ")
    cat(format(begin, "%X"), " to ")
    cat(format(end, "%X"))
    begin <<- begin + refresh
    end <<- end + refresh
  })

  # MACs per second within the most recent interval only.
  output$macs_per_second <- renderPlot({
    invalidateLater(sec2milis(refresh), session)
    interval_mac_count <- count_macs_interval(mac_temp, "time", "mac", "1 sec")
    plot_date_count(interval_mac_count, "time", "mac_count", "1 sec", theme)
  })

  # MACs per second over everything captured so far.
  output$macs_per_second_total <- renderPlot({
    invalidateLater(sec2milis(refresh), session)
    interval_mac_count <- count_macs_interval(mac_df, "time", "mac", "1 sec")
    plot_date_count(interval_mac_count, "time", "mac_count", "1 sec", theme)
  })

  # Average number of previously-unseen devices in the last interval.
  output$new_devices_interval <- renderPrint({
    invalidateLater(sec2milis(refresh), session)
    new_macs_count <- count_new_devices_interval(mac_temp, "time", "mac", "1 sec")
    new_macs_count[ is.na(new_macs_count) ] <- 0
    cat("New devices in the last interval: ", (sum(new_macs_count[, 2]) / nrow(new_macs_count)))
  })

  # Placeholder output -- the body is entirely commented-out scaffolding.
  output$time_per_mac <- renderPlot({
    # ## straw
    # getMinOrMax <- function(mac, data, min_or_max, mac_col, time_col){
    #  min_or_max(data[data[mac_col]==mac, time_col])
    # }
    # mapply(f, unlist(lista_macs), MoreArgs=list(data=macs, mac_col="mac", min_or_max=min, time_col="time"))
    # ##
    # x <- runif(n_distinct(mac_df$MAC), min = 0, max = as.numeric(end-begin)/15)
    # plot(x)
  })

  # Amount of unique MACs in the interval.
  output$new_macs_per_second <- renderPlot({
    invalidateLater(sec2milis(refresh), session)
    new_macs_count <- count_new_devices_interval(mac_temp, "time", "mac", "1 sec")
    plot_date_count(new_macs_count, "time", "dev_count", "1 sec", theme)
  })

  # Histogram of average seconds between bursts, per MAC.
  # NOTE(review): hist() has no `fill` argument -- it is forwarded via
  # `...` and is expected to trigger a "not a graphical parameter" warning.
  output$time_between_bursts <- renderPlot({
    invalidateLater(sec2milis(refresh), session)
    t_bursts <- time_between_bursts(mac_df, "mac", "time")
    hist(as.numeric(t_bursts$avg_secs), main="Average time between bursts", col="#99ccff", fill="#99ccff")
  })

  # Generates the static report and embeds it into the page.
  observeEvent(input$static_annalyse, {
    render("../static-report.Rmd")
    insertUI(
      selector = "#content_iframe",
      where = "beforeEnd",
      ui = includeHTML("../static-report.html")
    )
    session$sendCustomMessage(type = 'shiny_message', "Annalyse ready")
  })
})
|
67494dec08e469c2b8d70b7f5c24987974f5ca98
|
f5fdbe59345699e686537eb1140c33b3b017eb9b
|
/man/map2.Rd
|
e854ac670ed30d1117f38fc09dce7f11562c75be
|
[] |
no_license
|
jonasbhend/geoutils
|
19f792ad42789bb3d1dea0f21341f0abe412610a
|
1fdafe1404e6320176ca072afe50cb0035853761
|
refs/heads/master
| 2021-01-21T04:25:06.651578
| 2016-07-28T07:10:29
| 2016-07-28T07:10:29
| 18,552,441
| 1
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 493
|
rd
|
map2.Rd
|
% Generated by roxygen2 (4.0.1): do not edit by hand
\name{map2}
\alias{map2}
\title{Addition to plot proper map without borders}
\usage{
map2(interior = F, add = T, ...)
}
\arguments{
\item{interior}{logical, should country borders be plotted? defaults to FALSE}
\item{add}{logical, should map be added to existing plot? defaults to TRUE}
\item{...}{Additional arguments passed to map}
}
\description{
Plots map without borders and adds missing lakes and inland seas
}
\keyword{utilities}
|
f8ef19ffaf8a9200330333646e9d85f422749e32
|
46839194e5859098f71638b9332317313e34d888
|
/tests/testthat.R
|
12b13a930317eb93d36e16a0e5c44cec6052e65d
|
[] |
no_license
|
kaneplusplus/envi
|
f07c7bdad9e4f4692f82b8926800a3c431c1d224
|
be440c152036c8f5098d6c57cd3037c3f3154925
|
refs/heads/master
| 2020-11-26T11:40:02.235934
| 2020-01-05T21:56:48
| 2020-01-05T21:56:48
| 229,060,785
| 1
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 54
|
r
|
testthat.R
|
# Test entry point: run the envi package's testthat suite.
library(testthat)
library(envi)
# Executes every test file found under the "testthat" directory.
test_dir("testthat")
|
090b2381b2b7d5ae402a6d6c2d55e01c41bc6a0c
|
e189d2945876e7b372d3081f4c3b4195cf443982
|
/man/icevision_CoarseDropout.Rd
|
e90010466259df8c7b1e5cceeb292203a28a36fa
|
[
"Apache-2.0"
] |
permissive
|
Cdk29/fastai
|
1f7a50662ed6204846975395927fce750ff65198
|
974677ad9d63fd4fa642a62583a5ae8b1610947b
|
refs/heads/master
| 2023-04-14T09:00:08.682659
| 2021-04-30T12:18:58
| 2021-04-30T12:18:58
| 324,944,638
| 0
| 1
|
Apache-2.0
| 2021-04-21T08:59:47
| 2020-12-28T07:38:23
| null |
UTF-8
|
R
| false
| true
| 1,099
|
rd
|
icevision_CoarseDropout.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/icevision_albumentations.R
\name{icevision_CoarseDropout}
\alias{icevision_CoarseDropout}
\title{CoarseDropout}
\usage{
icevision_CoarseDropout(
max_holes = 8,
max_height = 8,
max_width = 8,
min_holes = NULL,
min_height = NULL,
min_width = NULL,
fill_value = 0,
mask_fill_value = NULL,
always_apply = FALSE,
p = 0.5
)
}
\arguments{
\item{max_holes}{max_holes}
\item{max_height}{max_height}
\item{max_width}{max_width}
\item{min_holes}{min_holes}
\item{min_height}{min_height}
\item{min_width}{min_width}
\item{fill_value}{fill_value}
\item{mask_fill_value}{mask_fill_value}
\item{always_apply}{always_apply}
\item{p}{p}
}
\value{
None
}
\description{
CoarseDropout of the rectangular regions in the image.
}
\section{Targets}{
image, mask
}
\section{Image types}{
uint8, float32
}
\section{Reference}{
| https://arxiv.org/abs/1708.04552 | https://github.com/uoguelph-mlrg/Cutout/blob/master/util/cutout.py | https://github.com/aleju/imgaug/blob/master/imgaug/augmenters/arithmetic.py
}
|
bcf6d67e49481481824fb880aaa604cd0c071b55
|
4cecc8cc52436a08674442d4df18b25234e0cbfa
|
/man/nonparam.Hankel.rd
|
bcecfddf2e3f91dc87734e3c7049cbcced8cdb8e
|
[] |
no_license
|
anjaweigel/mixComp_package
|
3be8e19eff9a943dadb3e2bb755954f21219d3c4
|
eb27f7ec39fc1e5bdaf5fe4a6e4b2a8f29a16254
|
refs/heads/master
| 2022-12-07T17:35:08.432093
| 2020-08-26T09:07:31
| 2020-08-26T09:07:31
| 279,328,281
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 6,309
|
rd
|
nonparam.Hankel.rd
|
\name{nonparamHankel}
\alias{nonparamHankel}
\alias{print.hankDet}
\alias{plot.hankDet}
\title{
Estimate Mixture Complexity Based on Hankel Matrix
}
\description{
Estimation of a mixture's complexity based on estimating the determinant of the Hankel matrix of the moments of the mixing distribution. The estimated determinants can be scaled and/or penalized.
}
\usage{
nonparamHankel(obj, j.max = 10, pen.function = NULL, scaled = FALSE,
B = 1000, ...)
\method{print}{hankDet}(x, ...)
\method{plot}{hankDet}(x, type = "b", xlab = "j", ylab = NULL, mar = NULL,
ylim = c(min(0, min(obj)), max(obj)), ...)
}
\arguments{
\item{obj}{object of class \code{datMix}.}
\item{j.max}{integer specifying the maximal number of components to be considered.}
\item{pen.function}{a function with arguments \code{j} and \code{n} specifying the penalty added to the determinant value given sample size \eqn{n} and the currently assumed complexity \eqn{j}. If left empty no penalty will be added. If non-empty and \code{scaled} is \code{TRUE}, the penalty function will be added after the determinants are scaled.}
\item{scaled}{logical specifying whether the vector of estimated determinants should be scaled.}
\item{B}{integer specifying the number of bootstrap replicates used for scaling of the determinants. Ignored if \code{scaled} is \code{FALSE}.}
\item{x}{object of class \code{hankDet}.}
\item{type}{character denoting type of plot, see, e.g. \code{\link[graphics]{lines}}. Defaults to \code{"b"}.}
\item{xlab,ylab}{labels for the x and y axis with defaults (the default for \code{ylab} is created within the function, if no value is supplied).}
\item{mar}{numerical vector of the form c(bottom, left, top, right) which gives the number of lines of margin to be specified on the four sides of the plot, see \code{\link[graphics]{par}}.}
\item{ylim}{range of y values to use.}
\item{\dots}{
\describe{
\item{in \code{nonparamHankel()}:}{further arguments passed to the \code{\link[boot]{boot}} function if \code{scaled} is \code{TRUE}.}
\item{in \code{plot.hankDet()}:}{further arguments passed to \code{\link[base]{plot}}.}
\item{in \code{print.hankDet()}:}{further arguments passed to \code{\link[base]{print}}.}
}}
}
\details{
Define the \eqn{complexity} of a finite mixture \eqn{F} as the smallest integer \eqn{p}, such that its pdf/pmf \eqn{f} can be written as
\deqn{f(x) = w_1*g(x;\theta _1) + \dots + w_p*g(x;\theta _p).}
\code{nonparamHankel} estimates \eqn{p} by iteratively increasing the assumed complexity \eqn{j} and calculating the determinant of the \eqn{(j+1)}x\eqn{(j+1)} Hankel matrix made up of the first \eqn{2j} raw moments of the mixing distribution. As shown by Dacunha-Castelle & Gassiat (1997), once the correct complexity is reached (i.e. for all \eqn{j >= p}), this determinant is zero.
This suggests an estimation procedure for \eqn{p} based on initially finding a consistent estimator of the moments of the mixing distribution and then choosing the estimator \eqn{estim_p} as the value of \eqn{j} which yields a sufficiently small value of the
determinant. Since the estimated determinant is close to 0 for all \eqn{j >= p}, this could lead to choosing \eqn{estim_p} rather larger than the true value. The function therefore returns all estimated determinant values corresponding to complexities up to \code{j.max},
so that the user can pick the lowest \eqn{j} generating a sufficiently small determinant. In addition, the function allows the inclusion of a penalty term as a function of the sample size \code{n} and the currently assumed complexity \code{j} which will be added to the determinant value (by supplying \code{pen.function}), and/or scaling of the determinants (by setting \code{scaled = TRUE}). For scaling, a nonparametric bootstrap is used to calculate the covariance of the estimated determinants, with \code{B} being the size of the bootstrap sample. The inverse of the square root of this covariance matrix (i.e. the matrix \eqn{S^{-1}} such that \eqn{A = SS}, where \eqn{A} is the covariance matrix) is then multiplied with the estimated determinant vector to get the scaled determinant vector.
For a thorough discussion of the methods that can be used for the estimation of the moments see the details section of \code{\link{datMix}}.
}
\value{
The vector of estimated determinants (optionally scaled and/or penalized), given back as an object of class \code{hankDet} with the following attributes:
\item{scaled}{logical indicating whether the determinants are scaled.}
\item{pen }{logical indicating whether a penalty was added to the determinants.}
\item{dist }{character string stating the (abbreviated) name of the component distribution, such that the function \code{ddist} evaluates its density function and \code{rdist} generates random numbers.}
}
\references{
D. Dacunha-Castelle and E. Gassiat, "The estimation of the order of a mixture model", Bernoulli, Volume 3, Number 3, 279-299, 1997.
}
\seealso{
\code{\link{paramHankel}} for a similar approach which estimates the component weights and parameters on top of the complexity,
\code{\link{datMix}} for the creation of the \code{datMix} object.
}
\examples{
## create 'Mix' object
geomMix <- Mix("geom", w = c(0.1, 0.6, 0.3), prob = c(0.8, 0.2, 0.4))
## create random data based on 'Mix' object (gives back 'rMix' object)
set.seed(1)
geomRMix <- rMix(1000, obj = geomMix)
## create 'datMix' object for estimation
# explicit function giving the estimate for the j^th moment of the
# mixing distribution, needed for Hankel.method "explicit"
explicit.fct.geom <- function(dat, j){
1 - ecdf(dat)(j - 1)
}
## generating 'datMix' object
geom.dM <- RtoDat(geomRMix, Hankel.method = "explicit",
Hankel.function = explicit.fct.geom)
## function for penalization
pen <- function(j, n){
(j*log(n))/(sqrt(n))
}
## estimate determinants
set.seed(1)
geomdets_pen <- nonparamHankel(geom.dM, pen.function = pen, j.max = 5)
plot(geomdets_pen, main = "Three component geometric mixture")
}
\keyword{cluster}
|
e0a8c3ad91745dd7dac7554c7248d2df107cf52f
|
5ca9ef906adb4fea0e442717ea71cd582d065d79
|
/plot3.R
|
5f4df5bf9d918012a4ca031b509368e522fc4a7b
|
[] |
no_license
|
radasian/ExData_Plotting1
|
c555b2bab492b83105ca7ace20163e8ae80c5164
|
76e759ea21a5081b00d6ac9d34ba3aa18e88ab16
|
refs/heads/master
| 2021-01-17T11:24:48.800500
| 2015-02-05T08:41:05
| 2015-02-05T08:41:05
| 30,292,802
| 0
| 0
| null | 2015-02-04T10:08:10
| 2015-02-04T10:08:10
| null |
UTF-8
|
R
| false
| false
| 1,411
|
r
|
plot3.R
|
# You should first set your working directory using a command similar to the
# below, adjusted for your setup:
#setwd('d:/coursera/Exploratory Data Analysis')

# Load the data via a script holding the loading steps common to all plot
# scripts; any change to that part only needs to be made in one place.
# NOTE: loadData.R is expected to define `reduset` (not visible here).
source('loadData.R')

# Delete the target PNG file if it exists (else the procedure will fail)
unlink('plot3.png')

# Open a png graphics device; the file is created in the working directory.
png(filename = "plot3.png",
    width = 480, height = 480, units = "px", pointsize = 12,
    bg = "white", res = NA, family = "", restoreConsole = TRUE,
    type = c("windows", "cairo", "cairo-png"))

# BUG FIX: the original had a bare `plot.new` (no parentheses), which only
# references the function object instead of calling it; plot() below opens
# the plot frame itself, so the stray line has been removed.

# Create the basic plot and add the first data series
plot(x = reduset$datetime, y = reduset$Sub_metering_1,
     type = "l",
     xlab = "",
     ylab = "Energy sub metering")

# Add the second and third data series
lines(x = reduset$datetime, y = reduset$Sub_metering_2, col = "red")
lines(x = reduset$datetime, y = reduset$Sub_metering_3, col = "blue")

# Add the legend
legend("topright",
       c("Sub_metering_1", "Sub_metering_2", "Sub_metering_3"),
       col = c("black", "red", "blue"),
       lty = 1)

# Switch off (close) the output device, which writes the file
dev.off()
|
75a503eb8d99e1e46dc9bd8ce014cfd20ea497dc
|
5279da0a6b4687c4f87b4ef60dff7eaef6d7af99
|
/Plot1.R
|
e70626d2305c643d7c363981c77488b7028443a7
|
[] |
no_license
|
pennychase/ExData_Plotting2
|
f3e9462547025163de1d254f124c3f5110b475b6
|
542d253e025d10c39fa4881fc83e1958b5ffc4d9
|
refs/heads/master
| 2016-09-06T05:38:41.153818
| 2014-08-21T23:18:15
| 2014-08-21T23:18:15
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,849
|
r
|
Plot1.R
|
## Question 1: Have total emissions from PM2.5 decreased in the United
## States from 1999 to 2008?
##
## Reads the National Emissions Inventory (NEI) data for 1999, 2002, 2005
## and 2008, sums emissions per year, and draws a base-graphics time series
## of the yearly totals into Plot1.png.

# Set working directory
setwd("~/Documents/MOOCs/Data Science Specialization/Course4_Exploratory-Data-Analysis/Projects/ExData_Plotting2")

# The data are assumed to have been downloaded to ./exdata-data-NEI_data;
# read the observations and the source classification codes.
NEI <- readRDS("./exdata-data-NEI_data/summarySCC_PM25.rds")
SCC <- readRDS("./exdata-data-NEI_data/Source_Classification_Code.rds")

# Yearly emission totals, converted to a data frame with a proper Date column.
yearly_totals <- tapply(NEI$Emissions, NEI$year, sum)
totalEmissions <- data.frame(Year = as.Date(names(yearly_totals), "%Y"),
                             Emissions = as.vector(yearly_totals))

# Render the time series (type = "l" draws lines) straight to a PNG file.
png(file = "Plot1.png")
with(totalEmissions,
     plot(Year, Emissions, type = "l", xlab = "Year",
          ylab = "Emissions (tons)",
          main = "Total Emissions from All Sources, 1999-2008"))
dev.off()

###
### The same figure via ggplot2 (the project called for a base plot).
###
library(ggplot2)
emissions_plot <- ggplot(NEI, aes(year, Emissions))
# stat_summary() aggregates the y values (Emissions) per year; drawing both
# lines and points makes explicit which years have data.
emissions_plot + stat_summary(fun.y = sum, geom = "line") +
  stat_summary(fun.y = sum, geom = "point")

###
### Answer
###
### From the graph we can see that total emissions declined from over
### 7,000,000 tons in 1999 to below 3,500,000 tons in 2008.
|
f5ce6dbd12368f8019d97bd6cc828c7e9cb35298
|
7f0d7049dcb95857fe407abd2d4c72e7588092cf
|
/predictsFunctions/R/CorrectSamplingEffort.R
|
01e616163d64a386a1f988f8c5547db9b2fe6d50
|
[] |
no_license
|
timnewbold/predicts-demo
|
4f2f3794243c90a64031409fb17b1a99068531a0
|
1a4f1b9c4d08d63c1bcfd6dbd1b501f5652417f7
|
refs/heads/master
| 2023-05-28T22:20:18.735109
| 2023-05-22T19:03:28
| 2023-05-22T19:03:28
| 154,857,571
| 3
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 970
|
r
|
CorrectSamplingEffort.R
|
# Normalize sampling effort and effort-sensitive measurements.
#
# Args:
#   diversity: data.frame with columns Sampling_effort, SS (source id),
#              Diversity_metric_is_effort_sensitive (logical) and
#              Measurement.
#
# Returns: the data.frame with Sampling_effort imputed (NA -> 1) and
#          rescaled to a per-source maximum of 1, and effort-sensitive
#          Measurement values divided by the rescaled effort.
CorrectSamplingEffort <-
  function(diversity) {
    # Assume that any missing values of sample effort mean equal sampling
    # effort was applied.
    missing <- is.na(diversity$Sampling_effort)
    cat('Correcting', sum(missing), 'missing sampling effort values\n')
    diversity$Sampling_effort[missing] <- 1

    # Rescale sampling effort to a maximum of 1 within each source (SS).
    # BUG FIX: the original used do.call('c', tapply(...)), which returns
    # values grouped by the factor levels of SS; assigning that back
    # positionally scrambles rows whenever the data are not already sorted
    # by SS level. ave() performs the same per-group rescaling while
    # preserving row order.
    cat('Rescaling sampling effort\n')
    diversity$Sampling_effort <- ave(diversity$Sampling_effort, diversity$SS,
                                     FUN = function(se) se / max(se))

    # Divide effort-sensitive measurements by the (rescaled) effort.
    sensitive <- diversity$Diversity_metric_is_effort_sensitive
    cat('Correcting', sum(sensitive), 'values for sensitivity to sampling',
        'effort\n')
    diversity$Measurement[sensitive] <- diversity[sensitive, 'Measurement'] /
      diversity[sensitive, 'Sampling_effort']
    return (diversity)
  }
|
f7ed2a6bce307081ade59dc61f82a991f1c0b558
|
c8a22a50238433f5db26db1e78c852aad1343c90
|
/week_05/day_5/homework.R
|
e33ff875b5b4e1a75e8deabe18d66b94f40d4248
|
[] |
no_license
|
abbygailju/codeclan_homework_AbbyU
|
a54f29a5e6a24f8d1e45ea83c830e93382965f59
|
fe57eafe868cc32a8274b45ebfd814470263ff51
|
refs/heads/main
| 2023-02-23T00:52:06.897527
| 2021-01-20T22:07:03
| 2021-01-20T22:07:03
| 305,484,282
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 3,129
|
r
|
homework.R
|
library(tidyverse)
library(shiny)
library(shinythemes)
library(DT)
# UI definition. Fixes vs. the original:
#  * `themes =` is not a fluidPage() argument -- the shinytheme object was
#    being dropped into the page as a stray HTML attribute; the correct
#    argument name is `theme =`.
#  * removed the trailing comma after mainPanel(...), which makes R raise
#    "argument is empty" when sidebarLayout() is evaluated.
ui <-
  fluidPage(
    theme = shinytheme("superhero"),
    titlePanel("Differences in cuisine between Indonesia and the British Isles"),
    sidebarLayout(
      sidebarPanel(
        h5("CulinaryDB is a database of recipes collected from multiple websites. Here I chose to analyse recipes and their constituent ingredients from the British Isles and Indonesia. I hope the data shows I'm not biased when I say that Indonesian food has more flavour - just look at the type of ingredients used!"),
        radioButtons("choice_of_cuisines",
                     "British or Indonesian?",
                     choices = c("British", "Indonesian")),
        actionButton("cuisine_type", "Tell me about the cuisine!"),
        sliderInput("slider",
                    "Group by how many ingredients?",
                    min = 2,
                    max = 20,
                    value = 5),
        sliderInput("ingredtypes",
                    "How many of the most used ingredients would you like to see?",
                    min = 3,
                    max = 15,
                    value = 10),
        tags$a(href = "https://cosylab.iiitd.edu.in/culinarydb/basic_stats.html", "Click here to find out more!")
      ),
      mainPanel(
        tabsetPanel(type = "tabs",
                    tabPanel("Ingredients",
                             h3("These two plots show the number of ingredients in the recipes collected from CulinaryDB"),
                             plotOutput("indonesian_ingredient_numbers")),
                    tabPanel("Types of ingredients",
                             h3("These are the most used ingredients for the recipes featured"),
                             plotOutput("indonesian_ingredient_types")),
                    tabPanel("Recipes",
                             h3("These are the recipes"),
                             dataTableOutput("recipes")))
      )
    )
  )
# Server logic. The reactive plots are driven by the "Tell me about the
# cuisine!" action button; the data table reacts directly to the radio
# buttons.
# NOTE(review): `number_of_ingredients` and `recipes_joined` are not
# defined in this file -- they must already exist in the global environment
# (e.g. loaded by a companion script) for the app to run; confirm.
server <- function(input, output){
  # Histogram of per-recipe ingredient counts, rebuilt on button press.
  outputplot <- eventReactive(input$cuisine_type, {
    number_of_ingredients %>%
      filter(cuisine == input$choice_of_cuisines) %>%
      ggplot() +
      aes(x = ingrednum) +
      geom_histogram(binwidth = input$slider, fill = "#F49719") +
      labs(x = "Number of Ingredients in each recipe", y = "Number of recipes in the database")
  })
  output$indonesian_ingredient_numbers <- renderPlot({
    outputplot()
  })
  # Top-N most used ingredients for the selected cuisine.
  outputplottab <- eventReactive(input$cuisine_type, {
    recipes_joined %>%
      filter(cuisine == input$choice_of_cuisines) %>%
      group_by(aliased_ingredient_name) %>%
      summarise(num_recipes = length((entity_id))) %>%
      slice_max(num_recipes, n = input$ingredtypes) %>%
      ggplot() +
      aes(x = reorder(aliased_ingredient_name, -num_recipes), y = num_recipes, fill = aliased_ingredient_name) +
      geom_col() +
      coord_flip() +
      labs(x = "Ingredient", y = "Number of recipes featured") +
      scale_fill_discrete("Type of ingredient")
  })
  output$indonesian_ingredient_types <- renderPlot({
    outputplottab()
  })
  # Raw recipe table for the selected cuisine.
  output$recipes <- renderDataTable({
    number_of_ingredients %>%
      filter(cuisine == input$choice_of_cuisines)
  })
}

# Launch the application.
shinyApp(ui = ui, server = server)
|
ab496868502c3b4178ba6a46887fc26fd8986f76
|
a11470a5ca9a46b6d723bfd4aa1c5f40838649d8
|
/dataset_structures.R
|
204805d2766165bef54560b2dc10eeee95ff4892
|
[] |
no_license
|
julianhatwell/interpret_basics_auxr
|
3c2f9393c291f2e3228e048de3e7d9810217b905
|
7564bf89c02374507ef37edce828311beece1347
|
refs/heads/master
| 2021-05-10T12:40:59.193357
| 2020-10-24T06:20:40
| 2020-10-24T06:20:40
| 118,448,401
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 3,066
|
r
|
dataset_structures.R
|
# Compute structural / complexity measures for a named dataset.
#
# Args:
#   dname:     character name of a data.frame retrievable via get().
#   class_col: name of the class (label) column in that data.frame.
#   what:      which measure families to compute:
#              "shp" shape summary, "bal" class balance, "dim"
#              dimensionality, "lnr" linearity, "ovr" feature overlap,
#              "nei" neighborhood, "nwk" network measures (the ECoL-based
#              ones can be slow or memory hungry on large data).
#
# Returns: a named numeric vector concatenating all requested measure
#          groups; failed ECoL computations contribute NA entries.
dataset_structure <- function(dname, class_col,
                              what = c("shp", "bal", "dim",
                                       "lnr", "ovr",
                                       "nei", "nwk")) {
  library(ECoL)
  library(nFactors)
  library(caret)
  library(vcd)

  results <- numeric(0)
  dat <- get(dname)
  fmla <- as.formula(paste(class_col, "~ ."))

  if ("shp" %in% what) {
    # NOTE(review): sapply(dat, class) can return multi-element classes
    # (e.g. c("ordered", "factor")), which would be missed by the "factor"
    # comparison below -- confirm inputs only use plain factor columns.
    cats <- sapply(dat, class)
    facs <- names(dat)[cats == "factor" & names(dat) != class_col]
    nums <- names(dat)[cats %in% c("numeric", "integer") & names(dat) != class_col]

    # Number of factor columns significantly associated with the class
    # (chi-squared p-value <= 0.05).
    asso <- 0
    for (f in facs) {
      # assocstats() returns the component "chisq_tests"; the original
      # spelling "chisq_test" only resolved through $ partial matching.
      if (assocstats(table(dat[[f]], dat[[class_col]]))$chisq_tests[2, 3] <= 0.05) {
        asso <- asso + 1
      }
    }

    # Estimated number of principal components among the numeric columns
    # (scree test vs. Kaiser criterion, whichever is larger), computed on
    # Box-Cox-transformed, centred and scaled data.
    prnc <- 0
    if (length(nums) > 0) {      # braces added; the original relied on an
      if (length(nums) == 1) {   # unbraced outer if()
        prnc <- 1
      } else {
        preproc <- preProcess(dat[nums], method = c("BoxCox", "center", "scale"))
        trans <- predict(preproc, dat[nums])
        prnc <- max(nScree(cor(trans))$components,
                    sum(eigen(cor(trans))$values >= 1.0))
      }
    }

    shp <- c(n_row = nrow(dat),
             n_col = ncol(dat),
             n_facs = length(facs),
             n_num = length(nums),
             asso = asso,
             prnc = prnc)
    print(shp)
    results <- c(results, shp)
  }

  if ("bal" %in% what) {
    bal <- tryCatch(balance(fmla, dat),
                    error = function(e) c(C1 = NA, C2 = NA))
    print(paste("balance measures:", dname))
    print(bal)
    results <- c(results, bal)
  }

  if ("dim" %in% what) {
    dmn <- tryCatch(dimensionality(fmla, dat),
                    error = function(e) c(T2 = NA, T3 = NA, T4 = NA))
    print(paste("dimensionality measures:", dname))
    print(dmn)
    results <- c(results, dmn)
  }

  if ("lnr" %in% what) {
    # Takes a long time to run.
    lnr <- tryCatch(linearity(fmla, dat),
                    error = function(e) c(L1 = NA, L2 = NA, L3 = NA))
    print(paste("linearity measures:", dname))
    print(lnr)
    results <- c(results, lnr)
  }

  if ("ovr" %in% what) {
    # BUG FIX: the original handler was
    #   function(e) return(F1 = NA, F1v = NA, ...)
    # -- return() takes a single value, so the handler itself would have
    # errored; the fallback values must be wrapped in c().
    ovr <- tryCatch(overlapping(fmla, dat),
                    error = function(e) c(F1 = NA, F1v = NA, F2 = NA,
                                          F3 = NA, F4 = NA))
    print(paste("overlapping measures:", dname))
    print(ovr)
    results <- c(results, ovr)
  }

  if ("nei" %in% what) {
    # Requires a very large vector.
    nei <- tryCatch(neighborhood(fmla, dat),
                    error = function(e) c(N1 = NA, N2 = NA, N3 = NA,
                                          N4 = NA, T1 = NA, LSCAvg = NA))
    print(paste("neighbourhood measures:", dname))
    print(nei)
    results <- c(results, nei)
  }

  if ("nwk" %in% what) {
    # Requires a very large vector.
    nwk <- tryCatch(network(fmla, dat),
                    error = function(e) c(Density = NA, ClsCoef = NA,
                                          Hubs = NA))
    print(paste("network measures:", dname))
    print(nwk)
    results <- c(results, nwk)
  }

  return(results)
}
|
9d34e899fa904765dc94ae09463b6e92a07fc97b
|
dd3f117ff7bb9d51d22b6bf7f5b9e14a2c5f3640
|
/SLDSR/scripts/plot_LDSC.R
|
e58c1e0380f2b9e74f6d66dda7a0fd3b019c0b88
|
[] |
no_license
|
hansenlab/egtex_brain_wgbs
|
a87db299b5a01fbb7fef6d7e142cf48b2fabd3f9
|
b9d96af6d0b172d2da9979fc8993ec282c01e71d
|
refs/heads/master
| 2023-03-24T00:41:15.658170
| 2021-03-24T22:18:55
| 2021-03-24T22:18:55
| 276,696,245
| 1
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 16,005
|
r
|
plot_LDSC.R
|
# Plot LDSC results for 'adjusting for baseline' analyses
# Peter Hickey
# 2020-05-22
# NOTE: Run with module load conda_R/3.6.x
# Setup ------------------------------------------------------------------------
library(GenomicRanges)
library(readr)
library(dplyr)
library(ggplot2)
library(scales)
library(gplots)
library(cowplot)
library(here)
library(bsseq)
library(BSgenome.Hsapiens.UCSC.hg38)
# Brain features ---------------------------------------------------------------
# Genomic feature sets produced upstream; used below with width()/length()/
# overlapsAny(), so presumably a named list of GRanges — TODO confirm.
categories <- readRDS(here("SLDSR", "objects", "eGTEx_features.rds"))
# NOTE: Focusing on CG-DMRs, CH-DMRs, and CG-VMRs
categories <- categories[grepl("CG|CH", names(categories)) &
                           grepl("DMR|VMR", names(categories))]
# Record proportion of SNPs and CpGs in each category --------------------------
# Per-category size summary: total/mean/median width and number of regions.
categories_df <- bind_rows(
  lapply(
    X = names(categories),
    FUN = function(x) {
      tibble(
        Category = x,
        `Total width (bp)` = sum(width(categories[[x]])),
        `Mean width (bp)` = mean(width(categories[[x]])),
        `Median width (bp)` = median(width(categories[[x]])),
        n = length(categories[[x]]))
    })) %>%
  arrange(`Total width (bp)`)
stopifnot(nrow(categories_df) == length(categories))
# Per-category proportion of SNPs, read from each category's LDSC .results
# file.  NOTE(review): the 'Height' trait file is used as the source —
# presumably Prop._SNPs is trait-independent; confirm.
snp_prop_table <- bind_rows(
  lapply(
    X = names(categories),
    FUN = function(cn) {
      x <- read_tsv(
        here(
          "SLDSR",
          "output",
          "ldsc",
          paste0(cn, ".Height.results"))) %>%
        filter(Category == "L2_0" | Category == "CNS_0")
      tibble(
        Category = cn,
        `Prop._SNPs` = unlist(x[, "Prop._SNPs"]))
    }))
colnames(snp_prop_table)[2] <- "Proportion of SNPs"
# TODO: Is this actually used anywhere? Should I do the same for CpHs?
# All CpG dinucleotides on the primary hg38 chromosomes (one strand kept).
cpgs <- findLoci(
  pattern = "CG",
  subject = BSgenome.Hsapiens.UCSC.hg38,
  include = paste0("chr", c(1:22, "X", "Y")),
  strand = "*")
cpgs <- unstrand(cpgs[strand(cpgs) == "+"])
# Proportion of all CpGs overlapped by each feature set.
cpg_prop_table <- tibble(
  Category = names(categories),
  `Proportion of CpGs` = sapply(
    X = categories,
    FUN = function(x) {
      sum(overlapsAny(cpgs, x)) / length(cpgs)
    }))
# Combine size, SNP-proportion, and CpG-proportion summaries; Category is
# made an ordered factor sorted by total width (used as plot ordering).
categories_df <- categories_df %>%
  inner_join(snp_prop_table, c("Category" = "Category")) %>%
  inner_join(cpg_prop_table, c("Category" = "Category")) %>%
  arrange(`Total width (bp)`) %>%
  mutate(Category = factor(Category, ordered = TRUE)) %>%
  arrange(Category)
# Trait metadata.  N = total sample size (cases + controls for case/control
# traits); BMI is broken out as its own TraitType2 level; colours encode
# trait type for plotting.
traits_df <- read_csv(here("SLDSR", "tables", "traits_df.csv")) %>%
  mutate(
    N = ifelse(is.na(N_cases), N, N_cases + N_controls),
    TraitType2 = ifelse(Trait == "BMI", "BMI", TraitType)) %>%
  mutate(
    TraitColour = case_when(
      .$TraitType2 == "Additional_phenotype" ~ brewer_pal("qual")(5)[1],
      .$TraitType2 == "Behavioural-cognitive" ~ brewer_pal("qual")(5)[2],
      .$TraitType2 == "Neurological" ~ brewer_pal("qual")(5)[3],
      .$TraitType2 == "Psychiatric" ~ brewer_pal("qual")(5)[4]),
    Trait2Colour = case_when(
      .$TraitType2 == "Additional_phenotype" ~ brewer_pal("qual")(5)[1],
      .$TraitType2 == "Behavioural-cognitive" ~ brewer_pal("qual")(5)[2],
      .$TraitType2 == "Neurological" ~ brewer_pal("qual")(5)[3],
      .$TraitType2 == "Psychiatric" ~ brewer_pal("qual")(5)[4],
      .$TraitType2 == "BMI" ~ brewer_pal("qual")(5)[5]))
# Load data and construct objects ----------------------------------------------
# All LDSC .results files for the selected categories.
fls <- unlist(
  lapply(
    X = names(categories),
    FUN = function(cn) {
      list.files(
        here("SLDSR", "output", "ldsc"),
        pattern = glob2rx(
          paste0(cn, "*.results")),
        full.names = TRUE)
    }))
# Read in files, tidy up, and rbind
x <- bind_rows(
  lapply(
    X = fls,
    FUN = function(fl) {
      suppressMessages(read_tsv(fl)) %>%
        filter(Category == "L2_0" | Category == "CNS_0") %>%
        mutate(
          # Category and Trait are recovered from the file name, which has
          # the form "<category>.<trait>.results".
          Category = sapply(strsplit(basename(fl), "\\."), "[[", 1),
          Trait = sapply(
            strsplit(sub("\\.Phase1\\.results", "", basename(fl)), "\\."), "[[", 2),
          # 95% confidence interval for the enrichment estimate.
          lower = Enrichment - 1.96 * Enrichment_std_error,
          upper = Enrichment + 1.96 * Enrichment_std_error,
          file = fl)
    }))
# Join tidied LDSC output with categories_df
x <- x %>%
  mutate(
    Category = factor(
      x = Category,
      levels = levels(categories_df$Category),
      ordered = TRUE)) %>%
  inner_join(categories_df, by = c("Category" = "Category")) %>%
  inner_join(traits_df, by = c("Trait" = "Trait"))
stopifnot(length(fls) == nrow(x))
# NOTE: Anttila report these traits "had in sufficient evidence of additive
#       heritability for robust analysis" and excluded them from further
#       analysis
x <- x %>%
  filter(Trait != "Agreeableness",
         Trait != "Cardioembolic_stroke",
         Trait != "Large-vessel_disease",
         Trait != "Small-vessel_disease")
# TODO: Remove Conscientiousness and Intracarebral_hemorrhage? Both have huge
#       enrichment SEs.
# x <- x %>%
#   filter(!Trait %in% c("Intracarebral_hemorrhage", "Conscientiousness"))
# Add adjusted P-values
# One-sided P from the coefficient Z-score, Holm-adjusted within each trait;
# *_holm_cutoff records the largest raw P that survives Holm at 0.05 (note:
# max(..., na.rm = TRUE) warns and yields -Inf when no test survives).
x <- x %>%
  group_by(Category, file) %>%
  filter(grepl(Category, file)) %>%
  ungroup() %>%
  mutate(Coefficient_p = pnorm(`Coefficient_z-score`, lower.tail = FALSE)) %>%
  group_by(Trait) %>%
  mutate(
    Coefficient_holm = p.adjust(Coefficient_p, method = "holm"),
    Enrichment_holm = p.adjust(Enrichment_p, method = "holm"),
    Coefficient_holm_cutoff =
      max(Coefficient_p[Coefficient_holm < 0.05], na.rm = TRUE),
    Enrichment_holm_cutoff =
      max(Enrichment_p[Enrichment_holm < 0.05], na.rm = TRUE)) %>%
  ungroup() %>%
  mutate(sig_coef = Coefficient_holm < 0.05) %>%
  arrange(`Total width (bp)`) %>%
  mutate(`Pretty Trait` = gsub("_", " ", Trait)) %>%
  arrange(Category)
# Stratify traits --------------------------------------------------------------
# NOTE: Traits stratified by 'Brain-linked (sig)', 'Brain-linked (non-sig)', or
#       'Non-brain-linked' (stratification defined in 'baseline' analysis)
# A trait is 'Brain-linked (sig)' if any category's Holm-adjusted coefficient
# P is < 0.05; 'Additional_phenotype' traits are re-labelled 'Non-brain-linked'
# regardless of significance.
strata_df <- x %>%
  group_by(Trait) %>%
  summarise(
    strata = ifelse(
      any(Coefficient_holm < 0.05),
      "Brain-linked (sig)",
      "Brain-linked (non-sig)"),
    TraitType2 = unique(TraitType2),
    strata = ifelse(
      TraitType2 == "Additional_phenotype",
      "Non-brain-linked",
      strata)) %>%
  dplyr::select(Trait, strata)
saveRDS(strata_df, here("SLDSR", "objects", "trait_strata_df.rds"))
# NOTE(review): join key left implicit (joins on 'Trait'); consider an
# explicit by = "Trait".
x_stratified <- inner_join(x, strata_df)
# Tables of results ------------------------------------------------------------
# Drop internal/plotting columns, reorder, and rename for the published CSVs.
x_stratified %>%
  dplyr::select(
    -lower, -upper, -n, -`Prop._SNPs`, -file, -TraitType2, -TraitColour,
    -Trait2Colour, -Enrichment_p, -Enrichment_holm, -Enrichment_holm_cutoff,
    -`Mean width (bp)`, -`Median width (bp)`, -`Proportion of CpGs`,
    -Coefficient_holm_cutoff, -sig_coef, -Trait, -N, -N_cases, -N_controls,
    -TraitType) %>%
  dplyr::select(
    `Pretty Trait`, strata, Category, `Total width (bp)`,
    `Proportion of SNPs`, starts_with("Coefficient"), everything()) %>%
  rename(
    Feature = Category,
    Trait = `Pretty Trait`,
    `Proportion of h2` = `Prop._h2`,
    `Proportion of h2 standard error` = `Prop._h2_std_error`,
    Stratum = `strata`) %>%
  write_csv(here("SLDSR", "tables", "LDSC_results.baseline_adjustments.csv"))
# Companion table of trait metadata (one row per trait).
x_stratified %>%
  dplyr::select(`Pretty Trait`, TraitType, N, N_cases, N_controls) %>%
  distinct() %>%
  rename(Trait = `Pretty Trait`, `Type` = `TraitType`) %>%
  write_csv(here("SLDSR", "tables", "annotated_traits_df.csv"))
# Plot limits ------------------------------------------------------------------
# TODO: Choose these empirically? (currently using BrainEpigenome values)
ylim_coefficient_score <- c(-4.5, 9)
ylim_enrichment <- c(-7, 45)
# Plot results for DMRs --------------------------------------------------------
# ct = the DMR categories (everything except VMRs).
ct <- grep("VMR", names(categories), value = TRUE, invert = TRUE)
# Coefficient Z-score
# One panel per trait; filled/larger points mark Holm-significant coefficients.
# NOTE(review): guides(col = FALSE, ...) is deprecated in current ggplot2
# (use guide = "none"); fine for the pinned conda_R/3.6.x environment.
g <- x_stratified %>%
  filter(Category %in% ct) %>%
  arrange(Category) %>%
  ggplot(
    data = .,
    aes(
      x = Category,
      y = `Coefficient_z-score`,
      col = Category,
      shape = sig_coef,
      size = sig_coef)) +
  geom_point() +
  facet_wrap( ~ `Pretty Trait`, ncol = 5) +
  theme_bw() +
  theme(axis.text.x = element_blank(), axis.ticks.x = element_blank()) +
  geom_hline(yintercept = 0, lty = 2) +
  scale_shape_manual(values = c(1, 16)) +
  scale_size_manual(values = c(2, 3)) +
  guides(col = FALSE, shape = FALSE, size = FALSE) +
  scale_color_brewer(palette = "Dark2") +
  coord_cartesian(ylim = ylim_coefficient_score)
ggsave(
  here(
    "SLDSR",
    "figures",
    "Coefficient_Z-score.baseline_adjustments.DMRs.pdf"),
  g,
  height = 6,
  width = 7,
  useDingbats = FALSE)
# Same Z-scores, pooled per stratum instead of per trait.
g <- x_stratified %>%
  filter(Category %in% ct) %>%
  ggplot(
    data = .,
    aes(
      x = Category,
      y = `Coefficient_z-score`,
      col = Category,
      shape = sig_coef,
      size = sig_coef)) +
  geom_jitter(width = 0.3) +
  facet_grid(. ~ strata) +
  theme_bw() +
  theme(axis.text.x = element_blank(), axis.ticks.x = element_blank()) +
  geom_hline(yintercept = 0, lty = 2) +
  scale_shape_manual(values = c(1, 16)) +
  scale_size_manual(values = c(2, 3)) +
  guides(col = FALSE, shape = FALSE, size = FALSE) +
  scale_color_brewer(palette = "Dark2") +
  coord_cartesian(ylim = ylim_coefficient_score)
ggsave(
  here(
    "SLDSR",
    "figures",
    "Coefficient_Z-score.baseline_adjustments.stratified.DMRs.pdf"),
  g,
  height = 4,
  width = 5,
  useDingbats = FALSE)
# Enrichment
# Enrichment estimates +/- 2 SE, brain-linked-significant traits only.
g <- x_stratified %>%
  filter(Category %in% ct) %>%
  filter(strata == "Brain-linked (sig)") %>%
  arrange(Category) %>%
  ggplot(
    data = .,
    aes(
      x = Category,
      y = Enrichment,
      col = Category,
      shape = sig_coef)) +
  geom_point() +
  geom_pointrange(aes(
    ymin = Enrichment - 2 * Enrichment_std_error,
    ymax = Enrichment + 2 * Enrichment_std_error)) +
  facet_wrap( ~ `Pretty Trait`, ncol = 4) +
  theme_bw() +
  theme(axis.text.x = element_blank(), axis.ticks.x = element_blank()) +
  scale_shape_manual(values = c(1, 16)) +
  guides(col = FALSE, shape = FALSE, size = FALSE) +
  scale_color_brewer(palette = "Dark2") +
  geom_hline(yintercept = 0, lty = 2) +
  coord_cartesian(ylim = ylim_enrichment)
ggsave(
  here(
    "SLDSR",
    "figures",
    "Enrichment.baseline_adjustments.DMRs.pdf"),
  g,
  height = 6,
  width = 7,
  useDingbats = FALSE)
# Enrichment pooled per stratum.
g <- x_stratified %>%
  filter(Category %in% ct) %>%
  ggplot(
    data = .,
    aes(
      x = Category,
      y = Enrichment,
      col = Category,
      shape = sig_coef,
      size = sig_coef)) +
  geom_jitter(width = 0.2) +
  facet_grid(. ~ strata, labeller = labeller(sig = label_both)) +
  theme_bw() +
  theme(axis.text.x = element_blank(), axis.ticks.x = element_blank()) +
  geom_hline(yintercept = 0, lty = 2) +
  scale_shape_manual(values = c(1, 16)) +
  scale_size_manual(values = c(2, 3)) +
  guides(col = FALSE, shape = FALSE, size = FALSE) +
  scale_color_brewer(palette = "Dark2") +
  coord_cartesian(ylim = ylim_enrichment)
ggsave(
  here(
    "SLDSR",
    "figures",
    "Enrichment.baseline_adjustments.sig_stratified.DMRs.pdf"),
  g,
  height = 4,
  width = 5,
  useDingbats = FALSE)
# Create legend used in all plot_ldsc.* output
# (A throwaway plot is built solely so its legend can be extracted.)
g <- x %>%
  filter(Category %in% ct) %>%
  arrange(Category) %>%
  ggplot(
    data = .,
    aes(
      x = Category,
      y = -log10(Coefficient_p),
      col = Category,
      shape = sig_coef,
      size = sig_coef)) +
  geom_point() +
  scale_color_brewer(palette = "Dark2") +
  scale_shape_manual(values = c(1, 16)) +
  scale_size_manual(values = c(2, 3))
legend_plot <- ggdraw(plot_grid(NULL, get_legend(g)))
ggsave(
  here("SLDSR", "figures", "Legend.DMRs.pdf"),
  legend_plot,
  height = 6,
  width = 6,
  useDingbats = FALSE)
# Plot results for VMRs --------------------------------------------------------
# Same four plots + legend as the DMR section, but for the VMR categories.
ct <- grep("VMR", names(categories), value = TRUE)
# NOTE: Too many levels for a sane colour palette.
# NOTE(review): the colour vector carries two extra entries ("dodgerBlue",
# "orange") beyond the palette; when length(ct) <= 36 it is longer than the
# names in ct — presumably intentional padding; verify.
colours <- setNames(
  c(Polychrome::palette36.colors(min(length(ct), 36)), "dodgerBlue", "orange"),
  ct)
# Coefficient Z-score
g <- x_stratified %>%
  filter(Category %in% ct) %>%
  arrange(Category) %>%
  ggplot(
    data = .,
    aes(
      x = Category,
      y = `Coefficient_z-score`,
      col = Category,
      shape = sig_coef,
      size = sig_coef)) +
  geom_point() +
  facet_wrap( ~ `Pretty Trait`, ncol = 5) +
  theme_bw() +
  theme(axis.text.x = element_blank(), axis.ticks.x = element_blank()) +
  geom_hline(yintercept = 0, lty = 2) +
  scale_shape_manual(values = c(1, 16)) +
  scale_size_manual(values = c(2, 3)) +
  guides(col = FALSE, shape = FALSE, size = FALSE) +
  scale_colour_manual(values = colours) +
  coord_cartesian(ylim = ylim_coefficient_score)
ggsave(
  here(
    "SLDSR",
    "figures",
    "Coefficient_Z-score.baseline_adjustments.VMRs.pdf"),
  g,
  height = 12,
  width = 14,
  useDingbats = FALSE)
# Z-scores pooled per stratum.
g <- x_stratified %>%
  filter(Category %in% ct) %>%
  ggplot(
    data = .,
    aes(
      x = Category,
      y = `Coefficient_z-score`,
      col = Category,
      shape = sig_coef,
      size = sig_coef)) +
  geom_jitter(width = 0.3) +
  facet_grid(. ~ strata) +
  theme_bw() +
  theme(axis.text.x = element_blank(), axis.ticks.x = element_blank()) +
  geom_hline(yintercept = 0, lty = 2) +
  scale_shape_manual(values = c(1, 16)) +
  scale_size_manual(values = c(2, 3)) +
  guides(col = FALSE, shape = FALSE, size = FALSE) +
  scale_colour_manual(values = colours) +
  coord_cartesian(ylim = ylim_coefficient_score)
ggsave(
  here(
    "SLDSR",
    "figures",
    "Coefficient_Z-score.baseline_adjustments.stratified.VMRs.pdf"),
  g,
  height = 8,
  width = 10,
  useDingbats = FALSE)
# Enrichment
# Enrichment +/- 2 SE, brain-linked-significant traits only.
g <- x_stratified %>%
  filter(Category %in% ct) %>%
  filter(strata == "Brain-linked (sig)") %>%
  arrange(Category) %>%
  ggplot(
    data = .,
    aes(
      x = Category,
      y = Enrichment,
      col = Category,
      shape = sig_coef)) +
  geom_point() +
  geom_pointrange(aes(
    ymin = Enrichment - 2 * Enrichment_std_error,
    ymax = Enrichment + 2 * Enrichment_std_error)) +
  facet_wrap( ~ `Pretty Trait`, ncol = 4) +
  theme_bw() +
  theme(axis.text.x = element_blank(), axis.ticks.x = element_blank()) +
  scale_shape_manual(values = c(1, 16)) +
  guides(col = FALSE, shape = FALSE, size = FALSE) +
  scale_colour_manual(values = colours) +
  geom_hline(yintercept = 0, lty = 2) +
  coord_cartesian(ylim = ylim_enrichment)
ggsave(
  here(
    "SLDSR",
    "figures",
    "Enrichment.baseline_adjustments.VMRs.pdf"),
  g,
  height = 12,
  width = 14,
  useDingbats = FALSE)
# Enrichment pooled per stratum.
g <- x_stratified %>%
  filter(Category %in% ct) %>%
  ggplot(
    data = .,
    aes(
      x = Category,
      y = Enrichment,
      col = Category,
      shape = sig_coef,
      size = sig_coef)) +
  geom_jitter(width = 0.2) +
  facet_grid(. ~ strata, labeller = labeller(sig = label_both)) +
  theme_bw() +
  theme(axis.text.x = element_blank(), axis.ticks.x = element_blank()) +
  geom_hline(yintercept = 0, lty = 2) +
  scale_shape_manual(values = c(1, 16)) +
  scale_size_manual(values = c(2, 3)) +
  guides(col = FALSE, shape = FALSE, size = FALSE) +
  scale_colour_manual(values = colours) +
  coord_cartesian(ylim = ylim_enrichment)
ggsave(
  here(
    "SLDSR",
    "figures",
    "Enrichment.baseline_adjustments.sig_stratified.VMRs.pdf"),
  g,
  height = 8,
  width = 10,
  useDingbats = FALSE)
# Create legend used in all plot_ldsc.* output
# (Throwaway plot built solely so its legend can be extracted.)
g <- x %>%
  filter(Category %in% ct) %>%
  arrange(Category) %>%
  ggplot(
    data = .,
    aes(
      x = Category,
      y = -log10(Coefficient_p),
      col = Category,
      shape = sig_coef,
      size = sig_coef)) +
  geom_point() +
  scale_colour_manual(values = colours) +
  scale_shape_manual(values = c(1, 16)) +
  scale_size_manual(values = c(2, 3))
legend_plot <- ggdraw(plot_grid(NULL, get_legend(g), ncol = 1))
ggsave(
  here("SLDSR", "figures", "Legend.VMRs.pdf"),
  legend_plot,
  height = 12,
  width = 12,
  useDingbats = FALSE)
|
71ddb2eb0f16d6e80d9c0f47a17ec44ac944ce9a
|
062c2f0f3f55b9c9d8aa31120a3520c74573107b
|
/Figure 2/Figure 2B.R
|
ff3f7f1e3e1b279188ca5c979dcaf6f4e80e2efa
|
[] |
no_license
|
ThieryM95/Drug_resistance_data_and_visualisation
|
67a46b9451b28d544692b884e23e75c30e52bec7
|
7acd259f23b3120be1fe1286b63bff75331fe658
|
refs/heads/main
| 2023-04-17T10:28:03.152000
| 2022-06-08T08:10:49
| 2022-06-08T08:10:49
| 458,226,427
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 4,059
|
r
|
Figure 2B.R
|
##################################################################################
# Code to visualize the effect of each factor during the global #
# sensitivity analyses of the spread of parasites resistant to drug A+ B #
# #
# Input: table of the estimated median and interquartile range of selection #
# coefficients estimated during the global sensitivity #
# analyses over the parameter ranges (Data_factors_effect_GSA_ACT.csv) #
# #
# author: thiery Masserey (thiery.masserey@swisstph.ch) #
##################################################################################
# Load the package
library("ggh4x")
library("ggplot2")
# Load the data
# NOTE(review): cluster-specific absolute path; assumes the CSV holds the
# factor value (x), median (M), lower/upper quartiles (L/U), factor name (G)
# and Seasonality in columns 1:5 and 10 — confirm against the source file.
Quantil_final_final <- read.csv(file = "/scicore/home/penny/masthi00/WF_spread/Visulaise_results/Paper/Figure 2-Source data 2.csv", header = TRUE)
Quantil_final_final <- Quantil_final_final[, c(1:5, 10)]
# add data about the drug archetype (single facet label used below)
Quantil_final_final$Drug <- "Short-acting + Long-acting drugs"
# Define the breaks for the x axis (factor values rescaled to Min..Max)
break_y <- c(1, 10, 20, 30, 39)
Label_yy <- c("Min", "", "", "", "Max")
#---- visualise the three most important factors in the non-seasonal setting ----
# select the data
Quantil_final_2 <- Quantil_final_final[Quantil_final_final$Seasonality == "sesonality1", ]
Quantil_final_2 <- Quantil_final_2[Quantil_final_2$G == "Access" | Quantil_final_2$G == "Resistance_Level_long" | Quantil_final_2$G == "Resistance_Level", ]
# Scaling constant applied to line widths and font sizes throughout.
constant<-2
# visualise: median selection coefficient (line) with interquartile ribbon,
# one colour per factor.
PB <- ggplot(data = Quantil_final_2) +
  geom_line(aes(x = x, y = M, color = G), size = 2/constant) +
  geom_ribbon(aes(x = x, ymin = L, ymax = U, fill = G), alpha = 0.1) +
  facet_grid(. ~ Drug) +
  theme_bw() +
  scale_y_continuous(name = "Selection coefficient\n(resistance to the short-acting drug)") +
  scale_x_continuous(name = "Factor values", breaks = break_y, labels = Label_yy) +
  scale_color_manual(
    name = "Factors:",
    values = c(
      "#126429",
      "#709FCD", "#273871"),
    breaks = c("Access", "Resistance_Level", "Resistance_Level_long", "half_life_long", "Fitness", "eir", "half_life_short", "C_max_IC50"),
    labels = c("Access to treatment (%)", "Degree of resistance to the short-acting drug", "Degree of resistance to the long-acting drug", "\nHalf-life\nof drug B", "Fitness cost", "EIR", "\nHalf-life\nof drug A", "Cmax/IC50\nof drug B")) +
  scale_fill_manual(
    name = "Factors:",
    values = c(
      "#117733",
      "#6699CC", "#332288"),
    breaks = c("Access", "Resistance_Level", "Resistance_Level_long", "half_life_long", "Fitness", "eir", "half_life_short", "C_max_IC50"),
    labels = c("Access to treatment (%)", "Degree of resistance to the short-acting drug", "Degree of resistance to the long-acting drug", "\nHalf-life\nof drug B", "Fitness cost", "EIR", "\nHalf-life\nof drug A", "Cmax/IC50\nof drug B")) +
  theme(
    axis.text.x = element_text(size = 15/2),
    axis.text.y = element_text(size = 15/2),
    axis.title.x = element_text(size = 15/2, face = "bold"),
    axis.title.y = element_text(size = 15/2, face = "bold"),
    plot.title = element_text(size = 20/2, hjust = 0.5, face = "bold")) +
  theme(legend.text = element_text(size = 15/2)) +
  theme(legend.title = element_text(size = 15/2, face = "bold")) +
  theme(
    strip.text.x = element_text(size = 15/2, color = "black", face = "bold"),
    strip.text.y = element_text(size = 15/2, color = "black", face = "bold")) +
  # theme(legend.position = "bottom", legend.direction = "vertical")+
  theme(legend.key.size = unit(0.3, "cm"))
PB
# NOTE(review): `PA` and `plot_grid()` (cowplot) are not defined/loaded in
# this script — presumably created by the Figure 2A script run earlier in the
# same session; confirm before running this file standalone.
PLOT<-plot_grid(PA, PB,
  ncol = 1, nrow = 2, rel_widths = c(1, 1), scale = 1, labels = c("A", "B"), label_size = 16/2)
ggsave("/scicore/home/penny/masthi00/WF_spread/Visulaise_results/Paper/Figure 2.pdf",
  plot = PLOT, width = 14, height = 14, device="pdf", units = "cm", dpi = 300)
|
81034c24634e2938ce97655fb82f3834e89781a4
|
b5822b9c2a756f4e540c426e7e84af35dae8caec
|
/rockchalk/R/mcGraph.R
|
445a6f322afd276339fa387e9ac63c704ba12e61
|
[] |
no_license
|
pauljohn32/rockchalk
|
0c75b7a7bc142669efcfabbc70d511f60c3f47e0
|
fc2d3d04396bf89ef020e824f50db3c348e3e226
|
refs/heads/master
| 2022-08-20T02:49:56.898990
| 2022-07-26T01:20:12
| 2022-07-26T01:20:12
| 8,965,635
| 8
| 5
| null | 2022-07-18T00:36:58
| 2013-03-23T04:07:35
|
R
|
UTF-8
|
R
| false
| false
| 8,751
|
r
|
mcGraph.R
|
##' Illustrate multicollinearity in regression, part 1.
##'
##' @description
##' This is a set of functions that facilitates the examination
##' of multicollinearity. Suppose the "true" relationship is
##' y[i] = 0.2 * x1[i] + 0.2 * x2[i] + e
##' where e is Normal(0, stde^2).
##'
##' mcGraph1 draws the 3D regression space, but all of the points
##' are illustrated "in" the flat plane of x1-x2 variables.
##'
##' @details
##' These functions are specialized to a particular purpose. If you
##' just want to draw 3D regressions, look at plotPlane.
##' @name mcGraph1
##' @param x1 a predictor vector
##' @param x2 a predictor vector
##' @param y the dependent variable
##' @param x1lab label for the x1 axis, (the one called "xlab" inside persp)
##' @param x2lab label for the x2 axis, (the one called "ylab" inside persp)
##' @param ylab label for the y (vertical) axis (the one called "zlab" inside persp)
##' @param ... additional parameters passed to persp
##' @export
##' @rdname mcGraph
##' @return mcGraph1 and mcGraph2 return only the perspective matrix
##' from persp. It is returned so that users can add additional embellishments on the 3D plot (can be used with trans3d)
##' @keywords regression hplot
##' @author Paul E. Johnson \email{pauljohn@@ku.edu}
##' @examples
##' set.seed(12345)
##' ## Create data with x1 and x2 correlated at 0.10
##' dat <- genCorrelatedData(rho=.1, stde=7)
##'
##' mcGraph1(dat$x1, dat$x2, dat$y, theta=20, phi=8, ticktype="detailed", nticks=10)
##'
mcGraph1 <- function(x1, x2, y, x1lab, x2lab, ylab, ...) {
  ## Derive axis labels from the calling expressions when none were
  ## supplied, stripping any leading "dat$" prefix (e.g. dat$x1 -> "x1").
  if (missing(x1lab)) {
    x1lab <- gsub(".*\\$", "", deparse(substitute(x1)))
  }
  if (missing(x2lab)) {
    x2lab <- gsub(".*\\$", "", deparse(substitute(x2)))
  }
  if (missing(ylab)) {
    ylab <- gsub(".*\\$", "", deparse(substitute(y)))
  }
  ## Magnified plotting ranges so the points sit inside the box.
  rx1 <- magRange(x1, 1.25)
  rx2 <- magRange(x2, 1.25)
  ry <- magRange(y, 1.5)
  ## Draw the empty 3D box; keep the perspective matrix for trans3d().
  pmat <- perspEmpty(x1 = plotSeq(rx1, 5),
                     x2 = plotSeq(rx2, 5),
                     y = ry, x1lab = x1lab,
                     x2lab = x2lab, ylab = ylab, ...)
  ## Project every observation onto the floor of the box (y fixed at its
  ## minimum) so the data appear "flat" in the x1-x2 plane.
  floorPts <- trans3d(x1, x2, rep(ry[1], length(x1)), pmat = pmat)
  points(floorPts, pch = 16, col = "blue")
  ## Return the perspective matrix so callers can add embellishments.
  invisible(pmat)
}
NULL
##' mcGraph2 draws a 3-D representation of a scatterplot with shadows
##' in the x1-x2 plane. The observations are represented by blue
##' points floating above the x1-x2 plane. If scaley=1, the end result
##' is a scatterplot "cloud" of the y points above the x1-x2 plane,
##' and gray shadows of the points are cast down from the cloud onto
##' the x1-x2 plane itself. This uses persp to make the actual
##' drawing.
##'
##' @param rescaley a single scalar value or a vector of the same
##' length as y.
##' @export
##' @rdname mcGraph
##' @examples
##' set.seed(12345)
##' ## Create data with x1 and x2 correlated at 0.10
##' dat <- genCorrelatedData(rho=.1, stde=7)
##' ## This will "grow" the "cloud" of points up from the
##' ## x1-x2 axis
##' mcGraph2(dat$x1, dat$x2, dat$y, rescaley = 0.0, theta = 0)
##' mcGraph2(dat$x1, dat$x2, dat$y, rescaley = 0.1, theta = 0)
##' mcGraph2(dat$x1, dat$x2, dat$y, rescaley = 0.2, theta = 0)
##' mcGraph2(dat$x1, dat$x2, dat$y, rescaley = 0.3, theta = 0)
##' mcGraph2(dat$x1, dat$x2, dat$y, rescaley = 0.4, theta = 0)
##' mcGraph2(dat$x1, dat$x2, dat$y, rescaley = 0.5, theta = 0)
##' mcGraph2(dat$x1, dat$x2, dat$y, rescaley = 0.6, theta = 0)
##' mcGraph2(dat$x1, dat$x2, dat$y, rescaley = 0.7, theta = 0)
##' mcGraph2(dat$x1, dat$x2, dat$y, rescaley = 0.8, theta = 0)
##' mcGraph2(dat$x1, dat$x2, dat$y, rescaley = 0.9, theta = 0)
##' mcGraph2(dat$x1, dat$x2, dat$y, rescaley = 1, theta = 0)
##'
##' ##rotate this
##' mcGraph2(dat$x1, dat$x2, dat$y, rescaley = 1, theta = 20)
##' mcGraph2(dat$x1, dat$x2, dat$y, rescaley = 1, theta = 40)
##' mcGraph2(dat$x1, dat$x2, dat$y, rescaley = 1, theta = 60)
##' mcGraph2(dat$x1, dat$x2, dat$y, rescaley = 1, theta = 80)
##'
##' ## once they reach the top, make them glitter a while
##' for(i in 1:20){
##' mcGraph2(dat$x1, dat$x2, dat$y, rescaley = runif(length(dat$x1), .9,1.1), theta = 0)
##' }
##'
mcGraph2 <- function(x1, x2, y, rescaley = 1, drawArrows = TRUE, x1lab, x2lab, ylab, ...) {
  ## Default axis labels: the deparsed argument expressions, minus "dat$".
  if (missing(x1lab)) x1lab <- gsub(".*\\$", "", deparse(substitute(x1)))
  if (missing(x2lab)) x2lab <- gsub(".*\\$", "", deparse(substitute(x2)))
  if (missing(ylab)) ylab <- gsub(".*\\$", "", deparse(substitute(y)))
  ## Magnified plotting ranges so points sit inside the box.
  rx1 <- magRange(x1, 1.25)
  rx2 <- magRange(x2, 1.25)
  ry <- magRange(y, 1.5)
  pmat <- perspEmpty(x1 = plotSeq(rx1, 5), x2 = plotSeq(rx2, 5), y = ry,
                     x1lab = x1lab, x2lab = x2lab, ylab = ylab, ...)
  ## Gray "shadows": every observation projected onto the floor of the box.
  shadowPts <- trans3d(x1, x2, ry[1], pmat = pmat)
  ## Observed y values shrunk toward the floor by rescaley (1 keeps the
  ## true heights; 0 collapses the cloud onto the floor).
  liftedY <- rescaley * (y - ry[1]) + ry[1]
  cloudPts <- trans3d(x1, x2, liftedY, pmat = pmat)
  points(shadowPts, pch = 16, col = gray(0.8))
  points(cloudPts, pch = 1, col = "blue")
  if (drawArrows) {
    ## Arrow tips are drawn at 0.8 * the lifted height so they fall short
    ## of the point symbols.
    tipPts <- trans3d(x1, x2, 0.8 * liftedY, pmat = pmat)
    arrows(shadowPts$x, shadowPts$y, tipPts$x, tipPts$y,
           col = "red", lty = 2, lwd = 0.3, length = 0.1)
  }
  ## Return the perspective matrix so callers can add embellishments.
  invisible(pmat)
}
NULL
##' With mcGraph3, the observations are scattered in 3-dimensions, the
##' fitted values are represented by a mesh, and their shadows in the
##' x1-x2 plane are also represented.
##'
##' @param interaction a TRUE or FALSE request for inclusion of the x1-x2 interaction in the regression calculation
##' @param drawArrows TRUE or FALSE, do you want arrows from the plane to observed y?
##' @return mcGraph3 returns a list of 2 objects: 1) the fitted
##' regression model; 2) the perspective matrix used with persp to draw
##' the image.
##' @export
##' @rdname mcGraph
##' @examples
##' set.seed(12345)
##' ## Create data with x1 and x2 correlated at 0.10
##' dat <- genCorrelatedData(rho=.1, stde=7)
##'
##' mcGraph3(dat$x1, dat$x2, dat$y, theta = 0)
##'
##' dat2 <- genCorrelatedData(rho = 0, stde = 7)
##'
##' mcGraph3(dat2$x1, dat2$x2, dat2$y, theta = 0, phi = 10)
##' mcGraph3(dat2$x1, dat2$x2, dat2$y, theta = 30, phi = 10)
##' mcGraph3(dat2$x1, dat2$x2, dat2$y, theta = -30, phi = 10)
##' mcGraph3(dat2$x1, dat2$x2, dat2$y, theta = -30, phi = -10)
##' mcGraph3(dat2$x1, dat2$x2, dat2$y, theta = -30, phi = -15)
##'
##' ## Run regressions with not-strongly correlated data
##' modset1 <- list()
##' for(i in 1:20){
##' dat2 <- genCorrelatedData(rho = .1, stde = 7)
##' summary(lm( y ~ x1 + x2 , data = dat2))
##' modset1[[i]] <- mcGraph3(dat2$x1, dat2$x2, dat2$y, theta = -30)
##' }
##'
##'
##' ## Run regressions with strongly correlated data
##' modset2 <- list()
##' for(i in 1:20){
##' dat2 <- genCorrelatedData(rho = .981, stde = 7)
##' summary(lm( y ~ x1 + x2 , data = dat2))
##' modset2[[i]] <- mcGraph3(dat2$x1, dat2$x2, dat2$y, theta = -30)
##' }
##'
##' dat3 <- genCorrelatedData(rho = .981, stde = 100, beta=c(0.1, 0.2, 0.3, -0.1))
##' mcGraph3(dat3$x1, dat3$x2, dat3$y, theta=-10, interaction = TRUE)
mcGraph3 <- function(x1, x2, y, interaction = FALSE, drawArrows = TRUE,
                     x1lab, x2lab, ylab, ...) {
  ## Magnified plotting ranges so points and fitted plane sit inside the box.
  x1range <- magRange(x1, 1.25)
  x2range <- magRange(x2, 1.25)
  yrange <- magRange(y, 1.5)
  ## Default axis labels: the deparsed argument expressions, with any
  ## leading "dat$" prefix stripped.
  if (missing(x1lab)) x1lab <- gsub(".*\\$", "", deparse(substitute(x1)))
  if (missing(x2lab)) x2lab <- gsub(".*\\$", "", deparse(substitute(x2)))
  if (missing(ylab)) ylab <- gsub(".*\\$", "", deparse(substitute(y)))
  res <- perspEmpty(x1 = plotSeq(x1range, 5), x2 = plotSeq(x2range, 5),
                    y = yrange, x1lab = x1lab, x2lab = x2lab, ylab = ylab, ...)
  ## Gray shadows of the observations on the floor of the box, plus the
  ## observations themselves as open blue circles.
  mypoints1 <- trans3d(x1, x2, yrange[1], pmat = res)
  points(mypoints1, pch = 16, col = gray(0.8))
  mypoints2 <- trans3d(x1, x2, y, pmat = res)
  points(mypoints2, pch = 1, col = "blue")
  ## Fit the regression, optionally including the x1:x2 interaction.
  if (interaction) {
    m1 <- lm(y ~ x1 * x2)
  } else {
    m1 <- lm(y ~ x1 + x2)
  }
  ## Draw the fitted plane (a curved surface when interaction = TRUE) as a
  ## 20 x 20 wireframe mesh of predicted values.
  x1seq <- plotSeq(x1range, length.out = 20)
  x2seq <- plotSeq(x2range, length.out = 20)
  zplane <- outer(x1seq, x2seq, function(a, b) {
    predict(m1, newdata = data.frame(x1 = a, x2 = b))
  })
  for (i in seq_along(x1seq)) {
    lines(trans3d(x1seq[i], x2seq, zplane[i, ], pmat = res), lwd = 0.3)
  }
  for (j in seq_along(x2seq)) {
    lines(trans3d(x1seq, x2seq[j], zplane[, j], pmat = res), lwd = 0.3)
  }
  mypoints4 <- trans3d(x1, x2, fitted(m1), pmat = res)
  ## points(mypoints4)
  ## Arrow tips sit 80% of the way from each fitted value to its observed y.
  ## BUG FIX: this was previously ifelse(fitted(m1) < y, A, B) where A and B
  ## were the identical expression, so the branch was redundant; the single
  ## expression below is exactly equivalent.
  newy <- fitted(m1) + 0.8 * (y - fitted(m1))
  mypoints2s <- trans3d(x1, x2, newy, pmat = res)
  if (drawArrows) {
    arrows(mypoints4$x, mypoints4$y, mypoints2s$x, mypoints2s$y,
           col = "red", lty = 4, lwd = 0.3, length = 0.1)
  }
  ## Return the fitted model and the perspective matrix invisibly.
  invisible(list(lm = m1, res = res))
}
NULL
|
bfc1257730526399458d6fe8c5fe75b9130dbf5d
|
5d690f159266b2c0f163e26fcfb9f9e17a0dc541
|
/rLiDAR/man/LASmetrics.Rd
|
350fcfb2511122eefce1c4abd879060ce402c240
|
[] |
no_license
|
albrizre/spatstat.revdep
|
3a83ab87085895712d7109c813dcc8acb55493e9
|
b6fc1e73985b0b7ed57d21cbebb9ca4627183108
|
refs/heads/main
| 2023-03-05T14:47:16.628700
| 2021-02-20T01:05:54
| 2021-02-20T01:05:54
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 5,734
|
rd
|
LASmetrics.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/LASmetrics.r
\name{LASmetrics}
\alias{LASmetrics}
\title{LiDAR-derived metrics}
\usage{
LASmetrics(LASfile, minht, above)
}
\arguments{
\item{LASfile}{A LAS standard LiDAR data file}
\item{minht}{Use only returns above specified height break, e.g. 1.30 m. Default is 1.37 m.}
\item{above}{Compute covers metrics using specified height break, e.g. 2.5 m. Default is 2 m.}
}
\value{
Returns A matrix with the LiDAR-derived vegetation height and canopy cover metrics (see \emph{cloudmetrics}, in McGaughey, 2014)
}
\description{
Compute LiDAR metrics that statistically describe the LiDAR dataset
}
\examples{
#=======================================================================#
# Example 01: Computing LiDAR metrics for a single LAS file
#=======================================================================#
# Import the LAS data file
LASfile <- system.file("extdata", "LASexample1.las", package="rLiDAR")
# Set the minht and above parameters
minht<-1.37 # meters or feet
above<-2.00 # meters or feet
# LiDAR metrics computation
LiDARmetrics<-LASmetrics(LASfile, minht, above)
#==========================================================================#
# Example 02: Computing Lidar metrics for multiple LAS files within a folder
#==========================================================================#
# Set folder where LAS source files reside
folder=dirname(LASfile)
# Get list of LAS files residing in the folder
LASlist <- list.files(folder, pattern="*.las", full.names=TRUE)
# Set the "minht" and "above" parameters
minht<-1.37 # meters or feet
above<-2.00 # meters or feet
# Create an empty data frame in which to store the LiDAR metrics
getMetrics<-data.frame()
# Set a loop to compute the LiDAR metrics
for ( i in LASlist) {
getMetrics<-rbind(getMetrics, LASmetrics(i, minht, above))}
# Table of the Lidar metrics
LiDARmetrics<-cbind(Files=c(basename(LASlist)), getMetrics)
head(LiDARmetrics)
}
\seealso{
McGaughey, R. 2014. FUSION/LDV: Software for lidar data analysis and visualization. Version 3.41. Seattle, WA: U.S. Department of Agriculture, Forest Service, Pacific Northwest Research Station.
# List of the LiDAR-derived metrics:
\itemize{
\item Total all return count
\item Total first return count
\item Total all return count above \emph{minht}
\item Return 1 count above \emph{minht}
\item Return 2 count above \emph{minht}
\item Return 3 count above \emph{minht}
\item Return 5 count above \emph{minht}
\item Return 6 count above \emph{minht}
\item Return 7 count above \emph{minht}
\item Return 8 count above \emph{minht}
\item Return 9 count above \emph{minht}
\item HMIN - Minimum Height
\item HMAX - Maximum Height
\item HMEAN - Mean height
\item HMOD - Modal height
\item HMEDIAN - Median height
\item HSD - Standard deviation of heights
\item HVAR - Variance of heights
\item HCV - Coefficient of variation of heights
\item HKUR - Kurtosis of Heights
\item HSKE - Skewness of Heights
\item H01TH - 01th percentile of height
\item H05TH - 05th percentile of height
\item H10TH - 10th percentile of height
\item H15TH - 15th percentile of height
\item H20TH - 20th percentile of height
\item H25TH - 25th percentile of height
\item H30TH - 30th percentile of height
\item H35TH - 35th percentile of height
\item H40TH - 40th percentile of height
\item H45TH - 45th percentile of height
\item H50TH - 50th percentile of height
\item H55TH - 55th percentile of height
\item H60TH - 60th percentile of height
\item H65TH - 65th percentile of height
\item H70TH - 70th percentile of height
\item H75TH - 75th percentile of height
\item H80TH - 80th percentile of height
\item H90TH - 90th percentile of height
\item H95TH - 95th percentile of height
\item H99TH - 99th percentile of height
\item CRR - Canopy relief ratio
\item IMIN - Minimum intensity
\item IMAX - Maximum intensity
\item IMEAN - Mean intensity
\item IMOD - Modal intensity
\item IMEDIAN - Median intensity
\item ISD - Standard deviation of intensities
\item IVAR - Variance of heights
\item ICV - Coefficient of variation of intensities
\item IKUR - Kurtosis of intensities
\item ISKE - Skewness of intensities
\item I01TH - 1th percentile of intensity
\item I05TH - 5th percentile of intensity
\item I10TH - 10th percentile of intensity
\item I15TH - 15th percentile of intensity
\item I20TH - 20th percentile of intensity
\item I25TH - 25th percentile of intensity
\item I30TH - 30th percentile of intensity
\item I35TH - 35th percentile of intensity
\item I40TH - 40th percentile of intensity
\item I45TH - 45th percentile of intensity
\item I50TH - 50th percentile of intensity
\item I55TH - 55th percentile of intensity
\item I60TH - 60th percentile of intensity
\item I65TH - 65th percentile of intensity
\item I70TH - 70th percentile of intensity
\item I75TH - 75th percentile of intensity
\item I80TH - 80th percentile of intensity
\item I90TH - 90th percentile of intensity
\item I95TH - 95th percentile of intensity
\item I99TH - 99th percentile of intensity
\item Percentage first returns above \emph{above}
\item Percentage all returns above \emph{above}
\item (All returns above above / Total first returns)*100
\item First returns above \emph{above}
\item All returns above \emph{above}
\item Percentage first returns above mean
\item Percentage first returns above mode
\item Percentage all returns above mean
\item Percentage all returns above mode
\item (All returns above mean / Total first returns)*100
\item (All returns above mode / Total first returns)* 100
\item First returns above mean
\item First returns above mode
\item All returns above mean
\item All returns above mode
}
}
\author{
Carlos Alberto Silva
}
|
8eeb131a3019e6b89fc4cbfb5370510a1269a4eb
|
98ef4dbe50ff5df8de97f58152d7fc1b5065d795
|
/R/cTotal.R
|
201f3c08669303ec3ebf8659d283b90bf021ed10
|
[] |
no_license
|
dmarcondes/rugbypackage
|
85ab7a27127d2fb1a2cf29a283f2cb00392ef519
|
ab0d3542ea1cea33eafa95ece2c5cd645f601d4a
|
refs/heads/master
| 2021-01-20T07:51:08.458615
| 2017-08-22T10:29:17
| 2017-08-22T10:29:17
| 18,717,669
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 70
|
r
|
cTotal.R
|
# Sum a single column of a data table, print it, and return it invisibly.
#
# Backward compatible with the original no-argument call: `data` defaults
# to the global `Game` table and `col` to column 14, so `total()` still
# sums Game[, 14]. The sum is now also returned (invisibly) so callers
# can capture it.
#
# @param data A data frame or matrix (default: the global `Game`).
# @param col  Column index or name to sum (default: 14).
# @return The column sum, invisibly (also printed as a side effect).
total <- function(data = Game, col = 14){
	# use a distinct local name instead of shadowing the function's own name
	col_total <- sum(data[, col])
	print(col_total)
	invisible(col_total)
}
|
9f013b162e5bee2729790747caa5aea85e57c950
|
e6bfd2c5d9db1f6ccde6f8116398c0bf28cca16e
|
/R/Poly_Gibbs_GammaTrace.R
|
6dc262a6499710a446bc8b21dbe68feff3b28ede
|
[] |
no_license
|
zovialpapai/PolyGibbs
|
5fccafef33d5577d05d19f1fd68f5e5c2d256c9b
|
ce86cbe1b9eee93c999a6f4f342b6280fd72c8ee
|
refs/heads/master
| 2020-09-02T14:30:14.936587
| 2019-12-06T07:46:50
| 2019-12-06T07:46:50
| 219,241,547
| 1
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 3,186
|
r
|
Poly_Gibbs_GammaTrace.R
|
#' Plots for diagnosis of convergence of the Gibbs sampler.
#'
#' \code{Multinom_traceplot_gamma} Plots for diagnosis of parameter estimates by Ordered Multinomial Regression via data augmentation and Gibbs sampling.
#' @import MASS
#' @import truncnorm
#' @import graphics
#' @import stats
#' @import mvtnorm
#'
#' @param gamma_update a nIter X (K+1) matrix of gamma updates starting with -Inf and ending with +Inf, sorted in each row
#' @param k a integer not greater than (p) indicating which cut-point is of interest.
#'
#' @return \code{traceplot} Line diagram showing convergence of the Gibbs sampler for a parameter, with the cumulative posterior mean overlaid across iterations.
#'
#' @export
#'
#'
#' @examples # Initialization
#' @examples set.seed(250)
#' @examples n <- 1000 # Total no of observations.
#' @examples int1 <- -1 # gamma boundary
#' @examples int2 <- 3 # gamma boundary
#' @examples beta <- c(-.75, 1) # Regression Parameters for data generation.
#' @examples X <- cbind(sample(1:4, n, replace = TRUE), rnorm(n, 0, 2)) # Generated design matrix
#' @examples # Generation of Latent Variable Observations
#' @examples eta <- X %*% beta
#' @examples z <- rnorm(n, eta, 1)
#' @examples # Generation of Responses depending on z
#' @examples y <- rep(0, n)
#' @examples y[z <= int1] <- 1
#' @examples y[int1 <z & z <= int2] <- 2
#' @examples y[int2 < z ] <- 3
#' @examples #Spliting The Data in Train and Test in 80:20 ratio
#' @examples Train_ID = sample(1:nrow(X), round(nrow(X) * 0.8), replace = FALSE) # Train Data IDS
#' @examples Train_X = X[Train_ID, ]# Train Data Covariates
#' @examples Test_X = X[-Train_ID, ]
#' @examples Train_Y = y[Train_ID] # Train Data Response
#' @examples Test_Y = y[-Train_ID] # Test Data Response
#' @examples K = 3
#' @examples k = 1
#' @examples nIter = 10000
#' @examples burn_in = 5000
#' @examples breaks = 50
#' @examples Result = MultinomGibbs_fit(Train_X, Train_Y, nIter, burn_in, K)
#' @examples gamma_update = Result$gamma_update
#' @examples Multinom_traceplot_gamma(gamma_update = gamma_update , k = 2)
Multinom_traceplot_gamma <- function(gamma_update, k){
  n_rows <- nrow(gamma_update)
  n_cols <- ncol(gamma_update)
  # The first boundary column must be -Inf in every sampled row ...
  if(sum(gamma_update[ , 1] == (-Inf)) != n_rows){
    stop("1st column of gamma_update should be all -Inf")
  }
  # ... and the last boundary column must be +Inf in every row.
  if(sum(gamma_update[ , n_cols] == (Inf)) != n_rows){
    stop("last column of gamma_update should be all Inf")
  }
  # Interior cut-points must be nondecreasing within each draw.
  unsorted_rows <- apply(gamma_update[ , -c(1, n_cols)], 1,
                         function(row){ is.unsorted(row) })
  if(sum(unsorted_rows) != 0){
    stop(" The rows of gamma_update must be sorted")
  }
  # Only interior cut-points (columns 2 .. K) can be plotted.
  if((k %in% (2:(n_cols - 1))) == FALSE){
    stop(" plot can be generated if k is in 2:((ncol(gamma_update) - 1 ))) ")
  }
  # Trace of the k-th cut-point across Gibbs iterations ...
  plot(gamma_update[ , (k)], type = "l",
       xlab = paste(" values of gamma_", k),
       main = paste("Traceplot of gamma_", k))
  # ... with the running posterior mean overlaid in red.
  lines(cumsum(gamma_update[ , (k)])/(1:n_rows), col = "red", lwd = 2)
}
|
1df0ea60504d3a6827db87b2ceefd06152940fff
|
8865d5b376c757f009935b581638efb905154687
|
/Linear Regressions on Twitter Data.R
|
978e05696734d52ed30b6fce694df5a7834b02ba
|
[] |
no_license
|
meganbaird/My-Projects
|
e2913c5b81530e33c4d5156a4ff0efff5c504751
|
a8c3ceb8b2507eaca1ac26a7e9032bff0a026e66
|
refs/heads/master
| 2020-06-30T21:01:14.222264
| 2019-08-07T01:45:19
| 2019-08-07T01:45:19
| 200,951,094
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 9,865
|
r
|
Linear Regressions on Twitter Data.R
|
## Exploratory modelling of Twitter engagement (favorite / retweet counts)
## on hand-engineered text features (the xx* columns) read from xg2.csv.
## NOTE(review): hard-coded setwd() makes this script machine-specific;
## prefer relative paths or an RStudio project.
setwd("C:/Users/bairdm2/Downloads")
getwd()
list.files()
tweets <- read.csv("xg2.csv")
#################################FAVORITE COUNT#############################
## Manual backward elimination: fit the full model, then refit after
## dropping the least significant predictor at each step.
tweets_fav<-lm(favorite_count~source+verified+xxa+xxe+xxf+xxh+xxi+xxl+xxfe+
xxma+xxn+xxp+xxq+xxu+xxw+xdtw,
data=tweets)
summary(tweets_fav)
#remove xxma
tweets_fav<-lm(favorite_count~source+verified+xxa+xxe+xxf+xxh+xxi+xxl+xxfe+
xxn+xxp+xxq+xxu+xxw+xdtw,
data=tweets)
summary(tweets_fav)
#remove xxe
tweets_fav<-lm(favorite_count~source+verified+xxa+xxf+xxh+xxi+xxl+xxfe+
xxn+xxp+xxq+xxu+xxw+xdtw,
data=tweets)
summary(tweets_fav)
#remove xxi
tweets_fav<-lm(favorite_count~source+verified+xxa+xxf+xxh+xxl+xxfe+xxn+xxp
+xxq+xxu+xxw+xdtw,
data=tweets)
summary(tweets_fav)
#remove xxf
tweets_fav<-lm(favorite_count~source+verified+xxa+xxh+xxl+xxfe+xxn+xxp
+xxq+xxu+xxw+xdtw,
data=tweets)
summary(tweets_fav)
#remove xxa
tweets_fav<-lm(favorite_count~source+verified+xxh+xxl+xxfe+xxn+xxp
+xxq+xxu+xxw+xdtw,
data=tweets)
summary(tweets_fav)
#remove xxq
tweets_fav<-lm(favorite_count~source+verified+xxh+xxl+xxfe+xxn+xxp+xxu+xxw+
xdtw,
data=tweets)
summary(tweets_fav)
##besides sourceOther, this is our final model for finding a high fav count
#Coefficients:
#  Estimate Std. Error t value Pr(>|t|)
#  (Intercept)   12800.880    905.180  14.142  < 2e-16 ***
#  sourceMobile   5089.640    664.985   7.654 2.05e-14 ***
#  sourceOther    -865.967    894.606  -0.968 0.333064
#  verifiedTRUE   2565.991    518.322   4.951 7.47e-07 ***
#  xxh           -1924.019    505.221  -3.808 0.000140 *** http
#  xxl             -37.260      7.044  -5.290 1.24e-07 *** length
#  xxfe           2150.400    476.886   4.509 6.55e-06 *** female
#  xxn            -350.183    153.894  -2.275 0.022889 * count of numbers
#  xxp           -1871.986    557.570  -3.357 0.000788 *** politics
#  xxu            -105.665     19.473  -5.426 5.83e-08 *** upper case
#  xxw             298.599     38.423   7.771 8.18e-15 *** white space
#  xdtwB         10455.416    642.281  16.279  < 2e-16 *** retweet count B
#  xdtwC         23735.201    775.313  30.614  < 2e-16 *** retweet count C
#  xdtwD         44979.785    818.577  54.949  < 2e-16 *** retweet count D
#  xdtwE         84165.166    892.459  94.307  < 2e-16 *** retweet count E
#  xdtwF        154859.152   1093.385 141.633  < 2e-16 *** retweet count F
#  xdtwG        254356.602   1475.831 172.348  < 2e-16 *** retweet count G
######################################RETWEET COUNT#########################
## Same backward-elimination procedure for retweet_count.
tweets_ret<-lm(retweet_count~source+verified+xxa+xxe+xxf+xxh+xxi+xxl+xxfe+
xxma+xxn+xxp+xxq+xxu+xxw,
data=tweets)
summary(tweets_ret)
#remove xxfe
tweets_ret<-lm(retweet_count~source+verified+xxa+xxe+xxf+xxh+xxi+xxl+xxma
+xxn+xxp+xxq+xxu+xxw,
data=tweets)
summary(tweets_ret)
#remove xxq
tweets_ret<-lm(retweet_count~source+verified+xxa+xxe+xxf+xxh+xxi+xxl+xxma
+xxn+xxp+xxu+xxw,
data=tweets)
summary(tweets_ret)
#remove xxf
tweets_ret<-lm(retweet_count~source+verified+xxa+xxe+xxh+xxi+xxl+xxma
+xxn+xxp+xxu+xxw,
data=tweets)
summary(tweets_ret)
#remove xxn
tweets_ret<-lm(retweet_count~source+verified+xxa+xxe+xxh+xxi+xxl+xxma
+xxp+xxu+xxw,
data=tweets)
summary(tweets_ret)
#remove xxma
tweets_ret<-lm(retweet_count~source+verified+xxa+xxe+xxh+xxi+xxl
+xxp+xxu+xxw,
data=tweets)
summary(tweets_ret)
#remove xxi
tweets_ret<-lm(retweet_count~source+verified+xxa+xxe+xxh+xxl+xxp+xxu+xxw,
data=tweets)
summary(tweets_ret)
##besides sourceOther, this is the function to use to get a high retweet
#count
#Coefficients:
#  Estimate Std. Error t value Pr(>|t|)
#(Intercept)  18895.02     513.75  36.779  < 2e-16 ***
#  sourceMobile  3731.48     399.33   9.344  < 2e-16 ***
#  sourceOther   -901.90     539.46  -1.672 0.094571 .
#  verifiedTRUE -5073.61     311.64 -16.280  < 2e-16 ***
#  xxa             45.38      14.86   3.055 0.002255 ** non word char
#  xxe           -407.52     169.22  -2.408 0.016036 * exclamations
#  xxh           1400.06     281.17   4.979 6.43e-07 *** counts of http
#  xxl            -42.95       4.73  -9.080  < 2e-16 *** length
#  xxp          -1511.09     336.92  -4.485 7.34e-06 *** count politics
#  xxu            -32.66      12.15  -2.687 0.007227 ** count of upper case
#  xxw             83.28      24.36   3.418 0.000631 *** white space
############################################################################
############################################################################
############################################################################
##Unsupervised Learning
###########Rule Association Learning############
## NOTE(review): install.packages() inside a script reinstalls on every run;
## guard with requireNamespace() or install interactively instead.
install.packages("arules")
library(arules)
install.packages("arulesViz")
library(arulesViz)
library(stringr)
tweets$text<-as.character(tweets$text)
#Get some information on the number of words used in each tweet
#sapply means you give it a vector and we applied a vector to
#each variable. split up words by spaces. length gives us length
#of words
word_count<-as.numeric(
sapply(
as.character(tweets$text), function(x){
length(strsplit(x," ")[[1]])
}
)
)
#Lets take a look
hist(word_count)
summary(word_count)
#Prepare for market basket analysis: one comma-separated "basket" per tweet
words<-as.character(
sapply(
tweets$text, function(x){
str_replace_all(x, " ", ",")
}
)
)
## NOTE(review): this OVERWRITES the input file xg2.csv read at the top of
## the script — write the basket data to a different filename.
write.csv(words,"xg2.csv",quote=FALSE,row.names=TRUE)
#Now, lets run an algorithm to do the analysis and extract the rules
tr<-read.transactions("xg2.csv",format="basket",sep=",")
#minimum amount of support and confidence that we want for our rules
#rules within 1% support and 80% confidence
rules<-apriori(tr,parameter=list(supp=0.01,conf=.8))
topRules<-rules[1:100]
inspect(topRules)
#Get some plots of the association rules
####look into why error message is popping up. says arulesViz package
####needs to be installed but it is installed and still won't run
plot(topRules)
plot(topRules,method="graph")
plot(topRules,method="grouped")
##################Clustering###################
# k-means on retweet counts with 3 centers; cluster labels become a factor
rtclusters<-kmeans(tweets$retweet_count,3)
hist(tweets$retweet_count,breaks = 10,xlim=c(0,2000))
summary(tweets$retweet_count)
tweets$retweet_count[15]
category<-rtclusters$cluster
tweets<-data.frame(tweets,rtCategory=as.factor(category))
##as.factor is necessary since it is a categorical variable, not numeric
#Supervised Learning
#Regression Problems
library(lexicon)
library(sentimentr)
library(parallel)
library(caret)
#lexicon is a series of words with numerical values stored to them
lexicons <- list(hash_sentiment_huliu,
hash_sentiment_jockers,
hash_sentiment_jockers_rinker,
hash_sentiment_loughran_mcdonald,
hash_sentiment_nrc,
hash_sentiment_senticnet,
hash_sentiment_sentiword,
hash_sentiment_slangsd,
hash_sentiment_socal_google)
theText<-as.character(tweets$text)
theLexicon<-lexicons[[2]]
#Single-Core Processing
#textSentiments<-sapply(theText,function(i){sum(sentiment)})
#Parallel Processing: score each tweet's sentiment on all available cores
clust<-makeCluster(detectCores())
clusterEvalQ(clust,library(sentimentr))
clusterExport(clust,"theText")
clusterExport(clust,"theLexicon")
textSentiments<-parSapply(clust,1:length(theText),
function(x){
sum(sentiment(theText[x],polarity_dt=theLexicon)$sentiment)
})
stopCluster(clust)
tweets<-data.frame(tweets,textSentiments)
#Basic Regression:
theFormula<-retweet_count~textSentiments
olsModel<-lm(theFormula,data=tweets)
summary(olsModel)
residuals<-olsModel$residuals
#poisson has mean = variance
poisModel<-glm(theFormula,family="poisson",data=tweets)
summary(poisModel)
# compare models by BIC (lower is better)
BIC(olsModel)
BIC(poisModel)
####Our OLS model is the better of the 2 models
#Classification Problem with Naive Bayes
library(e1071)
classFormula<-rtCategory~textSentiments+favorite_count
nbc<-naiveBayes(classFormula,data=tweets)
testTweet<-tweets[1,]
predict(nbc,testTweet)
###########################Clustering#######################
##############Flat Clustering Approaches##########
#K-Mediod Clustering
install.packages("kmed")
library(kmed)
#With Numerical Variables
tweet_nums <- tweets[,c("retweet_count","favorite_count")]
tweet_nums <- scale(tweet_nums)
#distNumeric will compute the distance matrix
dist_m<-distNumeric(tweet_nums,tweet_nums)
tweet_cluster<-fastkmed(dist_m,5)
the_cluster<-tweet_cluster$cluster
plot(tweet_nums[,1],tweet_nums[,2],col=the_cluster,xlim=c(0,1),
ylim=c(0,1))
#With Categorical Variables
tweet_cat<-as.matrix(tweets[,c("source")])
##originally had "is_quote" in vector, needed to remove since it is not in xg2
tweet_dist<-matching(tweet_cat,tweet_cat)
cooccur(tweet_cat)
########Hierarchical Clustering Approaches#######
#Bottom-Up Approach
#First let's prepare the numerical data we would like to use
# (a random 50-row sample keeps the dendrogram readable)
tweet_nums<-tweets[sample(1:nrow(tweets),50),c("retweet_count",
"favorite_count")]
tweet_dist<-dist(tweet_nums)
fit<-hclust(tweet_dist)
plot(fit)
clust<-cutree(fit,k=5)
plot(tweet_nums$retweet_count,tweet_nums$favorite_count,col=clust,
xlim=c(0,100),ylim=c(0,500))
|
3c26da14dc1139fdd14f8f00860e1ef072bd4b32
|
fd99be475e4227add55ead15cf1e1829316517ce
|
/scripts/blast_and_align.R
|
2e77db29635db715a0e20ce59cd5989beb95415b
|
[
"MIT"
] |
permissive
|
AndersenLab/20191001_Hawaii
|
38f2d1d4780722008c7d5938b29d1f9641daf82f
|
2655506efc4518da252faa98f7578c22d51b9e4f
|
refs/heads/master
| 2022-06-27T23:35:56.221121
| 2020-01-08T17:15:17
| 2020-01-08T17:15:17
| 221,272,355
| 0
| 0
|
MIT
| 2022-06-27T16:56:09
| 2019-11-12T17:19:35
|
HTML
|
UTF-8
|
R
| false
| false
| 2,113
|
r
|
blast_and_align.R
|
## Summarise Sanger .ab1 traces, base-call them, and BLAST the primary
## sequences against NCBI. Relies on Bioconductor packages installed
## interactively (commented install lines kept for reference).
# install.packages("BiocManager")
library(BiocManager)
# install(c("sangerseqR","annotate","genbankr"))
# BiocManager::install(c("DECIPHER", "Biostrings", "sangerseqR"))
library(devtools)
# install_github("roblanf/sangeranalyseR")
library(sangerseqR)
library(sangeranalyseR)
library(tidyverse)
# install.packages("microclass")
library(annotate)
# set working directory (one level above this script; requires RStudio)
setwd(glue::glue("{dirname(rstudioapi::getActiveDocumentContext()$path)}/.."))
# make input folder
input.folders = c("data/sanger/raw/")
# make sanger summary for all the .ab1 files. Takes a long time!
sf = summarise.abi.folder(input.folders, processors = 2)
seq_summary <- sf$summaries
# reading about sf reads structure https://colauttilab.github.io/DNAbarcodes.html
test <- sf$reads[1]
# plot mean qualities summary: raw (blue) vs trimmed (red) mean base quality
raw_mean_qual <- ggplot(sf$summaries) +
geom_histogram(aes(x = raw.mean.quality), bins = nrow(seq_summary)/3, fill = "blue", alpha = 0.5) +
geom_histogram(aes(x = trimmed.mean.quality), bins = nrow(seq_summary)/3, fill = "red", alpha = 0.5) +
xlim(0,60) +
theme_bw()
raw_mean_qual
######################
# Trying different method https://colauttilab.github.io/DNAbarcodes.html
ITS<-read.abif("data/ssu_pcr_1/S-0415_oECA1271.ab1") # Read
ITSseq <- sangerseq(ITS) # Extract
SeqX<-makeBaseCalls(ITSseq) # Call
SeqXBlastDF<-blastSequences(paste(SeqX@primarySeq),as='data.frame', hitListSize = 3, timeout = 3000)
#####################################
### loop through test directory ###
#####################################
# establish test file list
# NOTE(review): `test.input.folder` is never defined in this file — presumably
# set elsewhere (or should be `input.folders`); confirm before running.
test_file_list <- list.files(test.input.folder)
# make dataframe to hold loop output
test_seq_blast_df <- NULL
# write loop to process .ab1 files
# NOTE(review): growing with rbind() in a loop is O(n^2); consider collecting
# results in a list and binding once at the end.
for(i in unique(test_file_list)){
ITS<-read.abif(glue::glue("{test.input.folder}{i}")) # Read
ITSseq <- sangerseq(ITS) # Extract
SeqX<-makeBaseCalls(ITSseq) # Call
SeqXBlastDF<-blastSequences(paste(SeqX@primarySeq),as='data.frame', hitListSize = 3, timeout = 10000) # blast
test_seq_blast_df <- rbind(test_seq_blast_df, SeqXBlastDF) # bind blast data into dataframe named test_seq_blast_df
}
|
6fe9cbc5244f0c2947089277221c9f06dcff084a
|
9aafde089eb3d8bba05aec912e61fbd9fb84bd49
|
/codeml_files/newick_trees_processed/81_1/rinput.R
|
91dc412a06dc01db929954f9c1c7aa4f20705428
|
[] |
no_license
|
DaniBoo/cyanobacteria_project
|
6a816bb0ccf285842b61bfd3612c176f5877a1fb
|
be08ff723284b0c38f9c758d3e250c664bbfbf3b
|
refs/heads/master
| 2021-01-25T05:28:00.686474
| 2013-03-23T15:09:39
| 2013-03-23T15:09:39
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 131
|
r
|
rinput.R
|
# Read the Newick tree in 81_1.txt, unroot it, and write it back out.
library(ape)
phylo_tree <- read.tree("81_1.txt")
write.tree(unroot(phylo_tree), file="81_1_unrooted.txt")
|
a97710865480cf102f464f50b56bb0adb8323faf
|
922aa270fa30066044e7ae475f31e4426b59cfac
|
/man/similarity_metrics.Rd
|
086121fdf854c011c2cb8d5658388c10e7dc7fdf
|
[] |
permissive
|
jakobbossek/mcMST
|
361a3708a3413126fbfe61f6ae930e3ee326356b
|
4d5a18dfb79a9949c99fadf3a93c6f0f44b0cba3
|
refs/heads/master
| 2023-03-16T12:54:59.937066
| 2023-03-13T18:49:28
| 2023-03-13T18:49:28
| 96,212,733
| 2
| 3
|
BSD-2-Clause
| 2019-10-16T11:48:01
| 2017-07-04T11:51:54
|
R
|
UTF-8
|
R
| false
| true
| 1,623
|
rd
|
similarity_metrics.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/metrics.R
\name{similarity_metrics}
\alias{similarity_metrics}
\alias{getNumberOfCommonEdges}
\alias{getSizeOfLargestCommonSubtree}
\title{Metrics for spanning tree comparison.}
\usage{
getNumberOfCommonEdges(x, y, n = NULL, normalize = TRUE)
getSizeOfLargestCommonSubtree(x, y, n = NULL, normalize = TRUE)
}
\arguments{
\item{x}{[\code{matrix(2, n)}]\cr
First spanning tree represented as a list of edges.}
\item{y}{[\code{matrix(2, n)}]\cr
Second spanning tree represented as a list of edges.}
\item{n}{[\code{integer(1)} | \code{NULL}]\cr
Number of nodes of the graph.
Defaults to \code{length(x)}.}
\item{normalize}{[\code{logical(1)}]\cr
Should the measure be normalized to \eqn{[0, 1]} by division
by the number of edges?
Default is \code{TRUE}.}
}
\value{
[\code{numeric(1)}] Measure
}
\description{
Functions which expect two (spanning) trees and return a measure
of similarity between those. Function \code{getNumberOfCommonEdges} returns
the (normalized) number of shared edges and function \code{getSizeOfLargestCommonSubtree}
returns the (normalized) size of the largest connected subtree which is located in
both trees.
}
\examples{
# Here we generate two random spanning trees of a complete
# graph with 10 nodes
set.seed(1)
st1 = prueferToEdgeList(sample(1:10, size = 8, replace = TRUE))
st2 = prueferToEdgeList(sample(1:10, size = 8, replace = TRUE))
# Now check the number of common edges
NCE = getNumberOfCommonEdges(st1, st2)
# And the size of the largest common subtree
SLS = getSizeOfLargestCommonSubtree(st1, st2)
}
|
48d4d634c583e83b7a941c1f947f1f9d5b860e1c
|
ffdea92d4315e4363dd4ae673a1a6adf82a761b5
|
/data/genthat_extracted_code/COUNT/examples/affairs.Rd.R
|
df40f3207b11fd42c8b69469d0d62102316cdd9d
|
[] |
no_license
|
surayaaramli/typeRrh
|
d257ac8905c49123f4ccd4e377ee3dfc84d1636c
|
66e6996f31961bc8b9aafe1a6a6098327b66bf71
|
refs/heads/master
| 2023-05-05T04:05:31.617869
| 2019-04-25T22:10:06
| 2019-04-25T22:10:06
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 460
|
r
|
affairs.Rd.R
|
# Example analysis of COUNT::affairs — counts of extramarital affairs
# modelled with Poisson and (overdispersion-robust) negative-binomial GLMs.
library(COUNT)
### Name: affairs
### Title: affairs
### Aliases: affairs
### Keywords: datasets
### ** Examples
data(affairs)
# Poisson regression of affair counts on children and years-married dummies
glmaffp <- glm(naffairs ~ kids + yrsmarr2 + yrsmarr3 + yrsmarr4 + yrsmarr5,
	family = poisson, data = affairs)
summary(glmaffp)
exp(coef(glmaffp))  # incidence-rate ratios
# library() errors immediately if MASS is missing; the original require()
# would only warn and let the script fail later at glm.nb().
library(MASS)
# Negative-binomial model allows variance > mean (overdispersion)
glmaffnb <- glm.nb(naffairs ~ kids + yrsmarr2 + yrsmarr3 + yrsmarr4 + yrsmarr5,
	data=affairs)
summary(glmaffnb)
exp(coef(glmaffnb))
|
1a4ab43cf5a84621ccd38fbd73bb9896b50a530e
|
b739104b55e758ab8d0655b7f02ee46604498ece
|
/sir_sim_corr.R
|
75af931aa2a60b47447cfc8ffc623988d11678c3
|
[] |
no_license
|
parksw3/serial
|
a7b5601fb444714bfcc9e392e5877a80a93c8a85
|
1c22d26e912cc207f9375eebbacc33e74e89e567
|
refs/heads/master
| 2023-01-28T03:59:11.252960
| 2020-12-07T06:49:52
| 2020-12-07T06:49:52
| 254,206,298
| 5
| 1
| null | null | null | null |
UTF-8
|
R
| false
| false
| 503
|
r
|
sir_sim_corr.R
|
# Simulate SIR epidemics across a grid of intrinsic-interval correlations
# (rho) and keep only runs that "take off" (more than 100 recorded cases).
# Results are saved as a list-of-lists indexed by correlation value.
source("sir_corr.R")
nsim <- 10                        # accepted simulations per correlation value
corr <- c(0, 0.25, 0.5, 0.75)     # correlation (rho) values to sweep
simlist_corr <- vector('list', length(corr))
# seq_along() is safe even if corr were empty (1:length(corr) would give 1:0)
for (j in seq_along(corr)) {
	print(j)
	simlist <- vector('list', nsim)  # preallocated; filled only by accepted runs
	i <- 1
	while (i <= nsim) {
		print(i)
		# seed = i makes each accepted run reproducible.
		# NOTE(review): if sir.full2() is fully deterministic given `seed`, a
		# rejected run would retry the same seed forever — confirm the rejection
		# branch cannot loop indefinitely.
		sir_sim <- sir.full2(size=40000, I0=10, seed=i, rho=corr[j], keep.intrinsic = FALSE)
		# reject small outbreaks that died out early
		if (nrow(sir_sim$data) > 100) {
			simlist[[i]] <- sir_sim
			i <- i + 1
		}
	}
	simlist_corr[[j]] <- simlist
}
save("simlist_corr", file="sir_sim_corr.rda")
|
5848ea77d4e829f76c418db362d9d711a5f08076
|
f7b07bb3556b9cc730d01bf96b304c668bfaeea3
|
/plot3.R
|
3bb1382e1e9f9d329a04823f6bf0f83526a69065
|
[] |
no_license
|
007bishesh/ExData_Plotting1
|
43277e2a4d04f8ad722ed3aacfa5fbbe6fe9f1b6
|
6abc398dba4f4707aa1a848304c2eef3d6e557bc
|
refs/heads/master
| 2021-01-21T20:07:52.107417
| 2014-11-09T05:50:22
| 2014-11-09T05:50:22
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,617
|
r
|
plot3.R
|
##Downloading File
## NOTE(review): the zip is re-downloaded on every run; consider caching the
## extracted file locally.
temp <- tempfile()
fileUrl<-"https://d396qusza40orc.cloudfront.net/exdata%2Fdata%2Fhousehold_power_consumption.zip"
download.file(fileUrl,temp)
##Reading the data (semicolon-separated; everything read as character first)
power_consump<-read.table(unz(temp, "household_power_consumption.txt"), sep = ";", header= TRUE,colClasses = "character"
,stringsAsFactors=FALSE)
unlink(temp)
## Converting the Datatype
power_consump$Date<-as.Date(power_consump$Date,"%d/%m/%Y")
## Filtering the data to the two target days (1-2 Feb 2007)
power_consump_sub<-subset(power_consump,Date=="2007-02-01" |Date=="2007-02-02")
## Converting the Datatype of the three sub-metering columns to numeric
power_consump_sub$Sub_metering_1<-as.numeric(power_consump_sub$Sub_metering_1)
power_consump_sub$Sub_metering_2<-as.numeric(power_consump_sub$Sub_metering_2)
power_consump_sub$Sub_metering_3<-as.numeric(power_consump_sub$Sub_metering_3)
##Generating the PNG
concat = paste(power_consump_sub$Date,power_consump_sub$Time,sep=' ') # concatenating date and time to convert it to a time format
totime = strptime(concat,"%Y-%m-%d %H:%M:%S") # to time format but date is also included.
png("plot3.png", width = 480, height = 480,units = "px")
##Plotting the graph
plot(totime,power_consump_sub$Sub_metering_1, type = "l",
ylab = "Energy sub metering",xlab="")
## Adding the other two variables onto the canvas with different color
lines(totime,power_consump_sub$Sub_metering_2, type="l", col="red")
lines(totime,power_consump_sub$Sub_metering_3, type="l", col="blue")
## Adding the legend
legend("topright",c("Sub_metering_1","Sub_metering_2","Sub_metering_3"),lwd=1,
col=c("black", "red", "blue"))
dev.off()
|
ca02b49a238ed409194076bc16ecb9ace0086e92
|
d9db5f542c88863788839ae522cccc6c832fa759
|
/tests/testthat/test-wflow_publish.R
|
02966c9ddf348f6e75fac04c33a04f6707849f7a
|
[
"MIT"
] |
permissive
|
jdblischak/workflowrBeta
|
fc15f45d8d65f25a7562cc962aae0e20f2f2ad21
|
2a79ade2971e939cc785502c5a0fba54f209890d
|
refs/heads/master
| 2020-03-07T11:30:55.104317
| 2018-04-02T17:48:32
| 2018-04-02T17:48:32
| 127,457,737
| 5
| 1
| null | null | null | null |
UTF-8
|
R
| false
| false
| 8,352
|
r
|
test-wflow_publish.R
|
context("wflow_publish")
# Setup ------------------------------------------------------------------------
# NOTE(review): the test_that blocks below share sequential state from this
# setup (site_dir, s, r, rmd, html) and some depend on earlier blocks having
# run (e.g. the decoy file) — they are intentionally order-dependent.
library("git2r")
# Setup workflowr project for testing
site_dir <- tempfile("test-wflow_publish-")
suppressMessages(wflow_start(site_dir, change_wd = FALSE))
# Delete workflowr project on exit
# NOTE(review): on.exit() outside a function only works if this file is
# sourced inside one (testthat does this) — confirm cleanup actually runs.
on.exit(unlink(site_dir, recursive = TRUE, force = TRUE))
site_dir <- workflowr:::absolute(site_dir)
s <- wflow_status(project = site_dir)
r <- repository(s$root)
rmd <- file.path(s$analysis, c("about.Rmd", "index.Rmd", "license.Rmd"))
html <- workflowr:::to_html(rmd, outdir = s$docs)
# An Rmd whose build fails on purpose, for the error-handling tests below
rmd_to_fail <- file.path(s$analysis, "error.Rmd")
file.copy(from = "files/test-wflow_build/error.Rmd",
          to = rmd_to_fail)
# Test wflow_publish -----------------------------------------------------------
test_that("wflow_publish works in a simple case", {
  expect_message(o <- wflow_publish(rmd, view = FALSE, project = site_dir),
                 rmd[1])
  expect_true(all(file.exists(html)))
  s <- wflow_status(project = site_dir)
  expect_true(all(s$status[rmd, "published"]))
})
# Create decoy file that should not be built since it is unpublished
rmd_decoy <- file.path(s$analysis, "decoy.Rmd")
file.create(rmd_decoy)
html_decoy <- workflowr:::to_html(rmd_decoy, outdir = s$docs)
test_that("wflow_publish can `republish`", {
  mtime_pre <- file.mtime(html)
  Sys.sleep(2)  # ensure mtimes can differ across the rebuild
  # Change the theme
  config <- file.path(s$analysis, "_site.yml")
  config_lines <- readLines(config)
  config_lines <- stringr::str_replace(config_lines,
                                       " theme: cosmo",
                                       " theme: readable")
  writeLines(config_lines, con = config)
  # Republish with new theme
  expect_message(o <- wflow_publish(config, republish = TRUE, view = FALSE,
                                    project = site_dir),
                 rmd[1])
  mtime_post <- file.mtime(html)
  expect_true(all(mtime_post > mtime_pre))
  expect_true(config == o$step1$commit_files)
  expect_true(all(html %in% o$step3$commit_files))
  expect_false(file.exists(html_decoy))
  expect_false(html_decoy %in% o$step3$commit_files)
})
# Commit decoy file. Should not be affected by `update = TRUE` b/c it has not
# been published.
wflow_commit(rmd_decoy, "Commit decoy Rmd", project = site_dir)
test_that("wflow_publish can `update`", {
  # Edit and manually commit a published Rmd file, then use `update` to publish.
  cat("edit", file = rmd[1], append = TRUE)
  wflow_commit(rmd[1], "Draft edit", project = site_dir)
  # Update
  expect_message(o <- wflow_publish(update = TRUE, view = FALSE, project = site_dir),
                 rmd[1])
  expect_true(is.null(o$step1))
  expect_true(html[1] == o$step3$commit_files)
  expect_false(file.exists(html_decoy))
  expect_false(html_decoy %in% o$step3$commit_files)
})
test_that("wflow_publish can be used to commit non-Rmd files instead of wflow_commit", {
  f_test <- file.path(s$root, "test.txt")
  file.create(f_test)
  expect_silent(o <- wflow_publish(f_test, view = FALSE, project = site_dir))
  expect_true(f_test == o$step1$commit_files)
  expect_true(is.null(o$step2))
  expect_true(is.null(o$step3))
})
test_that("wflow_publish automatically removes unused figure files", {
  # Publish a file that has 2 plots from 2 unnamed chunks
  file_w_figs <- file.path(s$analysis, "fig.Rmd")
  file.copy("files/test-wflow_build/figure-v01.Rmd", file_w_figs)
  publish_v01 <- wflow_publish(file_w_figs, view = FALSE, project = site_dir)
  figs_analysis_v01 <- file.path(s$analysis, "figure", basename(file_w_figs),
                                 c("unnamed-chunk-1-1.png", "unnamed-chunk-2-1.png"))
  expect_true(all(file.exists(figs_analysis_v01)))
  figs_docs_v01 <- file.path(s$docs, "figure", basename(file_w_figs),
                             c("unnamed-chunk-1-1.png", "unnamed-chunk-2-1.png"))
  expect_true(all(file.exists(figs_docs_v01)))
  expect_true(all(figs_docs_v01 %in% publish_v01$step3$commit_files))
  # Update the file such that the previous 2 chunks are now named, plus add a
  # 3rd plot chunk
  file.copy("files/test-wflow_build/figure-v02.Rmd", file_w_figs, overwrite = TRUE)
  publish_v02 <- wflow_publish(file_w_figs, view = FALSE, project = site_dir)
  expect_false(all(file.exists(figs_analysis_v01)))
  expect_false(all(file.exists(figs_docs_v01)))
  figs_analysis_v02 <- file.path(s$analysis, "figure", basename(file_w_figs),
                                 c("named1-1.png", "named2-1.png", "named3-1.png"))
  expect_true(all(file.exists(figs_analysis_v02)))
  figs_docs_v02 <- file.path(s$docs, "figure", basename(file_w_figs),
                             c("named1-1.png", "named2-1.png", "named3-1.png"))
  expect_true(all(file.exists(figs_docs_v02)))
  expect_true(all(figs_docs_v02 %in% publish_v02$step3$commit_files))
  # The v01 files should also be listed in the commit_files b/c they are removed
  # in this commit
  expect_true(all(figs_docs_v01 %in% publish_v02$step3$commit_files))
  # The Git status should have no staged or unstaged changes, which would occur
  # if the files were deleted but not committed
  current_status <- status(r)
  expect_false(length(current_status$staged) > 0)
  expect_false(length(current_status$unstaged) > 0)
  # Cleanup
  file.remove(file_w_figs)
  unlink(file.path(s$analysis, "figure", basename(file_w_figs)),
         recursive = TRUE, force = TRUE)
  unlink(file.path(s$docs, "figure", basename(file_w_figs)),
         recursive = TRUE, force = TRUE)
})
# This tests the edge case where a file had one or more figures but then gets
# reduced to zero. While Git is able to "add" a non-existent directory to stage
# deleted files, git2r chokes if the non-existent directory is a relative path.
# git2r requires the non-existent directory to either be an absolute path or a
# relative path from the root of the Git repo.
test_that("wflow_publish removes unused figure files even if directory no longer exists", {
  # Publish a file that has 2 plots from 2 unnamed chunks
  file_w_figs <- file.path(s$analysis, "fig.Rmd")
  file.copy("files/test-wflow_build/figure-v01.Rmd", file_w_figs)
  publish_v01 <- wflow_publish(file_w_figs, view = FALSE, project = site_dir)
  figs_analysis_v01 <- file.path(s$analysis, "figure", basename(file_w_figs),
                                 c("unnamed-chunk-1-1.png", "unnamed-chunk-2-1.png"))
  expect_true(all(file.exists(figs_analysis_v01)))
  figs_docs_v01 <- file.path(s$docs, "figure", basename(file_w_figs),
                             c("unnamed-chunk-1-1.png", "unnamed-chunk-2-1.png"))
  expect_true(all(file.exists(figs_docs_v01)))
  expect_true(all(figs_docs_v01 %in% publish_v01$step3$commit_files))
  # Update the file to have no plots
  file.copy("files/test-wflow_build/seed.Rmd", file_w_figs, overwrite = TRUE)
  publish_v02 <- wflow_publish(file_w_figs, view = FALSE, project = site_dir)
  expect_false(all(file.exists(figs_analysis_v01)))
  expect_false(all(file.exists(figs_docs_v01)))
  # The old figure files should also be listed in the commit_files b/c they are
  # removed in this commit
  expect_true(all(figs_docs_v01 %in% publish_v02$step3$commit_files))
  # The Git status should have no staged or unstaged changes, which would occur
  # if the files were deleted but not committed
  current_status <- status(r)
  expect_false(length(current_status$staged) > 0)
  expect_false(length(current_status$unstaged) > 0)
  # Cleanup
  file.remove(file_w_figs)
})
# Test error handling ----------------------------------------------------------
test_that("wflow_publish resets Git repo to previous commit if build fails", {
  commit_pre <- commits(r, n = 1)[[1]]
  expect_error(utils::capture.output(
    wflow_publish(rmd_to_fail, view = FALSE, project = site_dir)),
    "There was an error")
  commit_post <- commits(r, n = 1)[[1]]
  expect_identical(commit_post, commit_pre)
})
test_that("wflow_publish restores previous docs/ if build fails", {
  md5sum_pre <- tools::md5sum(rmd)
  mtime_pre <- file.mtime(rmd)
  Sys.sleep(2)  # ensure restored mtimes would be detectably different
  expect_error(utils::capture.output(
    wflow_publish(c(rmd, rmd_to_fail), view = FALSE, project = site_dir)),
    "There was an error")
  md5sum_post <- tools::md5sum(rmd)
  mtime_post <- file.mtime(rmd)
  expect_identical(md5sum_post, md5sum_pre)
  expect_identical(mtime_post, mtime_pre)
})
|
cb54513abcfa067ab3233327a36205e16a791d1a
|
d4599d2a5faeaa5e40994b8486e6becc59141fe1
|
/man/focus.Rd
|
9573d2c5666f4784280a18330975166352d69ca3
|
[] |
no_license
|
Allisterh/forecast-pimfc
|
6a61c568768a792babc49ba1f27cc89997c63cfa
|
928986ec4932e7247fff857da58e53733ee00cd4
|
refs/heads/master
| 2023-03-16T23:02:45.366133
| 2017-03-13T23:35:45
| 2017-03-13T23:35:45
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 455
|
rd
|
focus.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/data.R
\docType{data}
\name{focus}
\alias{focus}
\title{Expectativas de inflacao da Focus}
\format{Um data frame com 168 observacoes de 2002.1 a 2015.12 para a
expectativas do mercado para o IPCA mensal e para IPCA em 12 meses,
ambas para um horizonte de 12 meses a frente}
\source{
BCB
}
\usage{
focus
}
\description{
Expectativas de inflacao da Focus
}
\keyword{datasets}
|
29fbcdb697cd0e8886fb44a465a4bae97804a010
|
9816d5a8c5c6099a8fd4cccc27895f20a8989a61
|
/class_1/Rintro.R
|
4791c36a26dc5dc71be937c6f4b5c0d6feca6d33
|
[] |
no_license
|
qingqiao-hz/Git_R
|
79681eb802113b52818ebf6c09aedbb5c3127eaa
|
ebc4ae3864ad06cc0bedcda76f86fb634a436e46
|
refs/heads/master
| 2020-07-13T17:37:57.245491
| 2019-08-29T09:43:59
| 2019-08-29T09:43:59
| 205,124,369
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,879
|
r
|
Rintro.R
|
# Introductory R tutorial script: assignment, vectors, subsetting,
# matrices, lists, loops, and package installation.
x <- 5
print(x)
y <- 'hello there'
y
y <- sqrt(10)
y
z <- x+y
z
# R indices start at 1; Python indices start at 0
x <- 1:5
x[1] <- 17
x
x[3:5] <- 0
x
# In R, -3 means "drop the 3rd element"; in Python, -3 indexes from the end
x[-3]
y <- c(1,5,2,4,7)
y
y[2]
y[-3]
y[c(1,4,5)]
i<- (1:3)
i
z <- c(9,10,11)
y[i] <- z
y
y <- y^2
y
# log() defaults to the natural logarithm (base e)
y <- 1:10
y <- log(y)
y
y <- exp(y)
y
x <- c(5,4,3,2,1,5,4,3,2,1)
z <- x+y
z
z <- (x ==2)
print(z)
#####x < 5 ?
z <- (x<5);print(z)
# Logical subsetting: replace the elements of x below 5 with the
# corresponding elements of y.
x[x<5] <- y[x<5]
x
y[y<5]
#matrix
junk <- c(1,2,3,4,5,0.5,2,6,0,1,1,0)
m <- matrix(junk,ncol = 3) # 3 columns, filled column-first; integer values become doubles
m
m2 <- matrix(junk,ncol = 3,byrow = T) # 3 columns, filled row-first (byrow = TRUE)
m2
# show the dimensions
dim(m)
# first column
y <- m[,1]
y
y <- m[2:3,2:3]
y
z <- m[1,2]
z
# t() transposes rows and columns
zz <- t(y)
zz
# matrices
new <- matrix(1:9,3,3)
new
hello <- z + new
hello
# extract a single value
m[1,3]
# take a subset
m[2:3,2:3]
m[,c(2,3)]
m[2,3] <- 99
m
# drop the second row
m[-2,]
# runif draws uniformly distributed random numbers
x <- runif(100,0,1)
mean(x)
min(x)
max(x)
summary(x)
# help() opens the documentation page
help(mean)
#list
who <- list(name = 'Joe',age = 45,married = T)
who
who$name
who[[1]]
# element names
names(who)
who$name <- c('Joe','steve','marry')
who$age <- c(45,23)
who$married <- c(T,F,T)
who
# loops
for(i in 1 :10){
print(i+1)
}
x <- 101:200
y <- 1:100
# rep: repeat 0 one hundred times (preallocate the result vector)
z <- rep(0,100)
z
for(i in 1:100){
z[i] <- x[i] + y[i]
}
# The vectorized form gives the same result without a loop.
w <- x + y
w - z
for(i in 1:10){
for(j in 1:5){
print(i+j)
}
}
# source() loads functions from a local R file
# (expects a file named 'other' in the working directory -- will error otherwise)
source('other')
# install third-party packages
install.packages('ggplot2')
library('ggplot2')
# Functions
# Difference between the means of two numeric vectors: mean(x) - mean(y).
my.fun <- function(x, y) {
  mean(x) - mean(y)
}
# Means of two numeric vectors and their difference, returned as a named
# list with elements meanx, meany and difference.
my.fun2 <- function(x, y) {
  mean_x <- mean(x)
  mean_y <- mean(y)
  list(meanx = mean_x, meany = mean_y, difference = mean_x - mean_y)
}
# Exercise the helper functions on random data, then demonstrate base
# plotting and the normal-distribution functions.
x <- runif(50,0,1)
y <- runif(50,0,3)
output <- my.fun(x,y)
output
output2 <- my.fun2(x,y)
output2
output2$difference
for(i in 1:10){
if( i == 4) print(i)
}
##plot
x <- 1:10
# rnorm: 10 draws from a normal distribution with mean 0 and sd 1
y <- 1+ x + rnorm(10,0,1)
plot(x,y)
plot(x,y,type = 'h')
plot(x,y,type = 'l')
plot(x,y,type = 'l',lwd = 3,col = 6,xlab = 'x',ylab = 'y')
# par() sets graphics parameters; mfrow lays out a 3x2 grid of panels
par(mfrow = c(3,2))
for(i in 1:6){
# fixed: 'led' is not a graphical parameter -- 'lwd' (line width) was intended,
# matching the earlier plot() calls
plot(x,y+i,type = 'l',lwd = 3,col=6,xlab = 'x',ylab = 'y')
}
# save the plot to a file
postscript('plot.ps')
plot(x,y,type = 'l',lwd = 3,col=6,xlab = 'x',ylab = 'y')  # fixed: 'led' -> 'lwd'
# close the graphics device
dev.off()
par(mfrow = c(1,1))  # fixed: 'myfrow' was a typo for 'mfrow'
x <- rnorm(100,0,1)
y <- rpois(500,4)
hist(y)
hist(y,nclass = 50)
# The original wrapped the notes below in a dead `if(FALSE)` string; they are
# documentation, so they belong in comments:
# rnorm generates random normal deviates
# pnorm is the normal cumulative distribution function
# dnorm is the normal probability density
# qnorm is the normal quantile function (inverse CDF)
pnorm(2,0,1)
pnorm(2,1,4)
qnorm(.3,0,1)
x <- seq(-3,3,length.out = 1000)
f <- dnorm(x,0,1)
plot(x,f,type = 'l',lwd = 3,col = 4)
|
380ba84ff21997affe31b413b7fbb06ed3e833ee
|
01ab1a31cd719e71ca21cf02eb34d601f2b16e96
|
/R/plot_gr_microplate.R
|
3989f9456c0ed31495fb3dc48dbc7eb52d193b57
|
[] |
no_license
|
MartinBanchero/mpxtractor
|
cd52afa115db1b3aa9d6d70a2ab639c61ca8646a
|
78e16314006e440ba51d4f819e809b13e7aa26a9
|
refs/heads/master
| 2022-03-18T06:16:28.183064
| 2022-02-27T20:08:03
| 2022-02-27T20:08:03
| 246,330,861
| 2
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 4,394
|
r
|
plot_gr_microplate.R
|
#' Function to plot growth rates over microplate frame.
#'
#' This function takes a dataframe with the raw data and the information from the
#' layout file. Calculate growth rate and plot this growth rates over a microplate
#' frame.
#'
#' @param df_data Is a dataframe that combines data files with layout files
#' @param var_gr This the attribute to be used to calculate growth rates
#' @param exp_title optional, add the title.
#' @param ws is the windowsize in hours
#' @param cond_to_col The condition from the layout to color
#' @param plate_file plate file to be use to compute growth rates in case of multiple files.
#' @param output_filename The name of the output file followed by proper extension, ie. .png
#'
#' @return Returns the background plot which is the microplate frame and over this
#' the plot of growth rates in each well.
#' The background plot shows in the x-axis the number of columns and in the y-axis
#' the letter to identified the row.
#'
#'
#' @section Warning:
#' Note that the time should be a time series, if the time series is broken the
#' growth rates are not calculated and the process stop.
#'
#' @importFrom rlang .data
#' @export
#'
#' @examples
#' \dontrun{
#' # Get the data file path
#' file_path_sp <- system.file(
#' "extdata",
#' "test_gr_spectramax.txt",
#' package = "mpxtractor"
#' )
#'
#' # Extract the data stored in the files into a df using proper wrangling function
#' df_sp <- mpxtractor::read_spectramax_data(
#' file = file_path_sp
#' )
#' # get the path to layout file
#' file_path_layout <- system.file(
#' "extdata", "test_layout_file.csv",
#' package = "mpxtractor"
#' )
#' # combine raw data with layout scheme
#' df_data_combine <- mpxtractor::combine_data_with_layout(
#' df_data = df_sp,
#' reader_type = "spectramax",
#' layout_files = file_path_layout
#' )
#'
#' microplateplot <- mpxtractor::plot_gr_microplate(
#' df_data = df_data_combine,
#' var_gr = "Measurement",
#' exp_title = "Spectramax experiment",
#' ws = "2hs",
#' cond_to_col = "condition",
#' output_filename = "growth_rates_test.png")
#' }
#' #Check vignette **plotting_functions()** for more information.
#'
# Main function: validate the input, compute per-well growth rates, build
# one subplot per well, overlay them on the microplate background, and
# write the composite figure to `output_filename`.
plot_gr_microplate <- function(df_data, var_gr, exp_title = NULL,
                               ws, cond_to_col, plate_file = NULL, output_filename) {
  # Validate before doing any work.
  if (!is.data.frame(df_data)) stop("df_data should be a dataframe")
  check_variables(df_data, var_gr, cond_to_col)
  check_is_multi_plate(df_data, plate_file)
  # Growth rates plus the factor column used for colouring.
  growth_df <- compute_growth_rates(df_data, var_gr, ws, plate_file)
  growth_df <- factor_to_color(growth_df, cond_to_col)
  # One annotated subplot per well, positioned on the plate grid.
  well_plots <- generate_subplots_by_well(growth_df, cond_to_col)
  well_plots <- subplots_with_coordinates(well_plots)
  well_plots <- subplots_annotated(well_plots, growth_df)
  # Compose over the microplate background and save.
  final_plot <- combine_subplots_backgr(well_plots, exp_title, cond_to_col)
  save_plot(df_data, final_plot, output_filename)
}
# Ensure both the growth-rate variable and the colouring condition exist as
# columns of the input data frame; stop with an explanatory message otherwise.
check_variables <- function(df_data, var_gr, cond_to_col) {
  available <- colnames(df_data)
  if (!(var_gr %in% available)) {
    stop("The variable (var_gr) to calculate growth rate is not present in the
  input data.")
  }
  if (!(cond_to_col %in% available)) {
    stop("The variable (cond_to_col) assigned to color is not present in the
  input data.")
  }
}
# Stop when the data contains more than one plate but no plate was selected;
# otherwise return the data unchanged.
#
# Bug fix: the original tested `!is.null(.data$plate_filename)`, but rlang's
# `.data` pronoun is only valid inside a data-masking context (e.g. inside
# dplyr verbs); evaluating it here errors at run time. We check for the
# column by name instead.
check_is_multi_plate <- function(df_data, plate_file){
  if (is.null(plate_file) &&
      "plate_filename" %in% colnames(df_data) &&
      length(unique(df_data$plate_filename)) > 1) {
    stop("Sorry, there is more than one plate present in the data. You have to
  specify which plate to use." )
  }
  df_data
}
# Append a factor column `condition_fc` derived from `cond_to_col`, with
# levels ordered by first appearance in the data (not alphabetically), so
# colours follow the layout order.
factor_to_color <- function(sp_data_layout, cond_to_col) {
  cond_values <- sp_data_layout[[cond_to_col]]
  sp_data_layout$condition_fc <- factor(cond_values, levels = unique(cond_values))
  sp_data_layout
}
# Write `all_wells_plot` to `output_filename`, choosing the image size from
# the plate format (96- or 384-well, inferred from the number of unique
# wells in `df_data`).
#
# Robustness fix: the original silently did nothing for any other plate
# size; we now emit a warning so the missing output is noticed.
save_plot <- function(df_data, all_wells_plot, output_filename ){
  n_wells <- length(unique(df_data$Wells))
  if (n_wells == 96) {
    ggplot2::ggsave(
      filename = output_filename,
      plot = all_wells_plot,
      width = 22,
      height = 17,
      units = "cm")
  } else if (n_wells == 384) {
    ggplot2::ggsave(
      filename = output_filename,
      plot = all_wells_plot,
      width = 50,
      height = 30,
      units = "cm")
  } else {
    warning("save_plot: expected 96 or 384 wells, got ", n_wells,
            "; plot not saved.", call. = FALSE)
  }
}
|
c64dc15d89f478984bbc1a7aefe3ac65fdc6b72d
|
8eb0e554e6eae7aa81cfd18e9438cea9dbfc751f
|
/Spinalcord_MappR.R
|
b02c08f569ccc9e2344ee5f00acc7eb167231ea9
|
[] |
no_license
|
nstifani/Spinalcord_MappR
|
f2eb90f3694cbfc12960e7301a3a33558b2ed7d5
|
c8fec07de95084575e00a3f883ab8cb9b46078e7
|
refs/heads/master
| 2021-01-12T07:31:45.867107
| 2017-03-14T00:51:48
| 2017-03-14T00:51:48
| 76,971,777
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 73,629
|
r
|
Spinalcord_MappR.R
|
## Density Plot data from Spinalcord Mapper
## Script written by Nicolas Stifani contact nstifani@gmail.com
## Note: This Plugin Assume that Y coordinates are Inverted
## Manual CellCounter does not report "inverted Y". So the X=0 Y=0 is the top left corner.
## To Invert Non-Inverted Y Coordinates one must take the absolute of the Y Value minus Maximum Y value
## Y2=abs(Y-Max(Y))
## Requires Ghosscript brew install ghostscript
## The SC Layout is generated from a AI file to generate a postscript XML
## PostScriptTrace("Mouse_SC_L4_Normalized.ai")
# Functions ---------------------------------------------------------------
# NOTE(review): rm(list = ls()) in shared scripts is discouraged -- it wipes
# the user's entire workspace as a side effect; prefer a fresh R session.
rm(list=ls()) # Clear the workspace just in case you have old stuff there
# Functions to install the required packages
# Install (if missing) and attach every package named in `ListPackage`.
#
# Fix: the original iterated `1:length(ListPackage)`, which evaluates to
# c(1, 0) for an empty vector and would attempt install.packages(NA);
# seq_along() makes the empty case a no-op.
InstallRequiredPackage.Function<-function(ListPackage){
  for (PackageI in seq_along(ListPackage)){
    RequiredPackageI<-ListPackage[PackageI]
    # Install only when the package is not already present on this machine.
    if (!is.element(RequiredPackageI, installed.packages()[,1])){
      install.packages(RequiredPackageI)
    }
    library(RequiredPackageI, character.only=TRUE) # Load the required packages
  }
}
# Function to display an Error dialog
ErrorDialog.Function<-function(ErrorMessage, FixMessage){
ErrorDialogChoice<-tk_messageBox(type = "retrycancel", message=paste0(ErrorMessage,"\n\n", FixMessage, "\n\n", "Click Retry to Try Again\n or \nCancel to Abort."), caption = "KinemaR Information", icon="question")
if(ErrorDialogChoice=="cancel"){
stop(paste0("Function Stopped because ", ErrorMessage))
}
}
# Function to select a directory containing file with only one . in their file name and the FileExt as a file extension
# Interactively choose an input directory that contains at least one file
# with extension `FileEXT`, where every matching file name has exactly one
# "." (the extension separator). Keeps prompting until a valid directory is
# chosen (or the user cancels inside ErrorDialog.Function, which stops).
# On success, publishes into the global environment:
#   - DirPathObjectName:        the chosen directory path
#   - DirNameObjectName:        its basename
#   - ListFilePathObjectName:   full paths of matching files
#   - ParentDirPathObjectName:  the parent directory path
SelectInputDir.Function <- function(DialogMessage, DirPathObjectName, DirNameObjectName, ListFilePathObjectName, ParentDirPathObjectName, FileEXT){
InputDirPass=0
# Loop until both checks pass: at least one matching file, and all file
# names well-formed (a single "." each).
while(InputDirPass!=1){
DirPath<-tk_choose.dir(default=getwd(), caption=DialogMessage) # Prompt the user to select an inputdirectory
DirName<-basename(DirPath) # Defines Name of Input directory
ListFilePath<-list.files(path=DirPath, pattern=paste0(".",FileEXT), all.files=FALSE, full.names=TRUE, ignore.case = TRUE) # Get the list of TXT filepath within InputDir
if(length(ListFilePath)==0){
ErrorMessageInputFile=paste0("Sorry, the folder ",DirName," does not contain any ", FileEXT," file.")
FixMessageInputFile=paste0("Please select a folder containing at least one ",FileEXT," file.")
ErrorDialog.Function(ErrorMessage=ErrorMessageInputFile, FixMessage=FixMessageInputFile)
NbFilePass=0
} else {
NbFilePass=1
}
if(length(ListFilePath)>0){
FilenamePass=1
for(FileI in 1:length(ListFilePath)){ ## Screen all files to make sure they have only one .
FilePathFileI <- ListFilePath[FileI]
FilenameFileI <- basename(FilePathFileI)
FilenameFileIComponents <- unlist(strsplit(as.character(FilenameFileI),".", fixed=TRUE))
if(length(FilenameFileIComponents)!=2){ # If more than one . make an error
FilenamePass=0
ErrorMessageInputFile=paste0("Sorry, the file ",FilenameFileI," contains more than one \".\" character.")
FixMessageInputFile="Please ensure that all Files contain only one \".\" for the file extension."
ErrorDialog.Function(ErrorMessage=ErrorMessageInputFile, FixMessage=FixMessageInputFile)
}
}
}
if(NbFilePass==1 && FilenamePass==1){
InputDirPass=1
}
}
# Publish the results for the rest of the script.
assign(DirPathObjectName, DirPath, envir = .GlobalEnv) # Assign the Variable to the global environment
assign(DirNameObjectName, DirName, envir = .GlobalEnv) # Assign the Variable to the global environment
assign(ListFilePathObjectName, ListFilePath, envir = .GlobalEnv) # Assign the Variable to the global environment
assign(ParentDirPathObjectName, dirname(DirPath), envir = .GlobalEnv)
}
## Function to select a Directory
SelectDir.Function <- function(DialogMessage, DirPathObjectName, DirNameObjectName, ParentDirPathObjectName){
DirPath<-tk_choose.dir(default=getwd(), caption=DialogMessage) # Prompt the user to select an inputdirectory
DirName<-basename(DirPath) # Defines Name of Input directory
assign(DirPathObjectName, DirPath, envir = .GlobalEnv) # Assign the Variable to the global environment
assign(DirNameObjectName, DirName, envir = .GlobalEnv) # Assign the Variable to the global environment
assign(ParentDirPathObjectName, dirname(DirPath), envir = .GlobalEnv)
}
## Function to Merge files from the Select Input Directory
MergeInputFile.Function <- function(ListFilePath, MergedObjectName){
for (FileI in 1:length(ListFilePath)){
FilePathI <- ListFilePath[FileI] # Defines the Path of the File to be processed
FilenameI <- basename(FilePathI) # Get the Filename of the File being processed
FilenameICompoments <- unlist(strsplit(as.character(FilenameI),".", fixed=TRUE))
FilenameINoExt<-FilenameICompoments[1]
# FilenameINoExt <- gsub(".txt","", FilenameI, ignore.case = TRUE) # Create a filename without extension
DataI <- read.table(FilePathI, sep = "\t", header = TRUE, nrows = 100000)
DataI$File_ID<-rep(FilenameINoExt, dim(DataI)[1])
if(FileI==1){
MergedData<-DataI
} else {
MergedData<-rbind.fill(MergedData, DataI)
}
}
assign(MergedObjectName, MergedData, envir=.GlobalEnv)
}
# Function to Select a given File with FileExt
SelectFile.Function <- function(DialogMessage, DataObjectName, FileExt){
# DataFilePath<-tk_choose.files(default = getwd(), caption = DialogMessage, filters=matrix(c(paste0(FileExt," File"),paste0(".",FileExt)),1,2, byrow=TRUE), multi=FALSE)
DataFilePath<-tk_choose.files(default = getwd(), caption = DialogMessage, multi=FALSE)
Data<-read.table(DataFilePath, header=TRUE, sep = "\t", colClasses = "character")
assign(DataObjectName, Data, envir=.GlobalEnv)
}
# Function to Create an Output Directory
CreateOutputDir.Function <- function(OutputDirLocation, OutputDirName, SubDirList){
OutputDirName2=OutputDirName
n=1
while(dir.exists(file.path(OutputDirLocation, OutputDirName2))==TRUE){
n=n+1
OutputDirName2=paste0(OutputDirName,"_", n)
}
dir.create(file.path(OutputDirLocation, OutputDirName2))
assign("OutputDirPath", file.path(OutputDirLocation, OutputDirName2), envir = .GlobalEnv)
for(SubDirI in 1:length(SubDirList)){
dir.create(file.path(OutputDirPath,SubDirList[SubDirI]))
}
}
## Create a Color Palette From Blue to Red
BlueToRedPalette<-function(NbOfColor, Transparency){
rev(rainbow(n=NbOfColor, s=1, v=1, start=0, end=4/6, alpha=Transparency))}
# HouseKeeping ------------------------------------------------------------
# Install Required Packages
ListRequiredPackage=c("zoo", "tcltk", "MASS", "Hotelling","ggplot2", "car", "grImport", "plyr")
InstallRequiredPackage.Function(ListPackage=ListRequiredPackage)
# Select and Read the Data ------------------------------------------------------------
# Select the Input Data and Get list of Data files
SelectInputDir.Function(DialogMessage = "Choose the folder containing the Data Files.",
DirPathObjectName="InputDirPath",
DirNameObjectName="InputDirName",
ListFilePathObjectName="ListInputFilePath",
ParentDirPathObjectName="ParentInputDirPath",
FileEXT="TXT"
)
setwd(InputDirPath)
# Merge all Input data Files into one
MergeInputFile.Function(ListFilePath=ListInputFilePath, MergedObjectName="MergedInputData")
# Select the RegistrationData and Registrtion coordinates
SelectFile.Function(DialogMessage="Select the Registration Coordinates File", DataObjectName = "RegistrationData", FileExt="TXT")
# Create OuputDirectory and Subdirectory
CreateOutputDir.Function(OutputDirLocation=ParentInputDirPath,
OutputDirName=paste0(InputDirName,"_Coordinates_Processed"),
SubDirList=c("Graphs by File", "Tables by File","Graphs by Subject", "Tables by Subject","Graphs by Group", "Tables by Group") )
# Select the XML SC Layout
#Create a XML Layout from a Vector File
#SC_Layout_Vector_File<-file.choose("Select the file of the SC Layout")
# Create the
#setwd(dirname(SC_Layout_Vector_File))
#PostScriptTrace(SC_Layout_Vector_File)
SC_Layout_File<-file.choose("Select the XML file of the SC Layout")
SpinalCordLayout<-readPicture(SC_Layout_File)
# /Users/Nicolas/Documents/Spinalcord_MappR/SC_Layouts/Mouse_SC_L4_Normalized.xml
# Pre-Process RegistrationData ---------------------------------------------------
# Convert Registration Data to Numeric values (all *_Pixels columns were
# read as character by SelectFile.Function).
for (ColI in 1:dim(RegistrationData)[2]){
NameColI<-colnames(RegistrationData)[ColI]
if(endsWith(NameColI, "Pixels")){
RegistrationData[,ColI]<-mapply(RegistrationData[,ColI], FUN=as.numeric, simplify=TRUE)
}
}
# Center the registration coordinates according to the CC (central canal)
# position, so the central canal becomes the origin. Landmark prefixes:
# CC = central canal, DE = dorsal edge, LE = lateral edge, VE = ventral edge;
# _R/_L = right/left side.
for(RowI in 1:dim(RegistrationData)[1]){
RegistrationData$CC_X_Pixels_Centered[RowI]<-scale(RegistrationData$CC_X_Pixels[RowI], center=RegistrationData$CC_X_Pixels[RowI], scale=FALSE)
RegistrationData$DE_R_X_Pixels_Centered[RowI]<-scale(RegistrationData$DE_R_X_Pixels[RowI], center=RegistrationData$CC_X_Pixels[RowI], scale=FALSE)
RegistrationData$LE_R_X_Pixels_Centered[RowI]<-scale(RegistrationData$LE_R_X_Pixels[RowI], center=RegistrationData$CC_X_Pixels[RowI], scale=FALSE)
RegistrationData$VE_R_X_Pixels_Centered[RowI]<-scale(RegistrationData$VE_R_X_Pixels[RowI], center=RegistrationData$CC_X_Pixels[RowI], scale=FALSE)
RegistrationData$VE_L_X_Pixels_Centered[RowI]<-scale(RegistrationData$VE_L_X_Pixels[RowI], center=RegistrationData$CC_X_Pixels[RowI], scale=FALSE)
RegistrationData$LE_L_X_Pixels_Centered[RowI]<-scale(RegistrationData$LE_L_X_Pixels[RowI], center=RegistrationData$CC_X_Pixels[RowI], scale=FALSE)
RegistrationData$DE_L_X_Pixels_Centered[RowI]<-scale(RegistrationData$DE_L_X_Pixels[RowI], center=RegistrationData$CC_X_Pixels[RowI], scale=FALSE)
RegistrationData$CC_Y_Pixels_Centered[RowI]<-scale(RegistrationData$CC_Y_Pixels[RowI], center=RegistrationData$CC_Y_Pixels[RowI], scale=FALSE)
RegistrationData$DE_R_Y_Pixels_Centered[RowI]<-scale(RegistrationData$DE_R_Y_Pixels[RowI], center=RegistrationData$CC_Y_Pixels[RowI], scale=FALSE)
RegistrationData$LE_R_Y_Pixels_Centered[RowI]<- 0 ## By default we use the lateral edge a Y=0
#RegistrationData$LE_R_Y_Pixels_Centered[RowI]<-scale(RegistrationData$LE_R_Y_Pixels[RowI], center=RegistrationData$CC_Y_Pixels[RowI], scale=FALSE)
RegistrationData$VE_R_Y_Pixels_Centered[RowI]<-scale(RegistrationData$VE_R_Y_Pixels[RowI], center=RegistrationData$CC_Y_Pixels[RowI], scale=FALSE)
RegistrationData$VE_L_Y_Pixels_Centered[RowI]<-scale(RegistrationData$VE_L_Y_Pixels[RowI], center=RegistrationData$CC_Y_Pixels[RowI], scale=FALSE)
RegistrationData$LE_L_Y_Pixels_Centered[RowI]<- 0 ## By default we use the lateral edge a Y=0
#RegistrationData$LE_L_Y_Pixels_Centered[RowI]<-scale(RegistrationData$LE_L_Y_Pixels[RowI], center=RegistrationData$CC_Y_Pixels[RowI], scale=FALSE)
RegistrationData$DE_L_Y_Pixels_Centered[RowI]<-scale(RegistrationData$DE_L_Y_Pixels[RowI], center=RegistrationData$CC_Y_Pixels[RowI], scale=FALSE)
# Scale the Registration coordinates: X by the lateral-edge distance, Y by
# the dorsal/ventral-edge distance, per quadrant (left side negated).
RegistrationData$CC_X_Scaled[RowI]<- scale(RegistrationData$CC_X_Pixels_Centered[RowI], center=FALSE, scale=RegistrationData$DE_R_X_Pixels_Centered[RowI])
RegistrationData$DE_R_X_Scaled[RowI]<- scale(RegistrationData$DE_R_X_Pixels_Centered[RowI], center=FALSE, scale=RegistrationData$LE_R_X_Pixels_Centered[RowI])
RegistrationData$LE_R_X_Scaled[RowI]<- scale(RegistrationData$LE_R_X_Pixels_Centered[RowI], center=FALSE, scale=RegistrationData$LE_R_X_Pixels_Centered[RowI])
RegistrationData$VE_R_X_Scaled[RowI]<- scale(RegistrationData$VE_R_X_Pixels_Centered[RowI], center=FALSE, scale=RegistrationData$LE_R_X_Pixels_Centered[RowI])
RegistrationData$VE_L_X_Scaled[RowI]<- - scale(RegistrationData$VE_L_X_Pixels_Centered[RowI], center=FALSE, scale=RegistrationData$LE_L_X_Pixels_Centered[RowI])
RegistrationData$LE_L_X_Scaled[RowI]<- - scale(RegistrationData$LE_L_X_Pixels_Centered[RowI], center=FALSE, scale=RegistrationData$LE_L_X_Pixels_Centered[RowI])
RegistrationData$DE_L_X_Scaled[RowI]<- - scale(RegistrationData$DE_L_X_Pixels_Centered[RowI], center=FALSE, scale=RegistrationData$LE_L_X_Pixels_Centered[RowI])
RegistrationData$CC_Y_Scaled[RowI]<- scale(RegistrationData$CC_Y_Pixels_Centered[RowI], center=FALSE, scale=RegistrationData$DE_R_Y_Pixels_Centered[RowI])
RegistrationData$DE_R_Y_Scaled[RowI]<- scale(RegistrationData$DE_R_Y_Pixels_Centered[RowI], center=FALSE, scale=RegistrationData$DE_R_Y_Pixels_Centered[RowI])
RegistrationData$LE_R_Y_Scaled[RowI]<- 0 # Force the Lateral point to be in the dorsal quadrant
# RegistrationData$LE_R_Y_Scaled[RowI]<- scale(RegistrationData$LE_R_Y_Pixels_Centered[RowI], center=FALSE, scale=RegistrationData$LE_R_Y_Pixels_Centered[RowI])
RegistrationData$VE_R_Y_Scaled[RowI]<- - scale(RegistrationData$VE_R_Y_Pixels_Centered[RowI], center=FALSE, scale=RegistrationData$VE_R_Y_Pixels_Centered[RowI])
RegistrationData$VE_L_Y_Scaled[RowI]<- - scale(RegistrationData$VE_L_Y_Pixels_Centered[RowI], center=FALSE, scale=RegistrationData$VE_L_Y_Pixels_Centered[RowI])
RegistrationData$LE_L_Y_Scaled[RowI]<- 0 # Force the Lateral point to be in the dorsal quadrant
#RegistrationData$LE_L_Y_Scaled[RowI]<- scale(RegistrationData$LE_L_Y_Pixels_Centered[RowI], center=FALSE, scale=RegistrationData$LE_L_Y_Pixels_Centered[RowI])
RegistrationData$DE_L_Y_Scaled[RowI]<- scale(RegistrationData$DE_L_Y_Pixels_Centered[RowI], center=FALSE, scale=RegistrationData$DE_L_Y_Pixels_Centered[RowI])
# Extract the Variables from the File name convention:
# Date_SubjectID_Group[_extra variables...]
List_Filename_Variables<- unlist(strsplit(RegistrationData$File_ID[RowI],"_", fixed = TRUE))
RegistrationData$Date[RowI]<-List_Filename_Variables[1]
RegistrationData$Subject_ID[RowI]<-List_Filename_Variables[2]
RegistrationData$Group[RowI]<-List_Filename_Variables[3]
if (length(List_Filename_Variables)>3){
for (VariableI in 4:length(List_Filename_Variables)){
RegistrationData[[paste0("Filename_Variable_", sprintf("%03d", as.numeric(VariableI)))]][RowI] <- as.character(List_Filename_Variables[VariableI])
}# Add the Variable from the filename to the RegistrationData table
} # If
} ## End of for each Row and center and scale the data
# Pre-Process the Data ----------------------------------------------------
# Transform File_ID into factor
MergedInputData$File_ID <- factor(MergedInputData$File_ID)
## Three cases: when Channel is present, Channel is absent and Counter is present OR absent
if((any(colnames(MergedInputData)=="Channel")==TRUE )) { # Marker Name is factor of Channel
MergedInputData$Marker_Name<-as.factor(as.character(MergedInputData$Channel))
MergedInputData$Marker_Name_x_Channel<- MergedInputData$Marker_Name
} else { # Channel is absent
if((any(colnames(MergedInputData)=="Counter")==FALSE )){ ## and Counter is absent because only one has been used then Create One
MergedInputData$Counter<-as.factor(rep(paste0(000),dim(MergedInputData)[1]))
}
# Create Marker_ID: zero-padded 3-digit version of the Counter value
MergedInputData$Marker_ID<-as.factor(sprintf("%03d", as.numeric(as.character(MergedInputData$Counter))))
# MergedInputData$Marker_ID<-factor(000)
if(nlevels(MergedInputData$Marker_ID)==1){
# Single marker: prompt the user for its name with a small tcltk dialog.
# The dialog assigns the entered string to `Marker_Name` in the global env.
PromptNameDialog <- tktoplevel()
Name_Var <- tclVar("Marker_Name")
PromptNameDialog$env$Entered_Name <-tkentry(PromptNameDialog, width = "25", textvariable = Name_Var)
tkgrid(tklabel(PromptNameDialog, text = "Please enter the Marker Name:", justify = "left"),
padx = 10, pady = c(15, 5), sticky = "w")
tkgrid(PromptNameDialog$env$Entered_Name, padx = 10, pady = c(0, 15))
ClickOK <- function() {
Name_Var2 <- tclvalue(Name_Var)
tkdestroy(PromptNameDialog)
assign("Marker_Name", Name_Var2, envir=.GlobalEnv)
}
PromptNameDialog$env$Button_OK <-tkbutton(PromptNameDialog, text = "OK", width = -6, command = ClickOK)
tkgrid(PromptNameDialog$env$Button_OK, padx = 10, pady = c(5, 15))
tkbind(PromptNameDialog$env$Entered_Name, "<Return>", ClickOK)
tkfocus(PromptNameDialog)
tkwait.window(PromptNameDialog)
MergedInputData$Marker_Name<-rep(Marker_Name, length(MergedInputData$File_ID))
# Create dataframe for MarkerData with only one row
MarkerData<-data.frame("Marker_ID"=sprintf("%03d", 0), "Marker_Name"=Marker_Name)
} else if(nlevels(MergedInputData$Marker_ID)>1){
# Several markers: load the Marker Information table mapping IDs to names.
SelectFile.Function(DialogMessage="Select the Marker Information File", DataObjectName = "MarkerData", FileExt="txt")
MarkerData$Marker_ID<-as.factor(sprintf("%03d", as.numeric(as.character(MarkerData$Marker_ID))))
MergedInputData$Marker_Name<-""
# Get the Marker_Name from the marker_ID in the MarkerData
for(RowI in 1:length(MergedInputData$File_ID)){
Marker_IDI<-as.character(MergedInputData$Marker_ID[RowI])
MergedInputData$Marker_Name[RowI]<-as.character(MarkerData$Marker_Name[MarkerData$Marker_ID==Marker_IDI])
}
} ## End of Get MarkerName
MergedInputData$Marker_Name<-factor( MergedInputData$Marker_Name)
# Create a Channel from whichever column the counting software provided
# (Ch, Slice or Type); otherwise prompt the user for a channel name.
if((any(colnames(MergedInputData)=="Ch")==TRUE )){ ## and Counter is absent because only one has been used then Create One
MergedInputData$Channel<-as.factor(MergedInputData$Ch)
} else if((any(colnames(MergedInputData)=="Slice")==TRUE )){
MergedInputData$Channel<-as.factor(MergedInputData$Slice)
} else if((any(colnames(MergedInputData)=="Type")==TRUE )){
MergedInputData$Channel<-as.factor(MergedInputData$Type)
} else {
# Prompt for Channel Name; the dialog assigns `Channel_Name` globally.
PromptNameDialog <- tktoplevel()
Name_Var <- tclVar("Channel_Name")
PromptNameDialog$env$Entered_Name <-tkentry(PromptNameDialog, width = "25", textvariable = Name_Var)
tkgrid(tklabel(PromptNameDialog, text = "Please enter the Channel Name:", justify = "left"),
padx = 10, pady = c(15, 5), sticky = "w")
tkgrid(PromptNameDialog$env$Entered_Name, padx = 10, pady = c(0, 15))
ClickOK <- function() {
Name_Var2 <- tclvalue(Name_Var)
tkdestroy(PromptNameDialog)
assign("Channel_Name", Name_Var2, envir=.GlobalEnv)
}
PromptNameDialog$env$Button_OK <-tkbutton(PromptNameDialog, text = "OK", width = -6, command = ClickOK)
tkgrid(PromptNameDialog$env$Button_OK, padx = 10, pady = c(5, 15))
# tkbind(PromptNameDialog$env$Entered_Name, "<Return>", ClickOK)
# tkfocus(PromptNameDialog)
tkwait.window(PromptNameDialog)
MergedInputData$Channel<-as.factor(as.character(rep(Channel_Name,length(MergedInputData$File_ID))))
}
# Combined marker-by-channel identifier, e.g. "MyMarker_x_Ch1".
MergedInputData$Marker_Name_x_Channel<- paste0(as.character(MergedInputData$Marker_Name),"_x_Ch",as.character(MergedInputData$Channel))
MergedInputData$Marker_Name_x_Channel<-factor( MergedInputData$Marker_Name_x_Channel)
} ## end of else
# Gather the Data into MetaData Table -------------------------------------
## Bring the Marker Data: record, per file, how many distinct marker types
## were counted. NOTE(review): this loop iterates over rows of
## MergedInputData, so each File_ID is processed repeatedly -- the result
## is the same, just redundant work.
for (FileI in 1:length(MergedInputData$File_ID)){
File_IDI<-MergedInputData$File_ID[FileI]
InputDataI<-MergedInputData[MergedInputData$File_ID==File_IDI,] # Get the Data of a given Image
# Refresh the Factor so unused levels are dropped for this file
InputDataI$Marker_Name<-factor(InputDataI$Marker_Name)
Nb_Marker_Types<-nlevels(InputDataI$Marker_Name)
RegistrationData$Nb_Marker_Types[RegistrationData$File_ID==File_IDI]<-Nb_Marker_Types
}
# Add one pair of count columns per marker (one named Marker_<ID>, one
# named after the marker itself), initialised to NA.
for(MarkerI in 1:length(MarkerData$Marker_Name)){
Marker_IDI<-MarkerData$Marker_ID[MarkerI]
Marker_NameI<-MarkerData$Marker_Name[MarkerI]
RegistrationData$MarkerI<-NA
names(RegistrationData)[(dim(RegistrationData)[2])]<-paste0("Marker_", sprintf("%03d", as.numeric(Marker_IDI)))
RegistrationData$CounterNameI<-NA
names(RegistrationData)[(dim(RegistrationData)[2])]<-paste0(Marker_NameI)
}
## Add the Counts of each marker for every file
for (FileI in 1:length(RegistrationData$File_ID)){
File_IDI<-RegistrationData$File_ID[FileI]
InputDataI<-MergedInputData[MergedInputData$File_ID==File_IDI,] # Get the Data of a given Image
# Refresh the Factors so table() only counts levels present in this file
InputDataI$Marker_ID<-factor(InputDataI$Marker_ID)
InputDataI$Marker_Name<-factor(InputDataI$Marker_Name)
# Frequency table by marker ID, renamed to Marker_ID / Counts columns.
SummaryTable_Marker_ID<-as.data.frame(table(InputDataI$Marker_ID))
SummaryTable_Marker_ID$Marker_ID<-SummaryTable_Marker_ID$Var1
SummaryTable_Marker_ID$Counts<-SummaryTable_Marker_ID$Freq
SummaryTable_Marker_ID$Var1<-NULL
SummaryTable_Marker_ID$Freq<-NULL
# Frequency table by marker name, renamed likewise.
SummaryTable_Marker_Name<-as.data.frame(table(InputDataI$Marker_Name))
SummaryTable_Marker_Name$Marker_Name<-SummaryTable_Marker_Name$Var1
SummaryTable_Marker_Name$Counts<-SummaryTable_Marker_Name$Freq
SummaryTable_Marker_Name$Var1<-NULL
SummaryTable_Marker_Name$Freq<-NULL
# Write the per-ID counts into the Marker_<ID> columns.
for(MarkerI in 1:length(SummaryTable_Marker_ID$Marker_ID)){
Marker_IDI<-as.character(SummaryTable_Marker_ID$Marker_ID[MarkerI])
Counts_Marker_IDI<-as.integer(SummaryTable_Marker_ID$Counts[MarkerI])
RegistrationData[RegistrationData$File_ID==File_IDI, names(RegistrationData)==paste0("Marker_",Marker_IDI)]<-Counts_Marker_IDI
}
# Write the per-name counts into the columns named after each marker.
for(MarkerI in 1:length(SummaryTable_Marker_Name$Marker_Name)){
Marker_NameI<-as.character(SummaryTable_Marker_Name$Marker_Name[MarkerI])
Counts_Marker_NameI<-as.integer(SummaryTable_Marker_Name$Counts[MarkerI])
RegistrationData[RegistrationData$File_ID==File_IDI, names(RegistrationData)==Marker_NameI]<-Counts_Marker_NameI
}
}
# Persist the assembled metadata table alongside the processed output.
MetaData<-RegistrationData
write.table(MetaData, file=file.path(OutputDirPath, "Metadata.txt"), row.names=FALSE, sep = "\t")
# Process each File separately -------------------------------------------------------
# Center every cell on the central canal (CC), split the section into four
# quadrants, and scale each quadrant by its own landmark coordinates
# (lateral edge LE, dorsal edge DE, ventral edge VE; _R/_L = right/left) so
# that all sections map onto a common normalized reference frame.
for (FileI in seq_len(nlevels(MergedInputData$File_ID))){
  File_IDI<-levels(MergedInputData$File_ID)[FileI]
  InputDataI<-MergedInputData[MergedInputData$File_ID==File_IDI,] # Get the Data of a given Image
  ## Get the registered coordinates from the RegistrationData
  ## If a perfect match on fileIDs use the following line
  #RegistrationDataI<-RegistrationData[RegistrationData$File_ID==File_IDI,]
  ## If a partial match on fileIDs (registration File_ID may be a prefix of File_IDI)
  RegistrationDataI<-subset(RegistrationData, pmatch(RegistrationData$File_ID, File_IDI)==1)
  # Exactly one registration row must match; zero (or an ambiguous multiple) aborts
  if(dim(RegistrationDataI)[1]==1){
    InputDataI$X_Pixels<- as.numeric(InputDataI$X)
    InputDataI$Y_Pixels<- as.numeric(InputDataI$Y)
  } else {
    stop(paste0("Registration Data for the file ",File_IDI," is missing."))
  }
  # Center the data on the central canal
  InputDataI$X_Pixels_Centered<-scale(InputDataI$X_Pixels, center=RegistrationDataI$CC_X_Pixels, scale=FALSE)
  InputDataI$Y_Pixels_Centered<-scale(InputDataI$Y_Pixels, center=RegistrationDataI$CC_Y_Pixels, scale=FALSE)
  # Divide the Data into 4 quadrants: Dorsal/Ventral x Right/Left
  InputDataI_D_R<- InputDataI[InputDataI$X_Pixels_Centered>=0 & InputDataI$Y_Pixels_Centered>=0,]
  InputDataI_V_R<- InputDataI[InputDataI$X_Pixels_Centered>=0 & InputDataI$Y_Pixels_Centered<0,]
  InputDataI_V_L<- InputDataI[InputDataI$X_Pixels_Centered<0 & InputDataI$Y_Pixels_Centered<0,]
  InputDataI_D_L<- InputDataI[InputDataI$X_Pixels_Centered<0 & InputDataI$Y_Pixels_Centered>=0,]
  # Scale each quadrant by its own side's landmarks; the minus signs restore
  # the original sign where the left/ventral landmark coordinate is negative.
  InputDataI_D_R$X_Scaled<- scale(InputDataI_D_R$X_Pixels_Centered, center=FALSE, scale=RegistrationDataI$LE_R_X_Pixels_Centered)
  InputDataI_D_R$Y_Scaled<- scale(InputDataI_D_R$Y_Pixels_Centered, center=FALSE, scale=RegistrationDataI$DE_R_Y_Pixels_Centered)
  InputDataI_V_R$X_Scaled<- scale(InputDataI_V_R$X_Pixels_Centered, center=FALSE, scale=RegistrationDataI$LE_R_X_Pixels_Centered)
  InputDataI_V_R$Y_Scaled<- - scale(InputDataI_V_R$Y_Pixels_Centered, center=FALSE, scale=RegistrationDataI$VE_R_Y_Pixels_Centered)
  InputDataI_V_L$X_Scaled<- - scale(InputDataI_V_L$X_Pixels_Centered, center=FALSE, scale=RegistrationDataI$LE_L_X_Pixels_Centered)
  InputDataI_V_L$Y_Scaled<- - scale(InputDataI_V_L$Y_Pixels_Centered, center=FALSE, scale=RegistrationDataI$VE_L_Y_Pixels_Centered)
  InputDataI_D_L$X_Scaled<- - scale(InputDataI_D_L$X_Pixels_Centered, center=FALSE, scale=RegistrationDataI$LE_L_X_Pixels_Centered)
  # FIX(review): the dorso-LEFT quadrant was scaled by the RIGHT dorsal edge
  # (DE_R_Y_Pixels_Centered), breaking the symmetric pattern of the other
  # three quadrants; use the left dorsal edge instead.
  InputDataI_D_L$Y_Scaled<- scale(InputDataI_D_L$Y_Pixels_Centered, center=FALSE, scale=RegistrationDataI$DE_L_Y_Pixels_Centered)
  # Bind the quadrants back together
  OutputDataI<-rbind(InputDataI_D_R,InputDataI_V_R,InputDataI_V_L,InputDataI_D_L)
  ## Add the RegistrationData to the OutputData
  MissingVars<- setdiff(colnames(RegistrationDataI), colnames(OutputDataI)) # Get the Missing Columns
  if (length(MissingVars)>0){ # Registration-only columns get replicated across all rows
    for (MissingVariableI in seq_along(MissingVars)){
      OutputDataI[[MissingVars[MissingVariableI]]] <- rep(RegistrationDataI[[MissingVars[MissingVariableI]]], dim(OutputDataI)[1])
    } # Add Missing Variables to OutputDataI
  }
  # Merge Output Files together
  if(FileI==1){
    OutputData<-OutputDataI
  } else {
    OutputData<-rbind(OutputData, OutputDataI)
  }
}
# Write once after the loop: the original rewrote the whole (growing) file on
# every iteration; the final file content is identical.
write.table(OutputData, file=file.path(OutputDirPath, "Data_Coordinates_Processed.txt"), row.names=FALSE, sep = "\t")
# Plot by File ---------------------------------------------------------
# For every image: export its table, then draw two PDFs — the raw
# (CC-centered pixel) scatter and the landmark-scaled scatter — each with a
# per-marker legend and left/right/total cell counts.
OutputData$File_ID<-factor(OutputData$File_ID)
for (FileI in seq_len(nlevels(OutputData$File_ID))){
  File_IDI<-levels(OutputData$File_ID)[FileI]
  OutputDataI<-OutputData[OutputData$File_ID==File_IDI,] # Get the Data of a given Image
  write.table(OutputDataI, file=file.path(OutputDirPath, "Tables by File",paste0(File_IDI,".txt")), row.names=FALSE, sep = "\t")
  if(FileI==1){ # Create the output folders only once
    dir.create(file.path(OutputDirPath, "Graphs by File","Raw"))
    dir.create(file.path(OutputDirPath, "Graphs by File","Scaled"))
  }
  # Plot RAW coordinates for each file
  cairo_pdf(file.path(OutputDirPath, "Graphs by File", "Raw", paste0(File_IDI,"_Raw_Graph.pdf"))) # Open the graph as pdf
  # Symmetric axis limits covering both the landmark means and the extreme cells
  Xlim=round(max(abs(c(mean(OutputDataI$LE_L_X_Pixels_Centered),mean(OutputDataI$LE_R_X_Pixels_Centered),max(abs(OutputDataI$X_Pixels_Centered))))),-1)
  Ylim=round(max(abs(c(mean(OutputDataI$DE_L_Y_Pixels_Centered),mean(OutputDataI$DE_R_Y_Pixels_Centered),mean(OutputDataI$VE_L_Y_Pixels_Centered),mean(OutputDataI$VE_R_Y_Pixels_Centered), max(abs(OutputDataI$Y_Pixels_Centered))))),-1)
  NbMarkers=nlevels(OutputDataI$Marker_Name_x_Channel)
  par(xpd=TRUE) # allow the legends to be drawn in the figure margins
  plot(OutputDataI$X_Pixels_Centered, OutputDataI$Y_Pixels_Centered,
       type="p", bty="n",
       pch=1,lwd=0.5, cex=0.5, col=BlueToRedPalette(NbMarkers,1)[OutputDataI$Marker_Name_x_Channel],
       xlim=c(-Xlim,Xlim), ylim=c(-Ylim,Ylim),
       xaxp=c(-Xlim,Xlim,4), yaxp=c(-Ylim,Ylim,4),
       main=File_IDI,
       xlab="Relative position to CC (pixel)", ylab="Relative position to CC (pixel)"
  )
  # Mark the mean position of the central canal and the six edge landmarks
  points(mean(OutputDataI$CC_X_Pixels_Centered),mean(OutputDataI$CC_Y_Pixels_Centered), col="black", pch=3, cex=0.5)
  points(mean(OutputDataI$DE_R_X_Pixels_Centered),mean(OutputDataI$DE_R_Y_Pixels_Centered), col="black", pch=3, cex=0.5)
  points(mean(OutputDataI$LE_R_X_Pixels_Centered),mean(OutputDataI$LE_R_Y_Pixels_Centered), col="black", pch=3, cex=0.5)
  points(mean(OutputDataI$VE_R_X_Pixels_Centered),mean(OutputDataI$VE_R_Y_Pixels_Centered), col="black", pch=3, cex=0.5)
  points(mean(OutputDataI$DE_L_X_Pixels_Centered),mean(OutputDataI$DE_L_Y_Pixels_Centered), col="black", pch=3, cex=0.5)
  points(mean(OutputDataI$LE_L_X_Pixels_Centered),mean(OutputDataI$LE_L_Y_Pixels_Centered), col="black", pch=3, cex=0.5)
  points(mean(OutputDataI$VE_L_X_Pixels_Centered),mean(OutputDataI$VE_L_Y_Pixels_Centered), col="black", pch=3, cex=0.5)
  # Add Marker Legend
  legend("bottomleft",
         bty="n",
         pch=1, cex=0.5,
         col=BlueToRedPalette(NbMarkers,1),
         title="Marker",
         legend=levels(OutputDataI$Marker_Name_x_Channel),
         xjust = 0.5, yjust = 0.5
  )
  # Add Cell Counts Title (the legend reserves one placeholder line per marker;
  # the actual text is overprinted with text() below, one color per marker)
  LegendTop <- legend("top",
                      inset=c(0,-0.05),
                      bty="n",
                      xjust =0.5, yjust = 0.5,
                      cex=0.5,
                      col="black",
                      title="Nb of Cells",
                      legend=c(" ", rep(" ",nlevels(OutputDataI$Marker_Name_x_Channel))),
                      text.width = strwidth("Marker: Left + Right = Total")
  )
  # Add Cell Counts SubTitle
  text(LegendTop$rect$left + LegendTop$rect$w/2, LegendTop$text$y,
       c("Marker: Left + Right = Total",rep(" ",nlevels(OutputDataI$Marker_Name_x_Channel))),
       cex=0.5)
  # Add Counts for each Marker (left/right split on the sign of X_Scaled)
  for(MarkerI in seq_len(nlevels(OutputDataI$Marker_Name_x_Channel))){
    Marker_NameI<-levels(OutputDataI$Marker_Name_x_Channel)[MarkerI]
    OutputDataI_MarkerI<-OutputDataI[OutputDataI$Marker_Name_x_Channel==Marker_NameI,]
    TotalCountOutputDataI_MarkerI<-dim(OutputDataI_MarkerI)[1]
    LeftCountOutputDataI_MarkerI<-dim(OutputDataI_MarkerI[OutputDataI_MarkerI$X_Scaled<0,])[1]
    RightCountOutputDataI_MarkerI<-dim(OutputDataI_MarkerI[OutputDataI_MarkerI$X_Scaled>=0,])[1]
    # Add the Marker Data to the Legend
    text(LegendTop$rect$left + LegendTop$rect$w/2, LegendTop$text$y,
         c(rep(" ",MarkerI),
           paste0(Marker_NameI,": " ,LeftCountOutputDataI_MarkerI," + ",RightCountOutputDataI_MarkerI," = ",TotalCountOutputDataI_MarkerI),
           rep(" ",(nlevels(OutputDataI$Marker_Name_x_Channel))-MarkerI)
         ),
         cex=0.5, col=BlueToRedPalette(NbMarkers,1)[MarkerI]
    )
  }## End of for Marker I
  dev.off() # Close and save the graph
  # Plot SCALED coordinates for each file
  cairo_pdf(file.path(OutputDirPath, "Graphs by File", "Scaled", paste0(File_IDI,"_Scaled_Graph.pdf"))) # Open the graph as pdf
  Xlim=round(max(abs(c(mean(OutputDataI$LE_L_X_Scaled),mean(OutputDataI$LE_R_X_Scaled),max(abs(OutputDataI$X_Scaled))))),2)
  Ylim=round(max(abs(c(mean(OutputDataI$DE_L_Y_Scaled),mean(OutputDataI$DE_R_Y_Scaled),mean(OutputDataI$VE_L_Y_Scaled),mean(OutputDataI$VE_R_Y_Scaled), max(abs(OutputDataI$Y_Scaled))))),2)
  NbMarkers<-nlevels(OutputDataI$Marker_Name_x_Channel)
  par(xpd=TRUE)
  # FIX(review): the argument was misspelled "pannel.first"; plot.default's
  # argument is panel.first, which draws the spinal-cord background before
  # the data points (the misspelling fell into "..." with warnings).
  plot(OutputDataI$X_Scaled, OutputDataI$Y_Scaled,
       type="p", bty="n",
       pch=1,lwd=0.5, cex=0.5, col=BlueToRedPalette(NbMarkers,1)[OutputDataI$Marker_Name_x_Channel],
       xlim=c(-Xlim,Xlim), ylim=c(-Ylim,Ylim),
       xaxp=c(-Xlim,Xlim,4), yaxp=c(-Ylim,Ylim,4),
       main=File_IDI,
       xlab="Relative position to CC (Scaled)", ylab="Relative position to CC (Scaled)"
       ,panel.first={
         # Convert the plot region to normalized device coordinates so the
         # grid-based spinal-cord picture overlays the base-graphics panel
         XCenter= ( par()$mai[2] + (par()$pin[1])/2)/(par()$din[1])
         YCenter= ( par()$mai[1] + (par()$pin[2])/2)/(par()$din[2])
         WidthSC= ((par()$pin[1])/2)/(par()$din[1])
         HeightSC= ((par()$pin[2])/2)/(par()$din[2])
         grid.picture(SpinalCordLayout, x=XCenter, y=YCenter,
                      width=2*WidthSC+0.1*WidthSC, height=2*HeightSC+0.1*HeightSC
                      ,distort=TRUE)
       }
  )
  points(mean(OutputDataI$CC_X_Scaled),mean(OutputDataI$CC_Y_Scaled), col="black", pch=3, cex=0.5)
  points(mean(OutputDataI$DE_R_X_Scaled),mean(OutputDataI$DE_R_Y_Scaled), col="black", pch=3, cex=0.5)
  points(mean(OutputDataI$LE_R_X_Scaled),mean(OutputDataI$LE_R_Y_Scaled), col="black", pch=3, cex=0.5)
  points(mean(OutputDataI$VE_R_X_Scaled),mean(OutputDataI$VE_R_Y_Scaled), col="black", pch=3, cex=0.5)
  points(mean(OutputDataI$DE_L_X_Scaled),mean(OutputDataI$DE_L_Y_Scaled), col="black", pch=3, cex=0.5)
  points(mean(OutputDataI$LE_L_X_Scaled),mean(OutputDataI$LE_L_Y_Scaled), col="black", pch=3, cex=0.5)
  points(mean(OutputDataI$VE_L_X_Scaled),mean(OutputDataI$VE_L_Y_Scaled), col="black", pch=3, cex=0.5)
  ## Add Marker Legend
  legend("bottomleft",
         bty="n",
         pch=1, cex=0.5,
         col=BlueToRedPalette(NbMarkers,1),
         title="Marker",
         legend=levels(OutputDataI$Marker_Name_x_Channel),
         xjust = 0.5, yjust = 0.5
  )
  ## Add Count Title
  LegendTop <- legend("top",
                      inset=c(0,-0.05),
                      bty="n",
                      xjust =0.5, yjust = 0.5,
                      cex=0.5,
                      col="black",
                      title="Nb of Cells",
                      legend=c(" ", rep(" ",nlevels(OutputDataI$Marker_Name_x_Channel))),
                      text.width = strwidth("Marker: Left + Right = Total")
  )
  ## Add Count SubTitle
  text(LegendTop$rect$left + LegendTop$rect$w/2, LegendTop$text$y,
       c("Marker: Left + Right = Total",rep(" ",nlevels(OutputDataI$Marker_Name_x_Channel))),
       cex=0.5)
  # Add Counts for each Marker
  for(MarkerI in seq_len(nlevels(OutputDataI$Marker_Name_x_Channel))){
    Marker_NameI<-levels(OutputDataI$Marker_Name_x_Channel)[MarkerI]
    OutputDataI_MarkerI<-OutputDataI[OutputDataI$Marker_Name_x_Channel==Marker_NameI,]
    TotalCountOutputDataI_MarkerI<-dim(OutputDataI_MarkerI)[1]
    LeftCountOutputDataI_MarkerI<-dim(OutputDataI_MarkerI[OutputDataI_MarkerI$X_Scaled<0,])[1]
    RightCountOutputDataI_MarkerI<-dim(OutputDataI_MarkerI[OutputDataI_MarkerI$X_Scaled>=0,])[1]
    # Add the Counts for each marker
    text(LegendTop$rect$left + LegendTop$rect$w/2, LegendTop$text$y,
         c(rep(" ",MarkerI),
           paste0(Marker_NameI,": " ,LeftCountOutputDataI_MarkerI," + ",RightCountOutputDataI_MarkerI," = ",TotalCountOutputDataI_MarkerI),
           rep(" ",(nlevels(OutputDataI$Marker_Name_x_Channel))-MarkerI)
         ),
         cex=0.5, col=BlueToRedPalette(NbMarkers,1)[MarkerI]
    )
  }## End of for Marker I
  dev.off() # Close and save the graph
}
# Plot By Subject ID ----------------------------------------------------
OutputData$Subject_ID<-factor(OutputData$Subject_ID)
for (SubjectI in 1:nlevels(OutputData$Subject_ID)){
Subject_IDI<-levels(OutputData$Subject_ID)[SubjectI]
OutputDataI<-OutputData[OutputData$Subject_ID==Subject_IDI,] # Get the Data of a given Subject
# Refresh Factors
OutputDataI$File_ID<-factor(OutputDataI$File_ID)
OutputDataI$Subject_ID<-factor(OutputDataI$Subject_ID)
write.table(OutputDataI, file=file.path(OutputDirPath, "Tables by Subject",paste0(Subject_IDI,".txt")), row.names=FALSE, sep = "\t")
if(SubjectI==1){
dir.create(file.path(OutputDirPath, "Graphs by Subject","Raw"))
dir.create(file.path(OutputDirPath, "Graphs by Subject","Scaled"))
}
# Plot RAW coordinates for each SUBJECT
cairo_pdf(file.path(OutputDirPath, "Graphs by Subject", "Raw", paste0(Subject_IDI,"_Raw_Graph.pdf"))) # Open the graph as pdf
Xlim=round(max(abs(c(mean(OutputDataI$LE_L_X_Pixels_Centered),mean(OutputDataI$LE_R_X_Pixels_Centered),max(abs(OutputDataI$X_Pixels_Centered))))),-1)
Ylim=round(max(abs(c(mean(OutputDataI$DE_L_Y_Pixels_Centered),mean(OutputDataI$DE_R_Y_Pixels_Centered),mean(OutputDataI$VE_L_Y_Pixels_Centered),mean(OutputDataI$VE_R_Y_Pixels_Centered), max(abs(OutputDataI$Y_Pixels_Centered))))),-1)
NbMarkers=nlevels(OutputDataI$Marker_Name_x_Channel)
par(xpd=TRUE)
plot(OutputDataI$X_Pixels_Centered, OutputDataI$Y_Pixels_Centered,
type="p", bty="n",
pch=1,lwd=0.5, cex=0.5, col=BlueToRedPalette(NbMarkers,1)[OutputDataI$Marker_Name_x_Channel],
xlim=c(-Xlim,Xlim), ylim=c(-Ylim,Ylim),
xaxp=c(-Xlim,Xlim,4), yaxp=c(-Ylim,Ylim,4),
main=Subject_IDI,
xlab="Relative position to CC (pixel)", ylab="Relative position to CC (pixel)"
)
points(mean(OutputDataI$CC_X_Pixels_Centered),mean(OutputDataI$CC_Y_Pixels_Centered), col="black", pch=3, cex=0.5)
points(mean(OutputDataI$DE_R_X_Pixels_Centered),mean(OutputDataI$DE_R_Y_Pixels_Centered), col="black", pch=3, cex=0.5)
points(mean(OutputDataI$LE_R_X_Pixels_Centered),mean(OutputDataI$LE_R_Y_Pixels_Centered), col="black", pch=3, cex=0.5)
points(mean(OutputDataI$VE_R_X_Pixels_Centered),mean(OutputDataI$VE_R_Y_Pixels_Centered), col="black", pch=3, cex=0.5)
points(mean(OutputDataI$DE_L_X_Pixels_Centered),mean(OutputDataI$DE_L_Y_Pixels_Centered), col="black", pch=3, cex=0.5)
points(mean(OutputDataI$LE_L_X_Pixels_Centered),mean(OutputDataI$LE_L_Y_Pixels_Centered), col="black", pch=3, cex=0.5)
points(mean(OutputDataI$VE_L_X_Pixels_Centered),mean(OutputDataI$VE_L_Y_Pixels_Centered), col="black", pch=3, cex=0.5)
## Add Marker Legend
legend("bottomleft",
bty="n",
pch=1, cex=0.5,
col=BlueToRedPalette(NbMarkers,1),
title="Marker",
legend=levels(OutputDataI$Marker_Name_x_Channel),
xjust = 0.5, yjust = 0.5
)
## Add Count Title
LegendTop <- legend("top",
inset=c(0,-0.05),
bty="n",
xjust =0.5, yjust = 0.5,
cex=0.5,
col="black",
title="Nb Of Cells",
legend=c(" ", rep(" ",nlevels(OutputDataI$Marker_Name_x_Channel))),
text.width = strwidth("Marker: Left + Right = Total")
)
## Add Count Subtitle
text(LegendTop$rect$left + LegendTop$rect$w/2, LegendTop$text$y,
c("Marker: Left + Right = Total",rep(" ",nlevels(OutputDataI$Marker_Name_x_Channel))),
cex=0.5)
#Add Left Count Title
LegendLeft <- legend("topleft",
inset=c(0,-0.05),
bty="n",
xjust =0, yjust = 0,
cex=0.5,
col="black",
title="Left Side Counts",
text.width = strwidth("Marker: Avg Cell/Section (+/- StDev) ; n Sections")/2,
legend=c(" ", rep(" ",nlevels(OutputDataI$Marker_Name_x_Channel)))
)
#Add Left Count SubTitle
text(LegendLeft$rect$left + LegendLeft$rect$w/2, LegendLeft$text$y,
c("Marker: Avg Cell/Section (+/- StDev) ; n Sections",rep(" ",nlevels(OutputDataI$Marker_Name_x_Channel))),
cex=0.5)
#Add Right Count Title
LegendRight <- legend("topright",
inset=c(0,-0.05),
bty="n",
xjust =0., yjust = 0,
cex=0.5,
col="black",
title="Right Side Counts",
text.width = strwidth("Avg Cell/Section (+/- SD) ; n Sections")/2,
legend=c(" ", rep(" ",nlevels(OutputDataI$Marker_Name_x_Channel)))
)
#Add Right Count SubTitle
text(LegendRight$rect$left + LegendRight$rect$w/2, LegendRight$text$y,
c("Marker: Avg Cell/Section (+/- StDev) ; n Sections",rep(" ",nlevels(OutputDataI$Marker_Name_x_Channel))),
cex=0.5)
# If you want to plot only the markers that are present in this animals then uncomment the next slide to refresh the factor of Marker_Name x Channel
# OutputDataI$Marker_Name_x_Channel<-factor(OutputDataI$Marker_Name_x_Channel)
# Add Counts for each Marker
for(MarkerI in 1:nlevels(OutputDataI$Marker_Name_x_Channel)){
Marker_NameI<-levels(OutputDataI$Marker_Name_x_Channel)[MarkerI]
OutputDataI_MarkerI<-OutputDataI[OutputDataI$Marker_Name_x_Channel==Marker_NameI,]
NbImages<-length(levels(OutputDataI$File_ID))
LeftCountsPerImage_MarkerI<-c()
RightCountsPerImage_MarkerI<-c()
for(ImageI in 1:length(levels(OutputDataI$File_ID))){
Image_IDI<-levels(OutputDataI$File_ID)[ImageI]
DataImageI<-OutputDataI[OutputDataI$File_ID==Image_IDI,]
LeftDataImageI<-DataImageI[DataImageI$X_Scaled<0,]
RightDataImageI<-DataImageI[DataImageI$X_Scaled>=0,]
LeftDataImageI_MarkerI<-LeftDataImageI[LeftDataImageI$Marker_Name_x_Channel==Marker_NameI,]
RightDataImageI_MarkerI<-RightDataImageI[RightDataImageI$Marker_Name_x_Channel==Marker_NameI,]
LeftCountsPerImage_MarkerI<-c(LeftCountsPerImage_MarkerI,dim(LeftDataImageI_MarkerI)[1])
RightCountsPerImage_MarkerI<-c(RightCountsPerImage_MarkerI,dim(RightDataImageI_MarkerI)[1])
}
TotalCountOutputDataI_MarkerI<-dim(OutputDataI_MarkerI)[1]
LeftCountOutputDataI_MarkerI<-dim(OutputDataI_MarkerI[OutputDataI_MarkerI$X_Scaled<0,])[1]
RightCountOutputDataI_MarkerI<-dim(OutputDataI_MarkerI[OutputDataI_MarkerI$X_Scaled>=0,])[1]
# Add the Marker Counts to the Legend
text(LegendTop$rect$left + LegendTop$rect$w/2, LegendTop$text$y,
c(rep(" ",MarkerI),
paste0(Marker_NameI,": " ,LeftCountOutputDataI_MarkerI," + ",RightCountOutputDataI_MarkerI," = ",TotalCountOutputDataI_MarkerI),
rep(" ",(nlevels(OutputDataI$Marker_Name_x_Channel))-MarkerI)
),
cex=0.5, col=BlueToRedPalette(NbMarkers,1)[MarkerI]
)
# Add Left Counts
text(LegendLeft$rect$left + LegendLeft$rect$w/2, LegendLeft$text$y,
c(rep(" ",MarkerI),
paste0(Marker_NameI,": ",signif(mean(LeftCountsPerImage_MarkerI),3)," (+/- ",signif(sd(LeftCountsPerImage_MarkerI),3),") ; n = ",length(LeftCountsPerImage_MarkerI)),
rep(" ",(nlevels(OutputDataI$Marker_Name_x_Channel))-MarkerI)
),
cex=0.5, col=BlueToRedPalette(NbMarkers,1)[MarkerI]
)
# Add Right Counts
text(LegendRight$rect$left + LegendRight$rect$w/2, LegendRight$text$y,
c(rep(" ",MarkerI),
paste0(Marker_NameI,": ",signif(mean(RightCountsPerImage_MarkerI),3)," (+/- ",signif(sd(RightCountsPerImage_MarkerI),3),") ; n = ",length(RightCountsPerImage_MarkerI)),
rep(" ",(nlevels(OutputDataI$Marker_Name_x_Channel))-MarkerI)
),
cex=0.5, col=BlueToRedPalette(NbMarkers,1)[MarkerI]
)
}## End of for Marker I
dev.off() # Close and save the graph
# Plot SCALED coordinates for each SUBJECT
cairo_pdf(file.path(OutputDirPath, "Graphs by Subject", "Scaled", paste0(Subject_IDI,"_Scaled_Graph.pdf"))) # Open the graph as pdf
Xlim=round(max(abs(c(mean(OutputDataI$LE_L_X_Scaled),mean(OutputDataI$LE_R_X_Scaled),max(abs(OutputDataI$X_Scaled))))),2)
Ylim=round(max(abs(c(mean(OutputDataI$DE_L_Y_Scaled),mean(OutputDataI$DE_R_Y_Scaled),mean(OutputDataI$VE_L_Y_Scaled),mean(OutputDataI$VE_R_Y_Scaled), max(abs(OutputDataI$Y_Scaled))))),2)
NbMarkers=nlevels(OutputDataI$Marker_Name_x_Channel)
par(xpd=TRUE)
plot(OutputDataI$X_Scaled, OutputDataI$Y_Scaled,
type="p", bty="n",
pch=1,lwd=0.5, cex=0.5, col=BlueToRedPalette(NbMarkers,1)[OutputDataI$Marker_Name_x_Channel],
xlim=c(-Xlim,Xlim), ylim=c(-Ylim,Ylim),
xaxp=c(-Xlim,Xlim,4), yaxp=c(-Ylim,Ylim,4),
main=Subject_IDI,
xlab="Relative position to CC (Scaled)", ylab="Relative position to CC (Scaled)"
,pannel.first={
XCenter= ( par()$mai[2] + (par()$pin[1])/2)/(par()$din[1])
YCenter= ( par()$mai[1] + (par()$pin[2])/2)/(par()$din[2])
WidthSC= ((par()$pin[1])/2)/(par()$din[1])
HeightSC= ((par()$pin[2])/2)/(par()$din[2])
grid.picture(SpinalCordLayout, x=XCenter, y=YCenter,
width=2*WidthSC+0.1*WidthSC, height=2*HeightSC+0.1*HeightSC
,distort=TRUE)
}
)
points(mean(OutputDataI$CC_X_Scaled),mean(OutputDataI$CC_Y_Scaled), col="black", pch=3, cex=0.5)
points(mean(OutputDataI$DE_R_X_Scaled),mean(OutputDataI$DE_R_Y_Scaled), col="black", pch=3, cex=0.5)
points(mean(OutputDataI$LE_R_X_Scaled),mean(OutputDataI$LE_R_Y_Scaled), col="black", pch=3, cex=0.5)
points(mean(OutputDataI$VE_R_X_Scaled),mean(OutputDataI$VE_R_Y_Scaled), col="black", pch=3, cex=0.5)
points(mean(OutputDataI$DE_L_X_Scaled),mean(OutputDataI$DE_L_Y_Scaled), col="black", pch=3, cex=0.5)
points(mean(OutputDataI$LE_L_X_Scaled),mean(OutputDataI$LE_L_Y_Scaled), col="black", pch=3, cex=0.5)
points(mean(OutputDataI$VE_L_X_Scaled),mean(OutputDataI$VE_L_Y_Scaled), col="black", pch=3, cex=0.5)
## add marker legend
legend("bottomleft",
bty="n",
pch=1, cex=0.5,
col=BlueToRedPalette(NbMarkers,1),
title="Marker",
legend=levels(OutputDataI$Marker_Name_x_Channel),
xjust = 0.5, yjust = 0.5
)
## Add Counts Title
LegendTop <- legend("top",
inset=c(0,-0.05),
bty="n",
xjust =0.5, yjust = 0.5,
cex=0.5,
col="black",
title="Nb Of Cells",
legend=c(" ", rep(" ",nlevels(OutputDataI$Marker_Name_x_Channel))),
text.width = strwidth("Marker: Left + Right = Total")
)
# Add Coutn subtitles
text(LegendTop$rect$left + LegendTop$rect$w/2, LegendTop$text$y,
c("Marker: Left + Right = Total",rep(" ",nlevels(OutputDataI$Marker_Name_x_Channel))),
cex=0.5)
# Add Left Count Title
LegendLeft <- legend("topleft",
inset=c(0,-0.05),
bty="n",
xjust =0, yjust = 0,
cex=0.5,
col="black",
title="Left Side Counts",
text.width = strwidth("Marker: Avg Cell/Section (+/- StDev) ; n Sections")/2,
legend=c(" ", rep(" ",nlevels(OutputDataI$Marker_Name_x_Channel)))
)
# Add left coutns subtitle
text(LegendLeft$rect$left + LegendLeft$rect$w/2, LegendLeft$text$y,
c("Marker: Avg Cell/Section (+/- StDev) ; n Sections",rep(" ",nlevels(OutputDataI$Marker_Name_x_Channel))),
cex=0.5)
# Add right count title
LegendRight <- legend("topright",
inset=c(0,-0.05),
bty="n",
xjust =0., yjust = 0,
cex=0.5,
col="black",
title="Right Side Counts",
text.width = strwidth("Avg Cell/Section (+/- SD) ; n Sections")/2,
legend=c(" ", rep(" ",nlevels(OutputDataI$Marker_Name_x_Channel)))
)
#add right count subtitle
text(LegendRight$rect$left + LegendRight$rect$w/2, LegendRight$text$y,
c("Marker: Avg Cell/Section (+/- StDev) ; n Sections",rep(" ",nlevels(OutputDataI$Marker_Name_x_Channel))),
cex=0.5)
# Add Counts for each Marker
for(MarkerI in 1:nlevels(OutputDataI$Marker_Name_x_Channel)){
Marker_NameI<-levels(OutputDataI$Marker_Name_x_Channel)[MarkerI]
OutputDataI_MarkerI<-OutputDataI[OutputDataI$Marker_Name_x_Channel==Marker_NameI,]
NbImages<-length(levels(OutputDataI$File_ID))
LeftCountsPerImage_MarkerI<-c()
RightCountsPerImage_MarkerI<-c()
for(ImageI in 1:length(levels(OutputDataI$File_ID))){
Image_IDI<-levels(OutputDataI$File_ID)[ImageI]
DataImageI<-OutputDataI[OutputDataI$File_ID==Image_IDI,]
LeftDataImageI<-DataImageI[DataImageI$X_Scaled<0,]
RightDataImageI<-DataImageI[DataImageI$X_Scaled>=0,]
LeftDataImageI_MarkerI<-LeftDataImageI[LeftDataImageI$Marker_Name_x_Channel==Marker_NameI,]
RightDataImageI_MarkerI<-RightDataImageI[RightDataImageI$Marker_Name_x_Channel==Marker_NameI,]
LeftCountsPerImage_MarkerI<-c(LeftCountsPerImage_MarkerI,dim(LeftDataImageI_MarkerI)[1])
RightCountsPerImage_MarkerI<-c(RightCountsPerImage_MarkerI,dim(RightDataImageI_MarkerI)[1])
}
TotalCountOutputDataI_MarkerI<-dim(OutputDataI_MarkerI)[1]
LeftCountOutputDataI_MarkerI<-dim(OutputDataI_MarkerI[OutputDataI_MarkerI$X_Scaled<0,])[1]
RightCountOutputDataI_MarkerI<-dim(OutputDataI_MarkerI[OutputDataI_MarkerI$X_Scaled>=0,])[1]
# Add the Marker counts to the Legend
text(LegendTop$rect$left + LegendTop$rect$w/2, LegendTop$text$y,
c(rep(" ",MarkerI),
paste0(Marker_NameI,": " ,LeftCountOutputDataI_MarkerI," + ",RightCountOutputDataI_MarkerI," = ",TotalCountOutputDataI_MarkerI),
rep(" ",(nlevels(OutputDataI$Marker_Name_x_Channel))-MarkerI)
),
cex=0.5, col=BlueToRedPalette(NbMarkers,1)[MarkerI]
)
#Add left counts
text(LegendLeft$rect$left + LegendLeft$rect$w/2, LegendLeft$text$y,
c(rep(" ",MarkerI),
paste0(Marker_NameI,": ",signif(mean(LeftCountsPerImage_MarkerI),3)," (+/- ",signif(sd(LeftCountsPerImage_MarkerI),3),") ; n = ",length(LeftCountsPerImage_MarkerI)),
rep(" ",(nlevels(OutputDataI$Marker_Name_x_Channel))-MarkerI)
),
cex=0.5, col=BlueToRedPalette(NbMarkers,1)[MarkerI]
)
## Add Right Counts
text(LegendRight$rect$left + LegendRight$rect$w/2, LegendRight$text$y,
c(rep(" ",MarkerI),
paste0(Marker_NameI,": ",signif(mean(RightCountsPerImage_MarkerI),3)," (+/- ",signif(sd(RightCountsPerImage_MarkerI),3),") ; n = ",length(RightCountsPerImage_MarkerI)),
rep(" ",(nlevels(OutputDataI$Marker_Name_x_Channel))-MarkerI)
),
cex=0.5, col=BlueToRedPalette(NbMarkers,1)[MarkerI]
)
}## End of for Marker I
#Add 0 0 lines
par(xpd=FALSE)
abline(v=0, col="grey", lwd=0.5)
abline(h=0, col="grey", lwd=0.5)
par(xpd=TRUE)
dev.off() # Close and save the graph
} # End of for SubjectI
# Calculate the Density and Plot Density per Subject ----------------------------------------------------
# For each subject this section produces:
#   1) a "Contours" PDF: every scaled cell position overlaid with normalized
#      kde2d density contours, split by marker and by left/right side, plus
#      per-marker count legends; raw/normalized/weighted density grids are
#      also written out as tab-separated tables.
#   2) one "Filled Density" PDF per marker, with count-weighted filled contours.
# Left/right is decided by the sign of X_Scaled (x < 0 => left, x >= 0 => right).
OutputData$Subject_ID<-factor(OutputData$Subject_ID)
for (SubjectI in 1:nlevels(OutputData$Subject_ID)){
Subject_IDI<-levels(OutputData$Subject_ID)[SubjectI]
OutputDataI<-OutputData[OutputData$Subject_ID==Subject_IDI,] # Get the Data of a given Subject
# Refresh factors so File_ID levels only cover this subject's sections
OutputDataI$File_ID<-factor(OutputDataI$File_ID)
# Create the output folders once, on the first subject only
if(SubjectI==1){
dir.create(file.path(OutputDirPath, "Graphs by Subject","Contours"))
dir.create(file.path(OutputDirPath, "Graphs by Subject","Filled Density"))
dir.create(file.path(OutputDirPath, "Tables by Subject","Density Raw"))
dir.create(file.path(OutputDirPath, "Tables by Subject","Density Normalized"))
dir.create(file.path(OutputDirPath, "Tables by Subject","Density Weighted"))
}
# Plot SCALED coordinates for each SUBJECT and ADD THE CONTOURS
cairo_pdf(file.path(OutputDirPath, "Graphs by Subject", "Contours", paste0(Subject_IDI,"_Contours_Normalized_Graph.pdf"))) # Open the graph as pdf
par(xpd=TRUE)
# Symmetric axis limits: the widest of the landmark means and the data extremes.
Xlim=round(max(abs(c(mean(OutputDataI$LE_L_X_Scaled),mean(OutputDataI$LE_R_X_Scaled),max(abs(OutputDataI$X_Scaled))))),2)
Ylim=round(max(abs(c(mean(OutputDataI$DE_L_Y_Scaled),mean(OutputDataI$DE_R_Y_Scaled),mean(OutputDataI$VE_L_Y_Scaled),mean(OutputDataI$VE_R_Y_Scaled), max(abs(OutputDataI$Y_Scaled))))),2)
NbMarkers=nlevels(OutputDataI$Marker_Name_x_Channel)
plot(OutputDataI$X_Scaled, OutputDataI$Y_Scaled,
type="p", bty="n",
pch=1,lwd=0.5, cex=0.5, col=BlueToRedPalette(NbMarkers,1)[OutputDataI$Marker_Name_x_Channel],
xlim=c(-Xlim,Xlim), ylim=c(-Ylim,Ylim),
xaxp=c(-Xlim,Xlim,4), yaxp=c(-Ylim,Ylim,4),
main=Subject_IDI,
xlab="Relative position to CC (Scaled)", ylab="Relative position to CC (Scaled)"
# NOTE(review): "pannel.first" is a misspelling of plot.default's panel.first,
# so this argument travels through "..." instead of being handled as
# panel.first; the spinal-cord background may only render via lazy evaluation.
# TODO confirm the template actually draws behind the points.
,pannel.first={
XCenter= ( par()$mai[2] + (par()$pin[1])/2)/(par()$din[1])
YCenter= ( par()$mai[1] + (par()$pin[2])/2)/(par()$din[2])
WidthSC= ((par()$pin[1])/2)/(par()$din[1])
HeightSC= ((par()$pin[2])/2)/(par()$din[2])
grid.picture(SpinalCordLayout, x=XCenter, y=YCenter,
width=2*WidthSC+0.1*WidthSC, height=2*HeightSC+0.1*HeightSC
,distort=TRUE)
}
)
# Mark the mean position of each anatomical landmark (CC and the
# dorsal/lateral/ventral edges on both sides) with a small cross.
points(mean(OutputDataI$CC_X_Scaled),mean(OutputDataI$CC_Y_Scaled), col="black", pch=3, cex=0.5)
points(mean(OutputDataI$DE_R_X_Scaled),mean(OutputDataI$DE_R_Y_Scaled), col="black", pch=3, cex=0.5)
points(mean(OutputDataI$LE_R_X_Scaled),mean(OutputDataI$LE_R_Y_Scaled), col="black", pch=3, cex=0.5)
points(mean(OutputDataI$VE_R_X_Scaled),mean(OutputDataI$VE_R_Y_Scaled), col="black", pch=3, cex=0.5)
points(mean(OutputDataI$DE_L_X_Scaled),mean(OutputDataI$DE_L_Y_Scaled), col="black", pch=3, cex=0.5)
points(mean(OutputDataI$LE_L_X_Scaled),mean(OutputDataI$LE_L_Y_Scaled), col="black", pch=3, cex=0.5)
points(mean(OutputDataI$VE_L_X_Scaled),mean(OutputDataI$VE_L_Y_Scaled), col="black", pch=3, cex=0.5)
# Add Marker type legend
legend("bottomleft",
bty="n",
pch=1, cex=0.5,
col=BlueToRedPalette(NbMarkers,1),
title="Marker",
legend=levels(OutputDataI$Marker_Name_x_Channel),
xjust = 0.5, yjust = 0.5
)
# Add contour legend
legend("bottomright",
bty="n",
lty=1, cex=0.5, lwd=0.5,
col=BlueToRedPalette(NbMarkers,1),
title="Density",
legend=levels(OutputDataI$Marker_Name_x_Channel),
xjust = 0.5, yjust = 0.5
)
# Add Counts Title.
# The three legends below are drawn with blank entries only to reserve layout
# space; their returned coordinates are reused by text() calls to write the
# subtitle line now and the per-marker count lines inside the marker loop.
LegendTop <- legend("top",
inset=c(0,-0.05),
bty="n",
xjust =0.5, yjust = 0.5,
cex=0.5,
col="black",
title="Nb Of Cells",
legend=c(" ", rep(" ",nlevels(OutputDataI$Marker_Name_x_Channel))),
text.width = strwidth("Marker: Left + Right = Total")
)
# Add Counts SubTitle
text(LegendTop$rect$left + LegendTop$rect$w/2, LegendTop$text$y,
c("Marker: Left + Right = Total",rep(" ",nlevels(OutputDataI$Marker_Name_x_Channel))),
cex=0.5)
# Add Left Count Title
LegendLeft <- legend("topleft",
inset=c(0,-0.05),
bty="n",
xjust =0, yjust = 0,
cex=0.5,
col="black",
title="Left Side Counts",
text.width = strwidth("Marker: Avg Cell/Section (+/- StDev) ; n Sections")/2,
legend=c(" ", rep(" ",nlevels(OutputDataI$Marker_Name_x_Channel)))
)
# Add Left Count SubTitle
text(LegendLeft$rect$left + LegendLeft$rect$w/2, LegendLeft$text$y,
c("Marker: Avg Cell/Section (+/- StDev) ; n Sections",rep(" ",nlevels(OutputDataI$Marker_Name_x_Channel))),
cex=0.5)
# Add Right Count Title
LegendRight <- legend("topright",
inset=c(0,-0.05),
bty="n",
xjust =0., yjust = 0,
cex=0.5,
col="black",
title="Right Side Counts",
text.width = strwidth("Avg Cell/Section (+/- SD) ; n Sections")/2,
legend=c(" ", rep(" ",nlevels(OutputDataI$Marker_Name_x_Channel)))
)
# Add Right Count subTitle
text(LegendRight$rect$left + LegendRight$rect$w/2, LegendRight$text$y,
c("Marker: Avg Cell/Section (+/- StDev) ; n Sections",rep(" ",nlevels(OutputDataI$Marker_Name_x_Channel))),
cex=0.5)
# Add Data for each Marker Counts and Contours
for(MarkerI in 1:nlevels(OutputDataI$Marker_Name_x_Channel)){
Marker_NameI<-levels(OutputDataI$Marker_Name_x_Channel)[MarkerI]
OutputDataI_MarkerI<-OutputDataI[OutputDataI$Marker_Name_x_Channel==Marker_NameI,]
NbImages<-length(levels(OutputDataI$File_ID))
# Per-section (per-image) cell counts for this marker, split by side; sections
# without cells of this marker contribute a 0 because the loop runs over all
# File_ID levels of the subject.
LeftCountsPerImage_MarkerI<-c()
RightCountsPerImage_MarkerI<-c()
for(ImageI in 1:length(levels(OutputDataI$File_ID))){
Image_IDI<-levels(OutputDataI$File_ID)[ImageI]
DataImageI<-OutputDataI[OutputDataI$File_ID==Image_IDI,]
LeftDataImageI<-DataImageI[DataImageI$X_Scaled<0,]
RightDataImageI<-DataImageI[DataImageI$X_Scaled>=0,]
LeftDataImageI_MarkerI<-LeftDataImageI[LeftDataImageI$Marker_Name_x_Channel==Marker_NameI,]
RightDataImageI_MarkerI<-RightDataImageI[RightDataImageI$Marker_Name_x_Channel==Marker_NameI,]
LeftCountsPerImage_MarkerI<-c(LeftCountsPerImage_MarkerI,dim(LeftDataImageI_MarkerI)[1])
RightCountsPerImage_MarkerI<-c(RightCountsPerImage_MarkerI,dim(RightDataImageI_MarkerI)[1])
}
TotalCountOutputDataI_MarkerI<-dim(OutputDataI_MarkerI)[1]
LeftCountOutputDataI_MarkerI<-dim(OutputDataI_MarkerI[OutputDataI_MarkerI$X_Scaled<0,])[1]
RightCountOutputDataI_MarkerI<-dim(OutputDataI_MarkerI[OutputDataI_MarkerI$X_Scaled>=0,])[1]
# Add the Marker Count to the Legend (row MarkerI of the reserved top legend)
text(LegendTop$rect$left + LegendTop$rect$w/2, LegendTop$text$y,
c(rep(" ",MarkerI),
paste0(Marker_NameI,": " ,LeftCountOutputDataI_MarkerI," + ",RightCountOutputDataI_MarkerI," = ",TotalCountOutputDataI_MarkerI),
rep(" ",(nlevels(OutputDataI$Marker_Name_x_Channel))-MarkerI)
),
cex=0.5, col=BlueToRedPalette(NbMarkers,1)[MarkerI]
)
## Add the Left Counts (mean +/- sd of cells per section)
text(LegendLeft$rect$left + LegendLeft$rect$w/2, LegendLeft$text$y,
c(rep(" ",MarkerI),
paste0(Marker_NameI,": ",signif(mean(LeftCountsPerImage_MarkerI),3)," (+/- ",signif(sd(LeftCountsPerImage_MarkerI),3),") ; n = ",length(LeftCountsPerImage_MarkerI)),
rep(" ",(nlevels(OutputDataI$Marker_Name_x_Channel))-MarkerI)
),
cex=0.5, col=BlueToRedPalette(NbMarkers,1)[MarkerI]
)
## Add the Right Counts (mean +/- sd of cells per section)
text(LegendRight$rect$left + LegendRight$rect$w/2, LegendRight$text$y,
c(rep(" ",MarkerI),
paste0(Marker_NameI,": ",signif(mean(RightCountsPerImage_MarkerI),3)," (+/- ",signif(sd(RightCountsPerImage_MarkerI),3),") ; n = ",length(RightCountsPerImage_MarkerI)),
rep(" ",(nlevels(OutputDataI$Marker_Name_x_Channel))-MarkerI)
),
cex=0.5, col=BlueToRedPalette(NbMarkers,1)[MarkerI]
)
### ADD THE CONTOURS
# kde2d needs >= 2 distinct points, hence the dim(...)[1] > 2 guards below.
# The density is min-max normalized to [0, 1] before contouring; the weighted
# table additionally multiplies by the side's total cell count.
OutputDataI_MarkerI_Left<-OutputDataI_MarkerI[OutputDataI_MarkerI$X_Scaled<0,]
OutputDataI_MarkerI_Right<-OutputDataI_MarkerI[OutputDataI_MarkerI$X_Scaled>=0,]
### The Left
if(dim(OutputDataI_MarkerI_Left)[1]>2){
Density_OutputDataI_MarkerI_Left<-kde2d(OutputDataI_MarkerI_Left$X_Scaled, OutputDataI_MarkerI_Left$Y_Scaled,
n=100, lims=c(-1.5,1.5,-1.5,1.5))
Normalized_Density_OutputDataI_MarkerI_Left<-Density_OutputDataI_MarkerI_Left
Normalized_Density_OutputDataI_MarkerI_Left$z<- ((Density_OutputDataI_MarkerI_Left$z-min(Density_OutputDataI_MarkerI_Left$z))/(max(Density_OutputDataI_MarkerI_Left$z)-min(Density_OutputDataI_MarkerI_Left$z)))
Weighted_Density_OutputDataI_MarkerI_Left<-Normalized_Density_OutputDataI_MarkerI_Left
Weighted_Density_OutputDataI_MarkerI_Left$z<- dim(OutputDataI_MarkerI_Left)[1] * Normalized_Density_OutputDataI_MarkerI_Left$z
write.table(Density_OutputDataI_MarkerI_Left, file=file.path(OutputDirPath, "Tables by Subject", "Density Raw",paste0(Subject_IDI,"_",Marker_NameI,"_Left.txt")), row.names=FALSE, sep = "\t")
write.table(Normalized_Density_OutputDataI_MarkerI_Left, file=file.path(OutputDirPath, "Tables by Subject", "Density Normalized",paste0(Subject_IDI,"_",Marker_NameI,"_Left.txt")), row.names=FALSE, sep = "\t")
write.table(Weighted_Density_OutputDataI_MarkerI_Left, file=file.path(OutputDirPath, "Tables by Subject", "Density Weighted",paste0(Subject_IDI,"_",Marker_NameI,"_Left.txt")), row.names=FALSE, sep = "\t")
contour(Normalized_Density_OutputDataI_MarkerI_Left,
add = TRUE, drawlabels = FALSE,
lty=1, lwd=0.5,
col=BlueToRedPalette(NbMarkers,1)[MarkerI],
zlim = c(0,1), nlevels = 10) # add the contours
} # end of if
### The Right
if(dim(OutputDataI_MarkerI_Right)[1]>2){
Density_OutputDataI_MarkerI_Right<-kde2d(OutputDataI_MarkerI_Right$X_Scaled, OutputDataI_MarkerI_Right$Y_Scaled,
n=100, lims=c(-1.5,1.5,-1.5,1.5))
Normalized_Density_OutputDataI_MarkerI_Right<-Density_OutputDataI_MarkerI_Right
Normalized_Density_OutputDataI_MarkerI_Right$z<- ((Density_OutputDataI_MarkerI_Right$z-min(Density_OutputDataI_MarkerI_Right$z))/(max(Density_OutputDataI_MarkerI_Right$z)-min(Density_OutputDataI_MarkerI_Right$z)))
Weighted_Density_OutputDataI_MarkerI_Right<- Normalized_Density_OutputDataI_MarkerI_Right
Weighted_Density_OutputDataI_MarkerI_Right$z<- dim(OutputDataI_MarkerI_Right)[1] * Weighted_Density_OutputDataI_MarkerI_Right$z
write.table(Density_OutputDataI_MarkerI_Right, file=file.path(OutputDirPath, "Tables by Subject", "Density Raw",paste0(Subject_IDI,"_",Marker_NameI,"_Right.txt")), row.names=FALSE, sep = "\t")
write.table(Normalized_Density_OutputDataI_MarkerI_Right, file=file.path(OutputDirPath, "Tables by Subject", "Density Normalized",paste0(Subject_IDI,"_",Marker_NameI,"_Right.txt")), row.names=FALSE, sep = "\t")
write.table(Weighted_Density_OutputDataI_MarkerI_Right, file=file.path(OutputDirPath, "Tables by Subject", "Density Weighted",paste0(Subject_IDI,"_",Marker_NameI,"_Right.txt")), row.names=FALSE, sep = "\t")
contour(Normalized_Density_OutputDataI_MarkerI_Right,
add = TRUE, drawlabels = FALSE,
lty=1, lwd=0.5,
col=BlueToRedPalette(NbMarkers,1)[MarkerI],
zlim = c(0,1), nlevels = 10) # add the contours
} # end of if
} # end of for MarkerI
# Add 0 0 lines (clipping is re-enabled so the axes stay inside the plot region)
par(xpd=FALSE)
abline(v=0, col="grey", lwd=0.5)
abline(h=0, col="grey", lwd=0.5)
par(xpd=TRUE)
dev.off() # Close and save the graph
## Plot Filled Density: one PDF per marker, filled contours weighted by the
## mean cell count per section on each side.
for(MarkerI in 1:nlevels(OutputDataI$Marker_Name_x_Channel)){
Marker_NameI<-levels(OutputDataI$Marker_Name_x_Channel)[MarkerI]
OutputDataI_MarkerI<-OutputDataI[OutputDataI$Marker_Name_x_Channel==Marker_NameI,]
NbImages<-length(levels(OutputDataI$File_ID))
# NOTE(review): the per-image count computation below duplicates the one in
# the Contours loop above -- a candidate for extraction into a helper.
LeftCountsPerImage_MarkerI<-c()
RightCountsPerImage_MarkerI<-c()
for(ImageI in 1:length(levels(OutputDataI$File_ID))){
Image_IDI<-levels(OutputDataI$File_ID)[ImageI]
DataImageI<-OutputDataI[OutputDataI$File_ID==Image_IDI,]
LeftDataImageI<-DataImageI[DataImageI$X_Scaled<0,]
RightDataImageI<-DataImageI[DataImageI$X_Scaled>=0,]
LeftDataImageI_MarkerI<-LeftDataImageI[LeftDataImageI$Marker_Name_x_Channel==Marker_NameI,]
RightDataImageI_MarkerI<-RightDataImageI[RightDataImageI$Marker_Name_x_Channel==Marker_NameI,]
LeftCountsPerImage_MarkerI<-c(LeftCountsPerImage_MarkerI,dim(LeftDataImageI_MarkerI)[1])
RightCountsPerImage_MarkerI<-c(RightCountsPerImage_MarkerI,dim(RightDataImageI_MarkerI)[1])
}
TotalCountOutputDataI_MarkerI<-dim(OutputDataI_MarkerI)[1]
LeftCountOutputDataI_MarkerI<-dim(OutputDataI_MarkerI[OutputDataI_MarkerI$X_Scaled<0,])[1]
RightCountOutputDataI_MarkerI<-dim(OutputDataI_MarkerI[OutputDataI_MarkerI$X_Scaled>=0,])[1]
OutputDataI_MarkerI_Left<-OutputDataI_MarkerI[OutputDataI_MarkerI$X_Scaled<0,]
OutputDataI_MarkerI_Right<-OutputDataI_MarkerI[OutputDataI_MarkerI$X_Scaled>=0,]
cairo_pdf(file.path(OutputDirPath, "Graphs by Subject", "Filled Density", paste0(Subject_IDI,"_",Marker_NameI,"_Filled_Density_Graph.pdf"))) # Open the graph as pdf
par(xpd=TRUE)
Xlim=round(max(abs(c(mean(OutputDataI$LE_L_X_Scaled),mean(OutputDataI$LE_R_X_Scaled),max(abs(OutputDataI$X_Scaled))))),2)
Ylim=round(max(abs(c(mean(OutputDataI$DE_L_Y_Scaled),mean(OutputDataI$DE_R_Y_Scaled),mean(OutputDataI$VE_L_Y_Scaled),mean(OutputDataI$VE_R_Y_Scaled), max(abs(OutputDataI$Y_Scaled))))),2)
## Plot the background (type="n": axes only, points are added last so they
## sit on top of the filled contours)
plot(OutputDataI_MarkerI$X_Scaled, OutputDataI_MarkerI$Y_Scaled,
type="n", bty="n",
pch=1,lwd=0.5, cex=0.5, col="black",
xlim=c(-Xlim,Xlim), ylim=c(-Ylim,Ylim),
xaxp=c(-Xlim,Xlim,4), yaxp=c(-Ylim,Ylim,4),
main=paste0(Subject_IDI,"_",Marker_NameI),
xlab="Relative position to CC (Scaled)", ylab="Relative position to CC (Scaled)"
,pannel.first={
XCenter= ( par()$mai[2] + (par()$pin[1])/2)/(par()$din[1])
YCenter= ( par()$mai[1] + (par()$pin[2])/2)/(par()$din[2])
WidthSC= ((par()$pin[1])/2)/(par()$din[1])
HeightSC= ((par()$pin[2])/2)/(par()$din[2])
grid.picture(SpinalCordLayout, x=XCenter, y=YCenter,
width=2*WidthSC+0.1*WidthSC, height=2*HeightSC+0.1*HeightSC
,distort=TRUE)
}
)
###Add the Density for each Marker
# Unlike the Contours graph, the weighting here uses the MEAN cells/section,
# not the side's total cell count.
if(dim(OutputDataI_MarkerI_Left)[1]>2){
Density_OutputDataI_MarkerI_Left<-kde2d(OutputDataI_MarkerI_Left$X_Scaled, OutputDataI_MarkerI_Left$Y_Scaled,
n=100, lims=c(-1.5,1.5,-1.5,1.5))
Normalized_Density_OutputDataI_MarkerI_Left<-Density_OutputDataI_MarkerI_Left
Normalized_Density_OutputDataI_MarkerI_Left$z<- ((Density_OutputDataI_MarkerI_Left$z-min(Density_OutputDataI_MarkerI_Left$z))/(max(Density_OutputDataI_MarkerI_Left$z)-min(Density_OutputDataI_MarkerI_Left$z)))
Weighted_Density_OutputDataI_MarkerI_Left<-Normalized_Density_OutputDataI_MarkerI_Left
Weighted_Density_OutputDataI_MarkerI_Left$z<- mean(LeftCountsPerImage_MarkerI) * Normalized_Density_OutputDataI_MarkerI_Left$z
}
if(dim(OutputDataI_MarkerI_Right)[1]>2){
Density_OutputDataI_MarkerI_Right<-kde2d(OutputDataI_MarkerI_Right$X_Scaled, OutputDataI_MarkerI_Right$Y_Scaled,
n=100, lims=c(-1.5,1.5,-1.5,1.5))
Normalized_Density_OutputDataI_MarkerI_Right<-Density_OutputDataI_MarkerI_Right
Normalized_Density_OutputDataI_MarkerI_Right$z<- ((Density_OutputDataI_MarkerI_Right$z-min(Density_OutputDataI_MarkerI_Right$z))/(max(Density_OutputDataI_MarkerI_Right$z)-min(Density_OutputDataI_MarkerI_Right$z)))
Weighted_Density_OutputDataI_MarkerI_Right<-Normalized_Density_OutputDataI_MarkerI_Right
Weighted_Density_OutputDataI_MarkerI_Right$z<- mean(RightCountsPerImage_MarkerI) * Normalized_Density_OutputDataI_MarkerI_Right$z
}
# Both sides share one color scale (ColorRangeMax) so left/right intensities
# are comparable; the first color is fully transparent so the zero level does
# not paint over the background template.
if(dim(OutputDataI_MarkerI_Left)[1]>2 && dim(OutputDataI_MarkerI_Right)[1]>2){
ColorRangeMax<-max(c(Weighted_Density_OutputDataI_MarkerI_Left$z,Weighted_Density_OutputDataI_MarkerI_Right$z))
.filled.contour(Weighted_Density_OutputDataI_MarkerI_Left$x,
Weighted_Density_OutputDataI_MarkerI_Left$y,
Weighted_Density_OutputDataI_MarkerI_Left$z,
levels = seq(from=0, to=ColorRangeMax, by=ColorRangeMax/21), col=c(rgb(red=1,green=1,blue=1,alpha=0), BlueToRedPalette(20,0.5))
)
.filled.contour(Weighted_Density_OutputDataI_MarkerI_Right$x,
Weighted_Density_OutputDataI_MarkerI_Right$y,
Weighted_Density_OutputDataI_MarkerI_Right$z,
levels = seq(from=0, to=ColorRangeMax, by=ColorRangeMax/21), col=c(rgb(red=1,green=1,blue=1,alpha=0), BlueToRedPalette(20,0.5))
)
} else if(dim(OutputDataI_MarkerI_Left)[1]>2 && dim(OutputDataI_MarkerI_Right)[1]<=2){
ColorRangeMax<-max(c(Weighted_Density_OutputDataI_MarkerI_Left$z))
.filled.contour(Weighted_Density_OutputDataI_MarkerI_Left$x,
Weighted_Density_OutputDataI_MarkerI_Left$y,
Weighted_Density_OutputDataI_MarkerI_Left$z,
levels = seq(from=0, to=ColorRangeMax, by=ColorRangeMax/21), col=c(rgb(red=1,green=1,blue=1,alpha=0), BlueToRedPalette(20,0.5))
)
} else if(dim(OutputDataI_MarkerI_Left)[1]<=2 && dim(OutputDataI_MarkerI_Right)[1]>2){
ColorRangeMax<-max(c(Weighted_Density_OutputDataI_MarkerI_Right$z))
.filled.contour(Weighted_Density_OutputDataI_MarkerI_Right$x,
Weighted_Density_OutputDataI_MarkerI_Right$y,
Weighted_Density_OutputDataI_MarkerI_Right$z,
levels = seq(from=0, to=ColorRangeMax, by=ColorRangeMax/21), col=c(rgb(red=1,green=1,blue=1,alpha=0), BlueToRedPalette(20,0.5))
)
}
# Re-draw the anatomical landmark crosses on top of the fill
points(mean(OutputDataI$CC_X_Scaled),mean(OutputDataI$CC_Y_Scaled), col="black", pch=3, cex=0.5)
points(mean(OutputDataI$DE_R_X_Scaled),mean(OutputDataI$DE_R_Y_Scaled), col="black", pch=3, cex=0.5)
points(mean(OutputDataI$LE_R_X_Scaled),mean(OutputDataI$LE_R_Y_Scaled), col="black", pch=3, cex=0.5)
points(mean(OutputDataI$VE_R_X_Scaled),mean(OutputDataI$VE_R_Y_Scaled), col="black", pch=3, cex=0.5)
points(mean(OutputDataI$DE_L_X_Scaled),mean(OutputDataI$DE_L_Y_Scaled), col="black", pch=3, cex=0.5)
points(mean(OutputDataI$LE_L_X_Scaled),mean(OutputDataI$LE_L_Y_Scaled), col="black", pch=3, cex=0.5)
points(mean(OutputDataI$VE_L_X_Scaled),mean(OutputDataI$VE_L_Y_Scaled), col="black", pch=3, cex=0.5)
# add legend title Left
LegendLeft <- legend("topleft", legend = c(" "),inset=c(-0,-0.075),
xjust =0, yjust = 0,
title = "Avg Cell/Section (+/- SD) ; n Sections", cex=0.7, bty="n");
# add legend Count Left
text(LegendLeft$rect$left + LegendLeft$rect$w/2, LegendLeft$text$y,
c( paste0(signif(mean(LeftCountsPerImage_MarkerI),3)," (+/- ",signif(sd(LeftCountsPerImage_MarkerI),3),") ; n = ",length(LeftCountsPerImage_MarkerI))),
cex=0.7);
# add legend title Right
LegendRight <- legend("topright", legend = c(" "),inset=c(-0,-0.075),
xjust =0, yjust = 0,
title = "Avg Cell/Section (+/- SD) ; n Sections", cex=0.7, bty="n");
# add legend Count right
text(LegendRight$rect$left + LegendRight$rect$w/2, LegendRight$text$y,
c( paste0(signif(mean(RightCountsPerImage_MarkerI),3)," (+/- ",signif(sd(RightCountsPerImage_MarkerI),3),") ; n = ",length(RightCountsPerImage_MarkerI))),
cex=0.7);
# add Count Title
LegendBottom <- legend("bottom", legend = c(" ", " "),
text.width = strwidth("Left + Right = Total"),
xjust = 0.5, yjust = 0.5,
title = "Nb of Cells", cex=0.7, bty="n");
# add Count subTitle (first row of the reserved two-row legend)
text(LegendBottom$rect$left + LegendBottom$rect$w/2, LegendBottom$text$y,
c("Left + Right = Total",""), cex=0.7);
# add Count (second row)
text(LegendBottom$rect$left + LegendBottom$rect$w/2, LegendBottom$text$y,
c("",paste0(LeftCountOutputDataI_MarkerI," + ",RightCountOutputDataI_MarkerI," = ",TotalCountOutputDataI_MarkerI)),
cex=0.7);
# add marker legend
legend("bottomleft", title="Marker",
legend=Marker_NameI,bty="n",
col="black",
pch=1, cex=0.5)
# add 0 0 axis
par(xpd=FALSE)
abline(v=0, lty=1, lwd=0.5, col="grey");
abline(h=0, lty=1, lwd=0.5, col="grey");
par(xpd=TRUE)
# add data points
points(OutputDataI_MarkerI$X_Scaled, OutputDataI_MarkerI$Y_Scaled,
pch=1,lwd=0.5, cex=0.5,
col="black")
dev.off()
}## End of for Marker I
} ## End of for SubjectI
# Summarize Data Per Subject ----------------------------------------------
# Build one row per (subject x marker x side) with the subject's group, the
# number of sections, total and per-section cell counts, and the centroid /
# spread of the scaled cell coordinates; then write the table to disk.
Subjects<-data.frame(Subject_ID=levels(OutputData$Subject_ID))
Markers<-data.frame(Marker_Name=levels(OutputData$Marker_Name_x_Channel))
Sides<-data.frame(Side=c("Left","Right"))
SummaryData<-merge(Subjects, Markers)   # no common columns => full cross join
SummaryData<-merge(SummaryData, Sides)
# Attach each subject's experimental group (assumed unique per subject).
for (RowI in seq_len(nrow(SummaryData))){
Subject_IDI<-SummaryData$Subject_ID[RowI]
SummaryData$Group[RowI]<-unique(OutputData$Group[OutputData$Subject_ID==Subject_IDI])
}
for (RowI in seq_len(nrow(SummaryData))){
Subject_IDI<-SummaryData$Subject_ID[RowI]
Marker_NameI<-SummaryData$Marker_Name[RowI]
SideI<-SummaryData$Side[RowI]
OutputData_SubjectI<-OutputData[OutputData$Subject_ID==Subject_IDI ,] # Get the Data of a given Subject
# Refresh File_ID so its levels cover exactly this subject's sections.
OutputData_SubjectI$File_ID<-factor(OutputData_SubjectI$File_ID)
Nb_Section<-length(levels(OutputData_SubjectI$File_ID))
SummaryData$Nb_Section[RowI]<-Nb_Section
OutputData_SubjectI_MarkerI<-OutputData_SubjectI[OutputData_SubjectI$Marker_Name_x_Channel==Marker_NameI ,]
# Left/right split by the sign of the scaled X coordinate.
if(SideI=="Left"){
OutputData_SubjectI_MarkerI_SideI<-OutputData_SubjectI_MarkerI[OutputData_SubjectI_MarkerI$X_Scaled<0,]
} else {
OutputData_SubjectI_MarkerI_SideI<-OutputData_SubjectI_MarkerI[OutputData_SubjectI_MarkerI$X_Scaled>=0,]
}
# Cells per section: subsetting keeps all of the subject's File_ID levels, so
# sections with zero cells of this marker/side still contribute a 0.
# (seq_along + one consistent factor replaces the previous 1:length() loop
# that indexed a different -- though level-identical -- factor.)
CountsPerImage_SubjectI_MarkerI_SideI<-c()
for(FileI in seq_along(levels(OutputData_SubjectI_MarkerI_SideI$File_ID))){
File_IDI<-levels(OutputData_SubjectI_MarkerI_SideI$File_ID)[FileI]
Data_SectionI<-OutputData_SubjectI_MarkerI_SideI[OutputData_SubjectI_MarkerI_SideI$File_ID==File_IDI,]
CountsPerImage_SubjectI_MarkerI_SideI<-c(CountsPerImage_SubjectI_MarkerI_SideI,dim(Data_SectionI)[1])
}
# FIX: the total is the SUM over sections; it previously used mean(), which
# silently duplicated the Nb_Cell_Section_Avg column.
SummaryData$Nb_Cell_Total[RowI]<- sum(CountsPerImage_SubjectI_MarkerI_SideI)
SummaryData$Nb_Cell_Section_Avg[RowI]<- mean(CountsPerImage_SubjectI_MarkerI_SideI)
SummaryData$Nb_Cell_Section_Sd[RowI]<- sd(CountsPerImage_SubjectI_MarkerI_SideI)
SummaryData$Center_X[RowI] <- mean(OutputData_SubjectI_MarkerI_SideI$X_Scaled)
SummaryData$Center_Y[RowI] <- mean(OutputData_SubjectI_MarkerI_SideI$Y_Scaled)
SummaryData$Spread_X[RowI] <- sd(OutputData_SubjectI_MarkerI_SideI$X_Scaled)
# FIX: Spread_Y previously took the sd of X_Scaled (copy-paste error).
SummaryData$Spread_Y[RowI] <- sd(OutputData_SubjectI_MarkerI_SideI$Y_Scaled)
} ## End of RowI
write.table(SummaryData, file=file.path(OutputDirPath, "Summary_Data.txt"), row.names=FALSE, sep = "\t")
|
0014a1ef2bd70e4361b6ac9557b13008be36fa5a
|
ffdea92d4315e4363dd4ae673a1a6adf82a761b5
|
/data/genthat_extracted_code/EcoGenetics/examples/eco.bearing.Rd.R
|
3a47dacec1af6cbf3c47e6e6e18aab8dff6dc272
|
[] |
no_license
|
surayaaramli/typeRrh
|
d257ac8905c49123f4ccd4e377ee3dfc84d1636c
|
66e6996f31961bc8b9aafe1a6a6098327b66bf71
|
refs/heads/master
| 2023-05-05T04:05:31.617869
| 2019-04-25T22:10:06
| 2019-04-25T22:10:06
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 418
|
r
|
eco.bearing.Rd.R
|
# Auto-extracted example script for EcoGenetics::eco.bearing (angular spatial
# weights). The demo body is wrapped in a "Not run" block (##D prefix), so
# sourcing this file only attaches the package; the example code never runs.
library(EcoGenetics)
### Name: eco.bearing
### Title: Angular Spatial Weights
### Aliases: eco.bearing
### ** Examples
## Not run:
##D
##D data(eco3)
##D
##D "circle" method
##D
##D con <- eco.weight(eco3[["XY"]], method = "circle", d1 = 0, d2 = 500)
##D bearing_con <- eco.bearing(con, 90)
##D
##D W_list <- eco.lagweight(eco[["XY"]])
##D bearing_W_list <- eco.bearing(W_list, 90)
##D
## End(Not run)
# NOTE(review): the lagweight line references `eco` while the rest of the
# example uses `eco3` -- likely a typo inherited from the upstream Rd page.
|
387971ee04e851ca151a960301a50bf034b58eca
|
aa38f279e1592851d8f1ce66bc0f9a6fa6484dba
|
/R/function.R
|
702c48412bf3797e067b48b1a2e1e35c7526e8d8
|
[] |
no_license
|
asancpt/edison-pk1c
|
17833b7baace6331f4204aa63fcb27acbbc89168
|
864f09908954fcfb361d0b048860ad30fdd766fb
|
refs/heads/master
| 2021-07-07T04:49:12.571278
| 2018-01-18T03:30:58
| 2018-01-18T03:30:58
| 96,659,387
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 3,441
|
r
|
function.R
|
# Identify the host operating system as a lowercase string ("osx", "linux",
# "windows", ...). Normally derived from Sys.info()[["sysname"]]; on the rare
# platforms where Sys.info() is NULL, falls back to .Platform$OS.type refined
# by R.version$os.
Get_os <- function() {
  info <- Sys.info()
  if (is.null(info)) {
    # Fallback path for minimal builds without Sys.info().
    os <- .Platform$OS.type
    if (grepl("^darwin", R.version$os)) {
      os <- "osx"
    }
    if (grepl("linux-gnu", R.version$os)) {
      os <- "linux"
    }
  } else {
    os <- info['sysname']   # keeps the "sysname" name attribute, as before
    if (os == 'Darwin') {
      os <- "osx"           # report macOS as "osx"
    }
  }
  tolower(os)
}
# Build a 2x2-block "full" covariance matrix from a base (within-occasion)
# covariance block and a vector of between-occasion covariances:
#   rbind(cbind(BaseCov, D), cbind(D, BaseCov))
# where D starts as diag(OccsCov) and every zero entry of D is then filled
# (with recycling, column-major) by OccsCov * OccsCov.
# NOTE(review): if any element of OccsCov is exactly 0, its diagonal slot is
# also overwritten by the recycled OccsCov^2 values -- confirm that is intended.
MakeFullCov <- function(BaseCov, OccsCov) {
  if (nrow(BaseCov) != length(OccsCov)) {
    stop("Make the length of OccsCov same to dim of BaseCov")
  }
  occ_block <- diag(OccsCov)
  occ_block[occ_block == 0] <- OccsCov * OccsCov
  rbind(cbind(BaseCov, occ_block), cbind(occ_block, BaseCov))
}
# One-compartment PK model with IV bolus dosing (superposition over doses).
#
# CL, V         : clearance and volume of distribution (ke = CL/V).
# DosingHistory : two-column matrix; [,1] = dosing time, [,2] = amount.
#                 Rows with a non-positive amount are ignored.
# Time          : sampling times; assumed sorted increasing -- the zero-padding
#                 below relies on that. TODO confirm callers always sort.
# PropE, AddE   : SDs of proportional and additive residual error (sd = 0
#                 yields exact zeros, so the noiseless case is deterministic).
# LLoQ          : concentrations below this limit are reported as 0.
# Jitter        : if > 0, sampling times are jittered and rounded to that many
#                 digits; negative jittered times are clamped to 0.
#
# Returns a matrix cbind(Time, Conc).
pk1cima = function(CL, V, DosingHistory, Time, PropE=0, AddE=0, LLoQ=0, Jitter=0)
{
  nObs = length(Time)
  Conc = rep(0, nObs)
  ke = CL/V
  if (Jitter > 0) Time = round(jitter(Time), Jitter)
  Time[Time < 0] = 0
  DH = DosingHistory[DosingHistory[,2] > 0,,drop=FALSE]
  # FIX: seq_len() instead of 1:nrow(DH) -- when every dose amount is <= 0,
  # nrow(DH) is 0 and 1:0 would wrongly iterate (and error on DH[1, ]).
  for (i in seq_len(nrow(DH))) {
    cAmt = DH[i, 2]
    dTime = Time - DH[i, 1]
    dTime2 = dTime[dTime >= 0]
    # Pre-dose samples contribute 0; post-dose samples decay exponentially.
    Conc = Conc + c(rep(0, length(dTime) - length(dTime2)), cAmt/V*exp(-ke*dTime2))
  }
  # Residual error: proportional plus additive.
  Err1 = rnorm(nObs, mean=0, sd=PropE)
  Err2 = rnorm(nObs, mean=0, sd=AddE)
  Conc = Conc + Conc*Err1 + Err2
  Conc[Conc < LLoQ] = 0
  return(cbind(Time, Conc))
}
# One-compartment PK model with first-order oral absorption (superposition
# over doses). CLo = CL/F and Vo = V/F are the apparent clearance and volume.
#
# Ka            : first-order absorption rate constant. NOTE(review): the
#                 scaling term divides by (Ka - ke), so Ka == ke is undefined
#                 (flip-flop boundary) -- callers must avoid it.
# Other arguments and the returned cbind(Time, Conc) matrix are as in pk1cima.
pk1coma = function(CLo, Vo, Ka, DosingHistory, Time, PropE=0, AddE=0, LLoQ=0, Jitter=0)
{ # CLo=CL/F, Vo=V/F
  nObs = length(Time)
  Conc = rep(0, nObs)
  ke = CLo/Vo
  if (Jitter > 0) Time = round(jitter(Time), Jitter)
  Time[Time < 0] = 0
  T1 = Ka/(Ka - ke)/Vo   # absorption scaling factor of the Bateman equation
  DH = DosingHistory[DosingHistory[,2] > 0,,drop=FALSE]
  # FIX: seq_len() instead of 1:nrow(DH) -- when every dose amount is <= 0,
  # nrow(DH) is 0 and 1:0 would wrongly iterate (and error on DH[1, ]).
  for (i in seq_len(nrow(DH))) {
    dTime = Time - DH[i, 1]
    dTime2 = dTime[dTime >= 0]
    # Pre-dose samples contribute 0; post-dose follows the Bateman profile.
    Conc = Conc + c(rep(0, length(dTime) - length(dTime2)), DH[i,2]*T1*(exp(-ke*dTime2) - exp(-Ka*dTime2)))
  }
  # Residual error: proportional plus additive (sd = 0 gives exact zeros).
  Err1 = rnorm(nObs, mean=0, sd=PropE)
  Err2 = rnorm(nObs, mean=0, sd=AddE)
  Conc = Conc + Conc*Err1 + Err2
  Conc[Conc < LLoQ] = 0
  return(cbind(Time, Conc))
}
#nSubj <- 10
#CL<- 30
#V <- 100
#Ka <- 2
#BioA <- 1
##$DH1 <- matrix(c(0, 100000), nrow=1, ncol=2, byrow=TRUE)
#DH1 <- '0, 100000'
#Time <- '0, 0.25, 0.5, 1, 2, 4, 5, 7, 9, 12, 24'
#PropE <- 0.1
#AddE <- 0.1
#LLoQ <- 0
#Jitter <- 1
#FullCov <- '0.04, 0.03, 0.03, 0.04'
# Simulate a 1-compartment IV PK dataset for a virtual study.
#
# nSubj          : number of simulated subjects.
# CL, V          : population clearance and volume; individual values are
#                  log-normally distributed around them via mvrnorm.
# DH1, Time,
# FullCov        : comma-separated strings (UI inputs); each is evaluated as
#                  an R vector. eval(parse()) is kept deliberately so callers
#                  may also pass expressions such as "1:5".
#                  NOTE(review): do not expose this to untrusted input.
# PropE, AddE,
# Jitter         : residual-error SDs and time-jitter digits, forwarded to
#                  pk1cima().
#
# Returns a list with DH1 (dosing matrix), Time (numeric sampling times) and
# Conc (data.frame SUBJ/nTIME/TIME/CONC; CONC rounded to 3 digits, SUBJ a
# factor; nTIME is the nominal time, TIME the possibly jittered one).
Init <- function(nSubj, CL, V, DH1, FullCov, Time, PropE, AddE, Jitter)
{
  # Decode the string inputs into numeric structures.
  Time <- eval(parse(text = paste0("c(", Time, ")")))
  DH1 <- eval(parse(text = paste0("c(", DH1, ")")))
  DH1 <- matrix(DH1, ncol = 2, byrow = TRUE)       # columns: time, amount
  FullCov <- eval(parse(text = paste0("c(", FullCov, ")")))
  FullCov <- matrix(FullCov, nrow = 2)             # 2x2 IIV covariance (CL, V)
  nObs <- length(Time)
  # Subject-level random effects: log-normal multipliers around (CL, V).
  rpk <- MASS::mvrnorm(nSubj, rep(0, 2), FullCov)
  iPK <- matrix(rep(c(CL, V), nSubj), nrow = nSubj, byrow = TRUE) * exp(rpk)
  # One block of nObs rows per subject; TIME/CONC are filled in below.
  Conc <- data.frame(
    SUBJ = rep(seq_len(nSubj), each = nObs),
    nTIME = rep(Time, nSubj),
    TIME = NA_real_,
    CONC = NA_real_
  )
  for (SubjI in seq_len(nSubj)) {
    iConc <- pk1cima(iPK[SubjI, 1], iPK[SubjI, 2], DH1, Time,
                     PropE = PropE, AddE = AddE, LLoQ = 0, Jitter = Jitter)
    Conc[Conc$SUBJ == SubjI, c("TIME", "CONC")] <- iConc
  }
  Conc$CONC <- round(Conc$CONC, 3)
  Conc$SUBJ <- as.factor(Conc$SUBJ)
  list(DH1 = DH1, Time = Time, Conc = Conc)
}
|
46d8789ebaa2bdeae9e81df049c42d8476efdd60
|
c17beff6c0cb1303cf4608a81951e0a5456411c1
|
/run_analysis.R
|
2a779293f455a92a440a6b3d0fd3986effa61320
|
[] |
no_license
|
JohnNjenga/Project
|
8d22325b30dd1ef9779c6bd70ea4df35c298f021
|
682dbfc373524874ae2c7d3c4c73ddc15f56a862
|
refs/heads/master
| 2016-09-06T12:43:56.988298
| 2014-05-25T22:52:39
| 2014-05-25T22:52:39
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 4,183
|
r
|
run_analysis.R
|
# Coursework script: download the UCI HAR dataset, merge train/test sets,
# label columns from features.txt, subset mean/std measurements and relabel
# activities. NOTE(review): the script hard-codes machine-specific absolute
# paths and changes the working directory twice -- it is not portable as-is.
#get help on unzip to get an idea of what is expected
help(unzip)
#get the url of the zipped file
myzippedurl<-"http://d396qusza40orc.cloudfront.net/getdata%2Fprojectfiles%2FUCI%20HAR%20Dataset.zip"
#pass the url to the download.file function (plain http; consider https)
download.file(myzippedurl,destfile="Dataset.zip")
#list the files in the current working directory
list.files()
#create a directory where files will be unzipped to
#(warns if the directory already exists)
dir.create("zipped")
#unzip files to the created directory
unzipped=unzip(zipfile="Dataset.zip",exdir="zipped")
#set working directory to train directory so as to be able to import datasets
#NOTE(review): absolute path is specific to the author's machine
setwd("d://CHS/TB Program/Ref/Cleaning Data/zipped/UCI HAR Dataset/train")
#list the files in the directory
list.files()
# first read the files from "train" into R, assigning them to uniquely named objects
subjectTrain=read.table("subject_train.txt",header=FALSE)
dim(subjectTrain)
Xtrain=read.table("X_train.txt",header=FALSE,sep="", stringsAsFactors=FALSE,dec=".")
dim(Xtrain)
ytrain=read.table("y_train.txt",header=FALSE)
dim(ytrain)
#next, combine the three files column wise using cbind
#you get a data frame of 7352 rows and 563 columns. A column from subjectTrain, a column from ytrain and 563 columns from Xtrain
traindataset=cbind(subjectTrain,ytrain,Xtrain)
dim(traindataset)
#change directory to test so as to read datasets from this directory
setwd("d://CHS/TB Program/Ref/Cleaning Data/zipped/UCI HAR Dataset/test")
#list files in the directory to know if the files you need are in the directory
list.files()
# import the three files of interest into r getting their dimension in the process
subjectTest=read.table("subject_test.txt",header = FALSE)
dim(subjectTest)
Xtest=read.table("X_test.txt",header=FALSE,sep="", stringsAsFactors=FALSE,dec=".")
dim(Xtest)
yTest=read.table("y_test.txt",header=FALSE)
dim(yTest)
#now cbind the three datasets into one, ensuring the order of the combination so as to match the earlier cbind with the first set of datasets
# you get a data frame with 2947 rows and 563 columns
testdataset=cbind(subjectTest,yTest,Xtest)
dim(testdataset)
#now you are ready to rbind the two datasets obtained through cbind. the new dataset will have 10299 rows and 563 columns
newdataset=rbind(traindataset,testdataset)
dim(newdataset)
#provide descriptive names for the columns of the newdataset
#the names will come from the features file so that each measurement is a column name in the dataset
#first import the features file into r
#NOTE(review): in the standard UCI HAR layout, features.txt lives in the
#dataset root, not in test/ -- presumably this should be "../features.txt";
#verify against the unzipped folder structure.
headers=read.table("features.txt",header=FALSE,row.names=1)
#headers is a data frame with 561 rows and 1 column
#Transpose headers to make it into a dataframe with 1 row and 561 columns
headers2=t(headers)
dim(headers2)
#set the col names of the newdataset based on the first row of the headers2 data frame.
#the first two names of the newdataset are set to "subjects" and "activities" to correspond to subjects involved in the experiment and activities they were involved in
names(newdataset)=c("subjects","activities",headers2[1,])
#subset the dataset so that you only get the columns for mean and standard deviation for each measurement
# extract only columns whose names contain either "mean" or "std"
#NOTE(review): columns are listed by hand; grepl("mean\\(\\)|std\\(\\)", names(newdataset)) would cover all of them.
newdataset2=newdataset[,c("subjects","activities","tBodyAcc-mean()-X","tBodyAcc-mean()-Y","tBodyAcc-mean()-Z","tBodyAcc-std()-X","tBodyAcc-std()-Y","tBodyAcc-std()-Z","tGravityAcc-mean()-X","tGravityAcc-mean()-Y","tGravityAcc-mean()-Z","tGravityAcc-std()-X","tGravityAcc-std()-Y","tGravityAcc-std()-Z","tBodyAccJerk-mean()-X","tBodyAccJerk-mean()-Y","tBodyAccJerk-mean()-Z","tBodyAccJerk-std()-X","tBodyAccJerk-std()-Y","tBodyAccJerk-std()-Z")]
#use descriptive activity names to name the activity in dataset
newdataset2$activities[newdataset2$activities==1]="Walking"
newdataset2$activities[newdataset2$activities==2]="Walking upstairs"
newdataset2$activities[newdataset2$activities==3]="Walking downstairs"
newdataset2$activities[newdataset2$activities==4]="Sitting"
newdataset2$activities[newdataset2$activities==5]="Standing"
newdataset2$activities[newdataset2$activities==6]="Laying"
#lastly get the average of each variable in the dataset for each activity and each subject
#NOTE(review): the averaging step described above is not implemented in this script.
|
4fd09ac98f979e207c93feb614b4f8d3e2b72b2f
|
f6531909d94d2e1ded759ccd3dfd3db215708b09
|
/cachematrix.R
|
dde071586116a03b4549344b1daeddb79b938b01
|
[] |
no_license
|
charlescoulton/ProgrammingAssignment2
|
73f25f480f8be9c4c2cb59c3b9450f6c704bc93e
|
93b9dedc382ed2c4d37b6c122e2135af0f8b7c1c
|
refs/heads/master
| 2020-12-25T11:16:06.889948
| 2015-08-23T11:38:15
| 2015-08-23T11:38:15
| 41,210,316
| 0
| 0
| null | 2015-08-22T14:18:54
| 2015-08-22T14:18:53
| null |
UTF-8
|
R
| false
| false
| 1,868
|
r
|
cachematrix.R
|
# makeCacheMatrix creates a list containing a function to
# 1. set the value of the matrix
# 2. get the value of the matrix
# 3. set the value of inverse of the matrix
# 4. get the value of inverse of the matrix
# Wrap a matrix in a list of closures that cache its inverse alongside it.
# Returned accessors (the list names are the public interface):
#   set_matrix(y)  : replace the stored matrix and drop any cached inverse
#   get_matrix()   : return the stored matrix
#   set_inverse(i) : store a computed inverse (called by cacheSolve)
#   get_inverse()  : return the cached inverse, or NULL if not yet computed
makeCacheMatrix <- function(x = matrix()) {
  cached_inverse <- NULL  # nothing cached until set_inverse() is called
  set_matrix <- function(y) {
    # Replacing the matrix invalidates the previously cached inverse.
    x <<- y
    cached_inverse <<- NULL
  }
  get_matrix <- function() {
    x
  }
  set_inverse <- function(inverse) {
    cached_inverse <<- inverse
  }
  get_inverse <- function() {
    cached_inverse
  }
  list(
    set_matrix = set_matrix,
    get_matrix = get_matrix,
    set_inverse = set_inverse,
    get_inverse = get_inverse
  )
}
# The function returns the matrix inverse. It first checks if
# the inverse has already been calculated. If it has, it gets the result from cache and skips the
# computation. If not, it computes the inverse, then sets the value in the cache via
# setinverse function. It finally returns the calculated or retrieved inverse matrix
# Return the inverse of the special "matrix" created by makeCacheMatrix().
# If an inverse is already cached it is returned directly (with a message);
# otherwise it is computed with solve(), stored via set_inverse(), and
# returned. The "..." argument is accepted for interface compatibility but
# is not forwarded anywhere (matching the original behavior).
cacheSolve <- function(x, ...) { ## Return a matrix that is the inverse of 'x'
  cached <- x$get_inverse()
  if (is.null(cached)) {
    # Cache miss: invert the stored matrix, remember it, return it.
    inverse <- solve(x$get_matrix())
    x$set_inverse(inverse)
    inverse
  } else {
    message("getting cached inverse matrix")
    cached
  }
}
|
964b20bda054d8972b7e8989cc979c0ffcc30dd3
|
360df3c6d013b7a9423b65d1fac0172bbbcf73ca
|
/FDA_Pesticide_Glossary/acifluorfen.R
|
82eead67e289e327e41ee65e7b0edc6a397bf8f0
|
[
"MIT"
] |
permissive
|
andrewdefries/andrewdefries.github.io
|
026aad7bd35d29d60d9746039dd7a516ad6c215f
|
d84f2c21f06c40b7ec49512a4fb13b4246f92209
|
refs/heads/master
| 2016-09-06T01:44:48.290950
| 2015-05-01T17:19:42
| 2015-05-01T17:19:42
| 17,783,203
| 0
| 1
| null | null | null | null |
UTF-8
|
R
| false
| false
| 222
|
r
|
acifluorfen.R
|
# Render the acifluorfen report: knit acifluorfen.Rmd straight to HTML.
library("knitr")
library("rgl")  # attached for the rgl-based 3D figures used by the Rmd
# Earlier two-step pipeline (knit -> markdownToHTML -> pandoc to PDF),
# kept commented out for reference:
#knit("acifluorfen.Rmd")
#markdownToHTML('acifluorfen.md', 'acifluorfen.html', options=c("use_xhml"))
#system("pandoc -s acifluorfen.html -o acifluorfen.pdf")
knit2html('acifluorfen.Rmd')
|
1227b01b20e44d10379301c7148b6bed97c1d90d
|
2e62efd4f8a176c1bee1ee5240e8a925bbaeee93
|
/R/basic.R
|
78fe8545b3d9c7228d29d0d0181caed6d9110a3c
|
[] |
no_license
|
Gongzi-Zhang/Code
|
8ff1b30a373817eb1aa440ec2f2348b0aa792fd6
|
20c11d7acbaf017b286a10a232f1eb29b62d1ac4
|
refs/heads/master
| 2021-12-28T12:05:56.754735
| 2020-04-10T14:07:04
| 2020-04-10T14:07:04
| 120,005,998
| 0
| 0
| null | 2018-02-02T16:53:29
| 2018-02-02T16:41:49
| null |
UTF-8
|
R
| false
| false
| 20
|
r
|
basic.R
|
# The logical missing-value constant; this file's entire content.
NA # not available
|
bd7ac24e09c9a82b0bf981a438a02666bed38908
|
5fcd2765fe189b62a3a4e1d884b5add7445ea101
|
/praca licencjacka/praca_lic_skrypt_bpz.R
|
762368b27ce00db658fa63bd20643052f6ecdf6b
|
[] |
no_license
|
michalcisek/magisterka
|
ede62bbd8cb857fa4cc3cbcdeda0812830c05b97
|
a35ebb373ab8f64a05ac281179509d66f560164a
|
refs/heads/master
| 2021-03-30T16:35:15.329993
| 2017-07-06T06:27:52
| 2017-07-06T06:27:52
| 83,968,183
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 13,738
|
r
|
praca_lic_skrypt_bpz.R
|
# 1. WCZYTYWANIE DANYCH ---------------------------------------------------
rm(list=ls())
wig20<-read.csv("wig20.csv",header=T)
attach(wig20)
install.packages("Rcpp")
library(Rcpp)
install.packages("changepoint")
install.packages("quantmod")
install.packages("signal")
install.packages("mFilter")
install.packages("arules")
install.packages("ggplot2")
install.packages("ade4")
install.packages("cluster")
install.packages("fpc")
library(quantmod)
library(changepoint)
library(signal)
library(ifultools)
library(arules)
library(zoo)
library(mFilter)
library(ggplot2)
library(grid)
library(ade4)
library(cluster)
library(fpc)
# 2. CZYSZCZENIE SZEREGU --------------------------------------------------
# Savitzky-Golay FILTER
# Smooths 'data' with signal::sgolayfilt (polynomial order p, window
# length n), plots the smoothed series, and caches it in the global 'sgol'
# (later consumed by wybor()).  Side effects: plot + global assignment.
plotSavitzkyGolay<-function(data,p,n){
sgol<<-sgolayfilt(data,p,n)
plot(sgol,type="l",col="black",xlab="czas",ylab="wartosc",
main=paste("Savitzky-Golay (", p ,",", n , ")",sep=""))
}
#FILTR Baxter-King
# Baxter-King band-pass filter (mFilter::bkfilter) with band limits pl..pu.
# Plots the extracted trend component and caches it in the global 'y'
# (later consumed by wybor()).  Side effects: plot + global assignment.
plotBaxterKing<-function(data,pl,pu){
baxterking<-bkfilter(data,pl,pu)
y<<-baxterking$trend
plot(y,type="l",col="black",xlab="czas",ylab="wartosc",
main=paste("Baxter-King (", pl ,",", pu , ")",sep=""))
}
#Wykresy podsumowujace wygladzanie
par(mfrow=c(2,2))
x<-c(seq(1:5523))
wig<-as.ts(Zamkniecie)
plot(wig,type="l",col="black",main="WIG20",xlab="czas",ylab="wartosc")
plotSavitzkyGolay(Zamkniecie,3,25)
plotBaxterKing(Zamkniecie,2,100)
#Wybiera szereg uznany za najlepsze wygladzenie do dalszych operacji i zapisuje
#go jako 'wygladzone'
#1.Savitzky-Golay
#2.Baxter-King
# Selects which smoothed series feeds the rest of the analysis and stores it
# in the global 'wygladzone':
#   x == 1 -> Savitzky-Golay result (global 'sgol'),
#   else   -> Baxter-King trend (global 'y').
# Fixed: a two-way scalar branch was written with ifelse(), which is meant
# for vectors; a plain if/else is the idiomatic (and clearer) form.
wybor<-function(x){
  if (x == 1) {
    wygladzone <<- as.numeric(sgol)
  } else {
    wygladzone <<- as.numeric(y)
  }
}
wybor(1)
rm(sgol,y)
# 3.1. CHANGEPOINT PACKAGE -----------------------------------------------------
BinSeg<-cpt.mean(wygladzone,penalty="Hannan-Quinn",method = "BinSeg",Q=2000,
test.stat="CUSUM")
SegNeigh<-cpt.mean(wygladzone,penalty="SIC",method = "SegNeigh",Q=600,
test.stat="CUSUM")
PELT<-cpt.mean(wygladzone,method = "PELT", penalty = "Manual",
pen.value = "n^(3/2)")
#changepoint'y
segmentyBINSEG<-cpts(BinSeg)
ks.test(wygladzone,y='pnorm',alternative='two.sided')
# 3.2. IFULTOOLS PACKAGE --------------------------------------------------
#funkcja linearSegmentation pozwala na segment szeregu czasowego na segmenty o
#wybranej szerokosci, nastepnie laczy te, gdzie kat pomiedzy segmentami jest
#mniejszy niz angle.tolerance
# Piecewise-linear segmentation of 'data' (indexed by 'x') via
# ifultools::linearSegmentation (fit window n.fit = p, merge tolerance
# angle.tolerance = q).  Plots the piecewise approximation and stores the
# breakpoint indices in the global 'segmentyLS'.
# NOTE(review): the 'aspect' argument is accepted but the inner call
# hard-codes aspect=T -- confirm whether it should forward 'aspect'.
plotLinearSegmentation<-function(x,data,p,q,aspect=T){
segmenty<-as.numeric(linearSegmentation(x,data,n.fit=p,angle.tolerance=q,
aspect=T))
# Close the last segment at the series end...
segmenty[length(segmenty)+1]=length(data)
# ...and prepend index 1 so the first segment starts at the beginning.
segmenty1<-c(rep(NA,length(segmenty)+1))
segmenty1[2:length(segmenty1)]<-segmenty
segmenty1[1]=1
segmenty<-segmenty1
rm(segmenty1)
# Series values at the breakpoints; joined by straight lines in the plot.
wartosci<-data[segmenty]
plot(segmenty,wartosci,type="l",xlab="czas",ylab="wartosc",
main=paste("linearSegmentation (", p ,",", q , ") \n",
length(segmenty)," segmenty",sep=""))
segmentyLS<<-segmenty
}
x<-c(1:5235)
plotLinearSegmentation(x,wygladzone,5,0.1)
# 3.3. CUSTOM SEGMENTATION FUNCTION ----------------------------------------
# Merges adjacent segments with similar trends for a series that has already
# been split into segments.
#   szereg:       the (smoothed) series values
#   changepoints: indices of the detected changepoints
#   threshold:    max absolute difference (in % trend) for two adjacent
#                 segments to count as "similar" and be merged
# Side effects: stores the merged segment table in the global 'DF' and the
# merged changepoint indices in the global 'segmenty'.
MergeSimilarTrends<-function(szereg,changepoints,threshold){
# Close the last segment at the series end and prepend index 1.
changepoints[length(changepoints)+1]<-length(szereg)
changepoints1<-c(rep(NA,length(changepoints)+1))
changepoints1[2:length(changepoints1)]<-changepoints
changepoints1[1]<-1
changepoints<-changepoints1
df<-data.frame(changepoint=changepoints,close=szereg[changepoints])
# Percentage change of the series value across each segment.
trend<-rep(NA,length(changepoints))
trend[1]=((df[1,2]-szereg[1])/szereg[1])*100
for (i in 2:length(changepoints)){
trend[i]=(((df[i,2]-df[i-1,2])/df[i-1,2])*100)
}
df<-data.frame(df,trend)
# Flag segments whose trend differs from the previous one by <= threshold.
# NOTE(review): scalar ifelse() with assignment side effects works here, but
# similar[i] <- if (...) 'T' else 'F' would be the idiomatic form.
similar<-rep(NA,length(changepoints))
similar[1]='F'
for (i in 2:length(changepoints)){
ifelse(abs(df[i,3]-df[i-1,3])<=threshold,similar[i]<-'T',similar[i]<-'F')
}
df<-data.frame(df,similar)
df$similar<-as.character(similar)
# Merge by deleting the row *before* each flagged row.
# NOTE(review): if no row is flagged 'T', rows_to_delete is empty and
# df[-c(empty), ] selects ZERO rows, wiping the table -- confirm intended.
rows_to_delete<-which(grepl('T',df$similar))-1
df1<-df[-c(rows_to_delete),-c(3,4)]
# Recompute trends on the merged segments.
trend1<-rep(NA,length(df1[,1]))
trend1[1]=((df1[1,2]-szereg[1])/szereg[1])*100
for (i in 2:length(df1[,1])){
trend1[i]=(((df1[i,2]-df1[i-1,2])/df1[i-1,2])*100)
}
df1<-data.frame(df1,Trend=trend1)
DF<<-df1
segmenty<<-df1$changepoint
}
#porownanie graficzne zastosowania Binary Segmentation dla roznych parametrow
#z pierwotnym wyrownanym szeregiem
par(mfrow=c(2,2))
#x<-c(1:5523)
plot(x,wygladzone,type="l")
MergeSimilarTrends(wygladzone,segmenty,0.5)
segmentyLSost<-segmenty
plot(DF[,1],DF[,2],type="l",xlab="czas",
ylab="wartosc",main=paste("MST-LinearSegmentation, 0.5 \n",
length(segmenty)," segmenty",sep=""))
DF_LS<-DF
MergeSimilarTrends(wygladzone,segmenty,0.5)
segmentyBINSEGost<-segmenty
plot(DF[,1],DF[,2],type="l",xlab="czas",
ylab="wartosc",main=paste("MST-BinSeg, 0.5 \n",length(segmenty),
" segmenty",sep=""))
DF_BINSEG<-DF
rm(DF,segmenty,segmentyBINSEG,segmentyLS,x)
#Z FUNKCJI MergeSimilarTrends DOSTAJEMY DATA FRAME- DF (zawierajaca szczegolowe
#dane, otrzymane po merge'u segmentow o podobnych trendach otrzymanych z funkcji
#cpt.Mean) oraz osobny wektor 'segmenty', zawierajacy nowe changepoint'y
# 4. SYMBOLICZNA REPREZENTACJA SEGMENTOW ----------------------------------
#DODANIE CZASU TRWANIA
DF<-DF_LS
DF<-DF_BINSEG
Duration<-c(rep(NA,length(DF[,1])))
Duration[1]=0
for (i in 2:length(DF[,1])){
Duration[i]<-DF[i,1]-DF[i-1,1]
}
DF<-data.frame(DF,Duration)
#Statystyki czasu trwania
summary(DF[,4])
hist(DF[,4],xlim=c(0,30),freq=T,breaks=500,density=50,col="black")
#Statystyki trendu
summary(DF[,3])
hist(DF[,3],xlim=c(-12,20),freq=T,breaks=100,density=50,col="black")
#funkcja BinsTrend do wyznaczania ile procentowo znajduje sie segmentow w
#zadanych przedzialach (x1-x4)
# Percentage of rows of data[,kolumna] falling into each of five bins
# delimited by x1 < x2 < x3 < x4.  Bins are left-open / right-closed with
# unbounded outer bins: (-Inf,x1], (x1,x2], (x2,x3], (x3,x4], (x4,Inf).
# NAs are excluded from the counts but included in the denominator, as in
# the original which()-based formulation.
BinsTrend<-function(data,kolumna,x1,x2,x3,x4){
  values <- data[, kolumna]
  bins <- cut(values, breaks = c(-Inf, x1, x2, x3, x4, Inf))
  as.numeric(table(bins)) / length(values) * 100
}
#dwie propozycje dobrania przedzialow trendu
BinsTrend(DF_BINSEG,3,-4,-1,1,5)
BinsTrend(DF,3,-5,-2,2,5)
#funkcja BinsTime analogiczna to poprzedniej, tylko ze dla przedzialow czasowych
# Percentage of rows of data[,kolumna] in each of three duration bins:
# (-Inf,x1], (x1,x2], (x2,Inf) -- the time analogue of BinsTrend().
BinsTime<-function(data,kolumna,x1,x2){
  values <- data[, kolumna]
  bins <- cut(values, breaks = c(-Inf, x1, x2, Inf))
  as.numeric(table(bins)) / length(values) * 100
}
#dwie propozycje dobrania przedzialow czasu
BinsTime(DF_BINSEG,4,5,20)
BinsTime(DF,4,5,10)
DF_BINSEG<-DF
DF_LS<-DF
rm(DF)
# 4.1. Klastrowanie BINSEG ------------------------------------------------
df1<-data.frame(DF_BINSEG$Duration,DF_BINSEG$Trend)
kluster<-kmeans(df1,9)
plot(df1,pch=16,cex=0.5,main="Klastrowanie - Binary Segmentation",
xlab="Czas trwania",ylab="Trend")
kmeansRes<-factor(kluster$cluster)
s.class(df1,fac=kmeansRes, add.plot=TRUE, col=rainbow(nlevels(kmeansRes)))
DF_BINSEG$cluster <- kluster$cluster
for(i in seq_along(DF_BINSEG[,1])){
DF_BINSEG[i,5]<-paste("kl_",DF_BINSEG[i,5],sep="")
}
#data frame odpowiedni dla algorytmu apriori
DF_BINSEG[,5]<-as.factor(DF_BINSEG[,5])
nr_segmentu<-c(1:length(DF_BINSEG[,1]))
DF_BINSEG1<-data.frame(nr_segmentu,klasa=DF_BINSEG[,5])
#PODZIAl NA ZBIOR TRENINGOWY I TESTOWY
# Chronological train/test split: the first 'procent' share of rows goes to
# the global DF_BINSEG_trening, the remainder to DF_BINSEG_test.
# Fixed: 1:zmiana and (zmiana+1):nrow(dane) misbehave at the boundaries
# (zmiana == 0 yields rows c(1,0); zmiana == nrow duplicates the last row);
# seq_len()/setdiff() handle every case safely.
podzial<-function(dane,procent){
  n_total <- nrow(dane)
  train_idx <- seq_len(round(procent * n_total))
  DF_BINSEG_trening <<- dane[train_idx, ]
  DF_BINSEG_test <<- dane[setdiff(seq_len(n_total), train_idx), ]
}
podzial(DF_BINSEG1,0.8)
# 4.2. Klastrowanie LS ----------------------------------------------------
df1<-data.frame(DF_LS$Duration,DF_LS$Trend)
kluster<-kmeans(df1,7)
plot(df1,pch=16,cex=0.5,main="Klastrowanie - Linear Segmentation",
xlab="Czas trwania",ylab="Trend",
ylim=c(-20,40), xlim=c(0,50))
kmeansRes<-factor(kluster$cluster)
s.class(df1,fac=kmeansRes, add.plot=TRUE, col=rainbow(nlevels(kmeansRes)))
DF_LS$cluster <- kluster$cluster
for(i in seq_along(DF_LS[,1])){
DF_LS[i,5]<-paste("kl_",DF_LS[i,5],sep="")
}
#data frame odpowiedni dla algorytmu apriori
DF_LS[,5]<-as.factor(DF_LS[,5])
nr_segmentu<-c(1:length(DF_LS[,1]))
DF_LS1<-data.frame(nr_segmentu,klasa=DF_LS[,5])
#PODZIAl NA ZBIOR TRENINGOWY I TESTOWY
# Chronological train/test split for the Linear Segmentation variant:
# first 'procent' share of rows -> global DF_LS_trening, rest -> DF_LS_test.
# Fixed the same unsafe 1:n / (n+1):nrow sequences as in the BINSEG split.
podzial<-function(dane,procent){
  n_total <- nrow(dane)
  train_idx <- seq_len(round(procent * n_total))
  DF_LS_trening <<- dane[train_idx, ]
  DF_LS_test <<- dane[setdiff(seq_len(n_total), train_idx), ]
}
podzial(DF_LS1,0.66)
# 5. WYZNACZANIE REGUL ASOCJACYJNYCH --------------------------------------
z <- read.zoo(DF_LS_trening, header = TRUE, FUN = identity)
lags <- as.data.frame(lag(z[,2], -4:0))
lags[,1]<-as.factor(lags[,1])
lags[,2]<-as.factor(lags[,2])
lags[,3]<-as.factor(lags[,3])
lags[,4]<-as.factor(lags[,4])
lags[,5]<-as.factor(lags[,5])
lags[,6]<-as.factor(lags[,6])
pojawienie<-5
a <- apriori(lags,parameter = list( supp=pojawienie/nrow(lags),
conf=0.5,minlen=3))
c<-subset(a, subset = rhs %pin% "lag0=")
inspect(c)
reguly_BINSEG<-as(c, "data.frame")
reguly_LS<-as(c,"data.frame")
# 6. ALGORITHM EVALUATION ---------------------------------------------------
# Evaluates one association rule on a test set.
#   dane:   two-column data.frame (segment number, class factor "kl_<k>")
#   lhs:    number of antecedent lags in the rule (2, 3 or 4)
#   e1..e4: cluster numbers forming the antecedent (unused leading slots are
#           passed as placeholders for shorter rules)
#   e5:     predicted consequent cluster
# Returns a one-row data.frame: correct predictions, total antecedent
# matches, and their ratio (the rule's accuracy on 'dane').
# Fixed: the lhs==4 branch used 'wynik<<-' (leaking a global), inconsistent
# with the local 'wynik<-' of the other two branches; the return value is
# unchanged.
ocena_regul<-function(dane,lhs,e1,e2,e3,e4,e5){
if (lhs==2){
# Build a lag matrix with lhs past classes plus the current one (lag0).
z <- read.zoo(dane, header = TRUE, FUN = identity)
lags <- as.data.frame(lag(z[,2], -lhs:0))
lags[,1]<-as.factor(lags[,1])
lags[,2]<-as.factor(lags[,2])
lags[,3]<-as.factor(lags[,3])
lags_all<-lags[lags[,1]==paste("kl_",e3,sep="") &
lags[,2]==paste("kl_",e4,sep=""),]
lags_all<-na.omit(lags_all)
zliczenie<-nrow(lags_all)
poprawne<-nrow(lags_all[lags_all[,3]==paste("kl_",e5,sep=""),])
wspolczynnik<-poprawne/zliczenie
wynik<-data.frame(poprawne,zliczenie,wspolczynnik)
return(wynik)
} else if (lhs==3) {
z <- read.zoo(dane, header = TRUE, FUN = identity)
lags <- as.data.frame(lag(z[,2], -lhs:0))
lags[,1]<-as.factor(lags[,1])
lags[,2]<-as.factor(lags[,2])
lags[,3]<-as.factor(lags[,3])
lags[,4]<-as.factor(lags[,4])
lags_all<-lags[lags[,1]==paste("kl_",e2,sep="") &
lags[,2]==paste("kl_",e3,sep="") &
lags[,3]==paste("kl_",e4,sep=""),]
lags_all<-na.omit(lags_all)
zliczenie<-nrow(lags_all)
poprawne<-nrow(lags_all[lags_all[,4]==paste("kl_",e5,sep=""),])
wspolczynnik<-poprawne/zliczenie
wynik<-data.frame(poprawne,zliczenie,wspolczynnik)
return(wynik)
} else if (lhs==4){
z <- read.zoo(dane, header = TRUE, FUN = identity)
lags <- as.data.frame(lag(z[,2], -lhs:0))
lags[,1]<-as.factor(lags[,1])
lags[,2]<-as.factor(lags[,2])
lags[,3]<-as.factor(lags[,3])
lags[,4]<-as.factor(lags[,4])
lags[,5]<-as.factor(lags[,5])
lags_all<-lags[lags[,1]==paste("kl_",e1,sep="") &
lags[,2]==paste("kl_",e2,sep="") &
lags[,3]==paste("kl_",e3,sep="") &
lags[,4]==paste("kl_",e4,sep=""),]
lags_all<-na.omit(lags_all)
zliczenie<-nrow(lags_all)
poprawne<-nrow(lags_all[lags_all[,5]==paste("kl_",e5,sep=""),])
wspolczynnik<-poprawne/zliczenie
wynik<-data.frame(poprawne,zliczenie,wspolczynnik)
return(wynik)
}
}
reg<-matrix(c(0,0,5,7,7,
0,0,2,3,2,
0,0,2,2,2,
0,0,2,7,7,
0,3,7,7,7,
0,1,1,7,7,
0,7,7,1,7,
0,2,2,2,2,
0,7,2,2,2,
0,2,2,7,7,
0,7,2,7,7,
2,2,2,2,2,
2,2,2,7,7,
2,2,7,7,7,
7,2,2,2,2,
7,7,2,2,2,
7,2,7,7,7,
7,7,2,7,7,
7,7,7,7,7),ncol=5,byrow=T)
Ocena_LS<-matrix(rep(NA,57),nrow=19)
for (i in 1:nrow(reg)){
wynik<-ocena_regul(DF_LS_test,5-sum(reg[i,]==0)-1,reg[i,1],reg[i,2],
reg[i,3],reg[i,4],reg[i,5])
Ocena_LS[i,1]<-wynik[1,1]
Ocena_LS[i,2]<-wynik[1,2]
Ocena_LS[i,3]<-wynik[1,3]
}
colnames(Ocena_LS)<-c("Prawidlowo zaklasyfikowane","laczna liczba pojawien",
"Wspolczynnik dokladnosci")
#dla regul BINSEG
reg<-matrix(c(0,0,2,6,6,
0,0,4,1,7,
0,0,4,8,4,
0,0,8,4,4,
0,0,9,6,5,
0,0,5,5,4,
0,0,5,4,4,
0,0,4,6,6,
0,0,4,4,4,
0,4,8,4,4,
0,9,6,5,4,
0,6,9,6,6,
0,5,5,4,4,
0,5,4,4,4,
0,6,5,5,4,
0,4,5,6,6,
0,4,5,4,4,
0,4,4,6,6,
0,4,4,4,4),ncol=5,byrow=T)
Ocena_BINSEG<-matrix(rep(NA,57),nrow=19)
for (i in 1:nrow(reg)){
wynik<-ocena_regul(DF_BINSEG_test,5-sum(reg[i,]==0)-1,reg[i,1],reg[i,2],
reg[i,3],reg[i,4],reg[i,5])
Ocena_BINSEG[i,1]<-wynik[1,1]
Ocena_BINSEG[i,2]<-wynik[1,2]
Ocena_BINSEG[i,3]<-wynik[1,3]
}
colnames(Ocena_BINSEG)<-c("Prawidlowo zaklasyfikowane","laczna liczba pojawien"
,"Wspolczynnik dokladnosci")
|
a0edc87ed6388f4f165739ee42d6afbe87f27a42
|
8cbc4419065621a01ba5d1d4d06c6acaf6a5361d
|
/R Language-AK.R
|
37b84bf0e9970fd35f3727a03576a7c197b8fff2
|
[] |
no_license
|
ayakulo/AK-R-Language
|
1e3eba5b455c006f81474fbac421c9e5ace5dc01
|
e11fdbd82e526c6a674d6671344e54dd9a04eb6d
|
refs/heads/main
| 2023-07-01T10:11:31.284218
| 2021-08-14T04:06:46
| 2021-08-14T04:06:46
| 395,883,081
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 6,524
|
r
|
R Language-AK.R
|
#Ayan Kulov
#TP058560
#installed packages
install.packages("tidyverse")
install.packages("forcats")
install.packages("ggplot2")
install.packages("dplyr")
install.packages("hrbrthemes")
install.packages("ggpubr")
install.packages("scales")
#loading packages
library(tidyverse)
library(forcats)
library(ggplot2)
library(dplyr)
library(hrbrthemes)
library(ggpubr)
library(scales)
remotes::update_packages("rlang")
install.packages("rlang", type = "source")
#fetch data from file
weather = read.table(file="C:\\Users\\cookie\\Downloads\\weather.csv",header = TRUE, sep = ",")
View(weather)
summary(weather)
names(weather)
##### Data pre-processing #####
##Create new column for months
vec <- rep(c('Jan','Feb','Mar','Apr','May','Jun','Jul','Aug','Sep','Oct','Nov','Dec'),
times=c(31,29,31,30,31,30,31,31,30,31,30,31))
weather$Month <-vec
## replace NA with average values
##Sunshine
avg = mean(weather$Sunshine, na.rm=TRUE)
weather$Sunshine = ifelse(is.na(weather$Sunshine),avg, weather$Sunshine)
##WindGustSpeed
avg_wgs = mean(weather$WindGustSpeed, na.rm=TRUE)
weather$WindGustSpeed = ifelse(is.na(weather$WindGustSpeed),avg_wgs, weather$WindGustSpeed)
##WindSpeed9am
avg_ws = mean(weather$WindSpeed9am, na.rm=TRUE)
weather$WindSpeed9am = ifelse(is.na(weather$WindSpeed9am),avg_ws, weather$WindSpeed9am)
#Replace NA Direction with North
weather$WindGustDir = ifelse(is.na(weather$WindGustDir),'N', weather$WindGustDir)
weather$WindDir9am = ifelse(is.na(weather$WindDir9am),'N', weather$WindDir9am)
weather$WindDir3pm = ifelse(is.na(weather$WindDir3pm),'N', weather$WindDir3pm)
##### DATA ANALYSIS #####
#Visualization 1:
##Question: How does seasons and temperature vary?
##Analysis 1: Finding the overall temperature throughout the year
##Analysis 2: Determining seasonal change with temperature
ggplot(weather , aes(y=MaxTemp, x=Month, colour="#33FFFF"))+
geom_line() + geom_line(data = weather,aes(y=MinTemp,x=Month,colour="cyan"))+
scale_y_continuous(breaks = breaks_width(5)) +
scale_color_manual(labels = c("Max Temp", "Min Temp"), values = c("#00CCFF", "#FF3333"))+
labs(titles="Min and Max Temp throughout the Year",y="Temperature")
#Visualization 2:
##Question: What is the relationship between temperature and humidity?
##Analysis 3: Temperature vs humidity
##Question: Do temperature and humidity affect comfortness?
##Analysis 4: Heat or humidity
weather%>%filter(Month == "Nov")%>%ggplot(mapping=aes(x=Temp9am,y=Humidity9am))+
geom_point()+ geom_smooth(method = "lm", formula= y~x,se = FALSE)+
labs(title="Correlation between Temperature and Humidity")
#Visualization 3:
##Question: What are the rainy and non-rainy months?
##Analysis 5:
##Question: Best time to have outdoor activities?
##Analysis 6:
rain = weather%>%group_by(Month)%>%summarise(Rainfall=mean(Rainfall))%>%select(Month,Rainfall)
ggdotchart(rain,x="Month",y="Rainfall",sorting="descending",
add="segments",color="Month",add.params=list(color="#5b5c5b",size=2),
dot.size=9,label=round(rain$Rainfall,1),font.label = list(color="white",size=10,vjust=0.4),
ggtheme=theme_pubr()) +
labs(title="Average Rainfall Each Month")+theme_ipsum()
#Visualization 4 & 5:
##Question: How accurate is the prediction of the next day raining?
##Analysis 7: Overall prediction accuracy
ggplot(weather, aes(x=RainToday))+geom_bar(aes(col=RainToday))+
facet_wrap(~Month)+theme_bw()+
labs(title="RainToday Count Throughout the Year")
ggplot(weather, aes(x=RainTomorrow))+geom_bar(aes(col=RainTomorrow))+
facet_wrap(~Month)+theme_bw()+
labs(title="RainTomorow Prediction Throughout the Year")
#Visualization 6:
##Question: How high the wind gust speed can reach? Lowest wind gust speed? Normal wind gust speed?
##Analysis 8:
##Question: How often does the highest wind gust speed happen? and what is the wind gust speed that has the most count?
##Analysis 9:
weather%>%group_by(Month)%>%ggplot(aes(WindGustSpeed))+
geom_bar(fill="#00CCFF")+
theme_ipsum()+
labs(title= "Barchart of Wind Gust Speed")
#Visualization 7:
##Question: What direction does the wind gust speed change to?
##Analysis 10:
# Ordered horizontal bar chart of wind-gust direction counts.
# Fixed: theme_bw() was a dangling statement after the plot expression (it
# built an unused theme object); it must be added to the chain with '+',
# matching how the same script applies themes elsewhere.
weather%>% count(WindGustDir) %>%
  mutate(WindGustDir = fct_reorder(WindGustDir, n)) %>%
  ggplot( aes(x=WindGustDir, y=n)) +
  geom_bar(stat="identity", fill="#52ff5d", alpha=.6, width=.4) +
  coord_flip() +
  xlab("Wind Direction") + ylab("Count") + labs(title="Wind Gust Direction") +
  theme_bw()
#Visualization 8 and 9:
##Question: What is the wind direction difference at 9am and 3pm ?
##Analysis 11:
weather%>%count(WindDir9am) %>%
arrange(WindDir9am) %>%
mutate(WindDir9am=factor(WindDir9am, levels=WindDir9am)) %>%
ggplot( aes(x=WindDir9am, y=n)) +
geom_segment( aes(xend=WindDir9am, yend=0)) +
geom_point( size=4, color="cyan") +
coord_flip() +
theme_bw() +
xlab("Wind Direction") +ylab("Count") + labs(title="Wind Direction at 9am")
weather%>%count(WindDir3pm) %>%
arrange(WindDir3pm) %>%
mutate(WindDir9am=factor(WindDir3pm, levels=WindDir3pm)) %>%
ggplot( aes(x=WindDir3pm, y=n)) +
geom_segment( aes(xend=WindDir3pm, yend=0)) +
geom_point( size=4, color="purple") +
coord_flip() +
theme_bw() +
xlab("Wind Direction") +ylab("Count") + labs(title="Wind Direction at 3pm")
#Visualization 10:
##Question: What is the relationship between evaporation and sunshine?
##Analysis 12:
ggplot(weather,aes(x=Sunshine,y=Evaporation))+geom_jitter(aes(col=RainToday))+
stat_smooth(method="lm", formula = y ~ x,color="#57ffb9")+
labs(title="Correlation between Sunshine and Evaporation")+theme_bw()+ theme(
panel.background = element_rect(fill = "#2b2b2b"),
legend.key.width = unit(0.9, "cm"),
legend.key.height = unit(0.75, "cm"))
#Visualization 11:
##Question: What is the relationship between temperature and sunshine?
##Analysis 13:
##Question: How evaporation, temperature and sunshine are related to each other?
##Analysis 14:
ggplot(weather,aes(x=Sunshine,y=MaxTemp))+geom_jitter(aes(col=RainToday))+
stat_smooth(method="lm", formula = y ~ x,color="#57ffb9")+
labs(title="Correlation between Sunshine and Max Temperature")+theme_bw()+ theme(
panel.background = element_rect(fill = "#2b2b2b"),
legend.key.width = unit(0.9, "cm"),
legend.key.height = unit(0.75, "cm"))
|
cee3f8f9084f5a54ca111aebe5cf21651e811371
|
061403fe5db0657f3d1853e8745046495d0c2786
|
/data-science/9-developing-data-products/course-project/shiny-application/ui.R
|
a21b140f0be08e0d6a959ff73f1c644741f8af74
|
[] |
no_license
|
zoyanhui/Coursera-Learning
|
48989f178f1a2eb3090b12b835a771c6669ac543
|
ad12759aaef5d9a690286f2d81171470f9d16ff3
|
refs/heads/master
| 2021-01-18T15:08:22.993423
| 2016-02-28T14:56:50
| 2016-02-28T14:56:50
| 45,096,523
| 1
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,293
|
r
|
ui.R
|
# Shiny UI for the mtcars MPG-prediction app: the sidebar collects the
# predictors (cylinders, horsepower, transmission, weight) and the main
# panel shows the echoed inputs, the predicted MPG, and a documentation tab.
library(shiny)
library(datasets)
data("mtcars")
# Distinct cylinder counts from mtcars; populates the drop-down choices.
cyls <- levels(as.factor(mtcars$cyl))
shinyUI(
pageWithSidebar(
headerPanel("Predict Miles/(US) Gallon of Car"),
sidebarPanel(
selectInput(inputId="cyl", label="Number of cylinders", choices=sort(cyls),
multiple = FALSE,selected="0"),
numericInput(inputId="hp", label = "Gross horsepower", value=0, min=0),
radioButtons(inputId="am", label="Transmission", choices=c("automatic","manual")),
textInput(inputId="wt", label = "Weight (lb)"),
# Prediction is only triggered on button press (server reacts to goButton).
actionButton("goButton", "Predict")
),
mainPanel(
tabsetPanel(
tabPanel('Predict Results',
h5('Your Inputs:'),
verbatimTextOutput("cyl"),
verbatimTextOutput("hp"),
verbatimTextOutput("wt"),
verbatimTextOutput("am"),
h5("Predict MPG:"),
verbatimTextOutput("mpg")
),
tabPanel('Documentation',
includeMarkdown('help.Rmd'))
)
)
)
)
|
49cfbb9b35fefbe001d3f5932ea1694a58b5ef5c
|
e54b90b8a6edfec70bebd47a7f35a1f782560780
|
/plot1.R
|
ac98681e61934ef2777751c29dbe620ee8c0732f
|
[] |
no_license
|
okedem/ExData_Plotting1
|
5d59191aec993be3448cfabf8442be3987a1f8da
|
faabfb019adc88c5ddf2fbcb992e4d1759ac8c09
|
refs/heads/master
| 2021-01-16T17:43:56.220497
| 2016-01-10T19:02:06
| 2016-01-10T19:02:06
| 49,380,462
| 0
| 0
| null | 2016-01-10T18:53:27
| 2016-01-10T18:53:27
| null |
UTF-8
|
R
| false
| false
| 491
|
r
|
plot1.R
|
# Exploratory Data Analysis, course project 1, plot 1
# This code uses the sqldf package, use the commented-out command below if needed.
#install.packages("sqldf")
library(sqldf)
# Filter to the two target dates inside the SQL query so the full (large)
# file never has to be held in memory.
data <- read.csv.sql("household_power_consumption.txt",sep=";",
sql = "select * from file where Date='1/2/2007' or Date='2/2/2007'", eol = "\n")
# Render the histogram of global active power to plot1.png.
png(file='plot1.png')
with(data,hist(Global_active_power,main='Global Active Power',xlab='Global active power (kilowatts)',col='red'))
dev.off()
|
e2737fc58f559b598bac875401463232c3709fa0
|
0a691e5ede55e5373ae82fa69d6378d3e586c3b9
|
/extra-exercises/histograms.R
|
a554ef1be6da112e0d35e610654b890c4abc9df0
|
[] |
no_license
|
sarah127/udacity-data-analysis-with-r
|
e54d7d2fd5cb7f6e3d7d9de6c42f356ebb18eebe
|
e59db45a9f029ae68fddf365a40cad893ccc17d1
|
refs/heads/master
| 2021-07-11T23:06:40.167791
| 2020-06-05T22:22:12
| 2020-06-05T22:22:12
| 138,321,821
| 0
| 0
| null | 2018-06-22T15:57:41
| 2018-06-22T15:57:41
| null |
UTF-8
|
R
| false
| false
| 2,521
|
r
|
histograms.R
|
# Histograms excercise based on webpage:
# http://flowingdata.com/2014/02/27/how-to-read-histograms-and-use-them-in-r/
setwd('~/Repos/players-analysis-with-r/extra-exercises')
# Load and Tidy the dataset
players <- read.csv('NBA-Census-10.14.2013.csv',stringsAsFactors=FALSE)
names(players) <- gsub("\\.\\.",".",names(players))
names(players) <- gsub("\\.\\.",".",names(players))
names(players) <- gsub("$.","",names(players))
names(players) <- tolower(names(players))
names(players)
# Plot "Heights of Golden State Warriors"
warriors <- subset(players, team=="Warriors")
warriors.o <- warriors[order(warriors$ht.in.),]
barplot(warriors.o$ht.in.,
names.arg = warriors.o$name,
horiz = TRUE,
border = NA,
las = 1, # Horizontal players names
main="Heights of Golden State Warriors")
# Plot Average height of players, for each position.
avg_heights <- aggregate(ht.in. ~ pos, data = players, mean)
avg_heights.o <- avg_heights[order(avg_heights$ht.in.,decreasing = FALSE), ]
barplot(avg_heights$ht.in.,
names.arg = avg_heights$pos,
border = NA,
las = 1)
# Manual histogram construction: for each player, y[i] is how many players
# of the same (integer) height have been seen so far, producing stacked-dot
# columns when plotted against height.
# Fixes: 'xlabs' is not a valid plot() argument (should be 'xlab');
# y was grown with c() inside the loop (now preallocated); the loop uses
# seq_len(), which is safe for zero-row data.
height_ranges <- range(players$ht.in.)
counts <- rep(0, 20)
y <- numeric(nrow(players))
for (i in seq_len(nrow(players))) {
  countsIndex <- players$ht.in.[i] - height_ranges[1] + 1
  counts[countsIndex] <- counts[countsIndex] + 1
  y[i] <- counts[countsIndex]
}
plot(players$ht.in., y,
     type = "n",
     main = "Players heights",
     xlab = "inches",
     ylab = "count")
points(players$ht.in., y,
       pch = 21,
       col = NA,
       bg = "#999999")
barplot(counts,
names.arg=69:88,
main="Player heights",
xlab="inches",
ylab="count",
border=NA,
las=1)
# Now ploting some histograms
par(mfrow = c(1,3), mar = c(3,3,3,3))
hist(players$ht.in. , main="NBA Player Heights", xlab="inches",
breaks=seq(65, 90, 1))
hist(players$ht.in. , main="NBA Player Heights", xlab="inches",
breaks=seq(65, 90, 2))
hist(players$ht.in. , main="NBA Player Heights", xlab="inches",
breaks=seq(65,90, 5))
# The height distribution for each position
par(mfrow = c(2,3), las = 1, mar = c(5,5,4,1) )
positions <- unique(players$pos)
for (i in 1:length(positions)){
curr_players <- subset(players, pos == positions[i])
hist(curr_players$ht.in.,
main = positions[i],
breaks = 65:90,
xlab = "inches",
border = "#ffffff",
col = "#999999",
lwd = 0.4)
}
|
5ba8b9ba9faa0a68bb588a366dc9110fe78da9bd
|
d97ac05c04ac282164943b6f2ad1202a96f4f835
|
/mpm/ps6/pset6_q1.R
|
d69f71334537647531bf42f71b7a08c65f85a2db
|
[] |
no_license
|
TomBearpark/ECO518_code
|
d81bbba0bd674b7662ec544e27a96b9d2b1bc6f4
|
6a998f58d63160dace441cca159a406edbf264fd
|
refs/heads/main
| 2023-04-14T16:33:03.008259
| 2021-04-29T19:32:32
| 2021-04-29T19:32:32
| 335,358,433
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 6,324
|
r
|
pset6_q1.R
|
# Code for ECO518 MPM PSet 1
###########################################################
# 0 Set up, packages
###########################################################
rm(list = ls())
library(tidyverse)
library(readxl)
library(sandwich)
library(stargazer)
theme_set(theme_bw())
dir <- paste0("/Users/tombearpark/Documents/princeton/1st_year/term2/",
"ECO518_Metrics2/mpm/excercises/ps6/")
out <- paste0(dir, "out/")
setwd(dir)
set.seed(1)
###########################################################
# Question 1
###########################################################
# Contents:
# 1.0 Load in data, run the fixed effects regression
# 1.1 Try bootstrapping in all the different ways
# 1.2 Plot the bootstrap outputs
# 1.3 Construct CIs using the cluster bootstrap outputs
###########################################################
# 1.0 Load in data, run the fixed effects regression
B <- 1000
df <- read_xlsx("Guns.xlsx") %>%
mutate(log_vio = log(vio), state_fe = factor(stateid))
N <- nrow(df)
# linear FE regression..
reg1 <- paste0("log_vio ~ shall + incarc_rate + density + avginc + pop ",
"+ pb1064 + pw1064 + pm1029 + state_fe")
lm1 <- lm(data = df, formula = as.formula(reg1))
df$resid <- lm1$residuals
ses <- sqrt(diag(vcovCL(lm1, cluster = ~ state_fe)))
stargazer(lm1, keep = "shall", se = list(ses))
beta_hat <- coef(lm1)["shall"]
# Check out the SEs..
sqrt(vcovCL(lm1, cluster = ~ state_fe, type = "HC1")["shall", "shall"])
# Initial plots...
ggplot() +
geom_density(data = df, aes(x = resid, color = factor(shall)))
ggsave(paste0(out, "1_homosked.png"), height = 3, width = 5)
filter(df, stateid == 1) %>%
ggplot(aes(x = year, y = resid)) +
geom_point()
ggsave(paste0(out, "1_state_1_time_resid.png"), height = 3, width = 5)
##############################################3
# 1.1 Try bootstrapping in all the different ways
# Non-parametric bootstrap
# One non-parametric bootstrap replicate: resample rows with replacement,
# refit the fixed-effects regression, and keep the coefficient on 'shall'.
npm_boot <- function(df, i, formula){
  resampled <- slice_sample(df, prop = 1, replace = TRUE)
  fit <- lm(formula = as.formula(formula), data = resampled)
  tibble(i = i, value = coef(fit)["shall"])
}
draws_npm <- map_dfr(seq(1:B), npm_boot, df = df, formula = reg1)
sd_npm <- sd(draws_npm$value)
# Try the residual bootstrap...
# One residual-bootstrap replicate: outcome = fitted values + permuted
# residuals, then refit and keep the coefficient on 'shall'.
# NOTE(review): the argument named 'lm' shadows stats::lm; the call below
# still resolves to the function because R skips non-function bindings
# when looking up a name used in call position.
resid_boot <- function(df, i, formula, lm){
  shuffled_resid <- sample(lm$residuals)
  df$log_vio <- predict(lm) + shuffled_resid
  fit <- lm(as.formula(formula), data = df)
  tibble(i = i, value = coef(fit)["shall"])
}
draws_resid <- map_dfr(seq(1:B), resid_boot, df = df, formula = reg1, lm = lm1)
sd_resid <- sd(draws_resid$value)
# Cluster bootstrap...
# One cluster (block) bootstrap replicate: resample whole states with
# replacement, refit, and return the coefficient, its cluster-robust SE,
# and the studentized statistic (for percentile-t intervals).
# Fixed: the t statistic was stored in a variable named 'T', which masks
# the logical constant TRUE; renamed locally (the output column is still
# named T, so downstream draws_cluster$T is unaffected).
cluster_boot <- function(df, i, beta_hat, formula){
  draw <- df %>%
    group_nest(state_fe) %>%
    slice_sample(prop = 1, replace = TRUE) %>%
    unnest(cols = c(data))
  fit <- lm(formula, data = draw)
  beta <- coef(fit)["shall"]
  se <-
    sqrt(vcovCL(fit, cluster = draw$state_fe, type = "HC1")["shall", "shall"])
  t_stat <- sqrt(nrow(draw)) * (beta - beta_hat) / se
  tibble(i = i, beta = beta, se = se, T = t_stat)
}
draws_cluster <- map_dfr(1:1000, cluster_boot, df = df,
beta_hat = beta_hat, formula = reg1)
sd_cluster <- sd(draws_cluster$beta)
# Wild bootstrap
# One wild-bootstrap replicate: Rademacher weights (+/-1, prob 1/2) flip
# the sign of each residual before refitting.
# Fixed: 'replace=T' -> 'replace = TRUE' (T is a reassignable binding,
# not a reserved word).
wild_boot <- function(df, i, formula, lm){
  df$omega <- sample(c(-1, 1), nrow(df), replace = TRUE)
  df$log_vio <- predict(lm) + df$omega * lm$residuals
  reg <- lm(data = df, as.formula(formula))
  tibble(i = i, value = coef(reg)["shall"])
}
draws_wild <- map_dfr(seq(1:B), wild_boot, df = df, formula = reg1, lm = lm1)
sd_wild <- sd(draws_wild$value)
# Wild Cluster bootstrap
# One wild-cluster-bootstrap replicate: a single Rademacher weight per
# state, shared by all of that state's observations.
# Fixed: 'replace=T' -> 'replace = TRUE' (T is a reassignable binding).
wild_cluster_boot <- function(df, i, formula, lm){
  nclusters <- length(unique(df$state_fe))
  draw <- df %>%
    group_nest(state_fe) %>%
    mutate(omega = sample(c(-1, 1), nclusters, replace = TRUE)) %>%
    unnest(c(data))
  draw$log_vio <- predict(lm) + draw$omega * lm$residuals
  reg <- lm(data = draw, as.formula(formula))
  tibble(i = i, value = coef(reg)["shall"])
}
draws_wild_cluster <- map_dfr(seq(1:B), wild_cluster_boot,
df = df, formula = reg1, lm = lm1)
sd_wild_cluster <- sd(draws_wild_cluster$value)
###########################################################
# 1.2 Plot the bootstrap outputs
# Bind all the data together
plot_df_relevant <- draws_npm %>%
mutate(Bootstrap = paste0("Non-Parametric, sd = ", round(sd_npm, 4))) %>%
bind_rows(
draws_resid %>%
mutate(Bootstrap = paste0("Residual, sd = ", round(sd_resid, 4)))
) %>%
bind_rows(
draws_cluster %>% select(i, value = beta) %>%
mutate(Bootstrap = paste0("Clustered, sd = ", round(sd_cluster, 4)))
)
plot_df_relevant %>% ggplot() +
geom_density(aes(x = value, color = Bootstrap)) +
ggtitle(paste0(B, " Bootstrap Draws")) +
ggsave(paste0(out, "1_bs_comparisons_relevant.png"), height= 5, width = 9)
plot_df <- plot_df_relevant %>%
bind_rows(
draws_wild %>%
mutate(Bootstrap = paste0("Wild, sd = ", round(sd_wild, 4)))
) %>%
bind_rows(
draws_wild_cluster %>%
mutate(Bootstrap = paste0("Wild Cluster, sd = ",
round(sd_wild_cluster, 4)))
)
filter(plot_df, str_detect(Bootstrap, "Clustered")) %>%
ggplot() +
geom_density(aes(x = value)) +
ggtitle(paste0(B, " Clustered Bootstrap Draws for Coefficient on Shall")) +
ggsave(paste0(out, "1_bs_clustered.png"), height= 5, width = 6)
plot_df %>% ggplot() +
geom_density(aes(x = value, color = Bootstrap)) +
ggtitle(paste0(B, " Bootstrap Draws")) +
ggsave(paste0(out, "1_bs_comparisons.png"), height= 5, width = 9)
###########################################################
# 1.3 Construct CIs using the cluster bootstrap outputs
# 1.3.1 Effron CI
# Effron...
p_vals <- c(0.025, 0.975)
effron_ci <- quantile(draws_cluster$beta, p_vals)
effron_ci
# 1.3.1 Percentile-T CI
percentile_ci <-
beta_hat - quantile(draws_cluster$T, p_vals)[c(2,1)] * sd_cluster / sqrt(N)
percentile_ci
# Plot the density function, and the CIs
ggplot() +
geom_density(data = draws_cluster, aes(x = beta)) +
geom_vline(xintercept = effron_ci, color = "red") +
geom_vline(xintercept = percentile_ci, color = "blue") +
geom_vline(xintercept = beta_hat, color = "green")
ggsave(paste0(out, "1_CIs.png"), height = 5, width = 7)
|
7a2969b4d98e8b1835991fa9fbb26990220b8863
|
351830915f1c61a935e60c8f048a7adbdcc6ec5d
|
/amd_templates/metadata/prevotella/analysis_metadata.R
|
d421fc1a113b230fc27f617608ee97ec0e147ad9
|
[] |
no_license
|
ohsu-microbiome/utilities
|
60ab203da11966abd862f6b18975c246f09f2f72
|
dc1980f89dc6feb5161172745130a11fbee9dba6
|
refs/heads/master
| 2021-07-01T10:56:09.523510
| 2020-06-19T22:04:44
| 2020-06-19T22:04:44
| 152,392,715
| 2
| 3
| null | 2020-09-29T02:00:51
| 2018-10-10T08:53:42
|
HTML
|
UTF-8
|
R
| false
| false
| 932
|
r
|
analysis_metadata.R
|
#!/usr/bin/env Rscript
# Analysis configuration for the 'prevotella' microbiome pipeline; every
# value below is consumed by the downstream report templates.
localdir = getwd()
clustering_level = 'Genus'
analysis_type = 'prevotella'
# knitr chunk options injected verbatim into the generated Rmd documents.
knitr_options="
knitr::opts_chunk$set(
echo=TRUE,
dpi=300,
fig.width=12
)"
# Taxon filtering thresholds.
relative_abundance_cutoff = 0.002
prevalence_cutoff = 0.1
min_count_cutoff = 0
# Variable lists are stored as strings of R code, presumably deparsed into
# the templates -- TODO confirm they are eval'd/parsed downstream.
raw_exp_vars='c()'
calculated_exp_vars = 'c("FractionPrevotellaAll", "FractionBacteroidetes", "FractionPrevotellaBacteroidetes")'
test_categorical_variables = 'c("PctPrevotella_gt_1", "PctPrevotella_gt_5", "PctPrevotella_gt_10")'
reference_categorical_variables = 'c("CaseString", "Gender", "AREDS", "Tissue_code", "ARMS2rs10490924", "CFHrs1061170", "CFHrs10737680", "SKIV2Lrs429608")'
regression_transform = 'log'
# Regularizer added before the log transform to avoid log(0).
log_regularizer = relative_abundance_cutoff
# NOTE(review): prefer FALSE over F (F is a reassignable binding).
use_allbac = F
# Report templates to render for this analysis type.
template_types = c(
'prevotella_data_prep',
'linear_regression',
'linear_regression_plots',
'logistic_regression',
'two_group_tests',
'two_group_plots',
'chisquare_tests'
)
|
cef5f05f42dcc67fb6ff014f5ddfae254a520d43
|
a0830531052bd2330932c3a2c9750326cf8304fc
|
/vmstools/man/getMetierClusters.rd
|
3fd2dab2656f69804381b758881d55f4a2dfb16f
|
[] |
no_license
|
mcruf/vmstools
|
17d9c8f0c875c2a107cfd21ada94977d532c882d
|
093bf8666cdab26d74da229f1412e93716173970
|
refs/heads/master
| 2021-05-29T20:57:18.053843
| 2015-06-11T09:49:20
| 2015-06-11T09:49:20
| 139,850,057
| 2
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 10,505
|
rd
|
getMetierClusters.rd
|
\name{getMetierClusters}
\alias{getMetierClusters}
\title{
Finding metiers from a reduced EFLALO dataset, step 3: clustering logevents using various multivariate methods
}
\description{
This function represents the third step in the workflow processing logbooks data for identifying metiers.
This step allows applying various clustering analyses on the data sets coming out of the first and second step. All methods will lead to
a classification of all individuals (logevents), but they differ in their nature and then consequently in their outcomes. The four methods available are
- Hierarchical Ascending Classification (HAC), with user-defined method for estimating distances and link for aggregating individuals\cr
- K-Means,\cr
- Partitioning Around Medoids (PAM),\cr
- Clustering LARge Applications (CLARA).
The HAC method works by calculating the distance between individuals using the method selected with param1 ("euclidean", "maximum", "manhattan", "canberra", "binary" or "minkowski")
and aggregating them based on the distance between clusters, using the link selected with param2 ("ward", "single", "complete", "average", "mcquitty", "median" or "centroid").
In HAC, the number of classes is determined afterwards, once all combinations have been calculated, by using the objective criteria of scree test which
detects the third marginal loss of inertia between two consecutive numbers of classes. Therefore, the computing time and memory request for this method
can be quite comprehensive, and may reach memory limits on standard PC when operating with very large datasets.
The K-Means method works by randomly choosing k individuals, or kernels (k corresponding to the final number of classes), and then affecting each individuals of the
dataset to the closest kernel. Each time, the gravity center of the class is recalculated, thus reinitialising the calculation of distances to the next
individual. In order to define the most appropriate number of classes, this procedure is repeated with different values for k, from 2 to 15. The final number
of classes is identified by using the criteria of scree test which detects the third marginal loss of inertia between two consecutive numbers of classes.
The PAM method works slightly around the same principle, starting with the initialisation of k medoids. The medoid is the individual in a class which shows
least dissimilarity with other individuals in the same class, and the remaining individuals are affected to their closest medoid. Then the sum of dissimilarities
is calculated and compared with the sum of dissimilarities if any other individual in the class had been playing the role of the medoid, and then the medoid
is eventually adjusted accordingly, until full stabilisation of the procedure. The most appropriate number of classes is identified by using the estimated silhouette
of the classification for each value of k. The silhouette represent an average comparison of the distance between an individual and the other individuals from its
class, and between the same individual and the other individuals from the next closest class, and is therefore an objective measurement of the quality of the classification.
The final number of classes retained is the one for which the second maximum of the silhouettes is reached. It is to be noted that the PAM method is not designed
for working with very large datasets (>10 000 lines), and may quickly reach memory limits.
The CLARA method is an extension of the PAM algorithm aiming at working with large datasets, and is therefore more efficient and powerful than PAM.
It works by sampling 5 subsets of the dataset for each value of k and running the PAM algorithm explained above on these subsets, and then to keep only the subset
giving the best classification (silhouette). As in PAM, the final number of classes retained is the one for which the second maximum of the silhouettes is reached.
Afterwards, all remaining individuals are affected to their closest medoid, using user-defined method (param1) for calculating distances ('euclidean' being used
as the default, but 'manhattan' could also be used).
}
\usage{
getMetierClusters(datSpecies,datLog,analysisName="",methMetier="clara",
param1="euclidean",param2=NULL)
}
\arguments{
\item{datSpecies}{numerical matrix with logevents as rows and species as columns, with percentage values (between 0 and 100) of each species in the logevent catches.
Logevent ID (LE_ID) should be as row names. Typically, this input table will be produced from the step 1 of the metier analysis applied on the eflalo initial data,
using the function extractTableMainSpecies()
}
\item{datLog}{numerical matrix with logevents as rows, and values to be used for calculating distances between individuals as columns.
Typically, this input table is produced by the step 2 of the metier analysis, using the function getTableAfterPCA(). If a PCA was run,
selected Principal Components will appear as columns. If no PCA was run, the matrix will be the same as datSpecies, with percentage values by species.
}
\item{analysisName}{character, the name of the run. Used for the file name of the plots.
}
\item{methMetier}{character. The name of the clustering method to be used. Must be chosen between "hac", "kmeans", "pam" and "clara".
}
\item{param1}{character. Parameter used for choosing the method calculating distances between individuals, to be used in HAC and CLARA algorithms.
For HAC, it can be chosen between "euclidean", "maximum", "manhattan", "canberra", "binary" or "minkowski". For CLARA, between "euclidean" and "manhattan".
For PAM and K-means, this must be set to NULL.
}
\item{param2}{character. Parameter used for choosing the method calculating distances between clusters, to be used in HAC algorithm.
For HAC, it can be chosen between "ward", "single", "complete", "average", "mcquitty", "median" or "centroid".
For PAM, CLARA and K-means, this must be set to NULL.
}
}
\value{
For any of the four methods used, a number of graphs are produced and saved directly in the working directory.
They describe 1) projections of results on factorial plans, 2) the mean profile of the whole dataset, i.e. the average percentage of the various species in a
logevent across all individuals, 3) the mean and standard deviation profile in terms of average percentage of catch by species within each cluster,
4) the number of logevents by clusters, and 5) the profile of test-values by cluster.
Finally, the function returns a list with a number of results and diagnostics on the performance of the method:
\item{LE_ID_clust}{a data frame with two columns, linking the initial ID name of the Logevent (LE_ID) with the cluster metier where the ID has been allocated.
}
\item{clusters}{diagnostics of the clustering process. It may vary between the four methods.
}
\item{betweenVarClassifOnTot}{percentage of variance explained by the classification.
}
\item{nbClust}{final number of clusters retained.
}
\item{summaryClusters}{array documenting, for each cluster, the minimum, mean, maximum, as well as the 25\%, 50\% and 75\% quantiles values of the
percentage of catch by species for the individual logevents in the cluster.
}
\item{testValues}{matrix of test-values by species and cluster. The test-value measures for each species the difference between the average percentage
of catches in the cluster compared to the average percentage of catch in the total dataset, thus large positive values (>1.98) will point out the most
characteristic species in the clusters.
}
\item{testValuesSpecies}{a tabulated list ranking the most characteristic species in the clusters (ordering species with a test-value > 1.98 by decreasing
order of test-value).
}
\item{percLogevents}{a matrix giving the percentage of logevents catching each species by cluster.
}
\item{descClusters}{a data frame giving some descriptive statistics for each cluster, like cluster size, number of species needed to have at least 50\% of the
cluster's total catch, number of species with a test-value > 1.98 in the cluster, number of species caught in at least 50\% of the logevents, etc...
}
\item{tabClusters}{a 3d-array giving a table summarizing for each cluster the most important species (in terms of catch),
the associated test-value, and the percentage of logevents of the cluster catching these species.
}
\item{targetSpecies}{a list giving the target species by cluster.
}
}
\references{Development of tools for logbook and VMS data analysis. Studies for carrying out the common fisheries policy No MARE/2008/10 Lot 2}
\author{Nicolas Deporte, Sebastien Demaneche, Stephanie Mahevas (IFREMER, France), Clara Ulrich, Francois Bastardie (DTU Aqua, Denmark)}
\note{A number of libraries are initially called for the whole metier analyses and must be installed : (FactoMineR),(cluster),(SOAR),(amap),(MASS),(mda)}
\seealso{\code{\link{getEflaloMetierLevel7}}, \code{\link{selectMainSpecies}},
\code{\link{extractTableMainSpecies}}, \code{\link{getMetierClusters}},
\code{\link{getTableAfterPCA}}}
\examples{
\dontrun{
data(eflalo)
eflalo <- formatEflalo(eflalo)
eflalo <- eflalo[eflalo$LE_GEAR=="OTB",]
# note that output plots will be sent to getwd()
analysisName <- "metier_analysis_OTB"
dat <- eflalo[,c("LE_ID",grep("EURO",colnames(eflalo),value=TRUE))]
names(dat)[-1] <- unlist(lapply(strsplit(names(dat[,-1]),"_"),function(x) x[[3]]))
explo <- selectMainSpecies(dat, analysisName, RunHAC=TRUE, DiagFlag=FALSE)
#=> send the LE_ID and LE_KG_SP columns only
Step1 <- extractTableMainSpecies(dat, explo$NamesMainSpeciesHAC,
paramTotal=95, paramLogevent=100)
#=> send the LE_ID and LE_KG_SP columns only
rowNamesSave <- row.names(Step1)
row.names(Step1) <- 1:nrow(Step1)
# Run a PCA
Step2 <- getTableAfterPCA(Step1, analysisName, pcaYesNo="pca",
criterion="70percents")
row.names(Step1) <- rowNamesSave
row.names(Step2) <- rowNamesSave
# Define a metier for each logevent running the CLARA algorithm
Step3 <- getMetierClusters(Step1, Step2, analysisName,
methMetier="clara", param1="euclidean", param2=NULL)
}
}
|
206f01bf28f77b01ad715e4d955ce234d5e02096
|
44718933513647e2fa74fc6cfaee6547631c9be9
|
/app.R
|
3605d82239addc778f4d9243da0cc9c604218c84
|
[] |
no_license
|
paleolimbot/shinyex_enfr
|
a0ffefa1f9ee6be0a4e416b5efb78633f785c582
|
2a9541d224d5251ef87c328627b1012db4f3b525
|
refs/heads/master
| 2023-03-27T02:16:06.482552
| 2021-03-19T13:23:14
| 2021-03-19T13:23:14
| 349,430,970
| 3
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 3,834
|
r
|
app.R
|
library(shiny)
library(shiny.i18n)
library(shinyjs)
# Translations are defined in translation.json. "key" is used as the
# key language, but you could omit this and use "en" or "fr" as the key
# language as well. An abbreviated key helps because there are some longer
# bits (like the text of an "about" page) where using the English version
# as a "key" is problematic.
i18n <- Translator$new(translation_json_path = "translation.json")
# Set the default translation language to the first non-key language
i18n$set_translation_language(setdiff(i18n$get_languages(), "key")[1])
# Use i18n$t("key_value") for any user-facing text in the ui
ui <- fluidPage(
  useShinyjs(),
  usei18n(i18n),
  titlePanel(i18n$t("window_title")),
  sidebarLayout(
    sidebarPanel(
      # Language switcher: each anchor sets input$lang on the server via
      # Shiny.setInputValue. This could also be a radioButtons or
      # selectInput with inputId = "lang", but plain anchors allow these
      # to be arbitrary html elements for styling purposes.
      div(
        a(href = "javascript: Shiny.setInputValue('lang', 'fr')", "Fr"),
        a(href = "javascript: Shiny.setInputValue('lang', 'en')", "En")
      )
    ),
    mainPanel(
      # lang_dummy is an invisible helper output that drives the initial
      # language-detection logic (see server); test_output shows a sample
      # translated string.
      verbatimTextOutput("lang_dummy"),
      verbatimTextOutput("test_output")
    )
  )
)
server <- function(input, output, session) {
  # Use i18n$t("key_value") for any user-facing text in the server
  output$test_output <- renderText({
    i18n$t("window_title")
  })
  # Ask the browser for its preferred language, and register a JS handler
  # that lets the server change the window title (not otherwise possible
  # from R at runtime). 'lang_initial_auto' arrives asynchronously, so it
  # must be treated like any other input that could change at any time.
  runjs("
    var usr_lang_initial_auto = window.navigator.userLanguage || window.navigator.language;
    Shiny.setInputValue('lang_initial_auto', usr_lang_initial_auto);
    Shiny.addCustomMessageHandler('changetitle', function(x) { document.title = x });
  ")
  # An empty output, rendered initially and again when 'lang_initial_auto'
  # is set (on page load). It resolves the initial language with priority:
  # ?lang= query parameter > browser language > first non-key language.
  # session$userData$lang_initial acts as a run-once latch.
  output$lang_dummy <- renderText({
    query <- parseQueryString(session$clientData$url_search)
    new_lang <- NULL
    has_initial_lang <- exists("lang_initial", session$userData)
    if (!has_initial_lang && !is.null(query$lang)) {
      # 1) an explicit ?lang= in the URL wins
      new_lang <- query$lang
      session$userData$lang_initial <- query$lang
    } else if (!has_initial_lang && !is.null(input$lang_initial_auto)) {
      # 2) browser-reported language; input value will be something like en-CA
      new_lang <- substr(input$lang_initial_auto, 1, 2)
      # if the user's language isn't in the translation key, use the first
      # non-key language
      if (!(new_lang %in% i18n$get_languages())) {
        new_lang <- setdiff(i18n$get_languages(), "key")[1]
      }
      session$userData$lang_initial <- new_lang
    } else if (!exists("lang", session$userData)) {
      # 3) fallback: first non-key language
      new_lang <- setdiff(i18n$get_languages(), "key")[1]
    }
    if (!is.null(new_lang)) {
      session$userData$lang <- new_lang
      update_lang(session, new_lang)
      updateQueryString(paste0("?lang=", new_lang), mode = "replace")
      # window title doesn't quite work with i18n$t()
      session$sendCustomMessage(
        "changetitle",
        i18n$get_translations()["window_title", new_lang]
      )
    }
  })
  # Observe language changes from the Shiny input (the Fr/En links in the
  # UI set input$lang via Shiny.setInputValue).
  observeEvent(input$lang, {
    new_lang <- input$lang
    update_lang(session, new_lang)
    updateQueryString(paste0("?lang=", new_lang), mode = "replace")
    session$userData$lang <- new_lang
    # window title doesn't quite work with i18n$t()
    session$sendCustomMessage(
      "changetitle",
      i18n$get_translations()["window_title", new_lang]
    )
  })
}
shinyApp(ui, server)
|
dfb755cb2a319df79882c2ccc34e4936780f4f19
|
74df9ce87872f43ff6836563cd8019eb9b95f5b0
|
/2_observations/src/munge_flow_dat.R
|
80f3d13a10f83a3a92b5c6bd36bd82227af84614
|
[] |
no_license
|
USGS-R/delaware-model-prep
|
017f0d9f727d5d5b4449cd69758c4b32f12860ed
|
45e1ffeee7d6ea4a95e374e16cbc1196bf703f41
|
refs/heads/main
| 2023-06-08T19:38:46.764070
| 2023-06-01T23:56:42
| 2023-06-01T23:56:42
| 202,405,091
| 2
| 14
| null | 2023-04-07T23:28:32
| 2019-08-14T18:31:12
|
R
|
UTF-8
|
R
| false
| false
| 1,524
|
r
|
munge_flow_dat.R
|
# munge flow dat
get_flow_sites <- function(flow_ind, temp_sites_ind, out_ind) {
  # Identify flow gage sites that do not appear in the temperature data,
  # fetch their NWIS site metadata, and push the result to the shared cache.
  #
  # Args:
  #   flow_ind: indicator file for the flow data RDS (resolved via sc_retrieve).
  #   temp_sites_ind: indicator file for the temperature site table.
  #   out_ind: indicator file under which the site metadata is saved/pushed.
  flow_dat <- readRDS(sc_retrieve(flow_ind, 'getters.yml')) %>%
    distinct(site_id) %>% pull(site_id)
  flow_sites <- paste0('USGS-', flow_dat)
  # find sites not in temperature data. Base setdiff() is the right tool for
  # character vectors; the previous lubridate::setdiff only adds behavior for
  # date-time objects and fell through to base semantics here anyway.
  temp_sites <- readRDS(sc_retrieve(temp_sites_ind, 'getters.yml')) %>%
    as_tibble() %>%
    distinct(site_id) %>% pull(site_id)
  flow_missing <- setdiff(flow_sites, temp_sites)
  # Strip the 'USGS-' prefix back off for the NWIS query.
  flow_site_meta <- dataRetrieval::whatNWISsites(sites = gsub('USGS-', '', flow_missing))
  saveRDS(flow_site_meta, as_data_file(out_ind))
  gd_put(out_ind)
}
munge_split_flow <- function(dat_ind, sites_ind, holdout_water_years,
                             holdout_reach_ids, out_ind) {
  # Munge raw flow observations onto DRB river segments and mark holdouts.
  #
  # Args:
  #   dat_ind: indicator file for the raw flow RDS (site_id, date, flow_cfs).
  #   sites_ind: indicator file for the DRB site table (site_id, seg_id_nat,
  #     subseg_id).
  #   holdout_water_years, holdout_reach_ids: passed through to
  #     mark_time_space_holdouts() to flag train/test splits.
  #   out_ind: indicator file under which the munged table is saved/pushed.
  flow_dat <- readRDS(sc_retrieve(dat_ind, 'getters.yml'))
  drb_sites <- readRDS(sc_retrieve(sites_ind, 'getters.yml'))
  dat_drb <- flow_dat %>%
    # prefix IDs to the 'USGS-' convention and convert cfs -> cms
    # (1 m^3 = 35.314666 ft^3), rounded to 3 decimals
    mutate(site_id = sprintf('USGS-%s', site_id),
           discharge_cms = round(flow_cfs / 35.314666, 3)) %>%
    select(-flow_cfs) %>%
    filter(!is.na(discharge_cms)) %>%
    # keep only sites that map onto DRB segments
    filter(site_id %in% unique(drb_sites$site_id)) %>%
    left_join(distinct(drb_sites, site_id, seg_id_nat, subseg_id), by='site_id') %>%
    # average discharge when multiple gages fall on the same segment/date,
    # recording the contributing site ids as a ';'-joined string
    group_by(seg_id_nat, subseg_id, date) %>%
    summarize(discharge_cms = mean(discharge_cms),
              site_id = paste(site_id, collapse = ';')) %>%
    ungroup() %>%
    mark_time_space_holdouts(holdout_water_years, holdout_reach_ids)
  saveRDS(dat_drb, as_data_file(out_ind))
  gd_put(out_ind)
}
|
0ff0ccc7e7f2c33252d60d0dba5140348c5147f5
|
3eeb5ee6e7b43bb4a1ed950c883b3c7af4af2a17
|
/forestplot.R
|
4894e27b2eae9ddfde3dc82f83009b09bb49a16a
|
[] |
no_license
|
raghunandanalugubelli/CASAS
|
d59b655174a0b211a943e110db4e464c1f4133c7
|
ed3000e878816e89482feba48fd76f2e2943e912
|
refs/heads/master
| 2021-09-03T07:14:34.453033
| 2018-01-06T20:38:56
| 2018-01-06T20:38:56
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,212
|
r
|
forestplot.R
|
forestplot <- function(CI_Data)
{
  # Draw a forest plot of point estimates with confidence intervals.
  #
  # Args:
  #   CI_Data: data frame whose column 1 is an ID, column 2 a text label
  #     (e.g. gene name), columns 3:4 the CI bounds (named CI_lower and
  #     CI_higher), plus PointEstimate and boxcolor columns.
  #
  # Called for its side effect of plotting; assumes ggplot2 is attached.

  # Hoist the overall CI range; reused for gap size, label placement,
  # and the baseline/arrow segments.
  ci_min <- min(CI_Data[, 3:4])
  ci_max <- max(CI_Data[, 3:4])

  # Wider label gap for wide value ranges. Scalar condition, so plain
  # if/else rather than ifelse().
  addgap <- if ((ci_max - ci_min) > 10) 2.5 else 0.15

  # BUG FIX: the label y-position previously used CI_Data[1:10, 3:4], which
  # produces NA (and a broken plot) for fewer than 10 rows and silently
  # ignores rows beyond the tenth; use all rows instead.
  label_y <- ci_min - addgap

  p <- ggplot(CI_Data, aes(factor(ID))) + labs(x = NULL, y = NULL)
  # text labels (e.g. gene names) to the left of the interval area
  p <- p + geom_text(data = CI_Data,
                     aes(x = factor(ID), y = label_y, label = CI_Data[, 2],
                         vjust = 0.5, hjust = 0.25, size = 2))
  # reference line at 0 (appears vertical after coord_flip)
  p <- p + geom_hline(aes(yintercept = 0), linetype = 2, size = 0.1)
  # confidence-interval error bars
  p <- p + geom_errorbar(data = CI_Data,
                         aes(x = factor(ID), ymin = CI_lower, ymax = CI_higher),
                         color = CI_Data$boxcolor, width = 0.1)
  # point estimates as filled squares
  p <- p + geom_point(data = CI_Data,
                      aes(x = factor(ID), y = PointEstimate),
                      shape = 22, size = 5,
                      fill = CI_Data$boxcolor, colour = CI_Data$boxcolor)
  p <- p + scale_x_discrete(labels = CI_Data$ID)
  p <- p + coord_flip()
  # invisible anchor point so the panel extends to ci_max
  p <- p + geom_point(aes(0.5, ci_max), shape = 3, alpha = 0)
  # baseline with arrows toward both ends
  p <- p + geom_segment(aes(x = 0.5, y = ci_min, xend = 0.5, yend = ci_max))
  p <- p + geom_segment(aes(x = 0.5, y = ci_min, xend = 0.5, yend = ci_max),
                        arrow = arrow(length = unit(0.02, "npc")),
                        linetype = 1, size = 1) # right arrow
  p <- p + geom_segment(aes(x = 0.5, y = ci_min, xend = 0.5, yend = ci_min - addgap),
                        arrow = arrow(length = unit(0.02, "npc")),
                        linetype = 1, size = 1) # left arrow
  # strip axes/grid so only the forest plot elements remain
  p <- p + theme(axis.line = element_blank(),
                 axis.text.y = element_blank(),
                 axis.ticks.y = element_blank(),
                 axis.title.x = element_blank(),
                 axis.title.y = element_blank(),
                 axis.ticks.length = unit(0.0001, "mm"),
                 legend.position = "none",
                 panel.background = element_rect(fill = "transparent"),
                 panel.border = element_blank(),
                 panel.grid.major = element_blank(),
                 panel.grid.minor = element_blank())
  plot(p)
}
|
74f1843a3c56232c1acbddfce0c581e7364c35de
|
5b5d378306b858d380511aa0dae4cec09a3ff823
|
/cachematrix.R
|
2cbc5c65f134fb21bc4a5a09b566c2fdd08e69a9
|
[] |
no_license
|
rangastyle/ProgrammingAssignment2
|
ad9eb258058b5cdb89a431567a413450cba73940
|
6e320843188e65ae0a65d100dd0dc46922fe8c0e
|
refs/heads/master
| 2021-01-24T15:22:55.754228
| 2015-05-24T05:29:33
| 2015-05-24T05:29:33
| 36,102,829
| 0
| 0
| null | 2015-05-23T01:41:09
| 2015-05-23T01:41:08
| null |
UTF-8
|
R
| false
| false
| 1,228
|
r
|
cachematrix.R
|
## makeCacheMatrix: create a cache-enabled wrapper around an invertible
## square matrix. Returns a list of four accessor functions:
##   set(y)              replace the stored matrix and drop any cached inverse
##   get()               return the stored matrix
##   setinverse(inverse) store a computed inverse (used by cacheSolve)
##   getinverse()        return the cached inverse, or NULL if not computed
makeCacheMatrix <- function(x = matrix()) {
  inv <- NULL
  set <- function(y) {
    x <<- y
    # BUG FIX: use <<- so the cached inverse in the enclosing environment is
    # invalidated. The original `inv <- NULL` only created a local variable,
    # leaving a stale inverse to be returned after set() replaced the matrix.
    inv <<- NULL
  }
  get <- function() x
  setinverse <- function(inverse) inv <<- inverse
  getinverse <- function() inv
  list(set = set, get = get, setinverse = setinverse, getinverse = getinverse)
}
## cacheSolve: return the inverse of the matrix wrapped by `x`, a list
## produced by makeCacheMatrix. The cached inverse is reused when available
## (announced via a message); otherwise it is computed with solve() and
## stored for subsequent calls. Extra arguments are forwarded to solve().
cacheSolve <- function(x,...) {
  cached <- x$getinverse()
  if (!is.null(cached)) {
    message("getting cached data.")
    return(cached)
  }
  # Cache miss: compute the inverse once and remember it.
  fresh <- solve(x$get(), ...)
  x$setinverse(fresh)
  fresh
}
|
8ea4dadfb4fbdb87468def00de29793ec5d934b9
|
175f5203aa1b0bc905702d0741a882eb455f8e10
|
/man/cross_dat_analy.Rd
|
d4dce4d2ea706f23c0de12e038dc922072e21f34
|
[] |
no_license
|
cran/twl
|
97982fdae0a8d62a3df23721aaccd7250c4b38ef
|
bc1e7a04b9037889e4989fa0e9c17ef7b8db1481
|
refs/heads/master
| 2020-03-27T04:57:09.158214
| 2018-08-24T10:00:03
| 2018-08-24T10:00:03
| 145,981,527
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 1,358
|
rd
|
cross_dat_analy.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/post_mcmc.R
\name{cross_dat_analy}
\alias{cross_dat_analy}
\title{Compares clustering across datasets using metrics described in associated TWL manuscript}
\usage{
cross_dat_analy(clus_save, BURNIN)
}
\arguments{
\item{clus_save}{list of samples outputted from TWLsample function.}
\item{BURNIN}{number of samples devoted to burn-in. Defaults to 2000.}
}
\value{
outpu_lis a list of output metrics. The first element is a list of lists of sample-specific pairwise cluster overlap. The second element is an estimate of cluster correspondence across all datasets, obtained by averaging pairwise cluster overlap (the length of the vector therefore is the number of unique samples associated with at least 2 data sources).
}
\description{
Compares clustering across datasets using metrics described in associated TWL manuscript
}
\examples{
data(data_and_output)
\dontrun{clus_save <- TWLsample(misaligned_mat,misaligned,output_every=50,num_its=5000,manip=FALSE)
outpu_new <- pairwise_clus(clus_save,BURNIN=2000)
post_analy_cor(outpu_new,c("title1","title2","title3","title4","title5"),
tempfile(),ords='none')
clus_labs <- post_analy_clus(outpu_new,clus_save,c(2:6),rep(0.6,5),c("title1","title2",
"title3","title4","title5"),tempfile())
output_nest <- cross_dat_analy(clus_save,4750)
}
}
|
dd86ffda397112f9cee6f25ba2042778e51dcea2
|
86151a6ecec532ac065621a1ffdfd827504176a3
|
/man/download_gpm_imerg.Rd
|
39eb937fe6664e5c076af0622b6b9e59375961ce
|
[] |
no_license
|
imarkonis/pRecipe
|
3454f5ce32e6915a6caef1dbc041d12c411c9ae5
|
07c6b1da653221a0baeeb2aa81b8744393ff587e
|
refs/heads/master
| 2022-11-02T20:27:40.979144
| 2022-10-28T10:52:04
| 2022-10-28T10:52:04
| 237,580,540
| 0
| 0
| null | 2020-02-01T07:44:23
| 2020-02-01T07:44:23
| null |
UTF-8
|
R
| false
| true
| 472
|
rd
|
download_gpm_imerg.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/download_gpm_imerg.R
\name{download_gpm_imerg}
\alias{download_gpm_imerg}
\title{GPM_IMERG data downloader}
\usage{
download_gpm_imerg(folder_path = ".")
}
\arguments{
\item{folder_path}{a character string with the path where the data will be downloaded.}
}
\value{
No return value, called to download the data set.
}
\description{
Function for downloading GPM IMERGM v06.
}
\keyword{internal}
|
749030220a5dc97f97bb1ebcfcb09f9a7208d2ee
|
0500ba15e741ce1c84bfd397f0f3b43af8cb5ffb
|
/cran/paws.management/R/synthetics_service.R
|
62d45d5f99196b8a9c763f4f1f5475534a0c7521
|
[
"Apache-2.0"
] |
permissive
|
paws-r/paws
|
196d42a2b9aca0e551a51ea5e6f34daca739591b
|
a689da2aee079391e100060524f6b973130f4e40
|
refs/heads/main
| 2023-08-18T00:33:48.538539
| 2023-08-09T09:31:24
| 2023-08-09T09:31:24
| 154,419,943
| 293
| 45
|
NOASSERTION
| 2023-09-14T15:31:32
| 2018-10-24T01:28:47
|
R
|
UTF-8
|
R
| false
| false
| 8,303
|
r
|
synthetics_service.R
|
# This file is generated by make.paws. Please do not edit here.
#' @importFrom paws.common new_handlers new_service set_config merge_config
NULL
#' Synthetics
#'
#' @description
#' Amazon CloudWatch Synthetics
#'
#' You can use Amazon CloudWatch Synthetics to continually monitor your
#' services. You can create and manage *canaries*, which are modular,
#' lightweight scripts that monitor your endpoints and APIs from the
#' outside-in. You can set up your canaries to run 24 hours a day, once per
#' minute. The canaries help you check the availability and latency of your
#' web services and troubleshoot anomalies by investigating load time data,
#' screenshots of the UI, logs, and metrics. The canaries seamlessly
#' integrate with CloudWatch ServiceLens to help you trace the causes of
#' impacted nodes in your applications. For more information, see [Using
#' ServiceLens to Monitor the Health of Your
#' Applications](https://docs.aws.amazon.com/AmazonCloudWatch/latest/monitoring/ServiceLens.html)
#' in the *Amazon CloudWatch User Guide*.
#'
#' Before you create and manage canaries, be aware of the security
#' considerations. For more information, see [Security Considerations for
#' Synthetics
#' Canaries](https://docs.aws.amazon.com/AmazonCloudWatch/latest/monitoring/servicelens_canaries_security.html).
#'
#' @param
#' config
#' Optional configuration of credentials, endpoint, and/or region.
#' \itemize{
#' \item{\strong{credentials}:} {\itemize{
#' \item{\strong{creds}:} {\itemize{
#' \item{\strong{access_key_id}:} {AWS access key ID}
#' \item{\strong{secret_access_key}:} {AWS secret access key}
#' \item{\strong{session_token}:} {AWS temporary session token}
#' }}
#' \item{\strong{profile}:} {The name of a profile to use. If not given, then the default profile is used.}
#' \item{\strong{anonymous}:} {Set anonymous credentials.}
#' \item{\strong{endpoint}:} {The complete URL to use for the constructed client.}
#' \item{\strong{region}:} {The AWS Region used in instantiating the client.}
#' }}
#' \item{\strong{close_connection}:} {Immediately close all HTTP connections.}
#' \item{\strong{timeout}:} {The time in seconds till a timeout exception is thrown when attempting to make a connection. The default is 60 seconds.}
#' \item{\strong{s3_force_path_style}:} {Set this to `true` to force the request to use path-style addressing, i.e. `http://s3.amazonaws.com/BUCKET/KEY`.}
#' \item{\strong{sts_regional_endpoint}:} {Set sts regional endpoint resolver to regional or legacy \url{https://docs.aws.amazon.com/sdkref/latest/guide/feature-sts-regionalized-endpoints.html}}
#' }
#' @param
#' credentials
#' Optional credentials shorthand for the config parameter
#' \itemize{
#' \item{\strong{creds}:} {\itemize{
#' \item{\strong{access_key_id}:} {AWS access key ID}
#' \item{\strong{secret_access_key}:} {AWS secret access key}
#' \item{\strong{session_token}:} {AWS temporary session token}
#' }}
#' \item{\strong{profile}:} {The name of a profile to use. If not given, then the default profile is used.}
#' \item{\strong{anonymous}:} {Set anonymous credentials.}
#' }
#' @param
#' endpoint
#' Optional shorthand for complete URL to use for the constructed client.
#' @param
#' region
#' Optional shorthand for AWS Region used in instantiating the client.
#'
#' @section Service syntax:
#' ```
#' svc <- synthetics(
#' config = list(
#' credentials = list(
#' creds = list(
#' access_key_id = "string",
#' secret_access_key = "string",
#' session_token = "string"
#' ),
#' profile = "string",
#' anonymous = "logical"
#' ),
#' endpoint = "string",
#' region = "string",
#' close_connection = "logical",
#' timeout = "numeric",
#' s3_force_path_style = "logical",
#' sts_regional_endpoint = "string"
#' ),
#' credentials = list(
#' creds = list(
#' access_key_id = "string",
#' secret_access_key = "string",
#' session_token = "string"
#' ),
#' profile = "string",
#' anonymous = "logical"
#' ),
#' endpoint = "string",
#' region = "string"
#' )
#' ```
#'
#' @examples
#' \dontrun{
#' svc <- synthetics()
#' svc$associate_resource(
#' Foo = 123
#' )
#' }
#'
#' @section Operations:
#' \tabular{ll}{
#' \link[=synthetics_associate_resource]{associate_resource} \tab Associates a canary with a group\cr
#' \link[=synthetics_create_canary]{create_canary} \tab Creates a canary\cr
#' \link[=synthetics_create_group]{create_group} \tab Creates a group which you can use to associate canaries with each other, including cross-Region canaries\cr
#' \link[=synthetics_delete_canary]{delete_canary} \tab Permanently deletes the specified canary\cr
#' \link[=synthetics_delete_group]{delete_group} \tab Deletes a group\cr
#' \link[=synthetics_describe_canaries]{describe_canaries} \tab This operation returns a list of the canaries in your account, along with full details about each canary\cr
#' \link[=synthetics_describe_canaries_last_run]{describe_canaries_last_run} \tab Use this operation to see information from the most recent run of each canary that you have created\cr
#' \link[=synthetics_describe_runtime_versions]{describe_runtime_versions} \tab Returns a list of Synthetics canary runtime versions\cr
#' \link[=synthetics_disassociate_resource]{disassociate_resource} \tab Removes a canary from a group\cr
#' \link[=synthetics_get_canary]{get_canary} \tab Retrieves complete information about one canary\cr
#' \link[=synthetics_get_canary_runs]{get_canary_runs} \tab Retrieves a list of runs for a specified canary\cr
#' \link[=synthetics_get_group]{get_group} \tab Returns information about one group\cr
#' \link[=synthetics_list_associated_groups]{list_associated_groups} \tab Returns a list of the groups that the specified canary is associated with\cr
#' \link[=synthetics_list_group_resources]{list_group_resources} \tab This operation returns a list of the ARNs of the canaries that are associated with the specified group\cr
#' \link[=synthetics_list_groups]{list_groups} \tab Returns a list of all groups in the account, displaying their names, unique IDs, and ARNs\cr
#' \link[=synthetics_list_tags_for_resource]{list_tags_for_resource} \tab Displays the tags associated with a canary or group\cr
#' \link[=synthetics_start_canary]{start_canary} \tab Use this operation to run a canary that has already been created\cr
#' \link[=synthetics_stop_canary]{stop_canary} \tab Stops the canary to prevent all future runs\cr
#' \link[=synthetics_tag_resource]{tag_resource} \tab Assigns one or more tags (key-value pairs) to the specified canary or group\cr
#' \link[=synthetics_untag_resource]{untag_resource} \tab Removes one or more tags from the specified resource\cr
#' \link[=synthetics_update_canary]{update_canary} \tab Updates the configuration of a canary that has already been created
#' }
#'
#' @return
#' A client for the service. You can call the service's operations using
#' syntax like `svc$operation(...)`, where `svc` is the name you've assigned
#' to the client. The available operations are listed in the
#' Operations section.
#'
#' @rdname synthetics
#' @export
synthetics <- function(config = list(), credentials = list(), endpoint = NULL, region = NULL) {
  # Fold the shorthand arguments (credentials/endpoint/region) into the full
  # config list, then return the service's operation set bound to that
  # merged configuration.
  merged_config <- merge_config(
    config,
    list(
      credentials = credentials,
      endpoint = endpoint,
      region = region
    )
  )
  set_config(.synthetics$operations, merged_config)
}
# Private API objects: metadata, handlers, interfaces, etc.
.synthetics <- list()
.synthetics$operations <- list()
# Service metadata: endpoint templates per region partition (commercial,
# China, US ISO partitions), API/signing identifiers, and the JSON protocol
# version consumed by the request handlers.
.synthetics$metadata <- list(
  service_name = "synthetics",
  endpoints = list("*" = list(endpoint = "synthetics.{region}.amazonaws.com", global = FALSE), "cn-*" = list(endpoint = "synthetics.{region}.amazonaws.com.cn", global = FALSE), "us-iso-*" = list(endpoint = "synthetics.{region}.c2s.ic.gov", global = FALSE), "us-isob-*" = list(endpoint = "synthetics.{region}.sc2s.sgov.gov", global = FALSE)),
  service_id = "synthetics",
  api_version = "2017-10-11",
  signing_name = "synthetics",
  json_version = "1.1",
  target_prefix = ""
)
# Build a service object from the metadata above, wiring in the REST-JSON /
# SigV4 handler chain along with the caller-supplied config.
.synthetics$service <- function(config = list()) {
  handlers <- new_handlers("restjson", "v4")
  new_service(.synthetics$metadata, handlers, config)
}
|
9346aed167afbdf4fc21defe245837de3b3f8bea
|
f3b996edc7dc15421abdf298f5b44c32d493e3ce
|
/scripts/LOCA_FTP_loop.R
|
a53b48e1d5e11b585e1b48fbe81c054d91bca3b6
|
[] |
no_license
|
mapdonnelly/CDPH_heat_project
|
4ba40ac0596fa0e8e5cbb06a305cc6cd489e236e
|
b0e4e01c37aa761c2573d49d481c6ecd0f5f0ef3
|
refs/heads/master
| 2021-07-07T15:13:44.419156
| 2019-03-27T17:48:53
| 2019-03-27T17:48:53
| 138,775,988
| 0
| 1
| null | 2018-07-02T19:58:50
| 2018-06-26T18:12:34
| null |
UTF-8
|
R
| false
| false
| 3,731
|
r
|
LOCA_FTP_loop.R
|
# NOTE(review): rm(list = ls()) and a hard-coded setwd() are discouraged in
# scripts -- they destroy the caller's session and make the script
# non-portable. Consider removing rm() and using a project-relative path.
rm(list = ls())
library(ncdf4)
library(tidyverse)
library(parallel)
setwd('~/Desktop/GitHub/CDPH_heat_project/')
# RCP scenarios -- commented out here; the download URLs below appear to
# hard-code both rcp45 and rcp85 instead (TODO confirm once download_nc is
# finalized).
#rcpMat <- c("45","85")
#names(rcpMat) <- c("RCP4.5 (emissions peak 2040, stabiliazation by 2100)","RCP8.5 (emissions continue to rise throughout the 21st century)")
# Downscaled climate models available from the LOCA archive; names mirror
# the values so the vector supports labelled iteration.
modelMat <- c("ACCESS1-0","CanESM2","CESM1-BGC","CMCC-CMS","CNRM-CM5","GFDL-CM3","HadGEM2-CC","HadGEM2-ES","MIROC5")
names(modelMat) <- c("ACCESS1-0","CanESM2","CESM1-BGC","CMCC-CMS","CNRM-CM5","GFDL-CM3","HadGEM2-CC","HadGEM2-ES","MIROC5")
# Download one year of daily LOCA historical tasmax/tasmin NetCDF files for a
# single climate model from the Berkeley albers server.
# (Blocks for the rcp45/rcp85 scenarios existed here but were already
# commented out by the author; only the "historical" run, ensemble member
# r1i1p1, is fetched.  CCSM4 uses r6i1p1 instead -- see note at end of file.)
#
# modelVar     -- model name as it appears in the server's directory layout
# YearStartVar -- start date string, YYYYMMDD
# YearStopVar  -- stop date string, YYYYMMDD
download_nc <- function(modelVar,YearStartVar,YearStopVar){
  base_url <- "http://albers.cnr.berkeley.edu/data/scripps/loca/"
  dest_dir <- "~/Desktop/GitHub/CDPH_heat_project/data/"
  # Fetch tasmax first, then tasmin, writing each file in binary mode.
  for (varname in c("tasmax", "tasmin")) {
    src <- paste0(base_url, modelVar, "/historical/", varname, "/", varname,
                  "_day_", modelVar, "_historical_r1i1p1_", YearStartVar, "-",
                  YearStopVar, ".LOCA_2016-04-02.16th.CA_NV.nc")
    dest <- paste0(dest_dir, modelVar, "_historical_", varname, "_",
                   YearStartVar, "_", YearStopVar, "_", "CA_NV.nc")
    download.file(url = src, destfile = dest, mode = "wb")
  }
}
#rcpVar<- c("historical")##inputs are "rcp45" "rcp85" and "historical"
#tempVar<-c("tasmax")
# Single year of historical data to fetch (YYYYMMDD start/stop).
YearStartVar<-c("20050101")
YearStopVar<-c("20051231")
#modelMat <- c("CCSM4")
#names(modelMat) <- c("CCSM4")
##Download data year by year splitting it up on multiple cores.
mclapply(modelMat, download_nc,YearStartVar=YearStartVar,YearStopVar=YearStopVar)
##need to do CCSM4 separately because it has r6i1p1 instead of r1i1p1
##need to do CCSM$ separately because it has r6i1p1 instead of r1i1p1
|
80556a080ea0ce4654355d42e0245fd40f063348
|
1aa92f850ce632811aaa74d769527a8037d8c484
|
/tests/check_transf_sigmas.R
|
efb1aab143710dba82973880cdebcd608c4540c9
|
[] |
no_license
|
cran/mvord
|
253c6e7deaf07bf5ac111571b6db307219f1597c
|
6699126154748d7510647afc7bda27066aad3549
|
refs/heads/master
| 2021-06-02T15:11:40.519370
| 2021-03-17T12:20:12
| 2021-03-17T12:20:12
| 102,715,261
| 2
| 2
| null | null | null | null |
UTF-8
|
R
| false
| false
| 5,568
|
r
|
check_transf_sigmas.R
|
# Sanity checks for mvord's internal parameter transformations:
# z2r(), the build_error_struct.* methods, and backtransf_sigmas().
# Uses unexported helpers via `:::`, so this file is tied to mvord internals.
library(mvord)
#check z2r -- the tests imply z2r(z) = (exp(2z)-1)/(1+exp(2z))
mvord:::check(identical(mvord:::z2r(355),1))
mvord:::check(identical(mvord:::z2r(0),(exp(0)-1)/(1+exp(0))))
mvord:::check(identical(mvord:::z2r(2),(exp(4)-1)/(1+exp(4))))
# ---- cor_ar1: AR(1) correlation structure ----
error.structure <- cor_ar1(~ 1)
ndim <- 5
covar_error <- matrix(rep(1,10), ncol = 1)
attr(error.structure, "ndim") <- ndim
attr(error.structure, "covariate") <- covar_error
attr(error.structure, "npar") <- NCOL(attr(error.structure, "covariate"))
par.sigma <- 2
# Expected correlation matrix: powers of r = z2r(par.sigma) along the
# sub-diagonals (AR(1) pattern), symmetrized with unit diagonal.
sigma <- diag(ndim)
sigma[lower.tri(sigma)] <- mvord:::z2r(par.sigma)^sequence((ndim-1):1)
sigma <- sigma + t(sigma) - diag(diag(sigma))
## make list with two elements:
## rVec: for each row the sigma[lower.tri(sigma)]
## sdVec: for correlation a vector of ones
l <- list(rVec = t(sapply(1:10, function(i) sigma[lower.tri(sigma)])),
          sdVec = rep(1, ndim))
mvord:::check(identical(mvord:::build_error_struct.cor_ar1(error.structure, par.sigma), l))
# rho$ncor.levels <- 2
# rho$npar.cor <- 1
# par.sigma <- c(2,0)
#
# sigma <- diag(rho$ndim)
# sigma[lower.tri(sigma)] <- z2r(par.sigma[1])^sequence((rho$ndim-1):1)
# sigma <- sigma + t(sigma) - diag(diag(sigma))
#
# sigma2 <- diag(rho$ndim)
# sigma2[lower.tri(sigma2)] <- z2r(par.sigma[2])^sequence((rho$ndim-1):1)
# sigma2 <- sigma2 + t(sigma2) - diag(diag(sigma2))
#
# check(identical(transf.sigmas.corAR1(par.sigma, rho), list(sigma, sigma2)))
# ---- cor_equi: equicorrelation driven by a linear predictor of covariates ----
error.structure <- cor_equi(~ 1)
ndim <- 5
covar_error <- cbind(1:10,11:20,21:30)
attr(error.structure, "ndim") <- ndim
attr(error.structure, "covariate") <- covar_error
attr(error.structure, "npar") <- NCOL(attr(error.structure, "covariate"))
par.sigma <- c(1,2,-3)
sigma <- diag(ndim)
sigma[lower.tri(sigma)] <- mvord:::z2r(covar_error %*% par.sigma)
sigma <- sigma + t(sigma) - diag(diag(sigma))
sigma
## make list with two elements:
## rVec: for each row the sigma[lower.tri(sigma)]
## sdVec: for correlation a vector of ones
l <- list(rVec = t(sapply(1:10, function(i) sigma[lower.tri(sigma)])),
          sdVec = rep(1, ndim))
mvord:::check(identical(mvord:::build_error_struct.cor_equi(error.structure, par.sigma), l))
# Same round trip with a second, smaller parameter vector.
par.sigma <- c(0.1,0.2,-0.3)
sigma <- diag(ndim)
sigma[lower.tri(sigma)] <- mvord:::z2r(covar_error %*% par.sigma)
sigma <- sigma + t(sigma) - diag(diag(sigma))
l <- list(rVec = t(sapply(1:10, function(i) sigma[lower.tri(sigma)])),
          sdVec = rep(1, ndim))
mvord:::check(identical(mvord:::build_error_struct.cor_equi(error.structure, par.sigma), l))
# spherical param
# ---- cor_general: round trip build_error_struct -> backtransf_sigmas ----
error.structure <- cor_general(~ 1)
ndim <- 3
covar_error <- cbind(rep(1, 10))
attr(error.structure, "ndim") <- ndim
attr(error.structure, "covariate") <- covar_error
npar1 <- attr(error.structure, "ndim") * (attr(error.structure, "ndim") - 1)/2
attr(error.structure, "npar") <- npar1 * NCOL(attr(error.structure, "covariate"))
par.sigma <- c(0.5, 1, 2)
l <- mvord:::build_error_struct.cor_general(error.structure, par.sigma)
sigma <- diag(ndim)
sigma[lower.tri(sigma)] <- l$rVec[1,]
sigma <- sigma + t(sigma) - diag(diag(sigma))
# Recovering par.sigma from the built matrix validates the inverse transform.
mvord:::check(all.equal(mvord:::backtransf_sigmas(sigma), par.sigma))
#check for cor_general(~f)  -- two factor levels, one parameter set per level
error.structure <- cor_general(~ factor(c(rep("a", 5), rep("b", 5))))
ndim <- 3
covar_error <- cbind(c(rep(1, 5), rep(0,5)), c(rep(0, 5), rep(1, 5)))
attr(error.structure, "ndim") <- ndim
attr(error.structure, "covariate") <- covar_error
npar1 <- attr(error.structure, "ndim") * (attr(error.structure, "ndim") - 1)/2
attr(error.structure, "npar") <- npar1 * NCOL(attr(error.structure, "covariate"))
par.sigma <- c(0.5, 1, 2, -0.5, -1, -2)
l <- mvord:::build_error_struct.cor_general(error.structure, par.sigma)
# Rows 1 and 6 of rVec belong to the two factor levels respectively.
sigma1 <- diag(ndim)
sigma1[lower.tri(sigma1)] <- l$rVec[1,]
sigma1 <- sigma1 + t(sigma1) - diag(ndim)
sigma2 <- diag(ndim)
sigma2[lower.tri(sigma2)] <- l$rVec[6,]
sigma2 <- sigma2 + t(sigma2) - diag(ndim)
mvord:::check(all.equal(mvord:::backtransf_sigmas(sigma1), par.sigma[1:3]))
mvord:::check(all.equal(mvord:::backtransf_sigmas(sigma2), par.sigma[4:6]))
#check for cov_general(~f)  -- covariance version: correlations plus log-sd terms
error.structure <- cov_general(~ factor(c(rep("a", 5), rep("b", 5))))
ndim <- 3
covar_error <- cbind(c(rep(1, 5), rep(0,5)), c(rep(0, 5), rep(1, 5)))
attr(error.structure, "ndim") <- ndim
attr(error.structure, "covariate") <- covar_error
npar1 <- attr(error.structure, "ndim") * (attr(error.structure, "ndim") - 1)/2
attr(error.structure, "npar") <- (npar1 + ndim)* NCOL(attr(error.structure, "covariate"))
attr(error.structure, "npar.cor") <- npar1* NCOL(attr(error.structure, "covariate"))
attr(error.structure, "npar.sd") <- ndim * NCOL(attr(error.structure, "covariate"))
## new
attr(error.structure, "npar.cor") <- ndim * (ndim - 1)/2 * NCOL(covar_error)
##
# 6 correlation parameters (3 per level) followed by 3 log-sd terms per level.
par.sigma <- c(0.5, 1, 2, -0.5, -1, -2, c(-1, 0.2, 0.5), c(1, -0.2, -0.5))
l <- mvord:::build_error_struct.cov_general(error.structure, par.sigma)
l
sigma1 <- diag(ndim)
sigma1[lower.tri(sigma1)] <- l$rVec[1,]
sigma1 <- (sigma1 + t(sigma1) - diag(ndim))
# Scale the correlation matrix by the row's standard deviations: D %*% R %*% D.
sigma1 <- t(l$sdVec[1, ] * sigma1) * l$sdVec[1, ]
sigma2 <- diag(ndim)
sigma2[lower.tri(sigma2)] <- l$rVec[6,]
sigma2 <- sigma2 + t(sigma2) - diag(ndim)
sigma2 <- t(l$sdVec[6, ] * sigma2) * l$sdVec[6, ]
sigma1
sigma2
# Variances should equal exp(2 * log-sd parameters); correlations round-trip.
mvord:::check(all.equal(c(mvord:::backtransf_sigmas(cov2cor(sigma1)), diag(sigma1)),
                        c(par.sigma[1:3], exp(2 * par.sigma[7:9]))))
mvord:::check(all.equal(c(mvord:::backtransf_sigmas(cov2cor(sigma2)), diag(sigma2)),
                        c(par.sigma[4:6], exp(2 * par.sigma[10:12]))))
|
8f3571ac56ed9c1b371103c6822ccc373ae82675
|
ec94dddf45e332663da3e37db2feeb709221d763
|
/man/Decision-makers-single-quote-attributes-class.Rd
|
77b6c4898d605ff3a4a7365dfcc361d28bf8d80a
|
[
"Apache-2.0"
] |
permissive
|
AntoineDubois/sdcv2
|
44687ab28a1c7aa3c82702ee2506257a20475994
|
53041ecc32698089a66a0df7911dd7c0f461cc34
|
refs/heads/master
| 2023-07-16T20:07:11.525114
| 2021-09-06T15:27:46
| 2021-09-06T15:27:46
| 386,579,310
| 0
| 1
| null | null | null | null |
UTF-8
|
R
| false
| true
| 603
|
rd
|
Decision-makers-single-quote-attributes-class.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/decision makers.R
\docType{class}
\name{Decision makers' attributes-class}
\alias{Decision makers' attributes-class}
\alias{ob_decision_makers_att}
\title{ob_decision_makers_att}
\arguments{
\item{N}{The number of decision makers}
\item{p}{The number of attributes of each decision makers}
}
\description{
The class of observed decision makers' attributes
}
\examples{
N <- 10; p <- 3; a <- ob_decision_makers_att$new(N=N, p=p);
a$gen("student", param=list(location=rep(100, 3), df=3)); a$gen(); a$gen("help")
}
|
8cbdf539594734e22b50be5ed49ae62ea7f70ac6
|
834c63050072298b639c55c4585726bf20e20a00
|
/scratch_ranking.R
|
e7356526a799632cd0090ebb7a1ffc1ee8465b54
|
[] |
no_license
|
benilak/Senior_Project
|
409793cb198aa77bff51092ba28981d1d4ac92cc
|
e180b945693f4cf36df1314471f5a05faf31fb3f
|
refs/heads/master
| 2020-06-30T19:03:39.800501
| 2019-09-23T20:33:41
| 2019-09-23T20:33:41
| 200,920,841
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 7,446
|
r
|
scratch_ranking.R
|
# Exploratory/scratch script: build a weighted stock-ranking scheme from
# industry-comparison percentiles (project helper get_industry) and
# tidyquant price data, then visualize the ranks.  Interactive by design
# (View(), plots); not meant to run unattended.
source("get_industry.R", encoding = "utf-8") # to read trademark symbol (R) in webpage
source("get_stockrow.R")
source("get_keystats.R")
library(ggrepel)
ind_compare <- get_industry(c("WDC", "MSFT", "AAPL"), get = "compare")
ind_names <- get_industry(c("WDC", "MSFT", "AAPL"), get = "names")
### scratch junk, keep for reference (used to update get_industry.R) ###
# compare table in tidy format
# NOTE(review): WDC_compare is not created in this file -- presumably left
# over from an earlier session; confirm before re-running.
WDC_compare %>%
  rename(Percentile = `Percentilein Industry`) %>%
  gather(`Industry Average`, `Percentile`, `Value`, key = Key2, value = Value) %>%
  spread(Key, value = Value) %>%
  rename(Key = Key2) %>%
  slice(3,1,2) %>%
  View()
# compare table in tidy for multiple tickers (ignores row order, no slicing)
ind_compare %>% View()
ind_compare %>%
  gather(key = "Symbol", value = "Value", symbol) %>%
  select(Symbol, everything()) %>%
  filter(!is.na(Value)) %>%
  rename(Percentile = `Percentilein Industry`) %>%
  gather(`Industry Average`, `Percentile`, `Value`, key = Key2, value = Value) %>%
  spread(Key, value = Value) %>%
  rename(Key = Key2) %>%
  View()
###
# rank companies based on PE ratio ~relative to industry~
ind_compare %>%
  filter(Key == "Percentile") %>%
  mutate(PE = parse_number(`P/E (TTM)`),
         rank = rank(PE)) %>%
  select(Symbol, PE, rank)
# get annualized return (5 years, monthly)
AAPL <- tq_get("AAPL")
AAPL %>%
  filter(between(date, today()-years(5), today())) %>%
  tq_transmute(select = adjusted, # gets monthly returns
               mutate_fun = periodReturn,
               period = "monthly") %>%
  # annualizes
  tq_performance(Ra = monthly.returns, Rb = NULL, performance_fun = table.AnnualizedReturns)
# get ann. returns for mulitple stocks (maybe inaccurate?)
mult_stocks <- tq_get(c("TJX", "AAPL", "NOBL", "TSLA", "SPY"))
mult_stocks %>%
  filter(between(date, today()-years(5), today())) %>%
  group_by(symbol) %>%
  tq_transmute(select = adjusted, # gets monthly returns
               mutate_fun = periodReturn,
               period = "monthly") %>%
  # annualizes
  tq_performance(Ra = monthly.returns, performance_fun = table.AnnualizedReturns)
# all stocks in each exchange, includes sector + industry
# (industry title does not match get_industry exactly)
NYSE <- tq_exchange("NYSE")
NYSE %>%
  filter(symbol == "TJX") %>%
  .$industry
NASDAQ <- tq_exchange("NASDAQ")
NYSE %>%
  filter(symbol == "SPY")
AMEX <- tq_exchange("AMEX")
# get dividends for a stock
tq_get("AAPL", get = "dividends")
# get random ticker symbols
SP1000 <- tq_index("SP1000")
symbols <- SP1000 %>%
  pull(symbol) %>%
  sample(10)
# rank symbols by 5Y Ann. Yield
stock_dat <- tq_get(symbols)
stock_annualized <- stock_dat %>%
  filter(between(date, today()-years(5), today())) %>%
  group_by(symbol) %>%
  tq_transmute(select = adjusted, # gets monthly returns
               mutate_fun = periodReturn,
               period = "monthly") %>%
  # annualizes
  tq_performance(Ra = monthly.returns, performance_fun = table.AnnualizedReturns) %>%
  ungroup() %>%
  mutate(rankReturn = rank(-AnnualizedReturn),
         rankSD = rank(AnnualizedStdDev))
# rank PE and dividend yield
industry_dat <- get_industry(symbols, get = "compare")
stock_stats <- industry_dat %>%
  filter(Key == "Percentile") %>%
  mutate(percentileDividend = parse_number(`Dividend Yield (Annualized)`),
         percentilePE = parse_number(`P/E (TTM)`),
         percentileEPSgrowth = parse_number(`EPS Growth (TTM vs Prior TTM)`),
         rankPE = rank(percentilePE),
         rankDividend = rank(-percentileDividend, na.last = "keep"),
         rankEPSgrowth = rank(-percentileEPSgrowth)) %>%
  rename(symbol = Symbol) %>%
  select(symbol, percentilePE, percentileDividend, percentileEPSgrowth,
         rankPE, rankDividend, rankEPSgrowth)
# final ranks
# Metric weights; `ranks` is long-format with one row per (symbol, metric),
# and metric is a factor leveled in the same order as these weights so the
# weighted.mean() below lines up rank values with weights after arrange().
wghts <- c(rankDividend = 2.5, rankEPSgrowth = 2, rankPE = 10, rankReturn = 4.5, rankSD = 2)
ranks <- full_join(stock_annualized, stock_stats, by = "symbol") %>%
  gather(names(wghts), key = "metric", value = "rank") %>%
  mutate(metric = factor(metric, levels = names(wghts))) %>%
  select(-`AnnualizedSharpe(Rf=0%)`)
ranks_final <- ranks %>%
  group_by(symbol) %>%
  arrange(metric) %>% # must arrange by metric, which must be a factor ordered by weight names
  summarise(final = weighted.mean(rank, wghts, na.rm = TRUE)) %>%
  mutate(final = rank(final))
# Alternative scoring kept for comparison (per-metric products, not a mean).
ranks_final2 <- ranks %>%
  group_by(symbol) %>%
  arrange(metric) %>% # must arrange by metric, which must be a factor ordered by weight names
  summarise(final = rank*wghts) %>%
  mutate(final = rank(final))
# parallel plots (move NA populated ranks to edge of plot)
library(directlabels)
ranks %>%
  ggplot(aes(x = metric, y = rank, color = symbol, group = symbol)) +
  geom_point(size = 2) +
  geom_path(size = 1) +
  scale_x_discrete(expand = c(0,1)) +
  scale_y_reverse(breaks = 1:10) +
  labs(title = "Parallel Plot of metrics", subtitle = "ranks as 1st, 2nd, etc...",
       y = "Rank", x = "") +
  theme_tq() +
  theme(legend.position = "none",
        panel.grid.major.y = element_line(linetype = "dashed")) +
  geom_dl(aes(label = symbol), method = list(dl.trans(x=x+0.5), "last.points"))
# risk vs reward
stock_annualized %>%
  ggplot(aes(x = AnnualizedStdDev, y = AnnualizedReturn)) +
  geom_point() +
  geom_label_repel(aes(label = symbol, color = symbol)) +
  theme_tq() +
  theme(legend.position = "none",
        panel.grid.minor = element_line(linetype = "dashed")) +
  labs(title = "Risk vs. Reward", x = "Standard Deviation of Returns", y = "Annualized Return")
# ranks final plot -- score = 1/sqrt(final rank), three styling variants below
ranks_final %>%
  mutate(score = 1/sqrt(final)) %>%
  ggplot(aes(x = reorder(symbol, final), y = score)) +
  geom_bar(stat = "identity") +
  scale_y_continuous(labels = scales::percent) +
  labs(x = "Stock", title = "Recommendation Score", y = "")
ranks_final %>%
  mutate(Score = 1/sqrt(final)) %>%
  ggplot() +
  geom_bar(aes(x = reorder(symbol, final), y = Score),
           stat = "identity", alpha = 0.4, width = .9) +
  geom_segment(aes(x = symbol,
                   xend = symbol,
                   y = Score,
                   yend = Score),
               color = "purple") +
  geom_bar(data = mutate(ranks, score = 1/sqrt(rank)*.5),
           aes(x = symbol, y = score, fill = metric, group = metric),
           stat = "identity", position = "dodge", alpha = 0.7) +
  scale_y_continuous(labels = scales::percent) +
  labs(x = "Stock", title = "Recommendation Score", y = "", fill = "") +
  scale_fill_manual(values = 2:5, labels = c("PE", "Div Yield", "5Y Return", "Std Dev")) +
  theme_tq()
ranks_final %>%
  mutate(Score = 1/sqrt(final)) %>%
  ggplot() +
  geom_bar(aes(x = reorder(symbol, final), y = Score),
           stat = "identity", alpha = 0.4, width = .7) +
  geom_hline(aes(x = symbol, yintercept = Score), color = "purple") +
  geom_bar(data = mutate(ranks, score = 1/sqrt(rank)),
           aes(x = symbol, y = score, fill = metric, group = metric),
           stat = "identity", position = "stack") +
  scale_y_continuous(labels = scales::percent) +
  labs(x = "Stock", title = "Recommendation Score", y = "", fill = "") +
  scale_fill_manual(values = 2:6, labels = c("PE", "Div Yield", "5Y Return", "Std Dev")) +
  theme_tq()
# decide what to do with NA metrics
# companies that don't pay dividends may not necessarily deserve dead last in the rankings
# you can rank with na.last=NA to remove NA, na.last="keep" to rank as NA
|
3388da36c6306cca64b0b1d75db8b2ff8ad8f841
|
c69bb8c12eb205627783c8ae7a10280235873724
|
/R/convert.tz.R
|
3ee3ae1a9a60e91479b0479d3de18a75bb3300f8
|
[] |
no_license
|
cran/HelpersMG
|
384b45838d5fa110fe31c3eaca5b545774136797
|
c3bd166e7d24bf4d51414819539262db9e30495a
|
refs/heads/master
| 2023-06-21T20:05:01.243339
| 2023-06-14T19:02:05
| 2023-06-14T19:02:05
| 33,549,168
| 4
| 1
| null | null | null | null |
UTF-8
|
R
| false
| false
| 868
|
r
|
convert.tz.R
|
#' convert.tz Convert one Date-Time from one timezone to another
#' @title Convert one Date-Time from one timezone to another
#' @author Marc Girondot \email{marc.girondot@@gmail.com}
#' @return A POSIXlt or POSIXct date converted (same class as the input `x`)
#' @param x The date-time in POSIXlt or POSIXct format
#' @param tz The timezone
#' @description Convert one Date-Time from one timezone to another.\cr
#' Available timezones can be shown using OlsonNames().
#' @seealso Function \code{with_tz()} from \code{lubridate} package does the same. I keep it here only for compatibility with old scripts.
#' @examples
#' d <- as.POSIXlt("2010-01-01 17:34:20", tz="UTC")
#' convert.tz(d, tz="America/Guatemala")
#' @export
convert.tz <- function(x, tz=Sys.timezone()) {
  # Render the instant in the target timezone, then re-parse it so the
  # returned object carries `tz` as its own timezone attribute.
  d <- as.POSIXct(format(as.POSIXct(x), tz=tz, usetz=TRUE), tz=tz)
  # Bug fix: the original tested inherits(c, "POSIXlt"); `c` is the base
  # combine function, so the condition was always FALSE and POSIXlt input
  # was silently returned as POSIXct. Test the actual input `x`.
  if (inherits(x, "POSIXlt")) d <- as.POSIXlt(d)
  return(d)
}
|
9d20805b1005e5b6dde275e4fb127694c31b0244
|
5831cc1a1b4406d1cf7e8faa219b6293c51b9099
|
/ML_5_3_problem.R
|
775b13cc36704ee5da9a463d83752cfe3dede726
|
[] |
no_license
|
sujiths93/Machine-Learning-Assignment-5
|
3e73229b2ea96f95ba66a975ceade1f3beea3523
|
51b11daf2a08db552ad4722a83ef697820f6b7eb
|
refs/heads/master
| 2021-01-01T04:01:12.468269
| 2016-04-22T05:11:13
| 2016-04-22T05:11:13
| 56,827,041
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 877
|
r
|
ML_5_3_problem.R
|
#QUESTION 3
# Queue lengths at which to evaluate the model: 0 to 10 in steps of 0.5.
vec=seq(0,10,0.5)
# Logistic discriminant with quadratic features: given weights w = (w0,w1,w2)
# and scalar x, returns sigmoid(w0 + w1*x + w2*x^2) in (0, 1).
dis <- function(w, x) {
  features <- c(1, x, x^2)
  linear_term <- sum(w * features)
  1 / (1 + exp(-linear_term))
}
#3a
# Weights from the problem statement; plot P(join queue) over line lengths.
w0=3;w1=-.05;w2=-.08
w=c(w0,w1,w2)
y=NULL
for(i in vec)
{
  # NOTE(review): vec contains non-integer values (0, 0.5, 1, ...); using
  # them directly as indices truncates (y[0.5] indexes y[0]), so half the
  # computed values are lost/overwritten. Verify this is intended.
  y[i]=dis(w,i)
}
plot(y,type='l',main="Probability of person joining queue",xlab = "Length of line",ylab="Probability")
# Training data -- presumably column 1 = line length, column 2 = 0/1 outcome;
# TODO confirm: `chipotle` must already be loaded in the session.
X=chipotle
# Same logistic discriminant as dis(), but reading row i of the global X.
dis1=function(w,i)
{
  Q=c(1,X[i,1],(X[i,1]^2))
  z=sum(w*Q)
  y=1/(1+(exp(-z)))
  return(y)
}
#3b
# One pass of stochastic gradient updates on the logistic model, step size
# read interactively from the console.
w=c(0,0,0)
alpha=readline("enter step size ")
alpha=as.numeric(alpha)
#alpha=0.0001
for(i in 1:dim(chipotle)[1])
{
  Q=c(1,X[i,1],(X[i,1]^2))
  w=w+(alpha*(X[i,2]-dis1(w,i))*Q)
}
#ESTIMATED W for alpha=0.0001
#0.7085809111  0.8995567787 -0.1714233179
#To find sum of squared error for each set of weights
u=NULL
for(i in vec)
{
  u=append(u,dis(w,i))
}
plot(vec,u,type='l',main="Probability of person joining queue",xlab = "Length of line",ylab="Probability")
|
3408b44f146fb4e7f72d5257ac426eb33867a9c6
|
197590555db25e2b43692e4a89c3c8388c03fdf1
|
/tests/testthat/test-declare-design-from-template.R
|
3f04df3d034d2fb7c8b39879d8129ae162accbc1
|
[] |
no_license
|
yadevi/DeclareDesign-1
|
f843ef77d937d1dd0975b99f8ab5914c20461c1c
|
badcd6e6edbb2e0bb3cf51b3da1047664acea789
|
refs/heads/master
| 2021-06-14T03:53:23.472797
| 2016-12-05T06:16:05
| 2016-12-05T06:16:05
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,349
|
r
|
test-declare-design-from-template.R
|
rm(list=ls())  # NOTE(review): clearing the environment inside a test file is an anti-pattern; testthat tests should be self-contained.
library(testthat)
library(DeclareDesign)
context("Declare Design from Template")
# Smoke test: the test body contains no expect_*() calls, so it only
# verifies that quick_design() on this template runs without error.
test_that("declare_design_from_template works", {
  # Minimal design template: population with normal noise, simple random
  # sample of n, binary treatment Z assigned with probabilities (.7, .3),
  # ATE estimand, difference-in-means estimator.
  simple_template <- function(N, n){
    population <- declare_population(noise = "rnorm(n_)", size = N)
    sampling <- declare_sampling(n = n)
    potential_outcomes <- declare_potential_outcomes(formula = Y ~ 5 + .5*Z*rnorm(n_) + noise,
                                                     condition_names = c(0, 1),
                                                     assignment_variable_name = "Z")
    assignment <- declare_assignment(potential_outcomes=potential_outcomes, probability_each = c(.7, .3))
    estimand <- declare_estimand(estimand_text = "mean(Y_Z_1 - Y_Z_0)", potential_outcomes = potential_outcomes)
    estimator_d_i_m <- declare_estimator(estimates = difference_in_means, formula = Y ~ Z, estimand = estimand)
    design <- declare_design(population = population,
                             sampling = sampling,
                             assignment = assignment,
                             estimator = estimator_d_i_m,
                             potential_outcomes = potential_outcomes,
                             label = "Simple Design")
    return(design)
  }
  design <- quick_design(template = simple_template, N = 5000, n = 100)
})
|
3e1c81bb0696314131597ee63602e08336f69d71
|
0f3a072c237893f1b2f2e49a935c4df14a05d497
|
/04.results/plots.R
|
7b002fa4bd7da5ae5d6e4e58af737335b051b6b3
|
[] |
no_license
|
noeliarico/clustering
|
22a25b282ac6fe00c9ad773051afa431614bdb51
|
fcc56ae8c93ca64c8f35dc9b31a397daa114a531
|
refs/heads/master
| 2020-08-30T09:12:37.570380
| 2020-03-10T10:25:15
| 2020-03-10T10:25:15
| 218,328,253
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 438
|
r
|
plots.R
|
# Visualize ground-truth cluster labels for each benchmark dataset
# (s1c..s4c, a1c..a3c).  plot_real_clusters() is a project helper and the
# data objects are assumed loaded elsewhere -- TODO confirm before sourcing.
plot_real_clusters(s1c, "real_cluster", "V1", "V2")
plot_real_clusters(s2c, "real_cluster", "V1", "V2")
plot_real_clusters(s3c, "real_cluster", "V1", "V2")
plot_real_clusters(s4c, "real_cluster", "V1", "V2")
plot_real_clusters(a1c, "real_cluster", "V1", "V2")
plot_real_clusters(a2c, "real_cluster", "V1", "V2")
plot_real_clusters(a3c, "real_cluster", "V1", "V2")
# Returns a fixed greeting string; the argument is accepted but unused.
# (The string is reproduced exactly, including its original typo, because
# callers may compare against it.)
noelia <- function(x) {
  "#Hello world \n My name si Noelia"
}
|
0fb0aa1a2ca164759faf1ed3bb5fa04eb1690a29
|
80b4f0e0bbbf09b68a517ff02f1e409e9b6508e9
|
/Mapa_IBGE_RJ.r
|
4d2e03ba2c7c5dce5c9f67b718b8e96ed74b712b
|
[] |
no_license
|
arthurwlima/BacterialIndicators_CoastalRJ
|
55ee011f9c4ea07cb0617e3b9c3f9d342f2f3f82
|
492dcf78a2284d0aad1d0c4507d4724ceb8b5371
|
refs/heads/master
| 2020-05-04T18:33:29.568181
| 2019-04-12T18:56:48
| 2019-04-12T18:56:48
| 179,358,114
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,841
|
r
|
Mapa_IBGE_RJ.r
|
## 20190320: adjust the circle sizes and the legend scale (original note in Portuguese)
# http://environmentalcomputing.net/making-maps-of-your-study-sites/
# wget(ftp://geoftp.ibge.gov.br/cartas_e_mapas/bases_cartograficas_continuas/bc250/versao2017/shapefile/Limites_v2017.zip)
# gunzip ./Limites_v2017.zip
library(sp)
library(rgeos)
library(rgdal)
library(ggplot2)
library(ggmap)
library(dplyr)
library(raster)
# Bacterial-indicator sampling data with Lat/Long columns.
# NOTE(review): absolute home-directory path and header=T (prefer TRUE).
d <- read.csv("/home/arthurw/Documents/FitoPK/IlhasRio/Mapas/EspIndicadoras_IlhasRio_20180227_20181213_LatLong.csv", header=T, sep=',')
# Set geo Limits for the Coastal Rio de Janeiro and Cagarras Islands
geo_bounds <- c(left = -43.25, bottom = -23.1, right =-43.1, top = -22.92)
# Corner points of the bounding box, promoted to a SpatialPoints object so
# extent() below can crop the IBGE country shapefile to the study area.
Sites.grid <- expand.grid(lon_bound = c(geo_bounds[1], geo_bounds[3]),
                          lat_bound = c(geo_bounds[2], geo_bounds[4]))
coordinates(Sites.grid) <- ~ lon_bound + lat_bound
IBGE <- readOGR(dsn = "lim_pais_a.shp")
IBGE_RJ <- crop(IBGE, extent(Sites.grid))
# Split samples by depth code -- presumably S = surface, F = bottom; confirm.
dS <- subset(d, Prof=="S")
dF <- subset(d, Prof=="F")
# Plot sampling stations over the coastline polygon, one facet per sampling
# date, with point size proportional to sqrt(Enterococcus counts).
#
# Mapa -- SpatialPolygons coastline (fortified by ggplot2 via geom_polygon)
# d    -- data frame with Long, Lat, Enterococcus, ColetaData columns
#
# Bug fix: the original chain ended at facet_wrap() without a trailing `+`,
# so the following `theme_classic()` call was evaluated on its own line and
# its result silently discarded.  The theme is now part of the plot.
plotColeta <- function(Mapa, d){
  g <- ggplot() +
    geom_polygon(data = Mapa, aes(x=long, y=lat, group=group), fill="gray85", colour="black") +
    coord_equal() +
    geom_point(data=d, aes(x=Long, y=Lat, size=sqrt(d$Enterococcus)), colour="blue", alpha=0.5, show.legend=TRUE) +
    #geom_text(data = legend_bubbles, size = 3, aes(x = -43.05, y = -23.05+ 2 * radius/50, label = label)) + TODO: build a legend with concentric circles
    #ggtitle(d$ColetaData[1]) +
    labs(x="Longitude", y="Latitude") +
    facet_wrap(~ColetaData) +
    theme_classic()
  g
}
# Plot one map for each sampling date
# Writes one SVG per (depth, sampling campaign) combination into the
# working directory.
for(i in unique(d$Prof)){
  print(i)
  count=0  # NOTE(review): `count` is assigned but never used.
  for(j in unique(d$Coleta)){
    svg(paste(i,j, "_Ecoli.svg", sep=''),8,6)
    print(j)
    # NOTE(review): rm(dd) warns on the first iteration (dd does not exist
    # yet) and is redundant since dd is reassigned on the next line.
    rm(dd)
    dd <- subset(d, Prof==i & Coleta==j)
    plot(plotColeta(IBGE_RJ, dd))
    dev.off()
  }
}
|
c327c9e7266d39c51c9dd7d1f6cdc097d9871ec9
|
30f79e55a7c527c019467e256f0713b04dc9903e
|
/script/variation_partitioning_revise3yr.R
|
54e1a3797c2d2f1f9a0d6733fc9e7ad6dccb984b
|
[] |
no_license
|
tengguangliang/fishsize_varpart
|
4e09739b65e755f800e9d953b2a6d64e917cdccc
|
01831f722f622f5ae4eee6ace4ee8d0d012b05e6
|
refs/heads/master
| 2021-09-10T21:28:37.286045
| 2018-04-02T14:16:03
| 2018-04-02T14:16:03
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 22,202
|
r
|
variation_partitioning_revise3yr.R
|
# 3yr (time-interval) lag for temperature (except AI and GOA)
# 20180225
# For each stock: variation partitioning (vegan::varpart) of the composition
# matrix B against exploitation rate and 3-yr-lagged SST; accumulate adjusted
# R^2 fractions in adjR2Bout and permutation p-values in sigout.
# Per-stock data objects (*num, exploitation, sst, lifehist) must be loaded
# before sourcing -- TODO confirm loading script.
# NOTE(review): the first two stocks compute CV from A$X1/M while later
# stocks use A$X1 directly; CV is scale-invariant so the values agree, but
# the inconsistency is worth cleaning up.  The repeated per-stock pattern
# below is a strong candidate for extraction into a helper function.
meanA3yr <- matrix(NA,28,2) # West US: 12 species, Alaska: 6 species, North Sea: 9 species
cvA3yr <- matrix(NA,28,2)
lag=3
# Arrowtooth flounder
B <- arrowtoothFnum
idxF <- match(arrowtoothF$YEAR,exploitation$YEAR)
idxSST <- match(arrowtoothF$YEAR,sst$YEAR)
A <- data.frame(cbind(exploitation$Arrowtooth.flounder[idxF],sst$SST[idxSST-lag]))
M <- lifehist$M[1]
meanA3yr[1,] <- c(mean(A$X1/M),mean(A$X2))
cvA3yr[1,] <- c(sd(A$X1/M)/mean(A$X1/M),sd(A$X2)/mean(A$X2))
colnames(A)=c("exploitation","temperature")
partB<-varpart(B, X= ~exploitation,~temperature,data=A)
plot(partB)
adjR2B<-c(partB$part$indfract$Adj.R.squared[1:3],partB$part$fract$Adj.R.squared[3])
B.rda <- rda(B~.,data=A)
plot(B.rda)
# Permutation tests for each predictor conditional on the other.
sig<-c(anova(rda(B~exploitation+Condition(temperature),data=A))$`Pr(>F)`[1],anova(rda(B~temperature+Condition(exploitation),data=A))$`Pr(>F)`[1])
# output
adjR2Bout <- adjR2B
sigout <- sig
# Chilipepper RF
B <- chilipepperRFfnum
idxF <- match(chilipepperRFf$YEAR,exploitation$YEAR)
idxSST <- match(chilipepperRFf$YEAR,sst$YEAR)
A <- data.frame(cbind(exploitation$Chilipepper.rockfish[idxF],sst$SST[idxSST-lag]))
M <- lifehist$M[2]
meanA3yr[2,] <- c(mean(A$X1/M),mean(A$X2))
cvA3yr[2,] <- c(sd(A$X1/lifehist$M[2])/mean(A$X1/lifehist$M[2]),sd(A$X2)/mean(A$X2))
colnames(A)=c("exploitation","temperature")
partB<-varpart(B, X= ~exploitation,~temperature,data=A)
plot(partB)
adjR2B<-c(partB$part$indfract$Adj.R.squared[1:3],partB$part$fract$Adj.R.squared[3])
B.rda <- rda(B~.,data=A)
plot(B.rda)
sig<-c(anova(rda(B~exploitation+Condition(temperature),data=A))$`Pr(>F)`[1],anova(rda(B~temperature+Condition(exploitation),data=A))$`Pr(>F)`[1])
adjR2Bout <- rbind(adjR2Bout,adjR2B)
sigout <- rbind(sigout,sig)
#Darkblotched RF
B <- darkblotchedRFfnum
idxF <- match(darkblotchedRFf$YEAR,exploitation$YEAR)
idxSST <- match(darkblotchedRFf$YEAR,sst$YEAR)
A <- data.frame(cbind(exploitation$Dark.blotched.rockfish[idxF],sst$SST[idxSST-lag]))
M <- lifehist$M[3]
meanA3yr[3,] <- c(mean(A$X1/M),mean(A$X2))
cvA3yr[3,] <- c(sd(A$X1)/mean(A$X1),sd(A$X2)/mean(A$X2))
colnames(A)=c("exploitation","temperature")
partB<-varpart(B, X= ~exploitation,~temperature,data=A)
plot(partB)
adjR2B<-c(partB$part$indfract$Adj.R.squared[1:3],partB$part$fract$Adj.R.squared[3])
B.rda <- rda(B~.,data=A)
plot(B.rda)
sig<-c(anova(rda(B~exploitation+Condition(temperature),data=A))$`Pr(>F)`[1],anova(rda(B~temperature+Condition(exploitation),data=A))$`Pr(>F)`[1])
adjR2Bout <- rbind(adjR2Bout,adjR2B)
sigout <- rbind(sigout,sig)
#Dover sole
B <- dovernum
idxF <- match(doverS$YEAR,exploitation$YEAR)
idxSST <- match(doverS$YEAR,sst$YEAR)
A <- data.frame(cbind(exploitation$Dover.Sole[idxF],sst$SST[idxSST-lag]))
M <- lifehist$M[4]
meanA3yr[4,] <- c(mean(A$X1/M),mean(A$X2))
cvA3yr[4,] <- c(sd(A$X1)/mean(A$X1),sd(A$X2)/mean(A$X2))
colnames(A)=c("exploitation","temperature")
partB<-varpart(B, X= ~exploitation,~temperature,data=A)
plot(partB)
adjR2B<-c(partB$part$indfract$Adj.R.squared[1:3],partB$part$fract$Adj.R.squared[3])
B.rda <- rda(B~.,data=A)
plot(B.rda)
sig<-c(anova(rda(B~exploitation+Condition(temperature),data=A))$`Pr(>F)`[1],anova(rda(B~temperature+Condition(exploitation),data=A))$`Pr(>F)`[1])
adjR2Bout <- rbind(adjR2Bout,adjR2B)
sigout <- rbind(sigout,sig)
#English sole
B <- englishFnum
idxF <- match(englishF$YEAR,exploitation$YEAR)
idxSST <- match(englishF$YEAR,sst$YEAR)
A <- data.frame(cbind(exploitation$English.Sole[idxF],sst$SST[idxSST-lag]))
M <- lifehist$M[5]
meanA3yr[5,] <- c(mean(A$X1/M),mean(A$X2))
cvA3yr[5,] <- c(sd(A$X1)/mean(A$X1),sd(A$X2)/mean(A$X2))
colnames(A)=c("exploitation","temperature")
partB<-varpart(B, X= ~exploitation,~temperature,data=A)
plot(partB)
adjR2B<-c(partB$part$indfract$Adj.R.squared[1:3],partB$part$fract$Adj.R.squared[3])
B.rda <- rda(B~.,data=A)
plot(B.rda)
sig<-c(anova(rda(B~exploitation+Condition(temperature),data=A))$`Pr(>F)`[1],anova(rda(B~temperature+Condition(exploitation),data=A))$`Pr(>F)`[1])
adjR2Bout <- rbind(adjR2Bout,adjR2B)
sigout <- rbind(sigout,sig)
#Lingcod
B <- lingcodFnum
idxF <- match(lingcodF$YEAR,exploitation$YEAR)
idxSST <- match(lingcodF$YEAR,sst$YEAR)
A <- data.frame(cbind(exploitation$Lingcod[idxF],sst$SST[idxSST-lag]))
M <- lifehist$M[6]
meanA3yr[6,] <- c(mean(A$X1/M),mean(A$X2))
cvA3yr[6,] <- c(sd(A$X1)/mean(A$X1),sd(A$X2)/mean(A$X2))
colnames(A)=c("exploitation","temperature")
partB<-varpart(B, X= ~exploitation,~temperature,data=A)
plot(partB)
adjR2B<-c(partB$part$indfract$Adj.R.squared[1:3],partB$part$fract$Adj.R.squared[3])
B.rda <- rda(B~.,data=A)
plot(B.rda)
sig<-c(anova(rda(B~exploitation+Condition(temperature),data=A))$`Pr(>F)`[1],anova(rda(B~temperature+Condition(exploitation),data=A))$`Pr(>F)`[1])
adjR2Bout <- rbind(adjR2Bout,adjR2B)
sigout <- rbind(sigout,sig)
#Longspine
B <- longspineNum
idxF <- match(longspine$YEAR,exploitation$YEAR)
idxSST <- match(longspine$YEAR,sst$YEAR)
A <- data.frame(cbind(exploitation$Longspine.thornyhead[idxF],sst$SST[idxSST-lag]))
M <- lifehist$M[7]
meanA3yr[7,] <- c(mean(A$X1/M),mean(A$X2))
cvA3yr[7,] <- c(sd(A$X1)/mean(A$X1),sd(A$X2)/mean(A$X2))
colnames(A)=c("exploitation","temperature")
partB<-varpart(B, X= ~exploitation,~temperature,data=A)
plot(partB)
adjR2B<-c(partB$part$indfract$Adj.R.squared[1:3],partB$part$fract$Adj.R.squared[3])
B.rda <- rda(B~.,data=A)
plot(B.rda)
sig<-c(anova(rda(B~exploitation+Condition(temperature),data=A))$`Pr(>F)`[1],anova(rda(B~temperature+Condition(exploitation),data=A))$`Pr(>F)`[1])
adjR2Bout <- rbind(adjR2Bout,adjR2B)
sigout <- rbind(sigout,sig)
#Petrale Sole
B <- petraleSoleFnum
idxF <- match(petraleSole$YEAR,exploitation$YEAR)
idxSST <- match(petraleSole$YEAR,sst$YEAR)
A <- data.frame(cbind(exploitation$Petrale.sole[idxF],sst$SST[idxSST-lag]))
M <- lifehist$M[8]
meanA3yr[8,] <- c(mean(A$X1/M),mean(A$X2))
cvA3yr[8,] <- c(sd(A$X1)/mean(A$X1),sd(A$X2)/mean(A$X2))
colnames(A)=c("exploitation","temperature")
partB<-varpart(B, X= ~exploitation,~temperature,data=A)
plot(partB)
adjR2B<-c(partB$part$indfract$Adj.R.squared[1:3],partB$part$fract$Adj.R.squared[3])
B.rda <- rda(B~.,data=A)
plot(B.rda)
sig<-c(anova(rda(B~exploitation+Condition(temperature),data=A))$`Pr(>F)`[1],anova(rda(B~temperature+Condition(exploitation),data=A))$`Pr(>F)`[1])
adjR2Bout <- rbind(adjR2Bout,adjR2B)
sigout <- rbind(sigout,sig)
#Sardine
B <- sardineNum[-4,] #exclude 1984
idxF <- match(sardineS1$YEAR,exploitation$YEAR)
idxSST <- match(sardineS1$YEAR,sst$YEAR)
A <- data.frame(cbind(exploitation$Sardine[idxF],sst$SST[idxSST-lag]))[-4,]
M <- lifehist$M[9]
meanA3yr[9,] <- c(mean(A$X1/M),mean(A$X2))
cvA3yr[9,] <- c(sd(A$X1)/mean(A$X1),sd(A$X2)/mean(A$X2))
colnames(A)=c("exploitation","temperature")
partB<-varpart(B, X= ~exploitation,~temperature,data=A)
plot(partB)
adjR2B<-c(partB$part$indfract$Adj.R.squared[1:3],partB$part$fract$Adj.R.squared[3])
B.rda <- rda(B~.,data=A)
plot(B.rda)
sig<-c(anova(rda(B~exploitation+Condition(temperature),data=A))$`Pr(>F)`[1],anova(rda(B~temperature+Condition(exploitation),data=A))$`Pr(>F)`[1])
adjR2Bout <- rbind(adjR2Bout,adjR2B)
sigout <- rbind(sigout,sig)
#Splitnose RF
B <- splitnoseRFfnum
idxF <- match(splitnoseRFf$YEAR,exploitation$YEAR)
idxSST <- match(splitnoseRFf$YEAR,sst$YEAR)
A <- data.frame(cbind(exploitation$Splitnose.rockfish[idxF],sst$SST[idxSST-lag]))
M <- lifehist$M[10]
meanA3yr[10,] <- c(mean(A$X1/M),mean(A$X2))
cvA3yr[10,] <- c(sd(A$X1)/mean(A$X1),sd(A$X2)/mean(A$X2))
colnames(A)=c("exploitation","temperature")
partB<-varpart(B, X= ~exploitation,~temperature,data=A)
plot(partB)
adjR2B<-c(partB$part$indfract$Adj.R.squared[1:3],partB$part$fract$Adj.R.squared[3])
B.rda <- rda(B~.,data=A)
plot(B.rda)
sig<-c(anova(rda(B~exploitation+Condition(temperature),data=A))$`Pr(>F)`[1],anova(rda(B~temperature+Condition(exploitation),data=A))$`Pr(>F)`[1])
adjR2Bout <- rbind(adjR2Bout,adjR2B)
sigout <- rbind(sigout,sig)
#Yelloweye RF
B <- yelloweyeRFnum
idxF <- match(yelloweyeRF$YEAR,exploitation$YEAR)
idxSST <- match(yelloweyeRF$YEAR,sst$YEAR)
A <- data.frame(cbind(exploitation$Yelloweye.rockfish[idxF],sst$SST[idxSST-lag]))
M <- lifehist$M[11]
meanA3yr[11,] <- c(mean(A$X1/M),mean(A$X2))
cvA3yr[11,] <- c(sd(A$X1)/mean(A$X1),sd(A$X2)/mean(A$X2))
colnames(A)=c("exploitation","temperature")
partB<-varpart(B, X= ~exploitation,~temperature,data=A)
plot(partB)
adjR2B<-c(partB$part$indfract$Adj.R.squared[1:3],partB$part$fract$Adj.R.squared[3])
B.rda <- rda(B~.,data=A)
plot(B.rda)
sig<-c(anova(rda(B~exploitation+Condition(temperature),data=A))$`Pr(>F)`[1],anova(rda(B~temperature+Condition(exploitation),data=A))$`Pr(>F)`[1])
adjR2Bout <- rbind(adjR2Bout,adjR2B)
sigout <- rbind(sigout,sig)
# Alaska ----
# AI pollock (row 12) and GOA pollock (row 13): no analysis is run for these
# stocks, so record NA placeholders to keep row alignment across all tables.
for (sp_row in 12:13) {
  meanA3yr[sp_row, ] <- NA
  cvA3yr[sp_row, ] <- NA
  adjR2Bout <- rbind(adjR2Bout, NA)
  sigout <- rbind(sigout, NA)
}
#BSAI flathead sole (row 14)
# Annual EBS bottom temperature for this stock; keep YEAR (col 1) and
# BOT_TEMP (col 5).
ebs_flatheadSoleAnnual <- read.csv("~/Desktop/fishsize_varpart/data/Alaska/envi/ebs_flatheadSoleAnnual.csv",header = TRUE)
ebsBottomT <- ebs_flatheadSoleAnnual[,c(1,5)]
B <- BSAIflatheadSoleNum
# Align survey years with the temperature series, dropping unmatched years,
# then offset by `lag` (exploitation leads, temperature lags).
idxBottomT <- match(BSAIflatheadSoleF$YEAR,ebsBottomT$YEAR)
idxBottomT <- idxBottomT[is.finite(idxBottomT)]
idxF <- match(ebsBottomT$YEAR[idxBottomT],exploitation$YEAR)
idxF <- idxF[is.finite(idxF)]
idxF <- idxF[seq(1+lag,length(idxF))]
idxBottomT <- idxBottomT[seq(1,length(idxBottomT)-lag)]
# NOTE(review): the extra -1 on idxBottomT here differs from the EBS Pacific
# cod section below (which uses idxBottomT with no offset) — confirm intended.
A <- data.frame(cbind(exploitation$Flathead.sole.BSAI[idxF],ebsBottomT$BOT_TEMP[idxBottomT-1]))
# Subset the composition matrix to the retained years.
idxB <- match(exploitation$YEAR[idxF],BSAIflatheadSoleF$YEAR)
B <- B[idxB,]
M <- lifehist$M[14]
meanA3yr[14,] <- c(mean(A$X1/M),mean(A$X2))
cvA3yr[14,] <- c(sd(A$X1)/mean(A$X1),sd(A$X2)/mean(A$X2))
colnames(A)=c("exploitation","temperature")
partB<-varpart(B, X= ~exploitation,~temperature,data=A)
plot(partB)
adjR2B<-c(partB$part$indfract$Adj.R.squared[1:3],partB$part$fract$Adj.R.squared[3])
B.rda <- rda(B~.,data=A)
plot(B.rda)
sig<-c(anova(rda(B~exploitation+Condition(temperature),data=A))$`Pr(>F)`[1],anova(rda(B~temperature+Condition(exploitation),data=A))$`Pr(>F)`[1])
adjR2Bout <- rbind(adjR2Bout,adjR2B)
sigout <- rbind(sigout,sig)
# GOA flathead sole (marked DONE upstream): no analysis here, so append NA
# result rows and blank out row 15 of the driver summaries.
adjR2Bout <- rbind(adjR2Bout, NA)
sigout <- rbind(sigout, NA)
meanA3yr[15, ] <- NA
cvA3yr[15, ] <- NA
#EBS pacific cod (row 16)
# Annual EBS bottom temperature for this stock; keep YEAR (col 1) and
# BOT_TEMP (col 5).
ebs_pacificCodAnnual <- read.csv("~/Desktop/fishsize_varpart/data/Alaska/envi/ebs_pacificCodAnnual.csv",header = TRUE)
ebsBottomT <- ebs_pacificCodAnnual[,c(1,5)]
B <- EBSpacifcCodNum
# Align survey years with the temperature series, dropping unmatched years,
# then offset by `lag` (exploitation leads, temperature lags).
idxBottomT <- match(EBSpacifcCod$YEAR,ebsBottomT$YEAR)
idxBottomT <- idxBottomT[is.finite(idxBottomT)]
idxF <- match(ebsBottomT$YEAR[idxBottomT],exploitation$YEAR)
idxF <- idxF[is.finite(idxF)]
idxF <- idxF[seq(1+lag,length(idxF))]
idxBottomT <- idxBottomT[seq(1,length(idxBottomT)-lag)]
A <- data.frame(cbind(exploitation$Pacific.cod.BSAI[idxF],ebsBottomT$BOT_TEMP[idxBottomT]))
# Subset the composition matrix to the retained years.
idxB <- match(exploitation$YEAR[idxF],EBSpacifcCod$YEAR)
B <- B[idxB,]
M <- lifehist$M[16]
meanA3yr[16,] <- c(mean(A$X1/M),mean(A$X2))
cvA3yr[16,] <- c(sd(A$X1)/mean(A$X1),sd(A$X2)/mean(A$X2))
colnames(A)=c("exploitation","temperature")
partB<-varpart(B, X= ~exploitation,~temperature,data=A)
plot(partB)
adjR2B<-c(partB$part$indfract$Adj.R.squared[1:3],partB$part$fract$Adj.R.squared[3])
B.rda <- rda(B~.,data=A)
plot(B.rda)
sig<-c(anova(rda(B~exploitation+Condition(temperature),data=A))$`Pr(>F)`[1],anova(rda(B~temperature+Condition(exploitation),data=A))$`Pr(>F)`[1])
adjR2Bout <- rbind(adjR2Bout,adjR2B)
sigout <- rbind(sigout,sig)
# GOA Pacific cod (row 17) and GOA rex sole (row 18): no analysis is run for
# these stocks, so record NA placeholders to keep row alignment.
for (sp_row in 17:18) {
  meanA3yr[sp_row, ] <- NA
  cvA3yr[sp_row, ] <- NA
  adjR2Bout <- rbind(adjR2Bout, NA)
  sigout <- rbind(sigout, NA)
}
# North Sea ---------------------------------------------------------------
# Same variance-partitioning recipe as the West Coast stocks, applied to the
# ICES North Sea stocks (rows 19-28).  For each stock: align years, lag the
# Q1 temperature series, scale F by natural mortality M, summarise drivers,
# then partition the length-frequency variance with varpart/partial RDA.
# FIX: every cvA3yr temperature entry below previously computed
#   sd(A$temperature)/mean(A$exploitation)
# i.e. a temperature SD over an exploitation mean.  A coefficient of
# variation divides by the mean of the SAME quantity; the denominator is now
# mean(A$temperature), consistent with cvA3yr rows 6-11 above.
year<-1977:2014
NSavg_tempQ1 <- read.csv("~/Desktop/fishsize_varpart/data/NorthSea/Oceanography/NSavg_tempQ1.csv",header = TRUE)
#cod (row 19)
idxF <- match(year,cod$Year)
idxbottomT <- match(year,NSavg_tempQ1$YEAR)
idxF <- idxF[seq(1+lag,length(idxF))]
idxbottomT <- idxbottomT[seq(1,length(idxbottomT)-lag)]
A <- data.frame(cbind(cod$F[idxF],NSavg_tempQ1$temperature[idxbottomT]))
colnames(A)=c("exploitation","temperature")
idxB<- match(cod$Year[idxF],codLF$Year)
B <- codLF[idxB,-1] #LF w/o year
M <- cod$M[idxF]
meanA3yr[19,] <- c(mean(A$exploitation/M),mean(A$temperature))
cvA3yr[19,] <- c(sd(A$exploitation/M)/mean(A$exploitation/M),sd(A$temperature)/mean(A$temperature)) # CV fix
partB<-varpart(B, X= ~exploitation,~temperature,data=A)
plot(partB)
adjR2B<-c(partB$part$indfract$Adj.R.squared[1:3],partB$part$fract$Adj.R.squared[3])
B.rda <- rda(B~.,data=A)
plot(B.rda)
sig<-c(anova(rda(B~exploitation+Condition(temperature),data=A))$`Pr(>F)`[1],anova(rda(B~temperature+Condition(exploitation),data=A))$`Pr(>F)`[1])
adjR2Bout <- rbind(adjR2Bout,adjR2B)
sigout <- rbind(sigout,sig)
#haddock (row 20)
idxF <- match(year,haddock$Year)
idxbottomT <- match(year,NSavg_tempQ1$YEAR)
idxF <- idxF[seq(1+lag,length(idxF))]
idxbottomT <- idxbottomT[seq(1,length(idxbottomT)-lag)]
A <- data.frame(cbind(haddock$F[idxF],NSavg_tempQ1$temperature[idxbottomT]))
colnames(A)=c("exploitation","temperature")
idxB<- match(haddock$Year[idxF],haddockLF$Year)
B <- haddockLF[idxB,-1] #LF w/o year
M <- haddock$M[idxF]
meanA3yr[20,] <- c(mean(A$exploitation/M),mean(A$temperature))
cvA3yr[20,] <- c(sd(A$exploitation/M)/mean(A$exploitation/M),sd(A$temperature)/mean(A$temperature)) # CV fix
partB<-varpart(B, X= ~exploitation,~temperature,data=A)
plot(partB)
adjR2B<-c(partB$part$indfract$Adj.R.squared[1:3],partB$part$fract$Adj.R.squared[3])
B.rda <- rda(B~.,data=A)
plot(B.rda)
sig<-c(anova(rda(B~exploitation+Condition(temperature),data=A))$`Pr(>F)`[1],anova(rda(B~temperature+Condition(exploitation),data=A))$`Pr(>F)`[1])
adjR2Bout <- rbind(adjR2Bout,adjR2B)
sigout <- rbind(sigout,sig)
#herring (row 21)
idxF <- match(year,herring$Year)
idxbottomT <- match(year,NSavg_tempQ1$YEAR)
idxF <- idxF[seq(1+lag,length(idxF))]
idxbottomT <- idxbottomT[seq(1,length(idxbottomT)-lag)]
A <- data.frame(cbind(herring$F[idxF],NSavg_tempQ1$temperature[idxbottomT]))
colnames(A)=c("exploitation","temperature")
idxB<- match(herring$Year[idxF],herringLF$Year)
B <- herringLF[idxB,-1] #LF w/o year
M <- herring$M[idxF]
meanA3yr[21,] <- c(mean(A$exploitation/M),mean(A$temperature))
cvA3yr[21,] <- c(sd(A$exploitation/M)/mean(A$exploitation/M),sd(A$temperature)/mean(A$temperature)) # CV fix
partB<-varpart(B, X= ~exploitation,~temperature,data=A)
plot(partB)
adjR2B<-c(partB$part$indfract$Adj.R.squared[1:3],partB$part$fract$Adj.R.squared[3])
B.rda <- rda(B~.,data=A)
plot(B.rda)
sig<-c(anova(rda(B~exploitation+Condition(temperature),data=A))$`Pr(>F)`[1],anova(rda(B~temperature+Condition(exploitation),data=A))$`Pr(>F)`[1])
adjR2Bout <- rbind(adjR2Bout,adjR2B)
sigout <- rbind(sigout,sig)
#mackerel 1980- (row 22): series starts after 1977, so NA matches are dropped
idxF <- match(year,mackerel$Year)
idxbottomT <- match(mackerel$Year,NSavg_tempQ1$YEAR)
idxF <- idxF[!is.na(idxF)]
idxF <- idxF[seq(1+lag,length(idxF))]
idxbottomT <- idxbottomT[!is.na(idxbottomT)]
idxbottomT <- idxbottomT[seq(1,length(idxbottomT)-lag)]
A <- data.frame(cbind(mackerel$F[idxF],NSavg_tempQ1$temperature[idxbottomT]))
colnames(A)=c("exploitation","temperature")
idxB<- match(mackerel$Year[idxF],mackerelLF$Year)
B <- mackerelLF[idxB,-1] #LF w/o year
# NOTE(review): M taken as a scalar from the life-history table here, unlike
# cod/haddock/herring which use a per-year vector — confirm intended.
M <- lifehist$M[22]
meanA3yr[22,] <- c(mean(A$exploitation/M),mean(A$temperature))
cvA3yr[22,] <- c(sd(A$exploitation/M)/mean(A$exploitation/M),sd(A$temperature)/mean(A$temperature)) # CV fix
partB<-varpart(B, X= ~exploitation,~temperature,data=A)
plot(partB)
adjR2B<-c(partB$part$indfract$Adj.R.squared[1:3],partB$part$fract$Adj.R.squared[3])
B.rda <- rda(B~.,data=A)
plot(B.rda)
sig<-c(anova(rda(B~exploitation+Condition(temperature),data=A))$`Pr(>F)`[1],anova(rda(B~temperature+Condition(exploitation),data=A))$`Pr(>F)`[1])
adjR2Bout <- rbind(adjR2Bout,adjR2B)
sigout <- rbind(sigout,sig)
#norwaypout 1984- (row 23)
idxF <- match(year,norwaypout$Year)
idxbottomT <- match(norwaypout$Year,NSavg_tempQ1$YEAR)
idxF <- idxF[!is.na(idxF)]
idxF <- idxF[seq(1+lag,length(idxF))]
idxbottomT <- idxbottomT[!is.na(idxbottomT)]
idxbottomT <- idxbottomT[seq(1,length(idxbottomT)-lag)]
A <- data.frame(cbind(norwaypout$F[idxF],NSavg_tempQ1$temperature[idxbottomT]))
colnames(A)=c("exploitation","temperature")
idxB<- match(norwaypout$Year[idxF],norwaypoutLF$Year)
B <- norwaypoutLF[idxB,-1] #LF w/o year
M <- norwaypout$M[idxF]
meanA3yr[23,] <- c(mean(A$exploitation/M),mean(A$temperature))
cvA3yr[23,] <- c(sd(A$exploitation/M)/mean(A$exploitation/M),sd(A$temperature)/mean(A$temperature)) # CV fix
partB<-varpart(B, X= ~exploitation,~temperature,data=A)
plot(partB)
adjR2B<-c(partB$part$indfract$Adj.R.squared[1:3],partB$part$fract$Adj.R.squared[3])
B.rda <- rda(B~.,data=A)
plot(B.rda)
sig<-c(anova(rda(B~exploitation+Condition(temperature),data=A))$`Pr(>F)`[1],anova(rda(B~temperature+Condition(exploitation),data=A))$`Pr(>F)`[1])
adjR2Bout <- rbind(adjR2Bout,adjR2B)
sigout <- rbind(sigout,sig)
#plaice (row 24)
idxF <- match(year,plaice$Year)
idxbottomT <- match(year,NSavg_tempQ1$YEAR)
idxF <- idxF[seq(1+lag,length(idxF))]
idxbottomT <- idxbottomT[seq(1,length(idxbottomT)-lag)]
A <- data.frame(cbind(plaice$F[idxF],NSavg_tempQ1$temperature[idxbottomT]))
colnames(A)=c("exploitation","temperature")
idxB<- match(plaice$Year[idxF],plaiceLF$Year)
B <- plaiceLF[idxB,-1] #LF w/o year
M <- lifehist$M[24]
meanA3yr[24,] <- c(mean(A$exploitation/M),mean(A$temperature))
cvA3yr[24,] <- c(sd(A$exploitation/M)/mean(A$exploitation/M),sd(A$temperature)/mean(A$temperature)) # CV fix
partB<-varpart(B, X= ~exploitation,~temperature,data=A)
plot(partB)
adjR2B<-c(partB$part$indfract$Adj.R.squared[1:3],partB$part$fract$Adj.R.squared[3])
B.rda <- rda(B~.,data=A)
plot(B.rda)
sig<-c(anova(rda(B~exploitation+Condition(temperature),data=A))$`Pr(>F)`[1],anova(rda(B~temperature+Condition(exploitation),data=A))$`Pr(>F)`[1])
adjR2Bout <- rbind(adjR2Bout,adjR2B)
sigout <- rbind(sigout,sig)
#saithe (row 25)
idxF <- match(year,saithe$Year)
idxbottomT <- match(year,NSavg_tempQ1$YEAR)
idxF <- idxF[seq(1+lag,length(idxF))]
idxbottomT <- idxbottomT[seq(1,length(idxbottomT)-lag)]
A <- data.frame(cbind(saithe$F[idxF],NSavg_tempQ1$temperature[idxbottomT]))
colnames(A)=c("exploitation","temperature")
idxB<- match(saithe$Year[idxF],saitheLF$Year)
B <- saitheLF[idxB,-1] #LF w/o year
M <- lifehist$M[25]
meanA3yr[25,] <- c(mean(A$exploitation/M),mean(A$temperature))
cvA3yr[25,] <- c(sd(A$exploitation/M)/mean(A$exploitation/M),sd(A$temperature)/mean(A$temperature)) # CV fix
partB<-varpart(B, X= ~exploitation,~temperature,data=A)
plot(partB)
adjR2B<-c(partB$part$indfract$Adj.R.squared[1:3],partB$part$fract$Adj.R.squared[3])
B.rda <- rda(B~.,data=A)
plot(B.rda)
sig<-c(anova(rda(B~exploitation+Condition(temperature),data=A))$`Pr(>F)`[1],anova(rda(B~temperature+Condition(exploitation),data=A))$`Pr(>F)`[1])
adjR2Bout <- rbind(adjR2Bout,adjR2B)
sigout <- rbind(sigout,sig)
#sole (row 26)
idxF <- match(year,sole$Year)
idxbottomT <- match(year,NSavg_tempQ1$YEAR)
idxF <- idxF[seq(1+lag,length(idxF))]
idxbottomT <- idxbottomT[seq(1,length(idxbottomT)-lag)]
A <- data.frame(cbind(sole$F[idxF],NSavg_tempQ1$temperature[idxbottomT]))
colnames(A)=c("exploitation","temperature")
idxB<- match(sole$Year[idxF],soleLF$Year)
B <- soleLF[idxB,-1] #LF w/o year
M <- lifehist$M[26]
meanA3yr[26,] <- c(mean(A$exploitation/M),mean(A$temperature))
cvA3yr[26,] <- c(sd(A$exploitation/M)/mean(A$exploitation/M),sd(A$temperature)/mean(A$temperature)) # CV fix
partB<-varpart(B, X= ~exploitation,~temperature,data=A)
plot(partB)
adjR2B<-c(partB$part$indfract$Adj.R.squared[1:3],partB$part$fract$Adj.R.squared[3])
B.rda <- rda(B~.,data=A)
plot(B.rda)
sig<-c(anova(rda(B~exploitation+Condition(temperature),data=A))$`Pr(>F)`[1],anova(rda(B~temperature+Condition(exploitation),data=A))$`Pr(>F)`[1])
adjR2Bout <- rbind(adjR2Bout,adjR2B)
sigout <- rbind(sigout,sig)
#sprat (row 27)
idxF <- match(year,sprat$Year)
idxbottomT <- match(year,NSavg_tempQ1$YEAR)
idxF <- idxF[seq(1+lag,length(idxF))]
idxbottomT <- idxbottomT[seq(1,length(idxbottomT)-lag)]
A <- data.frame(cbind(sprat$F[idxF],NSavg_tempQ1$temperature[idxbottomT]))
colnames(A)=c("exploitation","temperature")
M <- sprat$M[idxF]
idxB<- match(sprat$Year[idxF],spratLF$Year)
B <- spratLF[idxB,-1] #LF w/o year
meanA3yr[27,] <- c(mean(A$exploitation/M),mean(A$temperature))
cvA3yr[27,] <- c(sd(A$exploitation/M)/mean(A$exploitation/M),sd(A$temperature)/mean(A$temperature)) # CV fix
partB<-varpart(B, X= ~exploitation,~temperature,data=A)
plot(partB)
adjR2B<-c(partB$part$indfract$Adj.R.squared[1:3],partB$part$fract$Adj.R.squared[3])
B.rda <- rda(B~.,data=A)
plot(B.rda)
sig<-c(anova(rda(B~exploitation+Condition(temperature),data=A))$`Pr(>F)`[1],anova(rda(B~temperature+Condition(exploitation),data=A))$`Pr(>F)`[1])
adjR2Bout <- rbind(adjR2Bout,adjR2B)
sigout <- rbind(sigout,sig)
#whiting 1990- (row 28)
idxF <- match(year,whiting$Year)
idxbottomT <- match(whiting$Year,NSavg_tempQ1$YEAR)
idxF <- idxF[!is.na(idxF)]
idxF <- idxF[seq(1+lag,length(idxF))]
idxbottomT <- idxbottomT[!is.na(idxbottomT)]
idxbottomT <- idxbottomT[seq(1,length(idxbottomT)-lag)]
A <- data.frame(cbind(whiting$F[idxF],NSavg_tempQ1$temperature[idxbottomT]))
colnames(A)=c("exploitation","temperature")
M <- whiting$M[idxF[!is.na(idxF)]]
idxB<- match(whiting$Year[idxF],whitingLF$Year)
B <- whitingLF[idxB,-1] #LF w/o year
meanA3yr[28,] <- c(mean(A$exploitation/M),mean(A$temperature))
cvA3yr[28,] <- c(sd(A$exploitation/M)/mean(A$exploitation/M),sd(A$temperature)/mean(A$temperature)) # CV fix
partB<-varpart(B, X= ~exploitation,~temperature,data=A)
plot(partB)
adjR2B<-c(partB$part$indfract$Adj.R.squared[1:3],partB$part$fract$Adj.R.squared[3])
B.rda <- rda(B~.,data=A)
plot(B.rda)
sig<-c(anova(rda(B~exploitation+Condition(temperature),data=A))$`Pr(>F)`[1],anova(rda(B~temperature+Condition(exploitation),data=A))$`Pr(>F)`[1])
adjR2Bout <- rbind(adjR2Bout,adjR2B)
sigout <- rbind(sigout,sig)
# Clamp negative adjusted R^2 fractions to zero before export.
adjR2Bout[adjR2Bout < 0] <- 0

# Export all summary tables.
write.csv(adjR2Bout, file = "output/adjR2Bout3yrTemp.csv")
write.csv(sigout, file = "output/sigout3yrTemp.csv")
write.csv(meanA3yr, file = "output/meanSST3yr_F.csv")
write.csv(cvA3yr, file = "output/cvSST3yr_F.csv")

# Drop the accumulators so a subsequent run starts clean.
rm(adjR2Bout, sigout)
|
0127d6615b12bc3480270114ff6cc1ec5234c0b0
|
50916bc5d8cb3a788e13b3bb109230460d53b263
|
/IFCAM_DoE.R
|
eaea2e2c6fade54f88f5c2ca95d96eaa88639131
|
[] |
no_license
|
Subhasishbasak/Applied-Machine-Learning
|
b3ca7f7019639b4c8132a6af62bc6a85f6260c3b
|
bce48a2ce6f2875d1c71315aa4be50ca2a552ec7
|
refs/heads/master
| 2021-07-20T11:18:57.047462
| 2020-05-19T10:32:26
| 2020-05-19T10:32:26
| 166,579,219
| 0
| 0
| null | null | null | null |
IBM852
|
R
| false
| false
| 4,146
|
r
|
IFCAM_DoE.R
|
# IFCAM practical session Day 2
# TD3: Design of Numerical Experiments
# Required R packages: DiceDesign, randtoolbox
library(DiceDesign)
?mindist
# A single random 9-point design in [0,1]^2 and its minimal inter-point
# distance.
A = matrix(runif(18), ncol = 2)
mindist(A)
# Crude maximin search: over 1000 random designs, keep the one with the
# largest mindist.  Best design -> A1, best criterion value -> cr.
cr = 0
for (i in 1:1000) {
  A = matrix(runif(18), ncol = 2)
  c1 = mindist(A)
  if (c1 > cr) {
    A1 = A
    cr = c1
  }
}
A1 # Gives the design
cr # Gives the maximum of mindist (FIX: was c1, the LAST trial's value)
par(mfrow = c(1, 2))
plot(A, main = "Random allocation", cex = 2)
plot(A1, main = "Maxmin allocation", cex = 2)
A2 = factDesign(2, 3) # Full factorial design (3 levels per factor in 2D)
A2$design
mindist(A2$design)
par(mfrow = c(2, 2))
plot(A2$design, main = "Full factorial", cex = 2)
#####################################
# Low-discrepancy (quasi-random) designs and discrepancy comparison.
library(randtoolbox)
A3=sobol(9,2)
plot(A3,main="Sobol allocation",cex=2)
mindist(A3)
# Compare modified (M2) and centred (C2) L2 discrepancies across the four
# designs built above.
print("Random")
print(discrepancyCriteria(A,type=c('M2','C2')))
print("Random maximin")
print(discrepancyCriteria(A1,type=c('M2','C2')))
print("Factorial")
print(discrepancyCriteria(A2$design,type=c('M2','C2')))
print("Sobol")
print(discrepancyCriteria(A3,type=c('M2','C2')))
# W.r.t. the discrepancy measure (points per sub-volume vs total volume) the
# factorial design is worst; Sobol is the best.
A4=halton(200,8)
pairs(A4,pch=".",cex=3) #Taking all possible 2D projections
A5=sobol(200,8)
pairs(A5,pch=".",cex=3) #Taking all possible 2D projections
###################################################################
#' Latin hypercube sample of N points in the unit hypercube [0, 1]^p.
#'
#' Each column is stratified: exactly one point falls in each of the N
#' equal-width strata [(i-1)/N, i/N), at a uniformly random position.
#'
#' @param N number of design points (rows).
#' @param p number of input dimensions (columns).
#' @return an N x p numeric matrix of points in (0, 1).
lhs <- function(N, p) {
  # One uniform jitter per cell, drawn up front.
  ran <- matrix(runif(N * p), nrow = N, ncol = p)
  x <- matrix(0, nrow = N, ncol = p)
  for (i in seq_len(p)) {
    idx <- sample(N)           # random permutation of strata 1..N
    P <- (idx - ran[, i]) / N  # one point inside each stratum
    x[, i] <- qunif(P)         # qunif on U[0,1] is the identity; kept for clarity
  }
  x
}
# Method (i): maximin Latin hypercube by random restarts — keep the LHS
# design with the largest mindist over 1000 trials.
cr = 0
for (i in 1:1000) {
  A = lhs(20, 2)
  c1 = mindist(A)
  if (c1 > cr) {
    L_1 = A
    cr = c1
  }
}
L_1 # Gives the design
cr # Gives the maximum of mindist (FIX: was c1, the LAST trial's value)
plot(L_1, main = "Latin Hypercube Design")
#########################################################
# TD3: Design of Numerical Experiments — kriging metamodels.
# Required R packages: DiceKriging, DiceView
set.seed(12345)

# Test function on [0, 1]: a damped oscillation times a cosine, plus a
# linear trend, all centred at x = 0.9 (so myfunc(0.9) == 0).
myfunc <- function(x) {
  d <- x - 0.9
  sin(30 * d^4) * cos(2 * d) + d / 2
}
# Visualisation of the test function on a dense grid.
ntest <- 1000
xtest <- seq(0,1,le=ntest)
ytest <- myfunc(xtest)
plot(xtest,ytest,type="l",xlab="x",ylab="y")
################
#b) Simple kriging: trend, variance and range are all FIXED (not estimated).
library(DiceKriging)
help(package="DiceKriging")
?km
# 5-point regular design and its responses.
x=seq(0,1,.25)
y=myfunc(x)
points(x,y)
# Fixed hyperparameters: constant trend mu, process variance sig2,
# correlation length theta (Gaussian kernel).
mu=0
sig2=0.5
theta=0.2
krig=km(~1,design = data.frame(x=x),response = y,covtype = "gauss",coef.trend = mu,
coef.var = sig2,coef.cov = theta)
ypred=predict(krig,xtest,type = "SK",checkNames=F)
# Q2: predictive coefficient of determination on the test grid.
Q2 = 1 - mean((ytest-ypred$mean)^2)/var(ytest)
print(Q2)
names(ypred)
# plot the kriging model: truth, design points, mean prediction and 95% bands
x11()
plot(xtest,ytest,type="l",xlab="x",ylab="y",lwd=2,
main="kriging predictions",ylim=c(min(ypred$lower95),max(ypred$upper95)))
points(x,y,col=2,pch=2,lwd=2)
lines(xtest,ypred$mean,col=4,lwd=2)
lines(xtest,ypred$lower95,col=4,lty=2)
lines(xtest,ypred$upper95,col=4,lty=2)
# using DiceView for the same section plot
library(DiceView)
x11()
sectionview(krig,ylim=c(min(ypred$lower95),max(ypred$upper95)))
lines(xtest,ytest,type="l",xlab="x",ylab="y",lwd=2)
######################################
# Kriging with UNKNOWN hyperparameters: km() estimates them by maximum
# likelihood (Matern 5/2 kernel), and prediction uses universal kriging.
x=seq(0,1,le=7)
y=myfunc(x)
points(x,y)
krig=km(~1,design = data.frame(x=x),response = y,covtype = "matern5_2")
ypred=predict(krig,xtest,type = "UK",checkNames=F)
# Q2: predictive coefficient of determination on the test grid.
Q2 = 1 - mean((ytest-ypred$mean)^2)/var(ytest)
print(Q2)
names(ypred)
# plot the kriging model: truth, design points, mean prediction and 95% bands
x11()
plot(xtest,ytest,type="l",xlab="x",ylab="y",lwd=2,
main="kriging predictions",ylim=c(min(ypred$lower95),max(ypred$upper95)))
points(x,y,col=2,pch=2,lwd=2)
lines(xtest,ypred$mean,col=4,lwd=2)
lines(xtest,ypred$lower95,col=4,lty=2)
lines(xtest,ypred$upper95,col=4,lty=2)
# using DiceView for the same section plot
library(DiceView)
x11()
sectionview(krig,ylim=c(min(ypred$lower95),max(ypred$upper95)))
lines(xtest,ytest,type="l",xlab="x",ylab="y",lwd=2)
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.