blob_id stringlengths 40 40 | directory_id stringlengths 40 40 | path stringlengths 2 327 | content_id stringlengths 40 40 | detected_licenses listlengths 0 91 | license_type stringclasses 2 values | repo_name stringlengths 5 134 | snapshot_id stringlengths 40 40 | revision_id stringlengths 40 40 | branch_name stringclasses 46 values | visit_date timestamp[us]date 2016-08-02 22:44:29 2023-09-06 08:39:28 | revision_date timestamp[us]date 1977-08-08 00:00:00 2023-09-05 12:13:49 | committer_date timestamp[us]date 1977-08-08 00:00:00 2023-09-05 12:13:49 | github_id int64 19.4k 671M ⌀ | star_events_count int64 0 40k | fork_events_count int64 0 32.4k | gha_license_id stringclasses 14 values | gha_event_created_at timestamp[us]date 2012-06-21 16:39:19 2023-09-14 21:52:42 ⌀ | gha_created_at timestamp[us]date 2008-05-25 01:21:32 2023-06-28 13:19:12 ⌀ | gha_language stringclasses 60 values | src_encoding stringclasses 24 values | language stringclasses 1 value | is_vendor bool 2 classes | is_generated bool 2 classes | length_bytes int64 7 9.18M | extension stringclasses 20 values | filename stringlengths 1 141 | content stringlengths 7 9.18M |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
0aef2b35d4a1424486b9b714c71bca69bacfd31c | 850a5e7537ecd8a3fdda83199819bdfec3ea7a66 | /examples/make_atlas.R | d9fe3fed7986e9cbf32e54921617ad3b866fffdb | [
"MIT"
] | permissive | elipousson/maplayer | 709cf32944aa628820d62ade3d2802d01ddb9b9d | 1267b6f1ec5551321457381f131412706993de7a | refs/heads/main | 2023-08-31T10:25:04.096427 | 2023-08-25T17:33:29 | 2023-08-25T17:33:29 | 496,240,757 | 5 | 0 | null | null | null | null | UTF-8 | R | false | false | 634 | r | make_atlas.R | nc <- sf::read_sf(system.file("shape/nc.shp", package = "sf"))
plots <- lapply(
dplyr::nest_by(nc, .by = NAME)[["data"]][1:4],
function(x) {
make_location_map(
basemap = ggplot(),
layer = layer_location(
data = x,
fill = "yellow",
alpha = 0.5
),
bg_layer = layer_location_data(
data = nc,
location = x,
asp = 8.5 / 5.5,
crop = FALSE
),
neatline = layer_neatline(data = x, asp = 8.5 / 5.5),
addon = labs_ext(caption = x$NAME)
)
}
)
make_atlas(
plots = plots,
page = "letter",
nrow = 2,
ncol = 1,
save = FALSE
)
|
242fd81b7295a245c8453f5164f1b3702f0b14db | feda9a14b44cc3024e3ee6d27f48f4eee433780c | /man/two_clusters_data.Rd | 7acfd08fa40d9e80ed8fd3f7b19ace986e10d0b9 | [
"MIT"
] | permissive | jlmelville/snedata | d45265f719fa8ced5bf6e70a198e052e0cf6e427 | c8cfdbf172b8581237eb6e733e23fe8bd1ed21c7 | refs/heads/master | 2022-11-22T21:29:22.938991 | 2022-11-10T16:55:38 | 2022-11-10T16:55:38 | 59,433,805 | 10 | 1 | null | null | null | null | UTF-8 | R | false | true | 1,852 | rd | two_clusters_data.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/misread-tsne.R
\name{two_clusters_data}
\alias{two_clusters_data}
\title{Two Equal Size Clusters}
\usage{
two_clusters_data(n, dim = 50)
}
\arguments{
\item{n}{Number of points per gaussian.}
\item{dim}{Dimension of the gaussians. You may pass a vector of length 2 to
create clusters of different dimensionalities, with the smaller cluster
having zeros in the extra dimensions.}
}
\value{
Data frame with coordinates in the \code{X1}, \code{X2} ...
\code{Xdim} columns, and color in the \code{color} column.
}
\description{
Two gaussians with equal size and bandwidth, from "How to Use t-SNE
Effectively".
}
\details{
Creates a dataset consisting of two symmetric gaussian distributions with
equal number of points and standard deviation 1, separated by a distance
of 10 units. Points are colored depending on which cluster they belong to.
}
\examples{
df <- two_clusters_data(n = 50, dim = 2)
# two clusters with 10 members each, first 10 sampled from a 3D gaussian,
# second 10 are sampled from a 4D gaussian
df <- two_clusters_data(n = 10, dim = c(3, 4))
}
\references{
\url{http://distill.pub/2016/misread-tsne/}
}
\seealso{
Other distill functions:
\code{\link{circle_data}()},
\code{\link{cube_data}()},
\code{\link{gaussian_data}()},
\code{\link{grid_data}()},
\code{\link{link_data}()},
\code{\link{long_cluster_data}()},
\code{\link{long_gaussian_data}()},
\code{\link{ortho_curve}()},
\code{\link{random_circle_cluster_data}()},
\code{\link{random_circle_data}()},
\code{\link{random_jump}()},
\code{\link{random_walk}()},
\code{\link{simplex_data}()},
\code{\link{subset_clusters_data}()},
\code{\link{three_clusters_data}()},
\code{\link{trefoil_data}()},
\code{\link{two_different_clusters_data}()},
\code{\link{unlink_data}()}
}
\concept{distill functions}
|
3f33df570e531ee1e339433743a774a16003b49f | a03da6a1edc7b1a1cf4b0829f5ece771f584df95 | /man/predict.polyGC.Rd | a89ec2995fc59b80b4fb87995a0b83fab71a9f23 | [] | no_license | homerhanumat/tigerstats | 4fbcc3609f46f6046a033d17165f7838dbd77e1a | 17067f7e5ec6b6cf712b628a4dbf5131c691ae22 | refs/heads/master | 2021-07-06T06:24:07.716196 | 2020-09-22T15:24:01 | 2020-09-22T15:24:01 | 15,921,287 | 14 | 7 | null | null | null | null | UTF-8 | R | false | true | 760 | rd | predict.polyGC.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/polyfitGC.R
\name{predict.polyGC}
\alias{predict.polyGC}
\title{Prediction Function for GC Polynomial Regression}
\usage{
\S3method{predict}{polyGC}(object,x,level=NULL,...)
}
\arguments{
\item{object}{An object of class polyGC}
\item{x}{value of the predictor variable}
\item{level}{desired level of prediction interval}
\item{\ldots}{ignored}
}
\value{
numeric prediction
}
\description{
Used by generic predict function
}
\examples{
#predict mpg for a car weighing 3 tons:
mpgModel <- polyfitGC(mpg~wt,data=mtcars,degree=2)
predict(mpgModel,x=3.0)
#include prediction interval:
predict(mpgModel,x=3.0,level=0.95)
}
\author{
Homer White \email{hwhite0@georgetowncollege.edu}
}
|
6ca6c50ba4429f199eb300709160bba60480ce4e | 31cf2367ee215eee5d6242ce9c4933974b25058c | /ScriptLMAnalyse.R | f37fb87f99728d10756f128b354dfbf851281998 | [] | no_license | cordeiroph/weather-prediction | e321ab49de4818c7806b7a2925cca0f13bdbc173 | 0d50a9f9ec0c3a60c452fb575e26c020407bb87f | refs/heads/master | 2020-04-10T01:58:00.542525 | 2018-12-19T20:30:45 | 2018-12-19T20:30:45 | 160,730,721 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 13,110 | r | ScriptLMAnalyse.R | library(ggplot2)
library(gridExtra)
library(ggcorrplot)
library(MASS)
library(nortest)
#setwd("/Users/phrc/Documents/Projects/R projects/TemperaturePrediction")
dfMaster <- read.csv("Assignment 2.csv")
df <-dfMaster
df$No <- NULL
df$cbwd <- NULL
df$year <- NULL
df$month <- NULL
df$day <- NULL
df$hour <- NULL
print(head(df))
# Select only quantitative data
print(summary(df))
# pm2.5 DEWP TEMP PRESS IWS IS IR
# Quantitative Functions ------
# One-variable boxplots for each pollutant/weather measure, arranged 3x2.
# Each panel's subtitle reports the number of outlier rows found by
# boxplot.stats() for that column.
outlierPlot <- function(df) {
  outlier_box <- function(mapping, values) {
    ggplot(df, mapping) +
      geom_boxplot(outlier.colour = "black", outlier.shape = 16,
                   outlier.size = 2, notch = FALSE, na.rm = TRUE) +
      labs(subtitle = paste("Outlier rows: ", length(boxplot.stats(values)$out)))
  }
  grid.arrange(
    outlier_box(aes(x = "", y = pm2.5), df$pm2.5),
    outlier_box(aes(x = "", y = PRES), df$PRES),
    outlier_box(aes(x = "", y = DEWP), df$DEWP),
    outlier_box(aes(x = "", y = Iws), df$Iws),
    outlier_box(aes(x = "", y = Is), df$Is),
    outlier_box(aes(x = "", y = Ir), df$Ir),
    nrow = 3
  )
}
# Kernel density plots of each quantitative variable, arranged 3x2.
# Rows containing any NA are removed once up front (same effect as the
# per-panel na.omit() calls, computed a single time).
densityPlot <- function(df) {
  complete <- na.omit(df)
  dens <- function(mapping) {
    ggplot(complete, mapping) + geom_density(na.rm = TRUE)
  }
  grid.arrange(
    dens(aes(x = pm2.5)),
    dens(aes(x = PRES)),
    dens(aes(x = DEWP)),
    dens(aes(x = Iws)),
    dens(aes(x = Is)),
    dens(aes(x = Ir)),
    nrow = 3
  )
}
# Hexbin scatter plots of TEMP against each quantitative predictor with an
# overlaid least-squares fit, arranged 3x2.
linearPlot <- function(df) {
  hex_lm <- function(mapping) {
    ggplot(df, mapping) +
      stat_binhex(na.rm = TRUE) +
      geom_smooth(method = "lm", na.rm = TRUE)
  }
  grid.arrange(
    hex_lm(aes(x = pm2.5, y = TEMP)),
    hex_lm(aes(x = PRES, y = TEMP)),
    hex_lm(aes(x = DEWP, y = TEMP)),
    hex_lm(aes(x = Iws, y = TEMP)),
    hex_lm(aes(x = Is, y = TEMP)),
    hex_lm(aes(x = Ir, y = TEMP)),
    nrow = 3
  )
}
# Lower-triangle correlation heatmap of all columns in `df`.
#
# Fix: `corMethod` was accepted but never passed to cor(), so every call
# silently used Pearson correlation. It is now forwarded; a default of
# "pearson" plus an empty-string fallback keeps existing calls such as
# correlationPlot(df, "") working unchanged.
correlationPlot <- function(df, corMethod = "pearson"){
  if (!is.character(corMethod) || length(corMethod) != 1 || !nzchar(corMethod)) {
    corMethod <- "pearson"  # backward-compatible fallback for "" / bad input
  }
  corTab <- cor(df, use = "pairwise.complete.obs", method = corMethod)
  # Columns with zero variance produce NA correlations; blank them out so
  # the heatmap still renders.
  corTab[is.na(corTab)] <- 0
  ggcorrplot(corTab, hc.order = TRUE,
             type = "lower",
             lab = TRUE,
             lab_size = 4,
             method = "square",
             ggtheme = theme_bw,
             colors = c("red", "white", "blue"))
}
# Quantitative Data Analyse ----
outlierPlot(df)
linearPlot(df)
densityPlot(df)
correlationPlot(df, "pearson")
# Quantitative Data Analyse without outliers -------
dfNoOutLiers <- df
boxStats <- boxplot.stats(df$pm2.5)$stats
dfNoOutLiers[(!is.na(dfNoOutLiers$pm2.5) & (dfNoOutLiers$pm2.5 < boxStats[1] | dfNoOutLiers$pm2.5 > boxStats[5]) ),]$pm2.5 <- NA
boxStats <- boxplot.stats(df$Iws)$stats
dfNoOutLiers[(dfNoOutLiers$Iws < boxStats[1] |dfNoOutLiers$Iws > boxStats[5]),]$Iws <- NA
boxStats <- boxplot.stats(df$Is)$stats
dfNoOutLiers[(dfNoOutLiers$Is < boxStats[1] | dfNoOutLiers$Is > boxStats[5]),]$Is <- NA
boxStats <- boxplot.stats(df$Ir)$stats
dfNoOutLiers[(dfNoOutLiers$Ir < boxStats[1] | dfNoOutLiers$Ir > boxStats[5]),]$Ir <- NA
print(summary(dfNoOutLiers))
outlierPlot(dfNoOutLiers)
linearPlot(dfNoOutLiers)
densityPlot(dfNoOutLiers)
correlationPlot(dfNoOutLiers, "")
# Categorical data -----
# Map an hour of the day (0-23) onto one of four six-hour labels.
# Hours of 24 or more yield NULL, matching the original's fall-through.
toDayPeriod <- function(hour) {
  if (hour >= 24) {
    return(NULL)
  }
  if (hour < 6) {
    return("night")
  }
  if (hour < 12) {
    return("morning")
  }
  if (hour < 18) {
    return("afternoon")
  }
  "evening"
}
# Bucket a day of the month into "begin" (1-10), "middle" (11-20) or
# "end" (21-31). Days of 32 or more yield NULL, as in the original.
toMonthPeriod <- function(day) {
  if (day >= 32) {
    return(NULL)
  }
  if (day < 11) {
    return("begin")
  }
  if (day < 21) {
    return("middle")
  }
  "end"
}
# Map a month number onto a meteorological season: 3-5 spring, 6-8 summer,
# 9-11 fall, otherwise (1, 2, 12) winter. Out-of-range months yield NULL.
# The original open-interval boundaries are kept exactly.
toSeason <- function(month) {
  if (month > 12 || month < 1) {
    return(NULL)
  }
  if (month > 2 && month < 6) {
    return("spring")
  }
  if (month > 5 && month < 9) {
    return("summer")
  }
  if (month > 8 && month < 12) {
    return("fall")
  }
  "winter"
}
# Boxplots of TEMP against each derived categorical predictor
# (day period, month period, season, wind direction), arranged 2x2.
catPlot <- function(df) {
  temp_box <- function(mapping) {
    ggplot(df, mapping) +
      geom_boxplot(outlier.colour = "black", outlier.shape = 16,
                   outlier.size = 2, notch = FALSE, na.rm = TRUE)
  }
  grid.arrange(
    temp_box(aes(x = dayPeriod, y = TEMP)),
    temp_box(aes(x = monthPeriod, y = TEMP)),
    temp_box(aes(x = season, y = TEMP)),
    temp_box(aes(x = cbwd, y = TEMP)),
    nrow = 2
  )
}
# For each season (rows: winter, spring, summer, fall) plot TEMP by day
# period (left column) and by month period (right column) in a 4x2 grid.
#
# Fix: the spring and fall day-period panels previously read the global
# `dfCat` instead of the `df` argument, so the function partially ignored
# its input; all panels now use `df`.
catPlotBySeason <- function(df){
  grid.arrange(
    ggplot(df[df$season == 'winter',], aes(x=dayPeriod, y=TEMP)) +
      geom_boxplot(outlier.colour="black", outlier.shape=16,
                   outlier.size=2, notch=FALSE, na.rm = TRUE),
    ggplot(df[df$season == 'winter',], aes(x=monthPeriod, y=TEMP)) +
      geom_boxplot(outlier.colour="black", outlier.shape=16,
                   outlier.size=2, notch=FALSE, na.rm = TRUE),
    ggplot(df[df$season == 'spring',], aes(x=dayPeriod, y=TEMP)) +   # was dfCat
      geom_boxplot(outlier.colour="black", outlier.shape=16,
                   outlier.size=2, notch=FALSE, na.rm = TRUE),
    ggplot(df[df$season == 'spring',], aes(x=monthPeriod, y=TEMP)) +
      geom_boxplot(outlier.colour="black", outlier.shape=16,
                   outlier.size=2, notch=FALSE, na.rm = TRUE),
    ggplot(df[df$season == 'summer',], aes(x=dayPeriod, y=TEMP)) +
      geom_boxplot(outlier.colour="black", outlier.shape=16,
                   outlier.size=2, notch=FALSE, na.rm = TRUE),
    ggplot(df[df$season == 'summer',], aes(x=monthPeriod, y=TEMP)) +
      geom_boxplot(outlier.colour="black", outlier.shape=16,
                   outlier.size=2, notch=FALSE, na.rm = TRUE),
    ggplot(df[df$season == 'fall',], aes(x=dayPeriod, y=TEMP)) +     # was dfCat
      geom_boxplot(outlier.colour="black", outlier.shape=16,
                   outlier.size=2, notch=FALSE, na.rm = TRUE),
    ggplot(df[df$season == 'fall',], aes(x=monthPeriod, y=TEMP)) +
      geom_boxplot(outlier.colour="black", outlier.shape=16,
                   outlier.size=2, notch=FALSE, na.rm = TRUE),
    nrow = 4
  )
}
# Density plots for each derived categorical feature, arranged 2x2.
# NOTE(review): geom_density() expects a continuous x, but these columns
# are factors, so ggplot is likely to warn or error here -- geom_bar()
# looks like the intended geom. Confirm before relying on this function.
catDensityPlot <- function(df) {
  complete <- na.omit(df)
  grid.arrange(
    ggplot(complete, aes(x = dayPeriod)) + geom_density(na.rm = TRUE),
    ggplot(complete, aes(x = monthPeriod)) + geom_density(na.rm = TRUE),
    ggplot(complete, aes(x = season)) + geom_density(na.rm = TRUE),
    ggplot(complete, aes(x = cbwd)) + geom_density(na.rm = TRUE),
    nrow = 2
  )
}
# Derive categorical features from the raw date/time columns.
# NOTE(review): lapply() returns a list, so season/dayPeriod/monthPeriod
# are list columns until factor() coerces them; sapply/vapply would give
# plain character vectors directly -- confirm the list form is intended.
dfCat <- dfMaster
dfCat$season <- lapply(dfCat$month, toSeason)
# Factor levels are taken in order of first appearance, not alphabetically.
dfCat$season <- factor(dfCat$season, levels = unique(dfCat$season))
dfCat$dayPeriod <- lapply(dfCat$hour, toDayPeriod)
dfCat$dayPeriod <- factor(dfCat$dayPeriod, levels = unique(dfCat$dayPeriod))
dfCat$monthPeriod <- lapply(dfCat$day, toMonthPeriod)
dfCat$monthPeriod <- factor(dfCat$monthPeriod, levels = unique(dfCat$monthPeriod))
# Visual inspection of TEMP against the new categorical features.
catPlot(dfCat)
catPlotBySeason(dfCat)
catDensityPlot(dfCat)
# variables analyse --------
# The variables is, ir and iws are compose only by 0 with a few values over less than 105 of the dataset,
# and for that reason, they are not good to be add at this first model
# PM2.5 is not homoscedastic, after removing the outliers the variable apresented a horizontal line, baically covering all
# the temperatures for each value and because and the correlation value proved this attribute doesn't have any significant correlation
# with temperature or others variables, and for that reason this variable wont be used in this first model
# PRES and DEWP has a strong and linear correlation and for that reason those variable should be part of the model
# The categoriacal variable season affects the temp variable, as the day period and cbwd
# and for that reason those variable should be part of the model
# at otherside, the variable month period doesn't seems to be affecting the temp and for that reason it wont be included in the model
# Model variables:
# DWEP, PRES, Season, DayPeriod, cwbd
# MODELING ------
# Run a battery of normality tests on x and print a one-column table of
# p-values, with the test names as row labels. All tests except the
# Kolmogorov-Smirnov come from the `nortest` package (loaded above).
normalidade <- function(x) {
  tests <- list(
    ks.test(x, "pnorm", mean(x), sd(x)),  # Kolmogorov-Smirnov
    lillie.test(x),                       # Lilliefors
    cvm.test(x),                          # Cramer-von Mises
    ad.test(x),                           # Anderson-Darling
    pearson.test(x)                       # Pearson chi-square test of normality
  )
  # Column name "valorp" is preserved from the original printed output.
  results <- cbind(valorp = vapply(tests, function(t) t$p.value, numeric(1)))
  rownames(results) <- vapply(tests, function(t) t$method, character(1))
  print(results, digits = 4)
}
dfModel <- dfMaster
dfModel$season <- lapply(dfModel$month, toSeason)
dfModel$season <- factor(dfModel$season, levels = unique(dfModel$season))
dfModel$dayPeriod <- lapply(dfModel$hour, toDayPeriod)
dfModel$dayPeriod <- factor(dfModel$dayPeriod, levels = unique(dfModel$dayPeriod))
dfModel$cbwd <- factor(dfModel$cbwd, levels = unique(dfModel$cbwd))
dfModel$No <- NULL
dfModel$pm2.5 <- NULL
dfModel$year <- NULL
dfModel$month <- NULL
dfModel$day <- NULL
dfModel$hour <- NULL
dfModel$Iws <- NULL
dfModel$Ir <- NULL
dfModel$Is <- NULL
head(dfModel, 5)
# Split dataset
row.number <- sample(1:nrow(dfModel), 0.7*nrow(dfModel))
train = dfModel[row.number,]
test = dfModel[-row.number,]
dim(train)
dim(test)
# Modeling
# Stepwise Regression
fit1 <- lm(TEMP ~ season+dayPeriod+cbwd+DEWP+PRES,data=train)
fit1a <- lm(TEMP ~ season+dayPeriod+cbwd+DEWP,data=train)
fit1b <- lm(TEMP ~ season+dayPeriod+cbwd+PRES,data=train)
fit2 <- lm(TEMP ~ 1, train)
both1 <- stepAIC(fit2,direction="both",scope=list(upper=fit1,lower=fit2))
both1a <- stepAIC(fit2,direction="both",scope=list(upper=fit1a,lower=fit2))
both1b <- stepAIC(fit2,direction="both",scope=list(upper=fit1b,lower=fit2))
both1$anova
both1a$anova
both1b$anova
both <-both1
summary(both)
coefficients(both) # model coefficients
confint(both, level=0.95) # CIs for model parameters
anova(both) # anova table
normalidade(both$residuals)
hist(both$residuals)
# Out-of-sample evaluation for the stepwise Model 1.
# Fixes: (1) `pred1` was used on the first line before being computed two
# lines later -- the prediction now comes first; (2) RMSE compared
# exp(pred1) with TEMP, but the model regresses TEMP on the original scale
# (no log transform, see the lm() formula above), so raw predictions are
# used.
pred1 <- predict(both, newdata = test)
actuals_preds <- data.frame(cbind(actuals = test$TEMP, predicteds = pred1))
correlation_accuracy <- cor(actuals_preds)
correlation_accuracy
rmse <- sqrt(sum((pred1 - test$TEMP)^2) / length(test$TEMP))
c(RMSE = rmse, R2 = summary(both)$r.squared)
par(mfrow = c(1, 1))
plot(test$TEMP, (pred1))
#---------------
# Stepwise Regression
fit1 <- lm(TEMP ~ season+dayPeriod+DEWP+PRES,data=train)
fit1a <- lm(TEMP ~ season+dayPeriod+DEWP,data=train)
fit1b <- lm(TEMP ~ season+dayPeriod+PRES,data=train)
fit2 <- lm(TEMP ~ 1, train)
both1 <- stepAIC(fit2,direction="both",scope=list(upper=fit1,lower=fit2))
both1a <- stepAIC(fit2,direction="both",scope=list(upper=fit1a,lower=fit2))
both1b <- stepAIC(fit2,direction="both",scope=list(upper=fit1b,lower=fit2))
both1$anova
both1a$anova
both1b$anova
both <-both1a
summary(both)
coefficients(both) # model coefficients
confint(both, level=0.95) # CIs for model parameters
anova(both) # anova table
normalidade(both$residuals)
hist(both$residuals)
# Out-of-sample evaluation for the second stepwise model.
# Fix: RMSE compared exp(pred1) with TEMP, but the model regresses TEMP on
# the original scale (no log transform), so raw predictions are used.
pred1 <- predict(both, newdata = test)
actuals_preds <- data.frame(cbind(actuals = test$TEMP, predicteds = pred1))
correlation_accuracy <- cor(actuals_preds)
correlation_accuracy
rmse <- sqrt(sum((pred1 - test$TEMP)^2) / length(test$TEMP))
c(RMSE = rmse, R2 = summary(both)$r.squared)
par(mfrow = c(1, 1))
plot(test$TEMP, (pred1))
#-----
# AIC / T value / P value / Rsquare R adjust Square / normality / Confidence Interval
# Check for statisc significance -> (T|P value)
# Check for normality -> (RMSE R2 R adjust Square)
# Check fitting -> AIC (compare models) | RMSE
# Check the sample its true repesentation of the population -> CI
|
12a16829d5e7acff0d4d54cbc6e00a19cec90cc5 | 54a110d0d9ce6feb77e3ffddd2ea415d4bc04366 | /mcmc_inspection.R | fd2f054b8c88072f691a4e0179783bde14d7bf6e | [] | no_license | aamine1/FoodSafety | 4ff20942c0f7a4ac25e918b900763010b6ad72b7 | b667ae1ac7ff26b7b9be90bcce23f3c7d5dee2c5 | refs/heads/master | 2021-01-13T08:25:23.830575 | 2016-10-25T05:12:57 | 2016-10-25T05:12:57 | 71,860,944 | 2 | 0 | null | null | null | null | UTF-8 | R | false | false | 12,981 | r | mcmc_inspection.R | #install.packages('caTools')
#install.packages('ROCR')
library(caTools)
library(ROCR)
source('~/loglikelihood_inspection.R')
#define auxiliary function
# Density at y of a Beta(a, b) distribution rescaled to the interval [p, q].
#
# Fix: the original wrote `return(numerator) / denominator`. Because
# return() exits the function immediately, the division was never applied
# and only the unnormalized numerator was returned. The whole expression
# is now evaluated before returning.
Scaled_BetaPDF <- function(y, a, b, p, q){
  ((y - p)^(a - 1) * (q - y)^(b - 1)) /
    ((q - p)^(a + b - 1) * beta(a, b))
}
#Load file
raw_features = read.csv('~/features.csv',stringsAsFactors = F)
#adding country features
features = subset(raw_features, raw_features$specific.country!="other")
features$Thailand = ifelse(features$specific.country == "Thailand",1,0)
features$Indonesia = ifelse(features$specific.country == "Indonesia",1,0)
features$India = ifelse(features$specific.country == "India",1,0)
features$Vietnam = ifelse(features$specific.country == "Vietnam",1,0)
features$China = ifelse(features$specific.country == "China",1,0)
features$Malaysia = ifelse(features$specific.country == "Malaysia",1,0)
features$Bangladesh = ifelse(features$specific.country == "Bangladesh",1,0)
#Input features
#features = read.csv('~/features.csv')
input_X = readline('Enter the indices of the feature columns, separated by comma: ') #4,6,7,12,13,15,16,21,22,23,24,25,26,27
input_S = readline('Enter the index of the inspection status column: ')#18
input_Y = readline('Enter the index of the inspection outcome column: ')#19
input_burnin = readline('Enter the burnin paramater (number of pre-iterations in MCMC loop): ')
input_nsamples = readline('Enter the nsamples paramater (number of iterations in MCMC loop): ')
input_X = eval(parse(text=paste('list(',input_X,')')))
X=features[,as.numeric(input_X)]
ones=rep(1,nrow(X))
X=cbind(ones,X)
S=ifelse(features[,as.numeric(input_S)]>0,1,0)
Y=ifelse(features[,as.numeric(input_Y)]>0,1,0)
ind=c()
for (row in 1:nrow(X)){
for (col in 1:ncol(X)){
if (X[row,col]=="NaN"){
ind = c(ind,row)
}
}
}
if (length(ind)>0){
X=X[-ind,]
S=S[-ind]
Y=Y[-ind]
}
#model paramters
nsamples = as.numeric(input_nsamples)
burnin = as.numeric(input_burnin)
a = 1; b = 1 #hyperprior of beta distribution for sigma (if a=b=1, uniform distribution)
c = 1; d = 1 #hyperprior of scaled beta distribution for rho (if c=d=1, uniform distribution)
mu = 0; sigma = 100 #hyperpriors of normal prior for all of the beta's and gamma's
hyperpriors = c(a,b,c,d,mu,sigma)
nfeatures = ncol(X)
ndata = nrow(X)
n = nsamples + burnin
Beta_old = rnorm(nfeatures,0,1)
Gamma_old = rnorm(nfeatures,0,1)
rho_old = runif(1,-1,1)
sigma_old = runif(1,0,c)
Samples = matrix(0, nrow = n , ncol = 2*nfeatures+2) #initialize samples array
Samples[1,] = c(sigma_old,rho_old,Beta_old,Gamma_old) #input the first sample
params_old = Samples[1,]
step_beta = 0.1 #st. dev. for beta proposals
step_gamma = 0.1 #st. dev. for gamma proposals
step_rho = 0.1 #st. dev for rho proposals
step_sigma = 0.1
X_S = X[S>0,]
S_S = S[S>0]
Y_S = Y[S>0]
# in sample mcmc loop
print ("In Sample Analysis")
for (i in 2:n){
sigma_old = params_old[1]
Gamma_old = params_old[(nfeatures+3):length(params_old)]
# sample sigma
sigma_new = sigma_old+rnorm(1,0,1)*step_sigma #proposal
l_old = loglikelihood_inspection(X,S,Y,Beta_old,Gamma_old,sigma_old,hyperpriors[1],rho_old)[1]
l_new = loglikelihood_inspection(X,S,Y,Beta_old,Gamma_old,sigma_new,hyperpriors[1],rho_old)[1]
ll_old = l_old+log(dbeta(sigma_old/hyperpriors[1],hyperpriors[1],hyperpriors[2]))
ll_new = l_new+log(dbeta(sigma_new/hyperpriors[1],hyperpriors[1],hyperpriors[2]))
u = log(runif(1,0,1))
w = ll_new-ll_old
if (w>u | is.na(w)){
sigma_old = sigma_new
params_old[1]= sigma_new
}
for (f in 1:nfeatures){
# sample gamma_f
Gamma_new = Gamma_old
Gamma_new[f] = Gamma_old[f]+rnorm(1,0,1)*step_gamma
l_old = loglikelihood_inspection(X,S,Y,Beta_old,Gamma_old,sigma_old,hyperpriors[1],rho_old)[1]
l_new = loglikelihood_inspection(X,S,Y,Beta_old,Gamma_new,sigma_old,hyperpriors[1],rho_old)[1]
ll_old = l_old -(Gamma_old[f]-hyperpriors[5])^2/2/hyperpriors[6]^2
ll_new = l_new -(Gamma_new[f]-hyperpriors[5])^2/2/hyperpriors[6]^2
u = log(runif(1,0,1))
w = ll_new-ll_old
if (w>u | is.na(w)){
Gamma_old[f] = Gamma_new[f]
params_old[(nfeatures+f+2)]= Gamma_new[f]
}
Samples[i,1] = params_old[1]
Samples[i,(nfeatures+3):ncol(Samples)] = params_old[(nfeatures+3):length(params_old)]
}
if (i %% 1000==0){
print (paste("Iteration",as.character(i)))
}
}
for (i in 2:n){
rho_old = params_old[2]
Beta_old = params_old[3:(nfeatures+2)]
# sample rho
rho_new = rho_old+rnorm(1,0,1)*step_rho #proposal
l_old = loglikelihood_inspection(X_S,S_S,Y_S,Beta_old,Gamma_old,sigma_old,hyperpriors[1],rho_old)[2]
l_new = loglikelihood_inspection(X_S,S_S,Y_S,Beta_old,Gamma_old,sigma_old,hyperpriors[1],rho_new)[2]
ll_old = l_old+log(Scaled_BetaPDF(rho_old,hyperpriors[3],hyperpriors[4],-1,1))
ll_new = l_new+log(Scaled_BetaPDF(rho_new,hyperpriors[3],hyperpriors[4],-1,1))
u = log(runif(1,0,1))
w = ll_new-ll_old
if (w>u | is.na(w)){
rho_old = rho_new
params_old[2]= rho_new
}
for (f in 1:nfeatures){
# sample beta_f
Beta_new = Beta_old
Beta_new[f] = Beta_old[f]+rnorm(1,0,1)*step_beta
l_old = loglikelihood_inspection(X_S,S_S,Y_S,Beta_old,Gamma_old,sigma_old,hyperpriors[1],rho_old)[2]
l_new = loglikelihood_inspection(X_S,S_S,Y_S,Beta_new,Gamma_old,sigma_old,hyperpriors[1],rho_old)[2]
ll_old = l_old -(Beta_old[f]-hyperpriors[5])^2/2/hyperpriors[6]^2
ll_new = l_new -(Beta_new[f]-hyperpriors[5])^2/2/hyperpriors[6]^2
u = log(runif(1,0,1))
w = ll_new-ll_old
if (w>u | is.na(w)){
Beta_old[f] = Beta_new[f]
params_old[(f+2)]= Beta_new[f]
}
Samples[i,2] = params_old[2]
Samples[i,3:(nfeatures+2)] = params_old[3:(nfeatures+2)]
}
if (i %% 1000==0){
print (paste("Iteration",as.character(burnin+nsamples+i)))
}
}
# in-sample significant features
q_significant_features=data.frame(inspection_model=c("significant feature","sign"))
p_significant_features=data.frame(inspection_model=c("significant feature","sign"))
for (i in 2:length(X)){
if (quantile(Samples[burnin+1:ncol(Samples),(2+i)],0.025)*quantile(Samples[burnin+1:ncol(Samples),(2+i)],0.975)>0){
p_significant_features=cbind(p_significant_features,data.frame(feature=c(colnames(X)[i],sign(quantile(Samples[,(2+i)],0.05)))))
}
if (quantile(Samples[burnin+1:ncol(Samples),(2+nfeatures+i)],0.025)*quantile(Samples[burnin+1:ncol(Samples),(2+nfeatures+i)],0.975)>0){
q_significant_features=cbind(q_significant_features,data.frame(feature=c(colnames(X)[i],sign(quantile(Samples[,(2+nfeatures+i)],0.05)))))
}
}
# Out of sample analysis
Mat = data.frame(X,S,Y)
set.seed(123)
split = sample.split(S,0.5)
train = subset(Mat,split=TRUE)
test = subset(Mat,split=FALSE)
X = train[,1:(ncol(Mat)-2)]
S = train[,(ncol(Mat)-1)]
Y = train[,ncol(Mat)]
nfeatures = ncol(X)
ndata = nrow(X)
Beta_old = rnorm(nfeatures,0,1)
Gamma_old = rnorm(nfeatures,0,1)
rho_old = runif(1,-1,1)
sigma_old = runif(1,0,c)
Samples = matrix(0, nrow = n , ncol = 2*nfeatures+2) #initialize samples array
Samples[1,] = c(sigma_old,rho_old,Beta_old,Gamma_old) #input the first sample
params_old = Samples[1,]
X_S = X[S>0,]
S_S = S[S>0]
Y_S = Y[S>0]
print ("Out Of Sample Analysis")
for (i in 2:n){
sigma_old = params_old[1]
Gamma_old = params_old[(nfeatures+3):length(params_old)]
# sample sigma
sigma_new = sigma_old+rnorm(1,0,1)*step_sigma #proposal
l_old = loglikelihood_inspection(X,S,Y,Beta_old,Gamma_old,sigma_old,hyperpriors[1],rho_old)[1]
l_new = loglikelihood_inspection(X,S,Y,Beta_old,Gamma_old,sigma_new,hyperpriors[1],rho_old)[1]
ll_old = l_old+log(dbeta(sigma_old/hyperpriors[1],hyperpriors[1],hyperpriors[2]))
ll_new = l_new+log(dbeta(sigma_new/hyperpriors[1],hyperpriors[1],hyperpriors[2]))
u = log(runif(1,0,1))
w = ll_new-ll_old
if (w>u | is.na(w)){
sigma_old = sigma_new
params_old[1]= sigma_new
}
for (f in 1:nfeatures){
# sample gamma_f
Gamma_new = Gamma_old
Gamma_new[f] = Gamma_old[f]+rnorm(1,0,1)*step_gamma
l_old = loglikelihood_inspection(X,S,Y,Beta_old,Gamma_old,sigma_old,hyperpriors[1],rho_old)[1]
l_new = loglikelihood_inspection(X,S,Y,Beta_old,Gamma_new,sigma_old,hyperpriors[1],rho_old)[1]
ll_old = l_old -(Gamma_old[f]-hyperpriors[5])^2/2/hyperpriors[6]^2
ll_new = l_new -(Gamma_new[f]-hyperpriors[5])^2/2/hyperpriors[6]^2
u = log(runif(1,0,1))
w = ll_new-ll_old
if (w>u | is.na(w)){
Gamma_old[f] = Gamma_new[f]
params_old[(nfeatures+f+2)]= Gamma_new[f]
}
Samples[i,1] = params_old[1]
Samples[i,(nfeatures+3):ncol(Samples)] = params_old[(nfeatures+3):length(params_old)]
}
if (i %% 1000==0){
print (paste("Iteration",as.character(i)))
}
}
for (i in 2:n){
rho_old = params_old[2]
Beta_old = params_old[3:(nfeatures+2)]
# sample rho
rho_new = rho_old+rnorm(1,0,1)*step_rho #proposal
l_old = loglikelihood_inspection(X_S,S_S,Y_S,Beta_old,Gamma_old,sigma_old,hyperpriors[1],rho_old)[2]
l_new = loglikelihood_inspection(X_S,S_S,Y_S,Beta_old,Gamma_old,sigma_old,hyperpriors[1],rho_new)[2]
ll_old = l_old+log(Scaled_BetaPDF(rho_old,hyperpriors[3],hyperpriors[4],-1,1))
ll_new = l_new+log(Scaled_BetaPDF(rho_new,hyperpriors[3],hyperpriors[4],-1,1))
u = log(runif(1,0,1))
w = ll_new-ll_old
if (w>u | is.na(w)){
rho_old = rho_new
params_old[2]= rho_new
}
for (f in 1:nfeatures){
# sample beta_f
Beta_new = Beta_old
Beta_new[f] = Beta_old[f]+rnorm(1,0,1)*step_beta
l_old = loglikelihood_inspection(X_S,S_S,Y_S,Beta_old,Gamma_old,sigma_old,hyperpriors[1],rho_old)[2]
l_new = loglikelihood_inspection(X_S,S_S,Y_S,Beta_new,Gamma_old,sigma_old,hyperpriors[1],rho_old)[2]
ll_old = l_old -(Beta_old[f]-hyperpriors[5])^2/2/hyperpriors[6]^2
ll_new = l_new -(Beta_new[f]-hyperpriors[5])^2/2/hyperpriors[6]^2
u = log(runif(1,0,1))
w = ll_new-ll_old
if (w>u | is.na(w)){
Beta_old[f] = Beta_new[f]
params_old[(f+2)]= Beta_new[f]
}
Samples[i,2] = params_old[2]
Samples[i,3:(nfeatures+2)] = params_old[3:(nfeatures+2)]
}
if (i %% 1000==0){
print (paste("Iteration",as.character(burnin+nsamples+i)))
}
}
# ---- Out-of-sample prediction: inspection probability (q) ----
# Columns of `test`: feature columns, then inspection indicator S,
# then inspection outcome Y (last column).
X <- test[, 1:(ncol(Mat)-2)]
S <- test[, (ncol(Mat)-1)]
# Posterior draws of the inspection-model coefficients.  Hoisted out of the
# row loop below: this matrix is loop-invariant and was previously rebuilt
# on every iteration.
q_coefs <- as.matrix(Samples[(burnin+1):nrow(Samples), (nfeatures+3):ncol(Samples)])
# q[i, j] = inverse-logit linear predictor for test row i under posterior draw j
q <- matrix(0, nrow = nrow(X), ncol = nsamples)
for (i in seq_len(nrow(X))){
  x_c <- X[i, ]
  x <- q_coefs %*% t(as.matrix(x_c))
  q[i, ] <- 1/(1+exp(-x))
}
# Posterior-mean inspection probability per test row (same as the old
# per-row mean() loop, vectorized).
qmean <- rowMeans(q)
plot(sort(qmean))
# ROC / AUC for predicting which shipments are inspected
pred_s <- prediction(qmean, S)
perf_s <- performance(pred_s, "tpr", "fpr")
plot(perf_s, colorize = TRUE)
q_AUC <- as.numeric(performance(pred_s, "auc")@y.values)
# ---- Out-of-sample prediction: inspection outcome (p) ----
Y <- test[, ncol(Mat)]
# Posterior draws of the outcome-model coefficients (also loop-invariant).
p_coefs <- as.matrix(Samples[(burnin+1):nrow(Samples), 3:(nfeatures+2)])
p <- matrix(0, nrow = nrow(X), ncol = nsamples)
for (i in seq_len(nrow(X))){
  x_c <- X[i, ]
  x <- p_coefs %*% t(as.matrix(x_c))
  p[i, ] <- 1/(1+exp(-x))
}
pmean <- rowMeans(p)
plot(sort(pmean))
pred <- prediction(pmean, Y)
perf <- performance(pred, "tpr", "fpr")
plot(perf, colorize = TRUE)
p_AUC <- as.numeric(performance(pred, "auc")@y.values)
# Overlay both ROC curves: inspection (perf_s) and inspection outcome (perf)
plot(perf_s, colorize = TRUE)
plot(perf, add = T, colorize = TRUE)
# ---- Risk scores on the full feature set ----
X <- features[, as.numeric(input_X)]
ones <- rep(1, nrow(X))
X <- cbind(ones, X)              # prepend intercept column
S <- features[, as.numeric(input_S)]
Y <- features[, as.numeric(input_Y)]
if (length(ind) > 0){            # drop rows flagged for exclusion
  X <- X[-ind, ]
  S <- S[-ind]
  Y <- Y[-ind]
  Nshipments <- Nshipments[-ind]
  Nsampled <- Nsampled[-ind]
}
q <- matrix(0, nrow = nrow(X), ncol = nsamples)
for (i in seq_len(nrow(X))){
  x_c <- X[i, ]
  x <- q_coefs %*% t(as.matrix(x_c))
  q[i, ] <- 1/(1+exp(-x))
}
q_mean <- rowMeans(q)
p <- matrix(0, nrow = nrow(X), ncol = nsamples)
for (i in seq_len(nrow(X))){
  x_c <- X[i, ]
  x <- p_coefs %*% t(as.matrix(x_c))
  p[i, ] <- 1/(1+exp(-x))
}
p_mean <- rowMeans(p)
# ---- Save outputs ----
# BUG FIX: `features <- features[-ind, ]` previously ran unconditionally.
# When `ind` is empty, `-integer(0)` selects ZERO rows and silently drops
# the whole table; guard exactly like the subsetting above.
if (length(ind) > 0){
  features <- features[-ind, ]
}
write.csv(Samples[(burnin+1):nrow(Samples), ], 'posterior samples.csv')
write.csv(q_significant_features, 'significant features for inspection.csv')
# BUG FIX: output filename previously misspelled 'inpection'.
write.csv(p_significant_features, 'significant features for inspection outcome.csv')
features_table <- features
features_table$inspection_score <- q_mean
features_table$risk_score <- p_mean
write.csv(features_table, 'inspection_features_scores.csv')
|
4f68085fdb50e72bd7c5713a3251f3b09540d75c | e848c54fdcadb59ae339503e64a8008e833c4257 | /260_Models 1 and 2 logistic regression.R | 8d9db9c6b0af9c79d7dfbfbfc0195cdd0c9a1aa4 | [] | no_license | pgsmith2000/BRFSS | 0ccc6cbc8e40839c6f7073221178c97d2da93ea0 | c29270826b3008b1211ad8af738502db447a9bdd | refs/heads/master | 2020-08-28T03:30:08.259481 | 2019-11-03T17:27:22 | 2019-11-03T17:27:22 | 217,574,848 | 2 | 0 | null | null | null | null | UTF-8 | R | false | false | 1,213 | r | 260_Models 1 and 2 logistic regression.R | # read in analytic table
# Read in the analytic table prepared by the earlier cleaning steps.
analytic <- read.csv(file="./data/analytic.csv", header=TRUE, sep=",")
library (devtools)
library (broom)
# Helper: append odds ratios and 95% Wald confidence limits to a tidy
# coefficient table (estimate / std.error are on the log-odds scale).
# Extracted because this identical three-line computation was duplicated
# for every model.
add_or_ci <- function(tidy_model) {
  tidy_model$OR <- exp(tidy_model$estimate)
  tidy_model$LL <- exp(tidy_model$estimate - (1.96 * tidy_model$std.error))
  tidy_model$UL <- exp(tidy_model$estimate + (1.96 * tidy_model$std.error))
  tidy_model
}
# Model 1: asthma status as a function of drinking frequency only.
LogModel1 <- glm(ASTHMA4 ~ DRKMONTHLY + DRKWEEKLY,
                 data=analytic, family = "binomial")
summary(LogModel1)
Tidy_LogModel1 <- add_or_ci(tidy(LogModel1))
Tidy_LogModel1
write.csv(Tidy_LogModel1, file = "./data/models/LogisticRegressionModel1.csv")
# Model 2: Model 1 plus sex and age-group covariates.
LogModel2 <- glm(ASTHMA4 ~ DRKMONTHLY + DRKWEEKLY + MALE + AGE2 + AGE3 + AGE4 + AGE5 + AGE6,
                 data=analytic, family = "binomial")
summary(LogModel2)
Tidy_LogModel2 <- add_or_ci(tidy(LogModel2))
write.csv(Tidy_LogModel2, file = "./data/models/LogisticRegressionModel2.csv")
|
db26cea24834f389479c0a7ace67dbc62cff5fc4 | 08a0f46033f64be32b613b8b2e5c2f039dfae358 | /cachematrix.R | da360fa26a5f5ca4af2a40feabcd4376f2e456b2 | [] | no_license | lphan1812/ProgrammingAssignment2 | 528d5758c542be3ba033e665e054138e54ef2461 | b52e2eaf50ea9fe6e74bcff846ad45829570228d | refs/heads/master | 2022-05-24T22:53:57.406598 | 2020-05-01T02:21:23 | 2020-05-01T02:21:23 | 259,794,452 | 0 | 0 | null | 2020-04-29T01:33:02 | 2020-04-29T01:33:01 | null | UTF-8 | R | false | false | 1,607 | r | cachematrix.R | ## Side note: for Mac OS, in order to install "matlib" to use function inv(), I had to
## download XQuartz.
## In this assignment, install.packages("matlib") and library(matlib) was run outside the script
## Explain the function:
## The makeCacheMatrix function creates a special matrix object that can cache its inverse.
## Build a cache-aware matrix wrapper: a list of four closures sharing the
## matrix `x` and a memoised inverse.  Setting a new matrix clears the cache.
makeCacheMatrix <- function(x = matrix()) {
  cached_inverse <- NULL
  list(
    set = function(y) {
      x <<- y
      cached_inverse <<- NULL  # a new matrix invalidates the stored inverse
    },
    get = function() x,
    setInverse = function(inv) cached_inverse <<- inv,
    getInverse = function() cached_inverse
  )
}
## The below function computes the inverse of the matrix created with the function above.
## It first checks to see if the inverse has already been calculated.
## If so, it gets the inverse from the cache and skips the computation.
## Otherwise, it calculates the inverse of the data and sets the value of the inverse in the
## cache via the setInverse function.
## Return the inverse of the special 'matrix' `x` created by makeCacheMatrix,
## computing it at most once.  A cached inverse is returned directly (with a
## message); otherwise the inverse is computed, stored in the cache, and
## returned.  Extra arguments in `...` are forwarded to solve().
cacheSolve <- function(x, ...) {
  m <- x$getInverse()
  if(!is.null(m)) {
    message("getting cached data")
    return(m)
  }
  data <- x$get()
  ## Use base solve() instead of matlib::inv(): for an invertible square
  ## matrix solve(data) returns the same inverse, and it removes the
  ## matlib / XQuartz third-party dependency noted at the top of the file.
  m <- solve(data, ...)
  x$setInverse(m)
  m
  ## Return a matrix that is the inverse of 'x'
}
## Example of using the functions created above:
## example <- makeCacheMatrix()
## example$set(matrix(1:4, 2))
## example$get()
## [,1] [,2]
## [1,] 1 3
## [2,] 2 4
## cacheSolve(example)
## [,1] [,2]
## [1,] -2 1.5
## [2,] 1 -0.5
## cacheSolve(example)
## getting cached data
## [,1] [,2]
## [1,] -2 1.5
## [2,] 1 -0.5
|
3c247ebad2689fbe708199f72f3e518f73a179b7 | 1fea4477ad03f4a63dc06dd4b7721f4918d230ed | /Scripts/FY21Q2_target_achievement.R | e6a9b2314f404bd916a1cc01a5ae7becc95108db | [] | no_license | USAID-OHA-SI/groundhog_day | 077e9cdbf773a4b88bee3039e435b30cd0893827 | d65658432cc8b78c3fa342914586d4cc264a9e13 | refs/heads/main | 2023-08-09T14:15:07.184665 | 2023-08-01T13:02:32 | 2023-08-01T13:02:32 | 251,695,677 | 2 | 0 | null | 2023-06-21T20:20:31 | 2020-03-31T18:28:36 | R | UTF-8 | R | false | false | 7,461 | r | FY21Q2_target_achievement.R | # PROJECT: agitprop
# AUTHOR: A.Chafetz | USAID
# PURPOSE: USAID achievement
# LICENSE: MIT
# DATE: 2021-05-19
# UPDATED:
# NOTE: adapted from agitprop/05_usaid_target_achievement.R
# DEPENDENCIES ------------------------------------------------------------
library(tidyverse)
library(glitr)
library(glamr)
library(ICPIutilities)
library(extrafont)
library(scales)
library(tidytext)
library(patchwork)
library(ggtext)
library(glue)
library(waffle) #devtools::install_github("hrbrmstr/waffle")
# GLOBAL VARIABLES --------------------------------------------------------
# MER indicators to display, in the order they should appear in the plots.
ind_sel <- c("PrEP_NEW", "VMMC_CIRC", "HTS_TST", "HTS_TST_POS", "TX_NEW", "TX_CURR")
# Credited in the plot caption.
authors <- c("Aaron Chafetz", "Tim Essam")
#msd_source <- msd_period()
# FUNCTIONS ---------------------------------------------------------------
#' Abbreviate large numbers for plot labels.
#'
#' Vectorized via dplyr::case_when: values >= 1e9/1e6/1e3 are scaled and
#' suffixed with B/M/K respectively; anything smaller is printed as-is.
#' Note the last branch deliberately does NOT round, so small values keep
#' their decimals.
#'
#' @param x numeric vector to format
#' @param digits decimal places kept after scaling (default 0)
#' @return character-like (glue) vector the same length as x
clean_number <- function(x, digits = 0){
  dplyr::case_when(x >= 1e9 ~ glue("{round(x/1e9, digits)}B"),
                   x >= 1e6 ~ glue("{round(x/1e6, digits)}M"),
                   x >= 1e3 ~ glue("{round(x/1e3, digits)}K"),
                   TRUE ~ glue("{x}"))
}
# IMPORT ------------------------------------------------------------------
# Latest OU-by-IM MSD found in the local SI data folder.
df <- si_path() %>%
  return_latest("OU_IM_FY19") %>%
  read_msd()
# MUNGE -------------------------------------------------------------------
# Current fiscal year / quarter / period, derived from the MSD itself.
curr_fy <- identifypd(df, "year")
curr_qtr <- identifypd(df, "quarter")
curr_pd <- identifypd(df)
msd_source <- "FY21Q2c" #msd_period(period = curr_pd)
# Fraction of the annual target expected by this point in the year.
trgt_rng <- 1*(curr_qtr/4)
# Cumulative results and targets for the selected indicators
# (Total Numerator only), aggregated to USAID vs all other agencies.
df_achv <- df %>%
  filter(indicator %in% ind_sel,
         standardizeddisaggregate == "Total Numerator",
         fiscal_year == curr_fy,
         fundingagency != "Dedup") %>%
  mutate(fundingagency = ifelse(fundingagency == "USAID", "USAID", "All Other Agencies"),
         fundingagency = factor(fundingagency, c("USAID", "All Other Agencies"))) %>%
  group_by(fiscal_year, fundingagency, indicator) %>%
  summarise(across(c(cumulative, targets), sum, na.rm = TRUE)) %>%
  ungroup()
# Achievement = results/targets; label and colour bands are defined
# relative to the quarter's goal.  TX_CURR (a "snapshot" indicator) is
# judged against the full-year target instead of the quarterly fraction.
df_achv <- df_achv %>%
  mutate(achievement = cumulative/targets,
         qtr_goal = ifelse(indicator == "TX_CURR", 1, 1*(curr_qtr/4)),
         achv_label = case_when(is.na(achievement) ~ NA_character_,
                                achievement <= qtr_goal-.25 ~ glue("<{100*(qtr_goal-.25)}%") %>% as.character,
                                achievement <= qtr_goal-.1 ~ glue("{100*(qtr_goal-.25)}-{100*(qtr_goal-.11)}%") %>% as.character,
                                achievement <= qtr_goal+.1 ~ glue("{100*(qtr_goal-.1)}-{100*(qtr_goal+.1)}%") %>% as.character,
                                TRUE ~ glue("+{100*(qtr_goal+.1)}%") %>% as.character),
         achv_color = case_when(is.na(achievement) ~ NA_character_,
                                achievement <= qtr_goal-.25 ~ old_rose_light,
                                achievement <= qtr_goal-.1 ~ burnt_sienna_light,
                                achievement <= qtr_goal+.1 ~ "#5BB5D5",
                                TRUE ~ trolley_grey_light)) %>%
  select(-qtr_goal)
# Reshape for waffle plotting: each indicator contributes an achieved part
# (achv_round, capped at 100) and a remainder ("gap") so every waffle has
# exactly 100 squares.  The gap squares are drawn near-transparent grey.
df_viz <- df_achv %>%
  mutate(achv_round = round(achievement*100),
         achv_round = ifelse(achv_round > 100, 100, achv_round),
         gap = 100-achv_round) %>%
  pivot_longer(c(achv_round, gap), names_to = "status") %>%
  mutate(achv_color = ifelse(status == "gap", "#EBEBEB", achv_color),
         achv_color = ifelse(achv_color == trolley_grey_light, trolley_grey, achv_color),
         achv_alpha = ifelse(status == "gap", .1, 1),
         indicator = factor(indicator, ind_sel),
         # Human-readable indicator descriptions, wrapped for facet strips.
         ind_lab = case_when(indicator == "PrEP_NEW" ~ "Newly enrolled on antiretroviral pre-exposure prophylaxis",
                             indicator == "VMMC_CIRC" ~ "Voluntary medical male circumcision for HIV prevention",
                             indicator == "HTS_TST" ~ "Receiving HIV testing service and results",
                             indicator == "HTS_TST_POS" ~ "Receiving HIV testing services and positive results",
                             indicator == "TX_NEW" ~ "Newly enrolled on antiretroviral therapy",
                             indicator == "TX_CURR"~ "Currently receiving antiretroviral therapy"),
         ind_lab = str_wrap(ind_lab, width = 25),
         # Only the first indicator spells out "Results"/"Targets";
         # the rest just show the two numbers.
         val_lab = ifelse(indicator == ind_sel[1],
                          glue("Results - {clean_number(cumulative)}\nTargets - {clean_number(targets)}"),
                          glue("{clean_number(cumulative)}\n{clean_number(targets)}")),
         full_lab = ifelse(fundingagency == "USAID", glue("{indicator}\n\n{val_lab}"), val_lab)) %>%
  arrange(indicator) %>%
  mutate(full_lab = fct_inorder(full_lab))
# Build one row of achievement waffles (one 10x10 waffle per indicator,
# achievement % overlaid) for a single agency grouping.  Extracted to
# remove the ~20-line ggplot spec that was previously duplicated verbatim
# for USAID and for all other agencies.
#
# df_plot: rows of df_viz for one agency group (long format: achv_round/gap)
# subtitle_lab: plot subtitle identifying the agency group
# returns: a ggplot object
build_achv_waffle <- function(df_plot, subtitle_lab){
  df_plot %>%
    ggplot(aes(fill = achv_color, values = value, alpha = achv_alpha)) +
    geom_waffle(color = "white", size = 1, n_rows = 10, flip = TRUE) +
    geom_text(aes(x = 5, y = 12, label = percent(achievement, 1), color = achv_color),
              family = "Source Sans Pro SemiBold", size = 14/.pt) +
    facet_wrap(~full_lab, nrow = 1, strip.position = "bottom") +
    expand_limits(y = 14) +
    scale_x_discrete() +
    scale_y_continuous(labels = function(x) x * 10, # make this multiplyer the same as n_rows
                       expand = c(0,0)) +
    scale_fill_identity() +
    scale_color_identity() +
    scale_alpha_identity() +
    coord_equal() +
    labs(x= NULL, y = NULL,
         subtitle = subtitle_lab) +
    si_style_nolines() +
    theme(axis.text.y = element_blank(),
          strip.text.x = element_text(hjust = .5),
          panel.spacing = unit(1, "pt")) +
    guides(fill = guide_legend(reverse = TRUE))
}
v_usaid <- build_achv_waffle(filter(df_viz, fundingagency == "USAID"), "USAID")
v_other <- build_achv_waffle(filter(df_viz, fundingagency != "USAID"), "All Other Agencies")
# Stack the two agency panels (patchwork `/`) and add shared title,
# subtitle, caption and styling.
v_usaid/v_other +
  plot_annotation(title = "USAID GLOBALLY IS IN GOOD STANDING FOR MER TARGET ACHIEVEMENT, WITH ROOM TO IMPROVE FOR TX_CURR",
                  subtitle = glue("as of {curr_pd}, goal of being at around {percent(trgt_rng)} of the FY target"),
                  caption = glue("Source: {msd_source}
                  SI analytics: {paste(authors, collapse = '/')}
                  US Agency for International Development"),
                  theme = si_style())
# Export the combined figure (vector) and the USAID panel alone (raster).
si_save("Graphics/FY21Q2_achievement.svg")
si_save("Images/05b_achievement.png", plot = v_usaid, height = 4)
|
f2a9ebe083b3c0aa8ebcbad920055bdf3e74566f | db3abdad5df1923a5b8a11c47a25f0850c7bee76 | /R/marginal_plot.R | 5fd75cf884e6207268099cb570fe9a4c1548c06b | [
"MIT"
] | permissive | GRousselet/rogme | db4fb4381e0f73546de4a2807fa7ed37ab034b49 | 650d29393f43b8d82f35eca70fbef5927de8b7a9 | refs/heads/master | 2023-05-26T02:05:32.849792 | 2022-11-07T10:30:43 | 2022-11-07T10:30:43 | 76,552,942 | 71 | 9 | MIT | 2021-02-15T09:37:45 | 2016-12-15T11:12:38 | R | UTF-8 | R | false | false | 11,451 | r | marginal_plot.R | #' Plot one-dimensional scatterplots for 2 groups
#'
#' \code{plot_scat2} produces scatterplots for 2 marginal distributions.
#' The scatterplots are jittered using \code{\link[ggbeeswarm]{geom_quasirandom}}.
#'
#' @param data A data frame in long format. One column is a factor describing the groups;
#' another column contains the values/observations for each group. A properly formatted data
#' frame can be created using \code{\link{mkt2}}. Missing values are not
#' allowed.
#' @param formula A formula with format response variable ∼ predictor variable,
#' where ~ (tilde) means "is modeled as a function of".
#' @param xlabel Option to set different name - default NULL to use data frame column names.
#' @param ylabel Option to set different name - default NULL to use data frame column names.
#' @param ... Input arguments for ggbeeswarm::geom_quasirandom
#'
#' @return A ggplot object.
#'
#' @examples
#' # generate data
#' set.seed(21)
#' g1 <- rnorm(1000) + 6
#' g2 <- rnorm(1000) * 1.5 + 6
#'
#' # make tibble
#' df <- mkt2(g1, g2)
#' # make scatterplots
#' ps <- plot_scat2(data = df,
#' formula = obs ~ gr,
#' xlabel = "",
#' ylabel = "Scores (a.u.)",
#' alpha = 1,
#' shape = 21,
#' colour = "grey10",
#' fill = "grey90") # scatterplots
#' ps <- ps + coord_flip()
#' ps
#'
#' @export
plot_scat2 <- function(data = df,
                       formula = obs ~ gr,
                       xlabel = NULL,
                       ylabel = NULL,
                       ...){
  # subset formula: extract the grouping and observation column names
  # (subset_formula is a package-internal helper defined elsewhere)
  subf <- subset_formula(data, formula)
  xplot = subf$param_col_name
  yplot = subf$obs_col_name
  # Map group to x/fill/colour/shape so each group gets its own aesthetics.
  # NOTE(review): aes_string() is soft-deprecated in recent ggplot2 —
  # consider migrating to aes(.data[[...]]); left unchanged here.
  p <- ggplot(data, aes_string(x = xplot, y = yplot, fill = xplot,
                               colour = xplot, shape = xplot))
  # Jittered one-dimensional scatter; `...` is forwarded to geom_quasirandom
  # (alpha, shape, colour, fill, etc.).
  p <- p + ggbeeswarm::geom_quasirandom(...) +
    theme_bw() +
    # scale_colour_manual(values = symb_col) +
    # scale_fill_manual(values = symb_fil) +
    # scale_shape_manual(values = symb_shape) +
    theme(legend.position = "none") +
    theme(axis.title.x = element_text(size=16,face="bold"),
          axis.title.y = element_text(size=16,face="bold"),
          axis.text.x = element_text(size=14),
          axis.text.y = element_text(size=14))
  # override axis labels only when the caller supplied them
  if (!is.null(xlabel)){
    p <- p + xlab(xlabel)
  }
  if (!is.null(ylabel)){
    p <- p + ylab(ylabel)
  }
  p
}
#' Plot quantiles and confidence intervals
#'
#' Using the output of \code{\link{quantiles_pbci}}, create a ggplot object
#' showing specified quantiles (default to deciles) and their 95% percentile
#' bootstrap confidence intervals. A vertical line marks the median.
#'
#' @param data Data frame created by `quantiles_pbci` (columns `quantile`,
#'   `est_q`, `ci.low`, `ci.up`).
#' @param qseq Sequence of quantiles to plot - assumes the median sits in the
#'   middle of the sequence - default = deciles.
#' @param plotzero If TRUE, also draw a reference line at zero.
#' @param label.x Label for the measurement axis.
#' @param hjust,vjust,size_text Placement and size of the median annotation.
#' @param colour_q,fill_q,size_q,shape_q Aesthetics of the quantile markers.
#' @param colour_line,size_line,linetype_line Aesthetics of the median line.
#' @param colour_zero,size_zero,linetype_zero Aesthetics of the zero line.
#'
#' @seealso \code{\link{quantiles_pbci}}
#'
#' @examples
#' set.seed(7)
#' # make fake skewed data
#' x <- rgamma(100, shape=3, scale=1)*10+250
#' # compute quantiles and their percentile bootstrap confidence intervals
#' out <- quantiles_pbci(x,q=seq(1,9)/10,nboot=2000,alpha=0.05)
#' # make decile plot
#' p <- plot_hd_ci(data=out,plotzero=TRUE,label.x="Onsets in ms",
#'                 hjust=-.05,vjust=.5,size_text=5,
#'                 colour_q = "grey10",fill_q = "grey90",
#'                 colour_line = "grey10", linetype_line = 1, size_line = 1) +
#'   scale_y_continuous(limits=c(250, 350),breaks=seq(250,350,25))
#' # p
#'
#' @export
plot_hd_ci <- function(data = out,
                       qseq = seq(.1,.9,.1),
                       plotzero = TRUE,
                       label.x = "Differences",
                       hjust = -.05,
                       vjust = .2,
                       size_text = 6,
                       colour_q = "#009E73",
                       fill_q = "white",
                       size_q = 4,
                       shape_q = 21,
                       colour_line = "#009E73",
                       size_line = 1,
                       linetype_line = 2,
                       colour_zero = "black",
                       size_zero = .5,
                       linetype_zero = 1){
  # Locate the median: assumes the quantile sequence is symmetric so that
  # the middle row of `data` holds the 0.5 quantile.
  md.loc <- floor(length(data$quantile)/2) + 1
  md <- data$est_q[md.loc] # median estimate
  md.c <- as.character(round(md, digits=1)) # turn into characters
  lo.c <- as.character(round(data$ci.low[md.loc], digits=1)) # turn into characters
  up.c <- as.character(round(data$ci.up[md.loc], digits=1)) # turn into characters
  caption <- paste("Median = \n ",md.c," [",lo.c,", ",up.c,"]",sep="")
  # BUG FIX: all.equal() returns a character vector when its arguments
  # differ, and `if (<character>)` is an error in R; wrapping in isTRUE()
  # lets any non-decile qseq fall through to "Quantiles" instead of crashing.
  if (isTRUE(all.equal(qseq, seq(.1,.9,.1)))){
    label.y <- "Deciles"
  } else {
    label.y <- "Quantiles"
  }
  # BUG FIX: the plot was previously built from the global `out`
  # (ggplot(data=out, ...)), silently ignoring the `data` argument.
  p <- ggplot(data=data, aes(x=quantile*10, y=est_q)) +
    # reference line at the median (vertical after coord_flip)
    geom_abline(intercept = md, slope = 0,
                colour = colour_line,
                size = size_line,
                linetype = linetype_line)
  if (plotzero){
    p <- p + geom_abline(intercept = 0, slope = 0,
                         colour = colour_zero,
                         size = size_zero,
                         linetype = linetype_zero)
  }
  p <- p + geom_linerange(aes(ymin=ci.low, ymax=ci.up), colour=colour_line,size=1) +
    geom_point(colour = colour_q,
               size = size_q,
               shape = shape_q,
               fill = fill_q) +
    theme_bw() +
    labs(x=label.y) +
    labs(y=label.x) +
    theme(axis.text.x = element_text(size=14),
          axis.text.y = element_text(size=14),
          axis.title.x = element_text(size=16,face="bold"),
          axis.title.y = element_text(size=16,face="bold")) +
    scale_x_continuous(breaks=seq(1,9,1)) +
    # Annotate at the median's own position (generalized from the
    # hard-coded x = 5 / ci.up[5], which was only correct for deciles;
    # for deciles these expressions evaluate to the same values).
    annotate("text", x = data$quantile[md.loc]*10, y = data$ci.up[md.loc],
             label = caption[1],
             hjust = hjust, vjust = vjust, size = size_text) +
    coord_flip()
  p
}
# ----------------------------------------------------------------------------
#' Plot paired observations
#'
#' Scatterplot of paired observations with reference line of no effect.
#' Quartiles of each condition are superimposed. Quartiles are estimated using the
#' Harrell-Davis estimator.
#' @param df Data frame with paired observations in two columns.
#' @param formula A formula with format response variable ∼ predictor variable,
#' where ~ (tilde) means "is modeled as a function of".
#' @param axis.steps Steps between x and y tick marks - default = 2.
#' @param min.x,min.y,max.x,max.y Specify axis limits - default square axes
#' @param colour_p Colour parameter of the scatterplot - default = "black".
#' @param size_p Size parameter of the scatterplot - default = 5.
#' @param stroke_p Stroke parameter of the scatterplot - default = 1.
#' @param shape_p Shape parameter of the scatterplot - default = 21.
#' @param fill_p Fill parameter of the scatterplot - default = "#ffb347",
#' @param alpha_p Alpha parameter of the scatterplot - default = .5,
#' @param linetype_q Linetype of the segments marking the quartiles - default = "dashed",
#' @param size_q Size of the segments marking the quartiles - default = 1,
#' @param alpha_q Alpha of the segments marking the quartiles - default = .5,
#' @param colour_q Colour of the segments marking the quartiles - default = "black"
#' @examples
#' df <- tibble(cond1 = rnorm(50), cond2 = cond1 + rnorm(50))
#' plot_scat2d(df, formula = cond2 ~ cond1) # basic call
#' plot_scat2d(df, formula = cond2 ~ cond1, size_q=3) # specify size of quartile segments
#' plot_scat2d(df, formula = cond2 ~ cond1, size_q=c(1,2,1)) # use thicker line for median
#' plot_scat2d(df, formula = cond2 ~ cond1, linetype_q = "longdash") # specify linetype - default = dashed
#' @seealso \code{\link{hd}}
#' @export
# Scatterplot of paired observations with a unity reference line and the
# quartiles of each condition (Harrell-Davis estimates) drawn as segments
# along both axes.
# NOTE(review): the default `df = df` is self-referential — calling
# plot_scat2d() without supplying `df` would fail on promise evaluation;
# callers are expected to always pass a data frame.  Confirm before relying
# on the default.
plot_scat2d <- function(df = df,
                        formula = cond2 ~ cond1,
                        min.x = NA,
                        min.y = NA,
                        max.x = NA,
                        max.y = NA,
                        axis.steps = 2,
                        size_p = 5,
                        stroke_p = 1,
                        shape_p = 21,
                        colour_p = "black",
                        fill_p = "#ffb347",
                        alpha_p = .5,
                        linetype_q = "dashed",
                        size_q = 1,
                        alpha_q = .5,
                        colour_q = "black"){
  # subset formula: map formula sides to the two data-frame columns
  # (subset_formula_wide is a package-internal helper defined elsewhere)
  subf <- subset_formula_wide(df, formula)
  xplot = subf$x_col_name
  yplot = subf$y_col_name
  # make data.frames for plotting quartile segments:
  # Harrell-Davis estimates of the .25/.5/.75 quantiles per condition,
  # paired up so each quartile can be drawn on both axes at once.
  hd1.25 <- hd(df[[xplot]],.25)
  hd1.5 <- hd(df[[xplot]],.5)
  hd1.75 <- hd(df[[xplot]],.75)
  hd2.25 <- hd(df[[yplot]],.25)
  hd2.5 <- hd(df[[yplot]],.5)
  hd2.75 <- hd(df[[yplot]],.75)
  df.25 <- data.frame(hd1=hd1.25,hd2=hd2.25)
  df.5 <- data.frame(hd1=hd1.5,hd2=hd2.5)
  df.75 <- data.frame(hd1=hd1.75,hd2=hd2.75)
  # quartile plot parameters: scalars are recycled to one value per quartile
  # so the three segments can be styled independently (e.g. thicker median)
  if(length(linetype_q)==1){
    linetype_q = rep(linetype_q,3)
  }
  if(length(size_q)==1){
    size_q = rep(size_q,3)
  }
  if(length(alpha_q)==1){
    alpha_q = rep(alpha_q,3)
  }
  if(length(colour_q)==1){
    colour_q = rep(colour_q,3)
  }
  # plot limits: default to square axes spanning both conditions
  if (is.na(min.x)){
    min.x <- min(df[[xplot]],df[[yplot]])
  }
  if (is.na(max.x)){
    max.x <- max(df[[xplot]],df[[yplot]])
  }
  if (is.na(min.y)){
    min.y <- min(df[[xplot]],df[[yplot]])
  }
  if (is.na(max.y)){
    max.y <- max(df[[xplot]],df[[yplot]])
  }
  # scatterplot of paired observations -----------------
  # Segment start points use min - abs(min) so the quartile lines run from
  # beyond the panel edge up to the quartile intersection.
  p <- ggplot(df, aes_string(x = xplot, y = yplot)) +
    # reference line of no effect (y = x)
    geom_abline(intercept = 0) +
    # quartiles
    scale_x_continuous(breaks=seq(floor(min.x),ceiling(max.x),axis.steps)) +
    scale_y_continuous(breaks=seq(floor(min.y),ceiling(max.y),axis.steps)) +
    coord_cartesian(xlim = c(floor(min.x), ceiling(max.x)),
                    ylim = c(floor(min.y), ceiling(max.y))) +
    geom_segment(aes(x=hd1,y=min.y-abs(min.y),xend=hd1,yend=hd2),data=df.25,linetype=linetype_q[1],size=size_q[1],alpha=alpha_q[1],colour=colour_q[1]) +
    geom_segment(aes(x=hd1,y=min.y-abs(min.y),xend=hd1,yend=hd2),data=df.5,linetype=linetype_q[2],size=size_q[2],alpha=alpha_q[2],colour=colour_q[2]) +
    geom_segment(aes(x=hd1,y=min.y-abs(min.y),xend=hd1,yend=hd2),data=df.75,linetype=linetype_q[3],size=size_q[3],alpha=alpha_q[3],colour=colour_q[3]) +
    geom_segment(aes(x=min.x-abs(min.x),y=hd2,xend=hd1,yend=hd2),data=df.25,linetype=linetype_q[1],size=size_q[1],alpha=alpha_q[1],colour=colour_q[1]) +
    geom_segment(aes(x=min.x-abs(min.x),y=hd2,xend=hd1,yend=hd2),data=df.5,linetype=linetype_q[2],size=size_q[2],alpha=alpha_q[2],colour=colour_q[2]) +
    geom_segment(aes(x=min.x-abs(min.x),y=hd2,xend=hd1,yend=hd2),data=df.75,linetype=linetype_q[3],size=size_q[3],alpha=alpha_q[3],colour=colour_q[3]) +
    # scatterplot
    geom_point(size=size_p,
               stroke=stroke_p,
               shape=shape_p,
               colour=colour_p,
               fill=fill_p,
               alpha=alpha_p) +
    theme_bw() +
    theme(axis.text.x = element_text(size=14),
          axis.text.y = element_text(size=14),
          axis.title.x = element_text(size=16,face="bold"),
          axis.title.y = element_text(size=16,face="bold"),
          legend.title = element_blank(),
          plot.title = element_text(size=20)) +
    labs(title="Paired observations")
  p
}
# ----------------------------------------------------------------------------
|
bbc0a88e123e371dd13d1f6b4683094c5cefac6d | 5e3d2312356f2dbf37a35b03dc57f854e3e5374b | /kathryn/bulk_data/q4_input/Evaluate_Performance.R | 6cedf5a6fc6b8e9842230c2aa1c532b27ace4022 | [] | no_license | immunogenomics/eQTL_Kathryn | c09f8bc45a7c927d3db2de0abb3bac8bc6f91d33 | 74da207e38d1da9664930cdac07ffe625bf1eb51 | refs/heads/master | 2023-03-06T21:47:30.641513 | 2019-07-11T15:27:20 | 2019-07-11T15:27:20 | 189,477,434 | 0 | 1 | null | 2019-07-11T15:27:28 | 2019-05-30T20:23:43 | R | UTF-8 | R | false | false | 6,157 | r | Evaluate_Performance.R | args = commandArgs(trailingOnly=TRUE)
#List of Functions
library(glmnet)
library(ROCR)
library(GenomicRanges)
#### Load_and_Train_Model_AUC_CVglmnet_LabeledData ####
Load_and_Train_Model_AUC_CVglmnet_LabeledData <- function(classifier_all, TP_training_regions, TN_training_regions, specificity_cutoff){
train <- classifier_all[1:(TP_training_regions + TN_training_regions),2:ncol(classifier_all)]
train_labels <- classifier_all[1:(TP_training_regions + TN_training_regions),1]
test <- classifier_all[(TP_training_regions + TN_training_regions+1):nrow(classifier_all),2:ncol(classifier_all)]
test_labels <- classifier_all[(TP_training_regions + TN_training_regions+1):nrow(classifier_all),1]
ENet_fit <- cv.glmnet(x=as.matrix(train[complete.cases(train),]), y= train_labels[complete.cases(train)], family = "binomial", type.measure = "auc", alpha = 0.5)
ENet_pred_lambdamin <- predict(ENet_fit,as.matrix(test[complete.cases(test),]),s="lambda.min", type = "response") #type = response ensures that the scale is from 0 to 1
pred <- prediction(ENet_pred_lambdamin, test_labels[complete.cases(test)])
#----------------------------------------------------------------------------
sa_matrix <- matrix(0,4,1)
#recall-precision curve from https://stackoverflow.com/questions/8499361/easy-way-of-counting-precision-recall-and-f1-score-in-r
PR.perf <- performance(pred, "prec", "rec")
w <- which(PR.perf@y.values[[1]] == "NaN")
if (length(w)>0){PR.perf@y.values[[1]][1] <- 1}
f <- approxfun(data.frame(PR.perf@x.values , PR.perf@y.values)) #https://stackoverflow.com/questions/39268159/auc-of-a-precision-recall-curve-by-using-package-rocr
auprc <- integrate(f, 0, 1)$value
#PRC <- cbind(PR.perf@x.values[[1]], PR.perf@y.values[[1]])
perf <- performance(pred, 'sens', 'spec') #x is spec, y is sens
ix <- which.min(abs(perf@x.values[[1]] - specificity_cutoff))
sensitivity <- perf@y.values[[1]][ix]
auc <- round(slot(performance(pred, measure = "auc"), "y.values")[[1]],4)
#matthews correlation coefficient.
#what is cutoff for tp and tn??
thresh <- mean(ENet_pred_lambdamin)
true_values <- test_labels[complete.cases(test)]
predicted_values <- ENet_pred_lambdamin
tp <- length(which(true_values == 1 & predicted_values > thresh))
tn <- length(which(true_values == 0 & predicted_values <= thresh))
fp <- length(which(true_values == 0 & predicted_values > thresh))
fn <- length(which(true_values == 1 & predicted_values <= thresh))
#mcc <- (tp*tn)-(fp*fn) / ((tp+fp) * (tp+fn) * (tn + fp) * (tn + fn))^0.5 #bug in wiki
n <- tp + fn + tn + fp
s = (tp + fn)/n
p = (tp + fp)/n
mcc = (tp/n - s * p ) / (p*s*(1-s)*(1-p))^0.5
sa_matrix[1,1] <- sensitivity
sa_matrix[2,1] <- auc # sensitivity vs specificoty - too easy
sa_matrix[3,1] <- auprc # preferred, not both based on negative set proportion
sa_matrix[4,1] <- mcc
#newList <- list("betas" = coef(ENet_fit, s = "lambda.min"), "prediction" = ENet_pred_lambdamin, "four_metrics" = sa_matrix) #, "PRC" = PRC)
newList <- list("four_metrics" = sa_matrix, "perf_spec_sens" = perf, "perf_prec_rec" = PR.perf)
return(newList)
}
#CV: repeated 80/20 train/test splits; collect the four performance metrics
# (sensitivity, AUC, AUPRC, MCC) per iteration, column-wise.
numiters <- 25
Sens_AUC_collection <- matrix(0,4,numiters)
myclassifier <- read.table("/Users/kathryntsai/OneDrive\ -\ Villanova\ University/College/2018-2019/Summer\ 2019/TFs_eQTLs_Research/GitHub/eQTL_Kathryn/kathryn/bulk_data/q4_output/Classifier_binary_ENCODE_cis_v_trans_eQTLs.txt.gz",sep = "\t", header = F, stringsAsFactors = FALSE) #might need \t for classifiers that were made from data table (new version), needed " " for those made from Granges
# drop any row containing an NA feature value
w <- which(is.na(myclassifier), arr.ind = T) #for tf = 254, NA is for conservation. can we go back and fix these? or are these signs that the genome is wrong? no, genome is definitely right. Perhaps conservation score couldn't be retrieved.
if(length(w) > 0){myclassifier <- myclassifier[-w[,1],]}
count <- 0
while (count < numiters){
  #replace nrow(positive_bed) with length(which(myclassifier[,1] == 1)) and nrow(negative_bed) with length(which(myclassifier[,1] == 0))
  #mix all rows
  # Positives (label 1) occupy the top of the table, negatives below;
  # shuffle each class and take 80% of each for training, 20% for testing.
  num_pos <- length(which(myclassifier[,1] == 1))
  num_neg <- length(which(myclassifier[,1] == 0))
  s_p <- sample(1:num_pos, size = num_pos, replace = F) #1000 at most
  s_n <- sample(1:num_neg, size = num_neg, replace = F) #10000
  classifier_TP_train <- myclassifier[1:num_pos,]
  classifier_TN_train <- myclassifier[(num_pos + 1):nrow(myclassifier),]
  classifier <- rbind(classifier_TP_train[s_p[1:(0.8*length(s_p))],], classifier_TN_train[s_n[1:(0.8*length(s_n))],], classifier_TP_train[s_p[(0.8*length(s_p)+1):(1*length(s_p))],],classifier_TN_train[s_n[(0.8*length(s_n)+1):(1*length(s_n))],])
  # tryCatch: a failed glmnet fit is reported and the iteration is retried
  # (count only advances on success)
  tryCatch({LATM <- Load_and_Train_Model_AUC_CVglmnet_LabeledData(classifier, 0.8*length(s_p), 0.8*length(s_n), 0.99)
  count <- count + 1
  print(count)
  Sens_AUC_collection[,count]<- LATM$four_metrics
  print(Sens_AUC_collection[,count])
  # Betas <- LATM$betas #want to show that betas don't change much between samplings
  # write.table(Betas[,1],paste0("PredictionOutput/Betas_Jan4Revisions2_",TFs[tf],"_trial",count,"_UNBALANCED_AUPRC.txt"), sep = "\t", quote = F, row.names = FALSE, col.names = FALSE)
  #write.table(LATM$PRC, paste0("PredictionOutput/PRcurve_coords_Jan4Revisions2_",TFs[tf],"_trial",count,"_UNBALANCED_AUPRC.txt"), sep = "\t", quote = F, row.names = FALSE, col.names = FALSE)
  }, error=function(e){cat("ERROR :",conditionMessage(e), "\n")})
}#while loop
write.table(Sens_AUC_collection, paste0("q4_output/perf_ENCODE_cisvtrans.txt"), sep = "\t", quote = F, row.names = FALSE, col.names = FALSE)
#entire model: up to 1,000 positive, 10,000 negative
# Fit the final model on ALL rows (no holdout).
ENet_fit <- cv.glmnet(x=as.matrix(myclassifier[,-1]), y=as.matrix(myclassifier[,1]), family = "binomial", type.measure = "auc", alpha = 0.5)
# NOTE(review): `tf` is never defined in this script (likely meant to come
# from `args`, e.g. tf <- args[1]) — the two lines below will error at
# runtime as written; confirm the intended source of `tf`.
assign(paste0("IMPACT_fit_",tf), ENet_fit)
w1 <- which(ls(1) == paste0("IMPACT_fit_",tf))
save(list = ls(1)[w1], file = paste0("q4_output/IMPACT_model_ENCODE_cisvtrans.RData"), envir = .GlobalEnv)
|
9cdd64a0258f74c6ab6e40bc3b4e997e0dbb44cc | 2a48e87db094e9e8dfb9cb1797463b008792634b | /cachematrix.R | 328fa9099e700cd0d7a55c58225241c5bcb33c72 | [] | no_license | dtrounine/ProgrammingAssignment2 | 4560506165e678e4c7db02e5abd9b97538d683bf | 1893040b69a456cda64ed9a9e2b17067f75d3d91 | refs/heads/master | 2020-04-08T16:19:11.613501 | 2016-02-21T22:36:19 | 2016-02-21T22:36:19 | 52,230,356 | 0 | 0 | null | 2016-02-21T22:02:20 | 2016-02-21T22:02:20 | null | UTF-8 | R | false | false | 1,579 | r | cachematrix.R | ## makeCacheMatrix and cacheSolve functions are used in conjunction
## when you need to find the inverse of a matrix and cache the result
## so that you can recall the result without calculating it again.
##
## Usage:
##
## First create a special 'matrix' from usual matrix using makeCacheMatrix function:
##
## cached_inverse <- makeCacheMatrix(my_matrix)
##
## Then get the inverse matrix by calling cacheSolve function providing the special
## matrix variable as argument:
##
## cacheSolve(cached_inverse)
##
## This will output the inverse of original matrix. If you make the same call again,
## it will not make the calculation, but return the cached result of first call.
## Creates special 'matrix' for caching the result
## Wrap a matrix in a list of accessors that also cache its inverse.
## The returned list exposes: set(y), get(), set_inverse(inv), get_inverse().
## Assigning a new matrix through set() clears any previously cached inverse.
makeCacheMatrix <- function(x = matrix()) {
  cached_inverse <- NULL
  list(
    set = function(y) {
      x <<- y
      cached_inverse <<- NULL
    },
    get = function() x,
    set_inverse = function(inv) cached_inverse <<- inv,
    get_inverse = function() cached_inverse
  )
}
## Given a special 'matrix' created by makeCacheMatrix function,
## returns the inverse matrix which is calculated by solve function.
## If called again with same argument, it doesn't call solve functions
## but returns previously cached result
## Return the inverse of the special 'matrix' produced by makeCacheMatrix.
## solve() runs only on a cache miss; on a hit the stored inverse is reused
## (announced with the "getting cached data" message).
## Extra arguments are forwarded to solve().
cacheSolve <- function(x, ...) {
  cached <- x$get_inverse()
  if (is.null(cached)) {
    cached <- solve(x$get(), ...)
    x$set_inverse(cached)
  } else {
    message("getting cached data")
  }
  cached
}
|
5e071fb238909fae8c71db9e0a3157d4d530fbc8 | 4d100f4d85c618640fb14d7a63e65f0b60e181c7 | /R/utility.R | 3efe9f94476ce1d79a5ed3b1837fd8ea144a3e6b | [] | no_license | THLfi/thlGraphs | a30d5e83be92bfc8bbdb9784259fd66975824bc0 | ade48bb0f1cf3f61d33a09b81539004fc1f7f01b | refs/heads/master | 2022-05-26T04:09:53.206753 | 2022-04-14T13:11:55 | 2022-04-14T13:11:55 | 205,375,117 | 1 | 2 | null | 2019-09-06T07:10:29 | 2019-08-30T12:10:24 | R | UTF-8 | R | false | false | 441 | r | utility.R |
#' Path to this package's installed `resources/` directory, normalized to
#' forward slashes and guaranteed to end with "/".
resourcePath <- function() {
  res_dir <- system.file('resources/', package = 'thlGraphs')
  if (!grepl("/$", res_dir)) {
    res_dir <- paste0(res_dir, "/")
  }
  if (.Platform$OS.type == "windows") {
    # Short 8.3 form avoids spaces; then convert backslashes to slashes.
    res_dir <- gsub("\\\\", "/", shortPathName(res_dir))
  }
  res_dir
}
#' Full path to the bundled default THL logo image inside resourcePath().
logopath <- function() {
  file.path(resourcePath(), "img", "THLDEFAULTLOGO.png")
}
|
54049386f20ef36e4eeadd45d55523b7d654064f | 49847a92812e9c56d4024530bfb8dbb570346082 | /bike-sharing-plots.R | 2b77d3158bdbd22597afdc2782ebeb776e7eadc4 | [
"MIT"
] | permissive | Love-you-data/bike-sharing-R | 456f8027650ce7265256af038507b450b9de299e | 76a3494351a71bb8227a4ec0042034f612b09321 | refs/heads/master | 2022-03-24T19:19:16.053923 | 2019-12-28T22:40:55 | 2019-12-28T22:40:55 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 18,373 | r | bike-sharing-plots.R | # -- Plots ---------------
# Kevin F Cullen
# Run `bike-sharing-setup.R` first.
# That's where the library() calls and CSV reads live.
# Load shared data (bikeall.df) and libraries from the setup script.
source('bike-sharing-setup.R')
theme_set(theme_light())
# All the vars for plotting. ----------------
# Using train == 1 because we need `count`
bikeplot.df <- subset(bikeall.df, train == 1)
# Shave off the highest outliers in count
# (everything above the 90th percentile is clamped to the 90th percentile)
bikeplot.df$count_shaved <- bikeplot.df$count
bikeplot.df$count_shaved[bikeplot.df$count_shaved >
  quantile(bikeplot.df$count_shaved, c(.90))] <- quantile(bikeplot.df$count_shaved, c(.90))
# Declare some variables (palettes / axis labels reused by the plots below)
boxplot.binary.colors <- c("#E69F00", "#56B4E9")
seasons <- c("Spring", "Summer", "Fall", "Winter")
days.of.week <- c("Sun", "Mon", "Tue", "Wed", "Thu", "Fri", "Sat")
workingday.colors <- c("blue3", "darkorange2") # color per workday/not
holiday.colors <- c("0" = "blue", "1" = "red")
weather.colors <- c("blue", "orange", "red", "black") # color per level of weather
# - weather: (categorical)
#   - 1: Clear, Few clouds, Partly cloudy, Partly cloudy
#   - 2: Mist + Cloudy, Mist + Broken clouds, Mist + Few clouds, Mist
#   - 3: Light Snow, Light Rain + Thunderstorm + Scattered clouds, Light Rain + Scattered clouds
#   - 4: Heavy Rain + Ice Pellets + Thunderstorm + Mist, Snow + Fog
# Leadoff Graphic: Histogram of count ----------------------
# Three count histograms on shared x/y limits: all hours, peak, and offpeak.
count.histo <- ggplot() + geom_histogram(data = bikeplot.df, aes(x = count), color = "grey33",
  fill = "olivedrab",alpha = 0.3, binwidth = 25) +
  # scale_y_log10(breaks = c(5, 25, 100, 250, 500, 1000, 1500)) +
  expand_limits(x = 1000, y = 1500) +
  labs(x = "count", y = "Frequency", title = "Histogram: count")
peak.histo <- ggplot(data = bikeplot.df[bikeplot.df$peak == TRUE,]) +
  geom_histogram(aes(x = count), color = "grey33",
  fill = "darkgoldenrod3", alpha = 0.3, binwidth = 25) +
  expand_limits(x = 1000, y = 1500) +
  labs(y = "Frequency", title = "peak hours")
offpeak.histo <- ggplot() + geom_histogram(data = subset(bikeplot.df, hour < 9 | hour > 20), aes(x = count), color = "grey33",
  fill = "royalblue3",alpha = 0.3, binwidth = 25) +
  expand_limits(x = 1000, y = 1500) +
  labs(y = "Frequency", title = "offpeak hours")
grid.arrange(count.histo, peak.histo, offpeak.histo, ncol = 3)
rm(count.histo, peak.histo, offpeak.histo)
# hour & dayofweek ----------------------------------
# Bar chart : count by dayofweek ---------
# Mean and median per weekday, melted to long form so both statistics
# render as dodged bars sharing one fill legend.
dayofweek.bar.data <- group_by(bikeplot.df, dayofweek)
dayofweek.bar.data <- summarise(dayofweek.bar.data, mean = mean(count), median = median(count))
dayofweek.bar.data <- melt(dayofweek.bar.data, id.vars = 'dayofweek')
dayofweek.bar <- ggplot() +
  geom_bar(data = dayofweek.bar.data,
           aes(x = dayofweek, y = value, fill = variable),
           color = "grey33",
           stat = 'identity', position = 'dodge', alpha = 0.7) +
  scale_x_discrete(labels = days.of.week) +
  theme(legend.position="top") +
  labs(title = "count by dayofweek", y = "count")
# coord_flip()
dayofweek.bar
rm(dayofweek.bar.data)
# Tile plot : Count: hour x day -------------------
hour.tile.data <- group_by(bikeplot.df, hour, dayofweek)
hour.tile.data <- summarise(hour.tile.data, mean_count = mean(count))
hour.day.tile <- ggplot(hour.tile.data, aes(hour, dayofweek)) +
  geom_tile(aes(fill = mean_count)) +
  # geom_text(aes(label = round(mean_count, 0))) +
  scale_fill_gradient(low = "white", high = "red") +
  scale_y_discrete(labels = days.of.week, breaks = c(7, 6, 5, 4, 3, 2, 1)) +
  scale_x_continuous(breaks = seq(0, 24, 3)) +
  theme(legend.position="top") +
  labs(title = "Heatmap: mean_count")
hour.day.tile
rm(hour.tile.data)
# JITTERy Scatter Plot : (color: workingday)
hour.scatter2 <- ggplot(bikeplot.df, aes(hour, count, color = workingday)) +
  geom_smooth(se = FALSE) +
  geom_point(aes(x = jitter(hour, 2), y = count),
             pch = 16, alpha = 0.1) +
  geom_hline(yintercept = 145) +
  scale_y_sqrt() +
  scale_color_manual(values = workingday.colors) +
  theme(legend.position="top") +
  labs(title = "count by hour | color: workingday")
# Place on grid.
grid.arrange(dayofweek.bar, hour.day.tile, hour.scatter2, ncol = 3) # , widths = c(1, 2, 2))
# dayofweek ---------------------------
# JITTERy Scatter Plot: count by dayofweek (Holidays in red)
ggplot(bikeplot.df) + geom_point(aes(x = jitter(as.numeric(dayofweek), 2), y = count, colour = bikeplot.df$holiday),
                                 pch = 16, alpha = 0.3) +
  scale_color_manual(values = holiday.colors) +
  labs(title = "count by Day of Week (with Holidays in red)",
       x = "Day of Week (1-7 | Sun - Sat)",
       y = "count",
       color = "Holiday?")
# ------- holiday & is_daylight BOX PLOT --------------
# Melt the two binary flags into one `variable` column so they appear as
# side-by-side boxplot pairs on one axis.
holiday_light.df <- bikeplot.df[c("holiday", "is_daylight", "count")]
holiday_light.df <- melt(holiday_light.df, measure.vars = c("holiday", "is_daylight"))
ggplot(holiday_light.df, aes(x = variable, y = count, fill = value)) +
  geom_boxplot() +
  scale_fill_manual(values = boxplot.binary.colors) +
  labs(title = "holiday and is_daylight")
rm(holiday_light.df)
# --------- WEATHER AND SEASON ---------
## side-by-side boxplots
weather.box <- ggplot(bikeplot.df) +
  geom_boxplot(aes(x = weather, y = count), pch = 20, color = "grey33", fill = "blue", alpha = 0.4) +
  labs(x = "weather (1-4)", y = "count", title = "count by weather",
       subtitle = "4 = Heavy Rain + Ice Pellets\n + Thunderstorm + Mist, Snow + Fog") +
  theme_light(base_size = 14)
season.box <- ggplot(bikeplot.df) +
  geom_boxplot(aes(x = season, y = count), pch = 20, color = "grey33", fill = "green", alpha = 0.4) +
  scale_x_discrete(labels = seasons) +
  labs(y = "count", title = "count by season") +
  theme_light(base_size = 14)
weather.histo <- ggplot(bikeplot.df, (aes(x = weather))) +
  geom_bar(color = "grey33", alpha = 0.5, fill = "blue") +
  labs(title = "Histogram: weather") +
  theme_light(base_size = 14)
# Place on grid.
grid.arrange(weather.box, weather.histo, season.box, ncol=3)
# Persist this panel to disk (assumes a plots/ directory exists).
ggsave("plots/weather-season-boxplot-histo.png",
       arrangeGrob(weather.box, weather.histo, season.box, ncol=3),
       device = "png", units = "in", dpi = 72, scale = 1,
       width = 16, height = 5)
# Temperature (atemp & temp) ----------------------------
# Count rarely low when temp is high
# - temp: (double) Celsius
# - atemp: (double) "feels like" in Celsius
temp.histo.data <- data.frame(atemp = as.factor(bikeplot.df$atemp),
                              temp = as.factor(bikeplot.df$temp))
ggplot(data = temp.histo.data, aes(x = temp)) +
  geom_bar(alpha = 0.5, color = "grey33", fill = "red3") +
  theme(axis.text.x = element_text(angle = 90, hjust = 1)) +
  labs(title = "Histogram: temp")
# facet_grid: atemp vs count, grouped by workingday and peak/offpeak
ggplot(bikeplot.df, aes(atemp, count)) +
  geom_point(aes(x = jitter(atemp, 2)), pch = 16, color = "orange", alpha = 0.2) +
  geom_smooth(se = FALSE) +
  facet_grid(workingday ~ peak, labeller = label_both) +
  labs(title = 'atemp, peak, workingday')
# Scatter Plots: Count by Temperature with mean usage trendline
# atemp
atemp.scatter <- ggplot(bikeplot.df, aes(atemp, count)) +
  geom_point(aes(x = jitter(atemp, 2)), pch = 16, color = "orange", alpha = 0.2) +
  geom_smooth(se = FALSE) +
  # Black line: mean count at each observed atemp value.
  geom_line(data = setNames(aggregate(bikeplot.df$count, by = list(bikeplot.df$atemp), FUN = mean),
                            c("atemp", "meancount")),
            aes(x = atemp, y = meancount)) +
  labs(x = "atemp", y = "count", title = "count by atemp - 'feels like' ºC",
       subtitle = "mean trendline/loess smoothing")
atemp.scatter
# temp
# Scatter of count vs. actual temperature with a mean-count trendline.
temp.scatter <- ggplot(bikeplot.df, aes(temp, count)) +
  geom_point(aes(x = jitter(temp, 2)), pch = 16, color = "salmon", alpha = 0.2) +
  geom_smooth(se = FALSE) +
  geom_line(data = setNames(aggregate(bikeplot.df$count, by = list(bikeplot.df$temp), FUN = mean),
                            c("temp", "meancount")),
            aes(x = temp, y = meancount)) +
  # Fix: y-axis label was the typo "meanount"; the axis plots counts,
  # matching the sibling atemp.scatter above.
  labs(x = "temp", y = "count", title = "count by temp (ºC) - w/ mean trend")
# stat_smooth(color = "red", method = gam, formula = y ~ s(x)) +
# stat_smooth(color = "green", method = lm, formula = y ~ x)
temp.scatter
# mean temp by hour of day: Line Plot
temp.line <- ggplot(
  data =
    setNames(aggregate(bikeplot.df$temp, by = list(bikeplot.df$hour),
                       FUN = mean), c("hour", "meantemp")),
  aes(x = hour, y = meantemp)) +
  geom_line() + ylim(0, 41) +
  labs(title = "temp varies little through day (~ 5ºC)", y = "mean temp", x = "hour (00-23)")
# Place on grid.
grid.arrange(atemp.scatter, temp.scatter, temp.line, ncol = 3)
# Peak & Offpeak...
# Mean count per (atemp, peak) cell, shared by the two scatter plots below.
mean.atemp.count <- setNames(aggregate(bikeplot.df$count,
                                       by = list(bikeplot.df$atemp, bikeplot.df$peak),
                                       FUN = mean),
                             c("atemp", "peak", "meancount"))
# Peak
atemp.peak.scatter <- ggplot(data = bikeplot.df[bikeplot.df$peak == TRUE,],
                             aes(x = atemp, y = count)) +
  geom_point(aes(x = jitter(atemp, 2), color = is_daylight), pch = 16, alpha = 0.2) +
  geom_line(data = mean.atemp.count[mean.atemp.count$peak == TRUE,],
            aes(y = meancount), size = 1) +
  geom_smooth(color = "coral1",
              method = "gam", formula = y ~ s(x), se = FALSE) +
  scale_color_manual(values = c("navy", "gold3")) +
  theme(legend.position = "top") +
  labs(x = "atemp", y = "count", title = "count vs. atemp @ peak (8 < hour < 21)",
       subtitle = "method = 'gam', formula = y ~ s(x))")
atemp.peak.scatter
# Offpeak
atemp.offpeak.scatter <- ggplot(data = bikeplot.df[bikeplot.df$peak == FALSE,],
                                aes(x = atemp, y = count)) +
  geom_point(aes(x = jitter(atemp, 2), color = is_daylight),
             pch = 16, alpha = 0.2) +
  geom_line(data = mean.atemp.count[mean.atemp.count$peak == FALSE,], aes(y = meancount), size = 1) +
  geom_smooth(color = "coral1",
              method = "gam", formula = y ~ s(x), se = FALSE) +
  expand_limits(y = 1000) +
  scale_color_manual(values = c("navy", "gold3")) +
  theme(legend.position = "top") +
  labs(x = "atemp", y = "count", title = "count vs. atemp @ offpeak (hour < 9 | hour > 20)",
       subtitle = "method = 'gam', formula = y ~ s(x))")
atemp.offpeak.scatter
# Place on grid.
grid.arrange(atemp.peak.scatter, atemp.offpeak.scatter, ncol = 2)
rm(temp.histo.data, mean.atemp.count)
# --------- Humidity ----------------------------
# Count by Humidity : JITTERy Scatter Plot
# make trend line data first (mean count at each observed humidity value)
mean.humidity.count <- setNames(aggregate(bikeplot.df$count,
                                          by = list(bikeplot.df$humidity),
                                          FUN = mean), c("humidity", "meancount"))
humidity.weather.scatter <- ggplot() +
  # scatter plot
  geom_point(data = bikeplot.df, aes(x = jitter(humidity, 2), y = count, colour = bikeplot.df$weather),
             pch = 16, alpha = 0.2) +
  # scale_y_log10(breaks = c(5, 25, 100, 250, 500, 1000, 1500)) +
  scale_y_sqrt() +
  scale_color_manual(values=weather.colors) +
  theme(legend.position="top") +
  labs(x = "humidity", y = "count", color = "weather", title = "count by humidity | mean trendline/loess smoothing") +
  geom_line(data = mean.humidity.count, aes(x = humidity, y = meancount), size = 1, color = "black") +
  geom_smooth(data = mean.humidity.count, aes(x = humidity, y = meancount), color = "green2", se = FALSE)
# stat_smooth(data = bikeplot.df, aes(x = humidity, y = count), color = "red", method = gam, formula = y ~ s(x)) +
humidity.weather.scatter
# facet_grid: humidity vs count, grouped by workingday and peak/offpeak
ggplot(bikeplot.df, aes(humidity, count)) +
  geom_point(aes(x = jitter(humidity, 2), colour = bikeplot.df$weather), pch = 16, alpha = 0.2) +
  geom_smooth(se = FALSE) +
  scale_color_manual(values = weather.colors) +
  facet_grid(workingday ~ peak, labeller = label_both) +
  labs(title = 'humidity, peak, workingday, weather', colour = "weather")
# Humidity by hour Scatterplot w/ mean trendline
data.for.plot <- setNames(aggregate(bikeplot.df$humidity,
                                    by = list(bikeplot.df$hour),
                                    FUN = mean), c("hour", "meanhumidity"))
humidity.by.hour <- ggplot() +
  # geom_bar(data = mean.hourly.count, aes(x = hour, y = meancount / 5),
  #          colour = "grey", fill = "grey", alpha = 0.2,
  #          stat = "identity") +
  geom_point(data = bikeplot.df, aes(x = jitter(bikeplot.df$hour, 2), y = humidity),
             pch = 20, colour = "seagreen3", alpha = 0.3) +
  geom_line(data = data.for.plot, aes(x = hour, y = meanhumidity), stat = "identity") +
  labs(x = "Hour of Day (00-23)", y = "humidity",
       title = "humidity by hour", subtitle = "mean trendline")
humidity.by.hour
# Place on grid.
grid.arrange(humidity.weather.scatter, humidity.by.hour, ncol = 2)
rm(mean.humidity.count, data.for.plot)
# ---- windspeed ----------------------------------
# Count by Wind Speed : Scatter Plot
# <<<<<<< WIND KILLS DEMAND (Or is it never windy?) <<<<<<<<<<<<<<<<<<<<<<<<
# only 30 distinct values, makes this goofy.
mean.windspeed.count <- setNames(aggregate(bikeplot.df$count,
                                           by = list(bikeplot.df$windspeed), FUN = mean),
                                 c("windspeed", "meancount"))
windspeed.scatter <- ggplot() +
  geom_point(data = bikeplot.df,
             aes(x = jitter(bikeplot.df$windspeed, 6), y = count),
             pch = 20,
             # colour = bikeplot.df$weather,
             # colour = bikeplot.df$hour %/% 5 + 1,
             colour = "green4",
             alpha = 0.2) +
  scale_y_sqrt() +
  # scale_color_manual(values=weather.colors) +
  labs(x = "windspeed (units not provided)", y = "count", title = "count by windspeed - with mean count trendline") +
  geom_line(data = mean.windspeed.count, aes(x = windspeed, y = meancount), size = 1, color = "grey28") +
  geom_smooth(data = mean.windspeed.count, aes(x = windspeed, y = meancount), color = "red3", se = FALSE)
# Safe to rm here: the layers above already hold their own copy of the data.
rm(mean.windspeed.count)
# Histogram of windspeed speed
wind.histo <- ggplot(bikeplot.df) +
  geom_histogram(aes(x = windspeed),
                 colour = "grey33", fill = "green4",
                 alpha = 0.3, binwidth = 1) +
  labs(x = "windspeed (unknown units)", y = "Frequency", title = "Histogram: windspeed")
# Place on grid.
grid.arrange(wind.histo, windspeed.scatter, ncol=2)
# ---- CONGRESS IN SESSION... LOWER DEMAND ??? -------------
## side-by-side boxplots
congress.df <- bikeplot.df[c("house", "senate", "congress_both", "count")]
congress.df <- melt(congress.df, measure.vars = c("house", "senate", "congress_both"))
ggplot(congress.df, aes(x = variable, y = count, fill = value)) +
  geom_boxplot(color = "grey33") +
  scale_fill_manual(values = boxplot.binary.colors) +
  labs(title = "Congress (house or senate) in session")
rm(congress.df)
# --------- GAME ON? ... INCREASED DEMAND !!!!! -------------
## side-by-side boxplots
# tried to get: variable, value, count (as in... nationals, 1, 250)
# but I got... count, variable(event), value (0-1)
sports.df <- bikeplot.df[c("count", "capitals", "nationals", "united", "wizards", "sporting_event")]
sports.df <- melt(sports.df, measure.vars = c("capitals", "nationals", "united", "wizards", "sporting_event"))
ggplot(sports.df, aes(x = variable, y = count, fill = value)) +
  geom_boxplot(color = "grey33") +
  scale_fill_manual(values = boxplot.binary.colors) +
  # theme(legend.position = top) +
  labs(title = "Pro sports events & combined 'sporting_event'")
rm(sports.df)
## Boxplot: sporting_event
sportinghours.any.box <- ggplot(bikeplot.df,
                                aes(x = sporting_event, y = count, fill = sporting_event)) +
  geom_boxplot() +
  scale_fill_manual(values = boxplot.binary.colors) +
  theme(legend.position = "none") +
  labs(title = "sporting_event: all hours")
# Boxplot: sporting_event only during comparable times of day
# (the subset keeps hours 12-15 and 18-23)
sportinghours.df <- subset(bikeplot.df, (hour > 17) | (hour > 11 & hour < 16))
sportinhours.comp.box <- ggplot(sportinghours.df,
                                aes(x = sporting_event, y = count, fill = sporting_event)) +
  geom_boxplot() +
  #theme_minimal() +
  theme(legend.position = "none") +
  scale_fill_manual(values = boxplot.binary.colors) +
  labs(title = "sporting_event: noon-15:59 or after 17:59")
grid.arrange(sportinghours.any.box, sportinhours.comp.box, ncol = 2)
# --------- UNIVERSITIES IN SESSION... LOWER DEMAND ??? ---------
# Melt the per-university session flags into one column for paired boxplots.
unis.df <- bikeplot.df[c("count", "cua_session", "au_session", "howard_session", "session_any")]
unis.df <- melt(unis.df, measure.vars = c("cua_session", "au_session", "howard_session", "session_any"))
ggplot(unis.df, aes(x = variable, y = count, fill = value)) +
  geom_boxplot(color = "grey33") +
  scale_fill_manual(values = boxplot.binary.colors) +
  labs(title = "Universities in session?")
rm(unis.df)
# Convert session_count to factor to avoid issues with "continuous x aesthetic" from fill = session_count
bikeplot.df[,'session_count'] <- factor(bikeplot.df[,'session_count'])
session_count.box <- ggplot(bikeplot.df, aes(x = session_count, y = count, fill = session_count)) +
  geom_boxplot(color = "grey33") +
  theme(legend.position = "top") +
  labs(title = "University session_count")
session_any.box.data <- bikeplot.df[c('season', 'session_any', 'count')]
session_any.box <- ggplot(session_any.box.data, aes(x = season, y = count, fill = session_any)) +
  geom_boxplot(color = "grey33") +
  scale_x_discrete(labels = seasons) +
  scale_fill_manual(values = boxplot.binary.colors) +
  theme(legend.position = "top") +
  labs(title = "session_any is not a proxy for season")
# Use geom_bar instead of geom_histogram to avoid Error: StatBin requires a
# continuous x variable: the x variable is discrete. Perhaps you want stat="count"?
session_count.histo <- ggplot(data = bikeplot.df, (aes(x = session_count))) +
  geom_bar(fill = "slateblue", color = "grey33", alpha = 0.7) +
  labs(title = "Histogram: session_count")
# Place on grid.
grid.arrange(session_count.histo, session_count.box, session_any.box, ncol=3)
|
6834ca5fa8fe0e7c9feec32336d07bd214226cfa | 872259f0a278381964b2d9e90d814a0b2fdceb19 | /old_code/functions.R | b15a2dddc716a7c405d5c6b554e562724de01bf8 | [] | no_license | pascalxia/sd_orientation_github | c8471cb29d64f3906d584ead96abd995b011ebdb | f87b06c457b6b247a288765f445ed002b4a185cc | refs/heads/master | 2021-01-13T06:58:48.815866 | 2017-02-27T03:05:34 | 2017-02-27T03:05:34 | 81,380,398 | 0 | 1 | null | null | null | null | UTF-8 | R | false | false | 8,929 | r | functions.R | AngularDiff = function(a, b){
  # Signed angular difference a - b in degrees, wrapped into (-180, 180].
  diff = a-b
  diff = diff %% 360
  # Fold results above 180 onto the negative side; which() drops NAs,
  # so missing inputs pass through unchanged.
  inds = which(diff>180)
  diff[inds] = diff[inds] - 360
  return(diff)
}
# Signed difference between two orientations (degrees), wrapped into
# (-90, 90]. Orientations are 180-periodic, so OrientDiff(170, 10) is -20.
# which() skips NAs, so missing values propagate untouched.
OrientDiff = function(a, b){
  delta = (a - b) %% 180
  over = which(delta > 90)
  delta[over] = delta[over] - 180
  return(delta)
}
# Reduce an arbitrary angle (degrees) to its orientation in (-90, 90].
GetOrientation = function(angle){
  wrapped = angle %% 180
  flip = which(wrapped > 90)
  wrapped[flip] = wrapped[flip] - 180
  return(wrapped)
}
# Moving average of y over a sliding window in x.
#
# x, y:   paired numeric vectors.
# window: width of the averaging window (x units).
# step:   spacing between successive window start positions.
# circle: if TRUE, treat x as circular: points within half a window of each
#         end are copied to the opposite end (x shifted by the x-range,
#         y shifted by +/-180) before averaging.
#         NOTE(review): the +/-180 y-shift assumes y is a 180-periodic
#         orientation measure — confirm before reusing for other data.
#
# Returns a data.table with columns x (window centers) and ma.
MovingAverage = function(x, y, window, step, circle = FALSE){
  dataTable = data.table(x, y)
  setkey(dataTable, x)  # sort by x
  xmin = min(x, na.rm = TRUE)
  xmax = max(x, na.rm = TRUE)
  xRange = xmax - xmin
  if(circle){
    # Pad both ends with wrapped copies, then recurse with circle = FALSE.
    headData = dataTable[x<=xmin+0.5*window,]
    tailData = dataTable[x>=xmax-0.5*window,]
    headData[, x:=x + xRange]
    headData[, y:=y + 180]
    tailData[, x:=x - xRange]
    tailData[, y:=y - 180]
    dataTable = rbind(dataTable, headData, tailData)
    return(MovingAverage(dataTable$x, dataTable$y, window, step, FALSE))
  }
  else if(circle==FALSE){
    # Number of full windows that fit in [xmin, xmax].
    n = floor((xRange - window)/step) + 1
    ma = numeric(n)
    for(i in 1:n){
      start = xmin+(i-1)*step
      ma[i] = dataTable[x>=start & x<=start+window, mean(y, na.rm = TRUE)]
    }
    maTable = data.table(x = seq(xmin+0.5*window, xmax-0.5*window, step),
                         ma)
    return(maTable)
  }
  else{
    # Fallback for non-logical `circle`; the printed string is the
    # (invisible) return value — no averaged result is produced.
    print('circle should be a logical value')
  }
}
# Convert dvm parameters (k, b) into the location (`root`) and signed
# height (`amplitude`) of the curve's extremum.
kbToRa = function(k, b){
  peak_x = acos((sqrt(4 * k^2 + 1) - 1) / (2 * k))
  peak_y = b * exp(k * cos(peak_x)) * k * sin(peak_x)
  return(list(root = peak_x, amplitude = peak_y))
}
# Inverse of kbToRa: recover dvm parameters (k, b) from an extremum
# location (`root`) and signed height (`amplitude`).
raToKb = function(root, amplitude){
  c_r = cos(root)
  k_val = -c_r / (c_r^2 - 1)
  b_val = amplitude / exp(k_val * c_r) / k_val / sin(root)
  return(list(k = k_val, b = b_val))
}
# Derivative-of-von-Mises curve: -b * k * exp(k * cos(x)) * sin(x).
# Vectorized over x.
dvm = function(x, k, b){
  envelope = exp(k * cos(x))
  return(-b * k * envelope * sin(x))
}
# Sum-of-squares loss between observations y and the dvm curve with
# param = c(k, b) evaluated at x. NAs are excluded from the sum.
dvmLoss = function(param, x, y){
  fitted = dvm(x, param[1], param[2])
  return(sum((fitted - y)^2, na.rm = TRUE))
}
# Predicted orientation-dependent bias: the sum of two dvm lobes, one
# anchored at 0 deg (param[1:2] = k0, b0) and one at 90 deg
# (param[3:4] = k90, b90). `stretch` maps degrees onto dvm's 2*pi domain.
OrientBias = function(orient, param, stretch){
  lobe0 = dvm(stretch * orient, param[1], param[2])
  lobe90 = dvm(stretch * (orient - 90), param[3], param[4])
  return(lobe0 + lobe90)
}
# Loss between observed bias and the OrientBias model prediction.
# norm = "L2" gives the sum of squared residuals, "L1" the sum of absolute
# residuals; NAs are dropped in either case.
# Fix: an unrecognized `norm` used to print() a message and then fail with
# the cryptic "object 'loss' not found"; it now stops with a clear error.
OrientCorrectLoss = function(param, orient, bias, stretch, norm="L2"){
  biasHat = OrientBias(orient, param, stretch)
  if(norm=="L2"){
    loss = sum((biasHat - bias)^2, na.rm = TRUE)
  } else if(norm=="L1"){
    loss = sum(abs(biasHat - bias), na.rm = TRUE)
  } else {
    stop("Unknown norm type: ", norm)
  }
  return(loss)
}
#subject analysis-------------
# Correlation between stimulus and the (optionally permuted) response.
# The response is re-expressed as stimulus + wrapped error first, so the
# 180-degree circularity of orientations does not distort the correlation.
CalCor = function(perm=NULL, data){
  resp = if (is.null(perm)) data$response else data$response[perm]
  unwrapped = data$stimulus + OrientDiff(resp, data$stimulus)
  return(cor(data$stimulus, unwrapped, use = 'na.or.complete'))
}
# Mean absolute (orientation-wrapped) response error, optionally under a
# permutation of trials. NAs are ignored.
CalMeanError = function(perm=NULL, data){
  resp = if (is.null(perm)) data$response else data$response[perm]
  return(mean(abs(OrientDiff(resp, data$stimulus)), na.rm = TRUE))
}
# Difference in mean stickiness between forward and non-forward trials,
# optionally under a permutation of the forward labels.
CalStickyFBDiff = function(perm=NULL, data){
  fwd = if (is.null(perm)) data$forward else data$forward[perm]
  return(mean(data$sticky[fwd]) - mean(data$sticky[!fwd]))
}
#permutation test-----------
# Permutation-test p-value for an observed statistic given its null
# (permutation) replicates, using the add-one correction (b + 1)/(nPerm + 1).
# Replicates are centered on their own mean before comparison.
#
# testStat: observed statistic.
# simus:    vector of statistics computed from permuted data.
# tail:     'two' (default), 'one' (side chosen by the sign of the observed
#           deviation), 'upper', or 'lower'.
CalP = function(testStat, simus, tail = 'two'){
  # Fix: derive the replicate count from `simus` itself. The original code
  # referenced a free variable `nPerm`, which — because R scoping is
  # lexical — only worked if a global of that name happened to exist.
  nPerm = length(simus)
  meanSimu = mean(simus)
  if(tail=='two'){
    p = (sum(abs(simus-meanSimu)>=abs(testStat-meanSimu))+1)/(nPerm+1)
  }
  else if(tail=='one'){
    if(testStat-meanSimu>0){
      p = (sum(simus-meanSimu>=testStat-meanSimu)+1)/(nPerm+1)
    } else{
      p = (sum(simus-meanSimu<=testStat-meanSimu)+1)/(nPerm+1)
    }
  }
  else if(tail=='upper'){
    p = (sum(simus-meanSimu>=testStat-meanSimu)+1)/(nPerm+1)
  }
  else if(tail=='lower'){
    p = (sum(simus-meanSimu<=testStat-meanSimu)+1)/(nPerm+1)
  }
  else{
    # Fix: previously fell through and failed with "object 'p' not found".
    stop("Unknown tail type: ", tail)
  }
  return(p)
}
# Generic permutation test.
#
# data:        a data.table with a `row` column (row indices); Cal receives
#              either NULL (observed statistic) or a permutation of `row`.
# nPerm:       number of permutation replicates.
# Cal:         statistic function with signature Cal(perm, data); invoked
#              as Cal(data = data) for the observed statistic.
# strata:      optional grouping column(s); when given, row indices are
#              shuffled only within each stratum (data.table `by`).
# tail:        passed through to CalP ('two', 'one', 'upper', 'lower').
# returnSimus: if TRUE, also return the vector of permuted statistics.
#
# NOTE(review): CalP's p-value formula reads a variable `nPerm`; with R's
# lexical scoping CalP does NOT see this function's nPerm argument — verify
# that a matching global `nPerm` exists wherever this is used.
PermutationTest = function(data, nPerm, Cal,
                           strata = NULL, tail = 'two',
                           returnSimus = FALSE){
  testStat = Cal(data = data)
  #nData = length(x)
  simus = rep(0, nPerm)
  for(i in 1:nPerm){
    if(!is.null(strata)){
      #determine the permutation (shuffle row indices within each stratum)
      perm = data[, row[sample.int(.N)], by = strata]$V1
      #calculate the statistic
      simus[i] = Cal(perm, data)
    } else{
      #determine the permutation (shuffle all row indices)
      perm = data[, row[sample.int(.N)]]
      #calculate the statistic
      simus[i] = Cal(perm, data)
    }
  }
  p = CalP(testStat, simus, tail)
  if(returnSimus)
    result = list(testStat=testStat, p=p, simus=simus)
  else
    result = list(testStat=testStat, p=p)
  return(result)
}
#parameter analysis----------
# Fit the serial-dependence regressions on a trial table (a data.table;
# NOTE: a `qualified` column is added to `trial` by reference).
# errorThresh / intervalThresh define which trials qualify for fitting.
# Returns a list of slope/intercept estimates and their p-values;
# the Inf defaults mark values that were never assigned.
SdAnalysis = function(trial, errorThresh, intervalThresh){
  res = list(regularSd = Inf, intercept = Inf,
             direction1 = Inf, direction1ForwardTrue = Inf,
             regularSdP = Inf, interceptP = Inf,
             direction1P = Inf, direction1ForwardTrueP = Inf)
  # Flag trials passing the error / inter-trial-interval thresholds.
  trial[, qualified:=FALSE]
  trial[abs(error)<=errorThresh & interval <= intervalThresh,
        qualified:=TRUE]
  # Ordinary trials: error ~ direction.
  model = lm(error~direction,
             trial[surprise==FALSE&start==FALSE&qualified==TRUE,])
  res$regularSd = model$coefficients[2]
  res$regularSdP = summary(model)$coefficients[2,4]
  # Surprise trials: error2 ~ direction * forward interaction model.
  # NOTE(review): positions [1], [2], [4] assume a specific coefficient
  # ordering from the factor coding of direction/forward — confirm on data.
  model = lm(error2~direction*forward,
             trial[surprise==TRUE&qualified==TRUE,])
  res$intercept = model$coefficients[1]
  res$direction1 = model$coefficients[2]
  res$direction1ForwardTrue = model$coefficients[4]
  res$interceptP = summary(model)$coefficients[1,4]
  res$direction1P = summary(model)$coefficients[2,4]
  res$direction1ForwardTrueP = summary(model)$coefficients[4,4]
  return(res)
}
# Multiple plot function
#
# ggplot objects can be passed in ..., or to plotlist (as a list of ggplot objects)
# - cols: Number of columns in layout
# - layout: A matrix specifying the layout. If present, 'cols' is ignored.
#
# If the layout is something like matrix(c(1,2,3,3), nrow=2, byrow=TRUE),
# then plot 1 will go in the upper left, 2 will go in the upper right, and
# 3 will go all the way across the bottom.
#
multiplot <- function(..., plotlist=NULL, file, cols=1, layout=NULL) {
  # NOTE(review): `file` is accepted but never used inside this function.
  library(grid)
  # Make a list from the ... arguments and plotlist
  plots <- c(list(...), plotlist)
  numPlots = length(plots)
  # If layout is NULL, then use 'cols' to determine layout
  if (is.null(layout)) {
    # Make the panel
    # ncol: Number of columns of plots
    # nrow: Number of rows needed, calculated from # of cols
    layout <- matrix(seq(1, cols * ceiling(numPlots/cols)),
                     ncol = cols, nrow = ceiling(numPlots/cols))
  }
  if (numPlots==1) {
    # Single plot: no grid machinery needed.
    print(plots[[1]])
  } else {
    # Set up the page
    grid.newpage()
    pushViewport(viewport(layout = grid.layout(nrow(layout), ncol(layout))))
    # Make each plot, in the correct location
    for (i in 1:numPlots) {
      # Get the i,j matrix positions of the regions that contain this subplot
      matchidx <- as.data.frame(which(layout == i, arr.ind = TRUE))
      print(plots[[i]], vp = viewport(layout.pos.row = matchidx$row,
                                      layout.pos.col = matchidx$col))
    }
  }
}
# Fit the two-lobe dvm bias model (see OrientBias) to all trials pooled,
# then refit per subject starting from the pooled optimum.
# Returns a data.table keyed by sbjId with columns k0, b0, k90, b90.
FitBias = function(trial){
  #fit dvm for the whole dataset-------------------------
  domain = 180              # orientation period in degrees
  stretch = 2*pi/domain     # degrees -> radians on dvm's 2*pi domain
  # Starting values: both lobes peaking near 15 deg with height -4
  # (raToKb converts a peak location/height into dvm's (k, b)).
  kb0Initial = raToKb(15*stretch, -4)
  kb90Initial = raToKb(15*stretch, -4)
  param0 = c(unlist(kb0Initial), unlist(kb90Initial))
  res = optim(param0, OrientCorrectLoss,
              orient = GetOrientation(trial$stimulus),
              bias = trial$error,
              stretch = stretch,
              norm = "L2")
  raFit0 = kbToRa(unname(res$par[1]), unname(res$par[2]))
  raFit90 = kbToRa(unname(res$par[3]), unname(res$par[4]))
  # NOTE(review): the ( ) wrappers look intended to print these pooled-fit
  # summaries, but inside a function they have no visible effect.
  (root0 = raFit0$root/stretch)
  (amplitude0 = -raFit0$amplitude)
  (root90 = raFit90$root/stretch)
  (amplitude90 = -raFit90$amplitude)
  #fit for each subject, warm-started at the pooled optimum----------
  resAll = res
  param0 = resAll$par
  sbjBiasT = trial[, as.list(optim(param0, OrientCorrectLoss,
                                   orient = GetOrientation(stimulus),
                                   bias = error,
                                   stretch = stretch,
                                   norm = "L2")$par),
                   keyby = .(sbjId)]
  cols = c('k0', 'b0', 'k90', 'b90')
  setnames(sbjBiasT, 2:5, cols)
  return(sbjBiasT)
}
FetchBias = function(trial, sbjBiasT){
domain = 180
stretch = 2*pi/domain
cols = c('k0', 'b0', 'k90', 'b90')
trial[, sbjIdTemp:=sbjId] #for disambiguity
trial[, bias:=OrientBias(GetOrientation(stimulus),
unlist(sbjBiasT[.(sbjIdTemp[1]), cols,
with=FALSE]),
stretch),
by = .(sbjId)]
} |
60e822e4a227d55bf349bdf3b414e10ff59fab73 | 3b936bab003c7d8ed6c29ab1959f2ca7f592e364 | /load.data.R | 593d22738c80e5279c1fa2356c974fc8f0e2b7d0 | [] | no_license | david-duverle/regularisation-path-following | 4da6181f5d8acf63b0e357c45dbd961cd1c6429f | d47e568724b5fda938c0a3fc2519947c858d5e56 | refs/heads/master | 2021-01-01T17:43:30.558309 | 2013-10-18T01:08:54 | 2013-10-18T01:08:54 | 10,049,135 | 2 | 0 | null | null | null | null | UTF-8 | R | false | false | 7,218 | r | load.data.R | source("itemset.coxpath.R");
# first load data using one of the example below, then run itemset.coxpath() or run.cv()
### Lymphoma:
load.data.lymphoma.2002 <- function(location = 'data/lymphoma.2002/', filter.na = F, average.gene.exp = T) {
survival = read.csv(paste(location, "survival.csv", sep=''), header = T)
gene.exps = t(as.matrix(read.csv(paste(location, "exprs.preprocessed.csv", sep=''), header=FALSE)))
platform = read.csv(paste(location, "geneName.csv", sep=''), header = T)
gene.symbols = cbind(platform[,1], apply(platform, 1, function(line) { elems = strsplit(as.character(line[2]), "|", fixed=T)[[1]]; if(elems[2] != '') elems[2] else elems[4]; }))
colnames(gene.symbols) = c("ID", "Gene.Symbol")
empty.sym = (gene.symbols[,'Gene.Symbol'] == '')
gene.symbols[empty.sym, 'Gene.Symbol'] = paste('pb', gene.symbols[empty.sym, 'ID'], sep='')
if(average.gene.exp) {
unique.gene.symbols = unique(gene.symbols[,'Gene.Symbol']);
unique.gene.exps = matrix(0, nrow(gene.exps), length(unique.gene.symbols))
for(i in 1:length(unique.gene.symbols)) {
lines = which(gene.symbols[, 'Gene.Symbol'] == unique.gene.symbols[i])
unique.gene.exps[, i] = apply(gene.exps[,lines, drop=F], 1, mean)
}
gene.exps = unique.gene.exps
gene.symbols = cbind(1:length(unique.gene.symbols), unique.gene.symbols)
colnames(gene.symbols) = c("ID", "Gene.Symbol")
}
gene.symbols = as.data.frame(gene.symbols)
data.lymphoma.2002 = data.frame(x = I(gene.exps), time = as.numeric(survival$survival), status = as.numeric(survival$event))
return (list(data.lymphoma.2002, gene.symbols))
}
### Breast cancer:
load.data.bc.2002 <- function(location = 'data/bc.2002/', filter.na = F, average.gene.exp = T) {
survival = read.csv(paste(location, "survival.csv", sep=''), header = T)
gene.exps = t(as.matrix(read.csv(paste(location, "exprs.preprocessed.csv", sep=''), header=FALSE)))
platform = read.csv(paste(location, "geneName.csv", sep=''), header = F, col.names = c("ID", "Gene.Symbol"))
gene.symbols = cbind(platform[,1], apply(platform, 1, function(line) { if(! is.na(line[2]) && line[2] != '') line[2] else line[1]; }))
colnames(gene.symbols) = c("ID", "Gene.Symbol")
if(average.gene.exp) {
unique.gene.symbols = unique(gene.symbols[,'Gene.Symbol']);
unique.gene.exps = matrix(0, nrow(gene.exps), length(unique.gene.symbols))
for(i in 1:length(unique.gene.symbols)) {
lines = which(gene.symbols[, 'Gene.Symbol'] == unique.gene.symbols[i])
unique.gene.exps[, i] = apply(gene.exps[,lines, drop=F], 1, mean)
}
gene.exps = unique.gene.exps
gene.symbols = cbind(1:length(unique.gene.symbols), unique.gene.symbols)
colnames(gene.symbols) = c("ID", "Gene.Symbol")
}
gene.symbols = as.data.frame(gene.symbols)
data.bc.2002 = data.frame(x = I(gene.exps), time = as.numeric(survival$TIMEsurvival), status = as.numeric(survival$EVENTdeath))
return(list(data.bc.2002, gene.symbols))
}
### Data05:
load.data.05 <- function(location = 'data/data05/', load.age.data = T, filter.na = F, average.gene.exp = F) {
gene.exps = t(as.matrix(read.csv(paste(location, "exprs.csv", sep=''), header=FALSE)))
platform = read.table(paste(location, "GPL1875.annot", sep=''), header=TRUE, skip=22, sep="\t", quote = "", comment.char="")
gene.symbols = platform[,c("GenBank.Accession", "Gene.symbol")]
colnames(gene.symbols) = c("ID", "Gene.Symbol")
gene.symbols$ID = as.character(gene.symbols$ID)
gene.symbols$ID[which(gene.symbols$ID == "")] = paste("?", which(gene.symbols$ID == ""), sep="")
# temp = as.character(gene.symbols$Gene.Symbol)
# temp[temp == ""] = paste("pb", gene.symbols$ID[temp == ""], sep="")
# gene.symbols$Gene.Symbol = temp
if(average.gene.exp) {
unique.gene.symbols = unique(gene.symbols[,'Gene.Symbol']);
unique.gene.exps = matrix(0, nrow(gene.exps), length(unique.gene.symbols))
for(i in 1:length(unique.gene.symbols)) {
lines = which(gene.symbols[, 'Gene.Symbol'] == unique.gene.symbols[i])
unique.gene.exps[, i] = apply(gene.exps[,lines, drop=F], 1, function(x) { mean(x, na.rm=T) })
}
gene.exps = unique.gene.exps
gene.symbols = cbind(1:length(unique.gene.symbols), as.data.frame(unique.gene.symbols))
colnames(gene.symbols) = c("ID", "Gene.Symbol")
}
gene.symbols = as.data.frame(gene.symbols)
data.05 = data.frame(x = I(gene.exps), time = as.numeric(read.csv(paste(location, "survival.csv", sep=''), header = FALSE)$V1), status = as.numeric(read.csv(paste(location, "event.csv", sep=''), header = FALSE)$V1))
if(load.age.data) {
data.05$extra.features = cbind(read.table(paste(location, "withOrWithoutMYCNAmplification.csv", sep='')) > 0, read.table(paste(location, "age.csv", sep='')) >= 120, read.table(paste(location, "age.csv", sep='')) < 12)
colnames(data.05$extra.features) = c("MYCN-amp", "over-10yo", "under-1yo")
}
else {
data.05$extra.features = cbind(read.table(paste(location, "withOrWithoutMYCNAmplification.csv", sep='')) > 0)
colnames(data.05$extra.features) = c("MYCN-amp")
}
if(filter.na) {
na.filtered = !probe.contains.na(data.05$x)
data.05.na.filtered = list(x = data.05$x[, na.filtered], time = data.05$time, status = data.05$status, extra.features = data.05$extra.features)
gene.symbols = gene.symbols[na.filtered, , drop = F]
return(list(data.05.na.filtered, gene.symbols))
}
else
return(list(data.05, gene.symbols))
}
# source('coxpath.R'); train.test.05 = training.testing.coxpath(data.05, max.steps=200, trace=T, deviation.threshold = 1.5);
### Data02:
load.data.02 <- function(location = 'data/data02/', average.gene.exp = T) {
gene.exps = t(as.matrix(read.csv(paste(location, "exprs.csv", sep=''), header=FALSE)))
platform = read.csv(paste(location, "geneName.csv", sep=''), header=F)
gene.symbols = cbind(paste("?", seq(length(platform$V1)), sep=""), as.character(platform$V1))
colnames(gene.symbols) = c("ID", "Gene.Symbol")
# gene.symbols[gene.symbols[,'Gene.Symbol'] == '', 'Gene.Symbol'] = gene.symbols[gene.symbols[,'Gene.Symbol'] == '', 'ID']
if(average.gene.exp) {
unique.gene.symbols = unique(gene.symbols[,'Gene.Symbol']);
unique.gene.exps = matrix(0, nrow(gene.exps), length(unique.gene.symbols))
for(i in 1:length(unique.gene.symbols)) {
lines = which(gene.symbols[, 'Gene.Symbol'] == unique.gene.symbols[i])
unique.gene.exps[, i] = apply(gene.exps[,lines, drop=F], 1, mean)
}
gene.exps = unique.gene.exps
gene.symbols = cbind(1:length(unique.gene.symbols), unique.gene.symbols)
colnames(gene.symbols) = c("ID", "Gene.Symbol")
}
gene.symbols = as.data.frame(gene.symbols)
data.02 = data.frame(x = I(gene.exps), time = as.numeric(read.csv(paste(location, "survivalB.csv", sep=''), header = FALSE)$V1), status = as.numeric(read.csv(paste(location, "eventB.csv", sep=''), header = FALSE)$V1))
data.02$extra.features = cbind(read.table(paste(location, "withOrWithoutMYCNAmplification.csv", sep='')) > 0, read.table(paste(location, "age.csv", sep='')) > 3650, read.table(paste(location, "age.csv", sep='')) <= 365)
colnames(data.02$extra.features) = c("MYCN-amp", "over-10yo", "under-1yo")
data.02$extra.features[is.na(data.02$extra.features)] = FALSE
return(list(data.02, gene.symbols))
}
|
3d60959ec7501bab0849112d09f8b66c22714a3a | dc3642ea21337063e725441e3a6a719aa9906484 | /DevInit/IATI/R/iati-geo-test.R | 38ae5f13cb661e044ed266990974a4691fc3da81 | [] | no_license | akmiller01/alexm-util | 9bbcf613384fe9eefd49e26b0c841819b6c0e1a5 | 440198b9811dcc62c3eb531db95abef8dbd2cbc7 | refs/heads/master | 2021-01-18T01:51:53.120742 | 2020-09-03T15:55:13 | 2020-09-03T15:55:13 | 23,363,946 | 0 | 7 | null | null | null | null | UTF-8 | R | false | false | 574 | r | iati-geo-test.R | list.of.packages <- c("sp","rgdal","leaflet","data.table","ggplot2","scales","rgeos","maptools","reshape2")
new.packages <- list.of.packages[!(list.of.packages %in% installed.packages()[,"Package"])]
if(length(new.packages)) install.packages(new.packages)
lapply(list.of.packages, require, character.only=T)
wd = "/home/alex/git/iati-geo/output/"
setwd(wd)
agg <- fread("iati_unfiltered_agg.csv")
v1 = subset(agg,location_coordinates_lat!="")
v2 = subset(agg,location_point_pos!="")
rm(agg)
coordinates(v1)=~location_coordinates_long+location_coordinates_lat
plot(v1)
v2 |
2c2b654c12be50cd73ec4fa790f6eea9316d075d | 79f5edebf760abb8ccc4e58f246586ad71f5eb6c | /man/add_attention_check.Rd | 99c6075785ed591eb39228af0adca3cbb7beedd2 | [
"MIT"
] | permissive | rwash/surveys | 3b09bd12633ed5322b35224cc79d90cc5b2443d7 | 4cded70f3aabd097f2f1e9ccb75a1c9d1639f40f | refs/heads/master | 2023-04-29T05:50:11.471493 | 2023-04-20T13:41:56 | 2023-04-20T13:41:56 | 31,321,921 | 1 | 0 | null | null | null | null | UTF-8 | R | false | true | 677 | rd | add_attention_check.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/question_types.R
\name{add_attention_check}
\alias{add_attention_check}
\title{Attention Check questions}
\usage{
add_attention_check(q_name, valid)
}
\arguments{
\item{q_name}{The column name of the attention check question}
\item{valid}{What value is to be considered valid}
}
\description{
Tells the question auto-detection system that a specific question is an attention check question
}
\details{
Attention check questions are questions whose goal is to determine whether the subject is
paying close attention to questions. It has exactly one right answer, and any other answer
is invalid.
}
|
05cb69be500fe50d4ff5b40c334b4a6d1fcf0aa5 | 2a7e77565c33e6b5d92ce6702b4a5fd96f80d7d0 | /fuzzedpackages/dexter/tests/testthat/test_data_selection.R | db84b12d3092b0e90a9e8e77ebc68a61324de4ce | [] | no_license | akhikolla/testpackages | 62ccaeed866e2194652b65e7360987b3b20df7e7 | 01259c3543febc89955ea5b79f3a08d3afe57e95 | refs/heads/master | 2023-02-18T03:50:28.288006 | 2021-01-18T13:23:32 | 2021-01-18T13:23:32 | 329,981,898 | 7 | 1 | null | null | null | null | UTF-8 | R | false | false | 10,492 | r | test_data_selection.R | context('test data selection')
library(dplyr)
library(DBI)
# to do: test what happens if integers or factors are used as booklet/perdon/item id's
expect_no_error = function(object, info=NULL) expect_error(object, regexp=NA, info=info)
# equivalent
expect_equal_respData = function(a,b, info='equal respData', ignore_booklet_levels = TRUE)
{
a_ = a
prep = function(rd)
{
if(ignore_booklet_levels)
{
rd$x$booklet_id = as.integer(rd$x$booklet_id)
rd$design$booklet_id = as.integer(rd$design$booklet_id)
}
rd$x$person_id = as.character(rd$x$person_id)
rd$design = as.data.frame(mutate_if(rd$design, is.factor,as.character))
rd$x = rd$x %>%
mutate_if(is.factor,as.character) %>%
arrange(person_id, booklet_id, item_id) %>%
as.data.frame()
rd
}
a = prep(a)
b = prep(b)
expect_equal(a$summarised, b$summarised, info=info)
expect_equal(a$design %>% arrange(booklet_id,item_id),
a$design %>% arrange(booklet_id,item_id),
info=info)
expect_true(setequal(colnames(a$x), colnames(b$x)), info=info)
expect_equal(a$x, b$x[,colnames(a$x)], info=info)
invisible(a_)
}
# to do: check no grouping etc
expect_valid_respData = function(respData, msg='respData')
{
expect_true(is.integer(respData$x$person_id) || is.factor(respData$x$person_id),
info = sprintf("%s - x$person_id is not a factor but '%s'",
info, typeof(respData$x$person_id)))
expect_true(is.factor(respData$x$booklet_id),
info = sprintf("%s - x$booklet_id is not a factor but '%s'",
info, typeof(respData$x$booklet_id)))
expect_true(is.factor(respData$design$booklet_id),
info = sprintf("%s - design$booklet_id is not a factor but '%s'",
info, typeof(respData$design$booklet_id)))
expect_true(is.factor(respData$design$item_id),
info = sprintf("%s - design$item_id is not a factor but '%s'",
info, typeof(respData$design$item_id)))
expect_true(is.integer(respData$x$booklet_score),
info = sprintf("%s - x$isumSscore is not an integer but '%s'",
info, typeof(respData$x$item_id)))
# to do: check factor levels
if(!respData$summarised)
{
expect_true(is.factor(respData$x$item_id),
info = sprintf("%s - x$item_id is not a factor but '%s'",
info, typeof(respData$x$item_id)))
expect_true(is.integer(respData$x$item_score),
info = sprintf("%s - x$item_score is not an integer but '%s'",
info, typeof(respData$x$item_id)))
expect_false(is.unsorted(as.integer(respData$person_id)), info=sprintf("%s - person_id is unsorted", info))
split(as.integer(respData$x$booklet_id), respData$x$person_id) %>%
lapply(is.unsorted) %>%
unlist() %>%
any() %>%
expect_false(info=sprintf("%s - (person_id, booklet_id) is unsorted", info))
respData$x %>%
group_by(person_id, booklet_id) %>%
mutate(booklet_score2 = sum(item_score)) %>%
ungroup() %>%
summarise(res = all(booklet_score == booklet_score2)) %>%
pull(res) %>%
expect_true(info=sprintf("%s - booklet_score incorrect", info))
}
invisible(respData)
}
test_that('merging works',
{
# a set connected over persons only
rsp = tibble(person_id = rep(rep(1:50,each=20),2),
booklet_id = rep(1:2, each=1000),
item_id = c(rep(1:20, 50),rep(21:40, 50)),
item_score=sample(0:3,2000,replace=TRUE))
# also make a database
rules = distinct(rsp, item_id, item_score) %>%
mutate(response=item_score)
db = start_new_project(rules, ':memory:')
add_response_data(db, rename(rsp, response=item_score))
get_resp_data(db) %>%
expect_valid_respData() %>%
expect_equal_respData(
expect_valid_respData(get_resp_data(rsp)))
expect_error({f=fit_enorm(db)},'not connected')
expect_error({f=fit_enorm(rsp)},'not connected')
# non booklet safe merge, should still not be connected
expect_error({f=fit_enorm(db, item_id!='3')},'not connected')
expect_error({f=fit_enorm(rsp, item_id!='3')},'not connected')
# merge over booklets (not fit because data is random)
a = get_resp_data(db,merge_within_person=TRUE) %>%
expect_valid_respData() %>%
expect_equal_respData(
expect_valid_respData(get_resp_data(rsp, merge_within_person=TRUE)))
expect_length(levels(a$design$booklet_id),1)
expect_equal(
rsp %>%
group_by(person_id) %>%
summarise(booklet_score=sum(item_score)) %>%
ungroup() %>%
inner_join(get_resp_data(rsp,merge_within_person=TRUE,summarised=TRUE)$x, by=c('person_id','booklet_score')) %>%
NROW(),
50)
close_project(db)
# a set that should not be mergable
rsp = tibble(person_id = rep(rep(1:50,each=20),2),
booklet_id = rep(1:2, each=1000),
item_id = c(rep(1:20, 50),rep(11:30, 50)),
item_score=sample(0:3,2000,replace=TRUE))
# also make a database
rules = distinct(rsp, item_id, item_score) %>%
mutate(response=item_score)
db = start_new_project(rules, ':memory:')
add_response_data(db, rename(rsp, response=item_score))
expect_no_error(get_resp_data(rsp, merge_within_person=FALSE))
expect_no_error(get_resp_data(db, merge_within_person=FALSE))
expect_error(get_resp_data(rsp, merge_within_person=TRUE),'more than once')
expect_error(get_resp_data(db, merge_within_person=TRUE),'more than once')
expect_error(get_resp_data(rsp, merge_within_person=TRUE,summarised=TRUE),'more than once')
expect_error(get_resp_data(db, merge_within_person=TRUE,summarised=TRUE),'more than once')
close_project(db)
})
# to also do: check parms and profiles
test_that('input data.frames survives', {
# do new project, guarantees nice ordering
db = start_new_project(verbAggrRules, ":memory:")
add_booklet(db, verbAggrData, "agg")
r = get_responses(db)
r2 = rlang::duplicate(r)
v=get_resp_data(r,summarised=TRUE)
v=get_resp_data(r,summarised=FALSE)
expect_identical(r,r2, label="get_resp_data should not mutilate input")
v=get_resp_data(r, summarised=TRUE, protect_x=FALSE)
expect(!all(r$item_score==r2$item_score), 'when protect_x is false we would like some input mutilation')
close_project(db)
})
test_that('get responses works correctly with predicates',
{
db = open_project('../verbAggression.db')
#two ways to do the same
r1 = get_responses(db, item_id %like% 'S1%')
r2 = get_responses(db, grepl('S1', item_id))
expect_true(dexter:::df_identical(r1, r2))
close_project(db)
})
test_that('sql translation',
{
trans = function(x, vars=NULL, variant='sqlite')
{
env = rlang::caller_env()
p = eval(substitute(quote(x)))
dexter:::translate_sql(dexter:::partial_eval(p, env=env, vars=vars),variant=variant)
}
a=3
expect_equal(trans(!!a==b, 'a'), '3 = "b"')
expect_equal(trans(local(a)==b, 'a'), '3 = "b"')
expect_equal(trans(a==b), '3 = "b"')
expect_equal(trans(a==b, 'a'), '"a" = "b"')
expect_equal(trans(a == paste(b,'c'), 'a','sqlite'), "\"a\" = \"b\"||' '||'c'")
expect_equal(trans(a == paste(b,'c'), 'a', 'ansi'), "\"a\" = CONCAT_WS(' ',\"b\",'c')")
# get
v = 'gender'
expect_equal(trans(get(v)=='bla','gender'),
'"gender" = \'bla\'')
# named and unnamed arguments
expect_equal(trans(b==substr(a,4,7),c('a','b')),
trans(b==substr(a,stop=7,4),c('a','b')))
#missing arguments
expect_error(trans(between(a,b),c('a','b')))
# named vector
b = c(blaat=1,geit=2)
expect_equal(trans(2 %in% b,'a','ansi'),'TRUE')
expect_equal(trans(a %in% b,'a','ansi'), '"a" in (1,2)')
# ranges
expect_equal(trans(a %in% b:10,c('a','b')), "CAST( \"a\" AS INTEGER) BETWEEN \"b\" AND 10")
#casting
expect_equal(trans(as.character(a),'a'), "CAST( \"a\" AS character )")
expect_equal(trans(as.character(a)), "'3'")
# indexing
a = list(x=5,y=6)
expect_equal(trans(x == a$x, 'x'),'"x" = 5')
expect_equal(trans(x == a[['x']], 'x'),'"x" = 5')
# combined c
expect_equal(trans(x %in% c(y,4,c(5,6))), trans(x %in% c(y,4,5,6)))
# substr
expect_equal(trans(quote(x == substr(d,5,6))), '"x" = substr( "d" , 5 , 2 )')
expect_equal(trans(quote(x == substr(d,5,y))), '"x" = substr( "d" , 5 , (1+("y")-(5)) )')
#unsure if we want to automatically unpack lists of length 1
#expect_equal(trans(x == a['x'], 'x'),'"x" = 5')
})
test_that('variable names cross sql',
{
# variable names are lowercase in sql and do not support special characters such as a dot
# We make no effort to support dots and such but we do make an effort to support case mismatch
# If a variable does not exist in the db and does not exist in the environment
# but it does exists in the db with another case, it should work.
db = start_new_project(verbAggrRules, ":memory:", person_properties=list(Gender='<NA>'))
add_booklet(db, verbAggrData, "agg")
expect_message({rsp = get_responses(db,Gender=='Male')},
'Gender.*gender')
rsp1 = get_responses(db,gender=='Male')
# force non sql evaluation by using grepl, use capital G->Gender
expect_message({rsp2 = get_responses(db, grepl('^male',Gender,ignore.case = TRUE))},
'Gender.*gender')
expect_identical(table(rsp$item_score), table(rsp1$item_score),
label='sql capital versus non capital var names, expect equal results. ')
expect_identical(table(rsp1$item_score), table(rsp2$item_score),
label='case mismatch sql non sql should not cause a difference, expect equal results. ')
# test if unknown names fail
a = 1
expect_error({get_responses(db,item_id==a | gndr=='Male')},
"'gndr' not found")
close_project(db)
})
|
ca6fb17fc15ffc95e53945cb4538b1257bf94b69 | fa5f321090b639cba2fe209e65fd86f91ab7c646 | /One Way Anova.R | 439936ddb8ff7b1294ccd73355aaff756dab728c | [] | no_license | Rothgargeert/Statistics | 18552d87c04f6ecaa8c5cb2a793008b1c36b88e5 | 97a9b667548eb3fbaebfa2774a67791dc7cd5f88 | refs/heads/master | 2022-12-22T11:39:29.503898 | 2020-09-27T21:54:26 | 2020-09-27T21:54:26 | 298,381,495 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 710 | r | One Way Anova.R | y1 = c(18.2, 20.1, 17.6, 16.8, 18.8, 19.7, 19.1)
y2 = c(17.4, 18.7, 19.1, 16.4, 15.9, 18.4, 17.7)
y3 = c(15.2, 18.8, 17.7, 16.5, 15.9, 17.1, 16.7)
scores = data.frame(y1, y2, y3)
scores
boxplot(scores)
scores=stack(scores)
names(scores)
scores
oneway.test(values ~ ind, data=scores, var.equal = T)
grades=c(18.2, 20.1, 17.6, 16.8, 18.8, 19.7, 19.1, 17.4,
18.7, 19.1, 16.4, 15.9, 18.4, 17.7, 15.2, 18.8, 17.7,
16.5, 15.9, 17.1, 16.7)
class=c(rep("A",7), rep("B",7), rep("C",7))
school = data.frame(grades, class)
plot(grades~class, data = school)
results = aov(grades ~ class, data = school)
summary(results)
pairwise.t.test(grades, class, p.adjust.method = "bonferroni")
|
a676f94f5af6888c954e887e208de6371490506c | 92cf9455c7a46a4a35d747bf7b42124a1d2054ee | /archive/predict_quality.r | 72d7ebfcd0f9d499afcd911e6e6b8b160318d14d | [] | no_license | JavierQC/spatcontrol | 551cb820f397dfcb0461d9fbf7306aeb32adf90a | a3e77845d355b2dee396623b976e114b9a89c96e | refs/heads/master | 2020-12-31T06:32:10.711253 | 2015-02-24T13:09:18 | 2015-02-24T13:09:18 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 17,201 | r | predict_quality.r | ## use estimates from visualize_sampler.r and other analyses before in full_sampler.r
## to evaluate the quality of the prediction
# in case reloading
library("LaplacesDemon")
library("binom")
# library("truncnorm")
# library("realtimeadapt")
source("morans_functions.r")
source("spam_complement.r")
source("DeltaSampling.r")
source("functions_intercept.r")
#### parameters
max.nb.simul<-100000
PercentHousesRemoved<-5 # % of houses removed
nbRepeat<-20 # nb of test of PercentHousesRemoved houses
randomRemoved<-FALSE # resample PercentHousesRemoved or avoid previously removed (then limit nbRepeat to 100/PercentHousesRemoved)
same.map<-FALSE
subsetFromNew<-FALSE
perBlockSample<-FALSE # Nota: imply randomRemoved if nbRepeat>1
if(perBlockSample){
randomRemoved<-TRUE
}
#### init the kernel_fit_space.r
## general before any simulation
if(!same.map){
## parameters
source("parameters_sampler.r")
GLOBALSETPARAMETERS<-FALSE
use.cofactors<-FALSE
# changes in the map to use:
period<-"nofall.2007"
# get general parameters from file estimated.txt
source("estimated.txt")
Ku<-est.Ku
Kv<-est.Kv
Kc<-1
K<-c(Ku,Kv,Kc);
f<-meanf
T<-meanT
# technique initialisation
source("pseudo_data_generation.r")
Q.est<-QfromfT(dist_mat,AS,SB,f,T);
est.mean.beta<-mean(est.beta)
est.detection<-rep(est.mean.beta,dimension)
use.autostop<-TRUE
adaptOK<-TRUE
}else{
source("estimated.txt")
Ku<-est.Ku
Kv<-est.Kv
Kc<-1
K<-c(Ku,Kv,Kc);
c.val<-est.c.val
f<-meanf
T<-meanT
Q.est<-QfromfT(dist_mat,AS,SB,f,T);
est.detection<-est.beta
}
source("prep_sampling.r")
#### initialisation before each simulation
## choice of the data to be analyzed
# # choice of only one block
# block<-2008
# sel<-which(db$block_num==block)
# choice of everything
# actual subsetting
if(subsetFromNew){
## prior map
## new map
newdata<-read.csv("knownSubset.csv")
newknowns<-newdata[which(newdata$opened == 1),]
db$X<-db$easting
db$Y<-db$northing
block_data<-subsetAround(db,newknowns$unicode,threshold)
}else{
sel<-(1:length(db$status))
block_data<-db[sel,]
}
## technical initialisation
QualityResult<-list()
known<-which(block_data$status<9)# choice only in known status
block_data$TrueStatus<-block_data$status # ref when choice n% of the houses to be set as unknown
nbHousesPerSlice<-ceiling(PercentHousesRemoved*length(known)/100)
nbRepeat<-min(ceiling(length(known)/nbHousesPerSlice),nbRepeat)
if(!randomRemoved){
TotalToGuess<-sample(known,min(round(nbRepeat*PercentHousesRemoved*length(known)/100),length(known)))
cat("Will remove",nbRepeat,"times",nbHousesPerSlice,"for a total guessed of",length(TotalToGuess),"among",length(known),"known\n")
}
## ploting the chosen data
par(mfrow=c(2,2))
block_visudata<-block_data$pos
block_visudata[block_data$insp==0]<-0.5
plot_reel(block_data$easting,block_data$northing,block_visudata,base=0,top=1)
# general initialisation according to the chosen dataset
# Nota: the variables don't really need to be reinitialized
dimension<-length(sel)
if(same.map){
w<-est.w[sel]
u<-est.u[sel]
}else{
w<-rep(0,dimension)
u<-rep(0,dimension)
}
bivect<-est.detection[sel]
y<-as.integer(rnorm(length(w),mean=w,sd=1)>0)
Q<-Q.est[sel,sel]
R <- makeRuv(dimension,Q,K);
cholR <- chol.spam(R,memory=list(nnzcolindices=300000));
cholQ<-chol.spam(Q)
if(use.cofactors){
c.comp<-drop(c.map[sel,]%*%c.val)
}
grid.stab<-seq(1,length(w),ceiling(length(w)/5))# values of the field tested for stability, keep 5 values
## calculus
est.yp.b.total<-rep(0,length(block_data$status))
est.w.b.total<-rep(0,length(block_data$status))
est.u.b.total<-rep(0,length(block_data$status))
est.y.b.total<-rep(0,length(block_data$status))
est.sd.w.total<-rep(0,length(block_data$status))
for(numRepeat in 1:nbRepeat){
if(randomRemoved){
if(perBlockSample){
keptAsKnown<-c()
for(idBlock in levels(as.factor(block_data$block_num))){
KnownInBlock<-intersect(which(block_data$block_num==idBlock),known)
nbKnownInBlock<-length(KnownInBlock)
if(nbKnownInBlock>0){
nbMaxToKeep<-max(round((100-PercentHousesRemoved)/100*nbKnownInBlock),1)
linesHouses<-sample(KnownInBlock,nbMaxToKeep)
keptAsKnown<-c(keptAsKnown,linesHouses)
cat("keep:",nbMaxToKeep,"out of",nbKnownInBlock,"in block",idBlock,"\n")
}
}
ToGuess<-not.in(keptAsKnown,dimension)
TotalToGuess<-ToGuess
cat("will keep as known",length(keptAsKnown),"out of",length(known),"known in",dimension,"initially, leaving",length(ToGuess),"to predict\n")
}else{
ToGuess<-sample(known,round(PercentHousesRemoved*length(known)/100))
}
}else{
lowBound<-1+(numRepeat-1)*nbHousesPerSlice
upBound<-min(lowBound+nbHousesPerSlice-1,length(TotalToGuess))
ToGuess<-TotalToGuess[lowBound:upBound]
cat("\ntesting",lowBound,"to",upBound,"of",length(TotalToGuess),"\n")
}
block_data$status<-block_data$TrueStatus
block_data$status[ToGuess]<-rep(9,length(ToGuess))
plot(block_data$easting,block_data$northing,col=(block_data$block_num%%5+1))
sel<-which(block_data$status!=9)
lines(block_data$easting[sel],block_data$northing[sel],col="blue",pch=13,type="p")
# technical inititialisation
starter<-1
zposb<-which(block_data$status==1)
znegb<-which(block_data$status==0)
zNAb<-which( block_data$status==9)
ItTestNum<- gibbsit(NULL,NminOnly=TRUE);
beginEstimate<-1
AdaptOK<-TRUE
nbsimul<-ItTestNum+beginEstimate
nbtraced<-2*(2+length(grid.stab))+4
spacer<-(2+length(grid.stab))
sampledb<-as.matrix(mat.or.vec(nbsimul+1,nbtraced));
sampledb[1,1]<-mean(u)
sampledb[1,2]<-sd(u)
sampledb[1,3:spacer]<-u[grid.stab]
sampledb[1,spacer+1]<-mean(w)
sampledb[1,spacer+2]<-sd(u)
sampledb[1,(spacer+3):(2*spacer)]<-w[grid.stab]
LLHu<-llh.ugivQ(dimension,u,Q,K[1])
sampledb[1,(2*spacer)+1]<-llh.ugivQ(dimension,u,Q,K[1])
sampledb[1,(2*spacer)+2]<-llh.ygivw(y,w);
sampledb[1,(2*spacer)+3]<-llh.zgivy(y,zposb,znegb,bivect);
sampledb[1,(2*spacer)+4]<-llh.zgivw(w,zposb,znegb,bivect);
# Rprof()
source("kernel_fit_space.r")
# Rprof(NULL)
## checking on one block
# # par(mfrow=c(2,1))
# # plot_reel(block_data$easting,block_data$northing,block_visudata,base=0,top=1)
# u_pred.b<-pnorm(est.u.b,0,1)
# plot_reel(db$easting[sel],db$northing[sel],pnorm(c.comp,0,1))
# text(block_data$easting,block_data$northing,labels=round(pnorm(c.comp,0,1),2),pos=4)
# plot_reel(db$easting[sel],db$northing[sel],u_pred.b)
# text(block_data$easting,block_data$northing,labels=round(u_pred.b,2),pos=4)
# ## general analysis
# par(mfrow=c(2,2))
# hist(est.w)
# hist(est.w[db$status==9],add=T,col=4)
#
# hist(pnorm(est.w))
# hist(pnorm(est.w[db$status==9]),add=T,col=4)
#
# hist(est.w.b)
# hist(est.w.b[db$status==9],add=T,col=4)
#
# hist(pnorm(est.w))
# hist(pnorm(est.w[db$status==9]),add=T,col=4)
# est.detection.b<-inspector[sel,]%*%est.beta
# plot.prob.to.observed((pnorm(est.w.b,0,1)*est.detection.b)[ToGuess],block_data$TrueStatus[ToGuess])
# plot(est.w.b[ToGuess],(est.w[sel])[ToGuess])
# abline(a=0,b=1)
# hist(est.yp.b[ToGuess])
# plot.prob.to.observed(u_pred*est.detection,visudata)
# plot.prob.to.observed(u_pred*est.detection,visudata)
# ## quality of prediction for to be guessed
# QualityResult[[numRepeat]]<-plot.prob.to.observed(est.yp.b[ToGuess],block_data$TrueStatus[ToGuess],xlim=c(0,1),ylim=c(0,1))
est.yp.b.total[ToGuess]<-est.yp.b[ToGuess]
est.yp.b.total[ToGuess]<-est.yp.b[ToGuess]
est.u.b.total[ToGuess]<-est.u.b[ToGuess]
est.w.b.total[ToGuess]<-est.w.b[ToGuess]
est.sd.w.total[ToGuess]<-est.sd.w[ToGuess]
est.y.b.total[ToGuess]<-est.y.b[ToGuess]
}
save.image("EndPredictLoop.img")
# dump(c("QualityResult","est.yp.b.total","est.u.b.total","est.w.b.total","est.y.b.total"),file="QualityResults.r")
dump(c("est.yp.b.total","est.u.b.total","est.w.b.total","est.y.b.total"),file="QualityResults.r")
#### analysis of the results
# ## transform the initial big list in digestible chunks
# nbRepeat<-length(QualityResult)
# QualityClasses<-QualityResult[[1]][[1]]
# asMatQualityResult<-mat.or.vec(nbRepeat,length(QualityResult[[1]][[1]]))
# asMatCountPos<-mat.or.vec(nbRepeat,length(QualityResult[[1]][[1]]))
# asMatCountTot<-mat.or.vec(nbRepeat,length(QualityResult[[1]][[1]]))
# for(numRepeat in 1:nbRepeat){
# asMatQualityResult[numRepeat,]<-QualityResult[[numRepeat]][[2]]
# asMatCountPos[numRepeat,]<-QualityResult[[numRepeat]][[3]]
# asMatCountTot[numRepeat,]<-QualityResult[[numRepeat]][[4]]
# }
# CountPos<-apply(asMatCountPos,2,sum)
# CountTot<-apply(asMatCountTot,2,sum)
# ObservedRatePos<-CountPos/CountTot
#
# ## plot
# # base plot
# par(mfrow=c(1,2))
# plot(QualityClasses,ObservedRatePos,xlim=c(0,1),ylim=c(0,1))
# abline(a=0,b=1)
# # confidence interval
# if(randomRemoved){
# # simply use the standard deviation
# sdQualityResult<-rep(0,length(QualityResult[[1]][[1]]))
# for(numPoint in 1:length(QualityResult[[1]][[1]])){
# sdQualityResult[numPoint]<-sd(asMatQualityResult[,numPoint],na.rm=TRUE)
# }
# errbar(QualityClasses,ObservedRatePos,ObservedRatePos+sdQualityResult,ObservedRatePos-sdQualityResult,add=TRUE)
# }else{
# # use a binomial confidence interval as we look at the confidence interval of
# # the probability underlying CountPos 1 among CountTot draws
# BinomAnalysis<-binom.confint(x=CountPos,n=CountTot,conf.level=0.95,methods="exact")
# errbar(QualityClasses,BinomAnalysis$mean,BinomAnalysis$upper,BinomAnalysis$lower,add=T)
# }
# making the same for the omitted data
# making the same for the omitted data, taking into account only the same block
#### new better method, using directly est.prob.b.pos (even if equivalent)
# --- Calibration of the predicted probabilities ---
# Bin households by predicted probability of an observed positive and compare,
# for each bin, the mean prediction with the observed infestation rate.
# estimated prob for each house
# NOTE(review): est.yp.b.total and est.detection are computed earlier in this
# file (not visible in this chunk); assumed to be per-house vectors -- confirm.
est.prob.b.pos<-est.yp.b.total*est.detection
# number of equal-width probability bins on [0, 1]
nbClasses<-10
# per-bin accumulators, filled position-by-position in the loop below
meanQualityByClass<-c()
meanProbInf<-c()
CountPos2<-c()
CountTot2<-c()
source("functions_intercept.r")
for(numClass in 1:nbClasses){
# houses whose prediction falls in bin numClass, restricted to the hold-out
# set TotalToGuess (defined earlier in the file)
selClass<-intersect(which(est.prob.b.pos<numClass/nbClasses & est.prob.b.pos>= (numClass-1)/nbClasses),TotalToGuess)
# mean predicted probability within the bin (x-axis of the calibration plot)
meanQualityByClass[numClass]<-mean(est.prob.b.pos[selClass])
# observed positives in the bin (TrueStatus == 1)
CountPos2[numClass]<-length(which(block_data$TrueStatus[selClass]==1))
# TrueStatus == 9 apparently codes "not observed"; keep only observed houses
TruelyObserved<-intersect(which(block_data$TrueStatus!=9),selClass)
CountTot2[numClass]<-length(TruelyObserved)
# observed infestation rate in the bin (y-axis of the calibration plot)
meanProbInf[numClass]<-mean(block_data$TrueStatus[TruelyObserved])
}
# calibration plot: observed vs predicted rate per bin, with the y = x line
dev.new(width=3.5,height=3.5)
op <- par(mar = c(4,4,0.5,0.5))
plot(meanQualityByClass,meanProbInf,xlim=c(0,1),ylim=c(0,1),xlab="Predicted",ylab="Observed",asp=1,xaxs="i",yaxs="i")
abline(a=0,b=1)
# exact binomial CI on each bin's observed rate (binom.confint and errbar are
# presumably from the binom and Hmisc packages loaded earlier -- confirm)
BinomAnalysis<-binom.confint(x=CountPos2,n=CountTot2,conf.level=0.95,methods="exact")
errbar(meanQualityByClass,BinomAnalysis$mean,BinomAnalysis$upper,BinomAnalysis$lower,add=T)
# map of the observation data (plot_reel is a project helper, sourced earlier)
dev.new(width=4.5,height=4.5)
op <- par(mar = c(4,4,1,1))
plot_reel(block_data$easting,block_data$northing,block_visudata,base=0,top=1)
# dev.print(device=pdf,"predict_quality.pdf")
# nice plotting of the predicted probability surface
zNoNA<-c(zneg,zpos)
plot_reel(block_data$easting,block_data$northing,est.prob.b.pos,base=0,top=0.7)
# overlay the houses whose true status is known (0 or 1); hold-out houses are
# recoded to 8 so the selection below excludes them
block_data_visu_used<-block_data$TrueStatus
block_data_visu_used[TotalToGuess]<- 8
sel<-block_data_visu_used==1 | block_data_visu_used==0
lines(block_data$easting[sel],block_data$northing[sel],col="blue",pch=13,type="p")
# kernel-smoothed surface of the predicted probability on a 150x150 grid
# NOTE(review): T=T passes TRUE to an argument named T of grid.from.kernel;
# Kernel, f and threshold come from earlier in the file
out<-grid.from.kernel(block_data$easting,block_data$northing,est.prob.b.pos,Kernel,T=T,f,steps=150,tr=threshold)
dist.weight<-matrix(as.vector(as.matrix(out$z)),nrow=length(out$xs))
# heat map and 3D perspective view of the smoothed surface, side by side
par(mfrow=c(1,2))
image(x=out$xs,y=out$ys,z=dist.weight,asp=1,col=heat.colors(100),xlab="x (m)",ylab="y (m)")
pmat<-persp(out$xs,out$ys,dist.weight,asp=1,zlim=c(0,1),
col=make.col.persp(dist.weight,color.function=heat.colors),
phi=20,theta=-30,
border=NULL, # set cell borders: NULL->border ; NA-> no limits
xlab="x",ylab="y",zlab="influence")
#### plotting data/prediction/quality of prediction
# Three-panel summary figure (observed data, predicted probabilities, and the
# per-bin calibration plot computed above), then saved to a PDF.
# recode TrueStatus == 9 ("not observed") to 0 for visualization
block_visudata[block_data$TrueStatus==9]<-0
dev.new(width=12,height=4)
op <- par(mar = c(5,5,3,3),mfrow=c(1,3),cex.lab=1.5)
plot_reel(block_data$easting,block_data$northing,block_visudata,base=0,top=1,main="Infested households")
plot_reel(block_data$easting,block_data$northing,est.prob.b.pos,base=0,top=0.7,main="Prediction of infestation")
plot(meanQualityByClass,meanProbInf,xlim=c(0,1),ylim=c(0,1),xlab="Predicted observation prevalence",ylab="Observed infestation prevalence",asp=1,xaxs="i",yaxs="i",main="Quality of prediction")
abline(a=0,b=1)
errbar(meanQualityByClass,BinomAnalysis$mean,BinomAnalysis$upper,BinomAnalysis$lower,add=TRUE)
# FIX: the original called printdev(), which does not exist in base R or the
# packages used here; dev.print() is the grDevices function that copies the
# active device to a file (same call as the commented-out line earlier in this
# file). Also spelled add=TRUE instead of the reassignable alias T.
dev.print(device=pdf,"QualityPrediction.pdf")
## plot map of the predicted quality of the prediction
dev.new(width=12,height=4)
par(mfrow=c(1,3))
plot_reel(block_data$easting,block_data$northing,block_visudata,base=0,top=1,main="Infested households")
plot_reel(block_data$easting[TotalToGuess],block_data$northing[TotalToGuess],est.prob.b.pos[TotalToGuess],base=0,top=est.mean.beta,main="Prediction of infestation")
# est.sd.w.total is used below as a per-house measure of prediction
# uncertainty (sd of the latent variable w -- defined earlier in the file)
plot_reel(block_data$easting[TotalToGuess],block_data$northing[TotalToGuess],est.sd.w.total[TotalToGuess],base=-5,top=5,main="estimated quality of the prediction")
# Credible interval for the estimated probability of finding something:
# P(N(w, 1) > 0) evaluated at w, and at w -/+ one sd of w for the bounds
est.prob.inf.fromw<-pnorm(0,est.w.b.total,1,lower.tail=FALSE)
est.prob.inf.fromw.min<-pnorm(0,est.w.b.total-est.sd.w.total,1,lower.tail=FALSE)
est.prob.inf.fromw.max<-pnorm(0,est.w.b.total+est.sd.w.total,1,lower.tail=FALSE)
plot(est.prob.inf.fromw[TotalToGuess]~est.yp.b.total[TotalToGuess])
lines(est.prob.inf.fromw.max[TotalToGuess]~est.yp.b.total[TotalToGuess],type="p")
lines(est.prob.inf.fromw.min[TotalToGuess]~est.yp.b.total[TotalToGuess],type="p")
abline(a=0,b=1)
# geometric mean of probability of observation
# (geometric mean of the probability assigned to the outcome that actually
# occurred -- a likelihood-style score of the hold-out predictions)
predicted.rates<-est.prob.b.pos[TotalToGuess]
observed<-block_visudata[TotalToGuess]
prob.obs<-rep(-1,length(observed))
prob.obs[observed==1]<-predicted.rates[observed==1]
prob.obs[observed==0]<-(1-predicted.rates[observed==0])
geom.mean.prob.obs<-exp(mean(log(prob.obs)))
# same score restricted to houses with "good" predictions: posterior sd of w
# below its 95th percentile
sd.prediction<-est.sd.w.total[TotalToGuess]
good.pred<-which(sd.prediction<quantile(sd.prediction,prob=0.95))
good.predicted.rates<-predicted.rates[good.pred]
good.observed<-observed[good.pred]
good.prob.obs<-rep(-1,length(good.observed))
good.prob.obs[good.observed==1]<-good.predicted.rates[good.observed==1]
good.prob.obs[good.observed==0]<-(1-good.predicted.rates[good.observed==0])
good.geom.mean.prob.obs<-exp(mean(log(good.prob.obs)))
cat("Prob of good prediction:",geom.mean.prob.obs,"only for small variances:",good.geom.mean.prob.obs,"\n")
# % of positives caught when inspecting only the high-risk fraction of houses
# (prediction above cut.rate)
cut.rate<-10/100
risky<-which(predicted.rates>cut.rate)
nb.pos.caught<-sum(observed[risky])
nb.pos.total<-sum(observed)
cat("by checking",length(risky)/length(predicted.rates)*100,"% of the houses, we would catch",nb.pos.caught,"out of",nb.pos.total,"positives (",100*nb.pos.caught/nb.pos.total,"%)\n")
# R2: coefficient of determination of the 0/1 outcome vs the predicted rate
rsquared<-1-sum((observed-predicted.rates)^2)/sum((observed-mean(observed))^2)
# McFadden pseudo-R2: model log-likelihood over the null (constant-rate) model
prob.obs.null.model<-rep(-1,length(observed))
prob.obs.null.model[observed==1]<-mean(observed)
prob.obs.null.model[observed==0]<-(1-mean(observed))
MacFadden<-sum(log(prob.obs))/sum(log(prob.obs.null.model))
# Adjusted count R2: correct classifications at a 0.5 cutoff, adjusted for
# always guessing the most frequent outcome
num.correct<-length(which(predicted.rates>0.5 & observed==1))+length(which(predicted.rates<=0.5 & observed==0))
num.most.frequent.outcome<-max(length(which(observed==1)),length(which(observed==0)))
adjusted.count<-(num.correct-num.most.frequent.outcome)/(length(observed)-num.most.frequent.outcome)
cat("R2:",rsquared,"; MacFadden:",MacFadden,"; adjusted.count:",adjusted.count,"\n")
# R2 for the predicted vs observed by class:
# bins with no observed house yield NaN means, so drop them first
not.nan<-which(!is.nan(meanProbInf))
rsquared<-1-sum((meanQualityByClass[not.nan]-meanProbInf[not.nan])^2)/sum((meanProbInf[not.nan]-mean(meanProbInf[not.nan]))^2)
# #### quality of prediction by city block
# # get the mean of infestation by block in predicted households
# for(num.block in levels(as.factor(block_data$block_num))){
# predicted.in.this.block<-intersect(which(block_data$block.num),TotalToGuess)
# mean.this.block<-mean(block_data$TrueStatus[predicted.in.this.block])
#
# }
#### prediction of infestation post spraying
dev.new()
par(mfrow=c(2,3))
# NOTE(review): zNA appears to index houses without observed data and est.yp.b
# their estimated probability (both defined earlier in the file) -- confirm
plot_reel(block_data$easting[zNA],block_data$northing[zNA],est.yp.b[zNA],base=0,top=1)
out<-grid.from.kernel(block_data$easting[zNA],block_data$northing[zNA],est.yp.b[zNA],Kernel,T=T,f,steps=150,tr=threshold)
dist.weight<-matrix(as.vector(as.matrix(out$z)),nrow=length(out$xs))
library(fields)
image.plot(x=out$xs,y=out$ys,z=dist.weight,asp=1,col=heat.colors(100),xlab="x (m)",ylab="y (m)")
## example of prediction at a house level
# one Bernoulli draw per house from its predicted probability
draw.infested.post.spray<-rbinom(length(est.yp.b[zNA]),1,prob=est.yp.b[zNA])
plot_reel(block_data$easting[zNA],block_data$northing[zNA],draw.infested.post.spray,base=0,top=1)
# =>1119 infested houses post spraying on Paucarpata: need to do that on cyclo2 and then integrate the
# probability to open the door we may go down quite a bit but still 1119 households + after spraying is huge
# nevertheless if the opendoor model is right, the probability to open would be almost 100% when infested so may go much much down
# Nota: in sprayed house, we can count that even if the guys didn't find something they sprayed so the population has been shot down
|
db776263cb6c2992ea979143118f39f258bc63da | 1d80ea56e9759f87ef9819ed92a76526691a5c3b | /man/eta_squared.Rd | 079cd62270d4435d369724059c0f93ec6e3f2d9a | [] | no_license | cran/effectsize | 5ab4be6e6b9c7f56d74667e52162c2ca65976516 | e8baef181cc221dae96f60b638ed49d116384041 | refs/heads/master | 2023-08-16T21:23:58.750452 | 2023-08-09T18:40:02 | 2023-08-09T19:30:51 | 236,590,396 | 0 | 0 | null | null | null | null | UTF-8 | R | false | true | 14,330 | rd | eta_squared.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/eta_squared-main.R, R/eta_squared_posterior.R
\name{eta_squared}
\alias{eta_squared}
\alias{omega_squared}
\alias{epsilon_squared}
\alias{cohens_f}
\alias{cohens_f_squared}
\alias{eta_squared_posterior}
\title{\eqn{\eta^2} and Other Effect Size for ANOVA}
\usage{
eta_squared(
model,
partial = TRUE,
generalized = FALSE,
ci = 0.95,
alternative = "greater",
verbose = TRUE,
...
)
omega_squared(
model,
partial = TRUE,
ci = 0.95,
alternative = "greater",
verbose = TRUE,
...
)
epsilon_squared(
model,
partial = TRUE,
ci = 0.95,
alternative = "greater",
verbose = TRUE,
...
)
cohens_f(
model,
partial = TRUE,
generalized = FALSE,
squared = FALSE,
method = c("eta", "omega", "epsilon"),
model2 = NULL,
ci = 0.95,
alternative = "greater",
verbose = TRUE,
...
)
cohens_f_squared(
model,
partial = TRUE,
generalized = FALSE,
squared = TRUE,
method = c("eta", "omega", "epsilon"),
model2 = NULL,
ci = 0.95,
alternative = "greater",
verbose = TRUE,
...
)
eta_squared_posterior(
model,
partial = TRUE,
generalized = FALSE,
ss_function = stats::anova,
draws = 500,
verbose = TRUE,
...
)
}
\arguments{
\item{model}{An ANOVA table (or an ANOVA-like table, e.g., outputs from
\code{parameters::model_parameters}), or a statistical model for which such a
table can be extracted. See details.}
\item{partial}{If \code{TRUE}, return partial indices.}
\item{generalized}{A character vector of observed (non-manipulated) variables
to be used in the estimation of a generalized Eta Squared. Can also be
\code{TRUE}, in which case generalized Eta Squared is estimated assuming \emph{none}
of the variables are observed (all are manipulated). (For \code{afex_aov}
models, when \code{TRUE}, the observed variables are extracted automatically
from the fitted model, if they were provided during fitting.)}
\item{ci}{Confidence Interval (CI) level}
\item{alternative}{a character string specifying the alternative hypothesis;
Controls the type of CI returned: \code{"greater"} (default) or \code{"less"}
(one-sided CI), or \code{"two.sided"} (two-sided CI). Partial matching
is allowed (e.g., \code{"g"}, \code{"l"}, \code{"two"}...). See \emph{One-Sided CIs} in
\link{effectsize_CIs}.}
\item{verbose}{Toggle warnings and messages on or off.}
\item{...}{Arguments passed to or from other methods.
\itemize{
\item Can be \code{include_intercept = TRUE} to include the effect size for the intercept (when it is included in the ANOVA table).
\item For Bayesian models, arguments passed to \code{ss_function}.
}}
\item{squared}{Return Cohen's \emph{f} or Cohen's \emph{f}-squared?}
\item{method}{What effect size should be used as the basis for Cohen's \emph{f}?}
\item{model2}{Optional second model for Cohen's f (/squared). If specified,
returns the effect size for R-squared-change between the two models.}
\item{ss_function}{For Bayesian models, the function used to extract
sum-of-squares. Uses \code{\link[=anova]{anova()}} by default, but can also be \code{car::Anova()}
for simple linear models.}
\item{draws}{For Bayesian models, an integer indicating the number of draws
from the posterior predictive distribution to return. Larger numbers take
longer to run, but provide estimates that are more stable.}
}
\value{
A data frame with the effect size(s) between 0-1 (\code{Eta2}, \code{Epsilon2},
\code{Omega2}, \code{Cohens_f} or \code{Cohens_f2}, possibly with the \code{partial} or
\code{generalized} suffix), and their CIs (\code{CI_low} and \code{CI_high}).
\cr\cr
For \code{eta_squared_posterior()}, a data frame containing the ppd of the Eta
squared for each fixed effect, which can then be passed to
\code{\link[bayestestR:describe_posterior]{bayestestR::describe_posterior()}} for summary stats.
A data frame containing the effect size values and their confidence
intervals.
}
\description{
Functions to compute effect size measures for ANOVAs, such as Eta-
(\eqn{\eta}), Omega- (\eqn{\omega}) and Epsilon- (\eqn{\epsilon}) squared,
and Cohen's f (or their partialled versions) for ANOVA tables. These indices
represent an estimate of how much variance in the response variables is
accounted for by the explanatory variable(s).
\cr\cr
When passing models, effect sizes are computed using the sums of squares
obtained from \code{anova(model)} which might not always be appropriate. See
details.
}
\details{
For \code{aov} (or \code{lm}), \code{aovlist} and \code{afex_aov} models, and for \code{anova} objects
that provide Sums-of-Squares, the effect sizes are computed directly using
Sums-of-Squares. (For \code{maov} (or \code{mlm}) models, effect sizes are computed for
each response separately.)
\cr\cr
For other ANOVA tables and models (converted to ANOVA-like tables via
\code{anova()} methods), effect sizes are approximated via test statistic
conversion of the omnibus \emph{F} statistic provided by the (see \code{\link[=F_to_eta2]{F_to_eta2()}}
for more details.)
\subsection{Type of Sums of Squares}{
When \code{model} is a statistical model, the sums of squares (or \emph{F} statistics)
used for the computation of the effect sizes are based on those returned by
\code{anova(model)}. Different models have different default output type. For
example, for \code{aov} and \code{aovlist} these are \emph{type-1} sums of squares, but for
\code{lmerMod} (and \code{lmerModLmerTest}) these are \emph{type-3} sums of squares. Make
sure these are the sums of squares you are interested in. You might want to
convert your model to an ANOVA(-like) table yourself and then pass the result
to \code{eta_squared()}. See examples below for use of \code{car::Anova()} and the
\code{afex} package.
\cr\cr
For type 3 sum of squares, it is generally recommended to fit models with
\emph{orthogonal factor weights} (e.g., \code{contr.sum}) and \emph{centered covariates},
for sensible results. See examples and the \code{afex} package.
}
\subsection{Un-Biased Estimate of Eta}{
Both \emph{\strong{Omega}} and \emph{\strong{Epsilon}} are unbiased estimators of the
population's \emph{\strong{Eta}}, which is especially important is small samples. But
which to choose?
\cr\cr
Though Omega is the more popular choice (Albers and Lakens, 2018), Epsilon is
analogous to adjusted R2 (Allen, 2017, p. 382), and has been found to be less
biased (Carroll & Nordholm, 1975).
}
\subsection{Cohen's f}{
Cohen's f can take on values between zero, when the population means are all
equal, and an indefinitely large number as standard deviation of means
increases relative to the average standard deviation within each group.
\cr\cr
When comparing two models in a sequential regression analysis, Cohen's f for
R-square change is the ratio between the increase in R-square
and the percent of unexplained variance.
\cr\cr
Cohen has suggested that the values of 0.10, 0.25, and 0.40 represent small,
medium, and large effect sizes, respectively.
}
\subsection{Eta Squared from Posterior Predictive Distribution}{
For Bayesian models (fit with \code{brms} or \code{rstanarm}),
\code{eta_squared_posterior()} simulates data from the posterior predictive
distribution (ppd) and for each simulation the Eta Squared is computed for
the model's fixed effects. This means that the returned values are the
population level effect size as implied by the posterior model (and not the
effect size in the sample data). See \code{\link[rstantools:posterior_predict]{rstantools::posterior_predict()}} for
more info.
}
}
\section{Confidence (Compatibility) Intervals (CIs)}{
Unless stated otherwise, confidence (compatibility) intervals (CIs) are
estimated using the noncentrality parameter method (also called the "pivot
method"). This method finds the noncentrality parameter ("\emph{ncp}") of a
noncentral \emph{t}, \emph{F}, or \eqn{\chi^2} distribution that places the observed
\emph{t}, \emph{F}, or \eqn{\chi^2} test statistic at the desired probability point of
the distribution. For example, if the observed \emph{t} statistic is 2.0, with 50
degrees of freedom, for which cumulative noncentral \emph{t} distribution is \emph{t} =
2.0 the .025 quantile (answer: the noncentral \emph{t} distribution with \emph{ncp} =
.04)? After estimating these confidence bounds on the \emph{ncp}, they are
converted into the effect size metric to obtain a confidence interval for the
effect size (Steiger, 2004).
\cr\cr
For additional details on estimation and troubleshooting, see \link{effectsize_CIs}.
}
\section{CIs and Significance Tests}{
"Confidence intervals on measures of effect size convey all the information
in a hypothesis test, and more." (Steiger, 2004). Confidence (compatibility)
intervals and p values are complementary summaries of parameter uncertainty
given the observed data. A dichotomous hypothesis test could be performed
with either a CI or a p value. The 100 (1 - \eqn{\alpha})\% confidence
interval contains all of the parameter values for which \emph{p} > \eqn{\alpha}
for the current data and model. For example, a 95\% confidence interval
contains all of the values for which p > .05.
\cr\cr
Note that a confidence interval including 0 \emph{does not} indicate that the null
(no effect) is true. Rather, it suggests that the observed data together with
the model and its assumptions combined do not provided clear evidence against
a parameter value of 0 (same as with any other value in the interval), with
the level of this evidence defined by the chosen \eqn{\alpha} level (Rafi &
Greenland, 2020; Schweder & Hjort, 2016; Xie & Singh, 2013). To infer no
effect, additional judgments about what parameter values are "close enough"
to 0 to be negligible are needed ("equivalence testing"; Bauer & Kiesser,
1996).
}
\section{Plotting with \code{see}}{
The \code{see} package contains relevant plotting functions. See the \href{https://easystats.github.io/see/articles/effectsize.html}{plotting vignette in the \code{see} package}.
}
\examples{
data(mtcars)
mtcars$am_f <- factor(mtcars$am)
mtcars$cyl_f <- factor(mtcars$cyl)
model <- aov(mpg ~ am_f * cyl_f, data = mtcars)
(eta2 <- eta_squared(model))
# More types:
eta_squared(model, partial = FALSE)
eta_squared(model, generalized = "cyl_f")
omega_squared(model)
epsilon_squared(model)
cohens_f(model)
model0 <- aov(mpg ~ am_f + cyl_f, data = mtcars) # no interaction
cohens_f_squared(model0, model2 = model)
## Interpretation of effect sizes
## ------------------------------
interpret_omega_squared(0.10, rules = "field2013")
interpret_eta_squared(0.10, rules = "cohen1992")
interpret_epsilon_squared(0.10, rules = "cohen1992")
interpret(eta2, rules = "cohen1992")
\dontshow{if (require("see") && interactive()) (if (getRversion() >= "3.4") withAutoprint else force)(\{ # examplesIf}
plot(eta2) # Requires the {see} package
\dontshow{\}) # examplesIf}
\dontshow{if (require("car")) (if (getRversion() >= "3.4") withAutoprint else force)(\{ # examplesIf}
# Recommended: Type-2 or -3 effect sizes + effects coding
# -------------------------------------------------------
contrasts(mtcars$am_f) <- contr.sum
contrasts(mtcars$cyl_f) <- contr.sum
model <- aov(mpg ~ am_f * cyl_f, data = mtcars)
model_anova <- car::Anova(model, type = 3)
epsilon_squared(model_anova)
\dontshow{\}) # examplesIf}
\dontshow{if (require("car") && require("afex")) (if (getRversion() >= "3.4") withAutoprint else force)(\{ # examplesIf}
# afex takes care of both type-3 effects and effects coding:
data(obk.long, package = "afex")
model <- afex::aov_car(value ~ gender + Error(id / (phase * hour)),
data = obk.long, observed = "gender"
)
omega_squared(model)
eta_squared(model, generalized = TRUE) # observed vars are pulled from the afex model.
\dontshow{\}) # examplesIf}
\dontshow{if (require("lmerTest") && require("lme4") && FALSE) (if (getRversion() >= "3.4") withAutoprint else force)(\{ # examplesIf}
## Approx. effect sizes for mixed models
## -------------------------------------
model <- lme4::lmer(mpg ~ am_f * cyl_f + (1 | vs), data = mtcars)
omega_squared(model)
\dontshow{\}) # examplesIf}
\dontshow{if (require(rstanarm) && require(bayestestR) && require(car) && interactive()) (if (getRversion() >= "3.4") withAutoprint else force)(\{ # examplesIf}
## Bayesian Models (PPD)
## ---------------------
fit_bayes <- rstanarm::stan_glm(
mpg ~ factor(cyl) * wt + qsec,
data = mtcars, family = gaussian(),
refresh = 0
)
es <- eta_squared_posterior(fit_bayes,
verbose = FALSE,
ss_function = car::Anova, type = 3
)
bayestestR::describe_posterior(es, test = NULL)
# compare to:
fit_freq <- lm(mpg ~ factor(cyl) * wt + qsec,
data = mtcars
)
aov_table <- car::Anova(fit_freq, type = 3)
eta_squared(aov_table)
\dontshow{\}) # examplesIf}
}
\references{
\itemize{
\item Albers, C., and Lakens, D. (2018). When power analyses based on pilot data
are biased: Inaccurate effect size estimators and follow-up bias. Journal of
experimental social psychology, 74, 187-195.
\item Allen, R. (2017). Statistics and Experimental Design for Psychologists: A
Model Comparison Approach. World Scientific Publishing Company.
\item Carroll, R. M., & Nordholm, L. A. (1975). Sampling Characteristics of
Kelley's epsilon and Hays' omega. Educational and Psychological Measurement,
35(3), 541-554.
\item Kelley, T. (1935) An unbiased correlation ratio measure. Proceedings of the
National Academy of Sciences. 21(9). 554-559.
\item Olejnik, S., & Algina, J. (2003). Generalized eta and omega squared
statistics: measures of effect size for some common research designs.
Psychological methods, 8(4), 434.
\item Steiger, J. H. (2004). Beyond the F test: Effect size confidence intervals
and tests of close fit in the analysis of variance and contrast analysis.
Psychological Methods, 9, 164-182.
}
}
\seealso{
\code{\link[=F_to_eta2]{F_to_eta2()}}
Other effect sizes for ANOVAs:
\code{\link{rank_epsilon_squared}()}
}
\concept{effect sizes for ANOVAs}
|
ecfd407e3a8209324ca878b980548f29cabca4ce | 792db68cb166df96f92b841bb0a86c4bd1d536d2 | /Practica1_Tipos_de_Datos.R | 845b79323fe24186e86ded64e7aea607de8a668a | [] | no_license | AllanZamb/ProcesamientoR | 74ac7524d8b8e9563827c3b5e8d7d25ae1c01d61 | 3a5e29b0c7fbc6a35ca71846e89caaa1b008dcd7 | refs/heads/main | 2023-03-06T22:20:01.317589 | 2021-02-06T04:48:10 | 2021-02-06T04:48:10 | 316,684,351 | 0 | 1 | null | null | null | null | UTF-8 | R | false | false | 1,766 | r | Practica1_Tipos_de_Datos.R | ################################################################################
######################## PRACTICA 1 ############################################
################################################################################
# Practice 1: atomic data types, vectors and lists in R.
# (Instructional comments translated from Spanish to English.)
# The 5 (atomic) variable types
# Character data type
variable_char <- "hola como estás?"
# Numeric (double) data type
variable_numerica <- 5.56
# Integer data type (the L suffix makes the literal an integer)
variable_entera <- 10L
# Logical data type
# NOTE(review): F is a reassignable alias; FALSE is the safer spelling
variable_logica <- F
# Function to check which data type we have
class(variable_logica)
# The c() function builds a vector;
# a vector is limited to storing values of a single type
variables_caracteres <- c("a", "b", "c", "d","e")
variables_numericas <- c(5.36,3.25,9.98, 99.99999, 1)
variables_enteras <- c(10L, 15L, 50L, 25L, 89L)
variables_logicas <- c(T,F,T,FALSE,TRUE)
# Storing vectors and values of different types:
# a list can hold different structures (vectors) or different data types
# [INT][CHAR][NUMERIC][TRUE][FALSE][VECTOR][PLOT][MAP][DATAFRAMES]
variables_mixtas <- list(variables_caracteres,
variables_numericas,
variables_enteras,
variables_logicas,
"Hola", 5.36, 5L)
# Index into the list: [[2]] extracts the numeric vector, [3] its third value
variables_mixtas[[2]][3]
# Comments
# Vectors
# Lists
# STEP 1: build the vectors
# Names of three movies (char)
# Ratings of three movies (numeric)
# Release years of three movies (int)
# Reviews of three movies (char)
# Whether or not you liked each movie (T, F)
# STEP 2: build the list from the vectors
length(variables_mixtas)
# single brackets return a one-element list, so this prints "list"
class(variables_mixtas[2])
|
2f8f741673b461e2a705ed2e91d0fafa4d5f5923 | f00aaebc35b9ef286144e4196b6030e99bf90eac | /RepData_Assignment1.R | 0053bba1dc687d03d25aa20f741edc1ae209c320 | [] | no_license | bgourley001/RepData_PeerAssessment1 | 1fc9a631dad5d112636bb7c1356b808f2be2f0be | 2f8851702ffdbbabeafaf51c5b06314f818b5493 | refs/heads/master | 2021-01-17T22:39:31.082155 | 2015-09-20T14:15:07 | 2015-09-20T14:15:07 | 42,353,669 | 0 | 0 | null | 2015-09-12T10:46:24 | 2015-09-12T10:46:23 | null | UTF-8 | R | false | false | 5,742 | r | RepData_Assignment1.R | #RepData_Assignment1
#local working directory : I:/Coursera/ReproducibleResearch/Assignment1/RepData_PeerAssessment1
#load required libraries
library(dplyr)
library(ggplot2)
############################################################################################
# Loading and preprocessing the raw data
############################################################################################
#read in the raw data from the activity folder
#(columns used below: steps, date, interval)
activity.raw <- read.csv("activity/activity.csv")
############################################################################################
# Calculate Mean and median Values of the Total steps per day
############################################################################################
#convert date strings to R dates
activity.raw$date <- as.Date(activity.raw$date,"%Y-%m-%d")
#group activity.raw by date
activity.raw.byDate <- group_by(activity.raw,date)
#Calculate Total Steps per day, plot as a histogram and report the mean and median
sumSteps.raw <- summarize(activity.raw.byDate,
total.steps.perDay = sum(steps,na.rm = TRUE),
average.steps.perDay = mean(total.steps.perDay,na.rm = TRUE))
#Summary of the daily totals; positions used below: s[3] = Median, s[4] = Mean
s <- summary(sumSteps.raw$total.steps.perDay)
print(s)
#histogram of daily totals with the mean (red) and median (blue) marked
hist(sumSteps.raw$total.steps.perDay,main = "Total Steps per Day",xlab = "Total Steps")
abline(v = s[4],col="red",lwd=2)
abline(v = s[3],col="blue",lwd=2)
legend("topright",legend = c("Mean","Median"),lty=c(1,1),lwd=c(2,2),col=c("red","blue"))
#Mean and Median Values
print(paste("Mean of Total Steps Taken per Day = ",s[4],sep = ""))
print(paste("Median of Total Steps Taken per Day = ",s[3],sep = ""))
############################################################################################
#time-series plot of average steps per time interval
############################################################################################
#group by interval and average the steps over all days
activity.raw.byInterval <- group_by(activity.raw,interval)
intervalSteps <- summarize(activity.raw.byInterval,
avg.steps.perInterval = mean(steps,na.rm = TRUE))
#Highest no of steps
max.steps <- max(intervalSteps$avg.steps.perInterval)
#Interval containing the highest no of steps
max.interval.steps <- intervalSteps$interval[which.max(intervalSteps$avg.steps.perInterval)]
#Daily Activity Pattern Plot (one line across the day's 5-minute intervals)
plot(intervalSteps$interval,intervalSteps$avg.steps.perInterval,type = "l", lty = 1,
lwd = 1.5,col="blue",
main="Daily Activity Pattern",xlab = "Interval",ylab = "Average Steps")
#add vertical line indicating the interval with the highest number of steps
abline(v=max.interval.steps,lty=2,lwd=1.5,col="red")
legend("topright",legend = "Highest Steps",lty=2,lwd=1.5,col="red")
print(paste("Interval which contains the highest number of steps (",
round(max.steps,0),") is interval : ",
max.interval.steps,sep = ""))
##############################################################################################
#Impute Missing Values
##############################################################################################
#Replace missing step values with mean for the corresponding 5 minute interval
#copy the raw dataset
activity.clean <- activity.raw
#extract the NA's
steps.na <- subset(activity.raw,is.na(steps))
#Get number of missing days
missing.days <- length(unique(steps.na$date))
#replace NAs with those in intervalSteps replicating by no of missing days
#NOTE(review): this assumes each day with NAs is missing in full and its NA
#rows are ordered by interval -- true for this assignment's dataset; confirm
#before reusing elsewhere
steps.na$steps <- rep(intervalSteps$avg.steps.perInterval,missing.days)
#replace the NA's in the activity dataset
activity.clean$steps <- replace(activity.clean$steps,is.na(activity.clean$steps),steps.na$steps)
summary(activity.clean)
#group activity.clean by date
activity.clean.byDate <- group_by(activity.clean,date)
#Calculate Total Steps per day, plot as a histogram and report the mean and median
sumSteps.clean <- summarize(activity.clean.byDate,
total.steps.perDay = sum(steps,na.rm = TRUE),
average.steps.perDay = mean(total.steps.perDay,na.rm = TRUE))
#print summary (positions used below: s[3] = Median, s[4] = Mean)
s <- summary(sumSteps.clean$total.steps.perDay)
print(s)
#histogram of the imputed daily totals with mean/median marked
hist(sumSteps.clean$total.steps.perDay,main = "Total Steps per Day",xlab = "Total Steps")
abline(v = s[4],col="red",lwd=2)
abline(v = s[3],col="blue",lwd=2)
legend("topright",legend = c("Mean","Median"),lty=c(1,1),lwd=c(2,2),col=c("red","blue"))
print(paste("Mean of Total Steps Taken per Day = ",s[4],sep = ""))
print(paste("Median of Total Steps Taken per Day = ",s[3],sep = ""))
#############################################################################################
# Weekday/Weekend activity Comparison
#############################################################################################
#add a column indicating day of the week and a column indicating weekday or weekend
activity.clean <- mutate(activity.clean,
day = weekdays(activity.clean$date),
period = ifelse(day == "Saturday" | day == "Sunday",
"weekend","weekday"))
#convert period to a factor variable
activity.clean$period <- as.factor(activity.clean$period)
#NOTE(review): weekdays() is locale-dependent; the "Saturday"/"Sunday"
#comparison above assumes an English locale
#time-series panel plot of average steps per weekend/weekday time interval
#group by period and interval
activity.clean.byPeriod <- group_by(activity.clean,period,interval)
intervalSteps <- summarize(activity.clean.byPeriod,
avg.steps.perInterval = mean(steps,na.rm = TRUE))
#panel plot: one facet per period (weekday / weekend)
ggplot(intervalSteps,aes(interval,avg.steps.perInterval)) +
geom_line(group = 1) +
facet_wrap(~ period,ncol = 1) +
ggtitle("Weekday/Weekend Activity Comparison") +
labs(x="Interval",y="Average Steps Per Interval")
|
a7a7d128c23857318fca2c67efd6fc62140911be | 2ad8752dbe37a0c086225179a928b6e6f8eac9f6 | /man/hsoutput_mature.Rd | a9734aa371120d3f9c9a12e0748aaee3ec9b7ec7 | [] | no_license | robschick/pcodData | 3a443291e13d5f44ea5fc05d318e36b99599e4c4 | 01464f458b3f002e38540a8fcff196940148f51f | refs/heads/master | 2020-05-19T19:00:54.942975 | 2014-09-26T14:52:41 | 2014-09-26T14:52:41 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 432 | rd | hsoutput_mature.Rd | % Generated by roxygen2 (4.0.1.99): do not edit by hand
\docType{data}
\name{hsoutput_mature}
\alias{hsoutput_mature}
\title{Making the parameter data frame for Adults}
\format{Data frames with columns
\describe{
\item{hsoutput_mature}{A 2-element list.}
}}
\source{
Interim PCOD Report
}
\usage{
hsoutput_mature
}
\description{
Making the parameter data frame for Adults
}
\examples{
hsoutput_mature
}
\keyword{datasets}
|
bfc3784ce3d20dff70b8eadd6c1ea7c75f8b0ef0 | 7d2e9fd024e7c2a7c3ab76f48a85289bc5c03bf7 | /man/choose_directory.Rd | eee6f36432ec2dbfa9803439b340b0cc7560f65d | [
"Zlib"
] | permissive | joshrud/phenoptrReports | a9a83cee7e0e88890e50c9ce21463f1205384aae | 011d22b60907891b50fe809d6b0d46f8dcaf0d5c | refs/heads/master | 2023-06-17T20:52:53.385375 | 2021-06-14T17:57:00 | 2021-06-14T17:57:00 | 383,900,401 | 0 | 0 | Zlib | 2021-07-07T19:07:05 | 2021-07-07T19:07:05 | null | UTF-8 | R | false | true | 495 | rd | choose_directory.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/utilities.R
\name{choose_directory}
\alias{choose_directory}
\title{Cross-platform choose directory function.}
\usage{
choose_directory(caption = "Select folder", default = "")
}
\arguments{
\item{caption}{Caption for the choose directory dialog}
\item{default}{Starting directory}
}
\value{
The path to the selected directory, or NA if the user canceled.
}
\description{
Cross-platform choose directory function.
}
|
0ea7f53e0412357285e2d42057177b160754e7e1 | c680baed6d12f912c36566950c6a5fa17a050c8b | /man/plot.SpherEllipse.Rd | c219d6f41113787700d528c22049a30b74ee6f3f | [
"MIT"
] | permissive | butwhywhy/amsstats | 81b1e93547d1c3ba83a7c6c40cd55c91ceeb7c95 | 7dbfdeb130c6a4d9ca7477413867e790c761f4ed | refs/heads/master | 2021-01-25T08:59:57.880965 | 2014-11-25T23:20:20 | 2014-11-25T23:20:20 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 828 | rd | plot.SpherEllipse.Rd | % Generated by roxygen2 (4.0.1): do not edit by hand
\name{plot.SpherEllipse}
\alias{plot.SpherEllipse}
\title{Plot spherical ellipse}
\usage{
\method{plot}{SpherEllipse}(x, npoints = 200, add = FALSE,
line.col = "red", ...)
}
\arguments{
\item{x}{An SpherEllipse object}
\item{npoints}{The number of points to be plotted, default is 200}
\item{add}{Boolean indicating if the plot should be drawn over an existing
plot (\code{TRUE}) or on a new one (\code{FALSE}, default)}
\item{line.col}{The color of the ellipse, default is 'red'}
\item{...}{Other parameters to be passed to \code{\link{lambert.plot}}}
}
\description{
Plots a spherical ellipse in Lambert azimuthal (equal-area)
projection. Only the northern hemisphere
is represented; directions in the southern hemisphere are reversed and
plotted in the northern hemisphere.
}
|
3ad8d1594b9fddf8c995d55da9b5b8b09e479aa7 | bd44a15f665ac668840b56a9f5d381320ff1099c | /R scripts/Beta diversity distances.R | 4196206fc1a0398d208ce4705cd47571caef238a | [] | no_license | Lina-Maria/Cross-over-diet-study | 70458ec4b576209bea12b478a01163d50b8f742c | bef6af2ce458b41952333cd65881fef6a4eca1e5 | refs/heads/main | 2023-04-17T23:04:36.971299 | 2021-04-29T09:47:38 | 2021-04-29T09:47:38 | 362,365,003 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 6,593 | r | Beta diversity distances.R | ## Load packages
library("vegan")
physeq
#phyloseq-class experiment-level object
#otu_table() OTU Table: [ 361 taxa and 177 samples ]
#sample_data() Sample Data: [ 177 samples by 21 sample variables ]
#tax_table() Taxonomy Table: [ 361 taxa by 6 taxonomic ranks ]
# Correct order for plot
testphyseq <- sample_data(physeq)$Diet_Category
testphyseq <- factor(testphyseq, levels = c("High_Protein","High_Fibre","Hypoallergenic"))
sample_data(physeq)$Diet_Category <- testphyseq
physeqpruned <- prune_taxa(taxa_sums(physeq) >= 1, physeq)
physeqpruned
#phyloseq-class experiment-level object
#otu_table() OTU Table: [ 86 taxa and 177 samples ]
#sample_data() Sample Data: [ 177 samples by 21 sample variables ]
#tax_table() Taxonomy Table: [ 86 taxa by 6 taxonomic ranks ]
## Data by Diet_Category
physeqpruned_dist = phyloseq::distance(physeqpruned, "bray")
physeqpruned_dist_T <- data.frame(sample_data(physeqpruned))
adonis(physeqpruned_dist ~ Diet_Category, data=physeqpruned_dist_T)
### Convert distance matrix to a table
DistanceTable <- data.frame(t(combn(rownames(sample_data(physeqpruned)),2)), as.numeric(physeqpruned_dist))
names(DistanceTable) <- c("c1", "c2", "distance")
write.table(DistanceTable, file = "distance diet", quote = FALSE, sep = "\t", row.names = TRUE)
##
OTU_bcaBaseline <- subset_samples(OTU_bca1, Time_Point%in%c("Baseline"))
OTU_bcaBaseline
## Data by Diet_Category
OTU_bcaBaseline_dist = phyloseq::distance(OTU_bcaBaseline, "bray")
#physeqpruned_dist_T <- data.frame(sample_data(physeqpruned))
#adonis(physeqpruned_dist ~ Diet_Category, data=physeqpruned_dist_T)
### Convert distance matrix to a table
DistanceTablebcaBaseline <- data.frame(t(combn(rownames(sample_data(OTU_bcaBaseline)),2)), as.numeric(OTU_bcaBaseline_dist))
names(DistanceTablebcaBaseline) <- c("c1", "c2", "distance")
write.table(DistanceTablebcaBaseline, file = "distance dietbcaBaseline", quote = FALSE, sep = "\t", row.names = TRUE)
##
OTU_bcaEnd_Hypoallergenic <- subset_samples(OTU_bca1, Time_Point%in%c("End_Hypoallergenic"))
OTU_bcaEnd_Hypoallergenic
## Data by Diet_Category
OTU_bcaEnd_Hypoallergenic_dist = phyloseq::distance(OTU_bcaEnd_Hypoallergenic, "bray")
#physeqpruned_dist_T <- data.frame(sample_data(physeqpruned))
#adonis(physeqpruned_dist ~ Diet_Category, data=physeqpruned_dist_T)
### Convert distance matrix to a table
DistanceTablebcaEnd_Hypoallergenic <- data.frame(t(combn(rownames(sample_data(OTU_bcaEnd_Hypoallergenic)),2)), as.numeric(OTU_bcaEnd_Hypoallergenic_dist))
names(DistanceTablebcaEnd_Hypoallergenic) <- c("c1", "c2", "distance")
write.table(DistanceTablebcaEnd_Hypoallergenic, file = "distance dietbcaEnd_Hypoallergenic", quote = FALSE, sep = "\t", row.names = TRUE)
##
OTU_bcaWashout <- subset_samples(OTU_bca1, Time_Point%in%c("Washout"))
OTU_bcaWashout
## Data by Diet_Category
OTU_bcaWashout_dist = phyloseq::distance(OTU_bcaWashout, "bray")
#physeqpruned_dist_T <- data.frame(sample_data(physeqpruned))
#adonis(physeqpruned_dist ~ Diet_Category, data=physeqpruned_dist_T)
### Convert distance matrix to a table
DistanceTablebcaWashout <- data.frame(t(combn(rownames(sample_data(OTU_bcaWashout)),2)), as.numeric(OTU_bcaWashout_dist))
names(DistanceTablebcaWashout) <- c("c1", "c2", "distance")
write.table(DistanceTablebcaWashout, file = "distance dietbcaWashout", quote = FALSE, sep = "\t", row.names = TRUE)
## subset only acb
OTU_acb <- subset_samples(physeqpruned, Diet_Sequence%in%c("acb"))
OTU_acb
#phyloseq-class experiment-level object
#otu_table() OTU Table: [ 86 taxa and 92 samples ]
#sample_data() Sample Data: [ 92 samples by 23 sample variables ]
#tax_table() Taxonomy Table: [ 86 taxa by 6 taxonomic ranks ]
##
OTU_acbBaseline <- subset_samples(OTU_acb, Time_Point%in%c("Baseline"))
OTU_acbBaseline
## your data by Diet_Category
OTU_acbBaseline_dist = phyloseq::distance(OTU_acbBaseline, "bray")
#physeqpruned_dist_T <- data.frame(sample_data(physeqpruned))
#adonis(physeqpruned_dist ~ Diet_Category, data=physeqpruned_dist_T)
### Convert distance matrix to a table
DistanceTableacbBaseline <- data.frame(t(combn(rownames(sample_data(OTU_acbBaseline)),2)), as.numeric(OTU_acbBaseline_dist))
names(DistanceTableacbBaseline) <- c("c1", "c2", "distance")
write.table(DistanceTableacbBaseline, file = "distance dietacbBaseline", quote = FALSE, sep = "\t", row.names = TRUE)
##
OTU_acbEnd_Hypoallergenic <- subset_samples(OTU_acb, Time_Point%in%c("End_Hypoallergenic"))
OTU_acbEnd_Hypoallergenic
## your data by Diet_Category
OTU_acbEnd_Hypoallergenic_dist = phyloseq::distance(OTU_acbEnd_Hypoallergenic, "bray")
#physeqpruned_dist_T <- data.frame(sample_data(physeqpruned))
#adonis(physeqpruned_dist ~ Diet_Category, data=physeqpruned_dist_T)
### Convert distance matrix to a table
DistanceTableacbEnd_Hypoallergenic <- data.frame(t(combn(rownames(sample_data(OTU_acbEnd_Hypoallergenic)),2)), as.numeric(OTU_acbEnd_Hypoallergenic_dist))
names(DistanceTableacbEnd_Hypoallergenic) <- c("c1", "c2", "distance")
write.table(DistanceTableacbEnd_Hypoallergenic, file = "distance dietacbEnd_Hypoallergenic", quote = FALSE, sep = "\t", row.names = TRUE)
##
OTU_acbWashout <- subset_samples(OTU_acb, Time_Point%in%c("Washout"))
OTU_acbWashout
## your data by Diet_Category
OTU_acbWashout_dist = phyloseq::distance(OTU_acbWashout, "bray")
#physeqpruned_dist_T <- data.frame(sample_data(physeqpruned))
#adonis(physeqpruned_dist ~ Diet_Category, data=physeqpruned_dist_T)
### Convert distance matrix to a table
DistanceTableacbWashout <- data.frame(t(combn(rownames(sample_data(OTU_acbWashout)),2)), as.numeric(OTU_acbWashout_dist))
names(DistanceTableacbWashout) <- c("c1", "c2", "distance")
write.table(DistanceTableacbWashout, file = "distance dietacbWashout", quote = FALSE, sep = "\t", row.names = TRUE)
##
OTU_acbEnd_High_Fibre <- subset_samples(OTU_acb, Time_Point%in%c("End_High_Fibre"))
OTU_acbEnd_High_Fibre
## your data by Diet_Category
OTU_acbEnd_High_Fibre_dist = phyloseq::distance(OTU_acbEnd_High_Fibre, "bray")
#physeqpruned_dist_T <- data.frame(sample_data(physeqpruned))
#adonis(physeqpruned_dist ~ Diet_Category, data=physeqpruned_dist_T)
### Convert distance matrix to a table
DistanceTableacbEnd_High_Fibre <- data.frame(t(combn(rownames(sample_data(OTU_acbEnd_High_Fibre)),2)), as.numeric(OTU_acbEnd_High_Fibre_dist))
names(DistanceTableacbEnd_High_Fibre) <- c("c1", "c2", "distance")
write.table(DistanceTableacbEnd_High_Fibre, file = "distance dietacbEnd_High_Fibre", quote = FALSE, sep = "\t", row.names = TRUE)
|
92ec0fd05a60e08f89b134c174453a7bbfe9e771 | 04b415c17895e4a59aaaef9e9766d1f78514d884 | /week4/exp4/r_script_data/exp_4.r | a4b62c41d11eda7c55cbca52ba7da30a57f3bacb | [] | no_license | kaiiam/Spiekeroog_biogeo | ffdb7267a254d73e47b5e9b184c71ca8498f0b45 | d12c7f11567eb9dbe6bd7c582d9987cd43553976 | refs/heads/master | 2021-11-10T11:27:57.369944 | 2017-10-11T08:07:52 | 2017-10-11T08:07:52 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 9,133 | r | exp_4.r | library(dplyr)
library(ggplot2)
setwd("/home/kai/Desktop/grad_school/marmic/lab_rotations/rotation_3/Spiekeroog_biogeo/week4/exp4/r_script_data")
mydata = read.csv("exp_4_data.csv")
#add column for time in hours
mydata <- mutate(mydata, time.hours = time.minutes / 60)
#viewing the data
names(mydata)
head(select(mydata, time.minutes,depth))
head(mydata)
min(select(mydata, oxygen))
# data for plots:
station2 <- filter(mydata, station==2)
station4 <- filter(mydata, station==4)
station5 <- filter(mydata, station==5)
station6 <- filter(mydata, station==6)
station8 <- filter(mydata, station==8)
#plots
plot_station_2<- station2 %>%
ggplot(aes(time.hours, oxygen, colour = depth)) +
geom_point(size=1, stroke=1.2 ) +
theme_light() +
scale_x_continuous("Time (h)") +
scale_y_continuous(expression(paste("Oxygen (μmol l"^"-1" *")" )), limits = c(0, 250)) +
geom_smooth(se = FALSE, method = "lm") +
ggtitle("Station 2") + theme(plot.title = element_text(hjust = 0.5)) +
labs(colour ="Depth") + scale_colour_manual(values = c("#ecd240", "#ff8811", "#f12700", "#972e23"))
plot_station_4<- station4 %>%
ggplot(aes(time.hours, oxygen, colour = depth)) +
geom_point(size=1, stroke=1.2 ) +
theme_light() +
scale_x_continuous("Time (h)") +
scale_y_continuous(expression(paste("Oxygen (μmol l"^"-1" *")" )), limits = c(0, 250)) +
geom_smooth(se = FALSE, method = "lm") +
ggtitle("Station 4") + theme(plot.title = element_text(hjust = 0.5)) +
labs(colour ="Depth") + scale_colour_manual(values = c("#ecd240", "#ff8811", "#f12700", "#972e23"))
plot_station_5<- station5 %>%
ggplot(aes(time.hours, oxygen, colour = depth)) +
geom_point(size=1, stroke=1.2 ) +
theme_light() +
scale_x_continuous("Time (h)") +
scale_y_continuous(expression(paste("Oxygen (μmol l"^"-1" *")" )), limits = c(0, 250)) +
geom_smooth(se = FALSE, method = "lm") +
ggtitle("Station 5") + theme(plot.title = element_text(hjust = 0.5)) +
labs(colour ="Depth") + scale_colour_manual(values = c("#ecd240", "#ff8811", "#f12700", "#972e23"))
plot_station_6<- station6 %>%
ggplot(aes(time.hours, oxygen, colour = depth)) +
geom_point(size=1, stroke=1.2 ) +
theme_light() +
scale_x_continuous("Time (h)") +
scale_y_continuous(expression(paste("Oxygen (μmol l"^"-1" *")" )), limits = c(0, 250)) +
geom_smooth(se = FALSE, method = "lm") +
ggtitle("Station 6") + theme(plot.title = element_text(hjust = 0.5)) +
labs(colour ="Depth") + scale_colour_manual(values = c("#ecd240", "#ff8811", "#f12700", "#972e23"))
plot_station_8<- station8 %>%
ggplot(aes(time.hours, oxygen, colour = depth)) +
geom_point(size=1, stroke=1.2 ) +
theme_light() +
scale_x_continuous("Time (h)") +
scale_y_continuous(expression(paste("Oxygen (μmol l"^"-1" *")" )), limits = c(0, 250)) +
geom_smooth(se = FALSE, method = "lm") +
ggtitle("Station 8") + theme(plot.title = element_text(hjust = 0.5)) +
labs(colour ="Depth") + scale_colour_manual(values = c("#f0fd00", "#ecd240", "#ff8811", "#f12700", "#972e23"))
#view plots
plot(plot_station_2)
plot(plot_station_4)
plot(plot_station_5)
plot(plot_station_6)
plot(plot_station_8)
ggsave(filename = "/home/kai/Desktop/grad_school/marmic/lab_rotations/rotation_3/Spiekeroog_biogeo/week4/exp4/r_script_data/rate_plots/2.jpeg",
plot = plot_station_2, width = 6, height = 4)
ggsave(filename = "/home/kai/Desktop/grad_school/marmic/lab_rotations/rotation_3/Spiekeroog_biogeo/week4/exp4/r_script_data/rate_plots/4.jpeg",
plot = plot_station_4, width = 6, height = 4)
ggsave(filename = "/home/kai/Desktop/grad_school/marmic/lab_rotations/rotation_3/Spiekeroog_biogeo/week4/exp4/r_script_data/rate_plots/5.jpeg",
plot = plot_station_5, width = 6, height = 4)
ggsave(filename = "/home/kai/Desktop/grad_school/marmic/lab_rotations/rotation_3/Spiekeroog_biogeo/week4/exp4/r_script_data/rate_plots/6.jpeg",
plot = plot_station_6, width = 6, height = 4)
ggsave(filename = "/home/kai/Desktop/grad_school/marmic/lab_rotations/rotation_3/Spiekeroog_biogeo/week4/exp4/r_script_data/rate_plots/8.jpeg",
plot = plot_station_8, width = 6, height = 4)
######## multiplot:
#library(grid)
# Arrange several ggplot objects on one page using grid viewports
# (cookbook-r style helper).
#
# Args:
#   ...      : ggplot objects, and/or
#   plotlist : a list of ggplot objects (appended to ...).
#   file     : NOTE(review): accepted but never referenced in the body.
#   cols     : number of columns; only used when layout is NULL.
#   layout   : matrix of plot indices (0 = empty cell).
#              NOTE(review): the default is a fixed 3x2 layout for five
#              plots, so `cols` has no effect unless layout = NULL is
#              passed explicitly.
multiplot <- function(..., plotlist=NULL, file, cols=2, layout= matrix(c(1,2,3,4,5,0), nrow=3, byrow=TRUE)) {
  require(grid)  # NOTE(review): library(grid) would fail loudly instead
  # Make a list from the ... arguments and plotlist
  plots <- c(list(...), plotlist)
  numPlots = length(plots)
  # If layout is NULL, then use 'cols' to determine layout
  if (is.null(layout)) {
    # Make the panel
    # ncol: Number of columns of plots
    # nrow: Number of rows needed, calculated from # of cols
    layout <- matrix(seq(1, cols * ceiling(numPlots/cols)),
    ncol = cols, nrow = ceiling(numPlots/cols))
  }
  if (numPlots==1) {
    # Single plot: no grid machinery needed.
    print(plots[[1]])
  } else {
    # Set up the page
    grid.newpage()
    pushViewport(viewport(layout = grid.layout(nrow(layout), ncol(layout))))
    # Make each plot, in the correct location
    for (i in 1:numPlots) {
      # Get the i,j matrix positions of the regions that contain this subplot
      matchidx <- as.data.frame(which(layout == i, arr.ind = TRUE))
      print(plots[[i]], vp = viewport(layout.pos.row = matchidx$row,
      layout.pos.col = matchidx$col))
    }
  }
}
# Draw all five station plots on one page (uses the default 3x2 layout).
mult_plot <- multiplot(plot_station_2, plot_station_4, plot_station_5, plot_station_6, plot_station_8, cols=2)
#export pdf using cairo-pdf
#################### rate calculations ################################
# Oxygen consumption rate = slope of a linear regression of oxygen (uM)
# against time (h), fitted separately for every station/depth incubation.
#
# The original script created 21 per-group data frames and 21 lm() calls by
# hand, bound the slopes with cbind() onto distinct(station, depth), and
# relied on both orderings happening to agree. Grouping programmatically
# ties each slope to its station/depth by construction.
incubations <- split(mydata, list(mydata$station, mydata$depth), drop = TRUE)
rates_table <- do.call(rbind, lapply(incubations, function(incubation) {
  fit <- lm(oxygen ~ time.hours, data = incubation)
  data.frame(
    station = incubation$station[1],
    depth = incubation$depth[1],
    oxygen_consumption_rate_uM_per_h = coef(fit)[["time.hours"]],
    stringsAsFactors = FALSE
  )
}))
rownames(rates_table) <- NULL

# Export the per-incubation rates.
write.csv(rates_table, file = "July_Spiekeroog_rates.csv", row.names = FALSE)
|
72373cc4e867dcd6c419121d64a8f656dee2d03d | ac0b9658e132785845bbdd41430972639ad08e01 | /src/data-preparation/data-preparation.R | 0398875a21a8bee532d28d9666bf1f8fa7fd90b1 | [] | no_license | Albirizzu/dPrep-Team-Project | d295c6013295a2164c6c4742c03764e516636335 | 747f9b9e25217c683fc6b0ae7a7d274eda62926e | refs/heads/main | 2023-07-28T21:12:08.576934 | 2021-09-30T08:33:33 | 2021-09-30T08:33:33 | 406,716,276 | 0 | 0 | null | 2021-09-15T16:28:46 | 2021-09-15T10:28:53 | null | UTF-8 | R | false | false | 1,867 | r | data-preparation.R | # Loading and inspecting data
install.packages("RCurl")
library (RCurl)
Venice_cvs <- getURL("https://docs.google.com/spreadsheets/d/1HYUZRLB7-KCr5jwXQnl3OH4KSReK1HO9LmKbU7f3V5c/edit#gid=402527890")
Venice_data <- read.csv (text = Venice_cvs)
View(Venice_data)
summary(Venice_data)
# Data cleaning and transformation
Venice_data <- Venice[ c("id","host_id","host_is_superhost","host_listings_count","property_type","room_type","price","minimum_nights","number_of_reviews","review_scores_rating","reviews_per_month") ]
View(Venice_data)
# make an excel file of the new created dataframe : Venice_data
write.csv(Venice_data,"C:\\Users\\DAVE\\MA\\dPrep\\Venice_data.csv", row.names = FALSE)
# inspect the data again
summary(Venice_data)
# Names of columns
colnames(Venice_data)
# Convert into data
# Dependent variable transformation -> make the variable numeric
Venice_data$Venice_data_price_num <- as.numeric(gsub("[$,]","", Venice_data$price))
# Independent variable transformation -> make the variable binary with 1 = superhost and 0 = non-superhost
Venice_data$host_is_superhost_TRUE <- ifelse(Venice_data$host_is_superhost == 'TRUE', 1, 0)
Venice_data$host_is_superhost_TRUE[is.na(Venice_data$host_is_superhost_TRUE)] <- 0
# data exploration
# scatter plot first try
plot(x = Venice_data$host_is_superhost_TRUE, # horizontal
y = Venice_data$Venice_data_price_num, # vertical
col = "green", # color
type = "p") # line chart
# scatter plot nice design
library(ggplot2)
ggplot(Venice_data, aes(x=as.factor(host_is_superhost_TRUE), y=Venice_data_price_num)) + geom_boxplot()
# percentage of non-superhost vs superhost
ggplot(Venice_data, aes(host_is_superhost_TRUE)) + geom_bar(aes(y = (..count..)/sum(..count..)*100)) + ylab("Percentage")
barplot(table(Venice_data$host_is_superhost_TRUE)/sum(table(Venice_data$host_is_superhost_TRUE))*100)
|
037816f5f8ad538c066d0dfc473f34a06ea11137 | 2f8eadc3086c263afdf2784e229f7e80a7dcf49e | /RMark/man/RDSalamander.Rd | 9967d3dce2006c9cf10d01773ced967f9a9e1919 | [] | no_license | wchallenger/RMark | 3edb66cbc924fbc37feaa53d22d99e8ac83ee47a | 636a3a7a6c5ab5292c3a249a1b41dab580dda8ba | refs/heads/master | 2021-01-17T22:35:11.482847 | 2012-01-27T17:16:51 | 2012-01-27T17:16:51 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 1,354 | rd | RDSalamander.Rd | \docType{data}
\name{RDSalamander}
\alias{RDSalamander}
\title{Robust design salamander occupancy data}
\format{A data frame with 40 observations (sites) on the following 2
variables. \describe{ \item{ch}{a character vector containing the presence
(1) and absence (0) with 2 primary occasions with 48 and 31 visits to the
site} \item{freq}{frequency of sites (always 1)} }}
\description{
A robust design occupancy data set for modelling
presence/absence data for salamanders.
}
\details{
This is a data set obtained from Gary White which is
supposed to be salamander data collected with a robust
design.
}
\examples{
fit.RDOccupancy=function()
{
data(RDSalamander)
occ.p.time.eg=mark(RDSalamander,model="RDOccupEG",
time.intervals=c(rep(0,47),1,rep(0,30)),
model.parameters=list(p=list(formula=~session)))
occ.p.time.pg=mark(RDSalamander,model="RDOccupPG",
time.intervals=c(rep(0,47),1,rep(0,30)),
model.parameters=list(Psi=list(formula=~time),
p=list(formula=~session)))
occ.p.time.pe=mark(RDSalamander,model="RDOccupPE",
time.intervals=c(rep(0,47),1,rep(0,30)),
model.parameters=list(Psi=list(formula=~time),
p=list(formula=~session)))
return(collect.models())
}
RDOcc=fit.RDOccupancy()
print(RDOcc)
}
\keyword{dataset}
\keyword{datasets}
|
13784a876bd5c6fcf33675e8bb9c4834f0a30d80 | f30cc1c33978ca5a708a7e0a493403ea88550160 | /man/unmask.Rd | 48310ab9379231774684ac709599d5495470e38a | [] | no_license | natverse/nat | 044384a04a17fd0c9d895e14979ce43e43a283ba | 1d161fa463086a2d03e7db3d2a55cf4d653dcc1b | refs/heads/master | 2023-08-30T21:34:36.623787 | 2023-08-25T07:23:44 | 2023-08-26T19:02:50 | 15,578,625 | 35 | 10 | null | 2023-01-28T19:03:03 | 2014-01-02T07:54:01 | R | UTF-8 | R | false | true | 2,084 | rd | unmask.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/im3d.R
\name{unmask}
\alias{unmask}
\title{Make im3d image array containing values at locations defined by a mask}
\usage{
unmask(
x,
mask,
default = NA,
attributes. = attributes(mask),
copyAttributes = TRUE
)
}
\arguments{
\item{x}{the data to place on a regular grid}
\item{mask}{An \code{im3d} regular image array where non-zero voxels are the
selected element.}
\item{default}{Value for regions outside the mask (default: NA)}
\item{attributes.}{Attributes to set on new object. Defaults to attributes of
\code{mask}}
\item{copyAttributes}{Whether to copy over attributes (including \code{dim})
from the mask to the returned object. default: \code{TRUE}}
}
\value{
A new \code{im3d} object with attributes/dimensions defined by
\code{mask} and values from \code{x}. If \code{copyAttributes} is
\code{FALSE}, then it will have mode of \code{x} and length of \code{mask}
but no other attributes.
}
\description{
Make im3d image array containing values at locations defined by a mask
}
\details{
The values in x will be placed into a grid defined by the dimensions
of the \code{mask} in the order defined by the standard R linear
subscripting of arrays (see e.g. \code{\link{arrayInd}}).
}
\examples{
\dontrun{
# read in a mask
LHMask=read.im3d(system.file('tests/testthat/testdata/nrrd/LHMask.nrrd', package='nat'))
# pick out all the non zero values
inmask=LHMask[LHMask!=0]
# fill the non-zero elements of the mask with a vector that iterates over the
# values 0:9
stripes=unmask(seq(inmask)\%\%10, LHMask)
# make an image from one slice of that result array
image(imslice(stripes,11), asp=TRUE)
}
}
\seealso{
Other im3d:
\code{\link{as.im3d}()},
\code{\link{boundingbox}()},
\code{\link{im3d-coords}},
\code{\link{im3d-io}},
\code{\link{im3d}()},
\code{\link{imexpand.grid}()},
\code{\link{imslice}()},
\code{\link{is.im3d}()},
\code{\link{mask}()},
\code{\link{origin}()},
\code{\link{projection}()},
\code{\link{threshold}()},
\code{\link{voxdims}()}
}
\concept{im3d}
|
116c108cd51c9fa614e5f5aaeef1e1d9159580bf | 84240a9971b57cacae701280f4d3d07469ff6783 | /R/PISA Scores by Country.R | 65b8a57b20ff89f5718a0acc6556e268252ddd05 | [] | no_license | jbryer/EPSY530Summer2014 | 2b6a63687bf53a24109a39496d8f73c3301c88f2 | 6cac8fb69c0ca2cf3503fff046ac3b91e6b75516 | refs/heads/master | 2020-06-04T13:43:57.818214 | 2014-06-23T14:47:30 | 2014-06-23T14:47:30 | 15,768,590 | 0 | 1 | null | null | null | null | UTF-8 | R | false | false | 247 | r | PISA Scores by Country.R | require(psych)
str(pisa)
table(pisa$CNT)
tab <- describeBy(pisa$math, pisa$CNT, mat=TRUE, skew=FALSE)
View(tab)
head(tab)
ggplot(tab, aes(x=group1, y=mean)) + geom_point() +
coord_flip() +
scale_x_discrete(limits=tab[order(tab$mean),]$group)
|
38ef256c24ea39e69cd690eb0f57fe5a103ab800 | 46bc7ea33ea9e68f8df89c029fbad8d1f64b0022 | /age-vs-mean-word-length.r | 99df18baf5437b8e549cfd5101e5af07737321fe | [] | no_license | LeandroLovisolo/ITH-TP1 | 74089463e544deb63f9b1fa904e43324f24961e0 | b83537b412622ba6b648658d1960d2e476ff2142 | refs/heads/master | 2021-01-25T05:11:19.694864 | 2012-11-07T23:43:04 | 2012-11-07T23:43:04 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 290 | r | age-vs-mean-word-length.r | #!/usr/bin/env Rscript
data = read.csv("statistics/age-vs-mean-word-length.csv")
age = data[,1]
mean_word_length = data[,2]
cor.test(age, mean_word_length)
pdf("plots/age-vs-mean-word-length.pdf")
plot(age, mean_word_length, xlab="Age", ylab="Mean Word Length")
|
bdface9f3dd1a5b724bd611b139d6ccc408cc908 | 1af7c5ac10271caca700e1c08bedbf7d0a654ecb | /calc.boiling.T.R | 6eab168931bd64dbc7a1924df5be4bb3b85fccc7 | [] | no_license | seihan/vlecalc | 2548e84e1c959fb7c316fa4acecefc9353119997 | 53f1b6251733f77b16a5fe0ef4ca2a263e3f68ab | refs/heads/master | 2021-06-08T07:10:56.052893 | 2019-04-25T08:47:28 | 2019-04-25T08:47:28 | 57,040,317 | 3 | 1 | null | null | null | null | UTF-8 | R | false | false | 180 | r | calc.boiling.T.R | calc.boiling.T <- function (Substance, P){
Substance <- sub.check(Substance)
temperature <- Antoine.T(Substance[1,2], Substance[1,3], Substance[1,4], P)
return(temperature)
} |
5c71eb588ed413ba534e5484f98b20e64e5d7cdb | cbe44f0dca20bba077801c4b14cf8c64d5b7d352 | /R/stage-014-technically-correct-weather-single-dataframe.R | 1f9cc51823ff0ba79823cdf40b97a7deda7ca8b1 | [] | no_license | s1888637/weathr | 1ced821f4bec8bf39cb38123960d9b59d3cc80d0 | 3adfd7fd3149ee2b8a2f32577d6ed5695b7450b4 | refs/heads/master | 2020-05-29T19:49:21.974074 | 2019-06-05T19:15:26 | 2019-06-05T19:15:26 | 189,339,729 | 0 | 1 | null | null | null | null | UTF-8 | R | false | false | 1,936 | r | stage-014-technically-correct-weather-single-dataframe.R | library(readr)
library(tidyr)
####################################################
# #
# EXPORTED FUNCTION #
# #
####################################################
#' stage_014
#'
#' Joins all technically-correct weather files found in `source_dir` into a
#' single data frame and saves two RDS versions in `destination_dir`: one
#' as-is (NAs included) and one restricted to complete observations.
#'
#' @export
stage_014 <- function(
  source_dir = DIR_TECHNICALLY_CORRECT_WEATHER_COMPLETE,
  destination_dir = DIR_TECHNICALLY_CORRECT_ALL,
  force = TRUE
) {
  combined <- join_data_frames(files_per_directory(source_dir))
  save_data_frame_with_nas(combined, destination_dir, force)
  save_data_frame_without_nas(combined, destination_dir, force)
}
####################################################
# #
# NOT EXPORTED FUNCTIONS #
# #
####################################################
# Persist the combined weather data frame (NAs included) as
# WEATHER_ALL_DATA_FRAMES inside `destination_dir`.
save_data_frame_with_nas <- function(
  data_frame_with_nas,
  destination_dir,
  force
) {
  save_rds_force(
    data_frame_with_nas,
    file.path(destination_dir, WEATHER_ALL_DATA_FRAMES),
    force
  )
}
# Drop rows with an NA in any of the core measurement columns, convert the
# station name to a factor, and persist the result as
# WEATHER_ALL_DATA_FRAMES_NO_NAS inside `destination_dir`.
save_data_frame_without_nas <- function(
  data_frame_with_nas,
  destination_dir,
  force
) {
  complete_rows <- drop_na(
    data_frame_with_nas,
    c(
      `temp_max_degrees_c`,
      `temp_min_degrees_c`,
      `average_temp_degrees_c`,
      `rain_mm`,
      `hours_sun`,
      `weather_station_name`
    )
  )
  complete_rows$weather_station_name <- as.factor(
    complete_rows$weather_station_name
  )
  save_rds_force(
    complete_rows,
    file.path(destination_dir, WEATHER_ALL_DATA_FRAMES_NO_NAS),
    force
  )
}
|
7909095daa475fb013613a0027b3fc6238caeb55 | abe81c69f28a487d9d638e88bc82a180659eb511 | /main.R | d3b28767863081d77230212bd462b9f07048d737 | [] | no_license | fkoh111/fetcher | d84d61ff97f149343b6c0cf3a55dc0987f5df603 | 580fa1f201bf698f92c7e0831c1456528987a841 | refs/heads/master | 2021-09-10T15:57:08.303763 | 2018-03-28T23:07:43 | 2018-03-28T23:07:43 | 115,722,651 | 0 | 0 | null | 2018-03-04T11:31:49 | 2017-12-29T12:58:30 | R | UTF-8 | R | false | false | 4,494 | r | main.R |
fetcher <- function(user, verbose = TRUE, path = NULL){
  # Fetch the complete follower list of a Twitter account via rtweet,
  # working around API rate limits for accounts with more than 90,000
  # followers by looking users up in batches with 15-minute pauses.
  #
  # Args:
  #   user:    a Twitter screen name or user id.
  #   verbose: if TRUE, print progress messages with rough time estimates.
  #   path:    optional directory for the temporary follower-id chunk files;
  #            defaults to tempdir().
  #
  # Returns:
  #   A data frame with one row per follower of `user`, as returned by
  #   rtweet::lookup_users().

  # Per-run scratch directory for the follower-id chunks. The working
  # directory is restored on exit; registering on.exit() immediately after
  # setwd() (the original only registered it at the very end) means it also
  # runs if an error occurs part-way through.
  root <- getwd()
  tmp_root <- if (is.null(path)) tempdir() else path
  tmp_path <- tempfile(pattern = user, tmpdir = tmp_root)
  dir.create(tmp_path)
  setwd(tmp_path)
  on.exit(setwd(root), add = TRUE)

  # Rate-limit bookkeeping: at most 90,000 ids per lookup_users() batch,
  # with a 15-minute wait between batches.
  param_sleep <- 900    # seconds between batches (15 min)
  param_users <- 90000  # maximum ids per lookup_users() batch

  n_follower_ids <- lookup_users(user)$followers_count
  if (verbose) {
    trunc_follower_time <- sum(trunc(n_follower_ids / param_users) * param_sleep)
    follower_ids_estimate <- format(Sys.time() + trunc_follower_time, format = '%H:%M:%S')
    message("Starting to fetch ", n_follower_ids, " follower IDs. Expects to be done at ", follower_ids_estimate, ".")
  }

  # Fetch all follower ids; get_followers() itself waits on rate limits.
  follower_ids <- get_followers(user, n = as.integer(n_follower_ids), parse = TRUE, retryonratelimit = TRUE, verbose = FALSE)

  # Split the ids into chunks of at most 90,000 and persist each chunk as a
  # .txt file in the scratch directory.
  chunk_follower_ids <- split(follower_ids, (seq(nrow(follower_ids)) - 1) %/% param_users)
  mapply(write.table, x = chunk_follower_ids, row.names = FALSE, col.names = FALSE,
         file = paste(names(chunk_follower_ids), "txt", sep = "."))

  # Read the chunks back in.
  listed_ids <- list.files(path = tmp_path, pattern = "*.txt", full.names = TRUE)
  read_ids <- lapply(listed_ids, read.table)

  if (verbose && length(read_ids) > 1) {
    users_estimate <- format(Sys.time() + length(read_ids) * param_sleep, format = '%H:%M:%S')
    message("Starting to look up users. Expects to be done at ", users_estimate, ".")
  }

  # Look up user records chunk by chunk, pausing between chunks (but not
  # after the last one) to stay under the rate limit.
  # FIX: the original preallocated a logical vector (rep(NA, ...)) and
  # assigned lapply() results through `[<-`, and -- despite announcing the
  # pause -- never actually called Sys.sleep().
  followers <- vector("list", length(read_ids))
  for (i in seq_along(read_ids)) {
    followers[[i]] <- lookup_users(read_ids[[i]])
    if (i < length(read_ids)) {
      if (verbose) {
        sleep_estimate <- format(Sys.time() + param_sleep, format = '%H:%M:%S')
        message("Avoiding rate limit by sleeping for 15 minutes. Will start again at approximately ", sleep_estimate, ".")
      }
      Sys.sleep(param_sleep)
    }
  }

  # Bind the per-chunk data frames into one (rtweet helper).
  binded_followers <- do_call_rbind(followers)
  if (verbose) {
    message("Jobs done at ", format(Sys.time(), format = '%H:%M:%S'), ".")
  }
  return(binded_followers)
}
# Function example.
library("rtweet")  # provides get_followers(), lookup_users(), do_call_rbind() used by fetcher()
# Fetch all followers of @fkoh111, keeping the temporary id chunks on the Desktop.
fetched_followers <- fetcher(user = "fkoh111", path = "~/Desktop/", verbose = TRUE)
|
dd4b2f85f2e9784f049cb97eb50299edd4d8f7ee | 72279a412260a94bdf5ae70db4cea2e153689331 | /ForageFishModel/Misc/LifeHistoryTraits.R | a659747457f0ccf22a5e0cb052a3ccd5a5585627 | [] | no_license | koehnl/Seabird_ForageFish_model | 21b599eef6e5c4d143c2e21f2bf41148434eb691 | 54a105bf03eda27e4b6214208e7a3bd733de5207 | refs/heads/main | 2023-03-21T18:01:37.217260 | 2021-03-14T23:07:08 | 2021-03-14T23:07:08 | 344,362,258 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 867 | r | LifeHistoryTraits.R | library(rfishbase)
library(ggplot2)
library(plyr)
library(dplyr)
# maturity
# length_weight
# length_frequency
# reproduction
length_freq(species_list = "Sardina pilchardus",limit = 200)
lw <- length_weight(species_list = "Sardina pilchardus",limit = 200)
plot(0:30,lw$a[1]*(0:30)^lw$b[1],type="l",
col = rainbow(n=15)[1],
ylim=c(0,400),xlab="Length (mm)",ylab="Weight")
for(i in 2:nrow(lw)){
lines(0:30,lw$a[i]*(0:30)^lw$b[i],col=rainbow(n=15)[i])
}
species <- c("Sardina pilchardus","Sardinops sagax","Sardinella aurita")
mat.table <- maturity(species_list = species)
summ.table <- mat.table %>% group_by(sciname) %>% summarize(minagemat = min(AgeMatMin,na.rm=T),
maxagemat = max(AgeMatMin,na.rm=T))
par(mfrow=c(1,1))
ggplot(mat.table, aes(x=Locality, y=Lm)) + geom_bar(stat = "identity")
|
53077b6aae16f0016a61ba6b277f0629c7717b38 | 658168cca6aab960412edf481a5049113ce888ed | /section_9.R | 34063a4d2103a68209fbf5927870a93411b440ae | [] | no_license | equinn1/MTH420_Spring2019 | c93ab6f8085aaeddabf12c8bdf3fca13e95845d2 | 36b35817cdc194ad8512a15319fce244d365b2ff | refs/heads/master | 2020-04-16T09:04:44.877623 | 2019-03-11T16:58:24 | 2019-03-11T16:58:24 | 165,450,162 | 1 | 1 | null | null | null | null | UTF-8 | R | false | false | 7,406 | r | section_9.R | rm(list=ls())
library(rethinking)
## R code 9.1
num_weeks <- 1e5
positions <- rep(0,num_weeks)
current <- 10
for ( i in 1:num_weeks ) {
# record current position
positions[i] <- current
# flip coin to generate proposal
proposal <- current + sample( c(-1,1) , size=1 )
# now make sure he loops around the archipelago
if ( proposal < 1 ) proposal <- 10
if ( proposal > 10 ) proposal <- 1
# move?
prob_move <- proposal/current
current <- ifelse( runif(1) < prob_move , proposal , current )
}
## R code 9.2
D <- 10
T <- 1e3
Y <- rmvnorm(T,rep(0,D),diag(D))
rad_dist <- function( Y ) sqrt( sum(Y^2) )
Rd <- sapply( 1:T , function(i) rad_dist( Y[i,] ) )
dens( Rd )
## R code 9.3
# U needs to return neg-log-probability
myU4 <- function( q , a=0 , b=1 , k=0 , d=1 ) {
muy <- q[1]
mux <- q[2]
U <- sum( dnorm(y,muy,1,log=TRUE) ) + sum( dnorm(x,mux,1,log=TRUE) ) +
dnorm(muy,a,b,log=TRUE) + dnorm(mux,k,d,log=TRUE)
return( -U )
}
## R code 9.4
# gradient function
# need vector of partial derivatives of U with respect to vector q
myU_grad4 <- function( q , a=0 , b=1 , k=0 , d=1 ) {
muy <- q[1]
mux <- q[2]
G1 <- sum( y - muy ) + (a - muy)/b^2 #dU/dmuy
G2 <- sum( x - mux ) + (k - mux)/d^2 #dU/dmuy
return( c( -G1 , -G2 ) ) # negative bc energy is neg-log-prob
}
# test data
set.seed(7)
y <- rnorm(50)
x <- rnorm(50)
x <- as.numeric(scale(x))
y <- as.numeric(scale(y))
## R code 9.5
library(shape) # for fancy arrows
Q <- list()
Q$q <- c(-0.1,0.2)
pr <- 0.3
plot( NULL , ylab="muy" , xlab="mux" , xlim=c(-pr,pr) , ylim=c(-pr,pr) )
step <- 0.03
L <- 11 # 0.03/28 for U-turns --- 11 for working example
n_samples <- 4
path_col <- col.alpha("black",0.5)
points( Q$q[1] , Q$q[2] , pch=4 , col="black" )
for ( i in 1:n_samples ) {
Q <- HMC2( myU4 , myU_grad4 , step , L , Q$q )
if ( n_samples < 10 ) {
for ( j in 1:L ) {
K0 <- sum(Q$ptraj[j,]^2)/2 # kinetic energy
lines( Q$traj[j:(j+1),1] , Q$traj[j:(j+1),2] , col=path_col , lwd=1+2*K0 )
}
points( Q$traj[1:L+1,] , pch=16 , col="white" , cex=0.35 )
Arrows( Q$traj[L,1] , Q$traj[L,2] , Q$traj[L+1,1] , Q$traj[L+1,2] ,
arr.length=0.35 , arr.adj = 0.7 )
text( Q$traj[L+1,1] , Q$traj[L+1,2] , i , cex=0.8 , pos=4 , offset=0.4 )
}
points( Q$traj[L+1,1] , Q$traj[L+1,2] , pch=ifelse( Q$accept==1 , 16 , 1 ) ,
col=ifelse( abs(Q$dH)>0.1 , "red" , "black" ) )
}
## R code 9.6
HMC2 <- function (U, grad_U, epsilon, L, current_q) {
q = current_q
p = rnorm(length(q),0,1) # random flick - p is momentum.
current_p = p
# Make a half step for momentum at the beginning
p = p - epsilon * grad_U(q) / 2
# initialize bookkeeping - saves trajectory
qtraj <- matrix(NA,nrow=L+1,ncol=length(q))
ptraj <- qtraj
qtraj[1,] <- current_q
ptraj[1,] <- p
## R code 9.7
# Alternate full steps for position and momentum
for ( i in 1:L ) {
q = q + epsilon * p # Full step for the position
# Make a full step for the momentum, except at end of trajectory
if ( i!=L ) {
p = p - epsilon * grad_U(q)
ptraj[i+1,] <- p
}
qtraj[i+1,] <- q
}
## R code 9.8
# Make a half step for momentum at the end
p = p - epsilon * grad_U(q) / 2
ptraj[L+1,] <- p
# Negate momentum at end of trajectory to make the proposal symmetric
p = -p
# Evaluate potential and kinetic energies at start and end of trajectory
current_U = U(current_q)
current_K = sum(current_p^2) / 2
proposed_U = U(q)
proposed_K = sum(p^2) / 2
# Accept or reject the state at end of trajectory, returning either
# the position at the end of the trajectory or the initial position
accept <- 0
if (runif(1) < exp(current_U-proposed_U+current_K-proposed_K)) {
new_q <- q # accept
accept <- 1
} else new_q <- current_q # reject
return(list( q=new_q, traj=qtraj, ptraj=ptraj, accept=accept ))
}
## R code 9.9
library(rethinking)
data(rugged)
d <- rugged
d$log_gdp <- log(d$rgdppc_2000)
dd <- d[ complete.cases(d$rgdppc_2000) , ]
dd$log_gdp_std <- dd$log_gdp / mean(dd$log_gdp)
dd$rugged_std <- dd$rugged / max(dd$rugged)
dd$cid <- ifelse( dd$cont_africa==1 , 1 , 2 )
## R code 9.10
m8.5 <- quap(
alist(
log_gdp_std ~ dnorm( mu , sigma ) ,
mu <- a[cid] + b[cid]*( rugged_std - 0.215 ) ,
a[cid] ~ dnorm( 1 , 0.1 ) ,
b[cid] ~ dnorm( 0 , 0.3 ) ,
sigma ~ dexp( 1 )
) ,
data=dd )
precis( m8.5 , depth=2 )
## R code 9.11
dat_slim <- list(
log_gpd_std = dd$log_gdp_std,
rugged_std = dd$rugged_std,
cid = as.integer( dd$cid )
)
str(dat_slim)
## R code 9.12
m9.1 <- ulam(
alist(
log_gdp_std ~ dnorm( mu , sigma ) ,
mu <- a[cid] + b[cid]*( rugged_std - 0.215 ) ,
a[cid] ~ dnorm( 1 , 0.1 ) ,
b[cid] ~ dnorm( 0 , 0.3 ) ,
sigma ~ dexp( 1 )
) ,
data=dat_slim , chains=1 )
## R code 9.13
precis( m9.1 , depth=2 )
## R code 9.14
m9.1 <- ulam(
alist(
log_gdp_std ~ dnorm( mu , sigma ) ,
mu <- a[cid] + b[cid]*( rugged_std - 0.215 ) ,
a[cid] ~ dnorm( 1 , 0.1 ) ,
b[cid] ~ dnorm( 0 , 0.3 ) ,
sigma ~ dexp( 1 )
) ,
data=dat_slim , chains=4 , cores=4 , iter=1000 )
## R code 9.15
show( m9.1 )
## R code 9.16
precis( m9.1 , 2 )
## R code 9.17
pairs( m9.1 )
## R code 9.18
traceplot( m9.1 )
## R code 9.19
y <- c(-1,1)
set.seed(11)
m9.2 <- ulam(
alist(
y ~ dnorm( mu , sigma ) ,
mu <- alpha ,
alpha ~ dnorm( 0 , 1000 ) ,
sigma ~ dexp( 0.0001 )
) ,
data=list(y=y) , chains=2 )
## R code 9.20
precis( m9.2 )
## R code 9.21
set.seed(11)
m9.3 <- ulam(
alist(
y ~ dnorm( mu , sigma ) ,
mu <- alpha ,
alpha ~ dnorm( 1 , 10 ) ,
sigma ~ dexp( 1 )
) ,
data=list(y=y) , chains=2 )
precis( m9.3 )
## R code 9.22
set.seed(41)
y <- rnorm( 100 , mean=0 , sd=1 )
## R code 9.23
m9.4 <- ulam(
alist(
y ~ dnorm( mu , sigma ) ,
mu <- a1 + a2 ,
a1 ~ dnorm( 0 , 1000 ),
a2 ~ dnorm( 0 , 1000 ),
sigma ~ dexp( 1 )
) ,
data=list(y=y) , chains=2 )
precis( m9.4 )
## R code 9.24
m9.5 <- ulam(
alist(
y ~ dnorm( mu , sigma ) ,
mu <- a1 + a2 ,
a1 ~ dnorm( 0 , 10 ),
a2 ~ dnorm( 0 , 10 ),
sigma ~ dexp( 1 )
) ,
data=list(y=y) , chains=2 )
precis( m9.5 )
## R code 9.25
mp <- map2stan(
alist(
a ~ dnorm(0,1),
b ~ dcauchy(0,1)
),
data=list(y=1),
start=list(a=0,b=0),
iter=1e4, warmup=100 , WAIC=FALSE )
## R code 9.26
N <- 100 # number of individuals
height <- rnorm(N,10,2) # sim total height of each
leg_prop <- runif(N,0.4,0.5) # leg as proportion of height
leg_left <- leg_prop*height + # sim left leg as proportion + error
rnorm( N , 0 , 0.02 )
leg_right <- leg_prop*height + # sim right leg as proportion + error
rnorm( N , 0 , 0.02 )
# combine into data frame
d <- data.frame(height,leg_left,leg_right)
## R code 9.27
m5.8s <- map2stan(
alist(
height ~ dnorm( mu , sigma ) ,
mu <- a + bl*leg_left + br*leg_right ,
a ~ dnorm( 10 , 100 ) ,
bl ~ dnorm( 2 , 10 ) ,
br ~ dnorm( 2 , 10 ) ,
sigma ~ dcauchy( 0 , 1 )
) ,
data=d, chains=4,
start=list(a=10,bl=0,br=0,sigma=1) )
## R code 9.28
m5.8s2 <- map2stan(
alist(
height ~ dnorm( mu , sigma ) ,
mu <- a + bl*leg_left + br*leg_right ,
a ~ dnorm( 10 , 100 ) ,
bl ~ dnorm( 2 , 10 ) ,
br ~ dnorm( 2 , 10 ) & T[0,] ,
sigma ~ dcauchy( 0 , 1 )
) ,
data=d, chains=4,
start=list(a=10,bl=0,br=0,sigma=1) )
|
b1f1f47a82578313c48cb9af07a3c162d45a5ee0 | c32028bf0a547b050c48250d8b024b4b1f344f19 | /annotFunctions.R | 3d4867585f5e40c5c3309f1de4b3249553a52a06 | [] | no_license | markdunning/build-annotation-packages | dd45f852ae9a5a5a70eb8cba7dfb9030c0150d1f | e6dbd0c7113c56af1b2e212528461884c56bdea6 | refs/heads/master | 2016-09-05T18:27:24.125648 | 2014-10-13T08:29:43 | 2014-10-13T08:29:43 | 14,106,917 | 1 | 0 | null | null | null | null | UTF-8 | R | false | false | 6,066 | r | annotFunctions.R | AddNewBimap = function(file, name, Index, prefix){
cmd = paste(prefix, toupper(name), " <- createSimpleBimap(\"ExtraInfo\", \"", Index, "\",","\"", name, "\"",", datacache, \"", toupper(name), "\",", "\"",prefix, ".db\")\n",sep="")
cat(cmd, file= file, append=TRUE)
cat("\n", file= file, append=TRUE)
}
insertExtraInfo = function(dbcon, extraInfo){
names = colnames(extraInfo)
cmd = "INSERT INTO ExtraInfo VALUES ("
for(i in 1:(length(names)-1)){
cmd = paste(cmd, "$", names[i], ", ", sep="")
}
cmd = paste(cmd, "$", names[length(names)], ")",sep="")
bval <- dbBeginTransaction(dbcon)
gval <- dbGetPreparedQuery(dbcon, cmd, bind.data = extraInfo)
cval <- dbCommit(dbcon)
}
makeSqlTable= function(dbcon, names){
cmd = "CREATE Table ExtraInfo ("
for(i in 1:(length(names)-1)){
cmd = paste(cmd, names[i], " TEXT, ",sep="")
}
cmd = paste(cmd, names[length(names)], " TEXT)",sep="")
dbGetQuery(dbcon, cmd)
}
makeBioconductorAnnotation = function(baseName, chipName, refseq, IlluminaID, extraInfo, outDir, version, manTemplate, organism = "human"){
##transform refseq into form required by AnnotationDbi
refseq = lapply(as.character(refseq), function(x) gsub('[[:space:]]', ';', x))
rs = paste(baseName, "_refseq.txt",sep="")
###write it out to a file
write.table(cbind(IlluminaID, refseq), file=rs,
sep="\t", quote=FALSE, col.names=FALSE, row.names=FALSE)
prefix=paste("illumina", baseName,sep="")
if(organism == "human"){
makeDBPackage("HUMANCHIP_DB",affy=FALSE,
prefix=prefix,
fileName=rs,
baseMapType="refseq",
outputDir = outDir,
version=version,
manufacturer = "Illumina",
chipName = chipName,
manufacturerUrl = "http://www.illumina.com",
author ="Mark Dunning, Andy Lynch, Matthew Eldridge",
maintainer="Mark Dunning <mark.dunning@cruk.cam.ac.uk>"
)
}
else if (organism == "mouse"){
makeDBPackage("MOUSECHIP_DB",affy=FALSE,
prefix=prefix,
fileName=rs,
baseMapType="refseq",
outputDir = outDir,
version=version,
manufacturer = "Illumina",
chipName = chipName,
manufacturerUrl = "http://www.illumina.com",
author ="Mark Dunning, Andy Lynch, Matthew Eldridge",
maintainer="Mark Dunning <mark.dunning@cruk.cam.ac.uk>"
)
}
else if (organism == "rat"){
makeDBPackage("RATCHIP_DB",affy=FALSE,
prefix=prefix,
fileName=rs,
baseMapType="refseq",
outputDir = outDir,
version=version,
manufacturer = "Illumina",
chipName = chipName,
manufacturerUrl = "http://www.illumina.com",
author ="Mark Dunning, Andy Lynch, Matthew Eldridge",
maintainer="Mark Dunning <mark.dunning@cruk.cam.ac.uk>"
)
}
else stop("Invalid organism definiton\n")
newPkgPath = paste(outDir, "/",prefix, ".db",sep="")
newSQL = paste(newPkgPath,"/inst/extdata/", prefix,".sqlite", sep="")
###Make the new SQL file writable
system(paste("chmod 755", newSQL))
drv = dbDriver("SQLite")
dbcon = dbConnect(drv, dbname=newSQL)
makeSqlTable(dbcon, colnames(extraInfo))
insertExtraInfo(dbcon, extraInfo)
cat("Checking that insert worked\n")
dbGetQuery(dbcon, "SELECT * FROM ExtraInfo LIMIT 10")
#sqlCreate = "CREATE Table ExtraInfo (IlluminaID TEXT, ArrayAddress TEXT, ProbeQuality TEXT, CodingZone TEXT, ProbeSequence TEXT, OtherMatches TEXT)"
#dbGetQuery(dbcon, sqlCreate)
#sqlInsert <- "INSERT INTO ExtraInfo VALUES ($IlluminaID, $ArrayAddress, $ProbeQuality, $CodingZone, $ProbeSequence, $OtherMatches)"
#bval <- dbBeginTransaction(dbcon)
#gval <- dbGetPreparedQuery(dbcon, sqlInsert, bind.data = extraInfo)
#cval <- dbCommit(dbcon)
zzzFile = paste(newPkgPath, "/R/zzz.R",sep="")
##make a copy of zzz.r
system(paste("cp ", zzzFile, " ", zzzFile,".original",sep=""))
###Need to add these kinds of definitions to the zzz file
##illuminaHumanv3PROBEQUALITY <- createSimpleBimap("probeinfo","ProbeID","ProbeQuality",datacache,"PROBEQUALITY","illuminaHumanv3.db")
##illuminaHumanv3CODINGZONE <- createSimpleBimap("probeinfo","ProbeID","CodingZone",datacache,"CODINGZONE","illuminaHumanv3.db")
cat("##Custom Bimaps for the package\n\n", file=zzzFile,append=TRUE)
###We assume that column 2 of ExtraInfo is ArrayAddressID
for(i in 2:ncol(extraInfo)){
AddNewBimap(zzzFile, colnames(extraInfo)[i], colnames(extraInfo)[1], prefix)
}
##Export them in the namespace
nspace = paste(newPkgPath, "/NAMESPACE",sep="")
system(paste("cp ", nspace, " ", nspace,".original",sep=""))
cat("##Custom Bimaps exported\n\n", file=nspace,append=TRUE)
newFns = NULL
for(i in 2:ncol(extraInfo)){
newFns[i-1] = paste(prefix, toupper(colnames(extraInfo)[i]),sep="")
}
cat(paste("export(", paste(newFns, collapse=" , "), ", ", prefix,"listNewMappings ,", prefix, "fullReannotation)\n",sep=""),file=nspace, append=TRUE)
cat(zzzFile, "##Define a utility function that lists the funtions we've just made\n",append=TRUE)
cmd = paste(prefix,"listNewMappings = function(){\n",sep="")
for(i in 1:length(newFns)){
cmd = paste(cmd, "cat(\"", newFns[i], "()",sep="")
cmd = paste(cmd, "\\n\")",sep="")
cmd = paste(cmd,"\n")
}
cmd = paste(cmd, "}\n",sep="")
cat(cmd, file= zzzFile, append=TRUE)
###utility function to retrieve the full table
cmd = paste(prefix,"fullReannotation = function(){\n",sep="")
cmd = paste(cmd, "dbGetQuery(", prefix, "_dbconn(), \"SELECT * FROM ExtraInfo\")\n}\n",sep="")
cat(cmd, file= zzzFile, append=TRUE)
###Now copy the template doc
newManPage = paste(outDir, "/",prefix,".db/man/", prefix, "NewMappings.Rd",sep="")
system(paste("cp ", manTemplate, " ", newManPage,sep=""))
tmp = system(paste("sed \'s/PKGNAME/",prefix, "/g' ", newManPage,sep=""),intern=TRUE)
write(tmp, newManPage)
}
|
5831da9b28354421235d27752e7a2bcdfc9ef9fa | 9aafde089eb3d8bba05aec912e61fbd9fb84bd49 | /codeml_files/newick_trees_processed/8156_0/rinput.R | 4d4ed9b37460569c19b2a1e77ac0dca39173afe5 | [] | no_license | DaniBoo/cyanobacteria_project | 6a816bb0ccf285842b61bfd3612c176f5877a1fb | be08ff723284b0c38f9c758d3e250c664bbfbf3b | refs/heads/master | 2021-01-25T05:28:00.686474 | 2013-03-23T15:09:39 | 2013-03-23T15:09:39 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 135 | r | rinput.R | library(ape)
testtree <- read.tree("8156_0.txt")
unrooted_tr <- unroot(testtree)
write.tree(unrooted_tr, file="8156_0_unrooted.txt") |
c7fcf05ccd882b651ffb091afd9d880d6101891b | b87fb6524eff9b18c8561879b683da1569ccb6c5 | /Project_2_Forecast_Sales/mymain.R | 7468196ccf96fd947fd2f17cfdb8abdf62142c83 | [] | no_license | HongfeiLi365/Statistical-Learning-Projects | 90df77b5df667f582792a49630b45d1b0f0fff69 | 19d1857a6fca4305a32cc03ca9b8490b69aaf9e2 | refs/heads/master | 2022-04-14T08:56:21.891167 | 2020-04-12T01:27:55 | 2020-04-12T01:27:55 | 254,976,460 | 1 | 0 | null | null | null | null | UTF-8 | R | false | false | 10,011 | r | mymain.R | library(forecast)
library(lubridate)
#==================== functions ========================
# --------------implement missing value-----------------
addmiss=function(train){
exi_record<-table(train$Dept,train$Store)
exi<-which(exi_record!=0, arr.ind=TRUE, useNames = FALSE)
exi_s<-unique(train$Store)
for (i in unique(train$Date)){
holi<-unique(train[train$Date==i,]$IsHoliday)
t_d<-unique(train[train$Date==i,]$Date)
tmp_train=train[train$Date==i,]
records=table(tmp_train$Dept,tmp_train$Store)
t_missing_records=which(records==0, arr.ind=TRUE, useNames = FALSE)
missing_records<-merge(exi,t_missing_records,all=FALSE)
missing_dept<-as.numeric(row.names(exi_record)[missing_records[,1]])
missing_store<-as.numeric(colnames(exi_record)[missing_records[,2]])
if( length(missing_records)>0 | length(missing_dept)>0 ){
store_average=rep(0,length(exi_s))
for( j in 1:length(exi_s)){
store_average[j]=mean(tmp_train[tmp_train$Store==exi_s[j],"Weekly_Sales"])
}
store_average=data.frame(Store=exi_s,Weekly_Sales=store_average)
missing_add<-data.frame(Store=missing_store,Dept=missing_dept,Date=rep(t_d,length(missing_dept)),IsHoliday=rep(holi,length(missing_dept)))
missing_add<-merge(missing_add,store_average, by="Store", all.x = TRUE)
train=rbind(train,missing_add)
}
}
return(train)
}
#--------------add column Wk and Column Yr---------------
addWeek = function(train){
train.wk = train$Date
train.wk = train.wk - as.Date("2010-02-05") # date is now 0, 7, 14, ...
train.wk = train.wk/7 + 5 # make 2010-2-5 as '5', and date becomes continuous integers, i.e., 5, 6, 7, ...
train.wk = as.numeric(train.wk) %% 52 ## 52 weeks in a year
train$Wk = train.wk
train$Yr = year(train$Date)
return(train)
}
# ------------ subset corresponding test ----------------
subtest = function(test, t){
if (t<=9){
beginday = as.Date(paste("2011-", t+2, "-01",sep = ''),format = "%Y-%m-%d")
endday = as.Date(paste("2011-", t+3, "-01",sep = ''),format = "%Y-%m-%d")
} else if (t==10){
beginday = as.Date("2011-12-01",format = "%Y-%m-%d")
endday = as.Date("2012-01-01",format = "%Y-%m-%d")
} else {
beginday = as.Date(paste("2012-", t-10, "-01",sep = ''),format = "%Y-%m-%d")
endday = as.Date(paste("2012-", t-9, "-01",sep = ''),format = "%Y-%m-%d")
}
currenttest = test[(beginday <= test$Date) & (test$Date < endday),]
return(currenttest)
}
#================== simple model 1 ====================
simple = function(train, test){
store = sort(unique(test$Store))
n.store = length(store)
dept = sort(unique(test$Dept))
n.dept = length(dept)
for (s in 1:n.store){
for (d in 1:n.dept){
# cat("[Model 1] Store: ", store[s], "\t Dept ", dept[d], "\n")
# find the data for (store, dept) = (s, d)
test.id = which(test$Store == store[s] & test$Dept == dept[d])
test.temp = test[test.id, ]
train.id = which(train$Store == store[s] & train$Dept == dept[d])
train.temp = train[train.id, ]
for (i in 1:length(test.id)){
id = which(train.temp$Wk == test.temp[i,]$Wk & train.temp$Yr == test.temp[i,]$Yr - 1)
threeWeeksId = c(id - 1, id, id + 1) ## three weeks in the last year
tempSales = train.temp[threeWeeksId, 'Weekly_Sales']
if (length(tempSales) == 0){
test$Weekly_Pred1[test.id[i]] = mean(train.temp$Weekly_Sales)
} else {
test$Weekly_Pred1[test.id[i]] = mean(tempSales)
}
}
}
}
# deal with NA and NaN
s_d_w<-test[is.na(test$Weekly_Pred1) | is.nan(test$Weekly_Pred1),c("Store","Dept","Wk")]
for(k in 1:nrow(s_d_w))
{
s <- s_d_w$Store[k]
d <- s_d_w$Dept[k]
w <- s_d_w$Wk[k]
mean_s<-mean(train[train$Store==s,]$Weekly_Sales)
test$Weekly_Pred1[test$Store==s & test$Dept==d & test$Wk == w]<- mean_s
}
# set holiday sales as the same as the week last year
s_d_w<-test[test$Wk == 0 | test$Wk == 6 | test$Wk == 47 ,c("Store","Dept","Wk")]
for(k in 1:nrow(s_d_w))
{
s <- s_d_w$Store[k]
d <- s_d_w$Dept[k]
w <- s_d_w$Wk[k]
salesholiday<-mean(train$Weekly_Sales[train$Store==s & train$Dept == d & train$Wk == w])
test$Weekly_Pred1[test$Store==s & test$Dept==d & test$Wk == w]<- salesholiday
}
test$Weekly_Pred1[is.nan(test$Weekly_Pred1)] = 0
return(test)
}
#================== ts model 2 ====================
model2<-function(X,Y)
{
train_s_d<-unique(X[,c("Store","Dept")])
n_train<-nrow(train_s_d)
test_s_d<-unique(Y[,c("Store","Dept")])
n_test<-nrow(test_s_d)
#all in#
all_s_d<-merge(train_s_d,test_s_d,by=c("Store","Dept"))
n_all<-nrow(all_s_d)
#predict week#
n_pred_w<-nrow(Y[Y$Store==1&Y$Dept==1,])
#pred_w<-test[test$Store==1&test$Dept==1,]$Date
for(k in 1: n_all)
{
s<-all_s_d$Store[k]
d<-all_s_d$Dept[k]
if (Y$Yr[1]<2012|(Y$Yr[1]==2012&Y$Wk[1]<=5)){
Y[Y$Store==s&Y$Dept==d,]$Weekly_Pred2 <- Y[Y$Store==s&Y$Dept==d,]$Weekly_Pred1
}
else{
sale<-X[(X$Store==s)&(X$Dept==d),]$Weekly_Sales
#print(d)
#print(length(sale))
#aa=X[(X$Store==s)&(X$Dept==d),]
#if (nrow(aa)!=108){
# print(tail(aa))
#}
ts_sale<-ts(sale,frequency = 52)
n_pred<- nrow(Y[Y$Store==s&Y$Dept==d,])
#pred<-stlf(ts_sale, h=n_pred, s.window=3, method='arima',ic='bic')
#pred<-forecast.HoltWinters(ts_sale, h=n_pred, seasonal="additive")
#pred<- holt(ts_sale, h=n_pred, seasonal="additive")
pred<- ses(ts_sale, h=n_pred, initial='simple',alpha=0.2)
#pred<- rwf(ts_sale, h=n_pred, drift=T)
pred<-as.numeric(pred$mean)
Y[Y$Store==s&Y$Dept==d,]$Weekly_Pred2<-pred[1:n_pred]
}
}
#only in test#
if(n_all!=n_test){
otest_s_d<-Y[is.na(Y$Weekly_Pred2),c("Store","Dept")]
n_otest<-nrow(otest_s_d)
for(k in n_otest)
{
s<-otest_s_d$Store[k]
d<-otest_s_d$Dept[k]
mean_s<-mean(X[X$Store==s,]$Weekly_Sales)
n_pred<- nrow(Y[Y$Store==s&Y$Dept==d,])
Y[Y$Store==s&Y$Dept==d,]$Weekly_Pred2<-rep(mean_s,n_pred)
}
}
na_ix<-which(is.na(Y$Weekly_Pred2))
for(l in na_ix)
{
Y[l,]$Weekly_Pred2<-mean(X[X$Store==Y[l,"Store"],]$Weekly_Sales)
}
return(Y)
}
#================== ts model 3 ===========forecast=========
model3<-function(X,Y)
{
train_s_d<-unique(X[,c("Store","Dept")])
n_train<-nrow(train_s_d)
test_s_d<-unique(Y[,c("Store","Dept")])
n_test<-nrow(test_s_d)
#all in#
all_s_d<-merge(train_s_d,test_s_d,by=c("Store","Dept"))
n_all<-nrow(all_s_d)
#predict week#
n_pred_w<-nrow(Y[Y$Store==1&Y$Dept==1,])
#pred_w<-test[test$Store==1&test$Dept==1,]$Date
for(k in 1: n_all)
{
s<-all_s_d$Store[k]
d<-all_s_d$Dept[k]
sale<-X[(X$Store==s)&(X$Dept==d),]$Weekly_Sales
ts_sale<-ts(sale,frequency = 52)
n_pred<- nrow(Y[Y$Store==s&Y$Dept==d,])
pred<-forecast(ts_sale,h=n_pred)
pred<-as.numeric(pred$mean)
Y[Y$Store==s&Y$Dept==d,]$Weekly_Pred3<-pred[1:n_pred]
}
#only in test#
if(n_all!=n_test){
otest_s_d<-Y[is.na(Y$Weekly_Pred3),c("Store","Dept")]
n_otest<-nrow(otest_s_d)
for(k in n_otest)
{
s<-otest_s_d$Storep[k]
d<-otest_s_d$Dept[k]
mean_s<-mean(X[X$Store==s,]$Weekly_Sales)
n_pred<- nrow(Y[Y$Store==s&Y$Dept==d,])
Y[Y$Store==s&Y$Dept==d,]$Weekly_Pred3<-rep(mean_s,n_pred)
}
}
na_ix<-which(is.na(Y$Weekly_Pred3))
for(l in na_ix)
{
Y[l,]$Weekly_Pred3<-mean(X[X$Store==Y[l,"Store"],]$Weekly_Sales)
}
return(Y$Weekly_Pred3)
}
#====================== main ============================
# this part will run when 'source(mymain.r)'
# the goal of this part should be setting up some varibales that will be used in functions
# these variables, such as dept.names and store.names, won't change during loop
train$Date = as.Date(train$Date, '%Y-%m-%d')
test$Date = as.Date(test$Date, '%Y-%m-%d')
dept.names = sort(unique(train$Dept))
store.names =sort(unique(train$Store))
n.dept=length(dept.names)
n.store=length(store.names)
#===================== frame of predict() ==============
predict = function(){
if(t!=1){
# if not first iteration: update train
# t=1, Date=2011-03-04, newtest=NULL
train <<- rbind(train,newtest)
}
# ------------ preprocessing ------------------
train$Date = as.Date(train$Date, '%Y-%m-%d')
test$Date = as.Date(test$Date, '%Y-%m-%d')
# subset current test
currenttest = subtest(test,t)
# print(currenttest[is.na(currenttest$Store)])
# add miss
currenttrain = addmiss(train)
#print(currenttrain[is.na(currenttrain$Store),])
# add Wk and Yr
currenttrain = addWeek(currenttrain)
currenttest = addWeek(currenttest)
table(currenttrain$Store,currenttrain$Dept)
# ------------ predict -----------------------
# Call model 1
currentpred = simple(currenttrain, currenttest)
# Call model 2
currentpred = model2(currenttrain, currentpred)
# Call model 3
currentpred3 = model3(currenttrain, currenttest)
#rewite currenttest
currentpred$Weekly_Pred3<-currentpred3
#---------------------- merge test ----------------------------
if (t<=9){
beginday = as.Date(paste("2011-", t+2, "-01",sep = ''),format = "%Y-%m-%d")
endday = as.Date(paste("2011-", t+3, "-01",sep = ''),format = "%Y-%m-%d")
} else if (t==10){
beginday = as.Date("2011-12-01",format = "%Y-%m-%d")
endday = as.Date("2012-01-01",format = "%Y-%m-%d")
} else {
beginday = as.Date(paste("2012-", t-10, "-01",sep = ''),format = "%Y-%m-%d")
endday = as.Date(paste("2012-", t-9, "-01",sep = ''),format = "%Y-%m-%d")
}
testbind = test[(test$Date < beginday) | (test$Date >= endday),]
currentpred = subset(currentpred,select = -c(Wk, Yr))
test1 = rbind(testbind, currentpred)
test1$Date = as.factor(test1$Date)
test <<- test1
}
|
bfff7ad13003bba118d42e68045f883d87e83657 | d20eb97ae85f9d05905de61ff6b89dd2c21eeeb9 | /tests/testthat/test.SystemMetadata.R | 24e2d5b505e4a75c7c0f4dc4cc19966afde1372e | [
"Apache-2.0"
] | permissive | ropensci/datapack | 2967656b8e6342f84f9f0c8cf157dc54e8276c91 | 30d3be8b618f2c52d25c356e97680000feb101c9 | refs/heads/main | 2022-06-17T20:32:21.421341 | 2022-06-09T21:03:40 | 2022-06-09T21:03:40 | 23,672,990 | 35 | 13 | null | 2022-06-08T19:06:46 | 2014-09-04T17:53:33 | R | UTF-8 | R | false | false | 10,910 | r | test.SystemMetadata.R | sysmeta_test <- system.file("testfiles/sysmeta.xml", package="datapack")
sysmeta_test2 <- system.file("testfiles/sysmeta-v2.xml", package="datapack")
sysmeta_repfalse <- system.file("testfiles/sysmeta-v2-repfalse.xml", package="datapack")
sysmeta_repfalse_zero_reps <- system.file("testfiles/sysmeta-v2-repfalse-zero-reps.xml", package="datapack")
sysmeta_updated <- system.file("testfiles/sysmeta-updated.xml", package="datapack")
test_that("datapack library loads", {
expect_true(library(datapack, logical.return = TRUE))
})
test_that("SystemMetadata constructors", {
library(datapack)
sysmeta <- new("SystemMetadata")
expect_equal(sysmeta@serialVersion, 1)
expect_true(is.na(sysmeta@identifier))
sysmeta <- new("SystemMetadata", identifier="TestId", formatId="text/csv")
expect_equal(sysmeta@identifier, "TestId")
expect_equal(sysmeta@formatId, "text/csv")
})
test_that("XML SystemMetadata parsing works", {
library(datapack)
library(XML)
testid <- "doi:10.xxyy/AA/tesdoc123456789"
sysmeta <- new("SystemMetadata")
expect_equal(sysmeta@serialVersion, 1)
doc <- xmlParseDoc(sysmeta_test, asText=FALSE)
expect_match(xmlValue(xmlRoot(doc)[["identifier"]]), testid)
xml <- xmlRoot(doc)
#getEncoding(doc)
sysmeta <- parseSystemMetadata(sysmeta, xmlRoot(xml))
expect_match(sysmeta@identifier, testid)
expect_equal(nrow(sysmeta@accessPolicy), 5)
expect_match(as.character(sysmeta@accessPolicy$permission[[1]]), "read")
expect_true(sysmeta@archived)
csattrs <- xmlAttrs(xml[["checksum"]])
expect_match(sysmeta@checksumAlgorithm, csattrs[[1]])
expect_true(grep("urn:node:KNB", sysmeta@preferredNodes) > 0)
expect_true(grep("urn:node:mnUNM1", sysmeta@preferredNodes) > 0)
expect_true(grep("urn:node:BADNODE", sysmeta@blockedNodes) > 0)
rm(sysmeta)
rm(xml)
rm(doc)
rm(csattrs)
# Parse v2.0 system metadata
testid <- "0007f892-0d8f-4451-94e9-94d02ba5dd0d_0"
sysmeta <- new("SystemMetadata")
expect_equal(sysmeta@serialVersion, 1)
doc <- xmlParseDoc(sysmeta_test2, asText=FALSE)
expect_match(xmlValue(xmlRoot(doc)[["identifier"]]), testid)
xml <- xmlRoot(doc)
sysmeta <- parseSystemMetadata(sysmeta, xmlRoot(xml))
expect_match(sysmeta@identifier, testid)
expect_equal(nrow(sysmeta@accessPolicy), 1)
expect_match(as.character(sysmeta@accessPolicy$permission[[1]]), "read")
expect_false(sysmeta@archived)
csattrs <- xmlAttrs(xml[["checksum"]])
expect_match(sysmeta@checksumAlgorithm, csattrs[[1]])
expect_equal(sysmeta@seriesId, "3")
expect_equal(sysmeta@mediaType, "application/rdf+xml")
expect_equal(sysmeta@fileName, "testresmap.rdf")
# Parse v2.0 system metadata, checking parsing of replication policy
testid <- "0007f892-0d8f-4451-94e9-94d02ba5dd0d_0"
sysmeta <- new("SystemMetadata")
expect_equal(sysmeta@serialVersion, 1)
doc <- xmlParseDoc(sysmeta_repfalse, asText=FALSE)
expect_match(xmlValue(xmlRoot(doc)[["identifier"]]), testid)
xml <- xmlRoot(doc)
sysmeta <- parseSystemMetadata(sysmeta, xmlRoot(xml))
expect_false(sysmeta@replicationAllowed)
# Parse v2.0 system metadata, checking parsing when missing numReplicas
testid <- "arctic-data.9794.1"
sysmeta <- new("SystemMetadata")
expect_equal(sysmeta@serialVersion, 1)
doc <- xmlParseDoc(sysmeta_repfalse_zero_reps, asText=FALSE)
expect_match(xmlValue(xmlRoot(doc)[["identifier"]]), testid)
xml <- xmlRoot(doc)
sysmeta <- parseSystemMetadata(sysmeta, xmlRoot(xml))
expect_false(sysmeta@replicationAllowed)
})
# Round-trip test: parse a reference system-metadata document, mutate its
# access policy, serialize it, and compare against a serialized reference.
# NOTE: sysmeta_test / sysmeta_updated are XML fixture strings defined
# elsewhere in this test file.
test_that("XML SystemMetadata serialization works", {
library(datapack)
library(XML)
testid <- "doi:10.xxyy/AA/tesdoc123456789"
sysmeta <- new("SystemMetadata")
expect_equal(sysmeta@serialVersion, 1)
xml <- xmlParseDoc(sysmeta_test, asText=FALSE)
expect_match(xmlValue(xmlRoot(xml)[["identifier"]]), testid)
sysmeta <- parseSystemMetadata(sysmeta, xmlRoot(xml))
expect_match(sysmeta@identifier, testid)
expect_true(sysmeta@archived)
# Check if the access policy is serialized grouped by subjects
sysmeta <- addAccessRule(sysmeta, "bob", "read")
sysmeta <- addAccessRule(sysmeta, "alice", "read")
sysmeta <- addAccessRule(sysmeta, "bob", "write")
sysmeta <- addAccessRule(sysmeta, "alice", "write")
# Add an existing rule, to ensure that rules aren't duplicated in the serialized sysmeta
sysmeta <- addAccessRule(sysmeta, "CN=Subject2,O=Google,C=US,DC=cilogon,DC=org", "write")
xml <- serializeSystemMetadata(sysmeta)
# Compare the updated, serialized sysmeta with a reference
xmlRef <- xmlParseDoc(sysmeta_updated, asText=FALSE)
sysmetaRef <- new("SystemMetadata")
sysmetaUpdated <- parseSystemMetadata(sysmetaRef, xmlRoot(xmlRef))
xmlRef <- serializeSystemMetadata(sysmetaUpdated)
expect_equal(xml, xmlRef)
#cat(xml)
# Search for specific, expected items in the serialized sysmeta
expect_match(xml, "<d1:systemMetadata")
expect_match(xml, "<blockedMemberNode>urn:node:BADNODE</blockedMemberNode>")
expect_match(xml, "<preferredMemberNode>urn:node:KNB</preferredMemberNode>")
expect_match(xml, "<subject>public</subject>")
expect_match(xml, "<permission>read</permission>")
expect_match(xml, "<subject>CN=Subject2,O=Google,C=US,DC=cilogon,DC=org</subject>")
expect_match(xml, "<permission>changePermission</permission>")
# NOTE(review): the next assignment is immediately discarded by the new()
# call below, so the empty-obsoletes check runs on a fresh object only --
# confirm whether the parsed sysmeta was meant to be tested instead.
sysmeta@obsoletes <- ""
sysmeta <- new("SystemMetadata")
xml <- serializeSystemMetadata(sysmeta)
# grep(..., invert=TRUE) on the length-1 xml string returns index 1 when the
# tag is absent and integer(0) when present; as.logical() maps those to
# TRUE / logical(0) respectively, so expect_true passes only when absent.
foundObsoletes <- grep("<obsoletes>", xml, invert=TRUE)
expect_true(as.logical(foundObsoletes))
# TODO: check tree equivalence with original XML document
})
# Verify that the SystemMetadata() constructor can build an object directly
# from a parsed XML root node (fixture: sysmeta_test, defined elsewhere).
test_that("SystemMetadata XML constructor works", {
library(datapack)
testid <- "doi:10.xxyy/AA/tesdoc123456789"
doc <- xmlParseDoc(sysmeta_test, asText=FALSE)
expect_match(xmlValue(xmlRoot(doc)[["identifier"]]), testid)
xml <- xmlRoot(doc)
sysmeta <- SystemMetadata(xmlRoot(xml))
expect_match(sysmeta@identifier, testid)
# The fixture's access policy carries 5 rules; first permission is "read"
expect_equal(nrow(sysmeta@accessPolicy), 5)
expect_match(as.character(sysmeta@accessPolicy$permission[[1]]), "read")
expect_true(sysmeta@archived)
# Checksum algorithm should be taken from the <checksum> element attributes
csattrs <- xmlAttrs(xml[["checksum"]])
expect_match(sysmeta@checksumAlgorithm, csattrs[[1]])
# Preferred/blocked replica nodes should be populated from the document
expect_true(grep("urn:node:KNB", sysmeta@preferredNodes) > 0)
expect_true(grep("urn:node:mnUNM1", sysmeta@preferredNodes) > 0)
expect_true(grep("urn:node:BADNODE", sysmeta@blockedNodes) > 0)
})
# Validation: a fully populated sysmeta passes both the package's validate()
# and S4 validObject(); omitting formatId and size yields two errors.
test_that("SystemMetadata validation works", {
library(datapack)
sysmeta <- new("SystemMetadata", identifier="foo", formatId="text/csv", size=59, checksum="jdhdjhfd", rightsHolder="ff")
isValid <- validate(sysmeta)
expect_true(isValid)
isValid <- validObject(sysmeta)
expect_true(isValid)
# Same object without formatId/size: expect exactly two validation errors
sysmeta <- new("SystemMetadata", identifier="foo", checksum="jdhdjhfd", rightsHolder="ff")
errors <- validate(sysmeta)
expect_equal(length(errors), 2)
})
# Access policy construction and mutation: rules can be supplied at
# construction time, added one at a time, added as a data.frame batch, and
# duplicates must not be inserted twice.
test_that("SystemMetadata accessPolicy can be constructed and changed", {
apolicy=data.frame(list("public", "read"))
colnames(apolicy) <- c("subject", "permission")
sysmeta <- new("SystemMetadata", identifier="foo", formatId="text/csv", size=59, checksum="jdhdjhfd", rightsHolder="ff", accessPolicy=apolicy)
expect_equal(sysmeta@serialVersion, 1)
expect_equal(nrow(sysmeta@accessPolicy), 1)
expect_match(as.character(sysmeta@accessPolicy$permission[[1]]), "read")
sysmeta <- addAccessRule(sysmeta, "foo", "write")
expect_equal(nrow(sysmeta@accessPolicy), 2)
# Try to add same rule again and make sure it didn't get duplicated
sysmeta <- addAccessRule(sysmeta, "foo", "write")
expect_equal(nrow(sysmeta@accessPolicy), 2)
expect_match(as.character(sysmeta@accessPolicy$permission[[2]]), "write")
expect_true(hasAccessRule(sysmeta, "foo", "write"))
# Batch form: a two-row data.frame of subject/permission pairs
apolicy=data.frame(subject=c("bar", "baz"), permission= c("changePermission", "write"))
sysmeta <- addAccessRule(sysmeta, apolicy)
# Check that specific rules were added (also testing hasAccessRule method)
expect_true(hasAccessRule(sysmeta, "foo", "write"))
expect_true(hasAccessRule(sysmeta, "bar", "changePermission"))
expect_true(hasAccessRule(sysmeta, "baz", "write"))
expect_true(!hasAccessRule(sysmeta, "baz", "changePermission"))
expect_equal(nrow(sysmeta@accessPolicy), 4)
expect_match(as.character(sysmeta@accessPolicy$permission[[4]]), "write")
expect_match(as.character(sysmeta@accessPolicy$subject[[4]]), "baz")
})
# clearAccessPolicy() must drop every rule, leaving an empty (0-row) policy.
test_that("SystemMetadata accessPolicy can be cleared", {
sysmeta <- new("SystemMetadata")
sysmeta <- addAccessRule(sysmeta, "public", "read")
expect_true(nrow(sysmeta@accessPolicy) == 1)
sysmeta <- clearAccessPolicy(sysmeta)
expect_true(nrow(sysmeta@accessPolicy) == 0)
})
# removeAccessRule() must support both calling conventions: a single
# subject/permission pair, and a data.frame batch of rules.
test_that("SystemMetadata accessPolicy accessRules can be removed.", {
# Check using parameter "y" as a character string containing the subject of the access rule:
sysmeta <- new("SystemMetadata")
sysmeta <- addAccessRule(sysmeta, "uid=smith,ou=Account,dc=example,dc=com", "write")
sysmeta <- addAccessRule(sysmeta, "uid=smith,ou=Account,dc=example,dc=com", "changePermission")
expect_true(hasAccessRule(sysmeta, "uid=smith,ou=Account,dc=example,dc=com", "write"))
expect_true(hasAccessRule(sysmeta, "uid=smith,ou=Account,dc=example,dc=com", "changePermission"))
# Remove the two rules one at a time and verify each is gone
sysmeta <- removeAccessRule(sysmeta, "uid=smith,ou=Account,dc=example,dc=com", "changePermission")
expect_false(hasAccessRule(sysmeta, "uid=smith,ou=Account,dc=example,dc=com", "changePermission"))
sysmeta <- removeAccessRule(sysmeta, "uid=smith,ou=Account,dc=example,dc=com", "write")
expect_false(hasAccessRule(sysmeta, "uid=smith,ou=Account,dc=example,dc=com", "write"))
# Check parameter "y" as a data.frame containing one or more access rules:
# Add write, changePermission for uid=jones,...
sysmeta <- new("SystemMetadata")
sysmeta <- addAccessRule(sysmeta, "uid=jones,ou=Account,dc=example,dc=com", "write")
sysmeta <- addAccessRule(sysmeta, "uid=jones,ou=Account,dc=example,dc=com", "changePermission")
expect_true(hasAccessRule(sysmeta, "uid=jones,ou=Account,dc=example,dc=com", "write"))
expect_true(hasAccessRule(sysmeta, "uid=jones,ou=Account,dc=example,dc=com", "changePermission"))
# Now take privs for uid=jones,... away
accessRules <- data.frame(subject=c("uid=jones,ou=Account,dc=example,dc=com",
"uid=jones,ou=Account,dc=example,dc=com"),
permission=c("write", "changePermission"))
sysmeta <- removeAccessRule(sysmeta, accessRules)
expect_false(hasAccessRule(sysmeta, "uid=jones,ou=Account,dc=example,dc=com", "write"))
expect_false(hasAccessRule(sysmeta, "uid=jones,ou=Account,dc=example,dc=com", "changePermission"))
})
|
f5e37ec94fd13fa79bfbf7d205250dbadec70c70 | 192dd0acad8c23498f5463c0ecec5fec08ab644b | /Course 4/Week 1/Proyect/Plot3.R | fd2c6caf86a09245d309c46c482e8c82a794d67d | [] | no_license | tafuenza/R-Course | adf8606a0fcfa4206813076cd9597d0c7d5af7ea | 6bc7a0f41f206d13fa6a5aee49bcaf184900d128 | refs/heads/master | 2020-09-21T09:37:34.025642 | 2020-06-05T20:38:16 | 2020-06-05T20:38:16 | 224,723,819 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 943 | r | Plot3.R | {data <- read.csv("household_power_consumption.txt", sep = ";", colClasses = c("character","character", "numeric", "numeric","numeric","numeric","numeric","numeric","numeric"), na = "?")
hpc <- data
hpc$datetime <- paste(hpc$Date,hpc$Time)
hpc$datetime <- as.POSIXct(hpc$datetime, format = "%d/%m/%Y %H:%M:%S")
hpc <- hpc[,3:10]
hpc <- subset(hpc, hpc$datetime >= "2007-02-01 00:00:00" & hpc$datetime <= "2007-02-02 23:59:59")} #Open the file and create date_times
{png("plot3.png")
par(mar = c(3,4,1,1))
plot(hpc$datetime,hpc$Sub_metering_1, type = "n", ylab = "Energy sub metering")
lines(hpc$datetime,hpc$Sub_metering_1, col = "black")
lines(hpc$datetime,hpc$Sub_metering_2, col = "red")
lines(hpc$datetime,hpc$Sub_metering_3, col = "Blue")
legend("topright", col = c("black","red","blue"), legend = c("sub_metering_1","sub_metering_2","sub_metering_3"), pch = "-")
dev.off()} #Plot 3 |
6e177fc45d212346d1e71891d05ac12790ff245c | c44f4c0b63dac61a27fb2bad060cf97023eba256 | /R/plots.R | f19c67a57903f437bff984d0dd939b691275ec41 | [
"MIT"
] | permissive | migstats/radshiba | c06c5b0b92de5efd29302fdd61dfb7b0c850fd85 | ee33df023a919552dea8204a6ab7d0345188b96f | refs/heads/master | 2020-04-15T05:50:57.696323 | 2019-11-06T17:06:25 | 2019-11-06T17:06:25 | 164,439,177 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 294 | r | plots.R | #' Simple density plot function
#'
#' @param column Name of the column
#' @param alpha Alpha parameter for ggplot
#'
#' @export
dens <- function(column, alpha = NA){
ggplot2::ggplot(data.frame(a = column), aes(x = a)) +
ggplot2::geom_density(alpha = alpha) +
ggplot2::theme_light()
}
|
d1f83dbf489c08421e02c701025c1945c6c0da19 | 642de25bfc8f2d645253a6fa1717a859d6a78142 | /data-raw/R/lemis_cleaning_functions.R | b86e8504d23e494b1b6c1cca99ec3bc50c7a3596 | [
"MIT"
] | permissive | ecohealthalliance/lemis | cbe17b9f58db22847926ffb5c1f37261ebccdc38 | b41d2a2efc69996743face410d6f70102bbc87bf | refs/heads/master | 2023-07-26T07:38:47.083130 | 2023-07-17T17:18:43 | 2023-07-17T17:18:43 | 123,156,000 | 13 | 0 | NOASSERTION | 2019-09-23T17:25:20 | 2018-02-27T16:23:15 | R | UTF-8 | R | false | false | 2,276 | r | lemis_cleaning_functions.R |
compareNA <- function(x1, x2) {
  # Elementwise equality that treats a pair of NAs as a match: TRUE where both
  # values are equal or both are NA, FALSE everywhere else (never NA).
  both_missing <- is.na(x1) & is.na(x2)
  both_equal <- !is.na(x1) & !is.na(x2) & x1 == x2
  both_equal | both_missing
}
# Function to clean the intermediate LEMIS dataset fields given valid codes
#
# NOTE: operates on a data frame named `lemis` taken from the enclosing
# environment (not passed as an argument), and relies on dplyr/rlang being
# attached. `field` is a column name (string); `valid.values` is the set of
# accepted codes for that column. Returns the modified copy of `lemis`.
get_cleaned_lemis <- function(field, valid.values) {
# Get the column index of the field to clean
index <- which(colnames(lemis) == field)
lemis %>%
# Add cleaning notes based on the values in the field
# `.[[index]]` is the magrittr pronoun for the piped data's target column.
mutate(
cleaning_notes = case_when(
# Invalid, non-NA value and no note yet: start a new note
!(.[[index]] %in% valid.values) & !is.na(.[[index]]) & is.na(cleaning_notes) ~
paste0("Original value in '", field, "' column: ", .[[index]]),
# Invalid, non-NA value and an existing note: append to it
!(.[[index]] %in% valid.values) & !is.na(.[[index]]) & !is.na(cleaning_notes) ~
paste0(cleaning_notes, ", '", field, "' column: ", .[[index]]),
TRUE ~ cleaning_notes
)
) %>%
# Add non-standard values to the field in question where appropriate
# UQ(rlang::sym(field)) := ... assigns back into the dynamically named
# column; invalid codes are replaced by the sentinel "non-standard value"
# and the column is converted to a factor.
mutate(
UQ(rlang::sym(field)) :=
ifelse(!(UQ(rlang::sym(field)) %in% valid.values) & !is.na(UQ(rlang::sym(field))),
"non-standard value", UQ(rlang::sym(field))),
UQ(rlang::sym(field)) := as.factor(UQ(rlang::sym(field)))
)
}
# Function to produce cleaning notes for taxonomic data fields
#
# For each taxonomic column, compares the original value against its cleaned
# counterpart in the matching "new_<name>" column (via compareNA, defined
# above) and records the original value in `cleaning_notes` wherever the two
# differ. Returns the annotated data frame. Relies on dplyr being attached.
get_taxonomic_cleaning_notes <- function(dataframe) {
taxonomic.cols <-
c("genus", "species", "subspecies", "specific_name", "generic_name")
for(x in taxonomic.cols) {
# Get the column index of the variable and its new version
index <- which(colnames(dataframe) == x)
index.new <- which(colnames(dataframe) == paste0("new_", x))
# Do the two values match?
# compareNA treats NA==NA as a match, so only real changes are flagged.
matching <- compareNA(dataframe[, index], dataframe[, index.new])
dataframe <- dataframe %>%
# Add cleaning notes based on the values of the taxonomic variable
mutate(
cleaning_notes = case_when(
# Changed value, no note yet: start a new note
!matching & is.na(cleaning_notes) ~
paste0("Original value in '", x, "' column: ", .[[index]]),
# Changed value, note exists: append to it
!matching & !is.na(cleaning_notes) ~
paste0(cleaning_notes, ", '", x, "' column: ", .[[index]]),
TRUE ~ cleaning_notes
)
)
}
return(dataframe)
}
|
3f2cd485f7af27ce29fbc1ddf7a338b5e2f4acf7 | 4f192390c49aaa98685335c4280ddd5fb14d279d | /app.R | 6c4efb2d96cb574a0d3e90cf8661b2fe0a322e02 | [] | no_license | samLong3217/info200finalProject | d4db3125ca3522d311cf9312985a200f59957441 | 8d88a67d68f84112c3947b7be4dfa4191c9cb68d | refs/heads/master | 2020-04-28T03:00:12.243506 | 2019-03-13T19:37:33 | 2019-03-13T19:37:33 | 174,919,553 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 234 | r | app.R | library("shiny")
library("ggplot2")
library("tidyr")
library("reshape2")
library("stringr")
library("maps")
library("RColorBrewer")
library("rsconnect")
source("my_ui.R")
source("my_server.R")
shinyApp(ui = my_ui, server = my_server) |
38ae50f6fbba86ce48b0514fa7035e2d515a1c14 | ffdea92d4315e4363dd4ae673a1a6adf82a761b5 | /data/genthat_extracted_code/dst/examples/shape.Rd.R | a5a02021d7dbf934427ca9bcb0b3f41b6915cc80 | [] | no_license | surayaaramli/typeRrh | d257ac8905c49123f4ccd4e377ee3dfc84d1636c | 66e6996f31961bc8b9aafe1a6a6098327b66bf71 | refs/heads/master | 2023-05-05T04:05:31.617869 | 2019-04-25T22:10:06 | 2019-04-25T22:10:06 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 219 | r | shape.Rd.R | library(dst)
### Name: shape
### Title: Obtain dimensions of an array or length of a vector with a
###   single command
### Aliases: shape aplShape
### ** Examples
# Auto-extracted help-page examples (require library(dst) to run).
shape(array(c(1:6), c(2,3))) # dimensions of an array
shape(c("a", "b")) # length of a plain vector
|
c776efe791dd931145476081108b5abe003e29c5 | ffd9931bdb081b5fe4f5ebc1208d81af99185c31 | /cachematrix.R | dc785f15c01c6706a06161c80b69043458d54b4c | [] | no_license | walcob/ProgrammingAssignment2 | b0f9410c55e39369d96f9c43c8c929d21c08d5ce | 23fa729d857af3f4f4d1b9928079725a680439d8 | refs/heads/master | 2021-08-08T07:39:33.825908 | 2017-11-09T22:09:22 | 2017-11-09T22:09:22 | 110,169,380 | 0 | 0 | null | 2017-11-09T21:42:22 | 2017-11-09T21:42:21 | null | UTF-8 | R | false | false | 1,154 | r | cachematrix.R | # This program takes a matrix as input and caches its inverse
# makeCacheMatrix stores the matrix as x and its inverse as inverse
# The get and set functions return and set the value of x, respectively
# getInverse and setInverse do the same for the inverse of the matrix
makeCacheMatrix <- function(x = matrix()) {
  # Wrap a matrix together with a cached copy of its inverse.
  # Returns a list of four accessors; cacheSolve() uses them to avoid
  # recomputing the inverse of an unchanged matrix.
  cached_inverse <- NULL
  list(
    set = function(value) {
      x <<- value
      # The stored matrix changed, so any cached inverse is now stale.
      cached_inverse <<- NULL
    },
    get = function() x,
    setInverse = function(inv) cached_inverse <<- inv,
    getInverse = function() cached_inverse
  )
}
# cacheSolve checks to see if the inverse of x has been cached
# If so, it returns the cached inverse.
# Otherwise, it calculates the inverse, caches it with the
# setInverse function, and returns it.
cacheSolve <- function(x, ...) {
  ## Return a matrix that is the inverse of 'x'
  # 'x' is a cache object built by makeCacheMatrix(). On a cache hit the
  # stored inverse is returned (with a message); on a miss the inverse is
  # computed with solve(), stored, and returned. Extra arguments are
  # forwarded to solve().
  cached <- x$getInverse()
  if (is.null(cached)) {
    cached <- solve(x$get(), ...)
    x$setInverse(cached)
  } else {
    message("getting cached data")
  }
  cached
}
|
d124ff99aa80249d26e3d80edf02fea90c8316b8 | bdcbc978d8635e19eedd67f8fb0b2867cd3cde95 | /man/cross_validation_logistic_auc.Rd | a4b0595626a2ff646bdb264102f96faefe68fc8c | [] | no_license | SMAC-Group/fpl | 4d542391c1d97d2a4444497fb227a5095b5dd471 | 1372025e3aa4661f5f010d885734b39dbe972256 | refs/heads/main | 2023-08-31T11:49:10.258414 | 2023-08-22T06:52:35 | 2023-08-22T06:52:35 | 330,690,039 | 0 | 0 | null | null | null | null | UTF-8 | R | false | true | 602 | rd | cross_validation_logistic_auc.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/RcppExports.R
\name{cross_validation_logistic_auc}
\alias{cross_validation_logistic_auc}
\title{Cross-validation for logistic regression with AUC}
\usage{
cross_validation_logistic_auc(X, y, seed, K = 10L, M = 10L)
}
\arguments{
\item{X}{a n x p matrix of regressor}
\item{y}{a n-vector of response}
\item{seed}{an integer for setting the seed (reproducibility)}
\item{K}{number of splits; 10 by default}
\item{M}{number of repetitions; 10 by default}
}
\description{
Cross-validation for logistic regression with AUC
}
|
927f123ec9e701aacf3a1b8d68193ccbd5605c7c | f5071dae06aa2e84a6514b3642b6890405bdf38c | /Code/Ejercicio5/Duncan.R | c62a30e1a2d817fedd5589319cc5aeaf272ecaf8 | [] | no_license | CristianPachacama/DisenioExperimental | 2be6075550474a39eea9a24898672912fe45a706 | 9b4b937a4234c1b46ba2f1e844342b3e69feda70 | refs/heads/master | 2020-04-12T09:16:52.522685 | 2018-12-19T07:36:18 | 2018-12-19T07:36:18 | 162,397,365 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 261 | r | Duncan.R | # Test de Duncan
# Duncan's multiple range test, run only when the ANOVA is significant.
# NOTE(review): depends on objects `Anv` (ANOVA table) and `modelo` created
# by the sourced script below -- confirm their definitions there.
source("Code/Ejercicio5/Anovas.R",local = TRUE)
# p-value of the first term of the ANOVA table
p_valor=Anv$`Pr(>F)`[1]
if(p_valor<0.05){
# "rechaza" / "no_rechaza" record whether the null hypothesis was rejected
verif = "rechaza"
# Duncan tests on each factor (Molde = mold, Catalizador = catalyst)
dun = duncan.test(aov(modelo),trt = 'Molde')
dun2 = duncan.test(aov(modelo),trt = 'Catalizador')
}else{
verif = "no_rechaza"
}
|
be3e5246ca5842bde7d58310c60d3c086e747625 | efd4b12511648b1be471a367897b8bec545e3029 | /R/MOE.R | 47a9b0623766945375c9f6c3a2610e00c9a74711 | [] | no_license | freephys/RND | 9fdd595658c10498ec8b4f55d9ce7f05197cbf8f | f9287319b1b6c105c22b2f15ec0acd7e0268cc37 | refs/heads/master | 2020-03-07T11:54:14.878301 | 2017-01-10T23:14:55 | 2017-01-10T23:14:55 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 12,197 | r | MOE.R | MOE <-
function(market.calls, call.strikes, market.puts, put.strikes, call.weights = 1, put.weights = 1, lambda = 1, s0, r , te, y, file.name = "myfile")
{
###
### perform a basic check!
###
strikes = intersect(call.strikes, put.strikes)
if (length(strikes) < 10) stop("You must have at least 10 common strikes between the calls and puts.")
###
### Point Estimation
###
point.obj = get.point.estimate(market.calls = market.calls, call.strikes = call.strikes, r = r , te = te)
###
### BSM Extraction
###
bsm.obj = extract.bsm.density(r = r, y = y, te = te, s0 = s0, market.calls = market.calls, call.strikes = call.strikes,
market.puts = market.puts, put.strikes = put.strikes, call.weights = call.weights, put.weights = put.weights, lambda = lambda, hessian.flag = F)
bsm.mu = bsm.obj$mu
bsm.zeta = bsm.obj$zeta
###
### GB Extraction
###
gb.obj = extract.gb.density(r = r, y = y, te = te, s0 = s0, market.calls = market.calls, call.strikes = call.strikes,
market.puts = market.puts, put.strikes = put.strikes, call.weights = call.weights, put.weights = put.weights, lambda = lambda, hessian.flag = F)
gb.a = gb.obj$a
gb.b = gb.obj$b
gb.v = gb.obj$v
gb.w = gb.obj$w
###
### Double LogNormal
###
mln.obj = extract.mln.density(r = r, y = y, te = te, s0 = s0, market.calls = market.calls, call.strikes = call.strikes,
market.puts = market.puts, put.strikes = put.strikes, call.weights = call.weights, put.weights = put.weights, lambda = lambda, hessian.flag = F)
mln.alpha.1 = mln.obj$alpha.1
mln.meanlog.1 = mln.obj$meanlog.1
mln.meanlog.2 = mln.obj$meanlog.2
mln.sdlog.1 = mln.obj$sdlog.1
mln.sdlog.2 = mln.obj$sdlog.2
###
### Edgeworth Expansion Method
###
ew.obj = extract.ew.density(r = r, y = y, te = te, s0 = s0, market.calls = market.calls, call.strikes = call.strikes, call.weights = call.weights, lambda = lambda, hessian.flag = F)
ew.sigma = ew.obj$sigma
ew.skew = ew.obj$skew
ew.kurt = ew.obj$kurt
###
### Shimko Method
###
shimko.obj = extract.shimko.density(market.calls = market.calls, call.strikes = call.strikes, r = r, y = y, te = te, s0 = s0, lower = -10, upper = +10)
a0 = shimko.obj$implied.curve.obj$a0
a1 = shimko.obj$implied.curve.obj$a1
a2 = shimko.obj$implied.curve.obj$a2
###
### Graphs
###
min.x = min(put.strikes, call.strikes)
max.x = max(put.strikes, call.strikes)
x = seq(from = min.x, to = max.x, length.out = 10000)
y.bsm = dlnorm(x = x, meanlog = bsm.mu, sdlog = bsm.zeta, log = FALSE)
y.gb = dgb(x = x,a = gb.a, b = gb.b, v = gb.v, w = gb.w)
y.mln = dmln(x = x, alpha.1 = mln.alpha.1, meanlog.1 = mln.meanlog.1, meanlog.2 = mln.meanlog.2, sdlog.1 = mln.sdlog.1, sdlog.2 = mln.sdlog.2)
y.ew = dew(x = x, r = r, y = y, te = te, s0 = s0, sigma = ew.sigma, skew = ew.skew, kurt = ew.kurt)
y.shimko = dshimko(r = r, te = te, s0 = s0, k = x, y = y, a0 = a0, a1 = a1, a2 = a2)
y.point = point.obj
###
### Start PDF output
###
pdf(file = paste(file.name,".pdf",sep=""), width = 7 * 1.6, height = 7)
###
### Overall Plots
###
max.y = max(y.bsm, y.gb, y.mln, y.ew, y.shimko)*1.05
if ( !is.numeric(max.y) ) max.y = 1
cut.off = (min(x) + max(x))/2
max.ind = which.max(y.bsm)
if (x[max.ind] > cut.off) legend.location = "topleft" else legend.location = "topright"
par(mar=c(5,5,5,5))
matplot(x,cbind(y.bsm, y.gb, y.mln, y.ew, y.shimko), type="l", col=c("black", "blue","red", "green", "purple"), xlab="Strikes", ylab="Density",
lwd=c(2,2,2,2,2), lty = c(1,1,1,1,1), cex.axis = 1.25, cex.lab = 1.25, ylim=c(0,max.y))
legend(legend.location, legend=c("Single LNorm","GenBeta","MixLNorm","EW","Shimko"), col=c("black","blue","red", "green", "purple"),
lwd = c(2,2,2,2,2), lty = c(1,1,1,1,1), bty="n", cex=1.25)
###
### Single Plots
###
par(mar=c(5,5,5,5))
plot(y.bsm ~ x, type="l", col="black", xlab="Strikes", ylab="Density", main="Single LNorm",
ylim=c(0,max.y), lwd=2, lty=1, cex.axis = 1.25, cex.lab = 1.25)
par(mar=c(5,5,5,5))
plot(y.gb ~ x, type="l", col="blue", xlab="Strikes", ylab="Density", main="GenBeta",
ylim=c(0,max.y), lwd=2, lty=1, cex.axis = 1.25, cex.lab = 1.25)
par(mar=c(5,5,5,5))
plot(y.mln ~ x, type="l", col="red", xlab="Strikes", ylab="Density", main="MixLNorm",
ylim=c(0,max.y), lwd=2, lty=1, cex.axis = 1.25, cex.lab = 1.25)
par(mar=c(5,5,5,5))
plot(y.ew ~ x, type="l", col="green", xlab="Strikes", ylab="Density", main="EW",
ylim=c(0,max.y), lwd=2, lty=1, cex.axis = 1.25, cex.lab = 1.25)
par(mar=c(5,5,5,5))
plot(y.shimko ~ x, type="l", col="purple", xlab="Strikes", ylab="Density", main="Shimko",
ylim=c(0,max.y), lwd=2, lty=1, cex.axis = 1.25, cex.lab = 1.25)
par(mar=c(5,5,5,5))
plot(y.point ~ call.strikes[2:(length(call.strikes)-1)], type="l", col="black", xlab="Strikes", ylab="Density", main="Point Estimates",
ylim=c(0,max.y), lwd=2, lty=1, cex.axis = 1.25, cex.lab = 1.25)
points(x = call.strikes[2:(length(call.strikes)-1)], y = y.point)
###
### Print Diagnostics
###
bsm.sigma = bsm.zeta/sqrt(te)
bsm.predicted.puts = price.bsm.option(r = r, te = te, s0 = s0, k = put.strikes, sigma = bsm.sigma, y = y)$put
bsm.predicted.calls = price.bsm.option(r = r, te = te, s0 = s0, k = call.strikes, sigma = bsm.sigma, y = y)$call
bsm.res.calls = mean(abs(lm(bsm.predicted.calls ~ market.calls)$res))
bsm.res.puts = mean(abs(lm(bsm.predicted.puts ~ market.puts)$res))
par(mfrow=c(1,2), mar=c(7,5,7,5))
plot(bsm.predicted.calls ~ market.calls, ylab="Predicted", xlab = "Market Price", main=paste("Single LNorm, Calls, ","mean|res| = ",round(bsm.res.calls,3)),
cex.axis = 1.25, cex.lab = 1.25)
abline(a=0,b=1, col="red")
plot(bsm.predicted.puts ~ market.puts, ylab="Predicted", xlab = "Market Price", main=paste("Single LNorm, Puts, ","mean|res| = ",round(bsm.res.puts,3)),
cex.axis = 1.25, cex.lab = 1.25)
abline(a=0,b=1, col="red")
par(mfrow=c(1,1))
############
gb.predicted.puts = price.gb.option(r = r, te = te, s0 = s0, k = put.strikes, y = y, a = gb.a, b = gb.b, v = gb.v, w=gb.w)$put
gb.predicted.calls = price.gb.option(r = r, te = te, s0 = s0, k = call.strikes, y = y, a = gb.a, b = gb.b, v = gb.v, w=gb.w)$call
gb.res.calls = mean(abs(lm(gb.predicted.calls ~ market.calls)$res))
gb.res.puts = mean(abs(lm(gb.predicted.puts ~ market.puts)$res))
par(mfrow=c(1,2), mar=c(7,5,7,5))
plot(gb.predicted.calls ~ market.calls, ylab="Predicted", xlab = "Market Price", main=paste("GenBeta, Calls, ","mean|res| = ",round(gb.res.calls,2)),
cex.axis = 1.25, cex.lab = 1.25)
abline(a=0,b=1, col="red")
plot(gb.predicted.puts ~ market.puts, ylab="Predicted", xlab = "Market Price", main=paste("GenBeta, Puts, ","mean|res| = ",round(gb.res.puts,2)),
cex.axis = 1.25, cex.lab = 1.25)
abline(a=0,b=1, col="red")
par(mfrow=c(1,1))
############
mln.predicted.puts = price.mln.option(r = r, te = te, y = y, k = put.strikes,
alpha.1 = mln.alpha.1, meanlog.1 = mln.meanlog.1, meanlog.2 = mln.meanlog.2,
sdlog.1 = mln.sdlog.1, sdlog.2 = mln.sdlog.2)$put
mln.predicted.calls = price.mln.option(r = r, te = te, y = y, k = call.strikes,
alpha.1 = mln.alpha.1, meanlog.1 = mln.meanlog.1, meanlog.2 = mln.meanlog.2,
sdlog.1 = mln.sdlog.1, sdlog.2 = mln.sdlog.2)$call
mln.res.calls = mean(abs(lm(mln.predicted.calls ~ market.calls)$res))
mln.res.puts = mean(abs(lm(mln.predicted.puts ~ market.puts)$res))
par(mfrow=c(1,2), mar=c(7,5,7,5))
plot(mln.predicted.calls ~ market.calls, ylab="Predicted", xlab = "Market Price", main=paste("MixLNorm, Calls, ","mean|res| = ",round(mln.res.calls,2)),
cex.axis = 1.25, cex.lab = 1.25)
abline(a=0,b=1, col="red")
plot(mln.predicted.puts ~ market.puts, ylab="Predicted", xlab = "Market Price", main=paste("MixLNorm, Puts, ","mean|res| = ",round(mln.res.puts,2)),
cex.axis = 1.25, cex.lab = 1.25)
abline(a=0,b=1, col="red")
par(mfrow=c(1,1))
############
ew.predicted.puts = price.ew.option(r = r, te = te, s0 = s0, k = put.strikes, y = y, sigma = ew.sigma, skew = ew.skew, kurt = ew.kurt)$put
ew.predicted.calls = price.ew.option(r = r, te = te, s0 = s0, k = call.strikes, y = y, sigma = ew.sigma, skew = ew.skew, kurt = ew.kurt)$call
ew.res.calls = mean(abs(lm(ew.predicted.calls ~ market.calls)$res))
ew.res.puts = mean(abs(lm(ew.predicted.puts ~ market.puts)$res))
par(mfrow=c(1,2), mar=c(7,5,7,5))
plot(ew.predicted.calls ~ market.calls, ylab="Predicted", xlab = "Market Price", main=paste("EW, Calls, ","mean|Res| = ",round(ew.res.calls,2)),
cex.axis = 1.25, cex.lab = 1.25)
abline(a=0,b=1, col="red")
plot(ew.predicted.puts ~ market.puts, ylab="Predicted", xlab = "Market Price", main=paste("EW, Puts, ","mean|Res| = ",round(ew.res.puts,2)),
cex.axis = 1.25, cex.lab = 1.25)
abline(a=0,b=1, col="red")
par(mfrow=c(1,1))
############
shimko.predicted.puts = numeric(length(put.strikes))
for (i in 1:length(put.strikes))
{
shimko.predicted.puts[i] = price.shimko.option(r = r, te = te, s0 = s0, k = put.strikes[i], y = y, a0 = a0, a1 = a1, a2 = a2)$put
}
shimko.predicted.calls = numeric(length(put.strikes))
for (j in 1:length(put.strikes))
{
shimko.predicted.calls[j] = price.shimko.option(r = r, te = te, s0 = s0, k = call.strikes[j], y = y, a0 = a0, a1 = a1, a2 = a2)$call
}
shimko.res.calls = mean(abs(lm(shimko.predicted.calls ~ market.calls)$res))
shimko.res.puts = mean(abs(lm(shimko.predicted.puts ~ market.puts)$res))
par(mfrow=c(1,2), mar=c(7,5,7,5))
plot(shimko.predicted.calls ~ market.calls, ylab="Predicted", xlab = "Market Price", main=paste("Shimko - Calls, ","mean|Res| = ",round(shimko.res.calls,2)),
cex.axis = 1.25, cex.lab = 1.25)
abline(a=0,b=1, col="red")
plot(shimko.predicted.puts ~ market.puts, ylab="Predicted", xlab = "Market Price", main=paste("Shimko - Puts, ","mean|Res| = ",round(shimko.res.puts,2)),
cex.axis = 1.25, cex.lab = 1.25)
abline(a=0,b=1, col="red")
par(mfrow=c(1,1))
###
### Turn Device Off
###
dev.off()
###
### Create Data Files
###
tmp.data.calls = cbind(market.calls,call.strikes, bsm.predicted.calls, gb.predicted.calls, mln.predicted.calls, ew.predicted.calls, shimko.predicted.calls)
colnames(tmp.data.calls) = c("marketcalls","strikes", "bsm", "gb", "mln", "ew", "shimko")
data.calls = as.data.frame(tmp.data.calls)
write.table(data.calls, file = paste(file.name,"calls.csv", sep=""), sep = ",", col.names = T, row.names = F)
tmp.data.puts = cbind(market.puts, put.strikes, bsm.predicted.puts, gb.predicted.puts, mln.predicted.puts, ew.predicted.puts, shimko.predicted.puts)
colnames(tmp.data.puts) = c("marketputs","strikes", "bsm", "gb", "mln", "ew", "shimko")
data.puts = as.data.frame(tmp.data.puts)
write.table(data.puts, file = paste(file.name,"puts.csv", sep=""), sep = ",", col.names = T, row.names = F)
###
### Output Results
###
out = list(bsm.mu = bsm.mu, bsm.sigma = bsm.sigma, gb.a = gb.a, gb.b = gb.b, gb.v = gb.v, gb.w = gb.w,
mln.alpha.1 = mln.alpha.1, mln.meanlog.1 = mln.meanlog.1, mln.meanlog.2 = mln.meanlog.2,
mln.sdlog.1 = mln.sdlog.1, mln.sdlog.2 = mln.sdlog.2,
ew.sigma = ew.sigma, ew.skew = ew.skew, ew.kurt = ew.kurt, a0 = a0, a1 = a1, a2 = a2)
out
}
|
e2d012583137496900b586514a9f7b6774af624b | 358c57a5a40607272997d8428390fb1a4b11b815 | /R/BivariateAssoc.R | 0f04015ff7107d5322f4b7ed6efa34ad1de34bbc | [] | no_license | nicolas-robette/moreparty | 6ae430db14c807b78077807b2fe6d8cc419d52b3 | ee447af5def57b964e523e108020b40e048b0df7 | refs/heads/master | 2023-08-10T18:41:29.628127 | 2023-07-20T22:28:49 | 2023-07-20T22:28:49 | 214,511,417 | 2 | 0 | null | null | null | null | UTF-8 | R | false | false | 3,310 | r | BivariateAssoc.R | #' @importFrom stats as.formula chisq.test complete.cases cor lm
#' @export
BivariateAssoc <- function(Y,X,xx=TRUE) {
X <- as.data.frame(X)
xnames <- names(X)
xformats <- sapply(X,class)
yformat <- class(Y)
df <- cbind.data.frame(Y,X)
formule <- as.formula(paste('Y ~',paste(xnames,collapse='+')))
ct <- party::ctree(formule, df, controls=party::ctree_control(stump=TRUE))
p.value <- 1-nodes(ct,1)[[1]]$criterion$criterion
criterion <- -log(nodes(ct,1)[[1]]$criterion$criterion)
res <- list()
for(i in 1:ncol(X)) {
# print(i)
if(yformat %in% c('numeric','integer') & xformats[i] %in% c('numeric','integer')) {
assoc <- cor(Y, X[,i], use='complete.obs', method='kendall')
mesure='kendall'
}
if(yformat %in% c('numeric','integer') & xformats[i]=="factor") {
assoc <- summary(lm(Y ~ X[,i]))$adj.r.squared
mesure='eta2'
}
if(yformat=="factor" & xformats[i]%in% c('numeric','integer')) {
assoc <- summary(lm(X[,i] ~ Y))$adj.r.squared
mesure='eta2'
}
if(yformat=="factor" & xformats[i]=="factor") {
t <- table(Y,X[,i])
assoc <- sqrt(chisq.test(t)$statistic / (length(Y)*(min(nrow(t),ncol(t))-1)))
mesure="cramer"
}
res[[i]] <- data.frame(mesure,assoc,stringsAsFactors = F)
}
res <- do.call('rbind.data.frame',res)
restot <- data.frame(variable=xnames,measure=res$mesure,assoc=round(res$assoc,3),p.value=round(p.value,5),criterion=criterion)
restot <- restot[order(restot$criterion, decreasing=F),]
restot$criterion <- round(restot$criterion,10)
rownames(restot) <- NULL
if(xx==TRUE) {
combi <- utils::combn(xnames,2,simplify=F)
res <- list()
for(i in 1:length(combi)) {
x1 <- X[,combi[[i]][1]]
x2 <- X[,combi[[i]][2]]
df <- data.frame(x1,x2,stringsAsFactors=F)
df <- df[complete.cases(df),]
ct <- party::ctree(x1~x2, data=df, controls=party::ctree_control(stump=TRUE))
p.value <- 1-nodes(ct,1)[[1]]$criterion$criterion
criterion <- -log(nodes(ct,1)[[1]]$criterion$criterion)
if(class(x1) %in% c('numeric','integer') & class(x2) %in% c('numeric','integer')) {
assoc <- cor(x1,x2, use='complete.obs', method='kendall')
mesure='kendall'
}
if(class(x1) %in% c('numeric','integer') & is.factor(x2)) {
assoc <- summary(lm(x1~x2))$adj.r.squared
mesure='eta2'
}
if(is.factor(x1) & class(x2) %in% c('numeric','integer')) {
assoc <- summary(lm(x2~x1))$adj.r.squared
mesure='eta2'
}
if(is.factor(x1) & is.factor(x2)) {
t <- table(x1,x2)
assoc <- sqrt(chisq.test(t)$statistic / (length(Y)*(min(nrow(t),ncol(t))-1)))
mesure="cramer"
}
res[[i]] <- data.frame(mesure,assoc,p.value,criterion,stringsAsFactors = F)
}
res <- do.call('rbind.data.frame',res)
noms <- do.call('rbind.data.frame',combi)
restot2 <- data.frame(variable1=noms[,1],variable2=noms[,2],measure=res$mesure,assoc=round(res$assoc,3),p.value=round(res$p.value,5),criterion=res$criterion,row.names=NULL)
restot2 <- restot2[order(restot2$criterion, decreasing=F),]
restot2$criterion <- round(restot2$criterion,10)
rownames(restot2) <- NULL
} else {
restot2 <- NULL
}
return(list(YX=restot, XX=restot2))
}
|
cb350f48880a3f109756ee0116a1f1f4cb514bb8 | 31b2e5ac58074082f4a63ab6ff94130c611cac51 | /man/proxistat-package.Rd | 7f5279cebd9158d091e9ce7f080a2de20b65c2ca | [] | no_license | ejanalysis/proxistat | 648bd956f350de27d06926a53ac10ce6ff3adf1b | 0fc0644bc649266a0d993307ab02ffaf1c99038c | refs/heads/master | 2023-06-08T00:22:45.596783 | 2023-05-31T02:07:27 | 2023-05-31T02:07:27 | 32,954,460 | 3 | 1 | null | null | null | null | UTF-8 | R | false | true | 3,724 | rd | proxistat-package.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/proxistat-package.R
\docType{package}
\name{proxistat-package}
\alias{proxistat-package}
\title{Find Distances between lat lon Points or Create Proximity Scores.}
\description{
This package has functions helping to calculate distances between points,
such as the distances between all points,
distances to all points within some maximum distance,
distance to nearest single point, etc.
It also can create a proximity score for each spatial unit like a
Census block group, to quantify the distance-weighted count of nearby
points.
}
\details{
This package has functions helping to calculate distances between
geographic points, such as the distances between all points, distances to
all points within some maximum distance, distance to nearest single point,
etc. It also can create a proximity score for each spatial unit like a
Census block group, to quantify the distance-weighted count of nearby
points. This proximity score can be used in environmental justice (EJ)
analysis, for example.
This package relies on the \pkg{sp} package for the actual calculation of distance.
A vector of points can be specified using a data.frame of two columns, "lat" and "lon"
which specify latitude and longitude in decimal degrees.
It returns the distances from one or more \code{frompoints} to one or more \code{topoints}.\cr \cr
Key functions include \cr
\itemize{
\item \code{\link{get.nearest}} to find the one among \code{topoints} nearest each \code{frompoints}
\item \code{\link{get.distances}} to find distances quickly within an optional search radius
\item \code{\link{proxistat}} to create a proximity score that quantifies,
for each spatial unit like a Census block group,
how many \code{topoints} are nearby and how close they are
\item \code{\link{convert}} to convert units (miles, km)
}
}
\examples{
test.from <- structure(list(fromlat = c(38.9567309094, 38.9507043428),
fromlon = c(-77.0896572305, -77.0896199948)),
.Names = c("lat","lon"), row.names = c("6054762", "6054764"), class = "data.frame")
test.to <- structure(list(tolat = c(38.9575019287, 38.9507043428, 38.9514152435),
tolon = c(-77.0892818598, -77.0896199948, -77.0972395245)),
.Names = c("lat","lon"), class = "data.frame", row.names = c("6054762", "6054763", "6054764"))
set.seed(999)
t100=testpoints(100)
t10=testpoints(10)
t3=testpoints(3)
get.distances(
test.from[1:2,], test.to[1:3, ], radius=0.7, units='km', return.rownums=TRUE, return.latlons=TRUE
)
get.nearest(test.from, test.to)
get.distances( t3, t10, units='km', return.crosstab=TRUE)
get.distances( t3, t10, units='km')
get.distances( t3, t10, radius=300, units='km')
proxistat(t3, t10, radius = 300, units = "km", area = c(100001:100003))
proxistat( t3, t10, radius=300, units='km')
1/get.nearest( t3, t10, radius=300, units='km')
}
\references{
\url{http://ejanalysis.github.io} \cr
\url{http://www.ejanalysis.com/} \cr
\pkg{sp} package documentation for basic distance function.\cr
Some discussion of this type of proximity indicator is available in the EJSCREEN mapping tool documentation: \cr
U.S. Environmental Protection Agency (2015). EJSCREEN Technical Documentation.
\url{http://www.epa.gov/ejscreen}\cr
\url{http://en.wikipedia.org/wiki/Longitude} and \url{http://en.wikipedia.org/wiki/Decimal_degrees}
}
\seealso{
\pkg{sp}, US block points dataset: \url{http://ejanalysis.github.io/UScensus2010blocks/}, \code{\link{deltalon.per.km}}, \code{\link{meters.per.degree.lat}}, \code{\link{meters.per.degree.lon}}
}
\author{
info@ejanalysis.com<info@ejanalysis.com>
}
\concept{EJ}
\concept{distance}
\concept{environmental justice}
\concept{proximity}
|
24b0fc89a8ba30ea7c7e21927741a46e5d5a8b44 | 2a7e77565c33e6b5d92ce6702b4a5fd96f80d7d0 | /fuzzedpackages/TESS/R/tess.plot.output.R | 97ad365755bb2be0463a67b3dd9f92b2fdb3a99b | [] | no_license | akhikolla/testpackages | 62ccaeed866e2194652b65e7360987b3b20df7e7 | 01259c3543febc89955ea5b79f3a08d3afe57e95 | refs/heads/master | 2023-02-18T03:50:28.288006 | 2021-01-18T13:23:32 | 2021-01-18T13:23:32 | 329,981,898 | 7 | 1 | null | null | null | null | UTF-8 | R | false | false | 7,020 | r | tess.plot.output.R | ################################################################################
#
# tess.plot.output.R
#
# Copyright (c) 2012- Michael R May
#
# This file is part of TESS.
# See the NOTICE file distributed with this work for additional
# information regarding copyright ownership and licensing.
#
# TESS is free software; you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License as
# published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# TESS is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with TESS; if not, write to the
# Free Software Foundation, Inc., 51 Franklin St, Fifth Floor,
# Boston, MA 02110-1301 USA
#
################################################################################
################################################################################
#
# @brief Plotting the output of a episodic diversification rate analysis with mass-extinction events.
#
# @date Last modified: 2014-10-05
# @author Michael R May
# @version 2.0
# @since 2014-10-04, version 2.0.0
#
# @param output list The processed output for plotting.
# @param fig.types character Which aspects of the model to visualize. See details for a complete description.
# @param xlab character The label of the x-axis. By default, millions of years.
# @param col character Colors used for printing. Must be of same length as fig.types.
# @param col.alpha numeric Alpha channel parameter for credible intervals.
# @param xaxt character The type of x-axis to plot. By default, no x-axis is plotted (recommended).
# @param yaxt character The type of y-axis to plot.
# @param pch integer The type of points to draw (if points are drawn).
# @param ... Parameters delegated to various plotting functions.
#
#
################################################################################
tess.plot.output = function(output,fig.types=c("speciation rates","speciation shift times","speciation Bayes factors",
                                               "extinction rates","extinction shift times","extinction Bayes factors",
                                               "net-diversification rates","relative-extinction rates",
                                               "mass extinction times","mass extinction Bayes factors"),
                            xlab="million years ago",col=NULL,col.alpha=50,
                            xaxt="n",yaxt="s",pch=19,plot.tree=FALSE,
                            ...){
  # Draws one panel per entry in fig.types, dispatched on the type name:
  #   names containing "times"         -> barplot of posterior probabilities
  #   names containing "Bayes factors" -> points of 2*ln(BF) support values
  #   otherwise (rate panels)          -> posterior-mean curve + shaded band
  # NOTE(review): the `yaxt` argument is accepted but never forwarded to any
  # plotting call below, so it currently has no effect -- confirm intent.
  # Check that fig type is valid
  # Reject any requested panel that is not one of the ten supported names,
  # listing both the invalid and the valid options in the error message.
  validFigTypes <- c("speciation rates","speciation shift times","speciation Bayes factors",
                     "extinction rates","extinction shift times","extinction Bayes factors",
                     "net-diversification rates","relative-extinction rates",
                     "mass extinction times","mass extinction Bayes factors")
  invalidFigTypes <- fig.types[!fig.types %in% validFigTypes]
  if ( length( invalidFigTypes ) > 0 ) {
    stop("\nThe following figure types are invalid: ",paste(invalidFigTypes,collapse=", "),".",
         "\nValid options are: ",paste(validFigTypes,collapse=", "),".")
  }
  # Make color vector
  # Default: one fixed hex colour per panel type. If the user supplies `col`,
  # its elements are matched to fig.types by position (assumes
  # length(col) == length(fig.types) -- not checked here).
  if ( is.null(col) ) {
    col <- c("speciation rates"="#984EA3",
             "speciation shift times"="#984EA3",
             "speciation Bayes factors"="#984EA3",
             "extinction rates"="#E41A1C",
             "extinction shift times"="#E41A1C",
             "extinction Bayes factors"="#E41A1C",
             "net-diversification rates"="#377EB8",
             "relative-extinction rates"="#FF7F00",
             "mass extinction times"="#4DAF4A",
             "mass extinction Bayes factors"="#4DAF4A")
  } else {
    names(col) <- fig.types
  }
  # Compute the axes
  # The x axis is in discrete interval coordinates (0..numIntervals). `labels`
  # are "nice" ages which labelsAt converts to interval coordinates; the
  # subtraction places age 0 (the present) at the right-hand edge.
  treeAge <- max(branching.times(output$tree))
  numIntervals <- length(output$intervals)-1
  plotAt <- 0:numIntervals
  intervalSize <- treeAge/numIntervals
  labels <- pretty(c(0,treeAge))
  labelsAt <- numIntervals - (labels / intervalSize)
  for( type in fig.types ) {
    if ( grepl("times",type) ) {
      # Shift-time / mass-extinction-time panels: bar heights are the column
      # means (posterior probabilities per interval) of output[[type]].
      thisOutput <- output[[type]]
      meanThisOutput <- colMeans(thisOutput)
      # Pick the "...CriticalPosteriorProbabilities" element of `output`
      # whose name contains the first word of `type` (e.g. "speciation").
      criticalPP <- output[[grep(strsplit(type," ")[[1]][1],grep("CriticalPosteriorProbabilities",names(output),value=TRUE),value=TRUE)]]
      if(plot.tree){
        # Overlay the panel on a faint drawing of the tree.
        plot(output$tree,show.tip.label=FALSE,edge.col=rgb(0,0,0,0.10),x.lim=c(0,treeAge))
        par(new=TRUE)
      }
      barplot(meanThisOutput,space=0,xaxt=xaxt,col=col[type],border=col[type],main=type,ylab="posterior probability",xlab=xlab,ylim=c(0,1),...)
      # Dashed lines mark the critical posterior probabilities; the right-hand
      # axis annotates them with the corresponding 2*ln(Bayes factor) values.
      abline(h=criticalPP,lty=2,...)
      axis(4,at=criticalPP,labels=2*log(output$criticalBayesFactors),las=1,tick=FALSE,line=-0.5)
      axis(1,at=labelsAt,labels=labels)
      box()
    } else if ( grepl("Bayes factors",type) ) {
      # Bayes-factor panels: one point per interval, drawn at the interval
      # midpoint (plotAt[-1] - half an interval); y range always spans [-10,10].
      thisOutput <- output[[type]]
      ylim <- range(c(thisOutput,-10,10),finite=TRUE)
      if(plot.tree){
        plot(output$tree,show.tip.label=FALSE,edge.col=rgb(0,0,0,0.10),x.lim=c(0,treeAge))
        par(new=TRUE)
      }
      plot(x=plotAt[-1]-diff(plotAt[1:2])/2,y=thisOutput,type="p",xaxt=xaxt,col=col[type],ylab="Bayes factors",main=type,xlab=xlab,ylim=ylim,xlim=range(plotAt),pch=pch,...)
      # Dashed reference lines at the critical 2*ln(BF) thresholds.
      abline(h=2 * log(output$criticalBayesFactors),lty=2,...)
      axis(4,at=2 * log(output$criticalBayesFactors),las=1,tick=FALSE,line=-0.5)
      axis(1,at=labelsAt,labels=labels)
    } else {
      # Rate panels: posterior-mean curve plus a shaded band between the
      # 2.5% and 97.5% posterior quantiles of each interval's rate.
      thisOutput <- output[[type]]
      meanThisOutput <- colMeans(thisOutput)
      quantilesThisOutput <- apply(thisOutput,2,quantile,prob=c(0.025,0.975))
      if( type %in% c("speciation rates","extinction rates")){
        # Speciation and extinction panels share a common y range so the
        # two rates are directly comparable across panels.
        quantilesSpeciation <- apply(output[["speciation rates"]],2,quantile,prob=c(0.025,0.975))
        quantilesExtinction <- apply(output[["extinction rates"]],2,quantile,prob=c(0.025,0.975))
        ylim <- c(0,max(quantilesSpeciation,quantilesExtinction))
      } else {
        ylim <- c(0,max(quantilesThisOutput))
      }
      if(plot.tree){
        plot(output$tree,show.tip.label=FALSE,edge.col=rgb(0,0,0,0.10),x.lim=c(0,treeAge))
        par(new=TRUE)
      }
      # The first mean value is repeated so the curve starts at x = 0.
      plot(x=plotAt,y=c(meanThisOutput[1],meanThisOutput),type="l",ylim=ylim,xaxt=xaxt,col=col[type],ylab="rate",main=type,xlab=xlab,...)
      # Shaded credible band: col.alpha is appended to the "#RRGGBB" colour
      # string to form an "#RRGGBBAA" colour with alpha (assumes col[type]
      # is a 6-digit hex string, as the defaults are -- TODO confirm for
      # user-supplied colours).
      polygon(x=c(0:ncol(quantilesThisOutput),ncol(quantilesThisOutput):0),y=c(c(quantilesThisOutput[1,1],quantilesThisOutput[1,]),rev(c(quantilesThisOutput[2,1],quantilesThisOutput[2,]))),border=NA,col=paste(col[type],col.alpha,sep=""))
      axis(1,at=labelsAt,labels=labels)
    }
  }
} |
9c5c1cb69d34cfc096384b5d8d8ab503bacbb102 | ffdea92d4315e4363dd4ae673a1a6adf82a761b5 | /data/genthat_extracted_code/XLConnect/examples/xlcFreeMemory.Rd.R | 540b86d34a52120d5ac42f31e1cd45978b639e84 | [] | no_license | surayaaramli/typeRrh | d257ac8905c49123f4ccd4e377ee3dfc84d1636c | 66e6996f31961bc8b9aafe1a6a6098327b66bf71 | refs/heads/master | 2023-05-05T04:05:31.617869 | 2019-04-25T22:10:06 | 2019-04-25T22:10:06 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 180 | r | xlcFreeMemory.Rd.R | library(XLConnect)
### Name: xlcFreeMemory
### Title: Freeing Java Virtual Machine memory
### Aliases: xlcFreeMemory
### Keywords: utilities
### ** Examples
xlcFreeMemory()
|
32a9848625cd286567ba4e14475a97472af034a3 | 1dfea429b4ff19cd0673f5766a1bbac5904bba99 | /intro/lectures/week-10/10B-ancova.R | 3a79cf0e5236e684d1878907ee8e68b4e88f9a4c | [
"CC-BY-3.0",
"CC-BY-4.0"
] | permissive | andrewpbray/andrewpbray.github.io | e84dbf361dc7dfea45028bab64822be6b686b8e1 | aa930618a6f2aef8adc6dc0c1f2e58acf163273a | refs/heads/master | 2020-05-16T23:04:17.310343 | 2019-09-05T17:42:55 | 2019-09-05T17:42:55 | 23,562,269 | 0 | 5 | null | null | null | null | UTF-8 | R | false | false | 2,785 | r | 10B-ancova.R | ## ----setup, include=FALSE------------------------------------------------
library(knitr)
options(digits=3)
knitr::opts_chunk$set(echo = TRUE)
library(dplyr)
library(ggplot2)
library(oilabs)
library(openintro)
## ----getdata, echo = FALSE, message=FALSE--------------------------------
library(DAAG)
data(allbacks)
books <- allbacks[, c(3, 1, 4)]
## ----plotallbacks--------------------------------------------------------
qplot(x = volume, y = weight, data = books)
## ----fitm1, echo = FALSE-------------------------------------------------
m1 <- lm(weight ~ volume, data = books)
## ----plotallbackswline---------------------------------------------------
qplot(x = volume, y = weight, data = books) +
geom_abline(intercept = m1$coef[1], slope = m1$coef[2], col = "orchid")
## ------------------------------------------------------------------------
m1 <- lm(weight ~ volume, data = books)
summary(m1)
## ----resplot-------------------------------------------------------------
qplot(x = .fitted, y = .stdresid, data = m1)
## ----resplot2------------------------------------------------------------
qplot(sample = .stdresid, data = m1, stat = "qq") +
geom_abline()
## ----sumtable------------------------------------------------------------
summary(m1)
## ----eval = FALSE--------------------------------------------------------
## lm(Y ~ X1 + X2 + ... + Xp, data = mydata)
## ----plotcolors----------------------------------------------------------
qplot(x = volume, y = weight, color = cover, data = books)
## ------------------------------------------------------------------------
m2 <- lm(weight ~ volume + cover, data = books)
summary(m2)
## ----echo = FALSE--------------------------------------------------------
qplot(x = volume, y = weight, color = cover, data = books) +
geom_abline(intercept = m2$coef[1], slope = m2$coef[2], col = 2) +
geom_abline(intercept = m2$coef[1] + m2$coef[3], slope = m2$coef[2], col = 4)
## ------------------------------------------------------------------------
summary(m2)
## ------------------------------------------------------------------------
summary(m2)$coef
qt(.025, df = nrow(books) - 3)
## ----echo = FALSE--------------------------------------------------------
qplot(x = volume, y = weight, color = cover, data = books) +
geom_abline(intercept = m2$coef[1], slope = m2$coef[2], col = 2) +
geom_abline(intercept = m2$coef[1] + m2$coef[3], slope = m2$coef[2], col = 4)
## ----echo = FALSE--------------------------------------------------------
qplot(x = volume, y = weight, color = cover, data = books) +
stat_smooth(method = "lm", se = FALSE)
## ------------------------------------------------------------------------
m3 <- lm(weight ~ volume + cover + volume:cover, data = books)
summary(m3)
|
b0dbb85bfc97727665ee5151a5202c242640d83b | dac4a8f2b14dbb92dd07e9ca9642410ae407a2f2 | /man/TransEntro.Rd | d972e5db44ae4244434e7185dfe5a1646e1d4271 | [] | no_license | dstgithub/GrpString | 0710f0b5d1e8a90ee1e94e5a2f6facb19bc48c97 | 45b4da9cc59c71ddb8b53d7b6753665b7ff960fe | refs/heads/master | 2021-01-12T03:26:45.555515 | 2017-11-15T21:40:25 | 2017-11-15T21:40:25 | 78,210,123 | 2 | 0 | null | null | null | null | UTF-8 | R | false | false | 1,171 | rd | TransEntro.Rd | \name{TransEntro}
\alias{TransEntro}
\title{
Transition entropy of a group of strings
}
\description{
TransEntro computes the overall transition entropy of all the strings in a group.
}
\usage{
TransEntro(strings.vec)
}
\arguments{
\item{strings.vec}{
String Vector.
}
}
\value{
Returns a single number.
}
\details{
Entropy is calculated using the Shannon entropy formula: -sum(freqs * log2(freqs)). Here, freqs are transition frequencies, which are the values in the normalized transition matrix exported by function TransMx in this package. The formula is equivalent to the function entropy.empirical in the 'entropy' package when unit is set to log2.
}
\note{
Strings with less than 2 characters are not included for computation of entropy.
}
\references{
I. Hooge; G. Camps. (2013) Scan path entropy and arrow plots: capturing scanning behavior of multiple observers. Frontiers in Psychology.
}
\seealso{
\code{\link{TransEntropy}},
\code{\link{TransMx}}
}
\examples{
# simple strings
stra.vec <- c("ABCDdefABCDa", "def123DC", "A", "123aABCD", "ACD13", "AC1ABC", "3123fe")
TransEntro(stra.vec)
}
\keyword{programming}
|
3a64c53477fdda642cbfd5f8c9103aab44daa0ed | 1256464f6234f9a9ff380a4b4739142201655f36 | /man-roxygen/params_y.R | 2905fe31f696843569be32238707a3ecfbab611c | [] | no_license | hclimente/martini | 6dd8a6c13454e739171d82d51b82a133a22b3ee0 | 544b8dd6762f5ede704a9471b940dba258ede8ed | refs/heads/master | 2023-02-01T00:05:37.682562 | 2023-01-11T08:20:52 | 2023-01-11T08:20:52 | 87,456,798 | 4 | 2 | null | 2023-01-11T08:20:54 | 2017-04-06T17:31:13 | R | UTF-8 | R | false | false | 49 | r | params_y.R | #' @param y Vector of length n with the outcomes
|
7954dcd1a687a6664db96deaaeb606831d840269 | 905cc85395d13d94f24cd7aa4ec75bfd58fcb3f2 | /fog/get_random_feature.R | 6292894944ef9c97888a9eee93dc194c764948c7 | [] | no_license | iangow/bgt | 6a097475bceafbda6b0f4d84c169ea6885a51122 | 723ee89b98d0af20901c925fafab6eadf58be187 | refs/heads/master | 2023-06-04T18:35:57.250129 | 2023-05-16T18:04:49 | 2023-05-16T18:05:05 | 32,786,841 | 2 | 4 | null | null | null | null | UTF-8 | R | false | false | 1,460 | r | get_random_feature.R | library(dplyr)
library(RPostgreSQL)
pg <- dbConnect(PostgreSQL())
long_words <- tbl(pg, sql("SELECT * FROM bgt.long_words"))
set.seed(2016)
n_letters <- 3
# Build a regular expression matching words that start with one of `n`
# randomly chosen lowercase letters.
#
# Samples `n` distinct letters (without replacement) and returns a pattern
# of the form "^[abc]", anchored at the start of the string. The sampled
# letters are printed as a side effect (kept from the original script so
# the chosen letters are visible in the log).
#
# Args:
#   n: number of distinct letters to sample. Defaults to the script-level
#      `n_letters`, so existing zero-argument calls behave exactly as before.
#
# Returns:
#   A length-one character vector containing the regular expression.
get_regex <- function(n = n_letters) {
  the_letters <- sample(letters, n, replace = FALSE)
  the_regex <- paste0("^[", paste(the_letters, collapse = ""), "]")
  print(the_letters)
  return(the_regex)
}
the_regex_1 <- get_regex()
the_regex_2 <- get_regex()
the_regex_3 <- get_regex()
the_regex <- paste0("^[bgt]")
dbGetQuery(pg, "SET work_mem='3GB'")
dbGetQuery(pg, "DROP TABLE IF EXISTS random_feature")
dbGetQuery(pg, "DROP TABLE IF EXISTS bgt.random_feature")
random_feature <-
long_words %>%
mutate(match_count=regex_count(long_words, the_regex),
match_count_1=regex_count(long_words, the_regex_1),
match_count_2=regex_count(long_words, the_regex_2),
match_count_3=regex_count(long_words, the_regex_3),
word_count=array_length(long_words, 1L)) %>%
mutate(match_prop=match_count * 1.0 / word_count,
match_prop_1=match_count_1 * 1.0 / word_count,
match_prop_2=match_count_2 * 1.0 / word_count,
match_prop_3=match_count_3 * 1.0 / word_count) %>%
select(-long_words, -starts_with("match_count"), -word_count) %>%
compute(name="random_feature", temporary=FALSE)
dbGetQuery(pg, "ALTER TABLE random_feature OWNER TO bgt")
dbGetQuery(pg, "ALTER TABLE random_feature SET SCHEMA bgt")
dbDisconnect(pg)
|
6fde116efcfef7edaf9d733b46b03125cb6d86a4 | 2b6e33c18ae217c5cfe90f8572aaeb7722aea5c0 | /problems/week-03-problem.R | 61136b4e4c6679180fec94194d0d138a15553a45 | [] | no_license | aifoss/edx-statistics-using-r | 8264804e6e748790f98fe87656c637d5041be3f7 | 82a270ad95dfc05f28f932ea970596bac15645a3 | refs/heads/master | 2021-05-11T21:05:09.974445 | 2018-01-14T19:24:02 | 2018-01-14T19:24:02 | 117,459,358 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 3,948 | r | week-03-problem.R | ################################################################################
## Source: edX
## Series: Foundations of Data Analysis
## Course: 1 Statistics Using R
## Week: Week 3
## Topic: Bivariate Distributions
## File: week-03-problem.R
## Date: 2016-02-21
################################################################################
################################################################################
## Question Set 1
################################################################################
# During a professional bull-riding event, riders usually attempt to ride
# a bull three or more times. This means that they can record a "ride"
# (successfully staying on the bull) multiple times in the same event.
# 1. Subset the dataset for riders that had at least 1 ride in the 2014 season.
# Call this dataset new_bull.
library(SDSFoundations)
bull <- BullRiders
new_bull <- bull[bull$Rides14 > 0, ]
# 2. Create a new variable or vector for he average number of rides per event
# for each bull rider in the new_bull dataset.
rides_per_event <- new_bull$Rides14 / new_bull$Events14
# 3. Make a histogram of your "rides per event" variable and find the five-number
# summary for your "rides per event" variable.
hist(rides_per_event)
fivenum(rides_per_event)
# 1a. What is the minimum value?
# 0.20
round(min(rides_per_event), 2)
# 1b. What is the median?
# 1
median(rides_per_event)
# 1c. What is the maximum value?
# 2
round(max(rides_per_event), 2)
# 1d. Create a scatterplot of "rides per event" and yearly rankning (defined by
# "Rank14" variable) and add a line of best fit. Which of the following
# best describe the relationship between these two variables?
# The two variables have a negative linear relationship.
plot(rides_per_event, new_bull$Rank14)
abline(lm(new_bull$Rank14 ~ rides_per_event), col="blue")
# 1e. What is the correlation coefficient for rides per event and yearly ranking?
# -0.495
round(cor(rides_per_event, new_bull$Rank14), 3)
# 1f. Suppose that college GPA and graduate school GPA have a correlation
# coefficient of 0.75. Based on this, what proportion of variation
# in graduate school GPS is left unexplained after taking college GPA
# into account?
# 0.4375
r_squared <- 0.75 * 0.75
round(1-r_squared, 4)
################################################################################
## Question Set 2
################################################################################
# Using the dataset below, find the correlation coefficient between time spent
# studying and exam grade.
minutes_spent <- c(30, 45, 180, 95, 130, 140, 30, 80, 60, 110, 0, 80)
exam_grade <- c(74, 68, 87, 90, 94, 84, 92, 88, 82, 93, 65, 90)
df <- data.frame(minutes_spent = minutes_spent, exam_grade = exam_grade)
# 2a. What is the correlation coefficient based on the data?
# 0.597
cor <- cor(df$minutes_spent, df$exam_grade)
round(cor, 3)
# 2b. Approximately what percentage of the variation in exam scores can be
# explained by the amount of time that each student studied?
# 36
r_squared <- cor ^ 2
percentage <- round(r_squared * 100, 0)
# 2c. Create a scatterplot of the data (exam grades and time spent studying).
# What is the value of the outlier (the student that got a high grade
# but didn't study very long)?
# X = 30
# Y = 92
plot(df$minutes_spent, df$exam_grade)
# 2d. When the outlier is removed, what is the new value or r?
# 0.737
outlier_index <- which(df$minutes_spent == 30 & df$exam_grade == 92)
new_df <- df[-outlier_index, ]
new_cor <- cor(new_df$minutes_spent, new_df$exam_grade)
round(new_cor, 3)
# 2e. How did the outlier impact our efforts to assess the relationship
# between time spent studying and exam grades?
# The outlier caused the relationship to look weaker than it really is.
################################################################################ |
38b850218a960d19a6e60a9d6f7480b66be943fd | fd26c120974a4666962ee6edef1724e5a8c60fe2 | /demo/Mu-traj-demo.R | a8fb1af7184f7f56b9bd50552fb11509199f7704 | [
"MIT"
] | permissive | htso/EM-Initialization-Algorithms | 70f6f57af0457f8502ca2d971d67dacaf8298892 | c5a31eddb1cc8e9e0b228b390451b2b5ea165049 | refs/heads/master | 2022-04-04T09:18:03.983031 | 2020-01-30T22:07:19 | 2020-01-30T22:07:19 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 1,437 | r | Mu-traj-demo.R | library(Hext)
library(InitExp)
data(simdat2fullcov)
dat.nm ="Fullcov"
simdat = simdat2.3
x = list(x=as.matrix(simdat), TT=TT)
class(x) = "Hext.data"
mtype = rep(5, NCOL(simdat))
p = c(0, 0, 0, 0, length(mtype))
Nruns = 5
K = 3
M = 5
maxit = 20
tol = 1e-01
var.reduct=0.5
spherical=TRUE
wdf=4
D = 0
system.time(mu.paths <- mu.trajectory(Nruns=Nruns, init.method = "random", x=x, mtype=mtype,
K=K, M=M, D=D, maxit=maxit, tol=tol,
wdf=wdf, var.reduct=var.reduct, spherical=spherical,
verbose=FALSE))
X11();par(mar=c(1,1,4,1))
plot(simdat, type="p", col="grey", xlab="", ylab="",
main=paste("EM estimates of Gaussian Means vs True Means"))
points(mu.paths$mu[[1]], col="blue", pch=18, cex=2)
points(mu.paths$mu[[2]], col="green", pch=17, cex=2)
points(mu.paths$mu[[3]], col="brown", pch=20, cex=2)
points(mu.paths$mu[[4]], col="grey", pch=16, cex=2)
points(mu.paths$mu[[5]], col="pink", pch=15, cex=2)
points(mu.paths$mu[[6]], col="black", pch=14, cex=2)
ctrs = M5.truth$mu.ll
for ( k in 1:K ) {
x.m1 = ctrs[[k]][[1]][1]
y.m1 = ctrs[[k]][[1]][2]
x.m2 = ctrs[[k]][[2]][1]
y.m2 = ctrs[[k]][[2]][2]
points(x=x.m1, y=y.m1, pch=19, cex=3, col="red")
points(x=x.m2, y=y.m2, pch=19, cex=3, col="red")
}
X11();hist(mu.paths$ans[,3], breaks=30, xlim=c(-9600, -8200), xlab="",
main=paste("Loglikelihood Distribution for", dat.nm))
|
3f399983e450cad8ae98526de0760bf0cabbce30 | b2f61fde194bfcb362b2266da124138efd27d867 | /code/dcnf-ankit-optimized/Results/QBFLIB-2018/A1/Database/Kronegger-Pfandler-Pichler/dungeon/dungeon_i30-m15-u4-v0.pddl_planlen=25/dungeon_i30-m15-u4-v0.pddl_planlen=25.R | 19a87e05373d6885de7488e0a9b74a45cbdac264 | [] | no_license | arey0pushpa/dcnf-autarky | e95fddba85c035e8b229f5fe9ac540b692a4d5c0 | a6c9a52236af11d7f7e165a4b25b32c538da1c98 | refs/heads/master | 2021-06-09T00:56:32.937250 | 2021-02-19T15:15:23 | 2021-02-19T15:15:23 | 136,440,042 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 91 | r | dungeon_i30-m15-u4-v0.pddl_planlen=25.R | 63aab2e608c46c9813a64c9bfc9eac5a dungeon_i30-m15-u4-v0.pddl_planlen=25.qdimacs 27298 383102 |
a52da20ab24fe4effbd14fa8d31f74ee3fa1b976 | 01d4032c47ad24fbe7b3d31d760938fd0ab872c7 | /Training_R/MSE.R | adcae98a089af2bf1674158ef2fdcee5a971457f | [] | no_license | Filippini-Fiorentini/base_FF | df93a8b51d210e5bbeef4e03335d1161c0994f97 | 8655dbe038c36a866186279351924d20ecd39488 | refs/heads/master | 2020-03-08T16:44:52.374067 | 2019-06-27T09:07:56 | 2019-06-27T09:07:56 | 128,248,424 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 109 | r | MSE.R | MSE = function (y_pred,y_vec)
{
train_err = sqrt(sum((y_vec - y_pred)^2)/length(y_vec))
return(train_err)
} |
e764c2dd80748767e1bf79a42776dfbf12663c10 | 25bd099ac1e6dfebe39967b1d4ec041671dff997 | /final_project/run_analysis.R | fa3e09c240d63328581fcc8a5871aaa925f393e6 | [] | no_license | mattdturner/Coursera_Getting_and_Cleaning_Data | aa209cc15405ea1384cb611fedfc795164dde532 | 22d7366aa45a4a891e73ccee21700dffb9a344ef | refs/heads/master | 2020-12-24T06:41:37.745226 | 2016-07-20T05:10:33 | 2016-07-20T05:10:33 | 63,751,956 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 2,893 | r | run_analysis.R | # Getting and Cleaning Data
# Final Project
#
# Author: Matt Turner
# Date created: July 19, 2016
#
# Purpose: download the UCI HAR smartphone dataset, keep only the mean/std
# features, merge the training and test splits, attach readable activity and
# feature names, and write a tidy data set holding the average of each
# feature for every subject/activity pair to "tidy.txt".
# Set the working directory to the project root
# NOTE(review): machine-specific absolute path -- anyone else running this
# script must edit this line (or delete it and run from the project root).
setwd("/Users/matthewturner/Desktop/School/Coursera/Getting_And_Cleaning_Data/final_project/")
# Load libraries (reshape2 provides melt()/dcast() used at the end)
library(reshape2)
# Download the data and save into the data folder
# (the archive is fetched only if it is not already on disk;
# method="curl" requires the curl binary to be available on this machine)
url <- "https://d396qusza40orc.cloudfront.net/getdata%2Fprojectfiles%2FUCI%20HAR%20Dataset.zip"
fname <- "UCI_HAR_Dataset.zip"
if ( ! file.exists(fname) ){
    print("File does not currently exist... downloading")
    download.file(url,fname,method="curl")
}
# Extract the data from the archive, and store in a separate data folder
# (unzip creates the "UCI HAR Dataset" directory; skipped when it exists)
data_dir <- 'UCI HAR Dataset'
if ( ! file.exists(data_dir) ){
    print("Extracting the contents of the .zip archive")
    unzip(fname,list=FALSE,overwrite=TRUE)
}
# Load the activity and feature information:
#   activity: numeric activity code (col 1) -> activity name (col 2)
#   features: feature index (col 1) -> feature name (col 2)
activity <- read.table(file.path(data_dir,"activity_labels.txt"))
activity[,2] <- as.character(activity[,2])
features <- read.table(file.path(data_dir,"features.txt"))
features[,2] <- as.character(features[,2])
# Grab information about only the mean / std deviation features.
# The match is case-sensitive, so lowercase "mean"/"std" variants
# (including meanFreq()) are kept while names containing only the
# capitalised "Mean" (e.g. angle(...gravityMean)) are not.
desired_features <- grep('.*mean.*|.*std.*',features[,2])
desired_feature_names <- features[desired_features,2]
# Clean the feature names: camel-case "mean"/"std" and strip the hyphens
# and parentheses so the names are valid, readable column identifiers.
desired_feature_names <- gsub('-mean','Mean',desired_feature_names)
desired_feature_names <- gsub('-std','Std',desired_feature_names)
desired_feature_names <- gsub('[-()]','',desired_feature_names)
# Load the training data and merge to a dataframe.
# X_train columns are subset by position; this assumes they line up 1:1
# with the rows of features.txt (the documented UCI HAR layout).
training_data <- read.table(file.path(data_dir,"train/X_train.txt"))[desired_features]
training_activity <- read.table(file.path(data_dir,"train/y_train.txt"))
training_subjects <- read.table(file.path(data_dir,"train/subject_train.txt"))
training <- cbind(training_subjects,training_activity,training_data)
# Load the testing data and merge to a dataframe (same layout as training)
testing_data <- read.table(file.path(data_dir,"test/X_test.txt"))[desired_features]
testing_activity <- read.table(file.path(data_dir,"test/y_test.txt"))
testing_subjects <- read.table(file.path(data_dir,"test/subject_test.txt"))
testing <- cbind(testing_subjects,testing_activity,testing_data)
# Merge the datasets by stacking the training rows on top of the test rows
merged_data <- rbind(training,testing)
# Assign labels to the merged data: subject id, activity, then the features
# (order matches the cbind() calls above)
colnames(merged_data) <- c("subjectID","activityName",desired_feature_names)
# Convert activity information into factors,
# mapping the numeric codes to the names read from activity_labels.txt
merged_data$activityName <- factor(merged_data$activityName,levels=activity[,1],labels=activity[,2])
# Convert subject information into factors
merged_data$subjectID <- as.factor(merged_data$subjectID)
# Melt the data to long format keyed by subject and activity
melted_data <- melt(merged_data,id=c("subjectID","activityName"))
# Get the means: one row per subject/activity pair, one column per feature
mean_data <- dcast(melted_data,subjectID+activityName~variable,mean)
# Write the tidy data to file (space-separated, no row names or quoting)
write.table(mean_data,"tidy.txt",row.names=FALSE,quote=FALSE)
|
726fa67fb24244f6c086f837e7f4715f6260f0ec | 828f885e708e688126547cedbf41644a00662579 | /EXAMPLE DATA FRAME IN R.R | 5adfdaedf304ab07edf90285c4093e5b3cc12dc5 | [] | no_license | ElrondHubbard/squibbs | 5b41d62d2ca1586dedac2247d67dd93c8f56d881 | 063a8fef5bc14d96f880ed17285b4218714e51a7 | refs/heads/master | 2021-01-19T19:21:39.847693 | 2018-12-12T04:12:19 | 2018-12-12T04:12:19 | 83,724,465 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 2,116 | r | EXAMPLE DATA FRAME IN R.R | df <- data.frame(key=c('1', '2', '3', '4', '5'),
name1=c('black','black','black','red','red'),
type1=c('chair','chair','sofa','sofa','plate'),
num1=c(4,5,12,4,3),
name2=c('black', 'red', 'black', 'green', 'blue'),
type2=c('chair','chair','sofa','bed','plate'),
num2=c(4,7,12,3,1),
name3=c('blue', 'green', 'black', 'blue', 'blue'),
type3=c('couch','chair','sofa','plate','plate'),
num3=c(12,8,12,4,1))
group_len <- 3
groups <- split(2:ncol(df), cut(2:ncol(df), 7))
stacked.df <- do.call(rbind, lapply(groups, function(cols) {
group <- df[ , c(1, cols)]
names(group) <- c("key", "name", "type", "num")
group
}))
df2 <- group_by(stacked.df, key, name, type, num) %>%
summarise(dupes = n() > 1, num_dupes = n())
?split
?cut
?lapply
patt <- c("test","10 Barrel")
lut <- c("1 Barrel","10 Barrel Brewing","Harpoon 100 Barrel Series","resr","rest","tesr")
test_df <- data.frame(name=c('nancy j hill', 'nancy hill', 'jane smith', 'jane smithe', 'jane smyth' ),
email=c('big@addr.com', 'big@addr.com', 'small@addr.com', 'small@addr.com', 'small@addr.com'),
addr1=c('123 main st', '1234 main st', '742 evergreen terrace', '42 evergreen terrace', '742 evergreen terrace 42'),
addr2=c('13 main st', '12 main st', '742 evergren terrace', '42 evergreen terr', '742 evergreen terrace 4')
)
test_df$check1 <- lapply(test_df$email, agrep, x=c(test_df$addr1, test_df$addr2), max.distance=1, value = TRUE)
test_df$check2 <- lapply(test_df$email, agrep, x=c(test_df$addr1, test_df$addr2), max.distance=2, value = TRUE)
test_df$check3 <- lapply(test_df$email, agrep, x=c(test_df$addr1, test_df$addr2), max.distance=3, value = TRUE)
patt <- c("test","10 Barrel")
lut <- c("1 Barrel","10 Barrel Brewing","Harpoon 100 Barrel Series","resr","rest","tesr")
for (i in 1:length(patt)) {
print(agrep(patt[i],lut,max=2,v=T))
}
|
33ba078978b515a50be6acd687b7a3960c7a4666 | 76dbc1754d4fac81e75fc054858ba91f99b55b2d | /man/optimFit.Rd | 66e4b2c1d58671353b89311280a6ebbec94a08d6 | [] | no_license | dfeehan/mortfit | e51ac12507385bd9024e8109aa1a3eaea2895fb5 | 8dfd82e93fde1bf408dbe59eb004cc8694603f88 | refs/heads/master | 2021-01-18T00:00:39.351697 | 2020-11-08T16:23:12 | 2020-11-08T16:23:12 | 18,040,328 | 2 | 1 | null | null | null | null | UTF-8 | R | false | true | 311 | rd | optimFit.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/fitmethod-optim.R
\docType{data}
\name{optimFit}
\alias{optimFit}
\title{fitMethod object for fitting via optim}
\format{fitMethod object}
\usage{
optimFit
}
\description{
fitMethod object for fitting via optim
}
\keyword{datasets}
|
bdcc71d2a043cdb516f7c6a4e5097a8da469422b | f35ab111a58687227e9516ccf9dfa3f9b3e2776e | /data-example.R | a18231456bfb134aaef7015a06bbdd62f8d3243b | [
"MIT"
] | permissive | sachsmc/twomediatorbounds | 07c5d97387ff29962ddb2ff7bf475393d8a46b9d | 0fdae48042429d5e0d940a42e45b710544158da7 | refs/heads/main | 2023-03-23T12:49:18.998633 | 2021-03-16T17:11:31 | 2021-03-16T17:11:31 | 348,327,901 | 1 | 0 | null | null | null | null | UTF-8 | R | false | false | 4,117 | r | data-example.R | library(mediation)
library(ggplot2)
library(patchwork)
library(exact2x2)
data(jobs)
names(jobs)
boundsdata<-data.frame(trt = jobs$treat)
boundsdata$outcome <- 1.0 * (jobs$work1 == "psyemp")
boundsdata$M1 <- 1.0 * (jobs$depress2 > 2)
boundsdata$M2 <- jobs$job_dich
by(boundsdata, boundsdata$trt, summary)
con.probs <- list()
typs <- expand.grid(y = 0:1, m1 = 0:1, m2 = 0:1, x = 0:1)
for(j in 1:nrow(typs)) {
btmp <- boundsdata[boundsdata$trt == typs[j,"x"],]
con.probs[[with(typs[j, ], sprintf("p%s%s%s_%s", y, m1, m2, x))]] <-
mean(btmp$outcome == typs[j,"y"] &
btmp$M1 == typs[j,"m1"] &
btmp$M2 == typs[j,"m2"])
}
bnds.funcs <- c("nde.000", "jnie.1", "ms2.nie.1.11", "nie.2.100")
f.bnds.list <- lapply(bnds.funcs, function(x) {
readRDS(file.path("bndfuncs", paste0(x, ".rds")))
})
names(f.bnds.list) <- bnds.funcs
cdebnds <- readRDS(file.path("bndfuncs", "cde-bounds.rds"))
TE <- mean(subset(boundsdata, trt == 1)$outcome) -
mean(subset(boundsdata, trt == 0)$outcome)
t.test(outcome ~ I(1 - trt), data = boundsdata)
table(boundsdata$trt, boundsdata$outcome)
binomMeld.test(86, 299, 207, 600, parmtype = "difference")
bees <- do.call(rbind, lapply(f.bnds.list, function(f) {
do.call(f, con.probs)
}))
bees$point <- NA
bees <- rbind(bees, data.frame(lower = c(NA),
upper = c(NA),
point = c(TE)))
## Forest-style plot of effect bounds over two stacked panels.
## NOTE(review): `TE`, `bees` (with point/lower/upper columns), `cdebnds`,
## and `con.probs` are defined earlier in this file -- confirm before reuse.
## Attach plotmath labels and y-axis positions for each effect.
bees$effect <- c("'NDE-000'", "JNIE[1]", "MS^2-NIE[1]-11",
                "NIE[2]-100", "TE")
bees$place <- c(4, 3, 2, 1, 4.5)
## Copy with the second effect moved to y = 0 so panel 2 has room for the
## decomposition arrows drawn below.
bees2 <- bees
bees2$place <- c(4, 0, 2, 1, 4.5)
## Panel 1: intervals for rows 1, 2, 5 with shaded polygons connecting the
## TE reference line to the first row's upper/lower bounds.
p1 <- ggplot(bees[c(1, 2, 5),],
            aes(y = place,
                x = point, xmin = lower, xmax = upper)) +
  geom_linerange(size = 1) + geom_point(size = 2) + theme_bw() + xlab("Bounds") +
  scale_y_continuous(breaks = bees[c(1, 2, 5),]$place,
                     labels = str2expression(bees[c(1, 2, 5),]$effect)) +
  ylab("Effect") + geom_vline(xintercept = TE, linetype = 3) +
  geom_polygon(data = data.frame(x = c(TE, bees$upper[1], TE - bees$upper[1], TE),
                                 y = c(4, 4, 3, 3)), aes(x = x, y = y),
               inherit.aes = FALSE, alpha = .2, fill = "grey20") +
  geom_polygon(data = data.frame(x = c(TE, bees$lower[1], TE - bees$lower[1], TE),
                                 y = c(4, 4, 3, 3)), aes(x = x, y = y),
               inherit.aes = FALSE, alpha = .2, fill = "grey80") +
  xlim(c(-1.5, 1))
## Panel 2: rows 2-4 with arrow paths tracing how the row-3/row-4 bounds
## combine (sums and differences of the lower/upper limits).
p2<- ggplot(bees2[c(2,3,4),],
           aes(y = place,
               x = point, xmin = lower, xmax = upper)) +
  geom_linerange(size = 1) + theme_bw() + xlab("Bounds") +
  scale_y_continuous(breaks = bees2[c(2,3,4),]$place,
                     labels = str2expression(bees2[c(2, 3, 4),]$effect)) +
  ylab("Effect") +
  geom_path(data = data.frame(x = c(bees$lower[3], bees$lower[4], sum(bees$lower[4:3])),
                              y = c(2, 1, 0)), aes(x = x, y = y),
            inherit.aes = FALSE, color = "#FD5E0F", arrow = arrow()) +
  geom_path(data = data.frame(x = c(bees$upper[3], bees$upper[4], sum(bees$upper[4:3])),
                              y = c(2, 1, 0)), aes(x = x, y = y),
            inherit.aes = FALSE, color = "#FD5E0F", arrow = arrow())+
  geom_path(data = data.frame(x = c(bees$lower[2], bees$upper[4], bees$lower[2] - bees$upper[4]),
                              y = c(0, 1, 2)), aes(x = x, y = y),
            inherit.aes = FALSE, color = "#5F3A3F", arrow = arrow()) +
  geom_path(data = data.frame(x = c(bees$upper[2], bees$lower[4], bees$upper[2] - bees$lower[4]),
                              y = c(0, 1, 2)), aes(x = x, y = y),
            inherit.aes = FALSE, color = "#5F3A3F", arrow = arrow()) +
  geom_vline(xintercept = c(bees$lower[2], bees$upper[2]), linetype = 3) +
  xlim(c(-1.5, 1.5))
## Stack the two panels vertically (patchwork's plot_layout).
p1 + p2 + plot_layout(ncol = 1)
#ggsave("jobs-fig.pdf", width = 5.5, height = 4.75)
## cdes
## Evaluate each CDE bound function on the conditional probabilities and
## tabulate the results alongside the main bounds table.
cdbees <- do.call(rbind, lapply(cdebnds, function(f) {
  do.call(f, con.probs)
}))
cdbees$bound <- c("'CDE-00'", "'CDE-01'",
                  "'CDE-10'", "'CDE-11'")
knitr::kable(cdbees, digits = 2)
knitr::kable(bees, digits =2 )
###
|
f851e22f924deffd494e26f15d2bbdeb89b646f0 | 049b6e37472c3d460bb30911cd7d470d563c612d | /tests/testthat/test-wrapper_get_ti_methods.R | 203c39424978bc1879b358ae67f83caa75c772f9 | [] | no_license | ManuSetty/dynmethods | 9919f4b1dc30c8c75db325b4ddcd4e9ada5e488b | 337d13b7a6f8cac63efdeb0d06d80cd2710d173d | refs/heads/master | 2020-03-21T11:34:35.406210 | 2018-06-24T20:25:50 | 2018-06-24T20:25:50 | 138,512,485 | 1 | 0 | null | 2018-06-24T20:16:12 | 2018-06-24T20:16:11 | null | UTF-8 | R | false | false | 1,105 | r | test-wrapper_get_ti_methods.R | context("Testing get_ti_methods")
## These integration checks are skipped on Travis CI.
if (Sys.getenv("TRAVIS") != "true") {
  # The method listing should be retrievable both as a tibble and as a list.
  test_that("Descriptions can be retrieved", {
    method_tbl <- dynwrap::get_ti_methods(packages = "dynmethods")
    expect_that(method_tbl, is_a("tbl"))
    method_list <- dynwrap::get_ti_methods(as_tibble = FALSE, packages = "dynmethods")
    expect_that(method_list, is_a("list"))
  })
  methods <- get_ti_methods(packages = "dynmethods")
  # Each wrapped trajectory-inference method must expose a usable parameter set.
  for (row_index in seq_len(nrow(methods))) {
    method <- extract_row_to_list(methods, row_index)$method_func()
    test_that(pritt("Checking whether {method$short_name} can generate parameters"), {
      par_set <- method$par_set
      # must be able to generate a 3 random parameters
      random_design <- ParamHelpers::generateDesign(3, par_set)
      # must be able to generate the default parameters
      default_design <- ParamHelpers::generateDesignOfDefaults(par_set)
      # Every declared parameter must also be accepted by the run function
      # (after discarding the fixed data-passing arguments).
      declared <- names(par_set$pars)
      accepted <- setdiff(formalArgs(method$run_fun), c("counts", "start_id", "start_cell", "end_id", "groups_id", "task"))
      expect_equal(declared[declared %in% accepted], declared)
    })
  }
}
|
534518e73cba1f640a63a57448ebff7eb4076f40 | d25fe860092775b50e9714228c170e3e90f3454b | /run_analysis.R | 4d6950922cdafdab932b547695d503ba87e0599f | [] | no_license | mddan/data_cleaning_course_project | 8203797eb9dc0f3b5c1be3f4844eadc236e52d8d | ded800ef8cf12e2eccaf14c6185ee33d48f22e66 | refs/heads/master | 2021-01-22T03:12:57.261036 | 2017-09-03T22:05:44 | 2017-09-03T22:05:44 | 102,258,849 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 3,405 | r | run_analysis.R |
## Create the data folder and fetch the assignment data.
## Fixes: the download is skipped when the archive is already present (the
## original re-downloaded ~60 Mb on every run), and `mode = "wb"` forces a
## binary transfer so the zip is not corrupted on Windows.
if(!file.exists("./data")){dir.create("./data")}
fileUrl <- "https://d396qusza40orc.cloudfront.net/getdata%2Fprojectfiles%2FUCI%20HAR%20Dataset.zip"
if (!file.exists("./data/Data.zip")) {
  download.file(fileUrl, destfile = "./data/Data.zip", mode = "wb")
}
unzip(zipfile="./data/Data.zip",exdir="./data")
##Read all relevant data tables:
## y_* files hold the activity codes, subject_* files the subject ids, and
## X_* files the feature measurements, for the test and train partitions.
activity_test <- read.table("./data/UCI HAR Dataset/test/y_test.txt", header = FALSE)
activity_train <- read.table("./data/UCI HAR Dataset/train/y_train.txt", header = FALSE)
subject_test <- read.table("./data/UCI HAR Dataset/test/subject_test.txt", header = FALSE)
subject_train <- read.table("./data/UCI HAR Dataset/train/subject_train.txt", header = FALSE)
x_test <- read.table("./data/UCI HAR Dataset/test/X_test.txt", header = FALSE)
x_train <- read.table("./data/UCI HAR Dataset/train/X_train.txt", header = FALSE)
## Bind train & test data tables (on subject, feature and activities)
## and set the corresponding variable names for each data table;
## Row order is train first, then test, consistently across the three tables.
activity_all <- rbind(activity_train, activity_test)
names(activity_all) <- c("activity_num")
subject_all <- rbind(subject_train, subject_test)
names(subject_all) <- c("subject")
feature_all <- rbind(x_train, x_test)
## NOTE(review): `head=FALSE` below resolves to `header=` only through R's
## partial argument matching; spelling out `header = FALSE` would be safer.
feature_names <- read.table("./data/UCI HAR Dataset/features.txt", head=FALSE)
names(feature_all) <- feature_names[,2]
## Bind by column to create the complete merged data table;
data_full <- cbind(feature_all, subject_all, activity_all)
## Keep only the mean() / std() measurements plus the two id columns, then
## attach human-readable activity labels.
feature_labels <- as.character(feature_names[,2])
wanted_measures <- feature_labels[grepl("mean\\(\\)|std\\(\\)", feature_labels)]
keep_cols <- c(wanted_measures, "subject", "activity_num")
## Extract only the mean and standard-deviation measurements.
data_full_subset <- data_full[, keep_cols]
## Replace the numeric activity codes with their descriptive names.
activity_names <- read.table("./data/UCI HAR Dataset/activity_labels.txt", header=FALSE, col.names = c("activity_num","activity"))
data_merged <- merge(data_full_subset, activity_names, by = "activity_num")
## Drop the leading activity_num key column; the label column remains.
data_merged <- data_merged[,-1]
## Appropriately label the data set with descriptive variable names by
## applying an ordered table of pattern -> replacement rewrites (the order
## matters: prefixes first, then word expansions, then punctuation removal).
rename_rules <- c(
  "^t"        = "time",          # leading t  -> time domain
  "^f"        = "frequency",     # leading f  -> frequency domain
  "Acc"       = "Accelerometer",
  "Gyro"      = "Gyroscope",
  "Mag"       = "Magnitude",
  "BodyBody"  = "Body",          # fix the duplicated-Body typo in raw names
  "[\\(\\)-]" = ""               # strip parentheses and dashes
)
for (pattern in names(rename_rules)) {
  names(data_merged) <- gsub(pattern, rename_rules[[pattern]], names(data_merged))
}
## Create a second, independent tidy data set with the average of each
## variable for each activity and each subject, written to tidydata.txt.
library(dplyr)  # NOTE(review): kept from the original; the steps below use base R only
data_tidy <- aggregate(. ~subject + activity, data_merged, mean)
data_tidy <- data_tidy[order(data_tidy$subject,data_tidy$activity),]
## Fix: spell out `row.names` -- the original passed `row.name`, which only
## worked through R's partial argument matching.
write.table(data_tidy, file = "tidydata.txt", row.names = FALSE)
|
7ecdb6529a366703d300c886c82940e053241924 | 1fc02d5293e23639d667acc9c228b761478206e2 | /tests/testthat/test_LORDstar.R | 6a3f7f02890b5e0a3b802de75a9d855f531b0465 | [] | no_license | dsrobertson/onlineFDR | caf7fa9d6f52531170b3d5caa505a15c87d6db11 | 2e5a3eaf9cf85d2c04a587ad3dd8783f66435159 | refs/heads/master | 2023-04-29T11:25:12.532739 | 2023-04-12T10:30:23 | 2023-04-12T10:30:23 | 129,420,795 | 14 | 4 | null | 2023-04-12T10:33:39 | 2018-04-13T15:27:02 | R | UTF-8 | R | false | false | 3,095 | r | test_LORDstar.R | test.pval <- c(1e-07, 3e-04, 0.1, 5e-04)
## Per-hypothesis decision times: staggered (1..4) vs. all decided at time 4.
test.decision.times <- seq_len(4)
test.decision.times2 <- rep(4,4)
## Local dependence lags: none vs. maximal.
test.lags <- rep(0,4)
test.lags2 <- rep(4,4)
## Batch sizes: four singleton batches vs. one batch of four.
## Fix: the original assigned both values to `test.batch.sizes`, silently
## discarding the first; name them as a pair like the other fixtures.
## (Neither is referenced below -- the batch tests pass sizes literally.)
test.batch.sizes <- rep(1,4)
test.batch.sizes2 <- 4
## Data-frame fixtures combining p-values with each timing scenario.
test.df <- data.frame(id = seq_len(4), pval = test.pval, decision.times = test.decision.times)
test.df2 <- data.frame(id = seq_len(4), pval = test.pval, decision.times = test.decision.times2)
test.df3 <- data.frame(id = seq_len(4), pval = test.pval, lags = test.lags)
test.df4 <- data.frame(id = seq_len(4), pval = test.pval, lags = test.lags2)
## Invalid configurations must be rejected before any p-value is processed:
## alpha in (0,1), gammai non-negative and summing to <= 1, and 0 <= w0 <= alpha.
test_that("Errors for edge cases", {
  expect_error(LORDstar(test.df, version='async', alpha = -0.1),
               "alpha must be between 0 and 1.")
  expect_error(LORDstar(test.df, version='async', gammai = -1),
               "All elements of gammai must be non-negative.")
  expect_error(LORDstar(test.df, version='async', gammai=2),
               "The sum of the elements of gammai must be <= 1.")
  expect_error(LORDstar(test.df, version='async', w0 = -0.01),
               "w0 must be non-negative.")
  expect_error(LORDstar(test.df, version='async', alpha=0.05, w0=0.06),
               "w0 must not be greater than alpha.")
})
## Rejection vectors for the async variant: immediate decisions reject
## hypotheses 1, 2, 4; deferring all decisions to time 4 rejects only the first.
test_that("Correct rejections for version async", {
  expect_identical(LORDstar(test.df, version='async')$R,
                   c(1,1,0,1))
  expect_identical(LORDstar(test.df2, version='async')$R,
                   c(1,0,0,0))
})
## The dependent variant with zero lags mirrors the immediate-decision case;
## maximal lags mirror the fully deferred case.
test_that("Correct rejections for version dep", {
  expect_identical(LORDstar(test.df3, version='dep')$R,
                   c(1,1,0,1))
  expect_identical(LORDstar(test.df4, version='dep')$R,
                   c(1,0,0,0))
})
## Batch variant: singleton batches match immediate decisions; one batch of
## four matches the fully deferred case.
test_that("Correct rejections for version batch", {
  expect_identical(LORDstar(0.1, version='batch', batch.sizes=1)$R, 0)
  expect_identical(LORDstar(test.pval, version='batch',
                            batch.sizes = rep(1,4))$R,
                   c(1,1,0,1))
  expect_identical(LORDstar(test.pval, version='batch',
                            batch.sizes = 4)$R,
                   c(1,0,0,0))
})
## With trivial timing (immediate decisions / zero lags / singleton batches),
## every LORDstar variant should reduce to plain LORD++ alpha levels.
test_that("Check that LORD is a special case of the LORDstar
          algorithms", {
  expect_equal(LORD(test.df, version = '++')$alphai,
               LORDstar(test.df, version='async')$alphai)
  expect_equal(LORD(test.df, version = '++')$alphai,
               LORDstar(test.df3, version='dep')$alphai)
  expect_equal(LORD(test.df, version = '++')$alphai,
               LORDstar(test.pval, version='batch',
                        batch.sizes = rep(1,4))$alphai)
})
## Bare p-value vectors are insufficient for async/dep -- a data frame with
## the timing column is required.
test_that("LORDstar inputs are correct when given vector input", {
  expect_error(LORDstar(test.pval, version="async"),
               "d needs to be a dataframe with a column of decision.times")
  expect_error(LORDstar(test.pval, version="dep"),
               "d needs to be a dataframe with a column of lags")
})
|
af40f32265a4cbc89885f3b71bc148ef6c9e509f | 607b31d18cd361c331135771e4ce2d796dfc16c4 | /man/runVEP.Rd | 177fe694d795895a6412001be5681d1965425901 | [] | no_license | mmm84766/slimR | 66f44045ab4ac1ebeaa13a8644ca379e6ca172f9 | 6c9d268576c11af27c5b98a5cad5a08ed6cd7253 | refs/heads/master | 2020-03-22T15:44:52.494556 | 2017-08-29T13:56:12 | 2017-08-30T09:17:45 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | true | 905 | rd | runVEP.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/getVariationData.R
\name{runVEP}
\alias{runVEP}
\title{runVEP}
\usage{
runVEP(vepPATH = "/home/buyar/.local/bin/variant_effect_predictor.pl",
inputFileName, outputFileName = "VEPresults.tsv", overwrite = FALSE,
nodeN = 4)
}
\arguments{
\item{vepPATH}{path to the variant_effect_predictor.pl script}
\item{inputFileName}{path to input file that contains variation data in a
format acceptable to variant_effect_predictor software (see:
http://www.ensembl.org/info/docs/tools/vep/vep_formats.html#default)}
\item{outputFileName}{file name to write the results}
\item{overwrite}{(default: FALSE) set it to TRUE to overwrite the existing
VEP output file.}
}
\value{
a \code{data.table} (a \code{data.frame} subclass) containing variation
data read from the VEP output file
}
\description{
A wrapper function to run Ensembl's variant_effect_predictor script
}
|
319f27eba540a67ef559669cf37a7397aa76a5d3 | 57cd918dd9c3a75e993682ece752714a79eb6dcf | /R/plot_hazard.R | d0c584bc3b2c646e7bf822f4f12e247ad9527e4d | [] | no_license | cran/vsd | 1495e4d3db0772192ff17f34d48c97ab5eff83a2 | d4410efb7857458321b53c0b297e44f78b08c355 | refs/heads/master | 2023-05-05T14:35:41.557493 | 2021-05-11T08:40:02 | 2021-05-11T08:40:02 | 366,436,976 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 1,562 | r | plot_hazard.R | # Generates hazard plot
# Generates the smoothed-hazard plot for a survival data set.
#
# surv:   data frame with `time` and `status` columns (as consumed by muhaz).
# strata: optional factor splitting the observations; NULL pools everything
#         into a single "All" curve.
# size:   line width forwarded to geom_line().
# ...:    forwarded to ggpubr::ggpar() for styling.
#
# Returns a list with one element, `hazard`, holding the ggplot object.
plot_hazard <- function(surv, strata = NULL, size, ...) {
  plots <- list()
  if (is.null(strata)) {
    # Single pooled muhaz estimate.
    hazard <- muhaz::muhaz(surv$time, surv$status)
    hazard_df <- data.frame(
      x = hazard$est.grid,
      y = hazard$haz.est,
      strata = factor(rep("All", length(hazard$est.grid)))
    )
  } else {
    # One hazard estimate per stratum; strata that are too small for a
    # stable muhaz fit are skipped with a warning.
    hazard_count <- table(strata)
    # Collect per-stratum frames in a list and bind once at the end rather
    # than growing a data frame inside the loop (which copies it each time).
    pieces <- list()
    for (lvl in levels(strata)) {
      # TODO: is the minimum really always ten?
      if (hazard_count[[lvl]] < 10) {
        warning(
          "Level ",
          lvl,
          " doesn't have enough datapoints to estimate the hazard function",
          call. = FALSE,
          immediate. = TRUE
        )
      } else {
        hazard <- muhaz::muhaz(surv$time, surv$status, strata == lvl)
        pieces[[lvl]] <- data.frame(
          x = hazard$est.grid,
          y = hazard$haz.est,
          strata = rep(lvl, length(hazard$est.grid))
        )
      }
    }
    if (length(pieces) == 0) {
      # No stratum had enough data; keep an empty frame so the ggplot call
      # below still succeeds (matching the original's empty-data behaviour).
      hazard_df <- data.frame(x = numeric(0), y = numeric(0), strata = character(0))
    } else {
      hazard_df <- do.call(rbind, pieces)
    }
    # Restore the full factor levels so the legend stays consistent.
    hazard_df$strata <- factor(hazard_df$strata, levels(strata))
  }
  plot <- ggplot(hazard_df, aes(.data$x, .data$y, color = .data$strata)) +
    geom_line(size = size)
  plots$hazard <- ggpubr::ggpar(plot, ...)
  plots
}
|
0ae291cce86bbe80554076ed5347b7441e45f168 | bf071ab91a8293b10fb763bc4b8a69be85c54a81 | /test.R | 6bfaa734916b75d20ecf10fed6688830cf201a33 | [] | no_license | SuhruthY/RepData_PeerAssessment1 | 0de3f16ed2fe7e3213c5ad329ffc97c487054f87 | eb7c752f5a6bc03431fe7eab562308e602bf9b5f | refs/heads/master | 2023-01-07T22:51:52.340205 | 2020-11-10T12:06:13 | 2020-11-10T12:06:13 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 1,974 | r | test.R | library(dplyr)
library(ggplot2)
## Read the personal-activity-monitoring data; the `steps` column may
## contain NA values.
activity_data <- read.csv("activity.csv")
head(activity_data)
## Total steps per day.  sum() without na.rm yields NA for any day with a
## missing interval, and those days are then dropped by the filter.
databydate <- activity_data %>%
        select(date, steps) %>%
        group_by(date) %>%
        summarise(tsteps=sum(steps)) %>%
        filter(!is.na(tsteps))
hist(databydate$tsteps, xlab="Total daily steps",
     main="Frequency of total daily steps", breaks = 20)
mean(databydate$tsteps)
median(databydate$tsteps)
## Mean steps within each 5-minute interval, averaged across days.
databyinterval <- activity_data %>%
        select(interval, steps) %>%
        filter(!is.na(steps)) %>%
        group_by(interval) %>%
        summarise(tsteps=mean(steps))
g <- ggplot(databyinterval, aes(x=interval, y=tsteps)) +
        geom_line()
g + ggtitle("Time series of average number of steps taken") +
        theme(plot.title = element_text(hjust=0.5))
## Interval with the highest average step count.
most_steps <- databyinterval[which(databyinterval$tsteps == max(databyinterval$tsteps)),]
most_steps$interval
## Count missing entries, then mean-impute them.
num_of_na_values <- sum(is.na(activity_data))
new_data <- activity_data
head(new_data)
## Fix: only numeric columns are imputed.  The original looped over every
## column, so mean() also ran on the character `date` column -- a silent
## no-op that emitted coercion warnings.  seq_len() replaces 1:ncol().
for(i in seq_len(ncol(new_data))){
  if (is.numeric(new_data[,i])) {
    new_data[is.na(new_data[,i]), i] <- mean(new_data[,i], na.rm = TRUE)
  }
}
## Re-compute daily totals on the imputed data (no filter needed now).
new_databydate <- new_data %>%
        select(date, steps) %>%
        group_by(date) %>%
        summarise(tsteps=sum(steps))
hist(new_databydate$tsteps, xlab="Total daily steps",
     main="Frequency of total daily steps", breaks = 20)
new_mean = mean(new_databydate$tsteps)
new_median = median(new_databydate$tsteps)
## Weekday vs. weekend factor.  NOTE(review): weekdays() returns
## locale-dependent day names; matching "Saturday"/"Sunday" assumes an
## English locale -- confirm, or compare on wday numbers instead.
new_data$date <- as.Date(new_data$date)
new_data$weekday <- weekdays(new_data$date)
new_data$weekend <- ifelse(new_data$weekday=="Saturday" | new_data$weekday=="Sunday",
                           "Weekend", "Weekday")
## Mean steps per interval, split by weekday/weekend, plotted as facets.
averages <- aggregate(steps ~ interval + weekend, data =new_data, mean)
ggplot(averages, aes(x=interval, y=steps, color=weekend)) +
        geom_line() + facet_grid(weekend ~ .) +
        labs(x="5-minute interval", y="Mean no of steps",
             title="Comparison of average number of steps in each interval")+
        theme(plot.title = element_text(hjust=0.5))
averages
|
124c9840fde4c4d072a74eef65c04fa160e6c39f | b94dd703bc872656f020682956efa65b0f132bb6 | /Module - 3/Assignment 11.R | d0c0b78d368c27fbabd5a93f540bf17e3e2a35af | [] | no_license | nitinsingh27/DataScience-With-R | 5ba07d613d79f34b833dbf63327bca8c4ffc53a0 | 8d3f02a424aa1a34ffbfcf640c5bced3870c4c6d | refs/heads/main | 2023-06-11T19:21:08.895755 | 2021-07-12T14:46:30 | 2021-07-12T14:46:30 | 385,279,389 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 1,852 | r | Assignment 11.R | setwd("C:/Users/enerc/OneDrive/Desktop/data science/sessions/r_training")
getwd()
## NOTE(review): the read below is relative to the user-specific setwd()
## above; a project-relative path would be more portable.
placement <- read.csv("Placement.csv",stringsAsFactors = T)
View(placement)
library(ggplot2)
## Four histograms of placement-score columns; the requirement comments
## below spell out the styling for each plot.
# 1. Create a histogram for 'ssc_p' column:
# a. Assign as color 'azure' to the histogram.
# b. Set number of bins to 50.
# c. Assign a color 'cornsilk4' to the 'fill' attribute in geom_histogram function.
# d. Give it a title as 'SSC Percentage'
ggplot(data = placement,aes(ssc_p)) + geom_histogram(fill = "cornsilk4", col = "azure",bins = 50) + ggtitle("SSC Percentage")
# 2. Create a histogram for 'hsc_p':
# a. Assign a color 'wheat3' to the plot.
# b. Set number of bins to 50.
# c. Assign a color 'black' to the 'fill' attribute in geom_histogram function.
# d. Give it a title as 'HSC Percentage'
ggplot(data = placement,aes(hsc_p)) + geom_histogram(fill = "black", col = "wheat3",bins = 50) + ggtitle("HSC Percentage")
# 3. Create a histogram as per the following conditions:
# a. Assign 'degree_p' column to the x-axis.
# b. Set the number of bins to 80.
# c. Assign a color 'violet' to the bars.
# d. Assign a color 'white' to the 'fill' attribute in geom_histogram function.
# e. Give it a title as 'Degree Percentage'
ggplot(data = placement,aes(x = degree_p)) + geom_histogram(fill = "white", col = "violet",bins = 80) + ggtitle("Degree Percentage")
# 4. Create a histogram as per the following condition:
# a. Assign 'etest_p' column to the x-axis.
# b. Set the number of bins to 100.
# c. Assign a color 'white' to the bars.
# d. Assign a color 'black' to the 'fill' attribute in geom_histogram function.
# e. Give it a title as 'E-test Percentage'
ggplot(data = placement,aes(x = etest_p)) + geom_histogram(fill = "black", col = "white",bins = 100) + ggtitle("E-test Percentage")
|
a3e5e07a55915b41ebfb3db9c679300a092f39e0 | 63c611251d99c2c9ff21d5b1ae08237e2d2385d6 | /Yelp_business_extraction.R | dc9a37852c47dee390fd22a5e6814a2c4716155e | [] | no_license | dknorr7/Dknorr | 81ea19f332f3d8658bd6ce1d8e20bfec59f1432b | dc023f4d948ee3692c69a3482351e3db12a21d16 | refs/heads/master | 2021-11-09T04:51:59.646427 | 2021-10-22T16:18:25 | 2021-10-22T16:18:25 | 32,331,560 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 2,038 | r | Yelp_business_extraction.R | library(pacman)
p_load(httr, tidyverse, ggplot2, remotes, yelp, yelpr, leaflet)
###Function to append results of yelp list, workaround from 50 result limit to original function
## SECURITY NOTE(review): a live Yelp API key is hard-coded below and
## committed to the repository.  It should be revoked and read from an
## environment variable instead, e.g. mykey <- Sys.getenv("YELP_API_KEY").
mykey <- "tXnlgw5ByJu6k5vzMKm82HiUUfgrYy7-xFG3BYon-BNFI__QxG2l-_z6q4V0_PgEaxLARajKHbjoXeF24wVNPUakSPeS9jhfuBGf5pSuaedJlT1uu8qUPNzT68RgW3Yx"
## Page through the Yelp business-search endpoint (50 results per request)
## and bind all pages into a single data frame.
##
## place:    location string forwarded to the Yelp API.
## keywords: search term forwarded to the Yelp API.
##
## Returns a data frame of businesses with latitude/longitude flattened out
## of the nested `coordinates` column and the other nested columns dropped.
## Fixes in this revision:
##   * the original `while (nrow(df_total) %% limit_i == 0)` always requested
##     one page past the end; that empty page crashed the column handling
##     (and, for result counts that are exact multiples of 50, prevented
##     termination).  We now stop on an empty or short page;
##   * pages are collected in a list and bound once, in request order,
##     instead of prepending to a growing data frame each iteration.
yelp_full_search <- function(place, keywords){
  pages <- list()
  offset_i <- 0
  limit_i <- 50
  repeat {
    yelp_setup <- business_search(api_key = mykey,
                                  location = place,
                                  term = keywords,
                                  limit = limit_i, offset = offset_i)
    yelp_return <- as.data.frame(yelp_setup$businesses)
    # An empty page means every result has already been consumed.
    if (nrow(yelp_return) == 0) break
    yelp_return$latitude <- yelp_return$coordinates$latitude
    yelp_return$longitude <- yelp_return$coordinates$longitude
    ###These columns are nested dataframes and are dropped but could be included
    yelp_return <- yelp_return %>% select(-c(location, transactions, categories, coordinates))
    ### make sure Price column is explicitly included, sometimes it is, sometimes it isn't
    yelp_return <- yelp_return %>% mutate(price = ifelse("price" %in% colnames(yelp_return), yelp_return$price, "NA"))
    pages[[length(pages) + 1]] <- yelp_return
    # A short page is necessarily the last one; skip the extra request.
    if (nrow(yelp_return) < limit_i) break
    offset_i <- offset_i + limit_i
    print("Proceeding to next API Request")
  }
  print("Finished Compiling Yelp Businesses")
  if (length(pages) == 0) return(data.frame())
  do.call(rbind, pages)
}
## Pull several business categories for Nashville.  Each call hits the live
## Yelp API repeatedly and may take a while / consume quota.
pawn_shops <- yelp_full_search("Nashville", "Pawn")
starbucks <- yelp_full_search("Nashville", "starbucks")
hospital <- yelp_full_search("Nashville", "hospital")
grocery <- yelp_full_search("Nashville", "grocery")
## NOTE(review): this variable is named `tattoo` but searches "strip club".
tattoo <- yelp_full_search("Nashville", "strip club")
###quick map of yelp results
leaflet(data = tattoo) %>% addTiles() %>%
  addCircleMarkers(radius = 3, ~longitude, ~latitude, popup = ~as.character(name), label = ~as.character(name))
|
2ba31f03e7b1fd013477bc712ddc4a9ff6c4a760 | b0c09959df30b73d953fa98b8bb6c10810fa080d | /man/plot_svf_vs_model.Rd | f406c2fb58b47f897b4862aa7258c7e036c1ba2b | [] | no_license | k-hench/fftidy | f325ed1aaefb9d0af395ef21acef387849f6a1f1 | a8c2cd364f1597de8612188bbe73cccd7d539d37 | refs/heads/master | 2023-03-15T11:02:11.998683 | 2021-03-05T16:37:54 | 2021-03-05T16:37:54 | 300,317,485 | 0 | 2 | null | null | null | null | UTF-8 | R | false | true | 416 | rd | plot_svf_vs_model.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/ctmm_functions.R
\name{plot_svf_vs_model}
\alias{plot_svf_vs_model}
\title{function to compare variogram with a model fit (initial close up and total data range)}
\usage{
plot_svf_vs_model(variogram, mod, colors = clr_set_base, ...)
}
\description{
function to compare variogram with a model fit (initial close up and total data range)
}
|
f2d27bb4663a6b5427827e62fd89c1d540558289 | ada9e72402a645606e60b939e64aedf10e6e8a4d | /slave_demographics.r | 6b231b3112d0b5557dd33098d7e85712465512d7 | [] | no_license | scollini/Final_project | 64abb5095fdd0e905f61f4c2317ed16f032bc025 | 2983affcce34d0a9402f37141970ea9901abde32 | refs/heads/master | 2020-05-19T14:22:24.026447 | 2016-01-27T00:13:51 | 2016-01-27T00:13:51 | 27,410,212 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 1,154 | r | slave_demographics.r | #Charts- I would like to show as many meaningful visualizations as possible for this project.
#This means including charts for various demographics: total population, children,
#mothers, gender, owner, and skills on each farm.
#I would also like to show these visuals on the 1793 map of Mount Vernon, either through
#geo-rectifying it or through mapping the points to the image pixels. I want to show
#how the farms were unique and related to one another through the slave community.
#This is the general idea. I have a lot of the necessary code in other homework
#but with the messy slave data. I just need to translate it here. This would
# be in a separate Rmd file.
## Children per farm in the 1786 census, then the 1799 census.
## Fix: `stat = "identity"` was being passed *inside* aes(), which maps it
## as a constant aesthetic instead of configuring the layer; geom_bar()
## already counts rows per farm, so the argument is removed.
slave_children_1786 <- total_slaverel %>%
  filter(Skill.x == "Child") %>%
  group_by(Farm.x)
ggplot(data = slave_children_1786, aes(x = Farm.x)) + geom_bar() + theme(axis.text.x=element_text(angle = 90, hjust = 0))
slave_children_1799 <- total_slaverel %>%
  filter(Skill.y == "Child") %>%
  group_by(Farm.y)
ggplot(data = slave_children_1799, aes(x = Farm.y)) + geom_bar() + theme(axis.text.x=element_text(angle = 90, hjust = 0))
|
bceb2dce2ddbf39b983b6d163e18a7d2f70c0c1a | b3d595bd828e130d4bf05a2cd48ecf2dabc1591d | /BE_IT/CL-7/Part B - MLA/Assignment 8/Assignment 8.R | 2296821ce619cffb7dafd5b21fc63fc053051332 | [] | no_license | ImSahilShaikh/BE_IT_Assignments | efb741a166968acdb0e171e96422461309dd8ab1 | ee7f2a4a62b6df571f66cd73e8f7207d942d4982 | refs/heads/master | 2023-05-04T20:08:47.820384 | 2021-05-30T05:21:39 | 2021-05-30T05:21:39 | 171,018,974 | 3 | 5 | null | 2020-10-01T11:13:33 | 2019-02-16T15:31:55 | HTML | UTF-8 | R | false | false | 1,943 | r | Assignment 8.R | #Name : Sahil Shaikh
#Roll no: 43365
#Problem Statement: Principal Component Analysis-Finding Principal Components, Variance and Standard Deviation calculations of principal components.(Using R)
#importing dataset
## NOTE(review): absolute Windows path; a project-relative path would be
## more portable.
df = read.csv("C:\\Users\\ImSahil\\OneDrive\\Desktop\\BE_IT_Assignments\\BE_IT\\CL-7\\Part B - MLA\\Assignment 8\\winequalityN.csv")
#View dataframe
View(df)
#attach function to use all the features
## NOTE(review): attach() is discouraged (it can silently mask variables);
## the rest of the script indexes df directly, so it may be removable.
attach(df)
#lets see names of all available features
names(df)
#to keep data numeric lets remove the first value
## Drop the first column, keeping columns 2..13 only.
df <- df[,2:13]
#check for na values
colSums(is.na(df))
#removing all na values
## Mean-impute every column that arrived with missing entries, replacing the
## seven near-identical assignment lines with a single loop.
na_cols <- c("fixed.acidity", "volatile.acidity", "citric.acid",
             "residual.sugar", "chlorides", "pH", "sulphates")
for (column in na_cols) {
  df[[column]][is.na(df[[column]])] <- mean(df[[column]], na.rm = TRUE)
}
#confirming no na values are remaining
colSums(is.na(df))
sum(is.finite(as.matrix(df)))
#lets check if we have all numeric data now
str(df)
is.numeric(as.matrix(df))
#Performs a principal components analysis on the given data matrix and returns the results as an object of class prcomp
## scale. = TRUE standardises each variable to unit variance before the PCA.
pca <- prcomp(df,scale. = TRUE)
#we are squaring the std deviation to calculate how much variation in original data each principal component does
pca.var <- pca $ sdev ^ 2
#converting the variation into percentage
pca.var.per <- round(pca.var/sum(pca.var)*100,1)
#Visualization
## Scree-style bar chart of percent variance, a biplot of the first two
## components, and the default variance line plot.
barplot(pca.var.per, main = "Bar plot", xlab = "Principal component", ylab = "Percentage Variation")
biplot(pca)
plot(pca,type="l")
#summary of pca consist std deviation, variance and cumulative proportion for each component
summary(pca)
|
33a281b7493790481ba970381de0c0276cc602c3 | f9483bcbd9a14a2afe2b3b95c5591addac313e68 | /man/nsink_build.Rd | 8b5324288971285008fd1c4c9c37d3bc803a67cc | [
"MIT"
] | permissive | jhollist/nsink | 4851e82a06d251c1c3e1fae74d11665abd116451 | 8a61adaf5fa2b3d3bbfd122d6c1399661e126a1f | refs/heads/main | 2023-05-13T19:57:52.497188 | 2023-04-27T19:32:34 | 2023-04-27T19:32:34 | 190,607,176 | 5 | 1 | NOASSERTION | 2021-03-02T21:41:24 | 2019-06-06T15:40:51 | R | UTF-8 | R | false | true | 2,659 | rd | nsink_build.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/nsink_build.R
\name{nsink_build}
\alias{nsink_build}
\title{Build out required datasets for N-Sink}
\usage{
nsink_build(
huc,
projection,
output_dir = normalizePath("nsink_output", winslash = "/", mustWork = FALSE),
data_dir = normalizePath("nsink_data", winslash = "/", mustWork = FALSE),
force = FALSE,
samp_dens = 300,
year = "2016",
...
)
}
\arguments{
\item{huc}{A character with the 12 digit HUC ID. May be searched with
\code{\link{nsink_get_huc_id}}}
\item{projection}{Projection to use for all spatial data, specified as either an
EPSG code (as numeric) or WKT (as string).}
\item{output_dir}{Folder to write processed nsink files to.
Currently, the processed files will be overwritten if
the same output folder is used. To run different
HUC12's specify separate output folders.}
\item{data_dir}{Folder to hold downloaded data. The same data
directory can be used to hold data for multiple HUCs. Data
will not be downloaded again if it already exists in this
folder.}
\item{force}{Logical value used to force a new download if data already
exists on file system.}
\item{samp_dens}{The \code{samp_dens} controls the density of points to use when
creating the nitrogen removal heat map. The area of the
watershed is sampled with points that are separated by the
\code{samp_dens} value, in the units of the input data.
The larger the value, the fewer the points.}
\item{year}{Year argument to be passed to FedData's \code{\link{get_nlcd}}
function. Defaults to 2016.}
\item{...}{Passes to \code{\link{nsink_calc_removal}} for the off network
arguments: \code{off_network_lakes}, \code{off_network_streams},
and \code{off_network_canalsditches}.}
}
\value{
A list providing details on the huc used and the output location of
the dataset.
}
\description{
This function is a wrapper around the other functions and runs all of those
required to build out the full dataset needed for a huc and develops the four
static N-Sink maps: the nitrogen loading index, nitrogen removal effeciency,
nitrogen transport index, and the nitrogen delivery index. The primary
purpose of this is to use the nsink package to develop the required datasets
for an nsink application to be built outside of R (e.g. ArcGIS). This will
take some time to complete as it is downloading 500-600 Mb of data,
processing that data and then creating output files.
}
\examples{
\dontrun{
library(nsink)
aea <- 5072
nsink_build(nsink_get_huc_id("Niantic River")$huc_12, aea,
output_dir = "nsink_output", data_dir = "nsink_data",
samp_dens = 600)
}
}
|
45ec7342345f6165a0789b65cae208975fb33fc6 | 4c0394633c8ceb95fc525a3594211636b1c1981b | /man/unnest.Rd | cbb6a55fef3cf6318c50330af077d89ca70bca23 | [
"MIT"
] | permissive | markfairbanks/tidytable | 8401b92a412fdd8b37ff7d4fa54ee6e9b0939cdc | 205c8432bcb3e14e7ac7daba1f4916d95a4aba78 | refs/heads/main | 2023-09-02T10:46:35.003118 | 2023-08-31T19:16:36 | 2023-08-31T19:16:36 | 221,988,616 | 357 | 33 | NOASSERTION | 2023-09-12T20:07:14 | 2019-11-15T19:20:49 | R | UTF-8 | R | false | true | 1,284 | rd | unnest.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/unnest.R
\name{unnest}
\alias{unnest}
\title{Unnest list-columns}
\usage{
unnest(
.df,
...,
keep_empty = FALSE,
.drop = TRUE,
names_sep = NULL,
names_repair = "unique"
)
}
\arguments{
\item{.df}{A data.table}
\item{...}{Columns to unnest. If empty, unnests all list columns. \code{tidyselect} compatible.}
\item{keep_empty}{Return \code{NA} for any \code{NULL} elements of the list column}
\item{.drop}{Should list columns that were not unnested be dropped}
\item{names_sep}{If NULL, the default, the inner column names will become the new outer column names.
If a string, the name of the outer column will be appended to the beginning of the inner column names,
with \code{names_sep} used as a separator.}
\item{names_repair}{Treatment of duplicate names. See \code{?vctrs::vec_as_names} for options/details.}
}
\description{
Unnest list-columns.
}
\examples{
df1 <- tidytable(x = 1:3, y = 1:3)
df2 <- tidytable(x = 1:2, y = 1:2)
nested_df <-
data.table(
a = c("a", "b"),
frame_list = list(df1, df2),
vec_list = list(4:6, 7:8)
)
nested_df \%>\%
unnest(frame_list)
nested_df \%>\%
unnest(frame_list, names_sep = "_")
nested_df \%>\%
unnest(frame_list, vec_list)
}
|
240fdf4e6e79be14b9376069aa09f10310f4df76 | c03d2e18fb313ef2725be141b2d8242180fa0ffa | /cachematrix.R | 86cd98c29050bcda9ac4dee72bf99230508a859b | [] | no_license | Gpalominos/ProgrammingAssignment2 | 9e6f95244ea02b7401c7227a60afbf0881053066 | 096ff0699f3fd3f351548f2fa8a4b0ef097063a7 | refs/heads/master | 2021-01-14T11:17:19.111829 | 2015-05-22T02:04:05 | 2015-05-22T02:04:05 | 36,038,287 | 0 | 1 | null | 2015-05-21T21:29:40 | 2015-05-21T21:29:39 | null | ISO-8859-10 | R | false | false | 1,114 | r | cachematrix.R | ## These function try to use cache information to evoid recalculating inverse matrix operation
## The first function creates a list that wraps a matrix, with accessors to
## store and retrieve cached information; the second inverts the matrix only
## if that has not been done before.
## makeCacheMatrix creates a list of four functions around a matrix: they get
## the matrix or its cached inverse when available, and set the matrix or its inverse otherwise.
library(MASS)
## Build a matrix wrapper that can memoise its inverse.
## Returns a list of accessors:
##   set(y)     replace the stored matrix and drop any cached inverse
##   get()      return the stored matrix
##   setinv(v)  store a computed inverse in the cache
##   getinv()   return the cached inverse, or NULL if not yet computed
makeCacheMatrix <- function(x = matrix()) {
  cached_inverse <- NULL
  set <- function(new_matrix) {
    x <<- new_matrix
    # Invalidate the cache: the old inverse no longer matches the data.
    cached_inverse <<- NULL
  }
  get <- function() {
    x
  }
  setinv <- function(value) {
    cached_inverse <<- value
  }
  getinv <- function() {
    cached_inverse
  }
  list(
    set = set, get = get,
    setinv = setinv,
    getinv = getinv
  )
}
## Return the inverse of the special "cache matrix" `x` produced by
## makeCacheMatrix().  The inverse is computed (via MASS::ginv) only on the
## first call; subsequent calls reuse the value cached inside `x`.
cacheSolve <- function(x, ...) {
  cached <- x$getinv()
  if (!is.null(cached)) {
    message("getting cached data")
    return(cached)
  }
  inverse <- ginv(x$get(), ...)
  x$setinv(inverse)
  ## Return a matrix that is the inverse of 'x'
  inverse
}
f9837d8f711695d5fd10211f0e78e81b67c9e299 | ac2263cddbcc4803dc7abbae04b439c11d120f9c | /B-spline ME with Bayesian Updating.R | 25935efa5b24f6d6770274ff2d9df7512613ad17 | [] | no_license | salmanjahani/B-Spline-Based-Mixed-Effect-Model-with-Bayesian-Updating | b6d77f610284f18f63743160be30a257f21946e4 | 91802d334c89ca6e663ecf896c41d45af05cd508 | refs/heads/main | 2023-01-01T22:51:28.939394 | 2020-10-20T06:09:54 | 2020-10-20T06:09:54 | 305,607,073 | 2 | 0 | null | null | null | null | UTF-8 | R | false | false | 5,804 | r | B-spline ME with Bayesian Updating.R | #' Code for B-Spline Based Mixed Effects Model With Bayesian Updating
#' Simulation
## Install any required packages that are not already present, then load them.
list.of.packages <- c("Matrix","nlme","rootSolve","splines")
new.packages <- list.of.packages[!(list.of.packages %in% installed.packages()[,"Package"])]
if(length(new.packages)) install.packages(new.packages)
# Load the packages
library(Matrix)
library(nlme) # Nonlinear Mixed Effects Model
library(rootSolve) # Nonlinear Root Finding
library(splines) # Regression spline functions and classes
## NOTE(review): clearing the whole workspace inside a script is an
## anti-pattern -- it erases anything the user had loaded.  Kept for fidelity.
rm(list=ls()) # Clean-up the memory
########################################### System Input ##############################################
setwd("D:/Job Hunt/Sample Code/B-Spline Based Mixed Effect Model with Bayesian Updating") #please set the correct directory
## Loads the workspace containing `cn`, indexed below as cn[[4]][i, ].
load("IMdata.RData")
######################################### Construct a groupedData Object #################################################
## Stack the 4th signal (Compressor Discharge Temperature) of all 200 units
## into one long (id, x, y) data frame.  Rows are built with lapply() and
## bound once, instead of growing the frame with rbind() in a loop (which
## re-copies the whole object on every iteration).
## `x` is assigned at top level on purpose: later sections of this script
## reuse it (the original loop leaked it as a side effect).
x <- seq(1, 11)
data <- do.call(rbind, lapply(1:200, function(i) {
  data.frame(id = rep(i, 11),    # unit identifier
             x = x,              # observation times
             y = cn[[4]][i,])    #Choosing the 4th signal (Compressor Discharge Temperature) in the dataset
}))
data <- groupedData(y ~ x | id, data)
########################################## Train Test Mean ###############################################
n=123 # first n-1 available signals are selected to train the model
# The n th signal is considered the new signal for which we are interested to do prediction
## NOTE(review): `1:n-1` parses as `(1:n)-1`, i.e. 0:(n-1); it selects units
## 1..n-1 only because no unit has id 0.  seq_len(n-1) would be explicit.
train=data[data$id%in%(1:n-1),]
# test=data[data$id%in%n,]
## Parallel lists of each training unit's time points and responses.
trains=lapply(1:(n-1),function(i){train$x[train$id==i]})
trainy=lapply(1:(n-1),function(i){train$y[train$id==i]})
########################################## Exploratory data visualization ###############################################
dev.off() #Remove any previous plot
## Overlay the first 20 training signals on one axis system via par(new=T).
## NOTE(review): the loop comment says "Evaporator outlet temperature", but
## the plotted data/title are Compressor Discharge Temperature -- confirm.
for(i in 1:(20)) #Plot the first 20 Evaporator outlet temperature signals
{
  plot(trains[[i]],trainy[[i]],xlim=c(0,11),ylim=c(0,200),type="l",pch=2,lwd=2,cex.axis=1.2,xlab='Time',ylab='Temperature (fahrenheit)',main='Compressor Discharge Temperature')
  par(new=T)
}
## Commented-out variant that would plot the scaled B-spline basis functions.
# for(i in 1:(20)) #Plot the first 20 Evaporator outlet temperature signals
# {
#   plot(trains[[4]],80*bs(x, df=8,degree=4)[,i],xlim=c(0,11),ylim=c(0,200),type="l",pch=2,lwd=2,cex.axis=1.2,xlab='Time',ylab='Temperature (fahrenheit)',main='Compressor Discharge Temperature')
#   par(new=T)
# }
##################################### B-Spline Mixed effects model ##########################
#Fit B-spline mixed effect model
## Degree-4 B-spline basis with df = 8 for both the fixed effects and the
## per-unit random effects; errors are caught with tryCatch so a failed fit
## is reported instead of aborting the script.
fitlme <- tryCatch(lme(y~bs(x, df=8,degree=4),random=~bs(x, df=8,degree=4)|id,data=train,control=lmeControl(returnObject=TRUE,opt="optim",optimMethod = "SANN")), error = function(e) e)
if(any(class(fitlme) == "error")==T){cat("LME fit Prob")} #Check for any error in fitting
# Check the parameter estimates
summary(fitlme)
# Extract parameters
sigma2f = (fitlme$sigma)^2 # Residual (signal noise) variance
MUBf = fitlme$coefficients$fixed # Fixed-effect (mean) coefficient vector
SIGMABf = var(fitlme$coefficients$random$id) # Covariance of the per-unit random effects
#####Online Update#####
####Similar to Junbo, I use a set of randomly-generated signal for the sake of illustration
new_t = rmnorm(1,MUBf,SIGMABf) # b vector for this hypothetical new unit
noisevec = rnorm(11,0,sigma2f) # Siganl noise
ftt0=lm(y~bs(x,df=8,degree=4),data=train) #Dummy model
ftt0$coefficients=as.vector(new_t)
test=predict(ftt0,newdata = data.frame(x))+noisevec
test=data.frame(x,y=test)
dev.off()
par(mfrow=c(1,2))
for(i in 1:11){
tstar=i # Time of Bayesian Update
tests=test$x[test$x<=tstar];m1=length(tests);testy=test$y[1:m1] # Test Signal
Rp=testy # new observed temperature signal
Rp = t(t(Rp))
Zp = matrix(0,m1,9)
Zp[,1]=rep(1,m1)
Zp[,(2:9)]=bs(x, df=8,degree=4)[1:m1,]
########### Bayesian parameters' update (Posterior Distribution)
a <- tryCatch(chol2inv(chol(SIGMABf)), error = function(e) e) #a=solve(SIGMABf)
if(any(class(a) == "error")==T){cat("No inverse")}
b=a+t(Zp)%*%Zp/sigma2f
SIGMABpf <- tryCatch(chol2inv(chol(b)), error = function(e) e)
if(any(class(SIGMABpf) == "error")==T){cat("No inverse");iit=iit-1;next}
MUBpf=SIGMABpf%*%(t(Zp)%*%Rp/sigma2f+a%*%MUBf)
#### Comparison of Prior and Posterior
comparison = cbind(MUBf,MUBpf)
colnames(comparison) = c("Prior","Bayesian Updated")
comparison
########## Visual illustration #####
ftt=lm(y~bs(x,df=8,degree=4),data=train) #Dummy model to be used for b-spline predictions
####Prior Fit
ftt$coefficients=as.vector(MUBf)
priorfit=predict(ftt,newdata = data.frame(x))
####Posterior Fit
ftt$coefficients=as.vector(MUBpf)
posteriorfit=predict(ftt,newdata = data.frame(x))
##True Signal
# dev.off()
plot(test$x,test$y,xlim=c(0,11),ylim=c(90,160),type="l",lwd=2,xlab='Time',ylab='temperature (fahrenheit)',main='New Compressor Discharge Temperature Signal',cex.axis=1.2) ## True
##Current observations of new signal
points(tests,testy,xlim=c(0,11),ylim=c(90,160),pch=16,cex=2,xlab="",ylab="",main="",cex.axis=1.2)
par(new=TRUE)
## Posterior and Prior predictions
plot(test$x,posteriorfit,type="l",lwd=2,lty=2,col="red",xlab="",ylab="",main="",xlim=c(0,11),ylim=c(90,160),cex.axis=1.2)
par(new=TRUE)
plot(test$x,priorfit,type="l",lwd=2,lty=3,col="blue",xlab="",ylab="",main="",xlim=c(0,11),ylim=c(90,160),cex.axis=1.2)
# legend("bottomright",col=c("black","blue","red"),legend=c("True","Prior","Posterior"),lty=1:3,lwd=3.5,cex=1.2)
} |
4570049c6f71ab1ce9d03627decef557cc8982ab | bca9666a50085b37eb161286ab3f3c0d9e9c3396 | /ModularDataView.R | f5f028f35162706f7c0576916715fc756850b127 | [] | no_license | kdgosik/ShinyModules | c29d9cb8f9c1935718fa723e6c8d31f0d1fb6e33 | 851e52ef044576ed18854b7a4241a8f0df47c62d | refs/heads/master | 2021-01-19T21:31:55.248226 | 2017-08-17T16:40:08 | 2017-08-17T16:40:08 | 88,658,833 | 1 | 1 | null | null | null | null | UTF-8 | R | false | false | 1,576 | r | ModularDataView.R | require(DT)
# MODULE UI: renders a single full-width DT table; `id` namespaces the output
# so several instances of the module can coexist in one app.
DataViewUI <- function(id) {
  ns <- NS(id)
  DT::dataTableOutput(ns("table1"), width = "100%")
}
# MODULE Server: renders `data` (a reactive returning a data frame) as an
# interactive DT table with per-column filters, keyboard navigation, column
# reordering, and copy/print/CSV/Excel/PDF export named after `data_out_name`.
DataViewServer <- function(input, output, session, data, data_out_name) {
  # Qualify as DT::renderDataTable: shiny also exports renderDataTable(), and
  # depending on attach order the shiny version would mask DT's and silently
  # ignore the DT-specific extensions/options below.
  output$table1 <- DT::renderDataTable({
    DT::datatable( data()
                   , rownames = FALSE
                   , style = 'bootstrap'
                   , class = paste( c('compact', 'cell-border' , 'hover' , 'stripe') , collapse = " ")
                   , filter = 'top'
                   , extensions = c( 'Buttons' , 'KeyTable' , 'ColReorder' , 'FixedColumns' , 'FixedHeader')
                   , options = list(
                     dom = 'Bfrtip'
                     , autoWidth = TRUE
                     , columnDefs = list( list( width = '200px', targets = 1 ) )
                     , colReorder = TRUE
                     , paging = FALSE  # was the reassignable shorthand F
                     , keys = TRUE     # was the reassignable shorthand T
                     , scrollX = TRUE
                     , scrollY = TRUE
                     , fixedHeader = TRUE
                     , buttons = list(
                       'colvis'
                       , 'copy'
                       , 'print'
                       , list( extend = 'collection', buttons = list(list(extend='csv', filename = data_out_name)
                                                                     , list(extend='excel', filename = data_out_name)
                                                                     , list(extend='pdf', filename= data_out_name) )
                               , text = 'Download'
                       ) ) ) )
  })
}
24029e7eee0875afc5a53cc2e5aeceb041836c27 | abbcbc15352d486b89cece62d1b7ff4fbe36bf05 | /man/drive_auth_config.Rd | 83aa08513131b2df3f6b0f1b51b7182f2625da04 | [] | no_license | zuiaishuijiangui/googledrive | a58ce250be6af36901a729a5f0eaf7a25b09c554 | c05ee9f12f554c8453c13d13c163ab7b8016b488 | refs/heads/master | 2020-04-05T12:06:28.858799 | 2018-11-09T12:52:13 | 2018-11-09T12:52:13 | 156,858,854 | 0 | 0 | null | null | null | null | UTF-8 | R | false | true | 2,453 | rd | drive_auth_config.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/drive_auth.R
\name{drive_auth_config}
\alias{drive_auth_config}
\title{View or set auth config}
\usage{
drive_auth_config(app = NULL, path = NULL, api_key = NULL)
}
\arguments{
\item{app}{OAuth app. Defaults to a tidyverse app that ships with
googledrive.}
\item{path}{Path to the JSON file.}
\item{api_key}{API key. Defaults to a tidyverse key that ships with
googledrive. Necessary in order to make unauthorized "token-free" requests
for public resources.}
}
\value{
A list of class \code{auth_config}, with the current auth configuration.
}
\description{
This function gives advanced users more control over auth.
Whereas \code{\link[=drive_auth]{drive_auth()}} gives control over tokens, \code{drive_auth_config()}
gives control of:
\itemize{
\item The OAuth app. If you want to use your own app, setup a new project in
\href{https://console.developers.google.com}{Google Developers Console}. Follow
the instructions in
\href{https://developers.google.com/identity/protocols/OAuth2InstalledApp}{OAuth 2.0 for Mobile & Desktop Apps}
to obtain your own client ID and secret. Either make an app from your
client ID and secret via \code{\link[httr:oauth_app]{httr::oauth_app()}} or provide a path
to the JSON file containing same, which you can download from
\href{https://console.developers.google.com}{Google Developers Console}.
\item The API key. If googledrive auth is deactivated via \code{\link[=drive_deauth]{drive_deauth()}}, all
requests will be sent with an API key in lieu of a token. If you want to
provide your own API key, setup a project as described above and follow the
instructions in \href{https://support.google.com/googleapi/answer/6158862}{Setting up API keys}.
}
}
\examples{
## this will print current config
drive_auth_config()
if (require(httr)) {
## bring your own app via client id (aka key) and secret
google_app <- httr::oauth_app(
"my-awesome-google-api-wrapping-package",
key = "123456789.apps.googleusercontent.com",
secret = "abcdefghijklmnopqrstuvwxyz"
)
drive_auth_config(app = google_app)
}
\dontrun{
## bring your own app via JSON downloaded from Google Developers Console
drive_auth_config(
path = "/path/to/the/JSON/you/downloaded/from/google/dev/console.json"
)
}
}
\seealso{
Other auth functions: \code{\link{auth-config}},
\code{\link{drive_auth}}, \code{\link{drive_deauth}}
}
\concept{auth functions}
|
3a1a57665861d2d98ec94d54cffe27821d21bb19 | 002929791137054e4f3557cd1411a65ef7cad74b | /tests/testthat/test_checkChangedColsLst.R | e70726edf0dc4837deea9e43e31df4cd78f83a10 | [
"MIT"
] | permissive | jhagberg/nprcgenekeepr | 42b453e3d7b25607b5f39fe70cd2f47bda1e4b82 | 41a57f65f7084eccd8f73be75da431f094688c7b | refs/heads/master | 2023-03-04T07:57:40.896714 | 2023-02-27T09:43:07 | 2023-02-27T09:43:07 | 301,739,629 | 0 | 0 | NOASSERTION | 2023-02-27T09:43:08 | 2020-10-06T13:40:28 | null | UTF-8 | R | false | false | 1,206 | r | test_checkChangedColsLst.R | #' Copyright(c) 2017-2020 R. Mark Sharp
#' This file is part of nprcgenekeepr
context("checkChangedColsLst")
library(nprcgenekeepr)
library(lubridate)
# Small test pedigree: four founders (s1, s2, d1, d2) and four offspring
# (o1-o4). The sire column is deliberately named "si re" (embedded space) and
# the other columns use non-canonical names, so qcStudbook() is forced to
# rename them; check.names = FALSE keeps the malformed name intact.
pedOne <- data.frame(ego_id = c("s1", "d1", "s2", "d2", "o1", "o2", "o3",
                                "o4"),
                     `si re` = c(NA, NA, NA, NA, "s1", "s1", "s2", "s2"),
                     dam_id = c(NA, NA, NA, NA, "d1", "d2", "d2", "d2"),
                     sex = c("F", "M", "M", "F", "F", "F", "F", "M"),
                     # Random birth dates in 2000-2015. No set.seed(), so the
                     # dates differ per run -- the assertions below do not
                     # depend on their values, only on column renaming.
                     birth_date = mdy(
                       paste0(sample(1:12, 8, replace = TRUE), "-",
                              sample(1:28, 8, replace = TRUE), "-",
                              sample(seq(0, 15, by = 3), 8, replace = TRUE) +
                                2000)),
                     stringsAsFactors = FALSE, check.names = FALSE)
test_that("checkChangedColsLst identifies absence of column changes", {
  # A freshly constructed empty error list records no renamed columns.
  errorLst <- getEmptyErrorLst()
  expect_false(checkChangedColsLst(errorLst$changedCols))
})
test_that("checkChangedColsLst identifies presence of column changes", {
  # qcStudbook() must normalise "si re"/"ego_id"/etc., so changes are reported.
  errorLst <- qcStudbook(pedOne, reportErrors = TRUE, reportChanges = TRUE)
  expect_true(checkChangedColsLst(errorLst$changedCols))
})
|
0405ff199de24d69bdd33cdf9b3426b4296b0d69 | b6dcad580c1010d2eb8bbcd9a71eea98b12f1316 | /plot4.R | 1bedced8754199516380eaea23565cb530471940 | [] | no_license | Dave9/ExData_Plotting1 | 3ac2bf8ac230a5f37788fb9a5f0368d114a812fa | aee3339b890bb5d65d266b3d7a23b1913886a75b | refs/heads/master | 2020-12-14T07:34:43.520234 | 2016-03-07T01:12:41 | 2016-03-07T01:12:41 | 53,238,580 | 0 | 0 | null | 2016-03-06T05:01:18 | 2016-03-06T05:01:17 | null | UTF-8 | R | false | false | 2,169 | r | plot4.R | plot3 <- function() {
# Course Project Assignment #1 for Exploratory Data Analysis
# Plot #4 - Display 4 plots on the screen device and in a PNG file
#
# Using the Individual household electric power consumption Data Set and
# base plotting system, create plot4.R which produces plot4.png
#
#Download and unzip this source data File:
fileURL = "https://d396qusza40orc.cloudfront.net/exdata%2Fdata%2Fhousehold_power_consumption.zip"
#Save it to "household_power_consumption.txt"
#Load source file into data.frame, convert the Date and Time variables to R Date/Time
#class and then subset required rows from February 1-2, 2007
df <- read.csv("household_power_consumption.txt", sep=';', na.strings = "?")
c <- cbind(DateTime = strptime(paste(df[,1], df[,2]), format = "%d/%m/%Y %H:%M:%S"), df[,3:9])
hpc <- c[c$DateTime >= "2007-02-01 00:00:00" & c$DateTime < "2007-02-03 00:00:00" & !is.na(c$DateTime), ]
rm(df); rm(c) # clean up temp variables
#Multi-plot setup
par(mfcol = c(2, 2), mar = c(4, 4, 2, 1), oma = c(0, 0, 2, 0))
with(hpc, {
#Quadrant 1 (same as Plot 2)
plot(DateTime, Global_active_power, type="l", xlab = "", ylab = "")
title(ylab = "Global Active Power (kilowatts)")
#Quadrant 2 (same as Plot 3)
plot(DateTime, Sub_metering_1, type="l", col = "black", xlab = "", ylab = "")
lines(DateTime, Sub_metering_2, type="l", col = "red")
lines(DateTime, Sub_metering_3, type="l", col = "blue")
title(ylab = "Energy sub metering")
legend("topright", lty = 1, col = c("black", "red", "blue"),
legend = c("Sub_metering_1", "Sub_metering_2", "Sub_metering_3"))
#Quadrant 3 (new line plot of Voltage)
plot(DateTime, Voltage, type="l")
#Quadrant 4 (new line plot of Global Reactive Power)
plot(DateTime, Global_reactive_power, type="l")
})
# For png and many devices, the default width = 480, height = 480
dev.copy(png, file = "plot4.png")
dev.off()
}
|
3cb28568f0efe05922dd305419b541a19c4ff134 | 95e1ac9ffeabf821f2ca809103240de470f40f85 | /man/getM.Rd | 3312f78e18c367ad0164a2056d259c8d357a3d90 | [] | no_license | arappold/docopulae | 6c7798475d75a459d192ff0a49a73fb40b12a902 | c7fa4da59637a2b8c26353b750a9f6cf6f8336d7 | refs/heads/master | 2020-12-29T02:31:56.954467 | 2018-10-26T10:05:53 | 2018-10-26T10:05:53 | 36,495,029 | 0 | 1 | null | null | null | null | UTF-8 | R | false | true | 486 | rd | getM.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/main.R
\name{getM}
\alias{getM}
\title{Get Fisher Information}
\usage{
getM(mod, des)
}
\arguments{
\item{mod}{a model.}
\item{des}{a design.}
}
\value{
\code{getM} returns a named matrix, the Fisher information.
}
\description{
\code{getM} returns the Fisher information corresponding to a model and a design.
}
\examples{
## see examples for param
}
\seealso{
\code{\link{param}}, \code{\link{design}}
}
|
530927d17c9407a97348764673c3715c863214ea | 4f46f3b0377a558f488bd2c035b2f5f09cce1e31 | /specalyzer-pkg/R/plot-vegindex-selection.R | b17b872c7fe56f431265234ee0ae474583f487c3 | [
"MIT"
] | permissive | alkc/specalyzer | d34ee95fc37188a0caeafc865d504adf239248bb | 2b7278d8020f2c4193ff9ce2f5ded66e7857f5be | refs/heads/master | 2021-07-09T21:27:37.300561 | 2021-04-15T21:07:44 | 2021-04-15T21:07:44 | 123,193,976 | 3 | 3 | null | null | null | null | UTF-8 | R | false | false | 2,967 | r | plot-vegindex-selection.R | #' @export
plot_vegindex_selection <- function(speclib_data, attribute,
                                    attribute_type = c("continuous", "categorical")) {
  # Rank every supported vegetation index by how well it tracks `attribute`
  # and visualize the ranking as a sorted plotly bar chart.
  #
  # speclib_data:   spectral library object understood by the package helpers
  # attribute:      name of the attribute to relate the indices to
  # attribute_type: "continuous"  -> Pearson correlation per index
  #                 "categorical" -> one-way ANOVA -log10(p-value) per index
  #
  # match.arg() resolves the default choice vector and rejects anything else.
  # Previously, omitting attribute_type passed the full length-2 vector into
  # the `==` comparisons below (an error under R >= 4.2), and an unrecognised
  # value fell through both branches, leaving `p` undefined.
  attribute_type <- match.arg(attribute_type)
  attribute_vector <- get_attr_column(speclib_data, attribute)
  vi_table <- calculate_all_vegindexes(speclib_data)
  # Remove all only-NAs columns from vegindex table:
  # TODO: Add warning when index columns removed.
  vi_table <- vi_table[, !get_all_NA_cols(vi_table)]
  if (attribute_type == "continuous") {
    # Correlation coefficient between each available index (supported by the
    # data) and the attribute of interest, shown as a sorted bar chart.
    plot_data <- get_vegindex_attribute_correlations(vi_table, attribute_vector)
    plot_data <- plot_data[order(plot_data$correlation_coefficient), ]
    p <- plot_ly(plot_data, x = ~index, y = ~correlation_coefficient)
    p <- layout(p, yaxis = list(range = c(-1, 1)))
    # Credit to @mtoto: https://stackoverflow.com/a/40703117
    # Removes the x-axis title and tells plotly to sort the x-axis on the
    # correlation coefficient value.
    xform <- list(categoryorder = "array",
                  categoryarray = plot_data$correlation_coefficient,
                  tickangle = 45,
                  title = "")
    yform <- list(title = "Pearson correlation coefficient")
  } else {
    # Categorical attribute: per-index ANOVA significance on a -log10 scale.
    plot_data <- get_significant_indices(vi_table, as.factor(attribute_vector))
    plot_data <- plot_data[order(plot_data$log_ten_p), ]
    p <- plot_ly(plot_data, x = ~index, y = ~log_ten_p)
    xform <- list(categoryorder = "array",
                  categoryarray = plot_data$log_ten_p,
                  tickangle = 45,
                  title = "")
    yform <- list(title = "-log10(p-value)")
  }
  plotly::layout(p, xaxis = xform, yaxis = yform)
}
get_vegindex_attribute_correlations <- function(vi_table, attribute_vector, corr_method = "pearson") {
  # Correlate every vegetation-index column of `vi_table` with the continuous
  # `attribute_vector` (complete observations only).
  #
  # Returns a data frame with one row per index: its name (`index`) and the
  # correlation coefficient (`correlation_coefficient`).
  #
  # vapply over seq_len() replaces the original seq(ncol)-driven loop, which
  # expanded to c(1, 0) for a zero-column table and indexed out of bounds.
  coefficients <- vapply(
    seq_len(ncol(vi_table)),
    function(j) cor(vi_table[, j], attribute_vector,
                    method = corr_method, use = "complete.obs"),
    numeric(1)
  )
  data.frame(index = colnames(vi_table),
             correlation_coefficient = coefficients)
}
get_significant_indices <- function(vi_table, attribute_vector) {
  # One-way ANOVA of each vegetation-index column against the categorical
  # attribute. Returns a data frame with one row per index: its name, the raw
  # p-value, and -log10(p) for plotting.
  nbr_indices <- ncol(vi_table)
  plot_data <- data.frame(index = colnames(vi_table),
                          p_value = rep(NA_real_, nbr_indices),
                          log_ten_p = rep(NA_real_, nbr_indices))
  # seq_len() (not seq()) so a zero-column table yields an empty result
  # instead of indexing out of bounds with c(1, 0).
  for (i in seq_len(nbr_indices)) {
    predictor_index <- vi_table[, i]
    fit <- anova(lm(predictor_index ~ as.factor(attribute_vector)))
    p_value <- fit[["Pr(>F)"]][1]
    plot_data[i, 2] <- p_value
    plot_data[i, 3] <- -log10(p_value)
  }
  plot_data
}
|
cb6b0c2b984c5e2626113f35b35e744a0c5b5bc3 | b22f245c479058ef8c3aff8ba24634efc7883ee7 | /cachematrix.R | afa2acfd30ef2f1f50c9d4f410c13fd9050da60d | [] | no_license | mailtoamol/R_Assignments | 0653dad5ac35560851868db6a4212144257191cc | 51783ae7b194a8a10786de03191d83452187340d | refs/heads/master | 2021-01-25T00:15:55.708801 | 2015-08-14T08:33:01 | 2015-08-14T08:33:01 | 40,647,970 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 1,144 | r | cachematrix.R | ## Put comments here that give an overall description of what your
## functions do
## Write a short comment describing this function
## Wrap a matrix together with a one-slot cache (intended for its inverse).
## Returns a list of accessor closures that share the enclosing environment:
##   getX()       - the wrapped matrix
##   getMatrix()  - the cached value (NULL until something is stored)
##   setMatrix(v) - store a value in the cache
makeCacheMatrix <- function(x = matrix()) {
  matriX <- NULL
  getX <- function() x
  getMatrix <- function() matriX
  setMatrix <- function(setThis) matriX <<- setThis
  list(getX = getX,
       getMatrix = getMatrix,
       setMatrix = setMatrix)
}
## Write a short comment describing this function
## Return the inverse of the matrix wrapped by a makeCacheMatrix() object.
## On the first call the inverse is computed with solve() (solve(M) with the
## second argument missing inverts M) and stored in the wrapper's cache; later
## calls announce the cache hit and return the stored copy. `...` is forwarded
## to solve().
cacheSolve <- function(x = matrix(), ...) {
  cached <- x$getMatrix()
  if (!is.null(cached)) {
    message("Matrix is already Cached. Returning Cached data")
    return(cached)
  }
  inverse <- solve(x$getX(), ...)
  x$setMatrix(inverse)
  inverse
}
|
af1a814b95e02ea463304eb8791cb55a4c5e1c1e | 1c5c79128576c2db64687e51f9c9cd8f8d14078c | /codes/2019_2020/20191021i28_schematy.R | 9c6081a332ecbd2353de3fde35a9616e8864d683 | [
"CC0-1.0"
] | permissive | lwawrowski/metoda-reprezentacyjna | 0076cf053d5b73b5460a929087bda1774c8fd88b | 4d9273ec03238d28d5faed52a0a88a5632c3c857 | refs/heads/master | 2021-06-01T22:28:53.083992 | 2021-02-24T08:44:15 | 2021-02-24T08:44:15 | 150,848,060 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 3,535 | r | 20191021i28_schematy.R | library(tidyverse)
library(readxl)
# Teaching script: sampling designs (simple, proportional, stratified,
# cluster, systematic) on Polish polling-district ("obwody") data.
# load the data
obwody <- read_xlsx("data/obwody_glosowania.xlsx")
# tidy up the column names
obwody <- janitor::clean_names(obwody)
# select polling districts from Poznań (dropping districts with no residents)
obwody_poznan <- obwody %>%
  filter(powiat == "Poznań", mieszkancy != 0)
# simple random sampling
# draw 30 districts
los_proste_n <- obwody_poznan %>%
  sample_n(30)
# draw 10% of the districts
los_proste_frac <- obwody_poznan %>%
  sample_frac(0.1) %>%
  # mutate(prob=23/227)
  mutate(prob=n()/nrow(obwody_poznan), # inclusion probability of each unit
         waga=1/prob) # design weight derived from the sample
sum(los_proste_frac$waga) # sum of weights = population size
library(sampling)
# simple random sample of 1000 districts
los_proc <- obwody %>%
  sample_n(1000)
# number of potential voters in the sample
sum(los_proc$wyborcy)
# probability-proportional sampling
# directly proportional to the number of voters:
# districts with many voters have a higher selection probability
los_proc <- obwody %>%
  sample_n(1000, weight = wyborcy)
# number of potential voters in the sample - larger than under simple sampling
sum(los_proc$wyborcy)
# inversely proportional to the number of voters:
# districts with few voters have a higher selection probability
los_proc <- obwody %>%
  sample_n(1000, weight = 1/wyborcy)
# number of potential voters in the sample - smaller than under simple sampling
sum(los_proc$wyborcy)
# inclusion probabilities
# for a sample of 1000 districts proportional to the number of voters
inclusionprobabilities(obwody$wyborcy, 1000)
# add the probabilities to the data set
obwody <- obwody %>%
  mutate(p_prop=inclusionprobabilities(wyborcy, 1000))
# carry out the draw
los_prop <- obwody %>%
  sample_n(1000, weight = wyborcy)
# with proportional sampling the sum of weights does not equal the population size
sum(1/los_prop$p_prop)
# stratified sampling
# determine the sample size within each stratum (4% of each Sejm constituency)
n_sejm <- obwody %>%
  count(numer_okregu_do_sejmu) %>%
  mutate(n_proba=round(0.04*n))
sum(n_sejm$n_proba)
# apply stratified sampling
los_sejm <- strata(data = obwody,
                   stratanames = "numer_okregu_do_sejmu",
                   size = n_sejm$n_proba)
# join the sampled units back to the original data
los_sejm_obwody <- getdata(obwody, los_sejm)
# sum of weights = population size
sum(1/los_sejm_obwody$Prob)
# exercise: repeat the stratified design for Senate constituencies
n_senat <- obwody %>%
  count(numer_okregu_do_senatu) %>%
  mutate(n_proba=round(0.018205*n))
sum(n_senat$n_proba)
los_senat <- strata(data = obwody,
                    stratanames = "numer_okregu_do_senatu",
                    size = n_senat$n_proba)
los_senat_obwody <- getdata(obwody, los_senat)
# sum of weights = population size
sum(1/los_senat_obwody$Prob)
# cluster sampling
# draw 10 municipalities (gmina) and keep every district inside them
los_zespol <- cluster(data = obwody,
                      clustername = "gmina",
                      size = 10)
los_zespol_obwody <- getdata(obwody, los_zespol)
# with cluster sampling the sum of weights does not equal the population size
sum(1/los_zespol_obwody$Prob)
# systematic sampling
# select every k-th unit, where k depends on the desired sample size
los_syst <- obwody %>%
  mutate(prob=2000/nrow(.))
# show which units would be drawn (prints a 0/1 indicator; drawn again below)
UPsystematic(los_syst$prob)
los_syst <- los_syst %>%
  mutate(w_probie=UPsystematic(prob)) %>%
  filter(w_probie == 1)
# sum of weights = population size
sum(1/los_syst$prob)
|
74e0f132174701f607574725610fb9ea93d0ea62 | 2b106b4488e294b561de4cdd8492d5341229d6d4 | /man/ranger.Rd | 9de6ff00ccaa26b34cfcb9cd3ec6e538944a1dbb | [
"Apache-2.0"
] | permissive | ysnghr/fastai | 120067fcf5902b3e895b1db5cd72d3b53f886682 | b3953ad3fd925347362d1c536777e935578e3dba | refs/heads/master | 2022-12-15T17:04:53.154509 | 2020-09-09T18:39:31 | 2020-09-09T18:39:31 | 292,399,169 | 0 | 0 | Apache-2.0 | 2020-09-09T18:34:06 | 2020-09-02T21:32:58 | R | UTF-8 | R | false | true | 490 | rd | ranger.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/optimizers.R
\name{ranger}
\alias{ranger}
\title{ranger}
\usage{
ranger(
p,
lr,
mom = 0.95,
wd = 0.01,
eps = 1e-06,
sqr_mom = 0.99,
beta = 0,
decouple_wd = TRUE
)
}
\arguments{
\item{p}{p}
\item{lr}{lr}
\item{mom}{mom}
\item{wd}{wd}
\item{eps}{eps}
\item{sqr_mom}{sqr_mom}
\item{beta}{beta}
\item{decouple_wd}{decouple_wd}
}
\description{
Convenience method for `Lookahead` with `RAdam`
}
|
30a807e75548eaf313ae80e267414f793246f0e3 | 2c69da29d60c8bd420c4a1b17d76705d415556bd | /Test_APSIMOptim/APSIMRUN.R | 86ac024e861837b03bb2fae508b42931216e9cd0 | [] | no_license | para2x/APSIMOptim | caeb466a7096b4593c9a2dc28ce874e49d11f4bb | 76adc93eba6954eb5ef4e4777f5a14394b8d6b1a | refs/heads/master | 2021-08-08T16:13:57.640419 | 2017-11-10T16:54:32 | 2017-11-10T16:54:32 | 82,089,521 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 600 | r | APSIMRUN.R | library(APSIMOptim)
# Driver script: run a single APSIM simulation of BioMaize.apsim via
# APSIMOptim::apsimRun with three overridden soil/biochar parameters.
# The working directory must contain the .apsim file referenced below.
setwd("C:/Users/Para2x/Dropbox/Hamze Dokoohaki/Journal papers/Biochar module evaluation/R/APSIMOptimi_Package/Test_APSIMOptim")
############
apsimWd <- getwd()  # paste0(getwd()) was a no-op wrapper around getwd()
apsimExe <- "C:/Program Files (x86)/Apsim77-r3632/Model/Apsim.exe"
apsimFile <- "BioMaize.apsim"
apsimVarL <- list("Soil/Water/DUL" = 2, "Soil/Water/SAT" = 1,
                  "folder/manager2/ui/biochar_loss" = 1) ## variables of interest
VarT <- c("Element", "Element", "Single")  # type of each variable, same order as apsimVarL
# unlinkf = FALSE keeps the generated simulation files (was the reassignable
# shorthand F); Varvalues supplies the values for the three variables above.
Result.sim <- apsimRun(apsimWd, apsimExe, apsimFile, apsimVarL, VarT, tag = "",
                       unlinkf = FALSE, Varvalues = c(0.4, 0.1, 0.1))
|
8f755a1eb5619ba2dcd52cbaa7e6b6da1346b6df | a7ae7af44ba62843dba1e7619701a985de8d0a57 | /4. R Shiny/UCDP Violence Dashboard/ucdp-master/server.R | 60fa5595669bcfbc2d07dd9d01388890756e309e | [] | no_license | JavierParada/Code-examples | 8fa83e5919c06069ee33dc743b6b6642a9ca4011 | f99c547851a477e97660f1c7fa95661a07d173f5 | refs/heads/main | 2023-02-25T08:01:49.685266 | 2021-01-28T14:00:33 | 2021-01-28T14:00:33 | 316,000,639 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 2,143 | r | server.R | library(shiny)
library(dplyr)
library(leaflet)
library(DT)
# Server for a UCDP Georeferenced Event Dataset (GED) dashboard: a leaflet map
# of conflict events plus a filterable DT table of the raw rows.
shinyServer(function(input, output) {
  # Import Data and clean it
  bb_data <- read.csv("data/ged201.csv", stringsAsFactors = FALSE )
  bb_data <- data.frame(bb_data)
  bb_data$Latitude <- as.numeric(bb_data$latitude)
  bb_data$Longitude <- as.numeric(bb_data$longitude)
  # Log of the best fatality estimate; computed but never used below.
  # NOTE(review): best == 0 would give -Inf here -- confirm intended use.
  bb_data$Best <- log(bb_data$best)
  # Comparing a numeric column to the string "NA" yields NA for missing
  # values, and filter() drops NA conditions -- so these do drop rows with
  # missing coordinates, though is.na() would state that intent directly.
  bb_data=filter(bb_data, Latitude != "NA") # removing NA values
  bb_data=filter(bb_data, Longitude != "NA") # removing NA values
  # new column for the popup label (HTML rendered when a marker is clicked)
  bb_data <- mutate(bb_data, cntnt=paste0('<strong>Name: </strong>',relid,
                                          '<br><strong>Year: </strong> ',year,
                                          '<br><strong>Type of violence: </strong> ',type_of_violence,
                                          '<br><strong>Fatalities: </strong> ',best,
                                          '<br><strong>Article: </strong>',source_article))
  # create a color paletter for category type in the data file
  # NOTE(review): in UCDP GED the type_of_violence field is commonly coded
  # 1/2/3; if so, these string domain levels never match and every marker
  # falls back to the NA colour -- confirm against the csv.
  pal <- colorFactor(pal = c("#669E9A", "#E1BB44", "#C33B27"), domain = c("state-based armed conflict", "non-state conflict", "one-sided violence"))
  # create the leaflet map
  output$bbmap <- renderLeaflet({
    leaflet(bb_data) %>%
      addCircles(lng = ~Longitude, lat = ~Latitude) %>%
      addTiles() %>%
      addCircleMarkers(data = bb_data, lat = ~Latitude, lng =~Longitude,
                       radius = 3, popup = ~as.character(cntnt),
                       color = ~pal(type_of_violence),
                       stroke = FALSE, fillOpacity = 0.8)%>%
      addLegend(pal=pal, values=bb_data$type_of_violence,opacity=1, na.label = "Not Available")%>%
      addEasyButton(easyButton(
        icon="fa-crosshairs", title="ME",
        onClick=JS("function(btn, map){ map.locate({setView: true}); }")))
  })
  #create a data object to display data
  output$data <-DT::renderDataTable(datatable(
    bb_data,filter = 'top',
    colnames = c("id","relid","year","type","source","latitude","longitude","country","best","Latitude","Longitude")
  ))
})
|
144c5f8b8a156d660f332f4e188a55b0a780cc49 | 36ed081ddc26e031f41d5230f07e86578ee6f7f6 | /Membuat Fungsi (Latihan).R | a078946117a7e8c723ed02f6cd06fc419b8096f5 | [] | no_license | dededianpratiwi/Sintaks-R-Pengantar-Statistika-Keuangan | 04db2c2f0d3b1207ef287121bf1a8a2d02387428 | 682bf9b1d20658e768e5c7e8dd44523608d936ea | refs/heads/master | 2020-03-19T14:21:38.826727 | 2018-06-08T13:16:45 | 2018-06-08T13:16:45 | 136,618,836 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 1,692 | r | Membuat Fungsi (Latihan).R | ##Pengantar Statistika Keuangan 13 Maret 2018##
setwd("D:\\Kuliah\\Semester 6\\Pengantar Statistika Keuangan\\Syntax R") #membuat direktori file
# Defining a function: area of a triangle from base (a) and height (t)
luassegitiga <- function(a, t) {
  # Last evaluated expression is the return value; an explicit return()
  # would be equivalent here.
  0.5 * a * t
}
# Function names must not contain spaces
luassegitiga(4, 8)
# multiplication function: computes a * b * c / d.
# The original defaults were c = TRUE, d = TRUE, which R silently coerces to 1
# in arithmetic; the numeric literal 1 gives identical behaviour with clearer
# intent (c and d remain optional).
perkalian <- function(a, b, c = 1, d = 1) {
  a * b * c / d
}
perkalian(4, 3, d = 2)
# c and d are optional: they may be supplied or omitted (they have defaults)
# Looping in R
# for-loop control
for (i in 1:4){
  print("Alay boleh, asal taat aturan")
}
# if control
a <- 22.2
if (is.numeric(a)){
  cat("Variabel a adalah suatu angka:", a)
}
# cat: similar to print, but cat can concatenate several pieces of output
# if is.numeric is not satisfied, nothing is printed
# if...else control
a <- "Nom...nom"
if (is.numeric(a)){
  cat("Variabel a adalah suatu angka:", a)
} else {
  cat("Variabel a bukan angka:", a)
}
# provides an option for both the TRUE and the FALSE case
# chained / repeated if..else control
a <- 7
if (a>10){
  print("Statistics ENTHUSIASTICS")
} else if (a>0 & a<= 10) {
  print("Data analis yang antusias dan berintegritas")
} else {
  print("Lima konsentrasi")
}
# switch control (choice by position or name)
pilih <- switch(3, "Bahasa R", "Bahasa Python", "Bahasa C")
print(pilih)
# or, wrapped in a function:
# Dispatch on `num` with switch(): "satu" multiplies a and b, "dua" divides.
# print() returns its argument, so the printed result is also the return
# value; any other `num` yields NULL (invisibly).
pilih <- function(num, a, b) {
  switch(num,
         satu = print(a * b),
         dua = print(a / b))
}
pilih("satu", 2, 5)
|
86126516112010906295664a194b2a7e56547948 | 51fdd67d355df9ed4378bc86e432386edd31d4ca | /man/tableau_seq_gradient_pal.Rd | 49e356833eafd19c476db088ad026d2d491dece0 | [] | no_license | bobbbbbi/Data-Visualisation-with-ggplot2-ggthemes | f434c6dede486849236ddc26d6a31a3ee093ffe9 | 6ff7f1589b6199bf4c11ffde12b5fed9ceee4fce | refs/heads/master | 2020-06-19T08:08:40.380067 | 2017-05-01T00:02:28 | 2017-05-01T00:02:28 | 94,182,518 | 3 | 0 | null | null | null | null | UTF-8 | R | false | true | 962 | rd | tableau_seq_gradient_pal.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/tableau.R
\name{tableau_seq_gradient_pal}
\alias{tableau_seq_gradient_pal}
\title{Tableau sequential colour gradient palettes (continuous)}
\usage{
tableau_seq_gradient_pal(palette = "Red", space = "Lab")
}
\arguments{
\item{palette}{Palette name. See \code{ggthemes_data$tableau$sequential}.}
\item{space}{Colour space in which to calculate gradient.}
}
\description{
Tableau sequential colour gradient palettes (continuous)
}
\examples{
library("scales")
x <- seq(0, 1, length = 25)
show_col(tableau_seq_gradient_pal('Red')(x))
show_col(tableau_seq_gradient_pal('Blue')(x))
show_col(tableau_seq_gradient_pal('Purple Sequential')(x))
}
\seealso{
Other colour tableau: \code{\link{scale_colour_gradient2_tableau}},
\code{\link{scale_colour_gradient_tableau}},
\code{\link{scale_colour_tableau}},
\code{\link{tableau_color_pal}},
\code{\link{tableau_div_gradient_pal}}
}
|
69f2b4b55af85deeaba9c044c2622d3469ce54b2 | d1740495f09bf08dd75a1596e9d7a372a2e2089d | /Stats Project/Time_Series.R | a7d00987466e7fa5d72d908fd59f696221e8a2a9 | [] | no_license | chetan1b29/portfolio | 827e8483d0f177d94037a181f7e0a306434915cb | cbd886558a07341d83b3345855daca30e77bdfad | refs/heads/main | 2023-04-27T08:33:13.414684 | 2021-05-26T20:36:05 | 2021-05-26T20:36:05 | 334,973,728 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 2,266 | r | Time_Series.R | library(fpp2)
library(car)
library(seasonal)
# Quarterly overseas-trips series (thousands), 2012 Q1 onward, frequency 4.
overSeas.df <- read.csv('D:/Chetan-PC/Stats CA2/OverseasTrips.csv')
names(overSeas.df)<- c('quarter', 'trips')
overSeas.ts <- ts(overSeas.df$trips,start=c(2012,1), frequency = 4)
str(overSeas.ts)
print(overSeas.ts)
# ---- Exploratory plots ----
autoplot(overSeas.ts)
ggseasonplot(overSeas.ts, year.labels = TRUE, year.labels.left = TRUE)+
  ylab("Tousands")+
  ggtitle('Trips in Thousands')
ggsubseriesplot(overSeas.ts)+
  ylab(" Thousands")+
  ggtitle("Seasonal Sbseries Plot: Overseas Trips")
# ---- Decomposition (classical multiplicative, then X-13 via seas()) ----
overSeas.decomp<-decompose(overSeas.ts, type='multiplicative')
autoplot(overSeas.decomp)
plot(seas(overSeas.ts))
# ---- Benchmark: seasonal naive forecast, 3 quarters ahead ----
overSeas.snaive <- snaive(overSeas.ts, h=3)
summary(overSeas.snaive)
autoplot(overSeas.snaive)
checkresiduals(overSeas.snaive)
accuracy(overSeas.snaive)
# ---- ETS(M,A,M) with fixed alpha ----
overSeas.ets<-ets(overSeas.ts, model='MAM', alpha = 0.6)
forecast(overSeas.ets,h=3)
summary(overSeas.ets)
round(accuracy(overSeas.ets),2)
checkresiduals(overSeas.ets)
autoplot(forecast(overSeas.ets, 3))+autolayer(fitted(forecast(overSeas.ets, 3)), series = 'Fitted')
# ---- Holt-Winters multiplicative ----
overSeas.hw<-hw(overSeas.ts, seasonal = 'multiplicative', h=3)
summary(overSeas.hw)
autoplot(overSeas.hw)+autolayer(fitted(overSeas.hw), series = 'Fitted')
# ---- Stationarity checks ----
# NOTE(review): adf.test() lives in the tseries package, which is not loaded
# here -- presumably attached interactively; confirm.
adf.test(overSeas.ts) ##Augmented Dickey Fuller Test
ggtsdisplay(overSeas.ts)
ndiffs(overSeas.ts)
nsdiffs(overSeas.ts)
adf.test(diff(overSeas.ts, differences = 2))
plot(diff(overSeas.ts, differences = 2))
overseas.diff <- diff(overSeas.ts, differences = 2, lag = 4)
ggtsdisplay(overseas.diff)
# ---- Manual seasonal ARIMA(1,1,0)(1,1,0)[4] ----
overseas.arima <- arima(overSeas.ts, order=c(1,1,0), seasonal = c(1,1,0))
summary(overseas.arima)
checkresiduals(overseas.arima)
Box.test(overseas.arima$residuals, type='Ljung-Box')
qqnorm(overseas.arima$residual)
qqline(overseas.arima$residual)
# ---- Automatic ARIMA selection ----
overseas.auto <- auto.arima(overSeas.ts)#, order=c(2,1,0), seasonal = c(0,1,2))
checkresiduals(overseas.auto)
summary(overseas.auto)
round(accuracy(overseas.auto),2)
checkresiduals(overseas.auto)
autoplot(forecast(overseas.auto,h=3))+autolayer(fitted(forecast(overseas.auto,h=3)), series = 'Fitted')
Box.test(overseas.auto$residuals, type='Ljung-Box')
qqnorm(overseas.auto$residual)
qqline(overseas.auto$residual)
# ---- Time-series cross-validation: RMSE of ETS(M,A,M) at h = 3 ----
# NOTE(review): tsCV()'s forecastfunction must be a function(y, h); here
# ets(model='MAM') is *called* with no data instead of being passed, which
# errors. Presumably the intent was
#   tsCV(overSeas.ts, function(y, h) forecast(ets(y, model = "MAM"), h = h), h = 3)
# -- confirm before relying on this line.
overSeas.ts %>% tsCV(forecastfunction = ets(model='MAM'), h=3) -> e
e^2 %>% mean(na.rm=TRUE) %>% sqrt()
|
42ad26c3e595eaca0bff069da52eef65a3ba3bf7 | 4275edb6be11534eb2f8e78d0eca9f82dfbcccfa | /reading in LAI.R | 88c4a44ff8544e7b5f16e79fee80be53b7350a7f | [] | no_license | davidjpmoore/Calloc_generalscripts | 12d60a2a186290c94f02013422b2ac54e1890cde | 2fe317d4e9807864c145fb06d36c4e047424a159 | refs/heads/master | 2021-01-18T11:29:03.569660 | 2016-02-11T21:37:51 | 2016-02-11T21:37:51 | 34,355,631 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 1,882 | r | reading in LAI.R | #reading in Andrew Richardson's LAI data
# Exploration of Andrew Richardson's LAI (leaf area index) data:
# which sites have data for which years.
LAI <- read.csv("~/Downloads/LAI%20for%20ELI.csv")
View(LAI)
summary(LAI)
#converting Year into character value
LAI$chYear<-as.character(LAI$Year)
summary(LAI$chYear) # was summary(chYear): chYear is a column, not a free-standing object
# levels() only applies to factors, so it returns NULL for a character column;
# use range()/unique() on LAI$chYear to see the span of years instead
levels(LAI$chYear)
#formatting year.decimals as r-friendly dates
library(lubridate)
Date <- format(date_decimal(LAI$ID), "%m-%d-%Y") # assumes ID holds decimal years -- TODO confirm
#plots to see years at each site
###US-Ha1
plot(LAI$Year[LAI$Site=="US-Ha1"], LAI$Site[LAI$Site=="US-Ha1"])
###US-MMS
plot(LAI$Year[LAI$Site=="US-MMS"], LAI$Site[LAI$Site=="US-MMS"])
###US-UMB
# Stray leading "(" removed here -- it made the original line a syntax error.
plot(LAI$Year[LAI$Site=="US-UMB"], LAI$Site[LAI$Site=="US-UMB"])
###US-WCr
plot(LAI$Year[LAI$Site=="US-WCr"], LAI$Site[LAI$Site=="US-WCr"])
#plots to see for which years LAI data is available
###US-Ha1
plot(LAI$Year[LAI$Site=="US-Ha1"],LAI$LAI2000_raw[LAI$Site=="US-Ha1"], xaxt='n')
axis(1, at=1990:2010, labels=NULL)
plot(LAI$Year[LAI$Site=="US-Ha1"],LAI$LAI2000_LAIr[LAI$Site=="US-Ha1"], xaxt='n')
axis(1, at=1990:2010, labels=NULL)
###US-MMS
plot(LAI$Year[LAI$Site=="US-MMS"],LAI$LAI2000_raw[LAI$Site=="US-MMS"], xaxt='n')
axis(1, at=1990:2010, labels=NULL)
plot(LAI$Year[LAI$Site=="US-MMS"],LAI$LAI2000_LAIr[LAI$Site=="US-MMS"], xaxt='n')
axis(1, at=1990:2010, labels=NULL)
###US-UMB
plot(LAI$Year[LAI$Site=="US-UMB"],LAI$LAI2000_raw[LAI$Site=="US-UMB"], xaxt='n')
axis(1, at=1990:2010, labels=NULL)
plot(LAI$Year[LAI$Site=="US-UMB"],LAI$LAI2000_LAIr[LAI$Site=="US-UMB"], xaxt='n')
axis(1, at=1990:2010, labels=NULL)
###US-WCr
plot(LAI$Year[LAI$Site=="US-WCr"],LAI$LAI2000_raw[LAI$Site=="US-WCr"])
axis(1, at=1998:2004, labels=NULL) #error: need finite ylim values
plot(LAI$Year[LAI$Site=="US-WCr"],LAI$LAI2000_LAIr[LAI$Site=="US-WCr"], xaxt='n')
axis(1, at=1998:2004, labels=NULL) #error: need finite ylim values
LAI$LAI2000_raw[LAI$Site=="US-WCr"] #all read "NA"; no LAI data available at this site?
|
15c8f029b098853fa824f6fede8d20f39d4ce70a | b99736b647a549de854a29b439a52d96d11f8a2f | /data.R | ec2b9939c084589a3cc70179c8100706a5f76e4d | [] | no_license | ices-taf/2015_had-iceg | 173064ea9518a5f76deca7e865a8df32ff2449a7 | df9c2ffb7306e82748052d58ed7160366d0d0447 | refs/heads/master | 2021-01-21T12:20:33.858943 | 2019-05-14T13:05:08 | 2019-05-14T13:05:08 | 91,788,709 | 0 | 1 | null | 2018-12-21T21:12:21 | 2017-05-19T09:17:13 | R | UTF-8 | R | false | false | 586 | r | data.R | ## Preprocess data, write TAF data tables
## Before: catageysa.dat (bootstrap/data)
## After: catage.csv, maturity.csv, survey_smb.csv, survey_smh.csv, wcatch.csv,
## wstock.csv (data)
library(icesTAF)
# utilities.R presumably defines extractData() used below -- confirm.
source("utilities.R")
# Ensure the TAF output directory exists.
mkdir("data")
## Extract tables
# `data` holds the parsed tables (catage, survey.smb, survey.smh, wstock,
# wcatch, maturity) read from the bootstrap input file.
data <- extractData("bootstrap/data/catageysa.dat")
## Write tables to data directory
# The output .csv names appear to derive from the expressions passed to
# write.taf() (see the "After:" header above), hence the setwd() into data/
# before writing and back out afterwards. The trailing "# 1.x" comments are
# the report table numbers each file corresponds to.
setwd("data")
write.taf(data$catage)     # 1.2
write.taf(data$survey.smb) # 1.3
write.taf(data$survey.smh) # 1.4
write.taf(data$wstock)     # 1.5
write.taf(data$wcatch)     # 1.6
write.taf(data$maturity)   # 1.7
setwd("..")
|
c8eeb90ef3053dfbe9ba9b42e00bd9bcb9b0e74c | 7afbb148ec11b3105aaead6bdd900f847e49eb18 | /tests/testthat/test-ratio.R | ba5aac5ce5e9fab45204e36ead13cb5238bd6a1d | [
"MIT"
] | permissive | tidymodels/recipes | 88135cc131b4ff538a670d956cf6622fa8440639 | eb12d1818397ad8780fdfd13ea14d0839fbb44bd | refs/heads/main | 2023-08-15T18:12:46.038289 | 2023-08-11T12:32:05 | 2023-08-11T12:32:05 | 76,614,863 | 383 | 123 | NOASSERTION | 2023-08-26T13:43:51 | 2016-12-16T02:40:24 | R | UTF-8 | R | false | false | 5,766 | r | test-ratio.R | library(testthat)
library(recipes)

# Shared fixture: four numeric columns (x2 is a recycled constant, x3 holds
# an NA to exercise missing-value propagation through the ratios) plus one
# non-numeric column x5 for the wrong-type tests.
# NOTE: removed unused `n <- 20`, which was referenced nowhere in this file.
ex_dat <- data.frame(
  x1 = -1:8,
  x2 = 1,
  x3 = c(1:9, NA),
  x4 = 11:20,
  x5 = letters[1:10]
)

# Base recipe that the ratio tests build on.
rec <- recipe(~ x1 + x2 + x3 + x4 + x5, data = ex_dat)
test_that("1:many", {
  # One numerator (x1) divided by every numeric denominator.
  ratio_rec <- rec %>%
    step_ratio(x1, denom = denom_vars(all_numeric()), id = "")

  # Before prep(), tidy() echoes the unresolved selectors.
  expect_equal(
    tidy(ratio_rec, number = 1),
    tibble(terms = "x1", denom = "all_numeric()", id = "")
  )

  ratio_rec <- prep(ratio_rec, ex_dat, verbose = FALSE)
  baked <- bake(ratio_rec, ex_dat)

  # Each new column is x1 divided by one of the other numeric columns,
  # named <numerator>_o_<denominator>.
  expected <- tibble(
    x1_o_x2 = ex_dat$x1 / ex_dat$x2,
    x1_o_x3 = ex_dat$x1 / ex_dat$x3,
    x1_o_x4 = ex_dat$x1 / ex_dat$x4
  )
  for (col in names(expected)) {
    expect_equal(expected[col], baked[col])
  }

  # After prep(), tidy() resolves the selection to concrete column pairs.
  expect_equal(
    tidy(ratio_rec, number = 1),
    tibble(terms = rep("x1", 3), denom = c("x2", "x3", "x4"), id = "")
  )
})
test_that("many:1", {
  # Every numeric column as numerator over a single denominator (x1).
  ratio_rec <- rec %>%
    step_ratio(all_numeric(), denom = denom_vars(x1), id = "")

  # Before prep(), tidy() echoes the unresolved selectors.
  expect_equal(
    tidy(ratio_rec, number = 1),
    tibble(terms = "all_numeric()", denom = "x1", id = "")
  )

  ratio_rec <- prep(ratio_rec, ex_dat, verbose = FALSE)
  baked <- bake(ratio_rec, ex_dat)

  # One ratio column per numeric numerator, all divided by x1.
  expected <- tibble(
    x2_o_x1 = ex_dat$x2 / ex_dat$x1,
    x3_o_x1 = ex_dat$x3 / ex_dat$x1,
    x4_o_x1 = ex_dat$x4 / ex_dat$x1
  )
  for (col in names(expected)) {
    expect_equal(expected[col], baked[col])
  }

  # After prep(), tidy() resolves the selection to concrete column pairs.
  expect_equal(
    tidy(ratio_rec, number = 1),
    tibble(terms = c("x2", "x3", "x4"), denom = rep("x1", 3), id = "")
  )
})
test_that("many:many", {
  # Every numeric column as numerator against every numeric column as
  # denominator; self-ratios (x/x) are excluded from the output.
  rec3 <- rec %>%
    step_ratio(all_numeric(), denom = denom_vars(all_numeric()), id = "")
  # Before prep(), tidy() echoes the unresolved selectors.
  exp_un_3 <- tibble(
    terms = "all_numeric()", denom = "all_numeric()", id = ""
  )
  expect_equal(tidy(rec3, number = 1), exp_un_3)
  rec3 <- prep(rec3, ex_dat, verbose = FALSE)
  obs3 <- bake(rec3, ex_dat)
  # Expected ratio columns, named <numerator>_o_<denominator>; note the
  # diagonal (x1_o_x1, ...) is absent.
  res3 <- tibble(
    x2_o_x1 = ex_dat$x2 / ex_dat$x1,
    x3_o_x1 = ex_dat$x3 / ex_dat$x1,
    x4_o_x1 = ex_dat$x4 / ex_dat$x1,
    x1_o_x2 = ex_dat$x1 / ex_dat$x2,
    x3_o_x2 = ex_dat$x3 / ex_dat$x2,
    x4_o_x2 = ex_dat$x4 / ex_dat$x2,
    x1_o_x3 = ex_dat$x1 / ex_dat$x3,
    x2_o_x3 = ex_dat$x2 / ex_dat$x3,
    x4_o_x3 = ex_dat$x4 / ex_dat$x3,
    x1_o_x4 = ex_dat$x1 / ex_dat$x4,
    x2_o_x4 = ex_dat$x2 / ex_dat$x4,
    x3_o_x4 = ex_dat$x3 / ex_dat$x4
  )
  # Compare column-by-column so a failure pinpoints the offending ratio.
  for (i in names(res3)) {
    expect_equal(res3[i], obs3[i])
  }
  # After prep(), tidy() lists all ordered (terms, denom) pairs except
  # the diagonal, built here via a cartesian product minus self-pairs.
  exp_tr_3 <- tidyr::crossing(
    terms = paste0("x", 1:4),
    denom = paste0("x", 1:4)
  )
  exp_tr_3 <- exp_tr_3[exp_tr_3$terms != exp_tr_3$denom, ]
  exp_tr_3$id <- ""
  expect_equal(tidy(rec3, number = 1), exp_tr_3)
})
test_that("wrong type", {
  # x5 is character, so any selection that sweeps non-numeric columns into
  # the numerator or denominator must fail at prep() time. The error text
  # is pinned via snapshots, so these expressions must not be reformatted.
  rec4 <- rec %>%
    step_ratio(x1, denom = denom_vars(all_predictors()))
  expect_snapshot(error = TRUE,
    prep(rec4, ex_dat, verbose = FALSE)
  )

  rec5 <- rec %>%
    step_ratio(all_predictors(), denom = denom_vars(x1))
  expect_snapshot(error = TRUE,
    prep(rec5, ex_dat, verbose = FALSE)
  )

  rec6 <- rec %>%
    step_ratio(all_predictors(), denom = denom_vars(all_predictors()))
  expect_snapshot(error = TRUE,
    prep(rec6, ex_dat, verbose = FALSE)
  )
})
test_that("check_name() is used", {
  # The step would create a column named mpg_o_disp; pre-creating a column
  # with that name must trigger the duplicate-name error (pinned via
  # snapshot) instead of silently overwriting it.
  dat <- mtcars
  dat$mpg_o_disp <- dat$mpg

  rec <- recipe(~ ., data = dat) %>%
    step_ratio(mpg, denom = denom_vars(disp))

  expect_snapshot(
    error = TRUE,
    prep(rec, training = dat)
  )
})
# Infrastructure ---------------------------------------------------------------

test_that("bake method errors when needed non-standard role columns are missing", {
  # Give x1 a non-standard role that is not required at bake time; the step
  # itself still needs x1, so baking data without it must raise the classed
  # missing-column error.
  trained <- rec %>%
    step_ratio(x1, denom = denom_vars(all_numeric())) %>%
    update_role(x1, new_role = "potato") %>%
    update_role_requirements(role = "potato", bake = FALSE) %>%
    prep(ex_dat, verbose = FALSE)

  expect_error(
    bake(trained, ex_dat[, 2:5]),
    class = "new_data_missing_column"
  )
})
test_that("empty printing", {
  # Print output for a step with an empty selection, both untrained and
  # trained, is pinned via snapshots; do not reformat these expressions.
  rec <- recipe(mpg ~ ., mtcars)
  rec <- step_ratio(rec, denom = vars(mpg))

  expect_snapshot(rec)

  rec <- prep(rec, mtcars)

  expect_snapshot(rec)
})
test_that("empty selection prep/bake is a no-op", {
  # A step_ratio() whose selection matches nothing must leave the baked
  # output identical to a recipe without the step.
  base_rec <- recipe(mpg ~ ., mtcars)
  step_rec <- step_ratio(base_rec, denom = vars(mpg))

  baked_base <- bake(prep(base_rec, mtcars), mtcars)
  baked_step <- bake(prep(step_rec, mtcars), mtcars)

  expect_identical(baked_base, baked_step)
})
test_that("empty selection tidy method works", {
  # tidy() on a step with an empty selection returns a zero-row tibble with
  # the standard columns, both before and after prep().
  empty_rec <- step_ratio(recipe(mpg ~ ., mtcars), denom = vars(mpg))
  zero_rows <- tibble(terms = character(), denom = character(), id = character())

  expect_identical(tidy(empty_rec, number = 1), zero_rows)
  expect_identical(tidy(prep(empty_rec, mtcars), number = 1), zero_rows)
})
test_that("keep_original_cols works", {
  # Helper: prep/bake a two-column recipe with the given keep_original_cols
  # flag and return the resulting column names.
  baked_names <- function(keep) {
    recipe(~ mpg + disp, mtcars) %>%
      step_ratio(mpg, denom = denom_vars(disp), keep_original_cols = keep) %>%
      prep() %>%
      bake(new_data = NULL) %>%
      colnames()
  }

  # Dropping the originals leaves only the new ratio column.
  expect_equal(baked_names(FALSE), "mpg_o_disp")

  # Keeping them appends the ratio column after the inputs.
  expect_equal(baked_names(TRUE), c("mpg", "disp", "mpg_o_disp"))
})
test_that("keep_original_cols - can prep recipes with it missing", {
  rec <- recipe(~ mpg + disp, mtcars) %>%
    step_ratio(mpg, denom = denom_vars(disp))

  # Simulate a step created by an older recipes version in which the
  # keep_original_cols field did not exist.
  rec$steps[[1]]$keep_original_cols <- NULL

  # prep() should still succeed; whatever condition it signals about the
  # missing field is pinned via snapshot.
  expect_snapshot(
    rec <- prep(rec)
  )

  # expect_error(..., NA) asserts that bake() raises no error at all.
  expect_error(
    bake(rec, new_data = mtcars),
    NA
  )
})
test_that("printing", {
  # Snapshot both the untrained and the trained print output of a recipe
  # with a many:many ratio step; do not reformat these expressions.
  rec <- recipe(~ x1 + x2 + x3 + x4 + x5, data = ex_dat) %>%
    step_ratio(all_numeric(), denom = denom_vars(all_numeric()))

  expect_snapshot(print(rec))
  expect_snapshot(prep(rec))
})
|
3351e95d0efc16a22dfbb4e7d87f701eef4903f0 | 3d03079f7b15ad478109cb4b30e44d0a446be4f7 | /server.R | 16a926945cce9a2e6663cc11bac3047874a878d8 | [
"MIT"
] | permissive | wwzjustin/Verizon-Port-in-Number-File-Generator | 14dbd0cf2a1a54fee283e8628a9d506bcb0732c9 | af49b41c8f10ba0d729cd338af77888de14f3b63 | refs/heads/master | 2021-01-11T22:19:05.582799 | 2020-07-05T17:15:22 | 2020-07-05T17:15:22 | 78,946,445 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 1,827 | r | server.R |
# This is the server logic for a Shiny web application.
# You can find out more about building applications with Shiny here:
#
# http://shiny.rstudio.com
#
library(plyr)
library(data.table)
library(shiny)
# Server logic: preview an uploaded CSV of (toll-free number, effective date)
# pairs and generate a Verizon port-in .800 file by splicing each pair into
# fixed positions of a template record read from a local directory.
shinyServer(function(input, output) {

  # input$file1 will be NULL initially. After the user selects and uploads a
  # file, it will be a data frame with 'name', 'size', 'type', and 'datapath'
  # columns. The 'datapath' column will contain the local filenames where the
  # data can be found.
  #
  # Shared reactive: read the upload as character (so numbers/dates keep
  # leading zeros) and rename the columns. Previously the file was read
  # twice, once per output, with the rename result discarded the second time.
  # NOTE(review): if input$header is TRUE, read.csv will not produce V1/V2
  # column names and the rename has nothing to match -- confirm intended use.
  sourceData <- reactive({
    infile <- input$file1
    if (is.null(infile)) {
      return(NULL)
    }
    src <- read.csv(infile$datapath, header = input$header, sep = ",",
                    colClasses = "character")
    rename(src, c("V1" = "tf", "V2" = "effective"))
  })

  # Preview table of the uploaded file.
  output$contents <- renderTable({
    sourceData()
  })

  # Build one .800 record per uploaded row.
  datasetInput <- reactive({
    src <- sourceData()
    # BUG FIX: guard against no upload (the original reactive read
    # input$file1 without the NULL check that output$contents has).
    if (is.null(src)) {
      return(NULL)
    }

    # Hard-coded template location; consider making this configurable.
    vz_input <- "~/Dropbox/Verizon_TF_Portin/input"
    vz <- list.files(file.path(vz_input), pattern = ".800*", full.names = TRUE)
    vz <- lapply(vz, fread, sep = ",")
    vz <- rbindlist(vz)
    setnames(vz, names(vz), c("raw"))

    # Fixed-width template pieces taken from the first template record:
    # columns 1-9, 20-29 and 42-338 surround the slots for the toll-free
    # number and the effective date.
    string1 <- replicate(nrow(src), substring(vz$raw, 1, 9)[1])
    string2 <- replicate(nrow(src), substring(vz$raw, 20, 29)[1])
    string3 <- replicate(nrow(src), substring(vz$raw, 42, 338)[1])

    noquote(paste0(string1, src$tf, string2, src$effective, string3))
  })

  output$downloadData <- downloadHandler(
    # BUG FIX: input$file1 is a data frame (see comment above); pasting the
    # whole frame produced a garbled file name. Use its `name` column.
    filename = function() {
      paste0(input$file1$name, "_output.800")
    },
    content = function(file) {
      write.table(datasetInput(), quote = FALSE, row.names = FALSE,
                  col.names = FALSE, file = file)
    }
  )
})
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.